// lesson02_read_modify_vec.cpp
// Epetra lesson 02: create an Epetra_Vector, then read and modify its entries.
// (Listing recovered from the Doxygen "Epetra Package Browser" documentation page.)
// This defines useful macros like HAVE_MPI, which is defined if and
// only if Epetra was built with MPI enabled.
#include <Epetra_config.h>

#ifdef HAVE_MPI
#  include <mpi.h>
// Epetra's wrapper for MPI_Comm. This header file only exists if
// Epetra was built with MPI enabled.
#  include <Epetra_MpiComm.h>
#else
#  include <Epetra_SerialComm.h>
#endif // HAVE_MPI

#include <Epetra_Map.h>
#include <Epetra_Vector.h>
#include <Epetra_Version.h>

// Standard library: std::cout, std::ostream, and std::endl are used below
// but were previously available only transitively through Epetra headers.
#include <iostream>

26 void
28  std::ostream& out)
29 {
30  using std::endl;
31 
32  // Print out the Epetra software version.
33  if (comm.MyPID () == 0) {
34  out << Epetra_Version () << endl << endl;
35  }
36 
37  // The type of global indices. You could just set this to int,
38  // but we want the example to work for Epetra64 as well.
39 #ifdef EPETRA_NO_32BIT_GLOBAL_INDICES
40  // Epetra was compiled only with 64-bit global index support, so use
41  // 64-bit global indices.
42  typedef long long global_ordinal_type;
43 #else
44  // Epetra was compiled with 32-bit global index support. If
45  // EPETRA_NO_64BIT_GLOBAL_INDICES is defined, it does not also
46  // support 64-bit indices.
47  typedef int global_ordinal_type;
48 #endif // EPETRA_NO_32BIT_GLOBAL_INDICES
49 
51  // Create an Epetra_Map
53 
54  // The total (global, i.e., over all MPI processes) number of
55  // entries in the Map.
56  //
57  // For this example, we scale the global number of entries in the
58  // Map with the number of MPI processes. That way, you can run this
59  // example with any number of MPI processes and every process will
60  // still have a positive number of entries.
61  const global_ordinal_type numGlobalEntries = comm.NumProc () * 5;
62 
63  // Index base of the Map. We choose zero-based (C-style) indexing.
64  const global_ordinal_type indexBase = 0;
65 
66  // Construct a Map that puts the same number of equations on each
67  // MPI process.
68  Epetra_Map contigMap (numGlobalEntries, indexBase, comm);
69 
71  // Create an Epetra_Vector
73 
74  // Create a Vector with the Map we created above.
75  // This version of the constructor will fill in the vector with zeros.
76  Epetra_Vector x (contigMap);
77 
79  // Fill the Vector with a single number, or with random numbers
81 
82  // Set all entries of x to 42.0.
83  (void) x.PutScalar (42.0);
84 
85  // Print the norm of x.
86  double theNorm = 0.0;
87  (void) x.Norm2 (&theNorm);
88  out << "Norm of x (all entries are 42.0): " << theNorm << endl;
89 
90  // Set the entries of x to (pseudo)random numbers. Please don't
91  // consider this a good parallel pseudorandom number generator.
92  (void) x.Random ();
93 
94  // Print the norm of x.
95  (void) x.Norm2 (&theNorm);
96  out << "Norm of x (random numbers): " << theNorm << endl;
97 
99  // Read the entries of the Vector
101 
102  {
103  const int localLength = x.MyLength ();
104 
105  // Count the local number of entries less than 0.5.
106  // Use local indices to access the entries of x_data.
107  int localCount = 0;
108  for (int localIndex = 0; localIndex < localLength; ++localIndex) {
109  if (x[localIndex] < 0.5) {
110  ++localCount;
111  }
112  }
113 
114  int globalCount = 0;
115  (void) comm.SumAll (&localCount, &globalCount, 1);
116 
117  // Find the total number of entries less than 0.5,
118  // over all processes in the Vector's communicator.
119  out << "x has " << globalCount << " entr"
120  << (globalCount != 1 ? "ies" : "y")
121  << " less than 0.5." << endl;
122  }
123 
125  // Modify the entries of the Vector
127 
128  {
129  // Use local indices to access the entries of x_data.
130  const int localLength = x.MyLength ();
131  for (int localIndex = 0; localIndex < localLength; ++localIndex) {
132  // Add the value of the local index to every entry of x.
133  x[localIndex] += static_cast<double> (localIndex);
134  }
135 
136  // Print the norm of x.
137  theNorm = 0.0;
138  (void) x.Norm2 (&theNorm);
139  out << "Norm of x (modified random numbers): " << theNorm << endl;
140  }
141 }
142 
//
// The same main() driver routine as in the previous Epetra lesson.
//
146 int
147 main (int argc, char *argv[])
148 {
149  using std::cout;
150  using std::endl;
151 
152 #ifdef HAVE_MPI
153  MPI_Init (&argc, &argv);
154  Epetra_MpiComm comm (MPI_COMM_WORLD);
155 #else
156  Epetra_SerialComm comm;
157 #endif // HAVE_MPI
158 
159  if (comm.MyPID () == 0) {
160  cout << "Total number of processes: " << comm.NumProc () << endl;
161  }
162 
163  // Do something with the new Epetra communicator.
164  exampleRoutine (comm, cout);
165 
166  // This tells the Trilinos test framework that the test passed.
167  if (comm.MyPID () == 0) {
168  cout << "End Result: TEST PASSED" << endl;
169  }
170 
171 #ifdef HAVE_MPI
172  // Since you called MPI_Init, you are responsible for calling
173  // MPI_Finalize after you are done using MPI.
174  (void) MPI_Finalize ();
175 #endif // HAVE_MPI
176 
177  return 0;
178 }
// ---------------------------------------------------------------------------
// Doxygen cross-reference notes captured with the page listing (for readers
// without access to the rendered documentation):
//   std::string Epetra_Version()
//   Epetra_Comm: The Epetra Communication Abstract Base Class.
//     (Definition: Epetra_Comm.h:73)
//     virtual int NumProc() const = 0  -- Returns total number of processes.
//     virtual int MyPID() const = 0    -- Return my process ID.
//     virtual int SumAll(double *PartialSums, double *GlobalSums,
//                        int Count) const = 0
//       -- Epetra_Comm Global Sum function.
//   Epetra_Map: A class for partitioning vectors and matrices.
//     (Definition: Epetra_Map.h:119)
//   Epetra_MpiComm: The Epetra MPI Communication Class.
//     int NumProc() const  -- Returns total number of processes.
//     int MyPID() const    -- Return my process ID.
//   Epetra_Vector members used here:
//     int MyLength() const -- Returns the local vector length on the calling
//       processor of vectors in the multi-vector.
//     int Random()         -- Set multi-vector values to random numbers.
//     int Norm2(double *Result) const -- Compute 2-norm of each vector in
//       the multi-vector.
//     int PutScalar(double ScalarConstant) -- Initialize all values in a
//       multi-vector with constant value.
//   Epetra_SerialComm: The Epetra Serial Communication Class.
//   Epetra_Vector: A class for constructing and using dense vectors on a
//     parallel computer.
//   int main(int argc, char *argv[])
//   void exampleRoutine(const Epetra_Comm &comm, std::ostream &out)
//   long long global_ordinal_type
// ---------------------------------------------------------------------------