Actual source code: ex46.cxx
static char help[] = "Demonstrates calling Trilinos and then PETSc in the same program.\n\n";

/*
   Example obtained from: http://trilinos.org/docs/dev/packages/tpetra/doc/html/Tpetra_Lesson01.html
*/

#include <petscsys.h>
#include <Tpetra_DefaultPlatform.hpp>
#include <Tpetra_Version.hpp>
#include <Teuchos_GlobalMPISession.hpp> // used if Trilinos is the one that starts up MPI

// Do something with the given communicator. In this case, we just
// print Tpetra's version to stdout on Process 0 in the given
// communicator.
void exampleRoutine(const Teuchos::RCP<const Teuchos::Comm<int>> &comm)
{
  if (comm->getRank() == 0) {
    // On (MPI) Process 0, print out the Tpetra software version.
    std::cout << Tpetra::version() << std::endl << std::endl;
  }
}

int main(int argc, char **argv)
{
  // These "using" declarations make the code more concise, in that
  // you don't have to write the namespace along with the class or
  // object name. This is especially helpful with commonly used
  // things like std::endl.
  using std::cout;
  using std::endl;
  // Start up MPI, if using MPI. Trilinos doesn't have to be built
  // with MPI; it's called a "serial" build if you build without MPI.
  // GlobalMPISession hides this implementation detail.
  //
  // Note the third argument. If you pass GlobalMPISession the
  // address of an std::ostream, it will print a one-line status
  // message with the rank on each MPI process. This may be
  // undesirable if running with a large number of MPI ranks.
  // You can avoid printing anything here by passing in either
  // NULL or the address of a Teuchos::oblackholestream.
  Teuchos::GlobalMPISession mpiSession(&argc, &argv, NULL);
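  // Illustrative alternative (not part of the original example): silence
  // the status line explicitly with a Teuchos::oblackholestream, which
  // requires #include <Teuchos_oblackholestream.hpp>:
  //
  //   Teuchos::oblackholestream blackHole;
  //   Teuchos::GlobalMPISession mpiSession(&argc, &argv, &blackHole);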
  // Get a pointer to the communicator object representing
  // MPI_COMM_WORLD. getDefaultPlatform().getComm() doesn't create a
  // new object every time you call it; it just returns the same
  // communicator each time. Thus, you can call it anywhere and get
  // the same communicator. (This is handy if you don't want to pass
  // a communicator around everywhere, though it's always better to
  // parameterize your algorithms on the communicator.)
  //
  // "Tpetra::DefaultPlatform" knows whether or not we built with MPI
  // support. If we didn't build with MPI, we'll get a "communicator"
  // with size 1, whose only process has rank 0.
  Teuchos::RCP<const Teuchos::Comm<int>> comm = Tpetra::DefaultPlatform::getDefaultPlatform().getComm();

  PetscFunctionBeginUser;
  PetscCall(PetscInitialize(&argc, &argv, (char *)0, help));
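  // PetscInitialize() detects that MPI is already running (GlobalMPISession
  // called MPI_Init() above), so it does not initialize MPI a second time.
  // Correspondingly, PetscFinalize() below will not call MPI_Finalize();
  // that is left to the GlobalMPISession destructor.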

  // Get my process' rank, and the total number of processes.
  // Equivalent to MPI_Comm_rank and MPI_Comm_size, respectively.
  const int myRank = comm->getRank();
  const int size   = comm->getSize();
  if (myRank == 0) cout << "Total number of processes: " << size << endl;
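  // As an illustrative sketch (not in the original example), the same
  // information is available through PETSc's world communicator; since
  // both wrap MPI_COMM_WORLD, the two sizes must agree:
  //
  //   PetscMPIInt petscSize;
  //   PetscCallMPI(MPI_Comm_size(PETSC_COMM_WORLD, &petscSize));
  //   if (myRank == 0) cout << "PETSc also sees " << petscSize << " processes" << endl;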
  // Do something with the new communicator.
  exampleRoutine(comm);
  // This tells the Trilinos test framework that the test passed.
  if (myRank == 0) cout << "End Result: TEST PASSED" << endl;
  // PETSc must still be finalized explicitly, but MPI itself needs no
  // attention here: GlobalMPISession calls MPI_Finalize() in its
  // destructor, if appropriate, when mpiSession goes out of scope at
  // the end of main().
  PetscCall(PetscFinalize());
  return 0;
}

/*TEST

   build:
     requires: trilinos

   test:
     nsize: 3
     filter: grep -v "Tpetra in Trilinos"

TEST*/