Example #1
  //================================================================
  //================================================================
  // RN_20091215: This needs to be called only once per time step
  // in the beginning to set up the problem.
  //================================================================
  void FC_FUNC(inittrilinos,INITTRILINOS) (int& bandwidth, int& mySize,
	       int* myIndicies, double* myX, double* myY, double* myZ,
	       int* mpi_comm_f) {
// mpi_comm_f: CISM's Fortran MPI communicator

#ifdef GLIMMER_MPI
    // Make sure the MPI_Init in Fortran is recognized by C++.
    // We used to call an extra MPI_Init if (!flag), but the behavior of doing so is uncertain,
    // especially if CISM's MPI communicator is a subset of MPI_COMM_WORLD (as can be the case in CESM).
    // Thus, for now, we die with an error message if C++ perceives MPI to be uninitialized.
    // If this causes problems (e.g., if certain MPI implementations seem not to recognize 
    // that MPI has already been initialized), then we will revisit how to handle this.
    int flag;
    MPI_Initialized(&flag);
    if (!flag) {
      cout << "ERROR in inittrilinos: MPI not initialized according to C++ code" << endl;
      exit(1);
    }
    MPI_Comm mpi_comm_c = MPI_Comm_f2c(*mpi_comm_f);
    Epetra_MpiComm comm(mpi_comm_c);
    Teuchos::MpiComm<int> tcomm(Teuchos::opaqueWrapper(mpi_comm_c));
#else
    Epetra_SerialComm comm;
    Teuchos::SerialComm<int> tcomm;
#endif

    Teuchos::RCP<const Epetra_Map> rowMap = 
      Teuchos::rcp(new Epetra_Map(-1,mySize,myIndicies,1,comm) );

    TEUCHOS_TEST_FOR_EXCEPTION(!rowMap->UniqueGIDs(), std::logic_error,
       "Error in inittrilinos: the myIndicies array must have unique entries"
        << " across all processors.");

    // Diagnostic output for partitioning
    int minSize, maxSize;
    comm.MinAll(&mySize, &minSize, 1);
    comm.MaxAll(&mySize, &maxSize, 1);
    if (comm.MyPID()==0) 
      cout << "\nPartition Info in init_trilinos: Total nodes = " << rowMap->NumGlobalElements()
           << "  Max = " << maxSize << "  Min = " << minSize 
           << "  Ave = " << rowMap->NumGlobalElements() / comm.NumProc() << endl;

    soln = Teuchos::rcp(new Epetra_Vector(*rowMap));

    // Read parameter list once
    try { 
       pl = Teuchos::rcp(new Teuchos::ParameterList("Trilinos Options"));
       Teuchos::updateParametersFromXmlFileAndBroadcast("trilinosOptions.xml", pl.ptr(), tcomm);

       Teuchos::ParameterList validPL("Valid List");
       validPL.sublist("Stratimikos"); validPL.sublist("Piro");
       pl->validateParameters(validPL, 0);
    }
    catch (std::exception& e) {
      cout << "\nXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n" 
           << e.what() << "\nExiting: Invalid trilinosOptions.xml file."
           << "\nXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" << endl;
      exit(1);
    }
    catch (...) {
      cout << "\nXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n" 
           << "\nExiting: Invalid trilinosOptions.xml file."
           << "\nXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" << endl;
      exit(1);
    }

    bool success = true;
    try { 
      // Pass the nodal coordinates to ML for repartitioning
      // (important when running on hundreds of processors or more)
      if (pl->sublist("Stratimikos").isParameter("Preconditioner Type")) {
         if ("ML" == pl->sublist("Stratimikos").get<string>("Preconditioner Type")) {
           Teuchos::ParameterList& mlList =
              pl->sublist("Stratimikos").sublist("Preconditioner Types").sublist("ML").sublist("ML Settings");
           mlList.set("x-coordinates",myX);
           mlList.set("y-coordinates",myY);
           mlList.set("z-coordinates",myZ);
           mlList.set("PDE equations", 1);
         }
      }

      out = Teuchos::VerboseObjectBase::getDefaultOStream();

      // Reset the solver counters each time step; remove these lines to accumulate statistics over the entire run
      linearSolveIters_total = 0;
      linearSolveCount=0;
      linearSolveSuccessCount = 0;

      // Create an interface that holds a CrsMatrix instance and some useful methods.
      interface = Teuchos::rcp(new TrilinosMatrix_Interface(rowMap, bandwidth, comm));

      Stratimikos::DefaultLinearSolverBuilder linearSolverBuilder;
      linearSolverBuilder.setParameterList(Teuchos::sublist(pl, "Stratimikos"));
      lowsFactory = linearSolverBuilder.createLinearSolveStrategy("");
      lowsFactory->setOStream(out);
      lowsFactory->setVerbLevel(Teuchos::VERB_LOW);

      lows=Teuchos::null;
      thyraOper=Teuchos::null;
    }
    TEUCHOS_STANDARD_CATCH_STATEMENTS(true, std::cerr, success);
    if (!success) exit(1);
  }
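
The options file read above, trilinosOptions.xml, may contain only the "Stratimikos" and "Piro" sublists at its top level; anything else fails the validateParameters check. The sketch below builds an equivalent Teuchos::ParameterList programmatically and writes it out in the expected XML format; the particular solver and preconditioner choices are illustrative assumptions, not taken from the example above.

#include "Teuchos_RCP.hpp"
#include "Teuchos_ParameterList.hpp"
#include "Teuchos_XMLParameterListHelpers.hpp"

// Sketch only: builds a minimal parameter list of the kind inittrilinos reads
// from trilinosOptions.xml. The solver/preconditioner values are illustrative.
Teuchos::RCP<Teuchos::ParameterList> buildExampleTrilinosOptions()
{
  Teuchos::RCP<Teuchos::ParameterList> pl =
    Teuchos::rcp(new Teuchos::ParameterList("Trilinos Options"));

  Teuchos::ParameterList& strat = pl->sublist("Stratimikos");
  strat.set("Linear Solver Type", "Belos");   // assumed solver choice
  strat.set("Preconditioner Type", "ML");     // enables the ML coordinate setup above

  // Writing the list out produces a file in the format inittrilinos expects.
  Teuchos::writeParameterListToXmlFile(*pl, "trilinosOptions.xml");
  return pl;
}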
Example #2
#include "Epetra_ConfigDefs.h"
#ifdef HAVE_MPI
#include "mpi.h"
#include "Epetra_MpiComm.h"
#else
#include "Epetra_SerialComm.h"
#endif
#include <cstdlib>
#include <iostream>
using std::cout;
using std::endl;

int main(int argc, char *argv[])
{

#ifdef HAVE_MPI
  MPI_Init(&argc, &argv);
  // define an Epetra communicator
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  // get the proc ID of this process
  int MyPID = Comm.MyPID();
  
  // get the total number of processes
  int NumProc = Comm.NumProc();
  
  // output some information to std output
  cout << Comm << endl;
  
  // ======================== //
  // now some basic MPI calls //
  // ------------------------ //
  
  int    ivalue;
  double dvalue, dvalue2;
  double* dvalues;  dvalues  = new double[NumProc];
  double* dvalues2; dvalues2 = new double[NumProc];
  int root = 0;
  
  // equivalent to MPI_Barrier
  
  Comm.Barrier();
   
  if (MyPID == root) dvalue = 12.0;

  // On input, the root processor contains the list of values
  // (in this case, a single value). On exit, all processes will
  // have the same list of values. Note that all values must be allocated
  // before the broadcast
  
  // equivalent to  MPI_Broadcast
    
  Comm.Broadcast(&dvalue, 1, root);

  // as before, but with an integer value; C++ overload resolution selects
  // the appropriate interface from the argument types, so no explicit
  // data-type argument is needed
  
  if (MyPID == root) ivalue = 12;
  Comm.Broadcast(&ivalue, 1, root);

  // equivalent to MPI_Allgather: each process contributes one value,
  // and every process receives the full list

  dvalues[0] = 10.0 * MyPID;   // initialize the local contribution before gathering
  Comm.GatherAll(dvalues, dvalues2, 1);

  // equivalent to MPI_Allreduce with MPI_SUM

  dvalue = 1.0*MyPID;

  Comm.SumAll( &dvalue, dvalues, 1);

  // equivalent to MPI_Allreduce with MPI_MAX

  Comm.MaxAll( &dvalue, dvalues, 1);

  // equivalent to MPI_Scan with MPI_SUM

  dvalue = 1.0 * MyPID;
  
  Comm.ScanSum(&dvalue, &dvalue2, 1);

  cout << "On proc " << MyPID << " dvalue2  = " << dvalue2 << endl;
  
  delete[] dvalues;
  delete[] dvalues2;

  // ======================= //
  // Finalize MPI and return //
  // ----------------------- //
    
#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return( EXIT_SUCCESS );
  
} /* main */
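
Because Broadcast, GatherAll, SumAll, MaxAll, and ScanSum are all virtual methods of the abstract Epetra_Comm base class, code written against that interface runs unchanged with either Epetra_MpiComm or Epetra_SerialComm. A minimal sketch; the globalAverage helper below is an illustration, not part of the tutorial.

#include "Epetra_Comm.h"

// Sketch only: works with any Epetra_Comm implementation (MPI or serial).
double globalAverage(const Epetra_Comm& Comm, double myValue)
{
  double sum = 0.0;
  Comm.SumAll(&myValue, &sum, 1);   // equivalent to MPI_Allreduce with MPI_SUM
  return sum / Comm.NumProc();
}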