Example #1
 // -- FACTORY METHODS --
 static Epetra_Map epetraMap( DataMap const& dmap )
 {
     std::vector<int> e( dmap.nMyElements() );
     std::copy( dmap.myGlobalElements().begin(),
                dmap.myGlobalElements().end(),
                e.begin() );
     return Epetra_Map( -1, dmap.nMyElements(), e.data(), 0, Epetra_MpiComm( dmap.comm() ) );
 }
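Passing -1 as the first argument lets Epetra compute the global element count by summing the local counts over all ranks. A minimal stand-alone sketch of the same idiom follows; the helper name make_map and the raw MPI communicator argument are assumptions for illustration, not part of the original example.

#include <vector>
#include <mpi.h>
#include <Epetra_MpiComm.h>
#include <Epetra_Map.h>

// Build a map from an explicit list of locally owned global IDs;
// -1 lets Epetra derive the global element count from the local sizes.
Epetra_Map make_map( const std::vector<int>& my_gids, MPI_Comm raw_comm )
{
    Epetra_MpiComm comm( raw_comm );
    return Epetra_Map( -1, static_cast<int>( my_gids.size() ),
                       my_gids.data(), /*IndexBase*/ 0, comm );
}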
Example #2
VectorEpetra<T>::VectorEpetra( )
    :
    super(),
#ifdef FEELPP_HAS_MPI
    M_emap( Epetra_BlockMap( -1, 0, 0, Epetra_MpiComm( super::comm() ) ) ),
    M_vec( M_emap ) // false (zeroOut)?
#else
    M_emap( Epetra_BlockMap( -1, 0, 0, Epetra_SerialComm ) ),
    M_vec( M_emap )
#endif
{
}
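The trailing question in the comment most likely refers to the optional zeroOut argument of Epetra_Vector's map constructor, which zero-fills the vector by default (the exact type of M_vec is not shown here). A minimal sketch, assuming an already-built Epetra_BlockMap:

#include <Epetra_BlockMap.h>
#include <Epetra_Vector.h>

void make_vectors( const Epetra_BlockMap& emap )
{
    Epetra_Vector zeroed( emap );       // zeroOut defaults to true: filled with 0.0
    Epetra_Vector raw( emap, false );   // false: entries are left uninitialized
}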
Example #3
void EpetraMatrix<T>::init (const numeric_index_type m,
                            const numeric_index_type n,
                            const numeric_index_type m_l,
                            const numeric_index_type n_l,
                            const numeric_index_type nnz,
                            const numeric_index_type noz,
                            const numeric_index_type /* blocksize */)
{
  if ((m==0) || (n==0))
    return;

  {
    // Clear initialized matrices
    if (this->initialized())
      this->clear();

    libmesh_assert (!this->_mat);
    libmesh_assert (!this->_map);

    this->_is_initialized = true;
  }

  // error checking
#ifndef NDEBUG
  {
    libmesh_assert_equal_to (n, m);
    libmesh_assert_equal_to (n_l, m_l);

    numeric_index_type
      summed_m_l = m_l,
      summed_n_l = n_l;

    this->comm().sum (summed_m_l);
    this->comm().sum (summed_n_l);

    libmesh_assert_equal_to (m, summed_m_l);
    libmesh_assert_equal_to (n, summed_n_l);
  }
#endif

  // build a map defining the data distribution
  _map = new Epetra_Map (static_cast<int>(m),
                         m_l,
                         0,
                         Epetra_MpiComm (this->comm().get()));

  libmesh_assert_equal_to (static_cast<numeric_index_type>(_map->NumGlobalPoints()), m);
  libmesh_assert_equal_to (static_cast<numeric_index_type>(_map->MaxAllGID()+1), m);

  _mat = new Epetra_FECrsMatrix (Copy, *_map, nnz + noz);
}
Example #4
void EpetraMatrix<T>::init (const unsigned int m,
			    const unsigned int n,
			    const unsigned int m_l,
			    const unsigned int n_l,
			    const unsigned int nnz,
			    const unsigned int noz)
{
  if ((m==0) || (n==0))
    return;

  {
    // Clear initialized matrices
    if (this->initialized())
      this->clear();

    libmesh_assert (this->_mat == NULL);
    libmesh_assert (this->_map == NULL);

    this->_is_initialized = true;
  }

  // error checking
#ifndef NDEBUG
  {
    libmesh_assert (n == m);
    libmesh_assert (n_l == m_l);

    unsigned int
      summed_m_l = m_l,
      summed_n_l = n_l;

    Parallel::sum (summed_m_l);
    Parallel::sum (summed_n_l);

    libmesh_assert (m == summed_m_l);
    libmesh_assert (n == summed_n_l);
  }
#endif

  // build a map defining the data distribution
  _map = new Epetra_Map (m,
                         m_l,
                         0,
                         Epetra_MpiComm (libMesh::COMM_WORLD));

  libmesh_assert (static_cast<unsigned int>(_map->NumGlobalPoints()) == m);
  libmesh_assert (static_cast<unsigned int>(_map->MaxAllGID()+1) == m);

  _mat = new Epetra_FECrsMatrix (Copy, *_map, nnz + noz);
}
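Both overloads finish by allocating an Epetra_FECrsMatrix with an estimated number of nonzeros per row (nnz + noz). A minimal stand-alone sketch of that allocate-insert-assemble pattern; the 100-row size and the diagonal values are illustrative assumptions.

#include <mpi.h>
#include <Epetra_MpiComm.h>
#include <Epetra_Map.h>
#include <Epetra_FECrsMatrix.h>

void assemble_identity( MPI_Comm raw_comm )
{
    Epetra_MpiComm comm( raw_comm );
    Epetra_Map row_map( 100, 0, comm );            // 100 global rows, split evenly
    Epetra_FECrsMatrix mat( Copy, row_map, 1 );    // estimate: 1 nonzero per row

    for ( int lid = 0; lid < row_map.NumMyElements(); ++lid )
    {
        int gid = row_map.GID( lid );
        double one = 1.0;
        mat.InsertGlobalValues( gid, 1, &one, &gid );  // diagonal entry
    }
    mat.GlobalAssemble();   // exchange off-process contributions and FillComplete
}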
Example #5
 static Epetra_Map epetraMapStatic( DataMap const& dmap )
 {
     return Epetra_Map( dmap.nGlobalElements(), dmap.nMyElements(), 0, Epetra_MpiComm( dmap.comm() ) );
 }
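This constructor builds a contiguous (linear) map: each rank owns nMyElements consecutive GIDs, and the global count is given explicitly. A minimal sketch of the same constructor, here with -1 so Epetra sums the local sizes; the block size of 10 is an illustrative assumption.

#include <iostream>
#include <mpi.h>
#include <Epetra_MpiComm.h>
#include <Epetra_Map.h>

void print_ownership( MPI_Comm raw_comm )
{
    Epetra_MpiComm comm( raw_comm );
    // each rank contributes 10 consecutive GIDs; -1 lets Epetra sum the global count
    Epetra_Map map( -1, 10, 0, comm );
    std::cout << "rank " << comm.MyPID() << " owns GIDs "
              << map.MinMyGID() << " .. " << map.MaxMyGID()
              << " of " << map.NumGlobalElements() << std::endl;
}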
Example #6
void EpetraMatrix<T>::update_sparsity_pattern (const SparsityPattern::Graph &sparsity_pattern)
{
  // clear data, start over
  this->clear ();

  // big trouble if this fails!
  libmesh_assert (this->_dof_map != NULL);

  const unsigned int n_rows = sparsity_pattern.size();

  const unsigned int m   = this->_dof_map->n_dofs();
  const unsigned int n   = m;
  const unsigned int n_l = this->_dof_map->n_dofs_on_processor(libMesh::processor_id());
  const unsigned int m_l = n_l;

  // error checking
#ifndef NDEBUG
  {
    libmesh_assert (n == m);
    libmesh_assert (n_l == m_l);

    unsigned int
      summed_m_l = m_l,
      summed_n_l = n_l;

    Parallel::sum (summed_m_l);
    Parallel::sum (summed_n_l);

    libmesh_assert (m == summed_m_l);
    libmesh_assert (n == summed_n_l);
  }
#endif

  // build a map defining the data distribution
  _map = new Epetra_Map (m,
                         m_l,
                         0,
                         Epetra_MpiComm (libMesh::COMM_WORLD));

  libmesh_assert (static_cast<unsigned int>(_map->NumGlobalPoints()) == m);
  libmesh_assert (static_cast<unsigned int>(_map->MaxAllGID()+1) == m);

  const std::vector<unsigned int>& n_nz = this->_dof_map->get_n_nz();
  const std::vector<unsigned int>& n_oz = this->_dof_map->get_n_oz();

   // Make sure the sparsity pattern isn't empty
  libmesh_assert (n_nz.size() == n_l);
  libmesh_assert (n_oz.size() == n_l);

  // Epetra wants the total number of nonzeros, both local and remote.
  std::vector<int> n_nz_tot; /**/ n_nz_tot.reserve(n_nz.size());

  for (unsigned int i=0; i<n_nz.size(); i++)
    n_nz_tot.push_back(std::min(n_nz[i] + n_oz[i], n));

  if (m==0)
    return;

  _graph = new Epetra_CrsGraph(Copy, *_map, &n_nz_tot[0]);

  // Tell the matrix about its structure.  Initialize it
  // to zero.
  for (unsigned int i=0; i<n_rows; i++)
    _graph->InsertGlobalIndices(_graph->GRID(i),
                                sparsity_pattern[i].size(),
                                const_cast<int *>((const int *)&sparsity_pattern[i][0]));

  _graph->FillComplete();

  //Initialize the matrix
  libmesh_assert (!this->initialized());
  this->init ();
  libmesh_assert (this->initialized());
}
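Once the Epetra_CrsGraph has been filled and FillComplete() has been called, a matrix can be built directly on that fixed structure. A minimal stand-alone sketch of the graph-then-matrix idiom; the 4-row, diagonal-only pattern is an illustrative assumption.

#include <mpi.h>
#include <Epetra_MpiComm.h>
#include <Epetra_Map.h>
#include <Epetra_CrsGraph.h>
#include <Epetra_FECrsMatrix.h>

void matrix_from_graph( MPI_Comm raw_comm )
{
    Epetra_MpiComm comm( raw_comm );
    Epetra_Map map( 4, 0, comm );             // 4 global rows
    Epetra_CrsGraph graph( Copy, map, 1 );    // 1 entry per row (diagonal only)

    for ( int lid = 0; lid < map.NumMyElements(); ++lid )
    {
        int gid = map.GID( lid );
        graph.InsertGlobalIndices( gid, 1, &gid );
    }
    graph.FillComplete();

    // The matrix inherits the graph's fixed sparsity pattern.
    Epetra_FECrsMatrix mat( Copy, graph );
    mat.PutScalar( 0.0 );
}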
Example #7
//=======================================================
EpetraExt_HypreIJMatrix::EpetraExt_HypreIJMatrix(HYPRE_IJMatrix matrix)
  : Epetra_BasicRowMatrix(Epetra_MpiComm(hypre_IJMatrixComm(matrix))),
    Matrix_(matrix),
    ParMatrix_(0),
    NumMyRows_(-1),
    NumGlobalRows_(-1),
    NumGlobalCols_(-1),
    MyRowStart_(-1),
    MyRowEnd_(-1),
    MatType_(-1), 
    TransposeSolve_(false),
    SolveOrPrec_(Solver)
{
  IsSolverSetup_ = new bool[1];
  IsPrecondSetup_ = new bool[1];
  IsSolverSetup_[0] = false;
  IsPrecondSetup_[0] = false;
  // Initialize default values for global variables
  int ierr = 0;
  ierr += InitializeDefaults();
  TEUCHOS_TEST_FOR_EXCEPTION(ierr != 0, std::logic_error, "Couldn't initialize default values.");
  
  // Create array of global row ids
  Teuchos::Array<int> GlobalRowIDs;  GlobalRowIDs.resize(NumMyRows_);
  
  for (int i = MyRowStart_; i <= MyRowEnd_; i++) {
    GlobalRowIDs[i-MyRowStart_] = i;
  }
  
  // Create array of global column ids
  int new_value = 0; int entries = 0;
  std::set<int> Columns;
  int num_entries;
  double *values;
  int *indices;
  for(int i = 0; i < NumMyRows_; i++){
    ierr += HYPRE_ParCSRMatrixGetRow(ParMatrix_, i+MyRowStart_, &num_entries, &indices, &values);
    ierr += HYPRE_ParCSRMatrixRestoreRow(ParMatrix_, i+MyRowStart_,&num_entries,&indices,&values);
    TEUCHOS_TEST_FOR_EXCEPTION(ierr != 0, std::logic_error, "Couldn't get row of matrix.");
    entries = num_entries;
    for(int j = 0; j < num_entries; j++){
      // Insert column ids from this row into set
      new_value = indices[j];
      Columns.insert(new_value);
    }
  }
  int NumMyCols = Columns.size(); 
  Teuchos::Array<int> GlobalColIDs; GlobalColIDs.resize(NumMyCols);
  
  std::set<int>::iterator it;
  int counter = 0;
  for (it = Columns.begin(); it != Columns.end(); it++) {
    // Get column ids in order
    GlobalColIDs[counter] = *it;
    counter = counter + 1;
  }
  //printf("Proc[%d] Rows from %d to %d, num = %d\n", Comm().MyPID(), MyRowStart_,MyRowEnd_, NumMyRows_);
  
  Epetra_Map RowMap(-1, NumMyRows_, &GlobalRowIDs[0], 0, Comm());
  Epetra_Map ColMap(-1, NumMyCols, &GlobalColIDs[0], 0, Comm());
  
  //Need to call SetMaps()
  SetMaps(RowMap, ColMap);
 
  // Get an MPI_Comm to create vectors.
  // The vectors will be reused in Multiply(), so that they aren't recreated every time.   
  MPI_Comm comm;
  ierr += HYPRE_ParCSRMatrixGetComm(ParMatrix_, &comm);
  TEUCHOS_TEST_FOR_EXCEPTION(ierr != 0, std::logic_error, "Couldn't get communicator from Hypre Matrix.");
  
  ierr += HYPRE_IJVectorCreate(comm, MyRowStart_, MyRowEnd_, &X_hypre);
  ierr += HYPRE_IJVectorSetObjectType(X_hypre, HYPRE_PARCSR);
  ierr += HYPRE_IJVectorInitialize(X_hypre);
  ierr += HYPRE_IJVectorAssemble(X_hypre);
  ierr += HYPRE_IJVectorGetObject(X_hypre, (void**) &par_x);
  TEUCHOS_TEST_FOR_EXCEPTION(ierr != 0, std::logic_error, "Couldn't create Hypre X vector.");

  ierr += HYPRE_IJVectorCreate(comm, MyRowStart_, MyRowEnd_, &Y_hypre);
  ierr += HYPRE_IJVectorSetObjectType(Y_hypre, HYPRE_PARCSR);
  ierr += HYPRE_IJVectorInitialize(Y_hypre);
  ierr += HYPRE_IJVectorAssemble(Y_hypre);
  ierr += HYPRE_IJVectorGetObject(Y_hypre, (void**) &par_y);
  TEUCHOS_TEST_FOR_EXCEPTION(ierr != 0, std::logic_error, "Couldn't create Hypre Y vector.");

  x_vec = (hypre_ParVector *) hypre_IJVectorObject(((hypre_IJVector *) X_hypre));
  x_local = hypre_ParVectorLocalVector(x_vec);

  y_vec = (hypre_ParVector *) hypre_IJVectorObject(((hypre_IJVector *) Y_hypre));
  y_local = hypre_ParVectorLocalVector(y_vec);

  SolverCreatePtr_ = &EpetraExt_HypreIJMatrix::Hypre_ParCSRPCGCreate;
  SolverDestroyPtr_ = &HYPRE_ParCSRPCGDestroy;
  SolverSetupPtr_ = &HYPRE_ParCSRPCGSetup;
  SolverSolvePtr_ = &HYPRE_ParCSRPCGSolve;
  SolverPrecondPtr_ = &HYPRE_ParCSRPCGSetPrecond;
  CreateSolver();

  PrecondCreatePtr_ = &EpetraExt_HypreIJMatrix::Hypre_EuclidCreate;
  PrecondDestroyPtr_ = &HYPRE_EuclidDestroy;
  PrecondSetupPtr_ = &HYPRE_EuclidSetup;
  PrecondSolvePtr_ = &HYPRE_EuclidSolve;
  CreatePrecond();
  ComputeNumericConstants();
  ComputeStructureConstants();
} //EpetraExt_HYPREIJMatrix(Hypre_IJMatrix) Constructor
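The constructor above expects an already-assembled HYPRE_IJMatrix. A minimal sketch of how such a matrix is typically built before being passed in; the diagonal-only content and the helper name are illustrative assumptions, and the plain int indices assume a hypre build where HYPRE_Int/HYPRE_BigInt are int (the same assumption the example itself makes).

#include <mpi.h>
#include <HYPRE.h>
#include <HYPRE_IJ_mv.h>

HYPRE_IJMatrix make_diagonal_matrix( MPI_Comm comm, int ilower, int iupper )
{
    HYPRE_IJMatrix A;
    HYPRE_IJMatrixCreate( comm, ilower, iupper, ilower, iupper, &A );
    HYPRE_IJMatrixSetObjectType( A, HYPRE_PARCSR );
    HYPRE_IJMatrixInitialize( A );

    for ( int row = ilower; row <= iupper; ++row )
    {
        int ncols = 1;
        double value = 1.0;
        HYPRE_IJMatrixSetValues( A, 1, &ncols, &row, &row, &value );
    }
    HYPRE_IJMatrixAssemble( A );   // must be assembled before the wrapper reads its rows
    return A;
}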
Example #8
void test ()
{
  int n_proc; 
#ifdef HAVE_MPI
  MPI_Comm_size(MPI_COMM_WORLD, &n_proc);
#else
  n_proc = 1;
#endif
  int my_id;
#ifdef HAVE_MPI
  MPI_Comm_rank(MPI_COMM_WORLD, &my_id);
#else
  my_id = 0;
#endif

  //All processes should own 10 entries
  const int entries_per_process = 10;

  const long long begin_index = ((long long)my_id)*entries_per_process;
  const long long end_index = ((long long)(my_id+1))*entries_per_process;

  const long long local_begin = std::max(0LL, begin_index-entries_per_process/2);
  const long long local_end = entries_per_process*n_proc;

  //create Epetra maps
  std::vector<unsigned long long> ghosted_indices;
  ghosted_indices.reserve(local_end-local_begin);
  for (long long i = local_begin; i< local_end; ++i)
    ghosted_indices.push_back(i);
  Epetra_Map map_ghosted
    (-1LL,
    local_end-local_begin,
    reinterpret_cast<long long*>(&ghosted_indices[0]),
    0,
#ifdef HAVE_MPI
    Epetra_MpiComm(MPI_COMM_WORLD));
#else
    Epetra_SerialComm());
#endif
  
  std::vector<unsigned long long> distributed_indices;
  distributed_indices.reserve(entries_per_process*n_proc);
  for (long long i = begin_index; i< end_index; ++i)
    distributed_indices.push_back(i);
  Epetra_Map map_distributed
  (entries_per_process*n_proc,
   entries_per_process,
   reinterpret_cast<long long*>(&distributed_indices[0]),
   0,
#ifdef HAVE_MPI
   Epetra_MpiComm(MPI_COMM_WORLD));
#else
    Epetra_SerialComm());
#endif
 
  Epetra_FEVector v_ghosted(map_ghosted);
  Epetra_FEVector v_distributed(map_distributed);
  
  v_distributed.PutScalar(2.);
  v_ghosted.PutScalar(1.);

  Epetra_Import data_exchange (v_distributed.Map(), v_ghosted.Map());
  int ierr = v_distributed.Import(v_ghosted, data_exchange, Epetra_AddLocalAlso);
 
  std::cout << "Distributed:" << std::endl;
  for (long long i=begin_index; i<end_index; ++i)
  {
    int trilinos_i
      = v_distributed.Map().LID(i);
    double value = v_distributed[0][trilinos_i];
    std::cout<<"proc "<<my_id<<" "<< i << ": " << value << std::endl;
    if (value != 3)
      std::cerr << "tests FAILED: value = " << value << std::endl;
  }  
}
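test() assumes MPI has already been initialized. A hypothetical driver, not part of the original example, that makes it runnable:

int main( int argc, char** argv )
{
#ifdef HAVE_MPI
  MPI_Init(&argc, &argv);
#endif
  test();
#ifdef HAVE_MPI
  MPI_Finalize();
#endif
  return 0;
}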