Code Example #1
File: Amesos_Mumps.cpp  Project: 00liujj/trilinos
//=============================================================================
int Amesos_Mumps::SymbolicFactorization()
{

  // erase data if present. 
  if (IsSymbolicFactorizationOK_ && MDS.job != -777)
   Destroy();

  IsSymbolicFactorizationOK_ = false;
  IsNumericFactorizationOK_ = false;

  CreateTimer(Comm());
  
  CheckParameters();
  AMESOS_CHK_ERR(ConvertToTriplet(false));

#if defined(HAVE_MPI) && defined(HAVE_AMESOS_MPI_C2F)
  if (MaxProcs_ != Comm().NumProc()) 
  {
    if(MUMPSComm_) 
      MPI_Comm_free(&MUMPSComm_);

    std::vector<int> ProcsInGroup(MaxProcs_);
    for (int i = 0 ; i < MaxProcs_ ; ++i) 
      ProcsInGroup[i] = i;

    MPI_Group OrigGroup, MumpsGroup;
    MPI_Comm_group(MPI_COMM_WORLD, &OrigGroup);
    MPI_Group_incl(OrigGroup, MaxProcs_, &ProcsInGroup[0], &MumpsGroup);
    MPI_Comm_create(MPI_COMM_WORLD, MumpsGroup, &MUMPSComm_);

#ifdef MUMPS_4_9
    MDS.comm_fortran = (MUMPS_INT) MPI_Comm_c2f( MUMPSComm_);
#else

#ifndef HAVE_AMESOS_OLD_MUMPS
    MDS.comm_fortran = (DMUMPS_INT) MPI_Comm_c2f( MUMPSComm_);
#else
    MDS.comm_fortran = (F_INT) MPI_Comm_c2f( MUMPSComm_);
#endif

#endif

  } 
  else 
  {
    const Epetra_MpiComm* MpiComm = dynamic_cast<const Epetra_MpiComm*>(&Comm());
    assert (MpiComm != 0);
#ifdef MUMPS_4_9
    MDS.comm_fortran = (MUMPS_INT) MPI_Comm_c2f(MpiComm->GetMpiComm());
#else

#ifndef HAVE_AMESOS_OLD_MUMPS
    MDS.comm_fortran = (DMUMPS_INT) MPI_Comm_c2f(MpiComm->GetMpiComm());
#else
    MDS.comm_fortran = (F_INT) MPI_Comm_c2f(MpiComm->GetMpiComm());
#endif

#endif
  }
#else
  // The next three lines of code were required to make Amesos_Mumps work
  // with Ifpack_SubdomainFilter. They are useful in all cases
  // when using MUMPS on a subdomain.
  const Epetra_MpiComm* MpiComm = dynamic_cast<const Epetra_MpiComm*>(&Comm());
  assert (MpiComm != 0);
  MDS.comm_fortran = (MUMPS_INT) MPI_Comm_c2f(MpiComm->GetMpiComm());
  // Previously, the next line was uncommented (the special value -987654 tells
  // MUMPS to use MPI_COMM_WORLD, which also works in serial), but we want MUMPS
  // to work on the communicator associated with the matrix, not on the global
  // MPI comm.
  //  MDS.comm_fortran = -987654;
#endif
  
  MDS.job = -1  ;     //  Initialization
  MDS.par = 1 ;       //  Host IS involved in computations
//  MDS.sym = MatrixProperty_;
  MDS.sym =  0;       //  MatrixProperty_ is uninitialized.  Furthermore, MUMPS 
                      //  expects only half of the matrix to be provided for
                      //  symmetric matrices.  Hence setting MDS.sym to be non-zero
                      //  indicating that the matrix is symmetric will only work
                      //  if we change ConvertToTriplet to pass only half of the 
                      //  matrix.  Bug #2331 and Bug #2332 - low priority


  RedistrMatrix(true);

  if (Comm().MyPID() < MaxProcs_) 
  {
    dmumps_c(&(MDS));   //  Initialize MUMPS
    static_cast<void>( CheckError( ) );  
  }

  MDS.n = Matrix().NumGlobalRows();

  // fix pointers for nonzero pattern of A. Numerical values
  // will be entered in PerformNumericalFactorization()
  if (Comm().NumProc() != 1) 
  {
    MDS.nz_loc = RedistrMatrix().NumMyNonzeros();

    if (Comm().MyPID() < MaxProcs_) 
    {
      MDS.irn_loc = &Row[0]; 
      MDS.jcn_loc = &Col[0];
    }
  } 
  else 
  {
    if (Comm().MyPID() == 0) 
    {
      MDS.nz = Matrix().NumMyNonzeros();
      MDS.irn = &Row[0]; 
      MDS.jcn = &Col[0]; 
    }
  }

  // scaling if provided by the user
  if (RowSca_ != 0) 
  {
    MDS.rowsca = RowSca_;
    MDS.colsca = ColSca_;
  }

  // given ordering if provided by the user
  if (PermIn_ != 0) 
  {
    MDS.perm_in = PermIn_;
  }

  MDS.job = 1;     // Request symbolic factorization

  SetICNTLandCNTL();

  // Perform symbolic factorization

  ResetTimer();

  if (Comm().MyPID() < MaxProcs_) 
    dmumps_c(&(MDS));

  SymFactTime_ = AddTime("Total symbolic factorization time", SymFactTime_);

  int IntWrong = CheckError()?1:0 ; 
  int AnyWrong;
  Comm().SumAll( &IntWrong, &AnyWrong, 1 ) ; 
  bool Wrong = AnyWrong > 0 ; 


  if ( Wrong ) {
      AMESOS_CHK_ERR( StructurallySingularMatrixError ) ; 
  }

  IsSymbolicFactorizationOK_ = true ;
  NumSymbolicFact_++;  

  return 0;
}
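
The method above is normally not called directly; it is reached through the Amesos factory and the Amesos_BaseSolver interface. The following is a minimal sketch of that calling sequence, assuming the matrix A, solution x, and right-hand side b are already built; the function name SolveWithMumps is illustrative and not part of Amesos.

// Minimal usage sketch (not part of the example above): the standard Amesos
// calling sequence that ends up invoking Amesos_Mumps::SymbolicFactorization().
#include "Amesos.h"
#include "Amesos_BaseSolver.h"
#include "Epetra_LinearProblem.h"
#include "Epetra_RowMatrix.h"
#include "Epetra_MultiVector.h"

int SolveWithMumps(Epetra_RowMatrix& A, Epetra_MultiVector& x, Epetra_MultiVector& b)
{
  Epetra_LinearProblem Problem(&A, &x, &b);

  Amesos Factory;
  Amesos_BaseSolver* Solver = Factory.Create("Amesos_Mumps", Problem);
  if (Solver == 0)
    return -1;                          // MUMPS support not enabled in this build

  // The three phases map onto the MUMPS jobs used above:
  // SymbolicFactorization() runs job = -1 (init) and job = 1 (analysis).
  int ierr = Solver->SymbolicFactorization();
  if (ierr == 0) ierr = Solver->NumericFactorization();
  if (ierr == 0) ierr = Solver->Solve();

  delete Solver;
  return ierr;
}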
Code Example #2
//=============================================================================
int Amesos_Paraklete::SymbolicFactorization() 
{
  MyPID_    = Comm().MyPID();
  NumProcs_ = Comm().NumProc();
  
  IsSymbolicFactorizationOK_ = false;
  IsNumericFactorizationOK_ = false;
  
#ifdef HAVE_AMESOS_EPETRAEXT
  transposer_ = static_cast<Teuchos::ENull>( 0 ); 
#endif
  
  CreateTimer(Comm(), 2);
  
  ResetTimer(1);
  
  // "overhead" time for the following method is considered here
  AMESOS_CHK_ERR( CreateLocalMatrixAndExporters() ) ;
  assert( NumGlobalElements_ == RowMatrixA_->NumGlobalCols() );
  
  
  SetMaxProcesses(MaxProcesses_, *RowMatrixA_);
  
  //
  //  Perform checks in SymbolicFactorization(), but none in 
  //  NumericFactorization() or Solve()
  //
  assert( ! TrustMe_ ) ;
  if ( TrustMe_ ) { 
    if ( CrsMatrixA_ == 0 ) AMESOS_CHK_ERR(10 );
    if( UseDataInPlace_ != 1 ) AMESOS_CHK_ERR( 10 ) ;
    if( Reindex_ )  AMESOS_CHK_ERR( 10 ) ;
    if( ! Problem_->GetLHS() )  AMESOS_CHK_ERR( 10 ) ;
    if( ! Problem_->GetRHS() )  AMESOS_CHK_ERR( 10 ) ;
    if( ! Problem_->GetLHS()->NumVectors() ) AMESOS_CHK_ERR( 10 ) ;
    if( ! Problem_->GetRHS()->NumVectors() ) AMESOS_CHK_ERR( 10 ) ; 
    SerialB_ = Problem_->GetRHS() ;
    SerialX_ = Problem_->GetLHS() ;
    NumVectors_ = SerialX_->NumVectors();
    if (MyPID_ == 0) {
      AMESOS_CHK_ERR(SerialX_->ExtractView(&SerialXBvalues_,&SerialXlda_ ));
      AMESOS_CHK_ERR(SerialB_->ExtractView(&SerialBvalues_,&SerialXlda_ ));
    }
  }
  
  
  PrivateParakleteData_->common_ = rcp(new paraklete_common());
  
  const Epetra_MpiComm* MpiComm = dynamic_cast<const Epetra_MpiComm*>(&Comm());
  assert (MpiComm != 0);
  
  MPI_Comm PK_Comm;
  //
  //  Create an MPI group with MaxProcesses_ processes
  //
  if ( MaxProcesses_ != Comm().NumProc()) {
    if(ParakleteComm_)  {
      MPI_Comm_free(&ParakleteComm_);
      ParakleteComm_ = 0 ; 
    }
    std::vector<int> ProcsInGroup(MaxProcesses_);
    IamInGroup_ = false; 
    for (int i = 0 ; i < MaxProcesses_ ; ++i) {
      ProcsInGroup[i] = i;
      if ( Comm().MyPID() == i ) IamInGroup_ = true; 
    }
    
    MPI_Group OrigGroup, ParakleteGroup;
    MPI_Comm_group(MpiComm->GetMpiComm(), &OrigGroup);
    MPI_Group_incl(OrigGroup, MaxProcesses_, &ProcsInGroup[0], &ParakleteGroup);
    MPI_Comm_create(MpiComm->GetMpiComm(), ParakleteGroup, &ParakleteComm_);
    PK_Comm = ParakleteComm_ ; 
  } else {
    IamInGroup_ = true; 
    PK_Comm = MpiComm->GetMpiComm() ;
  }
  
  paraklete_common& pk_common =  *PrivateParakleteData_->common_ ;
  cholmod_common *cm = &(pk_common.cm) ;
  amesos_cholmod_l_start (cm) ;
  PK_DEBUG_INIT ("pk", cm) ;
  pk_common.nproc = MaxProcesses_ ;
  pk_common.myid = Comm().MyPID() ; 
  //pk_common.mpicomm = PK_Comm ; 
  cm->print = 1 ;
  cm->precise = TRUE ;
  cm->error_handler = my_handler ;
  
  pk_common.tol_diag = 0.001 ;
  pk_common.tol_offdiag = 0.1 ;
  pk_common.growth = 2. ;

  

  // "overhead" time for the following two methods is considered here
  AMESOS_CHK_ERR( ExportToSerial() );

  AMESOS_CHK_ERR( ConvertToParakleteCRS(true) );

  OverheadTime_ = AddTime("Total Amesos overhead time", OverheadTime_, 1);

  // All this time is PARAKLETE time
  AMESOS_CHK_ERR( PerformSymbolicFactorization() );

  NumSymbolicFact_++;

  IsSymbolicFactorizationOK_ = true;
  
  return 0;
}
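
Both examples cap the number of MPI ranks handed to the third-party solver (MaxProcs_ in Amesos_Mumps, MaxProcesses_ in Amesos_Paraklete). That limit, like the timing output accumulated through AddTime(), is normally requested from user code through SetParameters(). The sketch below assumes the usual Amesos parameter keys "MaxProcs" and "PrintTiming"; verify the exact names against the Amesos documentation for your Trilinos version.

// Sketch only: ask the underlying solver to use at most 4 ranks and to report
// its timers. The parameter names are assumptions, not taken from the code above.
#include "Amesos_BaseSolver.h"
#include "Teuchos_ParameterList.hpp"

int ConfigureSolver(Amesos_BaseSolver& Solver)
{
  Teuchos::ParameterList List;
  List.set("MaxProcs", 4);         // cap the ranks used for the factorization
  List.set("PrintTiming", true);   // print the timers accumulated via AddTime()
  return Solver.SetParameters(List);
}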