int Belos::createEpetraProblem( std::string             &filename,
                                RCP<Epetra_Map>         *rowMap,
                                RCP<Epetra_CrsMatrix>   *A,
                                RCP<Epetra_MultiVector> *B,
                                RCP<Epetra_MultiVector> *X,
                                int                     *MyPID_out )
{
  //
  int &MyPID = *MyPID_out;
  //
  int i;
  int n_nonzeros, N_update;
  int *bindx = 0, *update = 0, *col_inds = 0;
  double *val = 0, *row_vals = 0;
  double *xguess = 0, *b = 0, *xexact = 0;

  RCP<Epetra_Comm> epetraComm;
#ifdef EPETRA_MPI
  epetraComm = rcp(new Epetra_MpiComm( MPI_COMM_WORLD ));
#else
  epetraComm = rcp(new Epetra_SerialComm());
#endif
  MyPID = epetraComm->MyPID();
  //
  // **********************************************************************
  // ****************** Set up the problem to be solved ******************
  // **********************************************************************
  //
  int NumGlobalElements;  // total number of rows in the matrix
  //
  // ***** Read in matrix from HB file *****
  //
  Trilinos_Util_read_hb(const_cast<char *>(filename.c_str()), MyPID, &NumGlobalElements,
                        &n_nonzeros, &val, &bindx, &xguess, &b, &xexact);
  //
  // ***** Distribute data among processors *****
  //
  Trilinos_Util_distrib_msr_matrix(*epetraComm, &NumGlobalElements, &n_nonzeros,
                                   &N_update, &update, &val, &bindx,
                                   &xguess, &b, &xexact);
  //
  // ***** Construct the matrix *****
  //
  int NumMyElements = N_update;  // number of local rows of the matrix on this processor
  //
  // Create an integer array NumNz that is used to build the Epetra matrix.
  // NumNz[i] is the number of OFF-DIAGONAL terms for the ith global equation
  // on this processor.
  //
  int *NumNz = new int[NumMyElements];
  for (i=0; i<NumMyElements; i++) {
    NumNz[i] = bindx[i+1] - bindx[i] + 1;
  }
  //
  RCP<Epetra_Map> epetraMap =
    rcp(new Epetra_Map(NumGlobalElements, NumMyElements, update, 0, *epetraComm));
  Teuchos::set_extra_data( epetraComm, "Map::Comm", Teuchos::inOutArg(epetraMap) );
  if (rowMap) *rowMap = epetraMap;
  //
  // Create an Epetra_CrsMatrix
  //
  *A = rcp(new Epetra_CrsMatrix(Epetra_DataAccess::Copy, *epetraMap, NumNz));
  Teuchos::set_extra_data( epetraMap, "Operator::Map", Teuchos::ptr(A) );
  //
  // Add rows one at a time: off-diagonal entries first, then the diagonal
  //
  int NumEntries;
  for (i=0; i<NumMyElements; i++) {
    row_vals   = val + bindx[i];
    col_inds   = bindx + bindx[i];
    NumEntries = bindx[i+1] - bindx[i];
    int info = (*A)->InsertGlobalValues(update[i], NumEntries, row_vals, col_inds);
    assert( info == 0 );
    info = (*A)->InsertGlobalValues(update[i], 1, val+i, update+i);
    assert( info == 0 );
  }
  //
  // Finish up
  //
  int info = (*A)->FillComplete();
  assert( info == 0 );
  info = (*A)->OptimizeStorage();
  assert( info == 0 );
  (*A)->SetTracebackMode(1);  // Shut down Epetra warning tracebacks
  //
  // Construct the right-hand side and solution multivectors.
  //
  if (B) {
    *B = rcp(new Epetra_MultiVector(Epetra_DataAccess::Copy, *epetraMap,
                                    b, NumMyElements, 1));
    Teuchos::set_extra_data( epetraMap, "B::Map", Teuchos::ptr(B) );
  }
  if (X) {
    *X = rcp(new Epetra_MultiVector(*epetraMap, 1));
    Teuchos::set_extra_data( epetraMap, "X::Map", Teuchos::ptr(X) );
  }
  //
  // Create workspace
  //
  Teuchos::set_default_workspace_store(
    Teuchos::rcp(new Teuchos::WorkspaceStoreInitializeable(static_cast<size_t>(2e+6))) );
  //
  // Free up memory
  //
  delete [] NumNz;
  free(update);
  free(val);
  free(bindx);
  if (xexact) free(xexact);
  if (xguess) free(xguess);
  if (b) free(b);

  return (0);
}
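// A minimal usage sketch for createEpetraProblem (an illustration added here,
// not part of the original source; "orsirr1.hb" is a hypothetical HB file
// name). The caller owns the RCPs and passes their addresses; rowMap, B, and
// X may be NULL if not needed, but MyPID_out must be non-NULL since the
// function dereferences it unconditionally.
void exampleCreateEpetraProblemUsage()
{
  std::string filename("orsirr1.hb");  // hypothetical matrix file
  int MyPID;
  Teuchos::RCP<Epetra_Map>         rowMap;
  Teuchos::RCP<Epetra_CrsMatrix>   A;
  Teuchos::RCP<Epetra_MultiVector> B, X;
  Belos::createEpetraProblem(filename, &rowMap, &A, &B, &X, &MyPID);
  // A, B, and X are now ready to be wrapped in a Belos::LinearProblem.
}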
int main(int argc, char *argv[])
{
  int    *update;            /* vector elements updated on this node */
  int    *bindx;
  double *val;
  double *xguess, *b, *xexact;
  int    n_nonzeros;
  int    N_update;           /* # of block unknowns updated on this node */
  int    numLocalEquations;  /* number of scalar equations on this node */
  int    numGlobalEquations; /* total number of equations */
  int    *numNz, *ColInds;
  int    row, *col_inds, numEntries;
  double *row_vals;
  int    i;

#ifdef EPETRA_MPI
  MPI_Init(&argc, &argv);
  Epetra_MpiComm comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm comm;
#endif

  cout << comm << endl;

  if (argc != 2) {
    cerr << "error: enter name of data file on command line" << endl;
#ifdef EPETRA_MPI
    MPI_Finalize();
#endif
    return 1;
  }

  /* Set exact solution to NULL */
  xexact = NULL;

  /* Read matrix file and distribute among processors.
     Returns with this processor's set of rows */
  Trilinos_Util_read_hb(argv[1], comm.MyPID(), &numGlobalEquations, &n_nonzeros,
                        &val, &bindx, &xguess, &b, &xexact);

  Trilinos_Util_distrib_msr_matrix(comm, &numGlobalEquations, &n_nonzeros, &N_update,
                                   &update, &val, &bindx, &xguess, &b, &xexact);

  numLocalEquations = N_update;

  /* Make numNz - number of nonzero entries in each row (off-diagonals plus diagonal) */
  numNz = new int[numLocalEquations];
  for (i=0; i<numLocalEquations; i++)
    numNz[i] = bindx[i+1] - bindx[i] + 1;

  /* Make ColInds - exactly bindx, offset by diag (just copy pointer) */
  ColInds = bindx + numLocalEquations + 1;

  Epetra_Map map(numGlobalEquations, numLocalEquations, update, 0, comm);

  Epetra_CrsMatrix A(Copy, map, numNz);

  /* Add rows one at a time: off-diagonal entries first, then the diagonal */
  for (row=0; row<numLocalEquations; row++) {
    row_vals   = val + bindx[row];
    col_inds   = bindx + bindx[row];
    numEntries = bindx[row+1] - bindx[row];
    assert(A.InsertGlobalValues(update[row], numEntries, row_vals, col_inds)==0);
    assert(A.InsertGlobalValues(update[row], 1, val+row, update+row)==0);
  }
  assert(A.FillComplete()==0);

  Epetra_Vector xx(Copy, map, xexact);
  Epetra_Vector bb(Copy, map, b);

  // Construct an Epetra_LinearProblem
  Epetra_Vector x(map);
  Epetra_LinearProblem problem(&A, &x, &bb);

  // Construct a solver object for this problem
  AztecOO solver(problem);

  // Assert symmetric
  // problem->AssertSymmetric();

  // Set Problem Difficulty Level
  //problem->SetPDL(easy);

  //solver.SetAztecOption(AZ_precond, AZ_none);
  solver.SetAztecOption(AZ_precond, AZ_dom_decomp);
  //solver.SetAztecOption(AZ_precond, AZ_ls);
  //solver.SetAztecOption(AZ_scaling, 8);
  solver.SetAztecOption(AZ_subdomain_solve, AZ_ilut);
  //solver.SetAztecOption(AZ_subdomain_solve, AZ_bilu_ifp);
  bool bilu = false;
  //solver.SetAztecOption(AZ_output, 0);
  //solver.SetAztecOption(AZ_graph_fill, 2);
  solver.SetAztecOption(AZ_overlap, 0);
  //solver.SetAztecOption(AZ_reorder, 0);
  //solver.SetAztecOption(AZ_poly_ord, 9);
  solver.SetAztecParam(AZ_ilut_fill, 1.0);
  solver.SetAztecParam(AZ_drop, 0.0);
  //double rthresh = 1.01;
  //cout << "Rel threshold = " << rthresh << endl;
  //solver.SetAztecParam(AZ_rthresh, rthresh);
  //double athresh = 1.0e-2;
  //cout << "Abs threshold = " << athresh << endl;
  //solver.SetAztecParam(AZ_athresh, athresh);
  //solver.SetAztecOption(AZ_conv, AZ_noscaled);
  //solver.SetAztecParam(AZ_ill_cond_thresh, 1.0e12);

  int Niters = 400;
  solver.SetAztecOption(AZ_kspace, Niters);

  double norminf = A.NormInf();
  double normone = A.NormOne();
  if (comm.MyPID()==0)
    cout << "\n Inf-norm of A before scaling = " << norminf
         << "\n One-norm of A before scaling = " << normone << endl << endl;

  if (bilu) {
    int NumTrials = 3;
    double athresholds[] = {0.0, 1.0E-14, 1.0E-3};
    double rthresholds[] = {0.0, 1.0E-14, 1.0E-3};
    double condestThreshold = 1.0E16;
    double maxFill = 4.0;
    int maxKspace = 4*Niters;
    solver.SetAdaptiveParams(NumTrials, athresholds, rthresholds,
                             condestThreshold, maxFill, maxKspace);
  }
  else {
    int NumTrials = 7;
    double athresholds[] = {0.0, 1.0E-12, 1.0E-12, 1.0E-5, 1.0E-5, 1.0E-2, 1.0E-2};
    double rthresholds[] = {1.0, 1.0,     1.01,    1.0,    1.01,   1.01,   1.1};
    double condestThreshold = 1.0E16;
    double maxFill = 4.0;
    int maxKspace = 4*Niters;
    solver.SetAdaptiveParams(NumTrials, athresholds, rthresholds,
                             condestThreshold, maxFill, maxKspace);
  }

  Epetra_Time timer(comm);
  solver.AdaptiveIterate(Niters, 1, 5.0e-7);
  double atime = timer.ElapsedTime();
  if (comm.MyPID()==0) cout << "AdaptiveIterate total time = " << atime << endl;

  norminf = A.NormInf();
  normone = A.NormOne();
  if (comm.MyPID()==0)
    cout << "\n Inf-norm of A after scaling = " << norminf
         << "\n One-norm of A after scaling = " << normone << endl << endl;

  Epetra_Vector bcomp(map);
  assert(A.Multiply(false, x, bcomp)==0);

  Epetra_Vector resid(map);
  assert(resid.Update(1.0, bb, -1.0, bcomp, 0.0)==0);

  double residual;
  assert(resid.Norm2(&residual)==0);
  if (comm.MyPID()==0) cout << "Residual = " << residual << endl;

  assert(resid.Update(1.0, xx, -1.0, x, 0.0)==0);
  assert(resid.Norm2(&residual)==0);
  if (comm.MyPID()==0)
    cout << "2-norm of difference between computed and exact solution = "
         << residual << endl;

  if (residual > 1.0e-5) {
    cout << "Difference between computed and exact solution is large..." << endl
         << "Computing norm of A times this difference. "
         << "If this norm is small, then the matrix is likely singular." << endl;
    assert(A.Multiply(false, resid, bcomp)==0);
    assert(bcomp.Norm2(&residual)==0);
    if (comm.MyPID()==0)
      cout << "2-norm of A times difference between computed and exact solution = "
           << residual << endl;
  }

  free((void *) xguess);
  free((void *) b);
  free((void *) xexact);
  free((void *) val);
  free((void *) bindx);
  free((void *) update);
  delete [] numNz;

#ifdef EPETRA_MPI
  MPI_Finalize();
#endif
  return 0;
}
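// A small follow-up sketch (an addition, not part of the original example):
// AztecOO also records the iteration count and residual norms from the last
// solve, so the hand-computed residual above can be cross-checked directly.
// Assumes a solver and comm set up as in the example above.
void reportAztecOOStatus(AztecOO &solver, const Epetra_Comm &comm)
{
  if (comm.MyPID() == 0) {
    cout << "Iterations performed      = " << solver.NumIters() << endl;
    cout << "True residual (AztecOO)   = " << solver.TrueResidual() << endl;
    cout << "Scaled residual (AztecOO) = " << solver.ScaledResidual() << endl;
  }
}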
void Trilinos_Util_ReadHb2EpetraVbr(char *data_file, char *partitioning,
                                    const Epetra_Comm &comm,
                                    Epetra_BlockMap *&map,
                                    Epetra_VbrMatrix *&A,
                                    Epetra_Vector *&x,
                                    Epetra_Vector *&b,
                                    Epetra_Vector *&xexact)
{
  /* Read matrix file and distribute among processors.
     Returns with this processor's set of rows */

  int NumGlobalEquations = 0, NumMyNonzeros = 0;
  double *val_msr = 0, *x_in = 0, *b_in = 0, *xexact_in = 0;
  int *bindx_msr = 0;

  /* Set exact solution to NULL */
  xexact = NULL;

  Trilinos_Util_read_hb(data_file, comm.MyPID(), &NumGlobalEquations, &NumMyNonzeros,
                        &val_msr, &bindx_msr, &x_in, &b_in, &xexact_in);

  double *val = 0;
  int NumGlobalElements = 0;
  int *indx = 0, *rpntr = 0, *cpntr = 0, *bpntr = 0, *bindx = 0;
  int NumMyBlockEntries = 0, NumMyElements = 0, *MyGlobalElements = 0;

  Trilinos_Util_create_vbr(comm, partitioning,
                           &NumGlobalEquations, &NumGlobalElements,
                           &NumMyNonzeros, &NumMyBlockEntries,
                           &NumMyElements, &MyGlobalElements,
                           bindx_msr, val_msr,
                           &val, &indx, &rpntr, &cpntr, &bpntr, &bindx);

  if (comm.MyPID()==0) {
    free((void *) val_msr);
    free((void *) bindx_msr);
    free((void *) cpntr);
  }

  int *ElementSizeList = 0;
  if (NumMyElements>0) ElementSizeList = new int[NumMyElements];

  for (int i=0; i<NumMyElements; i++)
    ElementSizeList[i] = rpntr[i+1] - rpntr[i];

  map = new Epetra_BlockMap(-1, NumMyElements, MyGlobalElements,
                            ElementSizeList, 0, comm);

  A = new Epetra_VbrMatrix(Copy, *map, 0);

  /* Add block rows one at a time */
  for (int i=0; i<NumMyElements; i++) {
    int BlockRow = MyGlobalElements[i];
    int NumBlockEntries = bpntr[i+1] - bpntr[i];
    int *BlockIndices = bindx + bpntr[i];
    int ierr = A->BeginInsertGlobalValues(BlockRow, NumBlockEntries, BlockIndices);
    if (ierr!=0) {
      cerr << "Error in BeginInsertGlobalValues(GlobalBlockRow = " << BlockRow
           << ") = " << ierr << endl;
      abort();
    }
    int LDA = ElementSizeList[i];
    int NumRows = LDA;
    for (int j=bpntr[i]; j<bpntr[i+1]; j++) {
      int NumCols = (indx[j+1] - indx[j])/LDA;
      double *Values = val + indx[j];
      ierr = A->SubmitBlockEntry(Values, LDA, NumRows, NumCols);
      if (ierr!=0) {
        // BlockIndices is offset by bpntr[i], so index relative to this row
        cerr << "Error in SubmitBlockEntry, GlobalBlockRow = " << BlockRow
             << ", GlobalBlockCol = " << BlockIndices[j-bpntr[i]]
             << ", Error = " << ierr << endl;
        abort();
      }
    }
    ierr = A->EndSubmitEntries();
    if (ierr!=0) {
      cerr << "Error in EndSubmitEntries(GlobalBlockRow = " << BlockRow
           << ") = " << ierr << endl;
      abort();
    }
  }
  int ierr = A->FillComplete();
  if (ierr!=0) cerr << "Error in Epetra_VbrMatrix FillComplete, ierr = " << ierr << endl;

  xexact = new Epetra_Vector(Copy, *map, xexact_in);
  x = new Epetra_Vector(Copy, *map, x_in);
  b = new Epetra_Vector(Copy, *map, b_in);

  if (comm.MyPID()==0) {
    free((void *) val);
    free((void *) indx);
    free((void *) rpntr);
    free((void *) bpntr);
    free((void *) bindx);
    free((void *) b_in);
    free((void *) x_in);
    free((void *) xexact_in);
    free((void *) MyGlobalElements);
    delete [] ElementSizeList;
  }
  return;
}
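// A usage sketch for Trilinos_Util_ReadHb2EpetraVbr (an illustration added
// here; the file names are hypothetical). The function allocates the map,
// matrix, and vectors with new, so the caller owns and must delete them.
void exampleReadHb2EpetraVbrUsage(const Epetra_Comm &comm)
{
  Epetra_BlockMap  *map = 0;
  Epetra_VbrMatrix *A = 0;
  Epetra_Vector    *x = 0, *b = 0, *xexact = 0;
  Trilinos_Util_ReadHb2EpetraVbr((char *)"fidap005.rua", (char *)"partition.dat",
                                 comm, map, A, x, b, xexact);
  // ... solve with A, x, b ...
  delete xexact; delete b; delete x; delete A; delete map;
}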
int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
  MPI_Init(&argc, &argv);
  // define an Epetra communicator
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  // check number of processes
  if (Comm.NumProc() != 1) {
    if (Comm.MyPID() == 0)
      cerr << "*ERR* can be used only with one process" << endl;
#ifdef HAVE_MPI
    MPI_Finalize();
#endif
    exit(EXIT_SUCCESS);
  }

  // process 0 will read an HB matrix, and store it
  // in the MSR format given by the arrays bindx and val
  int N_global;
  int N_nonzeros;
  double *val = NULL;
  int *bindx = NULL;
  double *x = NULL, *b = NULL, *xexact = NULL;

  FILE *fp = fopen("../HBMatrices/fidap005.rua", "r");
  if (fp == 0) {
    cerr << "Matrix file not available" << endl;
#ifdef HAVE_MPI
    MPI_Finalize();
#endif
    exit(EXIT_SUCCESS);
  }
  fclose(fp);

  Trilinos_Util_read_hb("../HBMatrices/fidap005.rua", 0,
                        &N_global, &N_nonzeros,
                        &val, &bindx, &x, &b, &xexact);

  // assign all the elements to process 0
  // (this code can run ONLY with one process; extensions to more
  // processes will require functions to handle updates of ghost nodes)
  Epetra_Map Map(N_global, 0, Comm);

  MSRMatrix A(Map, bindx, val);

  // define two vectors and apply the MSR matrix to a random vector
  Epetra_Vector xxx(Map);
  Epetra_Vector yyy(Map);

  xxx.Random();

  A.Apply(xxx, yyy);

  cout << yyy;

  double norm2;
  yyy.Norm2(&norm2);
  cout << norm2 << endl;

  // free memory allocated by Trilinos_Util_read_hb
  if (val != NULL)    free((void *)val);
  if (bindx != NULL)  free((void *)bindx);
  if (x != NULL)      free((void *)x);
  if (b != NULL)      free((void *)b);
  if (xexact != NULL) free((void *)xexact);

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return(EXIT_SUCCESS);
} /* main */
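// A minimal sketch of the sparse matrix-vector product y = A*x in MSR storage,
// illustrating the layout the examples above rely on. This is an assumption
// about what MSRMatrix::Apply does internally, not its actual source:
// val[0..N-1] holds the diagonal, val[N] is unused, and for row i the
// off-diagonal values val[bindx[i]..bindx[i+1]-1] have their column indices
// stored at the same positions in bindx (with bindx[0] = N+1).
void msr_matvec(int N, const int *bindx, const double *val,
                const double *x, double *y)
{
  for (int i = 0; i < N; i++) {
    double sum = val[i] * x[i];               // diagonal contribution
    for (int k = bindx[i]; k < bindx[i+1]; k++)
      sum += val[k] * x[bindx[k]];            // off-diagonal contributions
    y[i] = sum;
  }
}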