int main(int argc,char **argv)
{
  Mat            A,B;
  MatScalar      a[1],alpha;
  PetscMPIInt    size,rank;
  PetscInt       m,n,i,col,prid;
  PetscErrorCode ierr;

  PetscInitialize(&argc,&argv,(char*)0,help);
  ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);

  prid = size;
  ierr = PetscOptionsGetInt(PETSC_NULL,"-prid",&prid,PETSC_NULL);CHKERRQ(ierr);

  m    = n = 10*size;
  ierr = MatCreate(PETSC_COMM_SELF,&A);CHKERRQ(ierr);
  ierr = MatSetSizes(A,PETSC_DETERMINE,PETSC_DETERMINE,m,n);CHKERRQ(ierr);
  ierr = MatSetType(A,MATSEQAIJ);CHKERRQ(ierr);
  ierr = MatSetUp(A);CHKERRQ(ierr);

  a[0] = rank+1;
  for (i=0; i<m-rank; i++) {
    col  = i+rank;
    ierr = MatSetValues(A,1,&i,1,&col,a,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  if (rank == prid) {
    ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] A:\n",rank);CHKERRQ(ierr);
    ierr = MatView(A,PETSC_VIEWER_STDOUT_SELF);CHKERRQ(ierr);
  }

  /* Test MatCreateMPIAIJSumSeqAIJ */
  ierr = MatCreateMPIAIJSumSeqAIJ(PETSC_COMM_WORLD,A,PETSC_DECIDE,PETSC_DECIDE,MAT_INITIAL_MATRIX,&B);CHKERRQ(ierr);

  /* Test MAT_REUSE_MATRIX */
  alpha = 0.1;
  for (i=0; i<3; i++) {
    ierr = MatScale(A,alpha);CHKERRQ(ierr);
    ierr = MatCreateMPIAIJSumSeqAIJ(PETSC_COMM_WORLD,A,PETSC_DECIDE,PETSC_DECIDE,MAT_REUSE_MATRIX,&B);CHKERRQ(ierr);
  }
  ierr = MatView(B,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);

  ierr = MatDestroy(&B);CHKERRQ(ierr);
  ierr = MatDestroy(&A);CHKERRQ(ierr);
  PetscFinalize();
  return 0;
}
EXTERN_C_END #undef __FUNCT__ #define __FUNCT__ "MatCreateSeqCRL" /*@C MatCreateSeqCRL - Creates a sparse matrix of type SEQCRL. This type inherits from AIJ, but stores some additional information that is used to allow better vectorization of the matrix-vector product. At the cost of increased storage, the AIJ formatted matrix can be copied to a format in which pieces of the matrix are stored in ELLPACK format, allowing the vectorized matrix multiply routine to use stride-1 memory accesses. As with the AIJ type, it is important to preallocate matrix storage in order to get good assembly performance. Collective on MPI_Comm Input Parameters: + comm - MPI communicator, set to PETSC_COMM_SELF . m - number of rows . n - number of columns . nz - number of nonzeros per row (same for all rows) - nnz - array containing the number of nonzeros in the various rows (possibly different for each row) or PETSC_NULL Output Parameter: . A - the matrix Notes: If nnz is given then nz is ignored Level: intermediate .keywords: matrix, cray, sparse, parallel .seealso: MatCreate(), MatCreateMPICSRPERM(), MatSetValues() @*/ PetscErrorCode PETSCMAT_DLLEXPORT MatCreateSeqCRL(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt nz,const PetscInt nnz[],Mat *A) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatCreate(comm,A);CHKERRQ(ierr); ierr = MatSetSizes(*A,m,n,m,n);CHKERRQ(ierr); ierr = MatSetType(*A,MATSEQCRL);CHKERRQ(ierr); ierr = MatSeqAIJSetPreallocation_SeqAIJ(*A,nz,(PetscInt*)nnz);CHKERRQ(ierr); PetscFunctionReturn(0); }
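/* Hedged usage sketch for MatCreateSeqCRL() (illustrative, not from the
   source above): create a SEQCRL matrix of order n with an estimated 3
   nonzeros per row; it is then filled and assembled exactly like SEQAIJ. */
static PetscErrorCode ExampleCreateSeqCRL(PetscInt n,Mat *A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatCreateSeqCRL(PETSC_COMM_SELF,n,n,3,PETSC_NULL,A);CHKERRQ(ierr);
  /* insert entries with MatSetValues(), then assemble as usual */
  ierr = MatAssemblyBegin(*A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}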
int main(int argc,char **args)
{
  Mat            mat;
  PetscInt       i,j,m = 2,n,Ii,J;
  PetscErrorCode ierr;
  PetscScalar    v,none = -1.0;
  PetscMPIInt    rank,size;

  ierr = PetscInitialize(&argc,&args,(char*)0,help);if (ierr) return ierr;
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);
  n    = 2*size;

  /* create the matrix */
  ierr = MatCreate(PETSC_COMM_WORLD,&mat);CHKERRQ(ierr);
  ierr = MatSetSizes(mat,PETSC_DECIDE,PETSC_DECIDE,m*n,m*n);CHKERRQ(ierr);
  ierr = MatSetType(mat,MATAIJ);CHKERRQ(ierr);
  ierr = MatSetUp(mat);CHKERRQ(ierr);

  /* register user-defined MatScaleUser() operation for both SeqAIJ and MPIAIJ types */
  ierr = RegisterMatScaleUserImpl(mat);CHKERRQ(ierr);

  /* assemble the matrix */
  for (i=0; i<m; i++) {
    for (j=2*rank; j<2*rank+2; j++) {
      v = -1.0; Ii = j + n*i;
      if (i>0)   {J = Ii - n; ierr = MatSetValues(mat,1,&Ii,1,&J,&v,INSERT_VALUES);CHKERRQ(ierr);}
      if (i<m-1) {J = Ii + n; ierr = MatSetValues(mat,1,&Ii,1,&J,&v,INSERT_VALUES);CHKERRQ(ierr);}
      if (j>0)   {J = Ii - 1; ierr = MatSetValues(mat,1,&Ii,1,&J,&v,INSERT_VALUES);CHKERRQ(ierr);}
      if (j<n-1) {J = Ii + 1; ierr = MatSetValues(mat,1,&Ii,1,&J,&v,INSERT_VALUES);CHKERRQ(ierr);}
      v = 4.0;
      ierr = MatSetValues(mat,1,&Ii,1,&Ii,&v,INSERT_VALUES);CHKERRQ(ierr);
    }
  }
  ierr = MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  /* check the matrix before and after scaling by -1.0 */
  ierr = PetscPrintf(PETSC_COMM_WORLD,"Matrix _before_ MatScaleUserImpl() operation\n");CHKERRQ(ierr);
  ierr = MatView(mat,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  ierr = MatScaleUserImpl(mat,none);CHKERRQ(ierr);
  ierr = PetscPrintf(PETSC_COMM_WORLD,"Matrix _after_ MatScaleUserImpl() operation\n");CHKERRQ(ierr);
  ierr = MatView(mat,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);

  ierr = MatDestroy(&mat);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return ierr;
}
PetscErrorCode StokesSetupMatBlock11(Stokes *s)
{
  PetscErrorCode ierr;

  PetscFunctionBeginUser;
  /* A[3] is N-by-N null matrix */
  ierr = MatCreate(PETSC_COMM_WORLD, &s->subA[3]);CHKERRQ(ierr);
  ierr = MatSetOptionsPrefix(s->subA[3], "a11_");CHKERRQ(ierr);
  ierr = MatSetSizes(s->subA[3], PETSC_DECIDE, PETSC_DECIDE, s->nx*s->ny, s->nx*s->ny);CHKERRQ(ierr);
  ierr = MatSetType(s->subA[3], MATMPIAIJ);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(s->subA[3], 0, NULL, 0, NULL);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(s->subA[3], MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(s->subA[3], MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
EXTERN_C_END

EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "MatCreate_MPIAIJPERM"
PetscErrorCode MatCreate_MPIAIJPERM(Mat A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
  ierr = MatConvert_MPIAIJ_MPIAIJPERM(A,MATMPIAIJPERM,MAT_REUSE_MATRIX,&A);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/*@
   MatCreateMFFD - Creates a matrix-free matrix.  See also MatCreateSNESMF()

   Collective on Vec

   Input Parameters:
+  comm - MPI communicator
.  m - number of local rows (or PETSC_DECIDE to have it calculated if M is given)
       This value should be the same as the local size used in creating the
       y vector for the matrix-vector product y = Ax.
.  n - number of local columns (or PETSC_DECIDE to have it calculated if N is given)
       This value should be the same as the local size used in creating the
       x vector for the matrix-vector product y = Ax.  For square matrices n
       is almost always m.
.  M - number of global rows (or PETSC_DETERMINE to have it calculated if m is given)
-  N - number of global columns (or PETSC_DETERMINE to have it calculated if n is given)

   Output Parameter:
.  J - the matrix-free matrix

   Options Database Keys: call MatSetFromOptions() to trigger these
+  -mat_mffd_type - wp or ds (see MATMFFD_WP or MATMFFD_DS)
.  -mat_mffd_err <error_rel> - square root of estimated relative error in function evaluation
.  -mat_mffd_period - how often h is recomputed, defaults to 1, i.e. every time
.  -mat_mffd_umin <umin> - sets umin (for the default PETSc routine that computes h only)
-  -mat_mffd_check_positivity

   Level: advanced

   Notes:
   The matrix-free matrix context merely contains the function pointers
   and work space for performing finite difference approximations of
   Jacobian-vector products, F'(u)*a.

   The default code uses the following approach to compute h
.vb
     F'(u)*a = [F(u+h*a) - F(u)]/h where
     h = error_rel*u'a/||a||^2                      if  |u'a| > umin*||a||_{1}
       = error_rel*umin*sign(u'a)*||a||_{1}/||a||^2 otherwise
 where
     error_rel = square root of relative error in function evaluation
     umin = minimum iterate parameter
.ve

   The user can set error_rel via MatMFFDSetFunctionError() and
   umin via MatMFFDDSSetUmin(); see the <A href="../../docs/manual.pdf#nameddest=ch_snes">SNES chapter of the users manual</A> for details.

   The user should call MatDestroy() when finished with the matrix-free
   matrix context.

.keywords: default, matrix-free, create, matrix

.seealso: MatDestroy(), MatMFFDSetFunctionError(), MatMFFDDSSetUmin(), MatMFFDSetFunction(),
          MatMFFDSetHHistory(), MatMFFDResetHHistory(), MatCreateSNESMF(),
          MatMFFDGetH(), MatMFFDRegister(), MatMFFDComputeJacobian()
@*/
PetscErrorCode MatCreateMFFD(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,Mat *J)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatCreate(comm,J);CHKERRQ(ierr);
  ierr = MatSetSizes(*J,m,n,M,N);CHKERRQ(ierr);
  ierr = MatSetType(*J,MATMFFD);CHKERRQ(ierr);
  ierr = MatSetUp(*J);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
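/* Hedged usage sketch for MatCreateMFFD(): apply the finite-difference
   Jacobian of a user function to a direction vector.  myFunc is a
   hypothetical callback with the MatMFFD signature (void*,Vec,Vec), and
   u, a, y are conforming vectors created by the caller. */
static PetscErrorCode ExampleApplyMFFD(PetscErrorCode (*myFunc)(void*,Vec,Vec),void *ctx,Vec u,Vec a,Vec y)
{
  Mat            J;
  PetscInt       n,N;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = VecGetLocalSize(u,&n);CHKERRQ(ierr);
  ierr = VecGetSize(u,&N);CHKERRQ(ierr);
  ierr = MatCreateMFFD(PETSC_COMM_WORLD,n,n,N,N,&J);CHKERRQ(ierr);
  ierr = MatMFFDSetFunction(J,myFunc,ctx);CHKERRQ(ierr);
  ierr = MatMFFDSetBase(J,u,NULL);CHKERRQ(ierr);  /* u is the linearization point */
  ierr = MatMult(J,a,y);CHKERRQ(ierr);            /* y ~ F'(u)*a by finite differences */
  ierr = MatDestroy(&J);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}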
EXTERN_C_END

/*MC
   MATCSRPERM - MATCSRPERM = "CSRPERM" - A matrix type to be used for sparse matrices.

   This matrix type is identical to MATSEQCSRPERM when constructed with a
   single process communicator, and MATMPICSRPERM otherwise.  As a result,
   for single process communicators, MatSeqAIJSetPreallocation() is
   supported, and similarly MatMPIAIJSetPreallocation() is supported for
   communicators controlling multiple processes.  It is recommended that you
   call both of the above preallocation routines for simplicity.

   Options Database Keys:
.  -mat_type csrperm - sets the matrix type to "CSRPERM" during a call to MatSetFromOptions()

   Level: beginner

.seealso: MatCreateMPICSRPERM(), MATSEQCSRPERM, MATMPICSRPERM
M*/

EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "MatCreate_CSRPERM"
PetscErrorCode PETSCMAT_DLLEXPORT MatCreate_CSRPERM(Mat A)
{
  PetscErrorCode ierr;
  PetscMPIInt    size;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
  if (size == 1) {
    ierr = MatSetType(A,MATSEQCSRPERM);CHKERRQ(ierr);
  } else {
    ierr = MatSetType(A,MATMPICSRPERM);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
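/* Hedged sketch of the usage recommended above: create a matrix whose type
   can be switched to CSRPERM with -mat_type csrperm, and call both
   preallocation routines so the code works for any communicator size.
   M, N, and nz are illustrative parameters supplied by the caller. */
static PetscErrorCode ExampleCreateCSRPERM(MPI_Comm comm,PetscInt M,PetscInt N,PetscInt nz,Mat *A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatCreate(comm,A);CHKERRQ(ierr);
  ierr = MatSetSizes(*A,PETSC_DECIDE,PETSC_DECIDE,M,N);CHKERRQ(ierr);
  ierr = MatSetFromOptions(*A);CHKERRQ(ierr);  /* honors -mat_type csrperm */
  ierr = MatSeqAIJSetPreallocation(*A,nz,PETSC_NULL);CHKERRQ(ierr);              /* takes effect on one process */
  ierr = MatMPIAIJSetPreallocation(*A,nz,PETSC_NULL,nz,PETSC_NULL);CHKERRQ(ierr); /* takes effect otherwise */
  PetscFunctionReturn(0);
}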
/*@
   DMADDAGetMatrixNS - Creates a matrix compatible with two distributed arrays

   Collective on ADDA

   Input Parameters:
+  dm - the distributed array for which we create the matrix, which indexes the rows
.  dmc - the distributed array for which we create the matrix, which indexes the columns
-  mtype - Supported types are MATSEQAIJ, MATMPIAIJ, MATSEQBAIJ, MATMPIBAIJ, or
           any type which inherits from one of these (such as MATAIJ, MATLUSOL, etc.).

   Output Parameter:
.  mat - the empty Jacobian

   Level: beginner

.keywords: distributed array, matrix

.seealso: DMCreateMatrix()
@*/
PetscErrorCode DMADDAGetMatrixNS(DM dm, DM dmc, MatType mtype, Mat *mat)
{
  PetscErrorCode ierr;
  DM_ADDA        *dd  = (DM_ADDA*)dm->data;
  DM_ADDA        *ddc = (DM_ADDA*)dmc->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmc, DM_CLASSID, 2);
  PetscCheckSameComm(dm, 1, dmc, 2);
  ierr = MatCreate(PetscObjectComm((PetscObject)dm), mat);CHKERRQ(ierr);
  ierr = MatSetSizes(*mat, dd->lsize, ddc->lsize, PETSC_DECIDE, PETSC_DECIDE);CHKERRQ(ierr);
  ierr = MatSetType(*mat, mtype);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
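/* Hedged usage sketch for DMADDAGetMatrixNS(): create a rectangular MPIAIJ
   matrix whose rows are indexed by one ADDA and whose columns by another.
   dmRow and dmCol are hypothetical ADDA objects on the same communicator. */
static PetscErrorCode ExampleADDAMatrix(DM dmRow,DM dmCol,Mat *NS)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = DMADDAGetMatrixNS(dmRow,dmCol,MATMPIAIJ,NS);CHKERRQ(ierr);
  /* preallocate and fill *NS here; the caller destroys it with MatDestroy() */
  PetscFunctionReturn(0);
}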
PetscErrorCode StokesSetupMatBlock01(Stokes *s)
{
  PetscInt       row, start, end, size, i, j;
  PetscInt       cols[5];
  PetscScalar    vals[5];
  PetscErrorCode ierr;

  PetscFunctionBeginUser;
  /* A[1] is 2N-by-N */
  ierr = MatCreate(PETSC_COMM_WORLD, &s->subA[1]);CHKERRQ(ierr);
  ierr = MatSetOptionsPrefix(s->subA[1], "a01_");CHKERRQ(ierr);
  ierr = MatSetSizes(s->subA[1], PETSC_DECIDE, PETSC_DECIDE, 2*s->nx*s->ny, s->nx*s->ny);CHKERRQ(ierr);
  ierr = MatSetType(s->subA[1], MATMPIAIJ);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(s->subA[1], 5, NULL, 5, NULL);CHKERRQ(ierr);
  ierr = MatGetOwnershipRange(s->subA[1], &start, &end);CHKERRQ(ierr);
  ierr = MatSetOption(s->subA[1], MAT_IGNORE_ZERO_ENTRIES, PETSC_TRUE);CHKERRQ(ierr);

  for (row = start; row < end; row++) {
    ierr = StokesGetPosition(s, row, &i, &j);CHKERRQ(ierr);
    if (row < s->nx*s->ny) {
      /* first part: rows 0 to (nx*ny-1) */
      ierr = StokesStencilGradientX(s, i, j, &size, cols, vals);CHKERRQ(ierr);
    } else {
      /* second part: rows (nx*ny) to (2*nx*ny-1) */
      ierr = StokesStencilGradientY(s, i, j, &size, cols, vals);CHKERRQ(ierr);
    }
    ierr = MatSetValues(s->subA[1], 1, &row, size, cols, vals, INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(s->subA[1], MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(s->subA[1], MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/*@C
   MatCreateDAAD - Creates a matrix that can do matrix-vector products using a
   local function that is differentiated with ADIFOR or ADIC.

   Collective on DMDA

   Input Parameter:
.  da - the DMDA that defines the distribution of the vectors

   Output Parameter:
.  A - the matrix

   Level: intermediate

   Notes: this is currently turned off for Fortran

.seealso: MatCreate(), DMDASetLocalAdicMFFunction()
@*/
PetscErrorCode MatCreateDAAD(DM da,Mat *A)
{
  PetscErrorCode ierr;
  MPI_Comm       comm;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)da,&comm);CHKERRQ(ierr);
  ierr = MatCreate(comm,A);CHKERRQ(ierr);
  ierr = MatSetType(*A,MATDAAD);CHKERRQ(ierr);
  ierr = MatDAADSetDA(*A,da);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
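/* Hedged sketch for MatCreateDAAD(): assumes a DMDA whose ADIC-differentiated
   local function was registered with DMDASetLocalAdicMFFunction(), so the
   resulting matrix applies the Jacobian matrix-free. */
static PetscErrorCode ExampleDAADJacobian(DM da,Mat *A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatCreateDAAD(da,A);CHKERRQ(ierr);
  /* *A can now be used with MatMult(); destroy with MatDestroy() when done */
  PetscFunctionReturn(0);
}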
int SFieldBeginRuns(SField sfv, unsigned int N, const unsigned int *nelem)
{
    mySField sf = static_cast<mySField>(sfv);
    unsigned int j;
    assert(!sf->running);
    sf->maxN         = N;
    sf->running      = 1;
    sf->timeAssembly = 0;
    sf->timeSolver   = 0;

    // Given k indices we need k*2^d variables.
    // Given N variables we need N / 2^d.
    // Now we are overestimating the number of modes.
    sf->modes = sf->maxN; //(sf->N+ignored_modes) / (1 << sf->d) + ((sf->N+ignored_modes) % (1 << sf->d) != 0);
    sf->N_multi_idx = new ind_t[sf->modes * sf->d];
    GenTDSet(sf->d, 0, sf->N_multi_idx, sf->modes);

    // Create sparse matrix of size prod(mesh)
    Mat J;
    Vec F;
    Vec U;
    int s = 1;
    for (j = 0; j < sf->d; j++) {
        sf->mesh[j] = nelem[j];
        s *= sf->mesh[j];
    }
    MatCreate(PETSC_COMM_WORLD, &J);
    MatSetSizes(J, s, s, s, s);
    MatSetType(J, MATSEQAIJ);
    MatSeqAIJSetPreallocation(J, 1 + 2*sf->d, NULL);
    MatSetFromOptions(J);
    /* MatSetType(J,MATSEQDENSE); */
    /* MatSeqDenseSetPreallocation(J,NULL); */
    /* MatSetFromOptions(J); */
    MatSetUp(J);

    VecCreate(PETSC_COMM_WORLD, &F);
    VecSetSizes(F, PETSC_DECIDE, s);
    VecSetFromOptions(F);
    VecSetUp(F);
    VecDuplicate(F, &U);

    KSP ksp;
    KSPCreate(PETSC_COMM_WORLD, &ksp);
    KSPSetFromOptions(ksp);

    sf->J   = J;
    sf->F   = F;
    sf->U   = U;
    sf->ksp = ksp;
    return 0;
}
EXTERN_C_END

#include <../src/mat/impls/aij/seq/aij.h>

EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "MatConvert_SeqAIJ_SeqBAIJ"
PetscErrorCode MatConvert_SeqAIJ_SeqBAIJ(Mat A,MatType newtype,MatReuse reuse,Mat *newmat)
{
  Mat            B;
  Mat_SeqAIJ     *a = (Mat_SeqAIJ*)A->data;
  Mat_SeqBAIJ    *b;
  PetscErrorCode ierr;
  PetscInt       *ai=a->i,m=A->rmap->N,n=A->cmap->N,i,*rowlengths;

  PetscFunctionBegin;
  if (n != m) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Matrix must be square");
  ierr = PetscMalloc(m*sizeof(PetscInt),&rowlengths);CHKERRQ(ierr);
  for (i=0; i<m; i++) rowlengths[i] = ai[i+1] - ai[i];
  ierr = MatCreate(((PetscObject)A)->comm,&B);CHKERRQ(ierr);
  ierr = MatSetSizes(B,m,n,m,n);CHKERRQ(ierr);
  ierr = MatSetType(B,MATSEQBAIJ);CHKERRQ(ierr);
  ierr = MatSeqBAIJSetPreallocation_SeqBAIJ(B,1,0,rowlengths);CHKERRQ(ierr);
  ierr = PetscFree(rowlengths);CHKERRQ(ierr);
  ierr = MatSetOption(B,MAT_ROW_ORIENTED,PETSC_TRUE);CHKERRQ(ierr);

  b    = (Mat_SeqBAIJ*)(B->data);
  ierr = PetscMemcpy(b->i,a->i,(m+1)*sizeof(PetscInt));CHKERRQ(ierr);
  ierr = PetscMemcpy(b->ilen,a->ilen,m*sizeof(PetscInt));CHKERRQ(ierr);
  ierr = PetscMemcpy(b->j,a->j,a->nz*sizeof(PetscInt));CHKERRQ(ierr);
  ierr = PetscMemcpy(b->a,a->a,a->nz*sizeof(MatScalar));CHKERRQ(ierr);

  ierr = MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  if (reuse == MAT_REUSE_MATRIX) {
    ierr = MatHeaderReplace(A,B);CHKERRQ(ierr);
  } else {
    *newmat = B;
  }
  PetscFunctionReturn(0);
}
int main(int argc,char **args)
{
  Mat            A,B;
  PetscViewer    fd;                        /* viewer */
  char           file[PETSC_MAX_PATH_LEN];  /* input file name */
  PetscErrorCode ierr;
  PetscBool      flg;
  Vec            v;

  PetscInitialize(&argc,&args,(char*)0,help);

  /* Determine the file from which we read the linear system
     (matrix and right-hand-side vector). */
  ierr = PetscOptionsGetString(NULL,"-f0",file,PETSC_MAX_PATH_LEN,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ(PETSC_COMM_WORLD,1,"Must indicate binary file with the -f0 option");
  ierr = PetscViewerBinaryOpen(PETSC_COMM_WORLD,file,FILE_MODE_READ,&fd);CHKERRQ(ierr);
  ierr = MatCreate(PETSC_COMM_WORLD,&A);CHKERRQ(ierr);
  ierr = MatSetType(A,MATSEQAIJ);CHKERRQ(ierr);
  ierr = MatLoad(A,fd);CHKERRQ(ierr);
  ierr = VecCreate(PETSC_COMM_WORLD,&v);CHKERRQ(ierr);
  ierr = VecLoad(v,fd);CHKERRQ(ierr);
  ierr = PetscViewerDestroy(&fd);CHKERRQ(ierr);

  ierr = MatView(A,PETSC_VIEWER_STDOUT_SELF);CHKERRQ(ierr);
  ierr = PadMatrix(A,v,3.0,&B);CHKERRQ(ierr);
  ierr = MatView(B,PETSC_VIEWER_STDOUT_SELF);CHKERRQ(ierr);

  ierr = MatDestroy(&B);CHKERRQ(ierr);
  ierr = MatDestroy(&A);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return 0;
}
int main(int argc,char **args)
{
  Mat            C;
  PetscViewer    viewer;
  PetscErrorCode ierr;

  ierr = PetscInitialize(&argc,&args,0,help);if (ierr) return ierr;
  ierr = PetscViewerBinaryOpen(PETSC_COMM_WORLD,"matrix.dat",FILE_MODE_READ,&viewer);CHKERRQ(ierr);
  ierr = MatCreate(PETSC_COMM_WORLD,&C);CHKERRQ(ierr);
  ierr = MatSetType(C,MATMPIDENSE);CHKERRQ(ierr);
  ierr = MatLoad(C,viewer);CHKERRQ(ierr);
  ierr = PetscViewerDestroy(&viewer);CHKERRQ(ierr);
  ierr = MatView(C,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  ierr = MatDestroy(&C);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return ierr;
}
PETSC_EXTERN PetscErrorCode MatGetFactor_aij_mkl_pardiso(Mat A,MatFactorType ftype,Mat *F)
{
  Mat             B;
  PetscErrorCode  ierr;
  Mat_MKL_PARDISO *mat_mkl_pardiso;
  PetscBool       isSeqAIJ;

  PetscFunctionBegin;
  /* Create the factorization matrix */
  ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJ,&isSeqAIJ);CHKERRQ(ierr);
  ierr = MatCreate(PetscObjectComm((PetscObject)A),&B);CHKERRQ(ierr);
  ierr = MatSetSizes(B,A->rmap->n,A->cmap->n,A->rmap->N,A->cmap->N);CHKERRQ(ierr);
  ierr = MatSetType(B,((PetscObject)A)->type_name);CHKERRQ(ierr);
  if (isSeqAIJ) {
    ierr = MatSeqAIJSetPreallocation(B,0,NULL);CHKERRQ(ierr);
  } else {
    SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix types other than MATSEQAIJ are not supported");
  }

  B->ops->lufactorsymbolic = MatLUFactorSymbolic_AIJMKL_PARDISO;
  B->ops->destroy          = MatDestroy_MKL_PARDISO;
  B->ops->view             = MatView_MKL_PARDISO;
  B->factortype            = ftype;
  B->ops->getinfo          = MatGetInfo_MKL_PARDISO;
  B->assembled             = PETSC_TRUE;  /* required by -ksp_view */

  ierr = PetscNewLog(B,&mat_mkl_pardiso);CHKERRQ(ierr);
  B->spptr = mat_mkl_pardiso;

  ierr = PetscObjectComposeFunction((PetscObject)B,"MatFactorGetSolverPackage_C",MatFactorGetSolverPackage_mkl_pardiso);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatMkl_PardisoSetCntl_C",MatMkl_PardisoSetCntl_MKL_PARDISO);CHKERRQ(ierr);
  ierr = PetscInitializeMKL_PARDISO(A, mat_mkl_pardiso);CHKERRQ(ierr);

  *F = B;
  PetscFunctionReturn(0);
}
/*@C
   MatCreateScatter - Creates a new matrix based on a VecScatter

   Collective on MPI_Comm

   Input Parameters:
+  comm - MPI communicator
-  scatter - a VecScatter context

   Output Parameter:
.  A - the matrix

   Level: intermediate

   PETSc requires that matrices and vectors being used for certain
   operations are partitioned accordingly.  For example, when creating a
   scatter matrix, A, that supports parallel matrix-vector products using
   MatMult(A,x,y), the user should set the number of local matrix rows to be
   the number of local elements of the corresponding result vector, y.
   Note that this information is required for use of the matrix interface
   routines, even though the scatter matrix may not actually be physically
   partitioned.

.keywords: matrix, scatter, create

.seealso: MatScatterSetVecScatter(), MatScatterGetVecScatter(), MATSCATTER
@*/
PetscErrorCode MatCreateScatter(MPI_Comm comm,VecScatter scatter,Mat *A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatCreate(comm,A);CHKERRQ(ierr);
  ierr = MatSetSizes(*A,scatter->to_n,scatter->from_n,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetType(*A,MATSCATTER);CHKERRQ(ierr);
  ierr = MatScatterSetVecScatter(*A,scatter);CHKERRQ(ierr);
  ierr = MatSetUp(*A);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
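/* Hedged usage sketch for MatCreateScatter(): wrap an existing VecScatter
   (built to map x into y) so that MatMult() performs the scatter.  The
   scatter context and the vectors are assumed to be created by the caller. */
static PetscErrorCode ExampleScatterAsMat(VecScatter scatter,Vec x,Vec y)
{
  Mat            S;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatCreateScatter(PETSC_COMM_WORLD,scatter,&S);CHKERRQ(ierr);
  ierr = MatMult(S,x,y);CHKERRQ(ierr);  /* scatters x into y */
  ierr = MatDestroy(&S);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}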
/*
   RHSMatrixAdvection - User-provided routine to compute the right-hand-side
   matrix for the advection (gradient) operator.

   Input Parameters:
   ts - the TS context
   t - current time
   X - global input vector
   ctx - optional user-defined context, as set by TSSetRHSJacobian()

   Output Parameters:
   A - Jacobian matrix
   BB - optionally different preconditioning matrix
*/
PetscErrorCode RHSMatrixAdvectiongllDM(TS ts,PetscReal t,Vec X,Mat A,Mat BB,void *ctx)
{
  PetscReal      **temp;
  AppCtx         *appctx = (AppCtx*)ctx;  /* user-defined application context */
  PetscErrorCode ierr;
  PetscInt       xs,xn,l,j;
  PetscInt       *rowsDM;
  PetscBool      flg = PETSC_FALSE;

  ierr = PetscOptionsGetBool(NULL,NULL,"-gll_mf",&flg,NULL);CHKERRQ(ierr);

  if (!flg) {
    /* Create the advection matrix for the given GLL points */
    ierr = PetscGLLElementAdvectionCreate(&appctx->SEMop.gll,&temp);CHKERRQ(ierr);
    ierr = MatSetOption(A,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_FALSE);CHKERRQ(ierr);
    ierr = DMDAGetCorners(appctx->da,&xs,NULL,NULL,&xn,NULL,NULL);CHKERRQ(ierr);
    xs   = xs/(appctx->param.N-1);
    xn   = xn/(appctx->param.N-1);
    ierr = PetscMalloc1(appctx->param.N,&rowsDM);CHKERRQ(ierr);
    for (j=xs; j<xs+xn; j++) {
      for (l=0; l<appctx->param.N; l++) {
        rowsDM[l] = 1+(j-xs)*(appctx->param.N-1)+l;
      }
      ierr = MatSetValuesLocal(A,appctx->param.N,rowsDM,appctx->param.N,rowsDM,&temp[0][0],ADD_VALUES);CHKERRQ(ierr);
    }
    ierr = PetscFree(rowsDM);CHKERRQ(ierr);
    ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = VecReciprocal(appctx->SEMop.mass);CHKERRQ(ierr);
    ierr = MatDiagonalScale(A,appctx->SEMop.mass,0);CHKERRQ(ierr);
    ierr = VecReciprocal(appctx->SEMop.mass);CHKERRQ(ierr);
    ierr = PetscGLLElementAdvectionDestroy(&appctx->SEMop.gll,&temp);CHKERRQ(ierr);
  } else {
    /* matrix-free variant: apply the advection operator through a shell matrix */
    ierr = MatSetType(A,MATSHELL);CHKERRQ(ierr);
    ierr = MatSetUp(A);CHKERRQ(ierr);
    ierr = MatShellSetContext(A,appctx);CHKERRQ(ierr);
    ierr = MatShellSetOperation(A,MATOP_MULT,(void (*)(void))MatMult_Advection);CHKERRQ(ierr);
  }
  return 0;
}
int readmm(char s[], Mat *pA)
{
  FILE           *file;
  int            *Is,*Js;
  PetscScalar    *Vs;
  PetscInt       m,n,nnz,i;
  PetscErrorCode ierr;
  char           buf[100];

  ierr = PetscFOpen(PETSC_COMM_SELF,s,"r",&file);CHKERRQ(ierr);

  /* process header with comments; note the %d parsing assumes 32-bit PetscInt */
  do fgets(buf,sizeof(buf),file);
  while (buf[0] == '%');
  sscanf(buf,"%d %d %d\n",&m,&n,&nnz);
  //ierr = PetscPrintf(PETSC_COMM_SELF,"m = %d, n = %d, nnz = %d\n",m,n,nnz);

  /* reserve memory for matrices */
  ierr = PetscMalloc3(nnz,&Is, nnz,&Js, nnz,&Vs);CHKERRQ(ierr);
  for (i=0; i<nnz; i++) {
    ierr = fscanf(file,"%d %d %le\n",&Is[i],&Js[i],(double*)&Vs[i]);
    //ierr = PetscPrintf(PETSC_COMM_WORLD,"%d,%d,%le\n",Is[i],Js[i],Vs[i]);CHKERRQ(ierr);
    if (ierr == EOF) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_USER,"i=%d, reached EOF\n",i);
    Is[i]--; Js[i]--;  /* adjust from 1-based to 0-based indexing */
  }
  fclose(file);
  //ierr = PetscPrintf(PETSC_COMM_SELF,"Read file completes.\n");CHKERRQ(ierr);

  /* Create and assemble matrix */
  ierr = MatCreate(PETSC_COMM_SELF,pA);CHKERRQ(ierr);
  ierr = MatSetType(*pA, /*MATDENSE*/ MATSEQAIJ);CHKERRQ(ierr);
  ierr = MatSetSizes(*pA,PETSC_DECIDE,PETSC_DECIDE,m,n);CHKERRQ(ierr);
  ierr = MatSetFromOptions(*pA);CHKERRQ(ierr);
  ierr = MatSetUp(*pA);CHKERRQ(ierr);
  for (i=0; i<nnz; i++) {
    ierr = MatSetValues(*pA,1,&Is[i],1,&Js[i],&Vs[i],INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(*pA,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*pA,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  //ierr = MatView(*pA,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  ierr = PetscFree3(Is,Js,Vs);CHKERRQ(ierr);
  return 0;
}
PetscErrorCode StokesSetupMatBlock00(Stokes *s)
{
  PetscInt       row, start, end, size, i, j;
  PetscInt       cols[5];
  PetscScalar    vals[5];
  PetscErrorCode ierr;

  PetscFunctionBeginUser;
  /* A[0] is 2N-by-2N */
  ierr = MatCreate(PETSC_COMM_WORLD, &s->subA[0]);CHKERRQ(ierr);
  ierr = MatSetOptionsPrefix(s->subA[0], "a00_");CHKERRQ(ierr);
  ierr = MatSetSizes(s->subA[0], PETSC_DECIDE, PETSC_DECIDE, 2*s->nx*s->ny, 2*s->nx*s->ny);CHKERRQ(ierr);
  ierr = MatSetType(s->subA[0], MATMPIAIJ);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(s->subA[0], 5, NULL, 5, NULL);CHKERRQ(ierr);
  ierr = MatGetOwnershipRange(s->subA[0], &start, &end);CHKERRQ(ierr);

  for (row = start; row < end; row++) {
    ierr = StokesGetPosition(s, row, &i, &j);CHKERRQ(ierr);
    /* first part: rows 0 to (nx*ny-1) */
    ierr = StokesStencilLaplacian(s, i, j, &size, cols, vals);CHKERRQ(ierr);
    /* second part: rows (nx*ny) to (2*nx*ny-1) */
    if (row >= s->nx*s->ny) {
      for (i = 0; i < 5; i++) cols[i] = cols[i] + s->nx*s->ny;
    }
    for (i = 0; i < 5; i++) vals[i] = -1.0*vals[i];  /* dynamic viscosity coefficient mu = -1 */
    ierr = MatSetValues(s->subA[0], 1, &row, size, cols, vals, INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(s->subA[0], MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(s->subA[0], MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
// Test Mat BNHat up to N=10 when Op is a diagonal Mat
TEST(operatorsCreateBnHeadTest, testBNHatDiagonalOp)
{
    Mat Op;  // operator (e.g., Laplacian)

    PetscReal dt = 2.3,        // time-step size
              c = 0.5,         // time-scheme coefficient of the implicit diffusive term
              val = 2.0 / dt;  // value set on the diagonal of the operator

    PetscInt nx = 10,  // number of points in the x-direction
             ny = 12;  // number of points in the y-direction

    PetscReal ans = nx * ny * dt;  // expected sum of all elements of B1Hat

    // Create and assemble operator
    MatCreate(PETSC_COMM_WORLD, &Op);
    MatSetSizes(Op, PETSC_DECIDE, PETSC_DECIDE, nx * ny, nx * ny);
    MatSetType(Op, MATAIJ);
    MatSetUp(Op);
    for (PetscInt i = 0; i < nx * ny; i++)
        MatSetValues(Op, 1, &i, 1, &i, &val, INSERT_VALUES);
    MatAssemblyBegin(Op, MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(Op, MAT_FINAL_ASSEMBLY);

    for (PetscInt N = 1; N <= 10; N++)
    {
        Mat BNHat;  // Nth-order approximation of the inverse of (I/dt - c*Op)

        // Call function to test
        petibm::operators::createBnHead(Op, dt, c, N, BNHat);

        // Check size of Mat BNHat
        {
            PetscInt nrows, ncols;
            MatGetSize(BNHat, &nrows, &ncols);
            ASSERT_EQ(nx * ny, nrows);
            ASSERT_EQ(nx * ny, ncols);
        }

        // Check sum of elements of BNHat is the expected value
        if (N > 1) ans += dt * nx * ny * std::pow(c * dt * val, N - 1);
        {
            PetscReal sum;
            Vec v;
            MatCreateVecs(Op, &v, nullptr);
            MatGetRowSum(BNHat, v);
            VecSum(v, &sum);
            ASSERT_TRUE(std::abs(sum - ans) <= 1.0E-11);
            VecDestroy(&v);
        }

        MatDestroy(&BNHat);
    }

    MatDestroy(&Op);
}
PETSC_EXTERN PetscErrorCode MatGetFactor_seqaij_matlab(Mat A,MatFactorType ftype,Mat *F)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (A->cmap->N != A->rmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"matrix must be square");
  ierr = MatCreate(PetscObjectComm((PetscObject)A),F);CHKERRQ(ierr);
  ierr = MatSetSizes(*F,A->rmap->n,A->cmap->n,A->rmap->n,A->cmap->n);CHKERRQ(ierr);
  ierr = MatSetType(*F,((PetscObject)A)->type_name);CHKERRQ(ierr);
  ierr = MatSeqAIJSetPreallocation(*F,0,NULL);CHKERRQ(ierr);

  (*F)->ops->lufactorsymbolic  = MatLUFactorSymbolic_Matlab;
  (*F)->ops->ilufactorsymbolic = MatLUFactorSymbolic_Matlab;

  ierr = PetscObjectComposeFunction((PetscObject)(*F),"MatFactorGetSolverPackage_C",MatFactorGetSolverPackage_seqaij_matlab);CHKERRQ(ierr);

  (*F)->factortype = ftype;
  PetscFunctionReturn(0);
}
void PETScLinearSolver::MatrixCreate(PetscInt m, PetscInt n)
{
    MatCreate(PETSC_COMM_WORLD, &A);

    // TEST
    MatSetSizes(A, m_size_loc, PETSC_DECIDE, m, n);
    MatSetSizes(A, PETSC_DECIDE, PETSC_DECIDE, m, n);
    //MatSetSizes(A, m_size_loc, PETSC_DECIDE, m, n);

    MatSetType(A, MATMPIAIJ);
    MatSetFromOptions(A);

    MatSeqAIJSetPreallocation(A, d_nz, PETSC_NULL);
    MatMPIAIJSetPreallocation(A, d_nz, PETSC_NULL, o_nz, PETSC_NULL);
    MatSetOption(A, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
    MatSetUp(A);

    // KG44: this seems to work with petsc 3.3; the commands below resulted in
    // problems when assembling the matrix with version 3.3
    MatGetOwnershipRange(A, &i_start, &i_end);
}
PetscErrorCode TaoDefaultComputeHessianMFFD(Tao tao,Vec X,Mat H,Mat B,void *ctx)
{
  PetscInt       n,N;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (B && B != H) SETERRQ(PetscObjectComm((PetscObject)tao),PETSC_ERR_SUP,"Preconditioning Hessian matrix");
  ierr = VecGetSize(X,&N);CHKERRQ(ierr);
  ierr = VecGetLocalSize(X,&n);CHKERRQ(ierr);
  ierr = MatSetSizes(H,n,n,N,N);CHKERRQ(ierr);
  ierr = MatSetType(H,MATMFFD);CHKERRQ(ierr);
  ierr = MatSetUp(H);CHKERRQ(ierr);
  ierr = MatMFFDSetBase(H,X,NULL);CHKERRQ(ierr);
  ierr = MatMFFDSetFunction(H,(PetscErrorCode (*)(void*,Vec,Vec))TaoComputeGradient,tao);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(H,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(H,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
PETSC_EXTERN PetscErrorCode MatConvert_SeqAIJ_SeqBAIJ(Mat A,MatType newtype,MatReuse reuse,Mat *newmat)
{
  Mat            B;
  Mat_SeqAIJ     *a = (Mat_SeqAIJ*)A->data;
  Mat_SeqBAIJ    *b;
  PetscErrorCode ierr;
  PetscInt       *ai=a->i,m=A->rmap->N,n=A->cmap->N,i,*rowlengths;

  PetscFunctionBegin;
  if (n != m) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Matrix must be square");
  if (A->rmap->bs > 1) {
    ierr = MatConvert_Basic(A,newtype,reuse,newmat);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }

  ierr = PetscMalloc1(m,&rowlengths);CHKERRQ(ierr);
  for (i=0; i<m; i++) rowlengths[i] = ai[i+1] - ai[i];
  ierr = MatCreate(PetscObjectComm((PetscObject)A),&B);CHKERRQ(ierr);
  ierr = MatSetSizes(B,m,n,m,n);CHKERRQ(ierr);
  ierr = MatSetType(B,MATSEQBAIJ);CHKERRQ(ierr);
  ierr = MatSeqBAIJSetPreallocation_SeqBAIJ(B,1,0,rowlengths);CHKERRQ(ierr);
  ierr = PetscFree(rowlengths);CHKERRQ(ierr);
  ierr = MatSetOption(B,MAT_ROW_ORIENTED,PETSC_TRUE);CHKERRQ(ierr);

  b    = (Mat_SeqBAIJ*)(B->data);
  ierr = PetscMemcpy(b->i,a->i,(m+1)*sizeof(PetscInt));CHKERRQ(ierr);
  ierr = PetscMemcpy(b->ilen,a->ilen,m*sizeof(PetscInt));CHKERRQ(ierr);
  ierr = PetscMemcpy(b->j,a->j,a->nz*sizeof(PetscInt));CHKERRQ(ierr);
  ierr = PetscMemcpy(b->a,a->a,a->nz*sizeof(MatScalar));CHKERRQ(ierr);

  ierr = MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  if (reuse == MAT_INPLACE_MATRIX) {
    ierr = MatHeaderReplace(A,&B);CHKERRQ(ierr);
  } else {
    *newmat = B;
  }
  PetscFunctionReturn(0);
}
/*! \brief Create an empty matrix
 *
 * \param N1 number of rows
 * \param N2 number of columns
 * \param n_row_local number of local rows
 *
 */
SparseMatrix(size_t N1, size_t N2, size_t n_row_local)
:l_row(n_row_local),l_col(n_row_local)
{
    PETSC_SAFE_CALL(MatCreate(PETSC_COMM_WORLD,&mat));
    PETSC_SAFE_CALL(MatSetType(mat,MATMPIAIJ));
    PETSC_SAFE_CALL(MatSetSizes(mat,n_row_local,n_row_local,N1,N2));

    Vcluster & v_cl = create_vcluster();
    openfpm::vector<size_t> vn_row_local;

    // Gather the number of local rows from every processor
    v_cl.allGather(l_row,vn_row_local);
    v_cl.execute();

    // Calculate the starting row for this processor
    start_row = 0;
    for (size_t i = 0 ; i < v_cl.getProcessUnitID() ; i++)
        start_row += vn_row_local.get(i);
}
static PetscErrorCode DMCreateMatrix_Shell(DM dm,MatType mtype,Mat *J)
{
  PetscErrorCode ierr;
  DM_Shell       *shell = (DM_Shell*)dm->data;
  Mat            A;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm,DM_CLASSID,1);
  PetscValidPointer(J,3);
  if (!shell->A) {
    if (shell->Xglobal) {
      PetscInt m,M;
      ierr = PetscInfo(dm,"Naively creating matrix using global vector distribution without preallocation\n");CHKERRQ(ierr);
      ierr = VecGetSize(shell->Xglobal,&M);CHKERRQ(ierr);
      ierr = VecGetLocalSize(shell->Xglobal,&m);CHKERRQ(ierr);
      ierr = MatCreate(PetscObjectComm((PetscObject)dm),&shell->A);CHKERRQ(ierr);
      ierr = MatSetSizes(shell->A,m,m,M,M);CHKERRQ(ierr);
      if (mtype) {ierr = MatSetType(shell->A,mtype);CHKERRQ(ierr);}
      ierr = MatSetUp(shell->A);CHKERRQ(ierr);
    } else SETERRQ(PetscObjectComm((PetscObject)dm),PETSC_ERR_USER,"Must call DMShellSetMatrix(), DMShellSetCreateMatrix(), or provide a vector");
  }
  A = shell->A;

  /* the check below is tacky and incomplete */
  if (mtype) {
    PetscBool flg,aij,seqaij,mpiaij;
    ierr = PetscObjectTypeCompare((PetscObject)A,mtype,&flg);CHKERRQ(ierr);
    ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJ,&seqaij);CHKERRQ(ierr);
    ierr = PetscObjectTypeCompare((PetscObject)A,MATMPIAIJ,&mpiaij);CHKERRQ(ierr);
    ierr = PetscStrcmp(mtype,MATAIJ,&aij);CHKERRQ(ierr);
    if (!flg) {
      if (!(aij && (seqaij || mpiaij))) SETERRQ2(PetscObjectComm((PetscObject)dm),PETSC_ERR_ARG_NOTSAMETYPE,"Requested matrix of type %s, but only %s available",mtype,((PetscObject)A)->type_name);
    }
  }

  if (((PetscObject)A)->refct < 2) {
    /* We have an exclusive reference so we can give it out */
    ierr = PetscObjectReference((PetscObject)A);CHKERRQ(ierr);
    ierr = MatZeroEntries(A);CHKERRQ(ierr);
    *J   = A;
  } else {
    /* Need to create a copy, could use MAT_SHARE_NONZERO_PATTERN in most cases */
    ierr = MatDuplicate(A,MAT_DO_NOT_COPY_VALUES,J);CHKERRQ(ierr);
    ierr = MatZeroEntries(*J);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
int main(int argc,char **args)
{
  Mat            A;
  PetscInt       *ia,*ja;
  PetscErrorCode ierr;
  PetscMPIInt    rank,size;

  PetscInitialize(&argc,&args,(char*)0,help);
  ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);
  if (size != 4) SETERRQ(PETSC_COMM_WORLD,1,"Must run with 4 processors");
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);

  ierr = PetscMalloc1(5,&ia);CHKERRQ(ierr);
  ierr = PetscMalloc1(16,&ja);CHKERRQ(ierr);

  if (!rank) {
    ja[0] = 1; ja[1] = 4; ja[2] = 0; ja[3] = 2; ja[4] = 5; ja[5] = 1; ja[6] = 3; ja[7] = 6; ja[8] = 2; ja[9] = 7;
    ia[0] = 0; ia[1] = 2; ia[2] = 5; ia[3] = 8; ia[4] = 10;
  } else if (rank == 1) {
    ja[0] = 0; ja[1] = 5; ja[2] = 8; ja[3] = 1; ja[4] = 4; ja[5] = 6; ja[6] = 9; ja[7] = 2; ja[8] = 5; ja[9] = 7; ja[10] = 10; ja[11] = 3; ja[12] = 6; ja[13] = 11;
    ia[0] = 0; ia[1] = 3; ia[2] = 7; ia[3] = 11; ia[4] = 14;
  } else if (rank == 2) {
    ja[0] = 4; ja[1] = 9; ja[2] = 12; ja[3] = 5; ja[4] = 8; ja[5] = 10; ja[6] = 13; ja[7] = 6; ja[8] = 9; ja[9] = 11; ja[10] = 14; ja[11] = 7; ja[12] = 10; ja[13] = 15;
    ia[0] = 0; ia[1] = 3; ia[2] = 7; ia[3] = 11; ia[4] = 14;
  } else {
    ja[0] = 8; ja[1] = 13; ja[2] = 9; ja[3] = 12; ja[4] = 14; ja[5] = 10; ja[6] = 13; ja[7] = 15; ja[8] = 11; ja[9] = 14;
    ia[0] = 0; ia[1] = 2; ia[2] = 5; ia[3] = 8; ia[4] = 10;
  }

  ierr = MatCreate(PETSC_COMM_WORLD,&A);CHKERRQ(ierr);
  ierr = MatSetSizes(A,4,4,16,16);CHKERRQ(ierr);
  ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocationCSR(A,ia,ja,NULL);CHKERRQ(ierr);
  ierr = PetscFree(ia);CHKERRQ(ierr);
  ierr = PetscFree(ja);CHKERRQ(ierr);

  ierr = MatView(A,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  ierr = MatDestroy(&A);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return 0;
}
EXTERN_C_END #undef __FUNCT__ #define __FUNCT__ "MatCreateScatter" /*@C MatCreateScatter - Creates a new matrix based on a VecScatter Collective on MPI_Comm Input Parameters: + comm - MPI communicator - scatter - a VecScatterContext Output Parameter: . A - the matrix Level: intermediate PETSc requires that matrices and vectors being used for certain operations are partitioned accordingly. For example, when creating a scatter matrix, A, that supports parallel matrix-vector products using MatMult(A,x,y) the user should set the number of local matrix rows to be the number of local elements of the corresponding result vector, y. Note that this is information is required for use of the matrix interface routines, even though the scatter matrix may not actually be physically partitioned. For example, .keywords: matrix, scatter, create .seealso: MatScatterSetVecScatter(), MatScatterGetVecScatter(), MATSCATTER @*/ PetscErrorCode PETSCMAT_DLLEXPORT MatCreateScatter(MPI_Comm comm,VecScatter scatter,Mat *A) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatCreate(comm,A);CHKERRQ(ierr); ierr = MatSetSizes(*A,scatter->to_n,scatter->from_n,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr); ierr = MatSetType(*A,MATSCATTER);CHKERRQ(ierr); ierr = MatScatterSetVecScatter(*A,scatter);CHKERRQ(ierr); PetscFunctionReturn(0); }
    -fout <output_file> : file for saving output matrix and vector\n\n";

#include <petscmat.h>

#undef __FUNCT__
#define __FUNCT__ "main"
int main(int argc,char **args)
{
  PetscErrorCode ierr;
  PetscBool      flg;
  Vec            x;
  Mat            A;
  char           file[256];
  PetscViewer    fd;

  PetscInitialize(&argc,&args,(char*)0,help);

  /* Read matrix and RHS */
  ierr = PetscOptionsGetString(PETSC_NULL,"-fin",file,256,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ(PETSC_COMM_WORLD,1,help);
  ierr = PetscViewerBinaryOpen(PETSC_COMM_WORLD,file,FILE_MODE_READ,&fd);CHKERRQ(ierr);
  ierr = MatCreate(PETSC_COMM_WORLD,&A);CHKERRQ(ierr);
  ierr = MatSetType(A,MATSEQAIJ);CHKERRQ(ierr);
  ierr = MatLoad(A,fd);CHKERRQ(ierr);
  ierr = VecCreate(PETSC_COMM_WORLD,&x);CHKERRQ(ierr);
  ierr = VecLoad(x,fd);CHKERRQ(ierr);
  ierr = PetscViewerDestroy(&fd);CHKERRQ(ierr);

  /* Write matrix and vector */
  ierr = PetscOptionsGetString(PETSC_NULL,"-fout",file,256,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ(PETSC_COMM_WORLD,1,help);
  ierr = PetscViewerBinaryOpen(PETSC_COMM_WORLD,file,FILE_MODE_WRITE,&fd);CHKERRQ(ierr);
  ierr = MatView(A,fd);CHKERRQ(ierr);
  ierr = VecView(x,fd);CHKERRQ(ierr);

  /* Free data structures */
  ierr = MatDestroy(&A);CHKERRQ(ierr);
  ierr = VecDestroy(&x);CHKERRQ(ierr);
  ierr = PetscViewerDestroy(&fd);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return 0;
}
void Field_solver::alloc_petsc_matrix( Mat *A,
                                       PetscInt nrow_local, PetscInt ncol_local,
                                       PetscInt nrow, PetscInt ncol,
                                       PetscInt nonzero_per_row )
{
    PetscErrorCode ierr;
    // PetscInt approx_nonzero_per_row = 7;

    ierr = MatCreate( PETSC_COMM_WORLD, A ); CHKERRXX( ierr );
    ierr = MatSetSizes( *A, nrow_local, ncol_local, nrow, ncol ); CHKERRXX( ierr );
    ierr = MatSetFromOptions( *A ); CHKERRXX( ierr );
    ierr = MatSetType( *A, MATAIJ ); CHKERRXX( ierr );
    // redo: set nonzero_per_row more accurately.
    // If nlocal >= (nx-2)*(ny-2):          max_diag_nonzero_per_row = 7, max_offdiag_nonzero_per_row = 3.
    // If (nx-2) <= nlocal < (nx-2)*(ny-2): max_diag_nonzero_per_row = 5, max_offdiag_nonzero_per_row = 4.
    // Probably.
    ierr = MatMPIAIJSetPreallocation( *A, nonzero_per_row, NULL,
                                      nonzero_per_row, NULL ); CHKERRXX( ierr );
    ierr = MatSetUp( *A ); CHKERRXX( ierr );
    return;
}