/***************** Array Access ********************/
int DA::createMatrix(Mat &M, MatType mtype, unsigned int dof) {
  // first determine the local size ...
  unsigned int sz = 0;
  if(m_bIamActive) {
    sz = dof*(m_uiNodeSize + m_uiBoundaryNodeSize);
  }//end if active

  // now create the PETSc Mat
  // The "parallel direct solver" matrix types like MATAIJSPOOLES were all removed in petsc-3.0.0,
  // so I (Ilya Lashuk) removed all such checks for matrix type. Hopefully that is reasonable.
  PetscBool isAij, isAijSeq, isAijPrl, isSuperLU, isSuperLU_Dist;
  PetscStrcmp(mtype, MATAIJ, &isAij);
  PetscStrcmp(mtype, MATSEQAIJ, &isAijSeq);
  PetscStrcmp(mtype, MATMPIAIJ, &isAijPrl);
  isSuperLU = PETSC_FALSE;      // PetscStrcmp(mtype, MATSUPERLU, &isSuperLU);
  isSuperLU_Dist = PETSC_FALSE; // PetscStrcmp(mtype, MATSUPERLU_DIST, &isSuperLU_Dist);

  MatCreate(m_mpiCommAll, &M);
  MatSetSizes(M, sz, sz, PETSC_DECIDE, PETSC_DECIDE);
  MatSetType(M, mtype);

  if(isAij || isAijSeq || isAijPrl || isSuperLU || isSuperLU_Dist) {
    if(m_iNpesAll > 1) {
      MatMPIAIJSetPreallocation(M, 53*dof, PETSC_NULL, 53*dof, PETSC_NULL);
    } else {
      MatSeqAIJSetPreallocation(M, 53*dof, PETSC_NULL);
    }
  }

  return 0;
}//end function
PetscErrorCode StokesSetupMatBlock00(Stokes *s)
{
  PetscInt       row, start, end, size, i, j;
  PetscInt       cols[5];
  PetscScalar    vals[5];
  PetscErrorCode ierr;

  PetscFunctionBeginUser;
  /* A[0] is 2N-by-2N */
  ierr = MatCreate(PETSC_COMM_WORLD, &s->subA[0]);CHKERRQ(ierr);
  ierr = MatSetOptionsPrefix(s->subA[0], "a00_");CHKERRQ(ierr);
  ierr = MatSetSizes(s->subA[0], PETSC_DECIDE, PETSC_DECIDE, 2*s->nx*s->ny, 2*s->nx*s->ny);CHKERRQ(ierr);
  ierr = MatSetType(s->subA[0], MATMPIAIJ);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(s->subA[0], 5, NULL, 5, NULL);CHKERRQ(ierr);
  ierr = MatGetOwnershipRange(s->subA[0], &start, &end);CHKERRQ(ierr);
  for (row = start; row < end; row++) {
    ierr = StokesGetPosition(s, row, &i, &j);CHKERRQ(ierr);
    /* first part: rows 0 to (nx*ny-1) */
    ierr = StokesStencilLaplacian(s, i, j, &size, cols, vals);CHKERRQ(ierr);
    /* second part: rows (nx*ny) to (2*nx*ny-1) */
    if (row >= s->nx*s->ny) {
      for (i = 0; i < 5; i++) cols[i] = cols[i] + s->nx*s->ny;
    }
    for (i = 0; i < 5; i++) vals[i] = -1.0*vals[i]; /* dynamic viscosity coefficient mu = -1 */
    ierr = MatSetValues(s->subA[0], 1, &row, size, cols, vals, INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(s->subA[0], MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(s->subA[0], MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
int DA::createActiveMatrix(Mat &M, MatType mtype, unsigned int dof) {
  // first determine the size ...
  unsigned int sz = 0;
  if(m_bIamActive) {
    sz = dof*(m_uiNodeSize + m_uiBoundaryNodeSize);

    // now create the PETSc Mat
    PetscBool isAij, isAijSeq, isAijPrl, isSuperLU, isSuperLU_Dist;
    PetscStrcmp(mtype, MATAIJ, &isAij);
    PetscStrcmp(mtype, MATSEQAIJ, &isAijSeq);
    PetscStrcmp(mtype, MATMPIAIJ, &isAijPrl);
    isSuperLU = PETSC_FALSE;      //PetscStrcmp(mtype, MATSUPERLU, &isSuperLU);
    isSuperLU_Dist = PETSC_FALSE; //PetscStrcmp(mtype, MATSUPERLU_DIST, &isSuperLU_Dist);

    MatCreate(m_mpiCommActive, &M);
    MatSetSizes(M, sz, sz, PETSC_DECIDE, PETSC_DECIDE);
    MatSetType(M, mtype);

    if(isAij || isAijSeq || isAijPrl || isSuperLU || isSuperLU_Dist) {
      if(m_iNpesActive > 1) {
        MatMPIAIJSetPreallocation(M, 53*dof, PETSC_NULL, 53*dof, PETSC_NULL);
      } else {
        MatSeqAIJSetPreallocation(M, 53*dof, PETSC_NULL);
      }
    }
  }//end if active

  return 0;
}//end function
static PetscErrorCode DMCreateInterpolation_Redundant(DM dmc,DM dmf,Mat *P,Vec *scale)
{
  PetscErrorCode ierr;
  DM_Redundant   *redc = (DM_Redundant*)dmc->data;
  DM_Redundant   *redf = (DM_Redundant*)dmf->data;
  PetscMPIInt    flag;
  PetscInt       i,rstart,rend;

  PetscFunctionBegin;
  ierr = MPI_Comm_compare(PetscObjectComm((PetscObject)dmc),PetscObjectComm((PetscObject)dmf),&flag);CHKERRQ(ierr);
  if (flag != MPI_CONGRUENT && flag != MPI_IDENT) SETERRQ(PetscObjectComm((PetscObject)dmf),PETSC_ERR_SUP,"cannot change communicators");
  if (redc->rank != redf->rank) SETERRQ(PetscObjectComm((PetscObject)dmf),PETSC_ERR_ARG_INCOMP,"Owning rank does not match");
  if (redc->N != redf->N) SETERRQ(PetscObjectComm((PetscObject)dmf),PETSC_ERR_ARG_INCOMP,"Global size does not match");
  /* The interpolation is the identity: one nonzero per row, all in the diagonal block. */
  ierr = MatCreate(PetscObjectComm((PetscObject)dmc),P);CHKERRQ(ierr);
  ierr = MatSetSizes(*P,redc->n,redc->n,redc->N,redc->N);CHKERRQ(ierr);
  ierr = MatSetType(*P,MATAIJ);CHKERRQ(ierr);
  ierr = MatSeqAIJSetPreallocation(*P,1,NULL);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(*P,1,NULL,0,NULL);CHKERRQ(ierr);
  ierr = MatGetOwnershipRange(*P,&rstart,&rend);CHKERRQ(ierr);
  for (i=rstart; i<rend; i++) {
    ierr = MatSetValue(*P,i,i,1.0,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(*P,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*P,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  if (scale) {ierr = DMCreateInterpolationScale(dmc,dmf,*P,scale);CHKERRQ(ierr);}
  PetscFunctionReturn(0);
}
PetscErrorCode cHamiltonianMatrix::hamiltonianConstruction() {
  ierr = MatCreate(PETSC_COMM_WORLD, &Hpolaron);CHKERRQ(ierr);
  ierr = MatSetType(Hpolaron, MATMPIAIJ);CHKERRQ(ierr);
  ierr = MatSetSizes(Hpolaron, PETSC_DECIDE, PETSC_DECIDE, DIM, DIM);CHKERRQ(ierr);
  // TODO: we should be able to set the symmetric/hermitian option, construct only the
  // upper-right triangle of the matrix, and perform the corresponding operations on it.
  // ierr = MatSetOption(Hpolaron,MAT_SYMMETRIC,PETSC_TRUE);CHKERRQ(ierr);
  // ierr = MatSetOption(Hpolaron,MAT_HERMITIAN,PETSC_TRUE);CHKERRQ(ierr);
  // TODO: what is a good preallocation estimate? The first count is the number of
  // nonzeros per row in the DIAGONAL portion of the local submatrix (same value used
  // for all local rows); the second is the same for the OFF-DIAGONAL portion. DIM is
  // a safe but wasteful upper bound, used temporarily here. More details at
  // http://www.mcs.anl.gov/petsc/petsc-current/docs/manualpages/Mat/MatMPIAIJSetPreallocation.html
  ierr = MatMPIAIJSetPreallocation(Hpolaron, DIM, NULL, DIM, NULL);CHKERRQ(ierr);
  ierr = MatSeqAIJSetPreallocation(Hpolaron, DIM, NULL);CHKERRQ(ierr);
  ierr = MatGetOwnershipRange(Hpolaron, &rstart, &rend);CHKERRQ(ierr);
  ierr = MatGetLocalSize(Hpolaron, &nlocal, NULL);CHKERRQ(ierr);
  ierr = assemblance();CHKERRQ(ierr);
  ierr = MatAssemblyBegin(Hpolaron, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(Hpolaron, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  // ierr = PetscViewerSetFormat(PETSC_VIEWER_STDOUT_WORLD, PETSC_VIEWER_ASCII_DENSE);CHKERRQ(ierr);
  // ierr = PetscViewerSetFormat(PETSC_VIEWER_STDOUT_WORLD, PETSC_VIEWER_ASCII_MATLAB);CHKERRQ(ierr);
  // ierr = MatView(Hpolaron, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  return ierr;
}
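/* Sketch (not from the original source): the preallocation TODO above can be answered
   exactly with the MATPREALLOCATOR matrix type available in recent PETSc releases (3.9+):
   insert the nonzero pattern once into a throwaway preallocator matrix, then transfer the
   counted pattern to the real matrix. insertPattern is a hypothetical callback performing
   the same MatSetValues calls as assemblance(). */
static PetscErrorCode preallocateExactly(MPI_Comm comm, PetscInt dim, Mat H,
                                         PetscErrorCode (*insertPattern)(Mat))
{
  Mat            preallocator;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatCreate(comm, &preallocator);CHKERRQ(ierr);
  ierr = MatSetSizes(preallocator, PETSC_DECIDE, PETSC_DECIDE, dim, dim);CHKERRQ(ierr);
  ierr = MatSetType(preallocator, MATPREALLOCATOR);CHKERRQ(ierr);
  ierr = MatSetUp(preallocator);CHKERRQ(ierr);
  ierr = (*insertPattern)(preallocator);CHKERRQ(ierr); /* values are ignored; only the pattern is counted */
  ierr = MatAssemblyBegin(preallocator, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(preallocator, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  /* transfer the counted pattern onto H and fill it with zeros for fast first assembly */
  ierr = MatPreallocatorPreallocate(preallocator, PETSC_TRUE, H);CHKERRQ(ierr);
  ierr = MatDestroy(&preallocator);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}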
PetscErrorCode StokesSetupMatFp(Stokes *s)
{
  PetscInt       row, start, end, size, i, j;
  PetscInt       cols[5];
  PetscScalar    vals[5];
  PetscErrorCode ierr;

  PetscFunctionBeginUser;
  /* Fp is N-by-N */
  ierr = MatCreate(PETSC_COMM_WORLD, &s->Fp);CHKERRQ(ierr);
  ierr = MatSetOptionsPrefix(s->Fp, "Fp_");CHKERRQ(ierr);
  ierr = MatSetSizes(s->Fp, PETSC_DECIDE, PETSC_DECIDE, s->nx*s->ny, s->nx*s->ny);CHKERRQ(ierr);
  ierr = MatSetType(s->Fp, MATMPIAIJ);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(s->Fp, 5, NULL, 5, NULL);CHKERRQ(ierr);
  ierr = MatGetOwnershipRange(s->Fp, &start, &end);CHKERRQ(ierr);
  for (row = start; row < end; row++) {
    ierr = StokesGetPosition(s, row, &i, &j);CHKERRQ(ierr);
    ierr = StokesStencilLaplacian(s, i, j, &size, cols, vals);CHKERRQ(ierr);
    for (i = 0; i < 5; i++) vals[i] = -1.0*vals[i]; /* dynamic viscosity coefficient mu = -1 */
    ierr = MatSetValues(s->Fp, 1, &row, size, cols, vals, INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(s->Fp, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(s->Fp, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
PetscErrorCode StokesSetupMatBlock01(Stokes *s)
{
  PetscInt       row, start, end, size, i, j;
  PetscInt       cols[5];
  PetscScalar    vals[5];
  PetscErrorCode ierr;

  PetscFunctionBeginUser;
  /* A[1] is 2N-by-N */
  ierr = MatCreate(PETSC_COMM_WORLD, &s->subA[1]);CHKERRQ(ierr);
  ierr = MatSetOptionsPrefix(s->subA[1], "a01_");CHKERRQ(ierr);
  ierr = MatSetSizes(s->subA[1], PETSC_DECIDE, PETSC_DECIDE, 2*s->nx*s->ny, s->nx*s->ny);CHKERRQ(ierr);
  ierr = MatSetType(s->subA[1], MATMPIAIJ);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(s->subA[1], 5, NULL, 5, NULL);CHKERRQ(ierr);
  ierr = MatGetOwnershipRange(s->subA[1], &start, &end);CHKERRQ(ierr);
  ierr = MatSetOption(s->subA[1], MAT_IGNORE_ZERO_ENTRIES, PETSC_TRUE);CHKERRQ(ierr);
  for (row = start; row < end; row++) {
    ierr = StokesGetPosition(s, row, &i, &j);CHKERRQ(ierr);
    if (row < s->nx*s->ny) {
      /* first part: rows 0 to (nx*ny-1) */
      ierr = StokesStencilGradientX(s, i, j, &size, cols, vals);CHKERRQ(ierr);
    } else {
      /* second part: rows (nx*ny) to (2*nx*ny-1) */
      ierr = StokesStencilGradientY(s, i, j, &size, cols, vals);CHKERRQ(ierr);
    }
    ierr = MatSetValues(s->subA[1], 1, &row, size, cols, vals, INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(s->subA[1], MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(s->subA[1], MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/*@
   KSPComputeExplicitOperator - Computes the explicit preconditioned operator.

   Collective on KSP

   Input Parameter:
.  ksp - the Krylov subspace context

   Output Parameter:
.  mat - the explicit preconditioned operator

   Notes:
   This computation is done by applying the operators to columns of the
   identity matrix.

   Currently, this routine uses a dense matrix format when 1 processor
   is used and a sparse format otherwise. This routine is costly in general,
   and is recommended for use only with relatively small systems.

   Level: advanced

.keywords: KSP, compute, explicit, operator

.seealso: KSPComputeEigenvaluesExplicitly(), PCComputeExplicitOperator()
@*/
PetscErrorCode KSPComputeExplicitOperator(KSP ksp,Mat *mat)
{
  Vec            in,out;
  PetscErrorCode ierr;
  PetscMPIInt    size;
  PetscInt       i,M,m,*rows,start,end;
  Mat            A;
  MPI_Comm       comm;
  PetscScalar    *array,one = 1.0;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(ksp,KSP_CLASSID,1);
  PetscValidPointer(mat,2);
  ierr = PetscObjectGetComm((PetscObject)ksp,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);

  ierr = VecDuplicate(ksp->vec_sol,&in);CHKERRQ(ierr);
  ierr = VecDuplicate(ksp->vec_sol,&out);CHKERRQ(ierr);
  ierr = VecGetSize(in,&M);CHKERRQ(ierr);
  ierr = VecGetLocalSize(in,&m);CHKERRQ(ierr);
  ierr = VecGetOwnershipRange(in,&start,&end);CHKERRQ(ierr);
  ierr = PetscMalloc1(m,&rows);CHKERRQ(ierr);
  for (i=0; i<m; i++) rows[i] = start + i;

  ierr = MatCreate(comm,mat);CHKERRQ(ierr);
  ierr = MatSetSizes(*mat,m,m,M,M);CHKERRQ(ierr);
  if (size == 1) {
    ierr = MatSetType(*mat,MATSEQDENSE);CHKERRQ(ierr);
    ierr = MatSeqDenseSetPreallocation(*mat,NULL);CHKERRQ(ierr);
  } else {
    ierr = MatSetType(*mat,MATMPIAIJ);CHKERRQ(ierr);
    ierr = MatMPIAIJSetPreallocation(*mat,0,NULL,0,NULL);CHKERRQ(ierr);
  }
  ierr = MatSetOption(*mat,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_FALSE);CHKERRQ(ierr);
  if (!ksp->pc) {ierr = KSPGetPC(ksp,&ksp->pc);CHKERRQ(ierr);}
  ierr = PCGetOperators(ksp->pc,&A,NULL);CHKERRQ(ierr);

  for (i=0; i<M; i++) {
    ierr = VecSet(in,0.0);CHKERRQ(ierr);
    ierr = VecSetValues(in,1,&i,&one,INSERT_VALUES);CHKERRQ(ierr);
    ierr = VecAssemblyBegin(in);CHKERRQ(ierr);
    ierr = VecAssemblyEnd(in);CHKERRQ(ierr);
    ierr = KSP_MatMult(ksp,A,in,out);CHKERRQ(ierr);
    ierr = KSP_PCApply(ksp,out,in);CHKERRQ(ierr);
    ierr = VecGetArray(in,&array);CHKERRQ(ierr);
    ierr = MatSetValues(*mat,m,rows,1,&i,array,INSERT_VALUES);CHKERRQ(ierr);
    ierr = VecRestoreArray(in,&array);CHKERRQ(ierr);
  }
  ierr = PetscFree(rows);CHKERRQ(ierr);
  ierr = VecDestroy(&in);CHKERRQ(ierr);
  ierr = VecDestroy(&out);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(*mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
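/* Sketch (not from the original source): typical use of the routine above on a KSP that
   already has its operators and preconditioner configured - build the explicit
   preconditioned operator and dump it for inspection. */
static PetscErrorCode viewExplicitOperator(KSP ksp)
{
  Mat            expOp;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = KSPComputeExplicitOperator(ksp, &expOp);CHKERRQ(ierr);
  ierr = MatView(expOp, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  ierr = MatDestroy(&expOp);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}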
int main(int argc, char **args)
{
  Mat             A;
  MatPartitioning part;
  IS              is;
  PetscInt        r, N = 10, start, end;
  PetscErrorCode  ierr;

  ierr = PetscInitialize(&argc, &args, (char*)0, help);CHKERRQ(ierr);
  ierr = PetscOptionsGetInt(PETSC_NULL, "-N", &N, PETSC_NULL);CHKERRQ(ierr);
  ierr = MatCreate(PETSC_COMM_WORLD, &A);CHKERRQ(ierr);
  ierr = MatSetSizes(A, PETSC_DECIDE, PETSC_DECIDE, N, N);CHKERRQ(ierr);
  ierr = MatSetFromOptions(A);CHKERRQ(ierr);
  ierr = MatSeqAIJSetPreallocation(A, 3, PETSC_NULL);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(A, 3, PETSC_NULL, 2, PETSC_NULL);CHKERRQ(ierr);

  /* Create a linear mesh */
  ierr = MatGetOwnershipRange(A, &start, &end);CHKERRQ(ierr);
  for (r = start; r < end; ++r) {
    if (r == 0) {
      PetscInt    cols[2];
      PetscScalar vals[2];

      cols[0] = r;   cols[1] = r+1;
      vals[0] = 1.0; vals[1] = 1.0;
      ierr = MatSetValues(A, 1, &r, 2, cols, vals, INSERT_VALUES);CHKERRQ(ierr);
    } else if (r == N-1) {
      PetscInt    cols[2];
      PetscScalar vals[2];

      cols[0] = r-1; cols[1] = r;
      vals[0] = 1.0; vals[1] = 1.0;
      ierr = MatSetValues(A, 1, &r, 2, cols, vals, INSERT_VALUES);CHKERRQ(ierr);
    } else {
      PetscInt    cols[3];
      PetscScalar vals[3];

      cols[0] = r-1; cols[1] = r;   cols[2] = r+1;
      vals[0] = 1.0; vals[1] = 1.0; vals[2] = 1.0;
      ierr = MatSetValues(A, 1, &r, 3, cols, vals, INSERT_VALUES);CHKERRQ(ierr);
    }
  }
  ierr = MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ierr = MatPartitioningCreate(PETSC_COMM_WORLD, &part);CHKERRQ(ierr);
  ierr = MatPartitioningSetAdjacency(part, A);CHKERRQ(ierr);
  ierr = MatPartitioningSetFromOptions(part);CHKERRQ(ierr);
  /* ierr = MatPartitioningSetVertexWeights(part, weights);CHKERRQ(ierr); */
  /* ierr = MatPartitioningSetPartitionWeights(part, weights);CHKERRQ(ierr); */
  ierr = MatPartitioningApply(part, &is);CHKERRQ(ierr);
  ierr = ISView(is, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  ierr = ISDestroy(&is);CHKERRQ(ierr);
  ierr = MatPartitioningDestroy(&part);CHKERRQ(ierr);
  ierr = MatDestroy(&A);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return 0;
}
int main(int argc, char **args)
{
  Mat             A;
  MatPartitioning part;
  IS              is;
  PetscInt        i,m,N,rstart,rend,nemptyranks,*emptyranks,nbigranks,*bigranks;
  PetscMPIInt     rank,size;
  PetscErrorCode  ierr;

  ierr = PetscInitialize(&argc,&args,(char*)0,help);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);

  nemptyranks = 10;
  nbigranks   = 10;
  ierr = PetscMalloc2(nemptyranks,PetscInt,&emptyranks,nbigranks,PetscInt,&bigranks);CHKERRQ(ierr);

  ierr = PetscOptionsBegin(PETSC_COMM_WORLD,PETSC_NULL,"Partitioning example options",PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsIntArray("-emptyranks","Ranks to be skipped by partition","",emptyranks,&nemptyranks,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsIntArray("-bigranks","Ranks to be overloaded","",bigranks,&nbigranks,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsEnd();CHKERRQ(ierr);

  m = 1;
  for (i=0; i<nemptyranks; i++) if (rank == emptyranks[i]) m = 0;
  for (i=0; i<nbigranks; i++) if (rank == bigranks[i]) m = 5;

  ierr = MatCreate(PETSC_COMM_WORLD,&A);CHKERRQ(ierr);
  ierr = MatSetSizes(A,m,m,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = MatSetFromOptions(A);CHKERRQ(ierr);
  ierr = MatSeqAIJSetPreallocation(A,3,PETSC_NULL);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(A,3,PETSC_NULL,2,PETSC_NULL);CHKERRQ(ierr);
  ierr = MatSeqBAIJSetPreallocation(A,1,3,PETSC_NULL);CHKERRQ(ierr);
  ierr = MatMPIBAIJSetPreallocation(A,1,3,PETSC_NULL,2,PETSC_NULL);CHKERRQ(ierr);
  ierr = MatSeqSBAIJSetPreallocation(A,1,2,PETSC_NULL);CHKERRQ(ierr);
  ierr = MatMPISBAIJSetPreallocation(A,1,2,PETSC_NULL,1,PETSC_NULL);CHKERRQ(ierr);

  ierr = MatGetSize(A,PETSC_NULL,&N);CHKERRQ(ierr);
  ierr = MatGetOwnershipRange(A,&rstart,&rend);CHKERRQ(ierr);
  for (i=rstart; i<rend; i++) {
    const PetscInt    cols[] = {(i+N-1)%N,i,(i+1)%N};
    const PetscScalar vals[] = {1,1,1};
    ierr = MatSetValues(A,1,&i,3,cols,vals,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatView(A,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);

  ierr = MatPartitioningCreate(PETSC_COMM_WORLD,&part);CHKERRQ(ierr);
  ierr = MatPartitioningSetAdjacency(part,A);CHKERRQ(ierr);
  ierr = MatPartitioningSetFromOptions(part);CHKERRQ(ierr);
  ierr = MatPartitioningApply(part,&is);CHKERRQ(ierr);
  ierr = ISView(is,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  ierr = ISDestroy(&is);CHKERRQ(ierr);
  ierr = MatPartitioningDestroy(&part);CHKERRQ(ierr);
  ierr = MatDestroy(&A);CHKERRQ(ierr);
  ierr = PetscFree2(emptyranks,bigranks);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return 0;
}
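/* Sketch (not from the original source): enabling per-vertex weights, as in the calls
   commented out in the first partitioning example above. Note that
   MatPartitioningSetVertexWeights() takes ownership of the array, so it must be
   allocated with PetscMalloc and must not be freed by the caller. */
static PetscErrorCode setUniformVertexWeights(MatPartitioning part, PetscInt nlocal)
{
  PetscInt       *weights, i;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscMalloc(nlocal*sizeof(PetscInt), &weights);CHKERRQ(ierr);
  for (i = 0; i < nlocal; i++) weights[i] = 1; /* e.g. uniform weights */
  ierr = MatPartitioningSetVertexWeights(part, weights);CHKERRQ(ierr); /* PETSc now owns weights */
  PetscFunctionReturn(0);
}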
PetscErrorCode myinterp(MPI_Comm comm, Mat *Aout, int Nr, int Nz, int Mr, int Mz, int mr, int mz, int Mzslab)
{
  // (mr,mz) specifies the origin of the DOF region.
  // The DOF region is a rectangle spanning diagonally from (mr,mz) to (mr+Mr-1,mz+Mz-1).
  // We keep z invariant.
  Mat            A;
  int            nz = 1; /* max # nonzero elements in each row */
  PetscErrorCode ierr;
  int            ns, ne;
  int            i;
  int            DegFree = (Mzslab==0)*Mr*Mz + (Mzslab==1)*Mr + (Mzslab==2)*Mz;
  double         shift   = 0.5;

  PetscFunctionBegin;
  MatCreate(comm, &A);
  MatSetType(A, MATMPIAIJ);
  MatSetSizes(A, PETSC_DECIDE, PETSC_DECIDE, 6*Nr*Nz, DegFree);
  MatMPIAIJSetPreallocation(A, nz, PETSC_NULL, nz, PETSC_NULL);
  ierr = MatGetOwnershipRange(A, &ns, &ne);CHKERRQ(ierr);

  for (i = ns; i < ne; ++i) {
    int    ir, iz, ic;
    double rd, zd;
    int    ird, izd;
    int    j, id;

    iz = (j = i) % Nz;
    ir = (j /= Nz) % Nr;
    ic = (j /= Nr) % 3;

    rd  = (ir-mr) + (ic != 0)*shift;
    ird = ceil(rd - 0.5); /* requires <math.h> */
    if (ird < 0 || ird >= Mr) continue;

    zd  = (iz-mz) + (ic != 2)*shift;
    izd = ceil(zd - 0.5);
    if (izd < 0 || izd >= Mz) continue;

    if (Mzslab == 1) {
      id = ird;
    } else if (Mzslab == 2) {
      id = izd;
    } else {
      id = izd + Mz * ird;
    }
    ierr = MatSetValue(A, i, id, 1.0, INSERT_VALUES);CHKERRQ(ierr);
  }

  ierr = MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = PetscObjectSetName((PetscObject)A, "InterpMatrix");CHKERRQ(ierr);

  *Aout = A;
  PetscFunctionReturn(0);
}
PetscErrorCode ConvertMatrixToMat(MPI_Comm comm,matrix *B,Mat *PB)
{
  PetscMPIInt    size,rank;
  PetscErrorCode ierr;
  int            m,n,M,N;
  int            d_nz,o_nz;
  int            *d_nnz,*o_nnz;
  int            i,k,global_row,global_col,first_diag_col,last_diag_col;
  PetscScalar    val;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  m = n = B->mnls[rank];
  d_nz = o_nz = 0;

  /* Determine the per-row preallocation counts for MatMPIAIJSetPreallocation */
  ierr = PetscMalloc(m*sizeof(PetscInt),&d_nnz);CHKERRQ(ierr);
  ierr = PetscMalloc(m*sizeof(PetscInt),&o_nnz);CHKERRQ(ierr);
  for (i=0; i<m; i++) d_nnz[i] = o_nnz[i] = 0;
  first_diag_col = B->start_indices[rank];
  last_diag_col  = first_diag_col + B->mnls[rank];
  for (i=0; i<B->mnls[rank]; i++) {
    for (k=0; k<B->lines->len[i]; k++) {
      global_col = B->lines->ptrs[i][k];
      if ((global_col >= first_diag_col) && (global_col < last_diag_col)) d_nnz[i]++;
      else o_nnz[i]++;
    }
  }

  M = N = B->n;
  /* Here we only know how to create AIJ format */
  ierr = MatCreate(comm,PB);CHKERRQ(ierr);
  ierr = MatSetSizes(*PB,m,n,M,N);CHKERRQ(ierr);
  ierr = MatSetType(*PB,MATAIJ);CHKERRQ(ierr);
  ierr = MatSeqAIJSetPreallocation(*PB,d_nz,d_nnz);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(*PB,d_nz,d_nnz,o_nz,o_nnz);CHKERRQ(ierr);

  for (i=0; i<B->mnls[rank]; i++) {
    global_row = B->start_indices[rank]+i;
    for (k=0; k<B->lines->len[i]; k++) {
      global_col = B->lines->ptrs[i][k];
      val        = B->lines->A[i][k];
      ierr       = MatSetValues(*PB,1,&global_row,1,&global_col,&val,ADD_VALUES);CHKERRQ(ierr);
    }
  }
  ierr = PetscFree(d_nnz);CHKERRQ(ierr);
  ierr = PetscFree(o_nnz);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(*PB,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*PB,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/*! \brief Fill the petsc Matrix
 *
 */
void fill_petsc()
{
  d_nnz.resize(l_row);
  o_nnz.resize(l_row);

  d_nnz.fill(0);
  o_nnz.fill(0);

  // Explore every row to count how many nonzeros fall in the diagonal matrix part
  // and how many in the off-diagonal part, as needed by MatMPIAIJSetPreallocation
  size_t i = 0;
  while (i < trpl.size())
  {
    PetscInt row = trpl.get(i).row();

    // note: the bounds check must come before the element access
    while (i < trpl.size() && trpl.get(i).row() == row)
    {
      if ((size_t)trpl.get(i).col() >= start_row && (size_t)trpl.get(i).col() < start_row + l_row)
        d_nnz.get(row - start_row)++;
      else
        o_nnz.get(row - start_row)++;
      i++;
    }
  }

  PETSC_SAFE_CALL(MatMPIAIJSetPreallocation(mat,0,static_cast<const PetscInt*>(d_nnz.getPointer()),
                                            0,static_cast<const PetscInt*>(o_nnz.getPointer())));

  // Reset the counter and set the matrix values from the triplets, one row at a time
  i = 0;
  while (i < trpl.size())
  {
    vals.clear();
    cols.clear();

    PetscInt row = trpl.get(i).row();
    while (i < trpl.size() && trpl.get(i).row() == row)
    {
      vals.add(trpl.get(i).value());
      cols.add(trpl.get(i).col());
      i++;
    }
    PETSC_SAFE_CALL(MatSetValues(mat,1,&row,cols.size(),static_cast<const PetscInt*>(cols.getPointer()),
                                 static_cast<const PetscScalar*>(vals.getPointer()),INSERT_VALUES));
  }

  PETSC_SAFE_CALL(MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY));
  PETSC_SAFE_CALL(MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY));
}
/*@
   MatComputeExplicitOperator - Computes the explicit matrix

   Collective on Mat

   Input Parameter:
.  inmat - the matrix

   Output Parameter:
.  mat - the explicit operator

   Notes:
   This computation is done by applying the operator to columns of the
   identity matrix.

   Currently, this routine uses a dense matrix format when 1 processor
   is used and a sparse format otherwise. This routine is costly in general,
   and is recommended for use only with relatively small systems.

   Level: advanced

.keywords: Mat, compute, explicit, operator
@*/
PetscErrorCode MatComputeExplicitOperator(Mat inmat,Mat *mat)
{
  Vec            in,out;
  PetscErrorCode ierr;
  PetscInt       i,m,n,M,N,*rows,start,end;
  MPI_Comm       comm;
  PetscScalar    *array,zero = 0.0,one = 1.0;
  PetscMPIInt    size;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(inmat,MAT_CLASSID,1);
  PetscValidPointer(mat,2);

  ierr = PetscObjectGetComm((PetscObject)inmat,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);

  ierr = MatGetLocalSize(inmat,&m,&n);CHKERRQ(ierr);
  ierr = MatGetSize(inmat,&M,&N);CHKERRQ(ierr);
  ierr = MatGetVecs(inmat,&in,&out);CHKERRQ(ierr);
  ierr = VecSetOption(in,VEC_IGNORE_OFF_PROC_ENTRIES,PETSC_TRUE);CHKERRQ(ierr);
  ierr = VecGetOwnershipRange(out,&start,&end);CHKERRQ(ierr);
  ierr = PetscMalloc(m*sizeof(PetscInt),&rows);CHKERRQ(ierr);
  for (i=0; i<m; i++) rows[i] = start + i;

  ierr = MatCreate(comm,mat);CHKERRQ(ierr);
  ierr = MatSetSizes(*mat,m,n,M,N);CHKERRQ(ierr);
  if (size == 1) {
    ierr = MatSetType(*mat,MATSEQDENSE);CHKERRQ(ierr);
    ierr = MatSeqDenseSetPreallocation(*mat,NULL);CHKERRQ(ierr);
  } else {
    ierr = MatSetType(*mat,MATMPIAIJ);CHKERRQ(ierr);
    ierr = MatMPIAIJSetPreallocation(*mat,n,NULL,N-n,NULL);CHKERRQ(ierr);
  }

  for (i=0; i<N; i++) {
    ierr = VecSet(in,zero);CHKERRQ(ierr);
    ierr = VecSetValues(in,1,&i,&one,INSERT_VALUES);CHKERRQ(ierr);
    ierr = VecAssemblyBegin(in);CHKERRQ(ierr);
    ierr = VecAssemblyEnd(in);CHKERRQ(ierr);
    ierr = MatMult(inmat,in,out);CHKERRQ(ierr);
    ierr = VecGetArray(out,&array);CHKERRQ(ierr);
    ierr = MatSetValues(*mat,m,rows,1,&i,array,INSERT_VALUES);CHKERRQ(ierr);
    ierr = VecRestoreArray(out,&array);CHKERRQ(ierr);
  }
  ierr = PetscFree(rows);CHKERRQ(ierr);
  ierr = VecDestroy(&out);CHKERRQ(ierr);
  ierr = VecDestroy(&in);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(*mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/*@
   MatXAIJSetPreallocation - set preallocation for serial and parallel AIJ, BAIJ, and SBAIJ matrices

   Collective on Mat

   Input Arguments:
+  A - matrix being preallocated
.  bs - block size
.  dnnz - number of nonzero blocks per block row of the diagonal part of the parallel matrix
.  onnz - number of nonzero blocks per block row of the off-diagonal part of the parallel matrix
.  dnnzu - number of nonzero blocks per block row of the upper-triangular part of the diagonal part of the parallel matrix
-  onnzu - number of nonzero blocks per block row of the upper-triangular part of the off-diagonal part of the parallel matrix

   Level: beginner

.seealso: MatSeqAIJSetPreallocation(), MatMPIAIJSetPreallocation(), MatSeqBAIJSetPreallocation(), MatMPIBAIJSetPreallocation(),
          MatSeqSBAIJSetPreallocation(), MatMPISBAIJSetPreallocation(), PetscSplitOwnership()
@*/
PetscErrorCode MatXAIJSetPreallocation(Mat A,PetscInt bs,const PetscInt dnnz[],const PetscInt onnz[],const PetscInt dnnzu[],const PetscInt onnzu[])
{
  PetscErrorCode ierr;
  void           (*aij)(void);

  PetscFunctionBegin;
  ierr = MatSetBlockSize(A,bs);CHKERRQ(ierr);
  ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
  ierr = PetscLayoutSetUp(A->rmap);CHKERRQ(ierr);
  ierr = PetscLayoutSetUp(A->cmap);CHKERRQ(ierr);
  ierr = MatSeqBAIJSetPreallocation(A,bs,0,dnnz);CHKERRQ(ierr);
  ierr = MatMPIBAIJSetPreallocation(A,bs,0,dnnz,0,onnz);CHKERRQ(ierr);
  ierr = MatSeqSBAIJSetPreallocation(A,bs,0,dnnzu);CHKERRQ(ierr);
  ierr = MatMPISBAIJSetPreallocation(A,bs,0,dnnzu,0,onnzu);CHKERRQ(ierr);
  /*
    In general, we have to do extra work to preallocate for scalar (AIJ) matrices, so
    we check whether it will do any good before going on with it.
  */
  ierr = PetscObjectQueryFunction((PetscObject)A,"MatMPIAIJSetPreallocation_C",&aij);CHKERRQ(ierr);
  if (!aij) {
    ierr = PetscObjectQueryFunction((PetscObject)A,"MatSeqAIJSetPreallocation_C",&aij);CHKERRQ(ierr);
  }
  if (aij) {
    if (bs == 1) {
      ierr = MatSeqAIJSetPreallocation(A,0,dnnz);CHKERRQ(ierr);
      ierr = MatMPIAIJSetPreallocation(A,0,dnnz,0,onnz);CHKERRQ(ierr);
    } else { /* convert the block-row preallocation to scalar-row */
      PetscInt i,m,*sdnnz,*sonnz;
      ierr = MatGetLocalSize(A,&m,NULL);CHKERRQ(ierr);
      ierr = PetscMalloc2((!!dnnz)*m,&sdnnz,(!!onnz)*m,&sonnz);CHKERRQ(ierr);
      for (i=0; i<m; i++) {
        if (dnnz) sdnnz[i] = dnnz[i/bs] * bs;
        if (onnz) sonnz[i] = onnz[i/bs] * bs;
      }
      ierr = MatSeqAIJSetPreallocation(A,0,dnnz ? sdnnz : NULL);CHKERRQ(ierr);
      ierr = MatMPIAIJSetPreallocation(A,0,dnnz ? sdnnz : NULL,0,onnz ? sonnz : NULL);CHKERRQ(ierr);
      ierr = PetscFree2(sdnnz,sonnz);CHKERRQ(ierr);
    }
  }
  PetscFunctionReturn(0);
}
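/* Sketch (not from the original source): from the caller's side, MatXAIJSetPreallocation
   removes the need to branch on matrix type - one call covers AIJ/BAIJ/SBAIJ, sequential
   and parallel. Here dnnz/onnz/dnnzu/onnzu are assumed to be per-block-row counts already
   computed by the caller. */
static PetscErrorCode createWithXAIJ(MPI_Comm comm, PetscInt mlocal, PetscInt bs,
                                     const PetscInt dnnz[], const PetscInt onnz[],
                                     const PetscInt dnnzu[], const PetscInt onnzu[], Mat *A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatCreate(comm, A);CHKERRQ(ierr);
  ierr = MatSetSizes(*A, mlocal, mlocal, PETSC_DETERMINE, PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetFromOptions(*A);CHKERRQ(ierr); /* the user picks aij/baij/sbaij at run time */
  ierr = MatXAIJSetPreallocation(*A, bs, dnnz, onnz, dnnzu, onnzu);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}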
/*@
   STComputeExplicitOperator - Computes the explicit operator associated
   to the eigenvalue problem with the specified spectral transformation.

   Collective on ST

   Input Parameter:
.  st - the spectral transform context

   Output Parameter:
.  mat - the explicit operator

   Notes:
   This routine builds a matrix containing the explicit operator. For
   example, in generalized problems with shift-and-invert spectral
   transformation the result would be matrix (A - s B)^-1 B.

   This computation is done by applying the operator to columns of the
   identity matrix. This is analogous to MatComputeExplicitOperator().

   Level: advanced

.seealso: STApply()
@*/
PetscErrorCode STComputeExplicitOperator(ST st,Mat *mat)
{
  PetscErrorCode    ierr;
  Vec               in,out;
  PetscInt          i,M,m,*rows,start,end;
  const PetscScalar *array;
  PetscScalar       one = 1.0;
  PetscMPIInt       size;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(st,ST_CLASSID,1);
  PetscValidPointer(mat,2);
  STCheckMatrices(st,1);
  if (st->nmat>2) SETERRQ(PetscObjectComm((PetscObject)st),PETSC_ERR_ARG_WRONGSTATE,"Can only be used with 1 or 2 matrices");
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject)st),&size);CHKERRQ(ierr);

  ierr = MatGetVecs(st->A[0],&in,&out);CHKERRQ(ierr);
  ierr = VecGetSize(out,&M);CHKERRQ(ierr);
  ierr = VecGetLocalSize(out,&m);CHKERRQ(ierr);
  ierr = VecSetOption(in,VEC_IGNORE_OFF_PROC_ENTRIES,PETSC_TRUE);CHKERRQ(ierr);
  ierr = VecGetOwnershipRange(out,&start,&end);CHKERRQ(ierr);
  ierr = PetscMalloc1(m,&rows);CHKERRQ(ierr);
  for (i=0;i<m;i++) rows[i] = start + i;

  ierr = MatCreate(PetscObjectComm((PetscObject)st),mat);CHKERRQ(ierr);
  ierr = MatSetSizes(*mat,m,m,M,M);CHKERRQ(ierr);
  if (size == 1) {
    ierr = MatSetType(*mat,MATSEQDENSE);CHKERRQ(ierr);
    ierr = MatSeqDenseSetPreallocation(*mat,NULL);CHKERRQ(ierr);
  } else {
    ierr = MatSetType(*mat,MATMPIAIJ);CHKERRQ(ierr);
    ierr = MatMPIAIJSetPreallocation(*mat,m,NULL,M-m,NULL);CHKERRQ(ierr);
  }

  for (i=0;i<M;i++) {
    ierr = VecSet(in,0.0);CHKERRQ(ierr);
    ierr = VecSetValues(in,1,&i,&one,INSERT_VALUES);CHKERRQ(ierr);
    ierr = VecAssemblyBegin(in);CHKERRQ(ierr);
    ierr = VecAssemblyEnd(in);CHKERRQ(ierr);
    ierr = STApply(st,in,out);CHKERRQ(ierr);
    ierr = VecGetArrayRead(out,&array);CHKERRQ(ierr);
    ierr = MatSetValues(*mat,m,rows,1,&i,array,INSERT_VALUES);CHKERRQ(ierr);
    ierr = VecRestoreArrayRead(out,&array);CHKERRQ(ierr);
  }
  ierr = PetscFree(rows);CHKERRQ(ierr);
  ierr = VecDestroy(&in);CHKERRQ(ierr);
  ierr = VecDestroy(&out);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(*mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
void SingleLongPipe::initialize()
{
  MatCreate(PETSC_COMM_WORLD, &M);
  MatSetSizes(M, local_n, local_n, global_n, global_n);
  MatSetFromOptions(M);
  MatMPIAIJSetPreallocation(M, 2, nullptr, 2, nullptr);
  MatSeqAIJSetPreallocation(M, 2, nullptr);
  MatSetUp(M);

  KSPCreate(PETSC_COMM_WORLD, &ksp);
  KSPSetOperators(ksp, M, M);
  KSPSetFromOptions(ksp);
  KSPSetUp(ksp);
}
PetscErrorCode StokesSetupMatBlock11(Stokes *s)
{
  PetscErrorCode ierr;

  PetscFunctionBeginUser;
  /* A[3] is the N-by-N null matrix */
  ierr = MatCreate(PETSC_COMM_WORLD, &s->subA[3]);CHKERRQ(ierr);
  ierr = MatSetOptionsPrefix(s->subA[3], "a11_");CHKERRQ(ierr);
  ierr = MatSetSizes(s->subA[3], PETSC_DECIDE, PETSC_DECIDE, s->nx*s->ny, s->nx*s->ny);CHKERRQ(ierr);
  ierr = MatSetType(s->subA[3], MATMPIAIJ);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(s->subA[3], 0, NULL, 0, NULL);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(s->subA[3], MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(s->subA[3], MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
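/* Sketch (not from the original source): with the four blocks subA[0]..subA[3] assembled,
   codes in this style typically glue the full saddle-point operator together as a MATNEST.
   This assumes the Stokes struct holds the blocks contiguously in s->subA in row-major
   order {A00, A01, A10, A11} and has Mat s->A and IS s->isg[2] fields. */
static PetscErrorCode StokesSetupNestMatrix(Stokes *s)
{
  PetscErrorCode ierr;

  PetscFunctionBeginUser;
  ierr = MatCreateNest(PETSC_COMM_WORLD, 2, NULL, 2, NULL, s->subA, &s->A);CHKERRQ(ierr);
  ierr = MatNestGetISs(s->A, s->isg, NULL);CHKERRQ(ierr); /* row index sets, e.g. for PCFIELDSPLIT */
  PetscFunctionReturn(0);
}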
void linearSystemPETSc<scalar>::preAllocateEntries()
{
  if (_entriesPreAllocated) return;
  if (!_isAllocated) Msg::Fatal("system must be allocated first");
  int blockSize = _getBlockSizeFromParameters();
  std::vector<int> nByRowDiag(_localSize), nByRowOffDiag(_localSize);
  if (_sparsity.getNbRows() == 0) {
    PetscInt  prealloc = 500;
    PetscBool set;
    PetscOptionsGetInt(PETSC_NULL, "-petsc_prealloc", &prealloc, &set);
    prealloc = std::min(prealloc, _localSize);
    nByRowDiag.resize(0);
    nByRowDiag.resize(_localSize, prealloc);
  } else {
    for (int i = 0; i < _localSize; i++) {
      int n;
      const int *r = _sparsity.getRow(i, n);
      for (int j = 0; j < n; j++) {
        if (r[j] >= _localRowStart && r[j] < _localRowEnd)
          nByRowDiag[i]++;
        else
          nByRowOffDiag[i]++;
      }
    }
    _sparsity.clear();
  }
  // MatXAIJSetPreallocation is not available in petsc < 3.3
  int commSize = 1;
  MPI_Comm_size(_comm, &commSize);
  if (commSize == 1) {
    if (blockSize == 1)
      _try(MatSeqAIJSetPreallocation(_a, 0, &nByRowDiag[0]));
    else
      _try(MatSeqBAIJSetPreallocation(_a, blockSize, 0, &nByRowDiag[0]));
  }
  else {
    if (blockSize == 1)
      _try(MatMPIAIJSetPreallocation(_a, 0, &nByRowDiag[0], 0, &nByRowOffDiag[0]));
    else
      _try(MatMPIBAIJSetPreallocation(_a, blockSize, 0, &nByRowDiag[0], 0, &nByRowOffDiag[0]));
  }
  if (blockSize > 1) _try(MatSetOption(_a, MAT_ROW_ORIENTED, PETSC_FALSE));
  _entriesPreAllocated = true;
}
void PETScLinearSolver::MatrixCreate(PetscInt m, PetscInt n)
{
  MatCreate(PETSC_COMM_WORLD, &A);
  // TEST: MatSetSizes(A, m_size_loc, PETSC_DECIDE, m, n);
  MatSetSizes(A, PETSC_DECIDE, PETSC_DECIDE, m, n);
  MatSetType(A, MATMPIAIJ);
  MatSetFromOptions(A);
  MatSeqAIJSetPreallocation(A, d_nz, PETSC_NULL);
  MatMPIAIJSetPreallocation(A, d_nz, PETSC_NULL, o_nz, PETSC_NULL);
  MatSetOption(A, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
  MatSetUp(A);
  // KG44: this seems to work with petsc 3.3; the commented-out variants above resulted
  // in problems when assembling the matrix with version 3.3
  MatGetOwnershipRange(A, &i_start, &i_end);
}
/*@C
   MatCreateMPIAIJPERM - Creates a sparse parallel matrix whose local portions are stored
   as SEQAIJPERM matrices (a matrix class that inherits from SEQAIJ but includes some
   optimizations to allow more effective vectorization). The same guidelines that apply
   to MPIAIJ matrices for preallocating the matrix storage apply here as well.

   Collective on MPI_Comm

   Input Parameters:
+  comm - MPI communicator
.  m - number of local rows (or PETSC_DECIDE to have it calculated if M is given).
       This value should be the same as the local size used in creating the
       y vector for the matrix-vector product y = Ax.
.  n - number of local columns (or PETSC_DECIDE to have it calculated if N is given).
       This value should be the same as the local size used in creating the
       x vector for the matrix-vector product y = Ax. For square matrices n is almost always m.
.  M - number of global rows (or PETSC_DETERMINE to have it calculated if m is given)
.  N - number of global columns (or PETSC_DETERMINE to have it calculated if n is given)
.  d_nz - number of nonzeros per row in DIAGONAL portion of local submatrix
          (same value is used for all local rows)
.  d_nnz - array containing the number of nonzeros in the various rows of the
           DIAGONAL portion of the local submatrix (possibly different for each row)
           or PETSC_NULL, if d_nz is used to specify the nonzero structure.
           The size of this array is equal to the number of local rows, i.e. 'm'.
           For matrices you plan to factor you must leave room for the diagonal entry
           and put in the entry even if it is zero.
.  o_nz - number of nonzeros per row in the OFF-DIAGONAL portion of local
          submatrix (same value is used for all local rows).
-  o_nnz - array containing the number of nonzeros in the various rows of the
           OFF-DIAGONAL portion of the local submatrix (possibly different for each row)
           or PETSC_NULL, if o_nz is used to specify the nonzero structure.
           The size of this array is equal to the number of local rows, i.e. 'm'.

   Output Parameter:
.  A - the matrix

   Notes:
   If the *_nnz parameter is given then the *_nz parameter is ignored.

   m, n, M, N specify the size of the matrix and its partitioning across processors,
   while d_nz, d_nnz, o_nz, o_nnz specify the approximate storage requirements for
   this matrix.

   If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one
   processor then it must be used on all processors that share the object for
   that argument.

   The user MUST specify either the local or global matrix dimensions
   (possibly both).

   The parallel matrix is partitioned such that the first m0 rows belong to
   process 0, the next m1 rows belong to process 1, the next m2 rows belong
   to process 2, etc., where m0, m1, m2... are the input parameter 'm'.

   The DIAGONAL portion of the local submatrix of a processor can be defined
   as the submatrix obtained by extracting the part corresponding to rows r1-r2
   and columns r1-r2 of the global matrix, where r1 is the first row belonging
   to the processor and r2 is the last. This is a square mxm matrix. The
   remaining portion of the local submatrix (mxN) constitutes the OFF-DIAGONAL
   portion.

   When calling this routine with a single process communicator, a matrix of
   type SEQAIJPERM is returned. If a matrix of type MPIAIJPERM is desired for
   this type of communicator, use the construction mechanism:
.vb
     MatCreate(...,&A);
     MatSetType(A,MATMPIAIJPERM);
     MatMPIAIJSetPreallocation(A,...);
.ve

   By default, this format uses inodes (identical nodes) when possible.
   We search for consecutive rows with the same nonzero structure, thereby
   reusing matrix information to achieve increased efficiency.

   Options Database Keys:
+  -mat_no_inode - Do not use inodes
.  -mat_inode_limit <limit> - Sets inode limit (max limit=5)
-  -mat_aij_oneindex - Internally use indexing starting at 1 rather than 0.
     Note that when calling MatSetValues(), the user still MUST index entries
     starting at 0!

   Level: intermediate

.keywords: matrix, cray, sparse, parallel

.seealso: MatCreate(), MatCreateSeqAIJPERM(), MatSetValues()
@*/
PetscErrorCode MatCreateMPIAIJPERM(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
{
  PetscErrorCode ierr;
  PetscMPIInt    size;

  PetscFunctionBegin;
  ierr = MatCreate(comm,A);CHKERRQ(ierr);
  ierr = MatSetSizes(*A,m,n,M,N);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  if (size > 1) {
    ierr = MatSetType(*A,MATMPIAIJPERM);CHKERRQ(ierr);
    ierr = MatMPIAIJSetPreallocation(*A,d_nz,d_nnz,o_nz,o_nnz);CHKERRQ(ierr);
  } else {
    ierr = MatSetType(*A,MATSEQAIJPERM);CHKERRQ(ierr);
    ierr = MatSeqAIJSetPreallocation(*A,d_nz,d_nnz);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
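/* Sketch (not from the original source): a direct call to the routine above with flat
   per-row estimates - here at most 5 nonzeros per row in both the diagonal and
   off-diagonal blocks, illustrative numbers for a 2D 5-point stencil. */
static PetscErrorCode createAIJPERM(PetscInt N, Mat *A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatCreateMPIAIJPERM(PETSC_COMM_WORLD, PETSC_DECIDE, PETSC_DECIDE, N, N,
                             5, PETSC_NULL, 5, PETSC_NULL, A);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}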
void Field_solver::alloc_petsc_matrix(Mat *A,
                                      PetscInt nrow_local, PetscInt ncol_local,
                                      PetscInt nrow, PetscInt ncol,
                                      PetscInt nonzero_per_row)
{
  PetscErrorCode ierr;

  ierr = MatCreate(PETSC_COMM_WORLD, A); CHKERRXX(ierr);
  ierr = MatSetSizes(*A, nrow_local, ncol_local, nrow, ncol); CHKERRXX(ierr);
  ierr = MatSetFromOptions(*A); CHKERRXX(ierr);
  ierr = MatSetType(*A, MATAIJ); CHKERRXX(ierr);
  // TODO: set nonzero_per_row more accurately.
  // If nlocal >= (nx-2)*(ny-2): max_diag_nonzero_per_row = 7, max_offdiag_nonzero_per_row = 3;
  // if (nx-2) <= nlocal < (nx-2)*(ny-2): max_diag_nonzero_per_row = 5, max_offdiag_nonzero_per_row = 4.
  // Probably.
  ierr = MatMPIAIJSetPreallocation(*A, nonzero_per_row, NULL, nonzero_per_row, NULL); CHKERRXX(ierr);
  ierr = MatSetUp(*A); CHKERRXX(ierr);
  return;
}
static PetscErrorCode preallocation(Mat M,PetscInt *d_nz, PetscInt *o_nz)
{
  PetscErrorCode ierr;
  PetscBool      isaij,ismpiaij,isseqaij;
  PetscMPIInt    size;
  MPI_Comm       comm;

  PetscFunctionBegin;
  ierr = PetscObjectTypeCompare((PetscObject)M,MATAIJ,&isaij);CHKERRQ(ierr);
  ierr = PetscObjectTypeCompare((PetscObject)M,MATMPIAIJ,&ismpiaij);CHKERRQ(ierr);
  ierr = PetscObjectTypeCompare((PetscObject)M,MATSEQAIJ,&isseqaij);CHKERRQ(ierr);
  /* use the matrix's own communicator, in case M lives on a subcommunicator */
  ierr = PetscObjectGetComm((PetscObject)M,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  if ((isaij && size == 1) || isseqaij) {
    ierr = MatSeqAIJSetPreallocation(M,0,d_nz);CHKERRQ(ierr);
  } else if (isaij || ismpiaij) {
    ierr = MatMPIAIJSetPreallocation(M,0,d_nz,0,o_nz);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(M,"NOT using preallocation\n");CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
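/* Sketch (not from the original source): how the helper above would be driven. The matrix
   type comes from the options database; despite their names, d_nz/o_nz are passed through
   as the per-row nnz arrays, so here d_nnz/o_nnz are assumed to be caller-computed arrays
   of length mlocal. */
static PetscErrorCode createPreallocated(MPI_Comm comm, PetscInt mlocal,
                                         PetscInt *d_nnz, PetscInt *o_nnz, Mat *M)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatCreate(comm, M);CHKERRQ(ierr);
  ierr = MatSetSizes(*M, mlocal, mlocal, PETSC_DETERMINE, PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetFromOptions(*M);CHKERRQ(ierr); /* e.g. -mat_type aij or mpiaij */
  ierr = preallocation(*M, d_nnz, o_nnz);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}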
EXTERN_C_END

EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "MatGetFactor_mpiaij_pastix"
PetscErrorCode MatGetFactor_mpiaij_pastix(Mat A,MatFactorType ftype,Mat *F)
{
  Mat            B;
  PetscErrorCode ierr;
  Mat_Pastix     *pastix;

  PetscFunctionBegin;
  if (ftype != MAT_FACTOR_LU) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Cannot use PETSc AIJ matrices with PaStiX Cholesky, use SBAIJ matrix");
  /* Create the factorization matrix */
  ierr = MatCreate(((PetscObject)A)->comm,&B);CHKERRQ(ierr);
  ierr = MatSetSizes(B,A->rmap->n,A->cmap->n,A->rmap->N,A->cmap->N);CHKERRQ(ierr);
  ierr = MatSetType(B,((PetscObject)A)->type_name);CHKERRQ(ierr);
  ierr = MatSeqAIJSetPreallocation(B,0,PETSC_NULL);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(B,0,PETSC_NULL,0,PETSC_NULL);CHKERRQ(ierr);

  B->ops->lufactorsymbolic = MatLUFactorSymbolic_AIJPASTIX;
  B->ops->view             = MatView_PaStiX;

  ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatFactorGetSolverPackage_C","MatFactorGetSolverPackage_pastix",MatFactorGetSolverPackage_pastix);CHKERRQ(ierr);

  B->factortype = MAT_FACTOR_LU;

  ierr = PetscNewLog(B,Mat_Pastix,&pastix);CHKERRQ(ierr);

  pastix->CleanUpPastix = PETSC_FALSE;
  pastix->isAIJ         = PETSC_TRUE;
  pastix->scat_rhs      = PETSC_NULL;
  pastix->scat_sol      = PETSC_NULL;
  pastix->Destroy       = B->ops->destroy;
  B->ops->destroy       = MatDestroy_Pastix;
  B->spptr              = (void*)pastix;

  *F = B;
  PetscFunctionReturn(0);
}
PetscErrorCode GetDotMat(MPI_Comm comm, Mat *Bout, int c1, int c2, int Nr, int Nz)
{
  int            Nc = 3, N = 2*Nc*Nr*Nz;
  Mat            B;
  int            ns, ne, i, j, ir, iz, ic, iq;
  double         value = 1;
  int            col;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  MatCreate(comm, &B);
  MatSetType(B, MATMPIAIJ);
  MatSetSizes(B, PETSC_DECIDE, PETSC_DECIDE, N, N);
  MatMPIAIJSetPreallocation(B, 1, PETSC_NULL, 1, PETSC_NULL);
  ierr = MatGetOwnershipRange(B, &ns, &ne);CHKERRQ(ierr);

  for (i = ns; i < ne; ++i) {
    iz = (j = i) % Nz;
    ir = (j /= Nz) % Nr;
    ic = (j /= Nr) % Nc;
    iq = j /= Nc;
    if (ic == c1) {
      col  = iz + Nz * (ir + Nr * (c2 + Nc * iq));
      ierr = MatSetValue(B, i, col, value, INSERT_VALUES);CHKERRQ(ierr);
    }
  }

  ierr = MatAssemblyBegin(B, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(B, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = PetscObjectSetName((PetscObject)B, "DotMatrix");CHKERRQ(ierr);
  *Bout = B;
  PetscFunctionReturn(0);
}
static PetscErrorCode DMCreateMatrix_Redundant(DM dm,Mat *J)
{
  DM_Redundant           *red = (DM_Redundant*)dm->data;
  PetscErrorCode         ierr;
  ISLocalToGlobalMapping ltog,ltogb;
  PetscInt               i,rstart,rend,*cols;
  PetscScalar            *vals;

  PetscFunctionBegin;
  ierr = MatCreate(PetscObjectComm((PetscObject)dm),J);CHKERRQ(ierr);
  ierr = MatSetSizes(*J,red->n,red->n,red->N,red->N);CHKERRQ(ierr);
  ierr = MatSetType(*J,dm->mattype);CHKERRQ(ierr);
  ierr = MatSeqAIJSetPreallocation(*J,red->n,NULL);CHKERRQ(ierr);
  ierr = MatSeqBAIJSetPreallocation(*J,1,red->n,NULL);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(*J,red->n,NULL,red->N-red->n,NULL);CHKERRQ(ierr);
  ierr = MatMPIBAIJSetPreallocation(*J,1,red->n,NULL,red->N-red->n,NULL);CHKERRQ(ierr);

  ierr = DMGetLocalToGlobalMapping(dm,&ltog);CHKERRQ(ierr);
  ierr = DMGetLocalToGlobalMappingBlock(dm,&ltogb);CHKERRQ(ierr);
  ierr = MatSetLocalToGlobalMapping(*J,ltog,ltog);CHKERRQ(ierr);
  ierr = MatSetLocalToGlobalMappingBlock(*J,ltogb,ltogb);CHKERRQ(ierr);

  ierr = PetscMalloc2(red->N,&cols,red->N,&vals);CHKERRQ(ierr);
  for (i=0; i<red->N; i++) {
    cols[i] = i;
    vals[i] = 0.0;
  }
  ierr = MatGetOwnershipRange(*J,&rstart,&rend);CHKERRQ(ierr);
  for (i=rstart; i<rend; i++) {
    ierr = MatSetValues(*J,1,&i,red->N,cols,vals,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = PetscFree2(cols,vals);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(*J,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*J,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
int main(int argc,char **args)
{
  Mat            C,A;
  PetscInt       i, n = 10, midx[3], bs = 1;
  PetscErrorCode ierr;
  PetscScalar    v[3];
  PetscBool      flg,isAIJ;
  MatType        type;
  PetscMPIInt    size;

  PetscInitialize(&argc,&args,(char*)0,help);
  ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);
  ierr = PetscOptionsGetInt(PETSC_NULL,"-n",&n,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsGetInt(PETSC_NULL,"-mat_block_size",&bs,PETSC_NULL);CHKERRQ(ierr);

  ierr = MatCreate(PETSC_COMM_WORLD,&C);CHKERRQ(ierr);
  ierr = MatSetSizes(C,PETSC_DECIDE,PETSC_DECIDE,n,n);CHKERRQ(ierr);
  ierr = MatSetType(C,MATAIJ);CHKERRQ(ierr);
  ierr = MatSetFromOptions(C);CHKERRQ(ierr);
  ierr = MatGetType(C,&type);CHKERRQ(ierr);
  if (size == 1) {
    ierr = PetscObjectTypeCompare((PetscObject)C,MATSEQAIJ,&isAIJ);CHKERRQ(ierr);
  } else {
    ierr = PetscObjectTypeCompare((PetscObject)C,MATMPIAIJ,&isAIJ);CHKERRQ(ierr);
  }
  ierr = MatSeqAIJSetPreallocation(C,3,PETSC_NULL);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(C,3,PETSC_NULL,3,PETSC_NULL);CHKERRQ(ierr);
  ierr = MatSeqBAIJSetPreallocation(C,bs,3,PETSC_NULL);CHKERRQ(ierr);
  ierr = MatMPIBAIJSetPreallocation(C,bs,3,PETSC_NULL,3,PETSC_NULL);CHKERRQ(ierr);

  v[0] = -1.; v[1] = 2.; v[2] = -1.;
  for (i=1; i<n-1; i++) {
    midx[2] = i-1; midx[1] = i; midx[0] = i+1;
    ierr = MatSetValues(C,1,&i,3,midx,v,INSERT_VALUES);CHKERRQ(ierr);
  }
  i = 0; midx[0] = 0; midx[1] = 1;
  v[0] = 2.0; v[1] = -1.;
  ierr = MatSetValues(C,1,&i,2,midx,v,INSERT_VALUES);CHKERRQ(ierr);
  i = n-1; midx[0] = n-2; midx[1] = n-1;
  v[0] = -1.0; v[1] = 2.;
  ierr = MatSetValues(C,1,&i,2,midx,v,INSERT_VALUES);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ierr = MatCreate(PETSC_COMM_WORLD,&A);CHKERRQ(ierr);
  ierr = MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,n,n);CHKERRQ(ierr);
  ierr = MatSetFromOptions(A);CHKERRQ(ierr);
  ierr = MatSetUp(A);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  /* Test matrices with different nonzero patterns; note that A is created with
     a different nonzero pattern from that of C! */
  ierr = MatCopy(C,A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
  ierr = MatEqual(A,C,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ(PETSC_COMM_SELF,1,"MatCopy(C,A,DIFFERENT_NONZERO_PATTERN): Matrices are NOT equal");
  ierr = PetscViewerSetFormat(PETSC_VIEWER_STDOUT_WORLD,PETSC_VIEWER_ASCII_INFO);CHKERRQ(ierr);
  ierr = PetscPrintf(PETSC_COMM_WORLD,"A is obtained with MatCopy(,,DIFFERENT_NONZERO_PATTERN):\n");CHKERRQ(ierr);
  ierr = MatView(A,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  ierr = MatDestroy(&A);CHKERRQ(ierr);

  /* Test matrices with the same nonzero pattern */
  ierr = MatDuplicate(C,MAT_DO_NOT_COPY_VALUES,&A);CHKERRQ(ierr);
  ierr = MatCopy(C,A,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
  ierr = MatEqual(A,C,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ(PETSC_COMM_SELF,1,"MatCopy(C,A,SAME_NONZERO_PATTERN): Matrices are NOT equal");
  ierr = PetscViewerSetFormat(PETSC_VIEWER_STDOUT_WORLD,PETSC_VIEWER_ASCII_INFO);CHKERRQ(ierr);
  ierr = PetscPrintf(PETSC_COMM_WORLD,"\nA is obtained with MatCopy(,,SAME_NONZERO_PATTERN):\n");CHKERRQ(ierr);
  ierr = MatView(A,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  ierr = PetscViewerSetFormat(PETSC_VIEWER_STDOUT_WORLD,PETSC_VIEWER_ASCII_COMMON);CHKERRQ(ierr);
  ierr = PetscPrintf(PETSC_COMM_WORLD,"A:\n");CHKERRQ(ierr);
  ierr = MatView(A,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);

  /* Test MatStoreValues()/MatRetrieveValues() */
  if (isAIJ) {
    ierr = MatSetOption(A,MAT_NEW_NONZERO_LOCATIONS,PETSC_FALSE);CHKERRQ(ierr);
    ierr = MatStoreValues(A);CHKERRQ(ierr);
    ierr = MatZeroEntries(A);CHKERRQ(ierr);
    ierr = MatRetrieveValues(A);CHKERRQ(ierr);
  }

  ierr = MatDestroy(&C);CHKERRQ(ierr);
  ierr = MatDestroy(&A);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return 0;
}
/*
  Developer Notes: This should be implemented with a MatCreate_SchurComplement() as that
  is the standard design for new Mat classes.
*/
PetscErrorCode MatGetSchurComplement_Basic(Mat mat,IS isrow0,IS iscol0,IS isrow1,IS iscol1,MatReuse mreuse,Mat *newmat,MatReuse preuse,Mat *newpmat)
{
  PetscErrorCode ierr;
  Mat            A=0,Ap=0,B=0,C=0,D=0;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(mat,MAT_CLASSID,1);
  PetscValidHeaderSpecific(isrow0,IS_CLASSID,2);
  PetscValidHeaderSpecific(iscol0,IS_CLASSID,3);
  PetscValidHeaderSpecific(isrow1,IS_CLASSID,4);
  PetscValidHeaderSpecific(iscol1,IS_CLASSID,5);
  if (mreuse == MAT_REUSE_MATRIX) PetscValidHeaderSpecific(*newmat,MAT_CLASSID,7);
  if (preuse == MAT_REUSE_MATRIX) PetscValidHeaderSpecific(*newpmat,MAT_CLASSID,9);
  PetscValidType(mat,1);
  if (mat->factortype) SETERRQ(((PetscObject)mat)->comm,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");

  if (mreuse != MAT_IGNORE_MATRIX) {
    /* Use MatSchurComplement */
    if (mreuse == MAT_REUSE_MATRIX) {
      ierr = MatSchurComplementGetSubmatrices(*newmat,&A,&Ap,&B,&C,&D);CHKERRQ(ierr);
      if (!A || !Ap || !B || !C) SETERRQ(((PetscObject)mat)->comm,PETSC_ERR_ARG_WRONGSTATE,"Attempting to reuse matrix but Schur complement matrices unset");
      if (A != Ap) SETERRQ(((PetscObject)mat)->comm,PETSC_ERR_ARG_WRONGSTATE,"Preconditioning matrix does not match operator");
      ierr = MatDestroy(&Ap);CHKERRQ(ierr); /* get rid of extra reference */
    }
    ierr = MatGetSubMatrix(mat,isrow0,iscol0,mreuse,&A);CHKERRQ(ierr);
    ierr = MatGetSubMatrix(mat,isrow0,iscol1,mreuse,&B);CHKERRQ(ierr);
    ierr = MatGetSubMatrix(mat,isrow1,iscol0,mreuse,&C);CHKERRQ(ierr);
    ierr = MatGetSubMatrix(mat,isrow1,iscol1,mreuse,&D);CHKERRQ(ierr);
    switch (mreuse) {
    case MAT_INITIAL_MATRIX:
      ierr = MatCreateSchurComplement(A,A,B,C,D,newmat);CHKERRQ(ierr);
      break;
    case MAT_REUSE_MATRIX:
      ierr = MatSchurComplementUpdate(*newmat,A,A,B,C,D,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
      break;
    default:
      SETERRQ(((PetscObject)mat)->comm,PETSC_ERR_SUP,"Unrecognized value of mreuse");
    }
  }
  if (preuse != MAT_IGNORE_MATRIX) {
    /* Use the diagonal part of A to form D - C inv(diag(A)) B */
    Mat         Ad,AdB,S;
    Vec         diag;
    PetscInt    i,m,n,mstart,mend;
    PetscScalar *x;

    /* We could compose these with newpmat so that the matrices can be reused. */
    if (!A) {ierr = MatGetSubMatrix(mat,isrow0,iscol0,MAT_INITIAL_MATRIX,&A);CHKERRQ(ierr);}
    if (!B) {ierr = MatGetSubMatrix(mat,isrow0,iscol1,MAT_INITIAL_MATRIX,&B);CHKERRQ(ierr);}
    if (!C) {ierr = MatGetSubMatrix(mat,isrow1,iscol0,MAT_INITIAL_MATRIX,&C);CHKERRQ(ierr);}
    if (!D) {ierr = MatGetSubMatrix(mat,isrow1,iscol1,MAT_INITIAL_MATRIX,&D);CHKERRQ(ierr);}

    ierr = MatGetVecs(A,&diag,PETSC_NULL);CHKERRQ(ierr);
    ierr = MatGetDiagonal(A,diag);CHKERRQ(ierr);
    ierr = VecReciprocal(diag);CHKERRQ(ierr);
    ierr = MatGetLocalSize(A,&m,&n);CHKERRQ(ierr);
    /* We need to compute S = D - C inv(diag(A)) B. For row-oriented formats it is easy
       to scale the rows of B, and for column-oriented formats the columns of C can be
       scaled; that would skip creating this silly diagonal matrix. */
    ierr = MatCreate(((PetscObject)A)->comm,&Ad);CHKERRQ(ierr);
    ierr = MatSetSizes(Ad,m,n,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
    ierr = MatSetOptionsPrefix(Ad,((PetscObject)mat)->prefix);CHKERRQ(ierr);
    ierr = MatAppendOptionsPrefix(Ad,"diag_");CHKERRQ(ierr);
    ierr = MatSetFromOptions(Ad);CHKERRQ(ierr);
    ierr = MatSeqAIJSetPreallocation(Ad,1,PETSC_NULL);CHKERRQ(ierr);
    ierr = MatMPIAIJSetPreallocation(Ad,1,PETSC_NULL,0,PETSC_NULL);CHKERRQ(ierr);
    ierr = MatGetOwnershipRange(Ad,&mstart,&mend);CHKERRQ(ierr);
    ierr = VecGetArray(diag,&x);CHKERRQ(ierr);
    for (i=mstart; i<mend; i++) {
      ierr = MatSetValue(Ad,i,i,x[i-mstart],INSERT_VALUES);CHKERRQ(ierr);
    }
    ierr = VecRestoreArray(diag,&x);CHKERRQ(ierr);
    ierr = MatAssemblyBegin(Ad,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(Ad,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = VecDestroy(&diag);CHKERRQ(ierr);

    ierr = MatMatMult(Ad,B,MAT_INITIAL_MATRIX,1,&AdB);CHKERRQ(ierr);
    S    = (preuse == MAT_REUSE_MATRIX) ? *newpmat : (Mat)0;
    ierr = MatMatMult(C,AdB,preuse,PETSC_DEFAULT,&S);CHKERRQ(ierr);
    ierr = MatAYPX(S,-1,D,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
    *newpmat = S;
    ierr = MatDestroy(&Ad);CHKERRQ(ierr);
    ierr = MatDestroy(&AdB);CHKERRQ(ierr);
  }
  ierr = MatDestroy(&A);CHKERRQ(ierr);
  ierr = MatDestroy(&B);CHKERRQ(ierr);
  ierr = MatDestroy(&C);CHKERRQ(ierr);
  ierr = MatDestroy(&D);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
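/* Sketch (not from the original source): the comment inside the function above notes that
   the diagonal matrix Ad is only a device for forming inv(diag(A))*B. The same product can
   be formed by row-scaling a copy of B with MatDiagonalScale(), avoiding the auxiliary
   matrix entirely. */
static PetscErrorCode schurApproxByRowScaling(Mat A, Mat B, Mat C, Mat D, Mat *S)
{
  Mat            Bscaled, CAdB;
  Vec            diag;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatGetVecs(A, &diag, PETSC_NULL);CHKERRQ(ierr);
  ierr = MatGetDiagonal(A, diag);CHKERRQ(ierr);
  ierr = VecReciprocal(diag);CHKERRQ(ierr);
  ierr = MatDuplicate(B, MAT_COPY_VALUES, &Bscaled);CHKERRQ(ierr);
  ierr = MatDiagonalScale(Bscaled, diag, PETSC_NULL);CHKERRQ(ierr); /* left scaling = row scaling */
  ierr = MatMatMult(C, Bscaled, MAT_INITIAL_MATRIX, PETSC_DEFAULT, &CAdB);CHKERRQ(ierr);
  ierr = MatAYPX(CAdB, -1, D, DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr); /* S = D - C inv(diag(A)) B */
  *S   = CAdB;
  ierr = MatDestroy(&Bscaled);CHKERRQ(ierr);
  ierr = VecDestroy(&diag);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}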
int main(int argc,char **args)
{
  Mat               A,Asp;
  PetscViewer       fd;                        /* viewer */
  char              file[PETSC_MAX_PATH_LEN];  /* input file name */
  PetscErrorCode    ierr;
  PetscInt          m,n,rstart,rend;
  PetscBool         flg;
  PetscInt          row,ncols,j,nrows,nnzA=0,nnzAsp=0;
  const PetscInt    *cols;
  const PetscScalar *vals;
  PetscReal         norm,percent,val,dtol=1.e-16;
  PetscMPIInt       rank;
  MatInfo           matinfo;
  PetscInt          Dnnz,Onnz;

  PetscInitialize(&argc,&args,(char*)0,help);
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);

  /* Determine the file from which we read the linear system. */
  ierr = PetscOptionsGetString(PETSC_NULL,"-f",file,PETSC_MAX_PATH_LEN,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ(PETSC_COMM_WORLD,1,"Must indicate binary file with the -f option");

  /* Open the binary file; FILE_MODE_READ indicates reading from this file. */
  ierr = PetscViewerBinaryOpen(PETSC_COMM_WORLD,file,FILE_MODE_READ,&fd);CHKERRQ(ierr);

  /* Load the matrix, then destroy the viewer. */
  ierr = MatCreate(PETSC_COMM_WORLD,&A);CHKERRQ(ierr);
  ierr = MatSetOptionsPrefix(A,"a_");CHKERRQ(ierr);
  ierr = MatSetFromOptions(A);CHKERRQ(ierr);
  ierr = MatLoad(A,fd);CHKERRQ(ierr);
  ierr = PetscViewerDestroy(&fd);CHKERRQ(ierr);
  ierr = MatGetSize(A,&m,&n);CHKERRQ(ierr);
  ierr = MatGetInfo(A,MAT_LOCAL,&matinfo);CHKERRQ(ierr);
  /* printf("matinfo.nz_used %g\n",matinfo.nz_used); */

  /* Get a sparser matrix Asp by dropping the (near-)zero entries of A */
  ierr = MatCreate(PETSC_COMM_WORLD,&Asp);CHKERRQ(ierr);
  ierr = MatSetSizes(Asp,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = MatSetOptionsPrefix(Asp,"asp_");CHKERRQ(ierr);
  ierr = MatSetFromOptions(Asp);CHKERRQ(ierr);
  Dnnz = (PetscInt)matinfo.nz_used/m + 1;
  Onnz = Dnnz/2;
  printf("Dnnz %d %d\n",Dnnz,Onnz);
  ierr = MatSeqAIJSetPreallocation(Asp,Dnnz,PETSC_NULL);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Asp,Dnnz,PETSC_NULL,Onnz,PETSC_NULL);CHKERRQ(ierr);

  /* Copy entries above the drop tolerance, and check for zero rows */
  ierr  = MatGetOwnershipRange(A,&rstart,&rend);CHKERRQ(ierr);
  nrows = 0;
  for (row=rstart; row<rend; row++) {
    ierr  = MatGetRow(A,row,&ncols,&cols,&vals);CHKERRQ(ierr);
    nnzA += ncols;
    norm  = 0.0;
    for (j=0; j<ncols; j++) {
      val = PetscAbsScalar(vals[j]);
      if (norm < val) norm = val; /* track the row max */
      if (val > dtol) {
        ierr = MatSetValues(Asp,1,&row,1,&cols[j],&vals[j],INSERT_VALUES);CHKERRQ(ierr);
        nnzAsp++;
      }
    }
    if (!norm) nrows++;
    ierr = MatRestoreRow(A,row,&ncols,&cols,&vals);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(Asp,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(Asp,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  percent = (PetscReal)nnzA*100/(m*n);
  ierr    = PetscPrintf(PETSC_COMM_SELF," [%d] Matrix A local size %d,%d; nnzA %d, %g percent; No. of zero rows: %d\n",rank,m,n,nnzA,percent,nrows);
  percent = (PetscReal)nnzAsp*100/(m*n);
  ierr    = PetscPrintf(PETSC_COMM_SELF," [%d] Matrix Asp nnzAsp %d, %g percent\n",rank,nnzAsp,percent);

  /* Investigate matrix coloring for Asp */
  PetscBool Asp_coloring = PETSC_FALSE;
  ierr = PetscOptionsHasName(PETSC_NULL,"-Asp_color",&Asp_coloring);CHKERRQ(ierr);
  if (Asp_coloring) {
    ISColoring    iscoloring;
    MatFDColoring matfdcoloring;
    ierr = PetscPrintf(PETSC_COMM_WORLD," Create coloring of Asp...\n");
    ierr = MatGetColoring(Asp,MATCOLORINGSL,&iscoloring);CHKERRQ(ierr);
    ierr = MatFDColoringCreate(Asp,iscoloring,&matfdcoloring);CHKERRQ(ierr);
    ierr = MatFDColoringSetFromOptions(matfdcoloring);CHKERRQ(ierr);
    /* ierr = MatFDColoringView(matfdcoloring,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); */
    ierr = ISColoringDestroy(&iscoloring);CHKERRQ(ierr);
    ierr = MatFDColoringDestroy(&matfdcoloring);CHKERRQ(ierr);
  }

  /* Write Asp in binary for study - see ~petsc/src/mat/examples/tests/ex124.c */
  PetscBool Asp_write = PETSC_FALSE;
  ierr = PetscOptionsHasName(PETSC_NULL,"-Asp_write",&Asp_write);CHKERRQ(ierr);
  if (Asp_write) {
    PetscViewer viewer;
    ierr = PetscPrintf(PETSC_COMM_SELF,"Write Asp into file Asp.dat ...\n");
    ierr = PetscViewerBinaryOpen(PETSC_COMM_WORLD,"Asp.dat",FILE_MODE_WRITE,&viewer);CHKERRQ(ierr);
    ierr = MatView(Asp,viewer);CHKERRQ(ierr);
    ierr = PetscViewerDestroy(&viewer);CHKERRQ(ierr);
  }

  ierr = MatDestroy(&A);CHKERRQ(ierr);
  ierr = MatDestroy(&Asp);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return 0;
}
/*
   PCGAMGCreateGraph - create a simple scaled scalar graph from a matrix

   Input Parameter:
.  Amat - matrix

   Output Parameter:
.  a_Gmat - output scalar graph (symmetric?)
*/
PetscErrorCode PCGAMGCreateGraph(Mat Amat, Mat *a_Gmat)
{
  PetscErrorCode ierr;
  PetscInt       Istart,Iend,Ii,jj,kk,ncols,nloc,NN,MM,bs;
  MPI_Comm       comm;
  Mat            Gmat;
  MatType        mtype;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)Amat,&comm);CHKERRQ(ierr);
  ierr = MatGetOwnershipRange(Amat, &Istart, &Iend);CHKERRQ(ierr);
  ierr = MatGetSize(Amat, &MM, &NN);CHKERRQ(ierr);
  ierr = MatGetBlockSize(Amat, &bs);CHKERRQ(ierr);
  nloc = (Iend-Istart)/bs;

#if defined PETSC_GAMG_USE_LOG
  ierr = PetscLogEventBegin(petsc_gamg_setup_events[GRAPH],0,0,0,0);CHKERRQ(ierr);
#endif

  if (bs > 1) {
    const PetscScalar *vals;
    const PetscInt    *idx;
    PetscInt          *d_nnz, *o_nnz, *w0, *w1, *w2;
    PetscBool         ismpiaij, isseqaij;

    /* Determine the preallocation needed for the scalar matrix derived from the vector matrix. */
    ierr = PetscObjectBaseTypeCompare((PetscObject)Amat,MATSEQAIJ,&isseqaij);CHKERRQ(ierr);
    ierr = PetscObjectBaseTypeCompare((PetscObject)Amat,MATMPIAIJ,&ismpiaij);CHKERRQ(ierr);
    ierr = PetscMalloc2(nloc, &d_nnz,isseqaij ? 0 : nloc, &o_nnz);CHKERRQ(ierr);

    if (isseqaij) {
      PetscInt max_d_nnz;

      /* Determine the exact preallocation count for the (sequential) scalar matrix */
      ierr = MatSeqAIJGetMaxRowNonzeros(Amat,&max_d_nnz);CHKERRQ(ierr);
      max_d_nnz = PetscMin(nloc,bs*max_d_nnz);
      ierr = PetscMalloc3(max_d_nnz, &w0,max_d_nnz, &w1,max_d_nnz, &w2);CHKERRQ(ierr);
      for (Ii = 0, jj = 0; Ii < Iend; Ii += bs, jj++) {
        ierr = MatCollapseRows(Amat,Ii,bs,w0,w1,w2,&d_nnz[jj],NULL);CHKERRQ(ierr);
      }
      ierr = PetscFree3(w0,w1,w2);CHKERRQ(ierr);
    } else if (ismpiaij) {
      Mat            Daij,Oaij;
      const PetscInt *garray;
      PetscInt       max_d_nnz;

      ierr = MatMPIAIJGetSeqAIJ(Amat,&Daij,&Oaij,&garray);CHKERRQ(ierr);
      /* Determine the exact preallocation count for the diagonal block portion of the scalar matrix */
      ierr = MatSeqAIJGetMaxRowNonzeros(Daij,&max_d_nnz);CHKERRQ(ierr);
      max_d_nnz = PetscMin(nloc,bs*max_d_nnz);
      ierr = PetscMalloc3(max_d_nnz, &w0,max_d_nnz, &w1,max_d_nnz, &w2);CHKERRQ(ierr);
      for (Ii = 0, jj = 0; Ii < Iend - Istart; Ii += bs, jj++) {
        ierr = MatCollapseRows(Daij,Ii,bs,w0,w1,w2,&d_nnz[jj],NULL);CHKERRQ(ierr);
      }
      ierr = PetscFree3(w0,w1,w2);CHKERRQ(ierr);
      /* Overestimate (usually grossly) the preallocation count for the off-diagonal portion */
      for (Ii = 0, jj = 0; Ii < Iend - Istart; Ii += bs, jj++) {
        o_nnz[jj] = 0;
        for (kk=0; kk<bs; kk++) { /* rows that get collapsed to a single row */
          ierr       = MatGetRow(Oaij,Ii+kk,&ncols,0,0);CHKERRQ(ierr);
          o_nnz[jj] += ncols;
          ierr       = MatRestoreRow(Oaij,Ii+kk,&ncols,0,0);CHKERRQ(ierr);
        }
        if (o_nnz[jj] > (NN/bs-nloc)) o_nnz[jj] = NN/bs-nloc;
      }
    } else SETERRQ(PETSC_COMM_WORLD,PETSC_ERR_USER,"Require AIJ matrix type");

    /* get a scalar copy (norms) of the matrix */
    ierr = MatGetType(Amat,&mtype);CHKERRQ(ierr);
    ierr = MatCreate(comm, &Gmat);CHKERRQ(ierr);
    ierr = MatSetSizes(Gmat,nloc,nloc,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
    ierr = MatSetBlockSizes(Gmat, 1, 1);CHKERRQ(ierr);
    ierr = MatSetType(Gmat, mtype);CHKERRQ(ierr);
    ierr = MatSeqAIJSetPreallocation(Gmat,0,d_nnz);CHKERRQ(ierr);
    ierr = MatMPIAIJSetPreallocation(Gmat,0,d_nnz,0,o_nnz);CHKERRQ(ierr);
    ierr = PetscFree2(d_nnz,o_nnz);CHKERRQ(ierr);

    for (Ii = Istart; Ii < Iend; Ii++) {
      PetscInt dest_row = Ii/bs;
      ierr = MatGetRow(Amat,Ii,&ncols,&idx,&vals);CHKERRQ(ierr);
      for (jj=0; jj<ncols; jj++) {
        PetscInt    dest_col = idx[jj]/bs;
        PetscScalar sv       = PetscAbs(PetscRealPart(vals[jj]));
        ierr = MatSetValues(Gmat,1,&dest_row,1,&dest_col,&sv,ADD_VALUES);CHKERRQ(ierr);
      }
      ierr = MatRestoreRow(Amat,Ii,&ncols,&idx,&vals);CHKERRQ(ierr);
    }
    ierr = MatAssemblyBegin(Gmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(Gmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  } else {
    /* just copy the scalar matrix - abs() is not taken here, but the values are scaled later */
    ierr = MatDuplicate(Amat, MAT_COPY_VALUES, &Gmat);CHKERRQ(ierr);
  }

#if defined PETSC_GAMG_USE_LOG
  ierr = PetscLogEventEnd(petsc_gamg_setup_events[GRAPH],0,0,0,0);CHKERRQ(ierr);
#endif
  *a_Gmat = Gmat;
  PetscFunctionReturn(0);
}