PetscErrorCode MatAXPY_Basic(Mat Y,PetscScalar a,Mat X,MatStructure str) { PetscInt i,start,end,j,ncols,m,n; PetscErrorCode ierr; const PetscInt *row; PetscScalar *val; const PetscScalar *vals; PetscFunctionBegin; ierr = MatGetSize(X,&m,&n);CHKERRQ(ierr); ierr = MatGetOwnershipRange(X,&start,&end);CHKERRQ(ierr); if (a == 1.0) { for (i = start; i < end; i++) { ierr = MatGetRow(X,i,&ncols,&row,&vals);CHKERRQ(ierr); ierr = MatSetValues(Y,1,&i,ncols,row,vals,ADD_VALUES);CHKERRQ(ierr); ierr = MatRestoreRow(X,i,&ncols,&row,&vals);CHKERRQ(ierr); } } else { ierr = PetscMalloc((n+1)*sizeof(PetscScalar),&val);CHKERRQ(ierr); for (i=start; i<end; i++) { ierr = MatGetRow(X,i,&ncols,&row,&vals);CHKERRQ(ierr); for (j=0; j<ncols; j++) { val[j] = a*vals[j]; } ierr = MatSetValues(Y,1,&i,ncols,row,val,ADD_VALUES);CHKERRQ(ierr); ierr = MatRestoreRow(X,i,&ncols,&row,&vals);CHKERRQ(ierr); } ierr = PetscFree(val);CHKERRQ(ierr); } ierr = MatAssemblyBegin(Y,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); ierr = MatAssemblyEnd(Y,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); PetscFunctionReturn(0); }
/* Unit test: build a cubic (order 3) B-spline basis on the line [0,5] with 6
   breakpoints, assemble the second-derivative (D2) matrix in R^1, and check
   selected entries of rows 1 and 2 against reference values.
   Returns 0 on success (ASSERT_* macros return early on failure). */
int testBSplineSetD2R1Mat() {
  PrintTimeStamp(PETSC_COMM_SELF, "D2", NULL);
  MPI_Comm comm = PETSC_COMM_SELF;
  BPS bps; BPSCreate(comm, &bps); BPSSetLine(bps, 5.0, 6);
  int order = 3;
  BSS bss; BSSCreate(comm, &bss); BSSSetKnots(bss, order, bps); BSSSetUp(bss);

  // compute matrix
  Mat M;
  BSSCreateR1Mat(bss, &M);
  BSSD2R1Mat(bss, M);

  // value check: row 1 of the D2 matrix (tolerance 1e-10)
  const PetscScalar *row;
  MatGetRow(M, 1, NULL, NULL, &row);
  ASSERT_DOUBLE_NEAR(0.1666666666, row[0], pow(10.0, -10.0));
  ASSERT_DOUBLE_NEAR(-1.0, row[1], pow(10.0, -10.0));
  ASSERT_DOUBLE_NEAR(0.3333333333, row[2], pow(10.0, -10.0));
  ASSERT_DOUBLE_NEAR(0.16666666666, row[3], pow(10.0, -10.0));
  MatRestoreRow(M, 1, NULL, NULL, &row);

  // value check: row 2
  MatGetRow(M, 2, NULL, NULL, &row);
  ASSERT_DOUBLE_NEAR(0.1666666666666, row[0], pow(10.0, -10.0));
  ASSERT_DOUBLE_NEAR(0.3333333333333, row[1], pow(10.0, -10.0));
  ASSERT_DOUBLE_NEAR(-1.0, row[2] , pow(10.0, -10.0));
  ASSERT_DOUBLE_NEAR(0.3333333333333, row[3], pow(10.0, -10.0));
  ASSERT_DOUBLE_NEAR(0.1666666666666, row[4], pow(10.0, -10.0));
  MatRestoreRow(M, 2, NULL, NULL, &row);

  // Finalize
  MatDestroy(&M);
  BSSDestroy(&bss);
  return 0;
}
/* Convert an arbitrary matrix A into an MPIAdj (adjacency) matrix.
   Diagonal entries are dropped, and each remaining entry's |value| (truncated
   to PetscInt) is stored as an edge weight.  The CSR arrays ia/ja/a built here
   are handed to MatMPIAdjSetPreallocation, which takes ownership of them. */
PetscErrorCode PETSCMAT_DLLEXPORT MatConvertFrom_MPIAdj(Mat A,const MatType type,MatReuse reuse,Mat *newmat)
{
  Mat               B;
  PetscErrorCode    ierr;
  PetscInt          i,m,N,nzeros = 0,*ia,*ja,len,rstart,cnt,j,*a;
  const PetscInt    *rj;
  const PetscScalar *ra;
  MPI_Comm          comm;

  PetscFunctionBegin;
  ierr = MatGetSize(A,PETSC_NULL,&N);CHKERRQ(ierr);
  ierr = MatGetLocalSize(A,&m,PETSC_NULL);CHKERRQ(ierr);
  ierr = MatGetOwnershipRange(A,&rstart,PETSC_NULL);CHKERRQ(ierr);

  /* count the number of nonzeros per row */
  for (i=0; i<m; i++) {
    ierr = MatGetRow(A,i+rstart,&len,&rj,PETSC_NULL);CHKERRQ(ierr);
    for (j=0; j<len; j++) {
      if (rj[j] == i+rstart) {len--; break;}    /* don't count diagonal */
    }
    ierr = MatRestoreRow(A,i+rstart,&len,&rj,PETSC_NULL);CHKERRQ(ierr);
    nzeros += len;
  }

  /* malloc space for nonzeros; +1 guards against zero-size allocations.
     NOTE(review): ia only needs m+1 entries but is allocated with N+1 --
     presumably safe because N >= m here, but confirm for rectangular cases. */
  ierr = PetscMalloc((nzeros+1)*sizeof(PetscInt),&a);CHKERRQ(ierr);
  ierr = PetscMalloc((N+1)*sizeof(PetscInt),&ia);CHKERRQ(ierr);
  ierr = PetscMalloc((nzeros+1)*sizeof(PetscInt),&ja);CHKERRQ(ierr);

  /* second pass: fill CSR structure, again skipping diagonal entries */
  nzeros = 0;
  ia[0]  = 0;
  for (i=0; i<m; i++) {
    ierr = MatGetRow(A,i+rstart,&len,&rj,&ra);CHKERRQ(ierr);
    cnt  = 0;
    for (j=0; j<len; j++) {
      if (rj[j] != i+rstart) { /* if not diagonal */
        a[nzeros+cnt]    = (PetscInt) PetscAbsScalar(ra[j]); /* edge weight = |value| */
        ja[nzeros+cnt++] = rj[j];
      }
    }
    ierr    = MatRestoreRow(A,i+rstart,&len,&rj,&ra);CHKERRQ(ierr);
    nzeros += cnt;
    ia[i+1] = nzeros;
  }

  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  ierr = MatCreate(comm,&B);CHKERRQ(ierr);
  ierr = MatSetSizes(B,m,PETSC_DETERMINE,PETSC_DETERMINE,N);CHKERRQ(ierr);
  ierr = MatSetType(B,type);CHKERRQ(ierr);
  ierr = MatMPIAdjSetPreallocation(B,ia,ja,a);CHKERRQ(ierr);

  /* either replace A in place or hand the new matrix back to the caller */
  if (reuse == MAT_REUSE_MATRIX) {
    ierr = MatHeaderReplace(A,B);CHKERRQ(ierr);
  } else {
    *newmat = B;
  }
  PetscFunctionReturn(0);
}
/* Integration test for the Schur complement: assembles a block system on each
   rank, computes the complement, then checks entries of the inverted A-block
   and of the complement system against closed-form expectations. */
TEST(schur, complement) {
  Profiler::initialize();
  IS set;
  // Create the block-to-processor distribution in "part" form
  // (i.e. the indices of the first rows owned by each processor).
  int np, rank;
  MPI_Comm_size(PETSC_COMM_WORLD, &np);
  MPI_Comm_rank(PETSC_COMM_WORLD, &rank);
  Distribution ds(block_size, MPI_COMM_WORLD);
  Distribution block_ds(block_count, MPI_COMM_WORLD);
  Distribution all_ds(block_size + block_count, MPI_COMM_WORLD);
  /*if (rank == 0) { cout << all_ds; cout << ds; cout << block_ds; }*/
  ISCreateStride(PETSC_COMM_WORLD, ds.lsize(), all_ds.begin(), 1, &set);
  ISView(set, PETSC_VIEWER_STDOUT_WORLD);

  // Call with local size = number of rows on the local processor.
  SchurComplementTest * schurComplement = new SchurComplementTest(set, &all_ds);
  schurComplement->set_solution(NULL);
  schurComplement->set_positive_definite();
  schurComplement->start_allocation();
  schurComplement->fill_matrix( rank, ds, block_ds );  // preallocate matrix
  schurComplement->start_add_assembly();
  schurComplement->fill_matrix( rank, ds, block_ds );  // fill matrix
  schurComplement->finish_assembly();
  MatView(*(schurComplement->get_matrix()),PETSC_VIEWER_STDOUT_WORLD);

  LinSys * lin_sys = new LinSysPetscTest( schurComplement->make_complement_distribution() );
  schurComplement->set_complement( (LinSys_PETSC *)lin_sys );
  schurComplement->solve();

  // test of computed values
  {
    PetscInt ncols;
    const PetscInt *cols;
    const PetscScalar *vals;
    // diagonal of inv(A) should be 1/(rank+2) on every local row
    for (unsigned int i=0; i<block_size; i++) {
      MatGetRow(schurComplement->get_a_inv(), i + rank*block_size, &ncols, &cols, &vals);
      EXPECT_FLOAT_EQ( (1.0 / (double)(rank + 2)), vals[i] );
      MatRestoreRow(schurComplement->get_a_inv(), i + rank*block_size, &ncols, &cols, &vals);
    }
    // complement system entry should be block_size/(rank+2)
    MatGetRow(*(schurComplement->get_system()->get_matrix()), rank, &ncols, &cols, &vals);
    EXPECT_FLOAT_EQ( ((double)block_size / (double)(rank + 2)), vals[0] );
    MatRestoreRow(*(schurComplement->get_system()->get_matrix()), rank, &ncols, &cols, &vals);
  }
}
int testBSplineSetSR1Mat() { PrintTimeStamp(PETSC_COMM_SELF, "S", NULL); MPI_Comm comm = PETSC_COMM_SELF; BPS bps; BPSCreate(comm, &bps); BPSSetLine(bps, 5.0, 6); int order = 3; BSS bss; BSSCreate(comm, &bss); BSSSetKnots(bss, order, bps); BSSSetUp(bss); // compute S matrix Mat S; PetscErrorCode ierr; ierr = BSSCreateR1Mat(bss, &S); ierr = BSSSR1Mat(bss, S); CHKERRQ(ierr); // Get structure and check int m, n; ierr = MatGetSize(S, &m, &n); CHKERRQ(ierr); ASSERT_EQ(5, m); ASSERT_EQ(5, n); // value check const PetscScalar *row; PetscInt ncols; const PetscInt *cols; ierr = MatGetRow(S, 0, &ncols, &cols, &row); CHKERRQ(ierr); ASSERT_EQ(3, ncols); ASSERT_EQ(0, cols[0]); ASSERT_DOUBLE_EQ(1.0/3.0, row[0]); ASSERT_EQ(1, cols[1]); ASSERT_DOUBLE_NEAR(0.2083333333, row[1], pow(10.0, -10.0)); ASSERT_EQ(2, cols[2]); ASSERT_DOUBLE_NEAR(0.0083333333, row[2], pow(10.0, -10.0)); ierr = MatRestoreRow(S, 0, &ncols, &cols, &row); CHKERRQ(ierr); ierr = MatGetRow(S, 2, &ncols, &cols, &row); CHKERRQ(ierr); ASSERT_EQ(5, ncols); for(int i = 0; i < 5; i++) ASSERT_EQ(i, cols[i]); ASSERT_DOUBLE_NEAR(0.0083333333333, row[0], pow(10.0, -10.0)); ASSERT_DOUBLE_NEAR(0.2166666666666, row[1], pow(10.0, -10.0)); ASSERT_DOUBLE_NEAR(0.55, row[2], pow(10.0, -10.0)); ASSERT_DOUBLE_NEAR(0.2166666666666, row[3], pow(10.0, -10.0)); ASSERT_DOUBLE_NEAR(0.0083333333, row[4], pow(10.0, -10.0)); ierr = MatRestoreRow(S, 3, &ncols, &cols, &row); CHKERRQ(ierr); // Finalize MatDestroy(&S); BSSDestroy(&bss); return 0; }
/*@
  MatChop - Set all values in the matrix less than the tolerance to zero

  Input Parameters:
+ A - The matrix
- tol - The zero tolerance

  Output Parameters:
. A - The chopped matrix

  Level: intermediate

.seealso: MatCreate(), MatZeroEntries()
 @*/
PetscErrorCode MatChop(Mat A, PetscReal tol)
{
  PetscScalar    *newVals;
  PetscInt       *newCols;
  PetscInt       rStart, rEnd, numRows, maxRows, r, colMax = 0;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* First pass: find the widest local row so one scratch buffer suffices. */
  ierr = MatGetOwnershipRange(A, &rStart, &rEnd); CHKERRQ(ierr);
  for (r = rStart; r < rEnd; ++r) {
    PetscInt ncols;

    ierr   = MatGetRow(A, r, &ncols, NULL, NULL); CHKERRQ(ierr);
    colMax = PetscMax(colMax, ncols);  /* fix: dropped stray CHKERRQ on a stale ierr */
    ierr   = MatRestoreRow(A, r, &ncols, NULL, NULL); CHKERRQ(ierr);
  }
  /* All ranks must loop the same number of times because the assembly calls
     below are collective; use the global maximum row count. */
  numRows = rEnd - rStart;
  ierr = MPI_Allreduce(&numRows, &maxRows, 1, MPIU_INT, MPI_MAX, PetscObjectComm((PetscObject)A)); CHKERRQ(ierr);
  ierr = PetscMalloc2(colMax,&newCols,colMax,&newVals); CHKERRQ(ierr);
  for (r = rStart; r < rStart+maxRows; ++r) {
    const PetscScalar *vals;
    const PetscInt    *cols;
    PetscInt           ncols, newcols, c;

    if (r < rEnd) {
      /* Copy the row, zeroing entries below tolerance, then write it back. */
      ierr = MatGetRow(A, r, &ncols, &cols, &vals); CHKERRQ(ierr);
      for (c = 0; c < ncols; ++c) {
        newCols[c] = cols[c];
        newVals[c] = PetscAbsScalar(vals[c]) < tol ? 0.0 : vals[c];
      }
      newcols = ncols;
      ierr = MatRestoreRow(A, r, &ncols, &cols, &vals); CHKERRQ(ierr);
      ierr = MatSetValues(A, 1, &r, newcols, newCols, newVals, INSERT_VALUES); CHKERRQ(ierr);
    }
    /* Collective on every iteration, even on ranks past their last row. */
    ierr = MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY); CHKERRQ(ierr);
    ierr = MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY); CHKERRQ(ierr);
  }
  ierr = PetscFree2(newCols,newVals); CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/* Report the number of nonzeros in local row 'Row' of the wrapped PETSc
   matrix.  Always returns 0 (Epetra success code). */
int Epetra_PETScAIJMatrix::NumMyRowEntries(int Row, int & NumEntries) const
{
  /* Translate the local row index to PETSc's global numbering. */
  const int globalRow = PetscRowStart_ + Row;
  MatGetRow(Amat_, globalRow, &NumEntries, PETSC_NULL, PETSC_NULL);
  MatRestoreRow(Amat_, globalRow, &NumEntries, PETSC_NULL, PETSC_NULL);
  return 0;
}
/* Fetch row i of the stored matrix A, returning the column count, column
   indices, and values through the out-parameters.
   NOTE(review): MatRestoreRow is called before the caller ever consumes
   *cols / *row, so the returned pointers reference storage PETSc considers
   released.  This appears to rely on the AIJ implementation keeping the row
   buffers valid after restore -- confirm, or defer the restore to a separate
   call. */
void PETSc::Get_row_of_A(PetscInt i, PetscInt *ncol, PetscInt **cols, double **row)
{
  ierr = MatGetRow(A,i,ncol,(const PetscInt**)cols, (const PetscScalar**)row);
  ierr = MatRestoreRow(A,i,ncol,(const PetscInt**)cols, (const PetscScalar**)row);
}
void PetscSparseStorage::fromGetSpRow( int row, int col, double A[], int /*lenA */, int jcolA[], int& nnz, int colExtent, int& info ) { const double * B; const int * jcolB; int k, ierr; int m, n; this->getSize(m,n); ierr = MatGetRow(M, row, &nnz, &jcolB, &B ); assert( ierr == 0 ); if( col == 0 && colExtent == n ) { // We want the whole row, just do a simple copy for( k = 0; k < nnz; k++ ) { A[k] = B[k]; jcolA[k] = jcolB[k]; } } else { // Copy only those in range int i = 0; for( k = 0; k < nnz; k++ ) { if( jcolB[k] >= col && jcolB[k] < col + colExtent ) { A[i] = B[k]; jcolA[i] = jcolB[k]; i++; } } } ierr = MatRestoreRow( M, row, &nnz, &jcolB, &B ); assert( ierr == 0 ); info = 0; }
/* Unit test: build a cubic (order 3) B-spline basis on the line [0,5] with 6
   breakpoints, assemble the electron-nucleus (EN) matrix for q=2 at a=0.7,
   and check the first entries of row 4 against reference values.
   Returns 0 on success. */
int testBSplineSetENMatR1Mat() {
  PrintTimeStamp(PETSC_COMM_SELF, "EN", NULL);
  MPI_Comm comm = PETSC_COMM_SELF;
  BPS bps; BPSCreate(comm, &bps); BPSSetLine(bps, 5.0, 6);
  int order = 3;
  BSS bss; BSSCreate(comm, &bss); BSSSetKnots(bss, order, bps); BSSSetUp(bss);

  // compute matrix
  Mat M;
  BSSCreateR1Mat(bss, &M);
  BSSENR1Mat(bss, 2, 0.7, M);

  // value check: row 4 (tolerance 1e-8)
  const PetscScalar *row;
  MatGetRow(M, 4, NULL, NULL, &row);
  ASSERT_DOUBLE_NEAR(0.0000964408077, row[0], pow(10.0, -8));
  ASSERT_DOUBLE_NEAR(0.00168615366, row[1], pow(10.0, -8));
  ASSERT_DOUBLE_NEAR(0.00211136230, row[2], pow(10.0, -8));
  MatRestoreRow(M, 4, NULL, NULL, &row);

  // Finalize
  MatDestroy(&M);
  BSSDestroy(&bss);
  return 0;
}
/*
   MatDumpSPAI - Dumps a PETSc matrix to a file in an ASCII format
  suitable for the SPAI code of Stephen Barnard to solve. This routine
  is simply here to allow testing of matrices directly with the SPAI
  code, rather then through the PETSc interface.

  Format: first line is the global dimension n; each subsequent line is
  "row col value" with 1-based indices.  Sequential (one process) only.
*/
PetscErrorCode MatDumpSPAI(Mat A,FILE *file)
{
  const PetscScalar *vals;
  PetscErrorCode    ierr;
  int               i,j,n,size,nz;
  const int         *cols;
  MPI_Comm          comm;

  /* BUG FIX: PetscFunctionBegin was missing; PetscFunctionReturn without a
     matching Begin corrupts PETSc's debug call-stack tracking. */
  PetscFunctionBegin;
  PetscObjectGetComm((PetscObject)A,&comm);
  MPI_Comm_size(comm,&size);
  if (size > 1) SETERRQ(((PetscObject)A)->comm,PETSC_ERR_SUP,"Only single processor dumps");
  ierr = MatGetSize(A,&n,&n);CHKERRQ(ierr);

  /* print the matrix */
  fprintf(file,"%d\n",n);
  for (i=0; i<n; i++) {
    ierr = MatGetRow(A,i,&nz,&cols,&vals);CHKERRQ(ierr);
    for (j=0; j<nz; j++) {
      fprintf(file,"%d %d %16.14e\n",i+1,cols[j]+1,vals[j]);
    }
    ierr = MatRestoreRow(A,i,&nz,&cols,&vals);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
/*@
  MatComputeBandwidth - Calculate the full bandwidth of the matrix, meaning the width 2k+1 where k diagonals on either side are sufficient to contain all the matrix nonzeros.

  Collective on Mat

  Input Parameters:
+  A - The Mat
-  fraction - An optional percentage of the Frobenius norm of the matrix that the bandwidth should enclose

  Output Parameter:
.  bw - The matrix bandwidth

  Level: beginner

.seealso: DMPlexCreate(), DMPlexSetConeSize(), DMPlexSetChart()
@*/
PetscErrorCode MatComputeBandwidth(Mat A, PetscReal fraction, PetscInt *bw)
{
  PetscInt       lbw[2] = {0, 0}, gbw[2]; /* local / global {left, right} half-bandwidths */
  PetscInt       rStart, rEnd, r;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
  PetscValidLogicalCollectiveReal(A,fraction,2);
  PetscValidPointer(bw, 3);
  if ((fraction > 0.0) && (fraction < 1.0)) SETERRQ(PetscObjectComm((PetscObject) A), PETSC_ERR_SUP, "We do not yet support a fractional bandwidth");
  ierr = MatGetOwnershipRange(A, &rStart, &rEnd);CHKERRQ(ierr);
  for (r = rStart; r < rEnd; ++r) {
    const PetscInt *cols;
    PetscInt        ncols;

    /* cols is sorted, so the first/last entries bound the row's extent */
    ierr = MatGetRow(A, r, &ncols, &cols, NULL);CHKERRQ(ierr);
    if (ncols) {
      lbw[0] = PetscMax(lbw[0], r - cols[0]);       /* widest reach left of the diagonal */
      lbw[1] = PetscMax(lbw[1], cols[ncols-1] - r); /* widest reach right of the diagonal */
    }
    ierr = MatRestoreRow(A, r, &ncols, &cols, NULL);CHKERRQ(ierr);
  }
  ierr = MPI_Allreduce(lbw, gbw, 2, MPIU_INT, MPI_MAX, PetscObjectComm((PetscObject) A));CHKERRQ(ierr);
  *bw = 2*PetscMax(gbw[0], gbw[1]) + 1; /* symmetric band wide enough for both sides */
  PetscFunctionReturn(0);
}
/* Build largest-first coloring weights: each local vertex gets its row nonzero
   count plus a random perturbation in [0,1) to break ties between vertices of
   equal degree.
   NOTE(review): the distance-'dist' degrees computed into 'degrees' are not
   used below (weights use the direct row length instead) -- confirm intent. */
PetscErrorCode MatColoringCreateLargestFirstWeights(MatColoring mc,PetscReal *weights)
{
  PetscErrorCode ierr;
  PetscInt       i,s,e,n,ncols;
  PetscRandom    rand;
  PetscReal      r;
  PetscInt       *degrees;
  Mat            G = mc->mat;

  PetscFunctionBegin;
  /* each weight should be the degree plus a random perturbation */
  ierr = PetscRandomCreate(PetscObjectComm((PetscObject)mc),&rand);CHKERRQ(ierr);
  ierr = PetscRandomSetFromOptions(rand);CHKERRQ(ierr);
  ierr = MatGetOwnershipRange(G,&s,&e);CHKERRQ(ierr);
  n    = e-s;
  /* BUG FIX: the argument was mis-encoded as "°rees" (garbled "&degrees"),
     which does not compile. */
  ierr = PetscMalloc1(n,&degrees);CHKERRQ(ierr);
  ierr = MatColoringGetDegrees(G,mc->dist,degrees);CHKERRQ(ierr);
  for (i=s;i<e;i++) {
    ierr = MatGetRow(G,i,&ncols,NULL,NULL);CHKERRQ(ierr);
    ierr = PetscRandomGetValueReal(rand,&r);CHKERRQ(ierr);
    weights[i-s] = ncols + PetscAbsReal(r);
    ierr = MatRestoreRow(G,i,&ncols,NULL,NULL);CHKERRQ(ierr);
  }
  ierr = PetscRandomDestroy(&rand);CHKERRQ(ierr);
  ierr = PetscFree(degrees);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/* Compute the diagonal of N = A^H * A (the Hermitian normal matrix) without
   forming N: diag(N)_j = sum_i |A_ij|^2, accumulated locally per column and
   reduced across ranks, then scaled by Na->scale. */
PetscErrorCode MatGetDiagonalHermitian_Normal(Mat N,Vec v)
{
  Mat_Normal        *Na = (Mat_Normal*)N->data;
  Mat               A   = Na->A;
  PetscErrorCode    ierr;
  PetscInt          i,j,rstart,rend,nnz;
  const PetscInt    *cols;
  PetscScalar       *diag,*work,*values;
  const PetscScalar *mvalues;

  PetscFunctionBegin;
  /* work accumulates local column sums; diag receives the global reduction */
  ierr = PetscMalloc2(A->cmap->N,&diag,A->cmap->N,&work);CHKERRQ(ierr);
  ierr = PetscMemzero(work,A->cmap->N*sizeof(PetscScalar));CHKERRQ(ierr);
  ierr = MatGetOwnershipRange(A,&rstart,&rend);CHKERRQ(ierr);
  for (i=rstart; i<rend; i++) {
    ierr = MatGetRow(A,i,&nnz,&cols,&mvalues);CHKERRQ(ierr);
    for (j=0; j<nnz; j++) {
      /* |A_ij|^2 contribution to column cols[j] */
      work[cols[j]] += mvalues[j]*PetscConj(mvalues[j]);
    }
    ierr = MatRestoreRow(A,i,&nnz,&cols,&mvalues);CHKERRQ(ierr);
  }
  ierr   = MPIU_Allreduce(work,diag,A->cmap->N,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)N));CHKERRQ(ierr);
  /* copy the locally-owned slice of the global diagonal into v */
  rstart = N->cmap->rstart;
  rend   = N->cmap->rend;
  ierr   = VecGetArray(v,&values);CHKERRQ(ierr);
  ierr   = PetscMemcpy(values,diag+rstart,(rend-rstart)*sizeof(PetscScalar));CHKERRQ(ierr);
  ierr   = VecRestoreArray(v,&values);CHKERRQ(ierr);
  ierr   = PetscFree2(diag,work);CHKERRQ(ierr);
  ierr   = VecScale(v,Na->scale);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/*
  MatConvert_Basic - Converts from any input format to another format.
  For parallel formats, the new matrix distribution is determined by PETSc.

  Does not do preallocation so in general will be slow
 */
PETSC_INTERN PetscErrorCode MatConvert_Basic(Mat mat, MatType newtype,MatReuse reuse,Mat *newmat)
{
  Mat               M;
  const PetscScalar *vwork;
  PetscErrorCode    ierr;
  PetscInt          nz,i,m,n,rstart,rend,lm,ln;
  const PetscInt    *cwork;
  PetscBool         isSBAIJ;

  PetscFunctionBegin;
  /* SBAIJ stores only the upper triangle, so full rows cannot be extracted */
  ierr = PetscObjectTypeCompare((PetscObject)mat,MATSEQSBAIJ,&isSBAIJ);CHKERRQ(ierr);
  if (!isSBAIJ) {
    ierr = PetscObjectTypeCompare((PetscObject)mat,MATMPISBAIJ,&isSBAIJ);CHKERRQ(ierr);
  }
  if (isSBAIJ) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Cannot convert from SBAIJ matrix since cannot obtain entire rows of matrix");

  ierr = MatGetSize(mat,&m,&n);CHKERRQ(ierr);
  ierr = MatGetLocalSize(mat,&lm,&ln);CHKERRQ(ierr);
  if (ln == n) ln = PETSC_DECIDE; /* try to preserve column ownership */

  /* create the target matrix; the dense preallocation calls are no-ops for
     non-dense target types */
  ierr = MatCreate(PetscObjectComm((PetscObject)mat),&M);CHKERRQ(ierr);
  ierr = MatSetSizes(M,lm,ln,m,n);CHKERRQ(ierr);
  ierr = MatSetBlockSizesFromMats(M,mat,mat);CHKERRQ(ierr);
  ierr = MatSetType(M,newtype);CHKERRQ(ierr);
  ierr = MatSeqDenseSetPreallocation(M,NULL);CHKERRQ(ierr);
  ierr = MatMPIDenseSetPreallocation(M,NULL);CHKERRQ(ierr);
  ierr = MatSetUp(M);CHKERRQ(ierr);
  /* no preallocation was done, so new-nonzero errors must be disabled */
  ierr = MatSetOption(M,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_FALSE);CHKERRQ(ierr);
  ierr = MatSetOption(M,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_FALSE);CHKERRQ(ierr);

  /* if the target is SBAIJ, lower-triangular entries must be silently dropped */
  ierr = PetscObjectTypeCompare((PetscObject)M,MATSEQSBAIJ,&isSBAIJ);CHKERRQ(ierr);
  if (!isSBAIJ) {
    ierr = PetscObjectTypeCompare((PetscObject)M,MATMPISBAIJ,&isSBAIJ);CHKERRQ(ierr);
  }
  if (isSBAIJ) {
    ierr = MatSetOption(M,MAT_IGNORE_LOWER_TRIANGULAR,PETSC_TRUE);CHKERRQ(ierr);
  }

  /* copy the matrix row by row */
  ierr = MatGetOwnershipRange(mat,&rstart,&rend);CHKERRQ(ierr);
  for (i=rstart; i<rend; i++) {
    ierr = MatGetRow(mat,i,&nz,&cwork,&vwork);CHKERRQ(ierr);
    ierr = MatSetValues(M,1,&i,nz,cwork,vwork,INSERT_VALUES);CHKERRQ(ierr);
    ierr = MatRestoreRow(mat,i,&nz,&cwork,&vwork);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(M,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(M,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  /* either replace 'mat' in place or hand the new matrix back */
  if (reuse == MAT_INPLACE_MATRIX) {
    ierr = MatHeaderReplace(mat,&M);CHKERRQ(ierr);
  } else {
    *newmat = M;
  }
  PetscFunctionReturn(0);
}
/* Dense matrix product: answer = this * B, computed via PETSc's MatMatMult
   on a temporary dense copy of B, then copied back column-major into answer.
   Sequential only (errors out in parallel mode). */
void PetscSparseMtrx :: times(const FloatMatrix &B, FloatMatrix &answer) const
{
    if ( this->giveNumberOfColumns() != B.giveNumberOfRows() ) {
        OOFEM_ERROR("Dimension mismatch");
    }

#ifdef __PARALLEL_MODE
    if ( emodel->isParallel() ) {
        OOFEM_ERROR("PetscSparseMtrx :: times - Not implemented");
    }
#endif
    // I'm opting to work with a set of vectors, as i think it might be faster and more robust. / Mikael
    int nr = this->giveNumberOfRows();
    int nc = B.giveNumberOfColumns();
    answer.resize(nr, nc);
    double *aptr = answer.givePointer();

#if 0
    // Approach using several vectors. Not sure if it is optimal, but it includes petsc calls which i suspect are inefficient. / Mikael
    // UNTESTED!
    Vec globX, globY;
    VecCreate(PETSC_COMM_SELF, &globY);
    VecSetType(globY, VECSEQ);
    VecSetSizes(globY, PETSC_DECIDE, nr);
    int nrB = B.giveNumberOfRows();
    for (int k = 0; k < nc; k++) {
        double colVals[nrB];
        for (int i = 0; i < nrB; i++) colVals[i] = B(i,k); // B.copyColumn(Bk,k);
        VecCreateSeqWithArray(PETSC_COMM_SELF, nrB, colVals, &globX);
        MatMult(this->mtrx, globX, globY );
        double *ptr;
        VecGetArray(globY, &ptr);
        for (int i = 0; i < nr; i++) *aptr++ = ptr[i]; // answer.setColumn(Ak,k);
        VecRestoreArray(globY, &ptr);
        VecDestroy(globX);
    }
    VecDestroy(globY);
#endif
    // Wrap B's storage in a sequential dense PETSc matrix (no copy), multiply,
    // then read the product row by row.
    Mat globB, globC;
    MatCreateSeqDense(PETSC_COMM_SELF, B.giveNumberOfRows(), B.giveNumberOfColumns(), B.givePointer(), & globB);
    MatMatMult(this->mtrx, globB, MAT_INITIAL_MATRIX, PETSC_DEFAULT, & globC);

    const double *vals;
    for ( int r = 0; r < nr; r++ ) {
        MatGetRow(globC, r, NULL, NULL, & vals);
        // Scatter dense row r into answer's column-major storage:
        // element (r, i) lives at offset r + i*nr.
        for ( int i = 0, i2 = r; i < nc; i++, i2 += nr ) {
            aptr [ i2 ] = vals [ i ];
        }
        MatRestoreRow(globC, r, NULL, NULL, & vals);
    }

    MatDestroy(&globB);
    MatDestroy(&globC);
}
/* MatGetRow implementation for the submatrix-free shell matrix: simply
   forwards the request to the wrapped inner matrix stored in the shell
   context.  (The matching restore is handled by the companion routine.) */
PetscErrorCode MatGetRow_SMF(Mat mat,PetscInt row,PetscInt *ncols,const PetscInt **cols,const PetscScalar **vals)
{
  PetscErrorCode   ierr;
  MatSubMatFreeCtx shell;

  PetscFunctionBegin;
  ierr = MatShellGetContext(mat,(void **)&shell);CHKERRQ(ierr);
  ierr = MatGetRow(shell->A,row,ncols,cols,vals);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/* Smooth the classical-AMG prolongator P with weighted Jacobi:
   P <- P - D^{-1} A P, applied nsmooths times, keeping rows that correspond
   to coarse unknowns fixed (identity rows are zeroed in the update).
   Finishes by truncating the prolongator. */
PetscErrorCode PCGAMGOptProl_Classical_Jacobi(PC pc,const Mat A,Mat *P)
{
  PetscErrorCode    ierr;
  PetscInt          f,s,n,cf,cs,i,idx;
  PetscInt          *coarserows;
  PetscInt          ncols;
  const PetscInt    *pcols;
  const PetscScalar *pvals;
  Mat               Pnew;
  Vec               diag;
  PC_MG             *mg      = (PC_MG*)pc->data;
  PC_GAMG           *pc_gamg = (PC_GAMG*)mg->innerctx;
  PC_GAMG_Classical *cls     = (PC_GAMG_Classical*)pc_gamg->subctx;

  PetscFunctionBegin;
  if (cls->nsmooths == 0) {
    /* nothing to smooth; just truncate and return */
    ierr = PCGAMGTruncateProlongator_Private(pc,P);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }
  ierr = MatGetOwnershipRange(*P,&s,&f);CHKERRQ(ierr);
  n    = f-s;
  ierr = MatGetOwnershipRangeColumn(*P,&cs,&cf);CHKERRQ(ierr);
  ierr = PetscMalloc(sizeof(PetscInt)*n,&coarserows);CHKERRQ(ierr);
  /* identify the rows corresponding to coarse unknowns */
  idx = 0;
  for (i=s;i<f;i++) {
    ierr = MatGetRow(*P,i,&ncols,&pcols,&pvals);CHKERRQ(ierr);
    /* assume, for now, that it's a coarse unknown if it has a single unit entry */
    if (ncols == 1) {
      if (pvals[0] == 1.) {
        coarserows[idx] = i;
        idx++;
      }
    }
    ierr = MatRestoreRow(*P,i,&ncols,&pcols,&pvals);CHKERRQ(ierr);
  }
  /* diag holds 1/diag(A) for the Jacobi scaling */
  ierr = MatGetVecs(A,&diag,0);CHKERRQ(ierr);
  ierr = MatGetDiagonal(A,diag);CHKERRQ(ierr);
  ierr = VecReciprocal(diag);CHKERRQ(ierr);
  for (i=0;i<cls->nsmooths;i++) {
    /* Pnew = A*P; zero coarse rows; Pnew = P - D^{-1} Pnew */
    ierr = MatMatMult(A,*P,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&Pnew);CHKERRQ(ierr);
    ierr = MatZeroRows(Pnew,idx,coarserows,0.,NULL,NULL);CHKERRQ(ierr);
    ierr = MatDiagonalScale(Pnew,diag,0);CHKERRQ(ierr);
    ierr = MatAYPX(Pnew,-1.0,*P,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
    ierr = MatDestroy(P);CHKERRQ(ierr);
    *P   = Pnew;
    Pnew = NULL;
  }
  ierr = VecDestroy(&diag);CHKERRQ(ierr);
  ierr = PetscFree(coarserows);CHKERRQ(ierr);
  ierr = PCGAMGTruncateProlongator_Private(pc,P);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "MatConvert_MPIAIJ_MPISBAIJ"
/* Convert a symmetric MPIAIJ matrix to MPISBAIJ (bs=1) by copying only the
   upper-triangular part (including the diagonal) of each locally-owned row.
   Requires the caller to have declared the matrix symmetric. */
PetscErrorCode MatConvert_MPIAIJ_MPISBAIJ(Mat A, MatType newtype,MatReuse reuse,Mat *newmat)
{
  PetscErrorCode    ierr;
  Mat               M;
  Mat_MPIAIJ        *mpimat = (Mat_MPIAIJ*)A->data;
  Mat_SeqAIJ        *Aa     = (Mat_SeqAIJ*)mpimat->A->data,*Ba = (Mat_SeqAIJ*)mpimat->B->data;
  PetscInt          *d_nnz,*o_nnz;
  PetscInt          i,j,nz;
  PetscInt          m,n,lm,ln;
  PetscInt          rstart,rend;
  const PetscScalar *vwork;
  const PetscInt    *cwork;

  PetscFunctionBegin;
  if (!A->symmetric) SETERRQ(((PetscObject)A)->comm,PETSC_ERR_USER,"Matrix must be symmetric. Call MatSetOption(mat,MAT_SYMMETRIC,PETSC_TRUE)");
  ierr = MatGetSize(A,&m,&n);CHKERRQ(ierr);
  ierr = MatGetLocalSize(A,&lm,&ln);CHKERRQ(ierr);
  ierr = PetscMalloc2(lm,PetscInt,&d_nnz,lm,PetscInt,&o_nnz);CHKERRQ(ierr);

  /* preallocation: diagonal-block rows keep only entries from the stored
     diagonal onward; off-diagonal block rows are taken whole */
  ierr = MatMarkDiagonal_SeqAIJ(mpimat->A);CHKERRQ(ierr);
  for (i=0;i<lm;i++){
    d_nnz[i] = Aa->i[i+1] - Aa->diag[i];
    o_nnz[i] = Ba->i[i+1] - Ba->i[i];
  }
  ierr = MatCreate(((PetscObject)A)->comm,&M);CHKERRQ(ierr);
  ierr = MatSetSizes(M,lm,ln,m,n);CHKERRQ(ierr);
  ierr = MatSetType(M,MATMPISBAIJ);CHKERRQ(ierr);
  ierr = MatSeqSBAIJSetPreallocation(M,1,0,d_nnz);CHKERRQ(ierr);
  ierr = MatMPISBAIJSetPreallocation(M,1,0,d_nnz,0,o_nnz);CHKERRQ(ierr);
  ierr = PetscFree2(d_nnz,o_nnz);CHKERRQ(ierr);

  ierr = MatGetOwnershipRange(A,&rstart,&rend);CHKERRQ(ierr);
  for (i=rstart;i<rend;i++){
    ierr = MatGetRow(A,i,&nz,&cwork,&vwork);CHKERRQ(ierr);
    /* skip strictly-lower-triangular entries; only the upper part is stored.
       NOTE(review): assumes each row contains at least one entry with
       column >= i (e.g. the diagonal); otherwise cwork[j] over-runs -- confirm. */
    j = 0;
    while (cwork[j] < i){ j++; nz--;}
    ierr = MatSetValues(M,1,&i,nz,cwork+j,vwork+j,INSERT_VALUES);CHKERRQ(ierr);
    ierr = MatRestoreRow(A,i,&nz,&cwork,&vwork);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(M,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(M,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  if (reuse == MAT_REUSE_MATRIX) {
    ierr = MatHeaderReplace(A,M);CHKERRQ(ierr);
  } else {
    *newmat = M;
  }
  PetscFunctionReturn(0);
}
/* Build the AMG strength-of-connection matrix: entry (i,j) is kept when
   -A(i,j) >= theta * max_k(-A(i,k)).  The kept pattern is assembled into a
   new MPIAIJ matrix (values are left zero; only the sparsity matters). */
void build_strength_matrix(Mat A, PetscReal theta, Mat* strength) {
    //Variables for the new matrix structure
    std::vector<PetscInt> rows, cols;  // CSR row pointers and column indices
    int cursor = 0;                    // running nonzero count (next row offset)

    PetscInt start;
    PetscInt end;
    MatGetOwnershipRange((A), &start, &end);

    for (int row=start; row<end; row++) {
        PetscInt ncols;
        const PetscInt *col_indx;
        const PetscScalar *col_value;
        MatGetRow((A), row, &ncols, &col_indx, &col_value);
        rows.push_back(cursor);

        // First, find the threshhold for this row: theta times the largest
        // negative coupling (negated so stronger couplings are larger).
        PetscScalar strong_threshhold = -col_value[0];
        for (int ii=0; ii<ncols; ii++) {
            if (-col_value[ii] > strong_threshhold) {
                strong_threshhold = -col_value[ii];
            }
        }
        strong_threshhold *= theta;

        //if the threshold is negative, assume that this row only has a diagonal entry and skip the row
        if (strong_threshhold > 0) {
            for (int ii=0; ii<ncols; ii++) {
                if (-col_value[ii] >= strong_threshhold) {
                    cols.push_back(col_indx[ii]);
                    cursor++;
                }
            }
        }
        MatRestoreRow((A), row, &ncols, &col_indx, &col_value);
    }
    rows.push_back(cursor);  // final row pointer closes the CSR structure

    MatGetOwnershipRange((A), &start, &end);
    std::vector<PetscScalar> data(cols.size());  // zero-valued entries; pattern only

    //TODO: control for cases where cols and rows are split differently
    //TODO: replace this PETSC_COMM_WORLD so the strength matrix is using the same communicator as the original matrix.
    MatCreate(PETSC_COMM_WORLD, strength);
    MatSetSizes(*strength, end-start, end-start, PETSC_DETERMINE, PETSC_DETERMINE);
    MatSetType(*strength,MATMPIAIJ);
    MatMPIAIJSetPreallocationCSR(*strength,&rows[0],&cols[0],&data[0]);
    //TODO: the above code is a work around for a bug in the following function call:
    //MatCreateMPIAIJWithArrays(PETSC_COMM_WORLD, end-start, end-start, PETSC_DETERMINE, PETSC_DETERMINE, &rows[0], &cols[0], &data[0], strength);
}
/* Convert an MPIBAIJ matrix to MPISBAIJ with the same block size.  Whole rows
   are copied; lower-triangular entries are discarded by the target matrix via
   the MAT_IGNORE_LOWER_TRIANGULAR option rather than by explicit skipping. */
PETSC_EXTERN PetscErrorCode MatConvert_MPIBAIJ_MPISBAIJ(Mat A, MatType newtype,MatReuse reuse,Mat *newmat)
{
  PetscErrorCode    ierr;
  Mat               M;
  Mat_MPIBAIJ       *mpimat = (Mat_MPIBAIJ*)A->data;
  Mat_SeqBAIJ       *Aa     = (Mat_SeqBAIJ*)mpimat->A->data,*Ba = (Mat_SeqBAIJ*)mpimat->B->data;
  PetscInt          *d_nnz,*o_nnz;
  PetscInt          i,nz;
  PetscInt          m,n,lm,ln;
  PetscInt          rstart,rend;
  const PetscScalar *vwork;
  const PetscInt    *cwork;
  PetscInt          bs = A->rmap->bs;

  PetscFunctionBegin;
  ierr = MatGetSize(A,&m,&n);CHKERRQ(ierr);
  ierr = MatGetLocalSize(A,&lm,&ln);CHKERRQ(ierr);
  ierr = PetscMalloc2(lm/bs,&d_nnz,lm/bs,&o_nnz);CHKERRQ(ierr);

  /* preallocation per block row: diagonal block keeps entries from the
     stored diagonal onward; off-diagonal block rows are taken whole */
  ierr = MatMarkDiagonal_SeqBAIJ(mpimat->A);CHKERRQ(ierr);
  for (i=0; i<lm/bs; i++) {
    d_nnz[i] = Aa->i[i+1] - Aa->diag[i];
    o_nnz[i] = Ba->i[i+1] - Ba->i[i];
  }
  ierr = MatCreate(PetscObjectComm((PetscObject)A),&M);CHKERRQ(ierr);
  ierr = MatSetSizes(M,lm,ln,m,n);CHKERRQ(ierr);
  ierr = MatSetType(M,MATMPISBAIJ);CHKERRQ(ierr);
  ierr = MatSeqSBAIJSetPreallocation(M,bs,0,d_nnz);CHKERRQ(ierr);
  ierr = MatMPISBAIJSetPreallocation(M,bs,0,d_nnz,0,o_nnz);CHKERRQ(ierr);
  ierr = PetscFree2(d_nnz,o_nnz);CHKERRQ(ierr);

  ierr = MatGetOwnershipRange(A,&rstart,&rend);CHKERRQ(ierr);
  /* the target silently drops strictly-lower-triangular insertions */
  ierr = MatSetOption(M,MAT_IGNORE_LOWER_TRIANGULAR,PETSC_TRUE);CHKERRQ(ierr);
  for (i=rstart; i<rend; i++) {
    ierr = MatGetRow(A,i,&nz,&cwork,&vwork);CHKERRQ(ierr);
    /* cleanup: removed vestigial 'j = 0' offset (cwork+j/vwork+j with j
       always 0); lower-triangular filtering is handled by the option above */
    ierr = MatSetValues(M,1,&i,nz,cwork,vwork,INSERT_VALUES);CHKERRQ(ierr);
    ierr = MatRestoreRow(A,i,&nz,&cwork,&vwork);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(M,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(M,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  if (reuse == MAT_REUSE_MATRIX) {
    ierr = MatHeaderReplace(A,&M);CHKERRQ(ierr);
  } else {
    *newmat = M;
  }
  PetscFunctionReturn(0);
}
/* Fortran binding for MatGetRow: copies the row's column indices and values
   into caller-supplied Fortran arrays (PETSc's C pointers cannot be returned
   to Fortran directly).  Only one row may be active at a time; the state is
   tracked in the file-level 'matgetrowactive' flag and released by the
   matching MatRestoreRow binding. */
PETSC_EXTERN void PETSC_STDCALL matgetrow_(Mat *mat,PetscInt *row,PetscInt *ncols,PetscInt *cols,PetscScalar *vals,PetscErrorCode *ierr)
{
  /* my_ocols/my_ovals are file-level staging pointers filled by MatGetRow */
  const PetscInt    **oocols = &my_ocols;
  const PetscScalar **oovals = &my_ovals;

  if (matgetrowactive) {
    PetscError(PETSC_COMM_SELF,__LINE__,"MatGetRow_Fortran",__FILE__,PETSC_ERR_ARG_WRONGSTATE,PETSC_ERROR_INITIAL,
               "Cannot have two MatGetRow() active simultaneously\n\
call MatRestoreRow() before calling MatGetRow() a second time");
    *ierr = 1;
    return;
  }
  /* Fortran may pass PETSC_NULL_INTEGER/SCALAR; skip those outputs */
  CHKFORTRANNULLINTEGER(cols);
  if (!cols) oocols = NULL;
  CHKFORTRANNULLSCALAR(vals);
  if (!vals) oovals = NULL;

  *ierr = MatGetRow(*mat,*row,ncols,oocols,oovals);
  if (*ierr) return;

  /* copy from the C-side buffers into the Fortran arrays */
  if (oocols) { *ierr = PetscMemcpy(cols,my_ocols,(*ncols)*sizeof(PetscInt)); if (*ierr) return;}
  if (oovals) { *ierr = PetscMemcpy(vals,my_ovals,(*ncols)*sizeof(PetscScalar)); if (*ierr) return;}
  matgetrowactive = 1;
}
/* hypre/Euclid adapter: fetch one row of a PETSc matrix through Euclid's
   generic get-row interface, reporting failures via Euclid's error macros. */
void EuclidGetRow(void *Ain, HYPRE_Int row, HYPRE_Int *len, HYPRE_Int **ind, double **val)
{
  START_FUNC_DH
  Mat       mat = Ain;
  HYPRE_Int rc  = MatGetRow(mat, row, len, ind, val);

  if (rc) {
    hypre_sprintf(msgBuf_dh, "PETSc's MatGetRow bombed for row= %i", row);
    SET_V_ERROR(msgBuf_dh);
  }
  END_FUNC_DH
}
/* Preallocate the power-grid Jacobian J: per-row nonzero counts come from the
   fixed 9-equation generator stencil plus the network (Ybus) connectivity.
   Uses the file-level globals ngen, gbus, and nbus. */
PetscErrorCode PreallocateJacobian(Mat J, Userctx *user)
{
  PetscErrorCode ierr;
  PetscInt       *d_nnz;
  PetscInt       i,idx=0,start=0;

  PetscFunctionBegin;
  ierr = PetscMalloc1(user->neqs_pgrid,&d_nnz);CHKERRQ(ierr);
  for (i=0; i<user->neqs_pgrid; i++) d_nnz[i] = 0;
  /* Generator subsystem: fixed per-equation counts for the 9 generator
     equations, plus the coupling into the two network rows at gbus[i] */
  for (i=0; i < ngen; i++) {
    d_nnz[idx]   += 3;
    d_nnz[idx+1] += 2;
    d_nnz[idx+2] += 2;
    d_nnz[idx+3] += 5;
    d_nnz[idx+4] += 6;
    d_nnz[idx+5] += 6;
    d_nnz[user->neqs_gen+2*gbus[i]]   += 3;
    d_nnz[user->neqs_gen+2*gbus[i]+1] += 3;
    d_nnz[idx+6] += 2;
    d_nnz[idx+7] += 2;
    d_nnz[idx+8] += 5;
    idx = idx + 9;
  }
  start = user->neqs_gen;

  /* Network subsystem: each bus contributes its Ybus row length to both of
     its two equations (real and imaginary parts) */
  PetscInt ncols;
  for (i=0; i < nbus; i++) {
    ierr = MatGetRow(user->Ybus,2*i,&ncols,NULL,NULL);CHKERRQ(ierr);
    d_nnz[start+2*i]   += ncols;
    d_nnz[start+2*i+1] += ncols;
    ierr = MatRestoreRow(user->Ybus,2*i,&ncols,NULL,NULL);CHKERRQ(ierr);
  }
  ierr = MatSeqAIJSetPreallocation(J,0,d_nnz);CHKERRQ(ierr);
  ierr = PetscFree(d_nnz);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
int testBSplineSetEE() { PrintTimeStamp(PETSC_COMM_SELF, "EE", NULL); MPI_Comm comm = PETSC_COMM_SELF; BPS bps; BPSCreate(comm, &bps); BPSSetLine(bps, 5.0, 6); int order = 3; BSS bss; BSSCreate(comm, &bss); BSSSetKnots(bss, order, bps); BSSSetUp(bss); Mat ee; BSSCreateR2Mat(bss, &ee); BSSEER2Mat(bss, 0, ee); // size check PetscInt n, m; MatGetSize(ee, &n, &m); ASSERT_EQ(25, n); ASSERT_EQ(25, m); // value check const PetscScalar *row; PetscInt ncols; const PetscInt *cols; MatGetRow(ee, 1, &ncols, &cols, &row); ASSERT_DOUBLE_NEAR(0.0709681582, row[0], pow(10.0, -8)); ASSERT_DOUBLE_NEAR(0.129990244, row[1], pow(10.0, -8)); ASSERT_DOUBLE_NEAR(0.0371913912, row[2], pow(10.0, -8)); MatRestoreRow(ee, 1, NULL, NULL, &row); /* [7.09681582e-02, 1.29990244e-01, 3.71913912e-02, 1.11566485e-03, 0.00000000e+00, 3.85977593e-02, 7.84793730e-02, 2.32290473e-02, 6.97290528e-04, 0.00000000e+00, 1.16319501e-03, 2.84290410e-03, 9.23893753e-04, 2.78916211e-05, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00] */ // Finalize MatDestroy(&ee); BSSDestroy(&bss); return 0; }
/* Collect the dof indices coupled to this constraint's nodal dof by reading
   the sparsity pattern of the assembled PETSc Jacobian row.  Falls back to
   the base-class implementation when PETSc (>= 3.3) is unavailable. */
void SparsityBasedContactConstraint::getConnectedDofIndices()
{
#if defined(LIBMESH_HAVE_PETSC) && !PETSC_VERSION_LESS_THAN(3, 3, 0)

  _connected_dof_indices.clear();

  // An ugly hack: have to extract the row and look at it's sparsity structure, since otherwise I
  // won't get the dofs connected to this row by virtue of intervariable coupling
  // This is easier than sifting through all coupled variables, selecting those active on the
  // current subdomain, dealing with the scalar variables, etc.
  // Also, importantly, this will miss coupling to variables that might have introduced by prior
  // constraints similar to this one!
  PetscMatrix<Number> * petsc_jacobian = dynamic_cast<PetscMatrix<Number> *>(_jacobian);
  mooseAssert(petsc_jacobian, "Expected a PETSc matrix");
  Mat jac = petsc_jacobian->mat();
  PetscErrorCode ierr;
  PetscInt ncols;
  const PetscInt * cols;
  // Only the column indices are needed, so values are not requested.
  ierr = MatGetRow(jac, static_cast<PetscInt>(_var.nodalDofIndex()), &ncols, &cols, PETSC_NULL);
  CHKERRABORT(_communicator.get(), ierr);
  bool debug = false;
  if (debug)
  {
    libMesh::out << "_connected_dof_indices: adding " << ncols << " dofs from Jacobian row["
                 << _var.nodalDofIndex() << "] = [";
  }
  for (PetscInt i = 0; i < ncols; ++i)
  {
    if (debug)
    {
      libMesh::out << cols[i] << " ";
    }
    _connected_dof_indices.push_back(cols[i]);
  }
  if (debug)
  {
    libMesh::out << "]\n";
  }
  ierr = MatRestoreRow(jac, static_cast<PetscInt>(_var.nodalDofIndex()), &ncols, &cols, PETSC_NULL);
  CHKERRABORT(_communicator.get(), ierr);
#else
  NodeFaceConstraint::getConnectedDofIndices();
#endif
}
/*
    Produces a set of block column indices of the matrix row, one for each block represented in the original row

    n - the number of block indices in cc[]
    cc - the block indices (must be large enough to contain the indices)

    Relies on the row's column indices being sorted, so equal block indices
    are always adjacent and duplicates collapse naturally.
*/
PETSC_STATIC_INLINE PetscErrorCode MatCollapseRow(Mat Amat,PetscInt row,PetscInt bs,PetscInt *n,PetscInt *cc)
{
  PetscInt       ncols,k,nblocks = 0;
  const PetscInt *cols;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatGetRow(Amat,row,&ncols,&cols,NULL);CHKERRQ(ierr);
  for (k=0; k<ncols; k++) {
    const PetscInt blk = cols[k]/bs;
    /* append blk only when it differs from the last recorded block */
    if (!nblocks || cc[nblocks-1] < blk) cc[nblocks++] = blk;
  }
  ierr = MatRestoreRow(Amat,row,&ncols,&cols,NULL);CHKERRQ(ierr);
  *n = nblocks;
  PetscFunctionReturn(0);
}
/*@
    MatGetColumnVector - Gets the values from a given column of a matrix.

   Not Collective

   Input Parameters:
+  A - the matrix
.  yy - the vector
-  c - the column requested (in global numbering)

   Level: advanced

   Notes:
   Each processor for which this is called gets the values for its rows.

   Since PETSc matrices are usually stored in compressed row format, this routine
   will generally be slow.

   The vector must have the same parallel row layout as the matrix.

   Contributed by: Denis Vanderstraeten

.keywords: matrix, column, get

.seealso: MatGetRow(), MatGetDiagonal()

@*/
PetscErrorCode MatGetColumnVector(Mat A,Vec yy,PetscInt col)
{
  PetscScalar       *y;
  const PetscScalar *v;
  PetscErrorCode    ierr;
  PetscInt          i,j,nz,N,Rs,Re,rs,re;
  const PetscInt    *idx;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  PetscValidHeaderSpecific(yy,VEC_CLASSID,2);
  /* validate the requested column and the vector/matrix layout match */
  if (col < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Requested negative column: %D",col);
  ierr = MatGetSize(A,NULL,&N);CHKERRQ(ierr);
  if (col >= N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Requested column %D larger than number columns in matrix %D",col,N);
  ierr = MatGetOwnershipRange(A,&Rs,&Re);CHKERRQ(ierr);
  ierr = VecGetOwnershipRange(yy,&rs,&re);CHKERRQ(ierr);
  if (Rs != rs || Re != re) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Matrix %D %D does not have same ownership range (size) as vector %D %D",Rs,Re,rs,re);

  if (A->ops->getcolumnvector) {
    /* matrix type provides a fast implementation; use it */
    ierr = (*A->ops->getcolumnvector)(A,yy,col);CHKERRQ(ierr);
  } else {
    /* generic fallback: scan each owned row for an entry in column 'col' */
    ierr = VecSet(yy,0.0);CHKERRQ(ierr);
    ierr = VecGetArray(yy,&y);CHKERRQ(ierr);
    for (i=Rs; i<Re; i++) {
      ierr = MatGetRow(A,i,&nz,&idx,&v);CHKERRQ(ierr);
      if (nz && idx[0] <= col) {
        /* Should use faster search here */
        for (j=0; j<nz; j++) {
          if (idx[j] >= col) {
            /* columns are sorted: stop at the first index >= col */
            if (idx[j] == col) y[i-rs] = v[j];
            break;
          }
        }
      }
      ierr = MatRestoreRow(A,i,&nz,&idx,&v);CHKERRQ(ierr);
    }
    ierr = VecRestoreArray(yy,&y);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
/* Regression test: assemble the same block system through both the PETSc and
   ISTL backends, form the Schur complement with each, and verify that every
   nonzero of the PETSc-side complement matches the ISTL-side block. */
TEST(TestISTLPETScMatrix, SchurComplement)
{
  ASMmxBase::Type = ASMmxBase::FULL_CONT_RAISE_BASIS1;
  ASMmxBase::geoBasis = 2;

  // uniform all-ones element stencil so both backends assemble identical data
  Matrix stencil(13,13);
  for (size_t i = 1; i<= 13; ++i)
    for (size_t j = 1; j <= 13; ++j)
      stencil(i,j) = 1.0;

  // sim[0] uses the PETSc backend, sim[1] uses ISTL
  std::array<InspectMatrixSIM,2> sim;
  for (size_t i = 0; i < 2; ++i) {
    sim[i].read("src/LinAlg/Test/refdata/petsc_test_blocks_basis.xinp");
    sim[i].opt.solver = i == 0 ? SystemMatrix::PETSC : SystemMatrix::ISTL;
    sim[i].preprocess();
    sim[i].initSystem(i == 0 ? SystemMatrix::PETSC : SystemMatrix::ISTL);
    for (int iel = 1; iel <= sim[i].getSAM()->getNoElms(); ++iel)
      sim[i].getMatrix()->assemble(stencil, *sim[i].getSAM(), iel);
    sim[i].getMatrix()->beginAssembly();
    sim[i].getMatrix()->endAssembly();
  }

  // build the ISTL-side Schur block S and the PETSc-side complement
  const ProcessAdm& adm = sim[1].getProcessAdm();
  ISTL::Mat& A = static_cast<ISTLMatrix*>(sim[1].getMatrix())->getMatrix();
  ISTL::BlockPreconditioner block(A, adm.dd, "upper");
  ISTL::Mat& S = block.getBlock(1);
  PETScSolParams params(LinSolParams(), adm);
  params.setupSchurComplement(static_cast<PETScMatrix*>(sim[0].getMatrix())->getBlockMatrices());

  // check that matrices are the same
  for (size_t r = 0; r < S.N(); ++r) {
    const PetscInt* cols;
    PetscInt ncols;
    const PetscScalar* vals;
    MatGetRow(params.getSchurComplement(), r, &ncols, &cols, &vals);
    for (PetscInt i = 0; i < ncols; ++i)
      ASSERT_FLOAT_EQ(vals[i], S[r][cols[i]]);
    MatRestoreRow(params.getSchurComplement(), r, &ncols, &cols, &vals);
  }
}
/* Dump the locally-owned rows of M to 'out' in "row col value" triplet form,
   one nonzero per line. */
void PetscSparseStorage::writeToStream(ostream& out) const
{
  int first, last;
  int status = MatGetOwnershipRange( M, &first, &last );
  assert(status == 0);

  for( int r = first; r < last; r++ ) {
    const PetscScalar * vals;
    const PetscInt * cols;
    int nnz;

    status = MatGetRow( M, r, &nnz, &cols, &vals );
    assert(status == 0);
    for( int k = 0; k < nnz; k++ ) {
      out << r << " " << cols[k] << " " << vals[k] << endl;
    }
    status = MatRestoreRow( M, r, &nnz, &cols, &vals );
    assert(status == 0);
  }
}