template <typename T>
void PetscMatrix<T>::zero_rows (std::vector<numeric_index_type> & rows, T diag_value)
{
  libmesh_assert (this->initialized());

  semiparallel_only();

  PetscErrorCode ierr = 0;

#if PETSC_VERSION_RELEASE && PETSC_VERSION_LESS_THAN(3,1,1)
  if (!rows.empty())
    ierr = MatZeroRows(_mat, rows.size(), (PetscInt*)&rows[0], diag_value);
  else
    ierr = MatZeroRows(_mat, 0, PETSC_NULL, diag_value);
#else
  // As of petsc-dev at the time of 3.1.0, MatZeroRows takes two additional
  // optional arguments.  The optional arguments (x,b) can be used to specify
  // the solutions for the zeroed rows (x) and the right-hand side (b) to update.
  // Could be useful for setting boundary conditions...
  if (!rows.empty())
    ierr = MatZeroRows(_mat, rows.size(), (PetscInt*)&rows[0], diag_value,
                       PETSC_NULL, PETSC_NULL);
  else
    ierr = MatZeroRows(_mat, 0, PETSC_NULL, diag_value,
                       PETSC_NULL, PETSC_NULL);
#endif

  LIBMESH_CHKERRABORT(ierr);
}
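/*
 * A minimal sketch (not part of the snippet above) of what the optional (x,b)
 * pair in the post-3.1 MatZeroRows signature does: for each zeroed row r,
 * PETSc sets b[r] = diag * x[r], so loading boundary values into x updates
 * the right-hand side in the same collective call.  The function name and
 * the parameters `n_bc_rows`/`bc_rows` are illustrative placeholders.
 */
PetscErrorCode apply_dirichlet_rows(Mat A, Vec x, Vec b,
                                    PetscInt n_bc_rows, const PetscInt bc_rows[])
{
  PetscErrorCode ierr;
  /* zero the rows, put 1.0 on each diagonal, and let PETSc copy
     1.0 * x[r] into b[r] for every zeroed row r */
  ierr = MatZeroRows(A, n_bc_rows, bc_rows, 1.0, x, b);CHKERRQ(ierr);
  return 0;
}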
void PETScLinearSolver::zeroRows_in_Matrix(const int nrows, const PetscInt *rows)
{
  PetscScalar one = 1.0;
  // Each process indicates only rows it owns that are to be zeroed.
  // MatSetOption(A, MAT_NO_OFF_PROC_ZERO_ROWS, PETSC_TRUE);
  if (nrows > 0)
    MatZeroRows(A, nrows, rows, one, PETSC_NULL, PETSC_NULL);
  else
    MatZeroRows(A, 0, PETSC_NULL, one, PETSC_NULL, PETSC_NULL);
}
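/*
 * The commented-out option above is worth spelling out.  If every rank passes
 * only rows it owns (as the function above assumes), MAT_NO_OFF_PROC_ZERO_ROWS
 * lets PETSc skip the communication that would otherwise forward off-process
 * row indices to their owners.  A sketch under that assumption, reusing the
 * names A/nrows/rows from the function above:
 */
MatSetOption(A, MAT_NO_OFF_PROC_ZERO_ROWS, PETSC_TRUE); /* promise: rows are all locally owned */
MatZeroRows(A, nrows, rows, 1.0, PETSC_NULL, PETSC_NULL);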
PetscErrorCode PCGAMGOptProl_Classical_Jacobi(PC pc,const Mat A,Mat *P)
{
  PetscErrorCode    ierr;
  PetscInt          f,s,n,cf,cs,i,idx;
  PetscInt          *coarserows;
  PetscInt          ncols;
  const PetscInt    *pcols;
  const PetscScalar *pvals;
  Mat               Pnew;
  Vec               diag;
  PC_MG             *mg      = (PC_MG*)pc->data;
  PC_GAMG           *pc_gamg = (PC_GAMG*)mg->innerctx;
  PC_GAMG_Classical *cls     = (PC_GAMG_Classical*)pc_gamg->subctx;

  PetscFunctionBegin;
  if (cls->nsmooths == 0) {
    ierr = PCGAMGTruncateProlongator_Private(pc,P);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }
  ierr = MatGetOwnershipRange(*P,&s,&f);CHKERRQ(ierr);
  n    = f-s;
  ierr = MatGetOwnershipRangeColumn(*P,&cs,&cf);CHKERRQ(ierr);
  ierr = PetscMalloc(sizeof(PetscInt)*n,&coarserows);CHKERRQ(ierr);
  /* identify the rows corresponding to coarse unknowns */
  idx = 0;
  for (i=s; i<f; i++) {
    ierr = MatGetRow(*P,i,&ncols,&pcols,&pvals);CHKERRQ(ierr);
    /* assume, for now, that it's a coarse unknown if it has a single unit entry */
    if (ncols == 1) {
      if (pvals[0] == 1.) {
        coarserows[idx] = i;
        idx++;
      }
    }
    ierr = MatRestoreRow(*P,i,&ncols,&pcols,&pvals);CHKERRQ(ierr);
  }
  ierr = MatGetVecs(A,&diag,0);CHKERRQ(ierr);
  ierr = MatGetDiagonal(A,diag);CHKERRQ(ierr);
  ierr = VecReciprocal(diag);CHKERRQ(ierr);
  for (i=0; i<cls->nsmooths; i++) {
    ierr = MatMatMult(A,*P,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&Pnew);CHKERRQ(ierr);
    ierr = MatZeroRows(Pnew,idx,coarserows,0.,NULL,NULL);CHKERRQ(ierr);
    ierr = MatDiagonalScale(Pnew,diag,0);CHKERRQ(ierr);
    ierr = MatAYPX(Pnew,-1.0,*P,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
    ierr = MatDestroy(P);CHKERRQ(ierr);
    *P   = Pnew;
    Pnew = NULL;
  }
  ierr = VecDestroy(&diag);CHKERRQ(ierr);
  ierr = PetscFree(coarserows);CHKERRQ(ierr);
  ierr = PCGAMGTruncateProlongator_Private(pc,P);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
void Field_solver::cross_out_nodes_occupied_by_objects( Mat *A,
                                                        int nx, int ny, int nz,
                                                        Inner_region &inner_region )
{
    std::vector<int> occupied_nodes_global_indices =
        list_of_nodes_global_indices_in_matrix( inner_region.inner_nodes_not_at_domain_edge,
                                                nx, ny, nz );

    PetscErrorCode ierr;
    PetscInt num_of_rows_to_remove = occupied_nodes_global_indices.size();
    if ( num_of_rows_to_remove != 0 ) {
        PetscInt *rows_global_indices = &occupied_nodes_global_indices[0];
        PetscScalar diag = 1.0;
        /* Approx solution and RHS at zeroed rows */
        inner_region.phi_inside_region = NULL;
        inner_region.rhs_inside_region = NULL;
        // looks like setting phi_inside_region and
        // rhs_inside_region has no effect

        // todo: separate function
        // std::string vec_name = "Phi inside " + inner_region.name;
        // alloc_petsc_vector( &inner_region.phi_inside_region,
        //                     (nx-2) * (ny-2) * (nz-2),
        //                     vec_name.c_str() );
        // VecSet( inner_region.phi_inside_region, inner_region.potential );
        // ierr = VecAssemblyBegin( inner_region.phi_inside_region ); CHKERRXX( ierr );
        // ierr = VecAssemblyEnd( inner_region.phi_inside_region ); CHKERRXX( ierr );

        // todo: separate function
        // vec_name = "RHS inside " + inner_region.name;
        // PetscScalar charge_density_inside_conductor = 0.0;
        // alloc_petsc_vector( &inner_region.rhs_inside_region,
        //                     (nx-2) * (ny-2) * (nz-2),
        //                     vec_name.c_str() );
        // VecSet( inner_region.rhs_inside_region, charge_density_inside_conductor );
        // ierr = VecAssemblyBegin( inner_region.rhs_inside_region ); CHKERRXX( ierr );
        // ierr = VecAssemblyEnd( inner_region.rhs_inside_region ); CHKERRXX( ierr );

        ierr = MatZeroRows( *A, num_of_rows_to_remove, rows_global_indices, diag,
                            inner_region.phi_inside_region,
                            inner_region.rhs_inside_region );
        CHKERRXX( ierr );
        // VecDestroy for phi_inside_region and rhs_inside_region
        // should be called in the inner_region destructor.
    }
}
PetscErrorCode MatZeroRowsLocal_IS(Mat A,PetscInt n,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
{
  Mat_IS         *is = (Mat_IS*)A->data;
  PetscErrorCode ierr;
  PetscInt       i;
  PetscScalar    *array;

  PetscFunctionBegin;
  if (x && b) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"No support");
  {
    /*
       Set up is->x as a "counting vector".  This is in order for MatMult_IS
       to work properly in the interface nodes.
    */
    Vec         counter;
    PetscScalar one=1.0, zero=0.0;
    ierr = MatCreateVecs(A,&counter,NULL);CHKERRQ(ierr);
    ierr = VecSet(counter,zero);CHKERRQ(ierr);
    ierr = VecSet(is->x,one);CHKERRQ(ierr);
    ierr = VecScatterBegin(is->ctx,is->x,counter,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
    ierr = VecScatterEnd  (is->ctx,is->x,counter,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
    ierr = VecScatterBegin(is->ctx,counter,is->x,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecScatterEnd  (is->ctx,counter,is->x,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecDestroy(&counter);CHKERRQ(ierr);
  }
  if (!n) {
    is->pure_neumann = PETSC_TRUE;
  } else {
    is->pure_neumann = PETSC_FALSE;
    ierr = VecGetArray(is->x,&array);CHKERRQ(ierr);
    ierr = MatZeroRows(is->A,n,rows,diag,0,0);CHKERRQ(ierr);
    for (i=0; i<n; i++) {
      ierr = MatSetValue(is->A,rows[i],rows[i],diag/(array[rows[i]]),INSERT_VALUES);CHKERRQ(ierr);
    }
    ierr = MatAssemblyBegin(is->A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd  (is->A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = VecRestoreArray(is->x,&array);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
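/*
 * For context: the function above is the MATIS implementation behind the
 * generic local-numbering variant, MatZeroRowsLocal().  A minimal usage
 * sketch, assuming a matrix A that carries an ISLocalToGlobalMapping (a
 * MATIS, or a matrix obtained from a DM); the values in `lrows` are
 * placeholder LOCAL row indices.
 */
PetscInt    lrows[2] = {0, 1};  /* placeholder local row indices */
PetscScalar diag     = 1.0;
ierr = MatZeroRowsLocal(A, 2, lrows, diag, NULL, NULL);CHKERRQ(ierr);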
int main(int argc,char **args)
{
  Mat            C;
  PetscErrorCode ierr;
  PetscInt       i,m = 2,N,M,its,idx[4],count,*rows;
  PetscScalar    val,Ke[16],r[4];
  PetscReal      x,y,h,norm,tol=1.e-14;
  Vec            u,ustar,b;
  KSP            ksp;

  PetscInitialize(&argc,&args,(char*)0,help);
  ierr = PetscOptionsGetInt(NULL,"-m",&m,NULL);CHKERRQ(ierr);
  N    = (m+1)*(m+1); /* dimension of matrix */
  M    = m*m;         /* number of elements */
  h    = 1.0/m;       /* mesh width */

  /* create stiffness matrix */
  ierr = MatCreateSeqAIJ(PETSC_COMM_SELF,N,N,9,NULL,&C);CHKERRQ(ierr);
  ierr = MatSetUp(C);CHKERRQ(ierr);

  /* forms the element stiffness for the Laplacian */
  ierr = FormElementStiffness(h*h,Ke);CHKERRQ(ierr);
  for (i=0; i<M; i++) {
    /* location of lower left corner of element */
    x = h*(i % m); y = h*(i/m);
    /* node numbers for the four corners of element */
    idx[0] = (m+1)*(i/m) + (i % m);
    idx[1] = idx[0]+1;
    idx[2] = idx[1] + m + 1;
    idx[3] = idx[2] - 1;
    ierr   = MatSetValues(C,4,idx,4,idx,Ke,ADD_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  /* create right-hand side and solution */
  ierr = VecCreateSeq(PETSC_COMM_SELF,N,&u);CHKERRQ(ierr);
  ierr = VecDuplicate(u,&b);CHKERRQ(ierr);
  ierr = VecDuplicate(b,&ustar);CHKERRQ(ierr);
  ierr = VecSet(u,0.0);CHKERRQ(ierr);
  ierr = VecSet(b,0.0);CHKERRQ(ierr);

  for (i=0; i<M; i++) {
    /* location of lower left corner of element */
    x = h*(i % m); y = h*(i/m);
    /* node numbers for the four corners of element */
    idx[0] = (m+1)*(i/m) + (i % m);
    idx[1] = idx[0]+1;
    idx[2] = idx[1] + m + 1;
    idx[3] = idx[2] - 1;
    ierr   = FormElementRhs(x,y,h*h,r);CHKERRQ(ierr);
    ierr   = VecSetValues(b,4,idx,r,ADD_VALUES);CHKERRQ(ierr);
  }
  ierr = VecAssemblyBegin(b);CHKERRQ(ierr);
  ierr = VecAssemblyEnd(b);CHKERRQ(ierr);

  /* modify matrix and rhs for Dirichlet boundary conditions */
  ierr = PetscMalloc1((4*m+1),&rows);CHKERRQ(ierr);
  for (i=0; i<m+1; i++) {
    rows[i]          = i;            /* bottom */
    rows[3*m - 1 +i] = m*(m+1) + i;  /* top */
  }
  count = m+1; /* left side */
  for (i=m+1; i<m*(m+1); i+= m+1) rows[count++] = i;
  count = 2*m; /* right side */
  for (i=2*m+1; i<m*(m+1); i+= m+1) rows[count++] = i;
  for (i=0; i<4*m; i++) {
    x    = h*(rows[i] % (m+1));
    y    = h*(rows[i]/(m+1));
    val  = y;
    ierr = VecSetValues(u,1,&rows[i],&val,INSERT_VALUES);CHKERRQ(ierr);
    ierr = VecSetValues(b,1,&rows[i],&val,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatZeroRows(C,4*m,rows,1.0,0,0);CHKERRQ(ierr);
  ierr = PetscFree(rows);CHKERRQ(ierr);
  ierr = VecAssemblyBegin(u);CHKERRQ(ierr);
  ierr = VecAssemblyEnd(u);CHKERRQ(ierr);
  ierr = VecAssemblyBegin(b);CHKERRQ(ierr);
  ierr = VecAssemblyEnd(b);CHKERRQ(ierr);

  /* solve linear system */
  ierr = KSPCreate(PETSC_COMM_WORLD,&ksp);CHKERRQ(ierr);
  ierr = KSPSetOperators(ksp,C,C,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
  ierr = KSPSetFromOptions(ksp);CHKERRQ(ierr);
  ierr = KSPSetInitialGuessNonzero(ksp,PETSC_TRUE);CHKERRQ(ierr);
  ierr = KSPSolve(ksp,b,u);CHKERRQ(ierr);

  /* check error */
  for (i=0; i<N; i++) {
    x    = h*(i % (m+1));
    y    = h*(i/(m+1));
    val  = y;
    ierr = VecSetValues(ustar,1,&i,&val,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = VecAssemblyBegin(ustar);CHKERRQ(ierr);
  ierr = VecAssemblyEnd(ustar);CHKERRQ(ierr);
  ierr = VecAXPY(u,-1.0,ustar);CHKERRQ(ierr);
  ierr = VecNorm(u,NORM_2,&norm);CHKERRQ(ierr);
  ierr = KSPGetIterationNumber(ksp,&its);CHKERRQ(ierr);
  if (norm > tol) {
    ierr = PetscPrintf(PETSC_COMM_WORLD,"Norm of error %G Iterations %D\n",norm*h,its);CHKERRQ(ierr);
  }

  ierr = KSPDestroy(&ksp);CHKERRQ(ierr);
  ierr = VecDestroy(&ustar);CHKERRQ(ierr);
  ierr = VecDestroy(&u);CHKERRQ(ierr);
  ierr = VecDestroy(&b);CHKERRQ(ierr);
  ierr = MatDestroy(&C);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return 0;
}
int main(int argc,char **args)
{
  Mat            C;
  PetscMPIInt    rank,size;
  PetscInt       i,m = 5,N,start,end,M,its;
  PetscScalar    val,Ke[16],r[4];
  PetscReal      x,y,h,norm,tol=1.e-14;
  PetscErrorCode ierr;
  PetscInt       idx[4],count,*rows;
  Vec            u,ustar,b;
  KSP            ksp;

  PetscInitialize(&argc,&args,(char*)0,help);
  ierr = PetscOptionsGetInt(NULL,"-m",&m,NULL);CHKERRQ(ierr);
  N    = (m+1)*(m+1); /* dimension of matrix */
  M    = m*m;         /* number of elements */
  h    = 1.0/m;       /* mesh width */
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);

  /* Create stiffness matrix */
  ierr = MatCreate(PETSC_COMM_WORLD,&C);CHKERRQ(ierr);
  ierr = MatSetSizes(C,PETSC_DECIDE,PETSC_DECIDE,N,N);CHKERRQ(ierr);
  ierr = MatSetFromOptions(C);CHKERRQ(ierr);
  ierr = MatSetUp(C);CHKERRQ(ierr);

  start = rank*(M/size) + ((M%size) < rank ? (M%size) : rank);
  end   = start + M/size + ((M%size) > rank);

  /* Assemble matrix */
  ierr = FormElementStiffness(h*h,Ke); /* element stiffness for Laplacian */
  for (i=start; i<end; i++) {
    /* location of lower left corner of element */
    x = h*(i % m); y = h*(i/m);
    /* node numbers for the four corners of element */
    idx[0] = (m+1)*(i/m) + (i % m);
    idx[1] = idx[0]+1;
    idx[2] = idx[1] + m + 1;
    idx[3] = idx[2] - 1;
    ierr   = MatSetValues(C,4,idx,4,idx,Ke,ADD_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  /* Create right-hand-side and solution vectors */
  ierr = VecCreate(PETSC_COMM_WORLD,&u);CHKERRQ(ierr);
  ierr = VecSetSizes(u,PETSC_DECIDE,N);CHKERRQ(ierr);
  ierr = VecSetFromOptions(u);CHKERRQ(ierr);
  ierr = PetscObjectSetName((PetscObject)u,"Approx. Solution");CHKERRQ(ierr);
  ierr = VecDuplicate(u,&b);CHKERRQ(ierr);
  ierr = PetscObjectSetName((PetscObject)b,"Right hand side");CHKERRQ(ierr);
  ierr = VecDuplicate(b,&ustar);CHKERRQ(ierr);
  ierr = VecSet(u,0.0);CHKERRQ(ierr);
  ierr = VecSet(b,0.0);CHKERRQ(ierr);

  /* Assemble right-hand-side vector */
  for (i=start; i<end; i++) {
    /* location of lower left corner of element */
    x = h*(i % m); y = h*(i/m);
    /* node numbers for the four corners of element */
    idx[0] = (m+1)*(i/m) + (i % m);
    idx[1] = idx[0]+1;
    idx[2] = idx[1] + m + 1;
    idx[3] = idx[2] - 1;
    ierr   = FormElementRhs(x,y,h*h,r);CHKERRQ(ierr);
    ierr   = VecSetValues(b,4,idx,r,ADD_VALUES);CHKERRQ(ierr);
  }
  ierr = VecAssemblyBegin(b);CHKERRQ(ierr);
  ierr = VecAssemblyEnd(b);CHKERRQ(ierr);

  /* Modify matrix and right-hand-side for Dirichlet boundary conditions */
  ierr = PetscMalloc1(4*m,&rows);CHKERRQ(ierr);
  for (i=0; i<m+1; i++) {
    rows[i]          = i;            /* bottom */
    rows[3*m - 1 +i] = m*(m+1) + i;  /* top */
  }
  count = m+1; /* left side */
  for (i=m+1; i<m*(m+1); i+= m+1) rows[count++] = i;
  count = 2*m; /* right side */
  for (i=2*m+1; i<m*(m+1); i+= m+1) rows[count++] = i;
  for (i=0; i<4*m; i++) {
    x    = h*(rows[i] % (m+1));
    y    = h*(rows[i]/(m+1));
    val  = y;
    ierr = VecSetValues(u,1,&rows[i],&val,INSERT_VALUES);CHKERRQ(ierr);
    ierr = VecSetValues(b,1,&rows[i],&val,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatZeroRows(C,4*m,rows,1.0,0,0);CHKERRQ(ierr);
  ierr = PetscFree(rows);CHKERRQ(ierr);
  ierr = VecAssemblyBegin(u);CHKERRQ(ierr);
  ierr = VecAssemblyEnd(u);CHKERRQ(ierr);
  ierr = VecAssemblyBegin(b);CHKERRQ(ierr);
  ierr = VecAssemblyEnd(b);CHKERRQ(ierr);

  {
    Mat A;
    ierr = MatConvert(C,MATSAME,MAT_INITIAL_MATRIX,&A);CHKERRQ(ierr);
    ierr = MatDestroy(&C);CHKERRQ(ierr);
    ierr = MatConvert(A,MATSAME,MAT_INITIAL_MATRIX,&C);CHKERRQ(ierr);
    ierr = MatDestroy(&A);CHKERRQ(ierr);
  }

  /* Solve linear system */
  ierr = KSPCreate(PETSC_COMM_WORLD,&ksp);CHKERRQ(ierr);
  ierr = KSPSetOperators(ksp,C,C,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
  ierr = KSPSetFromOptions(ksp);CHKERRQ(ierr);
  ierr = KSPSetInitialGuessNonzero(ksp,PETSC_TRUE);CHKERRQ(ierr);
  ierr = KSPSolve(ksp,b,u);CHKERRQ(ierr);

  /* Check error */
  ierr = VecGetOwnershipRange(ustar,&start,&end);CHKERRQ(ierr);
  for (i=start; i<end; i++) {
    x    = h*(i % (m+1));
    y    = h*(i/(m+1));
    val  = y;
    ierr = VecSetValues(ustar,1,&i,&val,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = VecAssemblyBegin(ustar);CHKERRQ(ierr);
  ierr = VecAssemblyEnd(ustar);CHKERRQ(ierr);
  ierr = VecAXPY(u,-1.0,ustar);CHKERRQ(ierr);
  ierr = VecNorm(u,NORM_2,&norm);CHKERRQ(ierr);
  ierr = KSPGetIterationNumber(ksp,&its);CHKERRQ(ierr);
  if (norm > tol) {
    ierr = PetscPrintf(PETSC_COMM_WORLD,"Norm of error %G Iterations %D\n",norm*h,its);CHKERRQ(ierr);
  }

  /* Free work space */
  ierr = KSPDestroy(&ksp);CHKERRQ(ierr);
  ierr = VecDestroy(&ustar);CHKERRQ(ierr);
  ierr = VecDestroy(&u);CHKERRQ(ierr);
  ierr = VecDestroy(&b);CHKERRQ(ierr);
  ierr = MatDestroy(&C);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return 0;
}
int main(int argc,char **args)
{
  Mat            A;
  PetscInt       bs=3,m=4,n=6,i,j,val = 10,row[2],col[3],eval,rstart;
  PetscErrorCode ierr;
  PetscMPIInt    size,rank;
  PetscScalar    x[6][9],y[3][3],one=1.0;
  PetscBool      flg,testsbaij=PETSC_FALSE;

  PetscInitialize(&argc,&args,(char*)0,help);
  ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);

  ierr = PetscOptionsHasName(NULL,"-test_mat_sbaij",&testsbaij);CHKERRQ(ierr);

  if (testsbaij) {
    ierr = MatCreateSBAIJ(PETSC_COMM_WORLD,bs,m*bs,n*bs,PETSC_DECIDE,PETSC_DECIDE,1,NULL,1,NULL,&A);CHKERRQ(ierr);
  } else {
    ierr = MatCreateBAIJ(PETSC_COMM_WORLD,bs,m*bs,n*bs,PETSC_DECIDE,PETSC_DECIDE,1,NULL,1,NULL,&A);CHKERRQ(ierr);
  }
  ierr = MatSetOption(A,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_FALSE);CHKERRQ(ierr);
  eval = 9;

  ierr = PetscOptionsHasName(NULL,"-ass_extern",&flg);CHKERRQ(ierr);
  if (flg && (size != 1)) rstart = m*((rank+1)%size);
  else                    rstart = m*(rank);

  row[0] = rstart+0; row[1] = rstart+2;
  col[0] = rstart+0; col[1] = rstart+1; col[2] = rstart+3;
  for (i=0; i<6; i++) {
    for (j=0; j<9; j++) x[i][j] = (PetscScalar)val++;
  }
  ierr = MatSetValuesBlocked(A,2,row,3,col,&x[0][0],INSERT_VALUES);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  /*
     This option does not work for rectangular matrices
     ierr = MatSetOption(A,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);CHKERRQ(ierr);
  */

  ierr = MatSetValuesBlocked(A,2,row,3,col,&x[0][0],INSERT_VALUES);CHKERRQ(ierr);

  /* Do another MatSetValues to test the case when only one local block is specified */
  for (i=0; i<3; i++) {
    for (j=0; j<3; j++) y[i][j] = (PetscScalar)(10 + i*eval + j);
  }
  ierr = MatSetValuesBlocked(A,1,row,1,col,&y[0][0],INSERT_VALUES);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ierr = PetscOptionsHasName(NULL,"-zero_rows",&flg);CHKERRQ(ierr);
  if (flg) {
    col[0] = rstart*bs+0; col[1] = rstart*bs+1; col[2] = rstart*bs+2;
    ierr   = MatZeroRows(A,3,col,one,0,0);CHKERRQ(ierr);
  }
  ierr = MatView(A,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  ierr = MatDestroy(&A);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return 0;
}
void PetscMatTools::ZeroRowsWithValueOnDiagonal(Mat matrix, std::vector<unsigned>& rRows, double diagonalValue)
{
    Finalise(matrix);

    /*
     * Important! PETSc by default will destroy the sparsity structure for this row and deallocate memory
     * when the row is zeroed, and if there is a next timestep, the memory will have to be reallocated when
     * assembly is done again. This can kill performance. The following makes sure the zeroed rows are kept.
     *
     * Note: if the following lines are called, then once MatZeroRows() is called below, there will be an
     * error if some rows have no entries at all. Hence for problems with dummy variables, Stokes flow for
     * example, the identity block needs to be added before Dirichlet BCs.
     */
#if (PETSC_VERSION_MAJOR == 3 && PETSC_VERSION_MINOR >= 1) //PETSc 3.1 or later
    MatSetOption(matrix, MAT_KEEP_NONZERO_PATTERN, PETSC_TRUE);
#elif (PETSC_VERSION_MAJOR == 3 && PETSC_VERSION_MINOR == 0) //PETSc 3.0
    MatSetOption(matrix, MAT_KEEP_ZEROED_ROWS, PETSC_TRUE);
#else //PETSc 2.x.x
    MatSetOption(matrix, MAT_KEEP_ZEROED_ROWS);
#endif

    PetscInt* rows = new PetscInt[rRows.size()];
    for (unsigned i=0; i<rRows.size(); i++)
    {
        rows[i] = rRows[i];
    }
#if (PETSC_VERSION_MAJOR == 2 && PETSC_VERSION_MINOR == 2) //PETSc 2.2
    IS is;
    ISCreateGeneral(PETSC_COMM_WORLD, rRows.size(), rows, &is);
    MatZeroRows(matrix, is, &diagonalValue);
    ISDestroy(PETSC_DESTROY_PARAM(is));
    /*
     * [2]PETSC ERROR: MatMissingDiagonal_SeqAIJ() line 1011 in src/mat/impls/aij/seq/aij.c
     * [2]PETSC ERROR: PETSc has generated inconsistent data!
     * [2]PETSC ERROR: Matrix is missing diagonal number 15!
     * [2]PETSC ERROR: MatILUFactorSymbolic_SeqAIJ() line 906 in src/mat/impls/aij/seq/aijfact.c
     *
     * // There appears to be a problem with MatZeroRows not setting diagonals correctly.
     * // While we are supporting PETSc 2.2, we have to do this the slow way.
     * AssembleFinal(matrix);
     * PetscInt lo, hi;
     * GetOwnershipRange(matrix, lo, hi);
     * PetscInt size = GetSize(matrix);
     * ///assert(rRows.size() == 1);
     * for (unsigned index=0; index<rRows.size(); index++)
     * {
     *     PetscInt row = rRows[index];
     *     if (row >= lo && row < hi)
     *     {
     *         std::vector<unsigned> non_zero_cols;
     *         // This row is local, so zero it.
     *         for (PetscInt column = 0; column < size; column++)
     *         {
     *             if (GetElement(matrix, row, column) != 0.0)
     *             {
     *                 non_zero_cols.push_back(column);
     *             }
     *         }
     *         // Separate "gets" from "sets" so that we don't have to keep going into "assembled" mode
     *         for (unsigned i=0; i<non_zero_cols.size(); i++)
     *         {
     *             SetElement(matrix, row, non_zero_cols[i], 0.0);
     *         }
     *         // Set the diagonal
     *         SetElement(matrix, row, row, diagonalValue);
     *     }
     *     // Everyone communicate after row is finished
     *     AssembleFinal(matrix);
     * }
     */
#elif (PETSC_VERSION_MAJOR == 3 && PETSC_VERSION_MINOR >= 2) //PETSc 3.2 or later
    MatZeroRows(matrix, rRows.size(), rows, diagonalValue, NULL, NULL);
#else
    MatZeroRows(matrix, rRows.size(), rows, diagonalValue);
#endif
    delete [] rows;
}
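/*
 * A side note on the 2.2 branch above: the IS-based form of the call survives
 * in current PETSc as MatZeroRowsIS(), which takes the same trailing (x,b)
 * pair as MatZeroRows().  A one-line sketch reusing the names from the
 * function above (the NULLs skip the optional solution/rhs update):
 */
MatZeroRowsIS(matrix, is, diagonalValue, NULL, NULL);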
int main(int argc, char * argv[])
{
  typedef MPI::HGeometryForest<DIM,DOW> forest_t;
  typedef MPI::BirdView<forest_t> ir_mesh_t;
  typedef FEMSpace<double,DIM,DOW> fe_space_t;
  typedef MPI::DOF::GlobalIndex<forest_t, fe_space_t> global_index_t;

  PetscInitialize(&argc, &argv, (char *)NULL, help);

  forest_t forest(PETSC_COMM_WORLD);
  forest.readMesh(argv[1]);
  ir_mesh_t ir_mesh(forest);

  int round = 0;
  if (argc >= 3) round = atoi(argv[2]);
  ir_mesh.globalRefine(round);
  ir_mesh.semiregularize();
  ir_mesh.regularize(false);

  setenv("AFEPACK_TEMPLATE_PATH", "/usr/local/AFEPack/template/triangle", 1);
  TemplateGeometry<DIM> tri;
  tri.readData("triangle.tmp_geo");
  CoordTransform<DIM,DIM> tri_ct;
  tri_ct.readData("triangle.crd_trs");
  TemplateDOF<DIM> tri_td(tri);
  tri_td.readData("triangle.1.tmp_dof");
  BasisFunctionAdmin<double,DIM,DIM> tri_bf(tri_td);
  tri_bf.readData("triangle.1.bas_fun");
  std::vector<TemplateElement<double,DIM,DIM> > tmp_ele(1);
  tmp_ele[0].reinit(tri, tri_td, tri_ct, tri_bf);

  RegularMesh<DIM,DOW>& mesh = ir_mesh.regularMesh();
  fe_space_t fem_space(mesh, tmp_ele);
  u_int n_ele = mesh.n_geometry(DIM);
  fem_space.element().resize(n_ele);
  for (int i = 0; i < n_ele; i++) {
    fem_space.element(i).reinit(fem_space, i, 0);
  }
  fem_space.buildElement();
  fem_space.buildDof();
  fem_space.buildDofBoundaryMark();

  std::cout << "Building global indices ... " << std::flush;
  global_index_t global_index(forest, fem_space);
  global_index.build();
  std::cout << "OK!" << std::endl;

  std::cout << "Building the linear system ... " << std::flush;
  Mat A;
  Vec x, b;
  MatCreateMPIAIJ(PETSC_COMM_WORLD,
                  global_index.n_primary_dof(), global_index.n_primary_dof(),
                  PETSC_DECIDE, PETSC_DECIDE,
                  0, PETSC_NULL, 0, PETSC_NULL, &A);
  VecCreateMPI(PETSC_COMM_WORLD, global_index.n_primary_dof(), PETSC_DECIDE, &b);

  fe_space_t::ElementIterator
    the_ele = fem_space.beginElement(),
    end_ele = fem_space.endElement();
  for (; the_ele != end_ele; ++the_ele) {
    double vol = the_ele->templateElement().volume();
    const QuadratureInfo<DIM>& qi = the_ele->findQuadratureInfo(5);
    std::vector<Point<DIM> > q_pnt = the_ele->local_to_global(qi.quadraturePoint());
    int n_q_pnt = qi.n_quadraturePoint();
    std::vector<double> jac = the_ele->local_to_global_jacobian(qi.quadraturePoint());
    std::vector<std::vector<double> > bas_val = the_ele->basis_function_value(q_pnt);
    std::vector<std::vector<std::vector<double> > > bas_grad = the_ele->basis_function_gradient(q_pnt);

    const std::vector<int>& ele_dof = the_ele->dof();
    u_int n_ele_dof = ele_dof.size();
    FullMatrix<double> ele_mat(n_ele_dof, n_ele_dof);
    Vector<double> ele_rhs(n_ele_dof);
    for (u_int l = 0; l < n_q_pnt; ++l) {
      double JxW = vol*jac[l]*qi.weight(l);
      double f_val = _f_(q_pnt[l]);
      for (u_int i = 0; i < n_ele_dof; ++i) {
        for (u_int j = 0; j < n_ele_dof; ++j) {
          ele_mat(i, j) += JxW*(bas_val[i][l]*bas_val[j][l] +
                                innerProduct(bas_grad[i][l], bas_grad[j][l]));
        }
        ele_rhs(i) += JxW*f_val*bas_val[i][l];
      }
    }

    /**
     * Compute the element matrix and element load first, then scatter them
     * into the global matrix and load vector; this improves efficiency.
     */
    std::vector<int> indices(n_ele_dof);
    for (u_int i = 0; i < n_ele_dof; ++i) {
      indices[i] = global_index(ele_dof[i]);
    }
    MatSetValues(A, n_ele_dof, &indices[0], n_ele_dof, &indices[0], &ele_mat(0,0), ADD_VALUES);
    VecSetValues(b, n_ele_dof, &indices[0], &ele_rhs(0), ADD_VALUES);
  }
  MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);
  VecAssemblyBegin(b);
  VecAssemblyEnd(b);
  std::cout << "OK!" << std::endl;

  /// Apply the Dirichlet boundary condition.
  std::cout << "Applying the Dirichlet boundary condition ... " << std::flush;
  u_int n_bnd_dof = 0; /// First count the number of DOFs on the boundary.
  for (u_int i = 0; i < fem_space.n_dof(); ++i) {
    if (fem_space.dofBoundaryMark(i) > 0) {
      /// Skip DOFs that are not on a primary geometry.
      if (! global_index.is_dof_on_primary_geometry(i)) continue;
      n_bnd_dof += 1;
    }
  }

  /// Allocate space for the boundary DOFs' global indices, unknowns and
  /// right-hand-side entries.
  std::vector<int> bnd_idx(n_bnd_dof);
  std::vector<double> rhs_entry(n_bnd_dof);
  /// Loop over the degrees of freedom.
  for (u_int i = 0, j = 0; i < fem_space.n_dof(); ++i) {
    if (fem_space.dofBoundaryMark(i) > 0) { /// A DOF on the boundary?
      /// Skip DOFs that are not on a primary geometry.
      if (! global_index.is_dof_on_primary_geometry(i)) continue;
      bnd_idx[j] = global_index(i); /// Global index of the row.
      /// Compute and record the boundary value, assuming the DOF value
      /// is the interpolated (nodal) value.
      double u_b_val = _u_b_(fem_space.dofInfo(i).interp_point);
      rhs_entry[j] = u_b_val;
      j += 1;
    }
  }

  /// Zero the boundary rows of the matrix, putting 1.0 on the diagonal.
  /// MatSetOption(A, MAT_KEEP_ZEROED_ROWS);
  MatZeroRows(A, n_bnd_dof, &bnd_idx[0], 1.0);

  /// Set the right-hand side to the boundary values at the corresponding points.
  Vec rhs_bnd;
  VecCreateSeqWithArray(PETSC_COMM_SELF, n_bnd_dof, &rhs_entry[0], &rhs_bnd);
  IS is_bnd;
  ISCreateGeneralWithArray(PETSC_COMM_WORLD, n_bnd_dof, &bnd_idx[0], &is_bnd);
  VecScatter bnd_scatter;
  VecScatterCreate(rhs_bnd, PETSC_NULL, b, is_bnd, &bnd_scatter);
  VecScatterBegin(bnd_scatter, rhs_bnd, b, INSERT_VALUES, SCATTER_FORWARD);
  VecScatterEnd(bnd_scatter, rhs_bnd, b, INSERT_VALUES, SCATTER_FORWARD);
  VecDestroy(rhs_bnd);
  ISDestroy(is_bnd);
  VecScatterDestroy(bnd_scatter);
  std::cout << "OK!" << std::endl;

  VecDuplicate(b, &x);

  KSP solver;
  KSPCreate(PETSC_COMM_WORLD, &solver);
  KSPSetOperators(solver, A, A, SAME_NONZERO_PATTERN);
  KSPSetType(solver, KSPGMRES);
  KSPSetFromOptions(solver);
  KSPSolve(solver, b, x);

  if (forest.rank() == 0) {
    KSPConvergedReason reason;
    KSPGetConvergedReason(solver, &reason);
    if (reason == KSP_DIVERGED_INDEFINITE_PC) {
      printf("\nDivergence because of indefinite preconditioner;\n");
      printf("Run the executable again but with -pc_ilu_shift option.\n");
    } else if (reason < 0) {
      printf("\nOther kind of divergence: this should not happen.\n");
    } else {
      PetscInt its;
      KSPGetIterationNumber(solver, &its);
      printf("\nConvergence in %d iterations.\n", (int)its);
    }
    printf("\n");
  }

  MatDestroy(A);
  VecDestroy(b);
  KSPDestroy(solver);

  FEMFunction<double,DIM> u_h(fem_space);
  Vec X;
  VecCreateSeqWithArray(PETSC_COMM_SELF, global_index.n_local_dof(), &u_h(0), &X);
  std::vector<int> primary_idx(global_index.n_primary_dof());
  global_index.build_primary_index(&primary_idx[0]);
  IS is;
  ISCreateGeneralWithArray(PETSC_COMM_WORLD, global_index.n_local_dof(), &global_index(0), &is);
  VecScatter scatter;
  VecScatterCreate(x, is, X, PETSC_NULL, &scatter);
  VecScatterBegin(scatter, x, X, INSERT_VALUES, SCATTER_FORWARD);
  VecScatterEnd(scatter, x, X, INSERT_VALUES, SCATTER_FORWARD);
  VecDestroy(x);
  VecDestroy(X);
  VecScatterDestroy(scatter);
  ISDestroy(is);

  char filename[1024];
  sprintf(filename, "u_h%d.dx", forest.rank());
  u_h.writeOpenDXData(filename);

  PetscFinalize();
  return 0;
}
EXTERN_C_END

/* -------------------------------------------------------------------------- */
/*
   PCNNCreateCoarseMatrix -
*/
#undef __FUNCT__
#define __FUNCT__ "PCNNCreateCoarseMatrix"
PetscErrorCode PCNNCreateCoarseMatrix (PC pc)
{
  MPI_Request    *send_request, *recv_request;
  PetscErrorCode ierr;
  PetscInt       i, j, k;
  PetscScalar*   mat;    /* Sub-matrix with this subdomain's contribution to the coarse matrix             */
  PetscScalar**  DZ_OUT; /* proc[k].DZ_OUT[i][] = bit of vector to be sent from processor k to processor i */

  /* aliasing some names */
  PC_IS*        pcis     = (PC_IS*)(pc->data);
  PC_NN*        pcnn     = (PC_NN*)pc->data;
  PetscInt      n_neigh  = pcis->n_neigh;
  PetscInt*     neigh    = pcis->neigh;
  PetscInt*     n_shared = pcis->n_shared;
  PetscInt**    shared   = pcis->shared;
  PetscScalar** DZ_IN;   /* Must be initialized after memory allocation. */

  PetscFunctionBegin;
  /* Allocate memory for mat (the +1 is to handle the case n_neigh equal to zero) */
  ierr = PetscMalloc((n_neigh*n_neigh+1)*sizeof(PetscScalar),&mat);CHKERRQ(ierr);

  /* Allocate memory for DZ */
  /* Notice that DZ_OUT[0] is allocated some space that is never used. */
  /* This is just so that DZ_OUT and DZ_IN have exactly the same form. */
  {
    PetscInt size_of_Z = 0;
    ierr  = PetscMalloc ((n_neigh+1)*sizeof(PetscScalar*),&pcnn->DZ_IN);CHKERRQ(ierr);
    DZ_IN = pcnn->DZ_IN;
    ierr  = PetscMalloc ((n_neigh+1)*sizeof(PetscScalar*),&DZ_OUT);CHKERRQ(ierr);
    for (i=0; i<n_neigh; i++) {
      size_of_Z += n_shared[i];
    }
    ierr = PetscMalloc ((size_of_Z+1)*sizeof(PetscScalar),&DZ_IN[0]);CHKERRQ(ierr);
    ierr = PetscMalloc ((size_of_Z+1)*sizeof(PetscScalar),&DZ_OUT[0]);CHKERRQ(ierr);
  }
  for (i=1; i<n_neigh; i++) {
    DZ_IN[i]  = DZ_IN [i-1] + n_shared[i-1];
    DZ_OUT[i] = DZ_OUT[i-1] + n_shared[i-1];
  }

  /* Set the values of DZ_OUT, in order to send this info to the neighbours */
  /* First, set the auxiliary array pcis->work_N. */
  ierr = PCISScatterArrayNToVecB(pcis->work_N,pcis->D,INSERT_VALUES,SCATTER_REVERSE,pc);CHKERRQ(ierr);
  for (i=1; i<n_neigh; i++) {
    for (j=0; j<n_shared[i]; j++) {
      DZ_OUT[i][j] = pcis->work_N[shared[i][j]];
    }
  }

  /* Non-blocking send/receive the common-interface chunks of scaled nullspaces */
  /* Notice that send_request[] and recv_request[] could have one less element. */
  /* We make them longer to have request[i] corresponding to neigh[i].          */
  {
    PetscMPIInt tag;
    ierr         = PetscObjectGetNewTag((PetscObject)pc,&tag);CHKERRQ(ierr);
    ierr         = PetscMalloc((2*(n_neigh)+1)*sizeof(MPI_Request),&send_request);CHKERRQ(ierr);
    recv_request = send_request + (n_neigh);
    for (i=1; i<n_neigh; i++) {
      ierr = MPI_Isend((void*)(DZ_OUT[i]),n_shared[i],MPIU_SCALAR,neigh[i],tag,((PetscObject)pc)->comm,&(send_request[i]));CHKERRQ(ierr);
      ierr = MPI_Irecv((void*)(DZ_IN [i]),n_shared[i],MPIU_SCALAR,neigh[i],tag,((PetscObject)pc)->comm,&(recv_request[i]));CHKERRQ(ierr);
    }
  }

  /* Set DZ_IN[0][] (recall that neigh[0]==rank, always) */
  for (j=0; j<n_shared[0]; j++) {
    DZ_IN[0][j] = pcis->work_N[shared[0][j]];
  }

  /* Start computing with local D*Z while communication goes on.   */
  /* Apply Schur complement. The result is "stored" in vec (more   */
  /* precisely, vec points to the result, stored in pc_nn->vec1_B) */
  /* and also scattered to pcnn->work_N.                           */
  ierr = PCNNApplySchurToChunk(pc,n_shared[0],shared[0],DZ_IN[0],pcis->work_N,pcis->vec1_B,
                               pcis->vec2_B,pcis->vec1_D,pcis->vec2_D);CHKERRQ(ierr);

  /* Compute the first column, while completing the receiving. */
  for (i=0; i<n_neigh; i++) {
    MPI_Status  stat;
    PetscMPIInt ind=0;
    if (i>0) { ierr = MPI_Waitany(n_neigh-1,recv_request+1,&ind,&stat);CHKERRQ(ierr); ind++; }
    mat[ind*n_neigh+0] = 0.0;
    for (k=0; k<n_shared[ind]; k++) {
      mat[ind*n_neigh+0] += DZ_IN[ind][k] * pcis->work_N[shared[ind][k]];
    }
  }

  /* Compute the remaining columns */
  for (j=1; j<n_neigh; j++) {
    ierr = PCNNApplySchurToChunk(pc,n_shared[j],shared[j],DZ_IN[j],pcis->work_N,pcis->vec1_B,
                                 pcis->vec2_B,pcis->vec1_D,pcis->vec2_D);CHKERRQ(ierr);
    for (i=0; i<n_neigh; i++) {
      mat[i*n_neigh+j] = 0.0;
      for (k=0; k<n_shared[i]; k++) {
        mat[i*n_neigh+j] += DZ_IN[i][k] * pcis->work_N[shared[i][k]];
      }
    }
  }

  /* Complete the sending. */
  if (n_neigh>1) {
    MPI_Status *stat;
    ierr = PetscMalloc((n_neigh-1)*sizeof(MPI_Status),&stat);CHKERRQ(ierr);
    if (n_neigh-1) { ierr = MPI_Waitall(n_neigh-1,&(send_request[1]),stat);CHKERRQ(ierr); }
    ierr = PetscFree(stat);CHKERRQ(ierr);
  }

  /* Free the memory for the MPI requests */
  ierr = PetscFree(send_request);CHKERRQ(ierr);

  /* Free the memory for DZ_OUT */
  if (DZ_OUT) {
    ierr = PetscFree(DZ_OUT[0]);CHKERRQ(ierr);
    ierr = PetscFree(DZ_OUT);CHKERRQ(ierr);
  }

  {
    PetscMPIInt size;
    ierr = MPI_Comm_size(((PetscObject)pc)->comm,&size);CHKERRQ(ierr);

    /* Create the global coarse vectors (rhs and solution). */
    ierr = VecCreateMPI(((PetscObject)pc)->comm,1,size,&(pcnn->coarse_b));CHKERRQ(ierr);
    ierr = VecDuplicate(pcnn->coarse_b,&(pcnn->coarse_x));CHKERRQ(ierr);

    /* Create and set the global coarse AIJ matrix. */
    ierr = MatCreate(((PetscObject)pc)->comm,&(pcnn->coarse_mat));CHKERRQ(ierr);
    ierr = MatSetSizes(pcnn->coarse_mat,1,1,size,size);CHKERRQ(ierr);
    ierr = MatSetType(pcnn->coarse_mat,MATAIJ);CHKERRQ(ierr);
    ierr = MatSeqAIJSetPreallocation(pcnn->coarse_mat,1,PETSC_NULL);CHKERRQ(ierr);
    ierr = MatMPIAIJSetPreallocation(pcnn->coarse_mat,1,PETSC_NULL,1,PETSC_NULL);CHKERRQ(ierr);
    ierr = MatSetValues(pcnn->coarse_mat,n_neigh,neigh,n_neigh,neigh,mat,ADD_VALUES);CHKERRQ(ierr);
    ierr = MatAssemblyBegin(pcnn->coarse_mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd  (pcnn->coarse_mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  }

  {
    PetscMPIInt rank;
    PetscScalar one = 1.0;
    ierr = MPI_Comm_rank(((PetscObject)pc)->comm,&rank);CHKERRQ(ierr);
    /* "Zero out" rows of not-purely-Neumann subdomains */
    if (pcis->pure_neumann) {
      /* does NOT zero the row; passes an empty set of rows.
         The reason is that MatZeroRows() is collective. */
      ierr = MatZeroRows(pcnn->coarse_mat,0,PETSC_NULL,one,0,0);CHKERRQ(ierr);
    } else {
      /* here it DOES zero the row, since it's not a floating subdomain. */
      PetscInt row = (PetscInt) rank;
      ierr = MatZeroRows(pcnn->coarse_mat,1,&row,one,0,0);CHKERRQ(ierr);
    }
  }

  /* Create the coarse linear solver context */
  {
    PC  pc_ctx, inner_pc;
    KSP inner_ksp;

    ierr = KSPCreate(((PetscObject)pc)->comm,&pcnn->ksp_coarse);CHKERRQ(ierr);
    ierr = PetscObjectIncrementTabLevel((PetscObject)pcnn->ksp_coarse,(PetscObject)pc,2);CHKERRQ(ierr);
    ierr = KSPSetOperators(pcnn->ksp_coarse,pcnn->coarse_mat,pcnn->coarse_mat,SAME_PRECONDITIONER);CHKERRQ(ierr);
    ierr = KSPGetPC(pcnn->ksp_coarse,&pc_ctx);CHKERRQ(ierr);
    ierr = PCSetType(pc_ctx,PCREDUNDANT);CHKERRQ(ierr);
    ierr = KSPSetType(pcnn->ksp_coarse,KSPPREONLY);CHKERRQ(ierr);
    ierr = PCRedundantGetKSP(pc_ctx,&inner_ksp);CHKERRQ(ierr);
    ierr = KSPGetPC(inner_ksp,&inner_pc);CHKERRQ(ierr);
    ierr = PCSetType(inner_pc,PCLU);CHKERRQ(ierr);
    ierr = KSPSetOptionsPrefix(pcnn->ksp_coarse,"nn_coarse_");CHKERRQ(ierr);
    ierr = KSPSetFromOptions(pcnn->ksp_coarse);CHKERRQ(ierr);
    /* the vectors in the following line are dummy arguments, just telling
       the KSP the vector size. Values are not used. */
    ierr = KSPSetUp(pcnn->ksp_coarse);CHKERRQ(ierr);
  }

  /* Free the memory for mat */
  ierr = PetscFree(mat);CHKERRQ(ierr);

  /* for DEBUGGING, save the coarse matrix to a file. */
  {
    PetscBool flg = PETSC_FALSE;
    ierr = PetscOptionsGetBool(PETSC_NULL,"-pc_nn_save_coarse_matrix",&flg,PETSC_NULL);CHKERRQ(ierr);
    if (flg) {
      PetscViewer viewer;
      ierr = PetscViewerASCIIOpen(PETSC_COMM_WORLD,"coarse.m",&viewer);CHKERRQ(ierr);
      ierr = PetscViewerSetFormat(viewer,PETSC_VIEWER_ASCII_MATLAB);CHKERRQ(ierr);
      ierr = MatView(pcnn->coarse_mat,viewer);CHKERRQ(ierr);
      ierr = PetscViewerDestroy(&viewer);CHKERRQ(ierr);
    }
  }

  /* Set the variable pcnn->factor_coarse_rhs. */
  pcnn->factor_coarse_rhs = (pcis->pure_neumann) ? 1.0 : 0.0;

  /* See historical note 02, at the bottom of this file. */
  PetscFunctionReturn(0);
}
void NRSolver :: applyConstraintsToStiffness(SparseMtrx *k)
{
    if ( this->smConstraintVersion == k->giveVersion() ) {
        return;
    }

#if 0
 #ifdef __PETSC_MODULE
    if ( solverType == ST_Petsc ) {
        PetscScalar diagVal = 1.0;
        if ( k->giveType() != SMT_PetscMtrx ) {
            OOFEM_ERROR("NRSolver :: applyConstraintsToStiffness: PetscSparseMtrx Expected");
        }
        PetscSparseMtrx *lhs = ( PetscSparseMtrx * ) k;

        if ( !prescribedEgsIS_defined ) {
            IntArray eqs;
 #ifdef __PARALLEL_MODE
            Natural2GlobalOrdering *n2lpm = engngModel->givePetscContext(1)->giveN2Gmap();
            int s = prescribedEqs.giveSize();
            eqs.resize(s);
            for ( int i = 1; i <= s; i++ ) {
                eqs.at(i) = n2lpm->giveNewEq( prescribedEqs.at(i) );
            }
            ISCreateGeneral(PETSC_COMM_WORLD, s, eqs.givePointer(), & prescribedEgsIS);
            //ISView(prescribedEgsIS,PETSC_VIEWER_STDOUT_WORLD);
 #else
            eqs.resize(numberOfPrescribedDofs);
            for ( int i = 1; i <= numberOfPrescribedDofs; i++ ) {
                eqs.at(i) = prescribedEqs.at(i) - 1;
            }
            ISCreateGeneral(PETSC_COMM_SELF, numberOfPrescribedDofs, eqs.givePointer(), & prescribedEgsIS);
            //ISView(prescribedEgsIS,PETSC_VIEWER_STDOUT_SELF);
 #endif
            prescribedEgsIS_defined = true;
        }

        //MatView(*(lhs->giveMtrx()),PETSC_VIEWER_STDOUT_WORLD);
        MatZeroRows(* ( lhs->giveMtrx() ), prescribedEgsIS, & diagVal);
        //MatView(*(lhs->giveMtrx()),PETSC_VIEWER_STDOUT_WORLD);
        if ( numberOfPrescribedDofs ) {
            this->smConstraintVersion = k->giveVersion();
        }
        return;
    }
 #endif // __PETSC_MODULE
#else
 #ifdef __PETSC_MODULE
    if ( solverType == ST_Petsc ) {
        if ( k->giveType() != SMT_PetscMtrx ) {
            OOFEM_ERROR("NRSolver :: applyConstraintsToStiffness: PetscSparseMtrx Expected");
        }
        PetscSparseMtrx *lhs = ( PetscSparseMtrx * ) k;

        Vec diag;
        PetscScalar *ptr;
        int eq;
        PetscContext *parallel_context = engngModel->givePetscContext(this->domain->giveNumber());
        parallel_context->createVecGlobal(& diag);
        MatGetDiagonal(* lhs->giveMtrx(), diag);
        VecGetArray(diag, & ptr);

        for ( int i = 1; i <= numberOfPrescribedDofs; i++ ) {
            eq = prescribedEqs.at(i) - 1;
            MatSetValue(* ( lhs->giveMtrx() ), eq, eq, ptr [ eq ] * 1.e6, INSERT_VALUES);
        }
        MatAssemblyBegin(* lhs->giveMtrx(), MAT_FINAL_ASSEMBLY);
        MatAssemblyEnd(* lhs->giveMtrx(), MAT_FINAL_ASSEMBLY);
        VecRestoreArray(diag, & ptr);
        VecDestroy(&diag);
        if ( numberOfPrescribedDofs ) {
            this->smConstraintVersion = k->giveVersion();
        }
        return;
    }
 #endif // __PETSC_MODULE
#endif

    for ( int i = 1; i <= numberOfPrescribedDofs; i++ ) {
        k->at( prescribedEqs.at(i), prescribedEqs.at(i) ) *= 1.e6;
    }
    if ( numberOfPrescribedDofs ) {
        this->smConstraintVersion = k->giveVersion();
    }
}
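/*
 * A closing note on the snippet above: the disabled #if 0 branch uses the old
 * IS-based MatZeroRows, while the active branch avoids row-zeroing entirely
 * and instead scales the constrained diagonal entries by 1.e6, which keeps
 * the matrix symmetric.  Where an exact symmetric elimination is wanted,
 * PETSc also provides MatZeroRowsColumns(), which zeroes the columns as well
 * and, via the optional (x,b) pair, moves the known values to the right-hand
 * side.  A minimal hedged sketch; `n_bc`, `bc_rows`, `x_bc` and `rhs` are
 * placeholder names, not part of the code above.
 */
ierr = MatZeroRowsColumns(A, n_bc, bc_rows, 1.0, x_bc, rhs);CHKERRQ(ierr);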