int ISCreateGeneralWithIJ(MPI_Comm comm,Vec x, Vec xvec[],PetscInt nvec, PetscInt nrow,PetscInt pnum, PetscInt *I, PetscInt *J,IS ISfrom[], IS ISto[]){ PetscInt size,rank; PetscErrorCode ierr; PetscInt i,k,count; PetscInt pstart[nvec],pend[nvec]; PetscInt cstart[nvec],cend[nvec]; PetscInt fromarray[pnum],toarray[pnum]; MPI_Comm_size(PETSC_COMM_WORLD,&size); MPI_Comm_rank(PETSC_COMM_WORLD,&rank); LargeVecGetOwnershipRange(&x,nvec,pstart,pend); LargeVecGetColumnOwnershipRange(xvec,nvec, nrow, cstart, cend); for(i=0;i<nvec;i++){ count = 0; for(k=0;k<pnum;k++){ if(*(I+k)>=cstart[i]&&*(I+k)<cend[i]){ fromarray[count] = (PetscInt)((*(I+k)-cstart[i])*nrow + *(J+k)); toarray[count] = k; count++; } } ierr = ISCreateGeneral(PETSC_COMM_SELF,count,fromarray,ISfrom+i);CHKERRQ(ierr); ierr = ISCreateGeneral(PETSC_COMM_SELF,count,toarray,ISto+i);CHKERRQ(ierr); count = 0; } return 0; }
void SAMpatchPETSc::setupIS(char dofType) const { PetscIntVec ldofs; PetscIntVec gdofs; int gdof = 0; if (adm.getProcId() > 0) adm.receive(gdof, adm.getProcId()-1); for (size_t i = 0; i < adm.dd.getMLGN().size(); ++i) { if ((dofType == 'A' || nodeType.empty() || this->SAM::getNodeType(i+1) == dofType) && adm.dd.getMLGN()[i] >= adm.dd.getMinNode() && adm.dd.getMLGN()[i] <= adm.dd.getMaxNode()) { std::pair<int, int> dofs = this->SAM::getNodeDOFs(i+1); for (int dof = dofs.first; dof <= dofs.second; ++dof) { ldofs.push_back(dof-1); gdofs.push_back(gdof++); } } } if (adm.getProcId() < adm.getNoProcs()-1) adm.send(gdof, adm.getProcId()+1); ISCreateGeneral(*adm.getCommunicator(), ldofs.size(), ldofs.data(), PETSC_COPY_VALUES, &dofIS[dofType].local); ISCreateGeneral(*adm.getCommunicator(), gdofs.size(), gdofs.data(), PETSC_COPY_VALUES, &dofIS[dofType].global); dofIS[dofType].nDofs = gdof - gdofs.front(); }
/*
   MatGetOrdering_QMD - Computes the Quotient Minimum Degree ordering of a
   matrix via SPARSEPACK; the same permutation is returned for rows and columns.
*/
PETSC_EXTERN PetscErrorCode MatGetOrdering_QMD(Mat mat,MatOrderingType type,IS *row,IS *col)
{
  PetscInt       i,*deg,*marker,*rchset,*nbrhd,*qsize,*qlink,nofsub,*iperm,nrow,*perm;
  PetscErrorCode ierr;
  const PetscInt *ia,*ja;
  PetscBool      done;

  PetscFunctionBegin;
  /* SPARSEPACK expects 1-based compressed row indices, hence shift = 1 */
  ierr = MatGetRowIJ(mat,1,PETSC_TRUE,PETSC_TRUE,&nrow,&ia,&ja,&done);CHKERRQ(ierr);
  if (!done) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Cannot get rows for matrix");
  ierr = PetscMalloc(nrow * sizeof(PetscInt),&perm);CHKERRQ(ierr);
  /* BUG FIX: "&deg" had been garbled into a degree sign by an encoding round-trip */
  ierr = PetscMalloc5(nrow,PetscInt,&iperm,nrow,PetscInt,&deg,nrow,PetscInt,&marker,nrow,PetscInt,&rchset,nrow,PetscInt,&nbrhd);CHKERRQ(ierr);
  ierr = PetscMalloc2(nrow,PetscInt,&qsize,nrow,PetscInt,&qlink);CHKERRQ(ierr);
  /* WARNING - genqmd trashes ja */
  SPARSEPACKgenqmd(&nrow,ia,ja,perm,iperm,deg,marker,rchset,nbrhd,qsize,qlink,&nofsub);
  ierr = MatRestoreRowIJ(mat,1,PETSC_TRUE,PETSC_TRUE,&nrow,&ia,&ja,&done);CHKERRQ(ierr);
  ierr = PetscFree2(qsize,qlink);CHKERRQ(ierr);
  ierr = PetscFree5(iperm,deg,marker,rchset,nbrhd);CHKERRQ(ierr);
  /* shift because SPARSEPACK indices start at one */
  for (i=0; i<nrow; i++) perm[i]--;
  ierr = ISCreateGeneral(PETSC_COMM_SELF,nrow,perm,PETSC_COPY_VALUES,row);CHKERRQ(ierr);
  /* col takes ownership of perm, so perm is not freed here */
  ierr = ISCreateGeneral(PETSC_COMM_SELF,nrow,perm,PETSC_OWN_POINTER,col);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/*
   MatGetOrdering_myordering - Example of a user-provided matrix ordering:
   returns the reversal permutation (n-1, n-2, ..., 0) for rows and columns.
*/
PetscErrorCode MatGetOrdering_myordering(Mat mat,MatOrderingType type,IS *irow,IS *icol)
{
  PetscErrorCode ierr;
  PetscInt       n,i,*ii;
  PetscBool      done;
  MPI_Comm       comm;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)mat,&comm);CHKERRQ(ierr);
  ierr = MatGetRowIJ(mat,0,PETSC_FALSE,PETSC_TRUE,&n,NULL,NULL,&done);CHKERRQ(ierr);
  ierr = MatRestoreRowIJ(mat,0,PETSC_FALSE,PETSC_TRUE,NULL,NULL,NULL,&done);CHKERRQ(ierr);
  if (done) { /* matrix may be "compressed" in symbolic factorization, due to i-nodes or block storage */
    ierr = PetscMalloc(n*sizeof(PetscInt),&ii);CHKERRQ(ierr);
    for (i=0; i<n; i++) ii[i] = n-i-1; /* replace your index here */
    ierr = ISCreateGeneral(PETSC_COMM_SELF,n,ii,PETSC_COPY_VALUES,irow);CHKERRQ(ierr);
    /* icol takes ownership of ii */
    ierr = ISCreateGeneral(PETSC_COMM_SELF,n,ii,PETSC_OWN_POINTER,icol);CHKERRQ(ierr);
  } else SETERRQ(PETSC_COMM_WORLD,PETSC_ERR_SUP,"MatRestoreRowIJ fails!");
  /* BUG FIX: the original also called ISSetIdentity() on both index sets, but
     the reversal ii[i] = n-i-1 is NOT the identity (except for n <= 1);
     claiming identity makes PETSc skip applying the permutation entirely. */
  ierr = ISSetPermutation(*irow);CHKERRQ(ierr);
  ierr = ISSetPermutation(*icol);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/*
   MatGetOrdering_RCM - Computes the Reverse Cuthill-McKee ordering of a matrix
   using SPARSEPACK; the same permutation is used for rows and columns.
*/
PETSC_INTERN PetscErrorCode MatGetOrdering_RCM(Mat mat,MatOrderingType type,IS *row,IS *col)
{
  PetscErrorCode ierr;
  PetscInt       k,nrow;
  PetscInt       *mask,*xls,*perm;
  const PetscInt *ia,*ja;
  PetscBool      done;

  PetscFunctionBegin;
  /* SPARSEPACK expects 1-based compressed row indices, hence shift = 1 */
  ierr = MatGetRowIJ(mat,1,PETSC_TRUE,PETSC_TRUE,&nrow,&ia,&ja,&done);CHKERRQ(ierr);
  if (!done) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Cannot get rows for matrix");

  ierr = PetscMalloc3(nrow,&mask,nrow,&perm,2*nrow,&xls);CHKERRQ(ierr);
  SPARSEPACKgenrcm(&nrow,ia,ja,perm,mask,xls);
  ierr = MatRestoreRowIJ(mat,1,PETSC_TRUE,PETSC_TRUE,NULL,&ia,&ja,&done);CHKERRQ(ierr);

  /* convert the 1-based Sparsepack permutation to 0-based PETSc indices */
  for (k=0; k<nrow; k++) perm[k]--;

  ierr = ISCreateGeneral(PETSC_COMM_SELF,nrow,perm,PETSC_COPY_VALUES,row);CHKERRQ(ierr);
  ierr = ISCreateGeneral(PETSC_COMM_SELF,nrow,perm,PETSC_COPY_VALUES,col);CHKERRQ(ierr);
  ierr = PetscFree3(mask,perm,xls);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/*
   VecReorder - Scatters r into "or" so that the local entry with global index
   low+i is sent to global index order[low+i] (read from the local part of the
   ordering vector).

   Fixes relative to the original:
   - VecGetArray() yields the LOCAL array of `order`, so it is indexed
     o_array[i], not o_array[low+i] (out-of-bounds read on ranks with low > 0);
   - the "to" index set is now built from to_idx (it previously reused
     from_idx, making to_idx dead and the scatter an identity copy);
   - the index arrays are handed over with PETSC_OWN_POINTER, so they must not
     also be freed here (was a double free); the ISs and the scatter are
     destroyed to avoid leaks.
*/
PetscErrorCode VecReorder(Vec r, Vec order, Vec or)
{
  PetscErrorCode err;
  PetscInt       n_local, low, high;
  PetscInt       *to_idx, *from_idx;
  PetscScalar    *o_array;
  IS             to, from;
  VecScatter     scatter;
  int            i;

  err = VecGetLocalSize(r, &n_local); CHKERRQ(err);
  err = PetscMalloc1(n_local, &to_idx); CHKERRQ(err);
  err = PetscMalloc1(n_local, &from_idx); CHKERRQ(err);
  err = VecGetOwnershipRange(r, &low, &high); CHKERRQ(err);

  err = VecGetArray(order, &o_array); CHKERRQ(err);
  for (i = 0; i < n_local; i++) {
    to_idx[i]   = (PetscInt) o_array[i];  /* destination index for entry low+i */
    from_idx[i] = low + i;                /* source: this rank's global range */
  }
  err = VecRestoreArray(order, &o_array); CHKERRQ(err);

  /* the index sets take ownership of the arrays */
  err = ISCreateGeneral(PETSC_COMM_SELF, n_local, to_idx, PETSC_OWN_POINTER, &to); CHKERRQ(err);
  err = ISCreateGeneral(PETSC_COMM_SELF, n_local, from_idx, PETSC_OWN_POINTER, &from); CHKERRQ(err);

  err = VecScatterCreate(r, from, or, to, &scatter); CHKERRQ(err);
  err = VecScatterBegin(scatter, r, or, INSERT_VALUES, SCATTER_FORWARD); CHKERRQ(err);
  err = VecScatterEnd(scatter, r, or, INSERT_VALUES, SCATTER_FORWARD); CHKERRQ(err);

  err = VecScatterDestroy(&scatter); CHKERRQ(err);
  err = ISDestroy(&to); CHKERRQ(err);
  err = ISDestroy(&from); CHKERRQ(err);
  return 0;
}
/*@C AOCreateMemoryScalable - Creates a memory scalable application ordering using two integer arrays. Collective on MPI_Comm Input Parameters: + comm - MPI communicator that is to share AO . napp - size of integer arrays . myapp - integer array that defines an ordering - mypetsc - integer array that defines another ordering (may be NULL to indicate the natural ordering, that is 0,1,2,3,...) Output Parameter: . aoout - the new application ordering Level: beginner Notes: The arrays myapp and mypetsc must contain the all the integers 0 to napp-1 with no duplicates; that is there cannot be any "holes" in the indices. Use AOCreateMapping() or AOCreateMappingIS() if you wish to have "holes" in the indices. Comparing with AOCreateBasic(), this routine trades memory with message communication. .keywords: AO, create .seealso: AOCreateMemoryScalableIS(), AODestroy(), AOPetscToApplication(), AOApplicationToPetsc() @*/ PetscErrorCode AOCreateMemoryScalable(MPI_Comm comm,PetscInt napp,const PetscInt myapp[],const PetscInt mypetsc[],AO *aoout) { PetscErrorCode ierr; IS isapp,ispetsc; const PetscInt *app=myapp,*petsc=mypetsc; PetscFunctionBegin; ierr = ISCreateGeneral(comm,napp,app,PETSC_USE_POINTER,&isapp); CHKERRQ(ierr); if (mypetsc) { ierr = ISCreateGeneral(comm,napp,petsc,PETSC_USE_POINTER,&ispetsc); CHKERRQ(ierr); } else { ispetsc = NULL; } ierr = AOCreateMemoryScalableIS(isapp,ispetsc,aoout); CHKERRQ(ierr); ierr = ISDestroy(&isapp); CHKERRQ(ierr); if (mypetsc) { ierr = ISDestroy(&ispetsc); CHKERRQ(ierr); } PetscFunctionReturn(0); }
/*@ ISListToPair - convert an IS list to a pair of ISs of equal length defining an equivalent integer multimap. Each IS on the input list is assigned an integer j so that all of the indices of that IS are mapped to j. Collective on comm. Input arguments: + comm - MPI_Comm . listlen - IS list length - islist - IS list Output arguments: + xis - domain IS - yis - range IS Level: advanced Notes: The global integers assigned to the ISs of the local input list might not correspond to the local numbers of the ISs on that list, but the two *orderings* are the same: the global integers assigned to the ISs on the local list form a strictly increasing sequence. The ISs on the input list can belong to subcommunicators of comm, and the subcommunicators on the input IS list are assumed to be in a "deadlock-free" order. Local lists of PetscObjects (or their subcomms) on a comm are "deadlock-free" if, whenever subcomm1 precedes subcomm2 on any local list, it precedes subcomm2 on all ranks. Equivalently, the local numbers of the subcomms on each local list are drawn from some global numbering. This is ensured, for example, by ISPairToList(). 
.seealso ISPairToList()
@*/
PetscErrorCode ISListToPair(MPI_Comm comm, PetscInt listlen, IS islist[], IS *xis, IS *yis)
{
  PetscErrorCode ierr;
  PetscInt       ncolors, *colors,i, leni,len,*xinds, *yinds,k,j;
  const PetscInt *indsi;

  PetscFunctionBegin;
  /* assign a global color to each IS on the list */
  ierr = PetscMalloc1(listlen, &colors);CHKERRQ(ierr);
  ierr = PetscObjectsListGetGlobalNumbering(comm, listlen, (PetscObject*)islist,&ncolors, colors);CHKERRQ(ierr);
  /* total number of indices over all ISs */
  len = 0;
  for (i = 0; i < listlen; ++i) {
    ierr = ISGetLocalSize(islist[i], &leni);CHKERRQ(ierr);
    len += leni;
  }
  ierr = PetscMalloc1(len, &xinds);CHKERRQ(ierr);
  ierr = PetscMalloc1(len, &yinds);CHKERRQ(ierr);
  k = 0;
  for (i = 0; i < listlen; ++i) {
    ierr = ISGetLocalSize(islist[i], &leni);CHKERRQ(ierr);
    ierr = ISGetIndices(islist[i],&indsi);CHKERRQ(ierr);
    for (j = 0; j < leni; ++j) {
      xinds[k] = indsi[j];    /* domain: the IS's own indices */
      yinds[k] = colors[i];   /* range: the IS's global color */
      ++k;
    }
    /* BUG FIX: the indices were never restored, violating the Get/Restore pairing */
    ierr = ISRestoreIndices(islist[i],&indsi);CHKERRQ(ierr);
  }
  ierr = PetscFree(colors);CHKERRQ(ierr);
  /* the output ISs take ownership of xinds/yinds */
  ierr = ISCreateGeneral(comm,len,xinds,PETSC_OWN_POINTER,xis);CHKERRQ(ierr);
  ierr = ISCreateGeneral(comm,len,yinds,PETSC_OWN_POINTER,yis);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
EXTERN_C_BEGIN /* MatOrdering_ND - Find the nested dissection ordering of a given matrix. */ #undef __FUNCT__ #define __FUNCT__ "MatOrdering_ND" PetscErrorCode PETSCMAT_DLLEXPORT MatOrdering_ND(Mat mat,const MatOrderingType type,IS *row,IS *col) { PetscErrorCode ierr; PetscInt i, *mask,*xls,*ls,nrow,*ia,*ja,*perm; PetscTruth done; PetscFunctionBegin; ierr = MatGetRowIJ(mat,1,PETSC_TRUE,PETSC_TRUE,&nrow,&ia,&ja,&done);CHKERRQ(ierr); if (!done) SETERRQ1(PETSC_ERR_SUP,"Cannot get rows for matrix type %s",((PetscObject)mat)->type_name); ierr = PetscMalloc4(nrow,PetscInt,&mask,nrow,PetscInt,&perm,nrow+1,PetscInt,&xls,nrow,PetscInt,&ls);CHKERRQ(ierr); SPARSEPACKgennd(&nrow,ia,ja,mask,perm,xls,ls); ierr = MatRestoreRowIJ(mat,1,PETSC_TRUE,PETSC_TRUE,&nrow,&ia,&ja,&done);CHKERRQ(ierr); /* shift because Sparsepack indices start at one */ for (i=0; i<nrow; i++) perm[i]--; ierr = ISCreateGeneral(PETSC_COMM_SELF,nrow,perm,row);CHKERRQ(ierr); ierr = ISCreateGeneral(PETSC_COMM_SELF,nrow,perm,col);CHKERRQ(ierr); ierr = PetscFree4(mask,perm,xls,ls);CHKERRQ(ierr); PetscFunctionReturn(0); }
EXTERN_C_BEGIN /* MatOrdering_RCM - Find the Reverse Cuthill-McKee ordering of a given matrix. */ #undef __FUNCT__ #define __FUNCT__ "MatOrdering_RCM" PetscErrorCode PETSCMAT_DLLEXPORT MatOrdering_RCM(Mat mat,const MatOrderingType type,IS *row,IS *col) { PetscErrorCode ierr; PetscInt i,*mask,*xls,nrow,*ia,*ja,*perm; PetscTruth done; PetscFunctionBegin; ierr = MatGetRowIJ(mat,1,PETSC_TRUE,PETSC_TRUE,&nrow,&ia,&ja,&done);CHKERRQ(ierr); if (!done) SETERRQ(PETSC_ERR_SUP,"Cannot get rows for matrix"); ierr = PetscMalloc3(nrow,PetscInt,&mask,nrow,PetscInt,&perm,2*nrow,PetscInt,&xls);CHKERRQ(ierr); SPARSEPACKgenrcm(&nrow,ia,ja,perm,mask,xls); ierr = MatRestoreRowIJ(mat,1,PETSC_TRUE,PETSC_TRUE,&nrow,&ia,&ja,&done);CHKERRQ(ierr); /* shift because Sparsepack indices start at one */ for (i=0; i<nrow; i++) perm[i]--; ierr = ISCreateGeneral(PETSC_COMM_SELF,nrow,perm,row);CHKERRQ(ierr); ierr = ISCreateGeneral(PETSC_COMM_SELF,nrow,perm,col);CHKERRQ(ierr); ierr = PetscFree3(mask,perm,xls);CHKERRQ(ierr); PetscFunctionReturn(0); }
EXTERN_C_BEGIN /* MatGetOrdering_1WD - Find the 1-way dissection ordering of a given matrix. */ #undef __FUNCT__ #define __FUNCT__ "MatGetOrdering_1WD" PetscErrorCode MatGetOrdering_1WD(Mat mat,MatOrderingType type,IS *row,IS *col) { PetscErrorCode ierr; PetscInt i,*mask,*xls,nblks,*xblk,*ls,nrow,*perm; const PetscInt *ia,*ja; PetscBool done; PetscFunctionBegin; ierr = MatGetRowIJ(mat,1,PETSC_TRUE,PETSC_TRUE,&nrow,&ia,&ja,&done);CHKERRQ(ierr); if (!done) SETERRQ(((PetscObject)mat)->comm,PETSC_ERR_SUP,"Cannot get rows for matrix"); ierr = PetscMalloc5(nrow,PetscInt,&mask,nrow+1,PetscInt,&xls,nrow,PetscInt,&ls,nrow+1,PetscInt,&xblk,nrow,PetscInt,&perm);CHKERRQ(ierr); SPARSEPACKgen1wd(&nrow,ia,ja,mask,&nblks,xblk,perm,xls,ls); ierr = MatRestoreRowIJ(mat,1,PETSC_TRUE,PETSC_TRUE,&nrow,&ia,&ja,&done);CHKERRQ(ierr); for (i=0; i<nrow; i++) perm[i]--; ierr = ISCreateGeneral(PETSC_COMM_SELF,nrow,perm,PETSC_COPY_VALUES,row);CHKERRQ(ierr); ierr = ISCreateGeneral(PETSC_COMM_SELF,nrow,perm,PETSC_COPY_VALUES,col);CHKERRQ(ierr); ierr = PetscFree5(mask,xls,ls,xblk,perm);CHKERRQ(ierr); PetscFunctionReturn(0); }
/*
   MatGetOrdering_AMD - Computes an Approximate Minimum Degree ordering of a
   matrix by calling the external AMD (SuiteSparse) package.
*/
PETSC_EXTERN PetscErrorCode MatGetOrdering_AMD(Mat mat,MatOrderingType type,IS *row,IS *col)
{
  PetscErrorCode ierr;
  PetscInt       nrow,*perm;
  const PetscInt *ia,*ja;
  int            status;
  PetscReal      val;
  double         Control[AMD_CONTROL],Info[AMD_INFO];
  PetscBool      tval,done;

  PetscFunctionBegin;
  /* AMD does not require that the matrix be symmetric (it does so internally,
     at least in so far as computing orderings for A+A^T. */
  /* 0-based, non-symmetrized row structure */
  ierr = MatGetRowIJ(mat,0,PETSC_FALSE,PETSC_TRUE,&nrow,&ia,&ja,&done);CHKERRQ(ierr);
  if (!done) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Cannot get rows for matrix type %s",((PetscObject)mat)->type_name);
  amd_AMD_defaults(Control);

  ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"AMD Options","Mat");CHKERRQ(ierr);
  /* We have to use temporary values here because AMD always uses double, even though PetscReal may be single */
  val = (PetscReal)Control[AMD_DENSE];
  ierr = PetscOptionsReal("-mat_ordering_amd_dense","threshold for \"dense\" rows/columns","None",val,&val,NULL);CHKERRQ(ierr);
  Control[AMD_DENSE] = (double)val;

  tval = (PetscBool)Control[AMD_AGGRESSIVE];
  ierr = PetscOptionsBool("-mat_ordering_amd_aggressive","use aggressive absorption","None",tval,&tval,NULL);CHKERRQ(ierr);
  Control[AMD_AGGRESSIVE] = (double)tval;
  ierr = PetscOptionsEnd();CHKERRQ(ierr);

  ierr = PetscMalloc(nrow*sizeof(PetscInt),&perm);CHKERRQ(ierr);
  status = amd_AMD_order(nrow,ia,ja,perm,Control,Info);
  /* each failing case raises an error and returns, so no break is needed */
  switch (status) {
  case AMD_OK: break;
  case AMD_OK_BUT_JUMBLED:
    /* The result is fine, but PETSc matrices are supposed to satisfy stricter preconditions, so PETSc considers a
     * matrix that triggers this error condition to be invalid.
     */
    SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_PLIB,"According to AMD, the matrix has unsorted and/or duplicate row indices");
  case AMD_INVALID:
    amd_info(Info);
    SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_PLIB,"According to AMD, the matrix is invalid");
  case AMD_OUT_OF_MEMORY:
    SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_MEM,"AMD could not compute ordering");
  default:
    SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_LIB,"Unexpected return value");
  }
  ierr = MatRestoreRowIJ(mat,0,PETSC_FALSE,PETSC_TRUE,NULL,&ia,&ja,&done);CHKERRQ(ierr);

  /* row copies perm; col takes ownership of it, so perm is not freed here */
  ierr = ISCreateGeneral(PETSC_COMM_SELF,nrow,perm,PETSC_COPY_VALUES,row);CHKERRQ(ierr);
  ierr = ISCreateGeneral(PETSC_COMM_SELF,nrow,perm,PETSC_OWN_POINTER,col);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/*
   RHSFunction - Evaluates the ODE right-hand side by gathering the parallel
   state into a sequential work vector, applying the 3x3 tridiagonal operator
   on the local arrays, and scattering the result back.
*/
PetscErrorCode RHSFunction(TS ts,PetscReal t,Vec globalin,Vec globalout,void *ctx)
{
  PetscErrorCode ierr;
  PetscScalar    *xin,*xout;
  PetscInt       k,n,*idx;
  IS             from,to;
  VecScatter     scatter;
  Vec            tmp_in,tmp_out;

  /* Get the length of parallel vector */
  ierr = VecGetSize(globalin,&n);CHKERRQ(ierr);
  /* Identity index list 0..n-1 used for both ends of the scatter */
  ierr = PetscMalloc(n*sizeof(PetscInt),&idx);CHKERRQ(ierr);
  for (k=0; k<n; k++) idx[k] = k;
  /* Create local sequential work vectors */
  ierr = VecCreateSeq(PETSC_COMM_SELF,n,&tmp_in);CHKERRQ(ierr);
  ierr = VecDuplicate(tmp_in,&tmp_out);CHKERRQ(ierr);
  /* Gather the whole parallel vector onto this process */
  ierr = ISCreateGeneral(PETSC_COMM_SELF,n,idx,PETSC_COPY_VALUES,&from);CHKERRQ(ierr);
  ierr = ISCreateGeneral(PETSC_COMM_SELF,n,idx,PETSC_COPY_VALUES,&to);CHKERRQ(ierr);
  ierr = VecScatterCreate(globalin,from,tmp_in,to,&scatter);CHKERRQ(ierr);
  ierr = VecScatterBegin(scatter,globalin,tmp_in,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd(scatter,globalin,tmp_in,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterDestroy(&scatter);CHKERRQ(ierr);

  /* Apply the operator directly to the local arrays */
  ierr = VecGetArray(tmp_in,&xin);CHKERRQ(ierr);
  ierr = VecGetArray(tmp_out,&xout);CHKERRQ(ierr);
  xout[0] = 2.0*xin[0]+xin[1];
  xout[1] = xin[0]+2.0*xin[1]+xin[2];
  xout[2] = xin[1]+2.0*xin[2];
  ierr = VecRestoreArray(tmp_in,&xin);CHKERRQ(ierr);
  ierr = VecRestoreArray(tmp_out,&xout);CHKERRQ(ierr);

  /* Scatter the local result back into the parallel output vector */
  ierr = VecScatterCreate(tmp_out,from,globalout,to,&scatter);CHKERRQ(ierr);
  ierr = VecScatterBegin(scatter,tmp_out,globalout,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd(scatter,tmp_out,globalout,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);

  /* Clean up the index sets, scatter, work vectors and index array */
  ierr = ISDestroy(&from);CHKERRQ(ierr);
  ierr = ISDestroy(&to);CHKERRQ(ierr);
  ierr = VecScatterDestroy(&scatter);CHKERRQ(ierr);
  ierr = VecDestroy(&tmp_in);CHKERRQ(ierr);
  ierr = VecDestroy(&tmp_out);CHKERRQ(ierr);
  ierr = PetscFree(idx);CHKERRQ(ierr);
  return 0;
}
int main(int argc,char **argv) { PetscErrorCode ierr; AppCtx ctx; TS ts; Vec tsrhs,U; IS is; PetscInt I; PetscMPIInt rank; ierr = PetscInitialize(&argc,&argv,(char*)0,help);if (ierr) return ierr; ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr); ierr = TSCreate(PETSC_COMM_WORLD,&ts);CHKERRQ(ierr); ierr = TSSetProblemType(ts,TS_NONLINEAR);CHKERRQ(ierr); ierr = TSSetType(ts,TSEULER);CHKERRQ(ierr); ierr = TSSetFromOptions(ts);CHKERRQ(ierr); ierr = VecCreateMPI(PETSC_COMM_WORLD,1,PETSC_DETERMINE,&tsrhs);CHKERRQ(ierr); ierr = VecCreateMPI(PETSC_COMM_WORLD,1,PETSC_DETERMINE,&U);CHKERRQ(ierr); ierr = TSSetRHSFunction(ts,tsrhs,TSFunction,&ctx);CHKERRQ(ierr); ctx.f = f; ierr = SNESCreate(PETSC_COMM_WORLD,&ctx.snes);CHKERRQ(ierr); ierr = SNESSetFromOptions(ctx.snes);CHKERRQ(ierr); ierr = SNESSetFunction(ctx.snes,NULL,SNESFunction,&ctx);CHKERRQ(ierr); ierr = SNESSetJacobian(ctx.snes,NULL,NULL,SNESComputeJacobianDefault,&ctx);CHKERRQ(ierr); ctx.F = F; ierr = VecCreateMPI(PETSC_COMM_WORLD,1,PETSC_DETERMINE,&ctx.V);CHKERRQ(ierr); /* Create scatters to move between separate U and V representation and UV representation of solution */ ierr = VecCreateMPI(PETSC_COMM_WORLD,2,PETSC_DETERMINE,&ctx.UV);CHKERRQ(ierr); I = 2*rank; ierr = ISCreateGeneral(PETSC_COMM_WORLD,1,&I,PETSC_COPY_VALUES,&is);CHKERRQ(ierr); ierr = VecScatterCreateWithData(U,NULL,ctx.UV,is,&ctx.scatterU);CHKERRQ(ierr); ierr = ISDestroy(&is);CHKERRQ(ierr); I = 2*rank + 1; ierr = ISCreateGeneral(PETSC_COMM_WORLD,1,&I,PETSC_COPY_VALUES,&is);CHKERRQ(ierr); ierr = VecScatterCreateWithData(ctx.V,NULL,ctx.UV,is,&ctx.scatterV);CHKERRQ(ierr); ierr = ISDestroy(&is);CHKERRQ(ierr); ierr = VecSet(U,1.0);CHKERRQ(ierr); ierr = TSSolve(ts,U);CHKERRQ(ierr); ierr = VecDestroy(&ctx.V);CHKERRQ(ierr); ierr = VecDestroy(&ctx.UV);CHKERRQ(ierr); ierr = VecScatterDestroy(&ctx.scatterU);CHKERRQ(ierr); ierr = VecScatterDestroy(&ctx.scatterV);CHKERRQ(ierr); ierr = VecDestroy(&tsrhs);CHKERRQ(ierr); ierr = VecDestroy(&U);CHKERRQ(ierr); 
ierr = SNESDestroy(&ctx.snes);CHKERRQ(ierr); ierr = TSDestroy(&ts);CHKERRQ(ierr); ierr = PetscFinalize(); return ierr; }
int main(int argc,char **argv) { PetscErrorCode ierr; PetscInt n = 5,i,idx2[3] = {0,2,3},idx1[3] = {0,1,2}; PetscMPIInt size,rank; PetscScalar value; Vec x,y; IS is1,is2; VecScatter ctx = 0; PetscInitialize(&argc,&argv,(char*)0,help); ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr); ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr); /* create two vectors */ ierr = VecCreate(PETSC_COMM_WORLD,&x);CHKERRQ(ierr); ierr = VecSetSizes(x,PETSC_DECIDE,size*n);CHKERRQ(ierr); ierr = VecSetFromOptions(x);CHKERRQ(ierr); ierr = VecCreateSeq(PETSC_COMM_SELF,n,&y);CHKERRQ(ierr); /* create two index sets */ ierr = ISCreateGeneral(PETSC_COMM_SELF,3,idx1,PETSC_COPY_VALUES,&is1);CHKERRQ(ierr); ierr = ISCreateGeneral(PETSC_COMM_SELF,3,idx2,PETSC_COPY_VALUES,&is2);CHKERRQ(ierr); /* fill local part of parallel vector */ for (i=n*rank; i<n*(rank+1); i++) { value = (PetscScalar) i; ierr = VecSetValues(x,1,&i,&value,INSERT_VALUES);CHKERRQ(ierr); } ierr = VecAssemblyBegin(x);CHKERRQ(ierr); ierr = VecAssemblyEnd(x);CHKERRQ(ierr); ierr = VecView(x,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); ierr = VecSet(y,-1.0);CHKERRQ(ierr); ierr = VecScatterCreate(x,is1,y,is2,&ctx);CHKERRQ(ierr); ierr = VecScatterBegin(ctx,x,y,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); ierr = VecScatterEnd(ctx,x,y,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); ierr = VecScatterDestroy(&ctx);CHKERRQ(ierr); if (!rank) { ierr = PetscPrintf(PETSC_COMM_SELF,"scattered vector\n");CHKERRQ(ierr); ierr = VecView(y,PETSC_VIEWER_STDOUT_SELF);CHKERRQ(ierr); } ierr = ISDestroy(&is1);CHKERRQ(ierr); ierr = ISDestroy(&is2);CHKERRQ(ierr); ierr = VecDestroy(&x);CHKERRQ(ierr); ierr = VecDestroy(&y);CHKERRQ(ierr); ierr = PetscFinalize(); return 0; }
int main(int argc,char **argv) { PetscErrorCode ierr; PetscMPIInt rank,size; Vec x,y; IS is1,is2; PetscInt n,N,ix[2],iy[2]; VecScatter ctx; ierr = PetscInitialize(&argc,&argv,(char*)0,help);if (ierr) return ierr; ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr); ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr); if (size < 3) SETERRQ(PETSC_COMM_WORLD,PETSC_ERR_ARG_OUTOFRANGE,"This example needs at least 3 processes"); /* create two vectors */ n = 2; N = 2*size; ierr = VecCreateMPI(PETSC_COMM_WORLD,n,N,&x);CHKERRQ(ierr); ierr = VecDuplicate(x,&y);CHKERRQ(ierr); /* Specify indices to send from the next process in the ring */ ix[0] = ((rank+1)*n+0) % N; ix[1] = ((rank+1)*n+1) % N; /* And put them on the process after that in the ring */ iy[0] = ((rank+2)*n+0) % N; iy[1] = ((rank+2)*n+1) % N; /* create two index sets */ ierr = ISCreateGeneral(PETSC_COMM_WORLD,n,ix,PETSC_USE_POINTER,&is1);CHKERRQ(ierr); ierr = ISCreateGeneral(PETSC_COMM_WORLD,n,iy,PETSC_USE_POINTER,&is2);CHKERRQ(ierr); ierr = VecSetValue(x,rank*n,rank*n,INSERT_VALUES);CHKERRQ(ierr); ierr = VecSetValue(x,rank*n+1,rank*n+1,INSERT_VALUES);CHKERRQ(ierr); ierr = VecView(x,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); ierr = PetscPrintf(PETSC_COMM_WORLD,"----\n");CHKERRQ(ierr); ierr = VecScatterCreate(x,is1,y,is2,&ctx);CHKERRQ(ierr); ierr = VecScatterBegin(ctx,x,y,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); ierr = VecScatterEnd(ctx,x,y,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); ierr = VecScatterDestroy(&ctx);CHKERRQ(ierr); ierr = VecView(y,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); ierr = ISDestroy(&is1);CHKERRQ(ierr); ierr = ISDestroy(&is2);CHKERRQ(ierr); ierr = VecDestroy(&x);CHKERRQ(ierr); ierr = VecDestroy(&y);CHKERRQ(ierr); ierr = PetscFinalize(); return ierr; }
/*@C
   DMDAGetRay - Returns a vector on process zero that contains a row or column of the values in a DMDA vector

   Collective on DMDA

   Input Parameters:
+  da - the distributed array
.  vec - the vector
.  dir - Cartesian direction, either DMDA_X, DMDA_Y, or DMDA_Z
-  gp - global grid point number in this direction

   Output Parameters:
+  newvec - the new vector that can hold the values (size zero on all processes except process 0)
-  scatter - the VecScatter that will map from the original vector to the slice

   Level: advanced

   Notes:
   All processors that share the DMDA must call this with the same gp value

.keywords: distributed array, get, processor subset
@*/
PetscErrorCode DMDAGetRay(DM da,DMDADirection dir,PetscInt gp,Vec *newvec,VecScatter *scatter)
{
  PetscMPIInt    rank;
  DM_DA          *dd = (DM_DA*)da->data;
  PetscErrorCode ierr;
  IS             is;
  AO             ao;
  Vec            vec;
  PetscInt       *indices,i,j;

  PetscFunctionBegin;
  /* only 2d DMDAs are supported */
  if (dd->dim == 1) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Cannot get slice from 1d DMDA");
  if (dd->dim == 3) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Cannot get slice from 3d DMDA");
  ierr = DMDAGetAO(da,&ao);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)da),&rank);CHKERRQ(ierr);
  if (!rank) {
    /* rank 0 builds the natural-ordering indices of the requested slice */
    if (dir == DMDA_Y) {
      /* row gp: M*w consecutive natural indices starting at gp*M*w */
      ierr = PetscMalloc(dd->w*dd->M*sizeof(PetscInt),&indices);CHKERRQ(ierr);
      indices[0] = gp*dd->M*dd->w;
      for (i=1; i<dd->M*dd->w; i++) indices[i] = indices[i-1] + 1;

      ierr = AOApplicationToPetsc(ao,dd->M*dd->w,indices);CHKERRQ(ierr);
      ierr = VecCreate(PETSC_COMM_SELF,newvec);CHKERRQ(ierr);
      ierr = VecSetBlockSize(*newvec,dd->w);CHKERRQ(ierr);
      ierr = VecSetSizes(*newvec,dd->M*dd->w,PETSC_DETERMINE);CHKERRQ(ierr);
      ierr = VecSetType(*newvec,VECSEQ);CHKERRQ(ierr);
      /* the IS takes ownership of indices */
      ierr = ISCreateGeneral(PETSC_COMM_SELF,dd->w*dd->M,indices,PETSC_OWN_POINTER,&is);CHKERRQ(ierr);
    } else if (dir == DMDA_X) {
      /* column gp: w indices per grid row, stepping by M*w between rows */
      ierr = PetscMalloc(dd->w*dd->N*sizeof(PetscInt),&indices);CHKERRQ(ierr);
      indices[0] = dd->w*gp;
      for (j=1; j<dd->w; j++) indices[j] = indices[j-1] + 1;
      for (i=1; i<dd->N; i++) {
        indices[i*dd->w] = indices[i*dd->w-1] + dd->w*dd->M - dd->w + 1;
        for (j=1; j<dd->w; j++) indices[i*dd->w + j] = indices[i*dd->w + j - 1] + 1;
      }
      ierr = AOApplicationToPetsc(ao,dd->w*dd->N,indices);CHKERRQ(ierr);
      ierr = VecCreate(PETSC_COMM_SELF,newvec);CHKERRQ(ierr);
      ierr = VecSetBlockSize(*newvec,dd->w);CHKERRQ(ierr);
      ierr = VecSetSizes(*newvec,dd->N*dd->w,PETSC_DETERMINE);CHKERRQ(ierr);
      ierr = VecSetType(*newvec,VECSEQ);CHKERRQ(ierr);
      ierr = ISCreateGeneral(PETSC_COMM_SELF,dd->w*dd->N,indices,PETSC_OWN_POINTER,&is);CHKERRQ(ierr);
    } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Unknown DMDADirection");
  } else {
    /* all other ranks contribute an empty destination */
    ierr = VecCreateSeq(PETSC_COMM_SELF,0,newvec);CHKERRQ(ierr);
    ierr = ISCreateGeneral(PETSC_COMM_SELF,0,0,PETSC_COPY_VALUES,&is);CHKERRQ(ierr);
  }
  ierr = DMGetGlobalVector(da,&vec);CHKERRQ(ierr);
  ierr = VecScatterCreate(vec,is,*newvec,NULL,scatter);CHKERRQ(ierr);
  ierr = DMRestoreGlobalVector(da,&vec);CHKERRQ(ierr);
  ierr = ISDestroy(&is);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
int main(int argc,char **argv) { PetscErrorCode ierr; AppCtx ctx; TS ts; Vec tsrhs,UV; IS is; PetscInt I; PetscMPIInt rank; PetscInitialize(&argc,&argv,(char*)0,help); ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr); ierr = TSCreate(PETSC_COMM_WORLD,&ts);CHKERRQ(ierr); ierr = TSSetProblemType(ts,TS_NONLINEAR);CHKERRQ(ierr); ierr = TSSetType(ts,TSROSW);CHKERRQ(ierr); ierr = TSSetFromOptions(ts);CHKERRQ(ierr); ierr = VecCreateMPI(PETSC_COMM_WORLD,2,PETSC_DETERMINE,&tsrhs);CHKERRQ(ierr); ierr = VecCreateMPI(PETSC_COMM_WORLD,2,PETSC_DETERMINE,&UV);CHKERRQ(ierr); ierr = TSSetRHSFunction(ts,tsrhs,TSFunctionRHS,&ctx);CHKERRQ(ierr); ierr = TSSetIFunction(ts,NULL,TSFunctionI,&ctx);CHKERRQ(ierr); ctx.f = f; ctx.F = F; ierr = VecCreateMPI(PETSC_COMM_WORLD,1,PETSC_DETERMINE,&ctx.U);CHKERRQ(ierr); ierr = VecCreateMPI(PETSC_COMM_WORLD,1,PETSC_DETERMINE,&ctx.V);CHKERRQ(ierr); ierr = VecCreateMPI(PETSC_COMM_WORLD,1,PETSC_DETERMINE,&ctx.UF);CHKERRQ(ierr); ierr = VecCreateMPI(PETSC_COMM_WORLD,1,PETSC_DETERMINE,&ctx.VF);CHKERRQ(ierr); I = 2*rank; ierr = ISCreateGeneral(PETSC_COMM_WORLD,1,&I,PETSC_COPY_VALUES,&is);CHKERRQ(ierr); ierr = VecScatterCreate(ctx.U,NULL,UV,is,&ctx.scatterU);CHKERRQ(ierr); ierr = ISDestroy(&is);CHKERRQ(ierr); I = 2*rank + 1; ierr = ISCreateGeneral(PETSC_COMM_WORLD,1,&I,PETSC_COPY_VALUES,&is);CHKERRQ(ierr); ierr = VecScatterCreate(ctx.V,NULL,UV,is,&ctx.scatterV);CHKERRQ(ierr); ierr = ISDestroy(&is);CHKERRQ(ierr); ierr = VecSet(UV,1.0);CHKERRQ(ierr); ierr = TSSolve(ts,UV);CHKERRQ(ierr); ierr = VecDestroy(&tsrhs);CHKERRQ(ierr); ierr = VecDestroy(&UV);CHKERRQ(ierr); ierr = VecDestroy(&ctx.U);CHKERRQ(ierr); ierr = VecDestroy(&ctx.V);CHKERRQ(ierr); ierr = VecDestroy(&ctx.UF);CHKERRQ(ierr); ierr = VecDestroy(&ctx.VF);CHKERRQ(ierr); ierr = VecScatterDestroy(&ctx.scatterU);CHKERRQ(ierr); ierr = VecScatterDestroy(&ctx.scatterV);CHKERRQ(ierr); ierr = TSDestroy(&ts);CHKERRQ(ierr); PetscFinalize(); return 0; }
-n <length> : vector length\n\n"; #include <petscvec.h> #undef __FUNCT__ #define __FUNCT__ "main" int main(int argc,char **argv) { PetscErrorCode ierr; PetscInt n = 5,idx1[2] = {0,3},idx2[2] = {1,4}; PetscScalar one = 1.0,two = 2.0; Vec x,y; IS is1,is2; VecScatter ctx = 0; ierr = PetscInitialize(&argc,&argv,(char*)0,help);CHKERRQ(ierr); ierr = PetscOptionsGetInt(NULL,"-n",&n,NULL);CHKERRQ(ierr); /* create two vector */ ierr = VecCreateSeq(PETSC_COMM_SELF,n,&x);CHKERRQ(ierr); ierr = VecDuplicate(x,&y);CHKERRQ(ierr); /* create two index sets */ ierr = ISCreateGeneral(PETSC_COMM_SELF,2,idx1,PETSC_COPY_VALUES,&is1);CHKERRQ(ierr); ierr = ISCreateGeneral(PETSC_COMM_SELF,2,idx2,PETSC_COPY_VALUES,&is2);CHKERRQ(ierr); ierr = VecSet(x,one);CHKERRQ(ierr); ierr = VecSet(y,two);CHKERRQ(ierr); ierr = VecScatterCreate(x,is1,y,is2,&ctx);CHKERRQ(ierr); ierr = VecScatterBegin(ctx,x,y,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); ierr = VecScatterEnd(ctx,x,y,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); ierr = VecView(y,PETSC_VIEWER_STDOUT_SELF);CHKERRQ(ierr); ierr = VecScatterBegin(ctx,y,x,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); ierr = VecScatterEnd(ctx,y,x,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); ierr = VecScatterDestroy(&ctx);CHKERRQ(ierr); ierr = PetscPrintf(PETSC_COMM_SELF,"-------\n");CHKERRQ(ierr); ierr = VecView(x,PETSC_VIEWER_STDOUT_SELF);CHKERRQ(ierr); ierr = ISDestroy(&is1);CHKERRQ(ierr); ierr = ISDestroy(&is2);CHKERRQ(ierr); ierr = VecDestroy(&x);CHKERRQ(ierr); ierr = VecDestroy(&y);CHKERRQ(ierr); ierr = PetscFinalize(); return 0; }
/*
   Monitor - Prints the center value of the solution at every 10th time step
   (and at the final time), gathering the parallel vector onto each process.
*/
PetscErrorCode Monitor(TS ts,PetscInt step,PetscReal time,Vec global,void *ctx)
{
  PetscErrorCode ierr;
  VecScatter     scatter;
  IS             from,to;
  PetscInt       k,n,*idx,nsteps,maxsteps;
  Vec            tmp_vec;
  PetscScalar    *tmp;
  PetscReal      maxtime;
  Data           *data  = (Data*)ctx;
  PetscReal      tfinal = data->tfinal;

  PetscFunctionBeginUser;
  if (time > tfinal) PetscFunctionReturn(0);

  ierr = TSGetTimeStepNumber(ts,&nsteps);CHKERRQ(ierr);
  /* display output at selected time steps only */
  ierr = TSGetDuration(ts, &maxsteps, &maxtime);CHKERRQ(ierr);
  if (nsteps % 10 != 0 && time < maxtime) PetscFunctionReturn(0);

  /* gather the whole parallel vector into a sequential work vector */
  ierr = VecGetSize(global,&n);CHKERRQ(ierr);
  ierr = PetscMalloc1(n,&idx);CHKERRQ(ierr);
  for (k=0; k<n; k++) idx[k] = k;
  ierr = VecCreateSeq(PETSC_COMM_SELF,n,&tmp_vec);CHKERRQ(ierr);
  ierr = ISCreateGeneral(PETSC_COMM_SELF,n,idx,PETSC_COPY_VALUES,&from);CHKERRQ(ierr);
  ierr = ISCreateGeneral(PETSC_COMM_SELF,n,idx,PETSC_COPY_VALUES,&to);CHKERRQ(ierr);
  ierr = VecScatterCreate(global,from,tmp_vec,to,&scatter);CHKERRQ(ierr);
  ierr = VecScatterBegin(scatter,global,tmp_vec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd(scatter,global,tmp_vec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);

  ierr = VecGetArray(tmp_vec,&tmp);CHKERRQ(ierr);
  ierr = PetscPrintf(PETSC_COMM_WORLD,"At t[%D] =%14.2e u= %14.2e at the center \n",nsteps,(double)time,(double)PetscRealPart(tmp[n/2]));CHKERRQ(ierr);
  ierr = VecRestoreArray(tmp_vec,&tmp);CHKERRQ(ierr);

  ierr = PetscFree(idx);CHKERRQ(ierr);
  ierr = ISDestroy(&from);CHKERRQ(ierr);
  ierr = ISDestroy(&to);CHKERRQ(ierr);
  ierr = VecScatterDestroy(&scatter);CHKERRQ(ierr);
  ierr = VecDestroy(&tmp_vec);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/*
   GenerateSliceScatter - Given a DMDA, builds a VecScatter that delivers one
   z-plane (i=*, j=*, k=rank) of the global vector to each process.

   Note: this code assumes one degree of freedom per node; with several
   degrees of freedom per node use ISCreateBlock() instead of
   ISCreateGeneral().
*/
PetscErrorCode GenerateSliceScatter(DM da,VecScatter *scatter,Vec *vslice)
{
  PetscErrorCode ierr;
  AO             ao;
  PetscInt       M,N,P,nslice,*sliceindices,count,i,j;
  PetscMPIInt    rank;
  MPI_Comm       comm;
  Vec            vglobal;
  IS             isfrom,isto;

  ierr = PetscObjectGetComm((PetscObject)da,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = DMDAGetAO(da,&ao);CHKERRQ(ierr);
  ierr = DMDAGetInfo(da,0,&M,&N,&P,0,0,0,0,0,0,0,0,0);CHKERRQ(ierr);

  /* if there are more processes than z-planes, the extra processes receive
     an empty slice */
  nslice = (rank < P) ? M*N : 0;

  /* local vector holding this process's slice */
  ierr = VecCreateSeq(PETSC_COMM_SELF,nslice,vslice);CHKERRQ(ierr);
  ierr = DMCreateGlobalVector(da,&vglobal);CHKERRQ(ierr);

  /* indices of the slice in the "natural" global ordering; any subset of
     nodes listed in natural ordering would work the same way */
  ierr  = PetscMalloc1((nslice+1),&sliceindices);CHKERRQ(ierr);
  count = 0;
  if (rank < P) {
    for (j=0; j<N; j++) {
      for (i=0; i<M; i++) sliceindices[count++] = rank*M*N + j*M + i;
    }
  }
  /* convert the indices to the "PETSc" global ordering */
  ierr = AOApplicationToPetsc(ao,nslice,sliceindices);CHKERRQ(ierr);

  /* "from" selects from the global vector (and takes ownership of the
     indices); "to" gathers contiguously into the local slice */
  ierr = ISCreateGeneral(PETSC_COMM_SELF,nslice,sliceindices,PETSC_OWN_POINTER,&isfrom);CHKERRQ(ierr);
  ierr = ISCreateStride(PETSC_COMM_SELF,nslice,0,1,&isto);CHKERRQ(ierr);
  ierr = VecScatterCreate(vglobal,isfrom,*vslice,isto,scatter);CHKERRQ(ierr);
  ierr = ISDestroy(&isfrom);CHKERRQ(ierr);
  ierr = ISDestroy(&isto);CHKERRQ(ierr);
  return 0;
}
/*
   MatPartitioningHierarchical_DetermineDestination - For each local entry of
   the partitioning IS, computes the destination process rank (part - pstart)
   for partitions in the half-open range [pstart, pend); entries outside the
   range are marked -1.
*/
PetscErrorCode MatPartitioningHierarchical_DetermineDestination(MatPartitioning part, IS partitioning, PetscInt pstart, PetscInt pend, IS *destination)
{
  MPI_Comm       comm;
  PetscMPIInt    rank,size,target;
  PetscInt       plocalsize,*dest_indices,i;
  const PetscInt *part_indices;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* communicator */
  ierr = PetscObjectGetComm((PetscObject)part,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  /* note: SETERRQ returns, so the stray CHKERRQ calls after it were removed */
  if ((pend-pstart)>size) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"range [%D, %D] should be smaller than or equal to size %D",pstart,pend,size);
  if (pstart>pend) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP," pstart %D should be smaller than pend %D",pstart,pend);
  /* local size */
  ierr = ISGetLocalSize(partitioning,&plocalsize);CHKERRQ(ierr);
  ierr = PetscCalloc1(plocalsize,&dest_indices);CHKERRQ(ierr);
  ierr = ISGetIndices(partitioning,&part_indices);CHKERRQ(ierr);
  for (i=0; i<plocalsize; i++) {
    /* compute target */
    target = part_indices[i]-pstart;
    /* BUG FIX: pend is exclusive (see the (pend-pstart)>size check above); a
       partition equal to pend would map to rank "size", which does not exist,
       so ">=pend" must be out of range (the original tested ">pend") */
    if (part_indices[i]<pstart || part_indices[i]>=pend) target = -1;
    dest_indices[i] = target;
  }
  /* BUG FIX: restore the indices to pair with ISGetIndices() */
  ierr = ISRestoreIndices(partitioning,&part_indices);CHKERRQ(ierr);
  /* return destination back; the IS takes ownership of dest_indices */
  ierr = ISCreateGeneral(comm,plocalsize,dest_indices,PETSC_OWN_POINTER,destination);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
int main(int argc,char **argv) { PetscErrorCode ierr; PetscInt i,n,*indices; PetscInt rank,size; IS is,newis; ierr = PetscInitialize(&argc,&argv,(char*)0,help);if (ierr) return ierr; ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr); ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr); /* Create IS */ n = 4 + rank; ierr = PetscMalloc1(n,&indices);CHKERRQ(ierr); for (i=0; i<n; i++) indices[i] = rank + i; ierr = ISCreateGeneral(PETSC_COMM_WORLD,n,indices,PETSC_COPY_VALUES,&is);CHKERRQ(ierr); ierr = PetscFree(indices);CHKERRQ(ierr); /* Stick them together from all processors */ ierr = ISAllGather(is,&newis);CHKERRQ(ierr); if (!rank) { ierr = ISView(newis,PETSC_VIEWER_STDOUT_SELF);CHKERRQ(ierr); } ierr = ISDestroy(&newis);CHKERRQ(ierr); ierr = ISDestroy(&is);CHKERRQ(ierr); ierr = PetscFinalize(); return ierr; }
/* Gather the entries of (possibly parallel) vector x at the Npt global positions
   listed in Pos into the caller-supplied array ptValues, via a scatter to a
   sequential work vector. */
PetscErrorCode RetrieveVecPoints(Vec x, int Npt, int *Pos, double *ptValues)
{
  PetscErrorCode ierr;
  Vec            T;
  VecScatter     scatter;
  IS             from, to;
  PetscInt       i,*ix;

  PetscFunctionBegin; /* FIX: was missing, yet the function ended with PetscFunctionReturn */
  ierr = VecCreateSeq(PETSC_COMM_SELF, Npt, &T);CHKERRQ(ierr);
  ierr = ISCreateGeneral(PETSC_COMM_SELF,Npt, Pos,PETSC_COPY_VALUES, &from);CHKERRQ(ierr);
  ierr = ISCreateStride(PETSC_COMM_SELF,Npt,0,1, &to);CHKERRQ(ierr);
  ierr = VecScatterCreate(x,from,T,to,&scatter);CHKERRQ(ierr);
  ierr = VecScatterBegin(scatter,x,T,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd(scatter,x,T,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);

  /* FIX: use a PetscInt index array (VecGetValues expects PetscInt*, not int*)
     allocated with PetscMalloc1 instead of a C99 VLA */
  ierr = PetscMalloc1(Npt,&ix);CHKERRQ(ierr);
  for (i=0; i<Npt; i++) ix[i] = i;
  ierr = VecGetValues(T,Npt,ix,ptValues);CHKERRQ(ierr); /* FIX: return code was ignored */
  ierr = PetscFree(ix);CHKERRQ(ierr);

  ierr = ISDestroy(&from);CHKERRQ(ierr);
  ierr = ISDestroy(&to);CHKERRQ(ierr);
  ierr = VecScatterDestroy(&scatter);CHKERRQ(ierr);
  ierr = VecDestroy(&T);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
int main(int argc,char **argv) { PetscMPIInt rank,size; PetscInt i,j,n,cnt=0,rstart,rend,*indices; IS is,isc; PetscErrorCode ierr; ierr = PetscInitialize(&argc,&argv,(char*)0,help);if (ierr) return ierr; ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr); ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr); n = 3*size; /* Number of local indices, same on each process. */ rstart = 3*(size+2)*rank; /* start of local range */ rend = 3*(size+2)*(rank+1); /* end of local range */ ierr = PetscMalloc1(n,&indices);CHKERRQ(ierr); for (i=0; i<3; i++) { for (j=0; j<size; j++) indices[cnt++] = rstart+i*(size+2)+j; } if (cnt != n) SETERRQ(PETSC_COMM_SELF,1,"inconsistent count"); ierr = ISCreateGeneral(PETSC_COMM_WORLD,n,indices,PETSC_COPY_VALUES,&is);CHKERRQ(ierr); ierr = PetscFree(indices);CHKERRQ(ierr); ierr = ISComplement(is,rstart,rend,&isc);CHKERRQ(ierr); ierr = ISView(is,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); ierr = ISView(isc,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); ierr = ISDestroy(&is);CHKERRQ(ierr); ierr = ISDestroy(&isc);CHKERRQ(ierr); ierr = PetscFinalize(); return ierr; }
/* Set up the reduced-space line-search VI solver: run the common VI setup,
   seed the "previously inactive" index set with the full local index range,
   and pick a default (backtracking) line search if none is set. */
PetscErrorCode SNESSetUp_VINEWTONRSLS(SNES snes)
{
  PetscErrorCode    ierr;
  SNES_VINEWTONRSLS *vi = (SNES_VINEWTONRSLS*) snes->data;
  PetscInt          *indices;
  PetscInt          i,n,rstart,rend;
  SNESLineSearch    linesearch;

  PetscFunctionBegin;
  ierr = SNESSetUp_VI(snes);CHKERRQ(ierr);

  /* Set up previous active index set for the first snes solve
     vi->IS_inact_prev = 0,1,2,....N */
  ierr = VecGetOwnershipRange(snes->vec_sol,&rstart,&rend);CHKERRQ(ierr);
  ierr = VecGetLocalSize(snes->vec_sol,&n);CHKERRQ(ierr);
  /* PetscMalloc1 (as used elsewhere in this file) instead of the deprecated
     raw PetscMalloc(n*sizeof(PetscInt),...) */
  ierr = PetscMalloc1(n,&indices);CHKERRQ(ierr);
  for (i=0; i<n; i++) indices[i] = rstart + i;
  /* PetscObjectComm() accessor instead of reaching into ((PetscObject)snes)->comm;
     the IS takes ownership of 'indices' */
  ierr = ISCreateGeneral(PetscObjectComm((PetscObject)snes),n,indices,PETSC_OWN_POINTER,&vi->IS_inact_prev);CHKERRQ(ierr);

  /* set the line search functions */
  if (!snes->linesearch) {
    ierr = SNESGetSNESLineSearch(snes, &linesearch);CHKERRQ(ierr);
    ierr = SNESLineSearchSetType(linesearch, SNESLINESEARCHBT);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
/*@C ISExpandIndicesGeneral - convert the indices into non-block indices Input Parameters: + n - the length of the index set (not being used) . nkeys - expected number of keys when PETSC_USE_CTABLE (not being used) . bs - the size of block . imax - the number of index sets - is_in - the blocked array of index sets Output Parameter: . is_out - the non-blocked new index set Level: intermediate .seealso: ISCompressIndicesGeneral() @*/ PetscErrorCode ISExpandIndicesGeneral(PetscInt n,PetscInt nkeys,PetscInt bs,PetscInt imax,const IS is_in[],IS is_out[]) { PetscErrorCode ierr; PetscInt len,i,j,k,*nidx; const PetscInt *idx; PetscInt maxsz; PetscFunctionBegin; /* Check max size of is_in[] */ maxsz=0; for (i=0; i<imax; i++) { ierr = ISGetLocalSize(is_in[i],&len);CHKERRQ(ierr); if (len > maxsz) maxsz = len; } ierr = PetscMalloc1(maxsz*bs,&nidx);CHKERRQ(ierr); for (i=0; i<imax; i++) { ierr = ISGetLocalSize(is_in[i],&len);CHKERRQ(ierr); ierr = ISGetIndices(is_in[i],&idx);CHKERRQ(ierr); for (j=0; j<len ; ++j) { for (k=0; k<bs; k++) nidx[j*bs+k] = idx[j]*bs+k; } ierr = ISRestoreIndices(is_in[i],&idx);CHKERRQ(ierr); ierr = ISCreateGeneral(PETSC_COMM_SELF,len*bs,nidx,PETSC_COPY_VALUES,is_out+i);CHKERRQ(ierr); } ierr = PetscFree(nidx);CHKERRQ(ierr); PetscFunctionReturn(0); }
/*@
   ISSortPermutation - calculate the permutation of the indices into a nondecreasing order.

   Not collective.

   Input arguments:
+  f      - IS to sort
-  always - build the permutation even when f's indices are nondecreasing.

   Output argument:
.  h    - permutation or NULL, if f is nondecreasing and always == PETSC_TRUE.

   Note: Indices in f are unchanged. f[h[i]] is the i-th smallest f index.
   If always == PETSC_FALSE, an extra check is performed to see whether
   the f indices are nondecreasing. h is built on PETSC_COMM_SELF, since
   the permutation has a local meaning only.

   Level: advanced

.seealso ISLocalToGlobalMapping, ISSort(), PetscIntSortWithPermutation()
@*/
PetscErrorCode ISSortPermutation(IS f,PetscBool always,IS *h)
{
  PetscErrorCode ierr;
  const PetscInt *vals;
  PetscInt       len,*perm,k;
  PetscBool      ascending;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(f,IS_CLASSID,1);
  PetscValidPointer(h,3);
  ierr = ISGetLocalSize(f,&len);CHKERRQ(ierr);
  ierr = ISGetIndices(f,&vals);CHKERRQ(ierr);
  *h   = NULL;
  if (!always) {
    /* fast path: with strictly increasing indices no permutation is produced at all */
    ascending = PETSC_TRUE;
    for (k = 1; k < len; ++k) {
      if (vals[k] <= vals[k-1]) {ascending = PETSC_FALSE; break;}
    }
    if (ascending) {
      ierr = ISRestoreIndices(f,&vals);CHKERRQ(ierr);
      PetscFunctionReturn(0);
    }
  }
  /* start from the identity permutation and sort it by the f values */
  ierr = PetscMalloc1(len,&perm);CHKERRQ(ierr);
  for (k = 0; k < len; ++k) perm[k] = k;
  ierr = PetscSortIntWithPermutation(len,vals,perm);CHKERRQ(ierr);
  ierr = ISRestoreIndices(f,&vals);CHKERRQ(ierr);
  /* the IS takes ownership of perm */
  ierr = ISCreateGeneral(PETSC_COMM_SELF,len,perm,PETSC_OWN_POINTER,h);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/*@ ISEmbed - embed IS a into IS b by finding the locations in b that have the same indices as in a. If c is the IS of these locations, we have a = b*c, regarded as a composition of the corresponding ISLocalToGlobalMaps. Not collective. Input arguments: + a - IS to embed . b - IS to embed into - drop - flag indicating whether to drop a's indices that are not in b. Output arguments: . c - local embedding indices Note: If some of a's global indices are not among b's indices the embedding is impossible. The local indices of a corresponding to these global indices are either mapped to -1 (if !drop) or are omitted (if drop). In the former case the size of c is that same as that of a, in the latter case c's size may be smaller. The resulting IS is sequential, since the index substition it encodes is purely local. Level: advanced .seealso ISLocalToGlobalMapping @*/ PetscErrorCode ISEmbed(IS a, IS b, PetscBool drop, IS *c) { PetscErrorCode ierr; ISLocalToGlobalMapping ltog; ISGlobalToLocalMappingType gtoltype = IS_GTOLM_DROP; PetscInt alen, clen, *cindices, *cindices2; const PetscInt *aindices; PetscFunctionBegin; PetscValidHeaderSpecific(a, IS_CLASSID, 1); PetscValidHeaderSpecific(b, IS_CLASSID, 2); PetscValidPointer(c,4); ierr = ISLocalToGlobalMappingCreateIS(b, <og);CHKERRQ(ierr); ierr = ISGetLocalSize(a, &alen);CHKERRQ(ierr); ierr = ISGetIndices(a, &aindices);CHKERRQ(ierr); ierr = PetscMalloc1(alen, &cindices);CHKERRQ(ierr); if (!drop) gtoltype = IS_GTOLM_MASK; ierr = ISGlobalToLocalMappingApply(ltog,gtoltype,alen,aindices,&clen,cindices);CHKERRQ(ierr); ierr = ISLocalToGlobalMappingDestroy(<og);CHKERRQ(ierr); if (clen != alen) { cindices2 = cindices; ierr = PetscMalloc1(clen, &cindices);CHKERRQ(ierr); ierr = PetscMemcpy(cindices,cindices2,clen*sizeof(PetscInt));CHKERRQ(ierr); ierr = PetscFree(cindices2);CHKERRQ(ierr); } ierr = ISCreateGeneral(PETSC_COMM_SELF,clen,cindices,PETSC_OWN_POINTER,c);CHKERRQ(ierr); PetscFunctionReturn(0); }
/*@
   ISConcatenate - Forms a new IS by locally concatenating the indices from an IS list without reordering.

   Collective on comm.

   Input Parameter:
+  comm    - communicator of the concatenated IS.
.  len     - size of islist array (nonnegative)
-  islist  - array of index sets

   Output Parameters:
.  isout   - The concatenated index set; empty, if len == 0.

   Notes: The semantics of calling this on comm imply that the comms of the members if islist also contain this rank.

   Level: intermediate

.seealso: ISDifference(), ISSum(), ISExpand()

   Concepts: index sets^concatenation
   Concepts: IS^concatenation

@*/
PetscErrorCode ISConcatenate(MPI_Comm comm, PetscInt len, const IS islist[], IS *isout)
{
  PetscErrorCode ierr;
  PetscInt       j,sz,total,*buf;
  const PetscInt *src;

  PetscFunctionBegin;
  PetscValidPointer(islist,2);
#if defined(PETSC_USE_DEBUG)
  for (j = 0; j < len; ++j) PetscValidHeaderSpecific(islist[j], IS_CLASSID, 1);
#endif
  PetscValidPointer(isout, 4);
  if (!len) {
    /* an empty list yields an empty IS */
    ierr = ISCreateStride(comm, 0,0,0, isout);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }
  if (len < 0) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Negative array length: %D", len);
  /* first pass: total local length of the concatenation */
  total = 0;
  for (j = 0; j < len; ++j) {
    ierr   = ISGetLocalSize(islist[j], &sz);CHKERRQ(ierr);
    total += sz;
  }
  ierr = PetscMalloc1(total, &buf);CHKERRQ(ierr);
  /* second pass: copy each member's indices, in order, at the running offset */
  total = 0;
  for (j = 0; j < len; ++j) {
    ierr   = ISGetLocalSize(islist[j], &sz);CHKERRQ(ierr);
    ierr   = ISGetIndices(islist[j], &src);CHKERRQ(ierr);
    ierr   = PetscMemcpy(buf+total, src, sizeof(PetscInt)*sz);CHKERRQ(ierr);
    ierr   = ISRestoreIndices(islist[j], &src);CHKERRQ(ierr);
    total += sz;
  }
  /* the IS takes ownership of buf */
  ierr = ISCreateGeneral(comm, total, buf, PETSC_OWN_POINTER, isout);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}