/*
   MatSetLocalToGlobalMapping_IS - Attaches the local-to-global mapping to a MATIS
   matrix and (re)builds its internal objects: the sequential local matrix is->A,
   the local work vectors is->x/is->y, and the global-to-local scatter is->ctx.

   Input Parameters:
+  A        - the MATIS matrix
.  rmapping - row local-to-global mapping
-  cmapping - column local-to-global mapping (must be identical to rmapping)

   Notes:
   Calling this routine a second time destroys the objects created by the first
   call before recreating them.

   BUGFIX: the previous version destroyed is->mapping inside the cleanup branch,
   BEFORE referencing rmapping. If the routine was re-called with the same mapping
   and the matrix held its only reference, the destroy freed the mapping and the
   subsequent PetscObjectReference() touched freed memory. The mapping is now
   released only after the incoming mapping has been referenced (which also makes
   the rmapping == is->mapping case safe).
*/
PetscErrorCode MatSetLocalToGlobalMapping_IS(Mat A,ISLocalToGlobalMapping rmapping,ISLocalToGlobalMapping cmapping)
{
  PetscErrorCode ierr;
  PetscInt       n,bs;
  Mat_IS         *is = (Mat_IS*)A->data;
  IS             from,to;
  Vec            global;

  PetscFunctionBegin;
  PetscCheckSameComm(A,1,rmapping,2);
  if (rmapping != cmapping) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MATIS requires the row and column mappings to be identical");
  if (is->mapping) { /* destroy the objects created by a previous call; the mapping itself
                        is released below, after the new one has been referenced */
    ierr = VecDestroy(&is->x);CHKERRQ(ierr);
    ierr = VecDestroy(&is->y);CHKERRQ(ierr);
    ierr = VecScatterDestroy(&is->ctx);CHKERRQ(ierr);
    ierr = MatDestroy(&is->A);CHKERRQ(ierr);
  }
  /* reference before destroy so that rmapping == is->mapping cannot dangle */
  ierr = PetscObjectReference((PetscObject)rmapping);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingDestroy(&is->mapping);CHKERRQ(ierr);
  is->mapping = rmapping;
  /*
    ierr = PetscLayoutSetISLocalToGlobalMapping(A->rmap,rmapping);CHKERRQ(ierr);
    ierr = PetscLayoutSetISLocalToGlobalMapping(A->cmap,cmapping);CHKERRQ(ierr);
  */

  /* Create the local matrix A: a sequential matrix of the (possibly overlapping) local size */
  ierr = ISLocalToGlobalMappingGetSize(rmapping,&n);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingGetBlockSize(rmapping,&bs);CHKERRQ(ierr);
  ierr = MatCreate(PETSC_COMM_SELF,&is->A);CHKERRQ(ierr);
  if (bs > 1) { /* blocked mapping: default to a blocked local format */
    ierr = MatSetType(is->A,MATSEQBAIJ);CHKERRQ(ierr);
  } else {
    ierr = MatSetType(is->A,MATSEQAIJ);CHKERRQ(ierr);
  }
  ierr = MatSetSizes(is->A,n,n,n,n);CHKERRQ(ierr);
  ierr = MatSetBlockSize(is->A,bs);CHKERRQ(ierr);
  ierr = MatSetOptionsPrefix(is->A,((PetscObject)A)->prefix);CHKERRQ(ierr);
  ierr = MatAppendOptionsPrefix(is->A,"is_");CHKERRQ(ierr);
  ierr = MatSetFromOptions(is->A);CHKERRQ(ierr);

  /* Create the local work vectors */
  ierr = VecCreate(PETSC_COMM_SELF,&is->x);CHKERRQ(ierr);
  ierr = VecSetBlockSize(is->x,bs);CHKERRQ(ierr);
  ierr = VecSetSizes(is->x,n,n);CHKERRQ(ierr);
  ierr = VecSetOptionsPrefix(is->x,((PetscObject)A)->prefix);CHKERRQ(ierr);
  ierr = VecAppendOptionsPrefix(is->x,"is_");CHKERRQ(ierr);
  ierr = VecSetFromOptions(is->x);CHKERRQ(ierr);
  ierr = VecDuplicate(is->x,&is->y);CHKERRQ(ierr);

  /* setup the global to local scatter: local position i <- global index mapping(i) */
  ierr = ISCreateStride(PETSC_COMM_SELF,n,0,1,&to);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingApplyIS(rmapping,to,&from);CHKERRQ(ierr);
  ierr = MatCreateVecs(A,&global,NULL);CHKERRQ(ierr);
  ierr = VecScatterCreate(global,from,is->x,to,&is->ctx);CHKERRQ(ierr);
  ierr = VecDestroy(&global);CHKERRQ(ierr);
  ierr = ISDestroy(&to);CHKERRQ(ierr);
  ierr = ISDestroy(&from);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/*@C
    DMCompositeGetISLocalToGlobalMappings - gets an ISLocalToGlobalMapping for each DM in the DMComposite, maps to the composite global space

    Collective on DM

    Input Parameter:
.    dm - the packer object

    Output Parameters:
.    ltogs - the individual mappings for each packed vector. Note that this includes
           all the ghost points that individual ghosted DMDA's may have.

    Level: advanced

    Notes:
       Each entry of ltogs should be destroyed with ISLocalToGlobalMappingDestroy(), the ltogs array should be freed with PetscFree().

.seealso DMDestroy(), DMCompositeAddDM(), DMCreateGlobalVector(),
         DMCompositeGather(), DMCompositeCreate(), DMCompositeGetAccess(), DMCompositeScatter(),
         DMCompositeGetLocalVectors(), DMCompositeRestoreLocalVectors(),DMCompositeGetEntries()

@*/
PetscErrorCode DMCompositeGetISLocalToGlobalMappings(DM dm,ISLocalToGlobalMapping **ltogs)
{
  PetscErrorCode         ierr;
  PetscInt               i,*idx,n,cnt;
  struct DMCompositeLink *next;
  DM_Composite           *com = (DM_Composite*)dm->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm,DM_CLASSID,1);
  ierr = DMSetUp(dm);CHKERRQ(ierr);
  ierr = PetscMalloc((com->nDM)*sizeof(ISLocalToGlobalMapping),ltogs);CHKERRQ(ierr);
  next = com->next;

  /* loop over packed objects, handling one at at time */
  cnt = 0;
  while (next) {
    ISLocalToGlobalMapping ltog;
    PetscMPIInt            size;
    const PetscInt         *suboff,*indices;
    Vec                    global;

    /* Get sub-DM global indices for each local dof */
    ierr = DMGetLocalToGlobalMapping(next->dm,&ltog);CHKERRQ(ierr);
    ierr = ISLocalToGlobalMappingGetSize(ltog,&n);CHKERRQ(ierr);
    ierr = ISLocalToGlobalMappingGetIndices(ltog,&indices);CHKERRQ(ierr);
    ierr = PetscMalloc(n*sizeof(PetscInt),&idx);CHKERRQ(ierr);

    /* Get the offsets for the sub-DM global vector */
    ierr = DMGetGlobalVector(next->dm,&global);CHKERRQ(ierr);
    ierr = VecGetOwnershipRanges(global,&suboff);CHKERRQ(ierr);
    ierr = MPI_Comm_size(((PetscObject)global)->comm,&size);CHKERRQ(ierr);

    /* Shift the sub-DM definition of the global space to the composite global space */
    for (i=0; i<n; i++) {
      PetscInt subi = indices[i],lo = 0,hi = size,t;
      /* Binary search to find which rank owns subi in the sub-DM's layout */
      while (hi-lo > 1) {
        t = lo + (hi-lo)/2;
        if (suboff[t] > subi) hi = t;
        else                  lo = t;
      }
      /* local offset within owner rank's sub-DM chunk, re-based to the composite start for that rank */
      idx[i] = subi - suboff[lo] + next->grstarts[lo];
    }
    ierr = ISLocalToGlobalMappingRestoreIndices(ltog,&indices);CHKERRQ(ierr);
    /* PETSC_OWN_POINTER: the new mapping takes ownership of idx */
    ierr = ISLocalToGlobalMappingCreate(((PetscObject)dm)->comm,n,idx,PETSC_OWN_POINTER,&(*ltogs)[cnt]);CHKERRQ(ierr);
    ierr = DMRestoreGlobalVector(next->dm,&global);CHKERRQ(ierr);
    next = next->next;
    cnt++;
  }
  PetscFunctionReturn(0);
}
/*
   CreateGhostedVector - builds a ghosted parallel PETSc vector for the grid's
   cell decomposition.

   The decomposition's local-to-global mapping lists all locally visible cells;
   the first (cellEnd - cellStart) entries are assumed to be the owned cells and
   the remaining (n - nLoc) entries the ghost cells, whose global indices are
   passed to VecCreateGhost(). (Assumption inferred from the idx + nLoc offset —
   TODO confirm against the Decomposition builder.)

   Note: PETSc error codes are deliberately not checked here, matching the
   surrounding helper style; a leftover debug print of the ghost count was removed.
*/
Vec CreateGhostedVector(Grid& g)
{
  auto dist = boost::any_cast<const Decomposition&>(g.userData());

  int n;
  ISLocalToGlobalMappingGetSize(dist.locToGlobMap, &n);
  const PetscInt *idx;
  ISLocalToGlobalMappingGetIndices(dist.locToGlobMap, &idx);

  Vec v;
  PetscInt nLoc = dist.cellEnd - dist.cellStart;   /* number of owned cells */
  /* ghost entries start at idx + nLoc; there are n - nLoc of them */
  VecCreateGhost(PETSC_COMM_WORLD, g.cells().size(), PETSC_DECIDE, n - nLoc, idx + nLoc, &v);
  ISLocalToGlobalMappingRestoreIndices(dist.locToGlobMap, &idx);
  return v;
}
/*
   PCBDDCGraphInit - Initializes a PCBDDCGraph from a local-to-global mapping.

   Input Parameters:
+  graph    - the graph structure to initialize (must not have been initialized before)
.  l2gmap   - local-to-global mapping describing the local dofs; the graph keeps a reference
.  N        - global number of vertices
-  maxcount - maximum multiplicity allowed for a connected component

   Notes:
   Allocates the per-vertex work arrays (count, neighbours_set, subset, which_dof,
   special_dof) sized by the local mapping size and zeroes/initializes them.
   Errors with PETSC_ERR_PLIB if the graph was already initialized
   (detected via a nonzero nvtxs_global).
*/
PetscErrorCode PCBDDCGraphInit(PCBDDCGraph graph, ISLocalToGlobalMapping l2gmap, PetscInt N, PetscInt maxcount)
{
  PetscInt       n;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidPointer(graph,1);
  PetscValidHeaderSpecific(l2gmap,IS_LTOGM_CLASSID,2);
  PetscValidLogicalCollectiveInt(l2gmap,N,3);
  PetscValidLogicalCollectiveInt(l2gmap,maxcount,4);
  /* raise an error if already allocated */
  if (graph->nvtxs_global) SETERRQ(PetscObjectComm((PetscObject)l2gmap),PETSC_ERR_PLIB,"BDDCGraph already initialized");
  /* set number of vertices; the graph takes a reference on the mapping */
  ierr = PetscObjectReference((PetscObject)l2gmap);CHKERRQ(ierr);
  graph->l2gmap = l2gmap;
  ierr = ISLocalToGlobalMappingGetSize(l2gmap,&n);CHKERRQ(ierr);
  graph->nvtxs = n;
  graph->nvtxs_global = N;
  /* allocate used space: one entry per local vertex in each of the five arrays */
  ierr = PetscBTCreate(graph->nvtxs,&graph->touched);CHKERRQ(ierr);
  ierr = PetscMalloc5(graph->nvtxs,&graph->count,
                      graph->nvtxs,&graph->neighbours_set,
                      graph->nvtxs,&graph->subset,
                      graph->nvtxs,&graph->which_dof,
                      graph->nvtxs,&graph->special_dof);CHKERRQ(ierr);
  /* zeroes memory */
  ierr = PetscMemzero(graph->count,graph->nvtxs*sizeof(PetscInt));CHKERRQ(ierr);
  ierr = PetscMemzero(graph->subset,graph->nvtxs*sizeof(PetscInt));CHKERRQ(ierr);
  /* use -1 as a default value for which_dof array (n is reused as a loop index here) */
  for (n=0;n<graph->nvtxs;n++) graph->which_dof[n] = -1;
  ierr = PetscMemzero(graph->special_dof,graph->nvtxs*sizeof(PetscInt));CHKERRQ(ierr);
  /* zeroes first pointer to neighbour set */
  if (graph->nvtxs) {
    graph->neighbours_set[0] = 0;
  }
  /* zeroes workspace for values of ncc */
  graph->subset_ncc = 0;
  graph->subset_ref_node = 0;
  /* maxcount for cc */
  graph->maxcount = maxcount;
  PetscFunctionReturn(0);
}
/*
   PCISSetUp - Common setup for preconditioners based on PC_IS (Neumann-Neumann family).

   Input Parameter:
.  pc - the preconditioner context; pc->pmat must be of type MATIS

   Builds: interior (I) / interface (B) index sets in local and global numbering,
   the submatrices A_II, A_IB, A_BI, A_BB of the local MATIS matrix, work vectors,
   the N<->B and global<->{D,B} scatters, the partition-of-unity scaling vector D,
   and (optionally) the local Dirichlet and Neumann KSP solvers.

   BUGFIXES vs previous revision:
   - two option-name arguments had been corrupted by an HTML-entity mangling
     ("&not_..." rendered as a logical-not sign); restored to &not_damp_floating
     and &not_remove_nullspace_floating.
   - in the fixed-subdomain damping branch, PCFactorSetShiftAmount() received
     floating_factor while fixed_factor (read from -pc_is_damp_fixed) was never
     used; it now applies fixed_factor as intended.
*/
PetscErrorCode PCISSetUp(PC pc)
{
  PC_IS          *pcis = (PC_IS*)(pc->data);
  Mat_IS         *matis;
  PetscErrorCode ierr;
  PetscBool      flg,issbaij;
  Vec            counter;

  PetscFunctionBegin;
  ierr = PetscObjectTypeCompare((PetscObject)pc->pmat,MATIS,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_WRONG,"Preconditioner type of Neumann Neumman requires matrix of type MATIS");
  matis = (Mat_IS*)pc->pmat->data;

  pcis->pure_neumann = matis->pure_neumann;

  /* get info on mapping (reference before destroying any previous one) */
  ierr = PetscObjectReference((PetscObject)matis->mapping);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingDestroy(&pcis->mapping);CHKERRQ(ierr);
  pcis->mapping = matis->mapping;
  ierr = ISLocalToGlobalMappingGetSize(pcis->mapping,&pcis->n);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingGetInfo(pcis->mapping,&(pcis->n_neigh),&(pcis->neigh),&(pcis->n_shared),&(pcis->shared));CHKERRQ(ierr);

  /* Creating local and global index sets for interior and interface nodes. */
  {
    PetscInt n_I;
    PetscInt *idx_I_local,*idx_B_local,*idx_I_global,*idx_B_global;
    PetscInt *array;
    PetscInt i,j;

    /* Identifying interior and interface nodes, in local numbering:
       a node shared with any neighbour gets a nonzero count and is "interface" */
    ierr = PetscMalloc1(pcis->n,&array);CHKERRQ(ierr);
    ierr = PetscMemzero(array,pcis->n*sizeof(PetscInt));CHKERRQ(ierr);
    for (i=0;i<pcis->n_neigh;i++)
      for (j=0;j<pcis->n_shared[i];j++)
        array[pcis->shared[i][j]] += 1;

    ierr = PetscMalloc1(pcis->n,&idx_I_local);CHKERRQ(ierr);
    ierr = PetscMalloc1(pcis->n,&idx_B_local);CHKERRQ(ierr);
    for (i=0, pcis->n_B=0, n_I=0; i<pcis->n; i++) {
      if (!array[i]) {
        idx_I_local[n_I] = i;
        n_I++;
      } else {
        idx_B_local[pcis->n_B] = i;
        pcis->n_B++;
      }
    }

    /* Getting the global numbering */
    idx_B_global = idx_I_local + n_I;       /* Just avoiding allocating extra memory, since we have vacant space */
    idx_I_global = idx_B_local + pcis->n_B;
    ierr = ISLocalToGlobalMappingApply(pcis->mapping,pcis->n_B,idx_B_local,idx_B_global);CHKERRQ(ierr);
    ierr = ISLocalToGlobalMappingApply(pcis->mapping,n_I,idx_I_local,idx_I_global);CHKERRQ(ierr);

    /* Creating the index sets. */
    ierr = ISCreateGeneral(PETSC_COMM_SELF,pcis->n_B,idx_B_local,PETSC_COPY_VALUES,&pcis->is_B_local);CHKERRQ(ierr);
    ierr = ISCreateGeneral(PETSC_COMM_SELF,pcis->n_B,idx_B_global,PETSC_COPY_VALUES,&pcis->is_B_global);CHKERRQ(ierr);
    ierr = ISCreateGeneral(PETSC_COMM_SELF,n_I,idx_I_local,PETSC_COPY_VALUES,&pcis->is_I_local);CHKERRQ(ierr);
    ierr = ISCreateGeneral(PETSC_COMM_SELF,n_I,idx_I_global,PETSC_COPY_VALUES,&pcis->is_I_global);CHKERRQ(ierr);

    /* Freeing memory and restoring arrays */
    ierr = PetscFree(idx_B_local);CHKERRQ(ierr);
    ierr = PetscFree(idx_I_local);CHKERRQ(ierr);
    ierr = PetscFree(array);CHKERRQ(ierr);
  }

  /*
    Extracting the blocks A_II, A_BI, A_IB and A_BB from A. If the numbering
    is such that interior nodes come first than the interface ones, we have

        [           |        ]
        [    A_II   |  A_IB  ]
    A = [           |        ]
        [-----------+--------]
        [    A_BI   |  A_BB  ]
  */
  ierr = MatGetSubMatrix(matis->A,pcis->is_I_local,pcis->is_I_local,MAT_INITIAL_MATRIX,&pcis->A_II);CHKERRQ(ierr);
  ierr = MatGetSubMatrix(matis->A,pcis->is_B_local,pcis->is_B_local,MAT_INITIAL_MATRIX,&pcis->A_BB);CHKERRQ(ierr);
  ierr = PetscObjectTypeCompare((PetscObject)matis->A,MATSEQSBAIJ,&issbaij);CHKERRQ(ierr);
  if (!issbaij) {
    ierr = MatGetSubMatrix(matis->A,pcis->is_I_local,pcis->is_B_local,MAT_INITIAL_MATRIX,&pcis->A_IB);CHKERRQ(ierr);
    ierr = MatGetSubMatrix(matis->A,pcis->is_B_local,pcis->is_I_local,MAT_INITIAL_MATRIX,&pcis->A_BI);CHKERRQ(ierr);
  } else {
    /* symmetric storage only keeps the upper triangle; convert to extract the off-diagonal blocks */
    Mat newmat;
    ierr = MatConvert(matis->A,MATSEQBAIJ,MAT_INITIAL_MATRIX,&newmat);CHKERRQ(ierr);
    ierr = MatGetSubMatrix(newmat,pcis->is_I_local,pcis->is_B_local,MAT_INITIAL_MATRIX,&pcis->A_IB);CHKERRQ(ierr);
    ierr = MatGetSubMatrix(newmat,pcis->is_B_local,pcis->is_I_local,MAT_INITIAL_MATRIX,&pcis->A_BI);CHKERRQ(ierr);
    ierr = MatDestroy(&newmat);CHKERRQ(ierr);
  }

  /* Creating work vectors and arrays */
  ierr = VecDuplicate(matis->x,&pcis->vec1_N);CHKERRQ(ierr);
  ierr = VecDuplicate(pcis->vec1_N,&pcis->vec2_N);CHKERRQ(ierr);
  ierr = VecCreateSeq(PETSC_COMM_SELF,pcis->n-pcis->n_B,&pcis->vec1_D);CHKERRQ(ierr);
  ierr = VecDuplicate(pcis->vec1_D,&pcis->vec2_D);CHKERRQ(ierr);
  ierr = VecDuplicate(pcis->vec1_D,&pcis->vec3_D);CHKERRQ(ierr);
  ierr = VecDuplicate(pcis->vec1_D,&pcis->vec4_D);CHKERRQ(ierr);
  ierr = VecCreateSeq(PETSC_COMM_SELF,pcis->n_B,&pcis->vec1_B);CHKERRQ(ierr);
  ierr = VecDuplicate(pcis->vec1_B,&pcis->vec2_B);CHKERRQ(ierr);
  ierr = VecDuplicate(pcis->vec1_B,&pcis->vec3_B);CHKERRQ(ierr);
  ierr = MatCreateVecs(pc->pmat,&pcis->vec1_global,0);CHKERRQ(ierr);
  ierr = PetscMalloc1(pcis->n,&pcis->work_N);CHKERRQ(ierr);

  /* Creating the scatter contexts */
  ierr = VecScatterCreate(pcis->vec1_global,pcis->is_I_global,pcis->vec1_D,(IS)0,&pcis->global_to_D);CHKERRQ(ierr);
  ierr = VecScatterCreate(pcis->vec1_N,pcis->is_B_local,pcis->vec1_B,(IS)0,&pcis->N_to_B);CHKERRQ(ierr);
  ierr = VecScatterCreate(pcis->vec1_global,pcis->is_B_global,pcis->vec1_B,(IS)0,&pcis->global_to_B);CHKERRQ(ierr);

  /* Creating scaling "matrix" D */
  ierr = PetscOptionsGetBool(((PetscObject)pc)->prefix,"-pc_is_use_stiffness_scaling",&pcis->use_stiffness_scaling,NULL);CHKERRQ(ierr);
  if (!pcis->D) {
    ierr = VecDuplicate(pcis->vec1_B,&pcis->D);CHKERRQ(ierr);
    if (!pcis->use_stiffness_scaling) {
      ierr = VecSet(pcis->D,pcis->scaling_factor);CHKERRQ(ierr);
    } else {
      /* stiffness scaling: weight by the diagonal of the local operator on the interface */
      ierr = MatGetDiagonal(matis->A,pcis->vec1_N);CHKERRQ(ierr);
      ierr = VecScatterBegin(pcis->N_to_B,pcis->vec1_N,pcis->D,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
      ierr = VecScatterEnd  (pcis->N_to_B,pcis->vec1_N,pcis->D,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
    }
  }
  /* normalize D into a partition of unity: divide by the sum of contributions over sharing subdomains */
  ierr = VecCopy(pcis->D,pcis->vec1_B);CHKERRQ(ierr);
  ierr = MatCreateVecs(pc->pmat,&counter,0);CHKERRQ(ierr); /* temporary auxiliar vector */
  ierr = VecSet(counter,0.0);CHKERRQ(ierr);
  ierr = VecScatterBegin(pcis->global_to_B,pcis->vec1_B,counter,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  ierr = VecScatterEnd  (pcis->global_to_B,pcis->vec1_B,counter,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  ierr = VecScatterBegin(pcis->global_to_B,counter,pcis->vec1_B,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd  (pcis->global_to_B,counter,pcis->vec1_B,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecPointwiseDivide(pcis->D,pcis->D,pcis->vec1_B);CHKERRQ(ierr);
  ierr = VecDestroy(&counter);CHKERRQ(ierr);

  /* See historical note 01, at the bottom of this file. */

  /* Creating the KSP contexts for the local Dirichlet and Neumann problems. */
  if (pcis->computesolvers) {
    PC pc_ctx;
    /* Dirichlet */
    ierr = KSPCreate(PETSC_COMM_SELF,&pcis->ksp_D);CHKERRQ(ierr);
    ierr = PetscObjectIncrementTabLevel((PetscObject)pcis->ksp_D,(PetscObject)pc,1);CHKERRQ(ierr);
    ierr = KSPSetOperators(pcis->ksp_D,pcis->A_II,pcis->A_II);CHKERRQ(ierr);
    ierr = KSPSetOptionsPrefix(pcis->ksp_D,"is_localD_");CHKERRQ(ierr);
    ierr = KSPGetPC(pcis->ksp_D,&pc_ctx);CHKERRQ(ierr);
    ierr = PCSetType(pc_ctx,PCLU);CHKERRQ(ierr);
    ierr = KSPSetType(pcis->ksp_D,KSPPREONLY);CHKERRQ(ierr);
    ierr = KSPSetFromOptions(pcis->ksp_D);CHKERRQ(ierr);
    /* the vectors in the following line are dummy arguments, just telling the KSP the vector size. Values are not used */
    ierr = KSPSetUp(pcis->ksp_D);CHKERRQ(ierr);

    /* Neumann */
    ierr = KSPCreate(PETSC_COMM_SELF,&pcis->ksp_N);CHKERRQ(ierr);
    ierr = PetscObjectIncrementTabLevel((PetscObject)pcis->ksp_N,(PetscObject)pc,1);CHKERRQ(ierr);
    ierr = KSPSetOperators(pcis->ksp_N,matis->A,matis->A);CHKERRQ(ierr);
    ierr = KSPSetOptionsPrefix(pcis->ksp_N,"is_localN_");CHKERRQ(ierr);
    ierr = KSPGetPC(pcis->ksp_N,&pc_ctx);CHKERRQ(ierr);
    ierr = PCSetType(pc_ctx,PCLU);CHKERRQ(ierr);
    ierr = KSPSetType(pcis->ksp_N,KSPPREONLY);CHKERRQ(ierr);
    ierr = KSPSetFromOptions(pcis->ksp_N);CHKERRQ(ierr);
    {
      PetscBool damp_fixed                    = PETSC_FALSE,
                remove_nullspace_fixed        = PETSC_FALSE,
                set_damping_factor_floating   = PETSC_FALSE,
                not_damp_floating             = PETSC_FALSE,
                not_remove_nullspace_floating = PETSC_FALSE;
      PetscReal fixed_factor,floating_factor;

      ierr = PetscOptionsGetReal(((PetscObject)pc_ctx)->prefix,"-pc_is_damp_fixed",&fixed_factor,&damp_fixed);CHKERRQ(ierr);
      if (!damp_fixed) fixed_factor = 0.0;
      ierr = PetscOptionsGetBool(((PetscObject)pc_ctx)->prefix,"-pc_is_damp_fixed",&damp_fixed,NULL);CHKERRQ(ierr);

      ierr = PetscOptionsGetBool(((PetscObject)pc_ctx)->prefix,"-pc_is_remove_nullspace_fixed",&remove_nullspace_fixed,NULL);CHKERRQ(ierr);

      ierr = PetscOptionsGetReal(((PetscObject)pc_ctx)->prefix,"-pc_is_set_damping_factor_floating",&floating_factor,&set_damping_factor_floating);CHKERRQ(ierr);
      if (!set_damping_factor_floating) floating_factor = 0.0;
      ierr = PetscOptionsGetBool(((PetscObject)pc_ctx)->prefix,"-pc_is_set_damping_factor_floating",&set_damping_factor_floating,NULL);CHKERRQ(ierr);
      if (!set_damping_factor_floating) floating_factor = 1.e-12;

      ierr = PetscOptionsGetBool(((PetscObject)pc_ctx)->prefix,"-pc_is_not_damp_floating",&not_damp_floating,NULL);CHKERRQ(ierr);
      ierr = PetscOptionsGetBool(((PetscObject)pc_ctx)->prefix,"-pc_is_not_remove_nullspace_floating",&not_remove_nullspace_floating,NULL);CHKERRQ(ierr);

      if (pcis->pure_neumann) {  /* floating subdomain */
        if (!(not_damp_floating)) {
          ierr = PCFactorSetShiftType(pc_ctx,MAT_SHIFT_NONZERO);CHKERRQ(ierr);
          ierr = PCFactorSetShiftAmount(pc_ctx,floating_factor);CHKERRQ(ierr);
        }
        if (!(not_remove_nullspace_floating)) {
          MatNullSpace nullsp;
          ierr = MatNullSpaceCreate(PETSC_COMM_SELF,PETSC_TRUE,0,NULL,&nullsp);CHKERRQ(ierr);
          ierr = KSPSetNullSpace(pcis->ksp_N,nullsp);CHKERRQ(ierr);
          ierr = MatNullSpaceDestroy(&nullsp);CHKERRQ(ierr);
        }
      } else {  /* fixed subdomain */
        if (damp_fixed) {
          ierr = PCFactorSetShiftType(pc_ctx,MAT_SHIFT_NONZERO);CHKERRQ(ierr);
          /* BUGFIX: apply the fixed-subdomain damping factor; the previous code
             passed floating_factor here and never used fixed_factor */
          ierr = PCFactorSetShiftAmount(pc_ctx,fixed_factor);CHKERRQ(ierr);
        }
        if (remove_nullspace_fixed) {
          MatNullSpace nullsp;
          ierr = MatNullSpaceCreate(PETSC_COMM_SELF,PETSC_TRUE,0,NULL,&nullsp);CHKERRQ(ierr);
          ierr = KSPSetNullSpace(pcis->ksp_N,nullsp);CHKERRQ(ierr);
          ierr = MatNullSpaceDestroy(&nullsp);CHKERRQ(ierr);
        }
      }
    }
    /* the vectors in the following line are dummy arguments, just telling the KSP the vector size. Values are not used */
    ierr = KSPSetUp(pcis->ksp_N);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
/*
   Wraps an existing PETSc Vec without taking ownership
   (_destroy_vec_on_exit is false). Queries the vector's type and, for
   parallel/shared vectors, rebuilds _global_to_local_map as the inverse of
   the PETSc local-to-global ghost mapping, then marks the vector as
   GHOSTED / PARALLEL / SERIAL accordingly. The preprocessor branches keep
   the code working across the PETSc 3.0/3.1.1/3.4 API changes to VecType
   and ISLocalToGlobalMapping.
*/
inline PetscVector::PetscVector (Vec v)
  : _array_is_present(false),
    _local_form(NULL),
    _values(NULL),
    _global_to_local_map(),
    _destroy_vec_on_exit(false)
{
  this->_vec = v;
  this->_is_closed = true;
  this->_is_initialized = true;

  /* We need to ask PETSc about the (local to global) ghost value
     mapping and create the inverse mapping out of it. */
  int ierr=0;
  int petsc_local_size=0;
  ierr = VecGetLocalSize(_vec, &petsc_local_size);
  CHKERRABORT(MPI_COMM_WORLD,ierr);

  //   // Get the vector type from PETSc.
  //   const VecType type;
  //   ierr = VecGetType(_vec, &type);
  //          CHKERRABORT(MPI_COMM_WORLD,ierr);

  // Get the vector type from PETSc.
  // As of Petsc 3.0.0, the VecType #define lost its const-ness, so we
  // need to have it in the code
#if PETSC_VERSION_LESS_THAN(3,0,0) || !PETSC_VERSION_LESS_THAN(3,4,0)
  // Pre-3.0 and petsc-dev (as of October 2012) use non-const versions
  VecType ptype;
#else
  const VecType ptype;
#endif
  ierr = VecGetType(_vec, &ptype);
  CHKERRABORT(MPI_COMM_WORLD,ierr);

  if((strcmp(ptype,VECSHARED) == 0) || (strcmp(ptype,VECMPI) == 0))
    {
      // Pre-3.1.1 releases expose the mapping as a struct member; later
      // versions require the accessor function.
#if PETSC_VERSION_RELEASE && PETSC_VERSION_LESS_THAN(3,1,1)
      ISLocalToGlobalMapping mapping = _vec->mapping;
#else
      ISLocalToGlobalMapping mapping;
      ierr = VecGetLocalToGlobalMapping(_vec, &mapping);
      CHKERRABORT(MPI_COMM_WORLD,ierr);
#endif
      //      ISLocalToGlobalMapping mapping;
      //      ierr = VecGetLocalToGlobalMapping(_vec, &mapping);
      //      CHKERRABORT(MPI_COMM_WORLD,ierr);

      // If is a sparsely stored vector, set up our new mapping
      if (mapping)
        {
          const unsigned int local_size = static_cast<unsigned int>(petsc_local_size);
          // ghost entries follow the owned ones in the local index space
          const unsigned int ghost_begin = static_cast<unsigned int>(petsc_local_size);
#if PETSC_VERSION_RELEASE && PETSC_VERSION_LESS_THAN(3,4,0)
          const numeric_index_type ghost_end = static_cast<numeric_index_type>(mapping->n);
#else
          PetscInt n;
          ierr = ISLocalToGlobalMappingGetSize(mapping, &n);
          CHKERRABORT(MPI_COMM_WORLD,ierr);
          const unsigned int ghost_end = static_cast<unsigned int>(n);
#endif
#if PETSC_VERSION_RELEASE && PETSC_VERSION_LESS_THAN(3,1,1)
          const PetscInt *indices = mapping->indices;
#else
          const PetscInt *indices;
          ierr = ISLocalToGlobalMappingGetIndices(mapping,&indices);
          CHKERRABORT(MPI_COMM_WORLD,ierr);
#endif
          // invert the mapping: global index -> position within the ghost block
          for(unsigned int i=ghost_begin; i<ghost_end; i++)
            _global_to_local_map[indices[i]] = i-local_size;
          this->_type = GHOSTED;
#if !PETSC_VERSION_RELEASE || !PETSC_VERSION_LESS_THAN(3,1,1)
          ierr = ISLocalToGlobalMappingRestoreIndices(mapping, &indices);
          CHKERRABORT(MPI_COMM_WORLD,ierr);
#endif

          //      const unsigned int ghost_end = static_cast<unsigned int>(mapping->n);
          //      const int *indices = mapping->indices;
          //      ierr = ISLocalToGlobalMappingGetIndices(mapping,&indices);
          //      CHKERRABORT(MPI_COMM_WORLD,ierr);
          //      for(unsigned int i=ghost_begin; i<ghost_end; i++)
          //        _global_to_local_map[indices[i]] = i-local_size;
          //      this->_type = GHOSTED;
        }
      else
        this->_type = PARALLEL;
    }
  else
    this->_type = SERIAL;

  this->close();
}
int main(int argc,char **argv) { PetscMPIInt rank; PetscErrorCode ierr; PetscInt M = 10,N = 8,m = PETSC_DECIDE; PetscInt s =2,w=2,n = PETSC_DECIDE,nloc,l,i,j,kk; PetscInt Xs,Xm,Ys,Ym,iloc,*iglobal; const PetscInt *ltog; PetscInt *lx = NULL,*ly = NULL; PetscBool testorder = PETSC_FALSE,flg; DMBoundaryType bx = DM_BOUNDARY_NONE,by= DM_BOUNDARY_NONE; DM da; PetscViewer viewer; Vec local,global; PetscScalar value; DMDAStencilType st = DMDA_STENCIL_BOX; AO ao; ierr = PetscInitialize(&argc,&argv,(char*)0,help);if (ierr) return ierr; ierr = PetscViewerDrawOpen(PETSC_COMM_WORLD,0,"",300,0,400,400,&viewer);CHKERRQ(ierr); /* Readoptions */ ierr = PetscOptionsGetInt(NULL,NULL,"-NX",&M,NULL);CHKERRQ(ierr); ierr = PetscOptionsGetInt(NULL,NULL,"-NY",&N,NULL);CHKERRQ(ierr); ierr = PetscOptionsGetInt(NULL,NULL,"-m",&m,NULL);CHKERRQ(ierr); ierr = PetscOptionsGetInt(NULL,NULL,"-n",&n,NULL);CHKERRQ(ierr); ierr = PetscOptionsGetInt(NULL,NULL,"-s",&s,NULL);CHKERRQ(ierr); ierr = PetscOptionsGetInt(NULL,NULL,"-w",&w,NULL);CHKERRQ(ierr); flg = PETSC_FALSE; ierr = PetscOptionsGetBool(NULL,NULL,"-xperiodic",&flg,NULL);CHKERRQ(ierr); if (flg) bx = DM_BOUNDARY_PERIODIC; flg = PETSC_FALSE; ierr = PetscOptionsGetBool(NULL,NULL,"-yperiodic",&flg,NULL);CHKERRQ(ierr); if (flg) by = DM_BOUNDARY_PERIODIC; flg = PETSC_FALSE; ierr = PetscOptionsGetBool(NULL,NULL,"-xghosted",&flg,NULL);CHKERRQ(ierr); if (flg) bx = DM_BOUNDARY_GHOSTED; flg = PETSC_FALSE; ierr = PetscOptionsGetBool(NULL,NULL,"-yghosted",&flg,NULL);CHKERRQ(ierr); if (flg) by = DM_BOUNDARY_GHOSTED; flg = PETSC_FALSE; ierr = PetscOptionsGetBool(NULL,NULL,"-star",&flg,NULL);CHKERRQ(ierr); if (flg) st = DMDA_STENCIL_STAR; flg = PETSC_FALSE; ierr = PetscOptionsGetBool(NULL,NULL,"-box",&flg,NULL);CHKERRQ(ierr); if (flg) st = DMDA_STENCIL_BOX; flg = PETSC_FALSE; ierr = PetscOptionsGetBool(NULL,NULL,"-testorder",&testorder,NULL);CHKERRQ(ierr); /* Test putting two nodes in x and y on each processor, exact last processor in x and y gets the rest. 
*/ flg = PETSC_FALSE; ierr = PetscOptionsGetBool(NULL,NULL,"-distribute",&flg,NULL);CHKERRQ(ierr); if (flg) { if (m == PETSC_DECIDE) SETERRQ(PETSC_COMM_WORLD,1,"Must set -m option with -distribute option"); ierr = PetscMalloc1(m,&lx);CHKERRQ(ierr); for (i=0; i<m-1; i++) { lx[i] = 4;} lx[m-1] = M - 4*(m-1); if (n == PETSC_DECIDE) SETERRQ(PETSC_COMM_WORLD,1,"Must set -n option with -distribute option"); ierr = PetscMalloc1(n,&ly);CHKERRQ(ierr); for (i=0; i<n-1; i++) { ly[i] = 2;} ly[n-1] = N - 2*(n-1); } /* Create distributed array and get vectors */ ierr = DMDACreate2d(PETSC_COMM_WORLD,bx,by,st,M,N,m,n,w,s,lx,ly,&da);CHKERRQ(ierr); ierr = PetscFree(lx);CHKERRQ(ierr); ierr = PetscFree(ly);CHKERRQ(ierr); ierr = DMView(da,viewer);CHKERRQ(ierr); ierr = DMCreateGlobalVector(da,&global);CHKERRQ(ierr); ierr = DMCreateLocalVector(da,&local);CHKERRQ(ierr); /* Set global vector; send ghost points to local vectors */ value = 1; ierr = VecSet(global,value);CHKERRQ(ierr); ierr = DMGlobalToLocalBegin(da,global,INSERT_VALUES,local);CHKERRQ(ierr); ierr = DMGlobalToLocalEnd(da,global,INSERT_VALUES,local);CHKERRQ(ierr); /* Scale local vectors according to processor rank; pass to global vector */ ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr); value = rank; ierr = VecScale(local,value);CHKERRQ(ierr); ierr = DMLocalToGlobalBegin(da,local,INSERT_VALUES,global);CHKERRQ(ierr); ierr = DMLocalToGlobalEnd(da,local,INSERT_VALUES,global);CHKERRQ(ierr); if (!testorder) { /* turn off printing when testing ordering mappings */ ierr = PetscPrintf(PETSC_COMM_WORLD,"\nGlobal Vectors:\n");CHKERRQ(ierr); ierr = VecView(global,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); ierr = PetscPrintf(PETSC_COMM_WORLD,"\n\n");CHKERRQ(ierr); } /* Send ghost points to local vectors */ ierr = DMGlobalToLocalBegin(da,global,INSERT_VALUES,local);CHKERRQ(ierr); ierr = DMGlobalToLocalEnd(da,global,INSERT_VALUES,local);CHKERRQ(ierr); flg = PETSC_FALSE; ierr = 
PetscOptionsGetBool(NULL,NULL,"-local_print",&flg,NULL);CHKERRQ(ierr); if (flg) { PetscViewer sviewer; ierr = PetscViewerASCIIPushSynchronized(PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"\nLocal Vector: processor %d\n",rank);CHKERRQ(ierr); ierr = PetscViewerGetSubViewer(PETSC_VIEWER_STDOUT_WORLD,PETSC_COMM_SELF,&sviewer);CHKERRQ(ierr); ierr = VecView(local,sviewer);CHKERRQ(ierr); ierr = PetscViewerRestoreSubViewer(PETSC_VIEWER_STDOUT_WORLD,PETSC_COMM_SELF,&sviewer);CHKERRQ(ierr); ierr = PetscViewerFlush(PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); ierr = PetscViewerASCIIPopSynchronized(PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); } /* Tests mappings betweeen application/PETSc orderings */ if (testorder) { ISLocalToGlobalMapping ltogm; ierr = DMGetLocalToGlobalMapping(da,<ogm);CHKERRQ(ierr); ierr = ISLocalToGlobalMappingGetSize(ltogm,&nloc);CHKERRQ(ierr); ierr = ISLocalToGlobalMappingGetIndices(ltogm,<og);CHKERRQ(ierr); ierr = DMDAGetGhostCorners(da,&Xs,&Ys,NULL,&Xm,&Ym,NULL);CHKERRQ(ierr); ierr = DMDAGetAO(da,&ao);CHKERRQ(ierr); ierr = PetscMalloc1(nloc,&iglobal);CHKERRQ(ierr); /* Set iglobal to be global indices for each processor's local and ghost nodes, using the DMDA ordering of grid points */ kk = 0; for (j=Ys; j<Ys+Ym; j++) { for (i=Xs; i<Xs+Xm; i++) { iloc = w*((j-Ys)*Xm + i-Xs); for (l=0; l<w; l++) { iglobal[kk++] = ltog[iloc+l]; } } } /* Map this to the application ordering (which for DMDAs is just the natural ordering that would be used for 1 processor, numbering most rapidly by x, then y) */ ierr = AOPetscToApplication(ao,nloc,iglobal);CHKERRQ(ierr); /* Then map the application ordering back to the PETSc DMDA ordering */ ierr = AOApplicationToPetsc(ao,nloc,iglobal);CHKERRQ(ierr); /* Verify the mappings */ kk=0; for (j=Ys; j<Ys+Ym; j++) { for (i=Xs; i<Xs+Xm; i++) { iloc = w*((j-Ys)*Xm + i-Xs); for (l=0; l<w; l++) { if (iglobal[kk] != ltog[iloc+l]) { ierr = PetscFPrintf(PETSC_COMM_SELF,stdout,"[%d] Problem with 
mapping: j=%D, i=%D, l=%D, petsc1=%D, petsc2=%D\n",rank,j,i,l,ltog[iloc+l],iglobal[kk]);CHKERRQ(ierr); } kk++; } } } ierr = PetscFree(iglobal);CHKERRQ(ierr); ierr = ISLocalToGlobalMappingRestoreIndices(ltogm,<og);CHKERRQ(ierr); } /* Free memory */ ierr = PetscViewerDestroy(&viewer);CHKERRQ(ierr); ierr = VecDestroy(&local);CHKERRQ(ierr); ierr = VecDestroy(&global);CHKERRQ(ierr); ierr = DMDestroy(&da);CHKERRQ(ierr); ierr = PetscFinalize(); return ierr; }
/*
   MatSetLocalToGlobalMapping_IS - Attaches possibly different row and column
   local-to-global mappings to a MATIS matrix. Rebuilds the sequential local
   matrix is->A, its work vectors is->x (columns) / is->y (rows), and the
   row/column global-to-local scatters is->rctx / is->cctx.

   Input Parameters:
+  A        - the MATIS matrix
.  rmapping - row local-to-global mapping
-  cmapping - column local-to-global mapping (may equal rmapping)

   Notes:
   All objects created by a previous call are destroyed first, so the routine
   may be called repeatedly. The mappings themselves are stored on the matrix
   layouts via PetscLayoutSetISLocalToGlobalMapping(). When the two mappings
   are identical the column scatter simply shares the row scatter.
*/
PetscErrorCode MatSetLocalToGlobalMapping_IS(Mat A,ISLocalToGlobalMapping rmapping,ISLocalToGlobalMapping cmapping)
{
  PetscErrorCode ierr;
  PetscInt       nr,rbs,nc,cbs;
  Mat_IS         *is = (Mat_IS*)A->data;
  IS             from,to;
  Vec            cglobal,rglobal;

  PetscFunctionBegin;
  PetscCheckSameComm(A,1,rmapping,2);
  PetscCheckSameComm(A,1,cmapping,3);
  /* Destroy any previous data (all destroy routines are NULL-safe) */
  ierr = VecDestroy(&is->x);CHKERRQ(ierr);
  ierr = VecDestroy(&is->y);CHKERRQ(ierr);
  ierr = VecScatterDestroy(&is->rctx);CHKERRQ(ierr);
  ierr = VecScatterDestroy(&is->cctx);CHKERRQ(ierr);
  ierr = MatDestroy(&is->A);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&is->sf);CHKERRQ(ierr);
  ierr = PetscFree2(is->sf_rootdata,is->sf_leafdata);CHKERRQ(ierr);
  /* Setup Layout and set local to global maps (the layouts reference the mappings) */
  ierr = PetscLayoutSetUp(A->rmap);CHKERRQ(ierr);
  ierr = PetscLayoutSetUp(A->cmap);CHKERRQ(ierr);
  ierr = PetscLayoutSetISLocalToGlobalMapping(A->rmap,rmapping);CHKERRQ(ierr);
  ierr = PetscLayoutSetISLocalToGlobalMapping(A->cmap,cmapping);CHKERRQ(ierr);
  /* Create the local matrix A: sequential, sized by the (possibly overlapping) local spaces */
  ierr = ISLocalToGlobalMappingGetSize(rmapping,&nr);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingGetBlockSize(rmapping,&rbs);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingGetSize(cmapping,&nc);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingGetBlockSize(cmapping,&cbs);CHKERRQ(ierr);
  ierr = MatCreate(PETSC_COMM_SELF,&is->A);CHKERRQ(ierr);
  ierr = MatSetType(is->A,MATAIJ);CHKERRQ(ierr);
  ierr = MatSetSizes(is->A,nr,nc,nr,nc);CHKERRQ(ierr);
  ierr = MatSetBlockSizes(is->A,rbs,cbs);CHKERRQ(ierr);
  ierr = MatSetOptionsPrefix(is->A,((PetscObject)A)->prefix);CHKERRQ(ierr);
  ierr = MatAppendOptionsPrefix(is->A,"is_");CHKERRQ(ierr);
  ierr = MatSetFromOptions(is->A);CHKERRQ(ierr);
  /* Create the local work vectors: x is column-sized, y is row-sized */
  ierr = MatCreateVecs(is->A,&is->x,&is->y);CHKERRQ(ierr);
  /* setup the global to local scatters */
  ierr = MatCreateVecs(A,&cglobal,&rglobal);CHKERRQ(ierr);
  ierr = ISCreateStride(PETSC_COMM_SELF,nr,0,1,&to);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingApplyIS(rmapping,to,&from);CHKERRQ(ierr);
  ierr = VecScatterCreate(rglobal,from,is->y,to,&is->rctx);CHKERRQ(ierr);
  if (rmapping != cmapping) {
    /* distinct column mapping: build a separate column scatter */
    ierr = ISDestroy(&to);CHKERRQ(ierr);
    ierr = ISDestroy(&from);CHKERRQ(ierr);
    ierr = ISCreateStride(PETSC_COMM_SELF,nc,0,1,&to);CHKERRQ(ierr);
    ierr = ISLocalToGlobalMappingApplyIS(cmapping,to,&from);CHKERRQ(ierr);
    ierr = VecScatterCreate(cglobal,from,is->x,to,&is->cctx);CHKERRQ(ierr);
  } else {
    /* same mapping: the column scatter shares the row scatter (extra reference) */
    ierr = PetscObjectReference((PetscObject)is->rctx);CHKERRQ(ierr);
    is->cctx = is->rctx;
  }
  ierr = VecDestroy(&rglobal);CHKERRQ(ierr);
  ierr = VecDestroy(&cglobal);CHKERRQ(ierr);
  ierr = ISDestroy(&to);CHKERRQ(ierr);
  ierr = ISDestroy(&from);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/*@C
  DMPlexDistribute - Distributes the mesh and any associated sections.

  Collective on DM (the implementation below performs MPI_Bcast()s and collective PetscSF operations on the DM's communicator)

  Input Parameters:
+ dm - The original DMPlex object
. partitioner - The partitioning package, or NULL for the default
- overlap - The overlap of partitions, 0 is the default

  Output Parameters:
+ sf - The PetscSF used for point distribution
- dmParallel - The distributed DMPlex object, or NULL

  Note: If the mesh was not distributed (one process), *dmParallel is set to NULL.

  The user can control the definition of adjacency for the mesh using DMPlexGetAdjacencyUseCone() and
  DMPlexSetAdjacencyUseClosure(). They should choose the combination appropriate for the function
  representation on the mesh.

  Level: intermediate

.keywords: mesh, elements
.seealso: DMPlexCreate(), DMPlexDistributeByFace(), DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure()
@*/
PetscErrorCode DMPlexDistribute(DM dm, const char partitioner[], PetscInt overlap, PetscSF *sf, DM *dmParallel)
{
  DM_Plex               *mesh   = (DM_Plex*) dm->data, *pmesh;
  MPI_Comm               comm;
  const PetscInt         height = 0;        /* partition at cell height */
  PetscInt               dim, numRemoteRanks;
  IS                     origCellPart, origPart, cellPart, part;
  PetscSection           origCellPartSection, origPartSection, cellPartSection, partSection;
  PetscSFNode           *remoteRanks;
  PetscSF                partSF, pointSF, coneSF;
  ISLocalToGlobalMapping renumbering;       /* new-local-point -> old-global-point map */
  PetscSection           originalConeSection, newConeSection;
  PetscInt              *remoteOffsets;
  PetscInt              *cones, *newCones, newConesSize;
  PetscBool              flg;
  PetscMPIInt            rank, numProcs, p;
  PetscErrorCode         ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  if (sf) PetscValidPointer(sf,4);
  PetscValidPointer(dmParallel,5);
  ierr = PetscLogEventBegin(DMPLEX_Distribute,dm,0,0,0);CHKERRQ(ierr);
  ierr = PetscObjectGetComm((PetscObject)dm,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm, &numProcs);CHKERRQ(ierr);
  *dmParallel = NULL;
  /* Nothing to distribute on a single process */
  if (numProcs == 1) PetscFunctionReturn(0);
  ierr = DMPlexGetDimension(dm, &dim);CHKERRQ(ierr);
  /* Create cell partition - We need to rewrite to use IS, use the MatPartition stuff */
  ierr = PetscLogEventBegin(DMPLEX_Partition,dm,0,0,0);CHKERRQ(ierr);
  if (overlap > 1) SETERRQ(PetscObjectComm((PetscObject)dm), PETSC_ERR_SUP, "Overlap > 1 not yet implemented");
  ierr = DMPlexCreatePartition(dm, partitioner, height, overlap > 0 ? PETSC_TRUE : PETSC_FALSE, &cellPartSection, &cellPart, &origCellPartSection, &origCellPart);CHKERRQ(ierr);
  /* Create SF assuming a serial partition for all processes: Could check for IS length here
     Only rank 0 has outgoing edges (it owns the serial partition being scattered). */
  if (!rank) numRemoteRanks = numProcs;
  else       numRemoteRanks = 0;
  ierr = PetscMalloc1(numRemoteRanks, &remoteRanks);CHKERRQ(ierr);
  for (p = 0; p < numRemoteRanks; ++p) {
    remoteRanks[p].rank  = p;
    remoteRanks[p].index = 0;
  }
  ierr = PetscSFCreate(comm, &partSF);CHKERRQ(ierr);
  ierr = PetscSFSetGraph(partSF, 1, numRemoteRanks, NULL, PETSC_OWN_POINTER, remoteRanks, PETSC_OWN_POINTER);CHKERRQ(ierr);
  ierr = PetscOptionsHasName(((PetscObject) dm)->prefix, "-partition_view", &flg);CHKERRQ(ierr);
  if (flg) {
    ierr = PetscPrintf(comm, "Cell Partition:\n");CHKERRQ(ierr);
    ierr = PetscSectionView(cellPartSection, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
    ierr = ISView(cellPart, NULL);CHKERRQ(ierr);
    if (origCellPart) {
      ierr = PetscPrintf(comm, "Original Cell Partition:\n");CHKERRQ(ierr);
      ierr = PetscSectionView(origCellPartSection, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
      ierr = ISView(origCellPart, NULL);CHKERRQ(ierr);
    }
    ierr = PetscSFView(partSF, NULL);CHKERRQ(ierr);
  }
  /* Close the partition over the mesh: extend the cell partition to all points in each cell's closure */
  ierr = DMPlexCreatePartitionClosure(dm, cellPartSection, cellPart, &partSection, &part);CHKERRQ(ierr);
  ierr = ISDestroy(&cellPart);CHKERRQ(ierr);
  ierr = PetscSectionDestroy(&cellPartSection);CHKERRQ(ierr);
  /* Create new mesh */
  ierr = DMPlexCreate(comm, dmParallel);CHKERRQ(ierr);
  ierr = DMPlexSetDimension(*dmParallel, dim);CHKERRQ(ierr);
  ierr = PetscObjectSetName((PetscObject) *dmParallel, "Parallel Mesh");CHKERRQ(ierr);
  pmesh = (DM_Plex*) (*dmParallel)->data;
  /* Distribute sieve points and the global point numbering (replaces creating remote bases) */
  ierr = PetscSFConvertPartition(partSF, partSection, part, &renumbering, &pointSF);CHKERRQ(ierr);
  if (flg) {
    ierr = PetscPrintf(comm, "Point Partition:\n");CHKERRQ(ierr);
    ierr = PetscSectionView(partSection, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
    ierr = ISView(part, NULL);CHKERRQ(ierr);
    ierr = PetscSFView(pointSF, NULL);CHKERRQ(ierr);
    ierr = PetscPrintf(comm, "Point Renumbering after partition:\n");CHKERRQ(ierr);
    ierr = ISLocalToGlobalMappingView(renumbering, NULL);CHKERRQ(ierr);
  }
  ierr = PetscLogEventEnd(DMPLEX_Partition,dm,0,0,0);CHKERRQ(ierr);
  ierr = PetscLogEventBegin(DMPLEX_DistributeCones,dm,0,0,0);CHKERRQ(ierr);
  /* Distribute cone section */
  ierr = DMPlexGetConeSection(dm, &originalConeSection);CHKERRQ(ierr);
  ierr = DMPlexGetConeSection(*dmParallel, &newConeSection);CHKERRQ(ierr);
  /* NOTE(review): remoteOffsets allocated here is handed to PetscSFCreateSectionSF below but never
     freed in this function -- confirm that PetscSFCreateSectionSF takes ownership, else this leaks. */
  ierr = PetscSFDistributeSection(pointSF, originalConeSection, &remoteOffsets, newConeSection);CHKERRQ(ierr);
  ierr = DMSetUp(*dmParallel);CHKERRQ(ierr);
  {
    PetscInt pStart, pEnd, p;

    /* Recompute maxConeSize on the new mesh from the distributed cone section */
    ierr = PetscSectionGetChart(newConeSection, &pStart, &pEnd);CHKERRQ(ierr);
    for (p = pStart; p < pEnd; ++p) {
      PetscInt coneSize;
      ierr = PetscSectionGetDof(newConeSection, p, &coneSize);CHKERRQ(ierr);
      pmesh->maxConeSize = PetscMax(pmesh->maxConeSize, coneSize);
    }
  }
  /* Communicate and renumber cones */
  ierr = PetscSFCreateSectionSF(pointSF, originalConeSection, remoteOffsets, newConeSection, &coneSF);CHKERRQ(ierr);
  ierr = DMPlexGetCones(dm, &cones);CHKERRQ(ierr);
  ierr = DMPlexGetCones(*dmParallel, &newCones);CHKERRQ(ierr);
  ierr = PetscSFBcastBegin(coneSF, MPIU_INT, cones, newCones);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(coneSF, MPIU_INT, cones, newCones);CHKERRQ(ierr);
  ierr = PetscSectionGetStorageSize(newConeSection, &newConesSize);CHKERRQ(ierr);
  /* Translate received cone points from old global numbering to new local numbering (in place) */
  ierr = ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newConesSize, newCones, NULL, newCones);CHKERRQ(ierr);
  ierr = PetscOptionsHasName(((PetscObject) dm)->prefix, "-cones_view", &flg);CHKERRQ(ierr);
  if (flg) {
    ierr = PetscPrintf(comm, "Serial Cone Section:\n");CHKERRQ(ierr);
    ierr = PetscSectionView(originalConeSection, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
    ierr = PetscPrintf(comm, "Parallel Cone Section:\n");CHKERRQ(ierr);
    ierr = PetscSectionView(newConeSection, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
    ierr = PetscSFView(coneSF, NULL);CHKERRQ(ierr);
  }
  /* Orientations need no renumbering, so just broadcast them over the same section SF */
  ierr = DMPlexGetConeOrientations(dm, &cones);CHKERRQ(ierr);
  ierr = DMPlexGetConeOrientations(*dmParallel, &newCones);CHKERRQ(ierr);
  ierr = PetscSFBcastBegin(coneSF, MPIU_INT, cones, newCones);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(coneSF, MPIU_INT, cones, newCones);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&coneSF);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(DMPLEX_DistributeCones,dm,0,0,0);CHKERRQ(ierr);
  /* Create supports and stratify sieve */
  {
    PetscInt pStart, pEnd;
    ierr = PetscSectionGetChart(pmesh->coneSection, &pStart, &pEnd);CHKERRQ(ierr);
    ierr = PetscSectionSetChart(pmesh->supportSection, pStart, pEnd);CHKERRQ(ierr);
  }
  ierr = DMPlexSymmetrize(*dmParallel);CHKERRQ(ierr);
  ierr = DMPlexStratify(*dmParallel);CHKERRQ(ierr);
  /* Distribute Coordinates */
  {
    PetscSection originalCoordSection, newCoordSection;
    Vec          originalCoordinates, newCoordinates;
    PetscInt     bs;
    const char  *name;

    ierr = DMGetCoordinateSection(dm, &originalCoordSection);CHKERRQ(ierr);
    ierr = DMGetCoordinateSection(*dmParallel, &newCoordSection);CHKERRQ(ierr);
    ierr = DMGetCoordinatesLocal(dm, &originalCoordinates);CHKERRQ(ierr);
    ierr = VecCreate(comm, &newCoordinates);CHKERRQ(ierr);
    ierr = PetscObjectGetName((PetscObject) originalCoordinates, &name);CHKERRQ(ierr);
    ierr = PetscObjectSetName((PetscObject) newCoordinates, name);CHKERRQ(ierr);

    ierr = DMPlexDistributeField(dm, pointSF, originalCoordSection, originalCoordinates, newCoordSection, newCoordinates);CHKERRQ(ierr);
    ierr = DMSetCoordinatesLocal(*dmParallel, newCoordinates);CHKERRQ(ierr);
    /* NOTE(review): block size is copied only after DMSetCoordinatesLocal(); verify nothing in that
       call depends on the block size already being set. */
    ierr = VecGetBlockSize(originalCoordinates, &bs);CHKERRQ(ierr);
    ierr = VecSetBlockSize(newCoordinates, bs);CHKERRQ(ierr);
    ierr = VecDestroy(&newCoordinates);CHKERRQ(ierr);
  }
  /* Distribute labels */
  ierr = PetscLogEventBegin(DMPLEX_DistributeLabels,dm,0,0,0);CHKERRQ(ierr);
  {
    DMLabel  next = mesh->labels, newNext = pmesh->labels;
    PetscInt numLabels = 0, l;

    /* Bcast number of labels: only rank 0 walks its (authoritative) label list;
       other ranks learn the count, and each label's depth-flag, via broadcast */
    while (next) {++numLabels; next = next->next;}
    ierr = MPI_Bcast(&numLabels, 1, MPIU_INT, 0, comm);CHKERRQ(ierr);
    next = mesh->labels;
    for (l = 0; l < numLabels; ++l) {
      DMLabel   labelNew;
      PetscBool isdepth;

      /* Skip "depth" because it is recreated */
      if (!rank) {ierr = PetscStrcmp(next->name, "depth", &isdepth);CHKERRQ(ierr);}
      ierr = MPI_Bcast(&isdepth, 1, MPIU_BOOL, 0, comm);CHKERRQ(ierr);
      if (isdepth) {if (!rank) next = next->next; continue;}
      ierr = DMLabelDistribute(next, partSection, part, renumbering, &labelNew);CHKERRQ(ierr);
      /* Insert into list (append to the parallel mesh's label list) */
      if (newNext) newNext->next = labelNew;
      else         pmesh->labels = labelNew;
      newNext = labelNew;
      if (!rank) next = next->next;
    }
  }
  ierr = PetscLogEventEnd(DMPLEX_DistributeLabels,dm,0,0,0);CHKERRQ(ierr);
  /* Setup hybrid structure: recompute per-depth hybrid point bounds in the new local numbering */
  {
    const PetscInt *gpoints;
    PetscInt        depth, n, d;

    for (d = 0; d <= dim; ++d) {pmesh->hybridPointMax[d] = mesh->hybridPointMax[d];}
    ierr = MPI_Bcast(pmesh->hybridPointMax, dim+1, MPIU_INT, 0, comm);CHKERRQ(ierr);
    ierr = ISLocalToGlobalMappingGetSize(renumbering, &n);CHKERRQ(ierr);
    ierr = ISLocalToGlobalMappingGetIndices(renumbering, &gpoints);CHKERRQ(ierr);
    ierr = DMPlexGetDepth(dm, &depth);CHKERRQ(ierr);
    for (d = 0; d <= dim; ++d) {
      PetscInt pmax = pmesh->hybridPointMax[d], newmax = 0, pEnd, stratum[2], p;

      if (pmax < 0) continue;  /* no hybrid points at this depth */
      ierr = DMPlexGetDepthStratum(dm, d > depth ? depth : d, &stratum[0], &stratum[1]);CHKERRQ(ierr);
      ierr = DMPlexGetDepthStratum(*dmParallel, d, NULL, &pEnd);CHKERRQ(ierr);
      ierr = MPI_Bcast(stratum, 2, MPIU_INT, 0, comm);CHKERRQ(ierr);
      /* Count local points that were hybrid (>= pmax) in the old global numbering */
      for (p = 0; p < n; ++p) {
        const PetscInt point = gpoints[p];

        if ((point >= stratum[0]) && (point < stratum[1]) && (point >= pmax)) ++newmax;
      }
      if (newmax > 0) pmesh->hybridPointMax[d] = pEnd - newmax;
      else            pmesh->hybridPointMax[d] = -1;
    }
    ierr = ISLocalToGlobalMappingRestoreIndices(renumbering, &gpoints);CHKERRQ(ierr);
  }
  /* Cleanup Partition */
  ierr = ISLocalToGlobalMappingDestroy(&renumbering);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&partSF);CHKERRQ(ierr);
  ierr = PetscSectionDestroy(&partSection);CHKERRQ(ierr);
  ierr = ISDestroy(&part);CHKERRQ(ierr);
  /* Create point SF for parallel mesh: decide a unique owner for every shared point, then
     record non-owned local points as ghosts pointing at their owner's copy */
  ierr = PetscLogEventBegin(DMPLEX_DistributeSF,dm,0,0,0);CHKERRQ(ierr);
  {
    const PetscInt *leaves;
    PetscSFNode    *remotePoints, *rowners, *lowners;
    PetscInt        numRoots, numLeaves, numGhostPoints = 0, p, gp, *ghostPoints;
    PetscInt        pStart, pEnd;

    ierr = DMPlexGetChart(*dmParallel, &pStart, &pEnd);CHKERRQ(ierr);
    ierr = PetscSFGetGraph(pointSF, &numRoots, &numLeaves, &leaves, NULL);CHKERRQ(ierr);
    ierr = PetscMalloc2(numRoots,&rowners,numLeaves,&lowners);CHKERRQ(ierr);
    for (p=0; p<numRoots; p++) {
      rowners[p].rank  = -1;   /* -1 == unclaimed */
      rowners[p].index = -1;
    }
    if (origCellPart) {
      /* Make sure points in the original partition are not assigned to other procs */
      const PetscInt *origPoints;

      ierr = DMPlexCreatePartitionClosure(dm, origCellPartSection, origCellPart, &origPartSection, &origPart);CHKERRQ(ierr);
      ierr = ISGetIndices(origPart, &origPoints);CHKERRQ(ierr);
      for (p = 0; p < numProcs; ++p) {
        PetscInt dof, off, d;

        ierr = PetscSectionGetDof(origPartSection, p, &dof);CHKERRQ(ierr);
        ierr = PetscSectionGetOffset(origPartSection, p, &off);CHKERRQ(ierr);
        for (d = off; d < off+dof; ++d) {
          rowners[origPoints[d]].rank = p;
        }
      }
      ierr = ISRestoreIndices(origPart, &origPoints);CHKERRQ(ierr);
      ierr = ISDestroy(&origPart);CHKERRQ(ierr);
      ierr = PetscSectionDestroy(&origPartSection);CHKERRQ(ierr);
    }
    ierr = ISDestroy(&origCellPart);CHKERRQ(ierr);
    ierr = PetscSectionDestroy(&origCellPartSection);CHKERRQ(ierr);

    /* Push current claims to leaves, let each leaf bid for ownership, then settle with MAXLOC */
    ierr = PetscSFBcastBegin(pointSF, MPIU_2INT, rowners, lowners);CHKERRQ(ierr);
    ierr = PetscSFBcastEnd(pointSF, MPIU_2INT, rowners, lowners);CHKERRQ(ierr);
    for (p = 0; p < numLeaves; ++p) {
      if (lowners[p].rank < 0 || lowners[p].rank == rank) { /* Either put in a bid or we know we own it */
        lowners[p].rank  = rank;
        lowners[p].index = leaves ? leaves[p] : p;
      } else if (lowners[p].rank >= 0) { /* Point already claimed so flag so that MAXLOC does not listen to us */
        lowners[p].rank  = -2;
        lowners[p].index = -2;
      }
    }
    for (p=0; p<numRoots; p++) { /* Root must not participate in the reduction, flag so that MAXLOC does not use it */
      rowners[p].rank  = -3;
      rowners[p].index = -3;
    }
    ierr = PetscSFReduceBegin(pointSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);CHKERRQ(ierr);
    ierr = PetscSFReduceEnd(pointSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);CHKERRQ(ierr);
    /* Broadcast the settled ownership back to the leaves */
    ierr = PetscSFBcastBegin(pointSF, MPIU_2INT, rowners, lowners);CHKERRQ(ierr);
    ierr = PetscSFBcastEnd(pointSF, MPIU_2INT, rowners, lowners);CHKERRQ(ierr);
    for (p = 0; p < numLeaves; ++p) {
      if (lowners[p].rank < 0 || lowners[p].index < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Cell partition corrupt: point not claimed");
      if (lowners[p].rank != rank) ++numGhostPoints;
    }
    ierr = PetscMalloc1(numGhostPoints, &ghostPoints);CHKERRQ(ierr);
    ierr = PetscMalloc1(numGhostPoints, &remotePoints);CHKERRQ(ierr);
    for (p = 0, gp = 0; p < numLeaves; ++p) {
      if (lowners[p].rank != rank) {
        ghostPoints[gp]        = leaves ? leaves[p] : p;
        remotePoints[gp].rank  = lowners[p].rank;
        remotePoints[gp].index = lowners[p].index;
        ++gp;
      }
    }
    ierr = PetscFree2(rowners,lowners);CHKERRQ(ierr);
    ierr = PetscSFSetGraph((*dmParallel)->sf, pEnd - pStart, numGhostPoints, ghostPoints, PETSC_OWN_POINTER, remotePoints, PETSC_OWN_POINTER);CHKERRQ(ierr);
    ierr = PetscSFSetFromOptions((*dmParallel)->sf);CHKERRQ(ierr);
  }
  /* Propagate adjacency options to the parallel mesh */
  pmesh->useCone    = mesh->useCone;
  pmesh->useClosure = mesh->useClosure;
  ierr = PetscLogEventEnd(DMPLEX_DistributeSF,dm,0,0,0);CHKERRQ(ierr);
  /* Copy BC */
  ierr = DMPlexCopyBoundary(dm, *dmParallel);CHKERRQ(ierr);
  /* Cleanup: hand pointSF to the caller if requested, otherwise destroy it */
  if (sf) {*sf = pointSF;}
  else    {ierr = PetscSFDestroy(&pointSF);CHKERRQ(ierr);}
  ierr = DMSetFromOptions(*dmParallel);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(DMPLEX_Distribute,dm,0,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}