int NonlocalMaterialWTP :: unpackRemoteElements(Domain *d, ProcessCommunicator &pc)
{
    int myrank = d->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();
    std :: string _type;
    DofManager *dofman;
    IntArray _partitions;

    if ( iproc == myrank ) {
        return 1; // skip local partition
    }

    // query process communicator to use
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();
    ProcessCommDataStream pcDataStream(pcbuff);

    // unpack dofman data
    do {
        pcbuff->unpackString(_type);
        if ( _type.size() == 0 ) {
            break;
        }

        dofman = classFactory.createDofManager(_type.c_str(), 0, d);
        dofman->restoreContext(& pcDataStream, CM_Definition | CM_State | CM_UnknownDictState);
        dofman->setParallelMode(DofManager_null);
        if ( d->dofmanGlobal2Local( dofman->giveGlobalNumber() ) ) {
            // record already exists
            delete dofman;
        } else {
            d->giveTransactionManager()->addDofManTransaction(DomainTransactionManager :: DTT_ADD,
                                                              dofman->giveGlobalNumber(), dofman);
        }
    } while ( 1 );

    // unpack element data
    Element *elem;
    _partitions.resize(1);
    _partitions.at(1) = iproc;

    do {
        pcbuff->unpackString(_type);
        if ( _type.size() == 0 ) {
            break;
        }

        elem = classFactory.createElement(_type.c_str(), 0, d);
        elem->restoreContext(& pcDataStream, CM_Definition | CM_State);
        elem->setParallelMode(Element_remote);
        elem->setPartitionList(_partitions);
        d->giveTransactionManager()->addElementTransaction(DomainTransactionManager :: DTT_ADD,
                                                           elem->giveGlobalNumber(), elem);
    } while ( 1 );

    return 1;
}
int EnrichmentItem :: giveNumDofManEnrichments(const DofManager &iDMan) const
{
    int nodeInd = iDMan.giveGlobalNumber();
    auto res = mNodeEnrMarkerMap.find(nodeInd);

    if ( res != mNodeEnrMarkerMap.end() ) {
        switch ( res->second ) {
        case NodeEnr_NONE:
            return 0;

        case NodeEnr_BULK:
            return 1;

        case NodeEnr_START_TIP:
            return mpEnrichmentFrontStart->giveNumEnrichments(iDMan);

        case NodeEnr_END_TIP:
            return mpEnrichmentFrontEnd->giveNumEnrichments(iDMan);

        case NodeEnr_START_AND_END_TIP:
            return mpEnrichmentFrontStart->giveNumEnrichments(iDMan) +
                   mpEnrichmentFrontEnd->giveNumEnrichments(iDMan);
        }
    }

    return 0;
}
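/*
 * Illustrative sketch: how the per-node count returned above can be summed over an element's
 * nodes to estimate the number of active enrichment functions. A minimal sketch only; the
 * free helper name countElementEnrichments is hypothetical, and the calls used are those
 * appearing in the surrounding routines (giveNumberOfDofManagers, giveDofManager,
 * isDofManEnriched, giveNumDofManEnrichments).
 */
#if 0
static int countElementEnrichments(Element &elem, XfemManager &xMan)
{
    int numEnr = 0;
    for ( int i = 1; i <= elem.giveNumberOfDofManagers(); i++ ) {
        DofManager *dMan = elem.giveDofManager(i);
        for ( int e = 1; e <= xMan.giveNumberOfEnrichmentItems(); e++ ) {
            EnrichmentItem *ei = xMan.giveEnrichmentItem(e);
            if ( ei->isDofManEnriched(*dMan) ) {
                // each enrichment typically adds one extra unknown per displacement component
                numEnr += ei->giveNumDofManEnrichments(*dMan);
            }
        }
    }

    return numEnr;
}
#endif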
int ParmetisLoadBalancer :: packSharedDmanPartitions(ProcessCommunicator &pc)
{
    int myrank = domain->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();
    int ndofman, idofman;
    DofManager *dofman;

    if ( iproc == myrank ) {
        return 1; // skip local partition
    }

    // query process communicator to use
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();

    // loop over dofManagers and pack shared dofMan data
    ndofman = domain->giveNumberOfDofManagers();
    for ( idofman = 1; idofman <= ndofman; idofman++ ) {
        dofman = domain->giveDofManager(idofman);
        // test if iproc is in list of existing shared partitions
        if ( ( dofman->giveParallelMode() == DofManager_shared ) &&
             ( dofman->givePartitionList()->findFirstIndexOf(iproc) ) ) {
            // send new partitions to remote representation
            // fprintf (stderr, "[%d] sending shared plist of %d to [%d]\n", myrank, dofman->giveGlobalNumber(), iproc);
            pcbuff->write( dofman->giveGlobalNumber() );
            this->giveDofManPartitions(idofman)->storeYourself(*pcbuff);
        }
    }

    pcbuff->write((int)PARMETISLB_END_DATA);
    return 1;
}
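/*
 * Minimal sketch of the matching receive side, assuming ProcessCommunicatorBuff offers a
 * read() counterpart to write() and that IntArray :: restoreYourself() mirrors the
 * storeYourself() call above; the name unpackSharedDmanPartitionsSketch is hypothetical.
 */
#if 0
int ParmetisLoadBalancer :: unpackSharedDmanPartitionsSketch(ProcessCommunicator &pc)
{
    int globnum;
    IntArray _partitions;
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();

    // read (global number, partition list) pairs until the end-of-data marker arrives
    pcbuff->read(globnum);
    while ( globnum != ( int ) PARMETISLB_END_DATA ) {
        _partitions.restoreYourself(*pcbuff);
        // ... update the shared dofman identified by globnum with _partitions ...
        pcbuff->read(globnum);
    }

    return 1;
}
#endif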
void TrPlaneStress2dXFEM :: giveDofManDofIDMask(int inode, IntArray &answer) const
{
    // Continuous part
    TrPlaneStress2d :: giveDofManDofIDMask(inode, answer);

    // Discontinuous part
    if ( this->giveDomain()->hasXfemManager() ) {
        DofManager *dMan = giveDofManager(inode);
        XfemManager *xMan = giveDomain()->giveXfemManager();

        const std :: vector< int > &nodeEiIndices = xMan->giveNodeEnrichmentItemIndices( dMan->giveGlobalNumber() );
        for ( size_t i = 0; i < nodeEiIndices.size(); i++ ) {
            EnrichmentItem *ei = xMan->giveEnrichmentItem(nodeEiIndices [ i ]);
            if ( ei->isDofManEnriched(*dMan) ) {
                IntArray eiDofIdArray;
                ei->computeEnrichedDofManDofIdArray(eiDofIdArray, *dMan);
                answer.followedBy(eiDofIdArray);
            }
        }
    }
}
void PetscNatural2GlobalOrdering :: init(EngngModel *emodel, EquationID ut, int di, EquationType et)
{
    Domain *d = emodel->giveDomain(di);
    int i, j, k, p, ndofs, ndofman = d->giveNumberOfDofManagers();
    int myrank = emodel->giveRank();
    DofManager *dman;
    // determine the number of local eqs + the number of those shared DOFs which are numbered by the receiver
    // (a shared dofman is numbered on the partition with the lowest rank number)
    EModelDefaultEquationNumbering dn;
    EModelDefaultPrescribedEquationNumbering dpn;

#ifdef __VERBOSE_PARALLEL
    VERBOSEPARALLEL_PRINT("PetscNatural2GlobalOrdering :: init", "initializing N2G ordering", myrank);
#endif

    l_neqs = 0;
    for ( i = 1; i <= ndofman; i++ ) {
        dman = d->giveDofManager(i);
        /*
         * if (dman->giveParallelMode() == DofManager_local) { // count all dofman eqs
         *   ndofs = dman->giveNumberOfDofs ();
         *   for (j=1; j<=ndofs; j++) {
         *     if (dman->giveDof(j)->isPrimaryDof()) {
         *       if (dman->giveDof(j)->giveEquationNumber()) l_neqs++;
         *     }
         *   }
         * } else if (dman->giveParallelMode() == DofManager_shared) {
         *   // determine if problem is the lowest one sharing the dofman; if yes the receiver is responsible to
         *   // deliver number
         *   IntArray *plist = dman->givePartitionList();
         *   int n = plist->giveSize();
         *   int minrank = myrank;
         *   for (j=1; j<=n; j++) minrank = min (minrank, plist->at(j));
         *   if (minrank == myrank) { // count eqs
         *     ndofs = dman->giveNumberOfDofs ();
         *     for (j=1; j<=ndofs; j++) {
         *       if (dman->giveDof(j)->isPrimaryDof()) {
         *         if (dman->giveDof(j)->giveEquationNumber()) l_neqs++;
         *       }
         *     }
         *   }
         * } // end shared dman
         */
        if ( isLocal(dman) ) {
            ndofs = dman->giveNumberOfDofs();
            for ( j = 1; j <= ndofs; j++ ) {
                if ( dman->giveDof(j)->isPrimaryDof() ) {
                    if ( et == et_standard ) {
                        if ( dman->giveDof(j)->giveEquationNumber(dn) ) {
                            l_neqs++;
                        }
                    } else {
                        if ( dman->giveDof(j)->giveEquationNumber(dpn) ) {
                            l_neqs++;
                        }
                    }
                }
            }
        }
    }

    // exchange with other procs the number of eqs numbered on particular procs
    int *leqs = new int [ emodel->giveNumberOfProcesses() ];
    MPI_Allgather(& l_neqs, 1, MPI_INT, leqs, 1, MPI_INT, MPI_COMM_WORLD);
    // compute local offset
    int offset = 0;
    for ( j = 0; j < myrank; j++ ) {
        offset += leqs [ j ];
    }

    // count global number of eqs
    for ( g_neqs = 0, j = 0; j < emodel->giveNumberOfProcesses(); j++ ) {
        g_neqs += leqs [ j ];
    }

    // send numbered shared ones
    if ( et == et_standard ) {
        locGlobMap.resize( emodel->giveNumberOfEquations(ut) );
    } else {
        locGlobMap.resize( emodel->giveNumberOfPrescribedEquations(ut) );
    }

    // determine shared dofs
    int psize, nproc = emodel->giveNumberOfProcesses();
    IntArray sizeToSend(nproc), sizeToRecv(nproc), nrecToReceive(nproc);
#ifdef __VERBOSE_PARALLEL
    IntArray nrecToSend(nproc);
#endif
    const IntArray *plist;
    for ( i = 1; i <= ndofman; i++ ) {
        //if (domain->giveDofManager(i)->giveParallelMode() == DofManager_shared) {
        if ( isShared( d->giveDofManager(i) ) ) {
            int n = d->giveDofManager(i)->giveNumberOfDofs();
            plist = d->giveDofManager(i)->givePartitionList();
            psize = plist->giveSize();
            int minrank = myrank;
            for ( j = 1; j <= psize; j++ ) {
                minrank = min( minrank, plist->at(j) );
            }

            if ( minrank == myrank ) { // count to send
                for ( j = 1; j <= psize; j++ ) {
#ifdef __VERBOSE_PARALLEL
                    nrecToSend( plist->at(j) )++;
#endif
                    sizeToSend( plist->at(j) ) += ( 1 + n ); // ndofs + dofman number
                }
            } else {
                nrecToReceive(minrank)++;
                sizeToRecv(minrank) += ( 1 + n ); // ndofs + dofman number
            }
        }
    }

#ifdef __VERBOSE_PARALLEL
    for ( i = 0; i < nproc; i++ ) {
        OOFEM_LOG_INFO("[%d] Record Statistics: Sending %d Receiving %d to %d\n",
                       myrank, nrecToSend(i), nrecToReceive(i), i);
    }
#endif

    std :: map< int, int > globloc; // global->local mapping for shared dofmans

    // number local guys
    int globeq = offset;
    for ( i = 1; i <= ndofman; i++ ) {
        dman = d->giveDofManager(i);
        //if (dman->giveParallelMode() == DofManager_shared) {
        if ( isShared(dman) ) {
            globloc [ dman->giveGlobalNumber() ] = i; // build global->local mapping for shared

            plist = dman->givePartitionList();
            psize = plist->giveSize();
            int minrank = myrank;
            for ( j = 1; j <= psize; j++ ) {
                minrank = min( minrank, plist->at(j) );
            }

            if ( minrank == myrank ) { // local
                ndofs = dman->giveNumberOfDofs();
                for ( j = 1; j <= ndofs; j++ ) {
                    if ( dman->giveDof(j)->isPrimaryDof() ) {
                        int eq;
                        if ( et == et_standard ) {
                            eq = dman->giveDof(j)->giveEquationNumber(dn);
                        } else {
                            eq = dman->giveDof(j)->giveEquationNumber(dpn);
                        }

                        if ( eq ) {
                            locGlobMap.at(eq) = globeq++;
                        }
                    }
                }
            }

        //} else if (dman->giveParallelMode() == DofManager_local) {
        } else {
            ndofs = dman->giveNumberOfDofs();
            for ( j = 1; j <= ndofs; j++ ) {
                if ( dman->giveDof(j)->isPrimaryDof() ) {
                    int eq;
                    if ( et == et_standard ) {
                        eq = dman->giveDof(j)->giveEquationNumber(dn);
                    } else {
                        eq = dman->giveDof(j)->giveEquationNumber(dpn);
                    }

                    if ( eq ) {
                        locGlobMap.at(eq) = globeq++;
                    }
                }
            }
        }
    }

    /*
     * fprintf (stderr, "[%d] locGlobMap: ", myrank);
     * for (i=1; i<=locGlobMap.giveSize(); i++)
     *   fprintf (stderr, "%d ", locGlobMap.at(i));
     */

    // pack data for remote procs
    CommunicationBuffer **buffs = new CommunicationBuffer * [ nproc ];
    for ( p = 0; p < nproc; p++ ) {
        buffs [ p ] = new StaticCommunicationBuffer(MPI_COMM_WORLD, 0);
        buffs [ p ]->resize( buffs [ p ]->givePackSize(MPI_INT, 1) * sizeToSend(p) );
#if 0
        OOFEM_LOG_INFO( "[%d]PetscN2G:: init: Send buffer[%d] size %d\n", myrank, p, sizeToSend(p) );
#endif
    }

    for ( i = 1; i <= ndofman; i++ ) {
        if ( isShared( d->giveDofManager(i) ) ) {
            dman = d->giveDofManager(i);
            plist = dman->givePartitionList();
            psize = plist->giveSize();
            int minrank = myrank;
            for ( j = 1; j <= psize; j++ ) {
                minrank = min( minrank, plist->at(j) );
            }

            if ( minrank == myrank ) { // do send
                for ( j = 1; j <= psize; j++ ) {
                    p = plist->at(j);
                    if ( p == myrank ) {
                        continue;
                    }

#if 0
                    OOFEM_LOG_INFO("[%d]PetscN2G:: init: Sending localShared node %d[%d] to proc %d\n", myrank, i, dman->giveGlobalNumber(), p);
#endif
                    buffs [ p ]->packInt( dman->giveGlobalNumber() );
                    ndofs = dman->giveNumberOfDofs();
                    for ( k = 1; k <= ndofs; k++ ) {
                        if ( dman->giveDof(k)->isPrimaryDof() ) {
                            int eq;
                            if ( et == et_standard ) {
                                eq = dman->giveDof(k)->giveEquationNumber(dn);
                            } else {
                                eq = dman->giveDof(k)->giveEquationNumber(dpn);
                            }

                            if ( eq ) {
                                buffs [ p ]->packInt( locGlobMap.at(eq) );
                            }
                        }
                    }
                }
            }
        }
    }

    //fprintf (stderr, "[%d] Sending glob nums ...", myrank);
    // send buffers
    for ( p = 0; p < nproc; p++ ) {
        if ( p != myrank ) {
            buffs [ p ]->iSend(p, 999);
        }
    }

    /****
     * for (p=0; p<nproc; p++) {
     *   if (p == myrank) continue;
     *   for (i=1; i<= ndofman; i++) {
     *     //if (domain->giveDofManager(i)->giveParallelMode() == DofManager_shared) {
     *     if (isShared(d->giveDofManager(i))) {
     *       dman = d->giveDofManager(i);
     *       plist = dman->givePartitionList();
     *       psize = plist->giveSize();
     *       int minrank = myrank;
     *       for (j=1; j<=psize; j++) minrank = min (minrank, plist->at(j));
     *       if (minrank == myrank) { // do send
     *         buffs[p]->packInt(dman->giveGlobalNumber());
     *         ndofs = dman->giveNumberOfDofs ();
     *         for (j=1; j<=ndofs; j++) {
     *           if (dman->giveDof(j)->isPrimaryDof()) {
     *             buffs[p]->packInt(locGlobMap.at(dman->giveDof(j)->giveEquationNumber()));
     *           }
     *         }
     *       }
     *     }
     *   }
     *   // send buffer
     *   buffs[p]->iSend(p, 999);
     * }
     ****/

    // receive remote eqs and complete global numbering
    CommunicationBuffer **rbuffs = new CommunicationBuffer * [ nproc ];
    for ( p = 0; p < nproc; p++ ) {
        rbuffs [ p ] = new StaticCommunicationBuffer(MPI_COMM_WORLD, 0);
        rbuffs [ p ]->resize( rbuffs [ p ]->givePackSize(MPI_INT, 1) * sizeToRecv(p) );
#if 0
        OOFEM_LOG_INFO( "[%d]PetscN2G:: init: Receive buffer[%d] size %d\n", myrank, p, sizeToRecv(p) );
#endif
    }

    //fprintf (stderr, "[%d] Receiving glob nums ...", myrank);
    for ( p = 0; p < nproc; p++ ) {
        if ( p != myrank ) {
            rbuffs [ p ]->iRecv(p, 999);
        }
    }

    IntArray finished(nproc);
    finished.zero();
    int fin = 1;
    finished.at(emodel->giveRank() + 1) = 1;
    do {
        for ( p = 0; p < nproc; p++ ) {
            if ( finished.at(p + 1) == 0 ) {
                if ( rbuffs [ p ]->testCompletion() ) {
                    // data are here; unpack them
                    int nite = nrecToReceive(p);
                    int shdm, ldm;
                    for ( i = 1; i <= nite; i++ ) {
                        rbuffs [ p ]->unpackInt(shdm);
#if 0
                        OOFEM_LOG_INFO("[%d]PetscN2G:: init: Received shared node [%d] from proc %d\n", myrank, shdm, p);
#endif
                        // find the local guy corresponding to shdm
                        if ( globloc.find(shdm) != globloc.end() ) {
                            ldm = globloc [ shdm ];
                        } else {
                            OOFEM_ERROR3("[%d] PetscNatural2GlobalOrdering :: init: invalid shared dofman received, globnum %d\n", myrank, shdm);
                        }

                        dman = d->giveDofManager(ldm);
                        ndofs = dman->giveNumberOfDofs();
                        for ( j = 1; j <= ndofs; j++ ) {
                            if ( dman->giveDof(j)->isPrimaryDof() ) {
                                int eq;
                                if ( et == et_standard ) {
                                    eq = dman->giveDof(j)->giveEquationNumber(dn);
                                } else {
                                    eq = dman->giveDof(j)->giveEquationNumber(dpn);
                                }

                                if ( eq ) {
                                    int val;
                                    rbuffs [ p ]->unpackInt(val);
                                    locGlobMap.at(eq) = val;
                                }
                            }
                        }
                    }

                    finished.at(p + 1) = 1;
                    fin++;
                }
            }
        }
    } while ( fin < nproc );

    /*
     * fprintf (stderr, "[%d] Finished receiving glob nums ...", myrank);
     *
     * fprintf (stderr, "[%d] locGlobMap:", myrank);
     * for (i=1; i<=locGlobMap.giveSize(); i++)
     *   fprintf (stderr, "%d ", locGlobMap.at(i));
     */

#ifdef __VERBOSE_PARALLEL
    if ( et == et_standard ) {
        int _eq;
        const char *ptr;
        const char *locname = "local", *shname = "shared", *unkname = "unknown";
        for ( i = 1; i <= ndofman; i++ ) {
            dman = d->giveDofManager(i);
            if ( dman->giveParallelMode() == DofManager_local ) {
                ptr = locname;
            } else if ( dman->giveParallelMode() == DofManager_shared ) {
                ptr = shname;
            } else {
                ptr = unkname;
            }

            ndofs = dman->giveNumberOfDofs();
            for ( j = 1; j <= ndofs; j++ ) {
                if ( ( _eq = dman->giveDof(j)->giveEquationNumber(dn) ) ) {
                    fprintf( stderr, "[%d] n:%6s %d[%d] (%d), leq = %d, geq = %d\n",
                             emodel->giveRank(), ptr, i, dman->giveGlobalNumber(), j, _eq, locGlobMap.at(_eq) );
                } else {
                    fprintf( stderr, "[%d] n:%6s %d[%d] (%d), leq = %d, geq = %d\n",
                             emodel->giveRank(), ptr, i, dman->giveGlobalNumber(), j, _eq, 0 );
                }
            }
        }
    }
#endif

    // build reverse map
    int lneq;
    if ( et == et_standard ) {
        lneq = emodel->giveNumberOfEquations(ut);
    } else {
        lneq = emodel->giveNumberOfPrescribedEquations(ut);
    }

    globLocMap.clear();
    for ( i = 1; i <= lneq; i++ ) {
        globLocMap [ locGlobMap.at(i) ] = i;
    }

    for ( p = 0; p < nproc; p++ ) {
        delete rbuffs [ p ];
        delete buffs [ p ];
    }

    delete [] rbuffs;
    delete [] buffs;
    delete [] leqs;

    MPI_Barrier(MPI_COMM_WORLD);

#ifdef __VERBOSE_PARALLEL
    VERBOSEPARALLEL_PRINT("PetscNatural2GlobalOrdering :: init", "done", myrank);
#endif
}
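/*
 * Usage sketch for the two maps built above: locGlobMap translates a local equation number
 * into its global (PETSc) number, and globLocMap holds the reverse lookup. The accessor
 * names below are hypothetical.
 */
#if 0
int PetscNatural2GlobalOrdering :: giveGlobalEqSketch(int leq)
{
    return locGlobMap.at(leq); // local -> global
}

int PetscNatural2GlobalOrdering :: giveLocalEqSketch(int geq)
{
    auto pos = globLocMap.find(geq);
    return ( pos != globLocMap.end() ) ? pos->second : 0; // 0 if geq is not numbered locally
}
#endif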
void ProblemCommunicator :: setUpCommunicationMapsForElementCut(EngngModel *pm, bool excludeSelfCommFlag)
{
    Domain *domain = pm->giveDomain(1);
    int nnodes = domain->giveNumberOfDofManagers();
    int i, j, partition;

    if ( this->mode == ProblemCommMode__ELEMENT_CUT ) {
        /*
         * Initially, each partition knows for which nodes a receive
         * is needed (and can therefore compute easily the recv map),
         * but does not know for which nodes it should send data to which
         * partition. Hence, the communication setup is performed by
         * broadcasting "send request" lists of nodes for which
         * a partition expects to receive data (ie. of those nodes
         * which the partition uses, but does not own) to all
         * collaborating processes. The "send request" lists are
         * converted into send maps.
         */

        // receive maps can be built locally,
        // but send maps should be assembled from broadcasted lists (containing
        // expected receive nodes) of remote partitions.

        // first build local receive map
        IntArray domainNodeRecvCount(size);
        const IntArray *partitionList;
        DofManager *dofMan;
        //Element *element;
        int domainRecvListSize = 0, domainRecvListPos = 0;
        //int nelems;
        int result = 1;

        for ( i = 1; i <= nnodes; i++ ) {
            partitionList = domain->giveDofManager(i)->givePartitionList();
            if ( domain->giveDofManager(i)->giveParallelMode() == DofManager_remote ) {
                // size of partitionList should be 1 <== only one master
                for ( j = 1; j <= partitionList->giveSize(); j++ ) {
                    if ( !( excludeSelfCommFlag && ( this->rank == partitionList->at(j) ) ) ) {
                        domainRecvListSize++;
                        domainNodeRecvCount.at(partitionList->at(j) + 1)++;
                    }
                }
            }
        }

        // build maps simultaneously
        IntArray pos(size);
        IntArray **maps = new IntArray * [ size ];
        for ( i = 0; i < size; i++ ) {
            maps [ i ] = new IntArray( domainNodeRecvCount.at(i + 1) );
        }

        // allocate also the domain receive list to be broadcasted
        IntArray domainRecvList(domainRecvListSize);

        if ( domainRecvListSize ) {
            for ( i = 1; i <= nnodes; i++ ) {
                // test if the node is remote
                dofMan = domain->giveDofManager(i);
                if ( dofMan->giveParallelMode() == DofManager_remote ) {
                    domainRecvList.at(++domainRecvListPos) = dofMan->giveGlobalNumber();

                    partitionList = domain->giveDofManager(i)->givePartitionList();
                    // size of partitionList should be 1 <== only one master
                    for ( j = 1; j <= partitionList->giveSize(); j++ ) {
                        if ( !( excludeSelfCommFlag && ( this->rank == partitionList->at(j) ) ) ) {
                            partition = partitionList->at(j);
                            maps [ partition ]->at( ++pos.at(partition + 1) ) = i;
                        }
                    }
                }
            }
        }

        // set up process recv communicator maps
        for ( i = 0; i < size; i++ ) {
            this->setProcessCommunicatorToRecvArry(this->giveProcessCommunicator(i), * maps [ i ]);
            //this->giveDomainCommunicator(i)->setToRecvArry (this->engngModel, *maps[i]);
        }

        // delete local maps
        for ( i = 0; i < size; i++ ) {
            delete maps [ i ];
        }

        delete [] maps;

        // to assemble send maps, we must analyze broadcasted remote domain send lists
        // and we must also broadcast our send list.

#ifdef __VERBOSE_PARALLEL
        VERBOSEPARALLEL_PRINT("ProblemCommunicator::setUpCommunicationMaps", "Element-cut broadcasting started", rank);
#endif

        StaticCommunicationBuffer commBuff(MPI_COMM_WORLD);
        IntArray remoteDomainRecvList;
        IntArray toSendMap;
        int localExpectedSize, globalRecvSize;
        int sendMapPos, sendMapSize, globalDofManNum;

        // determine the size of the receive buffer using an AllReduce operation
#ifndef IBM_MPI_IMPLEMENTATION
        localExpectedSize = domainRecvList.givePackSize(commBuff);
#else
        localExpectedSize = domainRecvList.givePackSize(commBuff) + 1;
#endif

#ifdef __USE_MPI
        result = MPI_Allreduce(& localExpectedSize, & globalRecvSize, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
        if ( result != MPI_SUCCESS ) {
            _error("setUpCommunicationMaps: MPI_Allreduce failed");
        }

#else
WARNING: NOT SUPPORTED MESSAGE PASSING LIBRARY
#endif

#ifdef __VERBOSE_PARALLEL
        VERBOSEPARALLEL_PRINT("ProblemCommunicator::setUpCommunicationMaps", "Finished reducing receiveBufferSize", rank);
#endif

        // resize to fit the largest received message
        commBuff.resize(globalRecvSize);

        // resize toSend map to the maximum possible size
        toSendMap.resize(globalRecvSize);

        for ( i = 0; i < size; i++ ) { // loop over domains
            commBuff.init();
            if ( i == rank ) {
                // the current domain has to send its receive list to all domains
                // broadcast domainRecvList

#ifdef __VERBOSE_PARALLEL
                VERBOSEPARALLEL_PRINT("ProblemCommunicator::setUpCommunicationMaps", "Broadcasting own send list", rank);
#endif

                commBuff.packIntArray(domainRecvList);
                result = commBuff.bcast(i);
                if ( result != MPI_SUCCESS ) {
                    _error("setUpCommunicationMaps: commBuff broadcast failed");
                }

#ifdef __VERBOSE_PARALLEL
                VERBOSEPARALLEL_PRINT("ProblemCommunicator::setUpCommunicationMaps", "Broadcasting own send list finished", rank);
#endif
            } else {
#ifdef __VERBOSE_PARALLEL
                OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Receiving broadcasted send map from partition %3d\n",
                                rank, "ProblemCommunicator :: unpackAllData", i);
#endif
                // receive broadcasted lists
                result = commBuff.bcast(i);
                if ( result != MPI_SUCCESS ) {
                    _error("setUpCommunicationMaps: commBuff broadcast failed");
                }

#ifdef __VERBOSE_PARALLEL
                OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Receiving broadcasted send map from partition %3d finished\n",
                                rank, "ProblemCommunicator :: unpackAllData", i);
#endif

                // unpack the remote receive list
                if ( !commBuff.unpackIntArray(remoteDomainRecvList) ) {
                    _error("ProblemCommunicator::setUpCommunicationMaps: unpack remote receive list failed");
                }

                // find whether the remote nodes are in the local partition;
                // if yes, add them into the send map for the corresponding i-th partition
                sendMapPos = 0;
                sendMapSize = 0;
                // determine sendMap size
                for ( j = 1; j <= nnodes; j++ ) { // loop over local DofManagers
                    dofMan = domain->giveDofManager(j);
                    globalDofManNum = dofMan->giveGlobalNumber();
                    // test if globalDofManNum is in remoteDomainRecvList
                    if ( remoteDomainRecvList.findFirstIndexOf(globalDofManNum) ) {
                        sendMapSize++;
                    }
                }

                toSendMap.resize(sendMapSize);

                for ( j = 1; j <= nnodes; j++ ) { // loop over local DofManagers
                    dofMan = domain->giveDofManager(j);
                    globalDofManNum = dofMan->giveGlobalNumber();
                    // test if globalDofManNum is in remoteDomainRecvList
                    if ( remoteDomainRecvList.findFirstIndexOf(globalDofManNum) ) {
                        // add this local DofManager number to the send map for the active partition
                        toSendMap.at(++sendMapPos) = j;
                    }
                } // end loop over local DofManagers

                // set send map to i-th process communicator
                this->setProcessCommunicatorToSendArry(this->giveProcessCommunicator(i), toSendMap);
                //this->giveDomainCommunicator(i)->setToSendArry (this->engngModel, toSendMap);
            } // end receiving broadcasted lists

#ifdef __VERBOSE_PARALLEL
            VERBOSEPARALLEL_PRINT("ProblemCommunicator::setUpCommunicationMaps", "Receiving broadcasted send maps finished", rank);
#endif
        } // end loop over domains
    } else {
        _error("setUpCommunicationMapsForElementCut: unknown mode");
    }
}
int LoadBalancer :: packMigratingData(Domain *d, ProcessCommunicator &pc)
{
    int myrank = d->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();
    int idofman, ndofman;
    classType dtype;
    DofManager *dofman;
    LoadBalancer :: DofManMode dmode;

    // **************************************************
    // Pack migrating data to remote partition
    // **************************************************

    // pack dofManagers
    if ( iproc == myrank ) {
        return 1; // skip local partition
    }

    // query process communicator to use
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();
    ProcessCommDataStream pcDataStream(pcbuff);

    // loop over dofManagers
    ndofman = d->giveNumberOfDofManagers();
    for ( idofman = 1; idofman <= ndofman; idofman++ ) {
        dofman = d->giveDofManager(idofman);
        dmode = this->giveDofManState(idofman);
        dtype = dofman->giveClassID();
        // sync data to remote partition
        // if the dofman is already present on the remote partition, there is no need to sync
        //if ((this->giveDofManPartitions(idofman)->findFirstIndexOf(iproc))) {
        if ( ( this->giveDofManPartitions(idofman)->findFirstIndexOf(iproc) ) &&
             ( !dofman->givePartitionList()->findFirstIndexOf(iproc) ) ) {
            pcbuff->packInt(dtype);
            pcbuff->packInt(dmode);
            pcbuff->packInt( dofman->giveGlobalNumber() );

            // pack dofman state (this is the local dofman, not available on the remote partition)
            /* This is a potential performance leak: sending a shared dofman to a partition
             * on which it is already shared does not require sending its context (it is
             * already there). For simplicity, the context is always sent here.
             */
            dofman->saveContext(& pcDataStream, CM_Definition | CM_DefinitionGlobal | CM_State | CM_UnknownDictState);
            // send the list of new partitions
            pcbuff->packIntArray( * ( this->giveDofManPartitions(idofman) ) );
        }
    }

    // pack end-of-dofman-section record
    pcbuff->packInt(LOADBALANCER_END_DATA);

    int ielem, nelem = d->giveNumberOfElements(), nsend = 0;
    Element *elem;

    for ( ielem = 1; ielem <= nelem; ielem++ ) { // begin loop over elements
        elem = d->giveElement(ielem);
        if ( ( elem->giveParallelMode() == Element_local ) &&
             ( this->giveElementPartition(ielem) == iproc ) ) {
            // pack local element (node numbers should be global ones!)
            // pack type
            pcbuff->packInt( elem->giveClassID() );
            // nodal numbers should be packed as global!
            elem->saveContext(& pcDataStream, CM_Definition | CM_DefinitionGlobal | CM_State);
            nsend++;
        }
    } // end loop over elements

    // pack end-of-element-record
    pcbuff->packInt(LOADBALANCER_END_DATA);

    OOFEM_LOG_RELEVANT("[%d] LoadBalancer:: sending %d migrating elements to %d\n", myrank, nsend, iproc);
    return 1;
}
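/*
 * Outline of the receive side of the protocol above (OOFEM provides the actual counterpart
 * as LoadBalancer :: unpackMigratingData; this is only a sketch of the record layout),
 * assuming unpackInt()/unpackIntArray() counterparts to packInt()/packIntArray(); the name
 * unpackMigratingDataSketch is hypothetical.
 */
#if 0
int LoadBalancer :: unpackMigratingDataSketch(Domain *d, ProcessCommunicator &pc)
{
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();
    ProcessCommDataStream pcDataStream(pcbuff);
    int _type;

    // dofman section: (classType, DofManMode, global number, context, new partition list)*
    pcbuff->unpackInt(_type);
    while ( _type != LOADBALANCER_END_DATA ) {
        // ... create or locate the dofman, restoreContext(& pcDataStream, ...),
        //     then read its new partition list with pcbuff->unpackIntArray(...) ...
        pcbuff->unpackInt(_type);
    }

    // element section: (classType, context)*
    pcbuff->unpackInt(_type);
    while ( _type != LOADBALANCER_END_DATA ) {
        // ... create the element and restoreContext(& pcDataStream, CM_Definition | CM_State),
        //     as done in NonlocalMaterialWTP :: unpackRemoteElements() ...
        pcbuff->unpackInt(_type);
    }

    return 1;
}
#endif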
void TrPlaneStress2dXFEM :: giveCompositeExportData(std :: vector< VTKPiece > &vtkPieces, IntArray &primaryVarsToExport, IntArray &internalVarsToExport, IntArray cellVarsToExport, TimeStep *tStep)
{
    vtkPieces.resize(1);

    const int numCells = mSubTri.size();
    if ( numCells == 0 ) {
        // Enriched but uncut element:
        // visualize as a single linear triangle.
        vtkPieces [ 0 ].setNumberOfCells(1);

        int numTotalNodes = 3;
        vtkPieces [ 0 ].setNumberOfNodes(numTotalNodes);

        // Node coordinates
        std :: vector< FloatArray > nodeCoords;
        for ( int i = 1; i <= 3; i++ ) {
            FloatArray &x = * ( giveDofManager(i)->giveCoordinates() );
            nodeCoords.push_back(x);
            vtkPieces [ 0 ].setNodeCoords(i, x);
        }

        // Connectivity
        IntArray nodes1 = {1, 2, 3};
        vtkPieces [ 0 ].setConnectivity(1, nodes1);

        // Offset
        int offset = 3;
        vtkPieces [ 0 ].setOffset(1, offset);

        // Cell types
        vtkPieces [ 0 ].setCellType(1, 5); // Linear triangle

        // Export nodal variables from primary fields
        vtkPieces [ 0 ].setNumberOfPrimaryVarsToExport(primaryVarsToExport.giveSize(), numTotalNodes);
        for ( int fieldNum = 1; fieldNum <= primaryVarsToExport.giveSize(); fieldNum++ ) {
            UnknownType type = ( UnknownType ) primaryVarsToExport.at(fieldNum);

            for ( int nodeInd = 1; nodeInd <= numTotalNodes; nodeInd++ ) {
                if ( type == DisplacementVector ) { // compute displacement
                    FloatArray u = {0.0, 0.0, 0.0};

                    // Fetch global coordinates (in undeformed configuration)
                    const FloatArray &x = nodeCoords [ nodeInd - 1 ];

                    // Compute local coordinates
                    FloatArray locCoord;
                    computeLocalCoordinates(locCoord, x);

                    // Compute displacement in the point
                    FloatMatrix NMatrix;
                    computeNmatrixAt(locCoord, NMatrix);
                    FloatArray solVec;
                    computeVectorOf(VM_Total, tStep, solVec);
                    FloatArray uTemp;
                    uTemp.beProductOf(NMatrix, solVec);

                    if ( uTemp.giveSize() == 3 ) {
                        u = uTemp;
                    } else {
                        u = {uTemp [ 0 ], uTemp [ 1 ], 0.0};
                    }

                    vtkPieces [ 0 ].setPrimaryVarInNode(fieldNum, nodeInd, u);
                } else {
                    printf("fieldNum: %d\n", fieldNum);
                    // TODO: Implement
                    //    ZZNodalRecoveryMI_recoverValues(values, layer, ( InternalStateType ) 1, tStep); // does not work well - fix
                    //    for ( int j = 1; j <= numCellNodes; j++ ) {
                    //        vtkPiece.setPrimaryVarInNode(fieldNum, nodeNum, values [ j - 1 ]);
                    //        nodeNum += 1;
                    //    }
                }
            }
        }

        // Export nodal variables from internal fields
        vtkPieces [ 0 ].setNumberOfInternalVarsToExport(0, numTotalNodes);

        // Export cell variables
        vtkPieces [ 0 ].setNumberOfCellVarsToExport(cellVarsToExport.giveSize(), 1);
        for ( int i = 1; i <= cellVarsToExport.giveSize(); i++ ) {
            InternalStateType type = ( InternalStateType ) cellVarsToExport.at(i);
            FloatArray average;
            std :: unique_ptr< IntegrationRule > &iRule = integrationRulesArray [ 0 ];
            VTKXMLExportModule :: computeIPAverage(average, iRule.get(), this, type, tStep);

            // expand the Voigt-style average into a full 3x3 tensor stored row-wise
            FloatArray averageV9(9);
            averageV9.at(1) = average.at(1);
            averageV9.at(5) = average.at(2);
            averageV9.at(9) = average.at(3);
            averageV9.at(6) = averageV9.at(8) = average.at(4);
            averageV9.at(3) = averageV9.at(7) = average.at(5);
            averageV9.at(2) = averageV9.at(4) = average.at(6);

            vtkPieces [ 0 ].setCellVar(i, 1, averageV9);
        }

        // Export of XFEM related quantities
        if ( domain->hasXfemManager() ) {
            XfemManager *xMan = domain->giveXfemManager();

            int nEnrIt = xMan->giveNumberOfEnrichmentItems();
            vtkPieces [ 0 ].setNumberOfInternalXFEMVarsToExport(xMan->vtkExportFields.giveSize(), nEnrIt, numTotalNodes);

            const int nDofMan = giveNumberOfDofManagers();

            for ( int field = 1; field <= xMan->vtkExportFields.giveSize(); field++ ) {
                XFEMStateType xfemstype = ( XFEMStateType ) xMan->vtkExportFields [ field - 1 ];

                for ( int enrItIndex = 1; enrItIndex <= nEnrIt; enrItIndex++ ) {
                    EnrichmentItem *ei = xMan->giveEnrichmentItem(enrItIndex);
                    for ( int nodeInd = 1; nodeInd <= numTotalNodes; nodeInd++ ) {
                        const FloatArray &x = nodeCoords [ nodeInd - 1 ];
                        FloatArray locCoord;
                        computeLocalCoordinates(locCoord, x);

                        FloatArray N;
                        FEInterpolation *interp = giveInterpolation();
                        interp->evalN( N, locCoord, FEIElementGeometryWrapper(this) );

                        if ( xfemstype == XFEMST_LevelSetPhi ) {
                            double levelSet = 0.0, levelSetInNode = 0.0;

                            // interpolate the normal level set from the element nodes
                            for ( int elNodeInd = 1; elNodeInd <= nDofMan; elNodeInd++ ) {
                                DofManager *dMan = giveDofManager(elNodeInd);
                                ei->evalLevelSetNormalInNode( levelSetInNode, dMan->giveGlobalNumber(), * ( dMan->giveCoordinates() ) );

                                levelSet += N.at(elNodeInd) * levelSetInNode;
                            }

                            FloatArray valueArray = {levelSet};
                            vtkPieces [ 0 ].setInternalXFEMVarInNode(field, enrItIndex, nodeInd, valueArray);
                        } else if ( xfemstype == XFEMST_LevelSetGamma ) {
                            double levelSet = 0.0, levelSetInNode = 0.0;

                            // interpolate the tangential level set from the element nodes
                            for ( int elNodeInd = 1; elNodeInd <= nDofMan; elNodeInd++ ) {
                                DofManager *dMan = giveDofManager(elNodeInd);
                                ei->evalLevelSetTangInNode( levelSetInNode, dMan->giveGlobalNumber(), * ( dMan->giveCoordinates() ) );

                                levelSet += N.at(elNodeInd) * levelSetInNode;
                            }

                            FloatArray valueArray = {levelSet};
                            vtkPieces [ 0 ].setInternalXFEMVarInNode(field, enrItIndex, nodeInd, valueArray);
                        } else if ( xfemstype == XFEMST_NodeEnrMarker ) {
                            double nodeEnrMarker = 0.0, nodeEnrMarkerInNode = 0.0;

                            // interpolate the node enrichment marker from the element nodes
                            for ( int elNodeInd = 1; elNodeInd <= nDofMan; elNodeInd++ ) {
                                DofManager *dMan = giveDofManager(elNodeInd);
                                ei->evalNodeEnrMarkerInNode( nodeEnrMarkerInNode, dMan->giveGlobalNumber() );

                                nodeEnrMarker += N.at(elNodeInd) * nodeEnrMarkerInNode;
                            }

                            FloatArray valueArray = {nodeEnrMarker};
                            vtkPieces [ 0 ].setInternalXFEMVarInNode(field, enrItIndex, nodeInd, valueArray);
                        }
                    }
                }
            }
        }
    } else {
        // Enriched and cut element
        XfemStructuralElementInterface :: giveSubtriangulationCompositeExportData(vtkPieces, primaryVarsToExport, internalVarsToExport, cellVarsToExport, tStep);
    }
}