int NodalAveragingRecoveryModel :: unpackSharedDofManData(parallelStruct *s, ProcessCommunicator &processComm)
{
    int result = 1;
    int i, j, eq, indx, size, flag, intValue;
    IntArray const *toRecvMap = processComm.giveToRecvMap();
    ProcessCommunicatorBuff *pcbuff = processComm.giveProcessCommunicatorBuff();
    double value;

    size = toRecvMap->giveSize();
    for ( i = 1; i <= size; i++ ) {
        indx = s->regionNodalNumbers->at( toRecvMap->at(i) );
        // toRecvMap contains all dofmans shared with the remote partition;
        // one has to check whether the received contribution for a particular shared node is valid for the given region
        result &= pcbuff->unpackInt(flag);
        if ( flag ) {
            // "1" indicates that the contribution for this shared node is valid
            result &= pcbuff->unpackInt(intValue);
            // accumulate only if the node belongs to the region on this partition
            if ( indx ) {
                s->regionDofMansConnectivity->at(indx) += intValue;
            }

            eq = ( indx - 1 ) * s->regionValSize;
            for ( j = 1; j <= s->regionValSize; j++ ) {
                result &= pcbuff->unpackDouble(value);
                if ( indx ) {
                    s->lhs->at(eq + j) += value;
                }
            }
        }
    }

    return result;
}
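
/*
 * For reference, a minimal sketch of the matching send-side routine. It is not part of
 * this listing; it assumes the symmetric pack* API on ProcessCommunicatorBuff
 * (packInt/packDouble) and a giveToSendMap() counterpart of giveToRecvMap(). The
 * sender decides validity from its own region numbering, which is what the flag
 * unpacked above encodes.
 */
int NodalAveragingRecoveryModel :: packSharedDofManData(parallelStruct *s, ProcessCommunicator &processComm)
{
    int result = 1;
    int i, j, eq, indx, size;
    IntArray const *toSendMap = processComm.giveToSendMap(); // assumed counterpart of giveToRecvMap()
    ProcessCommunicatorBuff *pcbuff = processComm.giveProcessCommunicatorBuff();

    size = toSendMap->giveSize();
    for ( i = 1; i <= size; i++ ) {
        indx = s->regionNodalNumbers->at( toSendMap->at(i) );
        if ( indx ) {
            // valid contribution: flag "1", then the connectivity count and the nodal values
            result &= pcbuff->packInt(1);
            result &= pcbuff->packInt( s->regionDofMansConnectivity->at(indx) );
            eq = ( indx - 1 ) * s->regionValSize;
            for ( j = 1; j <= s->regionValSize; j++ ) {
                result &= pcbuff->packDouble( s->lhs->at(eq + j) );
            }
        } else {
            // node not in this region on the sending partition: flag "0" only
            result &= pcbuff->packInt(0);
        }
    }

    return result;
}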

int NonlocalMaterialWTP :: unpackMigratingElementDependencies(Domain *d, ProcessCommunicator &pc)
{
    int myrank = d->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();
    int _globnum;

    if ( iproc == myrank ) {
        return 1; // skip local partition
    }

    // query process communicator to use
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();
    ProcessCommDataStream pcDataStream(pcbuff);

    // unpack element data
    do {
        pcbuff->unpackInt(_globnum);
        if ( _globnum == NonlocalMaterialWTP_END_DATA ) {
            break;
        }

        pcbuff->unpackIntArray(nonlocElementDependencyMap [ _globnum ]);
    } while ( 1 );

    return 1;
}
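
/*
 * The stream consumed above is a sequence of (element global number, dependency
 * IntArray) pairs closed by NonlocalMaterialWTP_END_DATA. A send-side sketch under
 * the assumption of the symmetric packInt/packIntArray API (the sender is not shown
 * in this listing):
 *
 *   pcbuff->packInt(globnum);
 *   pcbuff->packIntArray(nonlocElementDependencyMap [ globnum ]);
 *   ...
 *   pcbuff->packInt(NonlocalMaterialWTP_END_DATA);
 */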

int ParmetisLoadBalancer :: unpackSharedDmanPartitions(ProcessCommunicator &pc)
{
    int myrank = domain->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();
    int _globnum, _locnum;
    IntArray _partitions;

    if ( iproc == myrank ) {
        return 1; // skip local partition
    }

    // query process communicator to use
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();

    // init domain global2local map
    domain->initGlobalDofManMap();

    pcbuff->unpackInt(_globnum);
    // unpack dofman data
    while ( _globnum != PARMETISLB_END_DATA ) {
        pcbuff->unpackIntArray(_partitions);
        if ( ( _locnum = domain->dofmanGlobal2Local(_globnum) ) ) {
            this->addSharedDofmanPartitions(_locnum, _partitions);
        } else {
            OOFEM_ERROR2("ParmetisLoadBalancer::unpackSharedDmanPartitions: internal error, unknown global dofman %d", _globnum);
        }

        /*
         * fprintf(stderr, "[%d] Received shared plist of %d ", myrank, _globnum);
         * for ( int _i = 1; _i <= dofManPartitions [ _locnum - 1 ].giveSize(); _i++ ) {
         *     fprintf(stderr, "%d ", dofManPartitions [ _locnum - 1 ].at(_i));
         * }
         * fprintf(stderr, "\n");
         */
        pcbuff->unpackInt(_globnum);
    }

    return 1;
}
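
/*
 * Wire format implied by the loop above: (global dofman number, partition IntArray)
 * pairs, closed by PARMETISLB_END_DATA. A minimal send-side sketch, assuming the
 * symmetric pack* API:
 *
 *   pcbuff->packInt(globnum);
 *   pcbuff->packIntArray(partitions);
 *   ...
 *   pcbuff->packInt(PARMETISLB_END_DATA);
 */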

int LoadBalancer :: unpackMigratingData(Domain *d, ProcessCommunicator &pc)
{
    // create temp space for dofManagers and elements
    // merging should be made by domain ?
    // maps of new dofmanagers and elements indexed by global number
    // we can put local dofManagers and elements into maps (should be done before unpacking)
    // int nproc = this->giveEngngModel()->giveNumberOfProcesses();
    int myrank = d->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();
    int _mode, _globnum, _type;
    bool _newentry;
    classType _etype;
    IntArray _partitions, local_partitions;
    //LoadBalancer::DofManMode dmode;
    DofManager *dofman;
    DomainTransactionManager *dtm = d->giveTransactionManager();

    // **************************************************
    // Unpack migrating data to remote partition
    // **************************************************

    if ( iproc == myrank ) {
        return 1; // skip local partition
    }

    // query process communicator to use
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();
    ProcessCommDataStream pcDataStream(pcbuff);

    pcbuff->unpackInt(_type);
    // unpack dofman data
    while ( _type != LOADBALANCER_END_DATA ) {
        _etype = ( classType ) _type;
        pcbuff->unpackInt(_mode);
        switch ( _mode ) {
        case LoadBalancer :: DM_Remote:
            // receiving a new local dofManager
            pcbuff->unpackInt(_globnum);
            /*
             * _newentry = false;
             * if ( ( dofman = dtm->giveDofManager(_globnum) ) == NULL ) {
             *     // data not available -> create a new one
             *     _newentry = true;
             *     dofman = CreateUsrDefDofManagerOfType(_etype, 0, d);
             * }
             */
            _newentry = true;
            dofman = CreateUsrDefDofManagerOfType(_etype, 0, d);
            dofman->setGlobalNumber(_globnum);
            // unpack dofman state (this is the local dofman, not available on remote)
            dofman->restoreContext(& pcDataStream, CM_Definition | CM_DefinitionGlobal | CM_State | CM_UnknownDictState);
            // unpack list of new partitions
            pcbuff->unpackIntArray(_partitions);
            dofman->setPartitionList(& _partitions);
            dofman->setParallelMode(DofManager_local);
            // add transaction if a new entry was allocated; otherwise the existing one has been modified via the returned dofman
            if ( _newentry ) {
                dtm->addTransaction(DomainTransactionManager :: DTT_ADD, DomainTransactionManager :: DCT_DofManager, _globnum, dofman);
            }

            //dmanMap[_globnum] = dofman;
            break;

        case LoadBalancer :: DM_Shared:
            // receiving a new shared dofManager that was local on the sending partition;
            // should be received only once (from the partition where it was local)
            pcbuff->unpackInt(_globnum);
            /*
             * _newentry = false;
             * if ( ( dofman = dtm->giveDofManager(_globnum) ) == NULL ) {
             *     // data not available -> mode should be SharedUpdate
             *     _newentry = true;
             *     dofman = CreateUsrDefDofManagerOfType(_etype, 0, d);
             * }
             */
            _newentry = true;
            dofman = CreateUsrDefDofManagerOfType(_etype, 0, d);
            dofman->setGlobalNumber(_globnum);
            // unpack dofman state (this is the local dofman, not available on remote)
            dofman->restoreContext(& pcDataStream, CM_Definition | CM_DefinitionGlobal | CM_State | CM_UnknownDictState);
            // unpack list of new partitions
            pcbuff->unpackIntArray(_partitions);
            dofman->setPartitionList(& _partitions);
            dofman->setParallelMode(DofManager_shared);
#ifdef __VERBOSE_PARALLEL
            fprintf(stderr, "[%d] received Shared new dofman [%d]\n", myrank, _globnum);
#endif
            // add transaction if a new entry was allocated; otherwise the existing one has been modified via the returned dofman
            if ( _newentry ) {
                dtm->addTransaction(DomainTransactionManager :: DTT_ADD, DomainTransactionManager :: DCT_DofManager, _globnum, dofman);
            }

            //dmanMap[_globnum] = dofman;
            break;

        default:
            OOFEM_ERROR2("LoadBalancer::unpackMigratingData: unexpected dof manager type (%d)", _type);
        }

        // get next type record
        pcbuff->unpackInt(_type);
    } // while ( _type != LOADBALANCER_END_DATA )

    // unpack element data
    Element *elem;
    int nrecv = 0;
    do {
        pcbuff->unpackInt(_type);
        if ( _type == LOADBALANCER_END_DATA ) {
            break;
        }

        _etype = ( classType ) _type;
        elem = CreateUsrDefElementOfType(_etype, 0, d);
        elem->restoreContext(& pcDataStream, CM_Definition | CM_State);
        elem->initForNewStep();
        dtm->addTransaction(DomainTransactionManager :: DTT_ADD, DomainTransactionManager :: DCT_Element, elem->giveGlobalNumber(), elem);
        nrecv++;
        //recvElemList.push_back(elem);
    } while ( 1 );

    OOFEM_LOG_RELEVANT("[%d] LoadBalancer:: receiving %d migrating elements from %d\n", myrank, nrecv, iproc);

    return 1;
}
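
/*
 * Wire format consumed by unpackMigratingData, as implied by the two loops above:
 *
 *   dofman records:  [ classType, mode (DM_Remote | DM_Shared), global number,
 *                      dofman context, partition IntArray ]*   LOADBALANCER_END_DATA
 *   element records: [ classType, element context ]*           LOADBALANCER_END_DATA
 *
 * The matching packMigratingData on the sending partition is expected to emit this
 * sequence via the symmetric pack* API (an assumption; the sender is not shown in
 * this listing).
 */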