int
NodalAveragingRecoveryModel :: packSharedDofManData(parallelStruct *s, ProcessCommunicator &processComm)
{
    int result = 1, i, j, indx, eq, size;
    ProcessCommunicatorBuff *pcbuff = processComm.giveProcessCommunicatorBuff();
    IntArray const *toSendMap = processComm.giveToSendMap();

    size = toSendMap->giveSize();
    for ( i = 1; i <= size; i++ ) {
        // toSendMap contains all dofmans shared with the remote partition;
        // check whether the shared node's value is available for the given region
        indx = s->regionNodalNumbers->at( toSendMap->at(i) );
        if ( indx ) {
            // pack "1" to indicate that for given shared node this is a valid contribution
            result &= pcbuff->packInt(1);
            result &= pcbuff->packInt( s->regionDofMansConnectivity->at(indx) );
            eq = ( indx - 1 ) * s->regionValSize;
            for ( j = 1; j <= s->regionValSize; j++ ) {
                result &= pcbuff->packDouble( s->lhs->at(eq + j) );
            }
        } else {
            // shared node is not in the active region (as determined by s->regionNodalNumbers)
            result &= pcbuff->packInt(0);
        }
    }

    return result;
}
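
For context, a minimal sketch of the receiving counterpart of the routine above. It assumes the buffer exposes unpackInt/unpackDouble mirrors of the pack calls and that the communicator provides a giveToRecvMap() analogous to giveToSendMap(); the accumulation target (s->lhs) is taken from the pack side and may differ in the actual implementation.

int
NodalAveragingRecoveryModel :: unpackSharedDofManData(parallelStruct *s, ProcessCommunicator &processComm)
{
    int result = 1, i, j, indx, eq, size, flag, count;
    double value;
    ProcessCommunicatorBuff *pcbuff = processComm.giveProcessCommunicatorBuff();
    IntArray const *toRecvMap = processComm.giveToRecvMap();

    size = toRecvMap->giveSize();
    for ( i = 1; i <= size; i++ ) {
        // the remote partition packed "1" if it holds a valid contribution for this shared node
        result &= pcbuff->unpackInt(flag);
        if ( flag ) {
            indx = s->regionNodalNumbers->at( toRecvMap->at(i) );
            result &= pcbuff->unpackInt(count);
            if ( indx ) {
                s->regionDofMansConnectivity->at(indx) += count;
            }
            eq = ( indx - 1 ) * s->regionValSize;
            for ( j = 1; j <= s->regionValSize; j++ ) {
                result &= pcbuff->unpackDouble(value);
                if ( indx ) {
                    // accumulate only for nodes active in this region on the receiving side
                    s->lhs->at(eq + j) += value;
                }
            }
        }
    }

    return result;
}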
Example #2
int
ParmetisLoadBalancer :: packSharedDmanPartitions(ProcessCommunicator &pc)
{
    int myrank = domain->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();
    int ndofman, idofman;
    DofManager *dofman;

    if ( iproc == myrank ) {
        return 1;                // skip local partition
    }

    // query process communicator to use
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();
    // loop over dofManagers and pack shared dofMan data
    ndofman = domain->giveNumberOfDofManagers();
    for ( idofman = 1; idofman <= ndofman; idofman++ ) {
        dofman = domain->giveDofManager(idofman);
        // test if iproc is in list of existing shared partitions
        if ( ( dofman->giveParallelMode() == DofManager_shared ) &&
            ( dofman->givePartitionList()->findFirstIndexOf(iproc) ) ) {
            // send new partitions to remote representation
            // fprintf (stderr, "[%d] sending shared plist of %d to [%d]\n", myrank, dofman->giveGlobalNumber(), iproc);
            pcbuff->packInt( dofman->giveGlobalNumber() );
            pcbuff->packIntArray( * ( this->giveDofManPartitions(idofman) ) );
        }
    }

    pcbuff->packInt(PARMETISLB_END_DATA);
    return 1;
}
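
A minimal sketch of how the remote side might consume this sentinel-terminated stream, assuming unpackInt/unpackIntArray counterparts of the calls above; the merge of the received partition list into the local shared-dofman record is only indicated, since the lookup details are not shown here.

int
ParmetisLoadBalancer :: unpackSharedDmanPartitions(ProcessCommunicator &pc)
{
    int myrank = domain->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();
    int _globnum;
    IntArray _partitions;

    if ( iproc == myrank ) {
        return 1;                // skip local partition
    }

    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();
    // read (global number, partition list) pairs until the end-of-data marker
    pcbuff->unpackInt(_globnum);
    while ( _globnum != PARMETISLB_END_DATA ) {
        pcbuff->unpackIntArray(_partitions);
        // locate the shared dofman with this global number and merge _partitions
        // into its stored partition list (lookup/merge omitted in this sketch)
        pcbuff->unpackInt(_globnum);
    }

    return 1;
}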
Example #3
int NonlocalMaterialWTP :: packMigratingElementDependencies(Domain *d, ProcessCommunicator &pc)
{
    int myrank = d->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();

    if ( iproc == myrank ) {
        return 1;                // skip local partition
    }

    // query process communicator to use
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();
    ProcessCommDataStream pcDataStream(pcbuff);

    int ielem, nelem = d->giveNumberOfElements();
    int _globnum;
    Element *elem;

    for ( ielem = 1; ielem <= nelem; ielem++ ) { // begin loop over elements
        elem = d->giveElement(ielem);
        if ( ( elem->giveParallelMode() == Element_local ) &&
            ( lb->giveElementPartition(ielem) == iproc ) ) {
            // pack local element (node numbers should be global ones!)
            // pack its global number followed by its nonlocal dependency record
            _globnum = elem->giveGlobalNumber();
            pcbuff->packInt(_globnum);
            pcbuff->packIntArray(nonlocElementDependencyMap [ _globnum ]);
        }
    } // end loop over elements

    // pack end-of-element-record
    pcbuff->packInt(NonlocalMaterialWTP_END_DATA);

    return 1;
}
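
The receiving partition would read this stream back in the same sentinel-terminated fashion; a minimal sketch, again assuming unpackInt/unpackIntArray mirrors of the pack calls and that the received dependencies are collected into a map keyed by global element number.

int NonlocalMaterialWTP :: unpackMigratingElementDependencies(Domain *d, ProcessCommunicator &pc)
{
    int myrank = d->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();
    int _globnum;

    if ( iproc == myrank ) {
        return 1;                // skip local partition
    }

    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();
    // read (global element number, dependency array) records until the end marker
    pcbuff->unpackInt(_globnum);
    while ( _globnum != NonlocalMaterialWTP_END_DATA ) {
        pcbuff->unpackIntArray(nonlocElementDependencyMap [ _globnum ]);
        pcbuff->unpackInt(_globnum);
    }

    return 1;
}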
Example #4
int
LoadBalancer :: packMigratingData(Domain *d, ProcessCommunicator &pc)
{
    int myrank = d->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();
    int idofman, ndofman;
    classType dtype;
    DofManager *dofman;
    LoadBalancer :: DofManMode dmode;

    //  **************************************************
    //  Pack migrating data to remote partition
    //  **************************************************

    // pack dofManagers
    if ( iproc == myrank ) {
        return 1;                // skip local partition
    }

    // query process communicator to use
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();
    ProcessCommDataStream pcDataStream(pcbuff);
    // loop over dofManagers
    ndofman = d->giveNumberOfDofManagers();
    for ( idofman = 1; idofman <= ndofman; idofman++ ) {
        dofman = d->giveDofManager(idofman);
        dmode = this->giveDofManState(idofman);
        dtype = dofman->giveClassID();
        // sync data to remote partition
        // if the dofman is already present on the remote partition, there is no need to sync
        //if ((this->giveDofManPartitions(idofman)->findFirstIndexOf(iproc))) {
        if ( ( this->giveDofManPartitions(idofman)->findFirstIndexOf(iproc) ) &&
            ( !dofman->givePartitionList()->findFirstIndexOf(iproc) ) ) {
            pcbuff->packInt(dtype);
            pcbuff->packInt(dmode);
            pcbuff->packInt( dofman->giveGlobalNumber() );

            // pack dofman state (this is the local dofman, not available on remote)
            /* This is a potential performance leak: sending a shared dofman to a partition
             * on which it is already shared does not require sending its context (it is
             * already there); for simplicity the context is always sent here. */
            dofman->saveContext(& pcDataStream, CM_Definition | CM_DefinitionGlobal | CM_State | CM_UnknownDictState);
            // send list of new partitions
            pcbuff->packIntArray( * ( this->giveDofManPartitions(idofman) ) );
        }
    }

    // pack end-of-dofman-section record
    pcbuff->packInt(LOADBALANCER_END_DATA);

    int ielem, nelem = d->giveNumberOfElements(), nsend = 0;

    Element *elem;

    for ( ielem = 1; ielem <= nelem; ielem++ ) { // begin loop over elements
        elem = d->giveElement(ielem);
        if ( ( elem->giveParallelMode() == Element_local ) &&
            ( this->giveElementPartition(ielem) == iproc ) ) {
            // pack local element (node numbers should be global ones!)
            // pack type
            pcbuff->packInt( elem->giveClassID() );
            // nodal numbers should be packed as global ones!
            elem->saveContext(& pcDataStream, CM_Definition | CM_DefinitionGlobal | CM_State);
            nsend++;
        }
    } // end loop over elements

    // pack end-of-element-record
    pcbuff->packInt(LOADBALANCER_END_DATA);

    OOFEM_LOG_RELEVANT("[%d] LoadBalancer:: sending %d migrating elements to %d\n", myrank, nsend, iproc);

    return 1;
}
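
For orientation, a skeleton of the matching unpack on the receiving partition: dofman records are read until the first LOADBALANCER_END_DATA, then element records until the second. The unpack*/restoreContext calls mirror the pack side above, but the factory call (CreateUsrDefDofManagerOfType) and the setters (setGlobalNumber, setPartitionList) are assumptions about the surrounding API, and the domain bookkeeping is omitted.

int
LoadBalancer :: unpackMigratingData(Domain *d, ProcessCommunicator &pc)
{
    int myrank = d->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();
    int _type, _mode, _globnum;
    IntArray _partitions;
    DofManager *dofman;

    if ( iproc == myrank ) {
        return 1;                // skip local partition
    }

    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();
    ProcessCommDataStream pcDataStream(pcbuff);

    // dofman section: (classID, DofManMode, global number, context, partition list) records
    pcbuff->unpackInt(_type);
    while ( _type != LOADBALANCER_END_DATA ) {
        pcbuff->unpackInt(_mode);
        pcbuff->unpackInt(_globnum);
        // create the received dofman; the factory name is an assumption, adapt to the local class factory
        dofman = CreateUsrDefDofManagerOfType( ( classType ) _type, 0, d );
        dofman->setGlobalNumber(_globnum);
        dofman->restoreContext(& pcDataStream, CM_Definition | CM_DefinitionGlobal | CM_State | CM_UnknownDictState);
        pcbuff->unpackIntArray(_partitions);
        dofman->setPartitionList(& _partitions);
        // registering the new dofman with the domain / transaction manager is omitted here
        pcbuff->unpackInt(_type);
    }

    // element records (classID + context) would be consumed analogously,
    // up to the second LOADBALANCER_END_DATA
    return 1;
}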