Example #1
void
ProblemCommunicator :: setUpCommunicationMapsForRemoteElementMode(EngngModel *pm,
                                                                  bool excludeSelfCommFlag)
{
    //int nnodes = domain->giveNumberOfDofManagers();
    Domain *domain = pm->giveDomain(1);
    int i, j, partition;

    if ( this->mode == ProblemCommMode__REMOTE_ELEMENT_MODE ) {
        /*
         * Initially, each partition knows for which nodes a receive
         * is needed (and can therefore compute easily the recv map),
         * but does not know for which nodes it should send data to which
         * partition. Hence, the communication setup is performed by
         * broadcasting "send request" lists of nodes for which
         * a partition expects to receive data (i.e., those nodes
         * which the partition uses, but does not own) to all
         * collaborating processes. The "send request" lists are then
         * converted into send maps.
         */

        // receive maps can be built locally,
        // but send maps should be assembled from broadcasted lists (containing
        // expected receive nodes) of remote partitions.

        // first build local receive map
        IntArray domainNodeRecvCount(size);
        const IntArray *partitionList;
        //DofManager *dofMan;
        Element *element;
        int domainRecvListSize = 0, domainRecvListPos = 0;
        int nelems;
        int result = 1;

        nelems = domain->giveNumberOfElements();
        for ( i = 1; i <= nelems; i++ ) {
            partitionList = domain->giveElement(i)->givePartitionList();
            if ( domain->giveElement(i)->giveParallelMode() == Element_remote ) {
                // size of partitionList should be 1 <== only one master
                for ( j = 1; j <= partitionList->giveSize(); j++ ) {
                    if ( !( excludeSelfCommFlag && ( this->rank == partitionList->at(j) ) ) ) {
                        domainRecvListSize++;
                        domainNodeRecvCount.at(partitionList->at(j) + 1)++;
                    }
                }
            }
        }

        // build maps simultaneously
        IntArray pos(size);
        IntArray **maps = new IntArray * [ size ];
        for ( i = 0; i < size; i++ ) {
            maps [ i ] = new IntArray( domainNodeRecvCount.at(i + 1) );
        }

        // allocate also domain receive list to be broadcasted
        IntArray domainRecvList(domainRecvListSize);

        if ( domainRecvListSize ) {
            for ( i = 1; i <= nelems; i++ ) {
                // test if element is remote one
                element = domain->giveElement(i);
                if ( element->giveParallelMode() == Element_remote ) {
                    domainRecvList.at(++domainRecvListPos) = element->giveGlobalNumber();

                    partitionList = domain->giveElement(i)->givePartitionList();
                    // size of partitionList should be 1 <== only one master
                    for ( j = 1; j <= partitionList->giveSize(); j++ ) {
                        if ( !( excludeSelfCommFlag && ( this->rank == partitionList->at(j) ) ) ) {
                            partition = partitionList->at(j);
                            maps [ partition ]->at( ++pos.at(partition + 1) ) = i;
                        }
                    }
                }
            }
        }

        // set up domains recv communicator maps
        for ( i = 0; i < size; i++ ) {
            this->setProcessCommunicatorToRecvArry(this->giveProcessCommunicator(i), * maps [ i ]);
            //this->giveDomainCommunicator(i)->setToRecvArry (this->engngModel, *maps[i]);
        }

        /*
         * #ifdef __VERBOSE_PARALLEL
         * for (i=0; i<size; i++) {
         * fprintf (stderr, "domain %d-%d: domainCommRecvsize is %d\n",rank,i,this->giveDomainCommunicator(i)->giveRecvBuff()->giveSize() );
         * printf ("domain %d-%d: reecv map:",rank,i);
         * this->giveDomainCommunicator(i)->giveToRecvMap()->printYourself();
         * }
         *#endif
         */

        // delete local maps
        for ( i = 0; i < size; i++ ) {
            delete maps [ i ];
        }

        delete [] maps;

        // to assemble send maps, we must analyze the broadcasted receive lists
        // of remote domains and also broadcast our own receive list.

#ifdef __VERBOSE_PARALLEL
        VERBOSEPARALLEL_PRINT("ProblemCommunicator::setUpCommunicationMaps", "Remote Element-cut broadcasting started", rank);
#endif


        StaticCommunicationBuffer commBuff(MPI_COMM_WORLD);
        IntArray remoteDomainRecvList;
        IntArray toSendMap;
        int localExpectedSize, globalRecvSize;
        int sendMapPos, sendMapSize, globalDofManNum;

        // determine the size of receive buffer using AllReduce operation
#ifndef IBM_MPI_IMPLEMENTATION
        localExpectedSize = domainRecvList.givePackSize(commBuff);
#else
        localExpectedSize = domainRecvList.givePackSize(commBuff) + 1;
#endif


#ifdef __USE_MPI
        result = MPI_Allreduce(& localExpectedSize, & globalRecvSize, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
        if ( result != MPI_SUCCESS ) {
            _error("setUpCommunicationMaps: MPI_Allreduce failed");
        }

#else
WARNING: NOT SUPPORTED MESSAGE PASSING LIBRARY
#endif

#ifdef __VERBOSE_PARALLEL
        VERBOSEPARALLEL_PRINT("ProblemCommunicator::setUpCommunicationMaps", "Finished reducing receiveBufferSize", rank);
#endif


        // resize to fit largest received message
        commBuff.resize(globalRecvSize);

        // resize toSend map to max possible size
        toSendMap.resize(globalRecvSize);

        for ( i = 0; i < size; i++ ) { // loop over domains
            commBuff.init();
            if ( i == rank ) {
                //current domain has to send its receive list to all domains
                // broadcast domainRecvList

#ifdef __VERBOSE_PARALLEL
                VERBOSEPARALLEL_PRINT("ProblemCommunicator::setUpCommunicationMaps", "Broadcasting own send list", rank);
#endif

                commBuff.packIntArray(domainRecvList);
                result = commBuff.bcast(i);
                if ( result != MPI_SUCCESS ) {
                    _error("setUpCommunicationMaps: commBuff broadcast failed");
                }

#ifdef __VERBOSE_PARALLEL
                VERBOSEPARALLEL_PRINT("ProblemCommunicator::setUpCommunicationMaps", "Broadcasting own send list finished", rank);
#endif
            } else {
#ifdef __VERBOSE_PARALLEL
                OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Receiving broadcasted send map from partition %3d\n",
                                rank, "ProblemCommunicator :: unpackAllData", i);
#endif
                // receive broadcasted lists
                result = commBuff.bcast(i);
                if ( result != MPI_SUCCESS ) {
                    _error("setUpCommunicationMaps: commBuff broadcast failed");
                }

#ifdef __VERBOSE_PARALLEL
                OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Receiving broadcasted send map from partition %3d finished\n",
                                rank, "ProblemCommunicator :: unpackAllData", i);
#endif


                // unpack remote receive list
                if ( !commBuff.unpackIntArray(remoteDomainRecvList) ) {
                    _error("ProblemCommunicator::setUpCommunicationMaps: unpack remote receive list failed");
                }

                // find if remote elements are in local partition
                // if yes, add them into the send map for the corresponding i-th partition
                sendMapPos = 0;
                sendMapSize = 0;
                // determine sendMap size
                for ( j = 1; j <= nelems; j++ ) { // loop over local elements
                    element = domain->giveElement(j);
                    if ( element->giveParallelMode() == Element_local ) {
                        globalDofManNum = element->giveGlobalNumber();
                        // test if globalDofManNum is in remoteDomainRecvList
                        if ( remoteDomainRecvList.findFirstIndexOf(globalDofManNum) ) {
                            sendMapSize++;
                        }
                    }
                }

                toSendMap.resize(sendMapSize);

                for ( j = 1; j <= nelems; j++ ) { // loop over local elements
                    element = domain->giveElement(j);
                    if ( element->giveParallelMode() == Element_local ) {
                        globalDofManNum = element->giveGlobalNumber();
                        // test if globalDofManNum is in remoteDomainRecvList
                        if ( remoteDomainRecvList.findFirstIndexOf(globalDofManNum) ) {
                            // add this local element number to the send map for the active partition
                            toSendMap.at(++sendMapPos) = j;
                        }
                    }
                } // end loop over local elements

                // set send map to i-th process communicator
                this->setProcessCommunicatorToSendArry(this->giveProcessCommunicator(i), toSendMap);

                /*
                 * #ifdef __VERBOSE_PARALLEL
                 *  fprintf (stderr, "domain %d-%d: domainCommSendsize is %d\n",rank,i,this->giveDomainCommunicator(i)->giveSendBuff()->giveSize() );
                 *  printf ("domain %d-%d: send map:",rank,i);
                 *  this->giveDomainCommunicator(i)->giveToSendMap()->printYourself();
                 *
                 *#endif
                 */

                //this->giveDomainCommunicator(i)->setToSendArry (this->engngModel, toSendMap);
            } // end receiving broadcasted lists

#ifdef __VERBOSE_PARALLEL
            VERBOSEPARALLEL_PRINT("ProblemCommunicator::setUpCommunicationMaps", "Receiving broadcasted send maps finished", rank);
#endif
        } // end loop over domains

    } else {
        _error("setUpCommunicationMapsForRemoteElementMode: unknown mode");
    }
}
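
The pattern in the example above (each partition broadcasts its receive wish list so the others can derive their send maps) can be reduced to a short, self-contained sketch using plain MPI and std::vector. The names below (buildSendMaps, wantList, localGlobalIds) are hypothetical, not OOFEM API; the sketch only assumes MPI has already been initialized.

// Minimal sketch of the broadcast-based map exchange; illustrative names, not OOFEM code.
#include <mpi.h>
#include <algorithm>
#include <vector>

// wantList: global ids this rank wants to receive; localGlobalIds: global ids this rank owns.
// Returns, for every other rank, the 1-based local indices that must be sent to it.
std::vector< std::vector<int> > buildSendMaps(const std::vector<int> &wantList,
                                              const std::vector<int> &localGlobalIds)
{
    int rank, nproc;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);

    // size the shared broadcast buffer to the largest wish list (+1 for the count),
    // mirroring the MPI_Allreduce(MPI_MAX) step in the example above
    int localSize = (int) wantList.size() + 1, globalSize = 0;
    MPI_Allreduce(&localSize, &globalSize, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);

    std::vector<int> buffer(globalSize);
    std::vector< std::vector<int> > sendMaps(nproc);

    for ( int root = 0; root < nproc; root++ ) {
        if ( root == rank ) { // pack own wish list: count first, then the ids
            buffer[0] = (int) wantList.size();
            std::copy(wantList.begin(), wantList.end(), buffer.begin() + 1);
        }
        MPI_Bcast(buffer.data(), globalSize, MPI_INT, root, MPI_COMM_WORLD);

        if ( root != rank ) { // scan owned ids against the remote wish list
            int count = buffer[0];
            for ( int j = 0; j < (int) localGlobalIds.size(); j++ ) {
                if ( std::find(buffer.begin() + 1, buffer.begin() + 1 + count,
                               localGlobalIds[j]) != buffer.begin() + 1 + count ) {
                    sendMaps[root].push_back(j + 1); // 1-based local index, like the maps above
                }
            }
        }
    }

    return sendMaps;
}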
void
FETICommunicator :: setUpCommunicationMaps(EngngModel *pm)
{
    int i, j, l, maxRec;
    int globaldofmannum, localNumber, ndofs;
    int numberOfBoundaryDofMans;
    int source, tag;
    IntArray numberOfPartitionBoundaryDofMans(size);
    StaticCommunicationBuffer commBuff(MPI_COMM_WORLD);
    EModelDefaultEquationNumbering dn;
    // FETIBoundaryDofManager *dofmanrec;
    // Map containing boundary dof managers records, the key is corresponding global number
    // value is corresponding local master dof manager number
    map< int, int, less< int > >BoundaryDofManagerMap;
    // communication maps of slaves
    IntArray **commMaps = new IntArray * [ size ];
    // location array
    IntArray locNum;
    Domain *domain = pm->giveDomain(1);

    // check if receiver is master
    if ( this->rank != 0 ) {
        _error("FETICommunicator::setUpCommunicationMaps : rank 0 (master) expected as receiver");
    }

    // resize receive buffer
    commBuff.resize( commBuff.givePackSize(MPI_INT, 1) );

    //
    // receive data
    //
    for ( i = 1; i < size; i++ ) {
        commBuff.iRecv(MPI_ANY_SOURCE, FETICommunicator :: NumberOfBoundaryDofManagersMsg);
        while ( !commBuff.testCompletion(source, tag) ) {
            ;
        }

        // unpack data
        commBuff.unpackInt(j);
#ifdef __VERBOSE_PARALLEL
        OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Received data from partition %3d (received %d)\n",
                        rank, "FETICommunicator :: setUpCommunicationMaps : received number of boundary dofMans", source, j);
#endif
        numberOfPartitionBoundaryDofMans.at(source + 1) = j;
        commBuff.init();
    }

    MPI_Barrier(MPI_COMM_WORLD);


    // determine the total number of boundary dof managers at master
    int nnodes = domain->giveNumberOfDofManagers();
    j = 0;
    for ( i = 1; i <= nnodes; i++ ) {
        if ( domain->giveDofManager(i)->giveParallelMode() == DofManager_shared ) {
            j++;
        }
    }

    numberOfPartitionBoundaryDofMans.at(1) = j;

    //
    // receive list of boundary dof managers with corresponding number of dofs from each partition
    //

    // resize the receive buffer to fit all messages
    maxRec = 0;
    for ( i = 0; i < size; i++ ) {
        if ( numberOfPartitionBoundaryDofMans.at(i + 1) > maxRec ) {
            maxRec = numberOfPartitionBoundaryDofMans.at(i + 1);
        }
    }

    commBuff.resize( 2 * maxRec * commBuff.givePackSize(MPI_INT, 1) );
    // resize communication maps accordingly
    for ( i = 0; i < size; i++ ) {
        j = numberOfPartitionBoundaryDofMans.at(i + 1);
        commMaps [ i ] = new IntArray(j);
    }


    // add local master contribution first
    // loop over all local (master) shared dof managers
    i = 0;
    for ( j = 1; j <= numberOfPartitionBoundaryDofMans.at(1); j++ ) {
        // find next shared dofman
        while ( !( domain->giveDofManager(++i)->giveParallelMode() == DofManager_shared ) ) {
            ;
        }

        globaldofmannum = domain->giveDofManager(i)->giveGlobalNumber();
        domain->giveDofManager(i)->giveCompleteLocationArray(locNum, dn);
        ndofs = 0;
        for ( l = 1; l <= locNum.giveSize(); l++ ) {
            if ( locNum.at(l) ) {
                ndofs++;
            }
        }

        // add corresponding entry to master map of boundary dof managers
        if ( ( localNumber = BoundaryDofManagerMap [ globaldofmannum ] ) == 0 ) { // no local counterpart exists
            // create it
            boundaryDofManList.push_back( FETIBoundaryDofManager(globaldofmannum, 0, ndofs) );
            // remember the local number; actual position in vector is localNumber-1
            localNumber = BoundaryDofManagerMap [ globaldofmannum ] = ( boundaryDofManList.size() );
            boundaryDofManList.back().addPartition(0);
        } else { // update the corresponding record
            boundaryDofManList [ localNumber - 1 ].addPartition(0);
            if ( boundaryDofManList [ localNumber - 1 ].giveNumberOfDofs() != ndofs ) {
                _error("FETICommunicator :: setUpCommunicationMaps : ndofs size mismatch");
            }
        }

        // remember communication map for particular partition
        commMaps [ 0 ]->at(j) = localNumber;
    }

    //
    // receive data from slave partitions
    //

    for ( i = 1; i < size; i++ ) {
        commBuff.iRecv(MPI_ANY_SOURCE, FETICommunicator :: BoundaryDofManagersRecMsg);
        while ( !commBuff.testCompletion(source, tag) ) {
            ;
        }

        // unpack data
#ifdef __VERBOSE_PARALLEL
        OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Received data from partition %3d\n",
                        rank, "FETICommunicator :: setUpCommunicationMaps : received boundary dofMans records", source);
#endif

        // loop over all dofmanager data received
        for ( j = 1; j <= numberOfPartitionBoundaryDofMans.at(source + 1); j++ ) {
            commBuff.unpackInt(globaldofmannum);
            commBuff.unpackInt(ndofs);

            // add corresponding entry to master map of boundary dof managers
            if ( ( localNumber = BoundaryDofManagerMap [ globaldofmannum ] ) == 0 ) { // no local counterpart exists
                // create it
                boundaryDofManList.push_back( FETIBoundaryDofManager(globaldofmannum, 0, ndofs) );
                // remember the local number; actual position in vector is localNumber-1
                localNumber = BoundaryDofManagerMap [ globaldofmannum ] = ( boundaryDofManList.size() );
                boundaryDofManList.back().addPartition(source);
            } else { // update the corresponding record
                boundaryDofManList [ localNumber - 1 ].addPartition(source);
                if ( boundaryDofManList [ localNumber - 1 ].giveNumberOfDofs() != ndofs ) {
                    _error("FETICommunicator :: setUpCommunicationMaps : ndofs size mismatch");
                }
            }

            // remember communication map for particular partition
            commMaps [ source ]->at(j) = localNumber;
        }

        commBuff.init();
    }

    MPI_Barrier(MPI_COMM_WORLD);
    //
    // assign code numbers to boundary dofs
    //
    numberOfEquations = 0;
    numberOfBoundaryDofMans = boundaryDofManList.size();
    for ( i = 1; i <= numberOfBoundaryDofMans; i++ ) {
        boundaryDofManList [ i - 1 ].setCodeNumbers(numberOfEquations); // updates numberOfEquations
    }

    // store the commMaps
    for ( i = 0; i < size; i++ ) {
        if ( i != 0 ) {
            this->giveProcessCommunicator(i)->setToSendArry(engngModel, * commMaps [ i ], 0);
            this->giveProcessCommunicator(i)->setToRecvArry(engngModel, * commMaps [ i ], 0);
        } else {
            masterCommMap = * commMaps [ i ];
        }

        delete commMaps [ i ];
    }

    delete [] commMaps;

    MPI_Barrier(MPI_COMM_WORLD);

#ifdef __VERBOSE_PARALLEL
    VERBOSEPARALLEL_PRINT("FETICommunicator::setUpCommunicationMaps", "communication maps setup finished", rank);
#endif
}
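
The first stage of the example above, where the master (rank 0) learns how many boundary dof managers each partition contributes, could also be expressed with a single collective. Below is a minimal sketch under that assumption, using plain MPI and a hypothetical function name; the OOFEM code above instead uses tagged point-to-point messages through StaticCommunicationBuffer.

// Hedged sketch only: gathers one count per rank at the master (rank 0); not OOFEM API.
#include <mpi.h>
#include <vector>

std::vector<int> gatherBoundaryCounts(int localBoundaryCount)
{
    int rank, nproc;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);

    // only the master needs the full table; other ranks get an empty vector back
    std::vector<int> counts(rank == 0 ? nproc : 0);
    MPI_Gather(&localBoundaryCount, 1, MPI_INT,
               counts.data(), 1, MPI_INT, 0, MPI_COMM_WORLD);
    return counts; // on the master, counts[i] = number of boundary dof managers on rank i
}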
Example #3
/*
 * should be called after basic local migration is finalized,
 * when all local elements are already available
 */
void
NonlocalMaterialWTP :: migrate()
{
    Domain *domain = this->lb->giveDomain();
    EngngModel *emodel = domain->giveEngngModel();
    int nproc = emodel->giveNumberOfProcesses();
    int myrank = emodel->giveRank();
    CommunicatorBuff cb(nproc, CBT_dynamic);
    Communicator com(emodel, &cb, myrank, nproc, CommMode_Dynamic);
    StaticCommunicationBuffer commBuff(MPI_COMM_WORLD);

    /*
     * build domain nonlocal element dependency list. Then exclude local elements - what remains are unsatisfied
     * remote dependencies that have to be broadcasted and received from partitions owning relevant elements
     */
    int _locsize, i, _i, ie, _size, _globnum, result, nelems = domain->giveNumberOfElements();
    int _globsize, _val;
    Element *elem;
    std :: set< int >domainElementDepSet;
    // loop over each element dep list to assemble domain list
    for ( ie = 1; ie <= nelems; ie++ ) {
        elem = domain->giveElement(ie);
        if ( ( elem->giveParallelMode() == Element_local ) ) {
            _globnum = elem->giveGlobalNumber();
            IntArray &iedep = nonlocElementDependencyMap [ _globnum ];
            _size = iedep.giveSize();
            for ( _i = 1; _i <= _size; _i++ ) {
                domainElementDepSet.insert( iedep.at(_i) );
            }

#if NonlocalMaterialWTP_DEBUG_PRINT
            fprintf(stderr, "[%d] element %d dependency:", myrank, _globnum);
            for ( _i = 1; _i <= _size; _i++ ) {
                fprintf( stderr, "%d ", iedep.at(_i) );
            }

            fprintf(stderr, "\n");
#endif
        }
    }

#if NonlocalMaterialWTP_DEBUG_PRINT
    fprintf(stderr, "[%d] nonlocal domain dependency:", myrank);
    for ( int eldep: domainElementDepSet ) {
        fprintf(stderr, "%d ", eldep);
    }

    fprintf(stderr, "\n");
#endif

    // now exclude local elements (local dependency is always satisfied)
    for ( _i = 1; _i <= nelems; _i++ ) {
        elem = domain->giveElement(_i);
        if ( elem->giveParallelMode() == Element_local ) {
            domainElementDepSet.erase( elem->giveGlobalNumber() );
        }
    }

#if NonlocalMaterialWTP_DEBUG_PRINT
    fprintf(stderr, "[%d] remote elem wish list:", myrank);
    for ( int eldep: domainElementDepSet ) {
        fprintf(stderr, "%d ", eldep);
    }

    fprintf(stderr, "\n");
#endif

    // broadcast remaining elements (unsatisfied domain nonlocal dependency) to remaining partitions
    _locsize = domainElementDepSet.size() + 1;
    result = MPI_Allreduce(& _locsize, & _globsize, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
    if ( result != MPI_SUCCESS ) {
        OOFEM_ERROR("MPI_Allreduce to determine  broadcast buffer size failed");
    }

    commBuff.resize( commBuff.givePackSize(MPI_INT, _globsize) );
    // remote domain wish list
    std :: set< int >remoteWishSet;

    toSendList.resize(nproc);
    for ( i = 0; i < nproc; i++ ) { // loop over partitions
        commBuff.init();
        toSendList [ i ].clear();
        if ( i == myrank ) {
            // current domain has to send its receive wish list to all domains
            commBuff.packInt(_locsize);
            for ( int eldep: domainElementDepSet ) {
                commBuff.packInt(eldep);
            }

            result = commBuff.bcast(i);
        } else {
            // unpack remote domain wish list
            remoteWishSet.clear();
            result = commBuff.bcast(i);
            // unpack size
            commBuff.unpackInt(_size);
            for ( _i = 1; _i < _size; _i++ ) {
                commBuff.unpackInt(_val);
                remoteWishSet.insert(_val);
            }

            // determine which local elements are to be sent to the remote partition
            for ( _i = 1; _i <= nelems; _i++ ) {
                elem = domain->giveElement(_i);
                if ( elem->giveParallelMode() == Element_local ) {
                    if ( remoteWishSet.find( elem->giveGlobalNumber() ) != remoteWishSet.end() ) {
                        // store local element number
                        toSendList [ i ].push_back(_i);
                    }
                }
            }
        }
    } // end loop over partitions broadcast

#if NonlocalMaterialWTP_DEBUG_PRINT
    for ( i = 0; i < nproc; i++ ) { // loop over partitions
        // print some info
        fprintf(stderr, "[%d] elements scheduled for mirroring at [%d]:",
                myrank, i);
        for ( int elnum: toSendList [ i ] ) {
            fprintf( stderr, "%d[%d] ", elnum, domain->giveElement(elnum)->giveGlobalNumber() );
        }

        fprintf(stderr, "\n");
    }

#endif





    com.packAllData(this, domain, & NonlocalMaterialWTP :: packRemoteElements);
    com.initExchange(MIGRATE_REMOTE_ELEMENTS_TAG);
    com.unpackAllData(this, domain, & NonlocalMaterialWTP :: unpackRemoteElements);
    com.finishExchange();

    domain->commitTransactions( domain->giveTransactionManager() );

#ifdef __VERBOSE_PARALLEL
    VERBOSEPARALLEL_PRINT("NonlocalMaterialWTP::migrate", "Finished migrating remote elements", myrank);
#endif
}
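
The dependency bookkeeping at the start of migrate() (union of the per-element dependency lists, then removal of locally satisfied entries) boils down to a few lines with standard containers. A minimal sketch with hypothetical names follows; nonlocalDependency stands in for nonlocElementDependencyMap keyed by global element number, and is an assumption for illustration only.

// Hedged sketch: computes the "remote element wish list" from per-element dependencies.
// Names are illustrative, not OOFEM API.
#include <map>
#include <set>
#include <vector>

std::set<int> unsatisfiedDependencies(const std::map< int, std::vector<int> > &nonlocalDependency,
                                      const std::set<int> &localGlobalNumbers)
{
    std::set<int> wishList;

    // union of all per-element nonlocal dependency lists
    for ( const auto &entry : nonlocalDependency ) {
        wishList.insert(entry.second.begin(), entry.second.end());
    }

    // dependencies on locally owned elements are already satisfied
    for ( int globnum : localGlobalNumbers ) {
        wishList.erase(globnum);
    }

    return wishList; // global numbers of elements that must be mirrored from other partitions
}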