Example #1
void
ProblemCommunicator :: setUpCommunicationMapsForRemoteElementMode(EngngModel *pm,
                                                                  bool excludeSelfCommFlag)
{
    //int nnodes = domain->giveNumberOfDofManagers();
    Domain *domain = pm->giveDomain(1);
    int i, j, partition;

    if ( this->mode == ProblemCommMode__REMOTE_ELEMENT_MODE ) {
        /*
         * Initially, each partition knows for which nodes a receive
         * is needed (and can therefore easily compute the recv map),
         * but does not know for which nodes, and to which partition,
         * it should send data. Hence, the communication setup is performed by
         * broadcasting "send request" lists of nodes for which
         * a partition expects to receive data (i.e. of those nodes
         * which the partition uses, but does not own) to all
         * collaborating processes. The "send request" lists are then
         * converted into send maps.
         */

        // receive maps can be built locally,
        // but send maps have to be assembled from the broadcasted lists (containing
        // expected receive nodes) of the remote partitions.

        // first build local receive map
        IntArray domainNodeRecvCount(size);
        const IntArray *partitionList;
        //DofManager *dofMan;
        Element *element;
        int domainRecvListSize = 0, domainRecvListPos = 0;
        int nelems;
        int result = 1;

        nelems = domain->giveNumberOfElements();
        for ( i = 1; i <= nelems; i++ ) {
            partitionList = domain->giveElement(i)->givePartitionList();
            if ( domain->giveElement(i)->giveParallelMode() == Element_remote ) {
                // size of partitionList should be 1 <== only one master
                for ( j = 1; j <= partitionList->giveSize(); j++ ) {
                    if ( !( excludeSelfCommFlag && ( this->rank == partitionList->at(j) ) ) ) {
                        domainRecvListSize++;
                        domainNodeRecvCount.at(partitionList->at(j) + 1)++;
                    }
                }
            }
        }
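        // at this point domainNodeRecvCount.at(p + 1) holds the number of remote elements
        // whose values this partition expects to receive from partition p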

        // build maps simultaneously
        IntArray pos(size);
        IntArray **maps = new IntArray * [ size ];
        for ( i = 0; i < size; i++ ) {
            maps [ i ] = new IntArray( domainNodeRecvCount.at(i + 1) );
        }

        // also allocate the domain receive list to be broadcasted
        IntArray domainRecvList(domainRecvListSize);

        if ( domainRecvListSize ) {
            for ( i = 1; i <= nelems; i++ ) {
                // test whether the element is a remote one
                element = domain->giveElement(i);
                if ( element->giveParallelMode() == Element_remote ) {
                    domainRecvList.at(++domainRecvListPos) = element->giveGlobalNumber();

                    partitionList = domain->giveElement(i)->givePartitionList();
                    // size of partitionList should be 1 <== only one master
                    for ( j = 1; j <= partitionList->giveSize(); j++ ) {
                        if ( !( excludeSelfCommFlag && ( this->rank == partitionList->at(j) ) ) ) {
                            partition = partitionList->at(j);
                            maps [ partition ]->at( ++pos.at(partition + 1) ) = i;
                        }
                    }
                }
            }
        }
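        // maps[p] now lists, by local element number, the remote elements whose values
        // are expected from partition p; domainRecvList collects the global numbers of
        // all remote elements so they can be broadcasted to the other partitions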

        // set up the recv communicator maps for all domains
        for ( i = 0; i < size; i++ ) {
            this->setProcessCommunicatorToRecvArry(this->giveProcessCommunicator(i), * maps [ i ]);
            //this->giveDomainCommunicator(i)->setToRecvArry (this->engngModel, *maps[i]);
        }

        /*
         * #ifdef __VERBOSE_PARALLEL
         * for (i=0; i<size; i++) {
         * fprintf (stderr, "domain %d-%d: domainCommRecvsize is %d\n",rank,i,this->giveDomainCommunicator(i)->giveRecvBuff()->giveSize() );
         * printf ("domain %d-%d: recv map:",rank,i);
         * this->giveDomainCommunicator(i)->giveToRecvMap()->printYourself();
         * }
         *#endif
         */

        // delete local maps
        for ( i = 0; i < size; i++ ) {
            delete maps [ i ];
        }

        delete [] maps;

        // to assemble send maps, we must analyze the "send request" (receive) lists
        // broadcasted by the remote domains, and we must also broadcast our own list.

#ifdef __VERBOSE_PARALLEL
        VERBOSEPARALLEL_PRINT("ProblemCommunicator::setUpCommunicationMaps", "Remote Element-cut broadcasting started", rank);
#endif


        StaticCommunicationBuffer commBuff(MPI_COMM_WORLD);
        IntArray remoteDomainRecvList;
        IntArray toSendMap;
        int localExpectedSize, globalRecvSize;
        int sendMapPos, sendMapSize, globalDofManNum;

        // determine the size of receive buffer using AllReduce operation
#ifndef IBM_MPI_IMPLEMENTATION
        localExpectedSize = domainRecvList.givePackSize(commBuff);
#else
        localExpectedSize = domainRecvList.givePackSize(commBuff) + 1;
#endif


#ifdef __USE_MPI
        result = MPI_Allreduce(& localExpectedSize, & globalRecvSize, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
        if ( result != MPI_SUCCESS ) {
            _error("setUpCommunicationMaps: MPI_Allreduce failed");
        }

#else
WARNING: NOT SUPPORTED MESSAGE PASSING LIBRARY
#endif

#ifdef __VERBOSE_PARALLEL
        VERBOSEPARALLEL_PRINT("ProblemCommunicator::setUpCommunicationMaps", "Finished reducing receiveBufferSize", rank);
#endif


        // resize to fit largest received message
        commBuff.resize(globalRecvSize);

        // resize toSend map to max possible size
        toSendMap.resize(globalRecvSize);

        for ( i = 0; i < size; i++ ) { // loop over domains
            commBuff.init();
            if ( i == rank ) {
                // the current domain has to send its receive list to all domains:
                // broadcast domainRecvList

#ifdef __VERBOSE_PARALLEL
                VERBOSEPARALLEL_PRINT("ProblemCommunicator::setUpCommunicationMaps", "Broadcasting own send list", rank);
#endif

                commBuff.packIntArray(domainRecvList);
                result = commBuff.bcast(i);
                if ( result != MPI_SUCCESS ) {
                    _error("setUpCommunicationMaps: commBuff broadcast failed");
                }

#ifdef __VERBOSE_PARALLEL
                VERBOSEPARALLEL_PRINT("ProblemCommunicator::setUpCommunicationMaps", "Broadcasting own send list finished", rank);
#endif
            } else {
#ifdef __VERBOSE_PARALLEL
                OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Receiving broadcasted send map from partition %3d\n",
                                rank, "ProblemCommunicator :: unpackAllData", i);
#endif
                // receive broadcasted lists
                result = commBuff.bcast(i);
                if ( result != MPI_SUCCESS ) {
                    _error("setUpCommunicationMaps: commBuff broadcast failed");
                }

#ifdef __VERBOSE_PARALLEL
                OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Receiving broadcasted send map from partition %3d finished\n",
                                rank, "ProblemCommunicator :: unpackAllData", i);
#endif


                // unpack remote receive list
                if ( !commBuff.unpackIntArray(remoteDomainRecvList) ) {
                    _error("ProblemCommunicator::setUpCommunicationMaps: unpack remote receive list failed");
                }

                // find which of the requested remote elements are local to this partition;
                // if any, add them to the send map for the corresponding i-th partition
                sendMapPos = 0;
                sendMapSize = 0;
                // determine sendMap size
                for ( j = 1; j <= nelems; j++ ) { // loop over local elements
                    element = domain->giveElement(j);
                    if ( element->giveParallelMode() == Element_local ) {
                        globalDofManNum = element->giveGlobalNumber();
                        // test if the element's global number is in remoteDomainRecvList
                        if ( remoteDomainRecvList.findFirstIndexOf(globalDofManNum) ) {
                            sendMapSize++;
                        }
                    }
                }

                toSendMap.resize(sendMapSize);

                for ( j = 1; j <= nelems; j++ ) { // loop over local elements
                    element = domain->giveElement(j);
                    if ( element->giveParallelMode() == Element_local ) {
                        globalDofManNum = element->giveGlobalNumber();
                        // test if the element's global number is in remoteDomainRecvList
                        if ( remoteDomainRecvList.findFirstIndexOf(globalDofManNum) ) {
                            // add this local element number to the send map for the active partition
                            toSendMap.at(++sendMapPos) = j;
                        }
                    }
                } // end loop over local elements

                // set send map to i-th process communicator
                this->setProcessCommunicatorToSendArry(this->giveProcessCommunicator(i), toSendMap);

                /*
                 * #ifdef __VERBOSE_PARALLEL
                 *  fprintf (stderr, "domain %d-%d: domainCommSendsize is %d\n",rank,i,this->giveDomainCommunicator(i)->giveSendBuff()->giveSize() );
                 *  printf ("domain %d-%d: send map:",rank,i);
                 *  this->giveDomainCommunicator(i)->giveToSendMap()->printYourself();
                 *
                 *#endif
                 */

                //this->giveDomainCommunicator(i)->setToSendArry (this->engngModel, toSendMap);
            } // end receiving broadcasted lists

#ifdef __VERBOSE_PARALLEL
            VERBOSEPARALLEL_PRINT("ProblemCommunicator::setUpCommunicationMaps", "Receiving broadcasted send maps finished", rank);
#endif
        } // end loop over domains

    } else {
        _error("setUpCommunicationMapsForRemoteElementMode: unknown mode");
    }
}
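
For context, the broadcast-and-match pattern that this function implements can be shown
in isolation. The sketch below is only an illustration under stated assumptions: it uses
raw MPI calls and std::vector<int> in place of OOFEM's StaticCommunicationBuffer and
IntArray, and the names buildSendMaps, localRecvList and ownedGlobalNumbers are
hypothetical, not part of the library. Each rank broadcasts the global numbers it needs
to receive; every other rank matches that list against the entries it owns and records
the matches as its send map for that rank.

#include <mpi.h>
#include <algorithm>
#include <cstddef>
#include <vector>

void buildSendMaps(const std::vector<int> &localRecvList,      // global numbers this rank needs to receive
                   const std::vector<int> &ownedGlobalNumbers, // global numbers of locally owned entries
                   std::vector< std::vector<int> > &sendMaps)  // sendMaps[p]: local indices to send to rank p
{
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, & rank);
    MPI_Comm_size(MPI_COMM_WORLD, & size);
    sendMaps.assign( size, std::vector<int>() );

    // agree on a buffer large enough for the longest receive list (cf. the MPI_Allreduce above)
    int localSize = (int) localRecvList.size(), maxSize = 0;
    MPI_Allreduce(& localSize, & maxSize, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);

    std::vector<int> buffer(maxSize + 1);
    for ( int root = 0; root < size; root++ ) {
        if ( root == rank ) {
            // pack own receive list, length first (stands in for commBuff.packIntArray)
            buffer [ 0 ] = localSize;
            std::copy(localRecvList.begin(), localRecvList.end(), buffer.begin() + 1);
        }

        MPI_Bcast(& buffer [ 0 ], maxSize + 1, MPI_INT, root, MPI_COMM_WORLD);
        if ( root == rank ) {
            continue; // no self-communication, in the spirit of excludeSelfCommFlag
        }

        // every owned entry requested by 'root' goes into the send map for that rank
        std::vector<int>::iterator first = buffer.begin() + 1, last = first + buffer [ 0 ];
        for ( std::size_t j = 0; j < ownedGlobalNumbers.size(); j++ ) {
            if ( std::find(first, last, ownedGlobalNumbers [ j ]) != last ) {
                sendMaps [ root ].push_back((int) j + 1); // 1-based local index, as in the maps above
            }
        }
    }
}

The OOFEM routine above differs mainly in that it packs the list through a
StaticCommunicationBuffer, sizes it with givePackSize(), and stores the resulting maps
in the per-rank ProcessCommunicator objects, but the overall exchange structure is the same.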