Example #1
int
ParmetisLoadBalancer :: packSharedDmanPartitions(ProcessCommunicator &pc)
{
    int myrank = domain->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();
    int ndofman, idofman;
    DofManager *dofman;

    if ( iproc == myrank ) {
        return 1;                // skip local partition
    }

    // query process communicator to use
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();
    // loop over dofManagers and pack shared dofMan data
    ndofman = domain->giveNumberOfDofManagers();
    for ( idofman = 1; idofman <= ndofman; idofman++ ) {
        dofman = domain->giveDofManager(idofman);
        // test if iproc is in list of existing shared partitions
        if ( ( dofman->giveParallelMode() == DofManager_shared ) &&
            ( dofman->givePartitionList()->findFirstIndexOf(iproc) ) ) {
            // send new partitions to remote representation
            // fprintf (stderr, "[%d] sending shared plist of %d to [%d]\n", myrank, dofman->giveGlobalNumber(), iproc);
            pcbuff->write( dofman->giveGlobalNumber() );
            this->giveDofManPartitions(idofman)->storeYourself(*pcbuff);
        }
    }

    pcbuff->write((int)PARMETISLB_END_DATA);
    return 1;
}
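
A minimal sketch of the matching receive side (not part of the excerpt above): it assumes the symmetric read() counterpart of write() on ProcessCommunicatorBuff and IntArray :: restoreYourself(), and reads records until the PARMETISLB_END_DATA marker arrives.

// Hedged sketch of the unpack counterpart; the read()/restoreYourself() calls
// mirror the write()/storeYourself() calls used in the packing code above.
int
ParmetisLoadBalancer :: unpackSharedDmanPartitions(ProcessCommunicator &pc)
{
    int myrank = domain->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();
    int _globnum;
    IntArray _partitions;

    if ( iproc == myrank ) {
        return 1;                // skip local partition
    }

    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();
    pcbuff->read(_globnum);
    while ( _globnum != PARMETISLB_END_DATA ) {
        _partitions.restoreYourself(*pcbuff);
        // ... merge _partitions into the local record for dofman _globnum ...
        pcbuff->read(_globnum);
    }

    return 1;
}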
Example #2
/* Deletes those dofManagers that were sent to a remote partition and are locally
 * owned here, so they are no longer necessary (those with mode DM_Remote or DM_NULL,
 * plus shared ones whose updated partition list no longer contains this rank).
 * This updates the domain DofManager list as well as the global dmanMap and
 * physically deletes the remote dofManager.
 */
void
LoadBalancer :: deleteRemoteDofManagers(Domain *d)
{
    int i, ndofman =  d->giveNumberOfDofManagers();
    //LoadBalancer* lb = this->giveLoadBalancer();
    LoadBalancer :: DofManMode dmode;
    DofManager *dman;
    int myrank = d->giveEngngModel()->giveRank();
    DomainTransactionManager *dtm = d->giveTransactionManager();
    // loop over local nodes

    for ( i = 1; i <= ndofman; i++ ) {
        dmode = this->giveDofManState(i);
        if ( dmode == LoadBalancer :: DM_Remote ) {
            // positive candidate found
            dtm->addTransaction(DomainTransactionManager :: DTT_Remove, DomainTransactionManager :: DCT_DofManager, d->giveDofManager(i)->giveGlobalNumber(), NULL);
            // dmanMap.erase (d->giveDofManager (i)->giveGlobalNumber());
            //dman = dofManagerList->unlink (i);
            //delete dman;
        } else if ( dmode == LoadBalancer :: DM_NULL ) {
            // positive candidate found; we delete all null dof managers
            // they will be created by nonlocalmatwtp if necessary.
            // potentially, they can be reused, but this will make the code too complex
            dtm->addTransaction(DomainTransactionManager :: DTT_Remove, DomainTransactionManager :: DCT_DofManager, d->giveDofManager(i)->giveGlobalNumber(), NULL);
        } else if ( dmode == LoadBalancer :: DM_Shared ) {
            dman = d->giveDofManager(i);
            dman->setPartitionList( this->giveDofManPartitions(i) );
            dman->setParallelMode(DofManager_shared);
            if ( !dman->givePartitionList()->findFirstIndexOf(myrank) ) {
                dtm->addTransaction(DomainTransactionManager :: DTT_Remove, DomainTransactionManager :: DCT_DofManager, d->giveDofManager(i)->giveGlobalNumber(), NULL);
                //dmanMap.erase (this->giveDofManager (i)->giveGlobalNumber());
                //dman = dofManagerList->unlink (i);
                //delete dman;
            }
        } else if ( dmode == LoadBalancer :: DM_Local ) {
            IntArray _empty(0);
            dman = d->giveDofManager(i);
            dman->setPartitionList(& _empty);
            dman->setParallelMode(DofManager_local);
        } else {
            OOFEM_ERROR("Domain::deleteRemoteDofManagers: unknown dmode encountered");
        }
    }
}
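
The removals above are only queued as transactions; nothing is deleted until the transaction manager is asked to apply them. A hedged driver sketch (commitTransactions() is an assumed counterpart of giveTransactionManager(), not shown in the excerpt):

// Illustrative sketch: queue all removals, then apply the queued
// transactions in one step so dofman numbering stays consistent.
void
LoadBalancer :: migrateLoad(Domain *d)
{
    this->deleteRemoteDofManagers(d);   // queues DTT_Remove transactions
    // ... queue element removals / additions the same way ...
    d->commitTransactions( d->giveTransactionManager() );  // assumed API: apply queue
}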
Example #3
void
PetscNatural2GlobalOrdering :: init(EngngModel *emodel, EquationID ut, int di, EquationType et)
{
    Domain *d = emodel->giveDomain(di);
    int i, j, k, p, ndofs, ndofman = d->giveNumberOfDofManagers();
    int myrank = emodel->giveRank();
    DofManager *dman;
    // determine the number of local eqs + the number of those shared DOFs which are numbered by the receiver
    // (a shared dofman is numbered on the partition with the lowest rank)
    EModelDefaultEquationNumbering dn;
    EModelDefaultPrescribedEquationNumbering dpn;

#ifdef __VERBOSE_PARALLEL
    VERBOSEPARALLEL_PRINT("PetscNatural2GlobalOrdering :: init", "initializing N2G ordering", myrank);
#endif

    l_neqs = 0;
    for ( i = 1; i <= ndofman; i++ ) {
        dman = d->giveDofManager(i);
        /*
         *  if (dman->giveParallelMode() == DofManager_local) { // count all dofman eqs
         *    ndofs = dman->giveNumberOfDofs ();
         *    for (j=1; j<=ndofs; j++) {
         *      if (dman->giveDof(j)->isPrimaryDof()) {
         *        if (dman->giveDof(j)->giveEquationNumber()) l_neqs++;
         *      }
         *    }
         *  } else if (dman->giveParallelMode() == DofManager_shared) {
         *    // determine if problem is the lowest one sharing the dofman; if yes the receiver is responsible to
         *    // deliver number
         *    IntArray *plist = dman->givePartitionList();
         *    int n = plist->giveSize();
         *    int minrank = myrank;
         *    for (j=1; j<=n; j++) minrank = min (minrank, plist->at(j));
         *    if (minrank == myrank) { // count eqs
         *      ndofs = dman->giveNumberOfDofs ();
         *      for (j=1; j<=ndofs; j++) {
         *        if (dman->giveDof(j)->isPrimaryDof()) {
         *          if (dman->giveDof(j)->giveEquationNumber()) l_neqs++;
         *        }
         *      }
         *    }
         *  } // end shared dman
         */
        if ( isLocal(dman) ) {
            ndofs = dman->giveNumberOfDofs();
            for ( j = 1; j <= ndofs; j++ ) {
                if ( dman->giveDof(j)->isPrimaryDof() ) {
                    if ( et == et_standard ) {
                        if ( dman->giveDof(j)->giveEquationNumber(dn) ) {
                            l_neqs++;
                        }
                    } else {
                        if ( dman->giveDof(j)->giveEquationNumber(dpn) ) {
                            l_neqs++;
                        }
                    }
                }
            }
        }
    }

    // exchange with other procs the number of eqs numbered on particular procs
    int *leqs = new int [ emodel->giveNumberOfProcesses() ];
    MPI_Allgather(& l_neqs, 1, MPI_INT, leqs, 1, MPI_INT, MPI_COMM_WORLD);
    // compute local offset
    int offset = 0;
    for ( j = 0; j < myrank; j++ ) {
        offset += leqs [ j ];
    }
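    // e.g., with three processes and leqs = {4, 3, 5}: rank 0 numbers global
    // eqs 0..3, rank 1 starts at offset 4 (eqs 4..6), rank 2 at offset 7 (eqs 7..11)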

    // count global number of eqs
    for ( g_neqs = 0, j = 0; j < emodel->giveNumberOfProcesses(); j++ ) {
        g_neqs += leqs [ j ];
    }

    // send numbered shared ones
    if ( et == et_standard ) {
        locGlobMap.resize( emodel->giveNumberOfEquations(ut) );
    } else {
        locGlobMap.resize( emodel->giveNumberOfPrescribedEquations(ut) );
    }

    // determine shared dofs
    int psize, nproc = emodel->giveNumberOfProcesses();
    IntArray sizeToSend(nproc), sizeToRecv(nproc), nrecToReceive(nproc);
#ifdef __VERBOSE_PARALLEL
    IntArray nrecToSend(nproc);
#endif
    const IntArray *plist;
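    // ownership convention used below: the lowest-ranked partition in a shared
    // dofman's partition list numbers its DOFs and sends the assigned global
    // numbers to the other sharing partitions, which only receive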
    for ( i = 1; i <= ndofman; i++ ) {
        // if (domain->giveDofManager(i)->giveParallelMode() == DofManager_shared) {
        if ( isShared( d->giveDofManager(i) ) ) {
            int n = d->giveDofManager(i)->giveNumberOfDofs();
            plist = d->giveDofManager(i)->givePartitionList();
            psize = plist->giveSize();
            int minrank = myrank;
            for ( j = 1; j <= psize; j++ ) {
                minrank = min( minrank, plist->at(j) );
            }

            if ( minrank == myrank ) { // count to send
                for ( j = 1; j <= psize; j++ ) {
#ifdef __VERBOSE_PARALLEL
                    nrecToSend( plist->at(j) )++;
#endif
                    sizeToSend( plist->at(j) ) += ( 1 + n );  // ndofs+dofman number
                }
            } else {
                nrecToReceive(minrank)++;
                sizeToRecv(minrank) += ( 1 + n );      // ndofs+dofman number
            }
        }
    }

#ifdef __VERBOSE_PARALLEL
    for ( i = 0; i < nproc; i++ ) {
        OOFEM_LOG_INFO("[%d] Record Statistics: Sending %d Receiving %d to %d\n",
                       myrank, nrecToSend(i), nrecToReceive(i), i);
    }

#endif



    std :: map< int, int >globloc; //  global->local mapping for shared
    // number equations owned by this rank
    int globeq = offset;
    for ( i = 1; i <= ndofman; i++ ) {
        dman = d->giveDofManager(i);
        //if (dman->giveParallelMode() == DofManager_shared) {
        if ( isShared(dman) ) {
            globloc [ dman->giveGlobalNumber() ] = i; // build global->local mapping for shared

            plist = dman->givePartitionList();
            psize = plist->giveSize();
            int minrank = myrank;
            for ( j = 1; j <= psize; j++ ) {
                minrank = min( minrank, plist->at(j) );
            }

            if ( minrank == myrank ) { // local
                ndofs = dman->giveNumberOfDofs();
                for ( j = 1; j <= ndofs; j++ ) {
                    if ( dman->giveDof(j)->isPrimaryDof() ) {
                        int eq;
                        if ( et == et_standard ) {
                            eq = dman->giveDof(j)->giveEquationNumber(dn);
                        } else {
                            eq = dman->giveDof(j)->giveEquationNumber(dpn);
                        }

                        if ( eq ) {
                            locGlobMap.at(eq) = globeq++;
                        }
                    }
                }
            }

            //} else if (dman->giveParallelMode() == DofManager_local) {
        } else {
            ndofs = dman->giveNumberOfDofs();
            for ( j = 1; j <= ndofs; j++ ) {
                if ( dman->giveDof(j)->isPrimaryDof() ) {
                    int eq;
                    if ( et == et_standard ) {
                        eq = dman->giveDof(j)->giveEquationNumber(dn);
                    } else {
                        eq = dman->giveDof(j)->giveEquationNumber(dpn);
                    }

                    if ( eq ) {
                        locGlobMap.at(eq) = globeq++;
                    }
                }
            }
        }
    }


    /*
     * fprintf (stderr, "[%d] locGlobMap: ", myrank);
     * for (i=1; i<=locGlobMap.giveSize(); i++)
     * fprintf (stderr, "%d ",locGlobMap.at(i));
     */

    // pack data for remote procs
    CommunicationBuffer **buffs = new CommunicationBuffer * [ nproc ];
    for ( p = 0; p < nproc; p++ ) {
        buffs [ p ] = new StaticCommunicationBuffer(MPI_COMM_WORLD, 0);
        buffs [ p ]->resize( buffs [ p ]->givePackSize(MPI_INT, 1) * sizeToSend(p) );

#if 0
        OOFEM_LOG_INFO( "[%d]PetscN2G:: init: Send buffer[%d] size %d\n",
                       myrank, p, sizeToSend(p) );
#endif
    }


    for ( i = 1; i <= ndofman; i++ ) {
        if ( isShared( d->giveDofManager(i) ) ) {
            dman = d->giveDofManager(i);
            plist = dman->givePartitionList();
            psize = plist->giveSize();
            int minrank = myrank;
            for ( j = 1; j <= psize; j++ ) {
                minrank = min( minrank, plist->at(j) );
            }

            if ( minrank == myrank ) { // do send
                for ( j = 1; j <= psize; j++ ) {
                    p = plist->at(j);
                    if ( p == myrank ) {
                        continue;
                    }

#if 0
                    OOFEM_LOG_INFO("[%d]PetscN2G:: init: Sending localShared node %d[%d] to proc %d\n",
                                   myrank, i, dman->giveGlobalNumber(), p);
#endif
                    buffs [ p ]->packInt( dman->giveGlobalNumber() );
                    ndofs = dman->giveNumberOfDofs();
                    for ( k = 1; k <= ndofs; k++ ) {
                        if ( dman->giveDof(k)->isPrimaryDof() ) {
                            int eq;
                            if ( et == et_standard ) {
                                eq = dman->giveDof(k)->giveEquationNumber(dn);
                            } else {
                                eq = dman->giveDof(k)->giveEquationNumber(dpn);
                            }

                            if ( eq ) {
                                buffs [ p ]->packInt( locGlobMap.at(eq) );
                            }
                        }
                    }
                }
            }
        }
    }


    //fprintf (stderr, "[%d] Sending glob nums ...", myrank);
    // send buffers
    for ( p = 0; p < nproc; p++ ) {
        if ( p != myrank ) {
            buffs [ p ]->iSend(p, 999);
        }
    }


    /****
    *
    *  for (p=0; p<nproc; p++) {
    *   if (p == myrank) continue;
    *   for (i=1;  i<= ndofman; i++) {
    *     //if (domain->giveDofManager(i)->giveParallelMode() == DofManager_shared) {
    *     if (isShared(d->giveDofManager(i))) {
    *       dman = d->giveDofManager(i);
    *       plist = dman->givePartitionList();
    *       psize = plist->giveSize();
    *       int minrank = myrank;
    *       for (j=1; j<=psize; j++) minrank = min (minrank, plist->at(j));
    *       if (minrank == myrank) { // do send
    *         buffs[p]->packInt(dman->giveGlobalNumber());
    *         ndofs = dman->giveNumberOfDofs ();
    *         for (j=1; j<=ndofs; j++) {
    *           if (dman->giveDof(j)->isPrimaryDof()) {
    *             buffs[p]->packInt(locGlobMap.at(dman->giveDof(j)->giveEquationNumber()));
    *           }
    *         }
    *       }
    *     }
    *   }
    *   // send buffer
    *   buffs[p]->iSend(p, 999);
    *  }
    ****/

    // receive remote eqs and complete global numbering
    CommunicationBuffer **rbuffs = new CommunicationBuffer * [ nproc ];
    for ( p = 0; p < nproc; p++ ) {
        rbuffs [ p ] = new StaticCommunicationBuffer(MPI_COMM_WORLD, 0);
        rbuffs [ p ]->resize( rbuffs [ p ]->givePackSize(MPI_INT, 1) * sizeToRecv(p) );
#if 0
        OOFEM_LOG_INFO( "[%d]PetscN2G:: init: Receive buffer[%d] size %d\n",
                       myrank, p, sizeToRecv(p) );
#endif
    }


    //fprintf (stderr, "[%d] Receiving glob nums ...", myrank);
    for ( p = 0; p < nproc; p++ ) {
        if ( p != myrank ) {
            rbuffs [ p ]->iRecv(p, 999);
        }
    }


    IntArray finished(nproc);
    finished.zero();
    int fin = 1;
    finished.at(emodel->giveRank() + 1) = 1;
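    // poll the outstanding nonblocking receives; each completed buffer is
    // unpacked exactly once, and the loop exits when all remote ranks are done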
    do {
        for ( p = 0; p < nproc; p++ ) {
            if ( finished.at(p + 1) == 0 ) {
                if ( rbuffs [ p ]->testCompletion() ) {
                    // data are here
                    // unpack them
                    int nite = nrecToReceive(p);
                    int shdm, ldm;
                    for ( i = 1; i <= nite; i++ ) {
                        rbuffs [ p ]->unpackInt(shdm);

#if 0
                        OOFEM_LOG_INFO("[%d]PetscN2G:: init: Received shared node [%d] from proc %d\n",
                                       myrank, shdm, p);
#endif
                        //
                        // find local dofman corresponding to shdm
                        if ( globloc.find(shdm) != globloc.end() ) {
                            ldm = globloc [ shdm ];
                        } else {
                            OOFEM_ERROR3("[%d] PetscNatural2GlobalOrdering :: init: invalid shared dofman received, globnum %d\n", myrank, shdm);
                        }

                        dman = d->giveDofManager(ldm);
                        ndofs = dman->giveNumberOfDofs();
                        for ( j = 1; j <= ndofs; j++ ) {
                            if ( dman->giveDof(j)->isPrimaryDof() ) {
                                int eq;
                                if ( et == et_standard ) {
                                    eq = dman->giveDof(j)->giveEquationNumber(dn);
                                } else {
                                    eq = dman->giveDof(j)->giveEquationNumber(dpn);
                                }

                                if ( eq ) {
                                    int val;
                                    rbuffs [ p ]->unpackInt(val);
                                    locGlobMap.at(eq) = val;
                                }
                            }
                        }
                    }

                    finished.at(p + 1) = 1;
                    fin++;
                }
            }
        }
    } while ( fin < nproc );


    /*
     * fprintf (stderr, "[%d] Finished receiving glob nums ...", myrank);
     *
     * fprintf (stderr, "[%d] locGlobMap:", myrank);
     * for (i=1; i<=locGlobMap.giveSize(); i++)
     * fprintf (stderr, "%d ",locGlobMap.at(i));
     */

#ifdef  __VERBOSE_PARALLEL
    if ( et == et_standard ) {
        int _eq;
        const char *ptr;
        const char *locname = "local", *shname = "shared", *unkname = "unknown";
        for ( i = 1; i <= ndofman; i++ ) {
            dman = d->giveDofManager(i);
            if ( dman->giveParallelMode() == DofManager_local ) {
                ptr = locname;
            } else if ( dman->giveParallelMode() == DofManager_shared ) {
                ptr = shname;
            } else {
                ptr = unkname;
            }

            ndofs = dman->giveNumberOfDofs();
            for ( j = 1; j <= ndofs; j++ ) {
                if ( ( _eq = dman->giveDof(j)->giveEquationNumber(dn) ) ) {
                    fprintf( stderr, "[%d] n:%6s %d[%d] (%d), leq = %d, geq = %d\n", emodel->giveRank(), ptr, i, dman->giveGlobalNumber(), j, _eq, locGlobMap.at(_eq) );
                } else {
                    fprintf(stderr, "[%d] n:%6s %d[%d] (%d), leq = %d, geq = %d\n", emodel->giveRank(), ptr, i, dman->giveGlobalNumber(), j, _eq, 0);
                }
            }
        }
    }

#endif


    // build reverse map
    int lneq;
    if ( et == et_standard ) {
        lneq = emodel->giveNumberOfEquations(ut);
    } else {
        lneq = emodel->giveNumberOfPrescribedEquations(ut);
    }

    globLocMap.clear();
    for ( i = 1; i <= lneq; i++ ) {
        globLocMap [ locGlobMap.at(i) ] = i;
    }

    for ( p = 0; p < nproc; p++ ) {
        delete rbuffs [ p ];
        delete buffs [ p ];
    }

    delete[] rbuffs;
    delete[] buffs;
    delete[] leqs;

    MPI_Barrier(MPI_COMM_WORLD);
#ifdef __VERBOSE_PARALLEL
    VERBOSEPARALLEL_PRINT("PetscNatural2GlobalOrdering :: init", "done", myrank);
#endif
}
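
For reference, a minimal sketch of how the two maps built by init() could back the forward and reverse lookups (the accessor names giveNewEq/giveOldEq are assumptions, not confirmed by the excerpt):

// Hedged sketch of the lookup accessors over locGlobMap / globLocMap.
int
PetscNatural2GlobalOrdering :: giveNewEq(int leq)
{
    return locGlobMap.at(leq);           // natural (local) eq -> global eq
}

int
PetscNatural2GlobalOrdering :: giveOldEq(int eq)
{
    std :: map< int, int > :: iterator pos = globLocMap.find(eq);
    return ( pos != globLocMap.end() ) ? pos->second : 0;  // global -> natural; 0 if not local
}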
Example #4
int
LoadBalancer :: packMigratingData(Domain *d, ProcessCommunicator &pc)
{
    int myrank = d->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();
    int idofman, ndofman;
    classType dtype;
    DofManager *dofman;
    LoadBalancer :: DofManMode dmode;

    //  **************************************************
    //  Pack migrating data to remote partition
    //  **************************************************

    // pack dofManagers
    if ( iproc == myrank ) {
        return 1;                // skip local partition
    }

    // query process communicator to use
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();
    ProcessCommDataStream pcDataStream(pcbuff);
    // loop over dofManagers
    ndofman = d->giveNumberOfDofManagers();
    for ( idofman = 1; idofman <= ndofman; idofman++ ) {
        dofman = d->giveDofManager(idofman);
        dmode = this->giveDofManState(idofman);
        dtype = dofman->giveClassID();
        // sync data to remote partition
        // if dofman already present on remote partition then there is no need to sync
        //if ((this->giveDofManPartitions(idofman)->findFirstIndexOf(iproc))) {
        if ( ( this->giveDofManPartitions(idofman)->findFirstIndexOf(iproc) ) &&
            ( !dofman->givePartitionList()->findFirstIndexOf(iproc) ) ) {
            pcbuff->packInt(dtype);
            pcbuff->packInt(dmode);
            pcbuff->packInt( dofman->giveGlobalNumber() );

            // pack dofman state (this is the local dofman, not available on remote)
            /* this is a potential performance leak: sending a shared dofman to a partition
             * on which it is already shared does not require sending the context (it is
             * already there); here, for simplicity, the context is always sent */
            dofman->saveContext(& pcDataStream, CM_Definition | CM_DefinitionGlobal | CM_State | CM_UnknownDictState);
            // send list of new partitions
            pcbuff->packIntArray( * ( this->giveDofManPartitions(idofman) ) );
        }
    }

    // pack end-of-dofman-section record
    pcbuff->packInt(LOADBALANCER_END_DATA);

    int ielem, nelem = d->giveNumberOfElements(), nsend = 0;

    Element *elem;

    for ( ielem = 1; ielem <= nelem; ielem++ ) { // begin loop over elements
        elem = d->giveElement(ielem);
        if ( ( elem->giveParallelMode() == Element_local ) &&
            ( this->giveElementPartition(ielem) == iproc ) ) {
            // pack local element (node numbers should be global ones!)
            // pack type
            pcbuff->packInt( elem->giveClassID() );
            // nodal numbers should be packed as global!
            elem->saveContext(& pcDataStream, CM_Definition | CM_DefinitionGlobal | CM_State);
            nsend++;
        }
    } // end loop over elements

    // pack end-of-element-record
    pcbuff->packInt(LOADBALANCER_END_DATA);

    OOFEM_LOG_RELEVANT("[%d] LoadBalancer:: sending %d migrating elements to %d\n", myrank, nsend, iproc);

    return 1;
}
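
A condensed sketch of the matching receive side (an assumption mirroring the packing structure above, not the original source): records are read until each LOADBALANCER_END_DATA marker, first for dofManagers, then for elements.

// Hedged sketch: unpack dofManager records, then element records.
int
LoadBalancer :: unpackMigratingData(Domain *d, ProcessCommunicator &pc)
{
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();
    ProcessCommDataStream pcDataStream(pcbuff);
    int _type;

    pcbuff->unpackInt(_type);
    while ( _type != LOADBALANCER_END_DATA ) { // dofManager section
        // ... unpack dmode + global number, create a dofManager of classType _type,
        //     restore its context from pcDataStream, unpack its new partition list ...
        pcbuff->unpackInt(_type);
    }

    pcbuff->unpackInt(_type);
    while ( _type != LOADBALANCER_END_DATA ) { // element section
        // ... create an element of classType _type and restore its context ...
        pcbuff->unpackInt(_type);
    }

    return 1;
}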