Code example #1
bool
NodeErrorCheckingRule :: check(Domain *domain, TimeStep *tStep)
{
    // Rule doesn't apply yet.
    if ( tStep->giveNumber() != tstep ) {
        return true;
    }

    DofManager *dman = domain->giveGlobalDofManager(number);
    if ( !dman ) {
        if ( domain->giveEngngModel()->isParallel() ) {
            return true;
        } else {
            OOFEM_WARNING("Dof manager %d not found.", number);
            return false;
        }
    }

    if ( dman->giveParallelMode() == DofManager_remote || dman->giveParallelMode() == DofManager_null ) {
        return true;
    }

    Dof *dof = dman->giveDofWithID(dofid);

    double dmanValue = dof->giveUnknown(mode, tStep);
    bool check = checkValue(dmanValue);
    if ( !check ) {
        OOFEM_WARNING("Check failed in: tstep %d, node %d, dof %d, mode %d:\n"
                      "value is %.8e, but should be %.8e ( error is %e but tolerance is %e )",
                      tstep, number, dofid, mode,
                      dmanValue, value, fabs(dmanValue-value), tolerance );
    }
    return check;
}
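
Because the rule returns true whenever it does not apply (wrong time step, or a node owned by another partition), a driver can simply conjoin all rules at every step without special-casing. Below is a minimal sketch of such a driver; the rules container and the ErrorCheckingRule base type are assumptions made for illustration, not the actual OOFEM checker module.

// Hedged sketch: evaluate every rule at the current step. Rules that do not
// apply return true, so they never mask a real failure.
bool
checkAllRules(const std :: vector< ErrorCheckingRule * > &rules, Domain *domain, TimeStep *tStep)
{
    bool ok = true;
    for ( ErrorCheckingRule *rule : rules ) {
        ok = rule->check(domain, tStep) && ok; // keep going: report all warnings
    }
    return ok;
}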
Code example #2
int
EIPrimaryUnknownMapper :: mapAndUpdate(FloatArray &answer, ValueModeType mode,
                                       Domain *oldd, Domain *newd,  TimeStep *tStep)
{
    int inode, nd_nnodes = newd->giveNumberOfDofManagers();
    int nsize = newd->giveEngngModel()->giveNumberOfDomainEquations( newd->giveNumber(), EModelDefaultEquationNumbering() );
    FloatArray unknownValues;
    IntArray dofidMask, locationArray;
    IntArray reglist;
#ifdef OOFEM_MAPPING_CHECK_REGIONS
    ConnectivityTable *conTable = newd->giveConnectivityTable();
    const IntArray *nodeConnectivity;
#endif

    answer.resize(nsize);
    answer.zero();

    for ( inode = 1; inode <= nd_nnodes; inode++ ) {
        DofManager *node = newd->giveNode(inode);
        /* In parallel mode, skip null/remote dof managers owned by other partitions. */
#ifdef __PARALLEL_MODE
        if ( ( node->giveParallelMode() == DofManager_null ) ||
            ( node->giveParallelMode() == DofManager_remote ) ) {
            continue;
        }

#endif

#ifdef OOFEM_MAPPING_CHECK_REGIONS
        // build up region list for node
        nodeConnectivity = conTable->giveDofManConnectivityArray(inode);
        reglist.resize( nodeConnectivity->giveSize() ); // upper bound on the number of regions
        reglist.clear();                                // start empty; refilled via insertSortedOnce below
        for ( int indx = 1; indx <= nodeConnectivity->giveSize(); indx++ ) {
            reglist.insertSortedOnce( newd->giveElement( nodeConnectivity->at(indx) )->giveRegionNumber() );
        }

#endif
        ///@todo Shouldn't we pass a primary field or something to this function?
        if ( this->evaluateAt(unknownValues, dofidMask, mode, oldd, * node->giveCoordinates(), reglist, tStep) ) {
            ///@todo This doesn't respect local coordinate systems in nodes. Supporting that would require major reworking.
            for ( int ii = 1; ii <= dofidMask.giveSize(); ii++ ) {
                // exclude slaves; they are determined from masters
                auto it = node->findDofWithDofId((DofIDItem)dofidMask.at(ii));
                if ( it != node->end() ) {
                    Dof *dof = *it;
                    if ( dof->isPrimaryDof() ) {
                        int eq = dof->giveEquationNumber(EModelDefaultEquationNumbering());
                        answer.at( eq ) += unknownValues.at(ii);
                    }
                }
            }
        } else {
            OOFEM_ERROR("evaluateAt service failed for node %d", inode);
        }
    }

    return 1;
}
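
In an adaptive analysis this mapper would typically be invoked right after remeshing to carry the primary field over to the new discretization. A hedged usage sketch follows; oldDomain, newDomain and the surrounding control flow are illustrative assumptions.

// Hedged usage sketch: transfer the total primary unknowns to the new domain.
FloatArray mappedSolution;
EIPrimaryUnknownMapper mapper;
if ( !mapper.mapAndUpdate(mappedSolution, VM_Total, oldDomain, newDomain, tStep) ) {
    OOFEM_ERROR("primary unknown mapping failed");
}
// mappedSolution is now ordered by EModelDefaultEquationNumbering on newDomain.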
Code example #3
int
ParmetisLoadBalancer :: packSharedDmanPartitions(ProcessCommunicator &pc)
{
    int myrank = domain->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();
    int ndofman, idofman;
    DofManager *dofman;

    if ( iproc == myrank ) {
        return 1;                // skip local partition
    }

    // query process communicator to use
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();
    // loop over dofManagers and pack shared dofMan data
    ndofman = domain->giveNumberOfDofManagers();
    for ( idofman = 1; idofman <= ndofman; idofman++ ) {
        dofman = domain->giveDofManager(idofman);
        // test if iproc is in list of existing shared partitions
        if ( ( dofman->giveParallelMode() == DofManager_shared ) &&
            ( dofman->givePartitionList()->findFirstIndexOf(iproc) ) ) {
            // send new partitions to remote representation
            // fprintf (stderr, "[%d] sending shared plist of %d to [%d]\n", myrank, dofman->giveGlobalNumber(), iproc);
            pcbuff->write( dofman->giveGlobalNumber() );
            this->giveDofManPartitions(idofman)->storeYourself(*pcbuff);
        }
    }

    pcbuff->write((int)PARMETISLB_END_DATA);
    return 1;
}
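
The stream written above is self-delimiting: a sequence of (global number, partition list) records closed by the PARMETISLB_END_DATA sentinel. The matching receive side, unpackSharedDmanPartitions, can therefore loop until the sentinel appears. A minimal sketch is given below; the read and restoreYourself calls are assumed counterparts of the write and storeYourself calls above, and the merge step is elided.

int
ParmetisLoadBalancer :: unpackSharedDmanPartitions(ProcessCommunicator &pc)
{
    int globnum;
    IntArray _partitions;
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();

    pcbuff->read(globnum);
    while ( globnum != PARMETISLB_END_DATA ) {
        // restore the partition list shipped for this shared dof manager ...
        _partitions.restoreYourself(*pcbuff);
        // ... and merge it into the local record for globnum (elided here)
        pcbuff->read(globnum);
    }

    return 1;
}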
Code example #4
int
StructuralEngngModel :: unpackDofManagers(FloatArray *dest, ProcessCommunicator &processComm, bool prescribedEquations)
{
    int result = 1;
    int i, size;
    int j, ndofs, eqNum;
    Domain *domain = this->giveDomain(1);
    dofManagerParallelMode dofmanmode;
    IntArray const *toRecvMap = processComm.giveToRecvMap();
    ProcessCommunicatorBuff *pcbuff = processComm.giveProcessCommunicatorBuff();
    DofManager *dman;
    Dof *jdof;
    double value;


    size = toRecvMap->giveSize();
    for ( i = 1; i <= size; i++ ) {
        dman = domain->giveDofManager( toRecvMap->at(i) );
        ndofs = dman->giveNumberOfDofs();
        dofmanmode = dman->giveParallelMode();
        for ( j = 1; j <= ndofs; j++ ) {
            jdof = dman->giveDof(j);
            if ( prescribedEquations ) {
                eqNum = jdof->__givePrescribedEquationNumber();
            } else {
                eqNum = jdof->__giveEquationNumber();
            }
            if ( jdof->isPrimaryDof() && eqNum ) {
                result &= pcbuff->unpackDouble(value);
                if ( dofmanmode == DofManager_shared ) {
                    dest->at(eqNum) += value;
                } else if ( dofmanmode == DofManager_remote ) {
                    dest->at(eqNum)  = value;
                } else {
                    _error("unpackReactions: unknown dof namager parallel mode");
                }
            }
        }
    }

    return result;
}
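
This unpack loop only lines up because the sender walks its to-send map in exactly the same dof order. A hedged sketch of that counterpart is shown below; packDouble and giveToSendMap are assumed mirrors of the unpackDouble and giveToRecvMap calls above.

int
StructuralEngngModel :: packDofManagers(FloatArray *src, ProcessCommunicator &processComm, bool prescribedEquations)
{
    int result = 1;
    Domain *domain = this->giveDomain(1);
    IntArray const *toSendMap = processComm.giveToSendMap();
    ProcessCommunicatorBuff *pcbuff = processComm.giveProcessCommunicatorBuff();

    for ( int i = 1; i <= toSendMap->giveSize(); i++ ) {
        DofManager *dman = domain->giveDofManager( toSendMap->at(i) );
        for ( int j = 1; j <= dman->giveNumberOfDofs(); j++ ) {
            Dof *jdof = dman->giveDof(j);
            int eqNum = prescribedEquations ? jdof->__givePrescribedEquationNumber() :
                        jdof->__giveEquationNumber();
            // same filter as the receive side: primary dofs with a valid equation number
            if ( jdof->isPrimaryDof() && eqNum ) {
                result &= pcbuff->packDouble( src->at(eqNum) );
            }
        }
    }

    return result;
}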
Code example #5
void
ParmetisLoadBalancer :: labelDofManagers()
{
    int idofman, ndofman = domain->giveNumberOfDofManagers();
    ConnectivityTable *ct = domain->giveConnectivityTable();
    const IntArray *dofmanconntable;
    DofManager *dofman;
    Element *ielem;
    dofManagerParallelMode dmode;
    std :: set< int, std :: less< int > >__dmanpartitions;
    int myrank = domain->giveEngngModel()->giveRank();
    int nproc = domain->giveEngngModel()->giveNumberOfProcesses();
    int ie, npart;

    // resize label array
    dofManState.resize(ndofman);
    dofManState.zero();
    // resize dof man partitions
    dofManPartitions.clear();
    dofManPartitions.resize(ndofman);

 #ifdef ParmetisLoadBalancer_DEBUG_PRINT
    int _cols = 0;
    fprintf(stderr, "[%d] DofManager labels:\n", myrank);
 #endif

    // loop over local dof managers
    for ( idofman = 1; idofman <= ndofman; idofman++ ) {
        dofman = domain->giveDofManager(idofman);
        dmode = dofman->giveParallelMode();
        if ( ( dmode == DofManager_local ) || ( dmode == DofManager_shared ) ) {
            dofmanconntable = ct->giveDofManConnectivityArray(idofman);
            __dmanpartitions.clear();
            for ( ie = 1; ie <= dofmanconntable->giveSize(); ie++ ) {
                ielem = domain->giveElement( dofmanconntable->at(ie) );
                // assemble the list of partitions sharing dofmanager idofman;
                // a set is used so repeated partitions are included only once
                if ( ielem->giveParallelMode() == Element_local ) {
                    __dmanpartitions.insert( giveElementPartition( dofmanconntable->at(ie) ) );
                }
            }

            npart = __dmanpartitions.size();
            dofManPartitions [ idofman - 1 ].resize( __dmanpartitions.size() );
            int i = 1;
            for ( auto &dm: __dmanpartitions ) {
                dofManPartitions [ idofman - 1 ].at(i++) = dm;
            }
        }
    }

    // handle master-slave links between dofmans (master and slave are required on the same partition)
    this->handleMasterSlaveDofManLinks();


    /* Exchange new partitions for shared nodes */
    CommunicatorBuff cb(nproc, CBT_dynamic);
    Communicator com(domain->giveEngngModel(), &cb, myrank, nproc, CommMode_Dynamic);
    com.packAllData(this, & ParmetisLoadBalancer :: packSharedDmanPartitions);
    com.initExchange(SHARED_DOFMAN_PARTITIONS_TAG);
    com.unpackAllData(this, & ParmetisLoadBalancer :: unpackSharedDmanPartitions);
    com.finishExchange();

    /* label dof managers */
    for ( idofman = 1; idofman <= ndofman; idofman++ ) {
        dofman = domain->giveDofManager(idofman);
        dmode = dofman->giveParallelMode();
        npart = dofManPartitions [ idofman - 1 ].giveSize();
        if ( ( dmode == DofManager_local ) || ( dmode == DofManager_shared ) ) {
            // determine its state after balancing -> label
            dofManState.at(idofman) = this->determineDofManState(idofman, myrank, npart, & dofManPartitions [ idofman - 1 ]);
        } else {
            dofManState.at(idofman) = DM_NULL;
        }
    }


 #ifdef ParmetisLoadBalancer_DEBUG_PRINT
    for ( idofman = 1; idofman <= ndofman; idofman++ ) {
        fprintf(stderr, " | %d: ", idofman);
        if ( dofManState.at(idofman) == DM_NULL ) {
            fprintf(stderr, "NULL  ");
        } else if ( dofManState.at(idofman) == DM_Local ) {
            fprintf(stderr, "Local ");
        } else if ( dofManState.at(idofman) == DM_Shared ) {
            fprintf(stderr, "Shared");
        } else if ( dofManState.at(idofman) == DM_Remote ) {
            fprintf(stderr, "Remote");
        } else {
            fprintf(stderr, "Unknown");
        }

        //else if (dofManState.at(idofman) == DM_SharedExclude)fprintf (stderr, "ShdExc");
        //else if (dofManState.at(idofman) == DM_SharedNew)    fprintf (stderr, "ShdNew");
        //else if (dofManState.at(idofman) == DM_SharedUpdate) fprintf (stderr, "ShdUpd");

        if ( ( ( ++_cols % 4 ) == 0 ) || ( idofman == ndofman ) ) {
            fprintf(stderr, "\n");
        }
    }

 #endif
}
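
The exchange in the middle of labelDofManagers follows a four-step idiom that recurs throughout OOFEM's parallel code: pack, post, unpack, finish. It is isolated as a sketch below; MyHandler, the handler methods and SOME_MESSAGE_TAG are placeholders, not real OOFEM names.

// Generic dynamic-exchange idiom (placeholder names):
CommunicatorBuff cb(nproc, CBT_dynamic);                 // one growable buffer per rank
Communicator com(engngModel, &cb, myrank, nproc, CommMode_Dynamic);
com.packAllData(this, & MyHandler :: packSomething);     // 1. serialize data per remote rank
com.initExchange(SOME_MESSAGE_TAG);                      // 2. post non-blocking sends/receives
com.unpackAllData(this, & MyHandler :: unpackSomething); // 3. consume messages as they arrive
com.finishExchange();                                    // 4. wait until all traffic completes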
Code example #6
File: nldeidynamic.C  Project: nitramkaroh/OOFEM
void NlDEIDynamic :: solveYourselfAt(TimeStep *tStep)
{
    //
    // Creates the system of governing equations and solves it at the given time step.
    //

    Domain *domain = this->giveDomain(1);
    int neq = this->giveNumberOfDomainEquations( 1, EModelDefaultEquationNumbering() );
    int nman  = domain->giveNumberOfDofManagers();

    DofManager *node;

    int i, k, j, jj;
    double coeff, maxDt, maxOm = 0.;
    double prevIncrOfDisplacement, incrOfDisplacement;

    if ( initFlag ) {
#ifdef VERBOSE
        OOFEM_LOG_DEBUG("Assembling mass matrix\n");
#endif

        //
        // Assemble mass matrix.
        //
        this->computeMassMtrx(massMatrix, maxOm, tStep);

        if ( drFlag ) {
            // If dynamic relaxation: Assemble amplitude load vector.
            loadRefVector.resize(neq);
            loadRefVector.zero();

            this->computeLoadVector(loadRefVector, VM_Total, tStep);

#ifdef __PARALLEL_MODE
            // Compute the processor part of load vector norm pMp
            this->pMp = 0.0;
            double my_pMp = 0.0, coeff = 1.0;
            int eqNum, ndofman = domain->giveNumberOfDofManagers();
            dofManagerParallelMode dofmanmode;
            DofManager *dman;
            for ( int dm = 1; dm <= ndofman; dm++ ) {
                dman = domain->giveDofManager(dm);
                dofmanmode = dman->giveParallelMode();

                // Skip all remote and null dofmanagers
                coeff = 1.0;
                if ( ( dofmanmode == DofManager_remote ) || ( ( dofmanmode == DofManager_null ) ) ) {
                    continue;
                } else if ( dofmanmode == DofManager_shared ) {
                    coeff = 1. / dman->givePartitionsConnectivitySize();
                }

                // For shared nodes we add locally an average = 1/givePartitionsConnectivitySize() * contribution.
                for ( Dof *dof: *dman ) {
                    if ( dof->isPrimaryDof() && ( eqNum = dof->__giveEquationNumber() ) ) {
                        my_pMp += coeff * loadRefVector.at(eqNum) * loadRefVector.at(eqNum) / massMatrix.at(eqNum);
                    }
                }
            }

            // Sum up the contributions from processors.
            MPI_Allreduce(& my_pMp, & pMp, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
#else
            this->pMp = 0.0;
            for ( i = 1; i <= neq; i++ ) {
                pMp += loadRefVector.at(i) * loadRefVector.at(i) / massMatrix.at(i);
            }
#endif
            // Solve for the rate of the loading process (parameter "c"; undamped system assumed).
            if ( dumpingCoef < 1.e-3 ) {
                c = 3.0 * this->pyEstimate / pMp / Tau / Tau;
            } else {
                c = this->pyEstimate * Tau * dumpingCoef * dumpingCoef * dumpingCoef / pMp /
                    ( -3.0 / 2.0 + dumpingCoef * Tau + 2.0 * exp(-dumpingCoef * Tau) - 0.5 * exp(-2.0 * dumpingCoef * Tau) );
            }
        }

        initFlag = 0;
    }


    if ( tStep->isTheFirstStep() ) {
        //
        // Special init step - Compute displacements at tstep 0.
        //
        displacementVector.resize(neq);
        displacementVector.zero();
        previousIncrementOfDisplacementVector.resize(neq);
        previousIncrementOfDisplacementVector.zero();
        velocityVector.resize(neq);
        velocityVector.zero();
        accelerationVector.resize(neq);
        accelerationVector.zero();

        for ( j = 1; j <= nman; j++ ) {
            node = domain->giveDofManager(j);

            for ( Dof *dof: *node ) {
                // Ask for initial values obtained from boundary conditions (bc)
                // and initial conditions (ic); all dofs are expected to be of
                // displacement type.
                if ( !dof->isPrimaryDof() ) {
                    continue;
                }

                jj = dof->__giveEquationNumber();
                if ( jj ) {
                    displacementVector.at(jj) = dof->giveUnknown(VM_Total, tStep);
                    velocityVector.at(jj)     = dof->giveUnknown(VM_Velocity, tStep);
                    accelerationVector.at(jj) = dof->giveUnknown(VM_Acceleration, tStep);
                }
            }
        }

        //
        // Set-up numerical model.
        //

        // Try to determine the best deltaT,
        maxDt = 2.0 / sqrt(maxOm);
        if ( deltaT > maxDt ) {
            // Print reduced time step increment and minimum period Tmin
            OOFEM_LOG_RELEVANT("deltaT reduced to %e, Tmin is %e\n", maxDt, maxDt * M_PI);
            deltaT = maxDt;
            tStep->setTimeIncrement(deltaT);
        }

        for ( j = 1; j <= neq; j++ ) {
            previousIncrementOfDisplacementVector.at(j) =  velocityVector.at(j) * ( deltaT );
            displacementVector.at(j) -= previousIncrementOfDisplacementVector.at(j);
        }
#ifdef VERBOSE
        OOFEM_LOG_RELEVANT( "\n\nSolving [Step number %8d, Time %15e]\n", tStep->giveNumber(), tStep->giveTargetTime() );
#endif
        return;
    } // end of init step

#ifdef VERBOSE
    OOFEM_LOG_DEBUG("Assembling right hand side\n");
#endif

    displacementVector.add(previousIncrementOfDisplacementVector);

    // Update solution state counter
    tStep->incrementStateCounter();

    // Compute internal forces.
    this->giveInternalForces(internalForces, false, 1, tStep);

    if ( !drFlag ) {
        //
        // Assembling the element part of load vector.
        //
        this->computeLoadVector(loadVector, VM_Total, tStep);
        //
        // Assembling additional parts of right hand side.
        //
        loadVector.subtract(internalForces);
    } else {
        // Dynamic relaxation
        // compute load factor
        pt = 0.0;

#ifdef __PARALLEL_MODE
        double my_pt = 0.0, coeff = 1.0;
        int eqNum, ndofman = domain->giveNumberOfDofManagers();
        dofManagerParallelMode dofmanmode;
        DofManager *dman;
        for ( int dm = 1; dm <= ndofman; dm++ ) {
            dman = domain->giveDofManager(dm);
            dofmanmode = dman->giveParallelMode();
            // skip all remote and null dofmanagers
            coeff = 1.0;
            if ( ( dofmanmode == DofManager_remote ) || ( dofmanmode == DofManager_null ) ) {
                continue;
            } else if ( dofmanmode == DofManager_shared ) {
                coeff = 1. / dman->givePartitionsConnectivitySize();
            }

            // For shared nodes we add locally an average= 1/givePartitionsConnectivitySize()*contribution.
            for ( Dof *dof: *dman ) {
                if ( dof->isPrimaryDof() && ( eqNum = dof->__giveEquationNumber() ) ) {
                    my_pt += coeff * internalForces.at(eqNum) * loadRefVector.at(eqNum) / massMatrix.at(eqNum);
                }
            }
        }

        // Sum up the contributions from processors.
        MPI_Allreduce(& my_pt, & pt, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
#else
        for ( k = 1; k <= neq; k++ ) {
            pt += internalForces.at(k) * loadRefVector.at(k) / massMatrix.at(k);
        }

#endif
        pt = pt / pMp;
        if ( dumpingCoef < 1.e-3 ) {
            pt += c * ( Tau - tStep->giveTargetTime() ) / Tau;
        } else {
            pt += c * ( 1.0 - exp( dumpingCoef * ( tStep->giveTargetTime() - Tau ) ) ) / dumpingCoef / Tau;
        }

        loadVector.resize( this->giveNumberOfDomainEquations( 1, EModelDefaultEquationNumbering() ) );
        for ( k = 1; k <= neq; k++ ) {
            loadVector.at(k) = pt * loadRefVector.at(k) - internalForces.at(k);
        }


        // Compute relative error.
        double err = 0.0;
#ifdef __PARALLEL_MODE
        double my_err = 0.0;

        for ( int dm = 1; dm <= ndofman; dm++ ) {
            dman = domain->giveDofManager(dm);
            dofmanmode = dman->giveParallelMode();
            // Skip all remote and null dofmanagers.
            coeff = 1.0;
            if ( ( dofmanmode == DofManager_remote ) || ( dofmanmode == DofManager_null ) ) {
                continue;
            } else if ( dofmanmode == DofManager_shared ) {
                coeff = 1. / dman->givePartitionsConnectivitySize();
            }

            // For shared nodes we add locally an average= 1/givePartitionsConnectivitySize()*contribution.
            for ( Dof *dof: *dman ) {
                if ( dof->isPrimaryDof() && ( eqNum = dof->__giveEquationNumber() ) ) {
                    my_err += coeff * loadVector.at(eqNum) * loadVector.at(eqNum) / massMatrix.at(eqNum);
                }
            }
        }

        // Sum up the contributions from processors.
        MPI_Allreduce(& my_err, & err, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);

#else
        for ( k = 1; k <= neq; k++ ) {
            err += loadVector.at(k) * loadVector.at(k) / massMatrix.at(k); // accumulate, as in the parallel branch
        }

#endif
        err = err / ( pMp * pt * pt );
        OOFEM_LOG_RELEVANT("Relative error is %e, loadlevel is %e\n", err, pt);
    }

    for ( j = 1; j <= neq; j++ ) {
        coeff =  massMatrix.at(j);
        loadVector.at(j) +=
            coeff * ( ( 1. / ( deltaT * deltaT ) ) - dumpingCoef * 1. / ( 2. * deltaT ) ) *
            previousIncrementOfDisplacementVector.at(j);
    }

    //
    // Set up the numerical model.
    //
    /* It is not necessary to call a numerical method here: the mass matrix is
     * diagonal, so its inverse is applied directly in the update loop below.
     * The approach is crude but effective enough, and the arising system is
     * solved locally.
     */
#ifdef VERBOSE
    OOFEM_LOG_RELEVANT( "\n\nSolving [Step number %8d, Time %15e]\n", tStep->giveNumber(), tStep->giveTargetTime() );
#endif

    //     NM_Status s = nMethod->solve(*massMatrix, loadVector, displacementVector);
    //    if ( !(s & NM_Success) ) {
    //        OOFEM_ERROR("No success in solving system. Ma=f");
    //    }


    for ( i = 1; i <= neq; i++ ) {
        prevIncrOfDisplacement = previousIncrementOfDisplacementVector.at(i);
        incrOfDisplacement = loadVector.at(i) /
        ( massMatrix.at(i) * ( 1. / ( deltaT * deltaT ) + dumpingCoef / ( 2. * deltaT ) ) );

        accelerationVector.at(i) = ( incrOfDisplacement - prevIncrOfDisplacement ) / ( deltaT * deltaT );
        velocityVector.at(i)     = ( incrOfDisplacement + prevIncrOfDisplacement ) / ( 2. * deltaT );
        previousIncrementOfDisplacementVector.at(i) = incrOfDisplacement;
    }
}
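
Because the mass matrix is lumped (diagonal) and damping is mass-proportional, the final loop decouples into independent scalar updates. The per-equation step is restated below as a self-contained sketch; the function and parameter names are illustrative only.

// Central-difference step for one equation (scalar restatement of the loop above).
// m = lumped mass entry, c = damping coefficient (dumpingCoef), dt = deltaT,
// f = right-hand side entry, duPrev = previous increment of displacement.
void centralDifferenceStep(double f, double m, double c, double dt, double duPrev,
                           double &du, double &accel, double &veloc)
{
    // m * ( du/dt^2 + c*du/(2*dt) ) = f  =>  solve for the new increment du
    du    = f / ( m * ( 1. / ( dt * dt ) + c / ( 2. * dt ) ) );
    accel = ( du - duPrev ) / ( dt * dt ); // central-difference acceleration
    veloc = ( du + duPrev ) / ( 2. * dt ); // central-difference velocity
}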
Code example #7
File: petscordering.C  Project: JimBrouzoulis/oofem-1
void
PetscNatural2GlobalOrdering :: init(EngngModel *emodel, EquationID ut, int di, EquationType et)
{
    Domain *d = emodel->giveDomain(di);
    int i, j, k, p, ndofs, ndofman = d->giveNumberOfDofManagers();
    int myrank = emodel->giveRank();
    DofManager *dman;
    // determine the number of local eqs + the number of those shared DOFs which are numbered by the receiver
    // (a shared dofman is numbered on the partition with the lowest rank number)
    EModelDefaultEquationNumbering dn;
    EModelDefaultPrescribedEquationNumbering dpn;

#ifdef __VERBOSE_PARALLEL
    VERBOSEPARALLEL_PRINT("PetscNatural2GlobalOrdering :: init", "initializing N2G ordering", myrank);
#endif

    l_neqs = 0;
    for ( i = 1; i <= ndofman; i++ ) {
        dman = d->giveDofManager(i);
        /*
         *  if (dman->giveParallelMode() == DofManager_local) { // count all dofman eqs
         *    ndofs = dman->giveNumberOfDofs ();
         *    for (j=1; j<=ndofs; j++) {
         *      if (dman->giveDof(j)->isPrimaryDof()) {
         *        if (dman->giveDof(j)->giveEquationNumber()) l_neqs++;
         *      }
         *    }
         *  } else if (dman->giveParallelMode() == DofManager_shared) {
         *    // determine if problem is the lowest one sharing the dofman; if yes the receiver is responsible to
         *    // deliver number
         *    IntArray *plist = dman->givePartitionList();
         *    int n = plist->giveSize();
         *    int minrank = myrank;
         *    for (j=1; j<=n; j++) minrank = min (minrank, plist->at(j));
         *    if (minrank == myrank) { // count eqs
         *      ndofs = dman->giveNumberOfDofs ();
         *      for (j=1; j<=ndofs; j++) {
         *        if (dman->giveDof(j)->isPrimaryDof()) {
         *          if (dman->giveDof(j)->giveEquationNumber()) l_neqs++;
         *        }
         *      }
         *    }
         *  } // end shared dman
         */
        if ( isLocal(dman) ) {
            ndofs = dman->giveNumberOfDofs();
            for ( j = 1; j <= ndofs; j++ ) {
                if ( dman->giveDof(j)->isPrimaryDof() ) {
                    if ( et == et_standard ) {
                        if ( dman->giveDof(j)->giveEquationNumber(dn) ) {
                            l_neqs++;
                        }
                    } else {
                        if ( dman->giveDof(j)->giveEquationNumber(dpn) ) {
                            l_neqs++;
                        }
                    }
                }
            }
        }
    }

    // exchange with other procs the number of eqs numbered on particular procs
    int *leqs = new int [ emodel->giveNumberOfProcesses() ];
    MPI_Allgather(& l_neqs, 1, MPI_INT, leqs, 1, MPI_INT, MPI_COMM_WORLD);
    // compute local offset
    int offset = 0;
    for ( j = 0; j < myrank; j++ ) {
        offset += leqs [ j ];
    }

    // count global number of eqs
    for ( g_neqs = 0, j = 0; j < emodel->giveNumberOfProcesses(); j++ ) {
        g_neqs += leqs [ j ];
    }

    // send numbered shared ones
    if ( et == et_standard ) {
        locGlobMap.resize( emodel->giveNumberOfEquations(ut) );
    } else {
        locGlobMap.resize( emodel->giveNumberOfPrescribedEquations(ut) );
    }

    // determine shared dofs
    int psize, nproc = emodel->giveNumberOfProcesses();
    IntArray sizeToSend(nproc), sizeToRecv(nproc), nrecToReceive(nproc);
#ifdef __VERBOSE_PARALLEL
    IntArray nrecToSend(nproc);
#endif
    const IntArray *plist;
    for ( i = 1; i <= ndofman; i++ ) {
        // if (domain->giveDofManager(i)->giveParallelMode() == DofManager_shared) {
        if ( isShared( d->giveDofManager(i) ) ) {
            int n = d->giveDofManager(i)->giveNumberOfDofs();
            plist = d->giveDofManager(i)->givePartitionList();
            psize = plist->giveSize();
            int minrank = myrank;
            for ( j = 1; j <= psize; j++ ) {
                minrank = min( minrank, plist->at(j) );
            }

            if ( minrank == myrank ) { // count to send
                for ( j = 1; j <= psize; j++ ) {
#ifdef __VERBOSE_PARALLEL
                    nrecToSend( plist->at(j) )++;
#endif
                    sizeToSend( plist->at(j) ) += ( 1 + n );  // ndofs+dofman number
                }
            } else {
                nrecToReceive(minrank)++;
                sizeToRecv(minrank) += ( 1 + n );      // ndofs+dofman number
            }
        }
    }

#ifdef __VERBOSE_PARALLEL
    for ( i = 0; i < nproc; i++ ) {
        OOFEM_LOG_INFO("[%d] Record Statistics: Sending %d Receiving %d to %d\n",
                       myrank, nrecToSend(i), nrecToReceive(i), i);
    }

#endif



    std :: map< int, int >globloc; //  global->local mapping for shared
    // number local guys
    int globeq = offset;
    for ( i = 1; i <= ndofman; i++ ) {
        dman = d->giveDofManager(i);
        //if (dman->giveParallelMode() == DofManager_shared) {
        if ( isShared(dman) ) {
            globloc [ dman->giveGlobalNumber() ] = i; // build global->local mapping for shared

            plist = dman->givePartitionList();
            psize = plist->giveSize();
            int minrank = myrank;
            for ( j = 1; j <= psize; j++ ) {
                minrank = min( minrank, plist->at(j) );
            }

            if ( minrank == myrank ) { // local
                ndofs = dman->giveNumberOfDofs();
                for ( j = 1; j <= ndofs; j++ ) {
                    if ( dman->giveDof(j)->isPrimaryDof() ) {
                        int eq;
                        if ( et == et_standard ) {
                            eq = dman->giveDof(j)->giveEquationNumber(dn);
                        } else {
                            eq = dman->giveDof(j)->giveEquationNumber(dpn);
                        }

                        if ( eq ) {
                            locGlobMap.at(eq) = globeq++;
                        }
                    }
                }
            }

            //} else if (dman->giveParallelMode() == DofManager_local) {
        } else {
            ndofs = dman->giveNumberOfDofs();
            for ( j = 1; j <= ndofs; j++ ) {
                if ( dman->giveDof(j)->isPrimaryDof() ) {
                    int eq;
                    if ( et == et_standard ) {
                        eq = dman->giveDof(j)->giveEquationNumber(dn);
                    } else {
                        eq = dman->giveDof(j)->giveEquationNumber(dpn);
                    }

                    if ( eq ) {
                        locGlobMap.at(eq) = globeq++;
                    }
                }
            }
        }
    }


    /*
     * fprintf (stderr, "[%d] locGlobMap: ", myrank);
     * for (i=1; i<=locGlobMap.giveSize(); i++)
     * fprintf (stderr, "%d ",locGlobMap.at(i));
     */

    // pack data for remote procs
    CommunicationBuffer **buffs = new CommunicationBuffer * [ nproc ];
    for ( p = 0; p < nproc; p++ ) {
        buffs [ p ] = new StaticCommunicationBuffer(MPI_COMM_WORLD, 0);
        buffs [ p ]->resize( buffs [ p ]->givePackSize(MPI_INT, 1) * sizeToSend(p) );

#if 0
        OOFEM_LOG_INFO( "[%d]PetscN2G:: init: Send buffer[%d] size %d\n",
                       myrank, p, sizeToSend(p) );
#endif
    }


    for ( i = 1; i <= ndofman; i++ ) {
        if ( isShared( d->giveDofManager(i) ) ) {
            dman = d->giveDofManager(i);
            plist = dman->givePartitionList();
            psize = plist->giveSize();
            int minrank = myrank;
            for ( j = 1; j <= psize; j++ ) {
                minrank = min( minrank, plist->at(j) );
            }

            if ( minrank == myrank ) { // do send
                for ( j = 1; j <= psize; j++ ) {
                    p = plist->at(j);
                    if ( p == myrank ) {
                        continue;
                    }

#if 0
                    OOFEM_LOG_INFO("[%d]PetscN2G:: init: Sending localShared node %d[%d] to proc %d\n",
                                   myrank, i, dman->giveGlobalNumber(), p);
#endif
                    buffs [ p ]->packInt( dman->giveGlobalNumber() );
                    ndofs = dman->giveNumberOfDofs();
                    for ( k = 1; k <= ndofs; k++ ) {
                        if ( dman->giveDof(k)->isPrimaryDof() ) {
                            int eq;
                            if ( et == et_standard ) {
                                eq = dman->giveDof(k)->giveEquationNumber(dn);
                            } else {
                                eq = dman->giveDof(k)->giveEquationNumber(dpn);
                            }

                            if ( eq ) {
                                buffs [ p ]->packInt( locGlobMap.at(eq) );
                            }
                        }
                    }
                }
            }
        }
    }


    //fprintf (stderr, "[%d] Sending glob nums ...", myrank);
    // send buffers
    for ( p = 0; p < nproc; p++ ) {
        if ( p != myrank ) {
            buffs [ p ]->iSend(p, 999);
        }
    }


    /****
    *
    *  for (p=0; p<nproc; p++) {
    *   if (p == myrank) continue;
    *   for (i=1;  i<= ndofman; i++) {
    *     //if (domain->giveDofManager(i)->giveParallelMode() == DofManager_shared) {
    *     if (isShared(d->giveDofManager(i))) {
    *       dman = d->giveDofManager(i);
    *       plist = dman->givePartitionList();
    *       psize = plist->giveSize();
    *       int minrank = myrank;
    *       for (j=1; j<=psize; j++) minrank = min (minrank, plist->at(j));
    *       if (minrank == myrank) { // do send
    *         buffs[p]->packInt(dman->giveGlobalNumber());
    *         ndofs = dman->giveNumberOfDofs ();
    *         for (j=1; j<=ndofs; j++) {
    *           if (dman->giveDof(j)->isPrimaryDof()) {
    *             buffs[p]->packInt(locGlobMap.at(dman->giveDof(j)->giveEquationNumber()));
    *           }
    *         }
    *       }
    *     }
    *   }
    *   // send buffer
    *   buffs[p]->iSend(p, 999);
    *  }
    ****/

    // receive remote eqs and complete global numbering
    CommunicationBuffer **rbuffs = new CommunicationBuffer * [ nproc ];
    for ( p = 0; p < nproc; p++ ) {
        rbuffs [ p ] = new StaticCommunicationBuffer(MPI_COMM_WORLD, 0);
        rbuffs [ p ]->resize( rbuffs [ p ]->givePackSize(MPI_INT, 1) * sizeToRecv(p) );
#if 0
        OOFEM_LOG_INFO( "[%d]PetscN2G:: init: Receive buffer[%d] size %d\n",
                       myrank, p, sizeToRecv(p) );
#endif
    }


    //fprintf (stderr, "[%d] Receiving glob nums ...", myrank);
    for ( p = 0; p < nproc; p++ ) {
        if ( p != myrank ) {
            rbuffs [ p ]->iRecv(p, 999);
        }
    }


    IntArray finished(nproc);
    finished.zero();
    int fin = 1;
    finished.at(emodel->giveRank() + 1) = 1;
    do {
        for ( p = 0; p < nproc; p++ ) {
            if ( finished.at(p + 1) == 0 ) {
                if ( rbuffs [ p ]->testCompletion() ) {
                    // data are here
                    // unpack them
                    int nite = nrecToReceive(p);
                    int shdm, ldm;
                    for ( i = 1; i <= nite; i++ ) {
                        rbuffs [ p ]->unpackInt(shdm);

#if 0
                        OOFEM_LOG_INFO("[%d]PetscN2G:: init: Received shared node [%d] from proc %d\n",
                                       myrank, shdm, p);
#endif
                        //
                        // find the local dof manager corresponding to shdm
                        if ( globloc.find(shdm) != globloc.end() ) {
                            ldm = globloc [ shdm ];
                        } else {
                            OOFEM_ERROR3("[%d] PetscNatural2GlobalOrdering :: init: invalid shared dofman received, globnum %d\n", myrank, shdm);
                        }

                        dman = d->giveDofManager(ldm);
                        ndofs = dman->giveNumberOfDofs();
                        for ( j = 1; j <= ndofs; j++ ) {
                            if ( dman->giveDof(j)->isPrimaryDof() ) {
                                int eq;
                                if ( et == et_standard ) {
                                    eq = dman->giveDof(j)->giveEquationNumber(dn);
                                } else {
                                    eq = dman->giveDof(j)->giveEquationNumber(dpn);
                                }

                                if ( eq ) {
                                    int val;
                                    rbuffs [ p ]->unpackInt(val);
                                    locGlobMap.at(eq) = val;
                                }
                            }
                        }
                    }

                    finished.at(p + 1) = 1;
                    fin++;
                }
            }
        }
    } while ( fin < nproc );


    /*
     * fprintf (stderr, "[%d] Finished receiving glob nums ...", myrank);
     *
     * fprintf (stderr, "[%d] locGlobMap:", myrank);
     * for (i=1; i<=locGlobMap.giveSize(); i++)
     * fprintf (stderr, "%d ",locGlobMap.at(i));
     */

#ifdef  __VERBOSE_PARALLEL
    if ( et == et_standard ) {
        int _eq;
        const char *ptr;
        const char *locname = "local", *shname = "shared", *unkname = "unknown";
        for ( i = 1; i <= ndofman; i++ ) {
            dman = d->giveDofManager(i);
            if ( dman->giveParallelMode() == DofManager_local ) {
                ptr = locname;
            } else if ( dman->giveParallelMode() == DofManager_shared ) {
                ptr = shname;
            } else {
                ptr = unkname;
            }

            ndofs = dman->giveNumberOfDofs();
            for ( j = 1; j <= ndofs; j++ ) {
                if ( ( _eq = dman->giveDof(j)->giveEquationNumber(dn) ) ) {
                    fprintf( stderr, "[%d] n:%6s %d[%d] (%d), leq = %d, geq = %d\n", emodel->giveRank(), ptr, i, dman->giveGlobalNumber(), j, _eq, locGlobMap.at(_eq) );
                } else {
                    fprintf(stderr, "[%d] n:%6s %d[%d] (%d), leq = %d, geq = %d\n", emodel->giveRank(), ptr, i, dman->giveGlobalNumber(), j, _eq, 0);
                }
            }
        }
    }

#endif


    // build reverse map
    int lneq;
    if ( et == et_standard ) {
        lneq = emodel->giveNumberOfEquations(ut);
    } else {
        lneq = emodel->giveNumberOfPrescribedEquations(ut);
    }

    globLocMap.clear();
    for ( i = 1; i <= lneq; i++ ) {
        globLocMap [ locGlobMap.at(i) ] = i;
    }

    for ( p = 0; p < nproc; p++ ) {
        delete rbuffs [ p ];
        delete buffs [ p ];
    }

    delete[] rbuffs;
    delete[] buffs;
    delete[] leqs;

    MPI_Barrier(MPI_COMM_WORLD);
#ifdef __VERBOSE_PARALLEL
    VERBOSEPARALLEL_PRINT("PetscNatural2GlobalOrdering :: init", "done", myrank);
#endif
}
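
The heart of the numbering scheme is a single collective: every rank publishes how many equations it numbers, and its starting offset is the prefix sum of the counts of all lower ranks. The pattern in isolation, as plain MPI without OOFEM types (a sketch, not library code):

#include <mpi.h>
#include <vector>

// Gather per-rank equation counts, then prefix-sum the counts below myrank.
int computeGlobalOffset(int localCount, int myrank, int nproc)
{
    std :: vector< int > counts(nproc);
    MPI_Allgather(& localCount, 1, MPI_INT, counts.data(), 1, MPI_INT, MPI_COMM_WORLD);
    int offset = 0;
    for ( int r = 0; r < myrank; r++ ) {
        offset += counts [ r ];
    }
    return offset; // first global equation number owned by this rank
}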
Code example #8
File: problemcomm.C  Project: JimBrouzoulis/oofem-1
void
ProblemCommunicator :: setUpCommunicationMapsForElementCut(EngngModel *pm,
                                                           bool excludeSelfCommFlag)
{
    Domain *domain = pm->giveDomain(1);
    int nnodes = domain->giveNumberOfDofManagers();
    int i, j, partition;

    if ( this->mode == ProblemCommMode__ELEMENT_CUT ) {
        /*
         * Initially, each partition knows for which nodes a receive
         * is needed (and can therefore compute easily the recv map),
         * but does not know for which nodes it should send data to which
         * partition. Hence, the communication setup is performed by
         * broadcasting "send request" lists of nodes for which
         * a partition expects to receive data (i.e., of those nodes
         * which the partition uses, but does not own) to all
         * collaborating processes. The "send request" lists are
         * converted into send maps.
         */

        // receive maps can be built locally,
        // but send maps should be assembled from broadcasted lists (containing
        // expected receive nodes) of remote partitions.

        // first build local receive map
        IntArray domainNodeRecvCount(size);
        const IntArray *partitionList;
        DofManager *dofMan;
        //Element    *element;
        int domainRecvListSize = 0, domainRecvListPos = 0;
        //int nelems;
        int result = 1;

        for ( i = 1; i <= nnodes; i++ ) {
            partitionList = domain->giveDofManager(i)->givePartitionList();
            if ( domain->giveDofManager(i)->giveParallelMode() == DofManager_remote ) {
                // size of partitionList should be 1 <== only one master
                for ( j = 1; j <= partitionList->giveSize(); j++ ) {
                    if ( !( excludeSelfCommFlag && ( this->rank == partitionList->at(j) ) ) ) {
                        domainRecvListSize++;
                        domainNodeRecvCount.at(partitionList->at(j) + 1)++;
                    }
                }
            }
        }

        // build maps simultaneously
        IntArray pos(size);
        IntArray **maps = new IntArray * [ size ];
        for ( i = 0; i < size; i++ ) {
            maps [ i ] = new IntArray( domainNodeRecvCount.at(i + 1) );
        }

        // allocate also domain receive list to be broadcasted
        IntArray domainRecvList(domainRecvListSize);

        if ( domainRecvListSize ) {
            for ( i = 1; i <= nnodes; i++ ) {
                // test if node is remote DofMan
                dofMan = domain->giveDofManager(i);
                if ( dofMan->giveParallelMode() == DofManager_remote ) {
                    domainRecvList.at(++domainRecvListPos) = dofMan->giveGlobalNumber();

                    partitionList = domain->giveDofManager(i)->givePartitionList();
                    // size of partitionList should be 1 <== only one master
                    for ( j = 1; j <= partitionList->giveSize(); j++ ) {
                        if ( !( excludeSelfCommFlag && ( this->rank == partitionList->at(j) ) ) ) {
                            partition = partitionList->at(j);
                            maps [ partition ]->at( ++pos.at(partition + 1) ) = i;
                        }
                    }
                }
            }
        }

        // set up process recv communicator maps
        for ( i = 0; i < size; i++ ) {
            this->setProcessCommunicatorToRecvArry(this->giveProcessCommunicator(i), * maps [ i ]);
            //this->giveDomainCommunicator(i)->setToRecvArry (this->engngModel, *maps[i]);
        }

        // delete local maps
        for ( i = 0; i < size; i++ ) {
            delete maps [ i ];
        }

        delete [] maps; // array form: maps was allocated with new IntArray * [ size ]

        // to assemble send maps, we must analyze broadcasted remote domain send lists
        // and we must also broadcast our send list.

#ifdef __VERBOSE_PARALLEL
        VERBOSEPARALLEL_PRINT("ProblemCommunicator::setUpCommunicationMaps", "Element-cut broadcasting started", rank);
#endif


        StaticCommunicationBuffer commBuff(MPI_COMM_WORLD);
        IntArray remoteDomainRecvList;
        IntArray toSendMap;
        int localExpectedSize, globalRecvSize;
        int sendMapPos, sendMapSize, globalDofManNum;

        // determine the size of receive buffer using AllReduce operation
#ifndef IBM_MPI_IMPLEMENTATION
        localExpectedSize = domainRecvList.givePackSize(commBuff);
#else
        localExpectedSize = domainRecvList.givePackSize(commBuff) + 1;
#endif

#ifdef __USE_MPI
        result = MPI_Allreduce(& localExpectedSize, & globalRecvSize, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
        if ( result != MPI_SUCCESS ) {
            _error("setUpCommunicationMaps: MPI_Allreduce failed");
        }

#else
WARNING: NOT SUPPORTED MESSAGE PASSING LIBRARY
#endif

#ifdef __VERBOSE_PARALLEL
        VERBOSEPARALLEL_PRINT("ProblemCommunicator::setUpCommunicationMaps", "Finished reducing receiveBufferSize", rank);
#endif


        // resize to fit largest received message
        commBuff.resize(globalRecvSize);

        // resize toSend map to max possible size
        toSendMap.resize(globalRecvSize);

        for ( i = 0; i < size; i++ ) { // loop over domains
            commBuff.init();
            if ( i == rank ) {
                //current domain has to send its receive list to all domains
                // broadcast domainRecvList

#ifdef __VERBOSE_PARALLEL
                VERBOSEPARALLEL_PRINT("ProblemCommunicator::setUpCommunicationMaps", "Broadcasting own send list", rank);
#endif

                commBuff.packIntArray(domainRecvList);
                result = commBuff.bcast(i);
                if ( result != MPI_SUCCESS ) {
                    _error("setUpCommunicationMaps: commBuff broadcast failed");
                }

#ifdef __VERBOSE_PARALLEL
                VERBOSEPARALLEL_PRINT("ProblemCommunicator::setUpCommunicationMaps", "Broadcasting own send list finished", rank);
#endif
            } else {
#ifdef __VERBOSE_PARALLEL
                OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Receiving broadcasted send map from partition %3d\n",
                                rank, "ProblemCommunicator :: unpackAllData", i);
#endif
                // receive broadcasted lists
                result = commBuff.bcast(i);
                if ( result != MPI_SUCCESS ) {
                    _error("setUpCommunicationMaps: commBuff broadcast failed");
                }

#ifdef __VERBOSE_PARALLEL
                OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Receiving broadcasted send map from partition %3d finished\n",
                                rank, "ProblemCommunicator :: unpackAllData", i);
#endif


                // unpack remote receive list
                if ( !commBuff.unpackIntArray(remoteDomainRecvList) ) {
                    _error("ProblemCommunicator::setUpCommunicationMaps: unpack remote receive list failed");
                }

                // find if remote nodes are in local partition
                // if yes, add them into the send map for the corresponding i-th partition
                sendMapPos = 0;
                sendMapSize = 0;
                // determine sendMap size
                for ( j = 1; j <= nnodes; j++ ) { // loop over local DofManagers
                    dofMan = domain->giveDofManager(j);
                    globalDofManNum = dofMan->giveGlobalNumber();
                    // test if globalDofManNum is in remoteDomainRecvList
                    if ( remoteDomainRecvList.findFirstIndexOf(globalDofManNum) ) {
                        sendMapSize++;
                    }
                }

                toSendMap.resize(sendMapSize);

                for ( j = 1; j <= nnodes; j++ ) { // loop over local DofManagers
                    dofMan = domain->giveDofManager(j);
                    globalDofManNum = dofMan->giveGlobalNumber();
                    // test if globalDofManNum is in remoteDomainRecvList
                    if ( remoteDomainRecvList.findFirstIndexOf(globalDofManNum) ) {
                        // add this local DofManager number to the send map for the active partition
                        toSendMap.at(++sendMapPos) = j;
                    }
                } // end loop over local DofManagers

                // set send map to i-th process communicator
                this->setProcessCommunicatorToSendArry(this->giveProcessCommunicator(i), toSendMap);
                //this->giveDomainCommunicator(i)->setToSendArry (this->engngModel, toSendMap);
            } // end receiving broadcasted lists

#ifdef __VERBOSE_PARALLEL
            VERBOSEPARALLEL_PRINT("ProblemCommunicator::setUpCommunicationMaps", "Receiving broadcasted send maps finished", rank);
#endif
        } // end loop over domains

    } else {
        _error("setUpCommunicationMapsForElementCut: unknown mode");
    }
}
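
One detail worth isolating: before the broadcast round, every rank must size its buffer for the largest message any rank will send, which is exactly what the MPI_MAX reduction above achieves. The idiom as a freestanding sketch:

#include <mpi.h>

// Agree on an upper bound big enough for any rank's packed receive list.
int agreeOnBufferSize(int localPackSize)
{
    int globalMax = 0;
    MPI_Allreduce(& localPackSize, & globalMax, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
    return globalMax; // every rank resizes its broadcast buffer to this bound
}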