Example #1
void Foam::multiSolver::setUpParallel()
{
    if (Pstream::master())
    {
        fileNameList roots(Pstream::nProcs());
        
        // The master stores its own root first and always manages it
        roots[0] = multiDictRegistry_.rootPath();
        manageLocalRoot_ = true;
        
        // Receive from slaves
        for
        (
            int slave=Pstream::firstSlave();
            slave<=Pstream::lastSlave();
            slave++
        )
        {
            IPstream fromSlave(Pstream::blocking, slave);
            roots[slave] = fileName(fromSlave);
        }
        
        // Distribute: tell each slave whether it must manage its own root
        for
        (
            int slave=Pstream::firstSlave();
            slave<=Pstream::lastSlave();
            slave++
        )
        {
            OPstream toSlave(Pstream::blocking, slave);
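            // A slave manages its own root only if its root differs from
            // the previous processor's root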
            if (roots[slave] != roots[slave - 1])
            {
                toSlave << true;
            }
            else
            {
                toSlave << false;
            }
        }
    }
    else
    {
        // Send to master
        {
            OPstream toMaster(Pstream::blocking, Pstream::masterNo());
            toMaster << fileName(multiDictRegistry_.rootPath());
        }
        // Receive from master
        {
            IPstream fromMaster(Pstream::blocking, Pstream::masterNo());
            manageLocalRoot_ = readBool(fromMaster);
        }
    }
}
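For comparison, the same round trip (gather every processor's root path on the master, then redistribute a result) is often written with Pstream's list helpers rather than explicit per-slave streams. A minimal sketch, assuming OpenFOAM's Pstream::gatherList/scatterList and the same multiDictRegistry_ member; this is not part of the original code:

    // Sketch only: gather each processor's root path onto the master,
    // then broadcast the complete list back to every processor.
    fileNameList roots(Pstream::nProcs());
    roots[Pstream::myProcNo()] = multiDictRegistry_.rootPath();
    Pstream::gatherList(roots);
    Pstream::scatterList(roots);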
Example #2
Foam::List<Foam::labelPair> Foam::mapDistribute::schedule
(
    const labelListList& subMap,
    const labelListList& constructMap
)
{
    // Communications: (send processor, receive processor) pairs
    List<labelPair> allComms;

    {
        HashSet<labelPair, labelPair::Hash<> > commsSet(Pstream::nProcs());

        // Find what communication is required
        forAll(subMap, procI)
        {
            if (procI != Pstream::myProcNo())
            {
                if (subMap[procI].size())
                {
                    // I need to send to procI
                    commsSet.insert(labelPair(Pstream::myProcNo(), procI));
                }
                if (constructMap[procI].size())
                {
                    // I need to receive from procI
                    commsSet.insert(labelPair(procI, Pstream::myProcNo()));
                }
            }
        }
        allComms = commsSet.toc();
    }


    // Reduce: merge every processor's communication pairs onto the master
    if (Pstream::master())
    {
        // Receive and merge
        for
        (
            int slave=Pstream::firstSlave();
            slave<=Pstream::lastSlave();
            slave++
        )
        {
            IPstream fromSlave(Pstream::scheduled, slave);
            List<labelPair> nbrData(fromSlave);

            forAll(nbrData, i)
            {
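                // Append only the pairs not already in the merged list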
                if (findIndex(allComms, nbrData[i]) == -1)
                {
                    label sz = allComms.size();
                    allComms.setSize(sz+1);
                    allComms[sz] = nbrData[i];
                }
            }
        }
        // Send back
        for
        (
            int slave=Pstream::firstSlave();
            slave<=Pstream::lastSlave();
            slave++
        )
        {
            OPstream toSlave(Pstream::scheduled, slave);
            toSlave << allComms;
        }
    }
    else
    {
        // Send my communication pairs to the master
        {
            OPstream toMaster(Pstream::scheduled, Pstream::masterNo());
            toMaster << allComms;
        }

        // Receive the merged list back from the master
        {
            IPstream fromMaster(Pstream::scheduled, Pstream::masterNo());
            fromMaster >> allComms;
        }
    }
Example #3
Foam::label Foam::scotchDecomp::decompose
(
    const fileName& meshPath,
    const List<label>& adjncy,
    const List<label>& xadj,
    const scalarField& cWeights,

    List<label>& finalDecomp
)
{
    if (!Pstream::parRun())
    {
        decomposeOneProc
        (
            meshPath,
            adjncy,
            xadj,
            cWeights,
            finalDecomp
        );
    }
    else
    {
        if (debug)
        {
            Info<< "scotchDecomp : running in parallel."
                << " Decomposing all of graph on master processor." << endl;
        }
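        // Global cell numbering; each processor contributes
        // xadj.size()-1 cells to the distributed graph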
        globalIndex globalCells(xadj.size()-1);
        label nTotalConnections = returnReduce(adjncy.size(), sumOp<label>());

        // Send all to master. Use scheduled to save some storage.
        if (Pstream::master())
        {
            Field<label> allAdjncy(nTotalConnections);
            Field<label> allXadj(globalCells.size()+1);
            scalarField allWeights(globalCells.size());

            // Insert my own
            label nTotalCells = 0;
            forAll(cWeights, cellI)
            {
                allXadj[nTotalCells] = xadj[cellI];
                allWeights[nTotalCells++] = cWeights[cellI];
            }
            nTotalConnections = 0;
            forAll(adjncy, i)
            {
                allAdjncy[nTotalConnections++] = adjncy[i];
            }

            for (int slave=1; slave<Pstream::nProcs(); slave++)
            {
                IPstream fromSlave(Pstream::scheduled, slave);
                Field<label> nbrAdjncy(fromSlave);
                Field<label> nbrXadj(fromSlave);
                scalarField nbrWeights(fromSlave);

                // Append.
                //label procStart = nTotalCells;
                forAll(nbrXadj, cellI)
                {
                    allXadj[nTotalCells] = nTotalConnections+nbrXadj[cellI];
                    allWeights[nTotalCells++] = nbrWeights[cellI];
                }
                // No need to renumber adjncy: its entries are already
                // global cell indices
                forAll(nbrAdjncy, i)
                {
                    allAdjncy[nTotalConnections++] = nbrAdjncy[i];
                }
            }
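            // Close the offset table: the final entry is the total
            // number of connections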
            allXadj[nTotalCells] = nTotalConnections;


            Field<label> allFinalDecomp;
            decomposeOneProc
            (
                meshPath,
                allAdjncy,
                allXadj,
                allWeights,
                allFinalDecomp
            );


            // Send allFinalDecomp back
            for (int slave=1; slave<Pstream::nProcs(); slave++)
            {
                OPstream toSlave(Pstream::scheduled, slave);
                toSlave << SubField<label>
                (
                    allFinalDecomp,
                    globalCells.localSize(slave),
                    globalCells.offset(slave)
                );
            }
            // Get my own part (always first)
            finalDecomp = SubField<label>
            (
                allFinalDecomp,
                globalCells.localSize()
            );
        }
        else
        {
            // Send my part of the graph to the master, mirroring the
            // reads above: adjncy, xadj without its final (redundant)
            // entry, then the cell weights
            {
                OPstream toMaster(Pstream::scheduled, Pstream::masterNo());
                toMaster
                    << adjncy
                    << SubField<label>(xadj, xadj.size()-1)
                    << cWeights;
            }

            // Receive my part of the decomposition back from the master
            {
                IPstream fromMaster(Pstream::scheduled, Pstream::masterNo());
                fromMaster >> finalDecomp;
            }
        }
    }

    return 0;
}
Example #4
template<class T0>
Gather<T0>::Gather(const T0& localData, const bool redistribute)
:
    List<T0>(0),
    nProcs_(max(1, Pstream::nProcs()))
{
    this->setSize(nProcs_);

    //
    // Collect the local data from all processors
    //

    if (Pstream::parRun())
    {
        if (Pstream::master())
        {
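            // The master's own contribution goes in slot 0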
            this->operator[](0) = localData;

            // Receive data
            for
            (
                int slave = Pstream::firstSlave(), procIndex = 1;
                slave <= Pstream::lastSlave();
                slave++, procIndex++
            )
            {
                IPstream fromSlave(Pstream::scheduled, slave);
                fromSlave >> this->operator[](procIndex);
            }

            // Send data
            for
            (
                int slave = Pstream::firstSlave(), procIndex = 1;
                slave <= Pstream::lastSlave();
                slave++, procIndex++
            )
            {
                OPstream toSlave(Pstream::scheduled, slave);

                if (redistribute)
                {
                    toSlave << *this;
                }
                else
                {
                    // Dummy send just to balance sends/receives
                    toSlave << 0;
                }
            }
        }
        else
        {
            // Slave: send my local data to master
            {
                OPstream toMaster(Pstream::scheduled, Pstream::masterNo());
                toMaster << localData;
            }

            // Receive data from master
            {
                IPstream fromMaster(Pstream::scheduled, Pstream::masterNo());
                if (redistribute)
                {
                    fromMaster >> *this;
                }
                else
                {
                    label dummy;
                    fromMaster >> dummy;
                }
            }
        }
    }
    else
    {
        // Not running in parallel: just store the local data in slot 0
        this->operator[](0) = localData;
    }
}
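A hedged usage sketch for the constructor above; mesh.nCells() stands in for whatever per-processor value is being gathered and is not part of the original code:

    // Hypothetical usage: gather one label per processor and redistribute
    // the full list (redistribute = true), then report it from the master.
    Gather<label> cellsPerProc(mesh.nCells(), true);

    forAll(cellsPerProc, procI)
    {
        Info<< "Processor " << procI << " : "
            << cellsPerProc[procI] << " cells" << endl;
    }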