Example #1
void Foam::multiSolver::synchronizeParallel() const
{
    if (Pstream::master())
    {
        // Give go signal
        for
        (
            int slave=Pstream::firstSlave();
            slave<=Pstream::lastSlave();
            slave++
        )
        {
            OPstream toSlave(Pstream::blocking, slave);
            toSlave << true;
        }
    }
    else
    {
        // Receive go signal
        {
            IPstream fromMaster(Pstream::blocking, Pstream::masterNo());
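            // The value read is never used; the blocking receive itself is
            // what holds this processor until the master's signal arrives.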
            bool okayToGo(readBool(fromMaster));
        }
    }
}
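A comparable barrier can be written without the explicit slave loop. A minimal sketch, assuming the usual OpenFOAM reduction helpers (reduce from PstreamReduceOps.H and andOp from ops.H) in place of the point-to-point streams above:

// Every processor blocks until all have contributed to the reduction.
bool okayToGo = true;
reduce(okayToGo, andOp<bool>());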
Example #2
void Foam::multiSolver::setUpParallel()
{
    if (Pstream::master())
    {
        fileNameList roots(Pstream::nProcs());
        
        roots[0] = multiDictRegistry_.rootPath();
        manageLocalRoot_ = true;
        
        // Receive from slaves
        for
        (
            int slave=Pstream::firstSlave();
            slave<=Pstream::lastSlave();
            slave++
        )
        {
            IPstream fromSlave(Pstream::blocking, slave);
            roots[slave] = fileName(fromSlave);
        }
        
        // Distribute
        for
        (
            int slave=Pstream::firstSlave();
            slave<=Pstream::lastSlave();
            slave++
        )
        {
            OPstream toSlave(Pstream::blocking, slave);
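            // A slave manages its root only if it differs from the previous
            // processor's root; assuming processors sharing a root are
            // numbered consecutively, each root gets exactly one manager
            // (the master already manages roots[0]).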
            if (roots[slave] != roots[slave - 1])
            {
                toSlave << true;
            }
            else
            {
                toSlave << false;
            }
        }
    }
    else
    {
        // Send to master
        {
            OPstream toMaster(Pstream::blocking, Pstream::masterNo());
            toMaster << fileName(multiDictRegistry_.rootPath());
        }
        // Receive from master
        {
            IPstream fromMaster(Pstream::blocking, Pstream::masterNo());
            manageLocalRoot_ = readBool(fromMaster);
        }
    }
}
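For comparison, the same gather/scatter can be expressed with Pstream::gatherList and Pstream::scatterList, which hide the master/slave loops. A sketch under the same assumptions as above (multiDictRegistry_ as in the class; the manageLocalRoot_ decision would still follow separately):

fileNameList roots(Pstream::nProcs());
roots[Pstream::myProcNo()] = multiDictRegistry_.rootPath();
Pstream::gatherList(roots);   // master collects every processor's root
Pstream::scatterList(roots);  // all processors receive the full list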
Example #3
Foam::List<Foam::labelPair> Foam::mapDistribute::schedule
(
    const labelListList& subMap,
    const labelListList& constructMap
)
{
    // Communications: send and receive processor
    List<labelPair> allComms;

    {
        HashSet<labelPair, labelPair::Hash<> > commsSet(Pstream::nProcs());

        // Find what communication is required
        forAll(subMap, procI)
        {
            if (procI != Pstream::myProcNo())
            {
                if (subMap[procI].size())
                {
                    // I need to send to procI
                    commsSet.insert(labelPair(Pstream::myProcNo(), procI));
                }
                if (constructMap[procI].size())
                {
                    // I need to receive from procI
                    commsSet.insert(labelPair(procI, Pstream::myProcNo()));
                }
            }
        }
        allComms = commsSet.toc();
    }


    // Reduce
    if (Pstream::master())
    {
        // Receive and merge
        for
        (
            int slave=Pstream::firstSlave();
            slave<=Pstream::lastSlave();
            slave++
        )
        {
            IPstream fromSlave(Pstream::scheduled, slave);
            List<labelPair> nbrData(fromSlave);

            forAll(nbrData, i)
            {
                if (findIndex(allComms, nbrData[i]) == -1)
                {
                    label sz = allComms.size();
                    allComms.setSize(sz+1);
                    allComms[sz] = nbrData[i];
                }
            }
        }
        // Send back
        for
        (
            int slave=Pstream::firstSlave();
            slave<=Pstream::lastSlave();
            slave++
        )
        {
            OPstream toSlave(Pstream::scheduled, slave);
            toSlave << allComms;
        }
    }
    else
    {
        // Send my communications to the master ...
        {
            OPstream toMaster(Pstream::scheduled, Pstream::masterNo());
            toMaster << allComms;
        }
        // ... and receive the merged list back
        {
            IPstream fromMaster(Pstream::scheduled, Pstream::masterNo());
            fromMaster >> allComms;
        }
    }
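The master-side merge above calls findIndex on allComms for every received pair, which is quadratic in the number of communications. A sketch of an alternative that reuses a HashSet for amortised O(1) duplicate checks (mergedSet is a name introduced here, not in the original):

HashSet<labelPair, labelPair::Hash<> > mergedSet(allComms.size());
forAll(allComms, i)
{
    mergedSet.insert(allComms[i]);
}

// ... then, inside the receive loop, for each nbrData[i]:
if (mergedSet.insert(nbrData[i]))  // insert() returns true for a new key
{
    label sz = allComms.size();
    allComms.setSize(sz+1);
    allComms[sz] = nbrData[i];
}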
Example #4
Foam::label Foam::scotchDecomp::decompose
(
    const fileName& meshPath,
    const List<label>& adjncy,
    const List<label>& xadj,
    const scalarField& cWeights,

    List<label>& finalDecomp
)
{
    if (!Pstream::parRun())
    {
        decomposeOneProc
        (
            meshPath,
            adjncy,
            xadj,
            cWeights,
            finalDecomp
        );
    }
    else
    {
        if (debug)
        {
            Info<< "scotchDecomp : running in parallel."
                << " Decomposing all of graph on master processor." << endl;
        }
        globalIndex globalCells(xadj.size()-1);
        label nTotalConnections = returnReduce(adjncy.size(), sumOp<label>());

        // Send all to master. Use scheduled to save some storage.
        if (Pstream::master())
        {
            Field<label> allAdjncy(nTotalConnections);
            Field<label> allXadj(globalCells.size()+1);
            scalarField allWeights(globalCells.size());

            // Insert my own
            label nTotalCells = 0;
            forAll(cWeights, cellI)
            {
                allXadj[nTotalCells] = xadj[cellI];
                allWeights[nTotalCells++] = cWeights[cellI];
            }
            nTotalConnections = 0;
            forAll(adjncy, i)
            {
                allAdjncy[nTotalConnections++] = adjncy[i];
            }

            for (int slave=1; slave<Pstream::nProcs(); slave++)
            {
                IPstream fromSlave(Pstream::scheduled, slave);
                Field<label> nbrAdjncy(fromSlave);
                Field<label> nbrXadj(fromSlave);
                scalarField nbrWeights(fromSlave);

                // Append.
                //label procStart = nTotalCells;
                forAll(nbrXadj, cellI)
                {
                    allXadj[nTotalCells] = nTotalConnections+nbrXadj[cellI];
                    allWeights[nTotalCells++] = nbrWeights[cellI];
                }
                // No need to renumber xadj since already global.
                forAll(nbrAdjncy, i)
                {
                    allAdjncy[nTotalConnections++] = nbrAdjncy[i];
                }
            }
            allXadj[nTotalCells] = nTotalConnections;


            Field<label> allFinalDecomp;
            decomposeOneProc
            (
                meshPath,
                allAdjncy,
                allXadj,
                allWeights,
                allFinalDecomp
            );


            // Send allFinalDecomp back
            for (int slave=1; slave<Pstream::nProcs(); slave++)
            {
                OPstream toSlave(Pstream::scheduled, slave);
                toSlave << SubField<label>
                (
                    allFinalDecomp,
                    globalCells.localSize(slave),
                    globalCells.offset(slave)
                );
            }
            // Get my own part (always first)
            finalDecomp = SubField<label>
            (
                allFinalDecomp,
                globalCells.localSize()
            );
        }
        else
        {
            // Send my part of the graph to the master; xadj goes without
            // its final entry to match the per-cell reads above
            {
                OPstream toMaster(Pstream::scheduled, Pstream::masterNo());
                toMaster << adjncy
                    << SubField<label>(xadj, xadj.size()-1)
                    << cWeights;
            }

            // Receive back my part of the decomposition
            IPstream fromMaster(Pstream::scheduled, Pstream::masterNo());
            fromMaster >> finalDecomp;
        }
    }

    return 0;
}
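The SubField arithmetic above works because globalIndex lays the per-processor ranges out contiguously in processor order. A small sketch of the quantities involved (nLocalCells and procI are illustrative names, not from the original):

globalIndex globalCells(nLocalCells);         // exchanges sizes between procs
label start = globalCells.offset(procI);      // first global index on procI
label size  = globalCells.localSize(procI);   // number of entries on procI
// The master's slice for procI is SubField<label>(allFinalDecomp, size, start).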
Example #5
// Read mesh if available. Otherwise create empty mesh with same non-proc
// patches as proc0 mesh. Requires all processors to have all patches
// (and in same order).
autoPtr<fvMesh> createMesh
(
    const Time& runTime,
    const word& regionName,
    const fileName& instDir,
    const bool haveMesh
)
{
    //Pout<< "Create mesh for time = "
    //    << runTime.timeName() << nl << endl;

    IOobject io
    (
        regionName,
        instDir,
        runTime,
        IOobject::MUST_READ
    );

    if (!haveMesh)
    {
        // Create dummy mesh. Only used on procs that don't have mesh.
        IOobject noReadIO(io);
        noReadIO.readOpt() = IOobject::NO_READ;
        fvMesh dummyMesh
        (
            noReadIO,
            xferCopy(pointField()),
            xferCopy(faceList()),
            xferCopy(labelList()),
            xferCopy(labelList()),
            false
        );
        // Add some dummy zones so upon reading it does not read them
        // from the undecomposed case. Should be done as extra argument to
        // regIOobject::readStream?
        List<pointZone*> pz
        (
            1,
            new pointZone
            (
                "dummyPointZone",
                labelList(0),
                0,
                dummyMesh.pointZones()
            )
        );
        List<faceZone*> fz
        (
            1,
            new faceZone
            (
                "dummyFaceZone",
                labelList(0),
                boolList(0),
                0,
                dummyMesh.faceZones()
            )
        );
        List<cellZone*> cz
        (
            1,
            new cellZone
            (
                "dummyCellZone",
                labelList(0),
                0,
                dummyMesh.cellZones()
            )
        );
        dummyMesh.addZones(pz, fz, cz);
        //Pout<< "Writing dummy mesh to " << dummyMesh.polyMesh::objectPath()
        //    << endl;
        dummyMesh.write();
    }

    //Pout<< "Reading mesh from " << io.objectPath() << endl;
    autoPtr<fvMesh> meshPtr(new fvMesh(io));
    fvMesh& mesh = meshPtr();


    // Sync patches
    // ~~~~~~~~~~~~

    if (Pstream::master())
    {
        // Send patches
        for
        (
            int slave=Pstream::firstSlave();
            slave<=Pstream::lastSlave();
            slave++
        )
        {
            OPstream toSlave(Pstream::scheduled, slave);
            toSlave << mesh.boundaryMesh();
        }
    }
    else
    {
        // Receive patches
        IPstream fromMaster(Pstream::scheduled, Pstream::masterNo());
        PtrList<entry> patchEntries(fromMaster);

        if (haveMesh)
        {
            // Check master names against mine

            const polyBoundaryMesh& patches = mesh.boundaryMesh();

            forAll(patchEntries, patchI)
            {
                const entry& e = patchEntries[patchI];
                const word type(e.dict().lookup("type"));
                const word& name = e.keyword();

                if (type == processorPolyPatch::typeName)
                {
                    break;
                }

                if (patchI >= patches.size())
                {
                    FatalErrorIn
                    (
                        "createMesh(const Time&, const fileName&, const bool)"
                    )   << "Non-processor patches not synchronised."
                        << endl
                        << "Processor " << Pstream::myProcNo()
                        << " has only " << patches.size()
                        << " patches, master has "
                        << patchI
                        << exit(FatalError);
                }

                if
                (
                    type != patches[patchI].type()
                 || name != patches[patchI].name()
                )
                {
                    FatalErrorIn
                    (
                        "createMesh(const Time&, const fileName&, const bool)"
                    )   << "Non-processor patches not synchronised."
                        << endl
                        << "Master patch " << patchI
                        << " name:" << type
                        << " type:" << type << endl
                        << "Processor " << Pstream::myProcNo()
                        << " patch " << patchI
                        << " has name:" << patches[patchI].name()
                        << " type:" << patches[patchI].type()
                        << exit(FatalError);
                }
            }
        }
        else
        {
            // Add patch
            List<polyPatch*> patches(patchEntries.size());
            label nPatches = 0;

            forAll(patchEntries, patchI)
            {
                const entry& e = patchEntries[patchI];
                const word type(e.dict().lookup("type"));
                const word& name = e.keyword();

                if (type == processorPolyPatch::typeName)
                {
                    break;
                }

                //Pout<< "Adding patch:" << nPatches
                //    << " name:" << name << " type:" << type << endl;

                dictionary patchDict(e.dict());
                patchDict.remove("nFaces");
                patchDict.add("nFaces", 0);
                patchDict.remove("startFace");
                patchDict.add("startFace", 0);

                patches[patchI] = polyPatch::New
                (
                    name,
                    patchDict,
                    nPatches++,
                    mesh.boundaryMesh()
                ).ptr();
            }
            patches.setSize(nPatches);
            mesh.addFvPatches(patches, false);  // no parallel comms

            //// Write empty mesh now we have correct patches
            //meshPtr().write();
        }
    }
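The patch round trip works because polyBoundaryMesh writes itself as a list of named dictionaries, which is exactly what PtrList<entry> reads back on the slaves. Schematically (a format sketch, not literal output):

// 3
// (
//     movingWall
//     {
//         type            wall;
//         nFaces          20;
//         startFace       760;
//     }
//     ...
// )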
Example #6
// Read mesh if available. Otherwise create empty mesh with same non-proc
// patches as proc0 mesh. Requires all processors to have all patches
// (and in same order).
autoPtr<fvMesh> createMesh
(
    const Time& runTime,
    const word& regionName,
    const fileName& instDir,
    const bool haveMesh
)
{
    Pout<< "Create mesh for time = "
        << runTime.timeName() << nl << endl;

    IOobject io
    (
        regionName,
        instDir,
        runTime,
        IOobject::MUST_READ
    );

    if (!haveMesh)
    {
        // Create dummy mesh. Only used on procs that don't have mesh.
        fvMesh dummyMesh
        (
            io,
            xferCopy(pointField()),
            xferCopy(faceList()),
            xferCopy(labelList()),
            xferCopy(labelList()),
            false
        );
        Pout<< "Writing dummy mesh to " << dummyMesh.polyMesh::objectPath()
            << endl;
        dummyMesh.write();
    }

    Pout<< "Reading mesh from " << io.objectPath() << endl;
    autoPtr<fvMesh> meshPtr(new fvMesh(io));
    fvMesh& mesh = meshPtr();


    // Determine patches.
    if (Pstream::master())
    {
        // Send patches
        for
        (
            int slave=Pstream::firstSlave();
            slave<=Pstream::lastSlave();
            slave++
        )
        {
            OPstream toSlave(Pstream::blocking, slave);
            toSlave << mesh.boundaryMesh();
        }
    }
    else
    {
        // Receive patches
        IPstream fromMaster(Pstream::blocking, Pstream::masterNo());
        PtrList<entry> patchEntries(fromMaster);

        if (haveMesh)
        {
            // Check master names against mine

            const polyBoundaryMesh& patches = mesh.boundaryMesh();

            forAll(patchEntries, patchI)
            {
                const entry& e = patchEntries[patchI];
                const word type(e.dict().lookup("type"));
                const word& name = e.keyword();

                if (type == processorPolyPatch::typeName)
                {
                    break;
                }

                if (patchI >= patches.size())
                {
                    FatalErrorIn
                    (
                        "createMesh(const Time&, const fileName&, const bool)"
                    )   << "Non-processor patches not synchronised."
                        << endl
                        << "Processor " << Pstream::myProcNo()
                        << " has only " << patches.size()
                        << " patches, master has "
                        << patchI
                        << exit(FatalError);
                }

                if
                (
                    type != patches[patchI].type()
                 || name != patches[patchI].name()
                )
                {
                    FatalErrorIn
                    (
                        "createMesh(const Time&, const fileName&, const bool)"
                    )   << "Non-processor patches not synchronised."
                        << endl
                        << "Master patch " << patchI
                        << " name:" << type
                        << " type:" << type << endl
                        << "Processor " << Pstream::myProcNo()
                        << " patch " << patchI
                        << " has name:" << patches[patchI].name()
                        << " type:" << patches[patchI].type()
                        << exit(FatalError);
                }
            }
        }
        else
        {
            // Add patch
            List<polyPatch*> patches(patchEntries.size());
            label nPatches = 0;

            forAll(patchEntries, patchI)
            {
                const entry& e = patchEntries[patchI];
                const word type(e.dict().lookup("type"));
                const word& name = e.keyword();

                if (type == processorPolyPatch::typeName)
                {
                    break;
                }

                Pout<< "Adding patch:" << nPatches
                    << " name:" << name
                    << " type:" << type << endl;

                dictionary patchDict(e.dict());
                patchDict.remove("nFaces");
                patchDict.add("nFaces", 0);
                patchDict.remove("startFace");
                patchDict.add("startFace", 0);

                patches[patchI] = polyPatch::New
                (
                    name,
                    patchDict,
                    nPatches++,
                    mesh.boundaryMesh()
                ).ptr();
            }
            patches.setSize(nPatches);
            mesh.addFvPatches(patches, false);  // no parallel comms

            //// Write empty mesh now we have correct patches
            //meshPtr().write();
        }
    }
Example #7
Gather<T0>::Gather(const T0& localData, const bool redistribute)
:
    List<T0>(0),
    nProcs_(max(1, Pstream::nProcs()))
{
    this->setSize(nProcs_);

    //
    // Collect data from all processors
    //

    if (Pstream::parRun())
    {
        if (Pstream::master())
        {
            this->operator[](0) = localData;

            // Receive data
            for
            (
                int slave = Pstream::firstSlave(), procIndex = 1;
                slave <= Pstream::lastSlave();
                slave++, procIndex++
            )
            {
                IPstream fromSlave(Pstream::scheduled, slave);
                fromSlave >> this->operator[](procIndex);
            }

            // Send data
            for
            (
                int slave = Pstream::firstSlave(), procIndex = 1;
                slave <= Pstream::lastSlave();
                slave++, procIndex++
            )
            {
                OPstream toSlave(Pstream::scheduled, slave);

                if (redistribute)
                {
                    toSlave << *this;
                }
                else
                {
                    // Dummy send just to balance sends/receives
                    toSlave << 0;
                }
            }
        }
        else
        {
            // Slave: send my local data to master
            {
                OPstream toMaster(Pstream::scheduled, Pstream::masterNo());
                toMaster << localData;
            }

            // Receive data from master
            {
                IPstream fromMaster(Pstream::scheduled, Pstream::masterNo());
                if (redistribute)
                {
                    fromMaster >> *this;
                }
                else
                {
                    label dummy;
                    fromMaster >> dummy;
                }
            }
        }
    }
    else
    {
        // Non-parallel run: the list holds just my own data
        this->operator[](0) = localData;
    }
}
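A usage sketch for the constructor above (mesh stands for any fvMesh; illustrative only): gather every processor's cell count, visible on all processors because redistribute is true.

Gather<label> nCellsPerProc(mesh.nCells(), true);

forAll(nCellsPerProc, procI)
{
    Info<< "processor" << procI << " : "
        << nCellsPerProc[procI] << " cells" << endl;
}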
Example #8
void Foam::argList::parse
(
    bool checkArgs,
    bool checkOpts,
    bool initialise
)
{
    // Help/documentation options:
    //   -help    print the usage
    //   -doc     display application documentation in browser
    //   -srcDoc  display source code in browser
    if
    (
        options_.found("help")
     || options_.found("doc")
     || options_.found("srcDoc")
    )
    {
        if (options_.found("help"))
        {
            printUsage();
        }

        // Only display one or the other
        if (options_.found("srcDoc"))
        {
            displayDoc(true);
        }
        else if (options_.found("doc"))
        {
            displayDoc(false);
        }

        ::exit(0);
    }

    // Print the usage message and exit if the number of arguments is incorrect
    if (!check(checkArgs, checkOpts))
    {
        FatalError.exit();
    }


    if (initialise)
    {
        string dateString = clock::date();
        string timeString = clock::clockTime();

        // Print the banner once only for parallel runs
        if (Pstream::master() && bannerEnabled)
        {
            IOobject::writeBanner(Info, true)
                << "Build  : " << Foam::FOAMbuild << nl
                << "Exec   : " << argListStr_.c_str() << nl
                << "Date   : " << dateString.c_str() << nl
                << "Time   : " << timeString.c_str() << nl
                << "Host   : " << hostName() << nl
                << "PID    : " << pid() << endl;
        }

        jobInfo.add("startDate", dateString);
        jobInfo.add("startTime", timeString);
        jobInfo.add("userName", userName());
        jobInfo.add("foamVersion", word(FOAMversion));
        jobInfo.add("code", executable_);
        jobInfo.add("argList", argListStr_);
        jobInfo.add("currentDir", cwd());
        jobInfo.add("PPID", ppid());
        jobInfo.add("PGID", pgid());

        // Add build information - only use the first word
        {
            std::string build(Foam::FOAMbuild);
            std::string::size_type found = build.find(' ');
            if (found != std::string::npos)
            {
                build.resize(found);
            }
            jobInfo.add("foamBuild", build);
        }
    }

    // Case is a single processor run unless it is running parallel
    int nProcs = 1;

    // Roots if running distributed
    fileNameList roots;


    // If this actually is a parallel run
    if (parRunControl_.parRun())
    {
        // For the master
        if (Pstream::master())
        {
            // Establish rootPath_/globalCase_/case_ for master
            getRootCase();

            // See if running distributed (different roots for different procs)
            label dictNProcs = -1;
            fileName source;

            if (options_.found("roots"))
            {
                source = "-roots";
                IStringStream is(options_["roots"]);
                roots = readList<fileName>(is);

                if (roots.size() != 1)
                {
                    dictNProcs = roots.size()+1;
                }
            }
            else
            {
                source = rootPath_/globalCase_/"system/decomposeParDict";
                IFstream decompDictStream(source);

                if (!decompDictStream.good())
                {
                    FatalError
                        << "Cannot read "
                        << decompDictStream.name()
                        << exit(FatalError);
                }

                dictionary decompDict(decompDictStream);

                dictNProcs = readLabel
                (
                    decompDict.lookup("numberOfSubdomains")
                );

                if (decompDict.lookupOrDefault("distributed", false))
                {
                    decompDict.lookup("roots") >> roots;
                }
            }

            // Convenience:
            // when a single root is specified, use it for all processes
            if (roots.size() == 1)
            {
                const fileName rootName(roots[0]);
                roots.setSize(Pstream::nProcs()-1, rootName);

                // adjust dictNProcs for command-line '-roots' option
                if (dictNProcs < 0)
                {
                    dictNProcs = roots.size()+1;
                }
            }


            // Check number of processors.
            // nProcs     => number of actual procs
            // dictNProcs => number of procs specified in decompositionDict
            // nProcDirs  => number of processor directories
            //               (n/a when running distributed)
            //
            // - normal running : nProcs = dictNProcs = nProcDirs
            // - decomposition to more  processors : nProcs = dictNProcs
            // - decomposition to fewer processors : nProcs = nProcDirs
            if (dictNProcs > Pstream::nProcs())
            {
                FatalError
                    << source
                    << " specifies " << dictNProcs
                    << " processors but job was started with "
                    << Pstream::nProcs() << " processors."
                    << exit(FatalError);
            }


            // Distributed data
            if (roots.size())
            {
                if (roots.size() != Pstream::nProcs()-1)
                {
                    FatalError
                        << "number of entries in roots "
                        << roots.size()
                        << " is not equal to the number of slaves "
                        << Pstream::nProcs()-1
                        << exit(FatalError);
                }

                forAll(roots, i)
                {
                    roots[i].expand();
                }

                // Distribute the master's argument list (with new root)
                bool hadCaseOpt = options_.found("case");
                for
                (
                    int slave = Pstream::firstSlave();
                    slave <= Pstream::lastSlave();
                    slave++
                )
                {
                    options_.set("case", roots[slave-1]/globalCase_);

                    OPstream toSlave(Pstream::scheduled, slave);
                    toSlave << args_ << options_;
                }
                options_.erase("case");

                // Restore [-case dir]
                if (hadCaseOpt)
                {
                    options_.set("case", rootPath_/globalCase_);
                }
            }
            else
            {
                // Possibly going to fewer processors.
                // Check if all procDirs are there.
                if (dictNProcs < Pstream::nProcs())
                {
                    label nProcDirs = 0;
                    while
                    (
                        isDir
                        (
                            rootPath_/globalCase_/"processor"
                          + name(++nProcDirs)
                        )
                    )
                    {}

                    if (nProcDirs != Pstream::nProcs())
                    {
                        FatalError
                            << "number of processor directories = "
                            << nProcDirs
                            << " is not equal to the number of processors = "
                            << Pstream::nProcs()
                            << exit(FatalError);
                    }
                }

                // Distribute the master's argument list (unaltered)
                for
                (
                    int slave = Pstream::firstSlave();
                    slave <= Pstream::lastSlave();
                    slave++
                )
                {
                    OPstream toSlave(Pstream::scheduled, slave);
                    toSlave << args_ << options_;
                }
            }
        }
        else
        {
            // Collect the master's argument list
            IPstream fromMaster(Pstream::scheduled, Pstream::masterNo());
            fromMaster >> args_ >> options_;

            // Establish rootPath_/globalCase_/case_ for the slave
            getRootCase();
        }

        nProcs = Pstream::nProcs();
        case_ = globalCase_/(word("processor") + name(Pstream::myProcNo()));
    }
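The -roots branch above reads an OpenFOAM-format list of file names. A sketch of what that looks like on the command line (paths are illustrative):

// mpirun -np 4 solverName -parallel \
//     -roots '("/data/node1" "/data/node2" "/data/node3")'
//
// One entry per slave; a single-entry list is replicated to all slaves,
// as in the convenience branch above.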