bool Foam::Pstream::init(int& argc, char**& argv)
{
    MPI_Init(&argc, &argv);

    int numprocs;
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myProcNo_);

    if (numprocs <= 1)
    {
        FatalErrorIn("Pstream::init(int& argc, char**& argv)")
            << "bool Pstream::init(int& argc, char**& argv) : "
               "attempt to run parallel on 1 processor"
            << Foam::abort(FatalError);
    }

    procIDs_.setSize(numprocs);

    forAll(procIDs_, procNo)
    {
        procIDs_[procNo] = procNo;
    }

    setParRun();

#   ifndef SGIMPI
    string bufferSizeName = getEnv("MPI_BUFFER_SIZE");

    if (bufferSizeName.size())
    {
        int bufferSize = atoi(bufferSizeName.c_str());

        if (bufferSize)
        {
            MPI_Buffer_attach(new char[bufferSize], bufferSize);
        }
    }
    else
    {
        FatalErrorIn("Pstream::init(int& argc, char**& argv)")
            << "Pstream::init(int& argc, char**& argv) : "
            << "environment variable MPI_BUFFER_SIZE not defined"
            << Foam::abort(FatalError);
    }
#   endif

    int processorNameLen;
    char processorName[MPI_MAX_PROCESSOR_NAME];

    MPI_Get_processor_name(processorName, &processorNameLen);

    //signal(SIGABRT, stop);

    // Now that nprocs is known construct communication tables.
    initCommunicationSchedule();

    return true;
}
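
The buffer handed to MPI_Buffer_attach above is never detached or freed by init() itself. As a minimal standalone sketch (plain MPI, not part of Pstream; the attach/detach pairing is the point, not OpenFOAM's actual shutdown path), the full lifecycle of a buffered-send buffer looks like this:

// Sketch: lifecycle of the buffer used by MPI buffered sends (MPI_Bsend).
// Standalone illustration, assuming plain MPI outside of OpenFOAM.
#include <mpi.h>

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);

    // Attach a buffer; MPI draws on it for all subsequent MPI_Bsend calls.
    const int bufferSize = 20000000;
    char* buffer = new char[bufferSize];
    MPI_Buffer_attach(buffer, bufferSize);

    // ... buffered communication would happen here ...

    // Detach before freeing; MPI_Buffer_detach blocks until any pending
    // buffered sends have completed, then returns the attached buffer.
    void* detached = nullptr;
    int detachedSize = 0;
    MPI_Buffer_detach(&detached, &detachedSize);
    delete[] static_cast<char*>(detached);

    MPI_Finalize();
    return 0;
}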
Example 2
bool Foam::UPstream::init(int& argc, char**& argv)
{
    MPI_Init(&argc, &argv);

    int numprocs;
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    int myRank;
    MPI_Comm_rank(MPI_COMM_WORLD, &myRank);

    if (debug)
    {
        Pout<< "UPstream::init : initialised with numProcs:" << numprocs
            << " myRank:" << myRank << endl;
    }

    if (numprocs <= 1)
    {
        FatalErrorIn("UPstream::init(int& argc, char**& argv)")
            << "bool IPstream::init(int& argc, char**& argv) : "
               "attempt to run parallel on 1 processor"
            << Foam::abort(FatalError);
    }


    // Initialise parallel structure
    setParRun(numprocs);

#   ifndef SGIMPI
    string bufferSizeName = getEnv("MPI_BUFFER_SIZE");

    if (bufferSizeName.size())
    {
        int bufferSize = atoi(bufferSizeName.c_str());

        if (bufferSize)
        {
            MPI_Buffer_attach(new char[bufferSize], bufferSize);
        }
    }
    else
    {
        FatalErrorIn("UPstream::init(int& argc, char**& argv)")
            << "UPstream::init(int& argc, char**& argv) : "
            << "environment variable MPI_BUFFER_SIZE not defined"
            << Foam::abort(FatalError);
    }
#   endif

    //int processorNameLen;
    //char processorName[MPI_MAX_PROCESSOR_NAME];
    //
    //MPI_Get_processor_name(processorName, &processorNameLen);
    //processorName[processorNameLen] = '\0';
    //Pout<< "Processor name:" << processorName << endl;

    return true;
}
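
MPI_Init may only be called once per process, and all three examples call it unconditionally. A hedged variation (an assumption of this note, not something UPstream does) would query MPI_Initialized first, so the code also works when embedded in a host application that has already started MPI:

// Sketch: initialise MPI only if it is not already running.
// Hypothetical variant; the helper name initMPIOnce is not from OpenFOAM.
#include <mpi.h>

bool initMPIOnce(int& argc, char**& argv)
{
    int alreadyInitialised = 0;
    MPI_Initialized(&alreadyInitialised);

    if (!alreadyInitialised)
    {
        MPI_Init(&argc, &argv);
    }

    int numprocs = 0;
    int myRank = 0;
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myRank);

    // Mirror the check above: refuse to run "parallel" on one processor.
    return numprocs > 1;
}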
Example 3
bool Foam::mpiPstreamImpl::init(int& argc, char**& argv, int& myProcNo, List<int>& procIDs, bool& isParallel)
{
    MPI_Init(&argc, &argv);

    int numprocs;
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myProcNo);

    if (numprocs <= 1)
    {
        FatalErrorIn("mpiPstreamImpl::init(int& argc, char**& argv)")
            << "bool mpiPstreamImpl::init(int& argc, char**& argv) : "
               "attempt to run parallel on 1 processor"
            << Foam::abort(FatalError);
    }

    procIDs.setSize(numprocs);

    forAll(procIDs, procNo)
    {
        procIDs[procNo] = procNo;
    }

    setParRun(isParallel);

#   ifndef SGIMPI
    //FIX <*****@*****.**>
    // Use default bufferSize and let the user override it
    // using $MPI_BUFFER_SIZE if she wants to.
    int bufferSize = 20000000;

    string bufferSizeName = getEnv("MPI_BUFFER_SIZE");

    if (bufferSizeName.size())
    {
        int tmpBufferSize = atoi(bufferSizeName.c_str());

        if (tmpBufferSize)
        {
            bufferSize = tmpBufferSize;
        }
    }
    MPI_Buffer_attach(new char[bufferSize], bufferSize);
#   endif

    int processorNameLen;
    char processorName[MPI_MAX_PROCESSOR_NAME];

    MPI_Get_processor_name(processorName, &processorNameLen);

    //signal(SIGABRT, stop);

    // Now that nprocs is known construct communication tables.
    PstreamImpl::initCommunicationSchedule();

    return true;
}
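
Unlike the first two examples, this version falls back to a built-in default of 20000000 bytes when MPI_BUFFER_SIZE is unset, instead of aborting. A small standalone sketch of that override pattern (the helper name envBufferSize is hypothetical, not part of mpiPstreamImpl):

// Sketch: default buffer size, overridable via $MPI_BUFFER_SIZE.
// Hypothetical helper for illustration only.
#include <cstdlib>

int envBufferSize(int defaultSize = 20000000)
{
    if (const char* env = std::getenv("MPI_BUFFER_SIZE"))
    {
        const int requested = std::atoi(env);

        if (requested > 0)
        {
            return requested;
        }
    }

    return defaultSize;
}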