Example #1
void
BoxLib::Initialize (int& argc, char**& argv, bool build_parm_parse, MPI_Comm mpi_comm)
{
#ifndef WIN32
    //
    // Make sure to catch new failures.
    //
    std::set_new_handler(BoxLib::OutOfMemory);
#endif

#ifdef BL_BACKTRACING
    signal(SIGSEGV, BLBackTrace::handler); // catch seg fault
    feenableexcept(FE_INVALID | FE_DIVBYZERO | FE_OVERFLOW);  // trap floating point exceptions
    signal(SIGFPE, BLBackTrace::handler);
#endif

    ParallelDescriptor::StartParallel(&argc, &argv, mpi_comm);

    if(ParallelDescriptor::NProcsSidecar() > 0) {
      if(ParallelDescriptor::InSidecarGroup()) {
        if (ParallelDescriptor::IOProcessor())
          std::cout << "===== SIDECARS INITIALIZED =====" << std::endl;
        ParallelDescriptor::SidecarProcess();
        BoxLib::Finalize();
        return;
      }
    }


    BL_PROFILE_INITIALIZE();

    //
    // Initialize random seed after we're running in parallel.
    //
    BoxLib::InitRandom(ParallelDescriptor::MyProc()+1, ParallelDescriptor::NProcs());

#ifdef BL_USE_MPI
    if (ParallelDescriptor::IOProcessor())
    {
        std::cout << "MPI initialized with "
                  << ParallelDescriptor::NProcs()
                  << " MPI processes\n";
    }
#endif

#ifdef _OPENMP
    if (ParallelDescriptor::IOProcessor())
    {
        std::cout << "OMP initialized with "
                  << omp_get_max_threads()
                  << " OMP threads\n";
    }
#endif

#ifndef BL_AMRPROF
    if (build_parm_parse)
    {
        if (argc == 1)
        {
            ParmParse::Initialize(0,0,0);
        }
        else
        {
            if (strchr(argv[1],'='))
            {
                ParmParse::Initialize(argc-1,argv+1,0);
            }
            else
            {
                ParmParse::Initialize(argc-2,argv+2,argv[1]);
            }
        }
    }
#endif

    mempool_init();

    std::cout << std::setprecision(10);

    if (double(std::numeric_limits<long>::max()) < 9.e18)
    {
	if (ParallelDescriptor::IOProcessor())
	{
	    std::cout << "!\n! WARNING: Maximum of long int, "
		      << std::numeric_limits<long>::max() 
		      << ", might be too small for big runs.\n!\n";
	}
    }
}
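
This initializer (and the variants below) is meant to be called exactly once at program start and paired with BoxLib::Finalize. A minimal caller sketch, assuming the usual BoxLib.H header and the common defaults of build_parm_parse = true and mpi_comm = MPI_COMM_WORLD (neither default is shown in the example above):

#include <BoxLib.H>

int main (int argc, char* argv[])
{
    // Start MPI/OpenMP, install handlers, build the ParmParse table, etc.
    BoxLib::Initialize(argc, argv);

    // ... application setup and time stepping would go here ...

    // Tear down profiling, communicators, and MPI.
    BoxLib::Finalize();
    return 0;
}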
Example #2
void
BoxLib::Initialize (int& argc, char**& argv, bool build_parm_parse, MPI_Comm mpi_comm)
{
#ifndef WIN32
    //
    // Make sure to catch new failures.
    //
    std::set_new_handler(BoxLib::OutOfMemory);
#endif

    ParallelDescriptor::StartParallel(&argc, &argv, mpi_comm);

    if(ParallelDescriptor::NProcsPerfMon() > 0) {
      if(ParallelDescriptor::MyProcAll() == ParallelDescriptor::MyProcAllPerfMon()) {
        std::cout << "Starting PerfMonProc:  myprocall = "
                  << ParallelDescriptor::MyProcAll() << std::endl;
        BLProfiler::PerfMonProcess();
        BoxLib::Finalize();
        return;
      }
    }

    BL_PROFILE_INITIALIZE();

    //
    // Initialize random seed after we're running in parallel.
    //
    BoxLib::InitRandom(ParallelDescriptor::MyProc()+1, ParallelDescriptor::NProcs());

#ifdef BL_USE_MPI
    if (ParallelDescriptor::IOProcessor())
    {
        std::cout << "MPI initialized with "
                  << ParallelDescriptor::NProcs()
                  << " MPI processes\n";
    }
#endif

#ifdef _OPENMP
    if (ParallelDescriptor::IOProcessor())
    {
        std::cout << "OMP initialized with "
                  << omp_get_max_threads()
                  << " OMP threads\n";
    }
#endif

#ifndef BL_AMRPROF
    if (build_parm_parse)
    {
        if (argc == 1)
        {
            ParmParse::Initialize(0,0,0);
        }
        else
        {
            if (strchr(argv[1],'='))
            {
                ParmParse::Initialize(argc-1,argv+1,0);
            }
            else
            {
                ParmParse::Initialize(argc-2,argv+2,argv[1]);
            }
        }
    }
#endif

    std::cout << std::setprecision(10);

    if (double(std::numeric_limits<long>::max()) < 9.e18)
    {
	if (ParallelDescriptor::IOProcessor())
	{
	    std::cout << "!\n! WARNING: Maximum of long int, "
		      << std::numeric_limits<long>::max() 
		      << ", might be too small for big runs.\n!\n";
	}
    }
}
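
The build_parm_parse branch encodes BoxLib's command-line convention: if the first argument contains an '=', every argument is treated as a key=value definition; otherwise the first argument is taken as an inputs file and the remaining arguments as overrides. A hedged sketch of how the resulting table is read back later in the run (the invocations, parameter names, and the read_my_params helper are hypothetical, not part of the example above):

// Hypothetical invocations:
//   ./main3d.exe inputs boxlib.verbose=1 stop_time=2.0
//   ./main3d.exe stop_time=2.0 max_step=100
#include <ParmParse.H>

void read_my_params ()
{
    ParmParse pp;                   // unprefixed table
    double stop_time = -1.0;        // defaults are kept if a key is absent
    int    max_step  = 10;
    pp.query("stop_time", stop_time);
    pp.query("max_step",  max_step);

    ParmParse pb("boxlib");         // keys written as "boxlib.<name>"
    int verbose = 0;
    pb.query("verbose", verbose);
}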
Example #3
void
BoxLib::Initialize (int& argc, char**& argv, bool build_parm_parse, MPI_Comm mpi_comm)
{
    ParallelDescriptor::StartParallel(&argc, &argv, mpi_comm);

#ifndef WIN32
    //
    // Make sure to catch new failures.
    //
    std::set_new_handler(BoxLib::OutOfMemory);

    if (argv[0][0] != '/') {
	char temp[1024];
	getcwd(temp,1024);
	exename = temp;
	exename += "/";
    }
    exename += argv[0];
#endif

#ifdef BL_USE_UPCXX
    upcxx::init(&argc, &argv);
    if (upcxx::myrank() != ParallelDescriptor::MyProc())
	BoxLib::Abort("UPC++ rank != MPI rank");
#endif

#ifdef BL_USE_MPI3
    MPI_Win_create_dynamic(MPI_INFO_NULL, MPI_COMM_WORLD, &ParallelDescriptor::cp_win);
    MPI_Win_create_dynamic(MPI_INFO_NULL, MPI_COMM_WORLD, &ParallelDescriptor::fb_win);
    MPI_Win_create_dynamic(MPI_INFO_NULL, MPI_COMM_WORLD, &ParallelDescriptor::fpb_win);
#endif

    while (!The_Initialize_Function_Stack.empty())
    {
        //
        // Call the registered function.
        //
        (*The_Initialize_Function_Stack.top())();
        //
        // And then remove it from the stack.
        //
        The_Initialize_Function_Stack.pop();
    }

    if(ParallelDescriptor::NProcsSidecar() > 0) {
      if(ParallelDescriptor::InSidecarGroup()) {
        if (ParallelDescriptor::IOProcessor())
          std::cout << "===== SIDECARS INITIALIZED =====" << std::endl;
        ParallelDescriptor::SidecarProcess();
        BoxLib::Finalize();
        return;
      }
    }

    BL_PROFILE_INITIALIZE();

    //
    // Initialize random seed after we're running in parallel.
    //
    BoxLib::InitRandom(ParallelDescriptor::MyProc()+1, ParallelDescriptor::NProcs());

#ifdef BL_USE_MPI
    if (ParallelDescriptor::IOProcessor())
    {
        std::cout << "MPI initialized with "
                  << ParallelDescriptor::NProcs()
                  << " MPI processes\n";
    }
#endif

#ifdef _OPENMP
    if (ParallelDescriptor::IOProcessor())
    {
        std::cout << "OMP initialized with "
                  << omp_get_max_threads()
                  << " OMP threads\n";
    }
#endif

    signal(SIGSEGV, BLBackTrace::handler); // catch seg fault
    signal(SIGINT,  BLBackTrace::handler);

#ifndef BL_AMRPROF
    if (build_parm_parse)
    {
        if (argc == 1)
        {
            ParmParse::Initialize(0,0,0);
        }
        else
        {
            if (strchr(argv[1],'='))
            {
                ParmParse::Initialize(argc-1,argv+1,0);
            }
            else
            {
                ParmParse::Initialize(argc-2,argv+2,argv[1]);
            }
        }
    }

    {
	ParmParse pp("boxlib");
	pp.query("v", verbose);
	pp.query("verbose", verbose);

	int invalid = 0, divbyzero=0, overflow=0;
	pp.query("fpe_trap_invalid", invalid);
	pp.query("fpe_trap_zero", divbyzero);
	pp.query("fpe_trap_overflow", overflow);
	int flags = 0;
	if (invalid)   flags |= FE_INVALID;
	if (divbyzero) flags |= FE_DIVBYZERO;
	if (overflow)  flags |= FE_OVERFLOW;
#if defined(__linux__)
#if !defined(__PGI) || (__PGIC__ >= 16)
	if (flags != 0) {
	    feenableexcept(flags);  // trap floating point exceptions
	    signal(SIGFPE,  BLBackTrace::handler);
	}
#endif
#endif
    }

    ParallelDescriptor::StartTeams();

    ParallelDescriptor::StartSubCommunicator();

    mempool_init();

#endif

    std::cout << std::setprecision(10);

    if (double(std::numeric_limits<long>::max()) < 9.e18)
    {
	if (ParallelDescriptor::IOProcessor())
	{
	    std::cout << "!\n! WARNING: Maximum of long int, "
		      << std::numeric_limits<long>::max() 
		      << ", might be too small for big runs.\n!\n";
	}
    }

#if defined(BL_USE_FORTRAN_MPI) || defined(BL_USE_F_INTERFACES)
    int fcomm = MPI_Comm_c2f(ParallelDescriptor::Communicator());
    bl_fortran_mpi_comm_init (fcomm);
#endif

#if defined(BL_MEM_PROFILING) && defined(BL_USE_F_BASELIB)
    MemProfiler_f::initialize();
#endif
}
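
Unlike the first two variants, Example #3 drains The_Initialize_Function_Stack right after StartParallel, so other modules can schedule their own setup to run inside BoxLib::Initialize. A sketch of how such a callback is typically registered, assuming the usual BoxLib::ExecOnInitialize helper (the module function and the static-registration idiom are illustrative, not part of the example above):

#include <BoxLib.H>

static void my_module_init ()
{
    // one-time setup that must run after MPI has been started but
    // before the application proper begins
}

// Registering during static initialization pushes the callback onto the
// stack that Example #3 pops and calls after StartParallel().
static int my_module_registered = (BoxLib::ExecOnInitialize(my_module_init), 0);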