Example #1
/*
 * Local helper function to build an array of all the procs in a
 * communicator, excluding this process.
 *
 * Killing just the indicated peers must be implemented for
 * MPI_Abort() to work according to the standard's language for
 * a 'high-quality' implementation.
 *
 * It would be nifty if we could differentiate between the
 * abort scenarios (but we don't, currently):
 *      - MPI_Abort()
 *      - MPI_ERRORS_ARE_FATAL
 *      - Victim of MPI_Abort()
 */
static void try_kill_peers(ompi_communicator_t *comm,
                           int errcode)
{
    int nprocs;
    ompi_process_name_t *procs;

    nprocs = ompi_comm_size(comm);
    /* ompi_comm_remote_size() returns 0 if not an intercomm, so
       this is safe */
    nprocs += ompi_comm_remote_size(comm);

    procs = (ompi_process_name_t*) calloc(nprocs, sizeof(ompi_process_name_t));
    if (NULL == procs) {
        /* quick clean orte and get out */
        ompi_rte_abort(errno, "Abort: unable to alloc memory to kill procs");
    }

    /* put all the local group procs in the abort list */
    int rank, i, count;
    rank = ompi_comm_rank(comm);
    for (count = i = 0; i < ompi_comm_size(comm); ++i) {
        if (rank == i) {
            /* Don't include this process in the array */
            --nprocs;
        } else {
            assert(count <= nprocs);
            procs[count++] =
                *OMPI_CAST_RTE_NAME(&ompi_group_get_proc_ptr(comm->c_local_group, i)->super.proc_name);
        }
    }

    /* kill off the remote group procs too (zero iterations for an
       intracommunicator, since its remote size is 0) */
    for (i = 0; i < ompi_comm_remote_size(comm); ++i) {
        assert(count <= nprocs);
        procs[count++] =
            *OMPI_CAST_RTE_NAME(&ompi_group_get_proc_ptr(comm->c_remote_group, i)->super.proc_name);
    }

    if (nprocs > 0) {
        ompi_rte_abort_peers(procs, nprocs, errcode);
    }

    /* We could fall through here if ompi_rte_abort_peers() fails, or
       if (nprocs == 0).  Either way, tidy up and let the caller
       handle it. */
    free(procs);
}
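
For context, a minimal sketch of how a caller might drive this helper. The flow mirrors Example #4 below; the guard conditions here are illustrative, not the definitive Open MPI control flow:

/* Hedged sketch: try the targeted kill only when MPI is fully up,
   then fall back to aborting the entire job (try_kill_peers() may
   return if the RTE cannot kill individual peers). */
if (ompi_mpi_initialized && !ompi_mpi_finalized && NULL != comm) {
    try_kill_peers(comm, errcode);
}
ompi_rte_abort(errcode, NULL);   /* last resort: take down the whole job */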
Example #2
int
ompi_mpi_abort(struct ompi_communicator_t* comm,
               int errcode,
               bool kill_remote_of_intercomm)
{
    int count = 0, i, ret;
    char *msg, *host, hostname[MAXHOSTNAMELEN];
    pid_t pid = 0;
    ompi_process_name_t *abort_procs;
    int32_t nabort_procs;

    /* Protection for recursive invocation */
    if (have_been_invoked) {
        return OMPI_SUCCESS;
    }
    have_been_invoked = true;

    /* If MPI is initialized, we know we have a runtime nodename, so use that.  Otherwise, call
       gethostname. */
    if (ompi_mpi_initialized) {
        host = ompi_process_info.nodename;
    } else {
        gethostname(hostname, sizeof(hostname));
        host = hostname;
    }
    pid = getpid();

    /* Should we print a stack trace?  Not aggregated because they
       might be different on all processes. */
    if (ompi_mpi_abort_print_stack) {
        char **messages;
        int len, i;

        if (OMPI_SUCCESS == opal_backtrace_buffer(&messages, &len)) {
            for (i = 0; i < len; ++i) {
                fprintf(stderr, "[%s:%d] [%d] func:%s\n", host, (int) pid, 
                        i, messages[i]);
                fflush(stderr);
            }
            free(messages);
        } else {
            /* This will print a message if it's unable to print the
               backtrace, so we don't need an additional "else" clause
               if opal_backtrace_print() is not supported. */
            opal_backtrace_print(stderr, NULL, 1);
        }
    }

    /* Notify the debugger that we're about to abort */

    if (errcode < 0 ||
        asprintf(&msg, "[%s:%d] aborting with MPI error %s%s", 
                 host, (int) pid, ompi_mpi_errnum_get_string(errcode), 
                 ompi_mpi_abort_print_stack ? 
                 " (stack trace available on stderr)" : "") < 0) {
        msg = NULL;
    }
    ompi_debugger_notify_abort(msg);
    if (NULL != msg) {
        free(msg);
    }

    /* Should we wait for a while before aborting? */

    if (0 != ompi_mpi_abort_delay) {
        if (ompi_mpi_abort_delay < 0) {
            fprintf(stderr, "[%s:%d] Looping forever (MCA parameter mpi_abort_delay is < 0)\n",
                    host, (int) pid);
            fflush(stderr);
            while (1) { 
                sleep(5); 
            }
        } else {
            fprintf(stderr, "[%s:%d] Delaying for %d seconds before aborting\n",
                    host, (int) pid, ompi_mpi_abort_delay);
            do {
                sleep(1);
            } while (--ompi_mpi_abort_delay > 0);
        }
    }

    /* If OMPI isn't set up yet (or any more), then don't even try
       killing everyone.  OMPI's initialized period lies entirely
       within the runtime's initialized period, so there is no need to
       check the runtime separately here.  Sorry, Charlie... */

    if (!ompi_mpi_initialized || ompi_mpi_finalized) {
        fprintf(stderr, "[%s:%d] Local abort %s completed successfully; not able to aggregate error messages, and not able to guarantee that all other processes were killed!\n",
                host, (int) pid, ompi_mpi_finalized ? 
                "after MPI_FINALIZE" : "before MPI_INIT");
        exit(errcode);
    }

    /* abort local procs in the communicator.  If the communicator is
       an intercommunicator AND the abort has explicitly requested
       that we abort the remote procs, then do that as well. */
    nabort_procs = ompi_comm_size(comm);

    if (kill_remote_of_intercomm) {
        /* ompi_comm_remote_size() returns 0 if not an intercomm, so
           this is cool */
        nabort_procs += ompi_comm_remote_size(comm);
    }

    abort_procs = (ompi_process_name_t*)malloc(sizeof(ompi_process_name_t) * nabort_procs);
    if (NULL == abort_procs) {
        /* quick clean orte and get out */
        ompi_rte_abort(errcode, "Abort unable to malloc memory to kill procs");
    }

    /* put all the local procs in the abort list */
    for (i = 0 ; i < ompi_comm_size(comm) ; ++i) {
        if (OPAL_EQUAL != ompi_rte_compare_name_fields(OMPI_RTE_CMP_ALL, 
                                 &comm->c_local_group->grp_proc_pointers[i]->proc_name,
                                 OMPI_PROC_MY_NAME)) {
            assert(count <= nabort_procs);
            abort_procs[count++] = comm->c_local_group->grp_proc_pointers[i]->proc_name;
        } else {
            /* don't terminate me just yet */
            nabort_procs--;
        }
    }

    /* if requested, kill off remote procs too */
    if (kill_remote_of_intercomm) {
        for (i = 0 ; i < ompi_comm_remote_size(comm) ; ++i) {
            if (OPAL_EQUAL != ompi_rte_compare_name_fields(OMPI_RTE_CMP_ALL, 
                                     &comm->c_remote_group->grp_proc_pointers[i]->proc_name,
                                     OMPI_PROC_MY_NAME)) {
                assert(count <= nabort_procs);
                abort_procs[count++] =
                    comm->c_remote_group->grp_proc_pointers[i]->proc_name;
            } else {
                /* don't terminate me just yet */
                nabort_procs--;
            }
        }
    }

    if (nabort_procs > 0) {
        /* This must be implemented for MPI_Abort() to work according to the
         * standard language for a 'high-quality' implementation.
         * It would be nifty if we could differentiate between the
         * abort scenarios:
         *      - MPI_Abort()
         *      - MPI_ERRORS_ARE_FATAL
         *      - Victim of MPI_Abort()
         */
        /*
         * Abort peers in this communicator group. Does not include self.
         */
        if( OMPI_SUCCESS != (ret = ompi_rte_abort_peers(abort_procs, nabort_procs, errcode)) ) {
            ompi_rte_abort(errcode, "Open MPI failed to abort all of the procs requested (%d).", ret);
        }
    }

    /* now that we've aborted everyone else, gracefully die. */
    ompi_rte_abort(errcode, NULL);
    
    return OMPI_SUCCESS;
}
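
Both MPI abort variants (and the OSHMEM one in Example #5) guard against recursive invocation via a file-scope flag that these excerpts do not show. A minimal sketch of the assumed declaration:

/* Assumed file-scope state (not shown in the excerpts): set on first
   entry so that an error raised while aborting does not recurse back
   into the abort path. */
static bool have_been_invoked = false;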
Example #3
int ompi_mpi_register_params(void)
{
    int value;

    /* Whether we want MPI API function parameter checking or not. Disable this by default if
       parameter checking is compiled out. */

    ompi_mpi_param_check = !!(MPI_PARAM_CHECK);
    (void) mca_base_var_register("ompi", "mpi", NULL, "param_check",
                                 "Whether you want MPI API parameters checked at run-time or not.  Possible values are 0 (no checking) and 1 (perform checking at run-time)",
                                 MCA_BASE_VAR_TYPE_BOOL, NULL, 0, 0,
                                 OPAL_INFO_LVL_9,
                                 MCA_BASE_VAR_SCOPE_READONLY,
                                 &ompi_mpi_param_check);
    if (ompi_mpi_param_check && !MPI_PARAM_CHECK) {
        opal_show_help("help-mpi-runtime.txt",
                       "mpi-param-check-enabled-but-compiled-out",
                       true);
        ompi_mpi_param_check = false;
    }

    /*
     * opal_progress: decide whether to yield and the event library
     * tick rate
     */
    /* JMS: Need ORTE data here -- set this to 0 when
       exactly/under-subscribed, or 1 when oversubscribed */
    ompi_mpi_yield_when_idle = true;
    (void) mca_base_var_register("ompi", "mpi", NULL, "yield_when_idle",
                                 "Yield the processor when waiting for MPI communication (for MPI processes, will default to 1 when oversubscribing nodes)",
                                 MCA_BASE_VAR_TYPE_BOOL, NULL, 0, 0,
                                 OPAL_INFO_LVL_9,
                                 MCA_BASE_VAR_SCOPE_READONLY,
                                 &ompi_mpi_yield_when_idle);

    ompi_mpi_event_tick_rate = -1;
    (void) mca_base_var_register("ompi", "mpi", NULL, "event_tick_rate",
                                 "How often to progress TCP communications (0 = never, otherwise specified in microseconds)",
                                 MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
                                 OPAL_INFO_LVL_9,
                                 MCA_BASE_VAR_SCOPE_READONLY,
                                 &ompi_mpi_event_tick_rate);

    /* Whether or not to show MPI handle leaks */
    ompi_debug_show_handle_leaks = false;
    (void) mca_base_var_register("ompi", "mpi", NULL, "show_handle_leaks",
                                 "Whether MPI_FINALIZE shows all MPI handles that were not freed or not",
                                 MCA_BASE_VAR_TYPE_BOOL, NULL, 0, 0,
                                 OPAL_INFO_LVL_9,
                                 MCA_BASE_VAR_SCOPE_READONLY,
                                 &ompi_debug_show_handle_leaks);

    /* Whether or not to free MPI handles.  Useless without run-time
       param checking, so implicitly set that to true if we don't want
       to free the handles. */
    ompi_debug_no_free_handles = false;
    (void) mca_base_var_register("ompi", "mpi", NULL, "no_free_handles",
                                 "Whether to actually free MPI objects when their handles are freed",
                                 MCA_BASE_VAR_TYPE_BOOL, NULL, 0, 0,
                                 OPAL_INFO_LVL_9,
                                 MCA_BASE_VAR_SCOPE_READONLY,
                                 &ompi_debug_no_free_handles);
    if (ompi_debug_no_free_handles) {
        ompi_mpi_param_check = true;
        if (!MPI_PARAM_CHECK) {
            opal_output(0, "WARNING: MCA parameter mpi_no_free_handles set to true, but MPI");
            opal_output(0, "WARNING: parameter checking has been compiled out of Open MPI.");
            opal_output(0, "WARNING: mpi_no_free_handles is therefore only partially effective!");
        }
    }

    /* Whether or not to show MPI_ALLOC_MEM leaks */
    ompi_debug_show_mpi_alloc_mem_leaks = 0;
    (void) mca_base_var_register("ompi", "mpi", NULL, "show_mpi_alloc_mem_leaks",
                                 "If >0, MPI_FINALIZE will show up to this many instances of memory allocated by MPI_ALLOC_MEM that was not freed by MPI_FREE_MEM",
                                 MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
                                 OPAL_INFO_LVL_9,
                                 MCA_BASE_VAR_SCOPE_READONLY,
                                 &ompi_debug_show_mpi_alloc_mem_leaks);

    /* Whether or not to print all MCA parameters in MPI_INIT */
    ompi_mpi_show_mca_params_string = NULL;
    (void) mca_base_var_register("ompi", "mpi", NULL, "show_mca_params",
                                 "Whether to show all MCA parameter values during MPI_INIT or not (good for reproducability of MPI jobs "
                                 "for debug purposes). Accepted values are all, default, file, api, and enviro - or a comma "
                                 "delimited combination of them",
                                 MCA_BASE_VAR_TYPE_STRING, NULL, 0, 0,
                                 OPAL_INFO_LVL_9,
                                 MCA_BASE_VAR_SCOPE_READONLY,
                                 &ompi_mpi_show_mca_params_string);
    if (NULL != ompi_mpi_show_mca_params_string) {
        char **args;
        int i;

        ompi_mpi_show_mca_params = true;
        args = opal_argv_split(ompi_mpi_show_mca_params_string, ',');
        if (NULL == args) {
            opal_output(0, "WARNING: could not parse mpi_show_mca_params request - defaulting to show \"all\"");
            show_default_mca_params = true;
            show_file_mca_params = true;
            show_enviro_mca_params = true;
            show_override_mca_params = true;
        } else {
            for (i=0; NULL != args[i]; i++) {
                if (0 == strcasecmp(args[i], "all")  || 0 == strcmp(args[i], "1")) {
                    show_default_mca_params = true;
                    show_file_mca_params = true;
                    show_enviro_mca_params = true;
                    show_override_mca_params = true;
                } else if (0 == strcasecmp(args[i], "default")) {
                    show_default_mca_params = true;
                } else if (0 == strcasecmp(args[i], "file")) {
                    show_file_mca_params = true;
                } else if (0 == strncasecmp(args[i], "env", 3)) {
                    show_enviro_mca_params = true;
                } else if (0 == strcasecmp(args[i], "api")) {
                    show_override_mca_params = true;
                }
            }
            opal_argv_free(args);
        }
    }

    /* File to use when dumping the parameters */
    ompi_mpi_show_mca_params_file = "";
    (void) mca_base_var_register("ompi", "mpi", NULL, "show_mca_params_file",
                                 "If mpi_show_mca_params is true, setting this string to a valid filename tells Open MPI to dump all the MCA parameter values into a file suitable for reading via the mca_param_files parameter (good for reproducability of MPI jobs)",
                                 MCA_BASE_VAR_TYPE_STRING, NULL, 0, 0,
                                 OPAL_INFO_LVL_9,
                                 MCA_BASE_VAR_SCOPE_READONLY,
                                 &ompi_mpi_show_mca_params_file);

    /* User-level process pinning controls */

    /* MPI_ABORT controls */
    ompi_mpi_abort_delay = 0;
    (void) mca_base_var_register("ompi", "mpi", NULL, "abort_delay",
                                 "If nonzero, print out an identifying message when MPI_ABORT is invoked (hostname, PID of the process that called MPI_ABORT) and delay for that many seconds before exiting (a negative delay value means to never abort).  This allows attaching of a debugger before quitting the job.",
                                 MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
                                 OPAL_INFO_LVL_9,
                                 MCA_BASE_VAR_SCOPE_READONLY,
                                 &ompi_mpi_abort_delay);

    ompi_mpi_abort_print_stack = false;
    (void) mca_base_var_register("ompi", "mpi", NULL, "abort_print_stack",
                                 "If nonzero, print out a stack trace when MPI_ABORT is invoked",
                                 MCA_BASE_VAR_TYPE_BOOL, NULL, 0,
                                 /* If we do not have stack trace
                                    capability, make this a constant
                                    MCA variable */
#if OPAL_WANT_PRETTY_PRINT_STACKTRACE && defined(HAVE_BACKTRACE)
                                 0,
                                 OPAL_INFO_LVL_9,
                                 MCA_BASE_VAR_SCOPE_READONLY,
#else
                                 MCA_BASE_VAR_FLAG_DEFAULT_ONLY,
                                 OPAL_INFO_LVL_9,
                                 MCA_BASE_VAR_SCOPE_CONSTANT,
#endif
                                 &ompi_mpi_abort_print_stack);

    ompi_mpi_preconnect_mpi = false;
    value = mca_base_var_register("ompi", "mpi", NULL, "preconnect_mpi",
                                  "Whether to force MPI processes to fully "
                                  "wire-up the MPI connections between MPI "
                                  "processes during "
                                  "MPI_INIT (vs. making connections lazily -- "
                                  "upon the first MPI traffic between each "
                                  "process peer pair)",
                                  MCA_BASE_VAR_TYPE_BOOL, NULL, 0,
                                  MCA_BASE_VAR_FLAG_INTERNAL,
                                  OPAL_INFO_LVL_9,
                                  MCA_BASE_VAR_SCOPE_READONLY,
                                  &ompi_mpi_preconnect_mpi);
    mca_base_var_register_synonym(value, "ompi", "mpi", NULL, "preconnect_all",
                                  MCA_BASE_VAR_SYN_FLAG_DEPRECATED);

    /* Leave pinned parameter */
    ompi_mpi_leave_pinned = -1;
    (void) mca_base_var_register("ompi", "mpi", NULL, "leave_pinned",
                                 "Whether to use the \"leave pinned\" protocol or not.  Enabling this setting can help bandwidth performance when repeatedly sending and receiving large messages with the same buffers over RDMA-based networks (0 = do not use \"leave pinned\" protocol, 1 = use \"leave pinned\" protocol, -1 = allow network to choose at runtime).",
                                 MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
                                 OPAL_INFO_LVL_9,
                                 MCA_BASE_VAR_SCOPE_READONLY,
                                 &ompi_mpi_leave_pinned);

    ompi_mpi_leave_pinned_pipeline = false;
    (void) mca_base_var_register("ompi", "mpi", NULL, "leave_pinned_pipeline",
                                 "Whether to use the \"leave pinned pipeline\" protocol or not.",
                                 MCA_BASE_VAR_TYPE_BOOL, NULL, 0, 0,
                                 OPAL_INFO_LVL_9,
                                 MCA_BASE_VAR_SCOPE_READONLY,
                                 &ompi_mpi_leave_pinned_pipeline);

    if (ompi_mpi_leave_pinned > 0 && ompi_mpi_leave_pinned_pipeline) {
        ompi_mpi_leave_pinned_pipeline = 0;
        opal_show_help("help-mpi-runtime.txt",
                       "mpi-params:leave-pinned-and-pipeline-selected",
                       true);
    }

    ompi_warn_on_fork = true;
    (void) mca_base_var_register("ompi", "mpi", NULL, "warn_on_fork",
                                 "If nonzero, issue a warning if program forks under conditions that could cause system errors",
                                 MCA_BASE_VAR_TYPE_BOOL, NULL, 0, 0,
                                 OPAL_INFO_LVL_9,
                                 MCA_BASE_VAR_SCOPE_READONLY,
                                 &ompi_warn_on_fork);

    /* Sparse group storage support */
    (void) mca_base_var_register("ompi", "mpi", NULL, "have_sparse_group_storage",
                                 "Whether this Open MPI installation supports storing of data in MPI groups in \"sparse\" formats (good for extremely large process count MPI jobs that create many communicators/groups)",
                                 MCA_BASE_VAR_TYPE_BOOL, NULL, 0,
                                 MCA_BASE_VAR_FLAG_DEFAULT_ONLY,
                                 OPAL_INFO_LVL_9,
                                 MCA_BASE_VAR_SCOPE_CONSTANT,
                                 &ompi_mpi_have_sparse_group_storage);

    ompi_use_sparse_group_storage = ompi_mpi_have_sparse_group_storage;
    (void) mca_base_var_register("ompi", "mpi", NULL, "use_sparse_group_storage",
                                 "Whether to use \"sparse\" storage formats for MPI groups (only relevant if mpi_have_sparse_group_storage is 1)",
                                 MCA_BASE_VAR_TYPE_BOOL, NULL, 0,
                                 ompi_mpi_have_sparse_group_storage ? 0 : MCA_BASE_VAR_FLAG_DEFAULT_ONLY,
                                 OPAL_INFO_LVL_9,
                                 ompi_mpi_have_sparse_group_storage ? MCA_BASE_VAR_SCOPE_READONLY : MCA_BASE_VAR_SCOPE_CONSTANT,
                                 &ompi_use_sparse_group_storage);
    if (ompi_use_sparse_group_storage && !ompi_mpi_have_sparse_group_storage) {
        opal_show_help("help-mpi-runtime.txt",
                       "sparse groups enabled but compiled out",
                       true);
        ompi_use_sparse_group_storage = false;
    }

    (void) mca_base_var_register("ompi", "mpi", NULL, "built_with_cuda_support",
                                 "Whether CUDA GPU buffer support is built into library or not",
                                 MCA_BASE_VAR_TYPE_BOOL, NULL, 0,
                                 MCA_BASE_VAR_FLAG_DEFAULT_ONLY,
                                 OPAL_INFO_LVL_4,
                                 MCA_BASE_VAR_SCOPE_CONSTANT,
                                 &ompi_mpi_built_with_cuda_support);

    /* Current default is to enable CUDA support if it is built into library */
    ompi_mpi_cuda_support = ompi_mpi_built_with_cuda_support;

    (void) mca_base_var_register("ompi", "mpi", NULL, "cuda_support",
                                 "Whether CUDA GPU buffer support is enabled or not",
                                 MCA_BASE_VAR_TYPE_BOOL, NULL, 0, 0,
                                 OPAL_INFO_LVL_4,
                                 MCA_BASE_VAR_SCOPE_LOCAL,
                                 &ompi_mpi_cuda_support);
    if (ompi_mpi_cuda_support && !ompi_mpi_built_with_cuda_support) {
        opal_show_help("help-mpi-runtime.txt", "no cuda support",
                       true);
        ompi_rte_abort(1, NULL);
    }

    /* cutoff for retrieving hostnames */
    ompi_hostname_cutoff = UINT32_MAX;
    (void) mca_base_var_register ("ompi", "ompi", NULL, "hostname_cutoff",
                                  "If the number of processes in the application exceeds the provided value,"
                                  "hostnames for remote processes will not be retrieved by applications [default: UINT32_MAX]",
                                  MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
                                  OPAL_INFO_LVL_9, MCA_BASE_VAR_SCOPE_READONLY,
                                  &ompi_hostname_cutoff);


    return OMPI_SUCCESS;
}
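
Every registration above follows the same pattern: seed the backing C variable with its default, then hand its address to mca_base_var_register(), which overwrites it with any user-supplied value. As a hedged illustration of the idiom (the parameter name "my_feature" and the variable my_feature_enabled are invented for this sketch, not part of Open MPI):

/* Hypothetical registration following the pattern above; the name
   "my_feature" and its backing variable are invented for illustration. */
static bool my_feature_enabled = false;   /* compiled-in default */

(void) mca_base_var_register("ompi", "mpi", NULL, "my_feature",
                             "Whether to enable the (hypothetical) my_feature behavior",
                             MCA_BASE_VAR_TYPE_BOOL, NULL, 0, 0,
                             OPAL_INFO_LVL_9,
                             MCA_BASE_VAR_SCOPE_READONLY,
                             &my_feature_enabled);
/* After this call, my_feature_enabled reflects whatever the user set
   (e.g., via an environment variable or an MCA parameter file). */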
Example #4
int
ompi_mpi_abort(struct ompi_communicator_t* comm,
               int errcode)
{
    char *msg, *host, hostname[MAXHOSTNAMELEN];
    pid_t pid = 0;

    /* Protection for recursive invocation */
    if (have_been_invoked) {
        return OMPI_SUCCESS;
    }
    have_been_invoked = true;

    /* If the RTE is initialized, we know we have a runtime nodename,
       so use that.  Otherwise, call gethostname. */
    if (ompi_rte_initialized) {
        host = ompi_process_info.nodename;
    } else {
        gethostname(hostname, sizeof(hostname));
        host = hostname;
    }
    pid = getpid();

    /* Should we print a stack trace?  Not aggregated because they
       might be different on all processes. */
    if (ompi_mpi_abort_print_stack) {
        char **messages;
        int len, i;

        if (OMPI_SUCCESS == opal_backtrace_buffer(&messages, &len)) {
            for (i = 0; i < len; ++i) {
                fprintf(stderr, "[%s:%d] [%d] func:%s\n", host, (int) pid,
                        i, messages[i]);
                fflush(stderr);
            }
            free(messages);
        } else {
            /* This will print a message if it's unable to print the
               backtrace, so we don't need an additional "else" clause
               if opal_backtrace_print() is not supported. */
            opal_backtrace_print(stderr, NULL, 1);
        }
    }

    /* Notify the debugger that we're about to abort */

    if (errcode < 0 ||
        asprintf(&msg, "[%s:%d] aborting with MPI error %s%s",
                 host, (int) pid, ompi_mpi_errnum_get_string(errcode),
                 ompi_mpi_abort_print_stack ?
                 " (stack trace available on stderr)" : "") < 0) {
        msg = NULL;
    }
    ompi_debugger_notify_abort(msg);
    if (NULL != msg) {
        free(msg);
    }

    /* Should we wait for a while before aborting? */

    if (0 != ompi_mpi_abort_delay) {
        if (ompi_mpi_abort_delay < 0) {
            fprintf(stderr, "[%s:%d] Looping forever (MCA parameter mpi_abort_delay is < 0)\n",
                    host, (int) pid);
            fflush(stderr);
            while (1) {
                sleep(5);
            }
        } else {
            fprintf(stderr, "[%s:%d] Delaying for %d seconds before aborting\n",
                    host, (int) pid, ompi_mpi_abort_delay);
            do {
                sleep(1);
            } while (--ompi_mpi_abort_delay > 0);
        }
    }

    /* If the RTE isn't setup yet/any more, then don't even try
       killing everyone.  Sorry, Charlie... */
    if (!ompi_rte_initialized) {
        fprintf(stderr, "[%s:%d] Local abort %s completed successfully, but am not able to aggregate error messages, and not able to guarantee that all other processes were killed!\n",
                host, (int) pid, ompi_mpi_finalized ?
                "after MPI_FINALIZE started" : "before MPI_INIT completed");
        _exit(errcode == 0 ? 1 : errcode);
    }

    /* If OMPI is initialized and we have a non-NULL communicator,
       then try to kill just that set of processes */
    if (ompi_mpi_initialized && !ompi_mpi_finalized && NULL != comm) {
        try_kill_peers(comm, errcode);
    }

    /* We can fall through to here in a few cases:

       1. The attempt to kill just a subset of peers via
          try_kill_peers() failed (e.g., as of July 2014, ORTE
          returns NOT_IMPLEMENTED from orte_rte_abort_peers()).
       2. MPI wasn't initialized, was already finalized, or we got a
          NULL communicator.

       In all of these cases, the only sensible thing left to do is to
       kill the entire job.  Wah wah. */
    ompi_rte_abort(errcode, NULL);

    /* Does not return */
}
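
For orientation, the MPI-level MPI_Abort() binding is essentially a thin wrapper over this routine. A simplified, hedged sketch (the real binding also validates its arguments before dispatching):

/* Simplified sketch of the MPI entry point; parameter checking and
   error-handler plumbing are elided.  Assumes the two-argument
   ompi_mpi_abort() shown above. */
int MPI_Abort(MPI_Comm comm, int errorcode)
{
    return ompi_mpi_abort(comm, errorcode);
}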
Example #5
int oshmem_shmem_abort(int errcode)
{
    char *host, hostname[OPAL_MAXHOSTNAMELEN];
    pid_t pid = 0;

    /* Protection for recursive invocation */
    if (have_been_invoked) {
        return OSHMEM_SUCCESS;
    }
    have_been_invoked = true;

    /* If the RTE is initialized, we know we have a runtime nodename,
       so use that.  Otherwise, call gethostname. */
    if (ompi_rte_initialized) {
        host = ompi_process_info.nodename;
    } else {
        gethostname(hostname, sizeof(hostname));
        host = hostname;
    }
    pid = getpid();

    opal_show_help("help-shmem-api.txt",
                   "shmem-abort",
                   true,
                   OMPI_PROC_MY_NAME->vpid,
                   pid,
                   host,
                   errcode);

    /* Should we print a stack trace?  Not aggregated because they
     might be different on all processes. */
    if (opal_abort_print_stack) {
        char **messages;
        int len, i;

        if (OPAL_SUCCESS == opal_backtrace_buffer(&messages, &len)) {
            for (i = 0; i < len; ++i) {
                fprintf(stderr,
                        "[%s:%05d] [%d] func:%s\n",
                        host,
                        (int) pid,
                        i,
                        messages[i]);
                fflush(stderr);
            }
            free(messages);
        } else {
            /* This will print a message if it's unable to print the
             backtrace, so we don't need an additional "else" clause
             if opal_backtrace_print() is not supported. */
            opal_backtrace_print(stderr, NULL, 1);
        }
    }

    /* Wait for a while before aborting */
    opal_delay_abort();

    if (!oshmem_shmem_initialized) {
        if (!opal_initialized) {
            /* TODO: a help message from SHMEM (not from MPI) is needed */
            opal_show_help("help-shmem-runtime.txt",
                           "oshmem shmem abort:cannot guarantee all killed",
                           true,
                           host,
                           (int) pid);
        } else {
            fprintf(stderr,
                    "[%s:%05d] Local abort completed successfully; not able to aggregate error messages, and not able to guarantee that all other processes were killed!\n",
                    host,
                    (int) pid);
        }
        oshmem_shmem_aborted = true;
        exit(errcode);
    }

    /* Unlike the MPI abort path, there is no targeted peer kill here:
       mark the job as aborted and let the RTE terminate everything,
       then die gracefully. */

    oshmem_shmem_aborted = true;

    ompi_rte_abort(errcode, NULL);

    return OSHMEM_SUCCESS;
}
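
Note that this newer code path replaces the inline delay loops of Examples #2 and #4 with a single opal_delay_abort() call. A hedged sketch of the behavior that call presumably centralizes, reconstructed from those inline loops (an illustration, not the actual implementation):

/* Illustrative reconstruction based on Examples #2 and #4; assumes an
   opal-level opal_abort_delay variable analogous to
   ompi_mpi_abort_delay. */
if (0 != opal_abort_delay) {
    if (opal_abort_delay < 0) {
        while (1) {
            sleep(5);   /* loop forever so a debugger can attach */
        }
    } else {
        do {
            sleep(1);
        } while (--opal_abort_delay > 0);
    }
}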
Example #6
int ompi_mpi_register_params(void)
{
    int value;

    /* Whether we want MPI API function parameter checking or not. Disable this by default if
       parameter checking is compiled out. */

    ompi_mpi_param_check = !!(MPI_PARAM_CHECK);
    (void) mca_base_var_register("ompi", "mpi", NULL, "param_check",
                                 "Whether you want MPI API parameters checked at run-time or not.  Possible values are 0 (no checking) and 1 (perform checking at run-time)",
                                 MCA_BASE_VAR_TYPE_BOOL, NULL, 0, 0,
                                 OPAL_INFO_LVL_9,
                                 MCA_BASE_VAR_SCOPE_READONLY,
                                 &ompi_mpi_param_check);
    if (ompi_mpi_param_check && !MPI_PARAM_CHECK) {
        opal_show_help("help-mpi-runtime.txt",
                       "mpi-param-check-enabled-but-compiled-out",
                       true);
        ompi_mpi_param_check = false;
    }

    /*
     * opal_progress: decide whether to yield and the event library
     * tick rate
     */
    /* JMS: Need ORTE data here -- set this to 0 when
       exactly/under-subscribed, or 1 when oversubscribed */
    ompi_mpi_yield_when_idle = true;
    (void) mca_base_var_register("ompi", "mpi", NULL, "yield_when_idle",
                                 "Yield the processor when waiting for MPI communication (for MPI processes, will default to 1 when oversubscribing nodes)",
                                 MCA_BASE_VAR_TYPE_BOOL, NULL, 0, 0,
                                 OPAL_INFO_LVL_9,
                                 MCA_BASE_VAR_SCOPE_READONLY,
                                 &ompi_mpi_yield_when_idle);

    ompi_mpi_event_tick_rate = -1;
    (void) mca_base_var_register("ompi", "mpi", NULL, "event_tick_rate",
                                 "How often to progress TCP communications (0 = never, otherwise specified in microseconds)",
                                 MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
                                 OPAL_INFO_LVL_9,
                                 MCA_BASE_VAR_SCOPE_READONLY,
                                 &ompi_mpi_event_tick_rate);

    /* Whether or not to show MPI handle leaks */
    ompi_debug_show_handle_leaks = false;
    (void) mca_base_var_register("ompi", "mpi", NULL, "show_handle_leaks",
                                 "Whether MPI_FINALIZE shows all MPI handles that were not freed or not",
                                 MCA_BASE_VAR_TYPE_BOOL, NULL, 0, 0,
                                 OPAL_INFO_LVL_9,
                                 MCA_BASE_VAR_SCOPE_READONLY,
                                 &ompi_debug_show_handle_leaks);

    /* Whether or not to free MPI handles.  Useless without run-time
       param checking, so implicitly set that to true if we don't want
       to free the handles. */
    ompi_debug_no_free_handles = false;
    (void) mca_base_var_register("ompi", "mpi", NULL, "no_free_handles",
                                 "Whether to actually free MPI objects when their handles are freed",
                                 MCA_BASE_VAR_TYPE_BOOL, NULL, 0, 0,
                                 OPAL_INFO_LVL_9,
                                 MCA_BASE_VAR_SCOPE_READONLY,
                                 &ompi_debug_no_free_handles);
    if (ompi_debug_no_free_handles) {
        ompi_mpi_param_check = true;
        if (!MPI_PARAM_CHECK) {
            opal_output(0, "WARNING: MCA parameter mpi_no_free_handles set to true, but MPI");
            opal_output(0, "WARNING: parameter checking has been compiled out of Open MPI.");
            opal_output(0, "WARNING: mpi_no_free_handles is therefore only partially effective!");
        }
    }

    /* Whether or not to show MPI_ALLOC_MEM leaks */
    ompi_debug_show_mpi_alloc_mem_leaks = 0;
    (void) mca_base_var_register("ompi", "mpi", NULL, "show_mpi_alloc_mem_leaks",
                                 "If >0, MPI_FINALIZE will show up to this many instances of memory allocated by MPI_ALLOC_MEM that was not freed by MPI_FREE_MEM",
                                 MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
                                 OPAL_INFO_LVL_9,
                                 MCA_BASE_VAR_SCOPE_READONLY,
                                 &ompi_debug_show_mpi_alloc_mem_leaks);

    /* Whether or not to print all MCA parameters in MPI_INIT */
    ompi_mpi_show_mca_params_string = NULL;
    (void) mca_base_var_register("ompi", "mpi", NULL, "show_mca_params",
                                 "Whether to show all MCA parameter values during MPI_INIT or not (good for reproducability of MPI jobs "
                                 "for debug purposes). Accepted values are all, default, file, api, and enviro - or a comma "
                                 "delimited combination of them",
                                 MCA_BASE_VAR_TYPE_STRING, NULL, 0, 0,
                                 OPAL_INFO_LVL_9,
                                 MCA_BASE_VAR_SCOPE_READONLY,
                                 &ompi_mpi_show_mca_params_string);
    if (NULL != ompi_mpi_show_mca_params_string) {
        char **args;
        int i;

        ompi_mpi_show_mca_params = true;
        args = opal_argv_split(ompi_mpi_show_mca_params_string, ',');
        if (NULL == args) {
            opal_output(0, "WARNING: could not parse mpi_show_mca_params request - defaulting to show \"all\"");
            show_default_mca_params = true;
            show_file_mca_params = true;
            show_enviro_mca_params = true;
            show_override_mca_params = true;
        } else {
            for (i=0; NULL != args[i]; i++) {
                if (0 == strcasecmp(args[i], "all")  || 0 == strcmp(args[i], "1")) {
                    show_default_mca_params = true;
                    show_file_mca_params = true;
                    show_enviro_mca_params = true;
                    show_override_mca_params = true;
                } else if (0 == strcasecmp(args[i], "default")) {
                    show_default_mca_params = true;
                } else if (0 == strcasecmp(args[i], "file")) {
                    show_file_mca_params = true;
                } else if (0 == strncasecmp(args[i], "env", 3)) {
                    show_enviro_mca_params = true;
                } else if (0 == strcasecmp(args[i], "api")) {
                    show_override_mca_params = true;
                }
            }
            opal_argv_free(args);
        }
    }

    /* File to use when dumping the parameters */
    (void) mca_base_var_register("ompi", "mpi", NULL, "show_mca_params_file",
                                 "If mpi_show_mca_params is true, setting this string to a valid filename tells Open MPI to dump all the MCA parameter values into a file suitable for reading via the mca_param_files parameter (good for reproducability of MPI jobs)",
                                 MCA_BASE_VAR_TYPE_STRING, NULL, 0, 0,
                                 OPAL_INFO_LVL_9,
                                 MCA_BASE_VAR_SCOPE_READONLY,
                                 &ompi_mpi_show_mca_params_file);

    /* User-level process pinning controls */

    ompi_mpi_preconnect_mpi = false;
    value = mca_base_var_register("ompi", "mpi", NULL, "preconnect_mpi",
                                  "Whether to force MPI processes to fully "
                                  "wire-up the MPI connections between MPI "
                                  "processes during "
                                  "MPI_INIT (vs. making connections lazily -- "
                                  "upon the first MPI traffic between each "
                                  "process peer pair)",
                                  MCA_BASE_VAR_TYPE_BOOL, NULL, 0,
                                  MCA_BASE_VAR_FLAG_INTERNAL,
                                  OPAL_INFO_LVL_9,
                                  MCA_BASE_VAR_SCOPE_READONLY,
                                  &ompi_mpi_preconnect_mpi);
    mca_base_var_register_synonym(value, "ompi", "mpi", NULL, "preconnect_all",
                                  MCA_BASE_VAR_SYN_FLAG_DEPRECATED);

    /* Sparse group storage support */
    (void) mca_base_var_register("ompi", "mpi", NULL, "have_sparse_group_storage",
                                 "Whether this Open MPI installation supports storing of data in MPI groups in \"sparse\" formats (good for extremely large process count MPI jobs that create many communicators/groups)",
                                 MCA_BASE_VAR_TYPE_BOOL, NULL, 0,
                                 MCA_BASE_VAR_FLAG_DEFAULT_ONLY,
                                 OPAL_INFO_LVL_9,
                                 MCA_BASE_VAR_SCOPE_CONSTANT,
                                 &ompi_mpi_have_sparse_group_storage);

    ompi_use_sparse_group_storage = ompi_mpi_have_sparse_group_storage;
    (void) mca_base_var_register("ompi", "mpi", NULL, "use_sparse_group_storage",
                                 "Whether to use \"sparse\" storage formats for MPI groups (only relevant if mpi_have_sparse_group_storage is 1)",
                                 MCA_BASE_VAR_TYPE_BOOL, NULL, 0,
                                 ompi_mpi_have_sparse_group_storage ? 0 : MCA_BASE_VAR_FLAG_DEFAULT_ONLY,
                                 OPAL_INFO_LVL_9,
                                 ompi_mpi_have_sparse_group_storage ? MCA_BASE_VAR_SCOPE_READONLY : MCA_BASE_VAR_SCOPE_CONSTANT,
                                 &ompi_use_sparse_group_storage);
    if (ompi_use_sparse_group_storage && !ompi_mpi_have_sparse_group_storage) {
        opal_show_help("help-mpi-runtime.txt",
                       "sparse groups enabled but compiled out",
                       true);
        ompi_use_sparse_group_storage = false;
    }

    value = mca_base_var_find ("opal", "opal", NULL, "cuda_support");
    if (0 <= value) {
        mca_base_var_register_synonym(value, "ompi", "mpi", NULL, "cuda_support",
                                      MCA_BASE_VAR_SYN_FLAG_DEPRECATED);
    }

    value = mca_base_var_find ("opal", "opal", NULL, "built_with_cuda_support");
    if (0 <= value) {
        mca_base_var_register_synonym(value, "ompi", "mpi", NULL, "built_with_cuda_support", 0);
    }

    if (opal_cuda_support && !opal_built_with_cuda_support) {
        opal_show_help("help-mpi-runtime.txt", "no cuda support",
                       true);
        ompi_rte_abort(1, NULL);
    }

    ompi_add_procs_cutoff = OMPI_ADD_PROCS_CUTOFF_DEFAULT;
    (void) mca_base_var_register ("ompi", "mpi", NULL, "add_procs_cutoff",
                                  "Maximum world size for pre-allocating resources for all "
                                  "remote processes. Increasing this limit may improve "
                                  "communication performance at the cost of memory usage",
                                  MCA_BASE_VAR_TYPE_UNSIGNED_INT, NULL,
                                  0, 0, OPAL_INFO_LVL_3, MCA_BASE_VAR_SCOPE_LOCAL,
                                  &ompi_add_procs_cutoff);

    ompi_mpi_dynamics_enabled = true;
    (void) mca_base_var_register("ompi", "mpi", NULL, "dynamics_enabled",
                                 "Is the MPI dynamic process functionality enabled (e.g., MPI_COMM_SPAWN)?  Default is yes, but certain transports and/or environments may disable it.",
                                 MCA_BASE_VAR_TYPE_BOOL, NULL, 0, 0,
                                 OPAL_INFO_LVL_4,
                                 MCA_BASE_VAR_SCOPE_READONLY,
                                 &ompi_mpi_dynamics_enabled);

    ompi_async_mpi_init = false;
    (void) mca_base_var_register("ompi", "async", "mpi", "init",
                                 "Do not perform a barrier at the end of MPI_Init",
                                 MCA_BASE_VAR_TYPE_BOOL, NULL, 0, 0,
                                 OPAL_INFO_LVL_9,
                                 MCA_BASE_VAR_SCOPE_READONLY,
                                 &ompi_async_mpi_init);

    ompi_async_mpi_finalize = false;
    (void) mca_base_var_register("ompi", "async", "mpi", "finalize",
                                 "Do not perform a barrier at the beginning of MPI_Finalize",
                                 MCA_BASE_VAR_TYPE_BOOL, NULL, 0, 0,
                                 OPAL_INFO_LVL_9,
                                 MCA_BASE_VAR_SCOPE_READONLY,
                                 &ompi_async_mpi_finalize);

    value = mca_base_var_find ("opal", "opal", NULL, "abort_delay");
    if (0 <= value) {
        (void) mca_base_var_register_synonym(value, "ompi", "mpi", NULL, "abort_delay",
                                      MCA_BASE_VAR_SYN_FLAG_DEPRECATED);
    }

    value = mca_base_var_find ("opal", "opal", NULL, "abort_print_stack");
    if (0 <= value) {
        (void) mca_base_var_register_synonym(value, "ompi", "mpi", NULL, "abort_print_stack",
                                      MCA_BASE_VAR_SYN_FLAG_DEPRECATED);
    }

    return OMPI_SUCCESS;
}
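
The closing blocks show the standard migration idiom: look up the new opal-level variable, then register the old ompi-level name as a deprecated synonym so existing user configurations keep working. The same idiom for a hypothetical rename (both parameter names below are invented for illustration):

/* Hypothetical rename: keep the old "mpi_old_name" working as a
   deprecated alias of the new opal-level "opal_new_name". */
int idx = mca_base_var_find("opal", "opal", NULL, "new_name");
if (0 <= idx) {
    (void) mca_base_var_register_synonym(idx, "ompi", "mpi", NULL, "old_name",
                                         MCA_BASE_VAR_SYN_FLAG_DEPRECATED);
}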