Code example #1
File: mpi_MPI.c  Project: bureddy/ompi-release
jboolean ompi_java_exceptionCheck(JNIEnv *env, int rc)
{
    if (rc < 0) {
        /* handle ompi error code */
        rc = ompi_errcode_get_mpi_code (rc);
        /* ompi_mpi_errcode_get_class() cannot handle negative error codes;
         * all Open MPI MPI error codes should be > 0. */
        assert (rc >= 0);
    }

    if(MPI_SUCCESS == rc)
    {
        return JNI_FALSE;
    }
    else if((*env)->ExceptionCheck(env))
    {
        return JNI_TRUE;
    }
    else
    {
        int     errClass = ompi_mpi_errcode_get_class(rc);
        char    *message = ompi_mpi_errnum_get_string(rc);
        jstring jmessage = (*env)->NewStringUTF(env, (const char*)message);

        jobject mpiex = (*env)->NewObject(env, ompi_java.ExceptionClass,
                                          ompi_java.ExceptionInit,
                                          rc, errClass, jmessage);
        (*env)->Throw(env, mpiex);
        (*env)->DeleteLocalRef(env, mpiex);
        (*env)->DeleteLocalRef(env, jmessage);
        return JNI_TRUE;
    }
}
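
A helper like this is typically called right after the underlying MPI call in each JNI binding: the C return code is passed in, and if it is not MPI_SUCCESS a Java MPIException is left pending before the native method returns. The wrapper below is only a minimal sketch of that pattern; its name and parameter list are assumptions for illustration, not the actual Open MPI binding code.

#include <jni.h>
#include <mpi.h>

/* Prototype of the helper shown above (defined in mpi_MPI.c). */
jboolean ompi_java_exceptionCheck(JNIEnv *env, int rc);

/* Hypothetical JNI wrapper (name and signature assumed for this sketch). */
JNIEXPORT void JNICALL example_Comm_barrier(JNIEnv *env, jobject jthis, jlong comm)
{
    int rc = MPI_Barrier((MPI_Comm)comm);

    /* Turn a non-success return code into a pending Java exception;
     * the JVM throws it once this native method returns. */
    ompi_java_exceptionCheck(env, rc);
}
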
Code example #2
File: mpi_MPI.c  Project: IanYXXL/A1
jboolean ompi_java_exceptionCheck(JNIEnv *env, int rc)
{
    if(MPI_SUCCESS == rc)
    {
        return JNI_FALSE;
    }
    else if((*env)->ExceptionCheck(env))
    {
        return JNI_TRUE;
    }
    else
    {
        int     errClass = ompi_mpi_errcode_get_class(rc);
        char    *message = ompi_mpi_errnum_get_string(rc);
        jstring jmessage = (*env)->NewStringUTF(env, (const char*)message);

        jobject mpiex = (*env)->NewObject(env, ompi_java.ExceptionClass,
                                          ompi_java.ExceptionInit,
                                          rc, errClass, jmessage);
        (*env)->Throw(env, mpiex);
        (*env)->DeleteLocalRef(env, mpiex);
        (*env)->DeleteLocalRef(env, jmessage);
        return JNI_TRUE;
    }
}
Code example #3
File: errhandler_predefined.c  Project: 00datman/ompi
/*
 * Use opal_show_help() to aggregate the error messages (i.e., show it
 * once rather than N times).
 *
 * Note that this function will only be invoked for errors during the
 * MPI application (i.e., after MPI_INIT and before MPI_FINALIZE).  So
 * there's no need to handle the pre-MPI_INIT and post-MPI_FINALIZE
 * errors here.
 */
static void backend_fatal_aggregate(char *type,
                                    struct ompi_communicator_t *comm,
                                    char *name, int *error_code,
                                    va_list arglist)
{
    char *arg, *prefix, *err_msg = "Unknown error";
    bool err_msg_need_free = false;

    arg = va_arg(arglist, char*);
    va_end(arglist);

    asprintf(&prefix, "[%s:%d]", ompi_process_info.nodename,
             (int) ompi_process_info.pid);

    if (NULL != error_code) {
        err_msg = ompi_mpi_errnum_get_string(*error_code);
        if (NULL == err_msg) {
            err_msg_need_free = true;
            asprintf(&err_msg, "Error code: %d (no associated error message)",
                     *error_code);
        }
    }

    if (NULL != name) {
        opal_show_help("help-mpi-errors.txt",
                       "mpi_errors_are_fatal", false,
                       prefix, (NULL == arg) ? "" : "in",
                       (NULL == arg) ? "" : arg,
                       prefix, OMPI_PROC_MY_NAME->jobid, OMPI_PROC_MY_NAME->vpid,
                       prefix, type, name, prefix, err_msg, prefix, type, prefix);
    } else {
        opal_show_help("help-mpi-errors.txt",
                       "mpi_errors_are_fatal unknown handle", false,
                       prefix, (NULL == arg) ? "" : "in",
                       (NULL == arg) ? "" : arg,
                       prefix, OMPI_PROC_MY_NAME->jobid, OMPI_PROC_MY_NAME->vpid,
                       prefix, type, prefix, err_msg, prefix, type, prefix);
    }

    if (err_msg_need_free) {
        free(err_msg);
    }
}
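
Taking a va_list rather than "..." lets a single variadic front-end decide at runtime whether to aggregate errors through opal_show_help() (as above) or to print them immediately (as in the no-aggregate variant shown later). A minimal sketch of such a dispatcher follows; the function name and the runtime test are assumptions for illustration, not the exact Open MPI code.

#include <stdarg.h>

/* Illustrative dispatcher: build the va_list once and hand it to whichever
 * backend applies.  Both backends consume the list and call va_end() on it.
 * The runtime test used here is an assumption for this sketch. */
static void backend_fatal(char *type, struct ompi_communicator_t *comm,
                          char *name, int *error_code, ...)
{
    va_list arglist;

    va_start(arglist, error_code);

    if (ompi_mpi_initialized && !ompi_mpi_finalized) {
        /* Runtime is up: aggregate duplicate messages across processes. */
        backend_fatal_aggregate(type, comm, name, error_code, arglist);
    } else {
        /* Pre-MPI_INIT / post-MPI_FINALIZE: print directly, no malloc(). */
        backend_fatal_no_aggregate(type, comm, name, error_code, arglist);
    }

    /* A fatal handler would then abort the job (see ompi_mpi_abort below). */
}
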
Code example #4
File: ompi_mpi_abort.c  Project: IanYXXL/A1
int
ompi_mpi_abort(struct ompi_communicator_t* comm,
               int errcode,
               bool kill_remote_of_intercomm)
{
    int count = 0, i, ret;
    char *msg, *host, hostname[MAXHOSTNAMELEN];
    pid_t pid = 0;
    ompi_process_name_t *abort_procs;
    int32_t nabort_procs;

    /* Protection for recursive invocation */
    if (have_been_invoked) {
        return OMPI_SUCCESS;
    }
    have_been_invoked = true;

    /* If MPI is initialized, we know we have a runtime nodename, so use that.  Otherwise, call
       gethostname. */
    if (ompi_mpi_initialized) {
        host = ompi_process_info.nodename;
    } else {
        gethostname(hostname, sizeof(hostname));
        host = hostname;
    }
    pid = getpid();

    /* Should we print a stack trace?  Not aggregated because they
       might be different on all processes. */
    if (ompi_mpi_abort_print_stack) {
        char **messages;
        int len, i;

        if (OMPI_SUCCESS == opal_backtrace_buffer(&messages, &len)) {
            for (i = 0; i < len; ++i) {
                fprintf(stderr, "[%s:%d] [%d] func:%s\n", host, (int) pid, 
                        i, messages[i]);
                fflush(stderr);
            }
            free(messages);
        } else {
            /* This will print a message if it's unable to print the
               backtrace, so we don't need an additional "else" clause
               if opal_backtrace_print() is not supported. */
            opal_backtrace_print(stderr, NULL, 1);
        }
    }

    /* Notify the debugger that we're about to abort */

    if (errcode < 0 ||
        asprintf(&msg, "[%s:%d] aborting with MPI error %s%s", 
                 host, (int) pid, ompi_mpi_errnum_get_string(errcode), 
                 ompi_mpi_abort_print_stack ? 
                 " (stack trace available on stderr)" : "") < 0) {
        msg = NULL;
    }
    ompi_debugger_notify_abort(msg);
    if (NULL != msg) {
        free(msg);
    }

    /* Should we wait for a while before aborting? */

    if (0 != ompi_mpi_abort_delay) {
        if (ompi_mpi_abort_delay < 0) {
            fprintf(stderr, "[%s:%d] Looping forever (MCA parameter mpi_abort_delay is < 0)\n",
                    host, (int) pid);
            fflush(stderr);
            while (1) { 
                sleep(5); 
            }
        } else {
            fprintf(stderr, "[%s:%d] Delaying for %d seconds before aborting\n",
                    host, (int) pid, ompi_mpi_abort_delay);
            do {
                sleep(1);
            } while (--ompi_mpi_abort_delay > 0);
        }
    }

    /* If OMPI isn't setup yet/any more, then don't even try killing
       everyone.  OMPI's initialized period covers the entire
       runtime-initialized period, so there is no need to check the runtime here.
       Sorry, Charlie... */

    if (!ompi_mpi_initialized || ompi_mpi_finalized) {
        fprintf(stderr, "[%s:%d] Local abort %s completed successfully; not able to aggregate error messages, and not able to guarantee that all other processes were killed!\n",
                host, (int) pid, ompi_mpi_finalized ? 
                "after MPI_FINALIZE" : "before MPI_INIT");
        exit(errcode);
    }

    /* abort local procs in the communicator.  If the communicator is
       an intercommunicator AND the abort has explicitly requested
       that we abort the remote procs, then do that as well. */
    nabort_procs = ompi_comm_size(comm);

    if (kill_remote_of_intercomm) {
        /* ompi_comm_remote_size() returns 0 if not an intercomm, so
           this is cool */
        nabort_procs += ompi_comm_remote_size(comm);
    }

    abort_procs = (ompi_process_name_t*)malloc(sizeof(ompi_process_name_t) * nabort_procs);
    if (NULL == abort_procs) {
        /* quick clean orte and get out */
        ompi_rte_abort(errcode, "Abort unable to malloc memory to kill procs");
    }

    /* put all the local procs in the abort list */
    for (i = 0 ; i < ompi_comm_size(comm) ; ++i) {
        if (OPAL_EQUAL != ompi_rte_compare_name_fields(OMPI_RTE_CMP_ALL, 
                                 &comm->c_local_group->grp_proc_pointers[i]->proc_name,
                                 OMPI_PROC_MY_NAME)) {
            assert(count <= nabort_procs);
            abort_procs[count++] = comm->c_local_group->grp_proc_pointers[i]->proc_name;
        } else {
            /* don't terminate me just yet */
            nabort_procs--;
        }
    }

    /* if requested, kill off remote procs too */
    if (kill_remote_of_intercomm) {
        for (i = 0 ; i < ompi_comm_remote_size(comm) ; ++i) {
            if (OPAL_EQUAL != ompi_rte_compare_name_fields(OMPI_RTE_CMP_ALL, 
                                     &comm->c_remote_group->grp_proc_pointers[i]->proc_name,
                                     OMPI_PROC_MY_NAME)) {
                assert(count <= nabort_procs);
                abort_procs[count++] =
                    comm->c_remote_group->grp_proc_pointers[i]->proc_name;
            } else {
                /* don't terminate me just yet */
                nabort_procs--;
            }
        }
    }

    if (nabort_procs > 0) {
        /* This must be implemented for MPI_Abort() to work according to the
         * standard language for a 'high-quality' implementation.
         * It would be nifty if we could differentiate between the
         * abort scenarios:
         *      - MPI_Abort()
         *      - MPI_ERRORS_ARE_FATAL
         *      - Victim of MPI_Abort()
         */
        /*
         * Abort peers in this communicator group. Does not include self.
         */
        if( OMPI_SUCCESS != (ret = ompi_rte_abort_peers(abort_procs, nabort_procs, errcode)) ) {
            ompi_rte_abort(errcode, "Open MPI failed to abort all of the procs requested (%d).", ret);
        }
    }

    /* now that we've aborted everyone else, gracefully die. */
    ompi_rte_abort(errcode, NULL);
    
    return OMPI_SUCCESS;
}
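
The kill_remote_of_intercomm flag controls whether, for an intercommunicator, the remote group is aborted along with the local one (ompi_comm_remote_size() returns 0 for an intracommunicator, so the flag is harmless there). A caller simply forwards its communicator, error code, and the desired policy. The front-end below is hypothetical, and the flag value it passes is an assumption of this sketch, not necessarily what MPI_Abort itself does.

#include <stdbool.h>
#include <mpi.h>

/* Prototype of the internal routine shown above. */
int ompi_mpi_abort(struct ompi_communicator_t *comm, int errcode,
                   bool kill_remote_of_intercomm);

/* Hypothetical caller.  In Open MPI, MPI_Comm is a pointer to
 * ompi_communicator_t, so the cast is a no-op there; passing 'true'
 * (also abort the remote group of an intercomm) is an assumption. */
int example_abort(MPI_Comm comm, int errorcode)
{
    return ompi_mpi_abort((struct ompi_communicator_t *)comm, errorcode, true);
}
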
Code example #5
File: errhandler_predefined.c  Project: 00datman/ompi
/*
 * Note that this function has to handle pre-MPI_INIT and
 * post-MPI_FINALIZE errors, which backend_fatal_aggregate() does not
 * have to handle.
 *
 * This function also intentionally does not call malloc(), just in
 * case we're being called due to some kind of stack/memory error --
 * we *might* be able to get a message out if we're not further
 * corrupting the stack by calling malloc()...
 */
static void backend_fatal_no_aggregate(char *type,
                                       struct ompi_communicator_t *comm,
                                       char *name, int *error_code,
                                       va_list arglist)
{
    char *arg;

    assert(!ompi_mpi_initialized || ompi_mpi_finalized);

    fflush(stdout);
    fflush(stderr);

    arg = va_arg(arglist, char*);

    /* Per #2152, print out in plain english if something was invoked
       before MPI_INIT* or after MPI_FINALIZE */
    if (!ompi_mpi_init_started && !ompi_mpi_initialized) {
        if (NULL != arg) {
            out("*** The %s() function was called before MPI_INIT was invoked.\n"
                "*** This is disallowed by the MPI standard.\n", arg);
        } else {
            out("*** An MPI function was called before MPI_INIT was invoked.\n"
                "*** This is disallowed by the MPI standard.\n"
                "*** Unfortunately, no further information is available on *which* MPI\n"
                "*** function was invoked, sorry.  :-(\n", NULL);
        }
        out("*** Your MPI job will now abort.\n", NULL);
    } else if (ompi_mpi_finalized) {
        if (NULL != arg) {
            out("*** The %s() function was called after MPI_FINALIZE was invoked.\n"
                "*** This is disallowed by the MPI standard.\n", arg);
        } else {
            out("*** An MPI function was called after MPI_FINALIZE was invoked.\n"
                "*** This is disallowed by the MPI standard.\n"
                "*** Unfortunately, no further information is available on *which* MPI\n"
                "*** function was invoked, sorry.  :-(\n", NULL);
        }
        out("*** Your MPI job will now abort.\n", NULL);
    }

    else {
        int len;
        char str[MPI_MAX_PROCESSOR_NAME * 2];

        /* THESE MESSAGES ARE COORDINATED WITH FIXED STRINGS IN
           help-mpi-errors.txt!  Do not change these messages without
           also changing help-mpi-errors.txt! */

        /* This is after MPI_INIT* and before MPI_FINALIZE, so print
           the error message normally */
        if (NULL != arg) {
            out("*** An error occurred in %s\n", arg);
        } else {
            out("*** An error occurred\n", NULL);
        }

        if (NULL != name) {
            /* Don't use asprintf() here because there may be stack /
               heap corruption by the time we're invoked, so just do
               it on the stack */
            str[0] = '\0';
            len = sizeof(str) - 1;
            strncat(str, type, len);

            len -= strlen(type);
            if (len > 0) {
                strncat(str, " ", len);

                --len;
                if (len > 0) {
                    strncat(str, name, len);
                }
            }
            out("*** on %s", str);
        } else if (NULL == name) {
            out("*** on a NULL %s\n", type);
        }

        if (NULL != error_code) {
            char *tmp = ompi_mpi_errnum_get_string(*error_code);
            if (NULL != tmp) {
                out("*** %s\n", tmp);
            } else {
                char intbuf[32];
                snprintf(intbuf, 32, "%d", *error_code);
                out("*** Error code: %d (no associated error message)\n", intbuf);
            }
        }
        /* out("*** MPI_ERRORS_ARE_FATAL: your MPI job will now abort\n", NULL); */
        out("*** MPI_ERRORS_ARE_FATAL (processes in this %s will now abort,\n", type);
        out("***    and potentially your MPI job)\n", NULL);

    }
    va_end(arglist);
}
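
The out() helper used throughout this function is not part of the snippet. The calls above imply its contract: a printf-style format plus at most one string argument, printed without heap allocation for the same robustness reasons the comment gives for avoiding malloc(). The version below is only a sketch of that implied contract; the real Open MPI helper may differ (for instance by routing through opal_output() while the runtime is up).

#include <stdio.h>

/* Sketch of the out() helper assumed by the code above: print a message
 * with zero or one string argument straight to stderr, no heap allocation.
 * This is an illustration of the implied contract, not the real helper. */
static void out(char *str, char *arg)
{
    if (NULL != arg) {
        fprintf(stderr, str, arg);
    } else {
        fprintf(stderr, "%s", str);
    }
    fflush(stderr);
}
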
Code example #6
int
ompi_mpi_abort(struct ompi_communicator_t* comm,
               int errcode,
               bool kill_remote_of_intercomm)
{
    int count = 0, i;
    char *msg, *host, hostname[MAXHOSTNAMELEN];
    pid_t pid = 0;
    orte_process_name_t *abort_procs;
    orte_std_cntr_t nabort_procs;

    /* Protection for recursive invocation */
    if (have_been_invoked) {
        return OMPI_SUCCESS;
    }
    have_been_invoked = true;

    /* If ORTE is initialized, use its nodename.  Otherwise, call
       gethostname. */

    if (orte_initialized) {
        host = orte_process_info.nodename;
    } else {
        gethostname(hostname, sizeof(hostname));
        host = hostname;
    }
    pid = getpid();

    /* Should we print a stack trace? */

    if (ompi_mpi_abort_print_stack) {
        char **messages;
        int len, i;

        if (OMPI_SUCCESS == opal_backtrace_buffer(&messages, &len)) {
            for (i = 0; i < len; ++i) {
                fprintf(stderr, "[%s:%d] [%d] func:%s\n", host, (int) pid, 
                        i, messages[i]);
                fflush(stderr);
            }
            free(messages);
        } else {
            /* This will print a message if it's unable to print the
               backtrace, so we don't need an additional "else" clause
               if opal_backtrace_print() is not supported. */
            opal_backtrace_print(stderr);
        }
    }

    /* Notify the debugger that we're about to abort */

    if (errcode < 0 ||
        asprintf(&msg, "[%s:%d] aborting with MPI error %s%s", 
                 host, (int) pid, ompi_mpi_errnum_get_string(errcode), 
                 ompi_mpi_abort_print_stack ? 
                 " (stack trace available on stderr)" : "") < 0) {
        msg = NULL;
    }
    ompi_debugger_notify_abort(msg);
    if (NULL != msg) {
        free(msg);
    }

    /* Should we wait for a while before aborting? */

    if (0 != ompi_mpi_abort_delay) {
        if (ompi_mpi_abort_delay < 0) {
            fprintf(stderr, "[%s:%d] Looping forever (MCA parameter mpi_abort_delay is < 0)\n",
                    host, (int) pid);
            fflush(stderr);
            while (1) { 
                sleep(5); 
            }
        } else {
            fprintf(stderr, "[%s:%d] Delaying for %d seconds before aborting\n",
                    host, (int) pid, ompi_mpi_abort_delay);
            do {
                sleep(1);
            } while (--ompi_mpi_abort_delay > 0);
        }
    }

    /* If OMPI isn't setup yet/any more, then don't even try killing
       everyone.  Ditto for ORTE (e.g., ORTE may be initialized before
       MPI_INIT is over, but ompi_initialized will be false because
       communicators are not setup yet). Sorry, Charlie... */

    if (!orte_initialized || !ompi_mpi_initialized || ompi_mpi_finalized) {
        fprintf(stderr, "[%s:%d] Abort %s completed successfully; not able to guarantee that all other processes were killed!\n",
                host, (int) pid, ompi_mpi_finalized ? 
                "after MPI_FINALIZE" : "before MPI_INIT");
        exit(errcode);
    }

    /* abort local procs in the communicator.  If the communicator is
       an intercommunicator AND the abort has explicitly requested
       that we abort the remote procs, then do that as well. */
    nabort_procs = ompi_comm_size(comm);

    if (kill_remote_of_intercomm) {
        /* ompi_comm_remote_size() returns 0 if not an intercomm, so
           this is cool */
        nabort_procs += ompi_comm_remote_size(comm);
    }

    abort_procs = (orte_process_name_t*)malloc(sizeof(orte_process_name_t) * nabort_procs);
    if (NULL == abort_procs) {
        /* quick clean orte and get out */
        orte_errmgr.abort(errcode, "Abort unable to malloc memory to kill procs");
    }

    /* put all the local procs in the abort list */
    for (i = 0 ; i < ompi_comm_size(comm) ; ++i) {
        if (OPAL_EQUAL != orte_util_compare_name_fields(ORTE_NS_CMP_ALL, 
                                 &comm->c_local_group->grp_proc_pointers[i]->proc_name,
                                 ORTE_PROC_MY_NAME)) {
            assert(count <= nabort_procs);
            abort_procs[count++] = comm->c_local_group->grp_proc_pointers[i]->proc_name;
        } else {
            /* don't terminate me just yet */
            nabort_procs--;
        }
    }

    /* if requested, kill off remote procs too */
    if (kill_remote_of_intercomm) {
        for (i = 0 ; i < ompi_comm_remote_size(comm) ; ++i) {
            if (OPAL_EQUAL != orte_util_compare_name_fields(ORTE_NS_CMP_ALL, 
                                     &comm->c_remote_group->grp_proc_pointers[i]->proc_name,
                                     ORTE_PROC_MY_NAME)) {
                assert(count <= nabort_procs);
                abort_procs[count++] =
                    comm->c_remote_group->grp_proc_pointers[i]->proc_name;
            } else {
                /* don't terminate me just yet */
                nabort_procs--;
            }
        }
    }

    if (nabort_procs > 0) {
#if 0
        int ret = orte_errmgr.abort_procs_request(abort_procs, nabort_procs);
        if (OMPI_SUCCESS != ret) {
            orte_errmgr.abort(ret, "Open MPI failed to abort procs as requested (%d). Exiting.", ret);
        }
#endif
    }

    /* now that we've aborted everyone else, gracefully die. */
    orte_errmgr.abort(errcode, NULL);
    
    return OMPI_SUCCESS;
}
Code example #7
File: ompi_mpi_abort.c  Project: nasailja/ompi
int
ompi_mpi_abort(struct ompi_communicator_t* comm,
               int errcode)
{
    char *msg, *host, hostname[MAXHOSTNAMELEN];
    pid_t pid = 0;

    /* Protection for recursive invocation */
    if (have_been_invoked) {
        return OMPI_SUCCESS;
    }
    have_been_invoked = true;

    /* If MPI is initialized, we know we have a runtime nodename, so
       use that.  Otherwise, call gethostname. */
    if (ompi_rte_initialized) {
        host = ompi_process_info.nodename;
    } else {
        gethostname(hostname, sizeof(hostname));
        host = hostname;
    }
    pid = getpid();

    /* Should we print a stack trace?  Not aggregated because they
       might be different on all processes. */
    if (ompi_mpi_abort_print_stack) {
        char **messages;
        int len, i;

        if (OMPI_SUCCESS == opal_backtrace_buffer(&messages, &len)) {
            for (i = 0; i < len; ++i) {
                fprintf(stderr, "[%s:%d] [%d] func:%s\n", host, (int) pid,
                        i, messages[i]);
                fflush(stderr);
            }
            free(messages);
        } else {
            /* This will print a message if it's unable to print the
               backtrace, so we don't need an additional "else" clause
               if opal_backtrace_print() is not supported. */
            opal_backtrace_print(stderr, NULL, 1);
        }
    }

    /* Notify the debugger that we're about to abort */

    if (errcode < 0 ||
        asprintf(&msg, "[%s:%d] aborting with MPI error %s%s",
                 host, (int) pid, ompi_mpi_errnum_get_string(errcode),
                 ompi_mpi_abort_print_stack ?
                 " (stack trace available on stderr)" : "") < 0) {
        msg = NULL;
    }
    ompi_debugger_notify_abort(msg);
    if (NULL != msg) {
        free(msg);
    }

    /* Should we wait for a while before aborting? */

    if (0 != ompi_mpi_abort_delay) {
        if (ompi_mpi_abort_delay < 0) {
            fprintf(stderr, "[%s:%d] Looping forever (MCA parameter mpi_abort_delay is < 0)\n",
                    host, (int) pid);
            fflush(stderr);
            while (1) {
                sleep(5);
            }
        } else {
            fprintf(stderr, "[%s:%d] Delaying for %d seconds before aborting\n",
                    host, (int) pid, ompi_mpi_abort_delay);
            do {
                sleep(1);
            } while (--ompi_mpi_abort_delay > 0);
        }
    }

    /* If the RTE isn't setup yet/any more, then don't even try
       killing everyone.  Sorry, Charlie... */
    if (!ompi_rte_initialized) {
        fprintf(stderr, "[%s:%d] Local abort %s completed successfully, but am not able to aggregate error messages, and not able to guarantee that all other processes were killed!\n",
                host, (int) pid, ompi_mpi_finalized ?
                "after MPI_FINALIZE started" : "before MPI_INIT completed");
        _exit(errcode == 0 ? 1 : errcode);
    }

    /* If OMPI is initialized and we have a non-NULL communicator,
       then try to kill just that set of processes */
    if (ompi_mpi_initialized && !ompi_mpi_finalized && NULL != comm) {
        try_kill_peers(comm, errcode);
    }

    /* We can fall through to here in a few cases:

       1. The attempt to kill just a subset of peers via
          try_kill_peers() failed (e.g., as of July 2014, ORTE
          returns NOT_IMPLEMENTED from orte_rte_abort_peers()).
       2. MPI wasn't initialized, was already finalized, or we got a
          NULL communicator.

       In all of these cases, the only sensible thing left to do is to
       kill the entire job.  Wah wah. */
    ompi_rte_abort(errcode, NULL);

    /* Does not return */
}
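
try_kill_peers() is referenced here but not shown. Conceptually it does the same job as the inline loops in code examples #4 and #6: collect every process in the communicator except the caller and hand the list to the runtime. The condensed sketch below makes that assumption explicit; it covers only the local group, reuses the accessors and RTE calls that appear in example #4, and assumes the same headers as the file above, so it is an illustration rather than the actual routine.

/* Condensed sketch of try_kill_peers(): abort every peer in the
 * communicator's local group except ourselves.  The real routine also
 * handles the remote group of an intercommunicator; this version is
 * simplified for illustration. */
static void try_kill_peers(struct ompi_communicator_t *comm, int errcode)
{
    int i, count = 0;
    int size = ompi_comm_size(comm);
    ompi_process_name_t *procs;

    procs = (ompi_process_name_t *)calloc(size, sizeof(ompi_process_name_t));
    if (NULL == procs) {
        /* Out of memory: fall back to aborting the whole job. */
        ompi_rte_abort(errcode, "Abort unable to malloc memory to kill procs");
        return;
    }

    for (i = 0; i < size; ++i) {
        ompi_process_name_t *name =
            &comm->c_local_group->grp_proc_pointers[i]->proc_name;
        if (OPAL_EQUAL != ompi_rte_compare_name_fields(OMPI_RTE_CMP_ALL, name,
                                                       OMPI_PROC_MY_NAME)) {
            procs[count++] = *name;   /* everyone but me */
        }
    }

    if (count > 0) {
        /* May fail (e.g., NOT_IMPLEMENTED), in which case the caller above
         * falls through to ompi_rte_abort(). */
        (void) ompi_rte_abort_peers(procs, count, errcode);
    }
    free(procs);
}
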