Example #1
/* only APPS call this function - daemons have their own */
int orte_util_decode_pidmap(opal_byte_object_t *bo)
{
    orte_vpid_t i, num_procs, *vptr, daemon;
    orte_vpid_t *daemons=NULL;
    orte_local_rank_t *local_rank=NULL;
    orte_node_rank_t *node_rank=NULL;
#if OPAL_HAVE_HWLOC
    opal_hwloc_level_t bind_level = OPAL_HWLOC_NODE_LEVEL, pbind, *lvptr;
    unsigned int *bind_idx=NULL, pbidx, *uiptr;
#endif
    opal_hwloc_locality_t locality;
    orte_std_cntr_t n;
    opal_buffer_t buf;
    int rc;
    orte_proc_state_t *states = NULL;
    orte_app_idx_t *app_idx = NULL;
    int32_t *restarts = NULL;
    orte_process_name_t proc, dmn;
    orte_namelist_t *nm;
    opal_list_t jobs;
    char *hostname;

    /* xfer the byte object to a buffer for unpacking */
    OBJ_CONSTRUCT(&buf, opal_buffer_t);
    if (ORTE_SUCCESS != (rc = opal_dss.load(&buf, bo->bytes, bo->size))) {
        ORTE_ERROR_LOG(rc);
        goto cleanup;
    }
    
    n = 1;
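    /* note: "n" is in/out for opal_dss.unpack - on input it caps the
     * number of elements to unpack, on output it reports the number
     * actually unpacked */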
    /* cycle through the buffer */
    OBJ_CONSTRUCT(&jobs, opal_list_t);
    while (ORTE_SUCCESS == (rc = opal_dss.unpack(&buf, &proc.jobid, &n, ORTE_JOBID))) {
        OPAL_OUTPUT_VERBOSE((2, orte_nidmap_output,
                             "%s orte:util:decode:pidmap working job %s",
                             ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                             ORTE_JOBID_PRINT(proc.jobid)));
        /* record the jobid */
        nm = OBJ_NEW(orte_namelist_t);
        nm->name.jobid = proc.jobid;
        opal_list_append(&jobs, &nm->super);

        /* unpack and store the number of procs */
        n=1;
        if (ORTE_SUCCESS != (rc = opal_dss.unpack(&buf, &num_procs, &n, ORTE_VPID))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup;
        }
        proc.vpid = ORTE_VPID_INVALID;
        if (ORTE_SUCCESS != (rc = orte_db.store(&proc, ORTE_DB_NPROCS, &num_procs, ORTE_VPID))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup;
        }

#if OPAL_HAVE_HWLOC
        /* unpack and store the binding level */
        n=1;
        if (ORTE_SUCCESS != (rc = opal_dss.unpack(&buf, &bind_level, &n, OPAL_HWLOC_LEVEL_T))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup;
        }
        /* store it */
        proc.vpid = ORTE_VPID_INVALID;
        if (ORTE_SUCCESS != (rc = orte_db.store(&proc, ORTE_DB_BIND_LEVEL, &bind_level, OPAL_HWLOC_LEVEL_T))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup;
        }
        /* set mine */
        if (proc.jobid == ORTE_PROC_MY_NAME->jobid) {
            orte_process_info.bind_level = bind_level;
        }

        OPAL_OUTPUT_VERBOSE((2, orte_nidmap_output,
                             "%s orte:util:decode:pidmap nprocs %s bind level %s",
                             ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                             ORTE_VPID_PRINT(num_procs),
                             opal_hwloc_base_print_level(bind_level)));
#endif

        /* allocate memory for the daemon info */
        daemons = (orte_vpid_t*)malloc(num_procs * sizeof(orte_vpid_t));
        /* unpack it in one shot */
        n=num_procs;
        if (ORTE_SUCCESS != (rc = opal_dss.unpack(&buf, daemons, &n, ORTE_VPID))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup;
        }

        /* allocate memory for local ranks */
        local_rank = (orte_local_rank_t*)malloc(num_procs*sizeof(orte_local_rank_t));
        /* unpack them in one shot */
        n=num_procs;
        if (ORTE_SUCCESS != (rc = opal_dss.unpack(&buf, local_rank, &n, ORTE_LOCAL_RANK))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup;
        }
        if (proc.jobid == ORTE_PROC_MY_NAME->jobid) {
            /* set mine */
            orte_process_info.my_local_rank = local_rank[ORTE_PROC_MY_NAME->vpid];
            if (ORTE_SUCCESS != (rc = orte_db.store(ORTE_PROC_MY_NAME, ORTE_DB_LOCALRANK,
                                                    &orte_process_info.my_local_rank, ORTE_LOCAL_RANK))) {
                ORTE_ERROR_LOG(rc);
                goto cleanup;
            }
        }
        
        /* allocate memory for node ranks */
        node_rank = (orte_node_rank_t*)malloc(num_procs*sizeof(orte_node_rank_t));
        /* unpack node ranks in one shot */
        n=num_procs;
        if (ORTE_SUCCESS != (rc = opal_dss.unpack(&buf, node_rank, &n, ORTE_NODE_RANK))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup;
        }
        if (proc.jobid == ORTE_PROC_MY_NAME->jobid) {
            /* set mine */
            orte_process_info.my_node_rank = node_rank[ORTE_PROC_MY_NAME->vpid];
            if (ORTE_SUCCESS != (rc = orte_db.store(ORTE_PROC_MY_NAME, ORTE_DB_NODERANK,
                                                    &orte_process_info.my_node_rank, ORTE_NODE_RANK))) {
                ORTE_ERROR_LOG(rc);
                goto cleanup;
            }
        }
        
#if OPAL_HAVE_HWLOC
        /* allocate memory for bind_idx */
        bind_idx = (unsigned int*)malloc(num_procs*sizeof(unsigned int));
        /* unpack bind_idx in one shot */
        n=num_procs;
        if (ORTE_SUCCESS != (rc = opal_dss.unpack(&buf, bind_idx, &n, OPAL_UINT))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup;
        }
        if (proc.jobid == ORTE_PROC_MY_NAME->jobid) {
            /* set mine */
            orte_process_info.bind_idx = bind_idx[ORTE_PROC_MY_NAME->vpid];
            if (ORTE_SUCCESS != (rc = orte_db.store(ORTE_PROC_MY_NAME, ORTE_DB_BIND_INDEX,
                                                    &orte_process_info.bind_idx, OPAL_UINT))) {
                ORTE_ERROR_LOG(rc);
                goto cleanup;
            }
        }
#endif

        /* allocate memory for states */
        states = (orte_proc_state_t*)malloc(num_procs*sizeof(orte_proc_state_t));
        /* unpack states in one shot */
        n=num_procs;
        if (ORTE_SUCCESS != (rc = opal_dss.unpack(&buf, states, &n, ORTE_PROC_STATE))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup;
        }
        /* dump this info - apps don't need it */
        free(states);
        states = NULL;

        /* allocate memory for app_idx's */
        app_idx = (orte_app_idx_t*)malloc(num_procs*sizeof(orte_app_idx_t));
        /* unpack app_idx's in one shot */
        n=num_procs;
        if (ORTE_SUCCESS != (rc = opal_dss.unpack(&buf, app_idx, &n, ORTE_APP_IDX))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup;
        }
        /* dump this info - apps don't need it */
        free(app_idx);
        app_idx = NULL;

        /* allocate memory for restarts */
        restarts = (int32_t*)malloc(num_procs*sizeof(int32_t));
        /* unpack restarts in one shot */
        n=num_procs;
        if (ORTE_SUCCESS != (rc = opal_dss.unpack(&buf, restarts, &n, OPAL_INT32))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup;
        }
        /* dump this info - apps don't need it */
        free(restarts);
        restarts = NULL;

        /* set the daemon jobid */
        dmn.jobid = ORTE_DAEMON_JOBID(ORTE_PROC_MY_NAME->jobid);
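        /* note: the daemons are always local job 0 of the job family */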

        /* xfer the data */
        for (i=0; i < num_procs; i++) {
            if (proc.jobid == ORTE_PROC_MY_NAME->jobid &&
                i == ORTE_PROC_MY_NAME->vpid) {
                continue;
            }
            proc.vpid = i;
            if (ORTE_SUCCESS != (rc = orte_db.store(&proc, ORTE_DB_DAEMON_VPID, &daemons[i], ORTE_VPID))) {
                ORTE_ERROR_LOG(rc);
                goto cleanup;
            }
            /* lookup and store the hostname for this proc */
            dmn.vpid = daemons[i];
            if (ORTE_SUCCESS != (rc = orte_db.fetch_pointer(&dmn, ORTE_DB_HOSTNAME, (void**)&hostname, OPAL_STRING))) {
                ORTE_ERROR_LOG(rc);
                goto cleanup;
            }
            if (ORTE_SUCCESS != (rc = orte_db.store(&proc, ORTE_DB_HOSTNAME, hostname, OPAL_STRING))) {
                ORTE_ERROR_LOG(rc);
                goto cleanup;
            }
            if (ORTE_SUCCESS != (rc = orte_db.store(&proc, ORTE_DB_LOCALRANK, &local_rank[i], ORTE_LOCAL_RANK))) {
                ORTE_ERROR_LOG(rc);
                goto cleanup;
            }
            if (ORTE_SUCCESS != (rc = orte_db.store(&proc, ORTE_DB_NODERANK, &node_rank[i], ORTE_NODE_RANK))) {
                ORTE_ERROR_LOG(rc);
                goto cleanup;
            }
#if OPAL_HAVE_HWLOC
            if (ORTE_SUCCESS != (rc = orte_db.store(&proc, ORTE_DB_BIND_INDEX, &bind_idx[i], OPAL_UINT))) {
                ORTE_ERROR_LOG(rc);
                goto cleanup;
            }
            OPAL_OUTPUT_VERBOSE((10, orte_nidmap_output,
                                 "%s orte:util:decode:pidmap proc %s host %s lrank %d nrank %d bindidx %u",
                                 ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                                 ORTE_NAME_PRINT(&proc), hostname,
                                 (int)local_rank[i], (int)node_rank[i], bind_idx[i]));
#endif
        }
        /* release data */
        free(daemons);
        daemons = NULL;
        free(local_rank);
        local_rank = NULL;
        free(node_rank);
        node_rank = NULL;
#if OPAL_HAVE_HWLOC
        free(bind_idx);
        bind_idx = NULL;
#endif
        /* setup for next cycle */
        n = 1;
    }
    if (ORTE_ERR_UNPACK_READ_PAST_END_OF_BUFFER != rc) {
        ORTE_ERROR_LOG(rc);
        goto cleanup;
    }
    rc = ORTE_SUCCESS;

    /* now that we have all the data, we are guaranteed
     * to know our own node, so go back and record the
     * locality of each proc relative to me
     */
    while (NULL != (nm = (orte_namelist_t*)opal_list_remove_first(&jobs))) {
        proc.jobid = nm->name.jobid;
        /* recover the number of procs in this job */
        vptr = &num_procs;
        proc.vpid = ORTE_VPID_INVALID;
        if (ORTE_SUCCESS != (rc = orte_db.fetch(&proc, ORTE_DB_NPROCS, (void**)&vptr, ORTE_VPID))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup;
        }

        for (i=0; i < num_procs; i++) {
            if (ORTE_PROC_MY_NAME->vpid == i &&
                ORTE_PROC_MY_NAME->jobid == proc.jobid) {
                /* this is me */
                continue;
            }
            proc.vpid = i;
            /* recover the daemon for this proc */
            vptr = &daemon;
            if (ORTE_SUCCESS != (rc = orte_db.fetch(&proc, ORTE_DB_DAEMON_VPID, (void**)&vptr, ORTE_VPID))) {
                ORTE_ERROR_LOG(rc);
                goto cleanup;
            }
            if (daemon == ORTE_PROC_MY_DAEMON->vpid) {
                OPAL_OUTPUT_VERBOSE((2, orte_nidmap_output,
                                     "%s orte:util:decode:pidmap proc %s shares node",
                                     ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                                     ORTE_NAME_PRINT(&proc)));
                /* we share a node, so add them to the count of peers
                 * sharing the node with me
                 */
                orte_process_info.num_local_peers++;
#if OPAL_HAVE_HWLOC
                /* retrieve the bind level for the other proc's job */
                lvptr = &pbind;
                proc.vpid = ORTE_VPID_INVALID;
                if (ORTE_SUCCESS != (rc = orte_db.fetch(&proc, ORTE_DB_BIND_LEVEL, (void**)&lvptr, OPAL_HWLOC_LEVEL_T))) {
                    ORTE_ERROR_LOG(rc);
                    goto cleanup;
                }

                /* retrieve the other proc's bind idx */
                uiptr = &pbidx;
                proc.vpid = i;
                if (ORTE_SUCCESS != (rc = orte_db.fetch(&proc, ORTE_DB_BIND_INDEX, (void**)&uiptr, OPAL_UINT))) {
                    ORTE_ERROR_LOG(rc);
                    goto cleanup;
                }

                /* we share a node - see what else we share */
                locality = opal_hwloc_base_get_relative_locality(opal_hwloc_topology,
                                                                 orte_process_info.bind_level,
                                                                 orte_process_info.bind_idx,
                                                                 pbind, pbidx);
#else
                locality = OPAL_PROC_ON_NODE;
#endif
            } else {
                /* we don't share a node */
                OPAL_OUTPUT_VERBOSE((2, orte_nidmap_output,
                                     "%s orte:util:decode:pidmap proc %s does NOT node [my daemon %s, their daemon %s]",
                                     ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                                     ORTE_NAME_PRINT(&proc),
                                     ORTE_VPID_PRINT(ORTE_PROC_MY_DAEMON->vpid),
                                     ORTE_VPID_PRINT(daemon)));
                locality = OPAL_PROC_NON_LOCAL;
            }
            /* store the locality */
            OPAL_OUTPUT_VERBOSE((2, orte_nidmap_output,
                                 "%s orte:util:decode:pidmap set proc %s locality to %s",
                                 ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                                 ORTE_NAME_PRINT(&proc),
                                 opal_hwloc_base_print_locality(locality)));
            if (ORTE_SUCCESS != (rc = orte_db.store(&proc, ORTE_DB_LOCALITY, &locality, OPAL_HWLOC_LOCALITY_T))) {
                ORTE_ERROR_LOG(rc);
                goto cleanup;
            }
        }
    }

 cleanup:
    if (NULL != daemons) {
        free(daemons);
    }
    if (NULL != local_rank) {
        free(local_rank);
    }
    if (NULL != node_rank) {
        free(node_rank);
    }
#if OPAL_HAVE_HWLOC
    if (NULL != bind_idx) {
        free(bind_idx);
    }
#endif
    if (NULL != states) {
        free(states);
    }
    if (NULL != app_idx) {
        free(app_idx);
    }
    if (NULL != restarts) {
        free(restarts);
    }
    OBJ_DESTRUCT(&buf);
    return rc;
}
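
The decode function above leans on one convention throughout: the process count travels ahead of the data, so the receiver can size its allocations and unpack each per-process array "in one shot". The standalone sketch below illustrates that convention with plain memcpy standing in for the opal_dss pack/unpack machinery; for simplicity it carries the count alongside each array, and it omits the type checking that opal_dss performs. The pidmap_* names are illustrative only and are not part of ORTE.

/* Minimal standalone sketch of the "unpack in one shot" pattern used
 * above.  Plain memcpy stands in for opal_dss; the pidmap_* names are
 * illustrative, not part of ORTE. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
    uint8_t *bytes;   /* serialized payload */
    size_t   size;    /* total bytes packed */
    size_t   pos;     /* unpack cursor */
} pidmap_buf_t;

/* pack: element count first, then the raw array */
static void pidmap_pack_u32(pidmap_buf_t *b, const uint32_t *vals, uint32_t n)
{
    b->bytes = realloc(b->bytes, b->size + sizeof(n) + n * sizeof(*vals));
    memcpy(b->bytes + b->size, &n, sizeof(n));
    b->size += sizeof(n);
    memcpy(b->bytes + b->size, vals, n * sizeof(*vals));
    b->size += n * sizeof(*vals);
}

/* unpack "in one shot": read the count, allocate once, bulk-copy */
static uint32_t *pidmap_unpack_u32(pidmap_buf_t *b, uint32_t *n)
{
    uint32_t *vals;
    memcpy(n, b->bytes + b->pos, sizeof(*n));
    b->pos += sizeof(*n);
    vals = malloc(*n * sizeof(*vals));
    memcpy(vals, b->bytes + b->pos, *n * sizeof(*vals));
    b->pos += *n * sizeof(*vals);
    return vals;
}

int main(void)
{
    pidmap_buf_t buf = { NULL, 0, 0 };
    uint32_t daemons[] = { 0, 0, 1, 1 };   /* proc i lives on daemon[i] */
    uint32_t n, i, *out;

    pidmap_pack_u32(&buf, daemons, 4);
    out = pidmap_unpack_u32(&buf, &n);
    for (i = 0; i < n; i++) {
        printf("proc %u -> daemon %u\n", i, out[i]);
    }
    free(out);
    free(buf.bytes);
    return 0;
}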
Example #2
int orte_plm_base_fork_hnp(void)
{
    int p[2], death_pipe[2];
    char *cmd;
    char **argv = NULL;
    int argc;
    char *param, *cptr, *pmix_uri;
    sigset_t sigs;
    int buffer_length, num_chars_read, chunk;
    char *orted_uri;
    int rc;
    orte_jobid_t jobid;

    /* A pipe is used to communicate between the parent and child to
       indicate whether the exec ultimately succeeded or failed.  The
       child sets the pipe to be close-on-exec; the child only ever
       writes anything to the pipe if there is an error (e.g.,
       executable not found, exec() fails, etc.).  The parent does a
       blocking read on the pipe; if the pipe closed with no data,
       then the exec() succeeded.  If the parent reads something from
       the pipe, then the child was letting us know that it failed.
    */
    if (pipe(p) < 0) {
        ORTE_ERROR_LOG(ORTE_ERR_SYS_LIMITS_PIPES);
        return ORTE_ERR_SYS_LIMITS_PIPES;
    }
    
    /* we also have to give the HNP a pipe it can watch to know when
     * we terminated. Since the HNP is going to be a child of us, it
     * can't just use waitpid to see when we leave - so it will watch
     * the pipe instead
     */
    if (pipe(death_pipe) < 0) {
        ORTE_ERROR_LOG(ORTE_ERR_SYS_LIMITS_PIPES);
        return ORTE_ERR_SYS_LIMITS_PIPES;
    }
    
    /* find the orted binary using the install_dirs support - this also
     * checks to ensure that we can see this executable and it *is* executable by us
     */
    cmd = opal_path_access("orted", opal_install_dirs.bindir, X_OK);
    if (NULL == cmd) {
        /* guess we couldn't do it - best to abort */
        ORTE_ERROR_LOG(ORTE_ERR_FILE_NOT_EXECUTABLE);
        close(p[0]);
        close(p[1]);
        return ORTE_ERR_FILE_NOT_EXECUTABLE;
    }
    
    /* okay, setup an appropriate argv */
    opal_argv_append(&argc, &argv, "orted");
    
    /* tell the daemon it is to be the HNP */
    opal_argv_append(&argc, &argv, "--hnp");

    /* tell the daemon to get out of our process group */
    opal_argv_append(&argc, &argv, "--set-sid");
    
    /* tell the daemon to report back its uri so we can connect to it */
    opal_argv_append(&argc, &argv, "--report-uri");
    asprintf(&param, "%d", p[1]);
    opal_argv_append(&argc, &argv, param);
    free(param);
    
    /* give the daemon a pipe it can watch to tell when we have died */
    opal_argv_append(&argc, &argv, "--singleton-died-pipe");
    asprintf(&param, "%d", death_pipe[0]);
    opal_argv_append(&argc, &argv, param);
    free(param);
    
    /* add any debug flags */
    if (orte_debug_flag) {
        opal_argv_append(&argc, &argv, "--debug");
    }

    if (orte_debug_daemons_flag) {
        opal_argv_append(&argc, &argv, "--debug-daemons");
    }
    
    if (orte_debug_daemons_file_flag) {
        if (!orte_debug_daemons_flag) {
            opal_argv_append(&argc, &argv, "--debug-daemons");
        }
        opal_argv_append(&argc, &argv, "--debug-daemons-file");
    }
    
    /* indicate that it must use the novm state machine */
    opal_argv_append(&argc, &argv, "-"OPAL_MCA_CMD_LINE_ID);
    opal_argv_append(&argc, &argv, "state_novm_select");
    opal_argv_append(&argc, &argv, "1");

    /* pass it a jobid to match my job family */
    opal_argv_append(&argc, &argv, "-"OPAL_MCA_CMD_LINE_ID);
    opal_argv_append(&argc, &argv, "ess_base_jobid");
    jobid = ORTE_DAEMON_JOBID(ORTE_PROC_MY_NAME->jobid);
    if (ORTE_SUCCESS != (rc = orte_util_convert_jobid_to_string(&param, jobid))) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }
    opal_argv_append(&argc, &argv, param);
    free(param);

    /* Fork off the child */
    orte_process_info.hnp_pid = fork();
    if(orte_process_info.hnp_pid < 0) {
        ORTE_ERROR_LOG(ORTE_ERR_SYS_LIMITS_CHILDREN);
        close(p[0]);
        close(p[1]);
        close(death_pipe[0]);
        close(death_pipe[1]);
        free(cmd);
        opal_argv_free(argv);
        return ORTE_ERR_SYS_LIMITS_CHILDREN;
    }
    
    if (orte_process_info.hnp_pid == 0) {
        close(p[0]);
        close(death_pipe[1]);
        /* I am the child - exec me */
        
        /* Set signal handlers back to the default.  Do this close
           to the execve() because the event library may (and likely
           will) reset them.  If we don't do this, the event
           library may have left some set that, at least on some
           OS's, don't get reset via fork() or exec().  Hence, the
           orted could be unkillable (for example). */
        set_handler_default(SIGTERM);
        set_handler_default(SIGINT);
        set_handler_default(SIGHUP);
        set_handler_default(SIGPIPE);
        set_handler_default(SIGCHLD);
        
        /* Unblock all signals, for many of the same reasons that
           we set the default handlers, above.  This is noticable
           on Linux where the event library blocks SIGTERM, but we
           don't want that blocked by the orted (or, more
           specifically, we don't want it to be blocked by the
           orted and then inherited by the ORTE processes that it
           forks, making them unkillable by SIGTERM). */
        sigprocmask(0, 0, &sigs);
        sigprocmask(SIG_UNBLOCK, &sigs, 0);
        
        execv(cmd, argv);
        
        /* if I get here, the execv failed! */
        orte_show_help("help-ess-base.txt", "ess-base:execv-error",
                       true, cmd, strerror(errno));
        exit(1);
        
    } else {
        /* I am the parent - wait to hear something back and
         * report results
         */
        close(p[1]);  /* parent closes its copy of the write end - the orted will write its contact info to the pipe */
        close(death_pipe[0]);  /* parent closes the death_pipe's read */
        opal_argv_free(argv);
        
        /* setup the buffer to read the HNP's uri */
        buffer_length = ORTE_URI_MSG_LGTH;
        chunk = ORTE_URI_MSG_LGTH-1;
        num_chars_read = 0;
        orted_uri = (char*)malloc(buffer_length);

        /* read in chunk-sized pieces: a read that returns exactly
         * "chunk" bytes means the buffer may be full, so grow it and
         * keep reading; a short or zero read ends the message */
        while (chunk == (rc = read(p[0], &orted_uri[num_chars_read], chunk))) {
            /* we read an entire buffer - better get more */
            num_chars_read += chunk;
            buffer_length += ORTE_URI_MSG_LGTH;
            orted_uri = realloc((void*)orted_uri, buffer_length);
        }
        num_chars_read += rc;

        if (num_chars_read <= 0) {
            /* we didn't get anything back - this is bad */
            ORTE_ERROR_LOG(ORTE_ERR_HNP_COULD_NOT_START);
            free(orted_uri);
            return ORTE_ERR_HNP_COULD_NOT_START;
        }

        /* parse the sysinfo from the returned info - must
         * start from the end of the string as the uri itself
         * can contain brackets */
        if (NULL == (param = strrchr(orted_uri, '['))) {
            ORTE_ERROR_LOG(ORTE_ERR_COMM_FAILURE);
            free(orted_uri);
            return ORTE_ERR_COMM_FAILURE;
        }
        *param = '\0'; /* terminate the uri string */
        ++param;  /* point to the start of the sysinfo */

        /* find the end of the sysinfo */
        if (NULL == (cptr = strchr(param, ']'))) {
            ORTE_ERROR_LOG(ORTE_ERR_COMM_FAILURE);
            free(orted_uri);
            return ORTE_ERR_COMM_FAILURE;
        }
        *cptr = '\0';  /* terminate the sysinfo string */
        ++cptr;  /* point to the start of the pmix uri */

        /* convert the sysinfo string */
        if (ORTE_SUCCESS != (rc = orte_util_convert_string_to_sysinfo(&orte_local_cpu_type,
                                                                      &orte_local_cpu_model, param))) {
            ORTE_ERROR_LOG(rc);
            free(orted_uri);
            return rc;
        }

        /* save the daemon uri - we will process it later */
        orte_process_info.my_daemon_uri = strdup(orted_uri);
        /* Set the contact info in the RML - this won't actually establish
         * the connection, but just tells the RML how to reach the daemon
         * if/when we attempt to send to it
         */
        orte_rml.set_contact_info(orte_process_info.my_daemon_uri);
        if (ORTE_SUCCESS != (rc = orte_rml_base_parse_uris(orte_process_info.my_daemon_uri,
                                                           ORTE_PROC_MY_DAEMON, NULL))) {
            ORTE_ERROR_LOG(rc);
            return rc;
        }

        /* likewise, since this is also the HNP, set that uri too */
        orte_process_info.my_hnp_uri = strdup(orted_uri);
        orte_rml.set_contact_info(orte_process_info.my_hnp_uri);
        if (ORTE_SUCCESS != (rc = orte_rml_base_parse_uris(orte_process_info.my_hnp_uri,
                                                           ORTE_PROC_MY_HNP, NULL))) {
            ORTE_ERROR_LOG(rc);
            return rc;
        }

        /* push the pmix_uri into our environment - need to protect it */
        (void)asprintf(&pmix_uri, "PMIX_SERVER_URI=%s", cptr);
        putenv(pmix_uri);
        /* now re-init the pmix framework so we can connect when required */
        if (OPAL_SUCCESS != (rc = opal_pmix.init())) {
            ORTE_ERROR_LOG(rc);
            return rc;
        }
        /* now call fence to push our own modex data into the
         * newly-launched HNP in case someone else needs it */
        if (OPAL_SUCCESS != (rc = opal_pmix.fence(NULL, 0))) {
            ORTE_ERROR_LOG(rc);
            return rc;
        }
        /* all done - report success */
        free(orted_uri);
        return ORTE_SUCCESS;
    }
}
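
The pipe handshake documented at the top of orte_plm_base_fork_hnp is a general POSIX pattern that can be shown in isolation. The following self-contained sketch is not ORTE code: the child marks the write end of the pipe close-on-exec, so a successful exec() closes the pipe with no data, while a failed exec() reports errno through it. Running it as "./a.out /nonexistent" exercises the failure path.

/* Standalone sketch of the close-on-exec pipe handshake described in
 * the comment at the top of orte_plm_base_fork_hnp - not ORTE code. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/wait.h>
#include <unistd.h>

int main(int argc, char **argv)
{
    int p[2], err;
    ssize_t nread;
    pid_t pid;
    char *cmd = (argc > 1) ? argv[1] : "true";

    if (pipe(p) < 0) {
        perror("pipe");
        return 1;
    }

    pid = fork();
    if (pid < 0) {
        perror("fork");
        return 1;
    }

    if (0 == pid) {
        /* child: close the read end, mark the write end close-on-exec,
         * then exec the target */
        close(p[0]);
        fcntl(p[1], F_SETFD, FD_CLOEXEC);
        execlp(cmd, cmd, (char*)NULL);
        /* only reached if the exec failed - report errno to the parent */
        err = errno;
        write(p[1], &err, sizeof(err));
        exit(1);
    }

    /* parent: close the write end and do a blocking read; EOF with no
     * data means the exec succeeded */
    close(p[1]);
    nread = read(p[0], &err, sizeof(err));
    close(p[0]);
    waitpid(pid, NULL, 0);

    if (nread > 0) {
        fprintf(stderr, "exec of %s failed: %s\n", cmd, strerror(err));
        return 1;
    }
    printf("exec of %s succeeded\n", cmd);
    return 0;
}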
Example #3
/* decode a nodemap for an application process */
int orte_util_decode_nodemap(opal_byte_object_t *bo)
{
    int n;
    int32_t num_nodes, i, num_daemons;
    orte_process_name_t daemon;
    opal_buffer_t buf;
    int rc;
    uint8_t oversub;
    char *nodename;

    OPAL_OUTPUT_VERBOSE((1, orte_nidmap_output,
                         "%s decode:nidmap decoding nodemap",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME)));

    /* xfer the byte object to a buffer for unpacking */
    OBJ_CONSTRUCT(&buf, opal_buffer_t);
    if (ORTE_SUCCESS != (rc = opal_dss.load(&buf, bo->bytes, bo->size))) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }
    
    /* unpack number of nodes */
    n=1;
    if (ORTE_SUCCESS != (rc = opal_dss.unpack(&buf, &num_nodes, &n, OPAL_INT32))) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }
 
    OPAL_OUTPUT_VERBOSE((1, orte_nidmap_output,
                         "%s decode:nidmap decoding %d nodes",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), num_nodes));
    
    /* set the daemon jobid */
    daemon.jobid = ORTE_DAEMON_JOBID(ORTE_PROC_MY_NAME->jobid);

    num_daemons = 0;
    for (i=0; i < num_nodes; i++) {
        /* unpack the daemon vpid */
        n=1;
        if (ORTE_SUCCESS != (rc = opal_dss.unpack(&buf, &daemon.vpid, &n, ORTE_VPID))) {
            ORTE_ERROR_LOG(rc);
            return rc;
        }
        if (ORTE_VPID_INVALID != daemon.vpid) {
            ++num_daemons;
        }
        /* unpack and store the node's name */
        n=1;
        if (ORTE_SUCCESS != (rc = opal_dss.unpack(&buf, &nodename, &n, OPAL_STRING))) {
            ORTE_ERROR_LOG(rc);
            return rc;
        }
        if (ORTE_SUCCESS != (rc = orte_db.store(&daemon, ORTE_DB_HOSTNAME, nodename, OPAL_STRING))) {
            ORTE_ERROR_LOG(rc);
            return rc;
        }
        /* now store a direct reference so we can quickly lookup the daemon from a hostname */
        if (ORTE_SUCCESS != (rc = orte_db.store(ORTE_NAME_WILDCARD, nodename, &daemon.vpid, ORTE_VPID))) {
            ORTE_ERROR_LOG(rc);
            return rc;
        }

        OPAL_OUTPUT_VERBOSE((2, orte_nidmap_output,
                             "%s orte:util:decode:nidmap daemon %s node %s",
                             ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                             ORTE_VPID_PRINT(daemon.vpid), nodename));

        /* if this is my daemon, then store the data for me too */
        if (daemon.vpid == ORTE_PROC_MY_DAEMON->vpid) {
            if (ORTE_SUCCESS != (rc = orte_db.store(ORTE_PROC_MY_NAME, ORTE_DB_HOSTNAME, nodename, OPAL_STRING))) {
                ORTE_ERROR_LOG(rc);
                return rc;
            }
            if (ORTE_SUCCESS != (rc = orte_db.store(ORTE_PROC_MY_NAME, ORTE_DB_DAEMON_VPID, &daemon.vpid, ORTE_VPID))) {
                ORTE_ERROR_LOG(rc);
                return rc;
            }
        }

        /* if requested, unpack any aliases */
        if (orte_retain_aliases) {
            char *alias;
            uint8_t naliases, ni;
            n=1;
            if (ORTE_SUCCESS != (rc = opal_dss.unpack(&buf, &naliases, &n, OPAL_UINT8))) {
                ORTE_ERROR_LOG(rc);
                return rc;
            }
            for (ni=0; ni < naliases; ni++) {
                n=1;
                if (ORTE_SUCCESS != (rc = opal_dss.unpack(&buf, &alias, &n, OPAL_STRING))) {
                    ORTE_ERROR_LOG(rc);
                    return rc;
                }
                /* store a cross-reference to the daemon for this nodename */
                if (ORTE_SUCCESS != (rc = orte_db.store(ORTE_NAME_WILDCARD, alias, &daemon.vpid, ORTE_VPID))) {
                    ORTE_ERROR_LOG(rc);
                    return rc;
                }
                free(alias);
            }
        }

        /* done with the node name - the database stores its own copy */
        free(nodename);

        /* unpack and discard the oversubscribed flag - procs don't need it */
        n=1;
        if (ORTE_SUCCESS != (rc = opal_dss.unpack(&buf, &oversub, &n, OPAL_UINT8))) {
            ORTE_ERROR_LOG(rc);
            return rc;
        }
    }
    
    /* update num_daemons */
    orte_process_info.num_daemons = num_daemons;
    
    OBJ_DESTRUCT(&buf);
    return ORTE_SUCCESS;
}
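
orte_util_decode_nodemap consumes a byte object produced by its counterpart encoder (orte_util_encode_nodemap in the same source file). Stripped of the alias and oversubscription fields, the essential wire format is a node count followed by (daemon vpid, hostname) pairs. The standalone round trip below sketches that shape - memcpy stands in for opal_dss, and the nodemap_* names are illustrative only, not part of ORTE.

/* Minimal round-trip sketch of the nodemap wire shape decoded above:
 * a node count followed by (daemon vpid, NUL-terminated hostname)
 * pairs.  memcpy stands in for opal_dss; the nodemap_* names are
 * illustrative, not part of ORTE. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static uint8_t *nodemap_encode(const uint32_t *vpids, char *const *names,
                               int32_t nnodes)
{
    size_t size = sizeof(nnodes);
    int32_t i;
    uint8_t *bytes, *p;

    for (i = 0; i < nnodes; i++) {
        size += sizeof(uint32_t) + strlen(names[i]) + 1;
    }
    bytes = p = malloc(size);
    memcpy(p, &nnodes, sizeof(nnodes));
    p += sizeof(nnodes);
    for (i = 0; i < nnodes; i++) {
        memcpy(p, &vpids[i], sizeof(uint32_t));
        p += sizeof(uint32_t);
        strcpy((char*)p, names[i]);
        p += strlen(names[i]) + 1;
    }
    return bytes;
}

static void nodemap_decode(const uint8_t *bytes)
{
    int32_t nnodes, i;
    uint32_t vpid;

    memcpy(&nnodes, bytes, sizeof(nnodes));
    bytes += sizeof(nnodes);
    for (i = 0; i < nnodes; i++) {
        memcpy(&vpid, bytes, sizeof(vpid));
        bytes += sizeof(vpid);
        printf("daemon %u -> node %s\n", vpid, (const char*)bytes);
        bytes += strlen((const char*)bytes) + 1;
    }
}

int main(void)
{
    const uint32_t vpids[] = { 0, 1 };
    char *names[] = { "node0", "node1" };
    uint8_t *bo = nodemap_encode(vpids, names, 2);

    nodemap_decode(bo);
    free(bo);
    return 0;
}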