static int rte_init(void) { int ret; char *error = NULL; /* run the prolog */ if (ORTE_SUCCESS != (ret = orte_ess_base_std_prolog())) { error = "orte_ess_base_std_prolog"; goto error; } /* Start by getting a unique name from the enviro */ slave_set_name(); /* use the default procedure to finish my setup */ if (ORTE_SUCCESS != (ret = orte_ess_base_app_setup())) { ORTE_ERROR_LOG(ret); error = "orte_ess_base_app_setup"; goto error; } /* init my nidmap arrays - no data can be available, but * we want to ensure that nobody else who looks at * those arrays will segfault */ if (ORTE_SUCCESS != (ret = orte_util_nidmap_init(NULL))) { ORTE_ERROR_LOG(ret); error = "orte_util_nidmap_init"; goto error; } if (ORTE_SUCCESS != (ret = orte_util_setup_local_nidmap_entries())) { ORTE_ERROR_LOG(ret); return ret; } return ORTE_SUCCESS; error: orte_show_help("help-orte-runtime.txt", "orte_init:startup:internal-failure", true, error, ORTE_ERROR_NAME(ret), ret); return ret; }
static int rte_init(void) { int ret; char *error = NULL; char **hosts = NULL; char *slurm_nodelist; /* run the prolog */ if (ORTE_SUCCESS != (ret = orte_ess_base_std_prolog())) { error = "orte_ess_base_std_prolog"; goto error; } /* Start by getting a unique name */ slurm_set_name(); /* if I am a daemon, complete my setup using the * default procedure */ if (ORTE_PROC_IS_DAEMON) { /* get the list of nodes used for this job */ mca_base_param_reg_string_name("orte", "nodelist", "List of nodes in job", true, false, NULL, &slurm_nodelist); if (NULL != slurm_nodelist) { /* split the node list into an argv array */ hosts = opal_argv_split(slurm_nodelist, ','); } if (ORTE_SUCCESS != (ret = orte_ess_base_orted_setup(hosts))) { ORTE_ERROR_LOG(ret); error = "orte_ess_base_orted_setup"; goto error; } opal_argv_free(hosts); return ORTE_SUCCESS; } if (ORTE_PROC_IS_TOOL) { /* otherwise, if I am a tool proc, use that procedure */ if (ORTE_SUCCESS != (ret = orte_ess_base_tool_setup())) { ORTE_ERROR_LOG(ret); error = "orte_ess_base_tool_setup"; goto error; } /* as a tool, I don't need a nidmap - so just return now */ return ORTE_SUCCESS; } /* otherwise, I must be an application process - use * the default procedure to finish my setup */ if (ORTE_SUCCESS != (ret = orte_ess_base_app_setup())) { ORTE_ERROR_LOG(ret); error = "orte_ess_base_app_setup"; goto error; } /* setup the nidmap arrays */ if (ORTE_SUCCESS != (ret = orte_util_nidmap_init(orte_process_info.sync_buf))) { ORTE_ERROR_LOG(ret); error = "orte_util_nidmap_init"; goto error; } return ORTE_SUCCESS; error: orte_show_help("help-orte-runtime.txt", "orte_init:startup:internal-failure", true, error, ORTE_ERROR_NAME(ret), ret); return ret; }
static int rte_init(void) { int ret; char *error = NULL; char **hosts = NULL; /* run the prolog */ if (ORTE_SUCCESS != (ret = orte_ess_base_std_prolog())) { error = "orte_ess_base_std_prolog"; goto error; } /* Start by getting a unique name from the enviro */ env_set_name(); /* if I am a daemon, complete my setup using the * default procedure */ if (ORTE_PROC_IS_DAEMON) { if (NULL != orte_node_regex) { /* extract the nodes */ if (ORTE_SUCCESS != (ret = orte_regex_extract_node_names(orte_node_regex, &hosts))) { error = "orte_regex_extract_node_names"; goto error; } } if (ORTE_SUCCESS != (ret = orte_ess_base_orted_setup(hosts))) { ORTE_ERROR_LOG(ret); error = "orte_ess_base_orted_setup"; goto error; } opal_argv_free(hosts); return ORTE_SUCCESS; } if (ORTE_PROC_IS_TOOL) { /* otherwise, if I am a tool proc, use that procedure */ if (ORTE_SUCCESS != (ret = orte_ess_base_tool_setup())) { ORTE_ERROR_LOG(ret); error = "orte_ess_base_tool_setup"; goto error; } /* as a tool, I don't need a nidmap - so just return now */ return ORTE_SUCCESS; } /* otherwise, I must be an application process - use * the default procedure to finish my setup */ if (ORTE_SUCCESS != (ret = orte_ess_base_app_setup())) { ORTE_ERROR_LOG(ret); error = "orte_ess_base_app_setup"; goto error; } /* if data was provided, update the database */ if (ORTE_SUCCESS != (ret = orte_util_nidmap_init(orte_process_info.sync_buf))) { ORTE_ERROR_LOG(ret); error = "orte_util_nidmap_init"; goto error; } /* setup process binding */ if (ORTE_SUCCESS != (ret = orte_ess_base_proc_binding())) { error = "proc_binding"; goto error; } return ORTE_SUCCESS; error: if (ORTE_ERR_SILENT != ret && !orte_report_silent_errors) { orte_show_help("help-orte-runtime.txt", "orte_init:startup:internal-failure", true, error, ORTE_ERROR_NAME(ret), ret); } return ret; }
static int rte_init(void) { int rc, ret; char *error = NULL; char *envar, *ev1, *ev2; uint64_t unique_key[2]; char *string_key; opal_value_t *kv; char *val; int u32, *u32ptr; uint16_t u16, *u16ptr; orte_process_name_t name; /* run the prolog */ if (ORTE_SUCCESS != (rc = orte_ess_base_std_prolog())) { ORTE_ERROR_LOG(rc); return rc; } u32ptr = &u32; u16ptr = &u16; if (NULL != mca_ess_singleton_component.server_uri) { /* we are going to connect to a server HNP */ if (0 == strncmp(mca_ess_singleton_component.server_uri, "file", strlen("file")) || 0 == strncmp(mca_ess_singleton_component.server_uri, "FILE", strlen("FILE"))) { char input[1024], *filename; FILE *fp; /* it is a file - get the filename */ filename = strchr(mca_ess_singleton_component.server_uri, ':'); if (NULL == filename) { /* filename is not correctly formatted */ orte_show_help("help-orterun.txt", "orterun:ompi-server-filename-bad", true, "singleton", mca_ess_singleton_component.server_uri); return ORTE_ERROR; } ++filename; /* space past the : */ if (0 >= strlen(filename)) { /* they forgot to give us the name! */ orte_show_help("help-orterun.txt", "orterun:ompi-server-filename-missing", true, "singleton", mca_ess_singleton_component.server_uri); return ORTE_ERROR; } /* open the file and extract the uri */ fp = fopen(filename, "r"); if (NULL == fp) { /* can't find or read file! 
*/ orte_show_help("help-orterun.txt", "orterun:ompi-server-filename-access", true, "singleton", mca_ess_singleton_component.server_uri); return ORTE_ERROR; } memset(input, 0, 1024); // initialize the array to ensure a NULL termination if (NULL == fgets(input, 1023, fp)) { /* something malformed about file */ fclose(fp); orte_show_help("help-orterun.txt", "orterun:ompi-server-file-bad", true, "singleton", mca_ess_singleton_component.server_uri, "singleton"); return ORTE_ERROR; } fclose(fp); input[strlen(input)-1] = '\0'; /* remove newline */ orte_process_info.my_hnp_uri = strdup(input); } else { orte_process_info.my_hnp_uri = strdup(mca_ess_singleton_component.server_uri); } /* save the daemon uri - we will process it later */ orte_process_info.my_daemon_uri = strdup(orte_process_info.my_hnp_uri); /* construct our name - we are in their job family, so we know that * much. However, we cannot know how many other singletons and jobs * this HNP is running. Oh well - if someone really wants to use this * option, they can try to figure it out. 
For now, we'll just assume * we are the only ones */ ORTE_PROC_MY_NAME->jobid = ORTE_CONSTRUCT_LOCAL_JOBID(ORTE_PROC_MY_HNP->jobid, 1); /* obviously, we are vpid=0 for this job */ ORTE_PROC_MY_NAME->vpid = 0; /* for convenience, push the pubsub version of this param into the environ */ opal_setenv (OPAL_MCA_PREFIX"pubsub_orte_server", orte_process_info.my_hnp_uri, true, &environ); } else if (NULL != getenv("SINGULARITY_CONTAINER") || mca_ess_singleton_component.isolated) { /* ensure we use the isolated pmix component */ opal_setenv (OPAL_MCA_PREFIX"pmix", "isolated", true, &environ); } else { /* spawn our very own HNP to support us */ if (ORTE_SUCCESS != (rc = fork_hnp())) { ORTE_ERROR_LOG(rc); return rc; } /* our name was given to us by the HNP */ opal_setenv (OPAL_MCA_PREFIX"pmix", "^s1,s2,cray,isolated", true, &environ); } /* get an async event base - we use the opal_async one so * we don't startup extra threads if not needed */ orte_event_base = opal_progress_thread_init(NULL); progress_thread_running = true; /* open and setup pmix */ if (OPAL_SUCCESS != (ret = mca_base_framework_open(&opal_pmix_base_framework, 0))) { error = "opening pmix"; goto error; } if (OPAL_SUCCESS != (ret = opal_pmix_base_select())) { error = "select pmix"; goto error; } /* set the event base */ opal_pmix_base_set_evbase(orte_event_base); /* initialize the selected module */ if (!opal_pmix.initialized() && (OPAL_SUCCESS != (ret = opal_pmix.init()))) { /* we cannot run */ error = "pmix init"; goto error; } /* pmix.init set our process name down in the OPAL layer, * so carry it forward here */ ORTE_PROC_MY_NAME->jobid = OPAL_PROC_MY_NAME.jobid; ORTE_PROC_MY_NAME->vpid = OPAL_PROC_MY_NAME.vpid; name.jobid = OPAL_PROC_MY_NAME.jobid; name.vpid = ORTE_VPID_WILDCARD; /* get our local rank from PMI */ OPAL_MODEX_RECV_VALUE(ret, OPAL_PMIX_LOCAL_RANK, ORTE_PROC_MY_NAME, &u16ptr, OPAL_UINT16); if (OPAL_SUCCESS != ret) { error = "getting local rank"; goto error; } orte_process_info.my_local_rank = 
u16; /* get our node rank from PMI */ OPAL_MODEX_RECV_VALUE(ret, OPAL_PMIX_NODE_RANK, ORTE_PROC_MY_NAME, &u16ptr, OPAL_UINT16); if (OPAL_SUCCESS != ret) { error = "getting node rank"; goto error; } orte_process_info.my_node_rank = u16; /* get max procs */ OPAL_MODEX_RECV_VALUE(ret, OPAL_PMIX_MAX_PROCS, &name, &u32ptr, OPAL_UINT32); if (OPAL_SUCCESS != ret) { error = "getting max procs"; goto error; } orte_process_info.max_procs = u32; /* we are a singleton, so there is only one proc in the job */ orte_process_info.num_procs = 1; /* push into the environ for pickup in MPI layer for * MPI-3 required info key */ if (NULL == getenv(OPAL_MCA_PREFIX"orte_ess_num_procs")) { asprintf(&ev1, OPAL_MCA_PREFIX"orte_ess_num_procs=%d", orte_process_info.num_procs); putenv(ev1); added_num_procs = true; } if (NULL == getenv("OMPI_APP_CTX_NUM_PROCS")) { asprintf(&ev2, "OMPI_APP_CTX_NUM_PROCS=%d", orte_process_info.num_procs); putenv(ev2); added_app_ctx = true; } /* get our app number from PMI - ok if not found */ OPAL_MODEX_RECV_VALUE(ret, OPAL_PMIX_APPNUM, ORTE_PROC_MY_NAME, &u32ptr, OPAL_UINT32); if (OPAL_SUCCESS == ret) { orte_process_info.app_num = u32; } else { orte_process_info.app_num = 0; } /* set some other standard values */ orte_process_info.num_local_peers = 0; /* setup transport keys in case the MPI layer needs them - * we can use the jobfam and stepid as unique keys * because they are unique values assigned by the RM */ if (NULL == getenv(OPAL_MCA_PREFIX"orte_precondition_transports")) { unique_key[0] = ORTE_JOB_FAMILY(ORTE_PROC_MY_NAME->jobid); unique_key[1] = ORTE_LOCAL_JOBID(ORTE_PROC_MY_NAME->jobid); if (NULL == (string_key = orte_pre_condition_transports_print(unique_key))) { ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE); return ORTE_ERR_OUT_OF_RESOURCE; } asprintf(&envar, OPAL_MCA_PREFIX"orte_precondition_transports=%s", string_key); putenv(envar); added_transport_keys = true; /* cannot free the envar as that messes up our environ */ free(string_key); } /* retrieve 
our topology */ OPAL_MODEX_RECV_VALUE(ret, OPAL_PMIX_LOCAL_TOPO, &name, &val, OPAL_STRING); if (OPAL_SUCCESS == ret && NULL != val) { /* load the topology */ if (0 != hwloc_topology_init(&opal_hwloc_topology)) { ret = OPAL_ERROR; free(val); error = "setting topology"; goto error; } if (0 != hwloc_topology_set_xmlbuffer(opal_hwloc_topology, val, strlen(val))) { ret = OPAL_ERROR; free(val); hwloc_topology_destroy(opal_hwloc_topology); error = "setting topology"; goto error; } /* since we are loading this from an external source, we have to * explicitly set a flag so hwloc sets things up correctly */ if (0 != hwloc_topology_set_flags(opal_hwloc_topology, (HWLOC_TOPOLOGY_FLAG_IS_THISSYSTEM | HWLOC_TOPOLOGY_FLAG_WHOLE_SYSTEM | HWLOC_TOPOLOGY_FLAG_IO_DEVICES))) { ret = OPAL_ERROR; hwloc_topology_destroy(opal_hwloc_topology); free(val); error = "setting topology"; goto error; } /* now load the topology */ if (0 != hwloc_topology_load(opal_hwloc_topology)) { ret = OPAL_ERROR; hwloc_topology_destroy(opal_hwloc_topology); free(val); error = "setting topology"; goto error; } free(val); } else { /* it wasn't passed down to us, so go get it */ if (OPAL_SUCCESS != (ret = opal_hwloc_base_get_topology())) { error = "topology discovery"; goto error; } /* push it into the PMIx database in case someone * tries to retrieve it so we avoid an attempt to * get it again */ kv = OBJ_NEW(opal_value_t); kv->key = strdup(OPAL_PMIX_LOCAL_TOPO); kv->type = OPAL_STRING; if (0 != (ret = hwloc_topology_export_xmlbuffer(opal_hwloc_topology, &kv->data.string, &u32))) { error = "topology export"; goto error; } if (OPAL_SUCCESS != (ret = opal_pmix.store_local(ORTE_PROC_MY_NAME, kv))) { error = "topology store"; goto error; } OBJ_RELEASE(kv); } /* use the std app init to complete the procedure */ if (ORTE_SUCCESS != (rc = orte_ess_base_app_setup(true))) { ORTE_ERROR_LOG(rc); return rc; } /* push our hostname so others can find us, if they need to */ OPAL_MODEX_SEND_VALUE(ret, OPAL_PMIX_GLOBAL, 
OPAL_PMIX_HOSTNAME, orte_process_info.nodename, OPAL_STRING); if (ORTE_SUCCESS != ret) { error = "db store hostname"; goto error; } return ORTE_SUCCESS; error: if (ORTE_ERR_SILENT != ret && !orte_report_silent_errors) { orte_show_help("help-orte-runtime.txt", "orte_init:startup:internal-failure", true, error, ORTE_ERROR_NAME(ret), ret); } return ret; }
/*
 * Initialize the RTE for a directly-launched standalone application process
 * (no orted involved). Bootstraps all global job info from OMPI_MCA_*
 * environment variables: jobid, rank, job size, node list and ppn, then
 * constructs the nidmap and pidmap locally and finishes with the standard
 * app setup.
 *
 * Returns ORTE_SUCCESS, or an ORTE error code after emitting the standard
 * startup-failure help message.
 */
static int rte_init(void)
{
    int ret;
    char *error = NULL;
    char **nodes = NULL, **ppnlist = NULL;
    char *envar;
    int32_t jobfam;
    int i, j, *ppn;
    orte_nid_t *node;
    orte_jmap_t *jmap;
    orte_pmap_t *pmap;
    orte_vpid_t vpid;
    bool byslot;

    /* run the prolog */
    if (ORTE_SUCCESS != (ret = orte_ess_base_std_prolog())) {
        error = "orte_ess_base_std_prolog";
        goto error;
    }

    /* Only application procs can use this module. Since we
     * were directly launched by someone, we need to bootstrap
     * our own global info so we can startup. */

    /* ensure that static ports were assigned - otherwise, we cant
     * work since we won't know how to talk to anyone else */
    if (NULL == getenv("OMPI_MCA_oob_tcp_static_ports") &&
        NULL == getenv("OMPI_MCA_oob_tcp_static_ports_v6")) {
        error = "static ports were not assigned";
        goto error;
    }

    /* declare ourselves to be standalone - i.e., not launched by orted */
    orte_standalone_operation = true;

    /* extract a jobid from the environment - can be totally
     * arbitrary. if one isn't provided, just fake it */
    if (NULL != (envar = getenv("OMPI_MCA_orte_jobid"))) {
        jobfam = strtol(envar, NULL, 10);
    } else {
        jobfam = 1;
    }
    ORTE_PROC_MY_NAME->jobid = ORTE_CONSTRUCT_LOCAL_JOBID(0, jobfam);

    /* extract a rank from the environment */
    if (NULL == (envar = getenv("OMPI_MCA_orte_rank"))) {
        error = "could not get process rank";
        goto error;
    }
    ORTE_PROC_MY_NAME->vpid = strtol(envar, NULL, 10);
    ORTE_EPOCH_SET(ORTE_PROC_MY_NAME->epoch,ORTE_EPOCH_MIN);

    OPAL_OUTPUT_VERBOSE((1, orte_ess_base_output,
                         "%s completed name definition",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME)));

    /* get the number of procs in this job */
    if (NULL == (envar = getenv("OMPI_MCA_orte_num_procs"))) {
        error = "could not get number of processes in job";
        goto error;
    }
    orte_process_info.num_procs = strtol(envar, NULL, 10);

    /* never let max_procs fall below the actual job size */
    if (orte_process_info.max_procs < orte_process_info.num_procs) {
        orte_process_info.max_procs = orte_process_info.num_procs;
    }

    /* set the app_num so that MPI attributes get set correctly */
    orte_process_info.app_num = 1;

    /* get the list of nodes */
    if (NULL == (envar = getenv("OMPI_MCA_orte_nodes"))) {
        error = "could not get list of nodes";
        goto error;
    }
    /* break this down */
    nodes = opal_argv_split(envar, ',');
    orte_process_info.num_nodes = opal_argv_count(nodes);

    /* get the ppn (procs-per-node), either a single value applied to
     * every node or one comma-separated value per node */
    if (NULL == (envar = getenv("OMPI_MCA_orte_ppn"))) {
        error = "could not get ppn";
        goto error;
    }
    ppnlist = opal_argv_split(envar, ',');
    /* NOTE(review): malloc result is not checked before use, and if the
     * per-node ppn list is shorter than num_nodes the indexed read below
     * runs past the argv array - assumes the launcher always provides
     * well-formed input; confirm */
    ppn = (int*)malloc(orte_process_info.num_nodes * sizeof(int));
    if (1 == opal_argv_count(ppnlist)) {
        /* constant ppn */
        j = strtol(ppnlist[0], NULL, 10);
        for (i=0; i < orte_process_info.num_nodes; i++) {
            ppn[i] = j;
        }
    } else {
        for (i=0; i < orte_process_info.num_nodes; i++) {
            ppn[i] = strtol(ppnlist[i], NULL, 10);
        }
    }
    opal_argv_free(ppnlist);

    /* get the mapping mode - default to byslot */
    byslot = true;
    if (NULL != (envar = getenv("OMPI_MCA_mapping")) &&
        0 == strcmp(envar, "bynode")) {
        byslot = false;
    }

    /* setup the nidmap arrays */
    if (ORTE_SUCCESS != (ret = orte_util_nidmap_init(NULL))) {
        ORTE_ERROR_LOG(ret);
        error = "orte_util_nidmap_init";
        goto error;
    }

    /* set the size of the nidmap storage so we minimize realloc's */
    if (ORTE_SUCCESS != (ret = opal_pointer_array_set_size(&orte_nidmap, orte_process_info.num_nodes))) {
        error = "could not set pointer array size for nidmap";
        goto error;
    }

    /* construct the nidmap - one entry per node, with the daemon vpid
     * defined to be the node's index */
    for (i=0; i < orte_process_info.num_nodes; i++) {
        node = OBJ_NEW(orte_nid_t);
        /* substitute our own nodename for any alias of the local node so
         * local-ness checks compare equal later */
        if (0 == strcmp(nodes[i], orte_process_info.nodename) || opal_ifislocal(nodes[i])) {
            node->name = strdup(orte_process_info.nodename);
        } else {
            node->name = strdup(nodes[i]);
        }
        node->daemon = i;
        node->index = i;
        opal_pointer_array_set_item(&orte_nidmap, i, node);
    }
    opal_argv_free(nodes);

    /* create a job map for this job */
    jmap = OBJ_NEW(orte_jmap_t);
    jmap->job = ORTE_PROC_MY_NAME->jobid;
    opal_pointer_array_add(&orte_jobmap, jmap);
    /* update the num procs */
    jmap->num_procs = orte_process_info.num_procs;
    /* set the size of the pidmap storage so we minimize realloc's */
    if (ORTE_SUCCESS != (ret = opal_pointer_array_set_size(&jmap->pmap, jmap->num_procs))) {
        ORTE_ERROR_LOG(ret);
        error = "could not set pointer array size for pidmap";
        goto error;
    }

    /* construct the pidmap */
    if (byslot) {
        /* byslot: fill each node with ppn[i] consecutive vpids before
         * moving to the next node */
        vpid = 0;
        for (i=0; i < orte_process_info.num_nodes; i++) {
            node = (orte_nid_t*)opal_pointer_array_get_item(&orte_nidmap, i);
            /* for each node, cycle through the ppn */
            for (j=0; j < ppn[i]; j++) {
                pmap = OBJ_NEW(orte_pmap_t);
                pmap->node = i;
                pmap->local_rank = j;
                pmap->node_rank = j;
                if (ORTE_SUCCESS != (ret = opal_pointer_array_set_item(&jmap->pmap, vpid, pmap))) {
                    ORTE_ERROR_LOG(ret);
                    error = "could not set pmap values";
                    goto error;
                }
                /* if this is me, then define the daemon's vpid to
                 * be the node number */
                if (vpid == ORTE_PROC_MY_NAME->vpid) {
                    ORTE_PROC_MY_DAEMON->jobid = 0;
                    ORTE_PROC_MY_DAEMON->vpid = i;
                    ORTE_EPOCH_SET(ORTE_PROC_MY_DAEMON->epoch,ORTE_PROC_MY_NAME->epoch);
                }
                OPAL_OUTPUT_VERBOSE((1, orte_ess_base_output,
                                     "%s node %d name %s rank %s",
                                     ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                                     (int) node->index, node->name,
                                     ORTE_VPID_PRINT(vpid)));
                vpid++;
            }
        }
    } else {
        /* bynode: cycle across the nodes, assigning one vpid per node per
         * pass; ppn[i] is decremented as slots are consumed, and local/node
         * rank is derived from the remaining count */
        vpid = 0;
        while (vpid < orte_process_info.num_procs) {
            for (i=0; i < orte_process_info.num_nodes && vpid < orte_process_info.num_procs; i++) {
                node = (orte_nid_t*)opal_pointer_array_get_item(&orte_nidmap, i);
                if (0 < ppn[i]) {
                    pmap = OBJ_NEW(orte_pmap_t);
                    pmap->node = i;
                    pmap->local_rank = ppn[i]-1;
                    pmap->node_rank = ppn[i]-1;
                    if (ORTE_SUCCESS != (ret = opal_pointer_array_set_item(&jmap->pmap, vpid, pmap))) {
                        ORTE_ERROR_LOG(ret);
                        error = "could not set pmap values";
                        goto error;
                    }
                    /* if this is me, then define the daemon's vpid to
                     * be the node number */
                    if (vpid == ORTE_PROC_MY_NAME->vpid) {
                        ORTE_PROC_MY_DAEMON->jobid = 0;
                        ORTE_PROC_MY_DAEMON->vpid = i;
                        ORTE_EPOCH_SET(ORTE_PROC_MY_DAEMON->epoch,ORTE_PROC_MY_NAME->epoch);
                    }
                    OPAL_OUTPUT_VERBOSE((1, orte_ess_base_output,
                                         "%s node %d name %s rank %d",
                                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                                         (int) node->index, node->name, (int)vpid));
                    vpid++;
                    --ppn[i];
                }
            }
        }
    }
    free(ppn);

    /* ensure we pick the correct critical components */
    putenv("OMPI_MCA_grpcomm=hier");
    putenv("OMPI_MCA_routed=direct");

    /* use the default procedure to finish my setup */
    if (ORTE_SUCCESS != (ret = orte_ess_base_app_setup())) {
        ORTE_ERROR_LOG(ret);
        error = "orte_ess_base_app_setup";
        goto error;
    }

    if (0 < opal_output_get_verbosity(orte_ess_base_output)) {
        orte_nidmap_dump();
        orte_jobmap_dump();
    }

    return ORTE_SUCCESS;

error:
    /* NOTE(review): ppn is not freed on the error paths reached after its
     * allocation - leak on startup failure */
    orte_show_help("help-orte-runtime.txt",
                   "orte_init:startup:internal-failure",
                   true, error, ORTE_ERROR_NAME(ret), ret);
    return ret;
}
static int rte_init(char flags) { int ret; char *error = NULL; orte_jmap_t *jmap; /* run the prolog */ if (ORTE_SUCCESS != (ret = orte_ess_base_std_prolog())) { error = "orte_ess_base_std_prolog"; goto error; } /* Start by getting a unique name */ slurm_set_name(); /* if I am a daemon, complete my setup using the * default procedure */ if (orte_process_info.daemon) { if (ORTE_SUCCESS != (ret = orte_ess_base_orted_setup())) { ORTE_ERROR_LOG(ret); error = "orte_ess_base_orted_setup"; goto error; } } else if (orte_process_info.tool) { /* otherwise, if I am a tool proc, use that procedure */ if (ORTE_SUCCESS != (ret = orte_ess_base_tool_setup())) { ORTE_ERROR_LOG(ret); error = "orte_ess_base_tool_setup"; goto error; } } else { /* otherwise, I must be an application process - use * the default procedure to finish my setup */ if (ORTE_SUCCESS != (ret = orte_ess_base_app_setup())) { ORTE_ERROR_LOG(ret); error = "orte_ess_base_app_setup"; goto error; } /* setup the nidmap arrays */ OBJ_CONSTRUCT(&nidmap, opal_pointer_array_t); opal_pointer_array_init(&nidmap, 8, INT32_MAX, 8); /* setup array of jmaps */ OBJ_CONSTRUCT(&jobmap, opal_pointer_array_t); opal_pointer_array_init(&jobmap, 1, INT32_MAX, 1); jmap = OBJ_NEW(orte_jmap_t); jmap->job = ORTE_PROC_MY_NAME->jobid; opal_pointer_array_add(&jobmap, jmap); /* if one was provided, build my nidmap */ if (ORTE_SUCCESS != (ret = orte_ess_base_build_nidmap(orte_process_info.sync_buf, &nidmap, &jmap->pmap, &nprocs))) { ORTE_ERROR_LOG(ret); error = "orte_ess_base_build_nidmap"; goto error; } } return ORTE_SUCCESS; error: orte_show_help("help-orte-runtime.txt", "orte_init:startup:internal-failure", true, error, ORTE_ERROR_NAME(ret), ret); return ret; }
static int rte_init(void) { int ret; char *error = NULL; char **hosts = NULL; /* run the prolog */ if (ORTE_SUCCESS != (ret = orte_ess_base_std_prolog())) { error = "orte_ess_base_std_prolog"; goto error; } /* Start by getting a unique name from the enviro */ env_set_name(); /* if I am a daemon, complete my setup using the * default procedure */ if (ORTE_PROC_IS_DAEMON) { if (NULL != orte_node_regex) { /* extract the nodes */ if (ORTE_SUCCESS != (ret = orte_regex_extract_node_names(orte_node_regex, &hosts))) { error = "orte_regex_extract_node_names"; goto error; } } if (ORTE_SUCCESS != (ret = orte_ess_base_orted_setup(hosts))) { ORTE_ERROR_LOG(ret); error = "orte_ess_base_orted_setup"; goto error; } opal_argv_free(hosts); return ORTE_SUCCESS; } if (ORTE_PROC_IS_TOOL) { /* otherwise, if I am a tool proc, use that procedure */ if (ORTE_SUCCESS != (ret = orte_ess_base_tool_setup())) { ORTE_ERROR_LOG(ret); error = "orte_ess_base_tool_setup"; goto error; } /* as a tool, I don't need a nidmap - so just return now */ return ORTE_SUCCESS; } /* use the default procedure to finish my setup */ if (ORTE_SUCCESS != (ret = orte_ess_base_app_setup(true))) { ORTE_ERROR_LOG(ret); error = "orte_ess_base_app_setup"; goto error; } /* if data was provided, update the database */ if (ORTE_SUCCESS != (ret = orte_util_nidmap_init(orte_process_info.sync_buf))) { ORTE_ERROR_LOG(ret); error = "orte_util_nidmap_init"; goto error; } /* setup process binding */ if (ORTE_SUCCESS != (ret = orte_ess_base_proc_binding())) { error = "proc_binding"; goto error; } /* if we are an ORTE app - and not an MPI app - then * we need to exchange our connection info here. * MPI_Init has its own modex, so we don't need to do * two of them. However, if we don't do a modex at all, * then processes have no way to communicate * * NOTE: only do this when the process originally launches. 
* Cannot do this on a restart as the rest of the processes * in the job won't be executing this step, so we would hang */ if (ORTE_PROC_IS_NON_MPI && !orte_do_not_barrier) { orte_grpcomm_collective_t coll; OBJ_CONSTRUCT(&coll, orte_grpcomm_collective_t); coll.id = orte_process_info.peer_modex; coll.active = true; if (ORTE_SUCCESS != (ret = orte_grpcomm.modex(&coll))) { ORTE_ERROR_LOG(ret); error = "orte modex"; goto error; } ORTE_WAIT_FOR_COMPLETION(coll.active); OBJ_DESTRUCT(&coll); } return ORTE_SUCCESS; error: if (ORTE_ERR_SILENT != ret && !orte_report_silent_errors) { orte_show_help("help-orte-runtime.txt", "orte_init:startup:internal-failure", true, error, ORTE_ERROR_NAME(ret), ret); } return ret; }
/*
 * Initialize the RTE for a PMI(x) direct-launched application process.
 *
 * Brings up the PMIx framework on the async event base, then pulls all
 * required job values from PMIx (name, ranks, sizes), sets transport keys,
 * obtains the node topology (from PMIx or local discovery), computes and
 * stores locality for each local peer, completes the app setup and process
 * binding, and finally pushes our RML URI and hostname for others to find.
 *
 * Returns ORTE_SUCCESS, or an ORTE error code after (optionally) emitting
 * the standard startup-failure help message.
 */
static int rte_init(void)
{
    int ret;
    char *error = NULL;
    char *envar, *ev1, *ev2;
    uint64_t unique_key[2];
    char *string_key;
    char *rmluri;
    opal_value_t *kv;
    char *val;
    int u32, *u32ptr;
    uint16_t u16, *u16ptr;
    char **peers=NULL, *mycpuset, **cpusets=NULL;
    opal_process_name_t name;
    size_t i;

    /* run the prolog */
    if (ORTE_SUCCESS != (ret = orte_ess_base_std_prolog())) {
        error = "orte_ess_base_std_prolog";
        goto error;
    }

    /* get an async event base - we use the opal_async one so
     * we don't startup extra threads if not needed */
    orte_event_base = opal_progress_thread_init(NULL);
    progress_thread_running = true;

    /* open and setup pmix */
    if (OPAL_SUCCESS != (ret = mca_base_framework_open(&opal_pmix_base_framework, 0))) {
        ORTE_ERROR_LOG(ret);
        /* we cannot run */
        error = "pmix init";
        goto error;
    }
    if (OPAL_SUCCESS != (ret = opal_pmix_base_select())) {
        /* we cannot run */
        error = "pmix init";
        goto error;
    }
    /* set the event base */
    opal_pmix_base_set_evbase(orte_event_base);
    /* initialize the selected module */
    if (!opal_pmix.initialized() && (OPAL_SUCCESS != (ret = opal_pmix.init()))) {
        /* we cannot run */
        error = "pmix init";
        goto error;
    }
    u32ptr = &u32;
    u16ptr = &u16;

    /**** THE FOLLOWING ARE REQUIRED VALUES ***/
    /* pmix.init set our process name down in the OPAL layer,
     * so carry it forward here */
    ORTE_PROC_MY_NAME->jobid = OPAL_PROC_MY_NAME.jobid;
    ORTE_PROC_MY_NAME->vpid = OPAL_PROC_MY_NAME.vpid;

    /* get our local rank from PMI */
    OPAL_MODEX_RECV_VALUE(ret, OPAL_PMIX_LOCAL_RANK,
                          ORTE_PROC_MY_NAME, &u16ptr, OPAL_UINT16);
    if (OPAL_SUCCESS != ret) {
        error = "getting local rank";
        goto error;
    }
    orte_process_info.my_local_rank = u16;

    /* get our node rank from PMI */
    OPAL_MODEX_RECV_VALUE(ret, OPAL_PMIX_NODE_RANK,
                          ORTE_PROC_MY_NAME, &u16ptr, OPAL_UINT16);
    if (OPAL_SUCCESS != ret) {
        error = "getting node rank";
        goto error;
    }
    orte_process_info.my_node_rank = u16;

    /* get max procs */
    OPAL_MODEX_RECV_VALUE(ret, OPAL_PMIX_MAX_PROCS,
                          ORTE_PROC_MY_NAME, &u32ptr, OPAL_UINT32);
    if (OPAL_SUCCESS != ret) {
        error = "getting max procs";
        goto error;
    }
    orte_process_info.max_procs = u32;

    /* get job size */
    OPAL_MODEX_RECV_VALUE(ret, OPAL_PMIX_JOB_SIZE,
                          ORTE_PROC_MY_NAME, &u32ptr, OPAL_UINT32);
    if (OPAL_SUCCESS != ret) {
        error = "getting job size";
        goto error;
    }
    orte_process_info.num_procs = u32;

    /* push into the environ for pickup in MPI layer for
     * MPI-3 required info key */
    if (NULL == getenv(OPAL_MCA_PREFIX"orte_ess_num_procs")) {
        asprintf(&ev1, OPAL_MCA_PREFIX"orte_ess_num_procs=%d", orte_process_info.num_procs);
        putenv(ev1);
        added_num_procs = true;
    }
    if (NULL == getenv("OMPI_APP_CTX_NUM_PROCS")) {
        asprintf(&ev2, "OMPI_APP_CTX_NUM_PROCS=%d", orte_process_info.num_procs);
        putenv(ev2);
        added_app_ctx = true;
    }

    /* get our app number from PMI - ok if not found */
    OPAL_MODEX_RECV_VALUE_OPTIONAL(ret, OPAL_PMIX_APPNUM,
                                   ORTE_PROC_MY_NAME, &u32ptr, OPAL_UINT32);
    if (OPAL_SUCCESS == ret) {
        orte_process_info.app_num = u32;
    } else {
        orte_process_info.app_num = 0;
    }

    /* get the number of local peers - required for wireup of
     * shared memory BTL */
    OPAL_MODEX_RECV_VALUE(ret, OPAL_PMIX_LOCAL_SIZE,
                          ORTE_PROC_MY_NAME, &u32ptr, OPAL_UINT32);
    if (OPAL_SUCCESS == ret) {
        orte_process_info.num_local_peers = u32 - 1;  // want number besides ourselves
    } else {
        orte_process_info.num_local_peers = 0;
    }

    /* setup transport keys in case the MPI layer needs them -
     * we can use the jobfam and stepid as unique keys
     * because they are unique values assigned by the RM */
    if (NULL == getenv(OPAL_MCA_PREFIX"orte_precondition_transports")) {
        unique_key[0] = ORTE_JOB_FAMILY(ORTE_PROC_MY_NAME->jobid);
        unique_key[1] = ORTE_LOCAL_JOBID(ORTE_PROC_MY_NAME->jobid);
        if (NULL == (string_key = orte_pre_condition_transports_print(unique_key))) {
            ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
            return ORTE_ERR_OUT_OF_RESOURCE;
        }
        opal_output_verbose(2, orte_ess_base_framework.framework_output,
                            "%s transport key %s",
                            ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), string_key);
        asprintf(&envar, OPAL_MCA_PREFIX"orte_precondition_transports=%s", string_key);
        putenv(envar);
        added_transport_keys = true;
        /* cannot free the envar as that messes up our environ */
        free(string_key);
    }

    /* retrieve our topology */
    val = NULL;
    OPAL_MODEX_RECV_VALUE_OPTIONAL(ret, OPAL_PMIX_LOCAL_TOPO,
                                   ORTE_PROC_MY_NAME, &val, OPAL_STRING);
    if (OPAL_SUCCESS == ret && NULL != val) {
        /* load the topology */
        if (0 != hwloc_topology_init(&opal_hwloc_topology)) {
            ret = OPAL_ERROR;
            free(val);
            error = "setting topology";
            goto error;
        }
        if (0 != hwloc_topology_set_xmlbuffer(opal_hwloc_topology, val, strlen(val))) {
            ret = OPAL_ERROR;
            free(val);
            hwloc_topology_destroy(opal_hwloc_topology);
            error = "setting topology";
            goto error;
        }
        /* since we are loading this from an external source, we have to
         * explicitly set a flag so hwloc sets things up correctly */
        if (0 != hwloc_topology_set_flags(opal_hwloc_topology,
                                          (HWLOC_TOPOLOGY_FLAG_IS_THISSYSTEM |
                                           HWLOC_TOPOLOGY_FLAG_WHOLE_SYSTEM |
                                           HWLOC_TOPOLOGY_FLAG_IO_DEVICES))) {
            ret = OPAL_ERROR;
            hwloc_topology_destroy(opal_hwloc_topology);
            free(val);
            error = "setting topology";
            goto error;
        }
        /* now load the topology */
        if (0 != hwloc_topology_load(opal_hwloc_topology)) {
            ret = OPAL_ERROR;
            hwloc_topology_destroy(opal_hwloc_topology);
            free(val);
            error = "setting topology";
            goto error;
        }
        free(val);
        /* filter the cpus thru any default cpu set */
        if (OPAL_SUCCESS != (ret = opal_hwloc_base_filter_cpus(opal_hwloc_topology))) {
            error = "filtering topology";
            goto error;
        }
    } else {
        /* it wasn't passed down to us, so go get it */
        if (OPAL_SUCCESS != (ret = opal_hwloc_base_get_topology())) {
            error = "topology discovery";
            goto error;
        }
        /* push it into the PMIx database in case someone
         * tries to retrieve it so we avoid an attempt to
         * get it again */
        kv = OBJ_NEW(opal_value_t);
        kv->key = strdup(OPAL_PMIX_LOCAL_TOPO);
        kv->type = OPAL_STRING;
        if (0 != (ret = hwloc_topology_export_xmlbuffer(opal_hwloc_topology, &kv->data.string, &u32))) {
            error = "topology export";
            goto error;
        }
        if (OPAL_SUCCESS != (ret = opal_pmix.store_local(ORTE_PROC_MY_NAME, kv))) {
            error = "topology store";
            goto error;
        }
        OBJ_RELEASE(kv);
    }

    /* get our local peers */
    if (0 < orte_process_info.num_local_peers) {
        /* if my local rank is too high, then that's an error */
        if (orte_process_info.num_local_peers < orte_process_info.my_local_rank) {
            ret = ORTE_ERR_BAD_PARAM;
            error = "num local peers";
            goto error;
        }
        /* retrieve the local peers */
        OPAL_MODEX_RECV_VALUE(ret, OPAL_PMIX_LOCAL_PEERS,
                              ORTE_PROC_MY_NAME, &val, OPAL_STRING);
        if (OPAL_SUCCESS == ret && NULL != val) {
            peers = opal_argv_split(val, ',');
            free(val);
            /* and their cpusets, if available */
            OPAL_MODEX_RECV_VALUE_OPTIONAL(ret, OPAL_PMIX_LOCAL_CPUSETS,
                                           ORTE_PROC_MY_NAME, &val, OPAL_STRING);
            if (OPAL_SUCCESS == ret && NULL != val) {
                cpusets = opal_argv_split(val, ':');
                free(val);
            } else {
                cpusets = NULL;
            }
        } else {
            peers = NULL;
            cpusets = NULL;
        }
    } else {
        peers = NULL;
        cpusets = NULL;
    }

    /* set the locality of each local peer and store it in the
     * local PMIx database */
    if (NULL != peers) {
        /* identify our cpuset - indexing peers/cpusets by local rank;
         * assumes both lists are ordered identically by local rank and
         * have matching lengths (TODO confirm against the daemon side) */
        if (NULL != cpusets) {
            mycpuset = cpusets[orte_process_info.my_local_rank];
        } else {
            mycpuset = NULL;
        }
        name.jobid = ORTE_PROC_MY_NAME->jobid;
        for (i=0; NULL != peers[i]; i++) {
            kv = OBJ_NEW(opal_value_t);
            kv->key = strdup(OPAL_PMIX_LOCALITY);
            kv->type = OPAL_UINT16;
            name.vpid = strtoul(peers[i], NULL, 10);
            if (name.vpid == ORTE_PROC_MY_NAME->vpid) {
                /* we are fully local to ourselves */
                u16 = OPAL_PROC_ALL_LOCAL;
            } else if (NULL == mycpuset || NULL == cpusets[i] ||
                       0 == strcmp(cpusets[i], "UNBOUND")) {
                /* all we can say is that it shares our node */
                u16 = OPAL_PROC_ON_CLUSTER | OPAL_PROC_ON_CU | OPAL_PROC_ON_NODE;
            } else {
                /* we have it, so compute the locality */
                u16 = opal_hwloc_base_get_relative_locality(opal_hwloc_topology, mycpuset, cpusets[i]);
            }
            OPAL_OUTPUT_VERBOSE((1, orte_ess_base_framework.framework_output,
                                 "%s ess:pmi:locality: proc %s locality %x",
                                 ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                                 ORTE_NAME_PRINT(&name), u16));
            kv->data.uint16 = u16;
            ret = opal_pmix.store_local(&name, kv);
            if (OPAL_SUCCESS != ret) {
                error = "local store of locality";
                opal_argv_free(peers);
                opal_argv_free(cpusets);
                goto error;
            }
            OBJ_RELEASE(kv);
        }
        opal_argv_free(peers);
        opal_argv_free(cpusets);
    }

    /* now that we have all required info, complete the setup */
    if (ORTE_SUCCESS != (ret = orte_ess_base_app_setup(false))) {
        ORTE_ERROR_LOG(ret);
        error = "orte_ess_base_app_setup";
        goto error;
    }

    /* setup process binding */
    if (ORTE_SUCCESS != (ret = orte_ess_base_proc_binding())) {
        error = "proc_binding";
        goto error;
    }

    /* this needs to be set to enable debugger use when direct launched */
    if (NULL == orte_process_info.my_daemon_uri) {
        orte_standalone_operation = true;
    }

    /* set max procs */
    if (orte_process_info.max_procs < orte_process_info.num_procs) {
        orte_process_info.max_procs = orte_process_info.num_procs;
    }

    /*** PUSH DATA FOR OTHERS TO FIND ***/

    /* push our RML URI in case others need to talk directly to us */
    rmluri = orte_rml.get_contact_info();
    /* push it out for others to use */
    OPAL_MODEX_SEND_VALUE(ret, OPAL_PMIX_GLOBAL, OPAL_PMIX_PROC_URI,
                          rmluri, OPAL_STRING);
    if (ORTE_SUCCESS != ret) {
        /* NOTE(review): rmluri is not freed on this failure path */
        error = "pmix put uri";
        goto error;
    }
    free(rmluri);

    /* push our hostname so others can find us, if they need to */
    OPAL_MODEX_SEND_VALUE(ret, OPAL_PMIX_GLOBAL, OPAL_PMIX_HOSTNAME,
                          orte_process_info.nodename, OPAL_STRING);
    if (ORTE_SUCCESS != ret) {
        error = "db store hostname";
        goto error;
    }

    /* if we are an ORTE app - and not an MPI app - then
     * we need to exchange our connection info here.
     * MPI_Init has its own modex, so we don't need to do
     * two of them. However, if we don't do a modex at all,
     * then processes have no way to communicate
     *
     * NOTE: only do this when the process originally launches.
     * Cannot do this on a restart as the rest of the processes
     * in the job won't be executing this step, so we would hang */
    if (ORTE_PROC_IS_NON_MPI && !orte_do_not_barrier) {
        opal_pmix.fence(NULL, 0);
    }

    return ORTE_SUCCESS;

error:
    if (!progress_thread_running) {
        /* can't send the help message, so ensure it
         * comes out locally */
        orte_show_help_finalize();
    }
    if (ORTE_ERR_SILENT != ret && !orte_report_silent_errors) {
        orte_show_help("help-orte-runtime.txt",
                       "orte_init:startup:internal-failure",
                       true, error, ORTE_ERROR_NAME(ret), ret);
    }
    return ret;
}
static int rte_init(void) { int ret; char *error = NULL; char *envar, *ev1, *ev2; uint64_t unique_key[2]; char *string_key; char *rmluri; opal_value_t *kv, kvn; opal_list_t vals; /* run the prolog */ if (ORTE_SUCCESS != (ret = orte_ess_base_std_prolog())) { error = "orte_ess_base_std_prolog"; goto error; } /* we don't have to call pmix.init because the pmix select did it */ /**** THE FOLLOWING ARE REQUIRED VALUES ***/ /* get our jobid from PMI */ if (!opal_pmix.get_attr(PMIX_JOBID, &kv)) { error = "getting jobid"; ret = ORTE_ERR_NOT_FOUND; goto error; } ORTE_PROC_MY_NAME->jobid = kv->data.uint32; OBJ_RELEASE(kv); /* get our global rank from PMI */ if (!opal_pmix.get_attr(PMIX_RANK, &kv)) { error = "getting rank"; ret = ORTE_ERR_NOT_FOUND; goto error; } ORTE_PROC_MY_NAME->vpid = kv->data.uint32; OBJ_RELEASE(kv); /* get our local rank from PMI */ if (!opal_pmix.get_attr(PMIX_LOCAL_RANK, &kv)) { error = "getting local rank"; ret = ORTE_ERR_NOT_FOUND; goto error; } orte_process_info.my_local_rank = (orte_local_rank_t)kv->data.uint16; OBJ_RELEASE(kv); /* get our node rank from PMI */ if (!opal_pmix.get_attr(PMIX_NODE_RANK, &kv)) { error = "getting node rank"; ret = ORTE_ERR_NOT_FOUND; goto error; } orte_process_info.my_node_rank = (orte_local_rank_t)kv->data.uint16; /* get universe size */ if (!opal_pmix.get_attr(PMIX_UNIV_SIZE, &kv)) { error = "getting univ size"; ret = ORTE_ERR_NOT_FOUND; goto error; } orte_process_info.num_procs = kv->data.uint32; OBJ_RELEASE(kv); /* push into the environ for pickup in MPI layer for * MPI-3 required info key */ if (NULL == getenv(OPAL_MCA_PREFIX"orte_ess_num_procs")) { asprintf(&ev1, OPAL_MCA_PREFIX"orte_ess_num_procs=%d", orte_process_info.num_procs); putenv(ev1); added_num_procs = true; } if (NULL == getenv("OMPI_APP_CTX_NUM_PROCS")) { asprintf(&ev2, "OMPI_APP_CTX_NUM_PROCS=%d", orte_process_info.num_procs); putenv(ev2); added_app_ctx = true; } /* get our app number from PMI - ok if not found */ if (opal_pmix.get_attr(PMIX_APPNUM, 
&kv)) { orte_process_info.app_num = kv->data.uint32; OBJ_RELEASE(kv); } else { orte_process_info.app_num = 0; } /* get the number of local peers - required for wireup of * shared memory BTL */ if (opal_pmix.get_attr(PMIX_LOCAL_SIZE, &kv)) { orte_process_info.num_local_peers = kv->data.uint32 - 1; // want number besides ourselves OBJ_RELEASE(kv); } else { orte_process_info.num_local_peers = 0; } /* setup transport keys in case the MPI layer needs them - * we can use the jobfam and stepid as unique keys * because they are unique values assigned by the RM */ if (NULL == getenv(OPAL_MCA_PREFIX"orte_precondition_transports")) { unique_key[0] = ORTE_JOB_FAMILY(ORTE_PROC_MY_NAME->jobid); unique_key[1] = ORTE_LOCAL_JOBID(ORTE_PROC_MY_NAME->jobid); if (NULL == (string_key = orte_pre_condition_transports_print(unique_key))) { ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE); return ORTE_ERR_OUT_OF_RESOURCE; } asprintf(&envar, OPAL_MCA_PREFIX"orte_precondition_transports=%s", string_key); putenv(envar); added_transport_keys = true; /* cannot free the envar as that messes up our environ */ free(string_key); } #if OPAL_HAVE_HWLOC /* if it wasn't passed down to us, get the topology */ if (NULL == opal_hwloc_topology) { if (OPAL_SUCCESS != (ret = opal_hwloc_base_get_topology())) { error = "topology discovery"; goto error; } } #endif /* we don't need to force the routed system to pick the * "direct" component as that should happen automatically * in those cases where we are direct launched (i.e., no * HNP is defined in the environment */ /* now that we have all required info, complete the setup */ if (ORTE_SUCCESS != (ret = orte_ess_base_app_setup(false))) { ORTE_ERROR_LOG(ret); error = "orte_ess_base_app_setup"; goto error; } /* setup process binding */ if (ORTE_SUCCESS != (ret = orte_ess_base_proc_binding())) { error = "proc_binding"; goto error; } /* this needs to be set to enable debugger use when direct launched */ if (NULL == orte_process_info.my_daemon_uri) { 
orte_standalone_operation = true; } /* set max procs */ if (orte_process_info.max_procs < orte_process_info.num_procs) { orte_process_info.max_procs = orte_process_info.num_procs; } /*** PUSH DATA FOR OTHERS TO FIND ***/ /* if we are direct launched, then push our RML URI - there * is no need to do so when launched by mpirun as all apps * communicate thru their local daemon */ if (orte_standalone_operation) { OBJ_CONSTRUCT(&vals, opal_list_t); if (OPAL_SUCCESS != opal_dstore.fetch(opal_dstore_internal, &OPAL_PROC_MY_NAME, OPAL_DSTORE_URI, &vals)) { /* construct the RTE string */ rmluri = orte_rml.get_contact_info(); /* push it out for others to use */ OBJ_CONSTRUCT(&kvn, opal_value_t); kvn.key = strdup(OPAL_DSTORE_URI); kvn.type = OPAL_STRING; kvn.data.string = strdup(rmluri); if (ORTE_SUCCESS != (ret = opal_pmix.put(PMIX_GLOBAL, &kvn))) { error = "db store uri"; OBJ_DESTRUCT(&kvn); goto error; } OBJ_DESTRUCT(&kvn); free(rmluri); } OPAL_LIST_DESTRUCT(&vals); } /* push our hostname so others can find us, if they need to */ OBJ_CONSTRUCT(&kvn, opal_value_t); kvn.key = strdup(OPAL_DSTORE_HOSTNAME); kvn.type = OPAL_STRING; kvn.data.string = strdup(orte_process_info.nodename); if (ORTE_SUCCESS != (ret = opal_pmix.put(PMIX_GLOBAL, &kvn))) { error = "db store hostname"; OBJ_DESTRUCT(&kvn); goto error; } OBJ_DESTRUCT(&kvn); /* if our local rank was not provided by the system, then * push our local rank so others can access it */ OBJ_CONSTRUCT(&vals, opal_list_t); if (OPAL_SUCCESS != opal_dstore.fetch(opal_dstore_internal, &OPAL_PROC_MY_NAME, OPAL_DSTORE_LOCALRANK, &vals)) { OBJ_CONSTRUCT(&kvn, opal_value_t); kvn.key = strdup(OPAL_DSTORE_LOCALRANK); kvn.type = OPAL_UINT16; kvn.data.uint16 = orte_process_info.my_local_rank; if (ORTE_SUCCESS != (ret = opal_pmix.put(PMIX_GLOBAL, &kvn))) { error = "db store local rank"; OBJ_DESTRUCT(&kvn); goto error; } OBJ_DESTRUCT(&kvn); } OPAL_LIST_DESTRUCT(&vals); /* if our node rank was not provided by the system, then * push our node rank 
so others can access it */ OBJ_CONSTRUCT(&vals, opal_list_t); if (OPAL_SUCCESS != opal_dstore.fetch(opal_dstore_internal, &OPAL_PROC_MY_NAME, OPAL_DSTORE_NODERANK, &vals)) { OBJ_CONSTRUCT(&kvn, opal_value_t); kvn.key = strdup(OPAL_DSTORE_NODERANK); kvn.type = OPAL_UINT16; kvn.data.uint16 = orte_process_info.my_node_rank; if (ORTE_SUCCESS != (ret = opal_pmix.put(PMIX_GLOBAL, &kvn))) { error = "db store node rank"; OBJ_DESTRUCT(&kvn); goto error; } OBJ_DESTRUCT(&kvn); } OPAL_LIST_DESTRUCT(&vals); /* if we are an ORTE app - and not an MPI app - then * we need to exchange our connection info here. * MPI_Init has its own modex, so we don't need to do * two of them. However, if we don't do a modex at all, * then processes have no way to communicate * * NOTE: only do this when the process originally launches. * Cannot do this on a restart as the rest of the processes * in the job won't be executing this step, so we would hang */ if (ORTE_PROC_IS_NON_MPI && !orte_do_not_barrier) { opal_pmix.fence(NULL, 0); } return ORTE_SUCCESS; error: if (ORTE_ERR_SILENT != ret && !orte_report_silent_errors) { orte_show_help("help-orte-runtime.txt", "orte_init:startup:internal-failure", true, error, ORTE_ERROR_NAME(ret), ret); } return ret; }
static int rte_init(void) { int ret, i, j; char *error = NULL, *localj; int32_t jobfam, stepid; char *envar, *ev1, *ev2; uint64_t unique_key[2]; char *cs_env, *string_key; char *pmi_id=NULL; int *ranks; char *tmp; orte_jobid_t jobid; orte_process_name_t proc; orte_local_rank_t local_rank; orte_node_rank_t node_rank; /* run the prolog */ if (ORTE_SUCCESS != (ret = orte_ess_base_std_prolog())) { error = "orte_ess_base_std_prolog"; goto error; } #if OPAL_HAVE_HWLOC /* get the topology */ if (NULL == opal_hwloc_topology) { if (OPAL_SUCCESS != opal_hwloc_base_get_topology()) { error = "topology discovery"; goto error; } } #endif if (ORTE_PROC_IS_DAEMON) { /* I am a daemon, launched by mpirun */ /* we had to be given a jobid */ mca_base_param_reg_string_name("orte", "ess_jobid", "Process jobid", true, false, NULL, &tmp); if (NULL == tmp) { error = "missing jobid"; ret = ORTE_ERR_FATAL; goto error; } if (ORTE_SUCCESS != (ret = orte_util_convert_string_to_jobid(&jobid, tmp))) { ORTE_ERROR_LOG(ret); error = "convert jobid"; goto error; } free(tmp); ORTE_PROC_MY_NAME->jobid = jobid; /* get our rank from PMI */ if (PMI_SUCCESS != (ret = PMI_Get_rank(&i))) { ORTE_PMI_ERROR(ret, "PMI_Get_rank"); error = "could not get PMI rank"; goto error; } ORTE_PROC_MY_NAME->vpid = i + 1; /* compensate for orterun */ /* get the number of procs from PMI */ if (PMI_SUCCESS != (ret = PMI_Get_universe_size(&i))) { ORTE_PMI_ERROR(ret, "PMI_Get_universe_size"); error = "could not get PMI universe size"; goto error; } orte_process_info.num_procs = i + 1; /* compensate for orterun */ /* complete setup */ if (ORTE_SUCCESS != (ret = orte_ess_base_orted_setup(NULL))) { ORTE_ERROR_LOG(ret); error = "orte_ess_base_orted_setup"; goto error; } } else { /* we are a direct-launched MPI process */ /* get our PMI id length */ if (PMI_SUCCESS != (ret = PMI_Get_id_length_max(&pmi_maxlen))) { error = "PMI_Get_id_length_max"; goto error; } pmi_id = malloc(pmi_maxlen); if (PMI_SUCCESS != (ret = 
PMI_Get_kvs_domain_id(pmi_id, pmi_maxlen))) { free(pmi_id); error = "PMI_Get_kvs_domain_id"; goto error; } /* PMI is very nice to us - the domain id is an integer followed * by a '.', followed by essentially a stepid. The first integer * defines an overall job number. The second integer is the number of * individual jobs we have run within that allocation. So we translate * this as the overall job number equating to our job family, and * the individual number equating to our local jobid */ jobfam = strtol(pmi_id, &localj, 10); if (NULL == localj) { /* hmmm - no '.', so let's just use zero */ stepid = 0; } else { localj++; /* step over the '.' */ stepid = strtol(localj, NULL, 10) + 1; /* add one to avoid looking like a daemon */ } free(pmi_id); /* now build the jobid */ ORTE_PROC_MY_NAME->jobid = ORTE_CONSTRUCT_LOCAL_JOBID(jobfam << 16, stepid); /* get our rank */ if (PMI_SUCCESS != (ret = PMI_Get_rank(&i))) { ORTE_PMI_ERROR(ret, "PMI_Get_rank"); error = "could not get PMI rank"; goto error; } ORTE_PROC_MY_NAME->vpid = i; /* get the number of procs from PMI */ if (PMI_SUCCESS != (ret = PMI_Get_universe_size(&i))) { ORTE_PMI_ERROR(ret, "PMI_Get_universe_size"); error = "could not get PMI universe size"; goto error; } orte_process_info.num_procs = i; /* push into the environ for pickup in MPI layer for * MPI-3 required info key */ asprintf(&ev1, "OMPI_MCA_orte_ess_num_procs=%d", i); putenv(ev1); asprintf(&ev2, "OMPI_APP_CTX_NUM_PROCS=%d", i); putenv(ev2); /* setup transport keys in case the MPI layer needs them - * we can use the jobfam and stepid as unique keys * because they are unique values assigned by the RM */ unique_key[0] = (uint64_t)jobfam; unique_key[1] = (uint64_t)stepid; if (NULL == (string_key = orte_pre_condition_transports_print(unique_key))) { ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE); return ORTE_ERR_OUT_OF_RESOURCE; } if (NULL == (cs_env = mca_base_param_environ_variable("orte_precondition_transports",NULL,NULL))) { 
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE); return ORTE_ERR_OUT_OF_RESOURCE; } asprintf(&envar, "%s=%s", cs_env, string_key); putenv(envar); /* cannot free the envar as that messes up our environ */ free(cs_env); free(string_key); /* our app_context number can only be 0 as we don't support * dynamic spawns */ orte_process_info.app_num = 0; /* setup my daemon's name - arbitrary, since we don't route * messages */ ORTE_PROC_MY_DAEMON->jobid = 0; ORTE_PROC_MY_DAEMON->vpid = 0; /* ensure we pick the correct critical components */ putenv("OMPI_MCA_grpcomm=pmi"); putenv("OMPI_MCA_routed=direct"); /* now use the default procedure to finish my setup */ if (ORTE_SUCCESS != (ret = orte_ess_base_app_setup())) { ORTE_ERROR_LOG(ret); error = "orte_ess_base_app_setup"; goto error; } /* store our info into the database */ if (ORTE_SUCCESS != (ret = orte_db.store(ORTE_PROC_MY_NAME, ORTE_DB_HOSTNAME, orte_process_info.nodename, OPAL_STRING))) { error = "db store daemon vpid"; goto error; } /* get our local proc info to find our local rank */ if (PMI_SUCCESS != (ret = PMI_Get_clique_size(&i))) { ORTE_PMI_ERROR(ret, "PMI_Get_clique_size"); error = "could not get PMI clique size"; goto error; } /* store that info - remember, we want the number of peers that * share the node WITH ME, so we have to subtract ourselves from * that number */ orte_process_info.num_local_peers = i - 1; /* now get the specific ranks */ ranks = (int*)malloc(i * sizeof(int)); if (PMI_SUCCESS != (ret = PMI_Get_clique_ranks(ranks, i))) { ORTE_PMI_ERROR(ret, "PMI_Get_clique_ranks"); error = "could not get clique ranks"; goto error; } /* The clique ranks are returned in rank order, so * cycle thru the array and update the local/node * rank info */ proc.jobid = ORTE_PROC_MY_NAME->jobid; for (j=0; j < i; j++) { proc.vpid = ranks[j]; local_rank = j; node_rank = j; if (ranks[j] == (int)ORTE_PROC_MY_NAME->vpid) { orte_process_info.my_local_rank = local_rank; orte_process_info.my_node_rank = node_rank; } if (ORTE_SUCCESS 
!= (ret = orte_db.store(&proc, ORTE_DB_LOCALRANK, &local_rank, ORTE_LOCAL_RANK))) { error = "db store local rank"; goto error; } if (ORTE_SUCCESS != (ret = orte_db.store(&proc, ORTE_DB_NODERANK, &node_rank, ORTE_NODE_RANK))) { error = "db store node rank"; goto error; } } free(ranks); /* setup process binding */ if (ORTE_SUCCESS != (ret = orte_ess_base_proc_binding())) { error = "proc_binding"; goto error; } } /* set max procs */ if (orte_process_info.max_procs < orte_process_info.num_procs) { orte_process_info.max_procs = orte_process_info.num_procs; } /* flag that we completed init */ app_init_complete = true; return ORTE_SUCCESS; error: if (ORTE_ERR_SILENT != ret && !orte_report_silent_errors) { orte_show_help("help-orte-runtime.txt", "orte_init:startup:internal-failure", true, error, ORTE_ERROR_NAME(ret), ret); } return ret; }
static int rte_init(void) { int ret; char *error = NULL; char *envar, *ev1, *ev2; uint64_t unique_key[2]; char *string_key; opal_value_t *kv; char *val; int u32, *u32ptr; uint16_t u16, *u16ptr; char **peers=NULL, *mycpuset; opal_process_name_t wildcard_rank, pname; bool bool_val, *bool_ptr = &bool_val, tdir_mca_override = false; size_t i; /* run the prolog */ if (ORTE_SUCCESS != (ret = orte_ess_base_std_prolog())) { error = "orte_ess_base_std_prolog"; goto error; } /* get an async event base - we use the opal_async one so * we don't startup extra threads if not needed */ orte_event_base = opal_progress_thread_init(NULL); progress_thread_running = true; /* open and setup pmix */ if (OPAL_SUCCESS != (ret = mca_base_framework_open(&opal_pmix_base_framework, 0))) { ORTE_ERROR_LOG(ret); /* we cannot run */ error = "pmix init"; goto error; } if (OPAL_SUCCESS != (ret = opal_pmix_base_select())) { /* we cannot run */ error = "pmix init"; goto error; } /* set the event base */ opal_pmix_base_set_evbase(orte_event_base); /* initialize the selected module */ if (!opal_pmix.initialized() && (OPAL_SUCCESS != (ret = opal_pmix.init(NULL)))) { /* we cannot run - this could be due to being direct launched * without the required PMI support being built. 
Try to detect * that scenario and warn the user */ if (ORTE_SCHIZO_DIRECT_LAUNCHED == orte_schizo.check_launch_environment() && NULL != (envar = getenv("ORTE_SCHIZO_DETECTION"))) { if (0 == strcmp(envar, "SLURM")) { /* yes to both - so emit a hopefully helpful * error message and abort */ orte_show_help_finalize(); orte_show_help("help-ess-base.txt", "slurm-error", true); return ORTE_ERR_SILENT; } else if (0 == strcmp(envar, "ALPS")) { /* we were direct launched by ALPS */ orte_show_help_finalize(); orte_show_help("help-ess-base.txt", "alps-error", true); return ORTE_ERR_SILENT; } } error = "pmix init"; goto error; } u32ptr = &u32; u16ptr = &u16; /**** THE FOLLOWING ARE REQUIRED VALUES ***/ /* pmix.init set our process name down in the OPAL layer, * so carry it forward here */ ORTE_PROC_MY_NAME->jobid = OPAL_PROC_MY_NAME.jobid; ORTE_PROC_MY_NAME->vpid = OPAL_PROC_MY_NAME.vpid; /* setup a name for retrieving data associated with the job */ wildcard_rank.jobid = ORTE_PROC_MY_NAME->jobid; wildcard_rank.vpid = ORTE_NAME_WILDCARD->vpid; /* setup a name for retrieving proc-specific data */ pname.jobid = ORTE_PROC_MY_NAME->jobid; pname.vpid = 0; /* get our local rank from PMI */ OPAL_MODEX_RECV_VALUE(ret, OPAL_PMIX_LOCAL_RANK, ORTE_PROC_MY_NAME, &u16ptr, OPAL_UINT16); if (OPAL_SUCCESS != ret) { error = "getting local rank"; goto error; } orte_process_info.my_local_rank = u16; /* get our node rank from PMI */ OPAL_MODEX_RECV_VALUE(ret, OPAL_PMIX_NODE_RANK, ORTE_PROC_MY_NAME, &u16ptr, OPAL_UINT16); if (OPAL_SUCCESS != ret) { error = "getting node rank"; goto error; } orte_process_info.my_node_rank = u16; /* get max procs for this application */ OPAL_MODEX_RECV_VALUE(ret, OPAL_PMIX_MAX_PROCS, &wildcard_rank, &u32ptr, OPAL_UINT32); if (OPAL_SUCCESS != ret) { error = "getting max procs"; goto error; } orte_process_info.max_procs = u32; /* get job size */ OPAL_MODEX_RECV_VALUE(ret, OPAL_PMIX_JOB_SIZE, &wildcard_rank, &u32ptr, OPAL_UINT32); if (OPAL_SUCCESS != ret) { error = 
"getting job size"; goto error; } orte_process_info.num_procs = u32; /* push into the environ for pickup in MPI layer for * MPI-3 required info key */ if (NULL == getenv(OPAL_MCA_PREFIX"orte_ess_num_procs")) { asprintf(&ev1, OPAL_MCA_PREFIX"orte_ess_num_procs=%d", orte_process_info.num_procs); putenv(ev1); added_num_procs = true; } if (NULL == getenv("OMPI_APP_CTX_NUM_PROCS")) { asprintf(&ev2, "OMPI_APP_CTX_NUM_PROCS=%d", orte_process_info.num_procs); putenv(ev2); added_app_ctx = true; } /* get our app number from PMI - ok if not found */ OPAL_MODEX_RECV_VALUE_OPTIONAL(ret, OPAL_PMIX_APPNUM, ORTE_PROC_MY_NAME, &u32ptr, OPAL_UINT32); if (OPAL_SUCCESS == ret) { orte_process_info.app_num = u32; } else { orte_process_info.app_num = 0; } /* get the number of local peers - required for wireup of * shared memory BTL */ OPAL_MODEX_RECV_VALUE(ret, OPAL_PMIX_LOCAL_SIZE, &wildcard_rank, &u32ptr, OPAL_UINT32); if (OPAL_SUCCESS == ret) { orte_process_info.num_local_peers = u32 - 1; // want number besides ourselves } else { orte_process_info.num_local_peers = 0; } /* get number of nodes in the job */ OPAL_MODEX_RECV_VALUE_OPTIONAL(ret, OPAL_PMIX_NUM_NODES, &wildcard_rank, &u32ptr, OPAL_UINT32); if (OPAL_SUCCESS == ret) { orte_process_info.num_nodes = u32; } /* setup transport keys in case the MPI layer needs them - * we can use the jobfam and stepid as unique keys * because they are unique values assigned by the RM */ if (NULL == getenv(OPAL_MCA_PREFIX"orte_precondition_transports")) { unique_key[0] = ORTE_JOB_FAMILY(ORTE_PROC_MY_NAME->jobid); unique_key[1] = ORTE_LOCAL_JOBID(ORTE_PROC_MY_NAME->jobid); if (NULL == (string_key = orte_pre_condition_transports_print(unique_key))) { ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE); return ORTE_ERR_OUT_OF_RESOURCE; } opal_output_verbose(2, orte_ess_base_framework.framework_output, "%s transport key %s", ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), string_key); asprintf(&envar, OPAL_MCA_PREFIX"orte_precondition_transports=%s", string_key); 
putenv(envar); added_transport_keys = true; /* cannot free the envar as that messes up our environ */ free(string_key); } /* retrieve temp directories info */ OPAL_MODEX_RECV_VALUE_OPTIONAL(ret, OPAL_PMIX_TMPDIR, &wildcard_rank, &val, OPAL_STRING); if (OPAL_SUCCESS == ret && NULL != val) { /* We want to provide user with ability * to override RM settings at his own risk */ if( NULL == orte_process_info.top_session_dir ){ orte_process_info.top_session_dir = val; } else { /* keep the MCA setting */ tdir_mca_override = true; free(val); } val = NULL; } if( !tdir_mca_override ){ OPAL_MODEX_RECV_VALUE_OPTIONAL(ret, OPAL_PMIX_NSDIR, &wildcard_rank, &val, OPAL_STRING); if (OPAL_SUCCESS == ret && NULL != val) { /* We want to provide user with ability * to override RM settings at his own risk */ if( NULL == orte_process_info.job_session_dir ){ orte_process_info.job_session_dir = val; } else { /* keep the MCA setting */ free(val); tdir_mca_override = true; } val = NULL; } } if( !tdir_mca_override ){ OPAL_MODEX_RECV_VALUE_OPTIONAL(ret, OPAL_PMIX_PROCDIR, &wildcard_rank, &val, OPAL_STRING); if (OPAL_SUCCESS == ret && NULL != val) { /* We want to provide user with ability * to override RM settings at his own risk */ if( NULL == orte_process_info.proc_session_dir ){ orte_process_info.proc_session_dir = val; } else { /* keep the MCA setting */ tdir_mca_override = true; free(val); } val = NULL; } } if( !tdir_mca_override ){ OPAL_MODEX_RECV_VALUE_OPTIONAL(ret, OPAL_PMIX_TDIR_RMCLEAN, &wildcard_rank, &bool_ptr, OPAL_BOOL); if (OPAL_SUCCESS == ret ) { orte_process_info.rm_session_dirs = bool_val; } } /* get our local peers */ if (0 < orte_process_info.num_local_peers) { /* if my local rank if too high, then that's an error */ if (orte_process_info.num_local_peers < orte_process_info.my_local_rank) { ret = ORTE_ERR_BAD_PARAM; error = "num local peers"; goto error; } /* retrieve the local peers */ OPAL_MODEX_RECV_VALUE(ret, OPAL_PMIX_LOCAL_PEERS, &wildcard_rank, &val, OPAL_STRING); if 
(OPAL_SUCCESS == ret && NULL != val) { peers = opal_argv_split(val, ','); free(val); } else { peers = NULL; } } else { peers = NULL; } /* set the locality */ if (NULL != peers) { /* identify our location */ val = NULL; OPAL_MODEX_RECV_VALUE_OPTIONAL(ret, OPAL_PMIX_LOCALITY_STRING, ORTE_PROC_MY_NAME, &val, OPAL_STRING); if (OPAL_SUCCESS == ret && NULL != val) { mycpuset = val; } else { mycpuset = NULL; } pname.jobid = ORTE_PROC_MY_NAME->jobid; for (i=0; NULL != peers[i]; i++) { pname.vpid = strtoul(peers[i], NULL, 10); if (pname.vpid == ORTE_PROC_MY_NAME->vpid) { /* we are fully local to ourselves */ u16 = OPAL_PROC_ALL_LOCAL; } else { val = NULL; OPAL_MODEX_RECV_VALUE_OPTIONAL(ret, OPAL_PMIX_LOCALITY_STRING, &pname, &val, OPAL_STRING); if (OPAL_SUCCESS == ret && NULL != val) { u16 = opal_hwloc_compute_relative_locality(mycpuset, val); free(val); } else { /* all we can say is that it shares our node */ u16 = OPAL_PROC_ON_CLUSTER | OPAL_PROC_ON_CU | OPAL_PROC_ON_NODE; } } kv = OBJ_NEW(opal_value_t); kv->key = strdup(OPAL_PMIX_LOCALITY); kv->type = OPAL_UINT16; OPAL_OUTPUT_VERBOSE((1, orte_ess_base_framework.framework_output, "%s ess:pmi:locality: proc %s locality %s", ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), ORTE_NAME_PRINT(&pname), opal_hwloc_base_print_locality(u16))); kv->data.uint16 = u16; ret = opal_pmix.store_local(&pname, kv); if (OPAL_SUCCESS != ret) { error = "local store of locality"; opal_argv_free(peers); if (NULL != mycpuset) { free(mycpuset); } goto error; } OBJ_RELEASE(kv); } opal_argv_free(peers); if (NULL != mycpuset) { free(mycpuset); } } /* now that we have all required info, complete the setup */ if (ORTE_SUCCESS != (ret = orte_ess_base_app_setup(false))) { ORTE_ERROR_LOG(ret); error = "orte_ess_base_app_setup"; goto error; } /* setup process binding */ if (ORTE_SUCCESS != (ret = orte_ess_base_proc_binding())) { error = "proc_binding"; goto error; } /* this needs to be set to enable debugger use when direct launched */ if (NULL == 
orte_process_info.my_daemon_uri) { orte_standalone_operation = true; } /* set max procs */ if (orte_process_info.max_procs < orte_process_info.num_procs) { orte_process_info.max_procs = orte_process_info.num_procs; } /* push our hostname so others can find us, if they need to - the * native PMIx component will ignore this request as the hostname * is provided by the system */ OPAL_MODEX_SEND_VALUE(ret, OPAL_PMIX_GLOBAL, OPAL_PMIX_HOSTNAME, orte_process_info.nodename, OPAL_STRING); if (ORTE_SUCCESS != ret) { error = "db store hostname"; goto error; } /* if we are an ORTE app - and not an MPI app - then * we need to exchange our connection info here. * MPI_Init has its own modex, so we don't need to do * two of them. However, if we don't do a modex at all, * then processes have no way to communicate * * NOTE: only do this when the process originally launches. * Cannot do this on a restart as the rest of the processes * in the job won't be executing this step, so we would hang */ if (ORTE_PROC_IS_NON_MPI && !orte_do_not_barrier) { /* need to commit the data before we fence */ opal_pmix.commit(); opal_pmix.fence(NULL, 0); } return ORTE_SUCCESS; error: if (!progress_thread_running) { /* can't send the help message, so ensure it * comes out locally */ orte_show_help_finalize(); } if (ORTE_ERR_SILENT != ret && !orte_report_silent_errors) { orte_show_help("help-orte-runtime.txt", "orte_init:startup:internal-failure", true, error, ORTE_ERROR_NAME(ret), ret); } return ret; }
static int rte_init(void) { int ret, i; char *error = NULL; char **hosts = NULL; /* run the prolog */ if (ORTE_SUCCESS != (ret = orte_ess_base_std_prolog())) { error = "orte_ess_base_std_prolog"; goto error; } /* Start by getting a unique name */ alps_set_name(); /* if I am a daemon, complete my setup using the * default procedure */ if (ORTE_PROC_IS_DAEMON) { if (NULL != orte_node_regex) { /* extract the nodes */ if (ORTE_SUCCESS != (ret = orte_regex_extract_node_names(orte_node_regex, &hosts)) || NULL == hosts) { error = "orte_regex_extract_node_names"; goto error; } /* find our host in the list */ for (i=0; NULL != hosts[i]; i++) { if (0 == strncmp(hosts[i], orte_process_info.nodename, strlen(hosts[i]))) { /* correct our vpid */ ORTE_PROC_MY_NAME->vpid = starting_vpid + i; OPAL_OUTPUT_VERBOSE((1, orte_ess_base_output, "ess:alps reset name to %s", ORTE_NAME_PRINT(ORTE_PROC_MY_NAME))); break; } } } if (ORTE_SUCCESS != (ret = orte_ess_base_orted_setup(hosts))) { ORTE_ERROR_LOG(ret); error = "orte_ess_base_orted_setup"; goto error; } opal_argv_free(hosts); return ORTE_SUCCESS; } if (ORTE_PROC_IS_TOOL) { /* otherwise, if I am a tool proc, use that procedure */ if (ORTE_SUCCESS != (ret = orte_ess_base_tool_setup())) { ORTE_ERROR_LOG(ret); error = "orte_ess_base_tool_setup"; goto error; } /* as a tool, I don't need a nidmap - so just return now */ return ORTE_SUCCESS; } /* otherwise, I must be an application process - use * the default procedure to finish my setup */ if (ORTE_SUCCESS != (ret = orte_ess_base_app_setup())) { ORTE_ERROR_LOG(ret); error = "orte_ess_base_app_setup"; goto error; } /* setup the nidmap arrays */ if (ORTE_SUCCESS != (ret = orte_util_nidmap_init(orte_process_info.sync_buf))) { ORTE_ERROR_LOG(ret); error = "orte_util_nidmap_init"; goto error; } return ORTE_SUCCESS; error: orte_show_help("help-orte-runtime.txt", "orte_init:startup:internal-failure", true, error, ORTE_ERROR_NAME(ret), ret); return ret; }