/*
 * Setup the environment of a locally-forked child process for an "ompi"
 * personality job.
 *
 * Declines with ORTE_ERR_TAKE_NEXT_OPTION if the job's personality list
 * does not include "ompi".  Otherwise merges the daemon's launch environ
 * into app->env (taking ownership of, and freeing, the previous app->env)
 * and adds the ORTE/OMPI MCA parameters and MPI-3 informational envars the
 * child needs.
 *
 * @param jdata  job the child belongs to (read: personality, sizes, apps)
 * @param app    app context whose env array is rebuilt in place
 * @return ORTE_SUCCESS, ORTE_ERR_TAKE_NEXT_OPTION, or ORTE_ERR_NOT_FOUND
 *
 * Review fixes:
 *  - "&param" had been mangled to the literal bytes "¶m" by an HTML-entity
 *    encoding pass; restored throughout (code did not compile before).
 *  - The MPI-3 envar loop reused "app" as its cursor, clobbering the
 *    caller's pointer so the trailing opal_setenv() calls targeted the env
 *    of the LAST app context in the job rather than the one passed in.
 *    A separate cursor variable is now used.
 */
static int setup_fork(orte_job_t *jdata, orte_app_context_t *app)
{
    int i;
    char *param;
    bool oversubscribed;
    orte_node_t *node;
    char **envcpy, **nps, **firstranks;
    char *npstring, *firstrankstring;
    char *num_app_ctx;
    bool takeus = false;
    orte_app_context_t *capp;   /* loop cursor - must NOT clobber "app" */

    /* see if we are included */
    for (i=0; NULL != jdata->personality[i]; i++) {
        if (0 == strcmp(jdata->personality[i], "ompi")) {
            takeus = true;
            break;
        }
    }
    if (!takeus) {
        return ORTE_ERR_TAKE_NEXT_OPTION;
    }

    /* see if the mapper thinks we are oversubscribed */
    oversubscribed = false;
    if (NULL == (node = (orte_node_t*)opal_pointer_array_get_item(orte_node_pool, ORTE_PROC_MY_NAME->vpid))) {
        ORTE_ERROR_LOG(ORTE_ERR_NOT_FOUND);
        return ORTE_ERR_NOT_FOUND;
    }
    if (ORTE_FLAG_TEST(node, ORTE_NODE_FLAG_OVERSUBSCRIBED)) {
        oversubscribed = true;
    }

    /* setup base environment: copy the current environ and merge
       in the app context environ */
    if (NULL != app->env) {
        /* manually free original context->env to avoid a memory leak */
        char **tmp = app->env;
        envcpy = opal_environ_merge(orte_launch_environ, app->env);
        if (NULL != tmp) {
            opal_argv_free(tmp);
        }
    } else {
        envcpy = opal_argv_copy(orte_launch_environ);
    }
    app->env = envcpy;

    /* special case handling for --prefix: this is somewhat icky,
       but at least some users do this.  :-\ It is possible that
       when using --prefix, the user will also "-x PATH" and/or
       "-x LD_LIBRARY_PATH", which would therefore clobber the
       work that was done in the prior pls to ensure that we have
       the prefix at the beginning of the PATH and
       LD_LIBRARY_PATH.  So examine the context->env and see if we
       find PATH or LD_LIBRARY_PATH.  If found, that means the
       prior work was clobbered, and we need to re-prefix those
       variables. */
    param = NULL;
    orte_get_attribute(&app->attributes, ORTE_APP_PREFIX_DIR, (void**)&param, OPAL_STRING);
    for (i = 0; NULL != param && NULL != app->env && NULL != app->env[i]; ++i) {
        char *newenv;
        /* Reset PATH */
        if (0 == strncmp("PATH=", app->env[i], 5)) {
            asprintf(&newenv, "%s/bin:%s", param, app->env[i] + 5);
            opal_setenv("PATH", newenv, true, &app->env);
            free(newenv);
        }
        /* Reset LD_LIBRARY_PATH */
        else if (0 == strncmp("LD_LIBRARY_PATH=", app->env[i], 16)) {
            asprintf(&newenv, "%s/lib:%s", param, app->env[i] + 16);
            opal_setenv("LD_LIBRARY_PATH", newenv, true, &app->env);
            free(newenv);
        }
    }
    if (NULL != param) {
        free(param);
    }

    /* pass my contact info to the local proc so we can talk */
    opal_setenv("OMPI_MCA_orte_local_daemon_uri", orte_process_info.my_daemon_uri, true, &app->env);

    /* pass the hnp's contact info to the local proc in case it
     * needs it */
    if (NULL != orte_process_info.my_hnp_uri) {
        opal_setenv("OMPI_MCA_orte_hnp_uri", orte_process_info.my_hnp_uri, true, &app->env);
    }

    /* setup yield schedule - do not override any user-supplied directive! */
    if (oversubscribed) {
        opal_setenv("OMPI_MCA_mpi_yield_when_idle", "1", false, &app->env);
    } else {
        opal_setenv("OMPI_MCA_mpi_yield_when_idle", "0", false, &app->env);
    }

    /* set the app_context number into the environment */
    asprintf(&param, "%ld", (long)app->idx);
    opal_setenv("OMPI_MCA_orte_app_num", param, true, &app->env);
    free(param);

    /* although the total_slots_alloc is the universe size, users
     * would appreciate being given a public environmental variable
     * that also represents this value - something MPI specific - so
     * do that here. Also required by the ompi_attributes code!
     *
     * AND YES - THIS BREAKS THE ABSTRACTION BARRIER TO SOME EXTENT.
     * We know - just live with it
     */
    asprintf(&param, "%ld", (long)jdata->total_slots_alloc);
    opal_setenv("OMPI_UNIVERSE_SIZE", param, true, &app->env);
    free(param);

    /* pass the number of nodes involved in this job */
    asprintf(&param, "%ld", (long)(jdata->map->num_nodes));
    opal_setenv("OMPI_MCA_orte_num_nodes", param, true, &app->env);
    free(param);

    /* pass a param telling the child what type and model of cpu we are on,
     * if we know it. If hwloc has the value, use what it knows. Otherwise,
     * see if we were explicitly given it and use that value.
     */
    hwloc_obj_t obj;
    char *htmp;
    if (NULL != opal_hwloc_topology) {
        obj = hwloc_get_root_obj(opal_hwloc_topology);
        if (NULL != (htmp = (char*)hwloc_obj_get_info_by_name(obj, "CPUType")) ||
            NULL != (htmp = orte_local_cpu_type)) {
            opal_setenv("OMPI_MCA_orte_cpu_type", htmp, true, &app->env);
        }
        if (NULL != (htmp = (char*)hwloc_obj_get_info_by_name(obj, "CPUModel")) ||
            NULL != (htmp = orte_local_cpu_model)) {
            opal_setenv("OMPI_MCA_orte_cpu_model", htmp, true, &app->env);
        }
    } else {
        if (NULL != orte_local_cpu_type) {
            opal_setenv("OMPI_MCA_orte_cpu_type", orte_local_cpu_type, true, &app->env);
        }
        if (NULL != orte_local_cpu_model) {
            opal_setenv("OMPI_MCA_orte_cpu_model", orte_local_cpu_model, true, &app->env);
        }
    }

    /* get shmem's best component name so we can provide a hint to the shmem
     * framework. the idea here is to have someone figure out what component to
     * select (via the shmem framework) and then have the rest of the
     * components in shmem obey that decision. for more details take a look at
     * the shmem framework in opal.
     */
    if (NULL != (param = opal_shmem_base_best_runnable_component_name())) {
        opal_setenv("OMPI_MCA_shmem_RUNTIME_QUERY_hint", param, true, &app->env);
        free(param);
    }

    /* Set an info MCA param that tells the launched processes that
     * any binding policy was applied by us (e.g., so that
     * MPI_INIT doesn't try to bind itself)
     */
    opal_setenv("OMPI_MCA_orte_bound_at_launch", "1", true, &app->env);

    /* tell the ESS to avoid the singleton component - but don't override
     * anything that may have been provided elsewhere
     */
    opal_setenv("OMPI_MCA_ess", "^singleton", false, &app->env);

    /* ensure that the spawned process ignores direct launch components,
     * but do not overrride anything we were given
     */
    opal_setenv("OMPI_MCA_pmix", "^s1,s2,cray", false, &app->env);

    /* since we want to pass the name as separate components, make sure
     * that the "name" environmental variable is cleared!
     */
    opal_unsetenv("OMPI_MCA_orte_ess_name", &app->env);

    asprintf(&param, "%ld", (long)jdata->num_procs);
    opal_setenv("OMPI_MCA_orte_ess_num_procs", param, true, &app->env);

    /* although the num_procs is the comm_world size, users
     * would appreciate being given a public environmental variable
     * that also represents this value - something MPI specific - so
     * do that here.
     *
     * AND YES - THIS BREAKS THE ABSTRACTION BARRIER TO SOME EXTENT.
     * We know - just live with it
     */
    opal_setenv("OMPI_COMM_WORLD_SIZE", param, true, &app->env);
    free(param);

    /* users would appreciate being given a public environmental variable
     * that also represents this value - something MPI specific - so
     * do that here.
     *
     * AND YES - THIS BREAKS THE ABSTRACTION BARRIER TO SOME EXTENT.
     * We know - just live with it
     */
    asprintf(&param, "%ld", (long)jdata->num_local_procs);
    opal_setenv("OMPI_COMM_WORLD_LOCAL_SIZE", param, true, &app->env);
    free(param);

    /* forcibly set the local tmpdir base to match ours */
    opal_setenv("OMPI_MCA_orte_tmpdir_base", orte_process_info.tmpdir_base, true, &app->env);

    /* MPI-3 requires we provide some further info to the procs,
     * so we pass them as envars to avoid introducing further
     * ORTE calls in the MPI layer
     */
    asprintf(&num_app_ctx, "%lu", (unsigned long)jdata->num_apps);

    /* build some common envars we need to pass for MPI-3 compatibility.
     * NOTE: use a dedicated cursor here - the original code reused "app",
     * which left it pointing at the job's last app context and caused the
     * setenv calls below to modify the wrong environment. */
    nps = NULL;
    firstranks = NULL;
    for (i=0; i < jdata->apps->size; i++) {
        if (NULL == (capp = (orte_app_context_t*)opal_pointer_array_get_item(jdata->apps, i))) {
            continue;
        }
        opal_argv_append_nosize(&nps, ORTE_VPID_PRINT(capp->num_procs));
        opal_argv_append_nosize(&firstranks, ORTE_VPID_PRINT(capp->first_rank));
    }
    npstring = opal_argv_join(nps, ' ');
    firstrankstring = opal_argv_join(firstranks, ' ');
    opal_argv_free(nps);
    opal_argv_free(firstranks);

    /* add the MPI-3 envars */
    opal_setenv("OMPI_NUM_APP_CTX", num_app_ctx, true, &app->env);
    opal_setenv("OMPI_FIRST_RANKS", firstrankstring, true, &app->env);
    opal_setenv("OMPI_APP_CTX_NUM_PROCS", npstring, true, &app->env);
    free(num_app_ctx);
    free(firstrankstring);
    free(npstring);
    return ORTE_SUCCESS;
}
/* process incoming messages in order of receipt */
/*
 * RML callback invoked for messages on the PLM tag.  Dispatches on the
 * packed command flag:
 *   - ORTE_PLM_LAUNCH_JOB_CMD: unpack a job object (typically from a
 *     comm_spawn request), inherit prefix/envars/bookmark from the parent
 *     job where appropriate, add any requested hosts, and spawn it.  On
 *     any launch error, an int32 error code is sent back to the requester.
 *   - ORTE_PLM_UPDATE_PROC_STATE: unpack per-proc (vpid, pid, state,
 *     exit_code) tuples for one or more jobs and activate the proc state
 *     machine for each.
 *   - ORTE_PLM_REGISTERED_CMD: unpack vpids that have registered and
 *     activate the REGISTERED proc state for each.
 * Errors fall through to DEPART; if we are the HNP and something failed,
 * we force termination.  status/tag/cbdata are unused here.
 */
void orte_plm_base_recv(int status, orte_process_name_t* sender,
                        opal_buffer_t* buffer,
                        orte_rml_tag_t tag, void* cbdata)
{
    orte_plm_cmd_flag_t command;
    orte_std_cntr_t count;
    orte_jobid_t job;
    orte_job_t *jdata, *parent;
    opal_buffer_t *answer;
    orte_vpid_t vpid;
    orte_proc_t *proc;
    orte_proc_state_t state;
    orte_exit_code_t exit_code;
    int32_t rc=ORTE_SUCCESS, ret;
    orte_app_context_t *app, *child_app;
    orte_process_name_t name;
    pid_t pid;
    bool running;
    int i;
    char **env;
    char *prefix_dir;

    OPAL_OUTPUT_VERBOSE((5, orte_plm_base_framework.framework_output,
                         "%s plm:base:receive processing msg",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME)));

    /* first item in every PLM message is the command flag */
    count = 1;
    if (ORTE_SUCCESS != (rc = opal_dss.unpack(buffer, &command, &count, ORTE_PLM_CMD))) {
        ORTE_ERROR_LOG(rc);
        goto CLEANUP;
    }
    switch (command) {
    case ORTE_PLM_LAUNCH_JOB_CMD:
        OPAL_OUTPUT_VERBOSE((5, orte_plm_base_framework.framework_output,
                             "%s plm:base:receive job launch command from %s",
                             ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                             ORTE_NAME_PRINT(sender)));

        /* unpack the job object */
        count = 1;
        if (ORTE_SUCCESS != (rc = opal_dss.unpack(buffer, &jdata, &count, ORTE_JOB))) {
            ORTE_ERROR_LOG(rc);
            goto ANSWER_LAUNCH;
        }

        /* record the sender so we know who to respond to */
        jdata->originator.jobid = sender->jobid;
        jdata->originator.vpid = sender->vpid;

        /* get the parent's job object */
        if (NULL != (parent = orte_get_job_data_object(sender->jobid))) {
            /* if the prefix was set in the parent's job, we need to transfer
             * that prefix to the child's app_context so any further launch of
             * orteds can find the correct binary. There always has to be at
             * least one app_context in both parent and child, so we don't
             * need to check that here. However, be sure not to overwrite
             * the prefix if the user already provided it!
             */
            app = (orte_app_context_t*)opal_pointer_array_get_item(parent->apps, 0);
            child_app = (orte_app_context_t*)opal_pointer_array_get_item(jdata->apps, 0);
            prefix_dir = NULL;
            /* copy only when the parent has a prefix AND the child doesn't */
            if (orte_get_attribute(&app->attributes, ORTE_APP_PREFIX_DIR, (void**)&prefix_dir, OPAL_STRING) &&
                !orte_get_attribute(&child_app->attributes, ORTE_APP_PREFIX_DIR, NULL, OPAL_STRING)) {
                orte_set_attribute(&child_app->attributes, ORTE_APP_PREFIX_DIR, ORTE_ATTR_GLOBAL, prefix_dir, OPAL_STRING);
            }
            if (NULL != prefix_dir) {
                free(prefix_dir);
            }
        }

        /* if the user asked to forward any envars, cycle through the app contexts
         * in the comm_spawn request and add them
         */
        if (NULL != orte_forwarded_envars) {
            for (i=0; i < jdata->apps->size; i++) {
                if (NULL == (app = (orte_app_context_t*)opal_pointer_array_get_item(jdata->apps, i))) {
                    continue;
                }
                /* merge allocates a new array; free the old one */
                env = opal_environ_merge(orte_forwarded_envars, app->env);
                opal_argv_free(app->env);
                app->env = env;
            }
        }

        OPAL_OUTPUT_VERBOSE((5, orte_plm_base_framework.framework_output,
                             "%s plm:base:receive adding hosts",
                             ORTE_NAME_PRINT(ORTE_PROC_MY_NAME)));

        /* process any add-hostfile and add-host options that were provided */
        if (ORTE_SUCCESS != (rc = orte_ras_base_add_hosts(jdata))) {
            ORTE_ERROR_LOG(rc);
            goto ANSWER_LAUNCH;
        }

        if (NULL != parent) {
            if (NULL == parent->bookmark) {
                /* find the sender's node in the job map */
                if (NULL != (proc = (orte_proc_t*)opal_pointer_array_get_item(parent->procs, sender->vpid))) {
                    /* set the bookmark so the child starts from that place - this means
                     * that the first child process could be co-located with the proc
                     * that called comm_spawn, assuming slots remain on that node. Otherwise,
                     * the procs will start on the next available node
                     */
                    jdata->bookmark = proc->node;
                }
            } else {
                jdata->bookmark = parent->bookmark;
            }
        }

        /* launch it */
        OPAL_OUTPUT_VERBOSE((5, orte_plm_base_framework.framework_output,
                             "%s plm:base:receive calling spawn",
                             ORTE_NAME_PRINT(ORTE_PROC_MY_NAME)));
        if (ORTE_SUCCESS != (rc = orte_plm.spawn(jdata))) {
            ORTE_ERROR_LOG(rc);
            goto ANSWER_LAUNCH;
        }
        break;

    ANSWER_LAUNCH:
        /* launch failed - tell the requester the error code */
        OPAL_OUTPUT_VERBOSE((5, orte_plm_base_framework.framework_output,
                             "%s plm:base:receive - error on launch: %d",
                             ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), rc));

        /* setup the response */
        answer = OBJ_NEW(opal_buffer_t);

        /* pack the error code to be returned */
        if (ORTE_SUCCESS != (ret = opal_dss.pack(answer, &rc, 1, OPAL_INT32))) {
            ORTE_ERROR_LOG(ret);
        }

        /* send the response back to the sender */
        if (0 > (ret = orte_rml.send_buffer_nb(sender, answer, ORTE_RML_TAG_PLM_PROXY,
                                               orte_rml_send_callback, NULL))) {
            ORTE_ERROR_LOG(ret);
            /* send failed - we retain ownership of the buffer, so release it */
            OBJ_RELEASE(answer);
        }
        break;

    case ORTE_PLM_UPDATE_PROC_STATE:
        opal_output_verbose(5, orte_plm_base_framework.framework_output,
                            "%s plm:base:receive update proc state command from %s",
                            ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                            ORTE_NAME_PRINT(sender));
        /* outer loop: one jobid per job being reported; inner loop: per-proc
         * (vpid, pid, state, exit_code) tuples, terminated by an invalid vpid */
        count = 1;
        while (ORTE_SUCCESS == (rc = opal_dss.unpack(buffer, &job, &count, ORTE_JOBID))) {
            opal_output_verbose(5, orte_plm_base_framework.framework_output,
                                "%s plm:base:receive got update_proc_state for job %s",
                                ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                                ORTE_JOBID_PRINT(job));
            name.jobid = job;
            running = false;
            /* get the job object */
            jdata = orte_get_job_data_object(job);
            count = 1;
            while (ORTE_SUCCESS == (rc = opal_dss.unpack(buffer, &vpid, &count, ORTE_VPID))) {
                if (ORTE_VPID_INVALID == vpid) {
                    /* flag indicates that this job is complete - move on */
                    break;
                }
                name.vpid = vpid;
                /* unpack the pid */
                count = 1;
                if (ORTE_SUCCESS != (rc = opal_dss.unpack(buffer, &pid, &count, OPAL_PID))) {
                    ORTE_ERROR_LOG(rc);
                    goto CLEANUP;
                }
                /* unpack the state */
                count = 1;
                if (ORTE_SUCCESS != (rc = opal_dss.unpack(buffer, &state, &count, ORTE_PROC_STATE))) {
                    ORTE_ERROR_LOG(rc);
                    goto CLEANUP;
                }
                if (ORTE_PROC_STATE_RUNNING == state) {
                    running = true;
                }
                /* unpack the exit code */
                count = 1;
                if (ORTE_SUCCESS != (rc = opal_dss.unpack(buffer, &exit_code, &count, ORTE_EXIT_CODE))) {
                    ORTE_ERROR_LOG(rc);
                    goto CLEANUP;
                }

                OPAL_OUTPUT_VERBOSE((5, orte_plm_base_framework.framework_output,
                                     "%s plm:base:receive got update_proc_state for vpid %lu state %s exit_code %d",
                                     ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                                     (unsigned long)vpid, orte_proc_state_to_str(state), (int)exit_code));

                if (NULL != jdata) {
                    /* get the proc data object */
                    if (NULL == (proc = (orte_proc_t*)opal_pointer_array_get_item(jdata->procs, vpid))) {
                        ORTE_ERROR_LOG(ORTE_ERR_NOT_FOUND);
                        ORTE_FORCED_TERMINATE(ORTE_ERROR_DEFAULT_EXIT_CODE);
                    }
                    /* NEVER update the proc state before activating the state machine - let
                     * the state cbfunc update it as it may need to compare this
                     * state against the prior proc state */
                    proc->pid = pid;
                    proc->exit_code = exit_code;
                    ORTE_ACTIVATE_PROC_STATE(&name, state);
                }
            }
            /* record that we heard back from a daemon during app launch */
            if (running && NULL != jdata) {
                jdata->num_daemons_reported++;
                if (orte_report_launch_progress) {
                    /* emit progress every 100 daemons and once at completion */
                    if (0 == jdata->num_daemons_reported % 100 ||
                        jdata->num_daemons_reported == orte_process_info.num_procs) {
                        ORTE_ACTIVATE_JOB_STATE(jdata, ORTE_JOB_STATE_REPORT_PROGRESS);
                    }
                }
            }
            /* prepare for next job */
            count = 1;
        }
        /* running off the end of the buffer is the normal exit condition */
        if (ORTE_ERR_UNPACK_READ_PAST_END_OF_BUFFER != rc) {
            ORTE_ERROR_LOG(rc);
        } else {
            rc = ORTE_SUCCESS;
        }
        break;

    case ORTE_PLM_REGISTERED_CMD:
        count=1;
        if (ORTE_SUCCESS != (rc = opal_dss.unpack(buffer, &job, &count, ORTE_JOBID))) {
            ORTE_ERROR_LOG(rc);
            goto DEPART;
        }
        name.jobid = job;
        /* get the job object */
        if (NULL == (jdata = orte_get_job_data_object(job))) {
            ORTE_ERROR_LOG(ORTE_ERR_NOT_FOUND);
            rc = ORTE_ERR_NOT_FOUND;
            goto DEPART;
        }
        /* activate REGISTERED for every vpid remaining in the buffer */
        count=1;
        while (ORTE_SUCCESS == opal_dss.unpack(buffer, &vpid, &count, ORTE_VPID)) {
            name.vpid = vpid;
            ORTE_ACTIVATE_PROC_STATE(&name, ORTE_PROC_STATE_REGISTERED);
            count=1;
        }
        break;

    default:
        ORTE_ERROR_LOG(ORTE_ERR_VALUE_OUT_OF_BOUNDS);
        rc = ORTE_ERR_VALUE_OUT_OF_BOUNDS;
        break;
    }

 CLEANUP:
    /* NOTE(review): this conditional jump is a no-op since control falls
     * through to DEPART either way - kept for label symmetry */
    if (ORTE_SUCCESS != rc) {
        goto DEPART;
    }

 DEPART:
    /* see if an error occurred - if so, wakeup the HNP so we can exit */
    if (ORTE_PROC_IS_HNP && ORTE_SUCCESS != rc) {
        jdata = NULL;
        ORTE_FORCED_TERMINATE(ORTE_ERROR_DEFAULT_EXIT_CODE);
    }

    OPAL_OUTPUT_VERBOSE((5, orte_plm_base_framework.framework_output,
                         "%s plm:base:receive done processing commands",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME)));
}
/*
 * setup env and argv for specified process (yarn launcher)
 *
 * Copies app->argv into *pargv, wraps it in the hamster Java executor
 * invocation, redirects stdout/stderr into <LOG_DIR>, then builds *penv by
 * merging the daemon's environ with app->env and setting the ORTE/OMPI MCA
 * parameters the child needs.  Caller owns (and must free) *pargv and *penv.
 *
 * @return ORTE_SUCCESS, or an ORTE error code (nothing is leaked on error)
 *
 * Review fixes:
 *  - "&param2" had been mangled to the literal bytes "¶m2" by an
 *    HTML-entity encoding pass; restored (code did not compile before).
 *  - early error returns leaked the *pargv copy, job_id_str/vp_id_str,
 *    and the merged *penv; all paths now clean up.
 *  - unused local "rc" removed; return ORTE_SUCCESS instead of bare 0.
 */
static int setup_proc_env_and_argv(orte_job_t* jdata, orte_app_context_t* app,
        orte_proc_t* proc, char ***pargv, char ***penv)
{
    char* param;
    char* param2;
    char* value;
    char* vp_id_str;
    char* job_id_str;
    int i, num_nodes;

    /* obtain app->argv */
    if (!(app->argv)) {
        opal_output(0, "%s plm::yarn::setup_proc_env_and_argv: app->argv is null",
                    ORTE_NAME_PRINT(ORTE_PROC_MY_NAME));
        return ORTE_ERROR;
    }

    *pargv = opal_argv_copy(app->argv);

    if (ORTE_SUCCESS != orte_util_convert_jobid_to_string(&job_id_str, jdata->jobid)) {
        ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
        opal_argv_free(*pargv);   /* don't leak the argv copy */
        *pargv = NULL;
        return ORTE_ERR_OUT_OF_RESOURCE;
    }
    if (ORTE_SUCCESS != orte_util_convert_vpid_to_string(&vp_id_str, proc->name.vpid)) {
        ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
        free(job_id_str);         /* don't leak the earlier conversion */
        opal_argv_free(*pargv);
        *pargv = NULL;
        return ORTE_ERR_OUT_OF_RESOURCE;
    }

    // add stdout, stderr to app
    opal_argv_append_nosize(pargv, "1><LOG_DIR>/stdout");
    opal_argv_append_nosize(pargv, "2><LOG_DIR>/stderr");

    // add java executor to app (prepends build the command front-to-back
    // in reverse, yielding: $JAVA_HOME/bin/java <opts> -cp hamster-core.jar
    // com.pivotal.hamster.yarnexecutor.YarnExecutor <jobid> <vpid> <argv...>)
    opal_argv_prepend_nosize(pargv, vp_id_str);
    opal_argv_prepend_nosize(pargv, job_id_str);
    opal_argv_prepend_nosize(pargv, "com.pivotal.hamster.yarnexecutor.YarnExecutor");
    opal_argv_prepend_nosize(pargv, "hamster-core.jar");
    opal_argv_prepend_nosize(pargv, "-cp");
    opal_argv_prepend_nosize(pargv, getenv("HAMSTER_JAVA_OPT")==NULL ? "-Xmx32M -Xms8M" : getenv("HAMSTER_JAVA_OPT"));
    opal_argv_prepend_nosize(pargv, "$JAVA_HOME/bin/java");

    /* obtain app->env */
    *penv = opal_environ_merge(environ, app->env);

    if (!proc->node) {
        opal_output(0, "%s plm::yarn::setup_proc_env_and_argv: node of proc[%d] is NULL",
                    ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), proc->name.vpid);
        free(job_id_str);
        free(vp_id_str);
        opal_argv_free(*pargv);
        *pargv = NULL;
        opal_argv_free(*penv);
        *penv = NULL;
        return ORTE_ERROR;
    }

    if (!proc->node->daemon) {
        opal_output(0, "%s plm::yarn::setup_proc_env_and_argv: daemon of node[%s] is NULL",
                    ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), proc->node->name);
        free(job_id_str);
        free(vp_id_str);
        opal_argv_free(*pargv);
        *pargv = NULL;
        opal_argv_free(*penv);
        *penv = NULL;
        return ORTE_ERROR;
    }

    /* set the app_context number into the environment */
    param = mca_base_param_env_var("orte_app_num");
    asprintf(&param2, "%ld", (long)app->idx);
    opal_setenv(param, param2, true, penv);
    free(param);
    free(param2);

    // pass the daemon's name
    param = mca_base_param_env_var("orte_local_daemon_uri");
    opal_setenv(param, proc->node->daemon->rml_uri, true, penv);
    free(param);

    /* pass my contact info */
    param = mca_base_param_env_var("orte_hnp_uri");
    opal_setenv(param, orte_process_info.my_hnp_uri, true, penv);
    free(param);

    /* pass the jobid */
    param = mca_base_param_env_var("orte_ess_jobid");
    opal_setenv(param, job_id_str, true, penv);
    free(param);
    free(job_id_str);

    /* pass the rank */
    param = mca_base_param_env_var("orte_ess_vpid");
    opal_setenv(param, vp_id_str, true, penv);
    free(param);

    opal_setenv("OMPI_COMM_WORLD_RANK", vp_id_str, true, penv);
    free(vp_id_str);  /* done with this now */

    /* pass local rank */
    asprintf(&value, "%lu", (unsigned long) proc->local_rank);
    opal_setenv("OMPI_COMM_WORLD_LOCAL_RANK", value, true, penv);
    free(value);

    /* pass node rank */
    asprintf(&value, "%lu", (unsigned long) proc->node_rank);
    opal_setenv("OMPI_COMM_WORLD_NODE_RANK", value, true, penv);

    /* set an mca param for it too */
    param = mca_base_param_env_var("orte_ess_node_rank");
    opal_setenv(param, value, true, penv);
    free(param);
    free(value);

    /* pass a param telling the child what model of cpu we are on,
     * if we know it */
    if (NULL != orte_local_cpu_type) {
        param = mca_base_param_env_var("orte_cpu_type");
        /* do not overwrite what the user may have provided */
        opal_setenv(param, orte_local_cpu_type, false, penv);
        free(param);
    }
    if (NULL != orte_local_cpu_model) {
        param = mca_base_param_env_var("orte_cpu_model");
        /* do not overwrite what the user may have provided */
        opal_setenv(param, orte_local_cpu_model, false, penv);
        free(param);
    }

    /* pass the number of nodes involved in this job */
    param = mca_base_param_env_var("orte_num_nodes");
    /* we have to count the number of nodes as the size of orte_node_pool
     * is only guaranteed to be equal or larger than that number - i.e.,
     * the pointer_array increases the size by a block each time, so some
     * of the locations are left empty
     */
    num_nodes = 0;
    for (i = 0; i < orte_node_pool->size; i++) {
        if (NULL != opal_pointer_array_get_item(orte_node_pool, i)) {
            num_nodes++;
        }
    }
    asprintf(&value, "%d", num_nodes);
    opal_setenv(param, value, true, penv);
    free(param);
    free(value);

    /* setup yield schedule */
    param = mca_base_param_env_var("mpi_yield_when_idle");
    opal_setenv(param, "0", false, penv);
    free(param);

    /* set MPI universe envar */
    orte_ess_env_put(jdata->num_procs, proc->node->num_procs, penv);

    asprintf(&value, "%ld", (long) jdata->num_procs);
    opal_setenv("OMPI_UNIVERSE_SIZE", value, true, penv);
    free(value);

    /* pass collective ids for the std MPI operations */
    param = mca_base_param_env_var("orte_peer_modex_id");
    asprintf(&value, "%d", jdata->peer_modex);
    opal_setenv(param, value, true, penv);
    free(param);
    free(value);

    param = mca_base_param_env_var("orte_peer_init_barrier_id");
    asprintf(&value, "%d", jdata->peer_init_barrier);
    opal_setenv(param, value, true, penv);
    free(param);
    free(value);

    param = mca_base_param_env_var("orte_peer_fini_barrier_id");
    asprintf(&value, "%d", jdata->peer_fini_barrier);
    opal_setenv(param, value, true, penv);
    free(param);
    free(value);

    /* finally, we will set/unset some mca param to select modules */
    opal_unsetenv("OMPI_MCA_plm", penv);
    opal_unsetenv("OMPI_MCA_ras", penv);
    opal_unsetenv("OMPI_MCA_ess", penv);
    opal_unsetenv("OMPI_MCA_state", penv);
    opal_unsetenv("OMPI_MCA_errmgr", penv);

    return ORTE_SUCCESS;
}