static int getConnInfoKVS( int rank, char *buf, int bufsize, MPIDI_PG_t *pg )
{
#ifdef USE_PMI2_API
    char key[MPIDI_MAX_KVS_KEY_LEN];
    int mpi_errno = MPI_SUCCESS, rc;
    int vallen;

    rc = MPL_snprintf(key, MPIDI_MAX_KVS_KEY_LEN, "P%d-businesscard", rank );
    if (rc < 0 || rc > MPIDI_MAX_KVS_KEY_LEN) {
        MPIR_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER,"**nomem");
    }

    mpi_errno = PMI2_KVS_Get(pg->connData, PMI2_ID_NULL, key, buf, bufsize, &vallen);
    if (mpi_errno) {
        /* The get may fail if we are a singleton that has not yet
         * connected to a process manager; check for that and retry once. */
        MPIDI_PG_CheckForSingleton();
        mpi_errno = PMI2_KVS_Get(pg->connData, PMI2_ID_NULL, key, buf, bufsize, &vallen);
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);
    }

  fn_exit:
    return mpi_errno;
  fn_fail:
    goto fn_exit;
#else
    char key[MPIDI_MAX_KVS_KEY_LEN];
    int mpi_errno = MPI_SUCCESS, rc, pmi_errno;

    rc = MPL_snprintf(key, MPIDI_MAX_KVS_KEY_LEN, "P%d-businesscard", rank );
    if (rc < 0 || rc > MPIDI_MAX_KVS_KEY_LEN) {
        MPIR_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER,"**nomem");
    }

    MPID_THREAD_CS_ENTER(POBJ, MPIR_THREAD_POBJ_PMI_MUTEX);
    pmi_errno = PMI_KVS_Get(pg->connData, key, buf, bufsize );
    if (pmi_errno) {
        /* Retry once in case we are a singleton that had not yet
         * connected to a process manager. */
        MPIDI_PG_CheckForSingleton();
        pmi_errno = PMI_KVS_Get(pg->connData, key, buf, bufsize );
    }
    MPID_THREAD_CS_EXIT(POBJ, MPIR_THREAD_POBJ_PMI_MUTEX);
    if (pmi_errno) {
        MPIR_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER,"**pmi_kvs_get");
    }

  fn_exit:
    return mpi_errno;
  fn_fail:
    goto fn_exit;
#endif
}
static void checked_PMI_KVS_Get(const char kvsname[], const char key[],
                                char value[], int length)
{
    int pmi_errno = PMI_KVS_Get(kvsname, key, value, length);
    if (pmi_errno != PMI_SUCCESS) {
        PRINTERROR("PMI: PMI_KVS_Get(kvsname=\"%s\", key=\"%s\", val, sizeof(val)) : failed",
                   kvsname, key);
        exit(1);
    }
}
static int kvs_get(const char key[], char value[], int maxvalue)
{
    int rc;

    rc = PMI_KVS_Get(pmix_kvs_name, key, value, maxvalue);
    if (PMI_SUCCESS != rc) {
        /* silently return an error - might be okay */
        return OPAL_ERROR;
    }
    return OPAL_SUCCESS;
}
static int kvs_get(const char key[], char value[], int maxvalue)
{
    int rc;

    rc = PMI_KVS_Get(pmix_kvs_name, key, value, maxvalue);
    if (PMI_SUCCESS != rc) {
        OPAL_PMI_ERROR(rc, "PMI_KVS_Get");
        return OPAL_ERROR;
    }
    return OPAL_SUCCESS;
}
static int kvs_get(const char *key, char *value, int valuelen)
{
#if WANT_CRAY_PMI2_EXT
    int len;
    return PMI2_KVS_Get(pmi_kvs_name, PMI2_ID_NULL, key, value, valuelen, &len);
#else
    return PMI_KVS_Get(pmi_kvs_name, key, value, valuelen);
#endif
}
void do_kvs_get(void *value, size_t sz)
{
    int rc;
#if USE_PMI2_API
    int len;
    rc = PMI2_KVS_Get(kvs_name, PMI2_ID_NULL, kvs_key, kvs_value, max_val_len, &len);
    gasneti_assert(PMI2_SUCCESS == rc);
#else
    rc = PMI_KVS_Get(kvs_name, kvs_key, kvs_value, max_val_len);
    gasneti_assert(PMI_SUCCESS == rc);
#endif
    do_decode(value, sz);
}
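PMI KVS values must be printable, NUL-free strings, which is why binary payloads (business cards, endpoint data) are encoded before PMI_KVS_Put and decoded after PMI_KVS_Get, as in do_decode above and pmi_decode later in this section. The pair below is a minimal illustrative hex codec, not GASNet's actual do_encode/do_decode nor ORTE's pmi_encode; the names hex_encode/hex_decode are hypothetical.

#include <stddef.h>

/* Hypothetical helpers: hex-encode binary data so it survives a PMI KVS
 * round trip (values must be printable strings). Illustrative only. */
static void hex_encode(const unsigned char *in, size_t n, char *out)
{
    static const char digits[] = "0123456789abcdef";
    size_t i;
    for (i = 0; i < n; i++) {
        out[2 * i]     = digits[in[i] >> 4];
        out[2 * i + 1] = digits[in[i] & 0xf];
    }
    out[2 * n] = '\0';   /* out must have room for 2*n + 1 bytes */
}

static void hex_decode(const char *in, unsigned char *out, size_t n)
{
    size_t i;
    for (i = 0; i < n; i++) {
        char hi = in[2 * i], lo = in[2 * i + 1];
        out[i] = (unsigned char)(((hi <= '9' ? hi - '0' : hi - 'a' + 10) << 4) |
                                  (lo <= '9' ? lo - '0' : lo - 'a' + 10));
    }
}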
int MPIDI_check_for_failed_procs(void)
{
    int mpi_errno = MPI_SUCCESS;
    int pmi_errno;
    int len;
    char *kvsname = MPIDI_global.jobid;
    char *failed_procs_string = NULL;

    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_CHECK_FOR_FAILED_PROCS);
    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_CHECK_FOR_FAILED_PROCS);

    /* FIXME: Currently this only handles failed processes in
     * comm_world. We need to fix hydra to include the pgid along
     * with the rank, then we need to create the failed group from
     * something bigger than comm_world. */
#ifdef USE_PMIX_API
    MPIR_Assert(0);
#elif defined(USE_PMI2_API)
    {
        int vallen = 0;
        len = PMI2_MAX_VALLEN;
        failed_procs_string = MPL_malloc(len, MPL_MEM_OTHER);
        MPIR_Assert(failed_procs_string);
        pmi_errno = PMI2_KVS_Get(kvsname, PMI2_ID_NULL, "PMI_dead_processes",
                                 failed_procs_string, len, &vallen);
        MPIR_ERR_CHKANDJUMP(pmi_errno, mpi_errno, MPI_ERR_OTHER, "**pmi_kvs_get");
    }
#else
    pmi_errno = PMI_KVS_Get_value_length_max(&len);
    MPIR_ERR_CHKANDJUMP(pmi_errno, mpi_errno, MPI_ERR_OTHER,
                        "**pmi_kvs_get_value_length_max");
    failed_procs_string = MPL_malloc(len, MPL_MEM_OTHER);
    MPIR_Assert(failed_procs_string);
    pmi_errno = PMI_KVS_Get(kvsname, "PMI_dead_processes", failed_procs_string, len);
    MPIR_ERR_CHKANDJUMP(pmi_errno, mpi_errno, MPI_ERR_OTHER, "**pmi_kvs_get");
#endif

    MPL_DBG_MSG_FMT(MPIDI_CH4_DBG_GENERAL, VERBOSE,
                    (MPL_DBG_FDEST, "Received proc fail notification: %s",
                     failed_procs_string));

    /* free the string only after the debug message above has used it */
    MPL_free(failed_procs_string);
    failed_procs_string = NULL;

    /* FIXME: handle ULFM failed groups here */

  fn_exit:
    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_CHECK_FOR_FAILED_PROCS);
    return mpi_errno;
  fn_fail:
    MPL_free(failed_procs_string);
    goto fn_exit;
}
static void i_version_check(char *pg_id, int pg_rank, const char *ver)
{
    if (pg_rank != 0) {
        char val[100] = "unknown";
        int pmi_errno = PMI_KVS_Get(pg_id, "i_version", val, sizeof(val));
        assert(pmi_errno == PMI_SUCCESS);
        if (strcmp(val, ver)) {
            fprintf(stderr,
                    "MPI: warning: different mpi init versions (rank 0:'%s' != rank %d:'%s')\n",
                    val, pg_rank, ver);
        }
    }
}
static int pmi_get_proc_attr(const orte_process_name_t name,
                             const char *attr_name,
                             void **buffer, size_t *size)
{
    char *attrval, *attr;
    int rc;

    if (NULL == pmi_kvs_name) {
        return ORTE_ERR_UNREACH;
    }

    OPAL_OUTPUT_VERBOSE((1, orte_grpcomm_base.output,
                         "%s grpcomm:pmi: get attr %s for proc %s in KVS %s",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), attr_name,
                         ORTE_NAME_PRINT(&name), pmi_kvs_name));

    attrval = malloc(pmi_vallen_max);
    if (NULL == attrval) {
        return ORTE_ERR_OUT_OF_RESOURCE;
    }

    if (0 > asprintf(&attr, "%s-%s", ORTE_NAME_PRINT(&name), attr_name)) {
        free(attrval);
        return ORTE_ERR_OUT_OF_RESOURCE;
    }

    rc = PMI_KVS_Get(pmi_kvs_name, attr, attrval, pmi_vallen_max);
    if (PMI_SUCCESS != rc) {
        ORTE_PMI_ERROR(rc, "PMI_KVS_Get");
        free(attrval);
        free(attr);
        return ORTE_ERROR;
    }

    *buffer = pmi_decode((unsigned char *)attrval, size);
    free(attrval);
    free(attr);

    /* check the decoded buffer, not the output pointer itself, and do so
     * before *size is used below */
    if (NULL == *buffer) {
        return ORTE_ERR_OUT_OF_RESOURCE;
    }

    OPAL_OUTPUT_VERBOSE((1, orte_grpcomm_base.output,
                         "%s grpcomm:pmi: got attr %s of size %lu",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                         attr_name, (unsigned long)(*size)));

    return ORTE_SUCCESS;
}
static int test_item7(void)
{
    int rc = 0;
    char tkey[100];
    char tval[100];
    char val[100];
    int i = 0;

    for (i = 0; i < size; i++) {
        sprintf(tkey, "KEY-%d", i);
        sprintf(tval, "VALUE-%d", i);
        if (i == rank) {
            if (PMI_SUCCESS != (rc = PMI_KVS_Put(jobid, tkey, tval))) {
                log_fatal("PMI_KVS_Put [%s=%s] %d\n", tkey, tval, rc);
                return rc;
            }
        }
    }

    if (PMI_SUCCESS != (rc = PMI_KVS_Commit(jobid))) {
        log_fatal("PMI_KVS_Commit %d\n", rc);
        return rc;
    }

    if (PMI_SUCCESS != (rc = PMI_Barrier())) {
        log_fatal("PMI_Barrier %d\n", rc);
        return rc;
    }

    for (i = 0; i < size; i++) {
        sprintf(tkey, "KEY-%d", i);
        sprintf(tval, "VALUE-%d", i);
        if (PMI_SUCCESS != (rc = PMI_KVS_Get(jobid, tkey, val, sizeof(val)))) {
            log_fatal("PMI_KVS_Get [%s=?] %d\n", tkey, rc);
            return rc;
        }
        log_info("tkey=%s tval=%s val=%s\n", tkey, tval, val);
        log_assert(!strcmp(tval, val), "value does not meet expectation");
    }
    return rc;
}
int MPIDI_CH3_GetParentPort(char **parent_port)
{
    int mpi_errno = MPI_SUCCESS;
    int pmi_errno;
    char val[MPIDI_MAX_KVS_VALUE_LEN];

    if (parent_port_name == NULL) {
        char *kvsname = NULL;
        /* We can always use PMI_KVS_Get on our own process group */
        MPIDI_PG_GetConnKVSname( &kvsname );
#ifdef USE_PMI2_API
        {
            int vallen = 0;
            MPID_THREAD_CS_ENTER(POBJ, MPIR_THREAD_POBJ_PMI_MUTEX);
            pmi_errno = PMI2_KVS_Get(kvsname, PMI2_ID_NULL, PARENT_PORT_KVSKEY,
                                     val, sizeof(val), &vallen);
            MPID_THREAD_CS_EXIT(POBJ, MPIR_THREAD_POBJ_PMI_MUTEX);
            if (pmi_errno)
                MPIR_ERR_SETANDJUMP1(mpi_errno, MPI_ERR_OTHER, "**pmi_kvsget",
                                     "**pmi_kvsget %s", PARENT_PORT_KVSKEY);
        }
#else
        MPID_THREAD_CS_ENTER(POBJ, MPIR_THREAD_POBJ_PMI_MUTEX);
        pmi_errno = PMI_KVS_Get( kvsname, PARENT_PORT_KVSKEY, val, sizeof(val));
        MPID_THREAD_CS_EXIT(POBJ, MPIR_THREAD_POBJ_PMI_MUTEX);
        if (pmi_errno) {
            mpi_errno = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_FATAL,
                                             FCNAME, __LINE__, MPI_ERR_OTHER,
                                             "**pmi_kvsget", "**pmi_kvsget %d",
                                             pmi_errno);
            goto fn_exit;
        }
#endif
        parent_port_name = MPL_strdup(val);
        if (parent_port_name == NULL) {
            MPIR_ERR_POP(mpi_errno); /* FIXME DARIUS */
        }
    }

    *parent_port = parent_port_name;

  fn_exit:
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
int MPID_NS_Lookup(MPID_NS_Handle handle, const MPIR_Info *info_ptr,
                   const char service_name[], char port[])
{
    int err;

    /* printf("lookup kvs: <%s>\n", handle->kvsname); fflush(stdout); */
    err = PMI_KVS_Get(handle->kvsname, service_name, port, MPI_MAX_PORT_NAME);
    /* --BEGIN ERROR HANDLING-- */
    if (err != PMI_SUCCESS) {
        err = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
                                   __func__, __LINE__, MPI_ERR_NAME,
                                   "**pmi_kvs_get", 0);
        return err;
    }
    /* --END ERROR HANDLING-- */

    if (port[0] == '\0') {
        return MPI_ERR_NAME;
    }
    return 0;
}
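The publish side of this name service presumably mirrors the lookup: a PMI_KVS_Put of the port string under the service name, followed by PMI_KVS_Commit to make it visible. The sketch below illustrates that shape under those assumptions; ns_publish_sketch is a hypothetical name, not MPICH's actual MPID_NS_Publish.

/* Minimal sketch of the matching publish operation, assuming the same
 * handle->kvsname KVS space as MPID_NS_Lookup above. Illustrative only. */
static int ns_publish_sketch(MPID_NS_Handle handle,
                             const char service_name[], const char port[])
{
    int err = PMI_KVS_Put(handle->kvsname, service_name, port);
    if (err != PMI_SUCCESS)
        return MPI_ERR_NAME;
    /* make the key visible to other processes */
    err = PMI_KVS_Commit(handle->kvsname);
    if (err != PMI_SUCCESS)
        return MPI_ERR_NAME;
    return 0;
}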
static int test_item5(void)
{
    int rc = 0;
    char *val = NULL;
    int val_size = 0;
    /* Predefined Job attributes */
    const char *tkeys[] = {
        "PMI_process_mapping",
        NULL
    };
    const char **ptr = tkeys;

    /* This condition is always true: the test is intentionally skipped
     * because these PMI implementations do not provide the attribute. */
    if (_legacy || !_legacy) {
        log_error("%s\n", "PMIx and SLURM/PMI1 do not set 'PMI_process_mapping' (Do not mark test as failed)");
        return rc;
    }

    if (PMI_SUCCESS != (rc = PMI_KVS_Get_value_length_max(&val_size))) {
        log_fatal("PMI_KVS_Get_value_length_max failed: %d\n", rc);
        return rc;
    }

    val = alloca(val_size);
    if (!val) {
        return PMI_FAIL;
    }

    while (*ptr) {
        if (PMI_SUCCESS != (rc = PMI_KVS_Get(jobid, *ptr, val, val_size))) {
            log_fatal("PMI_KVS_Get: [%s] %d\n", *ptr, rc);
            return rc;
        }
        log_info("key=%s value=%.80s\n", *ptr, val);
        ptr++;
    }
    return rc;
}
static int test_item6(void)
{
    int rc = 0;
    char val[100];
    const char *tkey = __func__;
    const char *tval = __FILE__;

    if (PMI_SUCCESS != (rc = PMI_KVS_Put(jobid, tkey, tval))) {
        log_fatal("PMI_KVS_Put %d\n", rc);
        return rc;
    }

    if (PMI_SUCCESS != (rc = PMI_KVS_Get(jobid, tkey, val, sizeof(val)))) {
        log_fatal("PMI_KVS_Get %d\n", rc);
        return rc;
    }

    log_info("tkey=%s tval=%s val=%s\n", tkey, tval, val);
    log_assert(!strcmp(tval, val), "value does not meet expectation");

    return rc;
}
int mca_common_pmi_get(const char *kvs_name, const char *key,
                       char *value, int valuelen)
{
    int rc;
#if WANT_PMI2_SUPPORT
    if (mca_common_pmi_version == 2) {
        int len;
        rc = PMI2_KVS_Get(kvs_name, PMI2_ID_NULL, key, value, valuelen, &len);
        if (PMI2_SUCCESS != rc) {
            // OPAL_PMI2_ERROR(rc, "PMI2_KVS_Get");
            return OPAL_ERROR;
        }
    }
    else
#endif
    {
        rc = PMI_KVS_Get(kvs_name, key, value, valuelen);
        if (PMI_SUCCESS != rc) {
            OPAL_PMI_ERROR(rc, "PMI_KVS_Get");
            return OPAL_ERROR;
        }
    }
    return OPAL_SUCCESS;
}
int MPIDU_bc_table_create(int rank, int size, int *nodemap, void *bc, int bc_len,
                          int same_len, int roots_only, void **bc_table,
                          size_t **bc_indices)
{
    int rc, mpi_errno = MPI_SUCCESS;
    int start, end, i;
    int key_max, val_max, name_max, out_len, rem;
    char *kvsname = NULL, *key = NULL, *val = NULL, *val_p;
    int local_rank = -1, local_leader = -1;
    size_t my_bc_len = bc_len;

    /* local_size, segment, memory, barrier, and indices are file-scope
     * statics shared with the other bc_table routines */
    MPIR_NODEMAP_get_local_info(rank, size, nodemap, &local_size, &local_rank,
                                &local_leader);

    rc = PMI_KVS_Get_name_length_max(&name_max);
    MPIR_ERR_CHKANDJUMP(rc, mpi_errno, MPI_ERR_OTHER, "**pmi_kvs_get_name_length_max");
    rc = PMI_KVS_Get_key_length_max(&key_max);
    MPIR_ERR_CHKANDJUMP(rc, mpi_errno, MPI_ERR_OTHER, "**pmi_kvs_get_key_length_max");
    rc = PMI_KVS_Get_value_length_max(&val_max);
    MPIR_ERR_CHKANDJUMP(rc, mpi_errno, MPI_ERR_OTHER, "**pmi_kvs_get_value_length_max");

    /* if business cards can be different length, use the max value length */
    if (!same_len)
        bc_len = val_max;

    mpi_errno = MPIDU_shm_seg_alloc(bc_len * size, (void **) &segment, MPL_MEM_ADDRESS);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);
    mpi_errno = MPIDU_shm_seg_commit(&memory, &barrier, local_size, local_rank,
                                     local_leader, rank, MPL_MEM_ADDRESS);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

    if (size == 1) {
        memcpy(segment, bc, my_bc_len);
        goto single;
    }

    kvsname = MPL_malloc(name_max, MPL_MEM_ADDRESS);
    MPIR_Assert(kvsname);
    rc = PMI_KVS_Get_my_name(kvsname, name_max);
    MPIR_ERR_CHKANDJUMP(rc, mpi_errno, MPI_ERR_OTHER, "**pmi_kvs_get_my_name");

    val = MPL_malloc(val_max, MPL_MEM_ADDRESS);
    MPIR_Assert(val);
    memset(val, 0, val_max);
    val_p = val;
    rem = val_max;
    rc = MPL_str_add_binary_arg(&val_p, &rem, "mpi", (char *) bc, my_bc_len);
    MPIR_ERR_CHKANDJUMP(rc, mpi_errno, MPI_ERR_OTHER, "**buscard");
    MPIR_Assert(rem >= 0);

    key = MPL_malloc(key_max, MPL_MEM_ADDRESS);
    MPIR_Assert(key);

    if (!roots_only || rank == local_leader) {
        sprintf(key, "bc-%d", rank);
        rc = PMI_KVS_Put(kvsname, key, val);
        MPIR_ERR_CHKANDJUMP(rc, mpi_errno, MPI_ERR_OTHER, "**pmi_kvs_put");
        rc = PMI_KVS_Commit(kvsname);
        MPIR_ERR_CHKANDJUMP(rc, mpi_errno, MPI_ERR_OTHER, "**pmi_kvs_commit");
    }

    rc = PMI_Barrier();
    MPIR_ERR_CHKANDJUMP(rc, mpi_errno, MPI_ERR_OTHER, "**pmi_barrier");

    if (!roots_only) {
        /* each local process fetches an even share of the cards */
        start = local_rank * (size / local_size);
        end = start + (size / local_size);
        if (local_rank == local_size - 1)
            end += size % local_size;
        for (i = start; i < end; i++) {
            sprintf(key, "bc-%d", i);
            rc = PMI_KVS_Get(kvsname, key, val, val_max);
            MPIR_ERR_CHKANDJUMP(rc, mpi_errno, MPI_ERR_OTHER, "**pmi_kvs_get");
            rc = MPL_str_get_binary_arg(val, "mpi", &segment[i * bc_len],
                                        bc_len, &out_len);
            MPIR_ERR_CHKANDJUMP(rc, mpi_errno, MPI_ERR_OTHER, "**argstr_missinghost");
        }
    } else {
        int num_nodes, *node_roots;

        MPIR_NODEMAP_get_node_roots(nodemap, size, &node_roots, &num_nodes);
        start = local_rank * (num_nodes / local_size);
        end = start + (num_nodes / local_size);
        if (local_rank == local_size - 1)
            end += num_nodes % local_size;
        for (i = start; i < end; i++) {
            sprintf(key, "bc-%d", node_roots[i]);
            rc = PMI_KVS_Get(kvsname, key, val, val_max);
            MPIR_ERR_CHKANDJUMP(rc, mpi_errno, MPI_ERR_OTHER, "**pmi_kvs_get");
            rc = MPL_str_get_binary_arg(val, "mpi", &segment[i * bc_len],
                                        bc_len, &out_len);
            MPIR_ERR_CHKANDJUMP(rc, mpi_errno, MPI_ERR_OTHER, "**argstr_missinghost");
        }
        MPL_free(node_roots);
    }

    mpi_errno = MPIDU_shm_barrier(barrier, local_size);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

  single:
    if (!same_len) {
        indices = MPL_malloc(size * sizeof(size_t), MPL_MEM_ADDRESS);
        MPIR_Assert(indices);
        for (i = 0; i < size; i++)
            indices[i] = bc_len * i;
        *bc_indices = indices;
    }

  fn_exit:
    MPL_free(kvsname);
    MPL_free(key);
    MPL_free(val);
    *bc_table = segment;
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
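The start/end arithmetic above splits the KVS fetches evenly across the local processes, with the last local rank absorbing the remainder. A standalone demonstration of that partitioning (all names here are assumed for illustration):

/* Illustration of the block partitioning used above: n items split across
 * local_size readers, last local rank takes the leftovers. */
#include <stdio.h>

int main(void)
{
    int n = 10, local_size = 4;       /* e.g. 10 business cards, 4 local procs */
    for (int local_rank = 0; local_rank < local_size; local_rank++) {
        int start = local_rank * (n / local_size);
        int end = start + (n / local_size);
        if (local_rank == local_size - 1)
            end += n % local_size;    /* last rank picks up the remainder */
        printf("local_rank %d reads [%d, %d)\n", local_rank, start, end);
    }
    return 0;                         /* prints [0,2) [2,4) [4,6) [6,10) */
}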
int MPIDI_CH3U_Check_for_failed_procs(void)
{
    int mpi_errno = MPI_SUCCESS;
    int pmi_errno;
    int len;
    char *kvsname;
    MPIR_Group *prev_failed_group, *new_failed_group;

    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_CH3U_CHECK_FOR_FAILED_PROCS);
    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_CH3U_CHECK_FOR_FAILED_PROCS);

    /* FIXME: Currently this only handles failed processes in
       comm_world.  We need to fix hydra to include the pgid along
       with the rank, then we need to create the failed group from
       something bigger than comm_world. */
    mpi_errno = MPIDI_PG_GetConnKVSname(&kvsname);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);
#ifdef USE_PMI2_API
    {
        int vallen = 0;
        pmi_errno = PMI2_KVS_Get(kvsname, PMI2_ID_NULL, "PMI_dead_processes",
                                 MPIDI_failed_procs_string, PMI2_MAX_VALLEN, &vallen);
        MPIR_ERR_CHKANDJUMP(pmi_errno, mpi_errno, MPI_ERR_OTHER, "**pmi_kvs_get");
    }
#else
    pmi_errno = PMI_KVS_Get_value_length_max(&len);
    MPIR_ERR_CHKANDJUMP(pmi_errno, mpi_errno, MPI_ERR_OTHER,
                        "**pmi_kvs_get_value_length_max");
    pmi_errno = PMI_KVS_Get(kvsname, "PMI_dead_processes",
                            MPIDI_failed_procs_string, len);
    MPIR_ERR_CHKANDJUMP(pmi_errno, mpi_errno, MPI_ERR_OTHER, "**pmi_kvs_get");
#endif

    if (*MPIDI_failed_procs_string == '\0') {
        /* there are no failed processes */
        MPIDI_Failed_procs_group = MPIR_Group_empty;
        goto fn_exit;
    }

    MPL_DBG_MSG_S(MPIDI_CH3_DBG_OTHER, TYPICAL,
                  "Received proc fail notification: %s", MPIDI_failed_procs_string);

    /* save reference to previous group so we can identify new failures */
    prev_failed_group = MPIDI_Failed_procs_group;

    /* Parse the list of failed processes */
    MPIDI_CH3U_Get_failed_group(-2, &MPIDI_Failed_procs_group);

    /* get group of newly failed processes */
    mpi_errno = MPIR_Group_difference_impl(MPIDI_Failed_procs_group,
                                           prev_failed_group, &new_failed_group);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

    if (new_failed_group != MPIR_Group_empty) {
        mpi_errno = MPIDI_CH3I_Comm_handle_failed_procs(new_failed_group);
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);

        mpi_errno = terminate_failed_VCs(new_failed_group);
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);

        mpi_errno = MPIR_Group_release(new_failed_group);
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);
    }

    /* free prev group */
    if (prev_failed_group != MPIR_Group_empty) {
        mpi_errno = MPIR_Group_release(prev_failed_group);
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);
    }

  fn_exit:
    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_CH3U_CHECK_FOR_FAILED_PROCS);
    return mpi_errno;
  fn_oom: /* out-of-memory handler for utarray operations */
    MPIR_ERR_SET1(mpi_errno, MPI_ERR_OTHER, "**nomem", "**nomem %s", "utarray");
  fn_fail:
    goto fn_exit;
}
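The "PMI_dead_processes" value fetched above is parsed by MPIDI_CH3U_Get_failed_group into a process group. The helper below sketches the kind of parsing involved, assuming the value is a plain comma-separated list of integer ranks (e.g. "1,3,7"); it is an illustrative stand-in, not MPICH's implementation, and the assumed format should be verified against the process manager in use.

/* Hypothetical sketch: parse a comma-separated rank list into an array.
 * Returns the number of ranks parsed, or -1 on malformed input. */
#include <stdlib.h>

static int parse_dead_procs(const char *s, int *ranks, int max_ranks)
{
    int n = 0;
    while (*s != '\0' && n < max_ranks) {
        char *end;
        long r = strtol(s, &end, 10);
        if (end == s)
            return -1;          /* no digits where a rank was expected */
        ranks[n++] = (int) r;
        if (*end == ',')
            end++;              /* skip the separator */
        s = end;
    }
    return n;
}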
int main(int argc, char *argv[])
{
    int initialized, rank, size;
    int i, max_name_len, max_key_len, max_val_len;
    char *name, *key, *val;

    if (PMI_SUCCESS != PMI_Initialized(&initialized)) {
        return 1;
    }
    if (0 == initialized) {
        if (PMI_SUCCESS != PMI_Init(&initialized)) {
            return 1;
        }
    }

    if (PMI_SUCCESS != PMI_Get_rank(&rank)) {
        return 1;
    }
    if (PMI_SUCCESS != PMI_Get_size(&size)) {
        return 1;
    }

    printf("Hello, World. I am %d of %d\n", rank, size);

    if (PMI_SUCCESS != PMI_KVS_Get_name_length_max(&max_name_len)) {
        return 1;
    }
    name = (char *) malloc(max_name_len);
    if (NULL == name)
        return 1;

    if (PMI_SUCCESS != PMI_KVS_Get_key_length_max(&max_key_len)) {
        return 1;
    }
    key = (char *) malloc(max_key_len);
    if (NULL == key)
        return 1;

    if (PMI_SUCCESS != PMI_KVS_Get_value_length_max(&max_val_len)) {
        return 1;
    }
    val = (char *) malloc(max_val_len);
    if (NULL == val)
        return 1;

    if (PMI_SUCCESS != PMI_KVS_Get_my_name(name, max_name_len)) {
        return 1;
    }

    /* put my information */
    snprintf(key, max_key_len, "pmi_hello-%lu-test", (long unsigned) rank);
    snprintf(val, max_val_len, "%lu", (long unsigned) rank);

    if (PMI_SUCCESS != PMI_KVS_Put(name, key, val)) {
        return 1;
    }
    if (PMI_SUCCESS != PMI_KVS_Commit(name)) {
        return 1;
    }
    if (PMI_SUCCESS != PMI_Barrier()) {
        return 1;
    }

    /* verify everyone's information */
    for (i = 0; i < size; ++i) {
        snprintf(key, max_key_len, "pmi_hello-%lu-test", (long unsigned) i);

        if (PMI_SUCCESS != PMI_KVS_Get(name, key, val, max_val_len)) {
            return 1;
        }
        if (i != strtol(val, NULL, 0)) {
            fprintf(stderr, "%d: Error: Expected %d, got %d\n", rank, i,
                    (int) strtol(val, NULL, 0));
            return 1;
        }
    }

    PMI_Finalize();

    return 0;
}
int main(int argc, char **argv)
{
    int i, j, rc;
    int nprocs, procid;
    int clique_size, *clique_ranks = NULL;
    char *jobid_ptr, *nprocs_ptr, *procid_ptr;
    int pmi_rank, pmi_size, kvs_name_len, key_len, val_len;
    PMI_BOOL initialized;
    char *key, *val, *kvs_name;
    struct timeval tv1, tv2;
    long delta_t;
    char tv_str[20];

    gettimeofday(&tv1, NULL);

    /* Get process count and our id from environment variables */
    jobid_ptr  = getenv("SLURM_JOB_ID");
    nprocs_ptr = getenv("SLURM_NPROCS");
    procid_ptr = getenv("SLURM_PROCID");
    if (jobid_ptr == NULL) {
        printf("WARNING: PMI test not run under SLURM\n");
        nprocs = 1;
        procid = 0;
    } else if ((nprocs_ptr == NULL) || (procid_ptr == NULL)) {
        printf("FAILURE: SLURM environment variables not set\n");
        exit(1);
    } else {
        nprocs = atoi(nprocs_ptr);
        procid = atoi(procid_ptr);
    }

    /* Validate process count and our id */
    if ((nprocs < 1) || (nprocs > 9999)) {
        printf("FAILURE: Invalid nprocs %s\n", nprocs_ptr);
        exit(1);
    }
    if ((procid < 0) || (procid > 9999)) {
        printf("FAILURE: Invalid procid %s\n", procid_ptr);
        exit(1);
    }

    /* Get process count and size from PMI and validate */
    if ((rc = PMI_Init(&i)) != PMI_SUCCESS) {
        printf("FAILURE: PMI_Init: %d\n", rc);
        exit(1);
    }
    initialized = PMI_FALSE;
    if ((rc = PMI_Initialized(&initialized)) != PMI_SUCCESS) {
        printf("FAILURE: PMI_Initialized: %d\n", rc);
        exit(1);
    }
    if (initialized != PMI_TRUE) {
        printf("FAILURE: PMI_Initialized returned false\n");
        exit(1);
    }
    if ((rc = PMI_Get_rank(&pmi_rank)) != PMI_SUCCESS) {
        printf("FAILURE: PMI_Get_rank: %d\n", rc);
        exit(1);
    }
#if _DEBUG
    printf("PMI_Get_rank = %d\n", pmi_rank);
#endif
    if ((rc = PMI_Get_size(&pmi_size)) != PMI_SUCCESS) {
        printf("FAILURE: PMI_Get_size: %d, task %d\n", rc, pmi_rank);
        exit(1);
    }
#if _DEBUG
    printf("PMI_Get_size = %d\n", pmi_size);
#endif
    if (pmi_rank != procid) {
        printf("FAILURE: Rank(%d) != PROCID(%d)\n", pmi_rank, procid);
        exit(1);
    }
    if (pmi_size != nprocs) {
        printf("FAILURE: Size(%d) != NPROCS(%d), task %d\n",
               pmi_size, nprocs, pmi_rank);
        exit(1);
    }

    if ((rc = PMI_Get_clique_size(&clique_size)) != PMI_SUCCESS) {
        printf("FAILURE: PMI_Get_clique_size: %d, task %d\n", rc, pmi_rank);
        exit(1);
    }
    clique_ranks = malloc(sizeof(int) * clique_size);
    if ((rc = PMI_Get_clique_ranks(clique_ranks, clique_size)) != PMI_SUCCESS) {
        printf("FAILURE: PMI_Get_clique_ranks: %d, task %d\n", rc, pmi_rank);
        exit(1);
    }
#if _DEBUG
    for (i = 0; i < clique_size; i++)
        printf("PMI_Get_clique_ranks[%d]=%d\n", i, clique_ranks[i]);
#endif
    free(clique_ranks);

    if ((rc = PMI_KVS_Get_name_length_max(&kvs_name_len)) != PMI_SUCCESS) {
        printf("FAILURE: PMI_KVS_Get_name_length_max: %d, task %d\n",
               rc, pmi_rank);
        exit(1);
    }
#if _DEBUG
    printf("PMI_KVS_Get_name_length_max = %d\n", kvs_name_len);
#endif
    kvs_name = malloc(kvs_name_len);
    if ((rc = PMI_KVS_Get_my_name(kvs_name, kvs_name_len)) != PMI_SUCCESS) {
        printf("FAILURE: PMI_KVS_Get_my_name: %d, task %d\n", rc, pmi_rank);
        exit(1);
    }
#if _DEBUG
    printf("PMI_KVS_Get_my_name = %s\n", kvs_name);
#endif
    if ((rc = PMI_KVS_Get_key_length_max(&key_len)) != PMI_SUCCESS) {
        printf("FAILURE: PMI_KVS_Get_key_length_max: %d, task %d\n",
               rc, pmi_rank);
        exit(1);
    }
    key = malloc(key_len);
    if ((rc = PMI_KVS_Get_value_length_max(&val_len)) != PMI_SUCCESS) {
        printf("FAILURE: PMI_KVS_Get_value_length_max: %d, task %d\n",
               rc, pmi_rank);
        exit(1);
    }
#if _DEBUG
    printf("PMI_KVS_Get_value_length_max = %d\n", val_len);
#endif
    val = malloc(val_len);

    /* Build and set some key=val pairs */
    snprintf(key, key_len, "ATTR_1_%d", procid);
    snprintf(val, val_len, "A%d", procid + OFFSET_1);
    if ((rc = PMI_KVS_Put(kvs_name, key, val)) != PMI_SUCCESS) {
        printf("FAILURE: PMI_KVS_Put(%s,%s,%s): %d, task %d\n",
               kvs_name, key, val, rc, pmi_rank);
        exit(1);
    }
#if _DEBUG
    printf("PMI_KVS_Put(%s,%s,%s)\n", kvs_name, key, val);
#endif
    snprintf(key, key_len, "attr_2_%d", procid);
    snprintf(val, val_len, "B%d", procid + OFFSET_2);
    if ((rc = PMI_KVS_Put(kvs_name, key, val)) != PMI_SUCCESS) {
        printf("FAILURE: PMI_KVS_Put(%s,%s,%s): %d, task %d\n",
               kvs_name, key, val, rc, pmi_rank);
        exit(1);
    }
#if _DEBUG
    printf("PMI_KVS_Put(%s,%s,%s)\n", kvs_name, key, val);
#endif

    /* Sync KVS across all tasks */
    if ((rc = PMI_KVS_Commit(kvs_name)) != PMI_SUCCESS) {
        printf("FAILURE: PMI_KVS_Commit: %d, task %d\n", rc, pmi_rank);
        exit(1);
    }
#if _DEBUG
    printf("PMI_KVS_Commit completed\n");
#endif
    if ((rc = PMI_Barrier()) != PMI_SUCCESS) {
        printf("FAILURE: PMI_Barrier: %d, task %d\n", rc, pmi_rank);
        exit(1);
    }
#if _DEBUG
    printf("PMI_Barrier completed\n");
#endif

    /* Now let's get all keypairs and validate */
    for (i = 0; i < pmi_size; i++) {
        snprintf(key, key_len, "ATTR_1_%d", i);
        if ((rc = PMI_KVS_Get(kvs_name, key, val, val_len)) != PMI_SUCCESS) {
            printf("FAILURE: PMI_KVS_Get(%s): %d, task %d\n", key, rc, pmi_rank);
            exit(1);
        }
        if ((val[0] != 'A') || ((atoi(&val[1]) - OFFSET_1) != i)) {
            printf("FAILURE: Bad keypair %s=%s, task %d\n", key, val, pmi_rank);
            exit(1);
        }
#if _DEBUG
        if ((pmi_size <= 8) && (pmi_rank == 0))   /* limit output */
            printf("PMI_KVS_Get(%s,%s) %s\n", kvs_name, key, val);
#endif
        snprintf(key, key_len, "attr_2_%d", i);
        if ((rc = PMI_KVS_Get(kvs_name, key, val, val_len)) != PMI_SUCCESS) {
            printf("FAILURE: PMI_KVS_Get(%s): %d, task %d\n", key, rc, pmi_rank);
            exit(1);
        }
        if ((val[0] != 'B') || ((atoi(&val[1]) - OFFSET_2) != i)) {
            printf("FAILURE: Bad keypair %s=%s, task %d\n", key, val, pmi_rank);
            exit(1);
        }
#if _DEBUG
        if ((pmi_size <= 8) && (pmi_rank == 1))   /* limit output */
            printf("PMI_KVS_Get(%s,%s) %s\n", kvs_name, key, val);
#endif
    }

    /* use iterator */
    if ((rc = PMI_KVS_Iter_first(kvs_name, key, key_len, val, val_len))
        != PMI_SUCCESS) {
        printf("FAILURE: PMI_KVS_iter_first: %d, task %d\n", rc, pmi_rank);
        exit(1);
    }
    for (i = 0; ; i++) {
        if (key[0] == '\0') {
            if (i != (pmi_size * 2)) {
                printf("FAILURE: PMI_KVS_iter_next cycle count(%d, %d), task %d\n",
                       i, pmi_size, pmi_rank);
            }
            break;
        }
#if _DEBUG
        if ((pmi_size <= 8) && (pmi_rank == 1)) {   /* limit output */
            printf("PMI_KVS_Iter_next(%s,%d): %s=%s\n", kvs_name, i, key, val);
        }
#endif
        if ((rc = PMI_KVS_Iter_next(kvs_name, key, key_len, val, val_len))
            != PMI_SUCCESS) {
            printf("FAILURE: PMI_KVS_iter_next: %d, task %d\n", rc, pmi_rank);
            exit(1);
        }
    }

    /* Build some more key=val pairs */
    snprintf(key, key_len, "ATTR_3_%d", procid);
    snprintf(val, val_len, "C%d", procid + OFFSET_1);
    if ((rc = PMI_KVS_Put(kvs_name, key, val)) != PMI_SUCCESS) {
        printf("FAILURE: PMI_KVS_Put(%s,%s,%s): %d, task %d\n",
               kvs_name, key, val, rc, pmi_rank);
        exit(1);
    }
#if _DEBUG
    printf("PMI_KVS_Put(%s,%s,%s)\n", kvs_name, key, val);
#endif
    snprintf(key, key_len, "attr_4_%d", procid);
    snprintf(val, val_len, "D%d", procid + OFFSET_2);
    if ((rc = PMI_KVS_Put(kvs_name, key, val)) != PMI_SUCCESS) {
        printf("FAILURE: PMI_KVS_Put(%s,%s,%s): %d, task %d\n",
               kvs_name, key, val, rc, pmi_rank);
        exit(1);
    }
#if _DEBUG
    printf("PMI_KVS_Put(%s,%s,%s)\n", kvs_name, key, val);
#endif

    /* Sync KVS across all tasks */
    if ((rc = PMI_KVS_Commit(kvs_name)) != PMI_SUCCESS) {
        printf("FAILURE: PMI_KVS_Commit: %d, task %d\n", rc, pmi_rank);
        exit(1);
    }
#if _DEBUG
    printf("PMI_KVS_Commit completed\n");
#endif
    if ((rc = PMI_Barrier()) != PMI_SUCCESS) {
        printf("FAILURE: PMI_Barrier: %d, task %d\n", rc, pmi_rank);
        exit(1);
    }
#if _DEBUG
    printf("PMI_Barrier completed\n");
#endif

    /* Now let's get some keypairs and validate */
    for (i = 0; i < pmi_size; i++) {
        snprintf(key, key_len, "ATTR_1_%d", i);
        if ((rc = PMI_KVS_Get(kvs_name, key, val, val_len)) != PMI_SUCCESS) {
            printf("FAILURE: PMI_KVS_Get(%s): %d, task %d\n", key, rc, pmi_rank);
            exit(1);
        }
        if ((val[0] != 'A') || ((atoi(&val[1]) - OFFSET_1) != i)) {
            printf("FAILURE: Bad keypair %s=%s, task %d\n", key, val, pmi_rank);
            exit(1);
        }
#if _DEBUG
        if ((pmi_size <= 8) && (pmi_rank == 1))   /* limit output */
            printf("PMI_KVS_Get(%s,%s) %s\n", kvs_name, key, val);
#endif
        snprintf(key, key_len, "attr_4_%d", i);
        if ((rc = PMI_KVS_Get(kvs_name, key, val, val_len)) != PMI_SUCCESS) {
            printf("FAILURE: PMI_KVS_Get(%s): %d, task %d\n", key, rc, pmi_rank);
            exit(1);
        }
        if ((val[0] != 'D') || ((atoi(&val[1]) - OFFSET_2) != i)) {
            printf("FAILURE: Bad keypair %s=%s, task %d\n", key, val, pmi_rank);
            exit(1);
        }
#if _DEBUG
        if ((pmi_size <= 8) && (pmi_rank == 1))   /* limit output */
            printf("PMI_KVS_Get(%s,%s) %s\n", kvs_name, key, val);
#endif
    }

    /* Replicate the very heavy load that MVAPICH2 puts on PMI.
     * This load exceeds that of MPICH2 by a very wide margin. */
#if _DEBUG
    printf("Starting %d iterations each with %d PMI_KVS_Put and\n"
           "  one each PMI_KVS_Commit and KVS_Barrier\n",
           BARRIER_CNT, PUTS_PER_BARRIER);
    fflush(stdout);
#endif
    for (i = 0; i < BARRIER_CNT; i++) {
        for (j = 0; j < PUTS_PER_BARRIER; j++) {
            snprintf(key, key_len, "ATTR_%d_%d_%d", i, j, procid);
            snprintf(val, val_len, "C%d", procid + OFFSET_1);
            if ((rc = PMI_KVS_Put(kvs_name, key, val)) != PMI_SUCCESS) {
                printf("FAILURE: PMI_KVS_Put(%s,%s,%s): %d, task %d\n",
                       kvs_name, key, val, rc, pmi_rank);
                exit(1);
            }
        }
        if ((rc = PMI_KVS_Commit(kvs_name)) != PMI_SUCCESS) {
            printf("FAILURE: PMI_KVS_Commit: %d, task %d\n", rc, pmi_rank);
            exit(1);
        }
        if ((rc = PMI_Barrier()) != PMI_SUCCESS) {
            printf("FAILURE: PMI_Barrier: %d, task %d\n", rc, pmi_rank);
            exit(1);
        }
        /* Don't bother with PMI_KVS_Get as those are all local
         * and do not put a real load on srun or the network */
    }
#if _DEBUG
    printf("Iterative PMI calls successful\n");
#endif

    /* create new keyspace and test it */
    if ((rc = PMI_KVS_Create(kvs_name, kvs_name_len)) != PMI_SUCCESS) {
        printf("FAILURE: PMI_KVS_Create: %d, task %d\n", rc, pmi_rank);
        exit(1);
    }
#if _DEBUG
    printf("PMI_KVS_Create %s\n", kvs_name);
#endif
    if ((rc = PMI_KVS_Put(kvs_name, "KVS_KEY", "KVS_VAL")) != PMI_SUCCESS) {
        printf("FAILURE: PMI_KVS_Put: %d, task %d\n", rc, pmi_rank);
        exit(1);
    }
#if _DEBUG
    printf("PMI_KVS_Put(%s,KVS_KEY,KVS_VAL)\n", kvs_name);
#endif
    if ((rc = PMI_KVS_Get(kvs_name, "KVS_KEY", val, val_len)) != PMI_SUCCESS) {
        printf("FAILURE: PMI_KVS_Get(%s, KVS_KEY): %d, task %d\n",
               kvs_name, rc, pmi_rank);
        exit(1);
    }
#if _DEBUG
    printf("PMI_KVS_Get(%s,%s) %s\n", kvs_name, "KVS_KEY", val);
#endif
    if ((rc = PMI_KVS_Destroy(kvs_name)) != PMI_SUCCESS) {
        printf("FAILURE: PMI_KVS_Destroy(%s): %d, task %d\n",
               kvs_name, rc, pmi_rank);
        exit(1);
    }
    if ((rc = PMI_KVS_Get(kvs_name, "KVS_KEY", val, val_len))
        != PMI_ERR_INVALID_KVS) {
        printf("FAILURE: PMI_KVS_Get(%s, KVS_KEY): %d, task %d\n",
               kvs_name, rc, pmi_rank);
        exit(1);
    }

    if ((rc = PMI_Finalize()) != PMI_SUCCESS) {
        printf("FAILURE: PMI_Finalize: %d, task %d\n", rc, pmi_rank);
        exit(1);
    }

    if (_DEBUG || (pmi_rank < 4)) {
        gettimeofday(&tv2, NULL);
        delta_t  = (tv2.tv_sec - tv1.tv_sec) * 1000000;
        delta_t += tv2.tv_usec - tv1.tv_usec;
        snprintf(tv_str, sizeof(tv_str), "usec=%ld", delta_t);
        printf("PMI test ran successfully, for task %d, %s\n",
               pmi_rank, tv_str);
    }
    if (pmi_rank == 0) {
        printf("NOTE: All failures reported, ");
        printf("but only first four successes reported\n");
    }
    exit(0);
}
/*** MODEX SECTION ***/
static int modex(opal_list_t *procs)
{
    int rc;
    char *rml_uri, *attr;
    orte_vpid_t v;
    orte_process_name_t name;

    OPAL_OUTPUT_VERBOSE((1, orte_grpcomm_base.output,
                         "%s grpcomm:pmi: modex entered",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME)));

    /* add our oob endpoint info so that oob communications
     * can be supported */
    rml_uri = orte_rml.get_contact_info();
    if (strlen(rml_uri) > (size_t)pmi_vallen_max) {
        opal_output(0, "grpcomm:pmi: RML uri length is too long\n");
        return ORTE_ERROR;
    }
    if (0 > asprintf(&attr, "%s-RMLURI", ORTE_NAME_PRINT(ORTE_PROC_MY_NAME))) {
        free(rml_uri);
        return ORTE_ERR_OUT_OF_RESOURCE;
    }
    rc = PMI_KVS_Put(pmi_kvs_name, attr, rml_uri);
    if (PMI_SUCCESS != rc) {
        ORTE_PMI_ERROR(rc, "PMI_KVS_Put");
        free(rml_uri);
        free(attr);
        return ORTE_ERROR;
    }
    free(rml_uri);
    free(attr);

    /* commit our modex info */
    if (PMI_SUCCESS != (rc = PMI_KVS_Commit(pmi_kvs_name))) {
        ORTE_PMI_ERROR(rc, "PMI_KVS_Commit");
        return ORTE_ERROR;
    }

    /* Barrier here to ensure all other procs have committed */
    if (ORTE_SUCCESS != (rc = pmi_barrier())) {
        return rc;
    }

    /* harvest the oob endpoint info for all other procs
     * in our job so oob wireup can be completed */
    rml_uri = malloc(pmi_vallen_max);
    if (NULL == rml_uri) {
        return ORTE_ERR_OUT_OF_RESOURCE;
    }
    name.jobid = ORTE_PROC_MY_NAME->jobid;
    for (v = 0; v < orte_process_info.num_procs; v++) {
        if (v == ORTE_PROC_MY_NAME->vpid) {
            continue;
        }
        name.vpid = v;
        if (0 > asprintf(&attr, "%s-RMLURI", ORTE_NAME_PRINT(&name))) {
            free(rml_uri);
            return ORTE_ERR_OUT_OF_RESOURCE;
        }
        rc = PMI_KVS_Get(pmi_kvs_name, attr, rml_uri, pmi_vallen_max);
        if (PMI_SUCCESS != rc) {
            ORTE_PMI_ERROR(rc, "PMI_KVS_Get");
            free(rml_uri);
            free(attr);
            return ORTE_ERROR;
        }
        free(attr);
        OPAL_OUTPUT_VERBOSE((5, orte_grpcomm_base.output,
                             "%s grpcomm:pmi: proc %s oob endpoint %s",
                             ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                             ORTE_NAME_PRINT(&name), rml_uri));
        /* set the contact info into the hash table */
        if (ORTE_SUCCESS != (rc = orte_rml.set_contact_info(rml_uri))) {
            free(rml_uri);
            return rc;
        }
    }
    free(rml_uri);

    OPAL_OUTPUT_VERBOSE((1, orte_grpcomm_base.output,
                         "%s grpcomm:pmi: modex completed",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME)));
    return rc;
}