/*
 * Determine this process's vpid.
 *
 * @param outvp      [out] receives the computed vpid
 * @param start_vpid base vpid to offset the local rank by
 * @return ORTE_SUCCESS on success, ORTE_ERROR on failure
 *
 * With CNOS support the rank comes directly from cnos_get_rank().
 * Otherwise (Cray XE6) the rank is parsed from the PMI_GNI_LOC_ADDR
 * environment variable.
 */
static int get_vpid(orte_vpid_t *outvp, orte_vpid_t start_vpid)
{
#if ORTE_MCA_ESS_ALPS_HAVE_CNOS == 1
    *outvp = (orte_vpid_t)cnos_get_rank() + start_vpid;
    return ORTE_SUCCESS;
#else
    /* Cray XE6 Notes:
     * using PMI_GNI_LOC_ADDR to set vpid.
     */
    int rank = 0;
    char *env;
    char *endptr = NULL;

    if (NULL == (env = getenv("PMI_GNI_LOC_ADDR"))) {
        OPAL_OUTPUT_VERBOSE((0, orte_ess_base_output,
                             "PMI_GNI_LOC_ADDR not found, cannot continue\n"));
        ORTE_ERROR_LOG(ORTE_ERROR);
        return ORTE_ERROR;
    }

    errno = 0;
    rank = (int)strtol(env, &endptr, 10);
    /* strtol only sets errno on range errors; a non-numeric string
     * returns 0 with endptr == env, so that case must be checked
     * explicitly or garbage input silently becomes rank 0. */
    if (0 != errno || endptr == env) {
        OPAL_OUTPUT_VERBOSE((0, orte_ess_base_output,
                             "strtol error detected at %s:%d\n",
                             __FILE__, __LINE__));
        ORTE_ERROR_LOG(ORTE_ERROR);
        return ORTE_ERROR;
    }

    *outvp = (orte_vpid_t)(rank + (int)start_vpid);
    return ORTE_SUCCESS;
#endif /* ORTE_MCA_ESS_ALPS_HAVE_CNOS == 1 */
}
static int rte_init(void) { int rc; int nprocs; orte_dt_init(); /* Get our process information */ /* Procs in this environment are directly launched. Hence, there * was no mpirun to create a jobid for us, and each app proc is * going to have to fend for itself. For now, we assume that the * jobid is some arbitrary number (say, 1). */ ORTE_PROC_MY_NAME->jobid = 1; /* find our vpid from cnos */ ORTE_PROC_MY_NAME->vpid = (orte_vpid_t) cnos_get_rank(); /* Get the number of procs in the job from cnos */ orte_process_info.num_procs = (orte_std_cntr_t) cnos_get_size(); /* Get the nid map */ nprocs = cnos_get_nidpid_map(&map); if (nprocs <= 0) { opal_output(0, "%5d: cnos_get_nidpid_map() returned %d", cnos_get_rank(), nprocs); return ORTE_ERR_FATAL; } /* MPI_Init needs the grpcomm framework, so we have to init it */ if (ORTE_SUCCESS != (rc = orte_grpcomm_base_open())) { ORTE_ERROR_LOG(rc); return rc; } if (ORTE_SUCCESS != (rc = orte_grpcomm_base_select())) { ORTE_ERROR_LOG(rc); return rc; } /* that's all we need here */ return ORTE_SUCCESS; }
static int cnos_gethostname(char *name, int len) { sprintf(name,"%d",cnos_get_rank()); }