Example #1
static int test_item4(void)
{
    int rc = 0;
    int val = 0;
    int *ranks = NULL;
    int i = 0;

    val = -1;
    if (PMI_SUCCESS != (rc = PMI_Get_clique_size(&val))) {
        log_fatal("PMI_Get_clique_size failed: %d\n", rc);
        return rc;
    }
    log_info("PMI_Get_clique_size=%d\n", val);
    log_assert((0 < val) && (val <= size), "");

    ranks = alloca(val*sizeof(int));
    if (!ranks) {
        return PMI_FAIL;
    }

    memset(ranks, (-1), val*sizeof(int));
    if (PMI_SUCCESS != (rc = PMI_Get_clique_ranks(ranks, val))) {
        log_fatal("PMI_Get_clique_ranks failed: %d\n", rc);
        return rc;
    }

    for (i = 0; i < val; i++) {
        if (!((0 <= ranks[i]) && (ranks[i] < size))) {
            log_fatal("found invalid value in ranks array: ranks[%d]=%d\n", i, ranks[i]);
            /* rc is still PMI_SUCCESS here, so return an explicit failure */
            return PMI_FAIL;
        }
    }

    return rc;
}
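
A note on the example above: alloca() reserves stack memory and cannot signal failure by returning NULL, so the NULL check there is effectively dead code. A minimal heap-based sketch of the same probe, assuming the same log_fatal/log_assert helpers and `size` global from this test harness:

static int test_item4_heap(void)
{
    int rc = 0;
    int val = -1;
    int *ranks = NULL;
    int i;

    if (PMI_SUCCESS != (rc = PMI_Get_clique_size(&val))) {
        log_fatal("PMI_Get_clique_size failed: %d\n", rc);
        return rc;
    }
    log_assert((0 < val) && (val <= size), "");

    /* malloc, unlike alloca, can actually fail and be checked */
    ranks = malloc(val * sizeof(int));
    if (!ranks) {
        return PMI_FAIL;
    }

    if (PMI_SUCCESS != (rc = PMI_Get_clique_ranks(ranks, val))) {
        log_fatal("PMI_Get_clique_ranks failed: %d\n", rc);
        free(ranks);
        return rc;
    }
    for (i = 0; i < val; i++) {
        if (ranks[i] < 0 || ranks[i] >= size) {
            log_fatal("found invalid value in ranks array: ranks[%d]=%d\n", i, ranks[i]);
            free(ranks);
            return PMI_FAIL;
        }
    }
    free(ranks);
    return rc;
}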
/***   MODEX SECTION ***/
static int modex(orte_grpcomm_collective_t *coll)
{
    int *local_ranks = NULL, local_rank_count = 0;
    opal_hwloc_locality_t locality;
    const char *cpuset;
    orte_process_name_t name;
    orte_vpid_t v;
    bool local;
    int rc, i;

    OPAL_OUTPUT_VERBOSE((1, orte_grpcomm_base_framework.framework_output,
                         "%s grpcomm:pmi: modex entered",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME)));

    /* discover the local ranks */
#if WANT_PMI2_SUPPORT
    {
        char *pmapping = (char*)malloc(PMI2_MAX_VALLEN);
        int found, sid, nodes, k;
        orte_vpid_t n;
        char *p;
        rc = PMI2_Info_GetJobAttr("PMI_process_mapping", pmapping, PMI2_MAX_VALLEN, &found);
        if (!found || PMI_SUCCESS != rc) { /* can't check PMI2_SUCCESS as some implementations (e.g., Cray) don't define it */
            opal_output(0, "%s could not get PMI_process_mapping (PMI2_Info_GetJobAttr() failed)",
                        ORTE_NAME_PRINT(ORTE_PROC_MY_NAME));
            return ORTE_ERROR;
        }

        i = 0; n = 0; local_rank_count = 0;
        if (NULL != (p = strstr(pmapping, "(vector"))) {
            while (NULL != (p = strstr(p+1, ",("))) {
                if (3 == sscanf(p, ",(%d,%d,%d)", &sid, &nodes, &local_rank_count)) {
                    for (k = 0; k < nodes; k++) {
                        if ((ORTE_PROC_MY_NAME->vpid >= n) &&
                            (ORTE_PROC_MY_NAME->vpid < (n + local_rank_count))) {
                            break;
                        }
                        n += local_rank_count;
                    }
                    if (k < nodes) {
                        /* found our block - stop scanning so n and
                         * local_rank_count keep our own node's values */
                        break;
                    }
                }
            }
        }
        free(pmapping);

        if (local_rank_count > 0) {
            local_ranks = (int*)malloc(local_rank_count * sizeof(int));
            for (i=0; i < local_rank_count; i++) {
                local_ranks[i] = n + i;
            }
        }

        if (NULL == local_ranks) {
            opal_output(0, "%s could not get PMI_process_mapping",
                        ORTE_NAME_PRINT(ORTE_PROC_MY_NAME));
            return ORTE_ERROR;
        }
    }
#else
    rc = PMI_Get_clique_size (&local_rank_count);
    if (PMI_SUCCESS != rc) {
        ORTE_ERROR_LOG(ORTE_ERROR);
        return ORTE_ERROR;
    }

    local_ranks = calloc (local_rank_count, sizeof (int));
    if (NULL == local_ranks) {
        ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
        return ORTE_ERR_OUT_OF_RESOURCE;
    }

    rc = PMI_Get_clique_ranks (local_ranks, local_rank_count);
    if (PMI_SUCCESS != rc) {
        ORTE_ERROR_LOG(ORTE_ERROR);
        free(local_ranks);
        return ORTE_ERROR;
    }
#endif


    /* our RTE data was constructed and pushed in the ESS pmi component */

    /* commit our modex info */
    opal_db.commit((opal_identifier_t *)ORTE_PROC_MY_NAME);

    /* cycle thru all my peers and collect their RTE info */
    name.jobid = ORTE_PROC_MY_NAME->jobid;
    for (v=0; v < orte_process_info.num_procs; v++) {
        if (v == ORTE_PROC_MY_NAME->vpid) {
            continue;
        }
        name.vpid = v;

        /* check if this is a local process */
        for (i = 0, local = false; i < local_rank_count; ++i) {
            if ((orte_vpid_t) local_ranks[i] == v) {
                local = true;
                break;
            }
        }

        /* compute and store the locality as it isn't something that gets
         * pushed to PMI - doing this here prevents the MPI layer from
         * fetching data for all ranks
         */
        if (local) {
            if (ORTE_SUCCESS != (rc = opal_db.fetch_pointer((opal_identifier_t*)&name, OPAL_DB_CPUSET,
                                                            (void **)&cpuset, OPAL_STRING))) {
                ORTE_ERROR_LOG(rc);
                return rc;
            }

            if (NULL == cpuset) {
                /* if we share a node, but we don't know anything more, then
                 * mark us as on the node as this is all we know
                 */
                locality = OPAL_PROC_ON_NODE;
            } else {
                /* determine relative location on our node */
                locality = opal_hwloc_base_get_relative_locality(opal_hwloc_topology,
                                                                 orte_process_info.cpuset,
                                                                 (char *) cpuset);
            }
        } else {
            /* this rank is on a different node, so mark it as non-local */
            locality = OPAL_PROC_NON_LOCAL;
        }

        OPAL_OUTPUT_VERBOSE((1, orte_grpcomm_base_framework.framework_output,
                            "%s grpcomm:pmi proc %s locality %s",
                             ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                             ORTE_NAME_PRINT(&name), opal_hwloc_base_print_locality(locality)));

        if (ORTE_SUCCESS != (rc = opal_db.store((opal_identifier_t*)&name, OPAL_SCOPE_INTERNAL,
                                                OPAL_DB_LOCALITY, &locality, OPAL_HWLOC_LOCALITY_T))) {
            ORTE_ERROR_LOG(rc);
            return rc;
        }
    }

    free(local_ranks);

    /* execute the callback */
    coll->active = false;
    if (NULL != coll->cbfunc) {
        coll->cbfunc(NULL, coll->cbdata);
    }
    return rc;
}
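
The PMI2 branch above parses the PMI_process_mapping job attribute. Its value looks like "(vector,(0,4,2))", where each (base,nodes,ppn) triple describes a block of nodes that each hold ppn consecutive ranks. A standalone sketch of the same scan (the helper name is illustrative, not part of any PMI API):

#include <stdio.h>
#include <string.h>

/* Return 0 and fill base_rank/ranks_per_node for the node that owns
 * my_rank, or -1 if my_rank is not covered by any block. Mirrors the
 * scanning loop in the modex example above. */
static int find_local_block(const char *pmapping, int my_rank,
                            int *base_rank, int *ranks_per_node)
{
    const char *p = strstr(pmapping, "(vector");
    int sid, nodes, ppn, k, n = 0;

    if (NULL == p) {
        return -1;
    }
    while (NULL != (p = strstr(p + 1, ",("))) {
        if (3 == sscanf(p, ",(%d,%d,%d)", &sid, &nodes, &ppn)) {
            for (k = 0; k < nodes; k++) {
                if (my_rank >= n && my_rank < n + ppn) {
                    *base_rank = n;        /* lowest rank on our node */
                    *ranks_per_node = ppn;
                    return 0;
                }
                n += ppn;
            }
        }
    }
    return -1;
}

For "(vector,(0,4,2))" and my_rank 5, this yields base_rank 4 and ranks_per_node 2 (the node blocks are ranks 0-1, 2-3, 4-5, 6-7).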
Example #3
static int s1_init(void)
{
    PMI_BOOL initialized;
    int spawned;
    int rc, ret = OPAL_ERROR;
    int i, rank, lrank, nrank;
    char *pmix_id, tmp[64];
    opal_value_t kv;
    char *str;
    uint32_t ui32;
    opal_process_name_t ldr;
    char **localranks=NULL;

    if (PMI_SUCCESS != (rc = PMI_Initialized(&initialized))) {
        OPAL_PMI_ERROR(rc, "PMI_Initialized");
        return OPAL_ERROR;
    }

    if (PMI_TRUE != initialized && PMI_SUCCESS != (rc = PMI_Init(&spawned))) {
        OPAL_PMI_ERROR(rc, "PMI_Init");
        return OPAL_ERROR;
    }

    // setup hash table
    opal_pmix_base_hash_init();

    // Query the maximum PMI key, value, and KVS-name lengths
    rc = PMI_KVS_Get_value_length_max(&pmix_vallen_max);
    if (PMI_SUCCESS != rc) {
        OPAL_PMI_ERROR(rc, "PMI_KVS_Get_value_length_max");
        goto err_exit;
    }
    pmix_vallen_threshold = pmix_vallen_max * 3;
    pmix_vallen_threshold >>= 2;

    rc = PMI_KVS_Get_name_length_max(&pmix_kvslen_max);
    if (PMI_SUCCESS != rc) {
        OPAL_PMI_ERROR(rc, "PMI_KVS_Get_name_length_max");
        goto err_exit;
    }

    rc = PMI_KVS_Get_key_length_max(&pmix_keylen_max);
    if (PMI_SUCCESS != rc) {
        OPAL_PMI_ERROR(rc, "PMI_KVS_Get_key_length_max");
        goto err_exit;
    }

    // Initialize job environment information
    pmix_id = (char*)malloc(pmix_vallen_max);
    if (pmix_id == NULL) {
        ret = OPAL_ERR_OUT_OF_RESOURCE;
        goto err_exit;
    }
    /* Get domain id */
    if (PMI_SUCCESS != (rc = PMI_Get_kvs_domain_id(pmix_id, pmix_vallen_max))) {
        free(pmix_id);
        goto err_exit;
    }

    /* get our rank */
    ret = PMI_Get_rank(&rank);
    if( PMI_SUCCESS != ret ) {
        OPAL_PMI_ERROR(ret, "PMI_Get_rank");
        free(pmix_id);
        goto err_exit;
    }

    /* Slurm PMI provides the job id as an integer followed
     * by a '.', followed by essentially a stepid. The first integer
     * defines an overall job number. The second integer is the number of
     * individual jobs we have run within that allocation. */
    s1_pname.jobid = strtoul(pmix_id, &str, 10);
    s1_pname.jobid = (s1_pname.jobid << 16) & 0xffff0000;
    if (NULL != str && '.' == *str) {
        /* step over the '.' before reading the stepid */
        ui32 = strtoul(str + 1, NULL, 10);
        s1_pname.jobid |= (ui32 & 0x0000ffff);
    }
    free(pmix_id);
    ldr.jobid = s1_pname.jobid;
    s1_pname.vpid = rank;
    /* store our name in the opal_proc_t so that
     * debug messages will make sense - an upper
     * layer will eventually overwrite it, but that
     * won't do any harm */
    opal_proc_set_name(&s1_pname);
    opal_output_verbose(2, opal_pmix_base_framework.framework_output,
                        "%s pmix:s1: assigned tmp name",
                        OPAL_NAME_PRINT(s1_pname));

    OBJ_CONSTRUCT(&kv, opal_value_t);
    kv.key = strdup(OPAL_PMIX_JOBID);
    kv.type = OPAL_UINT32;
    kv.data.uint32 = s1_pname.jobid;
    if (OPAL_SUCCESS != (ret = opal_pmix_base_store(&OPAL_PROC_MY_NAME, &kv))) {
        OPAL_ERROR_LOG(ret);
        OBJ_DESTRUCT(&kv);
        goto err_exit;
    }
    OBJ_DESTRUCT(&kv);

    /* save it */
    OBJ_CONSTRUCT(&kv, opal_value_t);
    kv.key = strdup(OPAL_PMIX_RANK);
    kv.type = OPAL_UINT32;
    kv.data.uint32 = rank;
    if (OPAL_SUCCESS != (ret = opal_pmix_base_store(&OPAL_PROC_MY_NAME, &kv))) {
        OPAL_ERROR_LOG(ret);
        OBJ_DESTRUCT(&kv);
        goto err_exit;
    }
    OBJ_DESTRUCT(&kv);

    pmix_kvs_name = (char*)malloc(pmix_kvslen_max);
    if (pmix_kvs_name == NULL) {
        ret = OPAL_ERR_OUT_OF_RESOURCE;
        goto err_exit;
    }

    rc = PMI_KVS_Get_my_name(pmix_kvs_name, pmix_kvslen_max);
    if (PMI_SUCCESS != rc) {
        OPAL_PMI_ERROR(rc, "PMI_KVS_Get_my_name");
        goto err_exit;
    }

    /* get our local proc info to find our local rank */
    if (PMI_SUCCESS != (rc = PMI_Get_clique_size(&nlranks))) {
        OPAL_PMI_ERROR(rc, "PMI_Get_clique_size");
        return rc;
    }
    /* save the local size */
    OBJ_CONSTRUCT(&kv, opal_value_t);
    kv.key = strdup(OPAL_PMIX_LOCAL_SIZE);
    kv.type = OPAL_UINT32;
    kv.data.uint32 = nlranks;
    if (OPAL_SUCCESS != (ret = opal_pmix_base_store(&OPAL_PROC_MY_NAME, &kv))) {
        OPAL_ERROR_LOG(ret);
        OBJ_DESTRUCT(&kv);
        goto err_exit;
    }
    OBJ_DESTRUCT(&kv);
    lrank = 0;
    nrank = 0;
    ldr.vpid = rank;
    if (0 < nlranks) {
        /* now get the specific ranks */
        lranks = (int*)calloc(nlranks, sizeof(int));
        if (NULL == lranks) {
            rc = OPAL_ERR_OUT_OF_RESOURCE;
            OPAL_ERROR_LOG(rc);
            return rc;
        }
        if (PMI_SUCCESS != (rc = PMI_Get_clique_ranks(lranks, nlranks))) {
            OPAL_PMI_ERROR(rc, "PMI_Get_clique_ranks");
            free(lranks);
            return rc;
        }
        /* note the local ldr */
        ldr.vpid = lranks[0];
        /* save this */
        memset(tmp, 0, 64);
        for (i=0; i < nlranks; i++) {
            (void)snprintf(tmp, 64, "%d", lranks[i]);
            opal_argv_append_nosize(&localranks, tmp);
            if (rank == lranks[i]) {
                lrank = i;
                nrank = i;
            }
        }
        str = opal_argv_join(localranks, ',');
        opal_argv_free(localranks);
        OBJ_CONSTRUCT(&kv, opal_value_t);
        kv.key = strdup(OPAL_PMIX_LOCAL_PEERS);
        kv.type = OPAL_STRING;
        kv.data.string = str;
        if (OPAL_SUCCESS != (ret = opal_pmix_base_store(&OPAL_PROC_MY_NAME, &kv))) {
            OPAL_ERROR_LOG(ret);
            OBJ_DESTRUCT(&kv);
            goto err_exit;
        }
        OBJ_DESTRUCT(&kv);
    }

    /* save the local leader */
    OBJ_CONSTRUCT(&kv, opal_value_t);
    kv.key = strdup(OPAL_PMIX_LOCALLDR);
    kv.type = OPAL_UINT64;
    kv.data.uint64 = *(uint64_t*)&ldr;
    if (OPAL_SUCCESS != (ret = opal_pmix_base_store(&OPAL_PROC_MY_NAME, &kv))) {
        OPAL_ERROR_LOG(ret);
        OBJ_DESTRUCT(&kv);
        goto err_exit;
    }
    OBJ_DESTRUCT(&kv);
    /* save our local rank */
    OBJ_CONSTRUCT(&kv, opal_value_t);
    kv.key = strdup(OPAL_PMIX_LOCAL_RANK);
    kv.type = OPAL_UINT16;
    kv.data.uint16 = lrank;
    if (OPAL_SUCCESS != (ret = opal_pmix_base_store(&OPAL_PROC_MY_NAME, &kv))) {
        OPAL_ERROR_LOG(ret);
        OBJ_DESTRUCT(&kv);
        goto err_exit;
    }
    OBJ_DESTRUCT(&kv);
    /* and our node rank */
    OBJ_CONSTRUCT(&kv, opal_value_t);
    kv.key = strdup(OPAL_PMIX_NODE_RANK);
    kv.type = OPAL_UINT16;
    kv.data.uint16 = nrank;
    if (OPAL_SUCCESS != (ret = opal_pmix_base_store(&OPAL_PROC_MY_NAME, &kv))) {
        OPAL_ERROR_LOG(ret);
        OBJ_DESTRUCT(&kv);
        goto err_exit;
    }
    OBJ_DESTRUCT(&kv);

    /* get universe size */
    ret = PMI_Get_universe_size(&i);
    if (PMI_SUCCESS != ret) {
        OPAL_PMI_ERROR(ret, "PMI_Get_universe_size");
        goto err_exit;
    }
    /* push this into the dstore for subsequent fetches */
    OBJ_CONSTRUCT(&kv, opal_value_t);
    kv.key = strdup(OPAL_PMIX_UNIV_SIZE);
    kv.type = OPAL_UINT32;
    kv.data.uint32 = i;
    if (OPAL_SUCCESS != (ret = opal_pmix_base_store(&OPAL_PROC_MY_NAME, &kv))) {
        OPAL_ERROR_LOG(ret);
        OBJ_DESTRUCT(&kv);
        goto err_exit;
    }
    OBJ_DESTRUCT(&kv);

    /* get job size */
    ret = PMI_Get_size(&i);
    if (PMI_SUCCESS != ret) {
        OPAL_PMI_ERROR(ret, "PMI_Get_size");
        goto err_exit;
    }
    OBJ_CONSTRUCT(&kv, opal_value_t);
    kv.key = strdup(OPAL_PMIX_JOB_SIZE);
    kv.type = OPAL_UINT32;
    kv.data.uint32 = i;
    if (OPAL_SUCCESS != (ret = opal_pmix_base_store(&OPAL_PROC_MY_NAME, &kv))) {
        OPAL_ERROR_LOG(ret);
        OBJ_DESTRUCT(&kv);
        goto err_exit;
    }
    OBJ_DESTRUCT(&kv);

    /* get appnum */
    ret = PMI_Get_appnum(&i);
    if (PMI_SUCCESS != ret) {
        OPAL_PMI_ERROR(ret, "PMI_Get_appnum");
        goto err_exit;
    }
    OBJ_CONSTRUCT(&kv, opal_value_t);
    kv.key = strdup(OPAL_PMIX_APPNUM);
    kv.type = OPAL_UINT32;
    kv.data.uint32 = i;
    if (OPAL_SUCCESS != (ret = opal_pmix_base_store(&OPAL_PROC_MY_NAME, &kv))) {
        OPAL_ERROR_LOG(ret);
        OBJ_DESTRUCT(&kv);
        goto err_exit;
    }
    OBJ_DESTRUCT(&kv);

    /* increment the init count */
    ++pmix_init_count;

    return OPAL_SUCCESS;

 err_exit:
    PMI_Finalize();
    return ret;
}
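
The jobid handling above packs Slurm's "<jobid>.<stepid>" KVS domain id into a single 32-bit value: the job number lands in the upper 16 bits and the step id in the lower 16. A self-contained sketch of that packing (the function name is illustrative):

#include <stdint.h>
#include <stdlib.h>

static uint32_t pack_slurm_jobid(const char *domain_id)
{
    char *sep = NULL;
    uint32_t jobid = ((uint32_t)strtoul(domain_id, &sep, 10) << 16) & 0xffff0000u;

    if (NULL != sep && '.' == *sep) {
        /* low 16 bits hold the step id */
        jobid |= (uint32_t)strtoul(sep + 1, NULL, 10) & 0x0000ffffu;
    }
    return jobid;
}

/* e.g. pack_slurm_jobid("1234.2") == (1234 << 16) | 2 */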
Example #4
int main(int argc, char **argv)
{
	int i, j, rc;
	int nprocs, procid;
	int clique_size, *clique_ranks = NULL;
	char *jobid_ptr, *nprocs_ptr, *procid_ptr;
	int pmi_rank, pmi_size, kvs_name_len, key_len, val_len;
	PMI_BOOL initialized;
	char *key, *val, *kvs_name;
	struct timeval tv1, tv2;
	long delta_t;
	char tv_str[20];

	gettimeofday(&tv1, NULL);

	/* Get process count and our id from environment variables */
	jobid_ptr  = getenv("SLURM_JOB_ID");
	nprocs_ptr = getenv("SLURM_NPROCS");
	procid_ptr = getenv("SLURM_PROCID");
	if (jobid_ptr == NULL) {
		printf("WARNING: PMI test not run under SLURM\n");
		nprocs = 1;
		procid = 0;
	} else if ((nprocs_ptr == NULL) || (procid_ptr == NULL)) {
		printf("FAILURE: SLURM environment variables not set\n");
		exit(1);
	} else {
		nprocs = atoi(nprocs_ptr);
		procid = atoi(procid_ptr);
	}

	/* Validate process count and our id */
	if ((nprocs < 1) || (nprocs > 9999)) {
		printf("FAILURE: Invalid nprocs %s\n", nprocs_ptr);
		exit(1);
	}
	if ((procid < 0) || (procid > 9999)) {
		printf("FAILURE: Invalid procid %s\n", procid_ptr);
		exit(1);
	}

	/* Get process count and size from PMI and validate */
	if ((rc = PMI_Init(&i)) != PMI_SUCCESS) {
		printf("FAILURE: PMI_Init: %d\n", rc);
		exit(1);
	}
	initialized = PMI_FALSE;
	if ((rc = PMI_Initialized(&initialized)) != PMI_SUCCESS) {
		printf("FAILURE: PMI_Initialized: %d\n", rc);
		exit(1);
	}
	if (initialized != PMI_TRUE) {
		printf("FAILURE: PMI_Initialized returned false\n");
		exit(1);
	}
	if ((rc = PMI_Get_rank(&pmi_rank)) != PMI_SUCCESS) {
		printf("FAILURE: PMI_Get_rank: %d\n", rc);
		exit(1);
	}
#if _DEBUG
	printf("PMI_Get_rank = %d\n", pmi_rank);
#endif
	if ((rc = PMI_Get_size(&pmi_size)) != PMI_SUCCESS) {
		printf("FAILURE: PMI_Get_size: %d, task %d\n", rc, pmi_rank);
		exit(1);
	}
#if _DEBUG
	printf("PMI_Get_size = %d\n", pmi_size);
#endif
	if (pmi_rank != procid) {
		printf("FAILURE: Rank(%d) != PROCID(%d)\n",
			pmi_rank, procid);
		exit(1);
	}
	if (pmi_size != nprocs) {
		printf("FAILURE: Size(%d) != NPROCS(%d), task %d\n",
			pmi_size, nprocs, pmi_rank);
		exit(1);
	}

	if ((rc = PMI_Get_clique_size(&clique_size)) != PMI_SUCCESS) {
		printf("FAILURE: PMI_Get_clique_size: %d, task %d\n",
			rc, pmi_rank);
		exit(1);
	}
	clique_ranks = malloc(sizeof(int) * clique_size);
	if (clique_ranks == NULL) {
		printf("FAILURE: malloc clique_ranks: task %d\n", pmi_rank);
		exit(1);
	}
	if ((rc = PMI_Get_clique_ranks(clique_ranks, clique_size)) !=
	     PMI_SUCCESS) {
		printf("FAILURE: PMI_Get_clique_ranks: %d, task %d\n",
			rc, pmi_rank);
		exit(1);
	}
#if _DEBUG
	for (i=0; i<clique_size; i++)
		printf("PMI_Get_clique_ranks[%d]=%d\n", i, clique_ranks[i]);
#endif
	free(clique_ranks);

	if ((rc = PMI_KVS_Get_name_length_max(&kvs_name_len)) != PMI_SUCCESS) {
		printf("FAILURE: PMI_KVS_Get_name_length_max: %d, task %d\n",
			rc, pmi_rank);
		exit(1);
	}
#if _DEBUG
	printf("PMI_KVS_Get_name_length_max = %d\n", kvs_name_len);
#endif
	kvs_name = malloc(kvs_name_len);
	if (kvs_name == NULL) {
		printf("FAILURE: malloc kvs_name: task %d\n", pmi_rank);
		exit(1);
	}
	if ((rc = PMI_KVS_Get_my_name(kvs_name, kvs_name_len)) != PMI_SUCCESS) {
		printf("FAILURE: PMI_KVS_Get_my_name: %d, task %d\n", rc,
			pmi_rank);
		exit(1);
	}
#if _DEBUG
	printf("PMI_KVS_Get_my_name = %s\n", kvs_name);
#endif
	if ((rc = PMI_KVS_Get_key_length_max(&key_len)) != PMI_SUCCESS) {
		printf("FAILURE: PMI_KVS_Get_key_length_max: %d, task %d\n",
			rc, pmi_rank);
		exit(1);
	}
	key = malloc(key_len);
	if (key == NULL) {
		printf("FAILURE: malloc key: task %d\n", pmi_rank);
		exit(1);
	}
	if ((rc = PMI_KVS_Get_value_length_max(&val_len)) != PMI_SUCCESS) {
		printf("FAILURE: PMI_KVS_Get_value_length_max: %d, task %d\n",
			rc, pmi_rank);
		exit(1);
	}
#if _DEBUG
	printf("PMI_KVS_Get_value_length_max = %d\n", val_len);
#endif
	val = malloc(val_len);
	if (val == NULL) {
		printf("FAILURE: malloc val: task %d\n", pmi_rank);
		exit(1);
	}

	/*  Build and set some key=val pairs */
	snprintf(key, key_len, "ATTR_1_%d", procid);
	snprintf(val, val_len, "A%d", procid+OFFSET_1);
	if ((rc = PMI_KVS_Put(kvs_name, key, val)) != PMI_SUCCESS) {
		printf("FAILURE: PMI_KVS_Put(%s,%s,%s): %d, task %d\n",
			kvs_name, key, val, rc, pmi_rank);
		exit(1);
	}
#if _DEBUG
	printf("PMI_KVS_Put(%s,%s,%s)\n", kvs_name, key, val);
#endif
	snprintf(key, key_len, "attr_2_%d", procid);
	snprintf(val, val_len, "B%d", procid+OFFSET_2);
	if ((rc = PMI_KVS_Put(kvs_name, key, val)) != PMI_SUCCESS) {
		printf("FAILURE: PMI_KVS_Put(%s,%s,%s): %d, task %d\n",
			kvs_name, key, val, rc, pmi_rank);
		exit(1);
	}
#if _DEBUG
	printf("PMI_KVS_Put(%s,%s,%s)\n", kvs_name, key, val);
#endif
	/* Sync KVS across all tasks */
	if ((rc = PMI_KVS_Commit(kvs_name)) != PMI_SUCCESS) {
		printf("FAILURE: PMI_KVS_Commit: %d, task %d\n", rc, pmi_rank);
		exit(1);
	}
#if _DEBUG
	printf("PMI_KVS_Commit completed\n");
#endif
	if ((rc = PMI_Barrier()) != PMI_SUCCESS) {
		printf("FAILURE: PMI_Barrier: %d, task %d\n", rc, pmi_rank);
		exit(1);
	}
#if _DEBUG
	printf("PMI_Barrier completed\n");
#endif
	/* Now let's get all keypairs and validate them */
	for (i=0; i<pmi_size; i++) {
		snprintf(key, key_len, "ATTR_1_%d", i);
		if ((rc = PMI_KVS_Get(kvs_name, key, val, val_len))
				!= PMI_SUCCESS) {
			printf("FAILURE: PMI_KVS_Get(%s): %d, task %d\n",
				key, rc, pmi_rank);
			exit(1);
		}
		if ((val[0] != 'A')
		||  ((atoi(&val[1])-OFFSET_1) != i)) {
			printf("FAILURE: Bad keypair %s=%s, task %d\n",
				key, val, pmi_rank);
			exit(1);
		}
#if _DEBUG
		if ((pmi_size <= 8) && (pmi_rank == 0))	/* limit output */
			printf("PMI_KVS_Get(%s,%s) %s\n", kvs_name, key, val);
#endif
		snprintf(key, key_len, "attr_2_%d", i);
		if ((rc = PMI_KVS_Get(kvs_name, key, val, val_len))
				!= PMI_SUCCESS) {
			printf("FAILURE: PMI_KVS_Get(%s): %d, task %d\n",
				key, rc, pmi_rank);
			exit(1);
		}
		if ((val[0] != 'B')
		||  ((atoi(&val[1])-OFFSET_2) != i)) {
			printf("FAILURE: Bad keypair %s=%s, task %d\n",
				key,val, pmi_rank);
			exit(1);
		}
#if _DEBUG
		if ((pmi_size <= 8) && (pmi_rank == 1))	/* limit output */
			printf("PMI_KVS_Get(%s,%s) %s\n", kvs_name, key, val);
#endif
	}

	/* use iterator */
	if ((rc = PMI_KVS_Iter_first(kvs_name, key, key_len, val,
			val_len)) != PMI_SUCCESS) {
		printf("FAILURE: PMI_KVS_iter_first: %d, task %d\n", rc,
			pmi_rank);
		exit(1);
	}
	for (i=0; ; i++) {
		if (key[0] == '\0') {
			if (i != (pmi_size * 2)) {
				printf("FAILURE: PMI_KVS_iter_next "
					"cycle count(%d, %d), task %d\n",
					i, pmi_size, pmi_rank);
			}
			break;
		}
#if _DEBUG
		if ((pmi_size <= 8) && (pmi_rank == 1)) {	/* limit output */
			printf("PMI_KVS_Iter_next(%s,%d): %s=%s\n", kvs_name,
				i, key, val);
		}
#endif
		if ((rc = PMI_KVS_Iter_next(kvs_name, key, key_len,
				val, val_len)) != PMI_SUCCESS) {
			printf("FAILURE: PMI_KVS_iter_next: %d, task %d\n",
				rc, pmi_rank);
			exit(1);
		}
	}

	/* Build some more key=val pairs */
	snprintf(key, key_len, "ATTR_3_%d", procid);
	snprintf(val, val_len, "C%d", procid+OFFSET_1);
	if ((rc = PMI_KVS_Put(kvs_name, key, val)) != PMI_SUCCESS) {
		printf("FAILURE: PMI_KVS_Put(%s,%s,%s): %d, task %d\n",
			kvs_name, key, val, rc, pmi_rank);
		exit(1);
	}
#if _DEBUG
	printf("PMI_KVS_Put(%s,%s,%s)\n", kvs_name, key, val);
#endif
	snprintf(key, key_len, "attr_4_%d", procid);
	snprintf(val, val_len, "D%d", procid+OFFSET_2);
	if ((rc = PMI_KVS_Put(kvs_name, key, val)) != PMI_SUCCESS) {
		printf("FAILURE: PMI_KVS_Put(%s,%s,%s): %d, task %d\n",
			kvs_name, key, val, rc, pmi_rank);
		exit(1);
	}
#if _DEBUG
	printf("PMI_KVS_Put(%s,%s,%s)\n", kvs_name, key, val);
#endif
	/* Sync KVS across all tasks */
	if ((rc = PMI_KVS_Commit(kvs_name)) != PMI_SUCCESS) {
		printf("FAILURE: PMI_KVS_Commit: %d, task %d\n", rc, pmi_rank);
		exit(1);
	}
#if _DEBUG
	printf("PMI_KVS_Commit completed\n");
#endif
	if ((rc = PMI_Barrier()) != PMI_SUCCESS) {
		printf("FAILURE: PMI_Barrier: %d, task %d\n", rc, pmi_rank);
		exit(1);
	}
#if _DEBUG
	printf("PMI_Barrier completed\n");
#endif
	/* Now let's get some keypairs and validate them */
	for (i=0; i<pmi_size; i++) {
		snprintf(key, key_len, "ATTR_1_%d", i);
		if ((rc = PMI_KVS_Get(kvs_name, key, val, val_len))
				!= PMI_SUCCESS) {
			printf("FAILURE: PMI_KVS_Get(%s): %d, task %d\n",
				key, rc, pmi_rank);
			exit(1);
		}
		if ((val[0] != 'A')
		||  ((atoi(&val[1])-OFFSET_1) != i)) {
			printf("FAILURE: Bad keypair %s=%s, task %d\n",
				key, val, pmi_rank);
			exit(1);
		}
#if _DEBUG
		if ((pmi_size <= 8) && (pmi_rank == 1))	/* limit output */
			printf("PMI_KVS_Get(%s,%s) %s\n", kvs_name, key, val);
#endif
		snprintf(key, key_len, "attr_4_%d", i);
		if ((rc = PMI_KVS_Get(kvs_name, key, val, val_len))
				!= PMI_SUCCESS) {
			printf("FAILURE: PMI_KVS_Get(%s): %d, task %d\n",
				key, rc, pmi_rank);
			exit(1);
		}
		if ((val[0] != 'D')
		||  ((atoi(&val[1])-OFFSET_2) != i)) {
			printf("FAILURE: Bad keypair %s=%s, task %d\n",
				key,val, pmi_rank);
			exit(1);
		}
#if _DEBUG
		if ((pmi_size <= 8) && (pmi_rank == 1))	/* limit output */
			printf("PMI_KVS_Get(%s,%s) %s\n", kvs_name, key, val);
#endif
	}

	/* Replicate the very heavy load that MVAPICH2 puts on PMI
	 * This load exceeds that of MPICH2 by a very wide margin */
#if _DEBUG
	printf("Starting %d iterations each with %d PMI_KVS_Put and \n"
		"  one each PMI_KVS_Commit and KVS_Barrier\n",
		BARRIER_CNT, PUTS_PER_BARRIER);
	fflush(stdout);
#endif
	for (i=0; i<BARRIER_CNT; i++) {
		for (j=0; j<PUTS_PER_BARRIER; j++) {
			snprintf(key, key_len, "ATTR_%d_%d_%d", i, j, procid);
			snprintf(val, val_len, "C%d", procid+OFFSET_1);
			if ((rc = PMI_KVS_Put(kvs_name, key, val)) !=
					PMI_SUCCESS) {
				printf("FAILURE: PMI_KVS_Put(%s,%s,%s): "
					"%d, task %d\n",
					kvs_name, key, val, rc, pmi_rank);
				exit(1);
			}
		}
		if ((rc= PMI_KVS_Commit(kvs_name)) != PMI_SUCCESS) {
			printf("FAILURE: PMI_KVS_Commit: %d, task %d\n",
				rc, pmi_rank);
			exit(1);
		}
		if ((rc = PMI_Barrier()) != PMI_SUCCESS) {
			printf("FAILURE: PMI_Barrier: %d, task %d\n",
				rc, pmi_rank);
			exit(1);
		}
		/* Don't bother with PMI_KVS_Get as those are all local
		 * and do not put a real load on srun or the network */
	}
#if _DEBUG
	printf("Interative PMI calls successful\n");
#endif

	/* create new keyspace and test it */
	if ((rc = PMI_KVS_Create(kvs_name, kvs_name_len)) != PMI_SUCCESS) {
		printf("FAILURE: PMI_KVS_Create: %d, task %d\n", rc, pmi_rank);
		exit(1);
	}
#if _DEBUG
	printf("PMI_KVS_Create %s\n", kvs_name);
#endif
	if ((rc = PMI_KVS_Put(kvs_name, "KVS_KEY", "KVS_VAL")) != PMI_SUCCESS) {
		printf("FAILURE: PMI_KVS_Put: %d, task %d\n", rc, pmi_rank);
		exit(1);
	}
#if _DEBUG
	printf("PMI_KVS_Put(%s,KVS_KEY,KVS_VAL)\n", kvs_name);
#endif
	if ((rc =  PMI_KVS_Get(kvs_name, "KVS_KEY", val, val_len)) !=
			PMI_SUCCESS) {
		printf("FAILURE: PMI_KVS_Get(%s, KVS_KEY): %d, task %d\n",
			kvs_name, rc, pmi_rank);
		exit(1);
	}
#if _DEBUG
	printf("PMI_KVS_Get(%s,%s) %s\n", kvs_name, "KVS_KEY", val);
#endif
	if ((rc = PMI_KVS_Destroy(kvs_name)) != PMI_SUCCESS) {
		printf("FAILURE: PMI_KVS_Destroy(%s): %d, task %d\n",
			kvs_name, rc, pmi_rank);
		exit(1);
	}
	if ((rc = PMI_KVS_Get(kvs_name, "KVS_KEY", val, val_len)) !=
			PMI_ERR_INVALID_KVS) {
		printf("FAILURE: PMI_KVS_Get(%s, KVS_KEY): %d, task %d\n",
			kvs_name, rc, pmi_rank);
		exit(1);
	}
	if ((rc = PMI_Finalize()) != PMI_SUCCESS) {
		printf("FAILURE: PMI_Finalize: %d, task %d\n", rc, pmi_rank);
		exit(1);
	}

	if (_DEBUG || (pmi_rank < 4)) {
		gettimeofday(&tv2, NULL);
		delta_t  = (tv2.tv_sec  - tv1.tv_sec) * 1000000;
		delta_t +=  tv2.tv_usec - tv1.tv_usec;
		snprintf(tv_str, sizeof(tv_str), "usec=%ld", delta_t);
		printf("PMI test ran successfully, for task %d, %s\n",
			pmi_rank, tv_str);
	}
	if (pmi_rank == 0) {
		printf("NOTE: All failures reported, ");
		printf("but only first four successes reported\n");
	}
	exit(0);
}
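
Stripped of the validation and timing, the core protocol this test exercises is a put/commit/barrier/get cycle. A minimal sketch, assuming PMI is already initialized and that pmi.h is on the include path (its location varies, e.g. slurm/pmi.h):

#include <stdio.h>
#include <pmi.h>

/* Publish one value and read the right-hand neighbor's. Fixed-size
 * buffers are an assumption; a careful caller sizes them with the
 * PMI_KVS_Get_*_length_max calls as the test above does. */
static int exchange_one_value(const char *kvs_name, int rank, int size)
{
    char key[64], val[64];

    snprintf(key, sizeof(key), "rank_%d", rank);
    snprintf(val, sizeof(val), "hello_from_%d", rank);
    if (PMI_SUCCESS != PMI_KVS_Put(kvs_name, key, val))   /* stage locally   */
        return -1;
    if (PMI_SUCCESS != PMI_KVS_Commit(kvs_name))          /* flush the pairs */
        return -1;
    if (PMI_SUCCESS != PMI_Barrier())                     /* global publish  */
        return -1;

    snprintf(key, sizeof(key), "rank_%d", (rank + 1) % size);
    if (PMI_SUCCESS != PMI_KVS_Get(kvs_name, key, val, sizeof(val)))
        return -1;
    printf("rank %d read %s=%s\n", rank, key, val);
    return 0;
}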
Example #5
int main(void)
{
  int rc;
  int rank, size;

  PMI_BOOL initialized;
  rc = PMI_Initialized(&initialized);
  if (rc!=PMI_SUCCESS)
    PMI_Abort(rc,"PMI_Initialized failed");

  if (initialized!=PMI_TRUE)
  {
    int spawned;
    rc = PMI_Init(&spawned);
    if (rc!=PMI_SUCCESS)
      PMI_Abort(rc,"PMI_Init failed");
  }

  rc = PMI_Get_rank(&rank);
  if (rc!=PMI_SUCCESS)
    PMI_Abort(rc,"PMI_Get_rank failed");

  rc = PMI_Get_size(&size);
  if (rc!=PMI_SUCCESS)
    PMI_Abort(rc,"PMI_Get_size failed");

  printf("rank %d of %d \n", rank, size);

  int rpn; /* rpn = ranks per node */
  rc = PMI_Get_clique_size(&rpn);
  if (rc!=PMI_SUCCESS)
    PMI_Abort(rc,"PMI_Get_clique_size failed");
  printf("rank %d clique size %d \n", rank, rpn);

  int * clique_ranks = malloc( rpn * sizeof(int) );
  if (clique_ranks==NULL)
    PMI_Abort(1,"malloc failed");

  rc = PMI_Get_clique_ranks(clique_ranks, rpn);
  if (rc!=PMI_SUCCESS)
    PMI_Abort(rc,"PMI_Get_clique_ranks failed");
  for(int i = 0; i<rpn; i++)
    printf("rank %d clique[%d] = %d \n", rank, i, clique_ranks[i]);

  int nid;
  rc = PMI_Get_nid(rank, &nid);
  if (rc!=PMI_SUCCESS)
    PMI_Abort(rc,"PMI_Get_nid failed");
  printf("rank %d PMI_Get_nid gives nid %d \n", rank, nid);

#if OLD
  rca_mesh_coord_t xyz;
  rca_get_meshcoord( (uint16_t) nid, &xyz);
  printf("rank %d rca_get_meshcoord returns (%2u,%2u,%2u)\n", 
         rank, xyz.mesh_x, xyz.mesh_y, xyz.mesh_z);
#else // UNTESTED
  pmi_mesh_coord_t xyz;
  PMI_Get_meshcoord((uint16_t) nid, &xyz);
  printf("rank %d PMI_Get_meshcoord returns (%2u,%2u,%2u)\n", 
         rank, xyz.mesh_x, xyz.mesh_y, xyz.mesh_z);
#endif

  free(clique_ranks);
  fflush(stdout);
  return 0;
}
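
Since the clique returned by PMI_Get_clique_ranks is exactly the set of ranks sharing the caller's node, peer-locality tests reduce to a scan of that array. A small illustrative helper (not part of the PMI API):

/* Return 1 if peer_rank is on the same node as the caller, else 0. */
static int is_local_peer(const int *clique_ranks, int rpn, int peer_rank)
{
    int i;
    for (i = 0; i < rpn; i++) {
        if (clique_ranks[i] == peer_rank) {
            return 1;
        }
    }
    return 0;
}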
static int s1_init(void)
{
    PMI_BOOL initialized;
    int spawned;
    int rc, ret = OPAL_ERROR;
    int i;
    char *pmix_id, *tmp;
    uint32_t jobfam, stepid;
    opal_value_t kv;

    if (PMI_SUCCESS != (rc = PMI_Initialized(&initialized))) {
        OPAL_PMI_ERROR(rc, "PMI_Initialized");
        return OPAL_ERROR;
    }

    if( PMI_TRUE != initialized && PMI_SUCCESS != (rc = PMI_Init(&spawned)) ) {
        OPAL_PMI_ERROR(rc, "PMI_Init");
        return OPAL_ERROR;
    }

    // Query the maximum PMI key, value, and KVS-name lengths
    rc = PMI_KVS_Get_value_length_max(&pmix_vallen_max);
    if( PMI_SUCCESS != rc ) {
        OPAL_PMI_ERROR(rc, "PMI_KVS_Get_value_length_max");
        goto err_exit;
    }

    rc = PMI_KVS_Get_name_length_max(&pmix_kvslen_max);
    if (PMI_SUCCESS != rc ) {
        OPAL_PMI_ERROR(rc, "PMI_KVS_Get_name_length_max");
        goto err_exit;
    }

    rc = PMI_KVS_Get_key_length_max(&pmix_keylen_max);
    if( PMI_SUCCESS != rc ) {
        OPAL_PMI_ERROR(rc, "PMI_KVS_Get_key_length_max");
        goto err_exit;
    }

    // Initialize job environment information
    pmix_id = (char*)malloc(pmix_vallen_max);
    if( pmix_id == NULL ){
        ret = OPAL_ERR_OUT_OF_RESOURCE;
        goto err_exit;
    }
    /* Get domain id */
    if (PMI_SUCCESS != (rc = PMI_Get_kvs_domain_id(pmix_id, pmix_vallen_max))) {
        free(pmix_id);
        goto err_exit;
    }
    /* Slurm PMI provides the job id as an integer followed
     * by a '.', followed by essentially a stepid. The first integer
     * defines an overall job number. The second integer is the number of
     * individual jobs we have run within that allocation. So we translate
     * this as the overall job number equating to our job family, and
     * the individual number equating to our local jobid
     */
    jobfam = strtoul(pmix_id, &tmp, 10);
    if (NULL == tmp || '.' != *tmp) {
        /* hmmm - no '.', so let's just use zero */
        stepid = 0;
    } else {
        tmp++; /* step over the '.' */
        stepid = strtoul(tmp, NULL, 10);
    }
    /* now build the jobid */
    s1_jobid = (jobfam << 16) | stepid;
    free(pmix_id);

    /* get our rank */
    ret = PMI_Get_rank(&s1_rank);
    if( PMI_SUCCESS != ret ) {
        OPAL_PMI_ERROR(ret, "PMI_Get_rank");
        goto err_exit;
    }
    /* store our name in the opal_proc_t so that
     * debug messages will make sense - an upper
     * layer will eventually overwrite it, but that
     * won't do any harm */
    s1_pname.jid = s1_jobid;
    s1_pname.vid = s1_rank;
    opal_proc_set_name((opal_process_name_t*)&s1_pname);
    opal_output_verbose(2, opal_pmix_base_framework.framework_output,
                        "%s pmix:s1: assigned tmp name",
                        OPAL_NAME_PRINT(*(opal_process_name_t*)&s1_pname));

    pmix_kvs_name = (char*)malloc(pmix_kvslen_max);
    if( pmix_kvs_name == NULL ){
        ret = OPAL_ERR_OUT_OF_RESOURCE;
        goto err_exit;
    }

    rc = PMI_KVS_Get_my_name(pmix_kvs_name, pmix_kvslen_max);
    if( PMI_SUCCESS != rc ) {
        OPAL_PMI_ERROR(rc, "PMI_KVS_Get_my_name");
        goto err_exit;
    }

    /* get our local proc info to find our local rank */
    if (PMI_SUCCESS != (rc = PMI_Get_clique_size(&s1_nlranks))) {
        OPAL_PMI_ERROR(rc, "PMI_Get_clique_size");
        return rc;
    }
    /* now get the specific ranks */
    s1_lranks = (int*)calloc(s1_nlranks, sizeof(int));
    if (NULL == s1_lranks) {
        rc = OPAL_ERR_OUT_OF_RESOURCE;
        OPAL_ERROR_LOG(rc);
        return rc;
    }
    if (PMI_SUCCESS != (rc = PMI_Get_clique_ranks(s1_lranks, s1_nlranks))) {
        OPAL_PMI_ERROR(rc, "PMI_Get_clique_ranks");
        free(s1_lranks);
        return rc;
    }
    /* find ourselves */
    for (i=0; i < s1_nlranks; i++) {
        if (s1_rank == s1_lranks[i]) {
            s1_lrank = i;
            s1_nrank = i;
            break;
        }
    }

    /* get universe size */
    ret = PMI_Get_universe_size(&s1_usize);
    if (PMI_SUCCESS != ret) {
        OPAL_PMI_ERROR(ret, "PMI_Get_universe_size");
        goto err_exit;
    }
    /* push this into the dstore for subsequent fetches */
    OBJ_CONSTRUCT(&kv, opal_value_t);
    kv.key = strdup(OPAL_DSTORE_UNIV_SIZE);
    kv.type = OPAL_UINT32;
    kv.data.uint32 = s1_usize;
    if (OPAL_SUCCESS != (ret = opal_dstore.store(opal_dstore_internal, &OPAL_PROC_MY_NAME, &kv))) {
        OPAL_ERROR_LOG(ret);
        OBJ_DESTRUCT(&kv);
        goto err_exit;
    }
    OBJ_DESTRUCT(&kv);

    /* get job size */
    ret = PMI_Get_size(&s1_jsize);
    if (PMI_SUCCESS != ret) {
        OPAL_PMI_ERROR(ret, "PMI_Get_size");
        goto err_exit;
    }

    /* get appnum */
    ret = PMI_Get_appnum(&s1_appnum);
    if (PMI_SUCCESS != ret) {
        OPAL_PMI_ERROR(ret, "PMI_Get_appnum");
        goto err_exit;
    }

    return OPAL_SUCCESS;

 err_exit:
    PMI_Finalize();
    return ret;
}
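
The "find ourselves" loop above encodes a common PMI idiom: a rank's local (on-node) rank is simply its index within the clique array. Distilled into a sketch:

/* Return the index of my_rank within the clique array (its local rank
 * on the node), or -1 if it is unexpectedly absent. */
static int local_rank_index(const int *lranks, int nlranks, int my_rank)
{
    int i;
    for (i = 0; i < nlranks; i++) {
        if (lranks[i] == my_rank) {
            return i;
        }
    }
    return -1;
}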
static int rte_init(void)
{
    int ret, i, j;
    char *error = NULL, *localj;
    int32_t jobfam, stepid;
    char *envar, *ev1, *ev2;
    uint64_t unique_key[2];
    char *cs_env, *string_key;
    char *pmi_id=NULL;
    int *ranks;
    char *tmp;
    orte_jobid_t jobid;
    orte_process_name_t proc;
    orte_local_rank_t local_rank;
    orte_node_rank_t node_rank;

    /* run the prolog */
    if (ORTE_SUCCESS != (ret = orte_ess_base_std_prolog())) {
        error = "orte_ess_base_std_prolog";
        goto error;
    }
    
#if OPAL_HAVE_HWLOC
    /* get the topology */
    if (NULL == opal_hwloc_topology) {
        if (OPAL_SUCCESS != opal_hwloc_base_get_topology()) {
            error = "topology discovery";
            goto error;
        }
    }
#endif

    if (ORTE_PROC_IS_DAEMON) {  /* I am a daemon, launched by mpirun */
        /* we had to be given a jobid */
        mca_base_param_reg_string_name("orte", "ess_jobid", "Process jobid",
                                       true, false, NULL, &tmp);
        if (NULL == tmp) {
            error = "missing jobid";
            ret = ORTE_ERR_FATAL;
            goto error;
        }
        if (ORTE_SUCCESS != (ret = orte_util_convert_string_to_jobid(&jobid, tmp))) {
            ORTE_ERROR_LOG(ret);
            error = "convert jobid";
            goto error;
        }
        free(tmp);
        ORTE_PROC_MY_NAME->jobid = jobid;
        /* get our rank from PMI */
        if (PMI_SUCCESS != (ret = PMI_Get_rank(&i))) {
            ORTE_PMI_ERROR(ret, "PMI_Get_rank");
            error = "could not get PMI rank";
            goto error;
        }
        ORTE_PROC_MY_NAME->vpid = i + 1;  /* compensate for orterun */

        /* get the number of procs from PMI */
        if (PMI_SUCCESS != (ret = PMI_Get_universe_size(&i))) {
            ORTE_PMI_ERROR(ret, "PMI_Get_universe_size");
            error = "could not get PMI universe size";
            goto error;
        }
        orte_process_info.num_procs = i + 1;  /* compensate for orterun */

        /* complete setup */
        if (ORTE_SUCCESS != (ret = orte_ess_base_orted_setup(NULL))) {
            ORTE_ERROR_LOG(ret);
            error = "orte_ess_base_orted_setup";
            goto error;
        }
    } else {  /* we are a direct-launched MPI process */
        /* get our PMI id length */
        if (PMI_SUCCESS != (ret = PMI_Get_id_length_max(&pmi_maxlen))) {
            error = "PMI_Get_id_length_max";
            goto error;
        }
        pmi_id = malloc(pmi_maxlen);
        if (PMI_SUCCESS != (ret = PMI_Get_kvs_domain_id(pmi_id, pmi_maxlen))) {
            free(pmi_id);
            error = "PMI_Get_kvs_domain_id";
            goto error;
        }
        /* PMI is very nice to us - the domain id is an integer followed
         * by a '.', followed by essentially a stepid. The first integer
         * defines an overall job number. The second integer is the number of
         * individual jobs we have run within that allocation. So we translate
         * this as the overall job number equating to our job family, and
         * the individual number equating to our local jobid
         */
        jobfam = strtol(pmi_id, &localj, 10);
        if (NULL == localj || '.' != *localj) {
            /* hmmm - no '.', so let's just use zero */
            stepid = 0;
        } else {
            localj++; /* step over the '.' */
            stepid = strtol(localj, NULL, 10) + 1; /* add one to avoid looking like a daemon */
        }
        free(pmi_id);

        /* now build the jobid */
        ORTE_PROC_MY_NAME->jobid = ORTE_CONSTRUCT_LOCAL_JOBID(jobfam << 16, stepid);

        /* get our rank */
        if (PMI_SUCCESS != (ret = PMI_Get_rank(&i))) {
            ORTE_PMI_ERROR(ret, "PMI_Get_rank");
            error = "could not get PMI rank";
            goto error;
        }
        ORTE_PROC_MY_NAME->vpid = i;

        /* get the number of procs from PMI */
        if (PMI_SUCCESS != (ret = PMI_Get_universe_size(&i))) {
            ORTE_PMI_ERROR(ret, "PMI_Get_universe_size");
            error = "could not get PMI universe size";
            goto error;
        }
        orte_process_info.num_procs = i;
        /* push into the environ for pickup in MPI layer for
         * MPI-3 required info key
         */
        asprintf(&ev1, "OMPI_MCA_orte_ess_num_procs=%d", i);
        putenv(ev1);
        asprintf(&ev2, "OMPI_APP_CTX_NUM_PROCS=%d", i);
        putenv(ev2);

        /* setup transport keys in case the MPI layer needs them -
         * we can use the jobfam and stepid as unique keys
         * because they are unique values assigned by the RM
         */
        unique_key[0] = (uint64_t)jobfam;
        unique_key[1] = (uint64_t)stepid;
        if (NULL == (string_key = orte_pre_condition_transports_print(unique_key))) {
            ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
            return ORTE_ERR_OUT_OF_RESOURCE;
        }
        if (NULL == (cs_env = mca_base_param_environ_variable("orte_precondition_transports",NULL,NULL))) {
            ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
            return ORTE_ERR_OUT_OF_RESOURCE;
        }
        asprintf(&envar, "%s=%s", cs_env, string_key);
        putenv(envar);
        /* cannot free the envar as that messes up our environ */
        free(cs_env);
        free(string_key);

        /* our app_context number can only be 0 as we don't support
         * dynamic spawns
         */
        orte_process_info.app_num = 0;

        /* setup my daemon's name - arbitrary, since we don't route
         * messages
         */
        ORTE_PROC_MY_DAEMON->jobid = 0;
        ORTE_PROC_MY_DAEMON->vpid = 0;

        /* ensure we pick the correct critical components */
        putenv("OMPI_MCA_grpcomm=pmi");
        putenv("OMPI_MCA_routed=direct");
    
        /* now use the default procedure to finish my setup */
        if (ORTE_SUCCESS != (ret = orte_ess_base_app_setup())) {
            ORTE_ERROR_LOG(ret);
            error = "orte_ess_base_app_setup";
            goto error;
        }

        /* store our info into the database */
        if (ORTE_SUCCESS != (ret = orte_db.store(ORTE_PROC_MY_NAME, ORTE_DB_HOSTNAME, orte_process_info.nodename, OPAL_STRING))) {
            error = "db store daemon vpid";
            goto error;
        }
        /* get our local proc info to find our local rank */
        if (PMI_SUCCESS != (ret = PMI_Get_clique_size(&i))) {
            ORTE_PMI_ERROR(ret, "PMI_Get_clique_size");
            error = "could not get PMI clique size";
            goto error;
        }
        /* store that info - remember, we want the number of peers that
         * share the node WITH ME, so we have to subtract ourselves from
         * that number
         */
        orte_process_info.num_local_peers = i - 1;
        /* now get the specific ranks */
        ranks = (int*)malloc(i * sizeof(int));
        if (NULL == ranks) {
            ret = ORTE_ERR_OUT_OF_RESOURCE;
            error = "out of memory for clique ranks";
            goto error;
        }
        if (PMI_SUCCESS != (ret = PMI_Get_clique_ranks(ranks, i))) {
            ORTE_PMI_ERROR(ret, "PMI_Get_clique_ranks");
            error = "could not get clique ranks";
            goto error;
        }
        /* The clique ranks are returned in rank order, so
         * cycle thru the array and update the local/node
         * rank info
         */
        proc.jobid = ORTE_PROC_MY_NAME->jobid;
        for (j=0; j < i; j++) {
            proc.vpid = ranks[j];
            local_rank = j;
            node_rank = j;
            if (ranks[j] == (int)ORTE_PROC_MY_NAME->vpid) {
                orte_process_info.my_local_rank = local_rank;
                orte_process_info.my_node_rank = node_rank;
            }
            if (ORTE_SUCCESS != (ret = orte_db.store(&proc, ORTE_DB_LOCALRANK, &local_rank, ORTE_LOCAL_RANK))) {
                error = "db store local rank";
                goto error;
            }
            if (ORTE_SUCCESS != (ret = orte_db.store(&proc, ORTE_DB_NODERANK, &node_rank, ORTE_NODE_RANK))) {
                error = "db store node rank";
                goto error;
            }
        }
        free(ranks);

        /* setup process binding */
        if (ORTE_SUCCESS != (ret = orte_ess_base_proc_binding())) {
            error = "proc_binding";
            goto error;
        }
    }

    /* set max procs */
    if (orte_process_info.max_procs < orte_process_info.num_procs) {
        orte_process_info.max_procs = orte_process_info.num_procs;
    }

    /* flag that we completed init */
    app_init_complete = true;
    
    return ORTE_SUCCESS;

 error:
    if (ORTE_ERR_SILENT != ret && !orte_report_silent_errors) {
        orte_show_help("help-orte-runtime.txt",
                       "orte_init:startup:internal-failure",
                       true, error, ORTE_ERROR_NAME(ret), ret);
    }

    return ret;
}
Example #8
int main(int argc, char **argv, char **envp)
{
    int pmi_rank = -1;
    int pmi_process_group_size = -1;
    int num_local_procs = 0;
    int *local_rank_ids = NULL;
    int spawned = PMI_FALSE;
    int rc = EXIT_SUCCESS;
    char *err = NULL;
    PMI_BOOL pmi_initialized = PMI_FALSE;
    int pmi_vallen_max, max_length;
    char *pmi_kvs_name = NULL;

    /* sanity */
    if (PMI_SUCCESS != PMI_Initialized(&pmi_initialized) ||
        PMI_TRUE == pmi_initialized) {
        fprintf(stderr, "=== ERROR: PMI sanity failure\n");
        return EXIT_FAILURE;
    }
    if (PMI_SUCCESS != PMI_Init(&spawned)) {
        err = "PMI_Init failure!";
        goto done;
    }
    pmi_initialized = PMI_TRUE;
    if (PMI_SUCCESS != PMI_Get_size(&pmi_process_group_size)) {
        err = "PMI_Get_size failure!";
        goto done;
    }
    if (PMI_SUCCESS != PMI_Get_rank(&pmi_rank)) {
        err = "PMI_Get_rank failure!";
        goto done;
    }
    if (PMI_SUCCESS != PMI_Get_clique_size(&num_local_procs)) {
        err = "PMI_Get_clique_size failure!";
        goto done;
    }
    if (PMI_SUCCESS != PMI_KVS_Get_value_length_max(&pmi_vallen_max)) {
        err = "PMI_KVS_Get_value_length_max failure!";
        goto done;
    }
    if (PMI_SUCCESS != PMI_KVS_Get_name_length_max(&max_length)) {
        err = "PMI_KVS_Get_name_length_max failure!";
        goto done;
    }
    pmi_kvs_name = (char*)malloc(max_length);
    if (NULL == pmi_kvs_name) {
        err = "malloc failure!";
        goto done;
    }
    if (PMI_SUCCESS != PMI_KVS_Get_my_name(pmi_kvs_name,max_length)) {
        err = "PMI_KVS_Get_my_name failure!";
        goto done;
    }

    if (NULL == (local_rank_ids = calloc(num_local_procs, sizeof(int)))) {
        err = "out of resources";
        goto done;
    }
    if (PMI_SUCCESS != PMI_Get_clique_ranks(local_rank_ids, num_local_procs)) {
        err = "PMI_Get_clique_size failure!";
        goto done;
    }
    /* lowest local rank will print env info and tag its output */
    //    if (pmi_rank == local_rank_ids[0]) {
    //  for (; NULL != envp && NULL != *envp; ++envp) {
    //      printf("===[%d]: %s\n",  pmi_rank, *envp);
    //  }
    //}

done:
    if (PMI_TRUE == pmi_initialized) {
        if (PMI_SUCCESS != PMI_Finalize()) {
            err = "PMI_Finalize failure!";
        }
    }
    free(pmi_kvs_name);
    free(local_rank_ids);
    if (NULL != err) {
        fprintf(stderr, "=== ERROR [rank:%d] %s\n", pmi_rank, err);
        rc = EXIT_FAILURE;
    }
    return rc;
}
Example #9
int mca_common_pmi_local_info(int vpid, int **ranks_ret,
                              int *procs_ret, char **error)
{
    int *ranks;
    int procs = -1;
    int rc;

#if WANT_PMI2_SUPPORT
    if (mca_common_pmi_version == 2) {

        {
            char *pmapping = (char*)malloc(PMI2_MAX_VALLEN);
            if( pmapping == NULL ){
                *error = "mca_common_pmi_local_info: could not get memory for PMIv2 process mapping";
                return OPAL_ERR_OUT_OF_RESOURCE;
            }
            int found;
            int my_node;

            rc = PMI2_Info_GetJobAttr("PMI_process_mapping", pmapping, PMI2_MAX_VALLEN, &found);
            if( !found || PMI2_SUCCESS != rc ) {
                /* note: some implementations (e.g., Cray) don't define PMI2_SUCCESS */
                OPAL_PMI_ERROR(rc,"PMI2_Info_GetJobAttr");
                *error = "mca_common_pmi_local_info: could not get PMI_process_mapping";
                free(pmapping);
                return OPAL_ERROR;
            }

            ranks = mca_common_pmi2_parse_pmap(pmapping, vpid, &my_node, &procs);
            free(pmapping);
            if (NULL == ranks) {
                *error = "mca_common_pmi_local_info: could not get memory for PMIv2 local ranks";
                return OPAL_ERR_OUT_OF_RESOURCE;
            }
        }

    }
    else
#endif
    {
        /* get our local proc info to find our local rank */
        if (PMI_SUCCESS != (rc = PMI_Get_clique_size(&procs))) {
            OPAL_PMI_ERROR(rc, "PMI_Get_clique_size");
            *error = "mca_common_pmi_local_info: could not get PMI clique size";
            return OPAL_ERROR;
        }
        /* now get the specific ranks */
        ranks = (int*)calloc(procs, sizeof(int));
        if (NULL == ranks) {
            *error = "mca_common_pmi_local_info: could not get memory for local ranks";
            return OPAL_ERR_OUT_OF_RESOURCE;
        }
        if (PMI_SUCCESS != (rc = PMI_Get_clique_ranks(ranks, procs))) {
            OPAL_PMI_ERROR(rc, "PMI_Get_clique_ranks");
            *error = "mca_common_pmi_local_info: could not get clique ranks";
            return OPAL_ERROR;
        }
    }

    *ranks_ret = ranks;
    *procs_ret = procs;
    return OPAL_SUCCESS;
}
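
mca_common_pmi_local_info hands back a heap-allocated ranks array through ranks_ret, so the caller owns it. A hypothetical caller sketch (my_vpid and the print format are illustrative, not from the source):

#include <stdio.h>
#include <stdlib.h>

static void show_local_ranks(int my_vpid)
{
    int *ranks = NULL;
    int nlocal = 0;
    char *errmsg = NULL;

    if (OPAL_SUCCESS == mca_common_pmi_local_info(my_vpid, &ranks,
                                                  &nlocal, &errmsg)) {
        int i;
        for (i = 0; i < nlocal; i++) {
            printf("local slot %d -> global rank %d\n", i, ranks[i]);
        }
        free(ranks); /* the helper allocates; the caller frees */
    } else {
        fprintf(stderr, "%s\n", errmsg);
    }
}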