Example no. 1
TRACE_EXPORT int TRACE_Open( const char filespec[], TRACE_file *fp )
{
    int i, j;
    RLOG_IOStruct *pInput;

    if (filespec == NULL || fp == NULL)
	return TRACEINPUT_FAIL;

    if (strcmp(filespec, "-h") == 0)
    {
	*fp = NULL;
	return TRACEINPUT_SUCCESS;
    }

    *fp = (_trace_file*)MPL_malloc(sizeof(_trace_file));
    if (*fp == NULL)
	return TRACEINPUT_FAIL;

    (*fp)->pInput = pInput = RLOG_CreateInputStruct(filespec);
    if (pInput == NULL)
    {
	MPL_free(*fp);
	*fp = NULL;
	return TRACEINPUT_FAIL;
    }

    (*fp)->bArrowAvail = (RLOG_GetNextArrow(pInput, &(*fp)->arrow) == 0);
    if (pInput->nNumRanks > 0)
    {
	(*fp)->ppEvent = (RLOG_EVENT**)MPL_malloc(sizeof(RLOG_EVENT*) * pInput->nNumRanks);
	(*fp)->ppEventAvail = (int**)MPL_malloc(sizeof(int*) * pInput->nNumRanks);

	for (i=0; i<pInput->nNumRanks; i++)
	{
	    if (pInput->pNumEventRecursions[i] > 0)
	    {
		(*fp)->ppEvent[i] = (RLOG_EVENT*)MPL_malloc(sizeof(RLOG_EVENT) * pInput->pNumEventRecursions[i]);
		(*fp)->ppEventAvail[i] = (int*)MPL_malloc(sizeof(int) * pInput->pNumEventRecursions[i]);
	    }
	    else
	    {
		(*fp)->ppEvent[i] = NULL;
		(*fp)->ppEventAvail[i] = NULL;
	    }
	}
    }
    else
    {
	(*fp)->ppEvent = NULL;
	(*fp)->ppEventAvail = NULL;
    }
    for (j=0; j<pInput->nNumRanks; j++)
    {
	for (i=0; i<pInput->pNumEventRecursions[j]; i++)
	{
	    (*fp)->ppEventAvail[j][i] = (RLOG_GetNextEvent(pInput, j+pInput->header.nMinRank, i, &(*fp)->ppEvent[j][i]) == 0);
	}
    }
    return TRACEINPUT_SUCCESS;
}
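
The handle built by TRACE_Open owns the RLOG input stream plus a two-level set of per-rank event buffers, so teardown has to release the leaves before the spine arrays. Below is a minimal cleanup sketch under that assumption; RLOG_CloseInputStruct is assumed to be the counterpart of RLOG_CreateInputStruct, and the real TRACE_Close may differ:

TRACE_EXPORT int TRACE_Close( TRACE_file *fp )
{
    int i;
    RLOG_IOStruct *pInput;

    if (fp == NULL || *fp == NULL)
	return TRACEINPUT_FAIL;

    pInput = (*fp)->pInput;
    if ((*fp)->ppEvent != NULL)
    {
	/* free the per-rank buffers first (MPL_free, like free, accepts the
	   NULL entries left for ranks without events), then the spines */
	for (i=0; i<pInput->nNumRanks; i++)
	{
	    MPL_free((*fp)->ppEvent[i]);
	    MPL_free((*fp)->ppEventAvail[i]);
	}
	MPL_free((*fp)->ppEvent);
	MPL_free((*fp)->ppEventAvail);
    }
    RLOG_CloseInputStruct(&pInput);
    MPL_free(*fp);
    *fp = NULL;
    return TRACEINPUT_SUCCESS;
}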
Example no. 2
int MPID_NS_Create(const MPIR_Info * info_ptr, MPID_NS_Handle * handle_ptr)
{
    int err;
    int length;
    char *pmi_namepub_kvs;

    *handle_ptr = (MPID_NS_Handle) MPL_malloc(sizeof(struct MPID_NS_Handle));
    /* --BEGIN ERROR HANDLING-- */
    if (!*handle_ptr) {
        err =
            MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE, __func__, __LINE__,
                                 MPI_ERR_OTHER, "**nomem", 0);
        return err;
    }
    /* --END ERROR HANDLING-- */

    err = PMI_KVS_Get_name_length_max(&length);
    /* --BEGIN ERROR HANDLING-- */
    if (err != PMI_SUCCESS) {
        err =
            MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE, __func__, __LINE__,
                                 MPI_ERR_OTHER, "**fail", 0);
        /* "length" is not valid after a failed PMI call; clean up and bail out */
        MPL_free(*handle_ptr);
        *handle_ptr = NULL;
        return err;
    }
    /* --END ERROR HANDLING-- */

    (*handle_ptr)->kvsname = (char *) MPL_malloc(length);
    /* --BEGIN ERROR HANDLING-- */
    if (!(*handle_ptr)->kvsname) {
        err =
            MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE, __func__, __LINE__,
                                 MPI_ERR_OTHER, "**nomem", 0);
        MPL_free(*handle_ptr);  /* don't leak the handle allocated above */
        *handle_ptr = NULL;
        return err;
    }
    /* --END ERROR HANDLING-- */

    pmi_namepub_kvs = getenv("PMI_NAMEPUB_KVS");
    if (pmi_namepub_kvs) {
        MPL_strncpy((*handle_ptr)->kvsname, pmi_namepub_kvs, length);
    } else {
        err = PMI_KVS_Get_my_name((*handle_ptr)->kvsname, length);
        /* --BEGIN ERROR HANDLING-- */
        if (err != PMI_SUCCESS) {
            err =
                MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE, __func__, __LINE__,
                                     MPI_ERR_OTHER, "**fail", 0);
        }
        /* --END ERROR HANDLING-- */
    }

    /*printf("namepub kvs: <%s>\n", (*handle_ptr)->kvsname);fflush(stdout); */
    return 0;
}
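
Nothing is published through PMI at create time, so a matching teardown only has to release what this routine allocated. A minimal sketch, assuming kvsname is all the handle owns (the real MPID_NS_Free may do more):

int MPID_NS_Free(MPID_NS_Handle * handle_ptr)
{
    if (handle_ptr == NULL || *handle_ptr == NULL)
        return 0;
    MPL_free((*handle_ptr)->kvsname);
    MPL_free(*handle_ptr);
    *handle_ptr = NULL;
    return 0;
}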
Example no. 3
int MPIDI_check_for_failed_procs(void)
{
    int mpi_errno = MPI_SUCCESS;
    int pmi_errno;
    int len;
    char *kvsname = MPIDI_global.jobid;
    char *failed_procs_string = NULL;
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_CHECK_FOR_FAILED_PROCS);
    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_CHECK_FOR_FAILED_PROCS);

    /* FIXME: Currently this only handles failed processes in
     * comm_world.  We need to fix hydra to include the pgid along
     * with the rank, then we need to create the failed group from
     * something bigger than comm_world. */
#ifdef USE_PMIX_API
    MPIR_Assert(0);
#elif defined(USE_PMI2_API)
    {
        int vallen = 0;
        len = PMI2_MAX_VALLEN;
        failed_procs_string = MPL_malloc(len, MPL_MEM_OTHER);
        MPIR_Assert(failed_procs_string);
        pmi_errno =
            PMI2_KVS_Get(kvsname, PMI2_ID_NULL, "PMI_dead_processes", failed_procs_string,
                         len, &vallen);
        MPIR_ERR_CHKANDJUMP(pmi_errno, mpi_errno, MPI_ERR_OTHER, "**pmi_kvs_get");
    }
#else
    pmi_errno = PMI_KVS_Get_value_length_max(&len);
    MPIR_ERR_CHKANDJUMP(pmi_errno, mpi_errno, MPI_ERR_OTHER, "**pmi_kvs_get_value_length_max");
    failed_procs_string = MPL_malloc(len, MPL_MEM_OTHER);
    MPIR_Assert(failed_procs_string);
    pmi_errno = PMI_KVS_Get(kvsname, "PMI_dead_processes", failed_procs_string, len);
    MPIR_ERR_CHKANDJUMP(pmi_errno, mpi_errno, MPI_ERR_OTHER, "**pmi_kvs_get");
#endif

    MPL_DBG_MSG_FMT(MPIDI_CH4_DBG_GENERAL, VERBOSE,
                    (MPL_DBG_FDEST, "Received proc fail notification: %s", failed_procs_string));

    /* FIXME: handle ULFM failed groups here */

    /* the notification string is still read by the debug message (and
     * potentially the ULFM handling) above, so free it only now */
    MPL_free(failed_procs_string);

  fn_exit:
    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_CHECK_FOR_FAILED_PROCS);
    return mpi_errno;
  fn_fail:
    MPL_free(failed_procs_string);
    goto fn_exit;
}
Example no. 4
static int create_unique_type_struct(MPI_Aint count,
                                     const int *blklens,
                                     const MPI_Aint * disps,
                                     const MPI_Datatype * oldtypes,
                                     int type_pos, MPIR_Dataloop ** dlp_p, MPI_Aint * dlsz_p)
{
    /* the same type used more than once in the array; type_pos
     * indexes to the first of these.
     */
    int i, err, cur_pos = 0;
    MPI_Aint *tmp_blklens;
    MPI_Aint *tmp_disps;

    /* count is an upper bound on number of type instances */
    tmp_blklens = (MPI_Aint *) MPL_malloc(count * sizeof(MPI_Aint), MPL_MEM_DATATYPE);
    /* --BEGIN ERROR HANDLING-- */
    if (!tmp_blklens) {
        /* TODO: ??? */
        return create_struct_memory_error();
    }
    /* --END ERROR HANDLING-- */

    tmp_disps = (MPI_Aint *)
        MPL_malloc(count * sizeof(MPI_Aint), MPL_MEM_DATATYPE);
    /* --BEGIN ERROR HANDLING-- */
    if (!tmp_disps) {
        MPL_free(tmp_blklens);
        /* TODO: ??? */
        return create_struct_memory_error();
    }
    /* --END ERROR HANDLING-- */

    for (i = type_pos; i < count; i++) {
        if (oldtypes[i] == oldtypes[type_pos] && blklens[i] != 0) {
            tmp_blklens[cur_pos] = blklens[i];
            tmp_disps[cur_pos] = disps[i];
            cur_pos++;
        }
    }

    err = MPII_Dataloop_create_indexed(cur_pos, tmp_blklens, tmp_disps, 1,      /* disp in bytes */
                                       oldtypes[type_pos], dlp_p, dlsz_p);

    MPL_free(tmp_blklens);
    MPL_free(tmp_disps);

    return err;

}
Example no. 5
int MPIR_Group_create( int nproc, MPIR_Group **new_group_ptr )
{
    int mpi_errno = MPI_SUCCESS;

    *new_group_ptr = (MPIR_Group *)MPIR_Handle_obj_alloc( &MPIR_Group_mem );
    /* --BEGIN ERROR HANDLING-- */
    if (!*new_group_ptr) {
	mpi_errno = MPIR_Err_create_code( MPI_SUCCESS, MPIR_ERR_RECOVERABLE, "MPIR_Group_create", __LINE__, MPI_ERR_OTHER, "**nomem", 0 );
	return mpi_errno;
    }
    /* --END ERROR HANDLING-- */
    MPIR_Object_set_ref( *new_group_ptr, 1 );
    (*new_group_ptr)->lrank_to_lpid = 
	(MPII_Group_pmap_t *)MPL_malloc( nproc * sizeof(MPII_Group_pmap_t) );
    /* --BEGIN ERROR HANDLING-- */
    if (!(*new_group_ptr)->lrank_to_lpid) {
	MPIR_Handle_obj_free( &MPIR_Group_mem, *new_group_ptr );
	*new_group_ptr = NULL;
	MPIR_CHKMEM_SETERR(mpi_errno,nproc*sizeof(MPII_Group_pmap_t),
			   "newgroup->lrank_to_lpid");
	return mpi_errno;
    }
    /* --END ERROR HANDLING-- */
    (*new_group_ptr)->size = nproc;
    /* Mark the list of ranks sorted by pid as uninitialized */
    (*new_group_ptr)->idx_of_first_lpid = -1;

    (*new_group_ptr)->is_local_dense_monotonic = FALSE;
    return mpi_errno;
}
Example no. 6
int main(int argc, char *argv[])
{
    MPIR_Group group, *group_ptr = &group;
    int i;

    MPI_Init(&argc, &argv);

    /* Setup a sample group */
    group.handle = 1;
    group.ref_count = 1;
    group.size = 4;
    group.rank = 0;
    group.idx_of_first_lpid = -1;
    group.lrank_to_lpid = (MPII_Group_pmap_t *)
        MPL_malloc(group.size * sizeof(MPII_Group_pmap_t), MPL_MEM_OTHER);
    for (i = 0; i < group.size; i++) {
        group.lrank_to_lpid[i].lrank = i;
        group.lrank_to_lpid[i].lpid = group.size - i - 1;
        group.lrank_to_lpid[i].next_lpid = -1;
        group.lrank_to_lpid[i].flag = 0;
    }

    /* Set up the group lpid list */
    MPII_Group_setup_lpid_list(group_ptr);

    /* Print the group structure */
    printf("Index of first lpid = %d\n", group.idx_of_first_lpid);
    for (i = 0; i < group.size; i++) {
        printf("lrank_to_lpid[%d].next_lpid = %d, .lpid = %d\n",
               i, group.lrank_to_lpid[i].next_lpid, group.lrank_to_lpid[i].lpid);
    }

    MPI_Finalize();
    return 0;
}
Example no. 7
int MPIDI_CH3U_Post_data_receive_unexpected(MPIR_Request * rreq)
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_CH3U_POST_DATA_RECEIVE_UNEXPECTED);

    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_CH3U_POST_DATA_RECEIVE_UNEXPECTED);

    /* FIXME: to improve performance, allocate temporary buffer from a 
       specialized buffer pool. */
    /* FIXME: to avoid memory exhaustion, integrate buffer pool management
       with flow control */
    MPL_DBG_MSG(MPIDI_CH3_DBG_OTHER,VERBOSE,"unexpected request allocated");
    
    rreq->dev.tmpbuf = MPL_malloc(rreq->dev.recv_data_sz, MPL_MEM_BUFFER);
    if (!rreq->dev.tmpbuf) {
	MPIR_ERR_SETANDJUMP1(mpi_errno,MPI_ERR_OTHER,"**nomem","**nomem %d",
			     rreq->dev.recv_data_sz);
    }
    rreq->dev.tmpbuf_sz = rreq->dev.recv_data_sz;
    
    rreq->dev.iov[0].MPL_IOV_BUF = (MPL_IOV_BUF_CAST)rreq->dev.tmpbuf;
    rreq->dev.iov[0].MPL_IOV_LEN = rreq->dev.recv_data_sz;
    rreq->dev.iov_count = 1;
    rreq->dev.OnDataAvail = MPIDI_CH3_ReqHandler_UnpackUEBufComplete;
    rreq->dev.recv_pending_count = 2;

 fn_fail:
    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_CH3U_POST_DATA_RECEIVE_UNEXPECTED);
    return mpi_errno;
}
Example no. 8
File: mpit.c Project: zhanglt/mpich
/* Create a new category with name <cat_name>.
 * The new category is pushed at the back of cat_table.
 * Also, a new hash entry is added for the category in cat_hash.
 * Returns the newly created category.
 */
static cat_table_entry_t *MPIR_T_cat_create(const char *cat_name)
{
    int cat_idx;
    cat_table_entry_t *cat;
    name2index_hash_t *hash_entry;

    /* Create a new category */
    utarray_extend_back(cat_table);
    cat = (cat_table_entry_t *)utarray_back(cat_table);
    cat->name = MPL_strdup(cat_name);
    cat->desc = NULL;
    utarray_new(cat->cvar_indices, &ut_int_icd);
    utarray_new(cat->pvar_indices, &ut_int_icd);
    utarray_new(cat->subcat_indices, &ut_int_icd);

    /* Record <cat_name, cat_idx> in cat_hash */
    cat_idx = utarray_len(cat_table) - 1;
    hash_entry = MPL_malloc(sizeof(name2index_hash_t));
    MPIU_Assert(hash_entry);
    /* No need to strdup cat_name, since cat_table and cat_hash coexist */
    hash_entry->name = cat_name;
    hash_entry->idx = cat_idx;
    HASH_ADD_KEYPTR(hh, cat_hash, hash_entry->name,
                    strlen(hash_entry->name), hash_entry);

    return cat;
}
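
The hash entry recorded above is what makes later lookup by name cheap. A minimal lookup sketch against the same cat_hash and cat_table; cat_find is a hypothetical helper, not an MPICH function:

static cat_table_entry_t *cat_find(const char *cat_name)
{
    name2index_hash_t *hash_entry;

    /* uthash lookup keyed on the category name */
    HASH_FIND_STR(cat_hash, cat_name, hash_entry);
    if (hash_entry == NULL)
        return NULL;
    /* the hash stores an index rather than a pointer because a growing
       utarray may relocate its elements */
    return (cat_table_entry_t *) utarray_eltptr(cat_table, hash_entry->idx);
}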
Example no. 9
void GenerateNewArgv(int *pargc, char ***pargv, int n)
{
    int argc;
    char **argv;
    int length, i;
    char *buffer, *str;

    length = (sizeof(char*) * (n+3)) +strlen((*pargv)[0]) + 1 + strlen((*pargv)[1]) + 1 + (15 * n);
    buffer = (char*)MPL_malloc(length);
    if (buffer == NULL)
	return;	/* out of memory; leave the original argc/argv in place */

    argc = n+2;
    argv = (char**)buffer;
    str = buffer + (sizeof(char*) * (n+3));	/* strings start right after the (n+3) pointer slots */
    argv[0] = str;
    str += sprintf(str, "%s", (*pargv)[0]);
    *str++ = '\0';
    argv[1] = str;
    str += sprintf(str, "%s", (*pargv)[1]);
    *str++ = '\0';
    for (i=0; i<n; i++)
    {
	argv[i+2] = str;
	str += sprintf(str, "log%d.irlog", i);
	*str++ = '\0';
    }
    argv[n+2] = NULL;	/* argv[argc] terminator */

    *pargc = argc;
    *pargv = argv;
    s_bFreeArgv = TRUE;
}
Example no. 10
int MPIR_Ibsend_impl(const void *buf, int count, MPI_Datatype datatype, int dest, int tag,
                     MPIR_Comm * comm_ptr, MPI_Request * request)
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_Request *request_ptr, *new_request_ptr;
    ibsend_req_info *ibinfo = 0;

    /* We don't try tbsend for MPI_Ibsend because we must create a
     * request even if we can send the message immediately */

    mpi_errno = MPIR_Bsend_isend(buf, count, datatype, dest, tag, comm_ptr, IBSEND, &request_ptr);
    if (mpi_errno != MPI_SUCCESS)
        goto fn_fail;
    MPII_SENDQ_REMEMBER(request_ptr, dest, tag, comm_ptr->context_id);

    /* FIXME: use the memory management macros */
    ibinfo = (ibsend_req_info *) MPL_malloc(sizeof(ibsend_req_info), MPL_MEM_BUFFER);
    MPIR_ERR_CHKANDJUMP(!ibinfo, mpi_errno, MPI_ERR_OTHER, "**nomem");
    ibinfo->req = request_ptr;
    ibinfo->cancelled = 0;
    mpi_errno = MPIR_Grequest_start(MPIR_Ibsend_query, MPIR_Ibsend_free,
                                    MPIR_Ibsend_cancel, ibinfo, &new_request_ptr);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);
    /* The request is immediately complete because the MPIR_Bsend_isend has
     * already moved the data out of the user's buffer */
    MPIR_Grequest_complete(new_request_ptr);
    MPIR_OBJ_PUBLISH_HANDLE(*request, new_request_ptr->handle);

  fn_exit:
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
Example no. 11
int MPII_Genutil_sched_imcast(const void *buf,
                              int count,
                              MPI_Datatype dt,
                              UT_array * dests,
                              int num_dests,
                              int tag,
                              MPIR_Comm * comm_ptr,
                              MPII_Genutil_sched_t * sched, int n_in_vtcs, int *in_vtcs)
{
    vtx_t *vtxp;
    int vtx_id;

    /* assign a new vertex */
    vtx_id = MPII_Genutil_vtx_create(sched, &vtxp);
    vtxp->vtx_kind = MPII_GENUTIL_VTX_KIND__IMCAST;
    MPII_Genutil_vtx_add_dependencies(sched, vtx_id, n_in_vtcs, in_vtcs);

    /* store the arguments */
    vtxp->u.imcast.buf = (void *) buf;
    vtxp->u.imcast.count = count;
    vtxp->u.imcast.dt = dt;
    vtxp->u.imcast.num_dests = num_dests;
    utarray_new(vtxp->u.imcast.dests, &ut_int_icd, MPL_MEM_COLL);
    utarray_concat(vtxp->u.imcast.dests, dests, MPL_MEM_COLL);
    vtxp->u.imcast.tag = tag;
    vtxp->u.imcast.comm = comm_ptr;
    vtxp->u.imcast.req =
        (struct MPIR_Request **) MPL_malloc(sizeof(struct MPIR_Request *) * num_dests,
                                            MPL_MEM_COLL);
    vtxp->u.imcast.last_complete = -1;

    MPL_DBG_MSG_FMT(MPIR_DBG_COLL, VERBOSE,
                    (MPL_DBG_FDEST, "Gentran: schedule [%d] imcast", vtx_id));
    return vtx_id;
}
Example no. 12
IRLOG_IOStruct *IRLOG_CreateOutputStruct(const char *filename)
{
    IRLOG_IOStruct *pOutput = NULL;

    /* allocate a data structure */
    pOutput = (IRLOG_IOStruct *) MPL_malloc(sizeof(IRLOG_IOStruct), MPL_MEM_DEBUG);
    if (pOutput == NULL) {
        MPL_error_printf("malloc failed - %s\n", strerror(errno));
        return NULL;
    }

    /* open the output clog file */
    pOutput->f = fopen(filename, "wb");
    if (pOutput->f == NULL) {
        MPL_error_printf("Unable to open output file '%s' - %s\n", filename, strerror(errno));
        MPL_free(pOutput);
        return NULL;
    }

    /* set all the data fields */
    pOutput->header.type = RLOG_INVALID_TYPE;
    pOutput->pCurHeader = pOutput->buffer;
    pOutput->pNextHeader = pOutput->buffer;
    pOutput->pEnd = &pOutput->buffer[RLOG_BUFFSIZE];

    return pOutput;
}
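
The routine above acquires exactly two resources, the struct and the FILE handle, so teardown is the reverse of acquisition. A minimal sketch; the name is hypothetical, and the real IRLOG close routine presumably flushes any buffered records first:

int IRLOG_CloseOutputStruct(IRLOG_IOStruct **ppOutput)
{
    if (ppOutput == NULL || *ppOutput == NULL)
        return -1;
    /* release in reverse order of acquisition */
    fclose((*ppOutput)->f);
    MPL_free(*ppOutput);
    *ppOutput = NULL;
    return 0;
}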
Example no. 13
static int create_basic_all_bytes_struct(MPI_Aint count,
                                         const int *blklens,
                                         const MPI_Aint * disps,
                                         const MPI_Datatype * oldtypes,
                                         MPIR_Dataloop ** dlp_p, MPI_Aint * dlsz_p)
{
    int i, err, cur_pos = 0;
    MPI_Aint *tmp_blklens;
    MPI_Aint *tmp_disps;

    /* count is an upper bound on number of type instances */
    tmp_blklens = (MPI_Aint *) MPL_malloc(count * sizeof(MPI_Aint), MPL_MEM_DATATYPE);

    /* --BEGIN ERROR HANDLING-- */
    if (!tmp_blklens) {
        return create_struct_memory_error();
    }
    /* --END ERROR HANDLING-- */

    tmp_disps = (MPI_Aint *) MPL_malloc(count * sizeof(MPI_Aint), MPL_MEM_DATATYPE);

    /* --BEGIN ERROR HANDLING-- */
    if (!tmp_disps) {
        MPL_free(tmp_blklens);
        return create_struct_memory_error();
    }
    /* --END ERROR HANDLING-- */

    for (i = 0; i < count; i++) {
        if (oldtypes[i] != MPI_LB && oldtypes[i] != MPI_UB && blklens[i] != 0) {
            MPI_Aint sz;

            MPIR_Datatype_get_size_macro(oldtypes[i], sz);
            tmp_blklens[cur_pos] = (int) sz * blklens[i];
            tmp_disps[cur_pos] = disps[i];
            cur_pos++;
        }
    }
    err = MPII_Dataloop_create_indexed(cur_pos, tmp_blklens, tmp_disps, 1,      /* disp in bytes */
                                       MPI_BYTE, dlp_p, dlsz_p);

    MPL_free(tmp_blklens);
    MPL_free(tmp_disps);

    return err;
}
Example no. 14
void MPIDI_OFI_mr_key_allocator_init()
{
    mr_key_allocator.chunk_size = 128;
    mr_key_allocator.num_ints = mr_key_allocator.chunk_size;
    mr_key_allocator.last_free_mr_key = 0;
    mr_key_allocator.bitmask = MPL_malloc(sizeof(uint64_t) * mr_key_allocator.num_ints,
                                          MPL_MEM_RMA);
    memset(mr_key_allocator.bitmask, 0xFF, sizeof(uint64_t) * mr_key_allocator.num_ints);
}
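
A set bit in the bitmask marks a free MR key, which is why every word is initialized to 0xFF. The sketch below shows how keys could be handed out and returned under that encoding; mr_key_alloc and mr_key_free are hypothetical helpers (MPICH's real allocator also tracks last_free_mr_key and grows the bitmask on demand):

static uint64_t mr_key_alloc(void)
{
    uint64_t i;
    int b;

    for (i = 0; i < mr_key_allocator.num_ints; i++) {
        if (mr_key_allocator.bitmask[i] == 0)
            continue;   /* all 64 keys of this chunk are taken */
        /* the lowest set bit is the lowest free key in this chunk */
        for (b = 0; b < 64; b++) {
            if (mr_key_allocator.bitmask[i] & ((uint64_t) 1 << b)) {
                mr_key_allocator.bitmask[i] &= ~((uint64_t) 1 << b);
                return i * 64 + b;
            }
        }
    }
    return UINT64_MAX;  /* exhausted; the real code grows the bitmask */
}

static void mr_key_free(uint64_t key)
{
    /* setting the bit marks the key as free again */
    mr_key_allocator.bitmask[key / 64] |= (uint64_t) 1 << (key % 64);
}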
Example no. 15
File: mpit.c Project: zhanglt/mpich
/* A low level, generic and internally used interface to register
 * a pvar to MPIR_T. Other modules should use interfaces defined
 * for concrete pvar classes.
 *
 * IN: varclass, MPI_T_PVAR_CLASS_*
 * IN: dtype, MPI datatype for this pvar
 * IN: name, Name of the pvar
 * IN: addr, Pointer to the pvar if known at registration, otherwise NULL.
 * IN: count, # of elements of this pvar if known at registration, otherwise 0.
 * IN: etype, MPI_T_enum or MPI_T_ENUM_NULL
 * IN: verb, MPI_T_PVAR_VERBOSITY_*
 * IN: binding, MPI_T_BIND_*
 * IN: flags, Bitwise OR of MPIR_T_R_PVAR_FLAGS_{}
 * IN: get_value, If not NULL, it is a callback to read the pvar.
 * IN: get_count, If not NULL, it is a callback to read count of the pvar.
 * IN: cat, Category name of the pvar
 * IN: desc, Description of the pvar
 */
void MPIR_T_PVAR_REGISTER_impl(
    int varclass, MPI_Datatype dtype, const char* name, void *addr, int count,
    MPIR_T_enum_t *etype, int verb, int binding, int flags,
    MPIR_T_pvar_get_value_cb get_value, MPIR_T_pvar_get_count_cb get_count,
    const char * cat, const char * desc)
{
    name2index_hash_t *hash_entry;
    pvar_table_entry_t *pvar;
    int pvar_idx;
    int seq = varclass - MPIR_T_PVAR_CLASS_FIRST;

    /* Check whether this is a replicated pvar, whose name is unique per class */
    HASH_FIND_STR(pvar_hashs[seq], name, hash_entry);

    if (hash_entry != NULL) {
        /* Found it, the pvar already exists */
        pvar_idx = hash_entry->idx;
        pvar = (pvar_table_entry_t *)utarray_eltptr(pvar_table, pvar_idx);
        /* Should never override an existing & active var */
        MPIU_Assert(pvar->active != TRUE);
        pvar->active = TRUE;
        /* FIXME: Do we need to check consistency between the old and new? */
    } else {
        /* Not found, so push the pvar to back of pvar_table */
        utarray_extend_back(pvar_table);
        pvar = (pvar_table_entry_t *)utarray_back(pvar_table);
        pvar->active = TRUE;
        pvar->varclass = varclass;
        pvar->datatype = dtype;
        pvar->name = MPL_strdup(name);
        MPIU_Assert(pvar->name);
        pvar->addr = addr;
        pvar->count = count;
        pvar->enumtype = etype;
        pvar->verbosity = verb;
        pvar->bind = binding;
        pvar->flags = flags;
        pvar->get_value = get_value;
        pvar->get_count = get_count;
        pvar->desc = MPL_strdup(desc);
        MPIU_Assert(pvar->desc);

        /* Record <name, index> in hash table */
        pvar_idx = utarray_len(pvar_table) - 1;
        hash_entry = MPL_malloc(sizeof(name2index_hash_t));
        MPIU_Assert(hash_entry);
        /* No need to strdup name, since pvar_table and pvar_hashs coexist */
        hash_entry->name = name;
        hash_entry->idx = pvar_idx;
        HASH_ADD_KEYPTR(hh, pvar_hashs[seq], hash_entry->name,
                        strlen(hash_entry->name), hash_entry);

        /* Add the pvar to a category */
        MPIR_T_cat_add_pvar(cat, utarray_len(pvar_table)-1);
    }
}
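
Given that interface, registering a simple counter pvar might look like the sketch below. The variable, category, and wrapper names are invented; the class, verbosity, and binding constants are the standard MPI_T ones, the NULL enum and NULL callbacks mean the pvar is read directly through addr, and 0 is assumed to be an acceptable flags value:

static unsigned long long mem_allocated = 0;

static void register_mem_pvar(void)
{
    MPIR_T_PVAR_REGISTER_impl(MPI_T_PVAR_CLASS_COUNTER, MPI_UNSIGNED_LONG_LONG,
                              "mem_allocated", &mem_allocated, 1,
                              NULL /* no enum */, MPI_T_PVAR_VERBOSITY_USER_BASIC,
                              MPI_T_BIND_NO_OBJECT, 0 /* no flags */,
                              NULL /* read through addr */, NULL /* fixed count */,
                              "MEMORY", "Bytes currently allocated");
}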
Example no. 16
struct MPIR_Segment *MPIR_Segment_alloc(const void *buf, MPI_Aint count, MPI_Datatype handle)
{
    struct MPIR_Segment *segp;

    segp = (struct MPIR_Segment *) MPL_malloc(sizeof(struct MPIR_Segment), MPL_MEM_DATATYPE);
    if (segp)
        segment_init(buf, count, handle, segp);

    return segp;
}
Example no. 17
static int mpi_to_pmi_keyvals( MPIR_Info *info_ptr, PMI_keyval_t **kv_ptr,
				int *nkeys_ptr )
{
    char key[MPI_MAX_INFO_KEY];
    PMI_keyval_t *kv = 0;
    int          i, nkeys = 0, vallen, flag, mpi_errno=MPI_SUCCESS;

    if (!info_ptr || info_ptr->handle == MPI_INFO_NULL) {
	goto fn_exit;
    }

    MPIR_Info_get_nkeys_impl( info_ptr, &nkeys );
    if (nkeys == 0) {
	goto fn_exit;
    }
    kv = (PMI_keyval_t *)MPL_malloc( nkeys * sizeof(PMI_keyval_t) );
    if (!kv) { MPIR_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER,"**nomem"); }

    for (i=0; i<nkeys; i++) {
	mpi_errno = MPIR_Info_get_nthkey_impl( info_ptr, i, key );
	if (mpi_errno) { MPIR_ERR_POP(mpi_errno); }
	MPIR_Info_get_valuelen_impl( info_ptr, key, &vallen, &flag );
        MPIR_ERR_CHKANDJUMP1(!flag, mpi_errno, MPI_ERR_OTHER,"**infonokey", "**infonokey %s", key);

	kv[i].key = MPL_strdup(key);
	kv[i].val = MPL_malloc( vallen + 1 );
	if (!kv[i].key || !kv[i].val) { 
	    MPIR_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER,"**nomem" );
	}
	MPIR_Info_get_impl( info_ptr, key, vallen+1, kv[i].val, &flag );
        MPIR_ERR_CHKANDJUMP1(!flag, mpi_errno, MPI_ERR_OTHER,"**infonokey", "**infonokey %s", key);
	MPL_DBG_MSG_FMT(MPIDI_CH3_DBG_OTHER,TERSE,(MPL_DBG_FDEST,"key: <%s>, value: <%s>\n", kv[i].key, kv[i].val));
    }

 fn_fail:
 fn_exit:
    *kv_ptr    = kv;
    *nkeys_ptr = nkeys;
    return mpi_errno;
}
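
Each kv[i] built above owns a strdup'ed key and a malloc'ed value, so the caller must release them individually before freeing the array. A minimal cleanup sketch; the helper name is hypothetical:

static void free_pmi_keyvals(PMI_keyval_t *kv, int nkeys)
{
    int i;

    if (kv == NULL)
        return;
    for (i = 0; i < nkeys; i++) {
        MPL_free((char *) kv[i].key);   /* MPL_strdup'ed above */
        MPL_free(kv[i].val);            /* MPL_malloc'ed above */
    }
    MPL_free(kv);
}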
Example no. 18
static int mpi_to_pmi_keyvals(MPIR_Info * info_ptr, PMI_keyval_t ** kv_ptr, int *nkeys_ptr)
{
    char key[MPI_MAX_INFO_KEY];
    PMI_keyval_t *kv = 0;
    int i, nkeys = 0, vallen, flag, mpi_errno = MPI_SUCCESS;

    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_MPI_TO_PMI_KEYVALS);
    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_MPI_TO_PMI_KEYVALS);

    if (!info_ptr || info_ptr->handle == MPI_INFO_NULL)
        goto fn_exit;

    MPIR_Info_get_nkeys_impl(info_ptr, &nkeys);

    if (nkeys == 0)
        goto fn_exit;

    kv = (PMI_keyval_t *) MPL_malloc(nkeys * sizeof(PMI_keyval_t), MPL_MEM_BUFFER);
    MPIR_ERR_CHKANDJUMP(!kv, mpi_errno, MPI_ERR_OTHER, "**nomem");

    for (i = 0; i < nkeys; i++) {
        mpi_errno = MPIR_Info_get_nthkey_impl(info_ptr, i, key);
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);
        MPIR_Info_get_valuelen_impl(info_ptr, key, &vallen, &flag);
        kv[i].key = (const char *) MPL_strdup(key);
        kv[i].val = (char *) MPL_malloc(vallen + 1, MPL_MEM_BUFFER);
        MPIR_Info_get_impl(info_ptr, key, vallen + 1, kv[i].val, &flag);
    }

  fn_exit:
    *kv_ptr = kv;
    *nkeys_ptr = nkeys;
    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_MPI_TO_PMI_KEYVALS);
    return mpi_errno;

  fn_fail:
    goto fn_exit;
}
Example no. 19
/*
  Initialize a new PMI group that will be the parent of all
  PMIProcesses until the next group is created
  Each group also starts with a KV Space

  If there is an allocation failure, return non-zero.
*/
int PMISetupNewGroup(int nProcess, PMIKVSpace * kvs)
{
    PMIGroup *g;
    curPMIGroup = (PMIGroup *) MPL_malloc(sizeof(PMIGroup), MPL_MEM_PM);
    if (!curPMIGroup)
        return 1;

    curPMIGroup->nProcess = nProcess;
    curPMIGroup->groupID = pmimaster.nGroups++;
    curPMIGroup->nInBarrier = 0;
    curPMIGroup->pmiProcess =
        (PMIProcess **) MPL_malloc(sizeof(PMIProcess *) * nProcess, MPL_MEM_PM);
    if (!curPMIGroup->pmiProcess) {
        MPL_free(curPMIGroup); /* don't leak the group on failure */
        curPMIGroup = NULL;
        return 1;
    }
    curPMIGroup->nextGroup = 0;
    curNprocess = 0;

    /* Add to PMIMaster */
    g = pmimaster.groups;
    if (!g) {
        pmimaster.groups = curPMIGroup;
    } else {
        while (g) {
            if (!g->nextGroup) {
                g->nextGroup = curPMIGroup;
                break;
            }
            g = g->nextGroup;
        }
    }
    if (kvs) {
        curPMIGroup->kvs = kvs;
    } else {
        curPMIGroup->kvs = fPMIKVSAllocate();
    }

    return 0;
}
Example no. 20
int MPL_args_deserialize(int len, const void *serialized_buf, int *argc, char ***argv)
{
    const char *buf = serialized_buf;
    int nargs;
    int *arg_lengths;
    char **targv;
    int i;

    nargs = *((int *) buf);
    buf += sizeof(int);

    targv = (char **) MPL_malloc(nargs * sizeof(char *), MPL_MEM_STRINGS);
    arg_lengths = (int *) MPL_malloc(nargs * sizeof(int), MPL_MEM_STRINGS);

    assert(targv && arg_lengths);

    for (i = 0; i < nargs; i++) {
        arg_lengths[i] = (*((int *) buf));
        buf += sizeof(int);

        /* allocate an extra end-of-string character for each string */
        targv[i] = (char *) MPL_malloc(arg_lengths[i] + 1, MPL_MEM_STRINGS);
        assert(targv[i]);
    }

    for (i = 0; i < nargs; i++) {
        memcpy(targv[i], buf, arg_lengths[i]);
        targv[i][arg_lengths[i]] = 0;   /* append an end of string character */
        buf += arg_lengths[i];
    }

    *argc = nargs;
    *argv = targv;

    MPL_free(arg_lengths);

    return 0;
}
Example no. 21
int MPIDI_CH3U_Receive_data_unexpected(MPIR_Request * rreq, void *buf, intptr_t *buflen, int *complete)
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_CH3U_RECEIVE_DATA_UNEXPECTED);

    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_CH3U_RECEIVE_DATA_UNEXPECTED);

    /* FIXME: to improve performance, allocate temporary buffer from a 
       specialized buffer pool. */
    /* FIXME: to avoid memory exhaustion, integrate buffer pool management
       with flow control */
    MPL_DBG_MSG(MPIDI_CH3_DBG_OTHER,VERBOSE,"unexpected request allocated");
    
    rreq->dev.tmpbuf = MPL_malloc(rreq->dev.recv_data_sz, MPL_MEM_BUFFER);
    if (!rreq->dev.tmpbuf) {
	MPIR_ERR_SETANDJUMP1(mpi_errno,MPI_ERR_OTHER,"**nomem","**nomem %d",
			     rreq->dev.recv_data_sz);
    }
    rreq->dev.tmpbuf_sz = rreq->dev.recv_data_sz;
    
    /* if all of the data has already been received, copy it
       now, otherwise build an iov and let the channel copy it */
    if (rreq->dev.recv_data_sz <= *buflen)
    {
        MPIR_Memcpy(rreq->dev.tmpbuf, buf, rreq->dev.recv_data_sz);
        *buflen = rreq->dev.recv_data_sz;
        rreq->dev.recv_pending_count = 1;
        *complete = TRUE;
    }
    else
    {
        rreq->dev.iov[0].MPL_IOV_BUF = (MPL_IOV_BUF_CAST)((char *)rreq->dev.tmpbuf);
        rreq->dev.iov[0].MPL_IOV_LEN = rreq->dev.recv_data_sz;
        rreq->dev.iov_count = 1;
        rreq->dev.recv_pending_count = 2;
        *buflen = 0;
        *complete = FALSE;
    }

    if (MPIDI_Request_get_msg_type(rreq) == MPIDI_REQUEST_EAGER_MSG)
        MPIR_T_PVAR_LEVEL_INC(RECVQ, unexpected_recvq_buffer_size, rreq->dev.tmpbuf_sz);

    rreq->dev.OnDataAvail = MPIDI_CH3_ReqHandler_UnpackUEBufComplete;

 fn_fail:
    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_CH3U_RECEIVE_DATA_UNEXPECTED);
    return mpi_errno;
}
Example no. 22
int MPID_nem_lmt_vmsplice_start_recv(MPIDI_VC_t *vc, MPID_Request *rreq, MPL_IOV s_cookie)
{
    int mpi_errno = MPI_SUCCESS;
    int i;
    int complete = 0;
    struct lmt_vmsplice_node *node = NULL;
    MPIDI_CH3I_VC *vc_ch = &vc->ch;
    int pipe_fd;
    MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_LMT_VMSPLICE_START_RECV);

    MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_LMT_VMSPLICE_START_RECV);

    if (vc_ch->lmt_recv_copy_buf_handle == NULL) {
        MPIU_Assert(s_cookie.MPL_IOV_BUF != NULL);
        vc_ch->lmt_recv_copy_buf_handle = MPL_strdup(s_cookie.MPL_IOV_BUF);
    }

    /* XXX DJG FIXME in a real version we would want to cache the fd on the vc
       so that we don't have two open's on the critical path every time. */
    pipe_fd = open(vc_ch->lmt_recv_copy_buf_handle, O_NONBLOCK|O_RDONLY);
    MPIR_ERR_CHKANDJUMP1(pipe_fd < 0, mpi_errno, MPI_ERR_OTHER, "**open",
                         "**open %s", MPIU_Strerror(errno));

    MPID_nem_lmt_send_CTS(vc, rreq, NULL, 0);

    mpi_errno = populate_iov_from_req(rreq);
    if (mpi_errno) MPIR_ERR_POP(mpi_errno);

    mpi_errno = do_readv(rreq, pipe_fd, rreq->dev.iov, &rreq->dev.iov_offset,
                         &rreq->dev.iov_count, &complete);

    /* push request if not complete for progress checks later */
    if (!complete) {
        node = MPL_malloc(sizeof(struct lmt_vmsplice_node));
        MPIR_ERR_CHKANDJUMP(!node, mpi_errno, MPI_ERR_OTHER, "**nomem");
        node->pipe_fd = pipe_fd;
        node->req = rreq;
        node->next = outstanding_head;
        outstanding_head = node;
        ++MPID_nem_local_lmt_pending;
    }

fn_exit:
    MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_LMT_VMSPLICE_START_RECV);
    return mpi_errno;
fn_fail:
    goto fn_exit;
}
Example no. 23
/* Create a structure that we will use to remember files created for
   publishing.  */
int MPID_NS_Create(const MPIR_Info * info_ptr, MPID_NS_Handle * handle_ptr)
{
    const char *dirname;
    struct stat st;
    int err, ret;

    *handle_ptr = (MPID_NS_Handle) MPL_malloc(sizeof(struct MPID_NS_Handle), MPL_MEM_PM);
    /* --BEGIN ERROR HANDLING-- */
    if (!*handle_ptr) {
        err =
            MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE, __func__, __LINE__,
                                 MPI_ERR_OTHER, "**nomem", 0);
        return err;
    }
    /* --END ERROR HANDLING-- */
    (*handle_ptr)->nactive = 0;
    (*handle_ptr)->mypid = getpid();

    /* Get the dirname.  Could use an info value of NAMEPUB_CONTACT */
    dirname = MPIR_CVAR_NAMESERV_FILE_PUBDIR;
    if (!dirname) {
        /* user did not specify a directory, try using HOME */
        ret = MPL_env2str("HOME", &dirname);
        if (!ret) {
            /* HOME not found ; use current directory */
            dirname = ".";
        }
    }

    MPL_strncpy((*handle_ptr)->dirname, dirname, MAXPATHLEN);
    MPL_strnapp((*handle_ptr)->dirname, "/.mpinamepub/", MAXPATHLEN);

    /* Make the directory if necessary */
    /* FIXME : Determine if the directory exists before trying to create it */

    if (stat((*handle_ptr)->dirname, &st) || !S_ISDIR(st.st_mode)) {
        /* This mode is rwx by owner only.  */
        if (mkdir((*handle_ptr)->dirname, 0000700)) {
            /* FIXME : An error.  Ignore most ?
             * For example, ignore EEXIST?  */
            ;
        }
    }

    return 0;
}
Example no. 24
int MPID_nem_lmt_vmsplice_start_send(MPIDI_VC_t *vc, MPID_Request *sreq, MPL_IOV r_cookie)
{
    int mpi_errno = MPI_SUCCESS;
    MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_LMT_VMSPLICE_START_SEND);
    int pipe_fd;
    int complete;
    struct lmt_vmsplice_node *node = NULL;
    int (*reqFn)(MPIDI_VC_t *, MPID_Request *, int *);
    MPIDI_CH3I_VC *vc_ch = &vc->ch;

    MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_LMT_VMSPLICE_START_SEND);

    /* Must do this after the other side has opened for reading, otherwise we
       will error out with ENXIO.  This will be indicated by the receipt of a
       CTS message. */
    pipe_fd = open(vc_ch->lmt_copy_buf_handle, O_NONBLOCK|O_WRONLY);
    MPIR_ERR_CHKANDJUMP1(pipe_fd < 0, mpi_errno, MPI_ERR_OTHER, "**open",
                         "**open %s", MPIU_Strerror(errno));

    mpi_errno = populate_iov_from_req(sreq);
    if (mpi_errno) MPIR_ERR_POP(mpi_errno);

    /* send the first flight */
    sreq->ch.vc = vc; /* XXX DJG is this already assigned? */
    complete = 0;
    mpi_errno = do_vmsplice(sreq, pipe_fd, sreq->dev.iov,
                            &sreq->dev.iov_offset, &sreq->dev.iov_count, &complete);
    if (mpi_errno) MPIR_ERR_POP(mpi_errno);

    if (!complete) {
        /* push for later progress */
        node = MPL_malloc(sizeof(struct lmt_vmsplice_node));
        MPIR_ERR_CHKANDJUMP(!node, mpi_errno, MPI_ERR_OTHER, "**nomem");
        node->pipe_fd = pipe_fd;
        node->req = sreq;
        node->next = outstanding_head;
        outstanding_head = node;
        ++MPID_nem_local_lmt_pending;
    }

fn_fail:
fn_exit:
    MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_LMT_VMSPLICE_START_SEND);
    return mpi_errno;
}
Example no. 25
int MPL_args_serialize(int argc, char **argv, int *len, void **serialized_buf)
{
    int buf_size, offset, tmp;
    char *buf;
    int i;

    /*
     * the serialized format will contain the following:
     *   1. An integer indicating how many arguments there are
     *   2. An array of integers indicating the length of each argument
     *   3. An array of strings with the actual arguments
     */

    buf_size = 0;
    buf_size += sizeof(int);    /* for the number of arguments */
    buf_size += argc * sizeof(int);     /* for the argument lengths */
    for (i = 0; i < argc; i++)
        buf_size += strlen(argv[i]);    /* for the arguments themselves */

    buf = MPL_malloc(buf_size, MPL_MEM_STRINGS);
    assert(buf);

    offset = 0;
    memcpy(buf, &argc, sizeof(int));
    offset += sizeof(int);

    for (i = 0; i < argc; i++) {
        tmp = strlen(argv[i]);
        memcpy(buf + offset, &tmp, sizeof(int));
        offset += sizeof(int);
    }

    for (i = 0; i < argc; i++) {
        memcpy(buf + offset, argv[i], strlen(argv[i]));
        offset += strlen(argv[i]);
    }

    *len = buf_size;
    *serialized_buf = buf;

    return 0;
}
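
Together with MPL_args_deserialize from Example no. 20, this gives a straightforward round trip; note that the serialized lengths exclude the terminating NUL, which deserialization re-appends. A minimal usage sketch (assumes the MPL headers and stdio.h):

void args_roundtrip_demo(void)
{
    char *args[] = { "prog", "--verbose", "input.dat" };
    int len, new_argc, i;
    void *buf;
    char **new_argv;

    MPL_args_serialize(3, args, &len, &buf);
    MPL_args_deserialize(len, buf, &new_argc, &new_argv);

    for (i = 0; i < new_argc; i++)
        printf("%s\n", new_argv[i]);    /* prog, --verbose, input.dat */

    /* every string plus the two top-level buffers were heap-allocated */
    for (i = 0; i < new_argc; i++)
        MPL_free(new_argv[i]);
    MPL_free(new_argv);
    MPL_free(buf);
}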
Example no. 26
ArrowNode *GetArrowNode(int rank)
{
    ArrowNode *pNode = g_pArrowList;

    while (pNode)
    {
	if (pNode->rank == rank)
	    return pNode;
	pNode = pNode->pNext;
    }

    pNode = (ArrowNode *)MPL_malloc(sizeof(ArrowNode));
    if (pNode == NULL)
	return NULL;	/* out of memory */
    pNode->pEndList = NULL;
    pNode->pStartList = NULL;
    pNode->rank = rank;
    pNode->pNext = g_pArrowList;
    g_pArrowList = pNode;

    return pNode;
}
Example no. 27
static
void init_grank_port_mapping(void)
{
	static int initialized = 0;
	unsigned int pg_size = MPIDI_Process.my_pg_size;
	unsigned int i;

	if (initialized) {
		PRINTERROR("Multiple calls of init_grank_port_mapping()\n");
		exit(1);
	}

	MPIDI_Process.grank2con = MPL_malloc(sizeof(MPIDI_Process.grank2con[0]) * pg_size, MPL_MEM_OBJECT);
	assert(MPIDI_Process.grank2con);

	for (i = 0; i < pg_size; i++) {
		grank2con_set(i, NULL);
	}

	initialized = 1;
}
Example no. 28
int MPIDI_Populate_vc_node_ids(MPIDI_PG_t *pg, int our_pg_rank)
{
    int mpi_errno = MPI_SUCCESS;
    int i;
    MPID_Node_id_t *out_nodemap;
    out_nodemap = (MPID_Node_id_t *) MPL_malloc(pg->size * sizeof(MPID_Node_id_t));
    MPIR_Assert(out_nodemap != NULL);

    mpi_errno = MPIR_NODEMAP_build_nodemap(pg->size, our_pg_rank, out_nodemap, &g_max_node_id);
    if (mpi_errno) MPIR_ERR_POP(mpi_errno);
    MPIR_Assert(g_max_node_id >= 0);

    for (i = 0; i < pg->size; i++) {
        pg->vct[i].node_id = out_nodemap[i];
    }

fn_exit:
    MPL_free(out_nodemap);
    return mpi_errno;
fn_fail:
    goto fn_exit;
}
Example no. 29
FORT_DLL_SPEC void FORT_CALL mpi_cart_sub_ ( MPI_Fint *v1, MPI_Fint v2[], MPI_Fint *v3, MPI_Fint *ierr ){
    int _ctsize;
    int *l2=0;
    {int _topotype;
    PMPI_Topo_test( (MPI_Comm)*v1, &_topotype );
    if (_topotype != MPI_CART) {
        _ctsize = 0;
    }
    else 
        PMPI_Cartdim_get( (MPI_Comm)*v1, &_ctsize );
    }

    if (_ctsize) {int li;
     l2 = (int *)MPL_malloc(_ctsize * sizeof(int), MPL_MEM_OTHER);
     for (li=0; li<_ctsize; li++) {
        l2[li] = MPII_FROM_FLOG(v2[li]);
     }
    }
    *ierr = MPI_Cart_sub( (MPI_Comm)(*v1), l2, (MPI_Comm *)(v3) );
    if (l2) { MPL_free( l2 ); }
}
Example no. 30
void SaveState(RLOG_STATE *pState)
{
    RLOG_State_list *pIter;

    pIter = g_pList;
    while (pIter)
    {
	if (pIter->state.event == pState->event)
	{
	    /* replace old with new */
	    memcpy(&pIter->state, pState, sizeof(RLOG_STATE));
	    return;
	}
	pIter = pIter->next;
    }

    pIter = (RLOG_State_list*)MPL_malloc(sizeof(RLOG_State_list));
    if (pIter == NULL)
	return;	/* out of memory; drop this state */
    memcpy(&pIter->state, pState, sizeof(RLOG_STATE));
    pIter->next = g_pList;
    g_pList = pIter;
}