Example #1
/*
 * Indexed dataloop constructor: one (offset, blocksize) pair per block
 */
MPID_Dataloop *MPID_Dataloop_init_indexed( int count, int *blocksize, 
					   MPI_Aint *offset )
{
    MPID_Dataloop *it;
    MPI_Aint      extent = 0;
    int           i;

    it = (MPID_Dataloop *)MPIU_Malloc( sizeof(MPID_Dataloop) );
    it->kind                      = MPID_DTYPE_INDEXED | DATALOOP_FINAL_MASK;
    it->loop_params.i_t.count     = count;
    it->loop_params.i_t.blocksize = (int *)MPIU_Malloc( sizeof(int) * count );
    it->loop_params.i_t.offset    = 
	(MPI_Aint *)MPIU_Malloc( sizeof(MPI_Aint) * count );
    for (i=0; i<count; i++) {
	it->loop_params.i_t.offset[i]    = offset[i];
	it->loop_params.i_t.blocksize[i] = blocksize[i];
	if (offset[i] + blocksize[i] > extent) 
	    extent = offset[i] + blocksize[i];
    }
    it->loop_params.i_t.dataloop  = 0;
    it->extent                    = extent;
    it->handle                    = 0;

    return it;
}
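
A minimal usage sketch for the constructor above. It assumes compilation inside the MPICH tree (where MPID_Dataloop, MPI_Aint, and MPIU_Malloc are declared); the block sizes and offsets are made up for illustration:

static MPID_Dataloop *build_example_loop(void)
{
    /* Hypothetical: three blocks at increasing byte offsets */
    int      blocksize[3] = { 4, 2, 8 };
    MPI_Aint offset[3]    = { 0, 16, 32 };

    /* resulting extent is 40, i.e. the max of offset[i] + blocksize[i] */
    return MPID_Dataloop_init_indexed( 3, blocksize, offset );
}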
Example #2
/*
 * Add an environment variable to the global list of variables
 */
int MPIE_Putenv( ProcessWorld *pWorld, const char *env_string )
{
    EnvInfo *genv;
    EnvData *p;

    /* FIXME: This should be getGenv (so allocation/init in one place) */
    if (!pWorld->genv) {
	genv = (EnvInfo *)MPIU_Malloc( sizeof(EnvInfo) );
	if (!genv) return 1;
	genv->includeAll = 1;
	genv->envPairs   = 0;
	genv->envNames   = 0;
	pWorld->genv     = genv;
    }
    genv           = pWorld->genv;

    p              = (EnvData *)MPIU_Malloc( sizeof(EnvData) );
    if (!p) return 1;
    p->name        = 0;
    p->value       = 0;
    p->envvalue    = (const char *)MPIU_Strdup( env_string );
    if (!p->envvalue) {
	MPIU_Free( p );
	return 1;
    }
    p->nextData    = genv->envPairs;
    genv->envPairs = p;

    return 0;
}
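
A sketch of the intended call pattern, assuming a ProcessWorld that the launcher has already set up elsewhere; the KEY=VALUE string is illustrative only:

static int add_timeout_env( ProcessWorld *pWorld )
{
    /* Hypothetical: register one KEY=VALUE string for every process.
       Returns nonzero on allocation failure, as MPIE_Putenv does. */
    return MPIE_Putenv( pWorld, "MPIEXEC_TIMEOUT=60" );
}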
Example #3
int     
MPID_nem_ib_allocate_memory(int pg_rank, int pg_size)
{   
    int mpi_errno = MPI_SUCCESS;
    process_info.polling_group_size = 0;

    if (rdma_polling_set_limit > 0)
    {
        process_info.polling_set = (MPIDI_VC_t**) MPIU_Malloc(rdma_polling_set_limit * sizeof(MPIDI_VC_t*));
    }
    else
    {
        process_info.polling_set = (MPIDI_VC_t**) MPIU_Malloc(pg_size * sizeof(MPIDI_VC_t*));
    }

    if (!process_info.polling_set)
    {
        fprintf(stderr, "[%s:%d]: unable to allocate space for polling set\n",
                __FILE__, __LINE__);
        return MPI_ERR_NO_MEM;
    }

    /* We need to allocate vbufs for send/recv path */
    if ((mpi_errno = allocate_vbufs(rdma_vbuf_pool_size)))
    {
        return mpi_errno;
    }

    return mpi_errno;
}
Example #4
int MPID_NS_Create( const MPID_Info *info_ptr, MPID_NS_Handle *handle_ptr )
{
    static const char FCNAME[] = "MPID_NS_Create";
    int err;
    int length;
    char *pmi_namepub_kvs;

    *handle_ptr = (MPID_NS_Handle)MPIU_Malloc( sizeof(struct MPID_NS_Handle) );
    /* --BEGIN ERROR HANDLING-- */
    if (!*handle_ptr)
    {
	err = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER, "**nomem", 0);
	return err;
    }
    /* --END ERROR HANDLING-- */

    err = PMI_KVS_Get_name_length_max(&length);
    /* --BEGIN ERROR HANDLING-- */
    if (err != PMI_SUCCESS)
    {
	err = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER, "**fail", 0);
	return err;
    }
    /* --END ERROR HANDLING-- */

    (*handle_ptr)->kvsname = (char*)MPIU_Malloc(length);
    /* --BEGIN ERROR HANDLING-- */
    if (!(*handle_ptr)->kvsname)
    {
	err = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER, "**nomem", 0);
	return err;
    }
    /* --END ERROR HANDLING-- */

    pmi_namepub_kvs = getenv("PMI_NAMEPUB_KVS");
    if (pmi_namepub_kvs)
    {
	MPIU_Strncpy((*handle_ptr)->kvsname, pmi_namepub_kvs, length);
    }
    else
    {
	err = PMI_KVS_Get_my_name((*handle_ptr)->kvsname, length);
	/* --BEGIN ERROR HANDLING-- */
	if (err != PMI_SUCCESS)
	{
	    err = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER, "**fail", 0);
	    return err;
	}
	/* --END ERROR HANDLING-- */
    }

    /*printf("namepub kvs: <%s>\n", (*handle_ptr)->kvsname);fflush(stdout);*/
    return 0;
}
Example #5
static int reinit_pmi(void)
{
    int ret;
    int has_parent = 0;
    int pg_rank, pg_size;
    int kvs_name_sz, pg_id_sz;
    
    MPIDI_STATE_DECL(MPID_STATE_REINIT_PMI);

    MPIDI_FUNC_ENTER(MPID_STATE_REINIT_PMI);

    /* Init pmi and do some sanity checks */
    ret = PMI_Init(&has_parent);
    CHECK_ERR(ret, "pmi_init");

    ret = PMI_Get_rank(&pg_rank);
    CHECK_ERR(ret, "pmi_get_rank");

    ret = PMI_Get_size(&pg_size);
    CHECK_ERR(ret, "pmi_get_size");

    CHECK_ERR(pg_size != MPIDI_Process.my_pg->size, "pg size differs after restart");
    CHECK_ERR(pg_rank != MPIDI_Process.my_pg_rank, "pg rank differs after restart");

    /* get new pg_id */
    ret = PMI_KVS_Get_name_length_max(&pg_id_sz);
    CHECK_ERR(ret, "pmi_get_id_length_max");
    
    MPIU_Free(MPIDI_Process.my_pg->id);
   
    MPIDI_Process.my_pg->id = MPIU_Malloc(pg_id_sz + 1);
    CHECK_ERR(MPIDI_Process.my_pg->id == NULL, "malloc failed");

    ret = PMI_KVS_Get_my_name(MPIDI_Process.my_pg->id, pg_id_sz);
    CHECK_ERR(ret, "pmi_kvs_get_my_name");

    /* get new kvsname */
    ret = PMI_KVS_Get_name_length_max(&kvs_name_sz);
    CHECK_ERR(ret, "PMI_KVS_Get_name_length_max");
    
    MPIU_Free(MPIDI_Process.my_pg->connData);
   
    MPIDI_Process.my_pg->connData = MPIU_Malloc(kvs_name_sz + 1);
    CHECK_ERR(MPIDI_Process.my_pg->connData == NULL, "malloc failed");

    ret = PMI_KVS_Get_my_name(MPIDI_Process.my_pg->connData, kvs_name_sz);
    CHECK_ERR(ret, "PMI_Get_my_name");

    
    MPIDI_FUNC_EXIT(MPID_STATE_REINIT_PMI);
    return 0;
}
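
CHECK_ERR is not part of this excerpt. Given how it is invoked above (a failure condition plus a message, in a function that returns nonzero on error), a plausible definition might look like the following; this is an assumption, not the actual macro:

#define CHECK_ERR(cond, msg) do {                                        \
        if (cond) {                                                      \
            fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__, (msg));   \
            return -1;                                                   \
        }                                                                \
    } while (0)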
Example #6
int MPIDI_CH3U_Post_data_receive_unexpected(MPID_Request * rreq)
{
    int mpi_errno = MPI_SUCCESS;
    MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3U_POST_DATA_RECEIVE_UNEXPECTED);

    MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_CH3U_POST_DATA_RECEIVE_UNEXPECTED);

    /* FIXME: to improve performance, allocate temporary buffer from a 
       specialized buffer pool. */
    /* FIXME: to avoid memory exhaustion, integrate buffer pool management
       with flow control */
    MPIU_DBG_MSG(CH3_OTHER,VERBOSE,"unexpected request allocated");
    
    rreq->dev.tmpbuf = MPIU_Malloc(rreq->dev.recv_data_sz);
    if (!rreq->dev.tmpbuf) {
	MPIU_ERR_SETANDJUMP1(mpi_errno,MPI_ERR_OTHER,"**nomem","**nomem %d",
			     rreq->dev.recv_data_sz);
    }
    rreq->dev.tmpbuf_sz = rreq->dev.recv_data_sz;
    
    rreq->dev.iov[0].MPID_IOV_BUF = (MPID_IOV_BUF_CAST)rreq->dev.tmpbuf;
    rreq->dev.iov[0].MPID_IOV_LEN = rreq->dev.recv_data_sz;
    rreq->dev.iov_count = 1;
    rreq->dev.OnDataAvail = MPIDI_CH3_ReqHandler_UnpackUEBufComplete;
    rreq->dev.recv_pending_count = 2;

 fn_fail:
    MPIDI_FUNC_EXIT(MPID_STATE_MPIDI_CH3U_POST_DATA_RECEIVE_UNEXPECTED);
    return mpi_errno;
}
Example #7
static void context_id_init(void)
{
    int i;

#if defined(FINEGRAIN_MPI)
    MPIU_Assert (NULL == context_mask);
    context_mask = (uint32_t *)MPIU_Malloc(MPIR_MAX_CONTEXT_MASK * sizeof(uint32_t));
    MPIU_Assert (NULL != context_mask);
#endif

    for (i = 1; i < MPIR_MAX_CONTEXT_MASK; i++) {
        context_mask[i] = 0xFFFFFFFF;
    }
    /* The first two values are already used (comm_world, comm_self).
     * The third value is also used for the internal-only copy of
     * comm_world, if needed by mpid. */
#ifdef MPID_NEEDS_ICOMM_WORLD
    context_mask[0] = 0xFFFFFFF8;
#else
    context_mask[0] = 0xFFFFFFFC;
#endif
    initialize_context_mask = 0;

#ifdef MPICH_DEBUG_HANDLEALLOC
    /* check for context ID leaks in MPI_Finalize.  Use (_PRIO-1) to make sure
     * that we run after MPID_Finalize. */
    MPIR_Add_finalize(check_context_ids_on_finalize, context_mask, MPIR_FINALIZE_CALLBACK_PRIO - 1); /* FG: TODO IMPORTANT */
#endif
}
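
For illustration, a self-contained sketch of how a free context ID can be picked out of such a mask: a set bit marks a free ID, so allocation scans for the lowest set bit and clears it. This mirrors the data structure above but is not the MPICH allocator itself:

#include <stdint.h>

/* Return the first free id in the mask, or -1 if all are taken (sketch) */
static int find_free_context_id(uint32_t *mask, int nwords)
{
    int w, b;
    for (w = 0; w < nwords; w++) {
        if (mask[w] == 0)
            continue;                   /* every id in this word is in use */
        for (b = 0; b < 32; b++) {
            if (mask[w] & (1u << b)) {
                mask[w] &= ~(1u << b);  /* mark the id as allocated */
                return w * 32 + b;
            }
        }
    }
    return -1;
}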
Example #8
/*
 * MPIU_Thread_create()
 */
void MPIU_Thread_create(MPIU_Thread_func_t func, void * data, MPIU_Thread_id_t * idp, int * errp)
{
    struct MPEI_Thread_info * thread_info;
    int err = MPIU_THREAD_SUCCESS;

    /* FIXME: faster allocation, or avoid it all together? */
    thread_info = (struct MPEI_Thread_info *) MPIU_Malloc(sizeof(struct MPEI_Thread_info));
    if (thread_info != NULL)
    {
        pthread_attr_t attr;

        thread_info->func = func;
        thread_info->data = data;

        pthread_attr_init(&attr);
        pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

        err = pthread_create(idp, &attr, MPEI_Thread_start, thread_info);
        /* FIXME: convert error to an MPIU_THREAD_ERR value */

        pthread_attr_destroy(&attr);
    }
    else
    {
        err = 1000000000;
    }

    if (errp != NULL)
    {
        *errp = err;
    }
}
Example #9
/*
 * MPIU_Thread_create()
 */
void MPIU_Thread_create(MPIU_Thread_func_t func, void * data, MPIU_Thread_id_t * idp, int * errp)
{
    struct MPEI_Thread_info * thread_info;
    int err = MPIU_THREAD_SUCCESS;

    thread_info = (struct MPEI_Thread_info *) MPIU_Malloc(sizeof(struct MPEI_Thread_info));
    if (thread_info != NULL)
    {
        thread_info->func = func;
        thread_info->data = data;
        *idp = CreateThread(NULL, 0, MPEI_Thread_start, thread_info, 0, NULL);
        if (*idp == NULL)
        {
            err = GetLastError();
        }
    }
    else
    {
        err = 1000000000;
    }

    if (errp != NULL)
    {
        *errp = err;
    }
}
Example #10
/*
 * MPIU_Thread_create()
 */
void MPIU_Thread_create(MPIU_Thread_func_t func, void * data, MPIU_Thread_id_t * idp, int * errp)
{
    struct MPEI_Thread_info * thread_info;
    int err = MPIU_THREAD_SUCCESS;

    /* FIXME: faster allocation, or avoid it all together? */
    thread_info = (struct MPEI_Thread_info *) MPIU_Malloc(sizeof(struct MPEI_Thread_info));
    if (thread_info != NULL)
    {
        thread_info->func = func;
        thread_info->data = data;

        err = thr_create(NULL, 0, MPEI_Thread_start, thread_info, THR_DETACHED, idp);
        /* FIXME: convert error to an MPIU_THREAD_ERR value */
    }
    else
    {
        err = 1000000000;
    }

    if (errp != NULL)
    {
        *errp = err;
    }
}
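
Usage is identical across the pthread, Windows, and Solaris-threads variants above; a sketch, assuming MPIU_Thread_func_t is a void (*)(void *) as the wrappers suggest:

/* Hypothetical start routine and caller */
static void worker(void *arg)
{
    /* ... thread body ... */
}

static void spawn_worker(void)
{
    MPIU_Thread_id_t id;
    int err;

    MPIU_Thread_create( worker, NULL, &id, &err );
    if (err != MPIU_THREAD_SUCCESS) {
        /* creation failed; report or fall back to serial execution */
    }
}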
Example #11
int MPID_nem_ib_init_hash_table(
        MPID_nem_ib_hash_table_ptr_t table,
        uint32_t nentries)
{
    int mpi_errno = MPI_SUCCESS;

    table->entries = MPIU_Malloc(
            sizeof(MPID_nem_ib_hash_elem_t) * nentries);
    table->num_entries = nentries;

    if(NULL == table->entries) {
        MPIU_CHKMEM_SETERR(mpi_errno,
                sizeof(MPID_nem_ib_hash_elem_t) * nentries,
                "IB Module Hash Table");
        return mpi_errno;
    }

    memset(table->entries, 0,
            sizeof(MPID_nem_ib_hash_elem_t) * nentries);

    pthread_mutex_init(&table->hash_table_lock, NULL);

    /*   fn_exit: */
           return mpi_errno;
    /*   fn_fail:
           goto fn_exit;
           */
}
Example #12
static int create_r_cookie (char *hostname, int port, int data_sz, char **cookie, int *len)
{
    int mpi_errno = MPI_SUCCESS;
    int hostname_len;
    int cookie_len;
    r_cookie_t *c;

    hostname_len = strnlen (hostname, MAX_HOSTNAME_LEN) + 1;

    cookie_len = sizeof (r_cookie_t) - 1 + hostname_len;

    c = MPIU_Malloc (cookie_len);
    MPIU_ERR_CHKANDJUMP (c == NULL, mpi_errno, MPI_ERR_OTHER, "**nomem");

    c->port = port;
    c->data_sz = data_sz;
    MPIU_Strncpy (c->hostname, hostname, hostname_len);

    *cookie = (char *)c;
    *len = cookie_len;

 fn_exit:
    return mpi_errno;
 fn_fail:
    goto fn_exit;
}
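
The sizeof (r_cookie_t) - 1 + hostname_len arithmetic above is the classic trailing-array idiom: the struct ends in a one-byte char array and the string is stored inline, so one allocation (and later one free) covers both. A self-contained sketch with plain malloc:

#include <stdlib.h>
#include <string.h>

typedef struct {
    int  port;
    char hostname[1];      /* string lives inline, past the fixed fields */
} cookie_t;

static cookie_t *make_cookie(const char *host, int port)
{
    size_t hlen = strlen(host) + 1;
    cookie_t *c = malloc(sizeof(cookie_t) - 1 + hlen);
    if (c == NULL)
        return NULL;
    c->port = port;
    memcpy(c->hostname, host, hlen);   /* one block, one free() later */
    return c;
}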
Example #13
void
MPIDI_Win_datatype_map(MPIDI_Datatype * dt)
{
  if (dt->contig)
    {
      dt->num_contig = 1;
      dt->map = &dt->__map;
      dt->map[0].DLOOP_VECTOR_BUF = (void*)(size_t)dt->true_lb;
      dt->map[0].DLOOP_VECTOR_LEN = dt->size;
    }
  else
    {
      unsigned map_size = dt->pointer->max_contig_blocks*dt->count + 1;
      dt->num_contig = map_size;
      dt->map = (DLOOP_VECTOR*)MPIU_Malloc(map_size * sizeof(DLOOP_VECTOR));
      MPID_assert(dt->map != NULL);

      DLOOP_Offset last = dt->pointer->size*dt->count;
      MPID_Segment seg;
      MPID_Segment_init(NULL, dt->count, dt->type, &seg, 0);
      MPID_Segment_pack_vector(&seg, 0, &last, dt->map, &dt->num_contig);
      MPID_assert((unsigned)dt->num_contig <= map_size);
#ifdef TRACE_ON
      TRACE_ERR("dt->pointer->size=%d  num_contig:  orig=%u  new=%d\n", dt->pointer->size, map_size, dt->num_contig);
      int i;
      for(i=0; i<dt->num_contig; ++i)
        TRACE_ERR("     %d:  BUF=%zu  LEN=%zu\n", i, (size_t)dt->map[i].DLOOP_VECTOR_BUF, (size_t)dt->map[i].DLOOP_VECTOR_LEN);
#endif
    }
}
Example #14
int MPIR_Ibsend_impl(const void *buf, int count, MPI_Datatype datatype, int dest, int tag,
                     MPID_Comm *comm_ptr, MPI_Request *request)
{
    int mpi_errno = MPI_SUCCESS;
    MPID_Request *request_ptr, *new_request_ptr;
    ibsend_req_info *ibinfo=0;

    /* We don't try to use bsend directly for MPI_Ibsend because we must
       create a request even if we can send the message immediately */

    mpi_errno = MPIR_Bsend_isend( buf, count, datatype, dest, tag, comm_ptr,
                                  IBSEND, &request_ptr );
    if (mpi_errno != MPI_SUCCESS) goto fn_fail;

    /* FIXME: use the memory management macros */
    ibinfo = (ibsend_req_info *)MPIU_Malloc( sizeof(ibsend_req_info) );
    MPIU_ERR_CHKANDJUMP( !ibinfo, mpi_errno, MPI_ERR_OTHER, "**nomem" );
    ibinfo->req       = request_ptr;
    ibinfo->cancelled = 0;
    mpi_errno = MPIR_Grequest_start_impl( MPIR_Ibsend_query, MPIR_Ibsend_free,
                                          MPIR_Ibsend_cancel, ibinfo, &new_request_ptr );
    if (mpi_errno) MPIU_ERR_POP(mpi_errno);
    /* The request is immediately complete because the MPIR_Bsend_isend has
       already moved the data out of the user's buffer */
    MPIR_Request_add_ref( request_ptr );
    /* Request count is now 2 (set to 1 in Grequest_start) */
    MPIR_Grequest_complete_impl(new_request_ptr);
    MPIU_OBJ_PUBLISH_HANDLE(*request, new_request_ptr->handle);

fn_exit:
    return mpi_errno;
fn_fail:
    goto fn_exit;
}
Example #15
/* This routine is used to establish a queue of send requests to allow the
   debugger easier access to the active requests.  Some devices may be able
   to provide this information without requiring this separate queue. */
void MPIR_Sendq_remember( MPID_Request *req, 
			  int rank, int tag, int context_id )
{
    MPIR_Sendq *p;

    MPID_THREAD_CS_ENTER(POBJ, req->pobj_mutex);
    if (pool) {
	p = pool;
	pool = p->next;
    }
    else {
	p = (MPIR_Sendq *)MPIU_Malloc( sizeof(MPIR_Sendq) );
	if (!p) {
	    /* Just ignore it */
            req->dbg_next = NULL;
            goto fn_exit;
	}
    }
    p->sreq       = req;
    p->tag        = tag;
    p->rank       = rank;
    p->context_id = context_id;
    p->next       = MPIR_Sendq_head;
    p->prev       = NULL;
    MPIR_Sendq_head = p;
    if (p->next) p->next->prev = p;
    req->dbg_next = p;
fn_exit:
    MPID_THREAD_CS_EXIT(POBJ, req->pobj_mutex);
}
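
A matching release path would unlink the entry and push it back onto the same free pool rather than freeing it. The real counterpart is not shown here, so the sketch below is hypothetical (and omits the per-object lock for brevity), but it follows the structure above:

/* Hypothetical counterpart to MPIR_Sendq_remember */
static void Sendq_forget_sketch( MPID_Request *req )
{
    MPIR_Sendq *p = req->dbg_next;

    if (!p) return;                      /* nothing was remembered */
    if (p->prev) p->prev->next = p->next;
    else         MPIR_Sendq_head = p->next;
    if (p->next) p->next->prev = p->prev;

    p->next = pool;                      /* recycle onto the free pool */
    pool    = p;
    req->dbg_next = NULL;
}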
Example #16
/* --BEGIN DEBUG-- */
void MPITEST_Group_create( int nproc, int myrank, MPI_Group *new_group )
{
    MPID_Group *new_group_ptr;
    int i;

    new_group_ptr = (MPID_Group *)MPIU_Handle_obj_alloc( &MPID_Group_mem );
    if (!new_group_ptr) {
	fprintf( stderr, "Could not create a new group\n" );
	PMPI_Abort( MPI_COMM_WORLD, 1 );
    }
    MPIU_Object_set_ref( new_group_ptr, 1 );
    new_group_ptr->lrank_to_lpid = (MPID_Group_pmap_t *)MPIU_Malloc( nproc * sizeof(MPID_Group_pmap_t) );
    if (!new_group_ptr->lrank_to_lpid) {
	fprintf( stderr, "Could not create lrank map for new group\n" );
	PMPI_Abort( MPI_COMM_WORLD, 1 );
    }

    new_group_ptr->rank = MPI_UNDEFINED;
    for (i=0; i<nproc; i++) {
	new_group_ptr->lrank_to_lpid[i].lrank = i;
	new_group_ptr->lrank_to_lpid[i].lpid  = i;
    }
    new_group_ptr->size = nproc;
    new_group_ptr->rank = myrank;
    new_group_ptr->idx_of_first_lpid = -1;

    *new_group = new_group_ptr->handle;
}
Example #17
IRLOG_IOStruct *IRLOG_CreateOutputStruct(const char *filename)
{
    IRLOG_IOStruct *pOutput = NULL;

    /* allocate a data structure */
    pOutput = (IRLOG_IOStruct*)MPIU_Malloc(sizeof(IRLOG_IOStruct));
    if (pOutput == NULL)
    {
	MPIU_Error_printf("malloc failed - %s\n", strerror(errno));
	return NULL;
    }

    /* open the output clog file */
    pOutput->f = fopen(filename, "wb");
    if (pOutput->f == NULL)
    {
	MPIU_Error_printf("Unable to open output file '%s' - %s\n", filename, strerror(errno));
	MPIU_Free(pOutput);
	return NULL;
    }

    /* set all the data fields */
    pOutput->header.type = RLOG_INVALID_TYPE;
    pOutput->pCurHeader = pOutput->buffer;
    pOutput->pNextHeader = pOutput->buffer;
    pOutput->pEnd = &pOutput->buffer[RLOG_BUFFSIZE];

    return pOutput;
}
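
A usage sketch for the constructor above. The teardown only mirrors the fields visible here (fclose on the FILE plus MPIU_Free) and stands in for whatever close routine the RLOG code actually provides:

static int write_log(const char *name)
{
    IRLOG_IOStruct *out = IRLOG_CreateOutputStruct(name);
    if (out == NULL)
        return -1;                 /* error already printed */
    /* ... fill out->buffer and flush records to out->f ... */
    fclose(out->f);
    MPIU_Free(out);
    return 0;
}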
Example #18
int MPIR_Group_create( int nproc, MPID_Group **new_group_ptr )
{
    int mpi_errno = MPI_SUCCESS;

    *new_group_ptr = (MPID_Group *)MPIU_Handle_obj_alloc( &MPID_Group_mem );
    /* --BEGIN ERROR HANDLING-- */
    if (!*new_group_ptr) {
	mpi_errno = MPIR_Err_create_code( MPI_SUCCESS, MPIR_ERR_RECOVERABLE, "MPIR_Group_create", __LINE__, MPI_ERR_OTHER, "**nomem", 0 );
	return mpi_errno;
    }
    /* --END ERROR HANDLING-- */
    MPIU_Object_set_ref( *new_group_ptr, 1 );
    (*new_group_ptr)->lrank_to_lpid = 
	(MPID_Group_pmap_t *)MPIU_Malloc( nproc * sizeof(MPID_Group_pmap_t) );
    /* --BEGIN ERROR HANDLING-- */
    if (!(*new_group_ptr)->lrank_to_lpid) {
	MPIU_Handle_obj_free( &MPID_Group_mem, *new_group_ptr );
	*new_group_ptr = NULL;
	MPIU_CHKMEM_SETERR(mpi_errno,nproc*sizeof(MPID_Group_pmap_t),
			   "newgroup->lrank_to_lpid");
	return mpi_errno;
    }
    /* --END ERROR HANDLING-- */
    (*new_group_ptr)->size = nproc;
    /* Make sure that there is no question that the list of ranks sorted
       by pids is marked as uninitialized */
    (*new_group_ptr)->idx_of_first_lpid = -1;

    (*new_group_ptr)->is_local_dense_monotonic = FALSE;
    return mpi_errno;
}
Example #19
void MPIDI_CH3U_Buffer_allocate(
    const void * const sbuf, MPI_Aint scount, MPI_Datatype sdt, int * smpi_errno,
    void ** rbuf_handle, MPI_Aint rcount, MPI_Datatype rdt, MPIDI_msg_sz_t * rsz,
    int * rmpi_errno)
{
    int sdt_contig;
    int rdt_contig;
    MPI_Aint sdt_true_lb, rdt_true_lb;
    MPIDI_msg_sz_t sdata_sz;
    MPIDI_msg_sz_t rdata_sz;
    MPID_Datatype * sdt_ptr;
    MPID_Datatype * rdt_ptr;
    MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3U_BUFFER_ALLOCATE);

    MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_CH3U_BUFFER_ALLOCATE);
    *smpi_errno = MPI_SUCCESS;
    *rmpi_errno = MPI_SUCCESS;

    MPIDI_Datatype_get_info(scount, sdt, sdt_contig, sdata_sz, sdt_ptr, sdt_true_lb);
    MPIDI_Datatype_get_info(rcount, rdt, rdt_contig, rdata_sz, rdt_ptr, rdt_true_lb);

    /* --BEGIN ERROR HANDLING-- */
    if (sdata_sz > rdata_sz)
    {
	MPIU_DBG_MSG_FMT(CH3_OTHER,TYPICAL,(MPIU_DBG_FDEST,
	    "message truncated, sdata_sz=" MPIDI_MSG_SZ_FMT " rdata_sz=" MPIDI_MSG_SZ_FMT,
			  sdata_sz, rdata_sz));
	*rmpi_errno = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_TRUNCATE, "**truncate", "**truncate %d %d", sdata_sz, rdata_sz );
	sdata_sz = rdata_sz;
    }
    /* --END ERROR HANDLING-- */

    if (sdata_sz == 0)
    {
	*rsz = 0;
	goto fn_exit;
    }

    if (sdt_contig && rdt_contig)
    {
	*rbuf_handle = (void *)MPIU_Malloc(sdata_sz);
        MPIU_Assert(*rbuf_handle);
	*rsz = sdata_sz;
    }
    else
    {
	/* --BEGIN ERROR HANDLING-- */

        MPIU_DBG_MSG(CH3_OTHER,TYPICAL,"Sender and receiver datatypes are not contiguous");
        *smpi_errno = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_FATAL, FCNAME, __LINE__, MPI_ERR_OTHER, "**zcopybufalloc", "**zcopybufalloc %d %d", scount, rcount);
        *rmpi_errno = *smpi_errno;
        *rsz = 0;
        goto fn_exit;

	/* --END ERROR HANDLING-- */
    }

  fn_exit:
    MPIDI_FUNC_EXIT(MPID_STATE_MPIDI_CH3U_BUFFER_ALLOCATE);
}
Example #20
int
MPID_nem_gm_lmt_init()
{
    int mpi_errno = MPI_SUCCESS;
    int i;
    
    MPID_nem_gm_lmt_queue.head = NULL;
    MPID_nem_gm_lmt_queue.tail = NULL;

    MPID_nem_gm_lmt_free_queue = NULL;
    
    for (i = 0; i < FREE_LMT_QUEUE_ELEMENTS; ++i)
    {
	MPID_nem_gm_lmt_queue_t *e;
	
	e = MPIU_Malloc (sizeof (MPID_nem_gm_lmt_queue_t));
	if (e == NULL)
	{
	    MPIU_CHKMEM_SETERR (mpi_errno, sizeof (MPID_nem_gm_lmt_queue_t), "gm module lmt queue");
	    goto fn_fail;
	}
	e->next = MPID_nem_gm_lmt_free_queue;
	MPID_nem_gm_lmt_free_queue = e;
    }

 fn_exit:
    return mpi_errno;
 fn_fail:
    goto fn_exit;
}
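
Elements are later taken from the head of the free queue. The dequeue code is not part of this excerpt; a hypothetical pop helper consistent with the list built above:

/* Hypothetical: pop one preallocated element, or NULL if the pool is empty */
static MPID_nem_gm_lmt_queue_t *lmt_queue_elem_alloc(void)
{
    MPID_nem_gm_lmt_queue_t *e = MPID_nem_gm_lmt_free_queue;
    if (e)
        MPID_nem_gm_lmt_free_queue = e->next;
    return e;
}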
Example #21
int MPIR_Comm_register_hint(const char *hint_key, MPIR_Comm_hint_fn_t fn, void *state)
{
    int mpi_errno = MPI_SUCCESS;
    struct MPIR_Comm_hint_fn_elt *hint_elt = NULL;
    MPID_MPI_STATE_DECL(MPID_STATE_MPIR_COMM_REGISTER_HINT);

    MPID_MPI_FUNC_ENTER(MPID_STATE_MPIR_COMM_REGISTER_HINT);

    if (MPID_hint_fns == NULL) {
        MPIR_Add_finalize(free_hint_handles, NULL, MPIR_FINALIZE_CALLBACK_PRIO - 1);
    }

    hint_elt = MPIU_Malloc(sizeof(struct MPIR_Comm_hint_fn_elt));
    MPIU_ERR_CHKANDJUMP(!hint_elt, mpi_errno, MPI_ERR_OTHER, "**nomem");
    strncpy(hint_elt->name, hint_key, MPI_MAX_INFO_KEY);
    hint_elt->state = state;
    hint_elt->fn = fn;

    HASH_ADD_STR(MPID_hint_fns, name, hint_elt);

  fn_exit:
    MPID_MPI_FUNC_EXIT(MPID_STATE_MPIR_COMM_REGISTER_HINT);
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
Example #22
/* MSGQUEUE lock is not held */
void
MPIDI_Callback_process_trunc(pami_context_t  context,
                             MPID_Request   *rreq,
                             pami_recv_t    *recv,
                             const void     *sndbuf)
{
  rreq->status.MPI_ERROR = MPI_ERR_TRUNCATE;

  /* -------------------------------------------------------------- */
  /*  The data is already available, so we can just unpack it now.  */
  /* -------------------------------------------------------------- */
  if (recv)
    {
      MPIDI_Request_setCA(rreq, MPIDI_CA_UNPACK_UEBUF_AND_COMPLETE);
      rreq->mpid.uebuflen = MPIR_STATUS_GET_COUNT(rreq->status);
      rreq->mpid.uebuf    = MPIU_Malloc(MPIR_STATUS_GET_COUNT(rreq->status));
      MPID_assert(rreq->mpid.uebuf != NULL);
      rreq->mpid.uebuf_malloc = mpiuMalloc;

      recv->addr = rreq->mpid.uebuf;
    }
  else
    {
      MPIDI_Request_setCA(rreq, MPIDI_CA_UNPACK_UEBUF_AND_COMPLETE);
      rreq->mpid.uebuflen = MPIR_STATUS_GET_COUNT(rreq->status);
      rreq->mpid.uebuf    = (void*)sndbuf;
      MPIDI_RecvDoneCB(context, rreq, PAMI_SUCCESS);
      MPID_Request_release(rreq);
    }
}
Example #23
void GenerateNewArgv(int *pargc, char ***pargv, int n)
{
    int argc;
    char **argv;
    int length, i;
    char *buffer, *str;

    length = (sizeof(char*) * (n+3)) +strlen((*pargv)[0]) + 1 + strlen((*pargv)[1]) + 1 + (15 * n);
    buffer = (char*)MPIU_Malloc(length);

    argc = n+2;
    argv = (char**)buffer;
    /* strings begin just past the n+3 pointer slots (argv[0..n+1] plus NULL) */
    str = buffer + (sizeof(char*) * (n+3));
    argv[0] = str;
    str += sprintf(str, "%s", (*pargv)[0]);
    *str++ = '\0';
    argv[1] = str;
    str += sprintf(str, "%s", (*pargv)[1]);
    *str++ = '\0';
    for (i=0; i<n; i++)
    {
	argv[i+2] = str;
	str += sprintf(str, "log%d.irlog", i);
	*str++ = '\0';
    }
    argv[n+2] = NULL;

    *pargc = argc;
    *pargv = argv;
    s_bFreeArgv = TRUE;
}
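
A sketch of the intended call pattern, assuming the incoming argv has at least two real entries as the code requires; here n picks how many logN.irlog names get appended:

int main(int argc, char *argv[])
{
    /* Rewrite argv to: argv[0], argv[1], log0.irlog .. log3.irlog, NULL */
    GenerateNewArgv(&argc, &argv, 4);
    /* argc is now 6; the whole table lives in a single MPIU_Malloc block */
    return 0;
}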
Example #24
int MPID_nem_ib_insert_hash_elem(
        MPID_nem_ib_hash_table_ptr_t table,
        uint64_t key, void *data, uint32_t uniq)
{
    int mpi_errno = MPI_SUCCESS;
    uint32_t hash_index;
    MPID_nem_ib_hash_elem_ptr_t start_elem;
    MPID_nem_ib_hash_elem_ptr_t new_elem;
    MPID_nem_ib_hash_elem_ptr_t elem;

    MPIU_Assert(NULL != table);

    pthread_mutex_lock(&table->hash_table_lock);

    hash_index = hash(key, table->num_entries);

    /* Note that the first element is allocated
     * at the beginning, so this is guaranteed
     * to be non-null */
    start_elem = &table->entries[hash_index];

    MPIU_Assert(start_elem != NULL);

    /* Walk to end of list in this hash slot */
    elem = start_elem;
    while(elem->next != NULL) {
        elem = elem->next;
    }

    /* Insert the element */
    new_elem = MPIU_Malloc(sizeof(MPID_nem_ib_hash_elem_t));

    if(NULL == new_elem) {
        MPIU_CHKMEM_SETERR(mpi_errno,
                sizeof(MPID_nem_ib_hash_elem_t),
                "IB Module Hash Table New Element");
        pthread_mutex_unlock(&table->hash_table_lock);
        return mpi_errno;
    }

    memset(new_elem, 0, sizeof(MPID_nem_ib_hash_elem_t));

    new_elem->data = data;
    new_elem->uniq = uniq;
    new_elem->key = key;
    new_elem->next = NULL;
    new_elem->prev = elem;

    elem->next = new_elem;

    NEM_IB_DBG("Inserted elem key %lu, uniq %u, hash index %u",
            key, uniq, hash_index);

    pthread_mutex_unlock(&table->hash_table_lock);

    /*   fn_exit: */
           return mpi_errno;
    /*   fn_fail:
           goto fn_exit;
           */
}
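
The hash() helper is not included in this excerpt. A minimal stand-in consistent with its call signature (key plus table size, returning a slot index) could be a simple modulo, though the module may well use something stronger:

/* Assumed shape only; not the module's actual hash function */
static uint32_t hash(uint64_t key, uint32_t num_entries)
{
    return (uint32_t)(key % num_entries);
}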
Example #25
int MPID_nem_mxm_SendNoncontig(MPIDI_VC_t * vc, MPID_Request * sreq, void *hdr,
                               MPIDI_msg_sz_t hdr_sz)
{
    int mpi_errno = MPI_SUCCESS;
    MPIDI_msg_sz_t last;
    MPID_nem_mxm_vc_area *vc_area = NULL;
    MPID_nem_mxm_req_area *req_area = NULL;

    MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_MXM_SENDNONCONTIGMSG);
    MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_MXM_SENDNONCONTIGMSG);

    MPIU_Assert(hdr_sz <= sizeof(MPIDI_CH3_Pkt_t));
    MPIU_DBG_MSG(CH3_CHANNEL, VERBOSE, "MPID_nem_mxm_SendNoncontig");

    MPIU_Memcpy(&(sreq->dev.pending_pkt), (char *) hdr, sizeof(MPIDI_CH3_Pkt_t));

    _dbg_mxm_output(5,
                    "SendNoncontig ========> Sending ADI msg (to=%d type=%d) for req %p (data_size %d, %d) \n",
                    vc->pg_rank, sreq->dev.pending_pkt.type, sreq, sizeof(MPIDI_CH3_Pkt_t),
                    sreq->dev.segment_size);

    vc_area = VC_BASE(vc);
    req_area = REQ_BASE(sreq);

    req_area->ctx = sreq;
    req_area->iov_buf = req_area->tmp_buf;
    req_area->iov_count = 1;
    req_area->iov_buf[0].ptr = (void *) &(sreq->dev.pending_pkt);
    req_area->iov_buf[0].length = sizeof(MPIDI_CH3_Pkt_t);

    MPIU_Assert(sreq->dev.segment_first == 0);
    last = sreq->dev.segment_size;
    if (last > 0) {
        sreq->dev.tmpbuf = MPIU_Malloc((size_t) sreq->dev.segment_size);
        MPIU_Assert(sreq->dev.tmpbuf);
        MPID_Segment_pack(sreq->dev.segment_ptr, sreq->dev.segment_first, &last, sreq->dev.tmpbuf);
        MPIU_Assert(last == sreq->dev.segment_size);

        req_area->iov_count = 2;
        req_area->iov_buf[1].ptr = sreq->dev.tmpbuf;
        req_area->iov_buf[1].length = last;
    }

    vc_area->pending_sends += 1;
    sreq->ch.vc = vc;
    sreq->ch.noncontig = TRUE;

    mpi_errno = _mxm_isend(vc_area->mxm_ep, req_area, MXM_MPICH_ISEND_AM,
                           mxm_obj->mxm_mq, mxm_obj->mxm_rank, MXM_MPICH_HID_ADI_MSG, 0, 0);
    if (mpi_errno)
        MPIU_ERR_POP(mpi_errno);

  fn_exit:
    MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_MXM_SENDNONCONTIGMSG);
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
Example #26
int MPID_nem_mxm_vc_init(MPIDI_VC_t * vc)
{
    int mpi_errno = MPI_SUCCESS;
    MPIDI_CH3I_VC *vc_ch = &vc->ch;
    MPID_nem_mxm_vc_area *vc_area = VC_BASE(vc);

    MPIDI_STATE_DECL(MPID_STATE_MXM_VC_INIT);
    MPIDI_FUNC_ENTER(MPID_STATE_MXM_VC_INIT);

    /* local connection is used for any source communication */
    MPIU_Assert(MPID_nem_mem_region.rank != vc->lpid);
    MPIU_DBG_MSG_FMT(CH3_CHANNEL, VERBOSE,
                     (MPIU_DBG_FDEST,
                      "[%i]=== connecting  to  %i  \n", MPID_nem_mem_region.rank, vc->lpid));
    {
        char *business_card;
        int val_max_sz;
#ifdef USE_PMI2_API
        val_max_sz = PMI2_MAX_VALLEN;
#else
        mpi_errno = PMI_KVS_Get_value_length_max(&val_max_sz);
        if (mpi_errno)
            MPIU_ERR_POP(mpi_errno);
#endif

        business_card = (char *) MPIU_Malloc(val_max_sz);
        mpi_errno = vc->pg->getConnInfo(vc->pg_rank, business_card, val_max_sz, vc->pg);
        if (mpi_errno)
            MPIU_ERR_POP(mpi_errno);

        vc_area->ctx = vc;
        vc_area->mxm_ep = &_mxm_obj.endpoint[vc->pg_rank];
        mpi_errno = _mxm_connect(&_mxm_obj.endpoint[vc->pg_rank], business_card, vc_area);
        if (mpi_errno)
            MPIU_ERR_POP(mpi_errno);

        MPIU_Free(business_card);
    }

    MPIDI_CHANGE_VC_STATE(vc, ACTIVE);

    vc_area->pending_sends = 0;

    vc->rndvSend_fn = NULL;
    vc->rndvRecv_fn = NULL;
    vc->sendNoncontig_fn = MPID_nem_mxm_SendNoncontig;
    vc->comm_ops = &comm_ops;

    vc_ch->iStartContigMsg = MPID_nem_mxm_iStartContigMsg;
    vc_ch->iSendContig = MPID_nem_mxm_iSendContig;

  fn_exit:
    MPIDI_FUNC_EXIT(MPID_STATE_MXM_VC_INIT);
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
Example #27
int
MPID_nem_mx_vc_init (MPIDI_VC_t *vc)
{
   uint32_t threshold;
   MPIDI_CH3I_VC *vc_ch = VC_CH(vc);
   int mpi_errno = MPI_SUCCESS;

   /* first make sure that our private fields in the vc fit into the area provided  */
   MPIU_Assert(sizeof(MPID_nem_mx_vc_area) <= MPID_NEM_VC_NETMOD_AREA_LEN);

#ifdef ONDEMAND
   VC_FIELD(vc, local_connected)  = 0;
   VC_FIELD(vc, remote_connected) = 0;
#else
   {
       char *business_card;
       int   val_max_sz;
       int   ret;
#ifdef USE_PMI2_API
       val_max_sz = PMI2_MAX_VALLEN;
#else
       mpi_errno = PMI_KVS_Get_value_length_max(&val_max_sz);
       if (mpi_errno) MPIU_ERR_POP(mpi_errno);
#endif
       business_card = (char *)MPIU_Malloc(val_max_sz); 
       mpi_errno = vc->pg->getConnInfo(vc->pg_rank, business_card,val_max_sz, vc->pg);
       if (mpi_errno) MPIU_ERR_POP(mpi_errno);
       
       mpi_errno = MPID_nem_mx_get_from_bc (business_card, &VC_FIELD(vc, remote_endpoint_id), &VC_FIELD(vc, remote_nic_id));
       if (mpi_errno)    MPIU_ERR_POP (mpi_errno);

       MPIU_Free(business_card);
       
       ret = mx_connect(MPID_nem_mx_local_endpoint,VC_FIELD(vc, remote_nic_id),VC_FIELD(vc, remote_endpoint_id),
			MPID_NEM_MX_FILTER,MX_INFINITE,&(VC_FIELD(vc, remote_endpoint_addr)));
       MPIU_ERR_CHKANDJUMP1 (ret != MX_SUCCESS, mpi_errno, MPI_ERR_OTHER, "**mx_connect", "**mx_connect %s", mx_strerror (ret));
       mx_set_endpoint_addr_context(VC_FIELD(vc, remote_endpoint_addr),(void *)vc);

       MPIDI_CHANGE_VC_STATE(vc, ACTIVE);
   }
#endif
   mx_get_info(MPID_nem_mx_local_endpoint, MX_COPY_SEND_MAX, NULL, 0, &threshold, sizeof(uint32_t));

   vc->eager_max_msg_sz = threshold;
   vc->rndvSend_fn      = NULL;
   vc->sendNoncontig_fn = MPID_nem_mx_SendNoncontig;
   vc->comm_ops         = &comm_ops;
 
   vc_ch->iStartContigMsg = MPID_nem_mx_iStartContigMsg;
   vc_ch->iSendContig     = MPID_nem_mx_iSendContig;

 fn_exit:
   return mpi_errno;
 fn_fail:
   goto fn_exit;
}
Example #28
void *MPIDI_Alloc_mem( size_t size, MPID_Info *info_ptr )
{
    void *ap;
    MPIDI_STATE_DECL(MPID_STATE_MPIDI_ALLOC_MEM);

    MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_ALLOC_MEM);

    ap = MPIU_Malloc(size);
    
    MPIDI_FUNC_EXIT(MPID_STATE_MPIDI_ALLOC_MEM);
    return ap;
}
Example #29
static int  mpi_to_pmi_keyvals( MPID_Info *info_ptr, PMI_keyval_t **kv_ptr, 
				int *nkeys_ptr )
{
    char key[MPI_MAX_INFO_KEY];
    PMI_keyval_t *kv = 0;
    int          i, nkeys = 0, vallen, flag, mpi_errno=MPI_SUCCESS;

    if (!info_ptr || info_ptr->handle == MPI_INFO_NULL) {
	goto fn_exit;
    }

    MPIR_Info_get_nkeys_impl( info_ptr, &nkeys );
    if (nkeys == 0) {
	goto fn_exit;
    }
    kv = (PMI_keyval_t *)MPIU_Malloc( nkeys * sizeof(PMI_keyval_t) );
    if (!kv) { MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER,"**nomem"); }

    for (i=0; i<nkeys; i++) {
	mpi_errno = MPIR_Info_get_nthkey_impl( info_ptr, i, key );
	if (mpi_errno) { MPIU_ERR_POP(mpi_errno); }
	MPIR_Info_get_valuelen_impl( info_ptr, key, &vallen, &flag );
        MPIU_ERR_CHKANDJUMP1(!flag, mpi_errno, MPI_ERR_OTHER,"**infonokey", "**infonokey %s", key);

	kv[i].key = MPIU_Strdup(key);
	kv[i].val = MPIU_Malloc( vallen + 1 );
	if (!kv[i].key || !kv[i].val) { 
	    MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER,"**nomem" );
	}
	MPIR_Info_get_impl( info_ptr, key, vallen+1, kv[i].val, &flag );
        MPIU_ERR_CHKANDJUMP1(!flag, mpi_errno, MPI_ERR_OTHER,"**infonokey", "**infonokey %s", key);
	MPIU_DBG_PRINTF(("key: <%s>, value: <%s>\n", kv[i].key, kv[i].val));
    }

 fn_fail:
 fn_exit:
    *kv_ptr    = kv;
    *nkeys_ptr = nkeys;
    return mpi_errno;
}
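
The keyval array built above owns every key and value string, so the matching cleanup must free three levels. A hypothetical helper (no such routine appears in this excerpt) that makes the ownership explicit:

/* Hypothetical cleanup for the array returned via kv_ptr */
static void free_pmi_keyvals( PMI_keyval_t *kv, int nkeys )
{
    int i;
    if (!kv) return;
    for (i = 0; i < nkeys; i++) {
        MPIU_Free( (char *)kv[i].key );  /* from MPIU_Strdup */
        MPIU_Free( kv[i].val );          /* from MPIU_Malloc */
    }
    MPIU_Free( kv );
}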
Example #30
int smpd_hpc_js_init(smpd_hpc_js_ctxt_t *pctxt)
{
    HRESULT hr;
    int result;

    smpd_enter_fn(FCNAME);

    if(pctxt == NULL){
        smpd_err_printf("ERROR: Invalid pointer to js handle\n");
        smpd_exit_fn(FCNAME);
        return SMPD_FAIL;
    }

    /* Alloc memory for scheduler object */
    *pctxt = (smpd_hpc_js_ctxt_t)MPIU_Malloc(sizeof(smpd_hpc_js_ctxt_));
    if(*pctxt == NULL){
        smpd_err_printf("ERROR: Unable to allocate memory for js handle\n");
        smpd_exit_fn(FCNAME);
        return SMPD_FAIL;
    }

    (*pctxt)->pscheduler = NULL;
    (*pctxt)->pnode_names = NULL;

    CoInitializeEx(NULL, COINIT_APARTMENTTHREADED);

    /* Get an instance of the Scheduler object */
    hr = CoCreateInstance( __uuidof(Scheduler),
                           NULL,
                           CLSCTX_INPROC_SERVER,
                           __uuidof(IScheduler),
                           reinterpret_cast<void **>(&((*pctxt)->pscheduler)));

    CoUninitialize();

    if (FAILED(hr)){
        smpd_err_printf("ERROR: CoCreateInstance(IScheduler) failed, 0x%x\n", hr);
        MPIU_Free(*pctxt);
        *pctxt = NULL;
        smpd_exit_fn(FCNAME);
        return SMPD_FAIL;
    }

    /* Connect to the head node */
    result = smpd_hpc_js_connect_to_head_node(*pctxt, NULL);
    if(result != SMPD_SUCCESS){
        smpd_err_printf("Unable to connect to head node \n");
        smpd_exit_fn(FCNAME);
        return SMPD_FAIL;
    }

    smpd_exit_fn(FCNAME);
    return SMPD_SUCCESS;
}