Example 1
/*@
MPI_Cart_rank - Determines process rank in communicator given Cartesian
                location

Input Parameters:
+ comm - communicator with Cartesian structure (handle)
- coords - integer array (of size 'ndims', the number of dimensions of
    the Cartesian topology associated with 'comm') specifying the Cartesian
  coordinates of a process

Output Parameters:
. rank - rank of specified process (integer) 

Notes:
 Out-of-range coordinates are erroneous for non-periodic dimensions.  
 Versions of MPICH before 1.2.2 returned 'MPI_PROC_NULL' for the rank in this 
 case.

.N SignalSafe

.N Fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TOPOLOGY
.N MPI_ERR_RANK
.N MPI_ERR_ARG
@*/
int MPI_Cart_rank(MPI_Comm comm, const int coords[], int *rank)
{
    static const char FCNAME[] = "MPI_Cart_rank";
    int mpi_errno = MPI_SUCCESS;
    MPID_Comm *comm_ptr = NULL;
    MPIR_Topology *cart_ptr;
    MPID_MPI_STATE_DECL(MPID_STATE_MPI_CART_RANK);

    MPIR_ERRTEST_INITIALIZED_ORDIE();
    
    MPID_MPI_FUNC_ENTER(MPID_STATE_MPI_CART_RANK);

    /* Validate parameters, especially handles needing to be converted */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
	    MPIR_ERRTEST_COMM(comm, mpi_errno);
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif
    
    /* Convert MPI object handles to object pointers */
    MPID_Comm_get_ptr( comm, comm_ptr );
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            /* Validate comm_ptr */
            MPID_Comm_valid_ptr( comm_ptr, mpi_errno );
            if (mpi_errno) goto fn_fail;
	    /* If comm_ptr is not valid, it will be reset to null */
	    MPIR_ERRTEST_ARGNULL(rank,"rank",mpi_errno);
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    cart_ptr = MPIR_Topology_get( comm_ptr );
    MPIU_ERR_CHKANDJUMP((!cart_ptr || cart_ptr->kind != MPI_CART), mpi_errno, MPI_ERR_TOPOLOGY, "**notcarttopo");

    /* Validate coordinates */
#   ifdef HAVE_ERROR_CHECKING
    {
        int i, ndims, coord;
        MPID_BEGIN_ERROR_CHECKS;
        {
	    ndims = cart_ptr->topo.cart.ndims;
	    if (ndims != 0) {
		MPIR_ERRTEST_ARGNULL(coords,"coords",mpi_errno);
	    }
	    for (i=0; i<ndims; i++) {
		if (!cart_ptr->topo.cart.periodic[i]) {
		    coord = coords[i];
		    MPIU_ERR_CHKANDJUMP3(
			(coord < 0 || coord >= cart_ptr->topo.cart.dims[i] ), mpi_errno, MPI_ERR_ARG, "**cartcoordinvalid",
			"**cartcoordinvalid %d %d %d",i, coords[i], cart_ptr->topo.cart.dims[i]-1 );
		}
	    }
	}
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ...  */
    MPIR_Cart_rank_impl(cart_ptr, coords, rank);
    /* ... end of body of routine ... */

  fn_exit:
    MPID_MPI_FUNC_EXIT(MPID_STATE_MPI_CART_RANK);
    return mpi_errno;

  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
#   ifdef HAVE_ERROR_CHECKING
    {
	mpi_errno = MPIR_Err_create_code(
	    mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER, "**mpi_cart_rank",
	    "**mpi_cart_rank %C %p %p", comm, coords, rank);
    }
#   endif
    mpi_errno = MPIR_Err_return_comm( comm_ptr, FCNAME, mpi_errno );
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
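
A minimal sketch of calling MPI_Cart_rank from user code: build a 2-D periodic Cartesian communicator, then translate coordinates to a rank. Because both dimensions are periodic here, out-of-range coordinates wrap rather than raising an error (see the Notes above). Standard MPI calls only; error handling omitted for brevity.

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int dims[2] = {0, 0}, periods[2] = {1, 1}, coords[2] = {1, 0};
    int nprocs, cart_rank;
    MPI_Comm cart_comm;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    MPI_Dims_create(nprocs, 2, dims);   /* factor nprocs into a 2-D grid */
    MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 0, &cart_comm);

    MPI_Cart_rank(cart_comm, coords, &cart_rank);
    printf("coords (1,0) map to rank %d\n", cart_rank);

    MPI_Comm_free(&cart_comm);
    MPI_Finalize();
    return 0;
}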
Example 2
/*@

MPI_Graph_get - Retrieves graph topology information associated with a 
                communicator

Input Parameters:
+ comm - communicator with graph structure (handle) 
. maxindex - length of vector 'indx' in the calling program  (integer) 
- maxedges - length of vector 'edges' in the calling program  (integer) 

Output Parameters:
+ indx - array of integers containing the cumulative node degrees of the graph (for details see the definition of 'MPI_GRAPH_CREATE')
- edges - array of integers containing the concatenated neighbor lists of the graph

.N SignalSafe

.N Fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TOPOLOGY
.N MPI_ERR_COMM
.N MPI_ERR_ARG
@*/
int MPI_Graph_get(MPI_Comm comm, int maxindex, int maxedges, 
                  int indx[], int edges[])
{
    static const char FCNAME[] = "MPI_Graph_get";
    int mpi_errno = MPI_SUCCESS;
    MPID_Comm *comm_ptr = NULL;
    MPIR_Topology *topo_ptr;
    int i, n, *vals;
    MPID_MPI_STATE_DECL(MPID_STATE_MPI_GRAPH_GET);

    MPIR_ERRTEST_INITIALIZED_ORDIE();
    
    MPID_MPI_FUNC_ENTER(MPID_STATE_MPI_GRAPH_GET);

    /* Validate parameters, especially handles needing to be converted */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
	    MPIR_ERRTEST_COMM(comm, mpi_errno);
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif
    
    /* Convert MPI object handles to object pointers */
    MPID_Comm_get_ptr( comm, comm_ptr );

    /* Validate parameters and objects (post conversion) */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            /* Validate comm_ptr */
            MPID_Comm_valid_ptr( comm_ptr, mpi_errno );
            if (mpi_errno) goto fn_fail;
	    /* If comm_ptr is not valid, it will be reset to null */
	    
	    MPIR_ERRTEST_ARGNULL( edges, "edges", mpi_errno );
	    MPIR_ERRTEST_ARGNULL( indx,  "indx", mpi_errno );
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ...  */
    
    topo_ptr = MPIR_Topology_get( comm_ptr );

    MPIU_ERR_CHKANDJUMP((!topo_ptr || topo_ptr->kind != MPI_GRAPH), mpi_errno, MPI_ERR_TOPOLOGY, "**notgraphtopo");
    MPIU_ERR_CHKANDJUMP3((topo_ptr->topo.graph.nnodes > maxindex), mpi_errno, MPI_ERR_ARG, "**argrange",
			 "**argrange %s %d %d", "maxindex", maxindex, topo_ptr->topo.graph.nnodes);
    MPIU_ERR_CHKANDJUMP3((topo_ptr->topo.graph.nedges > maxedges), mpi_errno, MPI_ERR_ARG, "**argrange",
			 "**argrange %s %d %d", "maxedges", maxedges, topo_ptr->topo.graph.nedges);
    
    /* Get index */
    n = topo_ptr->topo.graph.nnodes;
    vals = topo_ptr->topo.graph.index;
    for ( i=0; i<n; i++ )
	*indx++ = *vals++;
	    
    /* Get edges */
    n = topo_ptr->topo.graph.nedges;
    vals = topo_ptr->topo.graph.edges;
    for ( i=0; i<n; i++ )
	*edges++ = *vals++;

    /* ... end of body of routine ... */

  fn_exit:
    MPID_MPI_FUNC_EXIT(MPID_STATE_MPI_GRAPH_GET);
    return mpi_errno;

  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
#   ifdef HAVE_ERROR_CHECKING
    {
	mpi_errno = MPIR_Err_create_code(
	    mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER, "**mpi_graph_get",
	    "**mpi_graph_get %C %d %d %p %p", comm, maxindex, maxedges, indx, edges);
    }
#   endif
    mpi_errno = MPIR_Err_return_comm( comm_ptr, FCNAME, mpi_errno );
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
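
A minimal sketch of the matching user-level call, assuming at least three processes: build a small graph topology with MPI_Graph_create, query its dimensions with MPI_Graphdims_get, then read the index and edge arrays back with MPI_Graph_get. Error handling omitted for brevity.

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    /* 3-node graph in which every node is adjacent to the other two:
       indx holds cumulative degrees, edges the concatenated neighbor lists. */
    int indx[3] = {2, 4, 6};
    int edges[6] = {1, 2, 0, 2, 0, 1};
    int indx_out[3], edges_out[6], nnodes, nedges, i;
    MPI_Comm graph_comm;

    MPI_Init(&argc, &argv);
    MPI_Graph_create(MPI_COMM_WORLD, 3, indx, edges, 0, &graph_comm);

    if (graph_comm != MPI_COMM_NULL) {   /* extra processes get MPI_COMM_NULL */
        MPI_Graphdims_get(graph_comm, &nnodes, &nedges);
        MPI_Graph_get(graph_comm, nnodes, nedges, indx_out, edges_out);
        for (i = 0; i < nnodes; i++)
            printf("node %d: cumulative degree %d\n", i, indx_out[i]);
        MPI_Comm_free(&graph_comm);
    }
    MPI_Finalize();
    return 0;
}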
Example 3
/*@
   MPI_Type_create_darray - Create a datatype representing a distributed array

Input Parameters:
+ size - size of process group (positive integer)
. rank - rank in process group (nonnegative integer)
. ndims - number of array dimensions as well as process grid dimensions (positive integer)
. array_of_gsizes - number of elements of type oldtype in each dimension of global array (array of positive integers)
. array_of_distribs - distribution of array in each dimension (array of state)
. array_of_dargs - distribution argument in each dimension (array of positive integers)
. array_of_psizes - size of process grid in each dimension (array of positive integers)
. order - array storage order flag (state)
- oldtype - old datatype (handle)

Output Parameters:
. newtype - new datatype (handle)

.N ThreadSafe

.N Fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TYPE
.N MPI_ERR_ARG
@*/
int MPI_Type_create_darray(int size,
			   int rank,
			   int ndims,
			   const int array_of_gsizes[],
			   const int array_of_distribs[],
			   const int array_of_dargs[],
			   const int array_of_psizes[],
			   int order,
			   MPI_Datatype oldtype,
			   MPI_Datatype *newtype)
{
    static const char FCNAME[] = "MPI_Type_create_darray";
    int mpi_errno = MPI_SUCCESS, i;
    MPI_Datatype new_handle;

    int procs, tmp_rank, tmp_size, blklens[3], *coords;
    MPI_Aint *st_offsets, orig_extent, disps[3];
    MPI_Datatype type_old, type_new = MPI_DATATYPE_NULL, types[3];

#   ifdef HAVE_ERROR_CHECKING
    MPI_Aint   size_with_aint;
    MPI_Offset size_with_offset;
#   endif

    int *ints;
    MPID_Datatype *datatype_ptr = NULL;
    MPIU_CHKLMEM_DECL(3);
    MPID_MPI_STATE_DECL(MPID_STATE_MPI_TYPE_CREATE_DARRAY);

    MPIR_ERRTEST_INITIALIZED_ORDIE();

    MPIU_THREAD_CS_ENTER(ALLFUNC,);
    MPID_MPI_FUNC_ENTER(MPID_STATE_MPI_TYPE_CREATE_DARRAY);

    /* Validate parameters, especially handles needing to be converted */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
	    MPIR_ERRTEST_DATATYPE(oldtype, "datatype", mpi_errno);
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif

    /* Convert MPI object handles to object pointers */
    MPID_Datatype_get_ptr(oldtype, datatype_ptr);
    MPID_Datatype_get_extent_macro(oldtype, orig_extent);

    /* Validate parameters and objects (post conversion) */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
	    /* Check parameters */
	    MPIR_ERRTEST_ARGNONPOS(size, "size", mpi_errno, MPI_ERR_ARG);
            /* use MPI_ERR_RANK class for PE-MPI compatibility */
            MPIU_ERR_CHKANDJUMP3((rank < 0 || rank >= size), mpi_errno, MPI_ERR_RANK,
                                 "**argrange", "**argrange %s %d %d", "rank", rank, (size-1));
	    MPIR_ERRTEST_ARGNONPOS(ndims, "ndims", mpi_errno, MPI_ERR_DIMS);

	    MPIR_ERRTEST_ARGNULL(array_of_gsizes, "array_of_gsizes", mpi_errno);
	    MPIR_ERRTEST_ARGNULL(array_of_distribs, "array_of_distribs", mpi_errno);
	    MPIR_ERRTEST_ARGNULL(array_of_dargs, "array_of_dargs", mpi_errno);
	    MPIR_ERRTEST_ARGNULL(array_of_psizes, "array_of_psizes", mpi_errno);
	    if (order != MPI_ORDER_C && order != MPI_ORDER_FORTRAN) {
		mpi_errno = MPIR_Err_create_code(MPI_SUCCESS,
						 MPIR_ERR_RECOVERABLE,
						 FCNAME,
						 __LINE__,
						 MPI_ERR_ARG,
						 "**arg",
						 "**arg %s",
						 "order");
                goto fn_fail;
	    }

            tmp_size = 1;
	    for (i=0; mpi_errno == MPI_SUCCESS && i < ndims; i++) {
		MPIR_ERRTEST_ARGNONPOS(array_of_gsizes[i], "gsize", mpi_errno, MPI_ERR_ARG);
		MPIR_ERRTEST_ARGNONPOS(array_of_psizes[i], "psize", mpi_errno, MPI_ERR_ARG);

		if ((array_of_distribs[i] != MPI_DISTRIBUTE_NONE) &&
		    (array_of_distribs[i] != MPI_DISTRIBUTE_BLOCK) &&
		    (array_of_distribs[i] != MPI_DISTRIBUTE_CYCLIC))
		{
		    mpi_errno = MPIR_Err_create_code(MPI_SUCCESS,
						     MPIR_ERR_RECOVERABLE,
						     FCNAME,
						     __LINE__,
						     MPI_ERR_ARG,
						     "**darrayunknown",
						     0);
                    goto fn_fail;
		}

		if ((array_of_dargs[i] != MPI_DISTRIBUTE_DFLT_DARG) &&
		    (array_of_dargs[i] <= 0))
		{
		    mpi_errno = MPIR_Err_create_code(MPI_SUCCESS,
						     MPIR_ERR_RECOVERABLE,
						     FCNAME,
						     __LINE__,
						     MPI_ERR_ARG,
						     "**arg",
						     "**arg %s",
						     "array_of_dargs");
                    goto fn_fail;
		}

		if ((array_of_distribs[i] == MPI_DISTRIBUTE_NONE) &&
		    (array_of_psizes[i] != 1))
		{
		    mpi_errno = MPIR_Err_create_code(MPI_SUCCESS,
						     MPIR_ERR_RECOVERABLE,
						     FCNAME,
						     __LINE__,
						     MPI_ERR_ARG,
						     "**darraydist",
						     "**darraydist %d %d",
						     i, array_of_psizes[i]);
                    goto fn_fail;
		}

                tmp_size *= array_of_psizes[i];
	    }

            MPIU_ERR_CHKANDJUMP1((tmp_size != size), mpi_errno, MPI_ERR_ARG,
                                 "**arg", "**arg %s", "array_of_psizes");

	    /* TODO: GET THIS CHECK IN ALSO */

	    /* check if MPI_Aint is large enough for size of global array.
	       if not, complain. */

	    size_with_aint = orig_extent;
	    for (i=0; i<ndims; i++) size_with_aint *= array_of_gsizes[i];
	    size_with_offset = orig_extent;
	    for (i=0; i<ndims; i++) size_with_offset *= array_of_gsizes[i];
	    if (size_with_aint != size_with_offset) {
		mpi_errno = MPIR_Err_create_code(MPI_SUCCESS,
						 MPIR_ERR_FATAL,
						 FCNAME,
						 __LINE__,
						 MPI_ERR_ARG,
						 "**darrayoverflow",
						 "**darrayoverflow %L",
						 size_with_offset);
                goto fn_fail;
	    }

            /* Validate datatype_ptr */
            MPID_Datatype_valid_ptr(datatype_ptr, mpi_errno);
	    /* If datatype_ptr is not valid, it will be reset to null */
	    /* --BEGIN ERROR HANDLING-- */
            if (mpi_errno) goto fn_fail;
	    /* --END ERROR HANDLING-- */
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ... */

/* calculate position in Cartesian grid as MPI would (row-major
   ordering) */
    MPIU_CHKLMEM_MALLOC_ORJUMP(coords, int *, ndims * sizeof(int), mpi_errno, "position in Cartesian grid");

    procs = size;
    tmp_rank = rank;
    for (i=0; i<ndims; i++) {
	procs = procs/array_of_psizes[i];
	coords[i] = tmp_rank/procs;
	tmp_rank = tmp_rank % procs;
    }

    MPIU_CHKLMEM_MALLOC_ORJUMP(st_offsets, MPI_Aint *, ndims * sizeof(MPI_Aint), mpi_errno, "st_offsets");

    type_old = oldtype;

    if (order == MPI_ORDER_FORTRAN) {
      /* dimension 0 changes fastest */
	for (i=0; i<ndims; i++) {
	    switch(array_of_distribs[i]) {
	    case MPI_DISTRIBUTE_BLOCK:
		mpi_errno = MPIR_Type_block(array_of_gsizes,
					    i,
					    ndims,
					    array_of_psizes[i],
					    coords[i],
					    array_of_dargs[i],
					    order,
					    orig_extent,
					    type_old,
					    &type_new,
					    st_offsets+i);
		break;
	    case MPI_DISTRIBUTE_CYCLIC:
		mpi_errno = MPIR_Type_cyclic(array_of_gsizes,
					     i,
					     ndims,
					     array_of_psizes[i],
					     coords[i],
					     array_of_dargs[i],
					     order,
					     orig_extent,
					     type_old,
					     &type_new,
					     st_offsets+i);
		break;
	    case MPI_DISTRIBUTE_NONE:
		/* treat it as a block distribution on 1 process */
		mpi_errno = MPIR_Type_block(array_of_gsizes,
					    i,
					    ndims,
					    1,
					    0,
					    MPI_DISTRIBUTE_DFLT_DARG,
					    order,
					    orig_extent,
					    type_old,
					    &type_new,
					    st_offsets+i);
		break;
	    }
	    if (i)
	    {
		MPIR_Type_free_impl(&type_old);
	    }
	    type_old = type_new;

	    /* --BEGIN ERROR HANDLING-- */
	    if (mpi_errno != MPI_SUCCESS) goto fn_fail;
	    /* --END ERROR HANDLING-- */
	}

	/* add displacement and UB */
	disps[1] = st_offsets[0];
	tmp_size = 1;
	for (i=1; i<ndims; i++) {
	    tmp_size *= array_of_gsizes[i-1];
	    disps[1] += (MPI_Aint) tmp_size * st_offsets[i];
	}
        /* rest done below for both Fortran and C order */
    }

    else /* order == MPI_ORDER_C */ {
        /* dimension ndims-1 changes fastest */
	for (i=ndims-1; i>=0; i--) {
	    switch(array_of_distribs[i]) {
	    case MPI_DISTRIBUTE_BLOCK:
		mpi_errno = MPIR_Type_block(array_of_gsizes,
					    i,
					    ndims,
					    array_of_psizes[i],
					    coords[i],
					    array_of_dargs[i],
					    order,
					    orig_extent,
					    type_old,
					    &type_new,
					    st_offsets+i);
		break;
	    case MPI_DISTRIBUTE_CYCLIC:
		mpi_errno = MPIR_Type_cyclic(array_of_gsizes,
					     i,
					     ndims,
					     array_of_psizes[i],
					     coords[i],
					     array_of_dargs[i],
					     order,
					     orig_extent,
					     type_old,
					     &type_new,
					     st_offsets+i);
		break;
	    case MPI_DISTRIBUTE_NONE:
		/* treat it as a block distribution on 1 process */
		mpi_errno = MPIR_Type_block(array_of_gsizes,
					    i,
					    ndims,
					    array_of_psizes[i],
					    coords[i],
					    MPI_DISTRIBUTE_DFLT_DARG,
					    order,
					    orig_extent,
					    type_old,
					    &type_new,
					    st_offsets+i);
		break;
	    }
	    if (i != ndims-1)
	    {
		MPIR_Type_free_impl(&type_old);
	    }
	    type_old = type_new;

	    /* --BEGIN ERROR HANDLING-- */
	    if (mpi_errno != MPI_SUCCESS) goto fn_fail;
	    /* --END ERROR HANDLING-- */
	}

	/* add displacement and UB */
	disps[1] = st_offsets[ndims-1];
	tmp_size = 1;
	for (i=ndims-2; i>=0; i--) {
	    tmp_size *= array_of_gsizes[i+1];
	    disps[1] += (MPI_Aint) tmp_size * st_offsets[i];
	}
    }

    disps[1] *= orig_extent;

    disps[2] = orig_extent;
    for (i=0; i<ndims; i++) disps[2] *= (MPI_Aint)(array_of_gsizes[i]);
	
    disps[0] = 0;
    blklens[0] = blklens[1] = blklens[2] = 1;
    types[0] = MPI_LB;
    types[1] = type_new;
    types[2] = MPI_UB;

    mpi_errno = MPID_Type_struct(3,
				 blklens,
				 disps,
				 types,
				 &new_handle);
    /* --BEGIN ERROR HANDLING-- */
    if (mpi_errno != MPI_SUCCESS) goto fn_fail;
    /* --END ERROR HANDLING-- */

    MPIR_Type_free_impl(&type_new);

    /* at this point we have the new type, and we've cleaned up any
     * intermediate types created in the process.  we just need to save
     * all our contents/envelope information.
     */

    /* Save contents */
    MPIU_CHKLMEM_MALLOC_ORJUMP(ints, int *, (4 * ndims + 4) * sizeof(int), mpi_errno, "content description");

    ints[0] = size;
    ints[1] = rank;
    ints[2] = ndims;

    for (i=0; i < ndims; i++) {
	ints[i + 3] = array_of_gsizes[i];
    }
    for (i=0; i < ndims; i++) {
	ints[i + ndims + 3] = array_of_distribs[i];
    }
    for (i=0; i < ndims; i++) {
	ints[i + 2*ndims + 3] = array_of_dargs[i];
    }
    for (i=0; i < ndims; i++) {
	ints[i + 3*ndims + 3] = array_of_psizes[i];
    }
    ints[4*ndims + 3] = order;
    MPID_Datatype_get_ptr(new_handle, datatype_ptr);
    mpi_errno = MPID_Datatype_set_contents(datatype_ptr,
					   MPI_COMBINER_DARRAY,
					   4*ndims + 4,
					   0,
					   1,
					   ints,
					   NULL,
					   &oldtype);
    /* --BEGIN ERROR HANDLING-- */
    if (mpi_errno != MPI_SUCCESS) goto fn_fail;
    /* --END ERROR HANDLING-- */

    MPIU_OBJ_PUBLISH_HANDLE(*newtype, new_handle);
    /* ... end of body of routine ... */

  fn_exit:
    MPIU_CHKLMEM_FREEALL();
    MPID_MPI_FUNC_EXIT(MPID_STATE_MPI_TYPE_CREATE_DARRAY);
    MPIU_THREAD_CS_EXIT(ALLFUNC,);
    return mpi_errno;

  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
#   ifdef HAVE_ERROR_CHECKING
    {
	mpi_errno = MPIR_Err_create_code(
	    mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER, "**mpi_type_create_darray",
	    "**mpi_type_create_darray %d %d %d %p %p %p %p %d %D %p", size, rank, ndims, array_of_gsizes,
	    array_of_distribs, array_of_dargs, array_of_psizes, order, oldtype, newtype);
    }
#   endif
    mpi_errno = MPIR_Err_return_comm(NULL, FCNAME, mpi_errno);
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
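
A minimal sketch of the user-level call, typical of MPI-IO codes: each process describes its piece of a 100x100 global array, block-distributed over a process grid chosen by MPI_Dims_create. The resulting type is most often passed to MPI_File_set_view; here it is only created, committed, and freed. Error handling omitted for brevity.

#include <mpi.h>

int main(int argc, char **argv)
{
    int rank, nprocs;
    int gsizes[2]   = {100, 100};
    int distribs[2] = {MPI_DISTRIBUTE_BLOCK, MPI_DISTRIBUTE_BLOCK};
    int dargs[2]    = {MPI_DISTRIBUTE_DFLT_DARG, MPI_DISTRIBUTE_DFLT_DARG};
    int psizes[2]   = {0, 0};
    MPI_Datatype darray;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    /* the product of psizes[] must equal the group size passed below */
    MPI_Dims_create(nprocs, 2, psizes);

    MPI_Type_create_darray(nprocs, rank, 2, gsizes, distribs, dargs, psizes,
                           MPI_ORDER_C, MPI_INT, &darray);
    MPI_Type_commit(&darray);

    /* ... typically handed to MPI_File_set_view() for collective I/O ... */

    MPI_Type_free(&darray);
    MPI_Finalize();
    return 0;
}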
Example 4
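/* MPID_nem_tcp_module_lmt_pre_recv - receiver-side setup for a large-message
   transfer (LMT): read the sender's cookie, open a listening TCP socket on an
   ephemeral port if not already connected, and return a cookie advertising
   the hostname, port, and data size for the clear-to-send handshake. */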
int MPID_nem_tcp_module_lmt_pre_recv (MPIDI_VC_t *vc, MPID_Request *req, MPID_IOV s_cookie, MPID_IOV *r_cookie, int *send_cts)
{
    int mpi_errno = MPI_SUCCESS;
    int ret;
    unsigned int len;
    struct sockaddr_in saddr;
    MPIDI_msg_sz_t data_sz;
    int dt_contig;
    MPI_Aint dt_true_lb;
    MPID_Datatype * dt_ptr;
    MPIDI_CH3I_VC *vc_ch = (MPIDI_CH3I_VC *)vc->channel_private;
    MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_TCP_MODULE_LMT_PRE_RECV);

    MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_TCP_MODULE_LMT_PRE_RECV);

    mpi_errno = read_s_cookie (s_cookie, &vc_ch->net.tcp.lmt_s_len);
    if (mpi_errno) MPIU_ERR_POP (mpi_errno);

    memset (&saddr, 0, sizeof(saddr));

    if (!vc_ch->net.tcp.lmt_connected)
    {
        vc_ch->net.tcp.lmt_desc = socket (AF_INET, SOCK_STREAM, 0);
        MPIU_ERR_CHKANDJUMP2 (vc_ch->net.tcp.lmt_desc == -1, mpi_errno, MPI_ERR_OTHER, "**sock_create", "**sock_create %s %d", strerror (errno), errno);

        /* ret = fcntl (vc_ch->net.tcp.lmt_desc, F_SETFL, O_NONBLOCK);
           MPIU_ERR_CHKANDJUMP2 (ret == -1, mpi_errno, MPI_ERR_OTHER, "**fail", "**fail %s %d", strerror (errno), errno); */

        saddr.sin_family      = AF_INET;
        saddr.sin_addr.s_addr = htonl (INADDR_ANY);
        saddr.sin_port        = htons (0);

        ret = bind (vc_ch->net.tcp.lmt_desc, (struct sockaddr *)&saddr, sizeof (saddr));
        MPIU_ERR_CHKANDJUMP3 (ret == -1, mpi_errno, MPI_ERR_OTHER, "**sock|poll|bind", "**sock|poll|bind %d %d %s", ntohs (saddr.sin_port), errno, strerror (errno));

        len = sizeof (saddr);
        ret = getsockname (vc_ch->net.tcp.lmt_desc, (struct sockaddr *)&saddr, &len);
        MPIU_ERR_CHKANDJUMP2 (ret == -1, mpi_errno, MPI_ERR_OTHER, "**fail", "**fail %s %d", strerror (errno), errno);

        set_sockopts (vc_ch->net.tcp.lmt_desc);

        ret = listen (vc_ch->net.tcp.lmt_desc, SOMAXCONN);
        MPIU_ERR_CHKANDJUMP2 (ret == -1, mpi_errno, MPI_ERR_OTHER, "**listen", "**listen %s %d", errno, strerror (errno));
    }

    MPIDI_Datatype_get_info (req->dev.user_count, req->dev.datatype, dt_contig, data_sz, dt_ptr, dt_true_lb);

    mpi_errno = create_r_cookie (MPID_nem_hostname, ntohs (saddr.sin_port), data_sz, &vc_ch->net.tcp.lmt_cookie, &len);
    if (mpi_errno) MPIU_ERR_POP (mpi_errno);

    r_cookie->MPID_IOV_BUF = vc_ch->net.tcp.lmt_cookie;
    r_cookie->MPID_IOV_LEN = len;

    *send_cts = 1;

 fn_exit:
    MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_TCP_MODULE_LMT_PRE_RECV);
    return mpi_errno;
 fn_fail:
    goto fn_exit;
}
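
The listener setup above follows a common POSIX idiom: bind to port 0 so the kernel assigns a free ephemeral port, recover the chosen port with getsockname, then listen. A standalone sketch of that pattern, independent of the MPICH internals (the function name open_ephemeral_listener is illustrative, not part of MPICH):

#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int open_ephemeral_listener(unsigned short *port_out)
{
    struct sockaddr_in saddr;
    socklen_t len = sizeof(saddr);
    int fd = socket(AF_INET, SOCK_STREAM, 0);
    if (fd == -1)
        return -1;

    memset(&saddr, 0, sizeof(saddr));
    saddr.sin_family      = AF_INET;
    saddr.sin_addr.s_addr = htonl(INADDR_ANY);
    saddr.sin_port        = htons(0);       /* let the kernel pick a port */

    if (bind(fd, (struct sockaddr *)&saddr, sizeof(saddr)) == -1 ||
        getsockname(fd, (struct sockaddr *)&saddr, &len) == -1 ||
        listen(fd, SOMAXCONN) == -1) {
        close(fd);
        return -1;
    }

    *port_out = ntohs(saddr.sin_port);      /* advertise this to the peer */
    return fd;
}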