Example No. 1
static int set_up_listener(void)
{
    int mpi_errno = MPI_SUCCESS;
    int ret;
    MPIDI_STATE_DECL(MPID_STATE_SET_UP_LISTENER);

    MPIDI_FUNC_ENTER(MPID_STATE_SET_UP_LISTENER);

    MPID_nem_tcp_g_lstn_plfd.fd = MPID_nem_tcp_g_lstn_sc.fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
    MPIR_ERR_CHKANDJUMP2(MPID_nem_tcp_g_lstn_sc.fd == -1, mpi_errno, MPI_ERR_OTHER, "**sock_create", "**sock_create %s %d", MPIU_Strerror(errno), errno);

    mpi_errno = MPID_nem_tcp_set_sockopts(MPID_nem_tcp_g_lstn_sc.fd);
    if (mpi_errno) MPIR_ERR_POP(mpi_errno);

    MPID_nem_tcp_g_lstn_plfd.events = POLLIN;
    mpi_errno = MPID_nem_tcp_bind(MPID_nem_tcp_g_lstn_sc.fd);
    if (mpi_errno) MPIR_ERR_POP(mpi_errno);

    ret = listen(MPID_nem_tcp_g_lstn_sc.fd, SOMAXCONN);
    MPIR_ERR_CHKANDJUMP2(ret == -1, mpi_errno, MPI_ERR_OTHER, "**listen", "**listen %s %d", MPIU_Strerror(errno), errno);  
    MPID_nem_tcp_g_lstn_sc.state.lstate = LISTEN_STATE_LISTENING;
    MPID_nem_tcp_g_lstn_sc.handler = MPID_nem_tcp_state_listening_handler;

fn_exit:
    MPIDI_FUNC_EXIT(MPID_STATE_SET_UP_LISTENER);
    return mpi_errno;
fn_fail:
    goto fn_exit;
}
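
The function above is a thin wrapper around the standard POSIX listener sequence (socket, bind, listen). Below is a minimal standalone sketch of that sequence without the MPICH error macros; the INADDR_ANY/ephemeral-port choice is an illustrative assumption, since the real bind policy lives in MPID_nem_tcp_bind:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

/* Minimal sketch: create a TCP listening socket.
   Returns the listening fd, or -1 on error. */
static int listener_sketch(void)
{
    struct sockaddr_in addr;
    int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
    if (fd == -1) { perror("socket"); return -1; }

    memset(&addr, 0, sizeof(addr));
    addr.sin_family = AF_INET;
    addr.sin_addr.s_addr = htonl(INADDR_ANY);
    addr.sin_port = 0;              /* let the kernel pick a free port */

    if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) == -1 ||
        listen(fd, SOMAXCONN) == -1) {
        perror("bind/listen");
        close(fd);
        return -1;
    }
    return fd;
}
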
Example No. 2
static int do_readv(MPID_Request *rreq, int pipe_fd, MPL_IOV iov[],
                    int *iov_offset, int *iov_count, int *complete)
{
    int mpi_errno = MPI_SUCCESS;
    ssize_t nread;

    nread = readv(pipe_fd, &iov[*iov_offset], *iov_count);
    MPIR_ERR_CHKANDJUMP2(nread < 0 && errno != EAGAIN, mpi_errno, MPI_ERR_OTHER, "**read",
                         "**readv %d %s", errno, MPIU_Strerror(errno));

    if (nread < 0) {
        /* the check above already jumped for any error other than EAGAIN,
           so this can only be a retryable condition; try again on the next
           progress poll */
        goto fn_exit;
    }

    *complete = adjust_partially_xferred_iov(iov, iov_offset, iov_count, nread);
    if (*complete) {
        /* look for additional data to send and reload IOV if there is more */
        mpi_errno = check_req_complete(rreq->ch.vc, rreq, complete);
        if (mpi_errno) MPIR_ERR_POP(mpi_errno);

        if (*complete) {
            nread = close(pipe_fd);
            MPIR_ERR_CHKANDJUMP(nread < 0, mpi_errno, MPI_ERR_OTHER, "**close");
            MPL_DBG_MSG(MPIDI_CH3_DBG_CHANNEL, VERBOSE, ".... complete");
        }
    }

fn_fail:
fn_exit:
    return mpi_errno;
}
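
The EAGAIN handling above is the standard nonblocking-read idiom: a short read leads to an IOV adjustment, and a would-block result simply defers to the next progress poll. A self-contained sketch of that idiom, assuming the descriptor was opened with O_NONBLOCK:

#include <errno.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/uio.h>

/* Sketch: drain whatever is currently available from a nonblocking fd.
   Returns bytes read, 0 if the read would block, or -1 on a real error. */
static ssize_t read_some(int fd, struct iovec *iov, int iovcnt)
{
    ssize_t nread = readv(fd, iov, iovcnt);
    if (nread < 0) {
        if (errno == EAGAIN || errno == EWOULDBLOCK)
            return 0;               /* nothing available right now */
        perror("readv");
        return -1;                  /* genuine failure */
    }
    return nread;                   /* caller advances its IOV by nread */
}
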
Example No. 3
static int do_vmsplice(MPID_Request *sreq, int pipe_fd, MPL_IOV iov[],
                       int *iov_offset, int *iov_count, int *complete)
{
    int mpi_errno = MPI_SUCCESS;
    ssize_t err;

#if 1
    /* splice the user pages into the pipe without copying */
    err = vmsplice(pipe_fd, &iov[*iov_offset], *iov_count, SPLICE_F_NONBLOCK);
#else
    /* fallback: ordinary copying writev, useful for debugging */
    err = writev(pipe_fd, &iov[*iov_offset], *iov_count);
#endif

    if (err < 0) {
        /* a full pipe is not an error; just retry on the next poll */
        if (errno == EAGAIN) goto fn_exit;
        MPIR_ERR_SETANDJUMP2(mpi_errno, MPI_ERR_OTHER, "**vmsplice",
                             "**vmsplice %d %s", errno, MPIU_Strerror(errno));
    }

    *complete = adjust_partially_xferred_iov(iov, iov_offset, iov_count, err);
    if (*complete) {
        /* look for additional data to send and reload IOV if there is more */
        mpi_errno = check_req_complete(sreq->ch.vc, sreq, complete);
        if (mpi_errno) MPIR_ERR_POP(mpi_errno);

        if (*complete) {
            err = close(pipe_fd);
            MPIR_ERR_CHKANDJUMP(err < 0, mpi_errno, MPI_ERR_OTHER, "**close");
            MPL_DBG_MSG(MPIDI_CH3_DBG_CHANNEL, VERBOSE, ".... complete");
        }
    }

fn_fail:
fn_exit:
    return mpi_errno;
}
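
vmsplice() maps user pages into a pipe instead of copying them, which is why this path is attractive for large-message transfers. A minimal Linux-only sketch of moving one buffer through a pipe with the same SPLICE_F_NONBLOCK flag (the buffer contents and sizes are illustrative):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
    int fds[2];
    char buf[] = "payload";
    struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
    char out[sizeof(buf)];

    if (pipe(fds) == -1) { perror("pipe"); return 1; }

    /* without SPLICE_F_GIFT the pages are referenced, not copied, so
       buf must not be modified until the data has been consumed */
    if (vmsplice(fds[1], &iov, 1, SPLICE_F_NONBLOCK) == -1) {
        perror("vmsplice");
        return 1;
    }
    if (read(fds[0], out, sizeof(out)) > 0)
        printf("read back: %s\n", out);
    return 0;
}
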
Example No. 4
int MPIR_Info_get_nthkey_impl(MPID_Info *info_ptr, int n, char *key)
{
    int mpi_errno = MPI_SUCCESS;
    MPID_Info *curr_ptr;
    int nkeys;

    curr_ptr = info_ptr->next;
    nkeys = 0;
    while (curr_ptr && nkeys != n) {
        curr_ptr = curr_ptr->next;
        nkeys++;
    }

    /* verify that n is valid */
    MPIR_ERR_CHKANDJUMP2((!curr_ptr), mpi_errno, MPI_ERR_ARG, "**infonkey", "**infonkey %d %d", n, nkeys);

    MPIU_Strncpy( key, curr_ptr->key, MPI_MAX_INFO_KEY+1 );
    /* Eventually, we could remember the location of this key in
       the head using the key/value locations (and a union datatype?) */
 
 fn_exit:
    return mpi_errno;
 fn_fail:
    goto fn_exit;
}
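
The loop above is the classic walk to the n-th node of a singly linked list, with the final NULL check doubling as range validation. The same pattern in isolation, using a hypothetical node type:

#include <stddef.h>

struct node { struct node *next; const char *key; };

/* Sketch: return the n-th node (0-based), or NULL if the list is shorter. */
static struct node *nth_node(struct node *head, int n)
{
    int i = 0;
    struct node *curr = head;
    while (curr && i != n) {
        curr = curr->next;
        i++;
    }
    return curr;   /* NULL here means n was out of range */
}
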
Example No. 5
int MPIR_Graph_neighbors_impl(MPIR_Comm * comm_ptr, int rank, int maxneighbors, int neighbors[])
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_Topology *graph_ptr;
    int i, is, ie;

    graph_ptr = MPIR_Topology_get(comm_ptr);

    MPIR_ERR_CHKANDJUMP((!graph_ptr ||
                         graph_ptr->kind != MPI_GRAPH), mpi_errno, MPI_ERR_TOPOLOGY,
                        "**notgraphtopo");
    MPIR_ERR_CHKANDJUMP2((rank < 0 ||
                          rank >= graph_ptr->topo.graph.nnodes), mpi_errno, MPI_ERR_RANK, "**rank",
                         "**rank %d %d", rank, graph_ptr->topo.graph.nnodes);

    /* Get location in edges array of the neighbors of the specified rank */
    if (rank == 0)
        is = 0;
    else
        is = graph_ptr->topo.graph.index[rank - 1];
    ie = graph_ptr->topo.graph.index[rank];

    /* Get neighbors */
    for (i = is; i < ie; i++)
        *neighbors++ = graph_ptr->topo.graph.edges[i];
  fn_exit:
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
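
The index/edges pair is a compressed (CSR-style) adjacency layout: index[r] holds the cumulative edge count through rank r, so rank r's neighbors occupy edges[index[r-1]] through edges[index[r]-1] (starting at edges[0] for rank 0). A tiny illustration with made-up data:

#include <stdio.h>

int main(void)
{
    /* 3-node graph: 0-1, 0-2, 1-2 (undirected, both directions stored) */
    int index[] = { 2, 4, 6 };          /* cumulative edge counts */
    int edges[] = { 1, 2, 0, 2, 0, 1 }; /* flattened neighbor lists */
    int rank = 1;
    int is = (rank == 0) ? 0 : index[rank - 1];
    int ie = index[rank];

    printf("neighbors of %d:", rank);
    for (int i = is; i < ie; i++)
        printf(" %d", edges[i]);
    printf("\n");                       /* prints: neighbors of 1: 0 2 */
    return 0;
}
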
Example No. 6
int MPIR_Cart_create(MPIR_Comm * comm_ptr, int ndims, const int dims[],
                     const int periods[], int reorder, MPI_Comm * comm_cart)
{
    int i, newsize, rank, nranks, mpi_errno = MPI_SUCCESS;
    MPIR_Comm *newcomm_ptr = NULL;
    MPIR_Topology *cart_ptr = NULL;
    MPIR_CHKPMEM_DECL(4);

    /* Set this to null in case we exit with an error */
    *comm_cart = MPI_COMM_NULL;

    /* Check for invalid arguments */
    newsize = 1;
    for (i = 0; i < ndims; i++)
        newsize *= dims[i];

    /* Use ERR_ARG instead of ERR_TOPOLOGY because there is no topology yet */
    MPIR_ERR_CHKANDJUMP2((newsize > comm_ptr->remote_size), mpi_errno,
                         MPI_ERR_ARG, "**cartdim",
                         "**cartdim %d %d", comm_ptr->remote_size, newsize);

    if (ndims == 0) {
        /* specified as a 0D Cartesian topology in MPI 2.1.
         * Rank 0 returns a dup of COMM_SELF with the topology info attached.
         * Others return MPI_COMM_NULL. */

        rank = comm_ptr->rank;

        if (rank == 0) {
            MPIR_Comm *comm_self_ptr;
            MPIR_Comm_get_ptr(MPI_COMM_SELF, comm_self_ptr);
            mpi_errno = MPIR_Comm_dup_impl(comm_self_ptr, &newcomm_ptr);
            if (mpi_errno)
                MPIR_ERR_POP(mpi_errno);

            /* Create the topology structure */
            MPIR_CHKPMEM_MALLOC(cart_ptr, MPIR_Topology *, sizeof(MPIR_Topology),
                                mpi_errno, "cart_ptr", MPL_MEM_COMM);

            cart_ptr->kind = MPI_CART;
            cart_ptr->topo.cart.nnodes = 1;
            cart_ptr->topo.cart.ndims = 0;

            /* make mallocs of size 1 int so that they get freed as part of the
             * normal free mechanism */

            MPIR_CHKPMEM_MALLOC(cart_ptr->topo.cart.dims, int *, sizeof(int),
                                mpi_errno, "cart.dims", MPL_MEM_COMM);
            MPIR_CHKPMEM_MALLOC(cart_ptr->topo.cart.periodic, int *, sizeof(int),
                                mpi_errno, "cart.periodic", MPL_MEM_COMM);
            MPIR_CHKPMEM_MALLOC(cart_ptr->topo.cart.position, int *, sizeof(int),
                                mpi_errno, "cart.position", MPL_MEM_COMM);
        } else {
            /* all other ranks return MPI_COMM_NULL, which *comm_cart was
             * already initialized to above */
        }
        /* ... remainder of this example truncated in the source ... */
Example No. 7
int MPID_nem_lmt_vmsplice_initiate_lmt(MPIDI_VC_t *vc, MPIDI_CH3_Pkt_t *pkt, MPID_Request *sreq)
{
    int mpi_errno = MPI_SUCCESS;
    MPID_nem_pkt_lmt_rts_t * const rts_pkt = (MPID_nem_pkt_lmt_rts_t *)pkt;
    MPIDI_CH3I_VC *vc_ch = &vc->ch;
    int complete = 0;
    MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_LMT_VMSPLICE_INITIATE_LMT);

    MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_LMT_VMSPLICE_INITIATE_LMT);

    /* re-use the same pipe per-pair,per-sender */
    if (vc_ch->lmt_copy_buf_handle == NULL) {
        int err;
        char *pipe_name;

        pipe_name = tempnam(NULL, "lmt_");
        MPIR_ERR_CHKANDJUMP2(!pipe_name, mpi_errno, MPI_ERR_OTHER, "**tempnam",
                             "**tempnam %d %s", errno, MPIU_Strerror(errno));

        vc_ch->lmt_copy_buf_handle = MPL_strdup(pipe_name);
        /* XXX DJG hack: tempnam() allocates with the system malloc, so it
           must be released with the real free(), not MPICH's tracing
           wrapper */
#undef free
        free(pipe_name);

        err = mkfifo(vc_ch->lmt_copy_buf_handle, 0660);
        MPIR_ERR_CHKANDJUMP2(err < 0, mpi_errno, MPI_ERR_OTHER, "**mkfifo",
                             "**mkfifo %d %s", errno, MPIU_Strerror(errno));
    }

    /* can't start sending data yet, need full RTS/CTS handshake */

    MPID_nem_lmt_send_RTS(vc, rts_pkt, vc_ch->lmt_copy_buf_handle,
                          strlen(vc_ch->lmt_copy_buf_handle)+1);

fn_fail:
fn_exit:
    MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_LMT_VMSPLICE_INITIATE_LMT);
    return mpi_errno;
}
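
The initiator creates a named FIFO as the rendezvous point and ships its path to the receiver inside the RTS cookie. A standalone sketch of just the creation step; since tempnam() is deprecated, this sketch assumes a caller-chosen path rather than reproducing the name-generation hack above:

#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>

/* Sketch: create a FIFO rendezvous point at a caller-chosen path.
   Returns 0 on success (or if the FIFO already exists), -1 on error. */
static int make_rendezvous_fifo(const char *path)
{
    if (mkfifo(path, 0660) < 0 && errno != EEXIST) {
        perror("mkfifo");
        return -1;
    }
    return 0;
}
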
Example No. 8
/* this function is not used in pmi2 */
static int publish_node_id(MPIDI_PG_t *pg, int our_pg_rank)
{
    int mpi_errno = MPI_SUCCESS;
    int pmi_errno;
    int ret;
    char *key;
    int key_max_sz;
    char *kvs_name;
    MPIU_CHKLMEM_DECL(1);

    /* set MPIU_hostname */
    ret = gethostname(MPIU_hostname, MAX_HOSTNAME_LEN);
    MPIR_ERR_CHKANDJUMP2(ret == -1, mpi_errno, MPI_ERR_OTHER, "**sock_gethost", "**sock_gethost %s %d", MPIU_Strerror(errno), errno);
    MPIU_hostname[MAX_HOSTNAME_LEN-1] = '\0';

    /* Allocate space for pmi key */
    pmi_errno = PMI_KVS_Get_key_length_max(&key_max_sz);
    MPIR_ERR_CHKANDJUMP1(pmi_errno, mpi_errno, MPI_ERR_OTHER, "**fail", "**fail %d", pmi_errno);

    MPIU_CHKLMEM_MALLOC(key, char *, key_max_sz, mpi_errno, "key");

    mpi_errno = MPIDI_PG_GetConnKVSname(&kvs_name);
    if (mpi_errno) MPIR_ERR_POP(mpi_errno);

    /* Put my hostname id */
    if (pg->size > 1)
    {
        memset(key, 0, key_max_sz);
        MPL_snprintf(key, key_max_sz, "hostname[%d]", our_pg_rank);

        pmi_errno = PMI_KVS_Put(kvs_name, key, MPIU_hostname);
        MPIR_ERR_CHKANDJUMP1(pmi_errno != PMI_SUCCESS, mpi_errno, MPI_ERR_OTHER, "**pmi_kvs_put", "**pmi_kvs_put %d", pmi_errno);

        pmi_errno = PMI_KVS_Commit(kvs_name);
        MPIR_ERR_CHKANDJUMP1(pmi_errno != PMI_SUCCESS, mpi_errno, MPI_ERR_OTHER, "**pmi_kvs_commit", "**pmi_kvs_commit %d", pmi_errno);

        pmi_errno = PMI_Barrier();
        MPIR_ERR_CHKANDJUMP1(pmi_errno != PMI_SUCCESS, mpi_errno, MPI_ERR_OTHER, "**pmi_barrier", "**pmi_barrier %d", pmi_errno);
    }
    
fn_exit:
    MPIU_CHKLMEM_FREEALL();
    return mpi_errno;
fn_fail:
    goto fn_exit;
}
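
POSIX does not guarantee that gethostname() NUL-terminates the buffer on truncation, which is why the code above writes the terminator explicitly. The same defensive pattern in isolation:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
    char host[256];
    if (gethostname(host, sizeof(host)) == -1) {
        perror("gethostname");
        return 1;
    }
    host[sizeof(host) - 1] = '\0';   /* guarantee termination on truncation */
    printf("hostname: %s\n", host);
    return 0;
}
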
Example No. 9
int MPIR_Graph_neighbors_count_impl(MPID_Comm *comm_ptr, int rank, int *nneighbors)
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_Topology *graph_ptr;

    graph_ptr = MPIR_Topology_get( comm_ptr );

    MPIR_ERR_CHKANDJUMP((!graph_ptr || graph_ptr->kind != MPI_GRAPH), mpi_errno, MPI_ERR_TOPOLOGY, "**notgraphtopo");
    MPIR_ERR_CHKANDJUMP2((rank < 0 || rank >= graph_ptr->topo.graph.nnodes), mpi_errno, MPI_ERR_RANK, "**rank",
                         "**rank %d %d", rank, graph_ptr->topo.graph.nnodes );

    if ( rank == 0 )
        *nneighbors = graph_ptr->topo.graph.index[rank];
    else
        *nneighbors = graph_ptr->topo.graph.index[rank] -
            graph_ptr->topo.graph.index[rank-1];

fn_exit:
    return mpi_errno;
fn_fail:
    goto fn_exit;
}
Example No. 10
int MPID_nem_ptl_poll(int is_blocking_poll)
{
    int mpi_errno = MPI_SUCCESS;
    ptl_event_t event;
    int ret;
    MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_PTL_POLL);

    /* MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_PTL_POLL); */

    while (1) {
        int ctl_event = FALSE;

        /* Check the rptls EQ first. It should never return an event. */
        ret = MPID_nem_ptl_rptl_eqget(MPIDI_nem_ptl_rpt_eq, &event);
        MPIU_Assert(ret == PTL_EQ_EMPTY);

        /* check EQs for events */
        ret = MPID_nem_ptl_rptl_eqget(MPIDI_nem_ptl_eq, &event);
        MPIR_ERR_CHKANDJUMP(ret == PTL_EQ_DROPPED, mpi_errno, MPI_ERR_OTHER, "**eqdropped");
        if (ret == PTL_EQ_EMPTY) {
            ret = MPID_nem_ptl_rptl_eqget(MPIDI_nem_ptl_get_eq, &event);
            MPIR_ERR_CHKANDJUMP(ret == PTL_EQ_DROPPED, mpi_errno, MPI_ERR_OTHER, "**eqdropped");

            if (ret == PTL_EQ_EMPTY) {
                ret = MPID_nem_ptl_rptl_eqget(MPIDI_nem_ptl_control_eq, &event);
                MPIR_ERR_CHKANDJUMP(ret == PTL_EQ_DROPPED, mpi_errno, MPI_ERR_OTHER, "**eqdropped");

                if (ret == PTL_EQ_EMPTY) {
                    ret = MPID_nem_ptl_rptl_eqget(MPIDI_nem_ptl_origin_eq, &event);
                    MPIR_ERR_CHKANDJUMP(ret == PTL_EQ_DROPPED, mpi_errno, MPI_ERR_OTHER, "**eqdropped");
                } else {
                    ctl_event = TRUE;
                }

                /* all EQs are empty */
                if (ret == PTL_EQ_EMPTY)
                    break;
            }
        }
        MPIR_ERR_CHKANDJUMP1(ret, mpi_errno, MPI_ERR_OTHER, "**ptleqget", "**ptleqget %s", MPID_nem_ptl_strerror(ret));
        MPL_DBG_MSG_FMT(MPIDI_CH3_DBG_CHANNEL, VERBOSE, (MPL_DBG_FDEST, "Received event %s pt_idx=%d ni_fail=%s list=%s user_ptr=%p hdr_data=%#lx mlength=%lu rlength=%lu",
                                                MPID_nem_ptl_strevent(&event), event.pt_index, MPID_nem_ptl_strnifail(event.ni_fail_type),
                                                MPID_nem_ptl_strlist(event.ptl_list), event.user_ptr, event.hdr_data, event.mlength, event.rlength));
        MPIR_ERR_CHKANDJUMP2(event.ni_fail_type != PTL_NI_OK && event.ni_fail_type != PTL_NI_NO_MATCH, mpi_errno, MPI_ERR_OTHER, "**ptlni_fail", "**ptlni_fail %s %s", MPID_nem_ptl_strevent(&event), MPID_nem_ptl_strnifail(event.ni_fail_type));

        /* special case for events on the control portal */
        if (ctl_event) {
            mpi_errno = MPID_nem_ptl_nm_ctl_event_handler(&event);
            if (mpi_errno) MPIR_ERR_POP(mpi_errno);
            continue;
        }

        switch (event.type) {
        case PTL_EVENT_PUT:
            if (event.ptl_list == PTL_OVERFLOW_LIST)
                break;
            /* else fall through to the common completion handling below */
        case PTL_EVENT_PUT_OVERFLOW:
        case PTL_EVENT_GET:
        case PTL_EVENT_SEND:
        case PTL_EVENT_REPLY:
        case PTL_EVENT_SEARCH: {
            MPID_Request * const req = event.user_ptr;
            MPL_DBG_MSG_P(MPIDI_CH3_DBG_CHANNEL, VERBOSE, "req = %p", req);
            MPL_DBG_MSG_P(MPIDI_CH3_DBG_CHANNEL, VERBOSE, "REQ_PTL(req)->event_handler = %p", REQ_PTL(req)->event_handler);
            if (REQ_PTL(req)->event_handler) {
                mpi_errno = REQ_PTL(req)->event_handler(&event);
                if (mpi_errno) MPIR_ERR_POP(mpi_errno);
            }
            break;
        }
        case PTL_EVENT_AUTO_FREE:
            mpi_errno = append_overflow((size_t)event.user_ptr);
            if (mpi_errno) MPIR_ERR_POP(mpi_errno);
            break;
        case PTL_EVENT_AUTO_UNLINK:
            overflow_me_handle[(size_t)event.user_ptr] = PTL_INVALID_HANDLE;
            break;
        case PTL_EVENT_LINK:
            /* ignore */
            break;
        case PTL_EVENT_ACK:
        default:
            MPL_error_printf("Received unexpected event type: %d %s", event.type, MPID_nem_ptl_strevent(&event));
            MPIR_ERR_INTERNALANDJUMP(mpi_errno, "Unexpected event type");
        }
    }

 fn_exit:
    /* MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_PTL_POLL); */
    return mpi_errno;
 fn_fail:
    goto fn_exit;
}
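
The loop drains several event queues in priority order and exits only once all of them report empty, dispatching each event through a switch on its type. A compact sketch of that drain-and-dispatch shape; the queue and event types here are hypothetical stand-ins, not a Portals API:

enum ev_type { EV_NONE, EV_DATA, EV_CTRL };
struct ev { enum ev_type type; };

/* stub standing in for a real queue read; always reports empty here */
static enum ev_type q_get(int q, struct ev *e)
{
    (void)q;
    e->type = EV_NONE;
    return EV_NONE;
}

static int drain_events(int nqueues)
{
    struct ev e;
    for (;;) {
        int q, got = 0;
        /* poll the queues in priority order; take the first pending event */
        for (q = 0; q < nqueues; q++) {
            if (q_get(q, &e) != EV_NONE) { got = 1; break; }
        }
        if (!got)
            break;                  /* every queue is empty: we are done */
        switch (e.type) {
        case EV_DATA: /* dispatch to the request's event handler */ break;
        case EV_CTRL: /* dispatch to the control-event handler */ break;
        default:
            return -1;              /* unexpected event type */
        }
    }
    return 0;
}
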
Example No. 11
static int GetSockInterfaceAddr(int myRank, char *ifname, int maxIfname,
                                MPIDU_Sock_ifaddr_t *ifaddr)
{
    const char *ifname_string;
    int mpi_errno = MPI_SUCCESS;
    int ifaddrFound = 0;

    MPIU_Assert(maxIfname);
    ifname[0] = '\0';

    MPIR_ERR_CHKANDJUMP(MPIR_CVAR_CH3_INTERFACE_HOSTNAME && MPIR_CVAR_NEMESIS_TCP_NETWORK_IFACE, mpi_errno, MPI_ERR_OTHER, "**ifname_and_hostname");
    
    /* Set "not found" for ifaddr */
    ifaddr->len = 0;

    /* Check if user specified ethernet interface name, e.g., ib0, eth1 */
    if (MPIR_CVAR_NEMESIS_TCP_NETWORK_IFACE) {
	int len;
        mpi_errno = MPIDI_Get_IP_for_iface(MPIR_CVAR_NEMESIS_TCP_NETWORK_IFACE, ifaddr, &ifaddrFound);
        MPIR_ERR_CHKANDJUMP1(mpi_errno || !ifaddrFound, mpi_errno, MPI_ERR_OTHER, "**iface_notfound", "**iface_notfound %s", MPIR_CVAR_NEMESIS_TCP_NETWORK_IFACE);
        
        MPIU_DBG_MSG_FMT(CH3_CONNECT, VERBOSE, (MPIU_DBG_FDEST,
                                                "ifaddrFound=TRUE ifaddr->type=%d ifaddr->len=%d ifaddr->ifaddr[0-3]=%d.%d.%d.%d",
                                                ifaddr->type, ifaddr->len, ifaddr->ifaddr[0], ifaddr->ifaddr[1], ifaddr->ifaddr[2],
                                                ifaddr->ifaddr[3]));

        /* In this case, ifname is only used for debugging purposes */
	mpi_errno = MPID_Get_processor_name(ifname, maxIfname, &len );
        if (mpi_errno) MPIR_ERR_POP(mpi_errno);
        goto fn_exit;
    }

    /* Check for a host name supplied through an environment variable */
    ifname_string = MPIR_CVAR_CH3_INTERFACE_HOSTNAME;
    if (!ifname_string) {
	/* See if there is a per-process name for the interface (e.g., when
	   the process manager can only deliver the same environment values
	   to every process, a rank-suffixed variable is used instead).
	   There's no way to do this with the param interface, so we need
	   to use getenv() here. */
	char namebuf[1024];
	MPL_snprintf( namebuf, sizeof(namebuf), 
		       "MPICH_INTERFACE_HOSTNAME_R%d", myRank );
	ifname_string = getenv( namebuf );

	if (DBG_IFNAME && ifname_string) {
	    fprintf( stdout, "Found interface name %s from %s\n", 
		    ifname_string, namebuf );
	    fflush( stdout );
	}
    }
    else if (DBG_IFNAME) {
	fprintf( stdout, 
		 "Found interface name %s from MPICH_INTERFACE_HOSTNAME\n", 
		 ifname_string );
	fflush( stdout );
    }
	 
    if (!ifname_string) {
	int len;

	/* User did not specify a hostname.  Look it up. */
	mpi_errno = MPID_Get_processor_name(ifname, maxIfname, &len );
        if (mpi_errno) MPIR_ERR_POP(mpi_errno);

	ifname_string = ifname;

	/* If we didn't find a specific name, then try to get an IP address
	   directly from the available interfaces, if that is supported on
	   this platform.  Otherwise, we'll drop into the next step that uses 
	   the ifname */
	mpi_errno = MPIDI_GetIPInterface( ifaddr, &ifaddrFound );
        if (mpi_errno) MPIR_ERR_POP(mpi_errno);
    }
    else {
	/* Copy this name into the output name */
	MPIU_Strncpy( ifname, ifname_string, maxIfname );
    }

    /* If we don't have an IP address, try to get it from the name */
    if (!ifaddrFound) {
        int i;
	struct hostent *info = NULL;
        for (i = 0; i < MPIR_CVAR_NEMESIS_TCP_HOST_LOOKUP_RETRIES; ++i) {
            info = gethostbyname( ifname_string );
            if (info || h_errno != TRY_AGAIN)
                break;
        }
        MPIR_ERR_CHKANDJUMP2(!info || !info->h_addr_list, mpi_errno, MPI_ERR_OTHER, "**gethostbyname", "**gethostbyname %s %d", ifname_string, h_errno);
        
        /* Use the primary address */
        ifaddr->len  = info->h_length;
        ifaddr->type = info->h_addrtype;
        if (ifaddr->len > sizeof(ifaddr->ifaddr)) {
            /* If the address won't fit in the field, reset to
               no address */
            ifaddr->len = 0;
            ifaddr->type = -1;
            MPIR_ERR_INTERNAL(mpi_errno, "Address too long to fit in field");
        } else {
            MPIU_Memcpy( ifaddr->ifaddr, info->h_addr_list[0], ifaddr->len );
	}
    }

fn_exit:
    return mpi_errno;
fn_fail:
    goto fn_exit;
}
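
gethostbyname() can fail transiently with h_errno set to TRY_AGAIN, hence the bounded retry loop above. The same retry idiom in isolation (modern code would usually prefer getaddrinfo(), whose equivalent transient error is EAI_AGAIN):

#include <netdb.h>

/* Sketch: resolve a hostname, retrying a bounded number of times on
   transient failure. Returns NULL if resolution ultimately failed. */
static struct hostent *resolve_with_retry(const char *name, int retries)
{
    struct hostent *info = NULL;
    for (int i = 0; i < retries; i++) {
        info = gethostbyname(name);
        if (info || h_errno != TRY_AGAIN)
            break;                  /* success, or a non-transient error */
    }
    return info;
}
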
Example No. 12
/*@
MPI_Cart_coords - Determines process coords in cartesian topology given
                  rank in group

Input Parameters:
+ comm - communicator with cartesian structure (handle)
. rank - rank of a process within group of 'comm' (integer)
- maxdims - length of vector 'coords' in the calling program (integer)

Output Parameters:
. coords - integer array (of size 'ndims') containing the Cartesian
  coordinates of specified process (integer)

.N SignalSafe

.N Fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TOPOLOGY
.N MPI_ERR_RANK
.N MPI_ERR_DIMS
.N MPI_ERR_ARG
@*/
int MPI_Cart_coords(MPI_Comm comm, int rank, int maxdims, int coords[])
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_Comm *comm_ptr = NULL;
    MPIR_Topology *cart_ptr;
    int i, nnodes;
    MPIR_FUNC_TERSE_STATE_DECL(MPID_STATE_MPI_CART_COORDS);

    MPIR_ERRTEST_INITIALIZED_ORDIE();

    MPIR_FUNC_TERSE_ENTER(MPID_STATE_MPI_CART_COORDS);

    /* Validate parameters, especially handles needing to be converted */
#ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPIR_ERRTEST_COMM(comm, mpi_errno);
        }
        MPID_END_ERROR_CHECKS;
    }
#endif

    /* Convert MPI object handles to object pointers */
    MPIR_Comm_get_ptr(comm, comm_ptr);

    /* Validate parameters and objects (post conversion) */
#ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            /* Validate comm_ptr */
            MPIR_Comm_valid_ptr(comm_ptr, mpi_errno, TRUE);
            /* If comm_ptr is not valid, it will be reset to null */
            if (mpi_errno != MPI_SUCCESS)
                goto fn_fail;

            MPIR_ERRTEST_RANK(comm_ptr, rank, mpi_errno);
        }
        MPID_END_ERROR_CHECKS;
    }
#endif /* HAVE_ERROR_CHECKING */

    cart_ptr = MPIR_Topology_get(comm_ptr);

#ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPIR_ERR_CHKANDJUMP((!cart_ptr ||
                                 cart_ptr->kind != MPI_CART), mpi_errno, MPI_ERR_TOPOLOGY,
                                "**notcarttopo");
            MPIR_ERR_CHKANDJUMP2((cart_ptr->topo.cart.ndims > maxdims), mpi_errno, MPI_ERR_ARG,
                                 "**dimsmany", "**dimsmany %d %d", cart_ptr->topo.cart.ndims,
                                 maxdims);
            if (cart_ptr->topo.cart.ndims) {
                MPIR_ERRTEST_ARGNULL(coords, "coords", mpi_errno);
            }
        }
        MPID_END_ERROR_CHECKS;
    }
#endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ...  */

    /* Calculate coords */
    nnodes = cart_ptr->topo.cart.nnodes;
    for (i = 0; i < cart_ptr->topo.cart.ndims; i++) {
        nnodes = nnodes / cart_ptr->topo.cart.dims[i];
        coords[i] = rank / nnodes;
        rank = rank % nnodes;
    }

    /* ... end of body of routine ... */

#ifdef HAVE_ERROR_CHECKING
  fn_exit:
#endif
    MPIR_FUNC_TERSE_EXIT(MPID_STATE_MPI_CART_COORDS);
    return mpi_errno;

#ifdef HAVE_ERROR_CHECKING
  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
    {
        mpi_errno =
            MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER,
                                 "**mpi_cart_coords", "**mpi_cart_coords %C %d %d %p", comm, rank,
                                 maxdims, coords);
    }
    mpi_errno = MPIR_Err_return_comm(comm_ptr, FCNAME, mpi_errno);
    goto fn_exit;
    /* --END ERROR HANDLING-- */
#endif
}
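
The body's coordinate computation is plain mixed-radix decomposition: nnodes is successively divided by each dimension, the quotient becomes that coordinate, and the remainder carries into the next dimension. A standalone version with a worked example:

#include <stdio.h>

/* Mixed-radix decomposition of a rank into Cartesian coordinates. */
static void rank_to_coords(int rank, int ndims, const int dims[], int coords[])
{
    int nnodes = 1;
    for (int i = 0; i < ndims; i++)
        nnodes *= dims[i];
    for (int i = 0; i < ndims; i++) {
        nnodes /= dims[i];
        coords[i] = rank / nnodes;
        rank %= nnodes;
    }
}

int main(void)
{
    int dims[] = { 2, 3 }, coords[2];
    rank_to_coords(5, 2, dims, coords);
    printf("(%d, %d)\n", coords[0], coords[1]);   /* rank 5 in 2x3 -> (1, 2) */
    return 0;
}
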
Example No. 13
/*@
   MPI_Comm_join - Create a communicator by joining two processes connected by 
     a socket.

Input Parameters:
. fd - socket file descriptor 

Output Parameters:
. intercomm - new intercommunicator (handle) 

 Notes:
  The socket must be quiescent before 'MPI_COMM_JOIN' is called and after 
  'MPI_COMM_JOIN' returns. More specifically, on entry to 'MPI_COMM_JOIN', a 
  read on the socket will not read any data that was written to the socket 
  before the remote process called 'MPI_COMM_JOIN'.

.N ThreadSafe

.N Fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_ARG
@*/
int MPI_Comm_join(int fd, MPI_Comm *intercomm)
{
    static const char FCNAME[] = "MPI_Comm_join";
    int mpi_errno = MPI_SUCCESS, err;
    MPID_Comm *intercomm_ptr;
    char *local_port, *remote_port;
    MPIU_CHKLMEM_DECL(2);
    MPID_MPI_STATE_DECL(MPID_STATE_MPI_COMM_JOIN);

    MPIR_ERRTEST_INITIALIZED_ORDIE();
    
    MPID_THREAD_CS_ENTER(GLOBAL, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
    MPID_MPI_FUNC_ENTER(MPID_STATE_MPI_COMM_JOIN);

    /* ... body of routine ...  */
    
    MPIU_CHKLMEM_MALLOC(local_port, char *, MPI_MAX_PORT_NAME, mpi_errno, "local port name");
    MPIU_CHKLMEM_MALLOC(remote_port, char *, MPI_MAX_PORT_NAME, mpi_errno, "remote port name");
    
    mpi_errno = MPIR_Open_port_impl(NULL, local_port);
    MPIR_ERR_CHKANDJUMP((mpi_errno != MPI_SUCCESS), mpi_errno, MPI_ERR_OTHER, "**openportfailed");

    err = MPIR_fd_send(fd, local_port, MPI_MAX_PORT_NAME);
    MPIR_ERR_CHKANDJUMP1((err != 0), mpi_errno, MPI_ERR_INTERN, "**join_send", "**join_send %d", err);

    err = MPIR_fd_recv(fd, remote_port, MPI_MAX_PORT_NAME);
    MPIR_ERR_CHKANDJUMP1((err != 0), mpi_errno, MPI_ERR_INTERN, "**join_recv", "**join_recv %d", err);

    MPIR_ERR_CHKANDJUMP2((strcmp(local_port, remote_port) == 0), mpi_errno, MPI_ERR_INTERN, "**join_portname",
			 "**join_portname %s %s", local_port, remote_port);

    if (strcmp(local_port, remote_port) < 0) {
        MPID_Comm *comm_self_ptr;
        MPID_Comm_get_ptr( MPI_COMM_SELF, comm_self_ptr );
        mpi_errno = MPIR_Comm_accept_impl(local_port, NULL, 0, comm_self_ptr, &intercomm_ptr);
        if (mpi_errno) MPIR_ERR_POP(mpi_errno);
    } else {
        MPID_Comm *comm_self_ptr;
        MPID_Comm_get_ptr( MPI_COMM_SELF, comm_self_ptr );
        mpi_errno = MPIR_Comm_connect_impl(remote_port, NULL, 0, comm_self_ptr, &intercomm_ptr);
        if (mpi_errno) MPIR_ERR_POP(mpi_errno);
    }

    mpi_errno = MPIR_Close_port_impl(local_port);
    if (mpi_errno) MPIR_ERR_POP(mpi_errno);

    MPIR_OBJ_PUBLISH_HANDLE(*intercomm, intercomm_ptr->handle);

    /* ... end of body of routine ... */

  fn_exit:
    MPIU_CHKLMEM_FREEALL();
    MPID_MPI_FUNC_EXIT(MPID_STATE_MPI_COMM_JOIN);
    MPID_THREAD_CS_EXIT(GLOBAL, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
    return mpi_errno;

  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
#   ifdef HAVE_ERROR_CHECKING
    {
	mpi_errno = MPIR_Err_create_code(
	    mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER, "**mpi_comm_join",
	    "**mpi_comm_join %d %p", fd, intercomm);
    }
#   endif
    mpi_errno = MPIR_Err_return_comm( NULL, FCNAME, mpi_errno );
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
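
The strcmp() on the exchanged port names is a symmetric tie-break: both processes run identical code, yet exactly one becomes the acceptor and the other the connector. A minimal sketch of the idiom:

#include <stdio.h>
#include <string.h>

/* Sketch: derive asymmetric roles from two distinct exchanged tokens. */
static const char *pick_role(const char *mine, const char *theirs)
{
    /* equal tokens would leave the roles undecided; the caller must
       treat that as an error, as MPI_Comm_join does above */
    return (strcmp(mine, theirs) < 0) ? "accept" : "connect";
}

int main(void)
{
    printf("%s\n", pick_role("port-a", "port-b"));   /* prints: accept */
    return 0;
}
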