Example #1: ptlrpc_uuid_to_peer() resolves a Lustre UUID to the closest matching LNet peer NID and the local NID to send from.
int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
			 lnet_process_id_t *peer, lnet_nid_t *self)
{
	int		best_dist = 0;
	__u32		best_order = 0;
	int		count = 0;
	int		rc = -ENOENT;
	int		portals_compatibility;
	int		dist;
	__u32		order;
	lnet_nid_t	dst_nid;
	lnet_nid_t	src_nid;

	portals_compatibility = LNetCtl(IOC_LIBCFS_PORTALS_COMPATIBILITY, NULL);

	peer->pid = LUSTRE_SRV_LNET_PID;

	/* Choose the matching UUID that's closest */
	while (lustre_uuid_to_peer(uuid->uuid, &dst_nid, count++) == 0) {
		dist = LNetDist(dst_nid, &src_nid, &order);
		if (dist < 0)
			continue;

		if (dist == 0) {		/* local! use loopback LND */
			peer->nid = *self = LNET_MKNID(LNET_MKNET(LOLND, 0), 0);
			rc = 0;
			break;
		}

		if (rc < 0 ||
		    dist < best_dist ||
		    (dist == best_dist && order < best_order)) {
			best_dist = dist;
			best_order = order;

			if (portals_compatibility > 1) {
				/* Strong portals compatibility: Zero the nid's
				 * NET, so if I'm reading new config logs, or
				 * getting configured by (new) lconf I can
				 * still talk to old servers. */
				dst_nid = LNET_MKNID(0, LNET_NIDADDR(dst_nid));
				src_nid = LNET_MKNID(0, LNET_NIDADDR(src_nid));
			}
			peer->nid = dst_nid;
			*self = src_nid;
			rc = 0;
		}
	}

	CDEBUG(D_NET, "%s->%s\n", uuid->uuid, libcfs_id2str(*peer));
	return rc;
}
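A hypothetical caller sketch (illustrative only; 'uuid' is assumed to be an already-initialized struct obd_uuid):

        lnet_process_id_t peer;
        lnet_nid_t        self;
        int               rc;

        rc = ptlrpc_uuid_to_peer(uuid, &peer, &self);
        if (rc != 0)
                CERROR("cannot find peer %s: rc = %d\n", uuid->uuid, rc);
        else
                CDEBUG(D_NET, "%s: peer %s, self %s\n", uuid->uuid,
                       libcfs_id2str(peer), libcfs_nid2str(self));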
Example #2: libcfs_str2nid() parses an "address@network" NID string into a packed lnet_nid_t.
lnet_nid_t
libcfs_str2nid(const char *str)
{
        const char       *sep = strchr(str, '@');
        struct netstrfns *nf;
        __u32             net;
        __u32             addr;

        if (sep != NULL) {
                nf = libcfs_str2net_internal(sep + 1, &net);
                if (nf == NULL)
                        return LNET_NID_ANY;
        } else {
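                /* no '@net' suffix: assume the default socklnd ("tcp")
                 * network 0 */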
                sep = str + strlen(str);
                net = LNET_MKNET(SOCKLND, 0);
                nf = libcfs_lnd2netstrfns(SOCKLND);
                LASSERT (nf != NULL);
        }

        if (!nf->nf_str2addr(str, (int)(sep - str), &addr))
                return LNET_NID_ANY;

        return LNET_MKNID(net, addr);
}
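A hedged usage sketch (the literal NID strings are illustrative only):

        /* "address@network" parses to a packed lnet_nid_t; a bare
         * address falls back to socklnd ("tcp") network 0, per the
         * else branch above */
        lnet_nid_t nid  = libcfs_str2nid("192.168.0.10@tcp");
        lnet_nid_t bare = libcfs_str2nid("192.168.0.10");

        if (nid == LNET_NID_ANY)
                CERROR("cannot parse NID\n");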
Example #3: usocklnd_passiveconn_hellorecv() processes a hello message received on a passive (accepted) usocklnd connection.
/* All actions that we need after receiving hello on passive conn:
 * 1) Stash peer's nid, pid, incarnation and conn type
 * 2) Cope with easy case: conn[idx] is empty - just save conn there
 * 3) Resolve race:
 *    a) if our nid is higher - reply with CONN_NONE and make us zombie
 *    b) if peer's nid is higher - postpone race resolution till
 *       READY state
 * 4) Anyhow, send reply hello
*/
int
usocklnd_passiveconn_hellorecv(usock_conn_t *conn)
{
        ksock_hello_msg_t *hello = conn->uc_rx_hello;
        int                type;
        int                idx;
        int                rc;
        usock_peer_t      *peer;
        lnet_ni_t         *ni        = conn->uc_ni;
        __u32              peer_ip   = conn->uc_peer_ip;
        __u16              peer_port = conn->uc_peer_port;

        /* don't know parent peer yet and not zombie */
        LASSERT (conn->uc_peer == NULL &&
                 ni != NULL);

        /* don't know peer's nid and incarnation yet */
        if (peer_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
                /* do not trust liblustre clients */
                conn->uc_peerid.pid = peer_port | LNET_PID_USERFLAG;
                conn->uc_peerid.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid),
                                                 peer_ip);
                if (hello->kshm_ctype != SOCKLND_CONN_ANY) {
                        lnet_ni_decref(ni);
                        conn->uc_ni = NULL;
                        CERROR("Refusing to accept connection of type=%d from "
                               "userspace process %u.%u.%u.%u:%d\n", hello->kshm_ctype,
                               HIPQUAD(peer_ip), peer_port);
                        return -EINVAL;
                }
        } else {
                conn->uc_peerid.pid = hello->kshm_src_pid;
                conn->uc_peerid.nid = hello->kshm_src_nid;
        }
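        /* our conn type is the inverse of the type the peer requested */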
        conn->uc_type = type = usocklnd_invert_type(hello->kshm_ctype);

        rc = usocklnd_find_or_create_peer(ni, conn->uc_peerid, &peer);
        if (rc) {
                lnet_ni_decref(ni);
                conn->uc_ni = NULL;
                return rc;
        }

        peer->up_last_alive = cfs_time_current();

        idx = usocklnd_type2idx(conn->uc_type);

        /* safely check whether we're first */
        pthread_mutex_lock(&peer->up_lock);

        usocklnd_cleanup_stale_conns(peer, hello->kshm_src_incarnation, NULL);

        if (peer->up_conns[idx] == NULL) {
                peer->up_last_alive = cfs_time_current();
                conn->uc_peer = peer;
                conn->uc_ni = NULL;
                usocklnd_link_conn_to_peer(conn, peer, idx);
                usocklnd_conn_addref(conn);
        } else {
                usocklnd_peer_decref(peer);

                /* Resolve race in favour of higher NID */
                if (conn->uc_peerid.nid < conn->uc_ni->ni_nid) {
                        /* make us zombie */
                        conn->uc_ni = NULL;
                        type = SOCKLND_CONN_NONE;
                }

                /* if conn->uc_peerid.nid > conn->uc_ni->ni_nid,
                 * postpone race resolution till READY state
                 * (hopefully that conn[idx] will die because of
                 * incoming hello of CONN_NONE type) */
        }
        pthread_mutex_unlock(&peer->up_lock);

        /* allocate and initialize fake tx with hello */
        conn->uc_tx_hello = usocklnd_create_hello_tx(ni, type,
                                                     conn->uc_peerid.nid);
        if (conn->uc_ni == NULL)
                lnet_ni_decref(ni);

        if (conn->uc_tx_hello == NULL)
                return -ENOMEM;

        /* rc == 0 */
        pthread_mutex_lock(&conn->uc_lock);
        if (conn->uc_state == UC_DEAD)
                goto passive_hellorecv_done;

        conn->uc_state = UC_SENDING_HELLO;
        conn->uc_tx_deadline = cfs_time_shift(usock_tuns.ut_timeout);
        conn->uc_tx_flag = 1;
        rc = usocklnd_add_pollrequest(conn, POLL_SET_REQUEST, POLLOUT);

  passive_hellorecv_done:
        pthread_mutex_unlock(&conn->uc_lock);
        return rc;
}
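The step-3 race resolution above reduces to a NID comparison. A hypothetical helper (illustrative only, not part of usocklnd) states the rule explicitly:

        /* Illustration only: on a passive conn, a peer NID lower than
         * our own zombifies the conn (we reply SOCKLND_CONN_NONE);
         * otherwise resolution is postponed until READY state. */
        static int
        passive_conn_is_zombified(lnet_nid_t peer_nid, lnet_nid_t my_nid)
        {
                return peer_nid < my_nid;
        }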