Example #1
/**
 * xprt_rdma_connect_worker - establish connection in the background
 * @work: worker thread context
 *
 * Requester holds the xprt's send lock to prevent activity on this
 * transport while a fresh connection is being established. RPC tasks
 * sleep on the xprt's pending queue waiting for connect to complete.
 */
static void
xprt_rdma_connect_worker(struct work_struct *work)
{
	struct rpcrdma_xprt *r_xprt = container_of(work, struct rpcrdma_xprt,
						   rx_connect_worker.work);
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	int rc;

	rc = rpcrdma_ep_connect(&r_xprt->rx_ep, &r_xprt->rx_ia);
	xprt_clear_connecting(xprt);
	if (r_xprt->rx_ep.rep_connected > 0) {
		if (!xprt_test_and_set_connected(xprt)) {
			xprt->stat.connect_count++;
			xprt->stat.connect_time += (long)jiffies -
						   xprt->stat.connect_start;
			xprt_wake_pending_tasks(xprt, -EAGAIN);
		}
	} else {
		if (xprt_test_and_clear_connected(xprt))
			xprt_wake_pending_tasks(xprt, rc);
	}
}
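
How this worker gets scheduled is not shown above. Here is a minimal sketch, assuming the rx_connect_worker delayed_work field implied by the container_of() call; INIT_DELAYED_WORK() and schedule_delayed_work() are standard kernel APIs, while the two function names below are hypothetical:

static void example_rdma_setup(struct rpcrdma_xprt *r_xprt)
{
	/* Bind the worker function to the delayed_work embedded in r_xprt
	 * (hypothetical setup path, matching the container_of() above) */
	INIT_DELAYED_WORK(&r_xprt->rx_connect_worker,
			  xprt_rdma_connect_worker);
}

static void example_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpcrdma_xprt *r_xprt =
		container_of(xprt, struct rpcrdma_xprt, rx_xprt);

	/* Run the connect in the background; the caller's RPC task
	 * sleeps on the pending queue until the worker wakes it. */
	schedule_delayed_work(&r_xprt->rx_connect_worker, 0);
}
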
Example #2
static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
                            unsigned int offset, size_t len)
{
    struct rpc_xprt *xprt = rd_desc->arg.data;
    skb_reader_t desc = {
        .skb    = skb,
        .offset = offset,
        .count  = len,
        .csum   = 0,
    };

    dprintk("RPC:      xs_tcp_data_recv started\n");
    do {
        /* Read in a new fragment marker if necessary */
        /* Can we ever really expect to get completely empty fragments? */
        if (xprt->tcp_flags & XPRT_COPY_RECM) {
            xs_tcp_read_fraghdr(xprt, &desc);
            continue;
        }
        /* Read in the xid if necessary */
        if (xprt->tcp_flags & XPRT_COPY_XID) {
            xs_tcp_read_xid(xprt, &desc);
            continue;
        }
        /* Read in the request data */
        if (xprt->tcp_flags & XPRT_COPY_DATA) {
            xs_tcp_read_request(xprt, &desc);
            continue;
        }
        /* Skip over any trailing bytes on short reads */
        xs_tcp_read_discard(xprt, &desc);
    } while (desc.count);
    dprintk("RPC:      xs_tcp_data_recv done\n");
    return len - desc.count;
}
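
The loop above is driven by the RPC-over-TCP record-marking format (RFC 1831, later RFC 5531): each fragment begins with a 4-byte big-endian marker whose high bit flags the last fragment of a record and whose low 31 bits give the fragment length. A standalone userspace sketch of the decode that xs_tcp_read_fraghdr performs (the helper name decode_fraghdr is hypothetical; RPC_LAST_STREAM_FRAGMENT is the kernel's name for the high bit):

#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>

#define RPC_LAST_STREAM_FRAGMENT 0x80000000UL

static void decode_fraghdr(uint32_t marker_be, bool *last, uint32_t *len)
{
    uint32_t marker = ntohl(marker_be);

    *last = (marker & RPC_LAST_STREAM_FRAGMENT) != 0;  /* end of record? */
    *len  = marker & ~RPC_LAST_STREAM_FRAGMENT;        /* fragment length */
}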

/**
 * xs_tcp_data_ready - "data ready" callback for TCP sockets
 * @sk: socket with data to read
 * @bytes: how much data to read
 *
 */
static void xs_tcp_data_ready(struct sock *sk, int bytes)
{
    struct rpc_xprt *xprt;
    read_descriptor_t rd_desc;

    read_lock(&sk->sk_callback_lock);
    dprintk("RPC:      xs_tcp_data_ready...\n");
    if (!(xprt = xprt_from_sock(sk)))
        goto out;
    if (xprt->shutdown)
        goto out;

    /* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
    rd_desc.arg.data = xprt;
    rd_desc.count = 65536;
    tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
out:
    read_unlock(&sk->sk_callback_lock);
}
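
For xprt_from_sock() above to find the transport, the socket must have been set up with sk->sk_user_data pointing at the xprt and this function installed as its data-ready callback; sk_state_change and sk_write_space are hooked the same way. A minimal sketch of that wiring (the function name is hypothetical; the sk fields and the write-side sk_callback_lock locking are the standard kernel pattern):

static void example_install_data_ready(struct rpc_xprt *xprt, struct sock *sk)
{
    write_lock_bh(&sk->sk_callback_lock);
    sk->sk_user_data = xprt;               /* read back by xprt_from_sock() */
    sk->sk_data_ready = xs_tcp_data_ready; /* invoked on incoming data */
    write_unlock_bh(&sk->sk_callback_lock);
}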

/**
 * xs_tcp_state_change - callback to handle TCP socket state changes
 * @sk: socket whose state has changed
 *
 */
static void xs_tcp_state_change(struct sock *sk)
{
    struct rpc_xprt *xprt;

    read_lock(&sk->sk_callback_lock);
    if (!(xprt = xprt_from_sock(sk)))
        goto out;
    dprintk("RPC:      xs_tcp_state_change client %p...\n", xprt);
    dprintk("RPC:      state %x conn %d dead %d zapped %d\n",
            sk->sk_state, xprt_connected(xprt),
            sock_flag(sk, SOCK_DEAD),
            sock_flag(sk, SOCK_ZAPPED));

    switch (sk->sk_state) {
    case TCP_ESTABLISHED:
        spin_lock_bh(&xprt->transport_lock);
        if (!xprt_test_and_set_connected(xprt)) {
            /* Reset TCP record info */
            xprt->tcp_offset = 0;
            xprt->tcp_reclen = 0;
            xprt->tcp_copied = 0;

            /* Preserve the source-address flag; restart record parsing */
            xprt->tcp_flags = (xprt->tcp_flags & XPRT_SRCADDR_PRESENT) |
                              XPRT_COPY_RECM | XPRT_COPY_XID;

            xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
            xprt_wake_pending_tasks(xprt, 0);
        }
        spin_unlock_bh(&xprt->transport_lock);
        break;
    case TCP_SYN_SENT:
    case TCP_SYN_RECV:
        break;
    case TCP_CLOSE_WAIT:
        /* Try to schedule an autoclose RPC call */
        set_bit(XPRT_CLOSE_WAIT, &xprt->state);
        if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
            queue_work(rpciod_workqueue, &xprt->task_cleanup);
        /* fall through */
    default:
        xprt_disconnect(xprt);
    }
out:
    read_unlock(&sk->sk_callback_lock);
}
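
The TCP_CLOSE_WAIT case takes the XPRT_LOCKED bit before queueing xprt->task_cleanup, so exactly one context tears the connection down; whatever work function runs must release that bit when it finishes. A hypothetical sketch of such a worker (the real one lives in net/sunrpc/xprt.c; the name and exact teardown steps here are illustrative):

static void example_autoclose(struct work_struct *work)
{
    struct rpc_xprt *xprt =
        container_of(work, struct rpc_xprt, task_cleanup);

    /* Tear the connection down, then drop XPRT_LOCKED so other
     * contexts may reconnect. */
    clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
    xprt_disconnect(xprt);
    xprt_release_write(xprt, NULL);
}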

/**
 * xs_udp_write_space - callback invoked when socket buffer space
 *                      becomes available
 * @sk: socket whose state has changed
 *
 * Called when more output buffer space is available for this socket.
 * We try not to wake our writers until they can make "significant"
 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
 * with a bunch of small requests.
 */
static void xs_udp_write_space(struct sock *sk)
{
    read_lock(&sk->sk_callback_lock);

    /* from net/core/sock.c:sock_def_write_space */
    if (sock_writeable(sk)) {
        struct socket *sock;
        struct rpc_xprt *xprt;

        if (unlikely(!(sock = sk->sk_socket)))
            goto out;
        if (unlikely(!(xprt = xprt_from_sock(sk))))
            goto out;
        if (unlikely(!test_and_clear_bit(SOCK_NOSPACE, &sock->flags)))
            goto out;

        xprt_write_space(xprt);
    }

out:
    read_unlock(&sk->sk_callback_lock);
}
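
The SOCK_NOSPACE test above pairs with the send path: a sender that cannot write sets that bit before sleeping, which is what arms this callback to call xprt_write_space() at all. A hypothetical sender-side sketch (the function name is illustrative; SOCK_NOSPACE and struct socket are the standard kernel interfaces):

static void example_sender_nospace(struct socket *sock)
{
    /* Mark that a writer is blocked on this socket; xs_udp_write_space()
     * will clear the bit and wake the waiting RPC tasks once buffer
     * space frees up. The task would now sleep until that wakeup. */
    set_bit(SOCK_NOSPACE, &sock->flags);
}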