Example #1
0
/*
 * Copy the next piece of the 4-byte RPC-over-TCP record marker out of
 * the socket data.  May be entered repeatedly for one marker: progress
 * is tracked in xprt->tcp_offset until all sizeof(tcp_recm) bytes have
 * arrived, at which point the fragment length and last-fragment flag
 * are decoded and the receive state advances past XPRT_COPY_RECM.
 */
static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, skb_reader_t *desc)
{
    size_t want, got;
    char *cursor;

    /* Resume copying the record marker at the current partial offset. */
    cursor = ((char *) &xprt->tcp_recm) + xprt->tcp_offset;
    want = sizeof(xprt->tcp_recm) - xprt->tcp_offset;
    got = xs_tcp_copy_data(desc, cursor, want);
    xprt->tcp_offset += got;

    /* Marker still incomplete: wait for more socket data. */
    if (got != want)
        return;

    /* Marker is complete: split it into length and last-fragment bit. */
    xprt->tcp_reclen = ntohl(xprt->tcp_recm);
    if (xprt->tcp_reclen & RPC_LAST_STREAM_FRAGMENT)
        xprt->tcp_flags |= XPRT_LAST_FRAG;
    else
        xprt->tcp_flags &= ~XPRT_LAST_FRAG;
    xprt->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK;

    /* Done with the marker; restart the offset for the fragment body. */
    xprt->tcp_flags &= ~XPRT_COPY_RECM;
    xprt->tcp_offset = 0;

    /* Sanity check of the record length */
    if (unlikely(xprt->tcp_reclen < 4)) {
        dprintk("RPC:      invalid TCP record fragment length\n");
        xprt_disconnect(xprt);
        return;
    }
    dprintk("RPC:      reading TCP record fragment of length %d\n",
            xprt->tcp_reclen);
}
Example #2
0
/**
 * xs_tcp_send_request - write an RPC request to a TCP socket
 * @task: address of RPC task that manages the state of an RPC request
 *
 * Return values:
 *        0:	The request has been sent
 *   EAGAIN:	The socket was blocked, please call again later to
 *		complete the request
 * ENOTCONN:	Caller needs to invoke connect logic then call again
 *    other:	Some other error occurred, the request was not sent
 *
 * XXX: In the case of soft timeouts, should we eventually give up
 *	if sendmsg is not able to make progress?
 */
static int xs_tcp_send_request(struct rpc_task *task)
{
    struct rpc_rqst *req = task->tk_rqstp;
    struct rpc_xprt *xprt = req->rq_xprt;
    struct xdr_buf *xdr = &req->rq_snd_buf;
    int status, retry = 0;

    /* Prepend the RPC-over-TCP record marker to the send buffer. */
    xs_encode_tcp_record_marker(&req->rq_snd_buf);

    xs_pktdump("packet data:",
               req->rq_svec->iov_base,
               req->rq_svec->iov_len);

    /* Continue transmitting the packet/record. We must be careful
     * to cope with writespace callbacks arriving _after_ we have
     * called sendmsg(). */
    while (1) {
        req->rq_xtime = jiffies;
        /* rq_bytes_sent lets a partially-sent record resume where
         * the previous call left off. */
        status = xs_sendpages(xprt->sock, NULL, 0, xdr,
                              req->rq_bytes_sent);

        dprintk("RPC:      xs_tcp_send_request(%u) = %d\n",
                xdr->len - req->rq_bytes_sent, status);

        if (unlikely(status < 0))
            break;

        /* If we've sent the entire packet, immediately
         * reset the count of bytes sent. */
        req->rq_bytes_sent += status;
        task->tk_bytes_sent += status;
        if (likely(req->rq_bytes_sent >= req->rq_slen)) {
            req->rq_bytes_sent = 0;
            return 0;
        }

        /* Partial send: retry a bounded number of times before
         * reporting the socket as blocked. */
        status = -EAGAIN;
        if (retry++ > XS_SENDMSG_RETRY)
            break;
    }

    switch (status) {
    case -EAGAIN:
        /* Socket buffer is full: arrange to be woken when space frees. */
        xs_nospace(task);
        break;
    case -ECONNREFUSED:
    case -ECONNRESET:
    case -ENOTCONN:
    case -EPIPE:
        /* Collapse all connection-level failures into ENOTCONN so the
         * caller knows to run connect logic and retry. */
        status = -ENOTCONN;
        break;
    default:
        dprintk("RPC:      sendmsg returned unrecognized error %d\n",
                -status);
        xprt_disconnect(xprt);
        break;
    }

    return status;
}
/**
 * xs_destroy - prepare to shutdown a transport
 * @xprt: doomed transport
 *
 * Cancels any pending connect work, disconnects and closes the
 * underlying socket, then frees the transport's slot table.
 */
static void xs_destroy(struct rpc_xprt *xprt)
{
	dprintk("RPC:      xs_destroy xprt %p\n", xprt);

	/* If the connect worker was already running, cancel fails and we
	 * fall back to draining the whole shared workqueue so the worker
	 * cannot touch xprt after we free it below.
	 * NOTE(review): flush_scheduled_work() waits on *all* scheduled
	 * work and is deadlock-prone if called from a workqueue context —
	 * confirm callers never invoke xs_destroy from rpciod. */
	cancel_delayed_work(&xprt->connect_worker);
	flush_scheduled_work();

	/* Teardown order matters: mark disconnected, close the socket,
	 * then release the request slot memory. */
	xprt_disconnect(xprt);
	xs_close(xprt);
	kfree(xprt->slot);
}
Example #4
0
/**
 * xs_destroy - prepare to shutdown a transport
 * @xprt: doomed transport
 *
 * Stops the connect worker, disconnects and closes the underlying
 * socket, then frees the transport's slot table.
 */
static void xs_destroy(struct rpc_xprt *xprt)
{
    dprintk("RPC:      xs_destroy xprt %p\n", xprt);

    /* If the connect worker is already executing, cancel fails; wait
     * for the XPRT_CONNECTING bit to clear instead so the worker is
     * guaranteed finished before we tear the transport down. */
    if (!cancel_delayed_work(&xprt->connect_worker))
        wait_on_bit(&xprt->state, XPRT_CONNECTING,
                    xs_wait_bit_uninterruptible, TASK_UNINTERRUPTIBLE);

    /* Teardown order matters: mark disconnected, close the socket,
     * then release the request slot memory. */
    xprt_disconnect(xprt);
    xs_close(xprt);
    kfree(xprt->slot);
}
Example #5
0
/*
 * tcp_read_sock() callback: feed socket data through the transport's
 * TCP receive state machine until this skb span is exhausted.  Which
 * reader runs is selected by the XPRT_COPY_* bits in xprt->tcp_flags;
 * each reader advances the state and consumes part of the reader's
 * byte count.  Returns the number of bytes consumed.
 */
static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len)
{
    struct rpc_xprt *xprt = rd_desc->arg.data;
    skb_reader_t reader = {
        .skb	= skb,
        .offset	= offset,
        .count	= len,
        .csum	= 0
    };

    dprintk("RPC:      xs_tcp_data_recv started\n");
    do {
        /* Dispatch on the current receive state: record marker
         * first, then xid, then request payload; anything left
         * over on a short read is discarded. */
        if (xprt->tcp_flags & XPRT_COPY_RECM)
            xs_tcp_read_fraghdr(xprt, &reader);
        else if (xprt->tcp_flags & XPRT_COPY_XID)
            xs_tcp_read_xid(xprt, &reader);
        else if (xprt->tcp_flags & XPRT_COPY_DATA)
            xs_tcp_read_request(xprt, &reader);
        else
            xs_tcp_read_discard(xprt, &reader);
    } while (reader.count);
    dprintk("RPC:      xs_tcp_data_recv done\n");
    return len - reader.count;
}

/**
 * xs_tcp_data_ready - "data ready" callback for TCP sockets
 * @sk: socket with data to read
 * @bytes: how much data to read
 *
 */
static void xs_tcp_data_ready(struct sock *sk, int bytes)
{
    struct rpc_xprt *xprt;
    read_descriptor_t rd_desc;

    read_lock(&sk->sk_callback_lock);
    dprintk("RPC:      xs_tcp_data_ready...\n");
    if (!(xprt = xprt_from_sock(sk)))
        goto out;
    if (xprt->shutdown)
        goto out;

    /* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
    rd_desc.arg.data = xprt;
    rd_desc.count = 65536;
    tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
out:
    read_unlock(&sk->sk_callback_lock);
}

/**
 * xs_tcp_state_change - callback to handle TCP socket state changes
 * @sk: socket whose state has changed
 *
 * On TCP_ESTABLISHED, resets the transport's TCP receive state machine
 * and wakes pending tasks; on TCP_CLOSE_WAIT, schedules an autoclose;
 * every other state transition disconnects the transport.
 */
static void xs_tcp_state_change(struct sock *sk)
{
    struct rpc_xprt *xprt;

    read_lock(&sk->sk_callback_lock);
    if (!(xprt = xprt_from_sock(sk)))
        goto out;
    dprintk("RPC:      xs_tcp_state_change client %p...\n", xprt);
    dprintk("RPC:      state %x conn %d dead %d zapped %d\n",
            sk->sk_state, xprt_connected(xprt),
            sock_flag(sk, SOCK_DEAD),
            sock_flag(sk, SOCK_ZAPPED));

    switch (sk->sk_state) {
    case TCP_ESTABLISHED:
        spin_lock_bh(&xprt->transport_lock);
        if (!xprt_test_and_set_connected(xprt)) {
            /* Reset TCP record info */
            xprt->tcp_offset = 0;
            xprt->tcp_reclen = 0;
            xprt->tcp_copied = 0;

            /* Start the receive state machine at "read record
             * marker, then xid", preserving the source-address
             * flag if it was set. */
            if (xprt->tcp_flags & XPRT_SRCADDR_PRESENT)
                xprt->tcp_flags = XPRT_SRCADDR_PRESENT |
                                  XPRT_COPY_RECM |
                                  XPRT_COPY_XID;
            else
                xprt->tcp_flags = XPRT_COPY_RECM |
                                  XPRT_COPY_XID;

            xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
            xprt_wake_pending_tasks(xprt, 0);
        }
        spin_unlock_bh(&xprt->transport_lock);
        break;
    case TCP_SYN_SENT:
    case TCP_SYN_RECV:
        /* Connection still being established; nothing to do yet. */
        break;
    case TCP_CLOSE_WAIT:
        /* Try to schedule an autoclose RPC call */
        set_bit(XPRT_CLOSE_WAIT, &xprt->state);
        if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
            queue_work(rpciod_workqueue, &xprt->task_cleanup);
        /* fall through — CLOSE_WAIT also marks the transport
         * disconnected, like every other non-established state */
    default:
        xprt_disconnect(xprt);
    }
out:
    read_unlock(&sk->sk_callback_lock);
}

/**
 * xs_udp_write_space - callback invoked when socket buffer space
 *                             becomes available
 * @sk: socket whose state has changed
 *
 * Called when more output buffer space is available for this socket.
 * We try not to wake our writers until they can make "significant"
 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
 * with a bunch of small requests.
 */
static void xs_udp_write_space(struct sock *sk)
{
    struct socket *sock;
    struct rpc_xprt *xprt;

    read_lock(&sk->sk_callback_lock);

    /* from net/core/sock.c:sock_def_write_space */
    if (!sock_writeable(sk))
        goto out;
    sock = sk->sk_socket;
    if (!sock)
        goto out;
    xprt = xprt_from_sock(sk);
    if (!xprt)
        goto out;
    /* Only wake writers if someone actually marked the socket as
     * out of space; clears the flag in the same step. */
    if (!test_and_clear_bit(SOCK_NOSPACE, &sock->flags))
        goto out;

    xprt_write_space(xprt);

out:
    read_unlock(&sk->sk_callback_lock);
}