Example #1
File: iscsi_tcp.c  Project: Addision/LVS
static void iscsi_sw_tcp_data_ready(struct sock *sk, int flag)
{
	struct iscsi_conn *conn;
	struct iscsi_tcp_conn *tcp_conn;
	read_descriptor_t rd_desc;

	read_lock(&sk->sk_callback_lock);
	conn = sk->sk_user_data;
	if (!conn) {
		read_unlock(&sk->sk_callback_lock);
		return;
	}
	tcp_conn = conn->dd_data;

	/*
	 * Use rd_desc to pass 'conn' to iscsi_tcp_recv.
	 * We set count to 1 because we want the network layer to
	 * hand us all the skbs that are available. iscsi_tcp_recv
	 * handles PDUs that cross buffers or PDUs that still need data.
	 */
	rd_desc.arg.data = conn;
	rd_desc.count = 1;
	tcp_read_sock(sk, &rd_desc, iscsi_sw_tcp_recv);

	iscsi_sw_sk_state_check(sk);

	/* If we had to (atomically) map a highmem page,
	 * unmap it now. */
	iscsi_tcp_segment_unmap(&tcp_conn->in.segment);
	read_unlock(&sk->sk_callback_lock);
}
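Context note: a callback like this only runs after the driver has installed it on the socket. The usual pattern (iscsi_tcp.c does this in its connection-binding path) is to publish the private pointer and swap in the callback while holding sk_callback_lock for writing, which is what the read_lock() above pairs with. A minimal sketch, with my_conn and my_conn_set_callbacks as illustrative names, not taken from the driver:

struct my_conn {
	void (*old_data_ready)(struct sock *sk, int bytes);
};

static void my_conn_set_callbacks(struct my_conn *conn, struct sock *sk)
{
	/* Writers take sk_callback_lock exclusively; the data_ready
	 * callback takes it shared, so the two cannot race. */
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_user_data = conn;
	conn->old_data_ready = sk->sk_data_ready;	/* saved for teardown */
	sk->sk_data_ready = iscsi_sw_tcp_data_ready;
	write_unlock_bh(&sk->sk_callback_lock);
}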
Example #2
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0)
static void siw_qp_llp_data_ready(struct sock *sk, int bytes)
#else
static void siw_qp_llp_data_ready(struct sock *sk)
#endif
{
	struct siw_qp		*qp;

	read_lock(&sk->sk_callback_lock);

	if (unlikely(!sk->sk_user_data || !sk_to_qp(sk))) {
		dprint(DBG_ON, " No QP: %p\n", sk->sk_user_data);
		goto done;
	}
	qp = sk_to_qp(sk);

	if (down_read_trylock(&qp->state_lock)) {
		read_descriptor_t	rd_desc = {.arg.data = qp, .count = 1};

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0)
		dprint(DBG_SK|DBG_RX, "(QP%d): "
			"state (before tcp_read_sock)=%d, bytes=%x\n",
			QP_ID(qp), qp->attrs.state, bytes);
#else
		dprint(DBG_SK|DBG_RX, "(QP%d): "
			"state (before tcp_read_sock)=%d\n",
			QP_ID(qp), qp->attrs.state);
#endif

		if (likely(qp->attrs.state == SIW_QP_STATE_RTS))
			/*
			 * Implements data receive operation during
			 * socket callback. TCP gracefully catches
			 * the case where there is nothing to receive
			 * (not calling siw_tcp_rx_data() then).
			 */
			tcp_read_sock(sk, &rd_desc, siw_tcp_rx_data);

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0)
		dprint(DBG_SK|DBG_RX, "(QP%d): "
			"state (after tcp_read_sock)=%d, bytes=%x\n",
			QP_ID(qp), qp->attrs.state, bytes);
#else
		dprint(DBG_SK|DBG_RX, "(QP%d): "
			"state (after tcp_read_sock)=%d\n",
			QP_ID(qp), qp->attrs.state);
#endif

		up_read(&qp->state_lock);
	} else {
		/* state_lock is contended: skip this receive. Nothing is
		 * lost; the data stays on the socket receive queue until
		 * the next data_ready callback or an explicit read. */
	}
done:
	read_unlock(&sk->sk_callback_lock);
}
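A note on the #if guards above: the sk_data_ready prototype changed in Linux 3.15, when the unused byte-count argument was removed:

	/* before v3.15 */ void (*sk_data_ready)(struct sock *sk, int bytes);
	/* since v3.15  */ void (*sk_data_ready)(struct sock *sk);

That is why the pre-3.15 dprint() can print bytes while the newer variant cannot, and why Examples #1 and #3, written against older kernels, take an extra int parameter. Note also the down_read_trylock(): data_ready runs in bottom-half context and must not sleep, so when the QP state lock is contended the receive is skipped and the data simply stays queued on the socket.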
Example #3
static void
iscsi_tcp_data_ready(struct sock *sk, int flag)
{
	struct iscsi_conn *conn;
	read_descriptor_t rd_desc;

	read_lock(&sk->sk_callback_lock);
	/* sk_user_data must be read under sk_callback_lock; fetching it
	 * before taking the lock races with connection teardown clearing
	 * the pointer. */
	conn = sk->sk_user_data;
	if (!conn) {
		read_unlock(&sk->sk_callback_lock);
		return;
	}

	/*
	 * Use rd_desc to pass 'conn' to iscsi_tcp_data_recv.
	 * We set count to 1 because we want the network layer to
	 * hand us all the skbs that are available. iscsi_tcp_data_recv
	 * handles PDUs that cross buffers or PDUs that still need data.
	 */
	rd_desc.arg.data = conn;
	rd_desc.count = 1;
	tcp_read_sock(sk, &rd_desc, iscsi_tcp_data_recv);

	read_unlock(&sk->sk_callback_lock);
}
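All three callbacks above delegate the actual reading to tcp_read_sock(), whose contract (from include/net/tcp.h; parameter names added here for clarity) is:

typedef int (*sk_read_actor_t)(read_descriptor_t *desc, struct sk_buff *skb,
			       unsigned int offset, size_t len);
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor);

tcp_read_sock() walks the socket receive queue and calls the actor once per skb; the actor returns how many bytes it consumed, and the walk stops when the queue is drained or desc->count drops to zero. None of the callbacks shown here ever decrement count, so any nonzero value behaves the same: the iSCSI drivers pass 1 (as their comments explain), while sunrpc passes 65536. Example #4 below includes a complete recv actor, xs_tcp_data_recv().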
Example #4
static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len)
{
    struct rpc_xprt *xprt = rd_desc->arg.data;
    skb_reader_t desc = {
        .skb	= skb,
        .offset	= offset,
        .count	= len,
        .csum	= 0
    };

    dprintk("RPC:      xs_tcp_data_recv started\n");
    do {
        /* Read in a new fragment marker if necessary */
        /* Can we ever really expect to get completely empty fragments? */
        if (xprt->tcp_flags & XPRT_COPY_RECM) {
            xs_tcp_read_fraghdr(xprt, &desc);
            continue;
        }
        /* Read in the xid if necessary */
        if (xprt->tcp_flags & XPRT_COPY_XID) {
            xs_tcp_read_xid(xprt, &desc);
            continue;
        }
        /* Read in the request data */
        if (xprt->tcp_flags & XPRT_COPY_DATA) {
            xs_tcp_read_request(xprt, &desc);
            continue;
        }
        /* Skip over any trailing bytes on short reads */
        xs_tcp_read_discard(xprt, &desc);
    } while (desc.count);
    dprintk("RPC:      xs_tcp_data_recv done\n");
    return len - desc.count;
}
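Background for the XPRT_COPY_RECM branch above: RPC over TCP uses record marking (RFC 5531), where each fragment is preceded by a 4-byte big-endian word whose high bit flags the final fragment of a record and whose low 31 bits give the fragment length. A decoding sketch (these helpers are illustrative, not taken from xprtsock.c):

#define RPC_LAST_STREAM_FRAGMENT 0x80000000U

static inline bool rpc_frag_is_last(__be32 marker)
{
    return be32_to_cpu(marker) & RPC_LAST_STREAM_FRAGMENT;
}

static inline u32 rpc_frag_len(__be32 marker)
{
    return be32_to_cpu(marker) & ~RPC_LAST_STREAM_FRAGMENT;
}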

/**
 * xs_tcp_data_ready - "data ready" callback for TCP sockets
 * @sk: socket with data to read
 * @bytes: how much data to read
 *
 */
static void xs_tcp_data_ready(struct sock *sk, int bytes)
{
    struct rpc_xprt *xprt;
    read_descriptor_t rd_desc;

    read_lock(&sk->sk_callback_lock);
    dprintk("RPC:      xs_tcp_data_ready...\n");
    if (!(xprt = xprt_from_sock(sk)))
        goto out;
    if (xprt->shutdown)
        goto out;

    /* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
    rd_desc.arg.data = xprt;
    rd_desc.count = 65536;
    tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
out:
    read_unlock(&sk->sk_callback_lock);
}

/**
 * xs_tcp_state_change - callback to handle TCP socket state changes
 * @sk: socket whose state has changed
 *
 */
static void xs_tcp_state_change(struct sock *sk)
{
    struct rpc_xprt *xprt;

    read_lock(&sk->sk_callback_lock);
    if (!(xprt = xprt_from_sock(sk)))
        goto out;
    dprintk("RPC:      xs_tcp_state_change client %p...\n", xprt);
    dprintk("RPC:      state %x conn %d dead %d zapped %d\n",
            sk->sk_state, xprt_connected(xprt),
            sock_flag(sk, SOCK_DEAD),
            sock_flag(sk, SOCK_ZAPPED));

    switch (sk->sk_state) {
    case TCP_ESTABLISHED:
        spin_lock_bh(&xprt->transport_lock);
        if (!xprt_test_and_set_connected(xprt)) {
            /* Reset TCP record info */
            xprt->tcp_offset = 0;
            xprt->tcp_reclen = 0;
            xprt->tcp_copied = 0;

            if (xprt->tcp_flags & XPRT_SRCADDR_PRESENT)
                xprt->tcp_flags = XPRT_SRCADDR_PRESENT |
                                  XPRT_COPY_RECM |
                                  XPRT_COPY_XID;
            else
                xprt->tcp_flags = XPRT_COPY_RECM |
                                  XPRT_COPY_XID;

            xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
            xprt_wake_pending_tasks(xprt, 0);
        }
        spin_unlock_bh(&xprt->transport_lock);
        break;
    case TCP_SYN_SENT:
    case TCP_SYN_RECV:
        break;
    case TCP_CLOSE_WAIT:
        /* Try to schedule an autoclose RPC call */
        set_bit(XPRT_CLOSE_WAIT, &xprt->state);
        if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
            queue_work(rpciod_workqueue, &xprt->task_cleanup);
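        /* fall through: CLOSE_WAIT also disconnects the transport */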
    default:
        xprt_disconnect(xprt);
    }
out:
    read_unlock(&sk->sk_callback_lock);
}

/**
 * xs_udp_write_space - callback invoked when socket buffer space
 *                             becomes available
 * @sk: socket whose state has changed
 *
 * Called when more output buffer space is available for this socket.
 * We try not to wake our writers until they can make "significant"
 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
 * with a bunch of small requests.
 */
static void xs_udp_write_space(struct sock *sk)
{
    read_lock(&sk->sk_callback_lock);

    /* from net/core/sock.c:sock_def_write_space */
    if (sock_writeable(sk)) {
        struct socket *sock;
        struct rpc_xprt *xprt;

        if (unlikely(!(sock = sk->sk_socket)))
            goto out;
        if (unlikely(!(xprt = xprt_from_sock(sk))))
            goto out;
        if (unlikely(!test_and_clear_bit(SOCK_NOSPACE, &sock->flags)))
            goto out;

        xprt_write_space(xprt);
    }

out:
    read_unlock(&sk->sk_callback_lock);
}
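The test_and_clear_bit(SOCK_NOSPACE, ...) above is the receiving half of a handshake: the transmit path sets the bit when a send would block, and this callback wakes RPC writers only if the bit was set, which avoids spurious wakeups on idle sockets. A sketch of the transmit side (my_udp_send() is a hypothetical helper, not sunrpc's actual send path):

static int my_udp_send(struct socket *sock, struct kvec *vec, size_t len)
{
    struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
    int ret = kernel_sendmsg(sock, &msg, vec, 1, len);

    if (ret == -EAGAIN)
        /* Buffer full: arm the write_space wakeup. Without this bit set,
         * xs_udp_write_space() returns without calling xprt_write_space(). */
        set_bit(SOCK_NOSPACE, &sock->flags);
    return ret;
}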