static unsigned int
kni_sock_poll(struct file *file, struct socket *sock, poll_table *wait)
{
        struct kni_vhost_queue *q =
                container_of(sock->sk, struct kni_vhost_queue, sk);
        struct kni_dev *kni;
        unsigned int mask = 0;

        if (unlikely(q == NULL || q->kni == NULL))
                return POLLERR;

        kni = q->kni;
        KNI_DBG("start kni_poll on group %d, wq 0x%16llx\n",
                kni->group_id, (uint64_t)sock->wq);

        poll_wait(file, &sock->wq->wait, wait);

        if (kni_fifo_count(kni->rx_q) > 0)
                mask |= POLLIN | POLLRDNORM;

        if (sock_writeable(&q->sk) ||
            (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock->flags) &&
             sock_writeable(&q->sk)))
                mask |= POLLOUT | POLLWRNORM;

        return mask;
}
unsigned int datagram_poll(struct file *file, struct socket *sock,
                           poll_table *wait)
{
        struct sock *sk = sock->sk;
        unsigned int mask;

        poll_wait(file, sk->sleep, wait);
        mask = 0;

        /* exceptional events? */
        if (sk->err || !skb_queue_empty(&sk->error_queue))
                mask |= POLLERR;
        if (sk->shutdown == SHUTDOWN_MASK)
                mask |= POLLHUP;

        /* readable? */
        if (!skb_queue_empty(&sk->receive_queue) ||
            (sk->shutdown & RCV_SHUTDOWN))
                mask |= POLLIN | POLLRDNORM;

        /* Connection-based need to check for termination and startup */
        if (connection_based(sk)) {
                if (sk->state == TCP_CLOSE)
                        mask |= POLLHUP;
                /* connection hasn't started yet? */
                if (sk->state == TCP_SYN_SENT)
                        return mask;
        }

        /* writable? */
        if (sock_writeable(sk))
                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
        else
                set_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);

        return mask;
}
void dccp_write_space(struct sock *sk)
{
        read_lock(&sk->sk_callback_lock);

        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                wake_up_interruptible(sk->sk_sleep);
        /* Should agree with poll, otherwise some programs break */
        if (sock_writeable(sk))
                sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);

        read_unlock(&sk->sk_callback_lock);
}
void dccp_write_space(struct sock *sk)
{
        read_lock(&sk->sk_callback_lock);

        if (sk_has_sleeper(sk))
                wake_up_interruptible(sk->sk_sleep);
        if (sock_writeable(sk))
                sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);

        read_unlock(&sk->sk_callback_lock);
}
void dccp_write_space(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (wq_has_sleeper(wq))
                wake_up_interruptible(&wq->wait);
        /* Should agree with poll, otherwise some programs break */
        if (sock_writeable(sk))
                sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        rcu_read_unlock();
}
static void kni_sk_write_space(struct sock *sk)
{
        wait_queue_head_t *wqueue;

        if (!sock_writeable(sk) ||
            !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
                return;
        wqueue = sk_sleep(sk);
        if (wqueue && waitqueue_active(wqueue))
                wake_up_interruptible_poll(wqueue,
                        POLLOUT | POLLWRNORM | POLLWRBAND);
}
static int xs_tcp_data_recv(read_descriptor_t *rd_desc,
                            struct sk_buff *skb,
                            unsigned int offset, size_t len)
{
        struct rpc_xprt *xprt = rd_desc->arg.data;
        skb_reader_t desc = {
                .skb    = skb,
                .offset = offset,
                .count  = len,
                .csum   = 0
        };

        dprintk("RPC: xs_tcp_data_recv started\n");
        do {
                /* Read in a new fragment marker if necessary */
                /* Can we ever really expect to get completely empty fragments? */
                if (xprt->tcp_flags & XPRT_COPY_RECM) {
                        xs_tcp_read_fraghdr(xprt, &desc);
                        continue;
                }
                /* Read in the xid if necessary */
                if (xprt->tcp_flags & XPRT_COPY_XID) {
                        xs_tcp_read_xid(xprt, &desc);
                        continue;
                }
                /* Read in the request data */
                if (xprt->tcp_flags & XPRT_COPY_DATA) {
                        xs_tcp_read_request(xprt, &desc);
                        continue;
                }
                /* Skip over any trailing bytes on short reads */
                xs_tcp_read_discard(xprt, &desc);
        } while (desc.count);
        dprintk("RPC: xs_tcp_data_recv done\n");
        return len - desc.count;
}

/**
 * xs_tcp_data_ready - "data ready" callback for TCP sockets
 * @sk: socket with data to read
 * @bytes: how much data to read
 *
 */
static void xs_tcp_data_ready(struct sock *sk, int bytes)
{
        struct rpc_xprt *xprt;
        read_descriptor_t rd_desc;

        read_lock(&sk->sk_callback_lock);
        dprintk("RPC: xs_tcp_data_ready...\n");
        if (!(xprt = xprt_from_sock(sk)))
                goto out;
        if (xprt->shutdown)
                goto out;

        /* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
        rd_desc.arg.data = xprt;
        rd_desc.count = 65536;
        tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
out:
        read_unlock(&sk->sk_callback_lock);
}

/**
 * xs_tcp_state_change - callback to handle TCP socket state changes
 * @sk: socket whose state has changed
 *
 */
static void xs_tcp_state_change(struct sock *sk)
{
        struct rpc_xprt *xprt;

        read_lock(&sk->sk_callback_lock);
        if (!(xprt = xprt_from_sock(sk)))
                goto out;
        dprintk("RPC: xs_tcp_state_change client %p...\n", xprt);
        dprintk("RPC: state %x conn %d dead %d zapped %d\n",
                sk->sk_state, xprt_connected(xprt),
                sock_flag(sk, SOCK_DEAD),
                sock_flag(sk, SOCK_ZAPPED));

        switch (sk->sk_state) {
        case TCP_ESTABLISHED:
                spin_lock_bh(&xprt->transport_lock);
                if (!xprt_test_and_set_connected(xprt)) {
                        /* Reset TCP record info */
                        xprt->tcp_offset = 0;
                        xprt->tcp_reclen = 0;
                        xprt->tcp_copied = 0;
                        if (xprt->tcp_flags & XPRT_SRCADDR_PRESENT)
                                xprt->tcp_flags = XPRT_SRCADDR_PRESENT |
                                        XPRT_COPY_RECM | XPRT_COPY_XID;
                        else
                                xprt->tcp_flags = XPRT_COPY_RECM | XPRT_COPY_XID;
                        xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
                        xprt_wake_pending_tasks(xprt, 0);
                }
                spin_unlock_bh(&xprt->transport_lock);
                break;
        case TCP_SYN_SENT:
        case TCP_SYN_RECV:
                break;
        case TCP_CLOSE_WAIT:
                /* Try to schedule an autoclose RPC calls */
                set_bit(XPRT_CLOSE_WAIT, &xprt->state);
                if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
                        queue_work(rpciod_workqueue, &xprt->task_cleanup);
        default:
                xprt_disconnect(xprt);
        }
out:
        read_unlock(&sk->sk_callback_lock);
}

/**
 * xs_udp_write_space - callback invoked when socket buffer space
 *                      becomes available
 * @sk: socket whose state has changed
 *
 * Called when more output buffer space is available for this socket.
 * We try not to wake our writers until they can make "significant"
 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
 * with a bunch of small requests.
 */
static void xs_udp_write_space(struct sock *sk)
{
        read_lock(&sk->sk_callback_lock);

        /* from net/core/sock.c:sock_def_write_space */
        if (sock_writeable(sk)) {
                struct socket *sock;
                struct rpc_xprt *xprt;

                if (unlikely(!(sock = sk->sk_socket)))
                        goto out;
                if (unlikely(!(xprt = xprt_from_sock(sk))))
                        goto out;
                if (unlikely(!test_and_clear_bit(SOCK_NOSPACE, &sock->flags)))
                        goto out;

                xprt_write_space(xprt);
        }
out:
        read_unlock(&sk->sk_callback_lock);
}