/**
 * sk_stream_wait_connect - Wait for a socket to get into the connected state
 * @sk: sock to wait on
 * @timeo_p: for how long to wait
 *
 * Must be called with the socket locked.
 */
int sk_stream_wait_connect(struct sock *sk, long *timeo_p)
{
	struct task_struct *tsk = current;
	DEFINE_WAIT(wait);
	int done;

	do {
		int err = sock_error(sk);
		if (err)
			return err;
		if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV))
			return -EPIPE;
		if (!*timeo_p)
			return -EAGAIN;
		if (signal_pending(tsk))
			return sock_intr_errno(*timeo_p);

		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		sk->sk_write_pending++;
		done = sk_wait_event(sk, timeo_p,
				     !sk->sk_err &&
				     !((1 << sk->sk_state) &
				       ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)));
		finish_wait(sk_sleep(sk), &wait);
		sk->sk_write_pending--;
	} while (!done);
	return 0;
}
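For context, a minimal caller sketch of how a sendmsg-style path typically drives this helper (the same pattern pep_sendmsg uses further down). The wrapper function and its name are illustrative assumptions; only sk_stream_wait_connect(), lock_sock()/release_sock() and sock_sndtimeo() are the real APIs shown here.

/* Hypothetical caller sketch: block (up to the send timeout) until the
 * socket leaves SYN_SENT/SYN_RECV before queuing data.  Everything except
 * the named kernel helpers is an assumption for illustration. */
static int example_sendmsg_wait(struct sock *sk, int flags)
{
	long timeo;
	int err;

	lock_sock(sk);
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
		/* Returns 0 once connected, or -EPIPE/-EAGAIN/a
		 * signal-derived error, updating timeo as it waits. */
		err = sk_stream_wait_connect(sk, &timeo);
		if (err) {
			release_sock(sk);
			return err;
		}
	}
	/* ... queue and transmit data here ... */
	release_sock(sk);
	return 0;
}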
static int pep_wait_connreq(struct sock *sk, int noblock)
{
	struct task_struct *tsk = current;
	struct pep_sock *pn = pep_sk(sk);
	long timeo = sock_rcvtimeo(sk, noblock);

	for (;;) {
		DEFINE_WAIT(wait);

		if (sk->sk_state != TCP_LISTEN)
			return -EINVAL;
		if (!hlist_empty(&pn->ackq))
			break;
		if (!timeo)
			return -EWOULDBLOCK;
		if (signal_pending(tsk))
			return sock_intr_errno(timeo);

		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		finish_wait(sk_sleep(sk), &wait);
	}

	return 0;
}
/*
 * wait for space to appear in the transmit/ACK window
 * - caller holds the socket locked
 */
static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx,
				    struct rxrpc_call *call,
				    long *timeo)
{
	DECLARE_WAITQUEUE(myself, current);
	int ret;

	_enter(",{%d},%ld",
	       CIRC_SPACE(call->acks_head, call->acks_tail, call->acks_winsz),
	       *timeo);

	add_wait_queue(&call->tx_waitq, &myself);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		ret = 0;
		if (CIRC_SPACE(call->acks_head, call->acks_tail,
			       call->acks_winsz) > 0)
			break;
		if (signal_pending(current)) {
			ret = sock_intr_errno(*timeo);
			break;
		}

		release_sock(&rx->sk);
		*timeo = schedule_timeout(*timeo);
		lock_sock(&rx->sk);
	}

	remove_wait_queue(&call->tx_waitq, &myself);
	set_current_state(TASK_RUNNING);
	_leave(" = %d", ret);
	return ret;
}
int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
{
	int err = 0;
	long vm_wait = 0;
	long current_timeo = *timeo_p;
	DEFINE_WAIT(wait);

	if (sk_stream_memory_free(sk))
		current_timeo = vm_wait = (net_random() % (HZ / 5)) + 2;

	while (1) {
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

		if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
			goto do_error;
		if (!*timeo_p)
			goto do_nonblock;
		if (signal_pending(current))
			goto do_interrupted;
		clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
		if (sk_stream_memory_free(sk) && !vm_wait)
			break;

		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		sk->sk_write_pending++;
		sk_wait_event(sk, &current_timeo, sk->sk_err ||
						  (sk->sk_shutdown & SEND_SHUTDOWN) ||
						  (sk_stream_memory_free(sk) &&
						   !vm_wait));
		sk->sk_write_pending--;

		if (vm_wait) {
			vm_wait -= current_timeo;
			current_timeo = *timeo_p;
			if (current_timeo != MAX_SCHEDULE_TIMEOUT &&
			    (current_timeo -= vm_wait) < 0)
				current_timeo = 0;
			vm_wait = 0;
		}
		*timeo_p = current_timeo;
	}
out:
	finish_wait(sk_sleep(sk), &wait);
	return err;

do_error:
	err = -EPIPE;
	goto out;
do_nonblock:
	err = -EAGAIN;
	goto out;
do_interrupted:
	err = sock_intr_errno(*timeo_p);
	goto out;
}
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock,
			     int flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, L2CAP_NESTING_PARENT);

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (1) {
		if (sk->sk_state != BT_LISTEN) {
			err = -EBADFD;
			break;
		}

		nsk = bt_accept_dequeue(sk, newsock);
		if (nsk)
			break;

		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);

		timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);

		lock_sock_nested(sk, L2CAP_NESTING_PARENT);
	}
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);

done:
	release_sock(sk);
	return err;
}
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock,
			     int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != BT_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != BT_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);

done:
	release_sock(sk);
	return err;
}
/*
 * Wrapper for the above, for allocs of data skbs. We try to get the
 * whole size that's been asked for (plus 11 bytes of header). If this
 * fails, then we try for any size over 16 bytes for SOCK_STREAM sockets.
 */
struct sk_buff *dn_alloc_send_skb(struct sock *sk, size_t *size, int noblock,
				  long timeo, int *err)
{
	int space;
	int len;
	struct sk_buff *skb = NULL;

	*err = 0;

	while (skb == NULL) {
		if (signal_pending(current)) {
			*err = sock_intr_errno(timeo);
			break;
		}

		if (sk->sk_shutdown & SEND_SHUTDOWN) {
			*err = EINVAL;
			break;
		}

		if (sk->sk_err)
			break;

		len = *size + 11;
		space = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);

		if (space < len) {
			if ((sk->sk_socket->type == SOCK_STREAM) &&
			    (space >= (16 + 11)))
				len = space;
		}

		if (space < len) {
			set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
			if (noblock) {
				*err = EWOULDBLOCK;
				break;
			}

			clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
			SOCK_SLEEP_PRE(sk)

			if ((sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc)) < len)
				schedule();

			SOCK_SLEEP_POST(sk)
			continue;
		}

		if ((skb = dn_alloc_skb(sk, len, sk->sk_allocation)) == NULL)
			continue;

		*size = len - 11;
	}

	return skb;
}
static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
{
	int error;

	DECLARE_WAITQUEUE(wait, current);

	__set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue_exclusive(sk->sleep, &wait);

	/* Socket errors? */
	error = sock_error(sk);
	if (error)
		goto out_err;

	if (!skb_queue_empty(&sk->receive_queue))
		goto ready;

	/* Socket shut down? */
	if (sk->shutdown & RCV_SHUTDOWN)
		goto out_noerr;

	/* Sequenced packets can come disconnected. If so we report the problem */
	error = -ENOTCONN;
	if (connection_based(sk) &&
	    !(sk->state == TCP_ESTABLISHED || sk->state == TCP_LISTEN))
		goto out_err;

	/* handle signals */
	if (signal_pending(current))
		goto interrupted;

	*timeo_p = schedule_timeout(*timeo_p);

ready:
	current->state = TASK_RUNNING;
	remove_wait_queue(sk->sleep, &wait);
	return 0;

interrupted:
	error = sock_intr_errno(*timeo_p);
out_err:
	*err = error;
out:
	current->state = TASK_RUNNING;
	remove_wait_queue(sk->sleep, &wait);
	return error;
out_noerr:
	*err = 0;
	error = 1;
	goto out;
}
/*
 * Wait for a packet..
 */
static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
{
	int error;
	DEFINE_WAIT_FUNC(wait, receiver_wake_function);

	/* The preceding step only initialised the wait entry; this call adds
	 * the task to the socket's wait queue in preparation for sleeping.
	 * The scheduler internals are not needed to follow the code - treat
	 * it as bookkeeping done before the task can block. */
	prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

	/* Socket errors? */
	error = sock_error(sk);
	if (error)
		goto out_err;

	/* Completeness check: a packet may have arrived between the decision
	 * to wait and this point, so re-check whether the receive queue is
	 * still empty before sleeping. */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		goto out;

	/* Socket shut down? */
	/* Completeness check: there may be nothing to read because another
	 * thread has already shut the socket down; bailing out here ensures
	 * closing a socket does not leave readers blocked forever. */
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		goto out_noerr;

	/* Sequenced packets can come disconnected.
	 * If so we report the problem.
	 * Connection-based sockets that are neither established nor listening
	 * cannot receive packets, so anything else is an error. */
	error = -ENOTCONN;
	if (connection_based(sk) &&
	    !(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_LISTEN))
		goto out_err;

	/* handle signals - check for a pending signal so that the process can
	 * still be woken by a signal while it is blocked */
	if (signal_pending(current))
		goto interrupted;

	error = 0;
	/* Sleep until the wake-up condition is met or a signal arrives -
	 * possible because the task state was set to TASK_INTERRUPTIBLE. */
	*timeo_p = schedule_timeout(*timeo_p);
out:
	finish_wait(sk_sleep(sk), &wait);	/* clean up the wait-queue entry */
	return error;
interrupted:
	error = sock_intr_errno(*timeo_p);
out_err:
	*err = error;
	goto out;
out_noerr:
	*err = 0;
	error = 1;
	goto out;
}
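For orientation, a hedged sketch of the loop that typically drives wait_for_packet(), modelled loosely on the datagram receive path. The wrapper name and the lock-free queue peek are illustrative simplifications; only wait_for_packet(), sock_rcvtimeo() and the sk_receive_queue usage mirror the code above.

/* Hypothetical receive-loop sketch (locking elided): peek the queue, and if
 * it is empty, block in wait_for_packet() until data, an error, shutdown,
 * or timeout.  wait_for_packet() returns 0 to retry, a negative error, or
 * 1 on shutdown with *err set to 0. */
static struct sk_buff *example_recv_datagram(struct sock *sk, int flags,
					     int *err)
{
	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	struct sk_buff *skb;

	do {
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			return skb;	/* caller dequeues/copies it */

		*err = -EAGAIN;
		if (!timeo)
			break;		/* non-blocking, or timeout exhausted */
	} while (!wait_for_packet(sk, err, &timeo));

	return NULL;			/* *err carries the reason */
}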
/* blocks sndbuf producer until at least one byte of free space available */
static int smc_tx_wait_memory(struct smc_sock *smc, int flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct smc_connection *conn = &smc->conn;
	struct sock *sk = &smc->sk;
	bool noblock;
	long timeo;
	int rc = 0;

	/* similar to sk_stream_wait_memory */
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	noblock = timeo ? false : true;
	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		if (sk->sk_err ||
		    (sk->sk_shutdown & SEND_SHUTDOWN) ||
		    conn->local_tx_ctrl.conn_state_flags.peer_done_writing) {
			rc = -EPIPE;
			break;
		}
		if (smc_cdc_rxed_any_close(conn)) {
			rc = -ECONNRESET;
			break;
		}
		if (!timeo) {
			if (noblock)
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			rc = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			rc = sock_intr_errno(timeo);
			break;
		}
		sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		if (atomic_read(&conn->sndbuf_space))
			break; /* at least 1 byte of free space available */
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		sk_wait_event(sk, &timeo,
			      sk->sk_err ||
			      (sk->sk_shutdown & SEND_SHUTDOWN) ||
			      smc_cdc_rxed_any_close(conn) ||
			      atomic_read(&conn->sndbuf_space),
			      &wait);
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}
/* Asynchronous variant of wait_for_packet(), run from a work item.  This is
 * a fragment: it references state (the wait-queue entry, timeo_p, err) that
 * must be carried by the skb_async_info structure, and several paths still
 * "return" values even though the worker itself is void - artefacts of the
 * code being lifted from the synchronous wait_for_packet() above. */
static void skb_async_read_worker(void *_data)
{
	struct skb_async_info *info = _data;
	struct sock *sk = info->sk;	/* assumption: info carries the sock */
	int error;

	/* Socket errors? */
	error = sock_error(sk);
	if (error)
		goto out_err;

	if (!skb_queue_empty(&sk->receive_queue))
		goto ready;

	/* Socket shut down? */
	if (sk->shutdown & RCV_SHUTDOWN)
		goto out_noerr;

	/* Sequenced packets can come disconnected. If so we report the problem */
	error = -ENOTCONN;
	if (connection_based(sk) &&
	    !(sk->state == TCP_ESTABLISHED || sk->state == TCP_LISTEN))
		goto out_err;

	/* handle signals */
	if (signal_pending(current))
		goto interrupted;

	/* here: queue sleep */
	*timeo_p = schedule_timeout(*timeo_p);
	return;

ready:
	current->state = TASK_RUNNING;
	remove_wait_queue(sk->sleep, &wait);
	return 0;

interrupted:
	error = sock_intr_errno(*timeo_p);
out_err:
	*err = error;
out:
	current->state = TASK_RUNNING;
	remove_wait_queue(sk->sleep, &wait);
	return error;
out_noerr:
	*err = 0;
	error = 1;
	goto out;
}
/*
 * Wait for the last received packet to be different from skb
 */
static int wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
				 const struct sk_buff *skb)
{
	int error;
	DEFINE_WAIT_FUNC(wait, receiver_wake_function);

	prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

	/* Socket errors? */
	error = sock_error(sk);
	if (error)
		goto out_err;

	if (sk->sk_receive_queue.prev != skb)
		goto out;

	/* Socket shut down? */
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		goto out_noerr;

	/* Sequenced packets can come disconnected.
	 * If so we report the problem
	 */
	error = -ENOTCONN;
	if (connection_based(sk) &&
	    !(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_LISTEN))
		goto out_err;

	/* handle signals */
	if (signal_pending(current))
		goto interrupted;

	error = 0;
	*timeo_p = schedule_timeout(*timeo_p);
out:
	finish_wait(sk_sleep(sk), &wait);
	return error;
interrupted:
	error = sock_intr_errno(*timeo_p);
out_err:
	*err = error;
	goto out;
out_noerr:
	*err = 0;
	error = 1;
	goto out;
}
/**
 * dccp_wait_for_ccid - Wait for ccid to tell us we can send a packet
 * @sk:    socket to wait for
 * @skb:   current skb to pass on for sending
 * @timeo: for how long
 */
static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb,
			      long *timeo)
{
	struct dccp_sock *dp = dccp_sk(sk);
	DEFINE_WAIT(wait);
	long delay;
	int rc;

	while (1) {
		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);

		if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
			goto do_error;
		if (!*timeo)
			goto do_nonblock;
		if (signal_pending(current))
			goto do_interrupted;

		rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb,
					    skb->len);
		if (rc <= 0)
			break;
		delay = msecs_to_jiffies(rc);
		if (delay > *timeo || delay < 0)
			goto do_nonblock;

		sk->sk_write_pending++;
		release_sock(sk);
		*timeo -= schedule_timeout(delay);
		lock_sock(sk);
		sk->sk_write_pending--;
	}
out:
	finish_wait(sk->sk_sleep, &wait);
	return rc;

do_error:
	rc = -EPIPE;
	goto out;
do_nonblock:
	rc = -EAGAIN;
	goto out;
do_interrupted:
	rc = sock_intr_errno(*timeo);
	goto out;
}
/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination; all error
 * checks are performed and memory in the queue is reserved.
 * Return values:
 * < 0: error. skb freed, reference to sock dropped.
 * 0: continue
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
		      long timeo, struct sock *ssk)
{
	struct netlink_opt *nlk;

	nlk = nlk_sk(sk);

#ifdef NL_EMULATE_DEV
	if (nlk->handler)
		return 0;
#endif
	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
	    test_bit(0, &nlk->state)) {
		DECLARE_WAITQUEUE(wait, current);

		if (!timeo) {
			if (!ssk || nlk_sk(ssk)->pid == 0)
				netlink_overrun(sk);
			sock_put(sk);
			kfree_skb(skb);
			return -EAGAIN;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&nlk->wait, &wait);

		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
		     test_bit(0, &nlk->state)) &&
		    !sock_flag(sk, SOCK_DEAD))
			timeo = schedule_timeout(timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nlk->wait, &wait);
		sock_put(sk);

		if (signal_pending(current)) {
			kfree_skb(skb);
			return sock_intr_errno(timeo);
		}
		return 1;
	}
	skb_orphan(skb);
	skb_set_owner_r(skb, sk);
	return 0;
}
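A hedged sketch of how a sender might consume the return-value contract documented above; example_lookup_sock() and example_deliver() are placeholders, not real kernel functions, and only netlink_attachskb() with its negative / 0 / 1 convention is taken from the code above.

/* Hypothetical caller sketch: retry the destination lookup whenever
 * netlink_attachskb() reports that the reference was dropped while waiting
 * for receive-buffer space. */
static int example_netlink_send(struct sock *ssk, struct sk_buff *skb,
				u32 pid, int nonblock, long timeo)
{
	struct sock *sk;
	int err;

retry:
	sk = example_lookup_sock(pid);	/* placeholder: must take a reference */
	if (sk == NULL) {
		kfree_skb(skb);
		return -ECONNREFUSED;
	}

	err = netlink_attachskb(sk, skb, nonblock, timeo, ssk);
	if (err == 1)
		goto retry;	/* reference dropped while waiting; look up again */
	if (err < 0)
		return err;	/* skb already freed, reference already dropped */

	return example_deliver(sk, skb);	/* placeholder: queue skb, wake receiver */
}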
static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
{
	int error;
	DEFINE_WAIT_FUNC(wait, receiver_wake_function);

	prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

	error = sock_error(sk);
	if (error)
		goto out_err;

	if (!skb_queue_empty(&sk->sk_receive_queue))
		goto out;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		goto out_noerr;

	error = -ENOTCONN;
	if (connection_based(sk) &&
	    !(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_LISTEN))
		goto out_err;

	if (signal_pending(current))
		goto interrupted;

	error = 0;
	*timeo_p = schedule_timeout(*timeo_p);
out:
	finish_wait(sk_sleep(sk), &wait);
	return error;
interrupted:
	error = sock_intr_errno(*timeo_p);
out_err:
	*err = error;
	goto out;
out_noerr:
	*err = 0;
	error = 1;
	goto out;
}
static int pn_socket_connect(struct socket *sock, struct sockaddr *addr,
			     int len, int flags)
{
	struct sock *sk = sock->sk;
	struct pn_sock *pn = pn_sk(sk);
	struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
	struct task_struct *tsk = current;
	long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	int err;

	if (pn_socket_autobind(sock))
		return -ENOBUFS;
	if (len < sizeof(struct sockaddr_pn))
		return -EINVAL;
	if (spn->spn_family != AF_PHONET)
		return -EAFNOSUPPORT;

	lock_sock(sk);

	switch (sock->state) {
	case SS_UNCONNECTED:
		if (sk->sk_state != TCP_CLOSE) {
			err = -EISCONN;
			goto out;
		}
		break;
	case SS_CONNECTING:
		err = -EALREADY;
		goto out;
	default:
		err = -EISCONN;
		goto out;
	}

	pn->dobject = pn_sockaddr_get_object(spn);
	pn->resource = pn_sockaddr_get_resource(spn);
	sock->state = SS_CONNECTING;

	err = sk->sk_prot->connect(sk, addr, len);
	if (err) {
		sock->state = SS_UNCONNECTED;
		pn->dobject = 0;
		goto out;
	}

	while (sk->sk_state == TCP_SYN_SENT) {
		DEFINE_WAIT(wait);

		if (!timeo) {
			err = -EINPROGRESS;
			goto out;
		}
		if (signal_pending(tsk)) {
			err = sock_intr_errno(timeo);
			goto out;
		}

		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		finish_wait(sk_sleep(sk), &wait);
	}

	if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED))
		err = 0;
	else if (sk->sk_state == TCP_CLOSE_WAIT)
		err = -ECONNRESET;
	else
		err = -ECONNREFUSED;
	sock->state = err ? SS_UNCONNECTED : SS_CONNECTED;
out:
	release_sock(sk);
	return err;
}
/*
 * Receive a message from an RxRPC socket
 * - we need to be careful about two or more threads calling recvmsg
 *   simultaneously
 */
int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
		  int flags)
{
	struct rxrpc_call *call;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct list_head *l;
	size_t copied = 0;
	long timeo;
	int ret;

	DEFINE_WAIT(wait);

	trace_rxrpc_recvmsg(NULL, rxrpc_recvmsg_enter, 0, 0, 0, 0);

	if (flags & (MSG_OOB | MSG_TRUNC))
		return -EOPNOTSUPP;

	timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);

try_again:
	lock_sock(&rx->sk);

	/* Return immediately if a client socket has no outstanding calls */
	if (RB_EMPTY_ROOT(&rx->calls) &&
	    list_empty(&rx->recvmsg_q) &&
	    rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
		release_sock(&rx->sk);
		return -ENODATA;
	}

	if (list_empty(&rx->recvmsg_q)) {
		ret = -EWOULDBLOCK;
		if (timeo == 0) {
			call = NULL;
			goto error_no_call;
		}

		release_sock(&rx->sk);

		/* Wait for something to happen */
		prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
					  TASK_INTERRUPTIBLE);
		ret = sock_error(&rx->sk);
		if (ret)
			goto wait_error;

		if (list_empty(&rx->recvmsg_q)) {
			if (signal_pending(current))
				goto wait_interrupted;
			trace_rxrpc_recvmsg(NULL, rxrpc_recvmsg_wait,
					    0, 0, 0, 0);
			timeo = schedule_timeout(timeo);
		}
		finish_wait(sk_sleep(&rx->sk), &wait);
		goto try_again;
	}

	/* Find the next call and dequeue it if we're not just peeking.  If we
	 * do dequeue it, that comes with a ref that we will need to release.
	 */
	write_lock_bh(&rx->recvmsg_lock);
	l = rx->recvmsg_q.next;
	call = list_entry(l, struct rxrpc_call, recvmsg_link);
	if (!(flags & MSG_PEEK))
		list_del_init(&call->recvmsg_link);
	else
		rxrpc_get_call(call, rxrpc_call_got);
	write_unlock_bh(&rx->recvmsg_lock);

	trace_rxrpc_recvmsg(call, rxrpc_recvmsg_dequeue, 0, 0, 0, 0);

	if (test_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();

	if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		if (flags & MSG_CMSG_COMPAT) {
			unsigned int id32 = call->user_call_ID;

			ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
				       sizeof(unsigned int), &id32);
		} else {
			ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
				       sizeof(unsigned long),
				       &call->user_call_ID);
		}
		if (ret < 0)
			goto error;
	}

	if (msg->msg_name) {
		size_t len = sizeof(call->conn->params.peer->srx);

		memcpy(msg->msg_name, &call->conn->params.peer->srx, len);
		msg->msg_namelen = len;
	}

	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		ret = rxrpc_recvmsg_new_call(rx, call, msg, flags);
		break;
	case RXRPC_CALL_CLIENT_RECV_REPLY:
	case RXRPC_CALL_SERVER_RECV_REQUEST:
	case RXRPC_CALL_SERVER_ACK_REQUEST:
		ret = rxrpc_recvmsg_data(sock, call, msg, &msg->msg_iter, len,
					 flags, &copied);
		if (ret == -EAGAIN)
			ret = 0;

		if (after(call->rx_top, call->rx_hard_ack) &&
		    call->rxtx_buffer[(call->rx_hard_ack + 1) & RXRPC_RXTX_BUFF_MASK])
			rxrpc_notify_socket(call);
		break;
	default:
		ret = 0;
		break;
	}

	if (ret < 0)
		goto error;

	if (call->state == RXRPC_CALL_COMPLETE) {
		ret = rxrpc_recvmsg_term(call, msg);
		if (ret < 0)
			goto error;
		if (!(flags & MSG_PEEK))
			rxrpc_release_call(rx, call);
		msg->msg_flags |= MSG_EOR;
		ret = 1;
	}

	if (ret == 0)
		msg->msg_flags |= MSG_MORE;
	else
		msg->msg_flags &= ~MSG_MORE;
	ret = copied;

error:
	rxrpc_put_call(call, rxrpc_call_put);
error_no_call:
	release_sock(&rx->sk);
	trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret);
	return ret;

wait_interrupted:
	ret = sock_intr_errno(timeo);
wait_error:
	finish_wait(sk_sleep(&rx->sk), &wait);
	call = NULL;
	goto error_no_call;
}
static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
		       struct msghdr *msg, size_t len)
{
	struct pep_sock *pn = pep_sk(sk);
	struct sk_buff *skb;
	long timeo;
	int flags = msg->msg_flags;
	int err, done;

	if ((msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|
				MSG_CMSG_COMPAT)) ||
	    !(msg->msg_flags & MSG_EOR))
		return -EOPNOTSUPP;

	skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len,
				  flags & MSG_DONTWAIT, &err);
	if (!skb)
		return -ENOBUFS;

	skb_reserve(skb, MAX_PHONET_HEADER + 3);
	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (err < 0)
		goto outfree;

	lock_sock(sk);
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	if ((1 << sk->sk_state) & (TCPF_LISTEN|TCPF_CLOSE)) {
		err = -ENOTCONN;
		goto out;
	}
	if (sk->sk_state != TCP_ESTABLISHED) {
		/* Wait until the pipe gets to enabled state */
disabled:
		err = sk_stream_wait_connect(sk, &timeo);
		if (err)
			goto out;

		if (sk->sk_state == TCP_CLOSE_WAIT) {
			err = -ECONNRESET;
			goto out;
		}
	}
	BUG_ON(sk->sk_state != TCP_ESTABLISHED);

	/* Wait until flow control allows TX */
	done = atomic_read(&pn->tx_credits);
	while (!done) {
		DEFINE_WAIT(wait);

		if (!timeo) {
			err = -EAGAIN;
			goto out;
		}
		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			goto out;
		}

		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		done = sk_wait_event(sk, &timeo, atomic_read(&pn->tx_credits));
		finish_wait(sk_sleep(sk), &wait);

		if (sk->sk_state != TCP_ESTABLISHED)
			goto disabled;
	}

	err = pipe_skb_send(sk, skb);
	if (err >= 0)
		err = len; /* success! */
	skb = NULL;
out:
	release_sock(sk);
outfree:
	kfree_skb(skb);
	return err;
}
static int vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
				size_t len, int flags)
{
	struct sock *sk;
	struct vsock_sock *vsk;
	int err;
	size_t target;
	ssize_t copied;
	long timeout;
	struct vsock_transport_recv_notify_data recv_data;

	DEFINE_WAIT(wait);

	sk = sock->sk;
	vsk = vsock_sk(sk);
	err = 0;

	lock_sock(sk);

	if (sk->sk_state != TCP_ESTABLISHED) {
		/* Recvmsg is supposed to return 0 if a peer performs an
		 * orderly shutdown. Differentiate between that case and when a
		 * peer has not connected or a local shutdown occurred with the
		 * SOCK_DONE flag.
		 */
		if (sock_flag(sk, SOCK_DONE))
			err = 0;
		else
			err = -ENOTCONN;

		goto out;
	}

	if (flags & MSG_OOB) {
		err = -EOPNOTSUPP;
		goto out;
	}

	/* We don't check peer_shutdown flag here since peer may actually shut
	 * down, but there can be data in the queue that a local socket can
	 * receive.
	 */
	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		err = 0;
		goto out;
	}

	/* It is valid on Linux to pass in a zero-length receive buffer.  This
	 * is not an error.  We may as well bail out now.
	 */
	if (!len) {
		err = 0;
		goto out;
	}

	/* We must not copy less than target bytes into the user's buffer
	 * before returning successfully, so we wait for the consume queue to
	 * have that much data to consume before dequeueing.  Note that this
	 * makes it impossible to handle cases where target is greater than the
	 * queue size.
	 */
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	if (target >= transport->stream_rcvhiwat(vsk)) {
		err = -ENOMEM;
		goto out;
	}
	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	copied = 0;

	err = transport->notify_recv_init(vsk, target, &recv_data);
	if (err < 0)
		goto out;

	while (1) {
		s64 ready;

		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		ready = vsock_stream_has_data(vsk);

		if (ready == 0) {
			if (sk->sk_err != 0 ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    (vsk->peer_shutdown & SEND_SHUTDOWN)) {
				finish_wait(sk_sleep(sk), &wait);
				break;
			}
			/* Don't wait for non-blocking sockets. */
			if (timeout == 0) {
				err = -EAGAIN;
				finish_wait(sk_sleep(sk), &wait);
				break;
			}

			err = transport->notify_recv_pre_block(
					vsk, target, &recv_data);
			if (err < 0) {
				finish_wait(sk_sleep(sk), &wait);
				break;
			}
			release_sock(sk);
			timeout = schedule_timeout(timeout);
			lock_sock(sk);

			if (signal_pending(current)) {
				err = sock_intr_errno(timeout);
				finish_wait(sk_sleep(sk), &wait);
				break;
			} else if (timeout == 0) {
				err = -EAGAIN;
				finish_wait(sk_sleep(sk), &wait);
				break;
			}
		} else {
			ssize_t read;

			finish_wait(sk_sleep(sk), &wait);

			if (ready < 0) {
				/* Invalid queue pair content. XXX This should
				 * be changed to a connection reset in a later
				 * change.
				 */
				err = -ENOMEM;
				goto out;
			}

			err = transport->notify_recv_pre_dequeue(
					vsk, target, &recv_data);
			if (err < 0)
				break;

			read = transport->stream_dequeue(
					vsk, msg, len - copied, flags);
			if (read < 0) {
				err = -ENOMEM;
				break;
			}

			copied += read;

			err = transport->notify_recv_post_dequeue(
					vsk, target, read,
					!(flags & MSG_PEEK), &recv_data);
			if (err < 0)
				goto out;

			if (read >= target || flags & MSG_PEEK)
				break;

			target -= read;
		}
	}

	if (sk->sk_err)
		err = -sk->sk_err;
	else if (sk->sk_shutdown & RCV_SHUTDOWN)
		err = 0;

	if (copied > 0)
		err = copied;

out:
	release_sock(sk);
	return err;
}
static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
				size_t len)
{
	struct sock *sk;
	struct vsock_sock *vsk;
	ssize_t total_written;
	long timeout;
	int err;
	struct vsock_transport_send_notify_data send_data;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	sk = sock->sk;
	vsk = vsock_sk(sk);
	total_written = 0;
	err = 0;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	lock_sock(sk);

	/* Callers should not provide a destination with stream sockets. */
	if (msg->msg_namelen) {
		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
		goto out;
	}

	/* Send data only if both sides are not shutdown in the direction. */
	if (sk->sk_shutdown & SEND_SHUTDOWN ||
	    vsk->peer_shutdown & RCV_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	if (sk->sk_state != TCP_ESTABLISHED ||
	    !vsock_addr_bound(&vsk->local_addr)) {
		err = -ENOTCONN;
		goto out;
	}

	if (!vsock_addr_bound(&vsk->remote_addr)) {
		err = -EDESTADDRREQ;
		goto out;
	}

	/* Wait for room in the produce queue to enqueue our user's data. */
	timeout = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

	err = transport->notify_send_init(vsk, &send_data);
	if (err < 0)
		goto out;

	while (total_written < len) {
		ssize_t written;

		add_wait_queue(sk_sleep(sk), &wait);
		while (vsock_stream_has_space(vsk) == 0 &&
		       sk->sk_err == 0 &&
		       !(sk->sk_shutdown & SEND_SHUTDOWN) &&
		       !(vsk->peer_shutdown & RCV_SHUTDOWN)) {

			/* Don't wait for non-blocking sockets. */
			if (timeout == 0) {
				err = -EAGAIN;
				remove_wait_queue(sk_sleep(sk), &wait);
				goto out_err;
			}

			err = transport->notify_send_pre_block(vsk, &send_data);
			if (err < 0) {
				remove_wait_queue(sk_sleep(sk), &wait);
				goto out_err;
			}

			release_sock(sk);
			timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
			lock_sock(sk);
			if (signal_pending(current)) {
				err = sock_intr_errno(timeout);
				remove_wait_queue(sk_sleep(sk), &wait);
				goto out_err;
			} else if (timeout == 0) {
				err = -EAGAIN;
				remove_wait_queue(sk_sleep(sk), &wait);
				goto out_err;
			}
		}
		remove_wait_queue(sk_sleep(sk), &wait);

		/* These checks occur both as part of and after the loop
		 * conditional since we need to check before and after
		 * sleeping.
		 */
		if (sk->sk_err) {
			err = -sk->sk_err;
			goto out_err;
		} else if ((sk->sk_shutdown & SEND_SHUTDOWN) ||
			   (vsk->peer_shutdown & RCV_SHUTDOWN)) {
			err = -EPIPE;
			goto out_err;
		}

		err = transport->notify_send_pre_enqueue(vsk, &send_data);
		if (err < 0)
			goto out_err;

		/* Note that enqueue will only write as many bytes as are free
		 * in the produce queue, so we don't need to ensure len is
		 * smaller than the queue size.  It is the caller's
		 * responsibility to check how many bytes we were able to send.
		 */
		written = transport->stream_enqueue(
				vsk, msg, len - total_written);
		if (written < 0) {
			err = -ENOMEM;
			goto out_err;
		}

		total_written += written;

		err = transport->notify_send_post_enqueue(
				vsk, written, &send_data);
		if (err < 0)
			goto out_err;
	}

out_err:
	if (total_written > 0)
		err = total_written;
out:
	release_sock(sk);
	return err;
}
static int vsock_accept(struct socket *sock, struct socket *newsock, int flags,
			bool kern)
{
	struct sock *listener;
	int err;
	struct sock *connected;
	struct vsock_sock *vconnected;
	long timeout;
	DEFINE_WAIT(wait);

	err = 0;
	listener = sock->sk;

	lock_sock(listener);

	if (sock->type != SOCK_STREAM) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (listener->sk_state != TCP_LISTEN) {
		err = -EINVAL;
		goto out;
	}

	/* Wait for children sockets to appear; these are the new sockets
	 * created upon connection establishment.
	 */
	timeout = sock_sndtimeo(listener, flags & O_NONBLOCK);
	prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);

	while ((connected = vsock_dequeue_accept(listener)) == NULL &&
	       listener->sk_err == 0) {
		release_sock(listener);
		timeout = schedule_timeout(timeout);
		finish_wait(sk_sleep(listener), &wait);
		lock_sock(listener);

		if (signal_pending(current)) {
			err = sock_intr_errno(timeout);
			goto out;
		} else if (timeout == 0) {
			err = -EAGAIN;
			goto out;
		}

		prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);
	}
	finish_wait(sk_sleep(listener), &wait);

	if (listener->sk_err)
		err = -listener->sk_err;

	if (connected) {
		listener->sk_ack_backlog--;

		lock_sock_nested(connected, SINGLE_DEPTH_NESTING);
		vconnected = vsock_sk(connected);

		/* If the listener socket has received an error, then we should
		 * reject this socket and return.  Note that we simply mark the
		 * socket rejected, drop our reference, and let the cleanup
		 * function handle the cleanup; the fact that we found it in
		 * the listener's accept queue guarantees that the cleanup
		 * function hasn't run yet.
		 */
		if (err) {
			vconnected->rejected = true;
		} else {
			newsock->state = SS_CONNECTED;
			sock_graft(connected, newsock);
		}

		release_sock(connected);
		sock_put(connected);
	}

out:
	release_sock(listener);
	return err;
}
static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
				int addr_len, int flags)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	struct sockaddr_vm *remote_addr;
	long timeout;
	DEFINE_WAIT(wait);

	err = 0;
	sk = sock->sk;
	vsk = vsock_sk(sk);

	lock_sock(sk);

	/* XXX AF_UNSPEC should make us disconnect like AF_INET. */
	switch (sock->state) {
	case SS_CONNECTED:
		err = -EISCONN;
		goto out;
	case SS_DISCONNECTING:
		err = -EINVAL;
		goto out;
	case SS_CONNECTING:
		/* This continues on so we can move sock into the SS_CONNECTED
		 * state once the connection has completed (at which point err
		 * will be set to zero also).  Otherwise, we will either wait
		 * for the connection or return -EALREADY should this be a
		 * non-blocking call.
		 */
		err = -EALREADY;
		break;
	default:
		if ((sk->sk_state == TCP_LISTEN) ||
		    vsock_addr_cast(addr, addr_len, &remote_addr) != 0) {
			err = -EINVAL;
			goto out;
		}

		/* The hypervisor and well-known contexts do not have socket
		 * endpoints.
		 */
		if (!transport->stream_allow(remote_addr->svm_cid,
					     remote_addr->svm_port)) {
			err = -ENETUNREACH;
			goto out;
		}

		/* Set the remote address that we are connecting to. */
		memcpy(&vsk->remote_addr, remote_addr,
		       sizeof(vsk->remote_addr));

		err = vsock_auto_bind(vsk);
		if (err)
			goto out;

		sk->sk_state = TCP_SYN_SENT;

		err = transport->connect(vsk);
		if (err < 0)
			goto out;

		/* Mark sock as connecting and set the error code to in
		 * progress in case this is a non-blocking connect.
		 */
		sock->state = SS_CONNECTING;
		err = -EINPROGRESS;
	}

	/* The receive path will handle all communication until we are able to
	 * enter the connected state.  Here we wait for the connection to be
	 * completed or a notification of an error.
	 */
	timeout = vsk->connect_timeout;
	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

	while (sk->sk_state != TCP_ESTABLISHED && sk->sk_err == 0) {
		if (flags & O_NONBLOCK) {
			/* If we're not going to block, we schedule a timeout
			 * function to generate a timeout on the connection
			 * attempt, in case the peer doesn't respond in a
			 * timely manner. We hold on to the socket until the
			 * timeout fires.
			 */
			sock_hold(sk);
			INIT_DELAYED_WORK(&vsk->dwork,
					  vsock_connect_timeout);
			schedule_delayed_work(&vsk->dwork, timeout);

			/* Skip ahead to preserve error code set above. */
			goto out_wait;
		}

		release_sock(sk);
		timeout = schedule_timeout(timeout);
		lock_sock(sk);

		if (signal_pending(current)) {
			err = sock_intr_errno(timeout);
			sk->sk_state = TCP_CLOSE;
			sock->state = SS_UNCONNECTED;
			vsock_transport_cancel_pkt(vsk);
			goto out_wait;
		} else if (timeout == 0) {
			err = -ETIMEDOUT;
			sk->sk_state = TCP_CLOSE;
			sock->state = SS_UNCONNECTED;
			vsock_transport_cancel_pkt(vsk);
			goto out_wait;
		}

		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	}

	if (sk->sk_err) {
		err = -sk->sk_err;
		sk->sk_state = TCP_CLOSE;
		sock->state = SS_UNCONNECTED;
	} else {
		err = 0;
	}

out_wait:
	finish_wait(sk_sleep(sk), &wait);
out:
	release_sock(sk);
	return err;
}
int netlink_unicast(struct sock *ssk, struct sk_buff *skb, u32 pid, int nonblock)
{
	struct sock *sk;
	int len = skb->len;
	int protocol = ssk->protocol;
	long timeo;
	DECLARE_WAITQUEUE(wait, current);

	timeo = sock_sndtimeo(ssk, nonblock);

retry:
	sk = netlink_lookup(protocol, pid);
	if (sk == NULL)
		goto no_dst;

#ifdef NL_EMULATE_DEV
	if (sk->protinfo.af_netlink->handler) {
		skb_orphan(skb);
		len = sk->protinfo.af_netlink->handler(protocol, skb);
		sock_put(sk);
		return len;
	}
#endif

	if (atomic_read(&sk->rmem_alloc) > sk->rcvbuf ||
	    test_bit(0, &sk->protinfo.af_netlink->state)) {
		if (!timeo) {
			if (ssk->protinfo.af_netlink->pid == 0)
				netlink_overrun(sk);
			sock_put(sk);
			kfree_skb(skb);
			return -EAGAIN;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&sk->protinfo.af_netlink->wait, &wait);

		if ((atomic_read(&sk->rmem_alloc) > sk->rcvbuf ||
		     test_bit(0, &sk->protinfo.af_netlink->state)) &&
		    !sk->dead)
			timeo = schedule_timeout(timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&sk->protinfo.af_netlink->wait, &wait);
		sock_put(sk);

		if (signal_pending(current)) {
			kfree_skb(skb);
			return sock_intr_errno(timeo);
		}
		goto retry;
	}

	skb_orphan(skb);
	skb_set_owner_r(skb, sk);
	skb_queue_tail(&sk->receive_queue, skb);
	sk->data_ready(sk, len);
	sock_put(sk);
	return len;

no_dst:
	kfree_skb(skb);
	return -ECONNREFUSED;
}
int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		 size_t len, int nonblock, int flags, int *addr_len)
{
	const struct dccp_hdr *dh;
	long timeo;

	lock_sock(sk);

	if (sk->sk_state == DCCP_LISTEN) {
		len = -ENOTCONN;
		goto out;
	}

	timeo = sock_rcvtimeo(sk, nonblock);

	do {
		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

		if (skb == NULL)
			goto verify_sock_status;

		dh = dccp_hdr(skb);

		switch (dh->dccph_type) {
		case DCCP_PKT_DATA:
		case DCCP_PKT_DATAACK:
			goto found_ok_skb;

		case DCCP_PKT_CLOSE:
		case DCCP_PKT_CLOSEREQ:
			if (!(flags & MSG_PEEK))
				dccp_finish_passive_close(sk);
			/* fall through */
		case DCCP_PKT_RESET:
			dccp_pr_debug("found fin (%s) ok!\n",
				      dccp_packet_name(dh->dccph_type));
			len = 0;
			goto found_fin_ok;
		default:
			dccp_pr_debug("packet_type=%s\n",
				      dccp_packet_name(dh->dccph_type));
			sk_eat_skb(sk, skb, false);
		}
verify_sock_status:
		if (sock_flag(sk, SOCK_DONE)) {
			len = 0;
			break;
		}

		if (sk->sk_err) {
			len = sock_error(sk);
			break;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN) {
			len = 0;
			break;
		}

		if (sk->sk_state == DCCP_CLOSED) {
			if (!sock_flag(sk, SOCK_DONE)) {
				/* This occurs when user tries to read
				 * from never connected socket.
				 */
				len = -ENOTCONN;
				break;
			}
			len = 0;
			break;
		}

		if (!timeo) {
			len = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			len = sock_intr_errno(timeo);
			break;
		}

		sk_wait_data(sk, &timeo);
		continue;
	found_ok_skb:
		if (len > skb->len)
			len = skb->len;
		else if (len < skb->len)
			msg->msg_flags |= MSG_TRUNC;

		if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len)) {
			/* Exception. Bailout! */
			len = -EFAULT;
			break;
		}
		if (flags & MSG_TRUNC)
			len = skb->len;
	found_fin_ok:
		if (!(flags & MSG_PEEK))
			sk_eat_skb(sk, skb, false);
		break;
	} while (1);
out:
	release_sock(sk);
	return len;
}