/*
 * Post a call for attention by the socket or kernel service.  Further
 * notifications are suppressed by putting recvmsg_link on a dummy queue.
 */
void rxrpc_notify_socket(struct rxrpc_call *call)
{
	struct rxrpc_sock *rx;
	struct sock *sk;

	_enter("%d", call->debug_id);

	if (!list_empty(&call->recvmsg_link))
		return;

	rcu_read_lock();

	rx = rcu_dereference(call->socket);
	sk = &rx->sk;
	if (rx && sk->sk_state < RXRPC_CLOSE) {
		if (call->notify_rx) {
			call->notify_rx(sk, call, call->user_call_ID);
		} else {
			write_lock_bh(&rx->recvmsg_lock);
			if (list_empty(&call->recvmsg_link)) {
				rxrpc_get_call(call, rxrpc_call_got);
				list_add_tail(&call->recvmsg_link,
					      &rx->recvmsg_q);
			}
			write_unlock_bh(&rx->recvmsg_lock);

			if (!sock_flag(sk, SOCK_DEAD)) {
				_debug("call %ps", sk->sk_data_ready);
				sk->sk_data_ready(sk);
			}
		}
	}

	rcu_read_unlock();
	_leave("");
}
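/*
 * Aside (illustrative sketch, not part of the rxrpc sources): the function
 * above uses list membership itself as the "notification pending" flag - a
 * call whose recvmsg_link is non-empty is already queued, so a second
 * rxrpc_notify_socket() is a no-op.  A minimal userspace analogue of that
 * idiom, using a tiny hand-rolled list:
 */
#include <stdbool.h>
#include <stdio.h>

struct list_node {
	struct list_node *prev, *next;
};

static void list_init(struct list_node *n)        { n->prev = n->next = n; }
static bool list_is_empty(const struct list_node *n) { return n->next == n; }

static void list_add_at_tail(struct list_node *n, struct list_node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

static struct list_node queue;	/* stand-in for rx->recvmsg_q */
static struct list_node item;	/* stand-in for call->recvmsg_link */

static void notify(void)
{
	if (!list_is_empty(&item))	/* already queued: suppress */
		return;
	list_add_at_tail(&item, &queue);
	printf("queued for attention\n");
}

int main(void)
{
	list_init(&queue);
	list_init(&item);
	notify();	/* queues the item */
	notify();	/* suppressed: the item is already on the queue */
	return 0;
}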
/*
 * find an extant server call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
					      unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

found_extant_call:
	rxrpc_get_call(call, rxrpc_call_got);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, atomic_read(&call->usage));
	return call;
}
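/*
 * Aside (illustrative sketch, not part of the rxrpc sources): the lookup
 * above is an ordinary binary-search-tree descent keyed on user_call_ID,
 * wrapped in the socket's read lock.  The same descent over a plain
 * left/right-pointer tree, for reference:
 */
#include <stddef.h>

struct bst_node {
	unsigned long key;
	struct bst_node *left, *right;
};

static struct bst_node *bst_find(struct bst_node *p, unsigned long key)
{
	while (p) {
		if (key < p->key)
			p = p->left;		/* go left for smaller keys */
		else if (key > p->key)
			p = p->right;		/* go right for larger keys */
		else
			return p;		/* exact match */
	}
	return NULL;				/* not present */
}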
/*
 * set up an incoming call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
				       struct rxrpc_connection *conn,
				       struct rxrpc_header *hdr,
				       gfp_t gfp)
{
	struct rxrpc_call *call, *candidate;
	struct rb_node **p, *parent;
	__be32 call_id;

	_enter(",%d,,%x", conn->debug_id, gfp);

	ASSERT(rx != NULL);

	candidate = rxrpc_alloc_call(gfp);
	if (!candidate)
		return ERR_PTR(-EBUSY);

	candidate->socket	= rx;
	candidate->conn		= conn;
	candidate->cid		= hdr->cid;
	candidate->call_id	= hdr->callNumber;
	candidate->channel	= ntohl(hdr->cid) & RXRPC_CHANNELMASK;
	candidate->rx_data_post	= 0;
	candidate->state	= RXRPC_CALL_SERVER_ACCEPTING;
	if (conn->security_ix > 0)
		candidate->state = RXRPC_CALL_SERVER_SECURING;

	write_lock_bh(&conn->lock);

	/* set the channel for this call */
	call = conn->channels[candidate->channel];
	_debug("channel[%u] is %p", candidate->channel, call);
	if (call && call->call_id == hdr->callNumber) {
		/* already set; must've been a duplicate packet */
		_debug("extant call [%d]", call->state);
		ASSERTCMP(call->conn, ==, conn);

		read_lock(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_LOCALLY_ABORTED:
			if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events))
				rxrpc_queue_call(call);
			/* fall through: a locally aborted call is still
			 * reported as an aborted call */
		case RXRPC_CALL_REMOTELY_ABORTED:
			read_unlock(&call->state_lock);
			goto aborted_call;
		default:
			rxrpc_get_call(call);
			read_unlock(&call->state_lock);
			goto extant_call;
		}
	}
/*
 * Pass back notification of a new call.  The call is added to the
 * to-be-accepted list.  This means that the next call to be accepted might not
 * be the last call seen awaiting acceptance, but unless we leave this on the
 * front of the queue and block all other messages until someone gives us a
 * user_ID for it, there's not a lot we can do.
 */
static int rxrpc_recvmsg_new_call(struct rxrpc_sock *rx,
				  struct rxrpc_call *call,
				  struct msghdr *msg, int flags)
{
	int tmp = 0, ret;

	ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NEW_CALL, 0, &tmp);

	if (ret == 0 && !(flags & MSG_PEEK)) {
		_debug("to be accepted");
		write_lock_bh(&rx->recvmsg_lock);
		list_del_init(&call->recvmsg_link);
		write_unlock_bh(&rx->recvmsg_lock);

		rxrpc_get_call(call, rxrpc_call_got);
		write_lock(&rx->call_lock);
		list_add_tail(&call->accept_link, &rx->to_be_accepted);
		write_unlock(&rx->call_lock);
	}

	trace_rxrpc_recvmsg(call, rxrpc_recvmsg_to_be_accepted, 1, 0, 0, ret);
	return ret;
}
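/*
 * Aside (illustrative sketch, not part of the rxrpc sources): in the
 * userspace API of this era, a server that receives the RXRPC_NEW_CALL
 * control message delivered above answers it with a sendmsg() carrying
 * RXRPC_ACCEPT plus the RXRPC_USER_CALL_ID it wants the call tagged with.
 * The constants are assumed to come from <linux/rxrpc.h>; error handling
 * is elided.
 */
#include <string.h>
#include <sys/socket.h>
#include <linux/rxrpc.h>

static void accept_new_call(int fd, unsigned long user_call_id)
{
	char buf[CMSG_SPACE(sizeof(user_call_id)) + CMSG_SPACE(0)];
	struct msghdr msg;
	struct cmsghdr *cmsg;

	memset(&msg, 0, sizeof(msg));
	memset(buf, 0, sizeof(buf));
	msg.msg_control = buf;
	msg.msg_controllen = sizeof(buf);

	/* RXRPC_USER_CALL_ID: the ID the kernel will tag the call with */
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_RXRPC;
	cmsg->cmsg_type = RXRPC_USER_CALL_ID;
	cmsg->cmsg_len = CMSG_LEN(sizeof(user_call_id));
	memcpy(CMSG_DATA(cmsg), &user_call_id, sizeof(user_call_id));

	/* RXRPC_ACCEPT: accept the call notified by RXRPC_NEW_CALL */
	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_RXRPC;
	cmsg->cmsg_type = RXRPC_ACCEPT;
	cmsg->cmsg_len = CMSG_LEN(0);

	sendmsg(fd, &msg, 0);	/* no payload, control data only */
}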
/*
 * receive a message from an RxRPC socket
 * - we need to be careful about two or more threads calling recvmsg
 *   simultaneously
 */
int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
		  struct msghdr *msg, size_t len, int flags)
{
	struct rxrpc_skb_priv *sp;
	struct rxrpc_call *call = NULL, *continue_call = NULL;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct sk_buff *skb;
	long timeo;
	int copy, ret, ullen, offset, copied = 0;
	u32 abort_code;

	DEFINE_WAIT(wait);

	_enter(",,,%zu,%d", len, flags);

	if (flags & (MSG_OOB | MSG_TRUNC))
		return -EOPNOTSUPP;

	ullen = msg->msg_flags & MSG_CMSG_COMPAT ? 4 : sizeof(unsigned long);

	timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);
	msg->msg_flags |= MSG_MORE;

	lock_sock(&rx->sk);

	for (;;) {
		/* return immediately if a client socket has no outstanding
		 * calls */
		if (RB_EMPTY_ROOT(&rx->calls)) {
			if (copied)
				goto out;
			if (rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
				release_sock(&rx->sk);
				if (continue_call)
					rxrpc_put_call(continue_call);
				return -ENODATA;
			}
		}

		/* get the next message on the Rx queue */
		skb = skb_peek(&rx->sk.sk_receive_queue);
		if (!skb) {
			/* nothing remains on the queue */
			if (copied &&
			    (msg->msg_flags & MSG_PEEK || timeo == 0))
				goto out;

			/* wait for a message to turn up */
			release_sock(&rx->sk);
			prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
						  TASK_INTERRUPTIBLE);
			ret = sock_error(&rx->sk);
			if (ret)
				goto wait_error;

			if (skb_queue_empty(&rx->sk.sk_receive_queue)) {
				if (signal_pending(current))
					goto wait_interrupted;
				timeo = schedule_timeout(timeo);
			}
			finish_wait(sk_sleep(&rx->sk), &wait);
			lock_sock(&rx->sk);
			continue;
		}

	peek_next_packet:
		sp = rxrpc_skb(skb);
		call = sp->call;
		ASSERT(call != NULL);

		_debug("next pkt %s", rxrpc_pkts[sp->hdr.type]);

		/* make sure we wait for the state to be updated in this call */
		spin_lock_bh(&call->lock);
		spin_unlock_bh(&call->lock);

		if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) {
			_debug("packet from released call");
			if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
				BUG();
			rxrpc_free_skb(skb);
			continue;
		}

		/* determine whether to continue last data receive */
		if (continue_call) {
			_debug("maybe cont");
			if (call != continue_call ||
			    skb->mark != RXRPC_SKB_MARK_DATA) {
				release_sock(&rx->sk);
				rxrpc_put_call(continue_call);
				_leave(" = %d [noncont]", copied);
				return copied;
			}
		}

		rxrpc_get_call(call);

		/* copy the peer address and timestamp */
		if (!continue_call) {
			if (msg->msg_name) {
				size_t len =
					sizeof(call->conn->trans->peer->srx);
				memcpy(msg->msg_name,
				       &call->conn->trans->peer->srx, len);
				msg->msg_namelen = len;
			}
			sock_recv_ts_and_drops(msg, &rx->sk, skb);
		}

		/* receive the message */
		if (skb->mark != RXRPC_SKB_MARK_DATA)
			goto receive_non_data_message;

		_debug("recvmsg DATA #%u { %d, %d }",
		       ntohl(sp->hdr.seq), skb->len, sp->offset);

		if (!continue_call) {
			/* only set the control data once per recvmsg() */
			ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
				       ullen, &call->user_call_ID);
			if (ret < 0)
				goto copy_error;
			ASSERT(test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
		}

		ASSERTCMP(ntohl(sp->hdr.seq), >=, call->rx_data_recv);
		ASSERTCMP(ntohl(sp->hdr.seq), <=, call->rx_data_recv + 1);
		call->rx_data_recv = ntohl(sp->hdr.seq);

		ASSERTCMP(ntohl(sp->hdr.seq), >, call->rx_data_eaten);

		offset = sp->offset;
		copy = skb->len - offset;
		if (copy > len - copied)
			copy = len - copied;

		ret = skb_copy_datagram_iovec(skb, offset,
					      msg->msg_iov, copy);
		if (ret < 0)
			goto copy_error;

		/* handle piecemeal consumption of data packets */
		_debug("copied %d+%d", copy, copied);

		offset += copy;
		copied += copy;

		if (!(flags & MSG_PEEK))
			sp->offset = offset;

		if (sp->offset < skb->len) {
			_debug("buffer full");
			ASSERTCMP(copied, ==, len);
			break;
		}

		/* we transferred the whole data packet */
		if (sp->hdr.flags & RXRPC_LAST_PACKET) {
			_debug("last");
			if (call->conn->out_clientflag) {
				/* last byte of reply received */
				ret = copied;
				goto terminal_message;
			}

			/* last bit of request received */
			if (!(flags & MSG_PEEK)) {
				_debug("eat packet");
				if (skb_dequeue(&rx->sk.sk_receive_queue) !=
				    skb)
					BUG();
				rxrpc_free_skb(skb);
			}
			msg->msg_flags &= ~MSG_MORE;
			break;
		}

		/* move on to the next data message */
		_debug("next");
		if (!continue_call)
			continue_call = sp->call;
		else
			rxrpc_put_call(call);
		call = NULL;

		if (flags & MSG_PEEK) {
			_debug("peek next");
			skb = skb->next;
			if (skb == (struct sk_buff *) &rx->sk.sk_receive_queue)
				break;
			goto peek_next_packet;
		}

		_debug("eat packet");
		if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
			BUG();
		rxrpc_free_skb(skb);
	}
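/*
 * Aside (illustrative sketch, not part of the rxrpc sources): sp->offset
 * lets a single DATA packet be consumed across several recvmsg() calls, and
 * the MSG_MORE flag written back into msg_flags tells the caller whether any
 * of the message remains.  A userspace caller could therefore drain a reply
 * piecemeal into a small buffer roughly like this:
 */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

static void read_reply_piecemeal(int fd)
{
	char buf[128];
	struct iovec iov;
	struct msghdr msg;
	ssize_t n;

	do {
		memset(&msg, 0, sizeof(msg));
		iov.iov_base = buf;
		iov.iov_len = sizeof(buf);	/* deliberately small */
		msg.msg_iov = &iov;
		msg.msg_iovlen = 1;

		n = recvmsg(fd, &msg, 0);
		if (n <= 0)
			break;
		printf("got %zd bytes%s\n", n,
		       (msg.msg_flags & MSG_MORE) ? " (more to come)" : "");
	} while (msg.msg_flags & MSG_MORE);
}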
/*
 * set up a call for the given data
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_transport *trans,
					 struct rxrpc_conn_bundle *bundle,
					 unsigned long user_call_ID,
					 int create,
					 gfp_t gfp)
{
	struct rxrpc_call *call, *candidate;
	struct rb_node *p, *parent, **pp;

	_enter("%p,%d,%d,%lx,%d",
	       rx, trans ? trans->debug_id : -1, bundle ? bundle->debug_id : -1,
	       user_call_ID, create);

	/* search the extant calls first for one that matches the specified
	 * user ID */
	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);

	if (!create || !trans)
		return ERR_PTR(-EBADSLT);

	/* not yet present - create a candidate for a new record and then
	 * redo the search */
	candidate = rxrpc_alloc_client_call(rx, trans, bundle, gfp);
	if (IS_ERR(candidate)) {
		_leave(" = %ld", PTR_ERR(candidate));
		return candidate;
	}

	candidate->user_call_ID = user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &candidate->flags);

	write_lock(&rx->call_lock);

	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_second;
	}

	/* second search also failed; add the new call */
	call = candidate;
	candidate = NULL;
	rxrpc_get_call(call);

	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

	/* we found the call in the list immediately */
found_extant_call:
	rxrpc_get_call(call);
	read_unlock(&rx->call_lock);
	_leave(" = %p [extant %d]", call, atomic_read(&call->usage));
	return call;

	/* we found the call on the second time through the list */
found_extant_second:
	rxrpc_get_call(call);
	write_unlock(&rx->call_lock);
	rxrpc_put_call(candidate);
	_leave(" = %p [second %d]", call, atomic_read(&call->usage));
	return call;
}
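/*
 * Aside (illustrative sketch, not part of the rxrpc sources): the function
 * above uses the classic optimistic-create pattern - search under the read
 * lock, drop it to allocate (which may sleep), then search again under the
 * write lock in case a racing thread inserted the same key in between.  A
 * compressed userspace analogue with a pthread rwlock; table_find() and
 * table_insert() are hypothetical helpers standing in for the rbtree walk:
 */
#include <pthread.h>
#include <stdlib.h>

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;

struct obj { unsigned long key; /* ... */ };

extern struct obj *table_find(unsigned long key);	/* hypothetical */
extern void table_insert(struct obj *obj);		/* hypothetical */

static struct obj *get_obj(unsigned long key)
{
	struct obj *obj, *candidate;

	pthread_rwlock_rdlock(&table_lock);
	obj = table_find(key);			/* fast path: already there */
	pthread_rwlock_unlock(&table_lock);
	if (obj)
		return obj;

	candidate = malloc(sizeof(*candidate));	/* allocate unlocked */
	if (!candidate)
		return NULL;
	candidate->key = key;

	pthread_rwlock_wrlock(&table_lock);
	obj = table_find(key);			/* recheck: we may have raced */
	if (!obj) {
		table_insert(candidate);
		obj = candidate;
		candidate = NULL;
	}
	pthread_rwlock_unlock(&table_lock);

	free(candidate);			/* lost the race: discard ours */
	return obj;
}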
/*
 * accept an incoming call that needs peer, transport and/or connection setting
 * up
 */
static int rxrpc_accept_incoming_call(struct rxrpc_local *local,
				      struct rxrpc_sock *rx,
				      struct sk_buff *skb,
				      struct sockaddr_rxrpc *srx)
{
	struct rxrpc_connection *conn;
	struct rxrpc_skb_priv *sp, *nsp;
	struct rxrpc_call *call;
	struct sk_buff *notification;
	int ret;

	_enter("");

	sp = rxrpc_skb(skb);

	/* get a notification message to send to the server app */
	notification = alloc_skb(0, GFP_NOFS);
	if (!notification) {
		_debug("no memory");
		ret = -ENOMEM;
		goto error_nofree;
	}
	rxrpc_new_skb(notification);
	notification->mark = RXRPC_SKB_MARK_NEW_CALL;

	conn = rxrpc_incoming_connection(local, srx, skb);
	if (IS_ERR(conn)) {
		_debug("no conn");
		ret = PTR_ERR(conn);
		goto error;
	}

	call = rxrpc_incoming_call(rx, conn, skb);
	rxrpc_put_connection(conn);
	if (IS_ERR(call)) {
		_debug("no call");
		ret = PTR_ERR(call);
		goto error;
	}

	/* attach the call to the socket */
	read_lock_bh(&local->services_lock);
	if (rx->sk.sk_state == RXRPC_CLOSE)
		goto invalid_service;

	write_lock(&rx->call_lock);
	if (!test_and_set_bit(RXRPC_CALL_INIT_ACCEPT, &call->flags)) {
		rxrpc_get_call(call);

		spin_lock(&call->conn->state_lock);
		if (sp->hdr.securityIndex > 0 &&
		    call->conn->state == RXRPC_CONN_SERVICE_UNSECURED) {
			_debug("await conn sec");
			list_add_tail(&call->accept_link, &rx->secureq);
			call->conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
			set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
			rxrpc_queue_conn(call->conn);
		} else {
			_debug("conn ready");
			call->state = RXRPC_CALL_SERVER_ACCEPTING;
			list_add_tail(&call->accept_link, &rx->acceptq);
			rxrpc_get_call(call);
			atomic_inc(&call->skb_count);
			nsp = rxrpc_skb(notification);
			nsp->call = call;

			ASSERTCMP(atomic_read(&call->usage), >=, 3);

			_debug("notify");
			spin_lock(&call->lock);
			ret = rxrpc_queue_rcv_skb(call, notification, true,
						  false);
			spin_unlock(&call->lock);
			notification = NULL;
			BUG_ON(ret < 0);
		}
		spin_unlock(&call->conn->state_lock);

		_debug("queued");
	}
int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
		  struct msghdr *msg, size_t len, int flags)
{
	struct rxrpc_skb_priv *sp;
	struct rxrpc_call *call = NULL, *continue_call = NULL;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct sk_buff *skb;
	long timeo;
	int copy, ret, ullen, offset, copied = 0;
	u32 abort_code;

	DEFINE_WAIT(wait);

	_enter(",,,%zu,%d", len, flags);

	if (flags & (MSG_OOB | MSG_TRUNC))
		return -EOPNOTSUPP;

	ullen = msg->msg_flags & MSG_CMSG_COMPAT ? 4 : sizeof(unsigned long);

	timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);
	msg->msg_flags |= MSG_MORE;

	lock_sock(&rx->sk);

	for (;;) {
		if (RB_EMPTY_ROOT(&rx->calls)) {
			if (copied)
				goto out;
			if (rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
				release_sock(&rx->sk);
				if (continue_call)
					rxrpc_put_call(continue_call);
				return -ENODATA;
			}
		}

		skb = skb_peek(&rx->sk.sk_receive_queue);
		if (!skb) {
			if (copied &&
			    (msg->msg_flags & MSG_PEEK || timeo == 0))
				goto out;

			release_sock(&rx->sk);
			prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
						  TASK_INTERRUPTIBLE);
			ret = sock_error(&rx->sk);
			if (ret)
				goto wait_error;

			if (skb_queue_empty(&rx->sk.sk_receive_queue)) {
				if (signal_pending(current))
					goto wait_interrupted;
				timeo = schedule_timeout(timeo);
			}
			finish_wait(sk_sleep(&rx->sk), &wait);
			lock_sock(&rx->sk);
			continue;
		}

	peek_next_packet:
		sp = rxrpc_skb(skb);
		call = sp->call;
		ASSERT(call != NULL);

		_debug("next pkt %s", rxrpc_pkts[sp->hdr.type]);

		spin_lock_bh(&call->lock);
		spin_unlock_bh(&call->lock);

		if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) {
			_debug("packet from released call");
			if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
				BUG();
			rxrpc_free_skb(skb);
			continue;
		}

		if (continue_call) {
			_debug("maybe cont");
			if (call != continue_call ||
			    skb->mark != RXRPC_SKB_MARK_DATA) {
				release_sock(&rx->sk);
				rxrpc_put_call(continue_call);
				_leave(" = %d [noncont]", copied);
				return copied;
			}
		}

		rxrpc_get_call(call);

		if (!continue_call) {
			if (msg->msg_name && msg->msg_namelen > 0)
				memcpy(msg->msg_name,
				       &call->conn->trans->peer->srx,
				       sizeof(call->conn->trans->peer->srx));
			sock_recv_ts_and_drops(msg, &rx->sk, skb);
		}

		if (skb->mark != RXRPC_SKB_MARK_DATA)
			goto receive_non_data_message;

		_debug("recvmsg DATA #%u { %d, %d }",
		       ntohl(sp->hdr.seq), skb->len, sp->offset);

		if (!continue_call) {
			ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
				       ullen, &call->user_call_ID);
			if (ret < 0)
				goto copy_error;
			ASSERT(test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
		}

		ASSERTCMP(ntohl(sp->hdr.seq), >=, call->rx_data_recv);
		ASSERTCMP(ntohl(sp->hdr.seq), <=, call->rx_data_recv + 1);
		call->rx_data_recv = ntohl(sp->hdr.seq);

		ASSERTCMP(ntohl(sp->hdr.seq), >, call->rx_data_eaten);

		offset = sp->offset;
		copy = skb->len - offset;
		if (copy > len - copied)
			copy = len - copied;

		if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
			ret = skb_copy_datagram_iovec(skb, offset,
						      msg->msg_iov, copy);
		} else {
			ret = skb_copy_and_csum_datagram_iovec(skb, offset,
							       msg->msg_iov);
			if (ret == -EINVAL)
				goto csum_copy_error;
		}

		if (ret < 0)
			goto copy_error;

		_debug("copied %d+%d", copy, copied);

		offset += copy;
		copied += copy;

		if (!(flags & MSG_PEEK))
			sp->offset = offset;

		if (sp->offset < skb->len) {
			_debug("buffer full");
			ASSERTCMP(copied, ==, len);
			break;
		}

		if (sp->hdr.flags & RXRPC_LAST_PACKET) {
			_debug("last");
			if (call->conn->out_clientflag) {
				ret = copied;
				goto terminal_message;
			}

			if (!(flags & MSG_PEEK)) {
				_debug("eat packet");
				if (skb_dequeue(&rx->sk.sk_receive_queue) !=
				    skb)
					BUG();
				rxrpc_free_skb(skb);
			}
			msg->msg_flags &= ~MSG_MORE;
			break;
		}

		_debug("next");
		if (!continue_call)
			continue_call = sp->call;
		else
			rxrpc_put_call(call);
		call = NULL;

		if (flags & MSG_PEEK) {
			_debug("peek next");
			skb = skb->next;
			if (skb == (struct sk_buff *) &rx->sk.sk_receive_queue)
				break;
			goto peek_next_packet;
		}

		_debug("eat packet");
		if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
			BUG();
		rxrpc_free_skb(skb);
	}
/*
 * Receive a message from an RxRPC socket
 * - we need to be careful about two or more threads calling recvmsg
 *   simultaneously
 */
int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
		  int flags)
{
	struct rxrpc_call *call;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct list_head *l;
	size_t copied = 0;
	long timeo;
	int ret;

	DEFINE_WAIT(wait);

	trace_rxrpc_recvmsg(NULL, rxrpc_recvmsg_enter, 0, 0, 0, 0);

	if (flags & (MSG_OOB | MSG_TRUNC))
		return -EOPNOTSUPP;

	timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);

try_again:
	lock_sock(&rx->sk);

	/* Return immediately if a client socket has no outstanding calls */
	if (RB_EMPTY_ROOT(&rx->calls) &&
	    list_empty(&rx->recvmsg_q) &&
	    rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
		release_sock(&rx->sk);
		return -ENODATA;
	}

	if (list_empty(&rx->recvmsg_q)) {
		ret = -EWOULDBLOCK;
		if (timeo == 0) {
			call = NULL;
			goto error_no_call;
		}

		release_sock(&rx->sk);

		/* Wait for something to happen */
		prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
					  TASK_INTERRUPTIBLE);
		ret = sock_error(&rx->sk);
		if (ret)
			goto wait_error;

		if (list_empty(&rx->recvmsg_q)) {
			if (signal_pending(current))
				goto wait_interrupted;
			trace_rxrpc_recvmsg(NULL, rxrpc_recvmsg_wait,
					    0, 0, 0, 0);
			timeo = schedule_timeout(timeo);
		}
		finish_wait(sk_sleep(&rx->sk), &wait);
		goto try_again;
	}

	/* Find the next call and dequeue it if we're not just peeking.  If we
	 * do dequeue it, that comes with a ref that we will need to release.
	 */
	write_lock_bh(&rx->recvmsg_lock);
	l = rx->recvmsg_q.next;
	call = list_entry(l, struct rxrpc_call, recvmsg_link);
	if (!(flags & MSG_PEEK))
		list_del_init(&call->recvmsg_link);
	else
		rxrpc_get_call(call, rxrpc_call_got);
	write_unlock_bh(&rx->recvmsg_lock);

	trace_rxrpc_recvmsg(call, rxrpc_recvmsg_dequeue, 0, 0, 0, 0);

	if (test_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();

	if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		if (flags & MSG_CMSG_COMPAT) {
			unsigned int id32 = call->user_call_ID;

			ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
				       sizeof(unsigned int), &id32);
		} else {
			ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
				       sizeof(unsigned long),
				       &call->user_call_ID);
		}
		if (ret < 0)
			goto error;
	}

	if (msg->msg_name) {
		size_t len = sizeof(call->conn->params.peer->srx);

		memcpy(msg->msg_name, &call->conn->params.peer->srx, len);
		msg->msg_namelen = len;
	}

	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		ret = rxrpc_recvmsg_new_call(rx, call, msg, flags);
		break;
	case RXRPC_CALL_CLIENT_RECV_REPLY:
	case RXRPC_CALL_SERVER_RECV_REQUEST:
	case RXRPC_CALL_SERVER_ACK_REQUEST:
		ret = rxrpc_recvmsg_data(sock, call, msg, &msg->msg_iter, len,
					 flags, &copied);
		if (ret == -EAGAIN)
			ret = 0;

		if (after(call->rx_top, call->rx_hard_ack) &&
		    call->rxtx_buffer[(call->rx_hard_ack + 1) &
				      RXRPC_RXTX_BUFF_MASK])
			rxrpc_notify_socket(call);
		break;
	default:
		ret = 0;
		break;
	}

	if (ret < 0)
		goto error;

	if (call->state == RXRPC_CALL_COMPLETE) {
		ret = rxrpc_recvmsg_term(call, msg);
		if (ret < 0)
			goto error;
		if (!(flags & MSG_PEEK))
			rxrpc_release_call(rx, call);
		msg->msg_flags |= MSG_EOR;
		ret = 1;
	}

	if (ret == 0)
		msg->msg_flags |= MSG_MORE;
	else
		msg->msg_flags &= ~MSG_MORE;
	ret = copied;

error:
	rxrpc_put_call(call, rxrpc_call_put);
error_no_call:
	release_sock(&rx->sk);
	trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret);
	return ret;

wait_interrupted:
	ret = sock_intr_errno(timeo);
wait_error:
	finish_wait(sk_sleep(&rx->sk), &wait);
	call = NULL;
	goto error_no_call;
}
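/*
 * Aside (illustrative sketch, not part of the rxrpc sources): a userspace
 * client reading a reply through the recvmsg() above loops until the kernel
 * marks the final fragment with MSG_EOR, picking the user call ID out of the
 * RXRPC_USER_CALL_ID control message.  Constants are assumed to come from
 * <linux/rxrpc.h>; error handling is elided.
 */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/rxrpc.h>

static void read_reply(int fd)
{
	char buf[4096], control[128];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct msghdr msg;
	struct cmsghdr *cmsg;
	unsigned long call_id = 0;
	ssize_t n;

	for (;;) {
		memset(&msg, 0, sizeof(msg));
		msg.msg_iov = &iov;
		msg.msg_iovlen = 1;
		msg.msg_control = control;
		msg.msg_controllen = sizeof(control);

		n = recvmsg(fd, &msg, 0);
		if (n < 0)
			break;

		/* pull the user call ID out of the ancillary data */
		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg;
		     cmsg = CMSG_NXTHDR(&msg, cmsg))
			if (cmsg->cmsg_level == SOL_RXRPC &&
			    cmsg->cmsg_type == RXRPC_USER_CALL_ID)
				memcpy(&call_id, CMSG_DATA(cmsg),
				       sizeof(call_id));

		printf("call %lx: %zd bytes\n", call_id, n);
		if (msg.msg_flags & MSG_EOR)	/* call complete */
			break;
	}
}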
/*
 * Preallocate a single service call, connection and peer and, if possible,
 * give them a user ID and attach the user's side of the ID to them.
 */
static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
				      struct rxrpc_backlog *b,
				      rxrpc_notify_rx_t notify_rx,
				      rxrpc_user_attach_call_t user_attach_call,
				      unsigned long user_call_ID, gfp_t gfp)
{
	const void *here = __builtin_return_address(0);
	struct rxrpc_call *call;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	int max, tmp;
	unsigned int size = RXRPC_BACKLOG_MAX;
	unsigned int head, tail, call_head, call_tail;

	max = rx->sk.sk_max_ack_backlog;
	tmp = rx->sk.sk_ack_backlog;
	if (tmp >= max) {
		_leave(" = -ENOBUFS [full %u]", max);
		return -ENOBUFS;
	}
	max -= tmp;

	/* We don't need more conns and peers than we have calls, but on the
	 * other hand, we shouldn't ever use more peers than conns or conns
	 * than calls.
	 */
	call_head = b->call_backlog_head;
	call_tail = READ_ONCE(b->call_backlog_tail);
	tmp = CIRC_CNT(call_head, call_tail, size);
	if (tmp >= max) {
		_leave(" = -ENOBUFS [enough %u]", tmp);
		return -ENOBUFS;
	}
	max = tmp + 1;

	head = b->peer_backlog_head;
	tail = READ_ONCE(b->peer_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_peer *peer = rxrpc_alloc_peer(rx->local, gfp);
		if (!peer)
			return -ENOMEM;
		b->peer_backlog[head] = peer;
		smp_store_release(&b->peer_backlog_head,
				  (head + 1) & (size - 1));
	}

	head = b->conn_backlog_head;
	tail = READ_ONCE(b->conn_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_connection *conn;

		conn = rxrpc_prealloc_service_connection(rxnet, gfp);
		if (!conn)
			return -ENOMEM;
		b->conn_backlog[head] = conn;
		smp_store_release(&b->conn_backlog_head,
				  (head + 1) & (size - 1));
		trace_rxrpc_conn(conn, rxrpc_conn_new_service,
				 atomic_read(&conn->usage), here);
	}

	/* Now it gets complicated, because calls get registered with the
	 * socket here, particularly if a user ID is preassigned by the user.
	 */
	call = rxrpc_alloc_call(gfp);
	if (!call)
		return -ENOMEM;
	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
	call->state = RXRPC_CALL_SERVER_PREALLOC;

	trace_rxrpc_call(call, rxrpc_call_new_service,
			 atomic_read(&call->usage),
			 here, (const void *)user_call_ID);

	write_lock(&rx->call_lock);
	if (user_attach_call) {
		struct rxrpc_call *xcall;
		struct rb_node *parent, **pp;

		/* Check the user ID isn't already in use.  Note that the
		 * comparison must be against the extant node (xcall), not the
		 * freshly allocated call, whose user_call_ID isn't set yet.
		 */
		pp = &rx->calls.rb_node;
		parent = NULL;
		while (*pp) {
			parent = *pp;
			xcall = rb_entry(parent, struct rxrpc_call, sock_node);
			if (user_call_ID < xcall->user_call_ID)
				pp = &(*pp)->rb_left;
			else if (user_call_ID > xcall->user_call_ID)
				pp = &(*pp)->rb_right;
			else
				goto id_in_use;
		}

		call->user_call_ID = user_call_ID;
		call->notify_rx = notify_rx;
		rxrpc_get_call(call, rxrpc_call_got_kernel);
		user_attach_call(call, user_call_ID);
		rxrpc_get_call(call, rxrpc_call_got_userid);
		rb_link_node(&call->sock_node, parent, pp);
		rb_insert_color(&call->sock_node, &rx->calls);
		set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}

	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	write_lock(&rxnet->call_lock);
	list_add_tail(&call->link, &rxnet->calls);
	write_unlock(&rxnet->call_lock);

	b->call_backlog[call_head] = call;
	smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
	_leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
	return 0;

id_in_use:
	write_unlock(&rx->call_lock);
	rxrpc_cleanup_call(call);
	_leave(" = -EBADSLT");
	return -EBADSLT;
}
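/*
 * Aside (illustrative sketch, not part of the rxrpc sources): the backlog
 * rings above use the power-of-two circular-buffer arithmetic of
 * <linux/circ_buf.h>, where CIRC_CNT(head, tail, size) is
 * ((head - tail) & (size - 1)).  A standalone demonstration of why the
 * masking works, including head wrap-around:
 */
#include <assert.h>

#define SIZE	32				/* must be a power of two */
#define CIRC_CNT(head, tail, size)	(((head) - (tail)) & ((size) - 1))

int main(void)
{
	unsigned int head = 0, tail = 0;

	/* producer inserts three items */
	head = (head + 1) & (SIZE - 1);
	head = (head + 1) & (SIZE - 1);
	head = (head + 1) & (SIZE - 1);
	assert(CIRC_CNT(head, tail, SIZE) == 3);

	/* consumer removes one */
	tail = (tail + 1) & (SIZE - 1);
	assert(CIRC_CNT(head, tail, SIZE) == 2);

	/* the count survives head wrapping past the end of the array */
	head = 31;
	tail = 29;
	head = (head + 1) & (SIZE - 1);		/* head wraps to 0 */
	assert(CIRC_CNT(head, tail, SIZE) == 3);

	return 0;
}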
/*
 * Rx I/O daemon
 */
static int rxrpc_krxiod(void *arg)
{
	DECLARE_WAITQUEUE(krxiod, current);

	printk("Started krxiod %d\n", current->pid);

	daemonize("krxiod");

	/* loop around waiting for work to do */
	do {
		/* wait for work or to be told to exit */
		_debug("### Begin Wait");
		if (!atomic_read(&rxrpc_krxiod_qcount)) {
			set_current_state(TASK_INTERRUPTIBLE);

			add_wait_queue(&rxrpc_krxiod_sleepq, &krxiod);

			for (;;) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (atomic_read(&rxrpc_krxiod_qcount) ||
				    rxrpc_krxiod_die ||
				    signal_pending(current))
					break;

				schedule();
			}

			remove_wait_queue(&rxrpc_krxiod_sleepq, &krxiod);
			set_current_state(TASK_RUNNING);
		}
		_debug("### End Wait");

		/* do work if been given some to do */
		_debug("### Begin Work");

		/* see if there's a transport in need of attention */
		if (!list_empty(&rxrpc_krxiod_transportq)) {
			struct rxrpc_transport *trans = NULL;

			spin_lock_irq(&rxrpc_krxiod_transportq_lock);

			if (!list_empty(&rxrpc_krxiod_transportq)) {
				trans = list_entry(
					rxrpc_krxiod_transportq.next,
					struct rxrpc_transport,
					krxiodq_link);

				list_del_init(&trans->krxiodq_link);
				atomic_dec(&rxrpc_krxiod_qcount);

				/* make sure it hasn't gone away and doesn't go
				 * away */
				if (atomic_read(&trans->usage) > 0)
					rxrpc_get_transport(trans);
				else
					trans = NULL;
			}

			spin_unlock_irq(&rxrpc_krxiod_transportq_lock);

			if (trans) {
				rxrpc_trans_receive_packet(trans);
				rxrpc_put_transport(trans);
			}
		}

		/* see if there's a call in need of attention */
		if (!list_empty(&rxrpc_krxiod_callq)) {
			struct rxrpc_call *call = NULL;

			spin_lock_irq(&rxrpc_krxiod_callq_lock);

			if (!list_empty(&rxrpc_krxiod_callq)) {
				call = list_entry(rxrpc_krxiod_callq.next,
						  struct rxrpc_call,
						  rcv_krxiodq_lk);
				list_del_init(&call->rcv_krxiodq_lk);
				atomic_dec(&rxrpc_krxiod_qcount);

				/* make sure it hasn't gone away and doesn't go
				 * away */
				if (atomic_read(&call->usage) > 0) {
					_debug("@@@ KRXIOD"
					       " Begin Attend Call %p", call);
					rxrpc_get_call(call);
				} else {
					call = NULL;
				}
			}

			spin_unlock_irq(&rxrpc_krxiod_callq_lock);

			if (call) {
				rxrpc_call_do_stuff(call);
				rxrpc_put_call(call);
				_debug("@@@ KRXIOD End Attend Call %p", call);
			}
		}
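/*
 * Aside (illustrative sketch, not part of the rxrpc sources): the daemon
 * above hand-rolls a wait loop - mark the task sleeping, re-check the work
 * counter and exit flag, and schedule() until one of them fires.  The same
 * shape in a self-contained userspace worker, with a pthread condition
 * variable standing in for the wait queue:
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t sleepq = PTHREAD_COND_INITIALIZER;
static int qcount;		/* work items pending */
static bool die;		/* told to exit */

static void *worker(void *arg)
{
	for (;;) {
		pthread_mutex_lock(&lock);
		while (!qcount && !die)		/* nothing to do: sleep */
			pthread_cond_wait(&sleepq, &lock);
		if (die && !qcount) {
			pthread_mutex_unlock(&lock);
			break;
		}
		qcount--;			/* claim one work item */
		pthread_mutex_unlock(&lock);

		printf("attending to one item of work\n");
	}
	return NULL;
}

static void queue_work(void)
{
	pthread_mutex_lock(&lock);
	qcount++;
	pthread_cond_signal(&sleepq);		/* wake the daemon */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t thr;

	pthread_create(&thr, NULL, worker, NULL);
	queue_work();
	queue_work();

	pthread_mutex_lock(&lock);
	die = true;				/* tell the daemon to exit */
	pthread_cond_signal(&sleepq);
	pthread_mutex_unlock(&lock);
	pthread_join(thr, NULL);
	return 0;
}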