/*
 * send a message through a server socket
 * - caller holds the socket locked
 */
int rxrpc_server_sendmsg(struct kiocb *iocb, struct rxrpc_sock *rx,
			 struct msghdr *msg, size_t len)
{
	unsigned long user_call_ID = 0;
	struct rxrpc_call *call;
	enum rxrpc_command cmd;
	u32 abort_code = 0;
	int ret;

	_enter("");

	/* extract the command, user call ID and abort code from the control
	 * messages attached to the sendmsg() call */
	ret = rxrpc_sendmsg_cmsg(rx, msg, &user_call_ID, &cmd, &abort_code,
				 true);
	if (ret < 0)
		return ret;

	/* accepting an incoming call is handled entirely here and doesn't go
	 * through the common dispatch below */
	if (cmd == RXRPC_CMD_ACCEPT) {
		call = rxrpc_accept_call(rx, user_call_ID);
		if (IS_ERR(call))
			return PTR_ERR(call);
		rxrpc_put_call(call);
		return 0;
	}

	call = rxrpc_find_server_call(rx, user_call_ID);
	if (!call)
		return -EBADSLT;

	if (call->state >= RXRPC_CALL_COMPLETE) {
		/* it's too late for this call */
		ret = -ESHUTDOWN;
	} else if (cmd == RXRPC_CMD_SEND_ABORT) {
		rxrpc_send_abort(call, abort_code);
	} else if (cmd != RXRPC_CMD_SEND_DATA) {
		/* rxrpc_sendmsg_cmsg() should never hand us anything else */
		BUG();
	} else if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
		   call->state != RXRPC_CALL_SERVER_ACK_REQUEST &&
		   call->state != RXRPC_CALL_SERVER_SEND_REPLY) {
		/* Tx phase not yet begun for this call */
		ret = -EPROTO;
	} else {
		ret = rxrpc_send_data(iocb, rx, call, msg, len);
	}

	rxrpc_put_call(call);
	_leave(" = %d", ret);
	return ret;
}
/*
 * Discard the preallocation on a service.
 */
void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	unsigned int size = RXRPC_BACKLOG_MAX, head, tail;

	/* No backlog was ever allocated, or it was already discarded. */
	if (!b)
		return;
	rx->backlog = NULL;

	/* Make sure that there aren't any incoming calls in progress before we
	 * clear the preallocation buffers.
	 */
	spin_lock_bh(&rx->incoming_lock);
	spin_unlock_bh(&rx->incoming_lock);

	/* Free the unused preallocated peers.  The ring indices wrap with a
	 * power-of-two mask (size - 1), matching the CIRC_CNT convention. */
	head = b->peer_backlog_head;
	tail = b->peer_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_peer *peer = b->peer_backlog[tail];
		kfree(peer);
		tail = (tail + 1) & (size - 1);
	}

	/* Free the unused preallocated connections, unlinking each one from
	 * the per-net connection lists first. */
	head = b->conn_backlog_head;
	tail = b->conn_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_connection *conn = b->conn_backlog[tail];
		write_lock(&rxnet->conn_lock);
		list_del(&conn->link);
		list_del(&conn->proc_link);
		write_unlock(&rxnet->conn_lock);
		kfree(conn);
		tail = (tail + 1) & (size - 1);
	}

	/* Dispose of the unused preallocated calls.  If this is a kernel
	 * service socket, let the service drop its record of each call first,
	 * then complete and release the call in the usual way. */
	head = b->call_backlog_head;
	tail = b->call_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_call *call = b->call_backlog[tail];
		if (rx->discard_new_call) {
			_debug("discard %lx", call->user_call_ID);
			rx->discard_new_call(call, call->user_call_ID);
			/* drop the ref that the kernel-service notification
			 * would otherwise have consumed */
			rxrpc_put_call(call, rxrpc_call_put_kernel);
		}
		rxrpc_call_completed(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
		tail = (tail + 1) & (size - 1);
	}

	kfree(b);
}
/*
 * queue a packet for recvmsg to pass to userspace
 * - the caller must hold a lock on call->lock
 * - must not be called with interrupts disabled (sk_filter() disables BH's)
 * - eats the packet whether successful or not
 * - there must be just one reference to the packet, which the caller passes to
 *   this function
 */
int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb,
			bool force, bool terminal)
{
	struct rxrpc_skb_priv *sp;
	struct rxrpc_sock *rx = call->socket;
	struct sock *sk;
	int ret;

	_enter(",,%d,%d", force, terminal);

	/* sk_filter() disables BHs, so IRQs must be enabled here */
	ASSERT(!irqs_disabled());

	sp = rxrpc_skb(skb);
	ASSERTCMP(sp->call, ==, call);

	/* if we've already posted the terminal message for a call, then we
	 * don't post any more */
	if (test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags)) {
		_debug("already terminated");
		ASSERTCMP(call->state, >=, RXRPC_CALL_COMPLETE);
		/* drop the skb's reference on the call and discard it - the
		 * packet is "eaten" as promised in the header comment */
		skb->destructor = NULL;
		sp->call = NULL;
		rxrpc_put_call(call);
		rxrpc_free_skb(skb);
		return 0;
	}
/**
 * rxrpc_kernel_end_call - Allow a kernel service to end a call it was using
 * @sock: The socket the call is on
 * @call: The call to end
 *
 * Allow a kernel service to end a call it was using.  The call must be
 * complete before this is called (the call should be aborted if necessary).
 */
void rxrpc_kernel_end_call(struct socket *sock, struct rxrpc_call *call)
{
	_enter("%d{%d}", call->debug_id, atomic_read(&call->usage));

	/* user_mutex serialises this against other user-facing operations on
	 * the call whilst we detach it from the socket */
	mutex_lock(&call->user_mutex);
	rxrpc_release_call(rxrpc_sk(sock->sk), call);

	/* Make sure we're not going to call back into a kernel service */
	if (call->notify_rx) {
		spin_lock_bh(&call->notify_lock);
		call->notify_rx = rxrpc_dummy_notify_rx;
		spin_unlock_bh(&call->notify_lock);
	}

	mutex_unlock(&call->user_mutex);
	/* drop the kernel service's reference on the call */
	rxrpc_put_call(call, rxrpc_call_put_kernel);
}
/*
 * Rx I/O daemon
 * - kernel thread that sleeps until work is queued, then attends to any
 *   transport or call that has been placed on the krxiod work queues
 */
static int rxrpc_krxiod(void *arg)
{
	DECLARE_WAITQUEUE(krxiod, current);

	printk("Started krxiod %d\n", current->pid);

	daemonize("krxiod");

	/* loop around waiting for work to do */
	do {
		/* wait for work or to be told to exit */
		_debug("### Begin Wait");
		if (!atomic_read(&rxrpc_krxiod_qcount)) {
			set_current_state(TASK_INTERRUPTIBLE);

			add_wait_queue(&rxrpc_krxiod_sleepq, &krxiod);

			for (;;) {
				/* re-set state each pass: schedule() returns
				 * in TASK_RUNNING */
				set_current_state(TASK_INTERRUPTIBLE);
				if (atomic_read(&rxrpc_krxiod_qcount) ||
				    rxrpc_krxiod_die ||
				    signal_pending(current))
					break;

				schedule();
			}

			remove_wait_queue(&rxrpc_krxiod_sleepq, &krxiod);
			set_current_state(TASK_RUNNING);
		}
		_debug("### End Wait");

		/* do work if been given some to do */
		_debug("### Begin Work");

		/* see if there's a transport in need of attention */
		if (!list_empty(&rxrpc_krxiod_transportq)) {
			struct rxrpc_transport *trans = NULL;

			/* recheck under the lock - the unlocked peek above is
			 * only an optimisation */
			spin_lock_irq(&rxrpc_krxiod_transportq_lock);

			if (!list_empty(&rxrpc_krxiod_transportq)) {
				trans = list_entry(
					rxrpc_krxiod_transportq.next,
					struct rxrpc_transport,
					krxiodq_link);

				list_del_init(&trans->krxiodq_link);
				atomic_dec(&rxrpc_krxiod_qcount);

				/* make sure it hasn't gone away and doesn't go
				 * away */
				if (atomic_read(&trans->usage) > 0)
					rxrpc_get_transport(trans);
				else
					trans = NULL;
			}

			spin_unlock_irq(&rxrpc_krxiod_transportq_lock);

			if (trans) {
				rxrpc_trans_receive_packet(trans);
				rxrpc_put_transport(trans);
			}
		}

		/* see if there's a call in need of attention */
		if (!list_empty(&rxrpc_krxiod_callq)) {
			struct rxrpc_call *call = NULL;

			spin_lock_irq(&rxrpc_krxiod_callq_lock);

			if (!list_empty(&rxrpc_krxiod_callq)) {
				call = list_entry(rxrpc_krxiod_callq.next,
						  struct rxrpc_call,
						  rcv_krxiodq_lk);
				list_del_init(&call->rcv_krxiodq_lk);
				atomic_dec(&rxrpc_krxiod_qcount);

				/* make sure it hasn't gone away and doesn't go
				 * away */
				if (atomic_read(&call->usage) > 0) {
					_debug("@@@ KRXIOD"
					       " Begin Attend Call %p", call);
					rxrpc_get_call(call);
				} else {
					call = NULL;
				}
			}

			spin_unlock_irq(&rxrpc_krxiod_callq_lock);

			if (call) {
				rxrpc_call_do_stuff(call);
				rxrpc_put_call(call);
				_debug("@@@ KRXIOD End Attend Call %p", call);
			}
		}
/*
 * receive a message from an RxRPC socket
 * - we need to be careful about two or more threads calling recvmsg
 *   simultaneously
 */
int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
		  struct msghdr *msg, size_t len, int flags)
{
	struct rxrpc_skb_priv *sp;
	struct rxrpc_call *call = NULL, *continue_call = NULL;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct sk_buff *skb;
	long timeo;
	int copy, ret, ullen, offset, copied = 0;
	u32 abort_code;

	DEFINE_WAIT(wait);

	_enter(",,,%zu,%d", len, flags);

	if (flags & (MSG_OOB | MSG_TRUNC))
		return -EOPNOTSUPP;

	/* size of the user call ID delivered via cmsg: 32 bits for compat
	 * tasks, an unsigned long for native tasks */
	ullen = msg->msg_flags & MSG_CMSG_COMPAT ? 4 : sizeof(unsigned long);

	timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);
	msg->msg_flags |= MSG_MORE;

	lock_sock(&rx->sk);

	for (;;) {
		/* return immediately if a client socket has no outstanding
		 * calls */
		if (RB_EMPTY_ROOT(&rx->calls)) {
			if (copied)
				goto out;
			if (rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
				release_sock(&rx->sk);
				if (continue_call)
					rxrpc_put_call(continue_call);
				return -ENODATA;
			}
		}

		/* get the next message on the Rx queue */
		skb = skb_peek(&rx->sk.sk_receive_queue);
		if (!skb) {
			/* nothing remains on the queue */
			if (copied &&
			    (msg->msg_flags & MSG_PEEK || timeo == 0))
				goto out;

			/* wait for a message to turn up */
			release_sock(&rx->sk);
			prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
						  TASK_INTERRUPTIBLE);
			ret = sock_error(&rx->sk);
			if (ret)
				goto wait_error;

			if (skb_queue_empty(&rx->sk.sk_receive_queue)) {
				if (signal_pending(current))
					goto wait_interrupted;
				timeo = schedule_timeout(timeo);
			}
			finish_wait(sk_sleep(&rx->sk), &wait);
			lock_sock(&rx->sk);
			continue;
		}

	peek_next_packet:
		sp = rxrpc_skb(skb);
		call = sp->call;
		ASSERT(call != NULL);

		_debug("next pkt %s", rxrpc_pkts[sp->hdr.type]);

		/* make sure we wait for the state to be updated in this call */
		spin_lock_bh(&call->lock);
		spin_unlock_bh(&call->lock);

		if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) {
			_debug("packet from released call");
			if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
				BUG();
			rxrpc_free_skb(skb);
			continue;
		}

		/* determine whether to continue last data receive: we never
		 * mix packets from different calls, or a data packet with a
		 * non-data one, in a single recvmsg() */
		if (continue_call) {
			_debug("maybe cont");
			if (call != continue_call ||
			    skb->mark != RXRPC_SKB_MARK_DATA) {
				release_sock(&rx->sk);
				rxrpc_put_call(continue_call);
				_leave(" = %d [noncont]", copied);
				return copied;
			}
		}

		rxrpc_get_call(call);

		/* copy the peer address and timestamp */
		if (!continue_call) {
			if (msg->msg_name) {
				/* NOTE: this 'len' deliberately(?) shadows the
				 * parameter 'len' - confirm this is intended */
				size_t len =
					sizeof(call->conn->trans->peer->srx);
				memcpy(msg->msg_name,
				       &call->conn->trans->peer->srx, len);
				msg->msg_namelen = len;
			}
			sock_recv_ts_and_drops(msg, &rx->sk, skb);
		}

		/* receive the message */
		if (skb->mark != RXRPC_SKB_MARK_DATA)
			goto receive_non_data_message;

		_debug("recvmsg DATA #%u { %d, %d }",
		       ntohl(sp->hdr.seq), skb->len, sp->offset);

		if (!continue_call) {
			/* only set the control data once per recvmsg() */
			ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
				       ullen, &call->user_call_ID);
			if (ret < 0)
				goto copy_error;
			ASSERT(test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
		}

		/* data packets must arrive in sequence; a packet may be
		 * revisited (same seq) when consumed piecemeal */
		ASSERTCMP(ntohl(sp->hdr.seq), >=, call->rx_data_recv);
		ASSERTCMP(ntohl(sp->hdr.seq), <=, call->rx_data_recv + 1);
		call->rx_data_recv = ntohl(sp->hdr.seq);

		ASSERTCMP(ntohl(sp->hdr.seq), >, call->rx_data_eaten);

		offset = sp->offset;
		copy = skb->len - offset;
		if (copy > len - copied)
			copy = len - copied;

		ret = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, copy);

		if (ret < 0)
			goto copy_error;

		/* handle piecemeal consumption of data packets */
		_debug("copied %d+%d", copy, copied);

		offset += copy;
		copied += copy;

		/* peeking must not advance the consumption point */
		if (!(flags & MSG_PEEK))
			sp->offset = offset;

		if (sp->offset < skb->len) {
			_debug("buffer full");
			ASSERTCMP(copied, ==, len);
			break;
		}

		/* we transferred the whole data packet */
		if (sp->hdr.flags & RXRPC_LAST_PACKET) {
			_debug("last");
			if (call->conn->out_clientflag) {
				/* last byte of reply received */
				ret = copied;
				goto terminal_message;
			}

			/* last bit of request received */
			if (!(flags & MSG_PEEK)) {
				_debug("eat packet");
				if (skb_dequeue(&rx->sk.sk_receive_queue) !=
				    skb)
					BUG();
				rxrpc_free_skb(skb);
			}
			msg->msg_flags &= ~MSG_MORE;
			break;
		}

		/* move on to the next data message */
		_debug("next");
		if (!continue_call)
			continue_call = sp->call;
		else
			rxrpc_put_call(call);
		call = NULL;

		if (flags & MSG_PEEK) {
			_debug("peek next");
			skb = skb->next;
			if (skb == (struct sk_buff *) &rx->sk.sk_receive_queue)
				break;
			goto peek_next_packet;
		}

		_debug("eat packet");
		if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
			BUG();
		rxrpc_free_skb(skb);
	}
/*
 * set up a call for the given data
 * - called in process context with IRQs enabled
 * - returns the call with an extra reference held, or an ERR_PTR()
 */
struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_transport *trans,
					 struct rxrpc_conn_bundle *bundle,
					 unsigned long user_call_ID,
					 int create,
					 gfp_t gfp)
{
	struct rxrpc_call *call, *candidate;
	struct rb_node *p, *parent, **pp;

	_enter("%p,%d,%d,%lx,%d",
	       rx, trans ? trans->debug_id : -1, bundle ? bundle->debug_id : -1,
	       user_call_ID, create);

	/* search the extant calls first for one that matches the specified
	 * user ID */
	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);

	if (!create || !trans)
		return ERR_PTR(-EBADSLT);

	/* not yet present - create a candidate for a new record and then
	 * redo the search */
	candidate = rxrpc_alloc_client_call(rx, trans, bundle, gfp);
	if (IS_ERR(candidate)) {
		_leave(" = %ld", PTR_ERR(candidate));
		return candidate;
	}

	candidate->user_call_ID = user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &candidate->flags);

	/* the tree must be re-searched under the write lock as another thread
	 * may have inserted the same ID after we dropped the read lock */
	write_lock(&rx->call_lock);

	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_second;
	}

	/* second search also failed; add the new call */
	call = candidate;
	candidate = NULL;
	rxrpc_get_call(call);

	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);

	/* also chain the new call onto the global call list */
	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

	/* we found the call in the list immediately */
found_extant_call:
	rxrpc_get_call(call);
	read_unlock(&rx->call_lock);
	_leave(" = %p [extant %d]", call, atomic_read(&call->usage));
	return call;

	/* we found the call on the second time through the list; ditch the
	 * unneeded candidate */
found_extant_second:
	rxrpc_get_call(call);
	write_unlock(&rx->call_lock);
	rxrpc_put_call(candidate);
	_leave(" = %p [second %d]", call, atomic_read(&call->usage));
	return call;
}
/*
 * look up a filename in a directory on an AFS fileserver, fetching the status
 * of both the target vnode and the directory
 *
 * NOTE(review): this function uses 'fid', 'dirfid' and 'callback' which are
 * not declared in the parameter list or locals as shown here - presumably
 * they were parameters (or aliases such as &dir->fid / &vnode->fid) that have
 * gone missing; confirm against the original fs/afs sources before building.
 */
int afs_rxfs_lookup(struct afs_server *server,
		    struct afs_vnode *dir,
		    const char *filename,
		    struct afs_vnode *vnode,
		    struct afs_volsync *volsync)
{
	struct rxrpc_connection *conn;
	struct rxrpc_call *call;
	struct kvec piov[3];
	size_t sent;
	int ret;
	u32 *bp, zero;

	DECLARE_WAITQUEUE(myself, current);

	kenter("%p,{%u,%u,%u},%s",
	       server, fid->vid, fid->vnode, fid->unique, filename);

	/* get hold of the fileserver connection */
	ret = afs_server_get_fsconn(server, &conn);
	if (ret < 0)
		goto out;

	/* create a call through that connection */
	ret = rxrpc_create_call(conn, NULL, NULL, afs_rxfs_aemap, &call);
	if (ret < 0) {
		printk("kAFS: Unable to create call: %d\n", ret);
		goto out_put_conn;
	}
	call->app_opcode = FSLOOKUP;

	/* we want to get event notifications from the call */
	add_wait_queue(&call->waitq, &myself);

	/* marshall the parameters: opcode + dir FID + name length, then the
	 * name itself, then zero padding to a 4-byte boundary */
	bp = rxrpc_call_alloc_scratch(call, 20);

	zero = 0;

	piov[0].iov_len = 20;
	piov[0].iov_base = bp;
	piov[1].iov_len = strlen(filename);
	piov[1].iov_base = (char *) filename;
	piov[2].iov_len = (4 - (piov[1].iov_len & 3)) & 3;
	piov[2].iov_base = &zero;

	*bp++ = htonl(FSLOOKUP);
	*bp++ = htonl(dirfid->vid);
	*bp++ = htonl(dirfid->vnode);
	*bp++ = htonl(dirfid->unique);
	*bp++ = htonl(piov[1].iov_len);

	/* send the parameters to the server */
	ret = rxrpc_call_write_data(call, 3, piov, RXRPC_LAST_PACKET, GFP_NOFS,
				    0, &sent);
	if (ret < 0)
		goto abort;

	/* wait for the reply to completely arrive */
	bp = rxrpc_call_alloc_scratch(call, 220);

	ret = rxrpc_call_read_data(call, bp, 220,
				   RXRPC_CALL_READ_BLOCK |
				   RXRPC_CALL_READ_ALL);
	if (ret < 0) {
		if (ret == -ECONNABORTED) {
			ret = call->app_errno;
			goto out_unwait;
		}
		goto abort;
	}

	/* unmarshall the reply: the target's FID and status... */
	fid->vid = ntohl(*bp++);
	fid->vnode = ntohl(*bp++);
	fid->unique = ntohl(*bp++);

	vnode->status.if_version = ntohl(*bp++);
	vnode->status.type = ntohl(*bp++);
	vnode->status.nlink = ntohl(*bp++);
	vnode->status.size = ntohl(*bp++);
	vnode->status.version = ntohl(*bp++);
	vnode->status.author = ntohl(*bp++);
	vnode->status.owner = ntohl(*bp++);
	vnode->status.caller_access = ntohl(*bp++);
	vnode->status.anon_access = ntohl(*bp++);
	vnode->status.mode = ntohl(*bp++);
	vnode->status.parent.vid = dirfid->vid;
	vnode->status.parent.vnode = ntohl(*bp++);
	vnode->status.parent.unique = ntohl(*bp++);
	bp++; /* seg size */
	vnode->status.mtime_client = ntohl(*bp++);
	vnode->status.mtime_server = ntohl(*bp++);
	bp++; /* group */
	bp++; /* sync counter */
	vnode->status.version |= ((unsigned long long) ntohl(*bp++)) << 32;
	bp++; /* spare2 */
	bp++; /* spare3 */
	bp++; /* spare4 */

	/* ... then the directory's status ... */
	dir->status.if_version = ntohl(*bp++);
	dir->status.type = ntohl(*bp++);
	dir->status.nlink = ntohl(*bp++);
	dir->status.size = ntohl(*bp++);
	dir->status.version = ntohl(*bp++);
	dir->status.author = ntohl(*bp++);
	dir->status.owner = ntohl(*bp++);
	dir->status.caller_access = ntohl(*bp++);
	dir->status.anon_access = ntohl(*bp++);
	dir->status.mode = ntohl(*bp++);
	dir->status.parent.vid = dirfid->vid;
	dir->status.parent.vnode = ntohl(*bp++);
	dir->status.parent.unique = ntohl(*bp++);
	bp++; /* seg size */
	dir->status.mtime_client = ntohl(*bp++);
	dir->status.mtime_server = ntohl(*bp++);
	bp++; /* group */
	bp++; /* sync counter */
	dir->status.version |= ((unsigned long long) ntohl(*bp++)) << 32;
	bp++; /* spare2 */
	bp++; /* spare3 */
	bp++; /* spare4 */

	/* ... then the callback promise and optional volume sync info */
	callback->fid = *fid;
	callback->version = ntohl(*bp++);
	callback->expiry = ntohl(*bp++);
	callback->type = ntohl(*bp++);

	if (volsync) {
		volsync->creation = ntohl(*bp++);
		bp++; /* spare2 */
		bp++; /* spare3 */
		bp++; /* spare4 */
		bp++; /* spare5 */
		bp++; /* spare6 */
	}

	/* success */
	ret = 0;

out_unwait:
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&call->waitq, &myself);
	rxrpc_put_call(call);
out_put_conn:
	afs_server_release_fsconn(server, conn);
out:
	kleave("");
	return ret;

abort:
	set_current_state(TASK_UNINTERRUPTIBLE);
	rxrpc_call_abort(call, ret);
	schedule();
	goto out_unwait;
} /* end afs_rxfs_lookup() */
/*
 * ask the AFS fileserver to discard a callback request on a file
 */
int afs_rxfs_give_up_callback(struct afs_server *server,
			      struct afs_vnode *vnode)
{
	struct afs_server_callslot callslot;
	struct rxrpc_call *call;
	struct kvec piov[1];
	size_t sent;
	int ret;
	u32 *bp;

	DECLARE_WAITQUEUE(myself, current);

	_enter("%p,{%u,%u,%u}",
	       server, vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique);

	/* get hold of the fileserver connection */
	ret = afs_server_request_callslot(server, &callslot);
	if (ret < 0)
		goto out;

	/* create a call through that connection */
	ret = rxrpc_create_call(callslot.conn, NULL, NULL, afs_rxfs_aemap,
				&call);
	if (ret < 0) {
		printk("kAFS: Unable to create call: %d\n", ret);
		goto out_put_conn;
	}
	call->app_opcode = FSGIVEUPCALLBACKS;

	/* we want to get event notifications from the call */
	add_wait_queue(&call->waitq, &myself);

	/* marshall the parameters: opcode, a one-entry FID array and a
	 * one-entry callback array */
	bp = rxrpc_call_alloc_scratch(call, (1 + 4 + 4) * 4);

	piov[0].iov_len = (1 + 4 + 4) * 4;
	piov[0].iov_base = bp;

	*bp++ = htonl(FSGIVEUPCALLBACKS);
	*bp++ = htonl(1); /* FID count */
	*bp++ = htonl(vnode->fid.vid);
	*bp++ = htonl(vnode->fid.vnode);
	*bp++ = htonl(vnode->fid.unique);
	*bp++ = htonl(1); /* callback count */
	*bp++ = htonl(vnode->cb_version);
	*bp++ = htonl(vnode->cb_expiry);
	*bp++ = htonl(vnode->cb_type);

	/* send the parameters to the server */
	ret = rxrpc_call_write_data(call, 1, piov, RXRPC_LAST_PACKET, GFP_NOFS,
				    0, &sent);
	if (ret < 0)
		goto abort;

	/* wait for the reply to completely arrive */
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (call->app_call_state != RXRPC_CSTATE_CLNT_RCV_REPLY ||
		    signal_pending(current))
			break;
		schedule();
	}
	set_current_state(TASK_RUNNING);

	/* abort the call if we got interrupted */
	ret = -EINTR;
	if (signal_pending(current))
		goto abort;

	switch (call->app_call_state) {
	case RXRPC_CSTATE_ERROR:
		/* the call was aborted with an error */
		ret = call->app_errno;
		goto out_unwait;

	case RXRPC_CSTATE_CLNT_GOT_REPLY:
		/* the reply carries no interesting payload */
		ret = 0;
		goto out_unwait;

	default:
		BUG();
	}

out_unwait:
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&call->waitq, &myself);
	rxrpc_put_call(call);
out_put_conn:
	afs_server_release_callslot(server, &callslot);
out:
	_leave("");
	return ret;

abort:
	set_current_state(TASK_UNINTERRUPTIBLE);
	rxrpc_call_abort(call, ret);
	schedule();
	goto out_unwait;
} /* end afs_rxfs_give_up_callback() */
/*
 * ask the AFS fileserver for the name of its root volume
 * - the reply is read but (see the #if 0 block) never unmarshalled into the
 *   caller's buffer
 */
int afs_rxfs_get_root_volume(struct afs_server *server,
			     char *buf, size_t *buflen)
{
	struct rxrpc_connection *conn;
	struct rxrpc_call *call;
	struct kvec piov[2];
	size_t sent;
	int ret;
	u32 param[1];

	DECLARE_WAITQUEUE(myself, current);

	kenter("%p,%p,%u", server, buf, *buflen);

	/* get hold of the fileserver connection */
	ret = afs_server_get_fsconn(server, &conn);
	if (ret < 0)
		goto out;

	/* create a call through that connection */
	ret = rxrpc_create_call(conn, NULL, NULL, afs_rxfs_aemap, &call);
	if (ret < 0) {
		printk("kAFS: Unable to create call: %d\n", ret);
		goto out_put_conn;
	}
	call->app_opcode = FSGETROOTVOLUME;

	/* we want to get event notifications from the call */
	add_wait_queue(&call->waitq, &myself);

	/* marshall the parameters - just the opcode */
	param[0] = htonl(FSGETROOTVOLUME);

	piov[0].iov_len = sizeof(param);
	piov[0].iov_base = param;

	/* send the parameters to the server */
	ret = rxrpc_call_write_data(call, 1, piov, RXRPC_LAST_PACKET, GFP_NOFS,
				    0, &sent);
	if (ret < 0)
		goto abort;

	/* wait for the reply to completely arrive */
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (call->app_call_state != RXRPC_CSTATE_CLNT_RCV_REPLY ||
		    signal_pending(current))
			break;
		schedule();
	}
	set_current_state(TASK_RUNNING);

	/* abort the call if we got interrupted */
	ret = -EINTR;
	if (signal_pending(current))
		goto abort;

	switch (call->app_call_state) {
	case RXRPC_CSTATE_ERROR:
		/* the call was aborted with an error */
		ret = call->app_errno;
		kdebug("Got Error: %d", ret);
		goto out_unwait;

	case RXRPC_CSTATE_CLNT_GOT_REPLY:
		/* read the reply */
		kdebug("Got Reply: qty=%d", call->app_ready_qty);

		ret = -EBADMSG;
		if (call->app_ready_qty <= 4)
			goto abort;

		/* drain the reply; NULL buffer means discard */
		ret = rxrpc_call_read_data(call, NULL, call->app_ready_qty, 0);
		if (ret < 0)
			goto abort;

#if 0
		/* unmarshall the reply */
		bp = buffer;
		for (loop = 0; loop < 65; loop++)
			entry->name[loop] = ntohl(*bp++);
		entry->name[64] = 0;

		entry->type = ntohl(*bp++);
		entry->num_servers = ntohl(*bp++);

		for (loop = 0; loop < 8; loop++)
			entry->servers[loop].addr.s_addr = *bp++;

		for (loop = 0; loop < 8; loop++)
			entry->servers[loop].partition = ntohl(*bp++);

		for (loop = 0; loop < 8; loop++)
			entry->servers[loop].flags = ntohl(*bp++);

		for (loop = 0; loop < 3; loop++)
			entry->volume_ids[loop] = ntohl(*bp++);

		entry->clone_id = ntohl(*bp++);
		entry->flags = ntohl(*bp);
#endif

		/* success */
		ret = 0;
		goto out_unwait;

	default:
		BUG();
	}

abort:
	set_current_state(TASK_UNINTERRUPTIBLE);
	rxrpc_call_abort(call, ret);
	schedule();
out_unwait:
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&call->waitq, &myself);
	rxrpc_put_call(call);
out_put_conn:
	afs_server_release_fsconn(server, conn);
out:
	kleave("");
	return ret;
} /* end afs_rxfs_get_root_volume() */
/*
 * fetch the contents of a file or directory
 */
int afs_rxfs_fetch_file_data(struct afs_server *server,
			     struct afs_vnode *vnode,
			     struct afs_rxfs_fetch_descriptor *desc,
			     struct afs_volsync *volsync)
{
	struct afs_server_callslot callslot;
	struct rxrpc_call *call;
	struct kvec piov[1];
	size_t sent;
	int ret;
	u32 *bp;

	DECLARE_WAITQUEUE(myself, current);

	_enter("%p,{fid={%u,%u,%u},sz=%Zu,of=%lu}",
	       server,
	       desc->fid.vid,
	       desc->fid.vnode,
	       desc->fid.unique,
	       desc->size,
	       desc->offset);

	/* get hold of the fileserver connection */
	ret = afs_server_request_callslot(server, &callslot);
	if (ret < 0)
		goto out;

	/* create a call through that connection */
	ret = rxrpc_create_call(callslot.conn, NULL, NULL, afs_rxfs_aemap,
				&call);
	if (ret < 0) {
		printk("kAFS: Unable to create call: %d\n", ret);
		goto out_put_conn;
	}
	call->app_opcode = FSFETCHDATA;

	/* we want to get event notifications from the call */
	add_wait_queue(&call->waitq, &myself);

	/* marshall the parameters: opcode, FID, offset and size */
	bp = rxrpc_call_alloc_scratch(call, 24);
	bp[0] = htonl(FSFETCHDATA);
	bp[1] = htonl(desc->fid.vid);
	bp[2] = htonl(desc->fid.vnode);
	bp[3] = htonl(desc->fid.unique);
	bp[4] = htonl(desc->offset);
	bp[5] = htonl(desc->size);

	piov[0].iov_len = 24;
	piov[0].iov_base = bp;

	/* send the parameters to the server */
	ret = rxrpc_call_write_data(call, 1, piov, RXRPC_LAST_PACKET, GFP_NOFS,
				    0, &sent);
	if (ret < 0)
		goto abort;

	/* wait for the data count to arrive */
	ret = rxrpc_call_read_data(call, bp, 4, RXRPC_CALL_READ_BLOCK);
	if (ret < 0)
		goto read_failed;

	desc->actual = ntohl(bp[0]);
	if (desc->actual != desc->size) {
		/* the server returned a different amount than requested */
		ret = -EBADMSG;
		goto abort;
	}

	/* call the app to read the actual data */
	rxrpc_call_reset_scratch(call);

	ret = rxrpc_call_read_data(call, desc->buffer, desc->actual,
				   RXRPC_CALL_READ_BLOCK);
	if (ret < 0)
		goto read_failed;

	/* wait for the rest of the reply to completely arrive */
	rxrpc_call_reset_scratch(call);
	bp = rxrpc_call_alloc_scratch(call, 120);

	ret = rxrpc_call_read_data(call, bp, 120,
				   RXRPC_CALL_READ_BLOCK |
				   RXRPC_CALL_READ_ALL);
	if (ret < 0)
		goto read_failed;

	/* unmarshall the reply: vnode status, then the callback promise and
	 * optional volume sync info */
	vnode->status.if_version = ntohl(*bp++);
	vnode->status.type = ntohl(*bp++);
	vnode->status.nlink = ntohl(*bp++);
	vnode->status.size = ntohl(*bp++);
	vnode->status.version = ntohl(*bp++);
	vnode->status.author = ntohl(*bp++);
	vnode->status.owner = ntohl(*bp++);
	vnode->status.caller_access = ntohl(*bp++);
	vnode->status.anon_access = ntohl(*bp++);
	vnode->status.mode = ntohl(*bp++);
	vnode->status.parent.vid = desc->fid.vid;
	vnode->status.parent.vnode = ntohl(*bp++);
	vnode->status.parent.unique = ntohl(*bp++);
	bp++; /* seg size */
	vnode->status.mtime_client = ntohl(*bp++);
	vnode->status.mtime_server = ntohl(*bp++);
	bp++; /* group */
	bp++; /* sync counter */
	vnode->status.version |= ((unsigned long long) ntohl(*bp++)) << 32;
	bp++; /* spare2 */
	bp++; /* spare3 */
	bp++; /* spare4 */

	vnode->cb_version = ntohl(*bp++);
	vnode->cb_expiry = ntohl(*bp++);
	vnode->cb_type = ntohl(*bp++);

	if (volsync) {
		volsync->creation = ntohl(*bp++);
		bp++; /* spare2 */
		bp++; /* spare3 */
		bp++; /* spare4 */
		bp++; /* spare5 */
		bp++; /* spare6 */
	}

	/* success */
	ret = 0;

out_unwait:
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&call->waitq, &myself);
	rxrpc_put_call(call);
out_put_conn:
	afs_server_release_callslot(server, &callslot);
out:
	_leave(" = %d", ret);
	return ret;

read_failed:
	if (ret == -ECONNABORTED) {
		/* the call was aborted - pick up the server's error */
		ret = call->app_errno;
		goto out_unwait;
	}

abort:
	set_current_state(TASK_UNINTERRUPTIBLE);
	rxrpc_call_abort(call, ret);
	schedule();
	goto out_unwait;
} /* end afs_rxfs_fetch_file_data() */
/*
 * ask the AFS fileserver for information about a volume by name
 *
 * NOTE(review): the nservers > 8 validity check is performed after all eight
 * server address slots have already been unmarshalled - presumably harmless
 * as the buffer is fixed-size, but confirm the intent.
 */
int afs_rxfs_get_volume_info(struct afs_server *server,
			     const char *name,
			     struct afs_volume_info *vinfo)
{
	struct rxrpc_connection *conn;
	struct rxrpc_call *call;
	struct kvec piov[3];
	size_t sent;
	int ret;
	u32 param[2], *bp, zero;

	DECLARE_WAITQUEUE(myself, current);

	_enter("%p,%s,%p", server, name, vinfo);

	/* get hold of the fileserver connection */
	ret = afs_server_get_fsconn(server, &conn);
	if (ret < 0)
		goto out;

	/* create a call through that connection */
	ret = rxrpc_create_call(conn, NULL, NULL, afs_rxfs_aemap, &call);
	if (ret < 0) {
		printk("kAFS: Unable to create call: %d\n", ret);
		goto out_put_conn;
	}
	call->app_opcode = FSGETVOLUMEINFO;

	/* we want to get event notifications from the call */
	add_wait_queue(&call->waitq, &myself);

	/* marshall the parameters: opcode + name length, the name itself,
	 * then zero padding to a 4-byte boundary */
	piov[1].iov_len = strlen(name);
	piov[1].iov_base = (char *) name;

	zero = 0;
	piov[2].iov_len = (4 - (piov[1].iov_len & 3)) & 3;
	piov[2].iov_base = &zero;

	param[0] = htonl(FSGETVOLUMEINFO);
	param[1] = htonl(piov[1].iov_len);

	piov[0].iov_len = sizeof(param);
	piov[0].iov_base = param;

	/* send the parameters to the server */
	ret = rxrpc_call_write_data(call, 3, piov, RXRPC_LAST_PACKET, GFP_NOFS,
				    0, &sent);
	if (ret < 0)
		goto abort;

	/* wait for the reply to completely arrive */
	bp = rxrpc_call_alloc_scratch(call, 64);

	ret = rxrpc_call_read_data(call, bp, 64,
				   RXRPC_CALL_READ_BLOCK |
				   RXRPC_CALL_READ_ALL);
	if (ret < 0) {
		if (ret == -ECONNABORTED) {
			ret = call->app_errno;
			goto out_unwait;
		}
		goto abort;
	}

	/* unmarshall the reply */
	vinfo->vid = ntohl(*bp++);
	vinfo->type = ntohl(*bp++);

	vinfo->type_vids[0] = ntohl(*bp++);
	vinfo->type_vids[1] = ntohl(*bp++);
	vinfo->type_vids[2] = ntohl(*bp++);
	vinfo->type_vids[3] = ntohl(*bp++);
	vinfo->type_vids[4] = ntohl(*bp++);

	vinfo->nservers = ntohl(*bp++);
	/* server addresses are already in network byte order - no ntohl */
	vinfo->servers[0].addr.s_addr = *bp++;
	vinfo->servers[1].addr.s_addr = *bp++;
	vinfo->servers[2].addr.s_addr = *bp++;
	vinfo->servers[3].addr.s_addr = *bp++;
	vinfo->servers[4].addr.s_addr = *bp++;
	vinfo->servers[5].addr.s_addr = *bp++;
	vinfo->servers[6].addr.s_addr = *bp++;
	vinfo->servers[7].addr.s_addr = *bp++;

	ret = -EBADMSG;
	if (vinfo->nservers > 8)
		goto abort;

	/* success */
	ret = 0;

out_unwait:
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&call->waitq, &myself);
	rxrpc_put_call(call);
out_put_conn:
	afs_server_release_fsconn(server, conn);
out:
	_leave("");
	return ret;

abort:
	set_current_state(TASK_UNINTERRUPTIBLE);
	rxrpc_call_abort(call, ret);
	schedule();
	goto out_unwait;
} /* end afs_rxfs_get_volume_info() */
/*
 * AFS Cache Manager kernel thread
 * - sleeps until a call is queued for attention, then dispatches it to the
 *   handler function recorded in call->app_user
 */
static int kafscmd(void *arg)
{
	DECLARE_WAITQUEUE(myself, current);

	struct rxrpc_call *call;
	_SRXAFSCM_xxxx_t func;
	int die;

	printk("kAFS: Started kafscmd %d\n", current->pid);

	daemonize("kafscmd");

	complete(&kafscmd_alive);

	/* loop around looking for things to attend to */
	do {
		/* sleep until there is work, a signal, or an exit request */
		if (list_empty(&kafscmd_attention_list)) {
			set_current_state(TASK_INTERRUPTIBLE);
			add_wait_queue(&kafscmd_sleepq, &myself);

			for (;;) {
				/* re-set state each pass: schedule() returns
				 * in TASK_RUNNING */
				set_current_state(TASK_INTERRUPTIBLE);
				if (!list_empty(&kafscmd_attention_list) ||
				    signal_pending(current) ||
				    kafscmd_die)
					break;

				schedule();
			}

			remove_wait_queue(&kafscmd_sleepq, &myself);
			set_current_state(TASK_RUNNING);
		}

		die = kafscmd_die;

		/* dequeue the next call requiring attention; a pending call
		 * defers the exit (die is reset to 0) until the list drains */
		call = NULL;
		spin_lock(&kafscmd_attention_lock);

		if (!list_empty(&kafscmd_attention_list)) {
			call = list_entry(kafscmd_attention_list.next,
					  struct rxrpc_call,
					  app_attn_link);
			list_del_init(&call->app_attn_link);
			die = 0;
		}

		spin_unlock(&kafscmd_attention_lock);

		if (call) {
			/* act upon it */
			_debug("@@@ Begin Attend Call %p", call);

			func = call->app_user;
			if (func)
				func(call);

			rxrpc_put_call(call);

			_debug("@@@ End Attend Call %p", call);
		}

	} while(!die);
/*
 * send a message forming part of a client call through an RxRPC socket
 * - caller holds the socket locked
 * - the socket may be either a client socket or a server socket
 */
int rxrpc_client_sendmsg(struct kiocb *iocb, struct rxrpc_sock *rx,
			 struct rxrpc_transport *trans, struct msghdr *msg,
			 size_t len)
{
	struct rxrpc_conn_bundle *bundle = NULL;
	struct rxrpc_call *call;
	enum rxrpc_command cmd;
	unsigned long user_call_ID = 0;
	struct key *key;
	__be16 service_id;
	u32 abort_code = 0;
	int ret;

	_enter("");

	ASSERT(trans != NULL);

	/* pull the command, user call ID and abort code out of the attached
	 * control messages */
	ret = rxrpc_sendmsg_cmsg(rx, msg, &user_call_ID, &cmd, &abort_code,
				 false);
	if (ret < 0)
		return ret;

	if (trans) {
		/* the socket's default service ID may be overridden by a
		 * sockaddr supplied to sendmsg() */
		service_id = rx->service_id;
		if (msg->msg_name) {
			struct sockaddr_rxrpc *srx = msg->msg_name;
			service_id = htons(srx->srx_service);
		}

		/* only use the socket's key if it actually has a payload */
		key = rx->key;
		if (key && !key->payload.data)
			key = NULL;

		bundle = rxrpc_get_bundle(rx, trans, key, service_id,
					  GFP_KERNEL);
		if (IS_ERR(bundle))
			return PTR_ERR(bundle);
	}

	/* find or create the call; the bundle ref is only needed whilst the
	 * lookup runs */
	call = rxrpc_get_client_call(rx, trans, bundle, user_call_ID,
				     abort_code == 0, GFP_KERNEL);
	if (trans)
		rxrpc_put_bundle(trans, bundle);
	if (IS_ERR(call)) {
		_leave(" = %ld", PTR_ERR(call));
		return PTR_ERR(call);
	}

	_debug("CALL %d USR %lx ST %d on CONN %p",
	       call->debug_id, call->user_call_ID, call->state, call->conn);

	if (call->state >= RXRPC_CALL_COMPLETE) {
		/* it's too late for this call */
		ret = -ESHUTDOWN;
		goto out_put;
	}

	switch (cmd) {
	case RXRPC_CMD_SEND_ABORT:
		rxrpc_send_abort(call, abort_code);
		break;
	case RXRPC_CMD_SEND_DATA:
		if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST) {
			/* request phase complete for this client call */
			ret = -EPROTO;
			break;
		}
		ret = rxrpc_send_data(iocb, rx, call, msg, len);
		break;
	default:
		ret = -EINVAL;
		break;
	}

out_put:
	rxrpc_put_call(call);
	_leave(" = %d", ret);
	return ret;
}
/*
 * Receive a message from an RxRPC socket (iovec-based variant).
 *
 * Loops over the socket receive queue, copying DATA packet payloads into
 * the caller's iovec.  A run of DATA packets from one call may be
 * consumed in a single recvmsg(); continue_call tracks which call that
 * run belongs to so packets from other calls terminate the run.
 *
 * NOTE(review): the error/terminal labels (out, copy_error,
 * csum_copy_error, receive_non_data_message, terminal_message,
 * wait_error, wait_interrupted) referenced below lie outside this chunk.
 */
int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
		  struct msghdr *msg, size_t len, int flags)
{
	struct rxrpc_skb_priv *sp;
	struct rxrpc_call *call = NULL, *continue_call = NULL;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct sk_buff *skb;
	long timeo;
	int copy, ret, ullen, offset, copied = 0;
	u32 abort_code;

	DEFINE_WAIT(wait);

	_enter(",,,%zu,%d", len, flags);

	if (flags & (MSG_OOB | MSG_TRUNC))
		return -EOPNOTSUPP;

	/* compat userspace expects a 32-bit user call ID in the cmsg */
	ullen = msg->msg_flags & MSG_CMSG_COMPAT ? 4 : sizeof(unsigned long);

	timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);
	msg->msg_flags |= MSG_MORE;

	lock_sock(&rx->sk);

	for (;;) {
		/* return immediately if a client socket has no outstanding
		 * calls */
		if (RB_EMPTY_ROOT(&rx->calls)) {
			if (copied)
				goto out;
			if (rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
				release_sock(&rx->sk);
				if (continue_call)
					rxrpc_put_call(continue_call);
				return -ENODATA;
			}
		}

		/* peek at the next packet; nothing is dequeued until the
		 * payload has been fully consumed */
		skb = skb_peek(&rx->sk.sk_receive_queue);
		if (!skb) {
			/* with something already copied, only wait for more
			 * if the caller allows blocking and isn't peeking */
			if (copied &&
			    (msg->msg_flags & MSG_PEEK || timeo == 0))
				goto out;

			/* wait for a message to turn up */
			release_sock(&rx->sk);
			prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
						  TASK_INTERRUPTIBLE);
			ret = sock_error(&rx->sk);
			if (ret)
				goto wait_error;

			if (skb_queue_empty(&rx->sk.sk_receive_queue)) {
				if (signal_pending(current))
					goto wait_interrupted;
				timeo = schedule_timeout(timeo);
			}
			finish_wait(sk_sleep(&rx->sk), &wait);
			lock_sock(&rx->sk);
			continue;
		}

	peek_next_packet:
		sp = rxrpc_skb(skb);
		call = sp->call;
		ASSERT(call != NULL);

		_debug("next pkt %s", rxrpc_pkts[sp->hdr.type]);

		/* lock/unlock acts as a barrier so that any state change
		 * made under call->lock is visible before we test flags */
		spin_lock_bh(&call->lock);
		spin_unlock_bh(&call->lock);

		if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) {
			/* packets from an already-released call are simply
			 * discarded */
			_debug("packet from released call");
			if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
				BUG();
			rxrpc_free_skb(skb);
			continue;
		}

		/* a run of DATA packets ends when the next packet belongs
		 * to another call or is not a DATA packet */
		if (continue_call) {
			_debug("maybe cont");
			if (call != continue_call ||
			    skb->mark != RXRPC_SKB_MARK_DATA) {
				release_sock(&rx->sk);
				rxrpc_put_call(continue_call);
				_leave(" = %d [noncont]", copied);
				return copied;
			}
		}

		rxrpc_get_call(call);

		/* copy the peer address and timestamp only for the first
		 * packet of the run */
		if (!continue_call) {
			if (msg->msg_name && msg->msg_namelen > 0)
				memcpy(msg->msg_name,
				       &call->conn->trans->peer->srx,
				       sizeof(call->conn->trans->peer->srx));
			sock_recv_ts_and_drops(msg, &rx->sk, skb);
		}

		if (skb->mark != RXRPC_SKB_MARK_DATA)
			goto receive_non_data_message;

		_debug("recvmsg DATA #%u { %d, %d }",
		       ntohl(sp->hdr.seq), skb->len, sp->offset);

		if (!continue_call) {
			/* the user call ID cmsg is emitted once per
			 * recvmsg() */
			ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
				       ullen, &call->user_call_ID);
			if (ret < 0)
				goto copy_error;
			ASSERT(test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
		}

		/* sequence numbers must advance by at most one packet */
		ASSERTCMP(ntohl(sp->hdr.seq), >=, call->rx_data_recv);
		ASSERTCMP(ntohl(sp->hdr.seq), <=, call->rx_data_recv + 1);
		call->rx_data_recv = ntohl(sp->hdr.seq);

		ASSERTCMP(ntohl(sp->hdr.seq), >, call->rx_data_eaten);

		/* sp->offset records how much of this packet was consumed
		 * by previous recvmsg() calls */
		offset = sp->offset;
		copy = skb->len - offset;
		if (copy > len - copied)
			copy = len - copied;

		if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
			ret = skb_copy_datagram_iovec(skb, offset,
						      msg->msg_iov, copy);
		} else {
			/* verify the checksum while copying */
			ret = skb_copy_and_csum_datagram_iovec(skb, offset,
							       msg->msg_iov);
			if (ret == -EINVAL)
				goto csum_copy_error;
		}

		if (ret < 0)
			goto copy_error;

		/* handle piecemeal consumption of data packets */
		_debug("copied %d+%d", copy, copied);

		offset += copy;
		copied += copy;

		/* peeking must not advance the consumption point */
		if (!(flags & MSG_PEEK))
			sp->offset = offset;

		if (sp->offset < skb->len) {
			/* the caller's buffer filled up mid-packet */
			_debug("buffer full");
			ASSERTCMP(copied, ==, len);
			break;
		}

		/* we transferred the whole data packet */
		if (sp->hdr.flags & RXRPC_LAST_PACKET) {
			_debug("last");
			if (call->conn->out_clientflag) {
				/* last byte of reply received */
				ret = copied;
				goto terminal_message;
			}

			/* last bit of request received */
			if (!(flags & MSG_PEEK)) {
				_debug("eat packet");
				if (skb_dequeue(&rx->sk.sk_receive_queue) !=
				    skb)
					BUG();
				rxrpc_free_skb(skb);
			}
			msg->msg_flags &= ~MSG_MORE;
			break;
		}

		/* move on to the next data message */
		_debug("next");
		if (!continue_call)
			continue_call = sp->call;
		else
			rxrpc_put_call(call);
		call = NULL;

		if (flags & MSG_PEEK) {
			/* walk the queue in place instead of dequeuing */
			_debug("peek next");
			skb = skb->next;
			if (skb == (struct sk_buff *) &rx->sk.sk_receive_queue)
				break;
			goto peek_next_packet;
		}

		_debug("eat packet");
		if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
			BUG();
		rxrpc_free_skb(skb);
	}
/* * Handle retransmission and deferred ACK/abort generation. */ void rxrpc_process_call(struct work_struct *work) { struct rxrpc_call *call = container_of(work, struct rxrpc_call, processor); ktime_t now; rxrpc_see_call(call); //printk("\n--------------------\n"); _enter("{%d,%s,%lx}", call->debug_id, rxrpc_call_states[call->state], call->events); recheck_state: if (test_and_clear_bit(RXRPC_CALL_EV_ABORT, &call->events)) { rxrpc_send_abort_packet(call); goto recheck_state; } if (call->state == RXRPC_CALL_COMPLETE) { del_timer_sync(&call->timer); rxrpc_notify_socket(call); goto out_put; } now = ktime_get_real(); if (ktime_before(call->expire_at, now)) { rxrpc_abort_call("EXP", call, 0, RX_CALL_TIMEOUT, ETIME); set_bit(RXRPC_CALL_EV_ABORT, &call->events); goto recheck_state; } if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events)) { if (call->ackr_reason) { rxrpc_send_ack_packet(call, false); goto recheck_state; } } if (test_and_clear_bit(RXRPC_CALL_EV_PING, &call->events)) { rxrpc_send_ack_packet(call, true); goto recheck_state; } if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events)) { rxrpc_resend(call, now); goto recheck_state; } rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now); /* other events may have been raised since we started checking */ if (call->events && call->state < RXRPC_CALL_COMPLETE) { __rxrpc_queue_call(call); goto out; } out_put: rxrpc_put_call(call, rxrpc_call_put); out: _leave(""); }
/*
 * Receive a message from an RxRPC socket
 * - we need to be careful about two or more threads calling recvmsg
 *   simultaneously
 *
 * Waits for a call to appear on the socket's recvmsg queue, dequeues it
 * (taking or inheriting a ref), emits the user call ID cmsg and peer
 * address, then copies data or termination info according to the call's
 * state.  Returns the number of bytes copied or a negative error.
 */
int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
		  int flags)
{
	struct rxrpc_call *call;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct list_head *l;
	size_t copied = 0;
	long timeo;
	int ret;

	DEFINE_WAIT(wait);

	trace_rxrpc_recvmsg(NULL, rxrpc_recvmsg_enter, 0, 0, 0, 0);

	if (flags & (MSG_OOB | MSG_TRUNC))
		return -EOPNOTSUPP;

	timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);

try_again:
	lock_sock(&rx->sk);

	/* Return immediately if a client socket has no outstanding calls */
	if (RB_EMPTY_ROOT(&rx->calls) &&
	    list_empty(&rx->recvmsg_q) &&
	    rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
		release_sock(&rx->sk);
		return -ENODATA;
	}

	if (list_empty(&rx->recvmsg_q)) {
		ret = -EWOULDBLOCK;
		if (timeo == 0) {
			/* call must be NULL for the error path's trace and
			 * put to be safe */
			call = NULL;
			goto error_no_call;
		}

		release_sock(&rx->sk);

		/* Wait for something to happen */
		prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
					  TASK_INTERRUPTIBLE);
		ret = sock_error(&rx->sk);
		if (ret)
			goto wait_error;

		if (list_empty(&rx->recvmsg_q)) {
			if (signal_pending(current))
				goto wait_interrupted;
			trace_rxrpc_recvmsg(NULL, rxrpc_recvmsg_wait,
					    0, 0, 0, 0);
			timeo = schedule_timeout(timeo);
		}
		finish_wait(sk_sleep(&rx->sk), &wait);
		/* retake the socket lock and recheck from the top */
		goto try_again;
	}

	/* Find the next call and dequeue it if we're not just peeking.  If we
	 * do dequeue it, that comes with a ref that we will need to release.
	 */
	write_lock_bh(&rx->recvmsg_lock);
	l = rx->recvmsg_q.next;
	call = list_entry(l, struct rxrpc_call, recvmsg_link);
	if (!(flags & MSG_PEEK))
		list_del_init(&call->recvmsg_link);
	else
		/* peeking leaves the call queued, so take our own ref */
		rxrpc_get_call(call, rxrpc_call_got);
	write_unlock_bh(&rx->recvmsg_lock);

	trace_rxrpc_recvmsg(call, rxrpc_recvmsg_dequeue, 0, 0, 0, 0);

	/* a released call must never still be on the recvmsg queue */
	if (test_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();

	if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		if (flags & MSG_CMSG_COMPAT) {
			/* compat userspace expects a 32-bit ID */
			unsigned int id32 = call->user_call_ID;

			ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
				       sizeof(unsigned int), &id32);
		} else {
			ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
				       sizeof(unsigned long),
				       &call->user_call_ID);
		}
		if (ret < 0)
			goto error;
	}

	/* report the peer's address if the caller asked for it */
	if (msg->msg_name) {
		size_t len = sizeof(call->conn->params.peer->srx);
		memcpy(msg->msg_name, &call->conn->params.peer->srx, len);
		msg->msg_namelen = len;
	}

	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		ret = rxrpc_recvmsg_new_call(rx, call, msg, flags);
		break;
	case RXRPC_CALL_CLIENT_RECV_REPLY:
	case RXRPC_CALL_SERVER_RECV_REQUEST:
	case RXRPC_CALL_SERVER_ACK_REQUEST:
		ret = rxrpc_recvmsg_data(sock, call, msg, &msg->msg_iter, len,
					 flags, &copied);
		if (ret == -EAGAIN)
			ret = 0;

		/* if more contiguous data is already buffered, poke the
		 * socket so the next recvmsg() is woken */
		if (after(call->rx_top, call->rx_hard_ack) &&
		    call->rxtx_buffer[(call->rx_hard_ack + 1) & RXRPC_RXTX_BUFF_MASK])
			rxrpc_notify_socket(call);
		break;
	default:
		ret = 0;
		break;
	}

	if (ret < 0)
		goto error;

	if (call->state == RXRPC_CALL_COMPLETE) {
		ret = rxrpc_recvmsg_term(call, msg);
		if (ret < 0)
			goto error;
		/* a completed, non-peeked call is released from the socket */
		if (!(flags & MSG_PEEK))
			rxrpc_release_call(rx, call);
		msg->msg_flags |= MSG_EOR;
		ret = 1;
	}

	if (ret == 0)
		msg->msg_flags |= MSG_MORE;
	else
		msg->msg_flags &= ~MSG_MORE;
	ret = copied;

error:
	rxrpc_put_call(call, rxrpc_call_put);
error_no_call:
	release_sock(&rx->sk);
	trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret);
	return ret;

wait_interrupted:
	ret = sock_intr_errno(timeo);
wait_error:
	finish_wait(sk_sleep(&rx->sk), &wait);
	call = NULL;
	goto error_no_call;
}
/*
 * Set up a new incoming call.  Called in BH context with the RCU read lock
 * held.
 *
 * If this is for a kernel service, when we allocate the call, it will have
 * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the
 * retainer ref obtained from the backlog buffer.  Prealloc calls for userspace
 * services only have the ref from the backlog buffer.  We want to pass this
 * ref to non-BH context to dispose of.
 *
 * If we want to report an error, we mark the skb with the packet type and
 * abort code and return NULL.
 *
 * The call is returned with the user access mutex held.
 */
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
					   struct rxrpc_connection *conn,
					   struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_sock *rx;
	struct rxrpc_call *call;
	u16 service_id = sp->hdr.serviceId;

	_enter("");

	/* Get the socket providing the service */
	rx = rcu_dereference(local->service);
	if (rx && (service_id == rx->srx.srx_service ||
		   service_id == rx->second_service))
		goto found_service;

	/* no socket serves this ID: reject the packet with an abort */
	trace_rxrpc_abort("INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
			  RX_INVALID_OPERATION, EOPNOTSUPP);
	skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
	skb->priority = RX_INVALID_OPERATION;
	_leave(" = NULL [service]");
	return NULL;

found_service:
	spin_lock(&rx->incoming_lock);
	if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
	    rx->sk.sk_state == RXRPC_CLOSE) {
		/* the service socket is shutting down: abort the call */
		trace_rxrpc_abort("CLS", sp->hdr.cid, sp->hdr.callNumber,
				  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
		skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
		skb->priority = RX_INVALID_OPERATION;
		_leave(" = NULL [close]");
		call = NULL;
		goto out;
	}

	/* draw a preallocated call from the backlog */
	call = rxrpc_alloc_incoming_call(rx, local, conn, skb);
	if (!call) {
		/* no prealloc available: tell the peer we're busy */
		skb->mark = RXRPC_SKB_MARK_BUSY;
		_leave(" = NULL [busy]");
		call = NULL;
		goto out;
	}

	trace_rxrpc_receive(call, rxrpc_receive_incoming,
			    sp->hdr.serial, sp->hdr.seq);

	/* Lock the call to prevent rxrpc_kernel_send/recv_data() and
	 * sendmsg()/recvmsg() inconveniently stealing the mutex once the
	 * notification is generated.
	 *
	 * The BUG should never happen because the kernel should be well
	 * behaved enough not to access the call before the first notification
	 * event and userspace is prevented from doing so until the state is
	 * appropriate.
	 */
	if (!mutex_trylock(&call->user_mutex))
		BUG();

	/* Make the call live. */
	rxrpc_incoming_call(rx, call, skb);
	/* rxrpc_incoming_call() may bind the call to a connection */
	conn = call->conn;

	if (rx->notify_new_call)
		rx->notify_new_call(&rx->sk, call, call->user_call_ID);
	else
		sk_acceptq_added(&rx->sk);

	/* set the call state according to the connection's state */
	spin_lock(&conn->state_lock);
	switch (conn->state) {
	case RXRPC_CONN_SERVICE_UNSECURED:
		/* security negotiation still required: kick off the
		 * challenge on the connection workqueue */
		conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
		set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
		rxrpc_queue_conn(call->conn);
		break;

	case RXRPC_CONN_SERVICE:
		write_lock(&call->state_lock);
		if (rx->discard_new_call)
			/* kernel services skip the accept phase */
			call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		else
			call->state = RXRPC_CALL_SERVER_ACCEPTING;
		write_unlock(&call->state_lock);
		break;

	case RXRPC_CONN_REMOTELY_ABORTED:
		rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
					  conn->remote_abort, -ECONNABORTED);
		break;
	case RXRPC_CONN_LOCALLY_ABORTED:
		rxrpc_abort_call("CON", call, sp->hdr.seq,
				 conn->local_abort, -ECONNABORTED);
		break;
	default:
		BUG();
	}
	spin_unlock(&conn->state_lock);

	if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
		rxrpc_notify_socket(call);

	/* We have to discard the prealloc queue's ref here and rely on a
	 * combination of the RCU read lock and refs held either by the socket
	 * (recvmsg queue, to-be-accepted queue or user ID tree) or the kernel
	 * service to prevent the call from being deallocated too early.
	 */
	rxrpc_put_call(call, rxrpc_call_put);

	_leave(" = %p{%d}", call, call->debug_id);
out:
	spin_unlock(&rx->incoming_lock);
	return call;
}