/*
 * send a simple reply
 */
void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
{
	struct msghdr msg;
	struct iovec iov[1];
	int n;

	_enter("");

	iov[0].iov_base = (void *) buf;
	iov[0].iov_len = len;
	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_iov = iov;
	msg.msg_iovlen = 1;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	call->state = AFS_CALL_AWAIT_ACK;
	n = rxrpc_kernel_send_data(call->rxcall, &msg, len);
	if (n >= 0) {
		_leave(" [replied]");
		return;
	}

	if (n == -ENOMEM) {
		_debug("oom");
		rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT);
	}

	rxrpc_kernel_end_call(call->rxcall);
	call->rxcall = NULL;
	call->type->destructor(call);
	afs_free_call(call);
	_leave(" [error]");
}
/*
 * perform processing on an asynchronous call
 * - on a multiple-thread workqueue this work item may try to run on several
 *   CPUs at the same time
 */
static void afs_process_async_call(struct work_struct *work)
{
	struct afs_call *call = container_of(work, struct afs_call, async_work);

	_enter("");

	if (!skb_queue_empty(&call->rx_queue))
		afs_deliver_to_call(call);

	if (call->state >= AFS_CALL_COMPLETE && call->wait_mode) {
		if (call->wait_mode->async_complete)
			call->wait_mode->async_complete(call->reply,
							call->error);
		call->reply = NULL;

		/* kill the call */
		rxrpc_kernel_end_call(call->rxcall);
		call->rxcall = NULL;
		if (call->type->destructor)
			call->type->destructor(call);

		/* we can't just delete the call because the work item may be
		 * queued */
		call->async_workfn = afs_delete_async_call;
		queue_work(afs_async_calls, &call->async_work);
	}

	_leave("");
}
/*
 * send an empty reply
 */
void afs_send_empty_reply(struct afs_call *call)
{
	struct msghdr msg;
	struct iovec iov[1];

	_enter("");

	iov[0].iov_base = NULL;
	iov[0].iov_len = 0;
	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_iov = iov;
	msg.msg_iovlen = 0;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	call->state = AFS_CALL_AWAIT_ACK;
	switch (rxrpc_kernel_send_data(call->rxcall, &msg, 0)) {
	case 0:
		_leave(" [replied]");
		return;

	case -ENOMEM:
		_debug("oom");
		rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT);
		/* fall through */
	default:
		rxrpc_kernel_end_call(call->rxcall);
		call->rxcall = NULL;
		call->type->destructor(call);
		afs_free_call(call);
		_leave(" [error]");
		return;
	}
}
/*
 * Dispose of a reference on a call.
 */
void afs_put_call(struct afs_call *call)
{
	struct afs_net *net = call->net;
	int n = atomic_dec_return(&call->usage);
	int o = atomic_read(&net->nr_outstanding_calls);

	trace_afs_call(call, afs_call_trace_put, n + 1, o,
		       __builtin_return_address(0));

	ASSERTCMP(n, >=, 0);
	if (n == 0) {
		ASSERT(!work_pending(&call->async_work));
		ASSERT(call->type->name != NULL);

		if (call->rxcall) {
			rxrpc_kernel_end_call(net->socket, call->rxcall);
			call->rxcall = NULL;
		}
		if (call->type->destructor)
			call->type->destructor(call);

		afs_put_server(call->net, call->cm_server);
		afs_put_cb_interest(call->net, call->cbi);
		kfree(call->request);

		trace_afs_call(call, afs_call_trace_free, 0, o,
			       __builtin_return_address(0));
		kfree(call);

		o = atomic_dec_return(&net->nr_outstanding_calls);
		if (o == 0)
			wake_up_var(&net->nr_outstanding_calls);
	}
}
/*
 * End a call but do not free it
 */
static void afs_end_call_nofree(struct afs_call *call)
{
	if (call->rxcall) {
		rxrpc_kernel_end_call(call->rxcall);
		call->rxcall = NULL;
	}
	if (call->type->destructor)
		call->type->destructor(call);
}
/*
 * wait synchronously for a call to complete
 */
static int afs_wait_for_call_to_complete(struct afs_call *call)
{
	struct sk_buff *skb;
	int ret;

	DECLARE_WAITQUEUE(myself, current);

	_enter("");

	add_wait_queue(&call->waitq, &myself);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* deliver any messages that are in the queue */
		if (!skb_queue_empty(&call->rx_queue)) {
			__set_current_state(TASK_RUNNING);
			afs_deliver_to_call(call);
			continue;
		}

		ret = call->error;
		if (call->state >= AFS_CALL_COMPLETE)
			break;
		ret = -EINTR;
		if (signal_pending(current))
			break;
		schedule();
	}

	remove_wait_queue(&call->waitq, &myself);
	__set_current_state(TASK_RUNNING);

	/* kill the call */
	if (call->state < AFS_CALL_COMPLETE) {
		_debug("call incomplete");
		rxrpc_kernel_abort_call(call->rxcall, RX_CALL_DEAD);
		while ((skb = skb_dequeue(&call->rx_queue)))
			afs_free_skb(skb);
	}

	_debug("call complete");
	rxrpc_kernel_end_call(call->rxcall);
	call->rxcall = NULL;
	call->type->destructor(call);
	afs_free_call(call);
	_leave(" = %d", ret);
	return ret;
}
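/*
 * For reference, an abridged sketch of the call-state values used by the
 * wait loop above and by afs_deliver_to_call() below.  Only the states
 * referenced in this section are listed; the names and the ordering
 * assumption (everything at or beyond AFS_CALL_COMPLETE is terminal, which
 * is what the "state >= AFS_CALL_COMPLETE" tests rely on) are inferred from
 * the code here, and the authoritative definition in the AFS headers
 * carries additional intermediate states.
 */
enum afs_call_state {
	AFS_CALL_AWAIT_OP_ID,	/* incoming call: awaiting op ID */
	AFS_CALL_AWAIT_REQUEST,	/* incoming call: awaiting request data */
	AFS_CALL_AWAIT_REPLY,	/* outgoing call: awaiting reply */
	AFS_CALL_AWAIT_ACK,	/* awaiting final ACK of a sent reply */
	AFS_CALL_COMPLETE,	/* successfully completed */
	AFS_CALL_BUSY,		/* server was busy */
	AFS_CALL_ABORTED,	/* call was aborted by the peer */
	AFS_CALL_ERROR,		/* call failed due to an error */
};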
/*
 * deliver messages to a call
 */
static void afs_deliver_to_call(struct afs_call *call)
{
	struct sk_buff *skb;
	bool last;
	u32 abort_code;
	int ret;

	_enter("");

	while ((call->state == AFS_CALL_AWAIT_REPLY ||
		call->state == AFS_CALL_AWAIT_OP_ID ||
		call->state == AFS_CALL_AWAIT_REQUEST ||
		call->state == AFS_CALL_AWAIT_ACK) &&
	       (skb = skb_dequeue(&call->rx_queue))) {
		switch (skb->mark) {
		case RXRPC_SKB_MARK_DATA:
			_debug("Rcv DATA");
			last = rxrpc_kernel_is_data_last(skb);
			ret = call->type->deliver(call, skb, last);
			switch (ret) {
			case 0:
				if (last &&
				    call->state == AFS_CALL_AWAIT_REPLY)
					call->state = AFS_CALL_COMPLETE;
				break;
			case -ENOTCONN:
				abort_code = RX_CALL_DEAD;
				goto do_abort;
			case -ENOTSUPP:
				abort_code = RX_INVALID_OPERATION;
				goto do_abort;
			default:
				abort_code = RXGEN_CC_UNMARSHAL;
				if (call->state != AFS_CALL_AWAIT_REPLY)
					abort_code = RXGEN_SS_UNMARSHAL;
			do_abort:
				rxrpc_kernel_abort_call(call->rxcall,
							abort_code);
				call->error = ret;
				call->state = AFS_CALL_ERROR;
				break;
			}
			afs_data_delivered(skb);
			skb = NULL;
			continue;

		case RXRPC_SKB_MARK_FINAL_ACK:
			_debug("Rcv ACK");
			call->state = AFS_CALL_COMPLETE;
			break;

		case RXRPC_SKB_MARK_BUSY:
			_debug("Rcv BUSY");
			call->error = -EBUSY;
			call->state = AFS_CALL_BUSY;
			break;

		case RXRPC_SKB_MARK_REMOTE_ABORT:
			abort_code = rxrpc_kernel_get_abort_code(skb);
			call->error = call->type->abort_to_error(abort_code);
			call->state = AFS_CALL_ABORTED;
			_debug("Rcv ABORT %u -> %d", abort_code, call->error);
			break;

		case RXRPC_SKB_MARK_NET_ERROR:
			call->error = -rxrpc_kernel_get_error_number(skb);
			call->state = AFS_CALL_ERROR;
			_debug("Rcv NET ERROR %d", call->error);
			break;

		case RXRPC_SKB_MARK_LOCAL_ERROR:
			call->error = -rxrpc_kernel_get_error_number(skb);
			call->state = AFS_CALL_ERROR;
			_debug("Rcv LOCAL ERROR %d", call->error);
			break;

		default:
			BUG();
			break;
		}

		afs_free_skb(skb);
	}

	/* make sure the queue is empty if the call is done with (we might have
	 * aborted the call early because of an unmarshalling error) */
	if (call->state >= AFS_CALL_COMPLETE) {
		while ((skb = skb_dequeue(&call->rx_queue)))
			afs_free_skb(skb);
		if (call->incoming) {
			rxrpc_kernel_end_call(call->rxcall);
			call->rxcall = NULL;
			call->type->destructor(call);
			afs_free_call(call);
		}
	}

	_leave("");
}
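/*
 * Sketch of the per-operation dispatch table that afs_deliver_to_call() and
 * the teardown paths above drive.  The member names are inferred from the
 * indirect calls made in this file (->deliver, ->abort_to_error,
 * ->destructor, ->name); the authoritative definition lives in the AFS
 * internal headers and may carry further fields.
 */
struct afs_call_type {
	/* name of the operation, for tracing and assertions */
	const char *name;

	/* deliver request or reply data to a call; a non-zero return causes
	 * the call to be aborted */
	int (*deliver)(struct afs_call *call, struct sk_buff *skb, bool last);

	/* map an rx abort code to a local error number */
	int (*abort_to_error)(u32 abort_code);

	/* clean up a call's private state before it is freed */
	void (*destructor)(struct afs_call *call);
};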
/*
 * initiate a call
 */
int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
		  const struct afs_wait_mode *wait_mode)
{
	struct sockaddr_rxrpc srx;
	struct rxrpc_call *rxcall;
	struct msghdr msg;
	struct kvec iov[1];
	struct sk_buff *skb;
	int ret;

	_enter("%x,{%d},", addr->s_addr, ntohs(call->port));

	ASSERT(call->type != NULL);
	ASSERT(call->type->name != NULL);

	_debug("____MAKE %p{%s,%x} [%d]____",
	       call, call->type->name, key_serial(call->key),
	       atomic_read(&afs_outstanding_calls));

	call->wait_mode = wait_mode;
	INIT_WORK(&call->async_work, afs_process_async_call);

	memset(&srx, 0, sizeof(srx));
	srx.srx_family = AF_RXRPC;
	srx.srx_service = call->service_id;
	srx.transport_type = SOCK_DGRAM;
	srx.transport_len = sizeof(srx.transport.sin);
	srx.transport.sin.sin_family = AF_INET;
	srx.transport.sin.sin_port = call->port;
	memcpy(&srx.transport.sin.sin_addr, addr, 4);

	/* create a call */
	rxcall = rxrpc_kernel_begin_call(afs_socket, &srx, call->key,
					 (unsigned long) call, gfp);
	call->key = NULL;
	if (IS_ERR(rxcall)) {
		ret = PTR_ERR(rxcall);
		goto error_kill_call;
	}

	call->rxcall = rxcall;

	/* send the request */
	iov[0].iov_base = call->request;
	iov[0].iov_len = call->request_size;

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_iov = (struct iovec *) iov;
	msg.msg_iovlen = 1;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = (call->send_pages ? MSG_MORE : 0);

	/* have to change the state *before* sending the last packet as RxRPC
	 * might give us the reply before it returns from sending the
	 * request */
	if (!call->send_pages)
		call->state = AFS_CALL_AWAIT_REPLY;
	ret = rxrpc_kernel_send_data(rxcall, &msg, call->request_size);
	if (ret < 0)
		goto error_do_abort;

	if (call->send_pages) {
		ret = afs_send_pages(call, &msg, iov);
		if (ret < 0)
			goto error_do_abort;
	}

	/* at this point, an async call may no longer exist as it may have
	 * already completed */
	return wait_mode->wait(call);

error_do_abort:
	rxrpc_kernel_abort_call(rxcall, RX_USER_ABORT);
	while ((skb = skb_dequeue(&call->rx_queue)))
		afs_free_skb(skb);
	rxrpc_kernel_end_call(rxcall);
	call->rxcall = NULL;
error_kill_call:
	call->type->destructor(call);
	afs_free_call(call);
	_leave(" = %d", ret);
	return ret;
}
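/*
 * Sketch of the wait-mode table that afs_make_call() hands off to.  Only
 * the two hooks exercised in this section are shown: ->wait() is invoked at
 * the end of afs_make_call() (blocking via afs_wait_for_call_to_complete()
 * for synchronous calls, or returning immediately for asynchronous ones),
 * and ->async_complete() is invoked from afs_process_async_call() once an
 * asynchronous call finishes.  The member names and signatures are inferred
 * from the calls in this file; the real definition may contain additional
 * hooks.
 */
struct afs_wait_mode {
	/* wait for, or dispatch, the call after the request has been sent */
	int (*wait)(struct afs_call *call);

	/* report completion of an asynchronous call */
	void (*async_complete)(void *reply, int error);
};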