/*
 * Transmit a single-buffer reply on the given call, tearing the call down if
 * transmission fails.
 */
void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
{
	struct msghdr msg;
	struct iovec iov[1];
	int ret;

	_enter("");

	/* Describe the one reply buffer. */
	iov[0].iov_base		= (void *) buf;
	iov[0].iov_len		= len;
	msg.msg_name		= NULL;
	msg.msg_namelen		= 0;
	msg.msg_iov		= iov;
	msg.msg_iovlen		= 1;
	msg.msg_control		= NULL;
	msg.msg_controllen	= 0;
	msg.msg_flags		= 0;

	/* The final ACK may arrive as soon as the data is queued, so switch
	 * state before sending. */
	call->state = AFS_CALL_AWAIT_ACK;

	ret = rxrpc_kernel_send_data(call->rxcall, &msg, len);
	if (ret >= 0) {
		_leave(" [replied]");
		return;
	}

	if (ret == -ENOMEM) {
		_debug("oom");
		rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT);
	}

	/* Transmission failed - dispose of the call. */
	rxrpc_kernel_end_call(call->rxcall);
	call->rxcall = NULL;
	call->type->destructor(call);
	afs_free_call(call);
	_leave(" [error]");
}
/* * send an empty reply */ void afs_send_empty_reply(struct afs_call *call) { struct msghdr msg; struct iovec iov[1]; _enter(""); iov[0].iov_base = NULL; iov[0].iov_len = 0; msg.msg_name = NULL; msg.msg_namelen = 0; msg.msg_iov = iov; msg.msg_iovlen = 0; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = 0; call->state = AFS_CALL_AWAIT_ACK; switch (rxrpc_kernel_send_data(call->rxcall, &msg, 0)) { case 0: _leave(" [replied]"); return; case -ENOMEM: _debug("oom"); rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT); default: rxrpc_kernel_end_call(call->rxcall); call->rxcall = NULL; call->type->destructor(call); afs_free_call(call); _leave(" [error]"); return; } }
/*
 * delete an asynchronous call
 *
 * NOTE(review): this definition conflicts with the work_struct-based
 * afs_delete_async_call() defined just below (same name, different
 * signature) — the two cannot coexist in one translation unit. This one
 * appears to be a leftover from before the function became a work-item
 * handler; confirm and remove whichever version is obsolete.
 */
static void afs_delete_async_call(struct afs_call *call)
{
	_enter("");

	afs_free_call(call);

	_leave("");
}
/*
 * Work-item handler that disposes of a completed asynchronous call.  The
 * call record is recovered from its embedded async_work member.
 */
static void afs_delete_async_call(struct work_struct *work)
{
	struct afs_call *call;

	call = container_of(work, struct afs_call, async_work);

	_enter("");

	afs_free_call(call);

	_leave("");
}
/*
 * wait synchronously for a call to complete
 *
 * Sleeps interruptibly on call->waitq, delivering any queued skbs inline,
 * until the call reaches a terminal state or a signal arrives.  Returns the
 * call's error code, or -EINTR if interrupted.  The call is always torn down
 * and freed before returning.
 */
static int afs_wait_for_call_to_complete(struct afs_call *call)
{
	struct sk_buff *skb;
	int ret;

	DECLARE_WAITQUEUE(myself, current);

	_enter("");

	add_wait_queue(&call->waitq, &myself);
	for (;;) {
		/* Must set the task state before the wakeup-condition checks
		 * below so that a concurrent wake-up isn't lost. */
		set_current_state(TASK_INTERRUPTIBLE);

		/* deliver any messages that are in the queue */
		if (!skb_queue_empty(&call->rx_queue)) {
			__set_current_state(TASK_RUNNING);
			afs_deliver_to_call(call);
			continue;
		}

		ret = call->error;
		if (call->state >= AFS_CALL_COMPLETE)
			break;
		/* A pending signal overrides the wait with -EINTR. */
		ret = -EINTR;
		if (signal_pending(current))
			break;
		schedule();
	}
	remove_wait_queue(&call->waitq, &myself);
	__set_current_state(TASK_RUNNING);

	/* kill the call if it was interrupted before completing */
	if (call->state < AFS_CALL_COMPLETE) {
		_debug("call incomplete");
		rxrpc_kernel_abort_call(call->rxcall, RX_CALL_DEAD);
		/* drain any skbs still queued for the aborted call */
		while ((skb = skb_dequeue(&call->rx_queue)))
			afs_free_skb(skb);
	}

	_debug("call complete");
	rxrpc_kernel_end_call(call->rxcall);
	call->rxcall = NULL;
	call->type->destructor(call);
	afs_free_call(call);
	_leave(" = %d", ret);
	return ret;
}
/* * accept the backlog of incoming calls */ static void afs_collect_incoming_call(struct work_struct *work) { struct rxrpc_call *rxcall; struct afs_call *call = NULL; struct sk_buff *skb; while ((skb = skb_dequeue(&afs_incoming_calls))) { _debug("new call"); /* don't need the notification */ afs_free_skb(skb); if (!call) { call = kzalloc(sizeof(struct afs_call), GFP_KERNEL); if (!call) { rxrpc_kernel_reject_call(afs_socket); return; } call->async_workfn = afs_process_async_call; INIT_WORK(&call->async_work, afs_async_workfn); call->wait_mode = &afs_async_incoming_call; call->type = &afs_RXCMxxxx; init_waitqueue_head(&call->waitq); skb_queue_head_init(&call->rx_queue); call->state = AFS_CALL_AWAIT_OP_ID; _debug("CALL %p{%s} [%d]", call, call->type->name, atomic_read(&afs_outstanding_calls)); atomic_inc(&afs_outstanding_calls); } rxcall = rxrpc_kernel_accept_call(afs_socket, (unsigned long) call); if (!IS_ERR(rxcall)) { call->rxcall = rxcall; call = NULL; } } if (call) afs_free_call(call); }
/*
 * Allocate a call record together with flat request and reply buffers of the
 * requested sizes (either may be zero).  Returns NULL on allocation failure.
 */
struct afs_call *afs_alloc_flat_call(const struct afs_call_type *type,
				     size_t request_size, size_t reply_size)
{
	struct afs_call *call;

	call = kzalloc(sizeof(*call), GFP_NOFS);
	if (!call)
		return NULL;

	_debug("CALL %p{%s} [%d]",
	       call, type->name, atomic_read(&afs_outstanding_calls));
	atomic_inc(&afs_outstanding_calls);

	call->type = type;
	call->request_size = request_size;
	call->reply_max = reply_size;

	if (request_size) {
		call->request = kmalloc(request_size, GFP_NOFS);
		if (!call->request)
			goto error;
	}

	if (reply_size) {
		call->buffer = kmalloc(reply_size, GFP_NOFS);
		if (!call->buffer)
			goto error;
	}

	init_waitqueue_head(&call->waitq);
	skb_queue_head_init(&call->rx_queue);
	return call;

error:
	/* afs_free_call() releases the buffers and the outstanding count. */
	afs_free_call(call);
	return NULL;
}
/*
 * deliver messages to a call
 *
 * Drains call->rx_queue while the call is in a state that expects input,
 * dispatching each skb by its mark: DATA goes to the type's deliver op;
 * ACK/BUSY/ABORT/error marks move the call to a terminal state.  Once the
 * call is done, any remaining queued skbs are discarded and, for incoming
 * calls, the call itself is torn down.
 */
static void afs_deliver_to_call(struct afs_call *call)
{
	struct sk_buff *skb;
	bool last;
	u32 abort_code;
	int ret;

	_enter("");

	while ((call->state == AFS_CALL_AWAIT_REPLY ||
		call->state == AFS_CALL_AWAIT_OP_ID ||
		call->state == AFS_CALL_AWAIT_REQUEST ||
		call->state == AFS_CALL_AWAIT_ACK) &&
	       (skb = skb_dequeue(&call->rx_queue))) {
		switch (skb->mark) {
		case RXRPC_SKB_MARK_DATA:
			_debug("Rcv DATA");
			last = rxrpc_kernel_is_data_last(skb);
			ret = call->type->deliver(call, skb, last);
			switch (ret) {
			case 0:
				/* last reply packet consumed OK - the call
				 * is complete */
				if (last &&
				    call->state == AFS_CALL_AWAIT_REPLY)
					call->state = AFS_CALL_COMPLETE;
				break;
			case -ENOTCONN:
				abort_code = RX_CALL_DEAD;
				goto do_abort;
			case -ENOTSUPP:
				abort_code = RX_INVALID_OPERATION;
				goto do_abort;
			default:
				/* unmarshalling error: pick the client- or
				 * server-side abort code by call direction */
				abort_code = RXGEN_CC_UNMARSHAL;
				if (call->state != AFS_CALL_AWAIT_REPLY)
					abort_code = RXGEN_SS_UNMARSHAL;
			do_abort:
				rxrpc_kernel_abort_call(call->rxcall,
							abort_code);
				call->error = ret;
				call->state = AFS_CALL_ERROR;
				break;
			}
			/* the deliver op consumed the skb; don't free it
			 * again below */
			afs_data_delivered(skb);
			skb = NULL;
			continue;
		case RXRPC_SKB_MARK_FINAL_ACK:
			_debug("Rcv ACK");
			call->state = AFS_CALL_COMPLETE;
			break;
		case RXRPC_SKB_MARK_BUSY:
			_debug("Rcv BUSY");
			call->error = -EBUSY;
			call->state = AFS_CALL_BUSY;
			break;
		case RXRPC_SKB_MARK_REMOTE_ABORT:
			abort_code = rxrpc_kernel_get_abort_code(skb);
			call->error = call->type->abort_to_error(abort_code);
			call->state = AFS_CALL_ABORTED;
			_debug("Rcv ABORT %u -> %d", abort_code, call->error);
			break;
		case RXRPC_SKB_MARK_NET_ERROR:
			call->error = -rxrpc_kernel_get_error_number(skb);
			call->state = AFS_CALL_ERROR;
			_debug("Rcv NET ERROR %d", call->error);
			break;
		case RXRPC_SKB_MARK_LOCAL_ERROR:
			call->error = -rxrpc_kernel_get_error_number(skb);
			call->state = AFS_CALL_ERROR;
			_debug("Rcv LOCAL ERROR %d", call->error);
			break;
		default:
			BUG();
			break;
		}

		afs_free_skb(skb);
	}

	/* make sure the queue is empty if the call is done with (we might have
	 * aborted the call early because of an unmarshalling error) */
	if (call->state >= AFS_CALL_COMPLETE) {
		while ((skb = skb_dequeue(&call->rx_queue)))
			afs_free_skb(skb);
		/* incoming calls are torn down here; outgoing calls are torn
		 * down by their waiter */
		if (call->incoming) {
			rxrpc_kernel_end_call(call->rxcall);
			call->rxcall = NULL;
			call->type->destructor(call);
			afs_free_call(call);
		}
	}

	_leave("");
}
/*
 * initiate a call
 *
 * Begins an rxrpc call to the given IPv4 address, transmits the flat request
 * buffer (plus trailing pages if call->send_pages is set), then hands off to
 * the wait mode's wait op.  On error the call is aborted/ended as needed and
 * freed; returns 0/positive from the wait op or a negative errno.
 */
int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
		  const struct afs_wait_mode *wait_mode)
{
	struct sockaddr_rxrpc srx;
	struct rxrpc_call *rxcall;
	struct msghdr msg;
	struct kvec iov[1];
	int ret;
	struct sk_buff *skb;

	_enter("%x,{%d},", addr->s_addr, ntohs(call->port));

	ASSERT(call->type != NULL);
	ASSERT(call->type->name != NULL);

	_debug("____MAKE %p{%s,%x} [%d]____",
	       call, call->type->name, key_serial(call->key),
	       atomic_read(&afs_outstanding_calls));

	call->wait_mode = wait_mode;
	INIT_WORK(&call->async_work, afs_process_async_call);

	/* build the target address for rxrpc (IPv4/UDP transport) */
	memset(&srx, 0, sizeof(srx));
	srx.srx_family = AF_RXRPC;
	srx.srx_service = call->service_id;
	srx.transport_type = SOCK_DGRAM;
	srx.transport_len = sizeof(srx.transport.sin);
	srx.transport.sin.sin_family = AF_INET;
	srx.transport.sin.sin_port = call->port;
	memcpy(&srx.transport.sin.sin_addr, addr, 4);

	/* create a call */
	rxcall = rxrpc_kernel_begin_call(afs_socket, &srx, call->key,
					 (unsigned long) call, gfp);
	/* the key reference is consumed regardless of the outcome */
	call->key = NULL;
	if (IS_ERR(rxcall)) {
		ret = PTR_ERR(rxcall);
		goto error_kill_call;
	}

	call->rxcall = rxcall;

	/* send the request */
	iov[0].iov_base = call->request;
	iov[0].iov_len = call->request_size;

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_iov = (struct iovec *) iov;
	msg.msg_iovlen = 1;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	/* MSG_MORE tells rxrpc more data (the pages) follows */
	msg.msg_flags = (call->send_pages ? MSG_MORE : 0);

	/* have to change the state *before* sending the last packet as RxRPC
	 * might give us the reply before it returns from sending the
	 * request */
	if (!call->send_pages)
		call->state = AFS_CALL_AWAIT_REPLY;
	ret = rxrpc_kernel_send_data(rxcall, &msg, call->request_size);
	if (ret < 0)
		goto error_do_abort;

	if (call->send_pages) {
		ret = afs_send_pages(call, &msg, iov);
		if (ret < 0)
			goto error_do_abort;
	}

	/* at this point, an async call may no longer exist as it may have
	 * already completed */
	return wait_mode->wait(call);

error_do_abort:
	rxrpc_kernel_abort_call(rxcall, RX_USER_ABORT);
	/* drain anything that arrived before the abort */
	while ((skb = skb_dequeue(&call->rx_queue)))
		afs_free_skb(skb);
	rxrpc_kernel_end_call(rxcall);
	call->rxcall = NULL;
error_kill_call:
	call->type->destructor(call);
	afs_free_call(call);
	_leave(" = %d", ret);
	return ret;
}
/*
 * End a call and free it
 *
 * NOTE(review): afs_end_call_nofree() is not visible here - presumably it
 * detaches the call from rxrpc and runs the type destructor without freeing
 * the record; confirm against its definition.
 */
static void afs_end_call(struct afs_call *call)
{
	afs_end_call_nofree(call);
	afs_free_call(call);
}