Example #1
/*
 * Discard the preallocation on a service.
 */
void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	unsigned int size = RXRPC_BACKLOG_MAX, head, tail;

	if (!b)
		return;
	rx->backlog = NULL;

	/* Make sure that there aren't any incoming calls in progress before we
	 * clear the preallocation buffers.
	 */
	spin_lock_bh(&rx->incoming_lock);
	spin_unlock_bh(&rx->incoming_lock);

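	/* Free the preallocated peer records that were never attached to an
	 * incoming call.
	 */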
	head = b->peer_backlog_head;
	tail = b->peer_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_peer *peer = b->peer_backlog[tail];
		kfree(peer);
		tail = (tail + 1) & (size - 1);
	}

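	/* Unlink each unused preallocated connection from the per-net
	 * connection lists before freeing it.
	 */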
	head = b->conn_backlog_head;
	tail = b->conn_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_connection *conn = b->conn_backlog[tail];
		write_lock(&rxnet->conn_lock);
		list_del(&conn->link);
		list_del(&conn->proc_link);
		write_unlock(&rxnet->conn_lock);
		kfree(conn);
		tail = (tail + 1) & (size - 1);
	}

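	/* Discard the unused preallocated calls, letting a kernel service
	 * clean up its own state via ->discard_new_call first.
	 */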
	head = b->call_backlog_head;
	tail = b->call_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_call *call = b->call_backlog[tail];
		if (rx->discard_new_call) {
			_debug("discard %lx", call->user_call_ID);
			rx->discard_new_call(call, call->user_call_ID);
			rxrpc_put_call(call, rxrpc_call_put_kernel);
		}
		rxrpc_call_completed(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
		tail = (tail + 1) & (size - 1);
	}

	kfree(b);
}
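
All three loops above drain a power-of-two ring with the CIRC_CNT() helper from <linux/circ_buf.h>, which computes the occupancy as ((head) - (tail)) & ((size) - 1). The following is a minimal sketch of the same idiom; the demo_ring type and its field names are invented for illustration and are not rxrpc structures.

#include <linux/circ_buf.h>
#include <linux/slab.h>

#define DEMO_RING_SIZE 8	/* must be a power of two for the mask arithmetic */

struct demo_ring {
	void *slots[DEMO_RING_SIZE];
	unsigned int head;	/* next slot the producer will fill */
	unsigned int tail;	/* next slot the consumer will empty */
};

/* Free every unconsumed entry, advancing the tail with the same
 * CIRC_CNT()/mask idiom as the backlog loops above.
 */
static void demo_ring_drain(struct demo_ring *r)
{
	unsigned int size = DEMO_RING_SIZE;
	unsigned int head = r->head, tail = r->tail;

	while (CIRC_CNT(head, tail, size) > 0) {
		kfree(r->slots[tail]);
		r->slots[tail] = NULL;
		tail = (tail + 1) & (size - 1);
	}
	r->tail = tail;
}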
Example #2
/**
 * rxrpc_kernel_end_call - Allow a kernel service to end a call it was using
 * @sock: The socket the call is on
 * @call: The call to end
 *
 * Allow a kernel service to end a call it was using.  The call must be
 * complete before this is called (the call should be aborted if necessary).
 */
void rxrpc_kernel_end_call(struct socket *sock, struct rxrpc_call *call)
{
	_enter("%d{%d}", call->debug_id, atomic_read(&call->usage));

	mutex_lock(&call->user_mutex);
	rxrpc_release_call(rxrpc_sk(sock->sk), call);

	/* Make sure we're not going to call back into a kernel service */
	if (call->notify_rx) {
		spin_lock_bh(&call->notify_lock);
		call->notify_rx = rxrpc_dummy_notify_rx;
		spin_unlock_bh(&call->notify_lock);
	}

	mutex_unlock(&call->user_mutex);
	rxrpc_put_call(call, rxrpc_call_put_kernel);
}
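
As a usage sketch only, here is how a kernel service might hand a finished call back; the my_service_end_call() wrapper is hypothetical and not part of the rxrpc API or any in-tree service.

/* Hypothetical helper in a kernel service.  The call must already be
 * complete or aborted; after rxrpc_kernel_end_call() returns, the
 * service's notify_rx handler will not be invoked for this call again
 * and the kernel-held reference has been dropped.
 */
static void my_service_end_call(struct socket *rxrpc_socket,
				struct rxrpc_call *rxcall)
{
	rxrpc_kernel_end_call(rxrpc_socket, rxcall);
	/* rxcall must not be dereferenced after this point. */
}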
Example #3
/*
 * Receive a message from an RxRPC socket
 * - we need to be careful about two or more threads calling recvmsg
 *   simultaneously
 */
int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
                  int flags)
{
    struct rxrpc_call *call;
    struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
    struct list_head *l;
    size_t copied = 0;
    long timeo;
    int ret;

    DEFINE_WAIT(wait);

    trace_rxrpc_recvmsg(NULL, rxrpc_recvmsg_enter, 0, 0, 0, 0);

    if (flags & (MSG_OOB | MSG_TRUNC))
        return -EOPNOTSUPP;

    timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);

try_again:
    lock_sock(&rx->sk);

    /* Return immediately if a client socket has no outstanding calls */
    if (RB_EMPTY_ROOT(&rx->calls) &&
            list_empty(&rx->recvmsg_q) &&
            rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
        release_sock(&rx->sk);
        return -ENODATA;
    }

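    /* Nothing is queued for processing: fail at once for a non-blocking
     * caller, otherwise wait for a call to be queued or for the receive
     * timeout to expire.
     */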
    if (list_empty(&rx->recvmsg_q)) {
        ret = -EWOULDBLOCK;
        if (timeo == 0) {
            call = NULL;
            goto error_no_call;
        }

        release_sock(&rx->sk);

        /* Wait for something to happen */
        prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
                                  TASK_INTERRUPTIBLE);
        ret = sock_error(&rx->sk);
        if (ret)
            goto wait_error;

        if (list_empty(&rx->recvmsg_q)) {
            if (signal_pending(current))
                goto wait_interrupted;
            trace_rxrpc_recvmsg(NULL, rxrpc_recvmsg_wait,
                                0, 0, 0, 0);
            timeo = schedule_timeout(timeo);
        }
        finish_wait(sk_sleep(&rx->sk), &wait);
        goto try_again;
    }

    /* Find the next call and dequeue it if we're not just peeking.  If we
     * do dequeue it, that comes with a ref that we will need to release.
     */
    write_lock_bh(&rx->recvmsg_lock);
    l = rx->recvmsg_q.next;
    call = list_entry(l, struct rxrpc_call, recvmsg_link);
    if (!(flags & MSG_PEEK))
        list_del_init(&call->recvmsg_link);
    else
        rxrpc_get_call(call, rxrpc_call_got);
    write_unlock_bh(&rx->recvmsg_lock);

    trace_rxrpc_recvmsg(call, rxrpc_recvmsg_dequeue, 0, 0, 0, 0);

    if (test_bit(RXRPC_CALL_RELEASED, &call->flags))
        BUG();

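    /* If the call has a user ID, report it to the caller as a control
     * message (a 32-bit ID for MSG_CMSG_COMPAT callers).
     */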
    if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
        if (flags & MSG_CMSG_COMPAT) {
            unsigned int id32 = call->user_call_ID;

            ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
                           sizeof(unsigned int), &id32);
        } else {
            ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
                           sizeof(unsigned long),
                           &call->user_call_ID);
        }
        if (ret < 0)
            goto error;
    }

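    /* If the caller supplied a name buffer, fill it in with the peer's
     * address.
     */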
    if (msg->msg_name) {
        size_t len = sizeof(call->conn->params.peer->srx);
        memcpy(msg->msg_name, &call->conn->params.peer->srx, len);
        msg->msg_namelen = len;
    }

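    /* Act according to the call's state: hand a newly accepted call to
     * the server, or copy out any received data.
     */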
    switch (call->state) {
    case RXRPC_CALL_SERVER_ACCEPTING:
        ret = rxrpc_recvmsg_new_call(rx, call, msg, flags);
        break;
    case RXRPC_CALL_CLIENT_RECV_REPLY:
    case RXRPC_CALL_SERVER_RECV_REQUEST:
    case RXRPC_CALL_SERVER_ACK_REQUEST:
        ret = rxrpc_recvmsg_data(sock, call, msg, &msg->msg_iter, len,
                                 flags, &copied);
        if (ret == -EAGAIN)
            ret = 0;

        if (after(call->rx_top, call->rx_hard_ack) &&
                call->rxtx_buffer[(call->rx_hard_ack + 1) & RXRPC_RXTX_BUFF_MASK])
            rxrpc_notify_socket(call);
        break;
    default:
        ret = 0;
        break;
    }

    if (ret < 0)
        goto error;

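    /* If the call has now completed, append the termination report, mark
     * the end of the record and, unless just peeking, detach the call
     * from the socket.
     */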
    if (call->state == RXRPC_CALL_COMPLETE) {
        ret = rxrpc_recvmsg_term(call, msg);
        if (ret < 0)
            goto error;
        if (!(flags & MSG_PEEK))
            rxrpc_release_call(rx, call);
        msg->msg_flags |= MSG_EOR;
        ret = 1;
    }

    if (ret == 0)
        msg->msg_flags |= MSG_MORE;
    else
        msg->msg_flags &= ~MSG_MORE;
    ret = copied;

error:
    rxrpc_put_call(call, rxrpc_call_put);
error_no_call:
    release_sock(&rx->sk);
    trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret);
    return ret;

wait_interrupted:
    ret = sock_intr_errno(timeo);
wait_error:
    finish_wait(sk_sleep(&rx->sk), &wait);
    call = NULL;
    goto error_no_call;
}
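
For context, a hedged sketch of the userspace side of this path: it assumes a 64-bit, non-compat process, an AF_RXRPC socket that is already bound/connected (fd), and that <linux/rxrpc.h> provides RXRPC_USER_CALL_ID; SOL_RXRPC is defined locally in case the libc headers lack it. The recv_one_chunk() helper is illustrative, not part of any rxrpc tooling.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/rxrpc.h>

#ifndef SOL_RXRPC
#define SOL_RXRPC 272	/* value used by the kernel (include/linux/socket.h) */
#endif

/* Receive one message from an AF_RXRPC socket and report which call it
 * belongs to and whether it was the final message of that call.
 */
static int recv_one_chunk(int fd)
{
	unsigned char data[4096];
	char control[128];
	unsigned long call_id = 0;
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= control,
		.msg_controllen	= sizeof(control),
	};
	struct cmsghdr *cmsg;
	ssize_t n;

	n = recvmsg(fd, &msg, 0);
	if (n < 0)
		return -1;

	/* The user call ID arrives as a control message (see the
	 * put_cmsg() calls in rxrpc_recvmsg() above).
	 */
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_RXRPC &&
		    cmsg->cmsg_type == RXRPC_USER_CALL_ID)
			memcpy(&call_id, CMSG_DATA(cmsg), sizeof(call_id));
	}

	/* MSG_EOR marks the message that completes the call. */
	printf("call %lx: %zd bytes%s\n", call_id, n,
	       (msg.msg_flags & MSG_EOR) ? " (end of call)" : "");
	return 0;
}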