コード例 #1 (Code example #1)
/*
 * End the packet reception phase.
 */
/*
 * Wind up the packet-reception phase of a call once the Rx window has been
 * fully consumed (rx_hard_ack has caught up with rx_top).
 */
static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
{
    _enter("%d,%s", call->debug_id, rxrpc_call_states[call->state]);

    trace_rxrpc_receive(call, rxrpc_receive_end, 0, call->rx_top);
    ASSERTCMP(call->rx_hard_ack, ==, call->rx_top);

    /* A client that has consumed the entire reply proposes and immediately
     * transmits a terminal IDLE ACK before completing the call.
     */
    if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) {
        rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, 0, serial, true, false,
                          rxrpc_propose_ack_terminal_ack);
        rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK);
    }

    write_lock_bh(&call->state_lock);

    /* Recheck the state under the lock before transitioning. */
    if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) {
        __rxrpc_call_completed(call);
    } else if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST) {
        /* The server has the whole request; turn the call around and
         * enter the transmission phase.
         */
        call->tx_phase = true;
        call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
    }
    /* Any other state: leave it alone. */

    write_unlock_bh(&call->state_lock);
}
コード例 #2 (Code example #2)
/*
 * Discard a packet we've used up and advance the Rx window by one.
 */
/*
 * Discard a packet we've used up and advance the Rx window by one.
 */
static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
{
    struct rxrpc_skb_priv *sp;
    struct sk_buff *skb;
    rxrpc_serial_t serial;
    rxrpc_seq_t hard_ack, top;
    u8 flags;
    int ix;

    _enter("%d", call->debug_id);

    hard_ack = call->rx_hard_ack;
    /* The acquire pairs with the input path's release-store of rx_top so
     * that the buffer slots filled before rx_top advanced are visible.
     */
    top = smp_load_acquire(&call->rx_top);
    ASSERT(before(hard_ack, top));

    hard_ack++;
    ix = hard_ack & RXRPC_RXTX_BUFF_MASK;
    skb = call->rxtx_buffer[ix];
    rxrpc_see_skb(skb, rxrpc_skb_rx_rotated);
    sp = rxrpc_skb(skb);
    /* Snapshot the header fields we still need after the skb is freed
     * further down.
     */
    flags = sp->hdr.flags;
    serial = sp->hdr.serial;
    /* NOTE(review): for a jumbo packet this advances serial to that of the
     * last subpacket -- presumably the jumbo bits of the annotation hold
     * the subpacket count; confirm against the input path.
     */
    if (call->rxtx_annotations[ix] & RXRPC_RX_ANNO_JUMBO)
        serial += (call->rxtx_annotations[ix] & RXRPC_RX_ANNO_JUMBO) - 1;

    call->rxtx_buffer[ix] = NULL;
    call->rxtx_annotations[ix] = 0;
    /* Barrier against rxrpc_input_data(). */
    smp_store_release(&call->rx_hard_ack, hard_ack);

    rxrpc_free_skb(skb, rxrpc_skb_rx_freed);

    _debug("%u,%u,%02x", hard_ack, top, flags);
    trace_rxrpc_receive(call, rxrpc_receive_rotate, serial, hard_ack);
    if (flags & RXRPC_LAST_PACKET) {
        /* That was the final DATA packet: the Rx phase is over. */
        rxrpc_end_rx_phase(call, serial);
    } else {
        /* Check to see if there's an ACK that needs sending. */
        if (after_eq(hard_ack, call->ackr_consumed + 2) ||
                after_eq(top, call->ackr_seen + 2) ||
                (hard_ack == top && after(hard_ack, call->ackr_consumed)))
            rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial,
                              true, false,
                              rxrpc_propose_ack_rotate_rx);
        if (call->ackr_reason)
            rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK);
    }
}
コード例 #3 (Code example #3)
/*
 * Deliver messages to a call.  This keeps processing packets until the buffer
 * is filled and we find either more DATA (returns 0) or the end of the DATA
 * (returns 1).  If more packets are required, it returns -EAGAIN.
 */
/*
 * Deliver messages to a call.  This keeps processing packets until the buffer
 * is filled and we find either more DATA (returns 0) or the end of the DATA
 * (returns 1).  If more packets are required, it returns -EAGAIN.
 */
static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
                              struct msghdr *msg, struct iov_iter *iter,
                              size_t len, int flags, size_t *_offset)
{
    struct rxrpc_skb_priv *sp;
    struct sk_buff *skb;
    rxrpc_seq_t hard_ack, top, seq;
    size_t remain;
    bool last;
    unsigned int rx_pkt_offset, rx_pkt_len;
    int ix, copy, ret = -EAGAIN, ret2;

    /* Resume partial-packet consumption state saved on the call. */
    rx_pkt_offset = call->rx_pkt_offset;
    rx_pkt_len = call->rx_pkt_len;

    /* If the call has already moved past the receive phase, there is
     * nothing more to deliver.
     */
    if (call->state >= RXRPC_CALL_SERVER_ACK_REQUEST) {
        seq = call->rx_hard_ack;
        ret = 1;
        goto done;
    }

    /* Barriers against rxrpc_input_data(). */
    hard_ack = call->rx_hard_ack;
    top = smp_load_acquire(&call->rx_top);
    for (seq = hard_ack + 1; before_eq(seq, top); seq++) {
        ix = seq & RXRPC_RXTX_BUFF_MASK;
        skb = call->rxtx_buffer[ix];
        /* An empty slot means the next in-sequence packet hasn't arrived
         * yet; stop and report -EAGAIN.
         */
        if (!skb) {
            trace_rxrpc_recvmsg(call, rxrpc_recvmsg_hole, seq,
                                rx_pkt_offset, rx_pkt_len, 0);
            break;
        }
        smp_rmb();
        rxrpc_see_skb(skb, rxrpc_skb_rx_seen);
        sp = rxrpc_skb(skb);

        if (!(flags & MSG_PEEK))
            trace_rxrpc_receive(call, rxrpc_receive_front,
                                sp->hdr.serial, seq);

        if (msg)
            sock_recv_timestamp(msg, sock->sk, skb);

        /* Offset 0 means we haven't started on this packet yet: locate
         * the data within it first.
         */
        if (rx_pkt_offset == 0) {
            ret2 = rxrpc_locate_data(call, skb,
                                     &call->rxtx_annotations[ix],
                                     &rx_pkt_offset, &rx_pkt_len);
            trace_rxrpc_recvmsg(call, rxrpc_recvmsg_next, seq,
                                rx_pkt_offset, rx_pkt_len, ret2);
            if (ret2 < 0) {
                ret = ret2;
                goto out;
            }
        } else {
            trace_rxrpc_recvmsg(call, rxrpc_recvmsg_cont, seq,
                                rx_pkt_offset, rx_pkt_len, 0);
        }

        /* We have to handle short, empty and used-up DATA packets. */
        remain = len - *_offset;
        copy = rx_pkt_len;
        if (copy > remain)
            copy = remain;
        if (copy > 0) {
            ret2 = skb_copy_datagram_iter(skb, rx_pkt_offset, iter,
                                          copy);
            if (ret2 < 0) {
                ret = ret2;
                goto out;
            }

            /* handle piecemeal consumption of data packets */
            rx_pkt_offset += copy;
            rx_pkt_len -= copy;
            *_offset += copy;
        }

        /* Data left over means the user buffer filled up first. */
        if (rx_pkt_len > 0) {
            trace_rxrpc_recvmsg(call, rxrpc_recvmsg_full, seq,
                                rx_pkt_offset, rx_pkt_len, 0);
            ASSERTCMP(*_offset, ==, len);
            ret = 0;
            break;
        }

        /* The whole packet has been transferred. */
        last = sp->hdr.flags & RXRPC_LAST_PACKET;
        if (!(flags & MSG_PEEK))
            rxrpc_rotate_rx_window(call);
        rx_pkt_offset = 0;
        rx_pkt_len = 0;

        if (last) {
            ASSERTCMP(seq, ==, READ_ONCE(call->rx_top));
            ret = 1;
            goto out;
        }
    }
    /* NOTE(review): this excerpt is truncated -- the "out:" and "done:"
     * labels targeted above (and the function's tail that saves
     * rx_pkt_offset/rx_pkt_len back to the call) are not shown here.
     */
コード例 #4 (Code example #4)
/*
 * Set up a new incoming call.  Called in BH context with the RCU read lock
 * held.
 *
 * If this is for a kernel service, when we allocate the call, it will have
 * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the
 * retainer ref obtained from the backlog buffer.  Prealloc calls for userspace
 * services only have the ref from the backlog buffer.  We want to pass this
 * ref to non-BH context to dispose of.
 *
 * If we want to report an error, we mark the skb with the packet type and
 * abort code and return NULL.
 *
 * The call is returned with the user access mutex held.
 */
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
					   struct rxrpc_connection *conn,
					   struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_sock *rx;
	struct rxrpc_call *call;
	u16 service_id = sp->hdr.serviceId;

	_enter("");

	/* Get the socket providing the service.  The service socket may
	 * answer for either of two service IDs.
	 */
	rx = rcu_dereference(local->service);
	if (rx && (service_id == rx->srx.srx_service ||
		   service_id == rx->second_service))
		goto found_service;

	/* No matching service: mark the skb so the caller sends an abort. */
	trace_rxrpc_abort("INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
			  RX_INVALID_OPERATION, EOPNOTSUPP);
	skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
	skb->priority = RX_INVALID_OPERATION;
	_leave(" = NULL [service]");
	return NULL;

found_service:
	spin_lock(&rx->incoming_lock);
	/* Reject the call if the socket is no longer accepting new calls. */
	if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
	    rx->sk.sk_state == RXRPC_CLOSE) {
		trace_rxrpc_abort("CLS", sp->hdr.cid, sp->hdr.callNumber,
				  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
		skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
		skb->priority = RX_INVALID_OPERATION;
		_leave(" = NULL [close]");
		call = NULL;
		goto out;
	}

	/* No prealloc slot available: mark the skb so a BUSY is sent. */
	call = rxrpc_alloc_incoming_call(rx, local, conn, skb);
	if (!call) {
		skb->mark = RXRPC_SKB_MARK_BUSY;
		_leave(" = NULL [busy]");
		call = NULL;
		goto out;
	}

	trace_rxrpc_receive(call, rxrpc_receive_incoming,
			    sp->hdr.serial, sp->hdr.seq);

	/* Lock the call to prevent rxrpc_kernel_send/recv_data() and
	 * sendmsg()/recvmsg() inconveniently stealing the mutex once the
	 * notification is generated.
	 *
	 * The BUG should never happen because the kernel should be well
	 * behaved enough not to access the call before the first notification
	 * event and userspace is prevented from doing so until the state is
	 * appropriate.
	 */
	if (!mutex_trylock(&call->user_mutex))
		BUG();

	/* Make the call live. */
	rxrpc_incoming_call(rx, call, skb);
	/* Reload conn from the call in case it was (re)assigned above --
	 * TODO confirm against rxrpc_incoming_call().
	 */
	conn = call->conn;

	if (rx->notify_new_call)
		rx->notify_new_call(&rx->sk, call, call->user_call_ID);
	else
		sk_acceptq_added(&rx->sk);

	/* Set the call's initial state according to the connection state. */
	spin_lock(&conn->state_lock);
	switch (conn->state) {
	case RXRPC_CONN_SERVICE_UNSECURED:
		/* Security negotiation needed first: queue a challenge. */
		conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
		set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
		rxrpc_queue_conn(call->conn);
		break;

	case RXRPC_CONN_SERVICE:
		write_lock(&call->state_lock);
		if (rx->discard_new_call)
			call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		else
			call->state = RXRPC_CALL_SERVER_ACCEPTING;
		write_unlock(&call->state_lock);
		break;

	case RXRPC_CONN_REMOTELY_ABORTED:
		rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
					  conn->remote_abort, -ECONNABORTED);
		break;
	case RXRPC_CONN_LOCALLY_ABORTED:
		rxrpc_abort_call("CON", call, sp->hdr.seq,
				 conn->local_abort, -ECONNABORTED);
		break;
	default:
		BUG();
	}
	spin_unlock(&conn->state_lock);

	/* A call awaiting acceptance must be flagged to the socket user. */
	if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
		rxrpc_notify_socket(call);

	/* We have to discard the prealloc queue's ref here and rely on a
	 * combination of the RCU read lock and refs held either by the socket
	 * (recvmsg queue, to-be-accepted queue or user ID tree) or the kernel
	 * service to prevent the call from being deallocated too early.
	 */
	rxrpc_put_call(call, rxrpc_call_put);

	_leave(" = %p{%d}", call, call->debug_id);
out:
	spin_unlock(&rx->incoming_lock);
	return call;
}