Example #1
/*
 * connection-level event processor
 */
void rxrpc_process_connection(struct work_struct *work)
{
	struct rxrpc_connection *conn =
		container_of(work, struct rxrpc_connection, processor);
	struct sk_buff *skb;
	u32 abort_code = RX_PROTOCOL_ERROR;
	int ret;

	_enter("{%d}", conn->debug_id);

	rxrpc_get_connection(conn);

	if (test_and_clear_bit(RXRPC_CONN_CHALLENGE, &conn->events)) {
		rxrpc_secure_connection(conn);
		rxrpc_put_connection(conn);
	}

	/* go through the conn-level event packets, releasing the ref on this
	 * connection that each one has when we've finished with it */
	while ((skb = skb_dequeue(&conn->rx_queue))) {
		ret = rxrpc_process_event(conn, skb, &abort_code);
		switch (ret) {
		case -EPROTO:
		case -EKEYEXPIRED:
		case -EKEYREJECTED:
			goto protocol_error;
		case -EAGAIN:
			goto requeue_and_leave;
		case -ECONNABORTED:
		default:
			rxrpc_put_connection(conn);
			rxrpc_free_skb(skb);
			break;
		}
	}

out:
	rxrpc_put_connection(conn);
	_leave("");
	return;

requeue_and_leave:
	skb_queue_head(&conn->rx_queue, skb);
	goto out;

protocol_error:
	if (rxrpc_abort_connection(conn, -ret, abort_code) < 0)
		goto requeue_and_leave;
	rxrpc_put_connection(conn);
	rxrpc_free_skb(skb);
	_leave(" [EPROTO]");
	goto out;
}
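
The loop above relies on a reference-counting protocol: whoever queues a packet on conn->rx_queue is expected to hold a reference on the connection, and the processor drops that reference once it has finished with each packet (a packet sent back via requeue_and_leave keeps its reference for the next pass). A minimal sketch of that producer side is shown below; the helper name rxrpc_queue_conn_event() and the use of schedule_work() are illustrative assumptions, not part of the excerpt.

/* Illustrative sketch (assumed, not from the excerpt above): hand a
 * connection-level event packet to the processor.  The reference taken
 * here is the one rxrpc_process_connection() releases per packet.
 */
static void rxrpc_queue_conn_event(struct rxrpc_connection *conn,
				   struct sk_buff *skb)
{
	rxrpc_get_connection(conn);		/* dropped by the processor */
	skb_queue_tail(&conn->rx_queue, skb);	/* hand the packet over */
	schedule_work(&conn->processor);	/* assumed: kick the work item */
}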
Example #2
/*
 * Allocate a new incoming call from the prealloc pool, along with a connection
 * and a peer as necessary.
 */
static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
						    struct rxrpc_local *local,
						    struct rxrpc_connection *conn,
						    struct sk_buff *skb)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_peer *peer, *xpeer;
	struct rxrpc_call *call;
	unsigned short call_head, conn_head, peer_head;
	unsigned short call_tail, conn_tail, peer_tail;
	unsigned short call_count, conn_count;

	/* #calls >= #conns >= #peers must hold true. */
	call_head = smp_load_acquire(&b->call_backlog_head);
	call_tail = b->call_backlog_tail;
	call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX);
	conn_head = smp_load_acquire(&b->conn_backlog_head);
	conn_tail = b->conn_backlog_tail;
	conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX);
	ASSERTCMP(conn_count, >=, call_count);
	peer_head = smp_load_acquire(&b->peer_backlog_head);
	peer_tail = b->peer_backlog_tail;
	ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=,
		  conn_count);

	if (call_count == 0)
		return NULL;

	if (!conn) {
		/* No connection.  We're going to need a peer to start off
		 * with.  If one doesn't yet exist, use a spare from the
		 * preallocation set.  We dump the address into the spare in
		 * anticipation - and to save on stack space.
		 */
		xpeer = b->peer_backlog[peer_tail];
		if (rxrpc_extract_addr_from_skb(&xpeer->srx, skb) < 0)
			return NULL;

		peer = rxrpc_lookup_incoming_peer(local, xpeer);
		if (peer == xpeer) {
			b->peer_backlog[peer_tail] = NULL;
			smp_store_release(&b->peer_backlog_tail,
					  (peer_tail + 1) &
					  (RXRPC_BACKLOG_MAX - 1));
		}

		/* Now allocate and set up the connection */
		conn = b->conn_backlog[conn_tail];
		b->conn_backlog[conn_tail] = NULL;
		smp_store_release(&b->conn_backlog_tail,
				  (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
		rxrpc_get_local(local);
		conn->params.local = local;
		conn->params.peer = peer;
		rxrpc_see_connection(conn);
		rxrpc_new_incoming_connection(rx, conn, skb);
	} else {
		rxrpc_get_connection(conn);
	}

	/* And now we can allocate and set up a new call */
	call = b->call_backlog[call_tail];
	b->call_backlog[call_tail] = NULL;
	smp_store_release(&b->call_backlog_tail,
			  (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));

	rxrpc_see_call(call);
	call->conn = conn;
	call->peer = rxrpc_get_peer(conn->params.peer);
	call->cong_cwnd = call->peer->cong_cwnd;
	return call;
}
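
Each of the three preallocation rings is consumed with the same lock-free pattern: the head index is read with smp_load_acquire() so that entries published by the preallocating side are visible, the entry at the tail is taken and cleared, and the tail is advanced with smp_store_release() so the slot can be refilled. A generic sketch of that consume step follows; the helper name backlog_ring_take() is hypothetical, and it assumes RXRPC_BACKLOG_MAX is a power of two (which CIRC_CNT's masking arithmetic already requires).

/* Illustrative sketch (hypothetical helper, not from the excerpt above):
 * take one entry from a backlog ring.  'head' must have been read with
 * smp_load_acquire() by the caller; releasing the updated tail lets the
 * producer reuse the slot.
 */
static void *backlog_ring_take(void *ring[], unsigned short head,
			       unsigned short *tail)
{
	void *entry;

	if (CIRC_CNT(head, *tail, RXRPC_BACKLOG_MAX) == 0)
		return NULL;			/* nothing preallocated */

	entry = ring[*tail];
	ring[*tail] = NULL;
	smp_store_release(tail, (*tail + 1) & (RXRPC_BACKLOG_MAX - 1));
	return entry;
}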