Example #1
/*
 * Abort a call due to a protocol error.  If the abort takes effect (i.e. the
 * call wasn't already complete), raise the abort event and queue the call so
 * that the ABORT packet gets transmitted.
 */
static void rxrpc_proto_abort(const char *why,
			      struct rxrpc_call *call, rxrpc_seq_t seq)
{
	if (rxrpc_abort_call(why, call, seq, RX_PROTOCOL_ERROR, EBADMSG)) {
		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
		rxrpc_queue_call(call);
	}
}
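rxrpc_proto_abort() is the input path's one-liner for killing a call on a malformed packet. Below is a minimal sketch of a typical call site, modelled on the ACK-parsing path in net/rxrpc/input.c; the wrapper function name, the "XAK" tag and the parse offset are illustrative assumptions, not taken from this listing:

/* Sketch: abort the call when an ACK packet is too short to parse. */
static void example_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_ackpacket ack;
	unsigned int offset = sizeof(struct rxrpc_wire_header);

	if (skb_copy_bits(skb, offset, &ack, sizeof(ack)) < 0)
		return rxrpc_proto_abort("XAK", call, 0);

	/* ... normal ACK processing would continue here ... */
}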
Example #2
/*
 * Handle retransmission and deferred ACK/abort generation.
 */
void rxrpc_process_call(struct work_struct *work)
{
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, processor);
	ktime_t now;

	rxrpc_see_call(call);

	_enter("{%d,%s,%lx}",
	       call->debug_id, rxrpc_call_states[call->state], call->events);

recheck_state:
	if (test_and_clear_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
		rxrpc_send_abort_packet(call);
		goto recheck_state;
	}

	if (call->state == RXRPC_CALL_COMPLETE) {
		del_timer_sync(&call->timer);
		rxrpc_notify_socket(call);
		goto out_put;
	}

	now = ktime_get_real();
	if (ktime_before(call->expire_at, now)) {
		rxrpc_abort_call("EXP", call, 0, RX_CALL_TIMEOUT, ETIME);
		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
		goto recheck_state;
	}

	if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events)) {
		if (call->ackr_reason) {
			rxrpc_send_ack_packet(call, false);
			goto recheck_state;
		}
	}

	if (test_and_clear_bit(RXRPC_CALL_EV_PING, &call->events)) {
		rxrpc_send_ack_packet(call, true);
		goto recheck_state;
	}

	if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events)) {
		rxrpc_resend(call, now);
		goto recheck_state;
	}

	rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now);

	/* other events may have been raised since we started checking */
	if (call->events && call->state < RXRPC_CALL_COMPLETE) {
		__rxrpc_queue_call(call);
		goto out;
	}

out_put:
	rxrpc_put_call(call, rxrpc_call_put);
out:
	_leave("");
}
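rxrpc_process_call() is not called directly: it is the handler behind the call's processor work item, attached with INIT_WORK() when the call is allocated, and it runs whenever the call is queued after an event bit is raised. A minimal sketch of that wiring, assuming INIT_WORK(&call->processor, &rxrpc_process_call) has already been done at allocation time; the helper name is illustrative:

/* Sketch: raise a resend event and kick the call processor.
 * rxrpc_queue_call() schedules call->processor, whose handler is
 * rxrpc_process_call(). */
static void example_raise_resend(struct rxrpc_call *call)
{
	set_bit(RXRPC_CALL_EV_RESEND, &call->events);
	rxrpc_queue_call(call);
}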
Example #3
/*
 * Set up a new incoming call.  Called in BH context with the RCU read lock
 * held.
 *
 * If this is for a kernel service, when we allocate the call, it will have
 * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the
 * retainer ref obtained from the backlog buffer.  Prealloc calls for userspace
 * services only have the ref from the backlog buffer.  We want to pass this
 * ref to non-BH context to dispose of.
 *
 * If we want to report an error, we mark the skb with the packet type and
 * abort code and return NULL.
 *
 * The call is returned with the user access mutex held.
 */
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
					   struct rxrpc_connection *conn,
					   struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_sock *rx;
	struct rxrpc_call *call;
	u16 service_id = sp->hdr.serviceId;

	_enter("");

	/* Get the socket providing the service */
	rx = rcu_dereference(local->service);
	if (rx && (service_id == rx->srx.srx_service ||
		   service_id == rx->second_service))
		goto found_service;

	trace_rxrpc_abort("INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
			  RX_INVALID_OPERATION, EOPNOTSUPP);
	skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
	skb->priority = RX_INVALID_OPERATION;
	_leave(" = NULL [service]");
	return NULL;

found_service:
	spin_lock(&rx->incoming_lock);
	if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
	    rx->sk.sk_state == RXRPC_CLOSE) {
		trace_rxrpc_abort("CLS", sp->hdr.cid, sp->hdr.callNumber,
				  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
		skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
		skb->priority = RX_INVALID_OPERATION;
		_leave(" = NULL [close]");
		call = NULL;
		goto out;
	}

	call = rxrpc_alloc_incoming_call(rx, local, conn, skb);
	if (!call) {
		skb->mark = RXRPC_SKB_MARK_BUSY;
		_leave(" = NULL [busy]");
		call = NULL;
		goto out;
	}

	trace_rxrpc_receive(call, rxrpc_receive_incoming,
			    sp->hdr.serial, sp->hdr.seq);

	/* Lock the call to prevent rxrpc_kernel_send/recv_data() and
	 * sendmsg()/recvmsg() inconveniently stealing the mutex once the
	 * notification is generated.
	 *
	 * The BUG should never happen because the kernel should be well
	 * behaved enough not to access the call before the first notification
	 * event and userspace is prevented from doing so until the state is
	 * appropriate.
	 */
	if (!mutex_trylock(&call->user_mutex))
		BUG();

	/* Make the call live. */
	rxrpc_incoming_call(rx, call, skb);
	conn = call->conn;

	if (rx->notify_new_call)
		rx->notify_new_call(&rx->sk, call, call->user_call_ID);
	else
		sk_acceptq_added(&rx->sk);

	spin_lock(&conn->state_lock);
	switch (conn->state) {
	case RXRPC_CONN_SERVICE_UNSECURED:
		conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
		set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
		rxrpc_queue_conn(call->conn);
		break;

	case RXRPC_CONN_SERVICE:
		write_lock(&call->state_lock);
		if (rx->discard_new_call)
			call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		else
			call->state = RXRPC_CALL_SERVER_ACCEPTING;
		write_unlock(&call->state_lock);
		break;

	case RXRPC_CONN_REMOTELY_ABORTED:
		rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
					  conn->remote_abort, -ECONNABORTED);
		break;
	case RXRPC_CONN_LOCALLY_ABORTED:
		rxrpc_abort_call("CON", call, sp->hdr.seq,
				 conn->local_abort, -ECONNABORTED);
		break;
	default:
		BUG();
	}
	spin_unlock(&conn->state_lock);

	if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
		rxrpc_notify_socket(call);

	/* We have to discard the prealloc queue's ref here and rely on a
	 * combination of the RCU read lock and refs held either by the socket
	 * (recvmsg queue, to-be-accepted queue or user ID tree) or the kernel
	 * service to prevent the call from being deallocated too early.
	 */
	rxrpc_put_call(call, rxrpc_call_put);

	_leave(" = %p{%d}", call, call->debug_id);
out:
	spin_unlock(&rx->incoming_lock);
	return call;
}
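The caller's side of this contract matters: rxrpc_new_incoming_call() is invoked from the packet-input path under the RCU read lock, a NULL return means the skb's mark and priority now describe the rejection, and a non-NULL call comes back with user_mutex held. A hedged sketch of a caller, loosely modelled on rxrpc_input_packet(); the function name and the bare return standing in for the real reject path are illustrative:

static void example_input_new_call(struct rxrpc_local *local,
				   struct rxrpc_connection *conn,
				   struct sk_buff *skb)
{
	struct rxrpc_call *call;

	call = rxrpc_new_incoming_call(local, conn, skb);
	if (!call) {
		/* skb->mark and skb->priority describe the rejection;
		 * the real code jumps to its reject/abort path here. */
		return;
	}

	/* The call was returned with user_mutex held; release it once
	 * the first notification has been generated. */
	mutex_unlock(&call->user_mutex);
}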