/*
 * Abort every call on a connection, setting each call's completion state and
 * abort code and kicking its event processor to do the rest of the work.
 *
 * Takes conn->lock (read, BH-disabled) to walk the connection's call tree and
 * each call's state_lock (write) to effect the state change.  The event bit
 * set distinguishes a locally generated abort (we must send an ABORT packet)
 * from one received from the peer (already aborted on the wire).
 */
static void rxrpc_abort_calls(struct rxrpc_connection *conn, int state, u32 abort_code)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("{%d},%x", conn->debug_id, abort_code);

	read_lock_bh(&conn->lock);

	for (p = rb_first(&conn->calls); p; p = rb_next(p)) {
		call = rb_entry(p, struct rxrpc_call, conn_node);
		write_lock(&call->state_lock);
		/* NOTE(review): "<=" also re-stamps calls already in
		 * RXRPC_CALL_COMPLETE; confirm "<" was not intended.
		 */
		if (call->state <= RXRPC_CALL_COMPLETE) {
			call->state = state;
			call->abort_code = abort_code;
			if (state == RXRPC_CALL_LOCALLY_ABORTED)
				/* we still owe the peer an ABORT packet */
				set_bit(RXRPC_CALL_CONN_ABORT, &call->events);
			else
				set_bit(RXRPC_CALL_RCVD_ABORT, &call->events);
			rxrpc_queue_call(call);
		}
		write_unlock(&call->state_lock);
	}

	read_unlock_bh(&conn->lock);
	_leave("");
}
/*
 * Set the timer
 *
 * Arms call->timer for the earliest of the call's three soft deadlines
 * (resend, ack, ping), bounded by the hard expiry time.  Any deadline that
 * has already passed is pushed out to expire_at and its corresponding event
 * is raised instead, with the call queued for processing at the end.
 */
void __rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why, ktime_t now)
{
	unsigned long t_j, now_j = jiffies;
	ktime_t t;
	bool queue = false;

	/* Timers only matter while the call is still live. */
	if (call->state < RXRPC_CALL_COMPLETE) {
		t = call->expire_at;
		/* Hard expiry already reached: just queue the call. */
		if (!ktime_after(t, now)) {
			trace_rxrpc_timer(call, why, now, now_j);
			queue = true;
			goto out;
		}

		/* For each soft deadline: if it has passed, raise the event
		 * and defer the deadline to the hard expiry; otherwise let it
		 * pull the timer target earlier.
		 */
		if (!ktime_after(call->resend_at, now)) {
			call->resend_at = call->expire_at;
			if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
				queue = true;
		} else if (ktime_before(call->resend_at, t)) {
			t = call->resend_at;
		}

		if (!ktime_after(call->ack_at, now)) {
			call->ack_at = call->expire_at;
			if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
				queue = true;
		} else if (ktime_before(call->ack_at, t)) {
			t = call->ack_at;
		}

		if (!ktime_after(call->ping_at, now)) {
			call->ping_at = call->expire_at;
			if (!test_and_set_bit(RXRPC_CALL_EV_PING, &call->events))
				queue = true;
		} else if (ktime_before(call->ping_at, t)) {
			t = call->ping_at;
		}

		/* Convert the ktime delta to an absolute jiffies target. */
		t_j = nsecs_to_jiffies(ktime_to_ns(ktime_sub(t, now)));
		t_j += jiffies;

		/* We have to make sure that the calculated jiffies value falls
		 * at or after the nsec value, or we may loop ceaselessly
		 * because the timer times out, but we haven't reached the nsec
		 * timeout yet.
		 */
		t_j++;

		/* Avoid a pointless mod_timer() if already armed for t_j. */
		if (call->timer.expires != t_j || !timer_pending(&call->timer)) {
			mod_timer(&call->timer, t_j);
			trace_rxrpc_timer(call, why, now, now_j);
		}
	}

out:
	if (queue)
		rxrpc_queue_call(call);
}
/*
 * Abort a call due to a protocol error, raising the abort event and queueing
 * the call for processing if we were the ones to complete it.
 *
 * Fix: the local error must be passed as a negative errno (-EBADMSG) per
 * kernel convention; the positive EBADMSG would be reported to userspace as
 * a bogus positive "error" value.
 */
static void rxrpc_proto_abort(const char *why,
			      struct rxrpc_call *call, rxrpc_seq_t seq)
{
	if (rxrpc_abort_call(why, call, seq, RX_PROTOCOL_ERROR, -EBADMSG)) {
		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
		rxrpc_queue_call(call);
	}
}
/*
 * attempt to schedule an instant Tx resend
 *
 * If the resend timer can be stopped (try_to_del_timer_sync() returns >= 0
 * when the timer was inactive or successfully deactivated; -1 means its
 * handler is currently running elsewhere and we must not race it), raise the
 * resend event directly and queue the call.
 */
static inline void rxrpc_instant_resend(struct rxrpc_call *call)
{
	read_lock_bh(&call->state_lock);
	if (try_to_del_timer_sync(&call->resend_timer) >= 0) {
		clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
		/* Only live calls get the event; avoid requeueing if the
		 * event was already pending.
		 */
		if (call->state < RXRPC_CALL_COMPLETE &&
		    !test_and_set_bit(RXRPC_CALL_RESEND_TIMER, &call->events))
			rxrpc_queue_call(call);
	}
	read_unlock_bh(&call->state_lock);
}
/*
 * Note that a call's security negotiation has completed: raise the SECURED
 * event on a still-live call and queue it for processing.  A NULL call is
 * tolerated and ignored.
 */
static void rxrpc_call_is_secure(struct rxrpc_call *call)
{
	_enter("%p", call);

	if (!call)
		return;

	read_lock(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE &&
	    !test_and_set_bit(RXRPC_CALL_SECURED, &call->events))
		rxrpc_queue_call(call);
	read_unlock(&call->state_lock);
}
/*
 * set up an incoming call
 * - called in process context with IRQs enabled
 *
 * NOTE(review): this function continues beyond the visible chunk (the
 * aborted_call/extant_call labels and the new-call insertion path are not
 * shown here).
 */
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx, struct rxrpc_connection *conn, struct rxrpc_header *hdr, gfp_t gfp)
{
	struct rxrpc_call *call, *candidate;
	struct rb_node **p, *parent;
	__be32 call_id;

	_enter(",%d,,%x", conn->debug_id, gfp);

	ASSERT(rx != NULL);

	/* Build a candidate call before taking conn->lock so the allocation
	 * can sleep (gfp permitting).
	 */
	candidate = rxrpc_alloc_call(gfp);
	if (!candidate)
		return ERR_PTR(-EBUSY);

	candidate->socket = rx;
	candidate->conn = conn;
	candidate->cid = hdr->cid;
	candidate->call_id = hdr->callNumber;
	candidate->channel = ntohl(hdr->cid) & RXRPC_CHANNELMASK;
	candidate->rx_data_post = 0;
	candidate->state = RXRPC_CALL_SERVER_ACCEPTING;
	/* Secured connections must complete security negotiation first. */
	if (conn->security_ix > 0)
		candidate->state = RXRPC_CALL_SERVER_SECURING;

	write_lock_bh(&conn->lock);

	/* set the channel for this call */
	call = conn->channels[candidate->channel];
	_debug("channel[%u] is %p", candidate->channel, call);
	if (call && call->call_id == hdr->callNumber) {
		/* already set; must've been a duplicate packet */
		_debug("extant call [%d]", call->state);
		ASSERTCMP(call->conn, ==, conn);

		read_lock(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_LOCALLY_ABORTED:
			if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events))
				rxrpc_queue_call(call);
			/* fall through */
		case RXRPC_CALL_REMOTELY_ABORTED:
			read_unlock(&call->state_lock);
			goto aborted_call;
		default:
			/* Re-use the extant call for the duplicate packet. */
			rxrpc_get_call(call);
			read_unlock(&call->state_lock);
			goto extant_call;
		}
	}
/*
 * remove a call's user ID from the socket tree to make the user ID available
 * again and so that it won't be seen again in association with that call
 *
 * After unlinking the user ID (under rx->call_lock), raise the RELEASE event
 * on the call unless it has already been released.
 */
void rxrpc_remove_user_ID(struct rxrpc_sock *rx, struct rxrpc_call *call)
{
	_debug("RELEASE CALL %d", call->debug_id);

	if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		write_lock_bh(&rx->call_lock);
		rb_erase(&call->sock_node, &call->socket->calls);
		clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
		write_unlock_bh(&rx->call_lock);
	}

	/* Queue the call for release unless that already happened or the
	 * event is already pending.
	 */
	read_lock_bh(&call->state_lock);
	if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
	    !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events))
		rxrpc_queue_call(call);
	read_unlock_bh(&call->state_lock);
}
/*
 * Propose a PING ACK be sent.
 *
 * An immediate ping raises the PING event right away (queueing the call only
 * if requested to run in the background); otherwise the ping deadline is
 * pulled in to the idle-ack delay if that is sooner than the current one.
 */
static void rxrpc_propose_ping(struct rxrpc_call *call, bool immediate, bool background)
{
	ktime_t real_now, target;

	if (immediate) {
		if (background &&
		    !test_and_set_bit(RXRPC_CALL_EV_PING, &call->events))
			rxrpc_queue_call(call);
		return;
	}

	real_now = ktime_get_real();
	target = ktime_add_ms(real_now, rxrpc_idle_ack_delay);
	if (ktime_before(target, call->ping_at)) {
		call->ping_at = target;
		rxrpc_set_timer(call, rxrpc_timer_set_for_ping, real_now);
	}
}
/*
 * abort a call, sending an ABORT packet to the peer
 *
 * Marks the call locally aborted, cancels the resend and ack timers and
 * clears their pending events, then queues the call so its event processor
 * transmits the ABORT packet.
 *
 * NOTE(review): del_timer_sync() is called under a bh-disabled write-lock;
 * this deadlocks if either timer handler can take state_lock — verify the
 * handlers only queue work.
 */
static void rxrpc_send_abort(struct rxrpc_call *call, u32 abort_code)
{
	write_lock_bh(&call->state_lock);

	/* NOTE(review): "<=" also re-aborts an already-COMPLETE call;
	 * confirm "<" was not intended.
	 */
	if (call->state <= RXRPC_CALL_COMPLETE) {
		call->state = RXRPC_CALL_LOCALLY_ABORTED;
		call->abort_code = abort_code;
		set_bit(RXRPC_CALL_ABORT, &call->events);
		del_timer_sync(&call->resend_timer);
		del_timer_sync(&call->ack_timer);
		/* Discard now-stale timer-driven events. */
		clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events);
		clear_bit(RXRPC_CALL_ACK, &call->events);
		clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
		rxrpc_queue_call(call);
	}

	write_unlock_bh(&call->state_lock);
}
/*
 * propose an ACK be sent
 *
 * Arbitrates between the currently proposed ACK (call->ackr_reason) and a new
 * proposal: same reason updates the recorded serial/skew, a higher-priority
 * reason replaces the proposal, and a lower-priority one is subsumed.  The
 * ACK is then either raised immediately (queueing the call if @background) or
 * scheduled via the ack timer with a reason-specific delay.
 */
static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
				u16 skew, u32 serial, bool immediate,
				bool background,
				enum rxrpc_propose_ack_trace why)
{
	enum rxrpc_propose_ack_outcome outcome = rxrpc_propose_ack_use;
	unsigned int expiry = rxrpc_soft_ack_delay;
	ktime_t now, ack_at;
	s8 prior = rxrpc_ack_priority[ack_reason];

	/* Pings are handled specially because we don't want to accidentally
	 * lose a ping response by subsuming it into a ping.
	 */
	if (ack_reason == RXRPC_ACK_PING) {
		rxrpc_propose_ping(call, immediate, background);
		goto trace;
	}

	/* Update DELAY, IDLE, REQUESTED and PING_RESPONSE ACK serial
	 * numbers, but we don't alter the timeout.
	 */
	_debug("prior %u %u vs %u %u",
	       ack_reason, prior,
	       call->ackr_reason, rxrpc_ack_priority[call->ackr_reason]);
	if (ack_reason == call->ackr_reason) {
		if (RXRPC_ACK_UPDATEABLE & (1 << ack_reason)) {
			outcome = rxrpc_propose_ack_update;
			call->ackr_serial = serial;
			call->ackr_skew = skew;
		}
		if (!immediate)
			goto trace;
	} else if (prior > rxrpc_ack_priority[call->ackr_reason]) {
		call->ackr_reason = ack_reason;
		call->ackr_serial = serial;
		call->ackr_skew = skew;
	} else {
		/* Lower priority than the pending proposal: drop it. */
		outcome = rxrpc_propose_ack_subsume;
	}

	/* Pick the delay for this reason; anything unlisted goes out
	 * immediately.
	 */
	switch (ack_reason) {
	case RXRPC_ACK_REQUESTED:
		if (rxrpc_requested_ack_delay < expiry)
			expiry = rxrpc_requested_ack_delay;
		/* Don't rush the very first requested ACK. */
		if (serial == 1)
			immediate = false;
		break;

	case RXRPC_ACK_DELAY:
		if (rxrpc_soft_ack_delay < expiry)
			expiry = rxrpc_soft_ack_delay;
		break;

	case RXRPC_ACK_IDLE:
		if (rxrpc_idle_ack_delay < expiry)
			expiry = rxrpc_idle_ack_delay;
		break;

	default:
		immediate = true;
		break;
	}

	if (test_bit(RXRPC_CALL_EV_ACK, &call->events)) {
		_debug("already scheduled");
	} else if (immediate || expiry == 0) {
		_debug("immediate ACK %lx", call->events);
		if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events) &&
		    background)
			rxrpc_queue_call(call);
	} else {
		/* Only pull the ack deadline earlier, never push it back. */
		now = ktime_get_real();
		ack_at = ktime_add_ms(now, expiry);
		if (ktime_before(ack_at, call->ack_at)) {
			call->ack_at = ack_at;
			rxrpc_set_timer(call, rxrpc_timer_set_for_ack, now);
		}
	}

trace:
	trace_rxrpc_propose_ack(call, why, ack_reason, serial,
				immediate, background, outcome);
}