/*
 * End the packet reception phase.
 */
static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
{
	_enter("%d,%s", call->debug_id, rxrpc_call_states[call->state]);

	trace_rxrpc_receive(call, rxrpc_receive_end, 0, call->rx_top);
	ASSERTCMP(call->rx_hard_ack, ==, call->rx_top);

	if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) {
		rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, 0, serial, true, false,
				  rxrpc_propose_ack_terminal_ack);
		rxrpc_send_ack_packet(call, false);
	}

	write_lock_bh(&call->state_lock);

	switch (call->state) {
	case RXRPC_CALL_CLIENT_RECV_REPLY:
		__rxrpc_call_completed(call);
		write_unlock_bh(&call->state_lock);
		break;

	case RXRPC_CALL_SERVER_RECV_REQUEST:
		call->tx_phase = true;
		call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
		call->ack_at = call->expire_at;
		write_unlock_bh(&call->state_lock);
		rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial, false, true,
				  rxrpc_propose_ack_processing_op);
		break;
	default:
		write_unlock_bh(&call->state_lock);
		break;
	}
}
/*
 * Discard a packet we've used up and advance the Rx window by one.
 */
static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	rxrpc_serial_t serial;
	rxrpc_seq_t hard_ack, top;
	u8 flags;
	int ix;

	_enter("%d", call->debug_id);

	hard_ack = call->rx_hard_ack;
	top = smp_load_acquire(&call->rx_top);
	ASSERT(before(hard_ack, top));

	hard_ack++;
	ix = hard_ack & RXRPC_RXTX_BUFF_MASK;
	skb = call->rxtx_buffer[ix];
	rxrpc_see_skb(skb, rxrpc_skb_rx_rotated);
	sp = rxrpc_skb(skb);
	flags = sp->hdr.flags;
	serial = sp->hdr.serial;
	if (call->rxtx_annotations[ix] & RXRPC_RX_ANNO_JUMBO)
		serial += (call->rxtx_annotations[ix] & RXRPC_RX_ANNO_JUMBO) - 1;

	call->rxtx_buffer[ix] = NULL;
	call->rxtx_annotations[ix] = 0;
	/* Barrier against rxrpc_input_data(). */
	smp_store_release(&call->rx_hard_ack, hard_ack);

	rxrpc_free_skb(skb, rxrpc_skb_rx_freed);

	_debug("%u,%u,%02x", hard_ack, top, flags);
	trace_rxrpc_receive(call, rxrpc_receive_rotate, serial, hard_ack);
	if (flags & RXRPC_LAST_PACKET) {
		rxrpc_end_rx_phase(call, serial);
	} else {
		/* Check to see if there's an ACK that needs sending. */
		if (after_eq(hard_ack, call->ackr_consumed + 2) ||
		    after_eq(top, call->ackr_seen + 2) ||
		    (hard_ack == top && after(hard_ack, call->ackr_consumed)))
			rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial,
					  true, false,
					  rxrpc_propose_ack_rotate_rx);
		if (call->ackr_reason)
			rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK);
	}
}
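/*
 * Illustrative sketch (hypothetical helper, not part of rxrpc): the rotate
 * above pairs with rxrpc_input_data() through an acquire/release protocol on
 * rx_top and rx_hard_ack.  The producer side looks roughly like the function
 * below - the consumer loads rx_top with smp_load_acquire() and publishes the
 * freed slot with smp_store_release() on rx_hard_ack, so a slot's skb pointer
 * is always seen as filled before the sequence number that covers it, and is
 * always cleared before the slot can be reused.
 */
static void example_rx_produce(struct rxrpc_call *call, struct sk_buff *skb,
			       rxrpc_seq_t seq)
{
	int ix = seq & RXRPC_RXTX_BUFF_MASK;

	call->rxtx_buffer[ix] = skb;		/* Fill the slot first... */
	smp_store_release(&call->rx_top, seq);	/* ...then publish it. */
}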
/**
 * rxrpc_kernel_probe_life - Poke the peer to see if it's still alive
 * @sock: The socket the call is on
 * @call: The call to check
 *
 * In conjunction with rxrpc_kernel_check_life(), allow a kernel service to
 * find out whether a call is still alive by pinging it.  This should cause
 * the life state to be bumped in about 2*RTT.
 *
 * This must be called in TASK_RUNNING state on pain of might_sleep()
 * objecting.
 */
void rxrpc_kernel_probe_life(struct socket *sock, struct rxrpc_call *call)
{
	rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
			  rxrpc_propose_ack_ping_for_check_life);
	rxrpc_send_ack_packet(call, true);
}
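/*
 * Illustrative sketch (hypothetical helper, not part of rxrpc): a kernel
 * service might pair rxrpc_kernel_check_life(), which returns a counter that
 * bumps while the call makes progress, with the probe above to decide whether
 * a quiet call is dead.  The function name and the 100ms wait are made up for
 * the example (needs <linux/delay.h>); a real caller would poll over ~2*RTT.
 */
static bool example_call_seems_alive(struct socket *sock,
				     struct rxrpc_call *call)
{
	u32 before_life = rxrpc_kernel_check_life(sock, call);

	rxrpc_kernel_probe_life(sock, call);	/* Must be TASK_RUNNING */
	msleep(100);				/* Illustrative wait only */

	return rxrpc_kernel_check_life(sock, call) != before_life;
}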
/*
 * Perform retransmission of NAK'd and unack'd packets.
 */
static void rxrpc_resend(struct rxrpc_call *call, ktime_t now)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	rxrpc_seq_t cursor, seq, top;
	ktime_t max_age, oldest, ack_ts;
	int ix;
	u8 annotation, anno_type, retrans = 0, unacked = 0;

	_enter("{%d,%d}", call->tx_hard_ack, call->tx_top);

	max_age = ktime_sub_ms(now, rxrpc_resend_timeout);

	spin_lock_bh(&call->lock);

	cursor = call->tx_hard_ack;
	top = call->tx_top;
	ASSERT(before_eq(cursor, top));
	if (cursor == top)
		goto out_unlock;

	/* Scan the packet list without dropping the lock and decide which of
	 * the packets in the Tx buffer we're going to resend and what the new
	 * resend timeout will be.
	 */
	oldest = now;
	for (seq = cursor + 1; before_eq(seq, top); seq++) {
		ix = seq & RXRPC_RXTX_BUFF_MASK;
		annotation = call->rxtx_annotations[ix];
		anno_type = annotation & RXRPC_TX_ANNO_MASK;
		annotation &= ~RXRPC_TX_ANNO_MASK;
		if (anno_type == RXRPC_TX_ANNO_ACK)
			continue;

		skb = call->rxtx_buffer[ix];
		rxrpc_see_skb(skb, rxrpc_skb_tx_seen);
		sp = rxrpc_skb(skb);

		if (anno_type == RXRPC_TX_ANNO_UNACK) {
			if (ktime_after(skb->tstamp, max_age)) {
				if (ktime_before(skb->tstamp, oldest))
					oldest = skb->tstamp;
				continue;
			}
			if (!(annotation & RXRPC_TX_ANNO_RESENT))
				unacked++;
		}

		/* Okay, we need to retransmit a packet. */
		call->rxtx_annotations[ix] = RXRPC_TX_ANNO_RETRANS | annotation;
		retrans++;
		trace_rxrpc_retransmit(call, seq, annotation | anno_type,
				       ktime_to_ns(ktime_sub(skb->tstamp,
							     max_age)));
	}

	call->resend_at = ktime_add_ms(oldest, rxrpc_resend_timeout);

	if (unacked)
		rxrpc_congestion_timeout(call);

	/* If there was nothing that needed retransmission then it's likely
	 * that an ACK got lost somewhere.  Send a ping to find out instead of
	 * retransmitting data.
	 */
	if (!retrans) {
		rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now);
		spin_unlock_bh(&call->lock);
		ack_ts = ktime_sub(now, call->acks_latest_ts);
		if (ktime_to_ns(ack_ts) < call->peer->rtt)
			goto out;
		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
				  rxrpc_propose_ack_ping_for_lost_ack);
		rxrpc_send_ack_packet(call, true);
		goto out;
	}

	/* Now go through the Tx window and perform the retransmissions.  We
	 * have to drop the lock for each send.  If an ACK comes in whilst the
	 * lock is dropped, it may clear some of the retransmission markers for
	 * packets that it soft-ACKs.
	 */
	for (seq = cursor + 1; before_eq(seq, top); seq++) {
		ix = seq & RXRPC_RXTX_BUFF_MASK;
		annotation = call->rxtx_annotations[ix];
		anno_type = annotation & RXRPC_TX_ANNO_MASK;
		if (anno_type != RXRPC_TX_ANNO_RETRANS)
			continue;

		skb = call->rxtx_buffer[ix];
		rxrpc_get_skb(skb, rxrpc_skb_tx_got);
		spin_unlock_bh(&call->lock);

		if (rxrpc_send_data_packet(call, skb, true) < 0) {
			rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
			return;
		}

		if (rxrpc_is_client_call(call))
			rxrpc_expose_client_call(call);

		rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
		spin_lock_bh(&call->lock);

		/* We need to clear the retransmit state, but there are two
		 * things we need to be aware of: A new ACK/NAK might have been
		 * received and the packet might have been hard-ACK'd (in which
		 * case it will no longer be in the buffer).
		 */
		if (after(seq, call->tx_hard_ack)) {
			annotation = call->rxtx_annotations[ix];
			anno_type = annotation & RXRPC_TX_ANNO_MASK;
			if (anno_type == RXRPC_TX_ANNO_RETRANS ||
			    anno_type == RXRPC_TX_ANNO_NAK) {
				annotation &= ~RXRPC_TX_ANNO_MASK;
				annotation |= RXRPC_TX_ANNO_UNACK;
			}
			annotation |= RXRPC_TX_ANNO_RESENT;
			call->rxtx_annotations[ix] = annotation;
		}

		if (after(call->tx_hard_ack, seq))
			seq = call->tx_hard_ack;
	}

out_unlock:
	spin_unlock_bh(&call->lock);
out:
	_leave("");
}
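/*
 * Illustrative sketch (hypothetical helper, not kernel code): the per-slot Tx
 * annotation used above packs an ACK state into the low bits
 * (RXRPC_TX_ANNO_MASK) and carries flag bits such as RXRPC_TX_ANNO_RESENT
 * alongside it.  The inline demotion performed after a resend - dropping a
 * RETRANS or NAK slot back to "unacked" while remembering that it was resent -
 * could be factored out like this:
 */
static u8 example_demote_to_unack(u8 annotation)
{
	annotation &= ~RXRPC_TX_ANNO_MASK;	/* Strip the old ACK state */
	annotation |= RXRPC_TX_ANNO_UNACK;	/* Now awaiting a (re-)ACK */
	annotation |= RXRPC_TX_ANNO_RESENT;	/* Don't count it as lost again */
	return annotation;
}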
/*
 * Send an ACK call packet.
 */
int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping)
{
	struct rxrpc_connection *conn = NULL;
	struct rxrpc_ack_buffer *pkt;
	struct msghdr msg;
	struct kvec iov[2];
	rxrpc_serial_t serial;
	rxrpc_seq_t hard_ack, top;
	size_t len, n;
	int ret;
	u8 reason;

	spin_lock_bh(&call->lock);
	if (call->conn)
		conn = rxrpc_get_connection_maybe(call->conn);
	spin_unlock_bh(&call->lock);
	if (!conn)
		return -ECONNRESET;

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt) {
		rxrpc_put_connection(conn);
		return -ENOMEM;
	}

	msg.msg_name	= &call->peer->srx.transport;
	msg.msg_namelen	= call->peer->srx.transport_len;
	msg.msg_control	= NULL;
	msg.msg_controllen = 0;
	msg.msg_flags	= 0;

	pkt->whdr.epoch		= htonl(conn->proto.epoch);
	pkt->whdr.cid		= htonl(call->cid);
	pkt->whdr.callNumber	= htonl(call->call_id);
	pkt->whdr.seq		= 0;
	pkt->whdr.type		= RXRPC_PACKET_TYPE_ACK;
	pkt->whdr.flags		= RXRPC_SLOW_START_OK | conn->out_clientflag;
	pkt->whdr.userStatus	= 0;
	pkt->whdr.securityIndex	= call->security_ix;
	pkt->whdr._rsvd		= 0;
	pkt->whdr.serviceId	= htons(call->service_id);

	spin_lock_bh(&call->lock);
	if (ping) {
		reason = RXRPC_ACK_PING;
	} else {
		reason = call->ackr_reason;
		if (!call->ackr_reason) {
			spin_unlock_bh(&call->lock);
			ret = 0;
			goto out;
		}
		call->ackr_reason = 0;
	}
	n = rxrpc_fill_out_ack(call, pkt, &hard_ack, &top, reason);

	spin_unlock_bh(&call->lock);

	iov[0].iov_base	= pkt;
	iov[0].iov_len	= sizeof(pkt->whdr) + sizeof(pkt->ack) + n;
	iov[1].iov_base	= &pkt->ackinfo;
	iov[1].iov_len	= sizeof(pkt->ackinfo);
	len = iov[0].iov_len + iov[1].iov_len;

	serial = atomic_inc_return(&conn->serial);
	pkt->whdr.serial = htonl(serial);
	trace_rxrpc_tx_ack(call, serial,
			   ntohl(pkt->ack.firstPacket),
			   ntohl(pkt->ack.serial),
			   pkt->ack.reason, pkt->ack.nAcks);

	if (ping) {
		call->ping_serial = serial;
		smp_wmb();
		/* We need to stick a time in before we send the packet in case
		 * the reply gets back before kernel_sendmsg() completes - but
		 * asking UDP to send the packet can take a relatively long
		 * time, so we update the time after, on the assumption that
		 * the packet transmission is more likely to happen towards the
		 * end of the kernel_sendmsg() call.
		 */
		call->ping_time = ktime_get_real();
		set_bit(RXRPC_CALL_PINGING, &call->flags);
		trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_ping, serial);
	}

	ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
	if (ping)
		call->ping_time = ktime_get_real();

	if (call->state < RXRPC_CALL_COMPLETE) {
		if (ret < 0) {
			if (ping)
				clear_bit(RXRPC_CALL_PINGING, &call->flags);
			rxrpc_propose_ACK(call, pkt->ack.reason,
					  ntohs(pkt->ack.maxSkew),
					  ntohl(pkt->ack.serial),
					  true, true,
					  rxrpc_propose_ack_retry_tx);
		} else {
			spin_lock_bh(&call->lock);
			if (after(hard_ack, call->ackr_consumed))
				call->ackr_consumed = hard_ack;
			if (after(top, call->ackr_seen))
				call->ackr_seen = top;
			spin_unlock_bh(&call->lock);
		}
	}

out:
	rxrpc_put_connection(conn);
	kfree(pkt);
	return ret;
}
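/*
 * Illustrative sketch (hypothetical helper, not the real input path): when
 * the ACK answering the PING comes back, the RTT sample is simply the
 * response receive time minus the ping_time recorded above.  The PINGING bit
 * gates the sample so a late or duplicated response isn't counted twice, and
 * the smp_rmb() pairs with the smp_wmb() in rxrpc_send_ack_packet().
 */
static void example_note_ping_response(struct rxrpc_call *call,
				       rxrpc_serial_t ack_serial,
				       ktime_t resp_time)
{
	ktime_t ping_time = call->ping_time;

	smp_rmb();	/* ping_time before ping_serial, as written above */
	if (ack_serial != call->ping_serial)
		return;
	if (!test_and_clear_bit(RXRPC_CALL_PINGING, &call->flags))
		return;

	/* One round trip: feed this into the peer's smoothed RTT estimate. */
	(void)ktime_sub(resp_time, ping_time);
}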