struct sk_buff *dccp_make_response(const struct sock *sk, struct dst_entry *dst,
                                   struct request_sock *req)
{
        struct dccp_hdr *dh;
        struct dccp_request_sock *dreq;
        const u32 dccp_header_size = sizeof(struct dccp_hdr) +
                                     sizeof(struct dccp_hdr_ext) +
                                     sizeof(struct dccp_hdr_response);
        struct sk_buff *skb;

        /* sk is marked const to clearly express that we don't hold the
         * socket lock. sock_wmalloc() changes sk->sk_wmem_alloc atomically,
         * so it is safe to promote sk to non-const.
         */
        skb = sock_wmalloc((struct sock *)sk, MAX_DCCP_HEADER, 1,
                           GFP_ATOMIC);
        if (!skb)
                return NULL;

        skb_reserve(skb, MAX_DCCP_HEADER);

        skb_dst_set(skb, dst_clone(dst));

        dreq = dccp_rsk(req);
        if (inet_rsk(req)->acked)       /* increase GSS upon retransmission */
                dccp_inc_seqno(&dreq->dreq_gss);
        DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
        DCCP_SKB_CB(skb)->dccpd_seq  = dreq->dreq_gss;

        /* Resolve feature dependencies resulting from choice of CCID */
        if (dccp_feat_server_ccid_dependencies(dreq))
                goto response_failed;

        if (dccp_insert_options_rsk(dreq, skb))
                goto response_failed;

        /* Build and checksum header */
        dh = dccp_zeroed_hdr(skb, dccp_header_size);

        dh->dccph_sport = htons(inet_rsk(req)->ir_num);
        dh->dccph_dport = inet_rsk(req)->ir_rmt_port;
        dh->dccph_doff  = (dccp_header_size +
                           DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
        dh->dccph_type  = DCCP_PKT_RESPONSE;
        dh->dccph_x     = 1;
        dccp_hdr_set_seq(dh, dreq->dreq_gss);
        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_gsr);
        dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;

        dccp_csum_outgoing(skb);

        /* We use `acked' to remember that a Response was already sent. */
        inet_rsk(req)->acked = 1;
        DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
        return skb;
response_failed:
        kfree_skb(skb);
        return NULL;
}
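Both the retransmission bump above and the GSS increments in the functions below go through dccp_inc_seqno(), which has to wrap modulo 2^48 because DCCP sequence numbers are 48 bits wide. A minimal sketch of that behaviour (the sketch_* name is hypothetical; the in-tree helper and its UINT48_MAX mask live in net/dccp/dccp.h and may differ in detail):

/* Sketch: increment a 48-bit sequence number, wrapping at 2^48. */
static inline void sketch_inc_seqno(u64 *seqno)
{
        *seqno = (*seqno + 1) & ((1ULL << 48) - 1);     /* UINT48_MAX */
}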
struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
                                   struct request_sock *req)
{
        struct dccp_hdr *dh;
        struct dccp_request_sock *dreq;
        const u32 dccp_header_size = sizeof(struct dccp_hdr) +
                                     sizeof(struct dccp_hdr_ext) +
                                     sizeof(struct dccp_hdr_response);
        struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1,
                                           GFP_ATOMIC);
        if (skb == NULL)
                return NULL;

        /* Reserve space for headers. */
        skb_reserve(skb, sk->sk_prot->max_header);

        skb_dst_set(skb, dst_clone(dst));

        dreq = dccp_rsk(req);
        if (inet_rsk(req)->acked)       /* increase ISS upon retransmission */
                dccp_inc_seqno(&dreq->dreq_iss);
        DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
        DCCP_SKB_CB(skb)->dccpd_seq  = dreq->dreq_iss;

        /* Resolve feature dependencies resulting from choice of CCID */
        if (dccp_feat_server_ccid_dependencies(dreq))
                goto response_failed;

        if (dccp_insert_options_rsk(dreq, skb))
                goto response_failed;

        /* Build and checksum header */
        dh = dccp_zeroed_hdr(skb, dccp_header_size);

        dh->dccph_sport = inet_rsk(req)->loc_port;
        dh->dccph_dport = inet_rsk(req)->rmt_port;
        dh->dccph_doff  = (dccp_header_size +
                           DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
        dh->dccph_type  = DCCP_PKT_RESPONSE;
        dh->dccph_x     = 1;
        dccp_hdr_set_seq(dh, dreq->dreq_iss);
        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_isr);
        dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;

        dccp_csum_outgoing(skb);

        /* We use `acked' to remember that a Response was already sent. */
        inet_rsk(req)->acked = 1;
        DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
        return skb;
response_failed:
        kfree_skb(skb);
        return NULL;
}
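The two dccp_make_response() variants above obtain a zeroed header via dccp_zeroed_hdr(), while the older functions below still open-code the same step as skb_push() followed by memset(). A sketch of what such a helper amounts to, assuming the skb_reset_transport_header()/skb_transport_header() accessors (the sketch_* name is hypothetical):

/* Sketch: push headlen bytes of headroom, mark it as the transport
 * header, and zero it so the caller can fill in individual fields. */
static struct dccp_hdr *sketch_zeroed_hdr(struct sk_buff *skb, int headlen)
{
        skb_push(skb, headlen);
        skb_reset_transport_header(skb);
        return memset(skb_transport_header(skb), 0, headlen);
}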
static struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst,
                                       const enum dccp_reset_codes code)
{
        struct dccp_hdr *dh;
        struct dccp_sock *dp = dccp_sk(sk);
        const u32 dccp_header_size = sizeof(struct dccp_hdr) +
                                     sizeof(struct dccp_hdr_ext) +
                                     sizeof(struct dccp_hdr_reset);
        struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1,
                                           GFP_ATOMIC);
        if (skb == NULL)
                return NULL;

        /* Reserve space for headers. */
        skb_reserve(skb, sk->sk_prot->max_header);
        skb->dst = dst_clone(dst);
        skb->csum = 0;

        dccp_inc_seqno(&dp->dccps_gss);

        DCCP_SKB_CB(skb)->dccpd_reset_code = code;
        DCCP_SKB_CB(skb)->dccpd_type       = DCCP_PKT_RESET;
        DCCP_SKB_CB(skb)->dccpd_seq        = dp->dccps_gss;

        if (dccp_insert_options(sk, skb)) {
                kfree_skb(skb);
                return NULL;
        }

        skb->h.raw = skb_push(skb, dccp_header_size);

        dh = dccp_hdr(skb);
        memset(dh, 0, dccp_header_size);

        dh->dccph_sport = inet_sk(sk)->sport;
        dh->dccph_dport = inet_sk(sk)->dport;
        dh->dccph_doff  = (dccp_header_size +
                           DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
        dh->dccph_type  = DCCP_PKT_RESET;
        dh->dccph_x     = 1;

        dccp_hdr_set_seq(dh, dp->dccps_gss);
        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dp->dccps_gsr);

        dccp_hdr_reset(skb)->dccph_reset_code = code;
        inet_csk(sk)->icsk_af_ops->send_check(sk, skb->len, skb);

        DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
        return skb;
}
struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst,
                                const enum dccp_reset_codes code)
{
        struct dccp_hdr *dh;
        struct dccp_sock *dp = dccp_sk(sk);
        const int dccp_header_size = sizeof(struct dccp_hdr) +
                                     sizeof(struct dccp_hdr_ext) +
                                     sizeof(struct dccp_hdr_reset);
        struct sk_buff *skb = sock_wmalloc(sk, MAX_HEADER + DCCP_MAX_OPT_LEN +
                                           dccp_header_size, 1, GFP_ATOMIC);
        if (skb == NULL)
                return NULL;

        /* Reserve space for headers. */
        skb_reserve(skb, MAX_HEADER + DCCP_MAX_OPT_LEN + dccp_header_size);

        skb->dst = dst_clone(dst);
        skb->csum = 0;

        dccp_inc_seqno(&dp->dccps_gss);

        DCCP_SKB_CB(skb)->dccpd_reset_code = code;
        DCCP_SKB_CB(skb)->dccpd_type       = DCCP_PKT_RESET;
        DCCP_SKB_CB(skb)->dccpd_seq        = dp->dccps_gss;
        dccp_insert_options(sk, skb);

        skb->h.raw = skb_push(skb, dccp_header_size);

        dh = dccp_hdr(skb);
        memset(dh, 0, dccp_header_size);

        dh->dccph_sport = inet_sk(sk)->sport;
        dh->dccph_dport = inet_sk(sk)->dport;
        dh->dccph_doff  = (dccp_header_size +
                           DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
        dh->dccph_type  = DCCP_PKT_RESET;
        dh->dccph_x     = 1;

        dccp_hdr_set_seq(dh, dp->dccps_gss);
        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dp->dccps_gsr);

        dccp_hdr_reset(skb)->dccph_reset_code = code;

        dh->dccph_checksum = dccp_v4_checksum(skb, inet_sk(sk)->saddr,
                                              inet_sk(sk)->daddr);

        DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
        return skb;
}
/*
 * All SKB's seen here are completely headerless. It is our
 * job to build the DCCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 */
static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
        if (likely(skb != NULL)) {
                const struct inet_sock *inet = inet_sk(sk);
                const struct inet_connection_sock *icsk = inet_csk(sk);
                struct dccp_sock *dp = dccp_sk(sk);
                struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
                struct dccp_hdr *dh;
                /* XXX For now we're using only 48 bits sequence numbers */
                const u32 dccp_header_size = sizeof(*dh) +
                                             sizeof(struct dccp_hdr_ext) +
                                             dccp_packet_hdr_len(dcb->dccpd_type);
                int err, set_ack = 1;
                u64 ackno = dp->dccps_gsr;

                dccp_inc_seqno(&dp->dccps_gss);

                switch (dcb->dccpd_type) {
                case DCCP_PKT_DATA:
                        set_ack = 0;
                        /* fall through */
                case DCCP_PKT_DATAACK:
                case DCCP_PKT_RESET:
                        break;

                case DCCP_PKT_REQUEST:
                        set_ack = 0;
                        /* fall through */

                case DCCP_PKT_SYNC:
                case DCCP_PKT_SYNCACK:
                        ackno = dcb->dccpd_ack_seq;
                        /* fall through */
                default:
                        /*
                         * Set owner/destructor: some skbs are allocated via
                         * alloc_skb (e.g. when retransmission may happen).
                         * Only Data, DataAck, and Reset packets should come
                         * through here with skb->sk set.
                         */
                        WARN_ON(skb->sk);
                        skb_set_owner_w(skb, sk);
                        break;
                }

                dcb->dccpd_seq = dp->dccps_gss;

                if (dccp_insert_options(sk, skb)) {
                        kfree_skb(skb);
                        return -EPROTO;
                }

                /* Build DCCP header and checksum it. */
                dh = dccp_zeroed_hdr(skb, dccp_header_size);
                dh->dccph_type  = dcb->dccpd_type;
                dh->dccph_sport = inet->sport;
                dh->dccph_dport = inet->dport;
                dh->dccph_doff  = (dccp_header_size + dcb->dccpd_opt_len) / 4;
                dh->dccph_ccval = dcb->dccpd_ccval;
                dh->dccph_cscov = dp->dccps_pcslen;
                /* XXX For now we're using only 48 bits sequence numbers */
                dh->dccph_x     = 1;

                dp->dccps_awh = dp->dccps_gss;
                dccp_hdr_set_seq(dh, dp->dccps_gss);
                if (set_ack)
                        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno);

                switch (dcb->dccpd_type) {
                case DCCP_PKT_REQUEST:
                        dccp_hdr_request(skb)->dccph_req_service =
                                                        dp->dccps_service;
                        break;
                case DCCP_PKT_RESET:
                        dccp_hdr_reset(skb)->dccph_reset_code =
                                                        dcb->dccpd_reset_code;
                        break;
                }

                icsk->icsk_af_ops->send_check(sk, 0, skb);

                if (set_ack)
                        dccp_event_ack_sent(sk);

                DCCP_INC_STATS(DCCP_MIB_OUTSEGS);

                err = icsk->icsk_af_ops->queue_xmit(skb, 0);
                return net_xmit_eval(err);
        }
        return -ENOBUFS;
}
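net_xmit_eval() above encodes the congestion-notification convention that the older variant at the end of this section spells out by hand: NET_XMIT_CN does not mean the packet was lost, only that the device is starting to drop packets, so it is reported to the caller as success. A sketch of the equivalent logic (the sketch_* name is hypothetical; the real net_xmit_eval() is a macro in the networking headers):

/* Sketch: map queue_xmit return codes to what callers should see;
 * NET_XMIT_CN is treated as success. */
static inline int sketch_xmit_eval(int err)
{
        return err == NET_XMIT_CN ? 0 : err;
}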
static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
{
        const struct dccp_hdr *dh = dccp_hdr(skb);
        struct dccp_sock *dp = dccp_sk(sk);
        u64 lswl, lawl;

        /*
         *   Step 5: Prepare sequence numbers for Sync
         *     If P.type == Sync or P.type == SyncAck,
         *        If S.AWL <= P.ackno <= S.AWH and P.seqno >= S.SWL,
         *           / * P is valid, so update sequence number variables
         *               accordingly.  After this update, P will pass the tests
         *               in Step 6.  A SyncAck is generated if necessary in
         *               Step 15 * /
         *           Update S.GSR, S.SWL, S.SWH
         *        Otherwise,
         *           Drop packet and return
         */
        if (dh->dccph_type == DCCP_PKT_SYNC ||
            dh->dccph_type == DCCP_PKT_SYNCACK) {
                if (between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
                              dp->dccps_awl, dp->dccps_awh) &&
                    !before48(DCCP_SKB_CB(skb)->dccpd_seq, dp->dccps_swl))
                        dccp_update_gsr(sk, DCCP_SKB_CB(skb)->dccpd_seq);
                else
                        return -1;
        }

        /*
         *   Step 6: Check sequence numbers
         *      Let LSWL = S.SWL and LAWL = S.AWL
         *      If P.type == CloseReq or P.type == Close or P.type == Reset,
         *        LSWL := S.GSR + 1, LAWL := S.GAR
         *      If LSWL <= P.seqno <= S.SWH
         *           and (P.ackno does not exist or LAWL <= P.ackno <= S.AWH),
         *        Update S.GSR, S.SWL, S.SWH
         *        If P.type != Sync,
         *           Update S.GAR
         *      Otherwise,
         *        Send Sync packet acknowledging P.seqno
         *        Drop packet and return
         */
        lswl = dp->dccps_swl;
        lawl = dp->dccps_awl;

        if (dh->dccph_type == DCCP_PKT_CLOSEREQ ||
            dh->dccph_type == DCCP_PKT_CLOSE ||
            dh->dccph_type == DCCP_PKT_RESET) {
                lswl = dp->dccps_gsr;
                dccp_inc_seqno(&lswl);
                lawl = dp->dccps_gar;
        }

        if (between48(DCCP_SKB_CB(skb)->dccpd_seq, lswl, dp->dccps_swh) &&
            (DCCP_SKB_CB(skb)->dccpd_ack_seq == DCCP_PKT_WITHOUT_ACK_SEQ ||
             between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
                       lawl, dp->dccps_awh))) {
                dccp_update_gsr(sk, DCCP_SKB_CB(skb)->dccpd_seq);

                if (dh->dccph_type != DCCP_PKT_SYNC &&
                    (DCCP_SKB_CB(skb)->dccpd_ack_seq !=
                     DCCP_PKT_WITHOUT_ACK_SEQ))
                        dp->dccps_gar = DCCP_SKB_CB(skb)->dccpd_ack_seq;
        } else {
                DCCP_WARN("DCCP: Step 6 failed for %s packet, "
                          "(LSWL(%llu) <= P.seqno(%llu) <= S.SWH(%llu)) and "
                          "(P.ackno %s or LAWL(%llu) <= P.ackno(%llu) "
                          "<= S.AWH(%llu), sending SYNC...\n",
                          dccp_packet_name(dh->dccph_type),
                          (unsigned long long) lswl,
                          (unsigned long long) DCCP_SKB_CB(skb)->dccpd_seq,
                          (unsigned long long) dp->dccps_swh,
                          (DCCP_SKB_CB(skb)->dccpd_ack_seq ==
                           DCCP_PKT_WITHOUT_ACK_SEQ) ? "doesn't exist"
                                                     : "exists",
                          (unsigned long long) lawl,
                          (unsigned long long) DCCP_SKB_CB(skb)->dccpd_ack_seq,
                          (unsigned long long) dp->dccps_awh);

                dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, DCCP_PKT_SYNC);
                return -1;
        }

        return 0;
}
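The window tests above depend on circular comparisons over the 48-bit sequence space. One way to implement them, and roughly what the before48()/between48() helpers do, is to shift the 48-bit values into the upper bits of a u64 so that native modular arithmetic supplies the wraparound; a sketch with hypothetical sketch_* names:

/* Sketch: shifting left by 16 places a 48-bit value in the top of a
 * u64, so ordinary u64 subtraction wraps exactly at 2^48. */
static inline int sketch_before48(const u64 seq1, const u64 seq2)
{
        return (s64)((seq2 << 16) - (seq1 << 16)) > 0;
}

/* Is seq2 <= seq1 <= seq3 on the 48-bit circle? */
static inline int sketch_between48(const u64 seq1, const u64 seq2,
                                   const u64 seq3)
{
        return (seq3 << 16) - (seq2 << 16) >= (seq1 << 16) - (seq2 << 16);
}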
/*
 * All SKB's seen here are completely headerless. It is our
 * job to build the DCCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 */
int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
        if (likely(skb != NULL)) {
                const struct inet_sock *inet = inet_sk(sk);
                struct dccp_sock *dp = dccp_sk(sk);
                struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
                struct dccp_hdr *dh;
                /* XXX For now we're using only 48 bits sequence numbers */
                const int dccp_header_size = sizeof(*dh) +
                                             sizeof(struct dccp_hdr_ext) +
                                             dccp_packet_hdr_len(dcb->dccpd_type);
                int err, set_ack = 1;
                u64 ackno = dp->dccps_gsr;

                dccp_inc_seqno(&dp->dccps_gss);

                switch (dcb->dccpd_type) {
                case DCCP_PKT_DATA:
                        set_ack = 0;
                        break;
                case DCCP_PKT_SYNC:
                case DCCP_PKT_SYNCACK:
                        ackno = dcb->dccpd_seq;
                        break;
                }

                dcb->dccpd_seq = dp->dccps_gss;
                dccp_insert_options(sk, skb);

                skb->h.raw = skb_push(skb, dccp_header_size);
                dh = dccp_hdr(skb);

                if (!skb->sk)
                        skb_set_owner_w(skb, sk);

                /* Build DCCP header and checksum it. */
                memset(dh, 0, dccp_header_size);
                dh->dccph_type  = dcb->dccpd_type;
                dh->dccph_sport = inet->sport;
                dh->dccph_dport = inet->dport;
                dh->dccph_doff  = (dccp_header_size + dcb->dccpd_opt_len) / 4;
                dh->dccph_ccval = dcb->dccpd_ccval;
                /* XXX For now we're using only 48 bits sequence numbers */
                dh->dccph_x     = 1;

                dp->dccps_awh = dp->dccps_gss;
                dccp_hdr_set_seq(dh, dp->dccps_gss);
                if (set_ack)
                        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno);

                switch (dcb->dccpd_type) {
                case DCCP_PKT_REQUEST:
                        dccp_hdr_request(skb)->dccph_req_service =
                                                        dp->dccps_service;
                        break;
                case DCCP_PKT_RESET:
                        dccp_hdr_reset(skb)->dccph_reset_code =
                                                        dcb->dccpd_reset_code;
                        break;
                }

                dh->dccph_checksum = dccp_v4_checksum(skb, inet->saddr,
                                                      inet->daddr);

                if (set_ack)
                        dccp_event_ack_sent(sk);

                DCCP_INC_STATS(DCCP_MIB_OUTSEGS);

                memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
                err = ip_queue_xmit(skb, 0);
                if (err <= 0)
                        return err;

                /* NET_XMIT_CN is special. It does not guarantee that this
                 * packet is lost. It tells us that the device is about to
                 * start dropping packets, or already drops some packets of
                 * the same priority, and invokes us to send less
                 * aggressively.
                 */
                return err == NET_XMIT_CN ? 0 : err;
        }
        return -ENOBUFS;
}