static struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst,
				       const enum dccp_reset_codes code)
{
	struct dccp_hdr *dh;
	struct dccp_sock *dp = dccp_sk(sk);
	const u32 dccp_header_size = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_reset);
	struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1,
					   GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	/* Reserve space for headers. */
	skb_reserve(skb, sk->sk_prot->max_header);

	skb->dst  = dst_clone(dst);
	skb->csum = 0;

	dccp_inc_seqno(&dp->dccps_gss);

	DCCP_SKB_CB(skb)->dccpd_reset_code = code;
	DCCP_SKB_CB(skb)->dccpd_type       = DCCP_PKT_RESET;
	DCCP_SKB_CB(skb)->dccpd_seq        = dp->dccps_gss;

	if (dccp_insert_options(sk, skb)) {
		kfree_skb(skb);
		return NULL;
	}

	skb->h.raw = skb_push(skb, dccp_header_size);

	dh = dccp_hdr(skb);
	memset(dh, 0, dccp_header_size);

	dh->dccph_sport = inet_sk(sk)->sport;
	dh->dccph_dport = inet_sk(sk)->dport;
	dh->dccph_doff  = (dccp_header_size +
			   DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
	dh->dccph_type  = DCCP_PKT_RESET;
	dh->dccph_x     = 1;

	dccp_hdr_set_seq(dh, dp->dccps_gss);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dp->dccps_gsr);

	dccp_hdr_reset(skb)->dccph_reset_code = code;
	inet_csk(sk)->icsk_af_ops->send_check(sk, skb->len, skb);

	DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
	return skb;
}
struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst,
				const enum dccp_reset_codes code)
{
	struct dccp_hdr *dh;
	struct dccp_sock *dp = dccp_sk(sk);
	const int dccp_header_size = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_reset);
	struct sk_buff *skb = sock_wmalloc(sk, MAX_HEADER + DCCP_MAX_OPT_LEN +
					       dccp_header_size, 1,
					   GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	/* Reserve space for headers. */
	skb_reserve(skb, MAX_HEADER + DCCP_MAX_OPT_LEN + dccp_header_size);

	skb->dst  = dst_clone(dst);
	skb->csum = 0;

	dccp_inc_seqno(&dp->dccps_gss);

	DCCP_SKB_CB(skb)->dccpd_reset_code = code;
	DCCP_SKB_CB(skb)->dccpd_type       = DCCP_PKT_RESET;
	DCCP_SKB_CB(skb)->dccpd_seq        = dp->dccps_gss;
	dccp_insert_options(sk, skb);

	skb->h.raw = skb_push(skb, dccp_header_size);

	dh = dccp_hdr(skb);
	memset(dh, 0, dccp_header_size);

	dh->dccph_sport = inet_sk(sk)->sport;
	dh->dccph_dport = inet_sk(sk)->dport;
	dh->dccph_doff  = (dccp_header_size +
			   DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
	dh->dccph_type  = DCCP_PKT_RESET;
	dh->dccph_x     = 1;

	dccp_hdr_set_seq(dh, dp->dccps_gss);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dp->dccps_gsr);

	dccp_hdr_reset(skb)->dccph_reset_code = code;

	dh->dccph_checksum = dccp_v4_checksum(skb, inet_sk(sk)->saddr,
					      inet_sk(sk)->daddr);

	DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
	return skb;
}
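/*
 * Illustrative sketch, not lifted from the tree: both dccp_make_reset()
 * variants above bump the greatest sequence number sent (dccps_gss) via
 * dccp_inc_seqno() before stamping the Reset. DCCP sequence numbers are
 * 48 bits wide, so the increment has to wrap at 2^48. The hypothetical
 * helpers below (DCCP_UINT48_MAX, DCCP_ADD48, example_dccp_inc_seqno)
 * show the intended arithmetic; the in-tree ADD48()/dccp_inc_seqno()
 * helpers may differ in detail.
 */
#define DCCP_UINT48_MAX		((1ULL << 48) - 1)
#define DCCP_ADD48(a, b)	(((a) + (b)) & DCCP_UINT48_MAX)

static inline void example_dccp_inc_seqno(u64 *seqno)
{
	/* Wraps from 2^48 - 1 back to 0, staying inside 48 bits. */
	*seqno = DCCP_ADD48(*seqno, 1);
}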
struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
				   struct request_sock *req)
{
	struct dccp_hdr *dh;
	struct dccp_request_sock *dreq;
	const u32 dccp_header_size = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_response);
	struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1,
					   GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	/* Reserve space for headers. */
	skb_reserve(skb, sk->sk_prot->max_header);

	skb->dst = dst_clone(dst);

	dreq = dccp_rsk(req);
	if (inet_rsk(req)->acked)	/* increase ISS upon retransmission */
		dccp_inc_seqno(&dreq->dreq_iss);
	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
	DCCP_SKB_CB(skb)->dccpd_seq  = dreq->dreq_iss;

	if (dccp_insert_options(sk, skb)) {
		kfree_skb(skb);
		return NULL;
	}

	/* Build and checksum header */
	dh = dccp_zeroed_hdr(skb, dccp_header_size);

	dh->dccph_sport = inet_sk(sk)->sport;
	dh->dccph_dport = inet_rsk(req)->rmt_port;
	dh->dccph_doff  = (dccp_header_size +
			   DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
	dh->dccph_type  = DCCP_PKT_RESPONSE;
	dh->dccph_x     = 1;
	dccp_hdr_set_seq(dh, dreq->dreq_iss);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_isr);
	dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;

	dccp_csum_outgoing(skb);

	/* We use `acked' to remember that a Response was already sent. */
	inet_rsk(req)->acked = 1;
	DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
	return skb;
}
struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
				   struct request_sock *req)
{
	struct dccp_hdr *dh;
	struct dccp_request_sock *dreq;
	const int dccp_header_size = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_response);
	struct sk_buff *skb = sock_wmalloc(sk, MAX_HEADER + DCCP_MAX_OPT_LEN +
					       dccp_header_size, 1,
					   GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	/* Reserve space for headers. */
	skb_reserve(skb, MAX_HEADER + DCCP_MAX_OPT_LEN + dccp_header_size);

	skb->dst  = dst_clone(dst);
	skb->csum = 0;

	dreq = dccp_rsk(req);
	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
	DCCP_SKB_CB(skb)->dccpd_seq  = dreq->dreq_iss;
	dccp_insert_options(sk, skb);

	skb->h.raw = skb_push(skb, dccp_header_size);

	dh = dccp_hdr(skb);
	memset(dh, 0, dccp_header_size);

	dh->dccph_sport = inet_sk(sk)->sport;
	dh->dccph_dport = inet_rsk(req)->rmt_port;
	dh->dccph_doff  = (dccp_header_size +
			   DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
	dh->dccph_type  = DCCP_PKT_RESPONSE;
	dh->dccph_x     = 1;
	dccp_hdr_set_seq(dh, dreq->dreq_iss);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_isr);
	dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;

	dh->dccph_checksum = dccp_v4_checksum(skb, inet_rsk(req)->loc_addr,
					      inet_rsk(req)->rmt_addr);

	DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
	return skb;
}
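/*
 * Worked example (assumed option length, not from the tree): dccph_doff is
 * the DCCP header length expressed in 32-bit words, which is why both
 * dccp_make_response() variants divide by 4 above. Assuming the RFC 4340
 * layouts -- 12-byte generic header, 4-byte extended sequence-number word,
 * 8-byte Acknowledgement subheader plus 4-byte Service Code -- a Response
 * with no options gives:
 *
 *	(12 + 4 + 12 + 0) / 4 = 7	32-bit words
 *
 * and every further 4 bytes of (padded) options adds one word. This is
 * also why dccpd_opt_len must already be a multiple of 4 when the header
 * is built; the option-insertion code is expected to pad it.
 */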
/*
 * All SKB's seen here are completely headerless. It is our
 * job to build the DCCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 */
static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
	if (likely(skb != NULL)) {
		const struct inet_sock *inet = inet_sk(sk);
		const struct inet_connection_sock *icsk = inet_csk(sk);
		struct dccp_sock *dp = dccp_sk(sk);
		struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
		struct dccp_hdr *dh;
		/* XXX For now we're using only 48 bits sequence numbers */
		const u32 dccp_header_size = sizeof(*dh) +
					     sizeof(struct dccp_hdr_ext) +
					  dccp_packet_hdr_len(dcb->dccpd_type);
		int err, set_ack = 1;
		u64 ackno = dp->dccps_gsr;

		dccp_inc_seqno(&dp->dccps_gss);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_DATA:
			set_ack = 0;
			/* fall through */
		case DCCP_PKT_DATAACK:
		case DCCP_PKT_RESET:
			break;

		case DCCP_PKT_REQUEST:
			set_ack = 0;
			/* fall through */

		case DCCP_PKT_SYNC:
		case DCCP_PKT_SYNCACK:
			ackno = dcb->dccpd_ack_seq;
			/* fall through */
		default:
			/*
			 * Set owner/destructor: some skbs are allocated via
			 * alloc_skb (e.g. when retransmission may happen).
			 * Only Data, DataAck, and Reset packets should come
			 * through here with skb->sk set.
			 */
			WARN_ON(skb->sk);
			skb_set_owner_w(skb, sk);
			break;
		}

		dcb->dccpd_seq = dp->dccps_gss;

		if (dccp_insert_options(sk, skb)) {
			kfree_skb(skb);
			return -EPROTO;
		}

		/* Build DCCP header and checksum it. */
		dh = dccp_zeroed_hdr(skb, dccp_header_size);
		dh->dccph_type  = dcb->dccpd_type;
		dh->dccph_sport = inet->sport;
		dh->dccph_dport = inet->dport;
		dh->dccph_doff  = (dccp_header_size + dcb->dccpd_opt_len) / 4;
		dh->dccph_ccval = dcb->dccpd_ccval;
		dh->dccph_cscov = dp->dccps_pcslen;
		/* XXX For now we're using only 48 bits sequence numbers */
		dh->dccph_x     = 1;

		dp->dccps_awh = dp->dccps_gss;
		dccp_hdr_set_seq(dh, dp->dccps_gss);
		if (set_ack)
			dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_REQUEST:
			dccp_hdr_request(skb)->dccph_req_service =
							dp->dccps_service;
			break;
		case DCCP_PKT_RESET:
			dccp_hdr_reset(skb)->dccph_reset_code =
							dcb->dccpd_reset_code;
			break;
		}

		icsk->icsk_af_ops->send_check(sk, 0, skb);

		if (set_ack)
			dccp_event_ack_sent(sk);

		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);

		err = icsk->icsk_af_ops->queue_xmit(skb, 0);
		return net_xmit_eval(err);
	}
	return -ENOBUFS;
}
/*
 * All SKB's seen here are completely headerless. It is our
 * job to build the DCCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 */
static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
	if (likely(skb != NULL)) {
		struct inet_sock *inet = inet_sk(sk);
		const struct inet_connection_sock *icsk = inet_csk(sk);
		struct dccp_sock *dp = dccp_sk(sk);
		struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
		struct dccp_hdr *dh;
		/* XXX For now we're using only 48 bits sequence numbers */
		const u32 dccp_header_size = sizeof(*dh) +
					     sizeof(struct dccp_hdr_ext) +
					  dccp_packet_hdr_len(dcb->dccpd_type);
		int err, set_ack = 1;
		u64 ackno = dp->dccps_gsr;

		/*
		 * Increment GSS here already in case the option code needs it.
		 * Update GSS for real only if option processing below succeeds.
		 */
		dcb->dccpd_seq = ADD48(dp->dccps_gss, 1);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_DATA:
			set_ack = 0;
			/* fall through */
		case DCCP_PKT_DATAACK:
		case DCCP_PKT_RESET:
			break;

		case DCCP_PKT_REQUEST:
			set_ack = 0;
			/* Use ISS on the first (non-retransmitted) Request. */
			if (icsk->icsk_retransmits == 0)
				dcb->dccpd_seq = dp->dccps_iss;
			/* fall through */

		case DCCP_PKT_SYNC:
		case DCCP_PKT_SYNCACK:
			ackno = dcb->dccpd_ack_seq;
			/* fall through */
		default:
			/*
			 * Set owner/destructor: some skbs are allocated via
			 * alloc_skb (e.g. when retransmission may happen).
			 * Only Data, DataAck, and Reset packets should come
			 * through here with skb->sk set.
			 */
			WARN_ON(skb->sk);
			skb_set_owner_w(skb, sk);
			break;
		}

		if (dccp_insert_options(sk, skb)) {
			kfree_skb(skb);
			return -EPROTO;
		}

		/* Build DCCP header and checksum it. */
		dh = dccp_zeroed_hdr(skb, dccp_header_size);
		dh->dccph_type  = dcb->dccpd_type;
		dh->dccph_sport = inet->inet_sport;
		dh->dccph_dport = inet->inet_dport;
		dh->dccph_doff  = (dccp_header_size + dcb->dccpd_opt_len) / 4;
		dh->dccph_ccval = dcb->dccpd_ccval;
		dh->dccph_cscov = dp->dccps_pcslen;
		/* XXX For now we're using only 48 bits sequence numbers */
		dh->dccph_x     = 1;

		dccp_update_gss(sk, dcb->dccpd_seq);
		dccp_hdr_set_seq(dh, dp->dccps_gss);
		if (set_ack)
			dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_REQUEST:
			dccp_hdr_request(skb)->dccph_req_service =
							dp->dccps_service;
			/*
			 * Limit Ack window to ISS <= P.ackno <= GSS, so that
			 * only Responses to Requests we sent are considered.
			 */
			dp->dccps_awl = dp->dccps_iss;
			break;
		case DCCP_PKT_RESET:
			dccp_hdr_reset(skb)->dccph_reset_code =
							dcb->dccpd_reset_code;
			break;
		}

		icsk->icsk_af_ops->send_check(sk, skb);

		if (set_ack)
			dccp_event_ack_sent(sk);

		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);

		err = icsk->icsk_af_ops->queue_xmit(skb, &inet->cork.fl);
		return net_xmit_eval(err);
	}
	return -ENOBUFS;
}
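/*
 * Rough caller sketch (simplified and hypothetical, loosely modeled on the
 * dccp_send_ack() pattern; example_send_ack is not the in-tree function):
 * packets other than Data, DataAck and Reset usually reach
 * dccp_transmit_skb() as bare skbs from alloc_skb(), with no owner set,
 * which is why the default branch above attaches the socket via
 * skb_set_owner_w().
 */
static void example_send_ack(struct sock *sk)
{
	struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);

	if (skb == NULL)
		return;		/* a real caller would arm a retry timer */

	/* Reserve room for the DCCP, IP and link-layer headers. */
	skb_reserve(skb, sk->sk_prot->max_header);
	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;

	/* dccp_transmit_skb() fills in GSS, options, header and checksum. */
	dccp_transmit_skb(sk, skb);
}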
static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
	if (likely(skb != NULL)) {
		const struct inet_sock *inet = inet_sk(sk);
		const struct inet_connection_sock *icsk = inet_csk(sk);
		struct dccp_sock *dp = dccp_sk(sk);
		struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
		struct dccp_hdr *dh;
		const u32 dccp_header_size = sizeof(*dh) +
					     sizeof(struct dccp_hdr_ext) +
					  dccp_packet_hdr_len(dcb->dccpd_type);
		int err, set_ack = 1;
		u64 ackno = dp->dccps_gsr;

		dcb->dccpd_seq = ADD48(dp->dccps_gss, 1);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_DATA:
			set_ack = 0;
			/* fall through */
		case DCCP_PKT_DATAACK:
		case DCCP_PKT_RESET:
			break;
		case DCCP_PKT_REQUEST:
			set_ack = 0;
			if (icsk->icsk_retransmits == 0)
				dcb->dccpd_seq = dp->dccps_iss;
			/* fall through */
		case DCCP_PKT_SYNC:
		case DCCP_PKT_SYNCACK:
			ackno = dcb->dccpd_ack_seq;
			/* fall through */
		default:
			WARN_ON(skb->sk);
			skb_set_owner_w(skb, sk);
			break;
		}

		if (dccp_insert_options(sk, skb)) {
			kfree_skb(skb);
			return -EPROTO;
		}

		dh = dccp_zeroed_hdr(skb, dccp_header_size);
		dh->dccph_type  = dcb->dccpd_type;
		dh->dccph_sport = inet->sport;
		dh->dccph_dport = inet->dport;
		dh->dccph_doff  = (dccp_header_size + dcb->dccpd_opt_len) / 4;
		dh->dccph_ccval = dcb->dccpd_ccval;
		dh->dccph_cscov = dp->dccps_pcslen;
		dh->dccph_x     = 1;

		dccp_update_gss(sk, dcb->dccpd_seq);
		dccp_hdr_set_seq(dh, dp->dccps_gss);
		if (set_ack)
			dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_REQUEST:
			dccp_hdr_request(skb)->dccph_req_service =
							dp->dccps_service;
			dp->dccps_awl = dp->dccps_iss;
			break;
		case DCCP_PKT_RESET:
			dccp_hdr_reset(skb)->dccph_reset_code =
							dcb->dccpd_reset_code;
			break;
		}

		icsk->icsk_af_ops->send_check(sk, 0, skb);

		if (set_ack)
			dccp_event_ack_sent(sk);

		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);

		err = icsk->icsk_af_ops->queue_xmit(skb, 0);
		return net_xmit_eval(err);
	}
	return -ENOBUFS;
}
/*
 * All SKB's seen here are completely headerless. It is our
 * job to build the DCCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 */
int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
	if (likely(skb != NULL)) {
		const struct inet_sock *inet = inet_sk(sk);
		struct dccp_sock *dp = dccp_sk(sk);
		struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
		struct dccp_hdr *dh;
		/* XXX For now we're using only 48 bits sequence numbers */
		const int dccp_header_size = sizeof(*dh) +
					     sizeof(struct dccp_hdr_ext) +
					  dccp_packet_hdr_len(dcb->dccpd_type);
		int err, set_ack = 1;
		u64 ackno = dp->dccps_gsr;

		dccp_inc_seqno(&dp->dccps_gss);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_DATA:
			set_ack = 0;
			break;
		case DCCP_PKT_SYNC:
		case DCCP_PKT_SYNCACK:
			ackno = dcb->dccpd_seq;
			break;
		}

		dcb->dccpd_seq = dp->dccps_gss;
		dccp_insert_options(sk, skb);

		skb->h.raw = skb_push(skb, dccp_header_size);
		dh = dccp_hdr(skb);

		if (!skb->sk)
			skb_set_owner_w(skb, sk);

		/* Build DCCP header and checksum it. */
		memset(dh, 0, dccp_header_size);
		dh->dccph_type  = dcb->dccpd_type;
		dh->dccph_sport = inet->sport;
		dh->dccph_dport = inet->dport;
		dh->dccph_doff  = (dccp_header_size + dcb->dccpd_opt_len) / 4;
		dh->dccph_ccval = dcb->dccpd_ccval;
		/* XXX For now we're using only 48 bits sequence numbers */
		dh->dccph_x     = 1;

		dp->dccps_awh = dp->dccps_gss;
		dccp_hdr_set_seq(dh, dp->dccps_gss);
		if (set_ack)
			dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_REQUEST:
			dccp_hdr_request(skb)->dccph_req_service =
							dp->dccps_service;
			break;
		case DCCP_PKT_RESET:
			dccp_hdr_reset(skb)->dccph_reset_code =
							dcb->dccpd_reset_code;
			break;
		}

		dh->dccph_checksum = dccp_v4_checksum(skb, inet->saddr,
						      inet->daddr);

		if (set_ack)
			dccp_event_ack_sent(sk);

		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);

		memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
		err = ip_queue_xmit(skb, 0);
		if (err <= 0)
			return err;

		/* NET_XMIT_CN is special. It does not guarantee that this
		 * packet is lost. It tells us that the device is about to
		 * start dropping packets, or already drops some packets of
		 * the same priority, and invokes us to send less
		 * aggressively.
		 */
		return err == NET_XMIT_CN ? 0 : err;
	}
	return -ENOBUFS;
}
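/*
 * Note on the return-value handling above: the dccp_transmit_skb() variants
 * shown earlier collapse the NET_XMIT_CN special case into net_xmit_eval().
 * A sketch of the equivalence, under the assumption that the helper does no
 * more than the inline code it replaced (example_net_xmit_eval is a
 * hypothetical stand-in, not the in-tree macro):
 */
static inline int example_net_xmit_eval(int err)
{
	/* Congestion notification is advisory, not a loss: report success. */
	return err == NET_XMIT_CN ? 0 : err;
}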