static void try_fill_recv(struct virtnet_info *vi)
{
	struct sk_buff *skb;
	struct scatterlist sg[1+MAX_SKB_FRAGS];
	int num, err;

	sg_init_table(sg, 1+MAX_SKB_FRAGS);
	for (;;) {
		skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN);
		if (unlikely(!skb))
			break;

		skb_put(skb, MAX_PACKET_LEN);
		vnet_hdr_to_sg(sg, skb);
		num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
		skb_queue_head(&vi->recv, skb);

		err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, num, skb);
		if (err) {
			skb_unlink(skb, &vi->recv);
			kfree_skb(skb);
			break;
		}
		vi->num++;
	}
	if (unlikely(vi->num > vi->max))
		vi->max = vi->num;
	vi->rvq->vq_ops->kick(vi->rvq);
}
static int start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int num, err;
	struct scatterlist sg[1+MAX_SKB_FRAGS];
	struct virtio_net_hdr *hdr;
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
	DECLARE_MAC_BUF(mac);

	sg_init_table(sg, 1+MAX_SKB_FRAGS);

	pr_debug("%s: xmit %p %s\n", dev->name, skb, print_mac(mac, dest));

	free_old_xmit_skbs(vi);

	/* Encode metadata header at front. */
	hdr = skb_vnet_hdr(skb);
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->csum_start = skb->csum_start - skb_headroom(skb);
		hdr->csum_offset = skb->csum_offset;
	} else {
		hdr->flags = 0;
		hdr->csum_offset = hdr->csum_start = 0;
	}

	if (skb_is_gso(skb)) {
		hdr->gso_size = skb_shinfo(skb)->gso_size;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4_ECN;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
		else
			BUG();
	} else {
		hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
		hdr->gso_size = 0;
	}

	vnet_hdr_to_sg(sg, skb);
	num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
	__skb_queue_head(&vi->send, skb);
	err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
	if (err) {
		pr_debug("%s: virtio not prepared to send\n", dev->name);
		skb_unlink(skb, &vi->send);
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}
	vi->svq->vq_ops->kick(vi->svq);

	return 0;
}
uint BCMFASTPATH
osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p,
	hnddma_seg_map_t *dmah)
{
	int dir;

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	dir = (direction == DMA_TX) ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;

#if defined(__ARM_ARCH_7A__) && defined(BCMDMASGLISTOSL)
	if (dmah != NULL) {
		int32 nsegs, i, totsegs = 0, totlen = 0;
		struct scatterlist *sg, _sg[16];
		struct sk_buff *skb;

		for (skb = (struct sk_buff *)p; skb != NULL; skb = PKTNEXT(osh, skb)) {
			sg = &_sg[totsegs];
			if (skb_is_nonlinear(skb)) {
				nsegs = skb_to_sgvec(skb, sg, 0, PKTLEN(osh, skb));
				ASSERT((nsegs > 0) && (nsegs <= 16));
				pci_map_sg(osh->pdev, sg, nsegs, dir);
			} else {
				nsegs = 1;
				sg->page_link = 0;
				sg_set_buf(sg, PKTDATA(osh, skb), PKTLEN(osh, skb));
				pci_map_single(osh->pdev, PKTDATA(osh, skb),
					PKTISCTF(osh, skb) ? CTFMAPSZ : PKTLEN(osh, skb), dir);
			}
			totsegs += nsegs;
			totlen += PKTLEN(osh, skb);
		}

		dmah->nsegs = totsegs;
		dmah->origsize = totlen;
		for (i = 0, sg = _sg; i < totsegs; i++, sg++) {
			dmah->segs[i].addr = sg_phys(sg);
			dmah->segs[i].length = sg->length;
		}
		return dmah->segs[0].addr;
	}
#endif

	return (pci_map_single(osh->pdev, va, size, dir));
}
static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipv6hdr *iph;
	struct ipv6_esp_hdr *esph;
	struct esp_data *esp = x->data;
	struct crypto_blkcipher *tfm = esp->conf.tfm;
	struct blkcipher_desc desc = { .tfm = tfm };
	struct sk_buff *trailer;
	int blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
	int alen = esp->auth.icv_trunc_len;
	int elen = skb->len - sizeof(struct ipv6_esp_hdr) - esp->conf.ivlen - alen;
	int hdr_len = skb->h.raw - skb->nh.raw;
	int nfrags;
	int ret = 0;

	if (!pskb_may_pull(skb, sizeof(struct ipv6_esp_hdr))) {
		ret = -EINVAL;
		goto out;
	}

	if (elen <= 0 || (elen & (blksize-1))) {
		ret = -EINVAL;
		goto out;
	}

	/* If integrity check is required, do this. */
	if (esp->auth.icv_full_len) {
		u8 sum[alen];

		ret = esp_mac_digest(esp, skb, 0, skb->len - alen);
		if (ret)
			goto out;

		if (skb_copy_bits(skb, skb->len - alen, sum, alen))
			BUG();

		if (unlikely(memcmp(esp->auth.work_icv, sum, alen))) {
			x->stats.integrity_failed++;
			ret = -EINVAL;
			goto out;
		}
	}

	if ((nfrags = skb_cow_data(skb, 0, &trailer)) < 0) {
		ret = -EINVAL;
		goto out;
	}

	skb->ip_summed = CHECKSUM_NONE;

	esph = (struct ipv6_esp_hdr*)skb->data;
	iph = skb->nh.ipv6h;

	/* Get ivec. This can be wrong, check against another impls. */
	if (esp->conf.ivlen)
		crypto_blkcipher_set_iv(tfm, esph->enc_data, esp->conf.ivlen);

	{
		u8 nexthdr[2];
		struct scatterlist *sg = &esp->sgbuf[0];
		u8 padlen;

		if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
			sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
			if (!sg) {
				ret = -ENOMEM;
				goto out;
			}
		}
		skb_to_sgvec(skb, sg, sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen, elen);
		ret = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
		if (unlikely(sg != &esp->sgbuf[0]))
			kfree(sg);
		if (unlikely(ret))
			goto out;

		if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2))
			BUG();

		padlen = nexthdr[0];
		if (padlen+2 >= elen) {
			LIMIT_NETDEBUG(KERN_WARNING "ipsec esp packet is garbage padlen=%d, elen=%d\n", padlen+2, elen);
			ret = -EINVAL;
			goto out;
		}
		/* ... check padding bits here. Silly. :-) */

		pskb_trim(skb, skb->len - alen - padlen - 2);
		ret = nexthdr[1];
	}

	skb->h.raw = __skb_pull(skb, sizeof(*esph) + esp->conf.ivlen) - hdr_len;

out:
	return ret;
}
/*
 * Note: detecting truncated vs. non-truncated authentication data is very
 * expensive, so we only support truncated data, which is the recommended
 * and common case.
 */
static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct iphdr *iph;
	struct ip_esp_hdr *esph;
	struct esp_data *esp = x->data;
	struct crypto_blkcipher *tfm = esp->conf.tfm;
	struct blkcipher_desc desc = { .tfm = tfm };
	struct sk_buff *trailer;
	int blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
	int alen = esp->auth.icv_trunc_len;
	int elen = skb->len - sizeof(struct ip_esp_hdr) - esp->conf.ivlen - alen;
	int nfrags;
	int ihl;
	u8 nexthdr[2];
	struct scatterlist *sg;
	int padlen;
	int err;

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr)))
		goto out;

	if (elen <= 0 || (elen & (blksize-1)))
		goto out;

	/* If integrity check is required, do this. */
	if (esp->auth.icv_full_len) {
		u8 sum[alen];

		err = esp_mac_digest(esp, skb, 0, skb->len - alen);
		if (err)
			goto out;

		if (skb_copy_bits(skb, skb->len - alen, sum, alen))
			BUG();

		if (unlikely(memcmp(esp->auth.work_icv, sum, alen))) {
			x->stats.integrity_failed++;
			goto out;
		}
	}

	if ((nfrags = skb_cow_data(skb, 0, &trailer)) < 0)
		goto out;

	skb->ip_summed = CHECKSUM_NONE;

	esph = (struct ip_esp_hdr*)skb->data;

	/* Get ivec. This can be wrong, check against another impls. */
	if (esp->conf.ivlen)
		crypto_blkcipher_set_iv(tfm, esph->enc_data, esp->conf.ivlen);

	sg = &esp->sgbuf[0];

	if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
		sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
		if (!sg)
			goto out;
	}
	skb_to_sgvec(skb, sg, sizeof(struct ip_esp_hdr) + esp->conf.ivlen, elen);
	err = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
	if (unlikely(sg != &esp->sgbuf[0]))
		kfree(sg);
	if (unlikely(err))
		return err;

	if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2))
		BUG();

	padlen = nexthdr[0];
	if (padlen+2 >= elen)
		goto out;

	/* ... check padding bits here. Silly. :-) */

	iph = skb->nh.iph;
	ihl = iph->ihl * 4;

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct udphdr *uh = (void *)(skb->nh.raw + ihl);

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertise the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    SRC ports.
		 */
		if (iph->saddr != x->props.saddr.a4 ||
		    uh->source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			ipaddr.a4 = iph->saddr;
			km_new_mapping(x, &ipaddr, uh->source);

			/* XXX: perhaps add an extra
			 * policy check here, to see
			 * if we should allow or
			 * reject a packet from a
			 * different source
			 * address/port.
			 */
		}

		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06,
		 *    section 3.1.2
		 */
		if (x->props.mode == XFRM_MODE_TRANSPORT ||
		    x->props.mode == XFRM_MODE_BEET)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	iph->protocol = nexthdr[1];
	pskb_trim(skb, skb->len - alen - padlen - 2);
	skb->h.raw = __skb_pull(skb, sizeof(*esph) + esp->conf.ivlen) - ihl;

	return 0;

out:
	return -EINVAL;
}
static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	struct iphdr *top_iph;
	struct ip_esp_hdr *esph;
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct esp_data *esp;
	struct sk_buff *trailer;
	int blksize;
	int clen;
	int alen;
	int nfrags;

	/* Strip IP+ESP header. */
	__skb_pull(skb, skb->h.raw - skb->data);
	/* Now skb is pure payload to encrypt */

	err = -ENOMEM;

	/* Round to block size */
	clen = skb->len;

	esp = x->data;
	alen = esp->auth.icv_trunc_len;
	tfm = esp->conf.tfm;
	desc.tfm = tfm;
	desc.flags = 0;
	blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
	clen = ALIGN(clen + 2, blksize);
	if (esp->conf.padlen)
		clen = ALIGN(clen, esp->conf.padlen);

	if ((nfrags = skb_cow_data(skb, clen-skb->len+alen, &trailer)) < 0)
		goto error;

	/* Fill padding... */
	do {
		int i;
		for (i=0; i<clen-skb->len - 2; i++)
			*(u8*)(trailer->tail + i) = i+1;
	} while (0);
	*(u8*)(trailer->tail + clen-skb->len - 2) = (clen - skb->len)-2;
	pskb_put(skb, trailer, clen - skb->len);

	__skb_push(skb, skb->data - skb->nh.raw);
	top_iph = skb->nh.iph;
	esph = (struct ip_esp_hdr *)(skb->nh.raw + top_iph->ihl*4);
	top_iph->tot_len = htons(skb->len + alen);
	*(u8*)(trailer->tail - 1) = top_iph->protocol;

	/* this is non-NULL only with UDP Encapsulation */
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct udphdr *uh;
		u32 *udpdata32;

		uh = (struct udphdr *)esph;
		uh->source = encap->encap_sport;
		uh->dest = encap->encap_dport;
		uh->len = htons(skb->len + alen - top_iph->ihl*4);
		uh->check = 0;

		switch (encap->encap_type) {
		default:
		case UDP_ENCAP_ESPINUDP:
			esph = (struct ip_esp_hdr *)(uh + 1);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			udpdata32 = (u32 *)(uh + 1);
			udpdata32[0] = udpdata32[1] = 0;
			esph = (struct ip_esp_hdr *)(udpdata32 + 2);
			break;
		}

		top_iph->protocol = IPPROTO_UDP;
	} else
		top_iph->protocol = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(++x->replay.oseq);
	xfrm_aevent_doreplay(x);

	if (esp->conf.ivlen) {
		if (unlikely(!esp->conf.ivinitted)) {
			get_random_bytes(esp->conf.ivec, esp->conf.ivlen);
			esp->conf.ivinitted = 1;
		}
		crypto_blkcipher_set_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
	}

	do {
		struct scatterlist *sg = &esp->sgbuf[0];

		if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
			sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
			if (!sg)
				goto error;
		}
		skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
		err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
		if (unlikely(sg != &esp->sgbuf[0]))
			kfree(sg);
	} while (0);

	if (unlikely(err))
		goto error;

	if (esp->conf.ivlen) {
		memcpy(esph->enc_data, esp->conf.ivec, esp->conf.ivlen);
		crypto_blkcipher_get_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
	}

	if (esp->auth.icv_full_len) {
		err = esp_mac_digest(esp, skb, (u8 *)esph - skb->data,
				     sizeof(*esph) + esp->conf.ivlen + clen);
		memcpy(pskb_put(skb, trailer, alen), esp->auth.work_icv, alen);
	}

	ip_send_check(top_iph);

error:
	return err;
}
int esp_output(struct sk_buff *skb)
{
	int err;
	struct dst_entry *dst = skb->dst;
	struct xfrm_state *x = dst->xfrm;
	struct iphdr *iph, *top_iph;
	struct ip_esp_hdr *esph;
	struct crypto_tfm *tfm;
	struct esp_data *esp;
	struct sk_buff *trailer;
	struct udphdr *uh = NULL;
	struct xfrm_encap_tmpl *encap = NULL;
	int blksize;
	int clen;
	int alen;
	int nfrags;
	union {
		struct iphdr iph;
		char buf[60];
	} tmp_iph;

	/* First, if the skb is not checksummed, complete checksum. */
	if (skb->ip_summed == CHECKSUM_HW && skb_checksum_help(skb) == NULL) {
		err = -EINVAL;
		goto error_nolock;
	}

	spin_lock_bh(&x->lock);
	err = xfrm_check_output(x, skb, AF_INET);
	if (err)
		goto error;
	err = -ENOMEM;

	/* Strip IP header in transport mode. Save it. */
	if (!x->props.mode) {
		iph = skb->nh.iph;
		memcpy(&tmp_iph, iph, iph->ihl*4);
		__skb_pull(skb, iph->ihl*4);
	}
	/* Now skb is pure payload to encrypt */

	/* Round to block size */
	clen = skb->len;

	esp = x->data;
	alen = esp->auth.icv_trunc_len;
	tfm = esp->conf.tfm;
	blksize = (crypto_tfm_alg_blocksize(tfm) + 3) & ~3;
	clen = (clen + 2 + blksize-1)&~(blksize-1);
	if (esp->conf.padlen)
		clen = (clen + esp->conf.padlen-1)&~(esp->conf.padlen-1);

	if ((nfrags = skb_cow_data(skb, clen-skb->len+alen, &trailer)) < 0)
		goto error;

	/* Fill padding... */
	do {
		int i;
		for (i=0; i<clen-skb->len - 2; i++)
			*(u8*)(trailer->tail + i) = i+1;
	} while (0);
	*(u8*)(trailer->tail + clen-skb->len - 2) = (clen - skb->len)-2;
	pskb_put(skb, trailer, clen - skb->len);

	encap = x->encap;

	iph = skb->nh.iph;
	if (x->props.mode) {
		top_iph = (struct iphdr*)skb_push(skb, x->props.header_len);
		esph = (struct ip_esp_hdr*)(top_iph+1);
		if (encap && encap->encap_type) {
			switch (encap->encap_type) {
			case UDP_ENCAP_ESPINUDP:
				uh = (struct udphdr*) esph;
				esph = (struct ip_esp_hdr*)(uh+1);
				top_iph->protocol = IPPROTO_UDP;
				break;
			default:
				printk(KERN_INFO
				       "esp_output(): Unhandled encap: %u\n",
				       encap->encap_type);
				top_iph->protocol = IPPROTO_ESP;
				break;
			}
		} else
			top_iph->protocol = IPPROTO_ESP;
		*(u8*)(trailer->tail - 1) = IPPROTO_IPIP;
		top_iph->ihl = 5;
		top_iph->version = 4;
		top_iph->tos = iph->tos;	/* DS disclosed */
		if (x->props.flags & XFRM_STATE_NOECN)
			IP_ECN_clear(top_iph);
		top_iph->tot_len = htons(skb->len + alen);
		top_iph->frag_off = iph->frag_off&htons(IP_DF);
		if (!(top_iph->frag_off))
			ip_select_ident(top_iph, dst, 0);
		top_iph->ttl = iph->ttl;	/* TTL disclosed */
		top_iph->check = 0;
		top_iph->saddr = x->props.saddr.a4;
		top_iph->daddr = x->id.daddr.a4;
		memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
	} else {
		esph = (struct ip_esp_hdr*)skb_push(skb, x->props.header_len);
		top_iph = (struct iphdr*)skb_push(skb, iph->ihl*4);
		memcpy(top_iph, &tmp_iph, iph->ihl*4);
		if (encap && encap->encap_type) {
			switch (encap->encap_type) {
			case UDP_ENCAP_ESPINUDP:
				uh = (struct udphdr*) esph;
				esph = (struct ip_esp_hdr*)(uh+1);
				top_iph->protocol = IPPROTO_UDP;
				break;
			default:
				printk(KERN_INFO
				       "esp_output(): Unhandled encap: %u\n",
				       encap->encap_type);
				top_iph->protocol = IPPROTO_ESP;
				break;
			}
		} else
			top_iph->protocol = IPPROTO_ESP;
		iph = &tmp_iph.iph;
		top_iph->tot_len = htons(skb->len + alen);
		top_iph->check = 0;
		top_iph->frag_off = iph->frag_off;
		*(u8*)(trailer->tail - 1) = iph->protocol;
	}

	/* this is non-NULL only with UDP Encapsulation */
	if (encap && uh) {
		uh->source = encap->encap_sport;
		uh->dest = encap->encap_dport;
		uh->len = htons(skb->len + alen - sizeof(struct iphdr));
		uh->check = 0;
	}

	esph->spi = x->id.spi;
	esph->seq_no = htonl(++x->replay.oseq);

	if (esp->conf.ivlen)
		crypto_cipher_set_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));

	do {
		struct scatterlist sgbuf[nfrags>MAX_SG_ONSTACK ? 0 : nfrags];
		struct scatterlist *sg = sgbuf;

		if (unlikely(nfrags > MAX_SG_ONSTACK)) {
			sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
			if (!sg)
				goto error;
		}
		skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
		crypto_cipher_encrypt(tfm, sg, sg, clen);
		if (unlikely(sg != sgbuf))
			kfree(sg);
	} while (0);

	if (esp->conf.ivlen) {
		memcpy(esph->enc_data, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
		crypto_cipher_get_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
	}

	if (esp->auth.icv_full_len) {
		esp->auth.icv(esp, skb, (u8*)esph-skb->data,
			      sizeof(struct ip_esp_hdr) + esp->conf.ivlen+clen, trailer->tail);
		pskb_put(skb, trailer, alen);
	}

	ip_send_check(top_iph);

	skb->nh.raw = skb->data;

	x->curlft.bytes += skb->len;
	x->curlft.packets++;
	spin_unlock_bh(&x->lock);
	if ((skb->dst = dst_pop(dst)) == NULL) {
		err = -EHOSTUNREACH;
		goto error_nolock;
	}
	return NET_XMIT_BYPASS;

error:
	spin_unlock_bh(&x->lock);
error_nolock:
	kfree_skb(skb);
	return err;
}
/*
 * Note: detecting truncated vs. non-truncated authentication data is very
 * expensive, so we only support truncated data, which is the recommended
 * and common case.
 */
int esp_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb)
{
	struct iphdr *iph;
	struct ip_esp_hdr *esph;
	struct esp_data *esp = x->data;
	struct sk_buff *trailer;
	int blksize = crypto_tfm_alg_blocksize(esp->conf.tfm);
	int alen = esp->auth.icv_trunc_len;
	int elen = skb->len - sizeof(struct ip_esp_hdr) - esp->conf.ivlen - alen;
	int nfrags;
	int encap_len = 0;

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr)))
		goto out;

	if (elen <= 0 || (elen & (blksize-1)))
		goto out;

	/* If integrity check is required, do this. */
	if (esp->auth.icv_full_len) {
		u8 sum[esp->auth.icv_full_len];
		u8 sum1[alen];

		esp->auth.icv(esp, skb, 0, skb->len-alen, sum);

		if (skb_copy_bits(skb, skb->len-alen, sum1, alen))
			BUG();

		if (unlikely(memcmp(sum, sum1, alen))) {
			x->stats.integrity_failed++;
			goto out;
		}
	}

	if ((nfrags = skb_cow_data(skb, 0, &trailer)) < 0)
		goto out;

	skb->ip_summed = CHECKSUM_NONE;

	esph = (struct ip_esp_hdr*)skb->data;
	iph = skb->nh.iph;

	/* Get ivec. This can be wrong, check against another impls. */
	if (esp->conf.ivlen)
		crypto_cipher_set_iv(esp->conf.tfm, esph->enc_data, crypto_tfm_alg_ivsize(esp->conf.tfm));

	{
		u8 nexthdr[2];
		struct scatterlist sgbuf[nfrags>MAX_SG_ONSTACK ? 0 : nfrags];
		struct scatterlist *sg = sgbuf;
		u8 workbuf[60];
		int padlen;

		if (unlikely(nfrags > MAX_SG_ONSTACK)) {
			sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
			if (!sg)
				goto out;
		}
		skb_to_sgvec(skb, sg, sizeof(struct ip_esp_hdr) + esp->conf.ivlen, elen);
		crypto_cipher_decrypt(esp->conf.tfm, sg, sg, elen);
		if (unlikely(sg != sgbuf))
			kfree(sg);

		if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2))
			BUG();

		padlen = nexthdr[0];
		if (padlen+2 >= elen)
			goto out;

		/* ... check padding bits here. Silly. :-) */

		if (x->encap && decap && decap->decap_type) {
			struct esp_decap_data *encap_data;
			struct udphdr *uh = (struct udphdr *) (iph+1);

			encap_data = (struct esp_decap_data *) (decap->decap_data);
			encap_data->proto = 0;

			switch (decap->decap_type) {
			case UDP_ENCAP_ESPINUDP:
				if ((void*)uh == (void*)esph) {
					printk(KERN_DEBUG
					       "esp_input(): Got ESP; expecting ESPinUDP\n");
					break;
				}

				encap_data->proto = AF_INET;
				encap_data->saddr.a4 = iph->saddr;
				encap_data->sport = uh->source;
				encap_len = (void*)esph - (void*)uh;
				if (encap_len != sizeof(*uh))
					printk(KERN_DEBUG
					       "esp_input(): UDP -> ESP: too much room: %d\n",
					       encap_len);
				break;

			default:
				printk(KERN_INFO
				       "esp_input(): processing unknown encap type: %u\n",
				       decap->decap_type);
				break;
			}
		}

		iph->protocol = nexthdr[1];
		pskb_trim(skb, skb->len - alen - padlen - 2);
		memcpy(workbuf, skb->nh.raw, iph->ihl*4);
		skb->h.raw = skb_pull(skb, sizeof(struct ip_esp_hdr) + esp->conf.ivlen);
		skb->nh.raw += encap_len + sizeof(struct ip_esp_hdr) + esp->conf.ivlen;
		memcpy(skb->nh.raw, workbuf, iph->ihl*4);
		skb->nh.iph->tot_len = htons(skb->len);
	}

	return 0;

out:
	return -EINVAL;
}
static int esp6_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb)
{
	struct ipv6hdr *iph;
	struct ipv6_esp_hdr *esph;
	struct esp_data *esp = x->data;
	struct sk_buff *trailer;
	int blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4);
	int alen = esp->auth.icv_trunc_len;
	int elen = skb->len - sizeof(struct ipv6_esp_hdr) - esp->conf.ivlen - alen;
	int hdr_len = skb->h.raw - skb->nh.raw;
	int nfrags;
	unsigned char *tmp_hdr = NULL;
	int ret = 0;

	if (!pskb_may_pull(skb, sizeof(struct ipv6_esp_hdr))) {
		ret = -EINVAL;
		goto out_nofree;
	}

	esph = (struct ipv6_esp_hdr*)skb->data;

	if (elen <= 0 || (elen & (blksize-1))) {
		ret = -EINVAL;
		goto out_nofree;
	}

	tmp_hdr = kmalloc(hdr_len, GFP_ATOMIC);
	if (!tmp_hdr) {
		ret = -ENOMEM;
		goto out_nofree;
	}
	memcpy(tmp_hdr, skb->nh.raw, hdr_len);

	/* If integrity check is required, do this. */
	if (esp->auth.icv_full_len) {
		u8 sum[esp->auth.icv_full_len];
		u8 sum1[alen];

		if (x->props.replay_window && xfrm_replay_check(x, esph->seq_no)) {
			ret = -EINVAL;
			goto out;
		}

		esp->auth.icv(esp, skb, 0, skb->len-alen, sum);

		if (skb_copy_bits(skb, skb->len-alen, sum1, alen))
			BUG();

		if (unlikely(memcmp(sum, sum1, alen))) {
			x->stats.integrity_failed++;
			ret = -EINVAL;
			goto out;
		}

		if (x->props.replay_window)
			xfrm_replay_advance(x, esph->seq_no);
	}

	if ((nfrags = skb_cow_data(skb, 0, &trailer)) < 0) {
		ret = -EINVAL;
		goto out;
	}

	skb->ip_summed = CHECKSUM_NONE;

	iph = skb->nh.ipv6h;

	/* Get ivec. This can be wrong, check against another impls. */
	if (esp->conf.ivlen)
		crypto_cipher_set_iv(esp->conf.tfm, esph->enc_data, crypto_tfm_alg_ivsize(esp->conf.tfm));

	{
		u8 nexthdr[2];
		struct scatterlist *sg = &esp->sgbuf[0];
		u8 padlen;

		if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
			sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
			if (!sg) {
				ret = -ENOMEM;
				goto out;
			}
		}
		skb_to_sgvec(skb, sg, sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen, elen);
		crypto_cipher_decrypt(esp->conf.tfm, sg, sg, elen);
		if (unlikely(sg != &esp->sgbuf[0]))
			kfree(sg);

		if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2))
			BUG();

		padlen = nexthdr[0];
		if (padlen+2 >= elen) {
			LIMIT_NETDEBUG(KERN_WARNING "ipsec esp packet is garbage padlen=%d, elen=%d\n", padlen+2, elen);
			ret = -EINVAL;
			goto out;
		}
		/* ... check padding bits here. Silly. :-) */

		pskb_trim(skb, skb->len - alen - padlen - 2);
		skb->h.raw = skb_pull(skb, sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen);
		skb->nh.raw += sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen;
		memcpy(skb->nh.raw, tmp_hdr, hdr_len);
		skb->nh.ipv6h->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
		ret = nexthdr[1];
	}

out:
	kfree(tmp_hdr);
out_nofree:
	return ret;
}
static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	int hdr_len;
	struct ipv6hdr *top_iph;
	struct ipv6_esp_hdr *esph;
	struct crypto_tfm *tfm;
	struct esp_data *esp;
	struct sk_buff *trailer;
	int blksize;
	int clen;
	int alen;
	int nfrags;

#if defined (CONFIG_OCTEON_NATIVE_IPSEC)
	if ((skb_is_nonlinear(skb) || skb_cloned(skb)) &&
	    skb_linearize(skb, GFP_ATOMIC) != 0) {
		err = -ENOMEM;
		goto error;
	}
#endif

	esp = x->data;
	hdr_len = skb->h.raw - skb->data + sizeof(*esph) + esp->conf.ivlen;

	/* Strip IP+ESP header. */
	__skb_pull(skb, hdr_len);

	/* Now skb is pure payload to encrypt */
	err = -ENOMEM;

	/* Round to block size */
	clen = skb->len;

	alen = esp->auth.icv_trunc_len;
	tfm = esp->conf.tfm;
	blksize = ALIGN(crypto_tfm_alg_blocksize(tfm), 4);
	clen = ALIGN(clen + 2, blksize);
	if (esp->conf.padlen)
		clen = ALIGN(clen, esp->conf.padlen);

	if ((nfrags = skb_cow_data(skb, clen-skb->len+alen, &trailer)) < 0) {
		goto error;
	}

	/* Fill padding... */
	do {
		int i;
		for (i=0; i<clen-skb->len - 2; i++)
			*(u8*)(trailer->tail + i) = i+1;
	} while (0);
	*(u8*)(trailer->tail + clen-skb->len - 2) = (clen - skb->len)-2;
	pskb_put(skb, trailer, clen - skb->len);

	top_iph = (struct ipv6hdr *)__skb_push(skb, hdr_len);
	esph = (struct ipv6_esp_hdr *)skb->h.raw;
	top_iph->payload_len = htons(skb->len + alen - sizeof(*top_iph));
	*(u8*)(trailer->tail - 1) = *skb->nh.raw;
	*skb->nh.raw = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(++x->replay.oseq);

	if (esp->conf.ivlen)
		crypto_cipher_set_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));

#if defined (CONFIG_OCTEON_NATIVE_IPSEC)
	{
		int ret;
		int len = ((unsigned char *)esph - skb->data);

		__skb_pull(skb, len);
		if (esp->conf.ivlen) {
			memcpy(esph->enc_data, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
		}
		ret = cavium_process_esp_pkt(esp, skb);
		__skb_push(skb, len);
		if (esp->conf.ivlen) {
			crypto_cipher_get_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
		}
		if (likely(!ret)) {
			if (esp->auth.icv_full_len)
				skb_put(skb, alen);
			return 0;
		}
		if (unlikely(ret != -EIO))
			goto error;
		goto error;
		/*
		if (esp->conf.ivlen)
			crypto_cipher_set_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
		*/
	}
#else
	do {
		struct scatterlist *sg = &esp->sgbuf[0];

		if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
			sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
			if (!sg)
				goto error;
		}
		skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
		crypto_cipher_encrypt(tfm, sg, sg, clen);
		if (unlikely(sg != &esp->sgbuf[0]))
			kfree(sg);
	} while (0);

	if (esp->conf.ivlen) {
		memcpy(esph->enc_data, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
		crypto_cipher_get_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
	}

	if (esp->auth.icv_full_len) {
		esp->auth.icv(esp, skb, (u8*)esph-skb->data,
			      sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen+clen, trailer->tail);
		pskb_put(skb, trailer, alen);
	}
#endif /* CONFIG_OCTEON_NATIVE_IPSEC */

	err = 0;

error:
	return err;
}
static int esp6_output(struct sk_buff *skb)
{
	int err;
	int hdr_len;
	struct dst_entry *dst = skb->dst;
	struct xfrm_state *x = dst->xfrm;
	struct ipv6hdr *top_iph;
	struct ipv6_esp_hdr *esph;
	struct crypto_tfm *tfm;
	struct esp_data *esp;
	struct sk_buff *trailer;
	int blksize;
	int clen;
	int alen;
	int nfrags;

	esp = x->data;
	hdr_len = skb->h.raw - skb->data + sizeof(*esph) + esp->conf.ivlen;

	/* Strip IP+ESP header. */
	__skb_pull(skb, hdr_len);

	/* Now skb is pure payload to encrypt */
	err = -ENOMEM;

	/* Round to block size */
	clen = skb->len;

	alen = esp->auth.icv_trunc_len;
	tfm = esp->conf.tfm;
	blksize = (crypto_tfm_alg_blocksize(tfm) + 3) & ~3;
	clen = (clen + 2 + blksize-1)&~(blksize-1);
	if (esp->conf.padlen)
		clen = (clen + esp->conf.padlen-1)&~(esp->conf.padlen-1);

	if ((nfrags = skb_cow_data(skb, clen-skb->len+alen, &trailer)) < 0) {
		goto error;
	}

	/* Fill padding... */
	do {
		int i;
		for (i=0; i<clen-skb->len - 2; i++)
			*(u8*)(trailer->tail + i) = i+1;
	} while (0);
	*(u8*)(trailer->tail + clen-skb->len - 2) = (clen - skb->len)-2;
	pskb_put(skb, trailer, clen - skb->len);

	top_iph = (struct ipv6hdr *)__skb_push(skb, hdr_len);
	esph = (struct ipv6_esp_hdr *)skb->h.raw;
	top_iph->payload_len = htons(skb->len + alen - sizeof(*top_iph));
	*(u8*)(trailer->tail - 1) = *skb->nh.raw;
	*skb->nh.raw = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(++x->replay.oseq);

	if (esp->conf.ivlen)
		crypto_cipher_set_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));

	do {
		struct scatterlist *sg = &esp->sgbuf[0];

		if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
			sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
			if (!sg)
				goto error;
		}
		skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
		crypto_cipher_encrypt(tfm, sg, sg, clen);
		if (unlikely(sg != &esp->sgbuf[0]))
			kfree(sg);
	} while (0);

	if (esp->conf.ivlen) {
		memcpy(esph->enc_data, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
		crypto_cipher_get_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
	}

	if (esp->auth.icv_full_len) {
		esp->auth.icv(esp, skb, (u8*)esph-skb->data,
			      sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen+clen, trailer->tail);
		pskb_put(skb, trailer, alen);
	}

	err = 0;

error:
	return err;
}
static int fill_sg_in(struct scatterlist *sg_in,
		      struct sk_buff *skb,
		      struct tls_offload_context_tx *ctx,
		      u64 *rcd_sn,
		      s32 *sync_size,
		      int *resync_sgs)
{
	int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int payload_len = skb->len - tcp_payload_offset;
	u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
	struct tls_record_info *record;
	unsigned long flags;
	int remaining;
	int i;

	spin_lock_irqsave(&ctx->lock, flags);
	record = tls_get_record(ctx, tcp_seq, rcd_sn);
	if (!record) {
		spin_unlock_irqrestore(&ctx->lock, flags);
		WARN(1, "Record not found for seq %u\n", tcp_seq);
		return -EINVAL;
	}

	*sync_size = tcp_seq - tls_record_start_seq(record);
	if (*sync_size < 0) {
		int is_start_marker = tls_record_is_start_marker(record);

		spin_unlock_irqrestore(&ctx->lock, flags);
		/* This should only occur if the relevant record was
		 * already acked. In that case it should be ok
		 * to drop the packet and avoid retransmission.
		 *
		 * There is a corner case where the packet contains
		 * both an acked and a non-acked record.
		 * We currently don't handle that case and rely
		 * on TCP to retransmit a packet that doesn't contain
		 * already acked payload.
		 */
		if (!is_start_marker)
			*sync_size = 0;
		return -EINVAL;
	}

	remaining = *sync_size;
	for (i = 0; remaining > 0; i++) {
		skb_frag_t *frag = &record->frags[i];

		__skb_frag_ref(frag);
		sg_set_page(sg_in + i, skb_frag_page(frag),
			    skb_frag_size(frag), frag->page_offset);

		remaining -= skb_frag_size(frag);

		if (remaining < 0)
			sg_in[i].length += remaining;
	}
	*resync_sgs = i;

	spin_unlock_irqrestore(&ctx->lock, flags);
	if (skb_to_sgvec(skb, &sg_in[i], tcp_payload_offset, payload_len) < 0)
		return -EINVAL;

	return 0;
}
/*
 * Note: detecting truncated vs. non-truncated authentication data is very
 * expensive, so we only support truncated data, which is the recommended
 * and common case.
 */
static int esp_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb)
{
	struct iphdr *iph;
	struct ip_esp_hdr *esph;
	struct esp_data *esp = x->data;
	struct sk_buff *trailer;
	int blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4);
	int alen = esp->auth.icv_trunc_len;
	int elen = skb->len - sizeof(struct ip_esp_hdr) - esp->conf.ivlen - alen;
	int nfrags;
	int encap_len = 0;
	u8 nexthdr[2];
	struct scatterlist *sg;
	u8 workbuf[60];
	int padlen;

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr)))
		goto out;

	esph = (struct ip_esp_hdr*)skb->data;

	if (elen <= 0 || (elen & (blksize-1)))
		goto out;

	/* If integrity check is required, do this. */
	if (esp->auth.icv_full_len) {
		u8 sum[esp->auth.icv_full_len];
		u8 sum1[alen];

		if (x->props.replay_window && xfrm_replay_check(x, esph->seq_no))
			goto out;

		esp->auth.icv(esp, skb, 0, skb->len-alen, sum);

		if (skb_copy_bits(skb, skb->len-alen, sum1, alen))
			BUG();

		if (unlikely(memcmp(sum, sum1, alen))) {
			x->stats.integrity_failed++;
			goto out;
		}

		if (x->props.replay_window)
			xfrm_replay_advance(x, esph->seq_no);
	}

	if ((nfrags = skb_cow_data(skb, 0, &trailer)) < 0)
		goto out;

	skb->ip_summed = CHECKSUM_NONE;

	iph = skb->nh.iph;

	/* Get ivec. This can be wrong, check against another impls. */
	if (esp->conf.ivlen)
		crypto_cipher_set_iv(esp->conf.tfm, esph->enc_data, crypto_tfm_alg_ivsize(esp->conf.tfm));

	sg = &esp->sgbuf[0];

	if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
		sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
		if (!sg)
			goto out;
	}
	skb_to_sgvec(skb, sg, sizeof(struct ip_esp_hdr) + esp->conf.ivlen, elen);
	crypto_cipher_decrypt(esp->conf.tfm, sg, sg, elen);
	if (unlikely(sg != &esp->sgbuf[0]))
		kfree(sg);

	if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2))
		BUG();

	padlen = nexthdr[0];
	if (padlen+2 >= elen)
		goto out;

	/* ... check padding bits here. Silly. :-) */

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct udphdr *uh;

		if (encap->encap_type != decap->decap_type)
			goto out;

		uh = (struct udphdr *)(iph + 1);
		encap_len = (void*)esph - (void*)uh;

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertise the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    SRC ports.
		 */
		if (iph->saddr != x->props.saddr.a4 ||
		    uh->source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			ipaddr.a4 = iph->saddr;
			km_new_mapping(x, &ipaddr, uh->source);

			/* XXX: perhaps add an extra
			 * policy check here, to see
			 * if we should allow or
			 * reject a packet from a
			 * different source
			 * address/port.
			 */
		}

		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06,
		 *    section 3.1.2
		 */
		if (!x->props.mode)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	iph->protocol = nexthdr[1];
	pskb_trim(skb, skb->len - alen - padlen - 2);
	memcpy(workbuf, skb->nh.raw, iph->ihl*4);
	skb->h.raw = skb_pull(skb, sizeof(struct ip_esp_hdr) + esp->conf.ivlen);
	skb->nh.raw += encap_len + sizeof(struct ip_esp_hdr) + esp->conf.ivlen;
	memcpy(skb->nh.raw, workbuf, iph->ihl*4);
	skb->nh.iph->tot_len = htons(skb->len);

	return 0;

out:
	return -EINVAL;
}
static int start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int num, err;
	struct scatterlist sg[1+MAX_SKB_FRAGS];
	struct virtio_net_hdr *hdr;
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;

	sg_init_table(sg, 1+MAX_SKB_FRAGS);

	pr_debug("%s: xmit %p " MAC_FMT "\n", dev->name, skb,
		 dest[0], dest[1], dest[2], dest[3], dest[4], dest[5]);

	/* Encode metadata header at front. */
	hdr = skb_vnet_hdr(skb);
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->csum_start = skb->csum_start - skb_headroom(skb);
		hdr->csum_offset = skb->csum_offset;
	} else {
		hdr->flags = 0;
		hdr->csum_offset = hdr->csum_start = 0;
	}

	if (skb_is_gso(skb)) {
		hdr->hdr_len = skb_transport_header(skb) - skb->data;
		hdr->gso_size = skb_shinfo(skb)->gso_size;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
		else
			BUG();
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
			hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else {
		hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
		hdr->gso_size = hdr->hdr_len = 0;
	}

	vnet_hdr_to_sg(sg, skb);
	num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
	__skb_queue_head(&vi->send, skb);

again:
	/* Free up any pending old buffers before queueing new ones. */
	free_old_xmit_skbs(vi);
	err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
	if (err) {
		pr_debug("%s: virtio not prepared to send\n", dev->name);
		netif_stop_queue(dev);

		/* Activate callback for using skbs: if this returns false it
		 * means some were used in the meantime. */
		if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
			vi->svq->vq_ops->disable_cb(vi->svq);
			netif_start_queue(dev);
			goto again;
		}
		__skb_unlink(skb, &vi->send);

		return NETDEV_TX_BUSY;
	}
	vi->svq->vq_ops->kick(vi->svq);

	return 0;
}
static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	int hdr_len;
	struct ipv6hdr *top_iph;
	struct ipv6_esp_hdr *esph;
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct esp_data *esp;
	struct sk_buff *trailer;
	int blksize;
	int clen;
	int alen;
	int nfrags;

	esp = x->data;
	hdr_len = skb->h.raw - skb->data + sizeof(*esph) + esp->conf.ivlen;

	/* Strip IP+ESP header. */
	__skb_pull(skb, hdr_len);

	/* Now skb is pure payload to encrypt */
	err = -ENOMEM;

	/* Round to block size */
	clen = skb->len;

	alen = esp->auth.icv_trunc_len;
	tfm = esp->conf.tfm;
	desc.tfm = tfm;
	desc.flags = 0;
	blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
	clen = ALIGN(clen + 2, blksize);
	if (esp->conf.padlen)
		clen = ALIGN(clen, esp->conf.padlen);

	if ((nfrags = skb_cow_data(skb, clen-skb->len+alen, &trailer)) < 0) {
		goto error;
	}

	/* Fill padding... */
	do {
		int i;
		for (i=0; i<clen-skb->len - 2; i++)
			*(u8*)(trailer->tail + i) = i+1;
	} while (0);
	*(u8*)(trailer->tail + clen-skb->len - 2) = (clen - skb->len)-2;
	pskb_put(skb, trailer, clen - skb->len);

	top_iph = (struct ipv6hdr *)__skb_push(skb, hdr_len);
	esph = (struct ipv6_esp_hdr *)skb->h.raw;
	top_iph->payload_len = htons(skb->len + alen - sizeof(*top_iph));
	*(u8*)(trailer->tail - 1) = *skb->nh.raw;
	*skb->nh.raw = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(++x->replay.oseq);
	xfrm_aevent_doreplay(x);

	if (esp->conf.ivlen) {
		if (unlikely(!esp->conf.ivinitted)) {
			get_random_bytes(esp->conf.ivec, esp->conf.ivlen);
			esp->conf.ivinitted = 1;
		}
		crypto_blkcipher_set_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
	}

	do {
		struct scatterlist *sg = &esp->sgbuf[0];

		if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
			sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
			if (!sg)
				goto error;
		}
		skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
		err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
		if (unlikely(sg != &esp->sgbuf[0]))
			kfree(sg);
	} while (0);

	if (unlikely(err))
		goto error;

	if (esp->conf.ivlen) {
		memcpy(esph->enc_data, esp->conf.ivec, esp->conf.ivlen);
		crypto_blkcipher_get_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
	}

	if (esp->auth.icv_full_len) {
		err = esp_mac_digest(esp, skb, (u8 *)esph - skb->data,
				     sizeof(*esph) + esp->conf.ivlen + clen);
		memcpy(pskb_put(skb, trailer, alen), esp->auth.work_icv, alen);
	}

error:
	return err;
}
int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int elt = 0;

	if (copy > 0) {
		if (copy > len)
			copy = len;
		sg[elt].page = virt_to_page(skb->data + offset);
		sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
		sg[elt].length = copy;
		elt++;
		if ((len -= copy) == 0)
			return elt;
		offset += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			sg[elt].page = frag->page;
			sg[elt].offset = frag->page_offset+offset-start;
			sg[elt].length = copy;
			elt++;
			if (!(len -= copy))
				return elt;
			offset += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				elt += skb_to_sgvec(list, sg+elt, offset - start, copy);
				if ((len -= copy) == 0)
					return elt;
				offset += copy;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return elt;
}
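For context on how the ESP callers above size and free their scatterlists, here is a minimal sketch of the shared pattern: use the small per-state esp->sgbuf array when nfrags (from skb_cow_data()) fits, fall back to a GFP_ATOMIC allocation otherwise, and free the fallback after the crypto call. The helper name esp_map_payload() is hypothetical and does not exist in any of the sources quoted here; it only assumes the esp_data/ESP_NUM_FAST_SG definitions these snippets already rely on.

/* Hypothetical helper illustrating the sgbuf-or-kmalloc pattern above;
 * esp_map_payload() is not a real kernel function. */
static int esp_map_payload(struct esp_data *esp, struct sk_buff *skb,
			   int offset, int len, int nfrags)
{
	struct scatterlist *sg = &esp->sgbuf[0];
	int err = 0;

	/* Fall back to the heap when the skb needs more entries than the
	 * small on-state array can describe. */
	if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
		sg = kmalloc(sizeof(struct scatterlist) * nfrags, GFP_ATOMIC);
		if (!sg)
			return -ENOMEM;
	}

	/* In the kernel generation shown above, skb_to_sgvec() returns the
	 * number of entries used and cannot fail; later kernels changed it
	 * to return a negative errno that callers must check, as
	 * fill_sg_in() above does. */
	skb_to_sgvec(skb, sg, offset, len);

	/* ... hand sg to the cipher here ... */

	if (unlikely(sg != &esp->sgbuf[0]))
		kfree(sg);
	return err;
}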
int esp6_output(struct sk_buff *skb)
{
	int err;
	int hdr_len = 0;
	struct dst_entry *dst = skb->dst;
	struct xfrm_state *x = dst->xfrm;
	struct ipv6hdr *iph = NULL, *top_iph;
	struct ipv6_esp_hdr *esph;
	struct crypto_tfm *tfm;
	struct esp_data *esp;
	struct sk_buff *trailer;
	int blksize;
	int clen;
	int alen;
	int nfrags;
	u8 *prevhdr;
	u8 nexthdr = 0;

	/* First, if the skb is not checksummed, complete checksum. */
	if (skb->ip_summed == CHECKSUM_HW && skb_checksum_help(skb) == NULL) {
		err = -EINVAL;
		goto error_nolock;
	}

	spin_lock_bh(&x->lock);
	err = xfrm_check_output(x, skb, AF_INET6);
	if (err)
		goto error;
	err = -ENOMEM;

	/* Strip IP header in transport mode. Save it. */
	if (!x->props.mode) {
		hdr_len = ip6_find_1stfragopt(skb, &prevhdr);
		nexthdr = *prevhdr;
		*prevhdr = IPPROTO_ESP;
		iph = kmalloc(hdr_len, GFP_ATOMIC);
		if (!iph) {
			err = -ENOMEM;
			goto error;
		}
		memcpy(iph, skb->nh.raw, hdr_len);
		__skb_pull(skb, hdr_len);
	}

	/* Now skb is pure payload to encrypt */

	/* Round to block size */
	clen = skb->len;

	esp = x->data;
	alen = esp->auth.icv_trunc_len;
	tfm = esp->conf.tfm;
	blksize = (crypto_tfm_alg_blocksize(tfm) + 3) & ~3;
	clen = (clen + 2 + blksize-1)&~(blksize-1);
	if (esp->conf.padlen)
		clen = (clen + esp->conf.padlen-1)&~(esp->conf.padlen-1);

	if ((nfrags = skb_cow_data(skb, clen-skb->len+alen, &trailer)) < 0) {
		if (!x->props.mode && iph)
			kfree(iph);
		goto error;
	}

	/* Fill padding... */
	do {
		int i;
		for (i=0; i<clen-skb->len - 2; i++)
			*(u8*)(trailer->tail + i) = i+1;
	} while (0);
	*(u8*)(trailer->tail + clen-skb->len - 2) = (clen - skb->len)-2;
	pskb_put(skb, trailer, clen - skb->len);

	if (x->props.mode) {
		iph = skb->nh.ipv6h;
		top_iph = (struct ipv6hdr*)skb_push(skb, x->props.header_len);
		esph = (struct ipv6_esp_hdr*)(top_iph+1);
		*(u8*)(trailer->tail - 1) = IPPROTO_IPV6;
		top_iph->version = 6;
		top_iph->priority = iph->priority;
		top_iph->flow_lbl[0] = iph->flow_lbl[0];
		top_iph->flow_lbl[1] = iph->flow_lbl[1];
		top_iph->flow_lbl[2] = iph->flow_lbl[2];
		if (x->props.flags & XFRM_STATE_NOECN)
			IP6_ECN_clear(top_iph);
		top_iph->nexthdr = IPPROTO_ESP;
		top_iph->payload_len = htons(skb->len + alen - sizeof(struct ipv6hdr));
		top_iph->hop_limit = iph->hop_limit;
		ipv6_addr_copy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr);
		ipv6_addr_copy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr);
	} else {
		esph = (struct ipv6_esp_hdr*)skb_push(skb, x->props.header_len);
		skb->h.raw = (unsigned char*)esph;
		top_iph = (struct ipv6hdr*)skb_push(skb, hdr_len);
		memcpy(top_iph, iph, hdr_len);
		kfree(iph);
		top_iph->payload_len = htons(skb->len + alen - sizeof(struct ipv6hdr));
		*(u8*)(trailer->tail - 1) = nexthdr;
	}

	esph->spi = x->id.spi;
	esph->seq_no = htonl(++x->replay.oseq);

	if (esp->conf.ivlen)
		crypto_cipher_set_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));

	do {
		struct scatterlist sgbuf[nfrags>MAX_SG_ONSTACK ? 0 : nfrags];
		struct scatterlist *sg = sgbuf;

		if (unlikely(nfrags > MAX_SG_ONSTACK)) {
			sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
			if (!sg)
				goto error;
		}
		skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
		crypto_cipher_encrypt(tfm, sg, sg, clen);
		if (unlikely(sg != sgbuf))
			kfree(sg);
	} while (0);

	if (esp->conf.ivlen) {
		memcpy(esph->enc_data, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
		crypto_cipher_get_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
	}

	if (esp->auth.icv_full_len) {
		esp->auth.icv(esp, skb, (u8*)esph-skb->data,
			      sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen+clen, trailer->tail);
		pskb_put(skb, trailer, alen);
	}

	skb->nh.raw = skb->data;

	x->curlft.bytes += skb->len;
	x->curlft.packets++;
	spin_unlock_bh(&x->lock);
	if ((skb->dst = dst_pop(dst)) == NULL) {
		err = -EHOSTUNREACH;
		goto error_nolock;
	}
	return NET_XMIT_BYPASS;

error:
	spin_unlock_bh(&x->lock);
error_nolock:
	kfree_skb(skb);
	return err;
}