u32
krb5_encrypt(struct crypto_blkcipher *tfm,
	     void *iv,
	     void *in,
	     void *out,
	     int length)
{
	u32 ret = -EINVAL;
	struct scatterlist sg[1];
	u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
	struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };

	if (length % crypto_blkcipher_blocksize(tfm) != 0)
		goto out;

	if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
		dprintk("RPC: gss_k5encrypt: tfm iv size too large %d\n",
			crypto_blkcipher_ivsize(tfm));
		goto out;
	}

	if (iv)
		memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));

	memcpy(out, in, length);
	sg_init_one(sg, out, length);

	ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length);
out:
	dprintk("RPC: krb5_encrypt returns %d\n", ret);
	return ret;
}

u32
krb5_decrypt(struct crypto_blkcipher *tfm,
	     void *iv,
	     void *in,
	     void *out,
	     int length)
{
	u32 ret = -EINVAL;
	struct scatterlist sg[1];
	u8 local_iv[16] = {0};
	struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };

	if (length % crypto_blkcipher_blocksize(tfm) != 0)
		goto out;

	if (crypto_blkcipher_ivsize(tfm) > 16) {
		dprintk("RPC: gss_k5decrypt: tfm iv size too large %d\n",
			crypto_blkcipher_ivsize(tfm));
		goto out;
	}

	if (iv)
		memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));

	memcpy(out, in, length);
	sg_set_buf(sg, out, length);

	ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length);
out:
	dprintk("RPC: gss_k5decrypt returns %d\n", ret);
	return ret;
}

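/*
 * A minimal usage sketch for the two GSS helpers above, assuming the
 * legacy blkcipher API. The helper name and the "cbc(des3_ede)" choice
 * are illustrative only; they are not taken from the GSS code itself.
 */
static int krb5_crypt_roundtrip_example(const u8 *key, unsigned int keylen,
					u8 *buf, unsigned int len)
{
	struct crypto_blkcipher *tfm;
	int err;

	tfm = crypto_alloc_blkcipher("cbc(des3_ede)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_blkcipher_setkey(tfm, key, keylen);
	if (err)
		goto out;

	/* A NULL iv selects the all-zero local_iv inside the helpers;
	 * len must be a multiple of the cipher block size. */
	err = krb5_encrypt(tfm, NULL, buf, buf, len);
	if (!err)
		err = krb5_decrypt(tfm, NULL, buf, buf, len);
out:
	crypto_free_blkcipher(tfm);
	return err;
}
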
static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
{
	struct esp_data *esp = x->data;
	u32 blksize = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);
	u32 align = max_t(u32, blksize, esp->conf.padlen);
	u32 rem;

	mtu -= x->props.header_len + esp->auth.icv_trunc_len;
	rem = mtu & (align - 1);
	mtu &= ~(align - 1);

	switch (x->props.mode) {
	case XFRM_MODE_TUNNEL:
		break;
	default:
	case XFRM_MODE_TRANSPORT:
		/* The worst case */
		mtu -= blksize - 4;
		mtu += min_t(u32, blksize - 4, rem);
		break;
	case XFRM_MODE_BEET:
		/* The worst case. */
		mtu += min_t(u32, IPV4_BEET_PHMAXLEN, rem);
		break;
	}

	return mtu - 2;
}

int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags |= BLKCIPHER_WALK_PHYS;
	walk->blocksize = crypto_blkcipher_blocksize(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}

static u32 esp4_get_max_size(struct xfrm_state *x, int mtu)
{
	struct esp_data *esp = x->data;
	u32 blksize = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);
	int enclen = 0;

	switch (x->props.mode) {
	case XFRM_MODE_TUNNEL:
		mtu = ALIGN(mtu + 2, blksize);
		break;
	default:
	case XFRM_MODE_TRANSPORT:
		/* The worst case */
		mtu = ALIGN(mtu + 2, 4) + blksize - 4;
		break;
	case XFRM_MODE_BEET:
		/* The worst case. */
		enclen = IPV4_BEET_PHMAXLEN;
		mtu = ALIGN(mtu + enclen + 2, blksize);
		break;
	}

	if (esp->conf.padlen)
		mtu = ALIGN(mtu, esp->conf.padlen);

	return mtu + x->props.header_len + esp->auth.icv_trunc_len - enclen;
}

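/*
 * Worked example for the two IPv4 ESP helpers above; the numbers are
 * illustrative, assuming cbc(aes) (block size 16, so blksize = align = 16),
 * hmac(sha1) with a 12-byte truncated ICV, tunnel mode, and padlen = 0.
 * header_len = 20 (outer IPv4) + 8 (ESP header) + 16 (IV) = 44.
 *
 *   esp4_get_mtu(x, 1500): 1500 - 44 - 12 = 1444 usable bytes, rounded
 *   down to the 16-byte grid gives 1440, minus the 2 trailer bytes
 *   (pad length + next header) gives 1438.
 *
 * Feeding 1438 back through esp4_get_max_size() reverses the steps:
 * ALIGN(1438 + 2, 16) = 1440, plus 44 + 12 overhead = 1496 <= 1500.
 */
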
static int crypto_cts_decrypt(struct blkcipher_desc *desc,
			      struct scatterlist *dst, struct scatterlist *src,
			      unsigned int nbytes)
{
	struct crypto_cts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	int bsize = crypto_blkcipher_blocksize(desc->tfm);
	int tot_blocks = (nbytes + bsize - 1) / bsize;
	int cbc_blocks = tot_blocks > 2 ? tot_blocks - 2 : 0;
	struct blkcipher_desc lcldesc;
	int err;

	lcldesc.tfm = ctx->child;
	lcldesc.info = desc->info;
	lcldesc.flags = desc->flags;

	if (tot_blocks == 1) {
		err = crypto_blkcipher_decrypt_iv(&lcldesc, dst, src, bsize);
	} else if (nbytes <= bsize * 2) {
		err = cts_cbc_decrypt(ctx, desc, dst, src, 0, nbytes);
	} else {
		/* do normal function for tot_blocks - 2 */
		err = crypto_blkcipher_decrypt_iv(&lcldesc, dst, src,
						  cbc_blocks * bsize);
		if (err == 0) {
			/* do cts for final two blocks */
			err = cts_cbc_decrypt(ctx, desc, dst, src,
					      cbc_blocks * bsize,
					      nbytes - (cbc_blocks * bsize));
		}
	}

	return err;
}

static int cts_cbc_decrypt(struct crypto_cts_ctx *ctx,
			   struct blkcipher_desc *desc,
			   struct scatterlist *dst,
			   struct scatterlist *src,
			   unsigned int offset,
			   unsigned int nbytes)
{
	int bsize = crypto_blkcipher_blocksize(desc->tfm);
	u8 tmp[bsize];
	struct blkcipher_desc lcldesc;
	struct scatterlist sgsrc[1], sgdst[1];
	int lastn = nbytes - bsize;
	u8 iv[bsize];
	u8 s[bsize * 2], d[bsize * 2];
	int err;

	if (lastn < 0)
		return -EINVAL;

	sg_init_table(sgsrc, 1);
	sg_init_table(sgdst, 1);

	scatterwalk_map_and_copy(s, src, offset, nbytes, 0);

	lcldesc.tfm = ctx->child;
	lcldesc.info = iv;
	lcldesc.flags = desc->flags;

	/* 1. Decrypt Cn-1 (s) to create Dn (tmp) */
	memset(iv, 0, sizeof(iv));
	sg_set_buf(&sgsrc[0], s, bsize);
	sg_set_buf(&sgdst[0], tmp, bsize);
	err = crypto_blkcipher_decrypt_iv(&lcldesc, sgdst, sgsrc, bsize);
	if (err)
		return err;
	/* 2. Pad Cn with zeros at the end to create C of length BB */
	memset(iv, 0, sizeof(iv));
	memcpy(iv, s + bsize, lastn);
	/* 3. Exclusive-or Dn (tmp) with C (iv) to create Xn (tmp) */
	crypto_xor(tmp, iv, bsize);
	/* 4. Select the first Ln bytes of Xn (tmp) to create Pn */
	memcpy(d + bsize, tmp, lastn);
	/* 5. Append the tail (BB - Ln) bytes of Xn (tmp) to Cn
	 *    to create En */
	memcpy(s + bsize + lastn, tmp + lastn, bsize - lastn);
	/* 6. Decrypt En to create Pn-1 */
	memset(iv, 0, sizeof(iv));
	sg_set_buf(&sgsrc[0], s + bsize, bsize);
	sg_set_buf(&sgdst[0], d, bsize);
	err = crypto_blkcipher_decrypt_iv(&lcldesc, sgdst, sgsrc, bsize);

	/* XOR with previous block */
	crypto_xor(d, desc->info, bsize);

	scatterwalk_map_and_copy(d, dst, offset, nbytes, 1);

	memcpy(desc->info, s, bsize);

	return err;
}

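/*
 * Sketch of how the CTS template above would typically be instantiated,
 * assuming the kernel's "cts(cbc(aes))" template name; the wrapper
 * function below is illustrative, not part of crypto/cts.c.
 */
static struct crypto_blkcipher *cts_alloc_example(const u8 *key,
						  unsigned int keylen)
{
	struct crypto_blkcipher *tfm;

	tfm = crypto_alloc_blkcipher("cts(cbc(aes))", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return tfm;

	if (crypto_blkcipher_setkey(tfm, key, keylen)) {
		crypto_free_blkcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	/* Ready for crypto_blkcipher_encrypt_iv()/_decrypt_iv() on any
	 * message of at least one block; CTS handles the partial tail. */
	return tfm;
}
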
int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags |= BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}

int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
			      struct blkcipher_walk *walk,
			      unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
	unsigned int bsize;
	unsigned int n;
	int err;

	n = walk->total;
	if (unlikely(n < crypto_blkcipher_blocksize(tfm))) {
		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return blkcipher_walk_done(desc, walk, -EINVAL);
	}

	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
			 BLKCIPHER_WALK_DIFF);
	if (!scatterwalk_aligned(&walk->in, alignmask) ||
	    !scatterwalk_aligned(&walk->out, alignmask) ||
	    (walk->flags & AMBA_CIPHER_COPY)) {
		walk->flags |= BLKCIPHER_WALK_COPY;
		if (!walk->page) {
			walk->page = (void *)__get_free_page(GFP_ATOMIC);
			if (!walk->page)
				n = 0;
		}
	}

	bsize = min(walk->blocksize, n);
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		err = blkcipher_next_slow(desc, walk, bsize, alignmask);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		err = blkcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
	if (walk->flags & BLKCIPHER_WALK_PHYS) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}

	return err;
}

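/*
 * A minimal consumer sketch for the walk helpers above, modelled on the
 * in-tree CBC template: a mode's ->encrypt() handler walks the request
 * in virtually-mapped chunks. The byte-wise XOR "cipher" is a placeholder
 * so the walk mechanics stay visible; a real handler runs its block
 * function over each chunk instead.
 */
static int example_blkcipher_encrypt(struct blkcipher_desc *desc,
				     struct scatterlist *dst,
				     struct scatterlist *src,
				     unsigned int nbytes)
{
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while (walk.nbytes) {
		u8 *in = walk.src.virt.addr;
		u8 *out = walk.dst.virt.addr;
		unsigned int n = walk.nbytes;

		while (n--)
			*out++ = *in++ ^ 0x55;	/* placeholder transform */

		/* third argument: bytes left unprocessed, here none */
		err = blkcipher_walk_done(desc, &walk, 0);
	}

	return err;
}
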
static int cts_cbc_encrypt(struct crypto_cts_ctx *ctx,
			   struct blkcipher_desc *desc,
			   struct scatterlist *dst,
			   struct scatterlist *src,
			   unsigned int offset,
			   unsigned int nbytes)
{
	int bsize = crypto_blkcipher_blocksize(desc->tfm);
	u8 tmp[bsize], tmp2[bsize];
	struct blkcipher_desc lcldesc;
	struct scatterlist sgsrc[1], sgdst[1];
	int lastn = nbytes - bsize;
	u8 iv[bsize];
	u8 s[bsize * 2], d[bsize * 2];
	int err;

	if (lastn < 0)
		return -EINVAL;

	sg_init_table(sgsrc, 1);
	sg_init_table(sgdst, 1);

	/* s holds Pn-1 followed by Pn, zero-padded to a full block */
	memset(s, 0, sizeof(s));
	scatterwalk_map_and_copy(s, src, offset, nbytes, 0);

	memcpy(iv, desc->info, bsize);

	lcldesc.tfm = ctx->child;
	lcldesc.info = iv;
	lcldesc.flags = desc->flags;

	/* CBC-encrypt Pn-1 with the chaining IV; the first lastn bytes of
	 * the result become the truncated final ciphertext block Cn */
	sg_set_buf(&sgsrc[0], s, bsize);
	sg_set_buf(&sgdst[0], tmp, bsize);
	err = crypto_blkcipher_encrypt_iv(&lcldesc, sgdst, sgsrc, bsize);

	memcpy(d + bsize, tmp, lastn);

	/* CBC-encrypt the padded Pn chained off the block above; it is
	 * emitted as the next-to-last ciphertext block, i.e. the last two
	 * blocks swap places (ciphertext stealing) */
	lcldesc.info = tmp;

	sg_set_buf(&sgsrc[0], s + bsize, bsize);
	sg_set_buf(&sgdst[0], tmp2, bsize);
	err = crypto_blkcipher_encrypt_iv(&lcldesc, sgdst, sgsrc, bsize);

	memcpy(d, tmp2, bsize);

	scatterwalk_map_and_copy(d, dst, offset, nbytes, 1);

	memcpy(desc->info, tmp2, bsize);

	return err;
}

static int aes_get_sizes(void)
{
	struct crypto_blkcipher *tfm;

	tfm = crypto_alloc_blkcipher(blkcipher_alg, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		pr_err("encrypted_key: failed to alloc_cipher (%ld)\n",
		       PTR_ERR(tfm));
		return PTR_ERR(tfm);
	}
	ivsize = crypto_blkcipher_ivsize(tfm);
	blksize = crypto_blkcipher_blocksize(tfm);
	crypto_free_blkcipher(tfm);
	return 0;
}

int dek_aes_decrypt(struct crypto_blkcipher *sdp_tfm, char *src, char *dst,
		    int len)
{
	struct blkcipher_desc desc;
	struct scatterlist src_sg, dst_sg;
	int bsize = crypto_blkcipher_blocksize(sdp_tfm);
	u8 iv[bsize];

	memset(iv, 0, sizeof(iv));

	desc.tfm = sdp_tfm;
	desc.info = iv;
	desc.flags = 0;

	sg_init_one(&src_sg, src, len);
	sg_init_one(&dst_sg, dst, len);

	return crypto_blkcipher_decrypt_iv(&desc, &dst_sg, &src_sg, len);
}

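/*
 * Hedged usage sketch for dek_aes_decrypt() above; the "cbc(aes)" name
 * and the wrapper itself are assumptions for illustration, not part of
 * the SDP code.
 */
static int dek_aes_decrypt_example(const u8 *key, unsigned int keylen,
				   char *src, char *dst, int len)
{
	struct crypto_blkcipher *tfm;
	int err;

	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_blkcipher_setkey(tfm, key, keylen);
	if (!err)
		/* len must be a multiple of the AES block size (16) */
		err = dek_aes_decrypt(tfm, src, dst, len);

	crypto_free_blkcipher(tfm);
	return err;
}
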
static u32 esp6_get_max_size(struct xfrm_state *x, int mtu)
{
	struct esp_data *esp = x->data;
	u32 blksize = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);

	if (x->props.mode == XFRM_MODE_TUNNEL) {
		mtu = ALIGN(mtu + 2, blksize);
	} else {
		/* The worst case. */
		u32 padsize = ((blksize - 1) & 7) + 1;
		mtu = ALIGN(mtu + 2, padsize) + blksize - padsize;
	}

	if (esp->conf.padlen)
		mtu = ALIGN(mtu, esp->conf.padlen);

	return mtu + x->props.header_len + esp->auth.icv_trunc_len;
}

static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
{
	struct esp_data *esp = x->data;
	u32 blksize = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);
	u32 align = max_t(u32, blksize, esp->conf.padlen);
	u32 rem;

	mtu -= x->props.header_len + esp->auth.icv_trunc_len;
	rem = mtu & (align - 1);
	mtu &= ~(align - 1);

	if (x->props.mode != XFRM_MODE_TUNNEL) {
		u32 padsize = ((blksize - 1) & 7) + 1;
		mtu -= blksize - padsize;
		mtu += min_t(u32, blksize - padsize, rem);
	}

	return mtu - 2;
}

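/*
 * Worked example for the IPv6 variants above, assuming cbc(aes) in
 * transport mode (blksize = 16): padsize = ((16 - 1) & 7) + 1 = 8, so
 * the trailer only has to keep the payload on the 8-byte grid that IPv6
 * mandates, and up to blksize - padsize = 8 bytes of each rounded-down
 * block can be handed back to the caller via rem.
 */
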
static inline int blkcipher_copy_iv(struct blkcipher_walk *walk,
				    struct crypto_blkcipher *tfm,
				    unsigned int alignmask)
{
	unsigned bs = crypto_blkcipher_blocksize(tfm);
	unsigned int ivsize = crypto_blkcipher_ivsize(tfm);
	unsigned int size = bs * 2 + ivsize + max(bs, ivsize) -
			    (alignmask + 1);
	u8 *iv;

	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
	iv = blkcipher_get_spot(iv, bs) + bs;
	iv = blkcipher_get_spot(iv, bs) + bs;
	iv = blkcipher_get_spot(iv, ivsize);

	walk->iv = memcpy(iv, walk->iv, ivsize);
	return 0;
}

int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int nbytes = 0;

	if (likely(err >= 0)) {
		unsigned int bsize = crypto_blkcipher_blocksize(tfm);
		unsigned int n;

		if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
			n = blkcipher_done_fast(walk, err);
		else
			n = blkcipher_done_slow(tfm, walk, bsize);

		nbytes = walk->total - n;
		err = 0;
	}

	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

	walk->total = nbytes;
	walk->nbytes = nbytes;

	if (nbytes) {
		crypto_yield(desc->flags);
		return blkcipher_walk_next(desc, walk);
	}

	if (walk->iv != desc->info)
		memcpy(desc->info, walk->iv, crypto_blkcipher_ivsize(tfm));
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

	return err;
}

static int esp_init_state(struct xfrm_state *x)
{
	struct esp_data *esp = NULL;
	struct crypto_blkcipher *tfm;
	u32 align;

	/* null auth and encryption can have zero length keys */
	if (x->aalg) {
		if (x->aalg->alg_key_len > 512)
			goto error;
	}
	if (x->ealg == NULL)
		goto error;

	esp = kzalloc(sizeof(*esp), GFP_KERNEL);
	if (esp == NULL)
		return -ENOMEM;

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;
		struct crypto_hash *hash;

		esp->auth.key = x->aalg->alg_key;
		esp->auth.key_len = (x->aalg->alg_key_len + 7) / 8;
		hash = crypto_alloc_hash(x->aalg->alg_name, 0,
					 CRYPTO_ALG_ASYNC);
		if (IS_ERR(hash))
			goto error;

		esp->auth.tfm = hash;
		if (crypto_hash_setkey(hash, esp->auth.key, esp->auth.key_len))
			goto error;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_hash_digestsize(hash)) {
			NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
				 x->aalg->alg_name,
				 crypto_hash_digestsize(hash),
				 aalg_desc->uinfo.auth.icv_fullbits / 8);
			goto error;
		}

		esp->auth.icv_full_len = aalg_desc->uinfo.auth.icv_fullbits / 8;
		esp->auth.icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits / 8;

		esp->auth.work_icv = kmalloc(esp->auth.icv_full_len,
					     GFP_KERNEL);
		if (!esp->auth.work_icv)
			goto error;
	}

	esp->conf.key = x->ealg->alg_key;
	esp->conf.key_len = (x->ealg->alg_key_len + 7) / 8;
	tfm = crypto_alloc_blkcipher(x->ealg->alg_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		goto error;
	esp->conf.tfm = tfm;
	esp->conf.ivlen = crypto_blkcipher_ivsize(tfm);
	esp->conf.padlen = 0;
	if (esp->conf.ivlen) {
		esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL);
		if (unlikely(esp->conf.ivec == NULL))
			goto error;
		esp->conf.ivinitted = 0;
	}
	if (crypto_blkcipher_setkey(tfm, esp->conf.key, esp->conf.key_len))
		goto error;

	x->props.header_len = sizeof(struct ip_esp_hdr) + esp->conf.ivlen;
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	else if (x->props.mode == XFRM_MODE_BEET)
		x->props.header_len += IPV4_BEET_PHMAXLEN;
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		switch (encap->encap_type) {
		default:
			goto error;
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			x->props.header_len += sizeof(struct udphdr) +
					       2 * sizeof(u32);
			break;
		}
	}
	x->data = esp;
	align = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);
	if (esp->conf.padlen)
		align = max_t(u32, align, esp->conf.padlen);
	x->props.trailer_len = align + 1 + esp->auth.icv_trunc_len;
	return 0;

error:
	x->data = esp;
	esp_destroy(x);
	x->data = NULL;
	return -EINVAL;
}

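/*
 * Worked example for the trailer reservation above, assuming cbc(aes)
 * (block size 16, so align = 16) and hmac(sha1) with a 96-bit truncated
 * ICV: trailer_len = 16 + 1 + 12 = 29 bytes, i.e. room for worst-case
 * padding plus the pad-length byte, the next-header byte, and the
 * 12-byte ICV that esp_output() appends.
 */
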
static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipv6hdr *iph;
	struct ipv6_esp_hdr *esph;
	struct esp_data *esp = x->data;
	struct crypto_blkcipher *tfm = esp->conf.tfm;
	struct blkcipher_desc desc = { .tfm = tfm };
	struct sk_buff *trailer;
	int blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
	int alen = esp->auth.icv_trunc_len;
	int elen = skb->len - sizeof(struct ipv6_esp_hdr) -
		   esp->conf.ivlen - alen;
	int hdr_len = skb->h.raw - skb->nh.raw;
	int nfrags;
	int ret = 0;

	if (!pskb_may_pull(skb, sizeof(struct ipv6_esp_hdr))) {
		ret = -EINVAL;
		goto out;
	}

	if (elen <= 0 || (elen & (blksize - 1))) {
		ret = -EINVAL;
		goto out;
	}

	/* If integrity check is required, do this. */
	if (esp->auth.icv_full_len) {
		u8 sum[alen];

		ret = esp_mac_digest(esp, skb, 0, skb->len - alen);
		if (ret)
			goto out;

		if (skb_copy_bits(skb, skb->len - alen, sum, alen))
			BUG();

		if (unlikely(memcmp(esp->auth.work_icv, sum, alen))) {
			x->stats.integrity_failed++;
			ret = -EINVAL;
			goto out;
		}
	}

	if ((nfrags = skb_cow_data(skb, 0, &trailer)) < 0) {
		ret = -EINVAL;
		goto out;
	}

	skb->ip_summed = CHECKSUM_NONE;

	esph = (struct ipv6_esp_hdr *)skb->data;
	iph = skb->nh.ipv6h;

	/* Get ivec. This can be wrong; check against other impls. */
	if (esp->conf.ivlen)
		crypto_blkcipher_set_iv(tfm, esph->enc_data, esp->conf.ivlen);

	{
		u8 nexthdr[2];
		struct scatterlist *sg = &esp->sgbuf[0];
		u8 padlen;

		if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
			sg = kmalloc(sizeof(struct scatterlist) * nfrags,
				     GFP_ATOMIC);
			if (!sg) {
				ret = -ENOMEM;
				goto out;
			}
		}
		skb_to_sgvec(skb, sg,
			     sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen,
			     elen);
		ret = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
		if (unlikely(sg != &esp->sgbuf[0]))
			kfree(sg);
		if (unlikely(ret))
			goto out;

		if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
			BUG();

		padlen = nexthdr[0];
		if (padlen + 2 >= elen) {
			LIMIT_NETDEBUG(KERN_WARNING "ipsec esp packet is garbage padlen=%d, elen=%d\n",
				       padlen + 2, elen);
			ret = -EINVAL;
			goto out;
		}
		/* ... check padding bits here. Silly. :-) */

		pskb_trim(skb, skb->len - alen - padlen - 2);
		ret = nexthdr[1];
	}

	skb->h.raw = __skb_pull(skb, sizeof(*esph) + esp->conf.ivlen) -
		     hdr_len;

out:
	return ret;
}

static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	struct iphdr *top_iph;
	struct ip_esp_hdr *esph;
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct esp_data *esp;
	struct sk_buff *trailer;
	int blksize;
	int clen;
	int alen;
	int nfrags;

	/* Strip IP+ESP header. */
	__skb_pull(skb, skb->h.raw - skb->data);
	/* Now skb is pure payload to encrypt */

	err = -ENOMEM;

	/* Round to block size */
	clen = skb->len;

	esp = x->data;
	alen = esp->auth.icv_trunc_len;
	tfm = esp->conf.tfm;
	desc.tfm = tfm;
	desc.flags = 0;
	blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
	clen = ALIGN(clen + 2, blksize);
	if (esp->conf.padlen)
		clen = ALIGN(clen, esp->conf.padlen);

	if ((nfrags = skb_cow_data(skb, clen - skb->len + alen, &trailer)) < 0)
		goto error;

	/* Fill padding... */
	do {
		int i;
		for (i = 0; i < clen - skb->len - 2; i++)
			*(u8 *)(trailer->tail + i) = i + 1;
	} while (0);
	*(u8 *)(trailer->tail + clen - skb->len - 2) = (clen - skb->len) - 2;
	pskb_put(skb, trailer, clen - skb->len);

	__skb_push(skb, skb->data - skb->nh.raw);
	top_iph = skb->nh.iph;
	esph = (struct ip_esp_hdr *)(skb->nh.raw + top_iph->ihl * 4);
	top_iph->tot_len = htons(skb->len + alen);
	*(u8 *)(trailer->tail - 1) = top_iph->protocol;

	/* this is non-NULL only with UDP Encapsulation */
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct udphdr *uh;
		u32 *udpdata32;

		uh = (struct udphdr *)esph;
		uh->source = encap->encap_sport;
		uh->dest = encap->encap_dport;
		uh->len = htons(skb->len + alen - top_iph->ihl * 4);
		uh->check = 0;

		switch (encap->encap_type) {
		default:
		case UDP_ENCAP_ESPINUDP:
			esph = (struct ip_esp_hdr *)(uh + 1);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			udpdata32 = (u32 *)(uh + 1);
			udpdata32[0] = udpdata32[1] = 0;
			esph = (struct ip_esp_hdr *)(udpdata32 + 2);
			break;
		}

		top_iph->protocol = IPPROTO_UDP;
	} else
		top_iph->protocol = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(++x->replay.oseq);
	xfrm_aevent_doreplay(x);

	if (esp->conf.ivlen) {
		if (unlikely(!esp->conf.ivinitted)) {
			get_random_bytes(esp->conf.ivec, esp->conf.ivlen);
			esp->conf.ivinitted = 1;
		}
		crypto_blkcipher_set_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
	}

	do {
		struct scatterlist *sg = &esp->sgbuf[0];

		if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
			sg = kmalloc(sizeof(struct scatterlist) * nfrags,
				     GFP_ATOMIC);
			if (!sg)
				goto error;
		}
		skb_to_sgvec(skb, sg,
			     esph->enc_data + esp->conf.ivlen - skb->data,
			     clen);
		err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
		if (unlikely(sg != &esp->sgbuf[0]))
			kfree(sg);
	} while (0);

	if (unlikely(err))
		goto error;

	if (esp->conf.ivlen) {
		memcpy(esph->enc_data, esp->conf.ivec, esp->conf.ivlen);
		crypto_blkcipher_get_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
	}

	if (esp->auth.icv_full_len) {
		err = esp_mac_digest(esp, skb, (u8 *)esph - skb->data,
				     sizeof(*esph) + esp->conf.ivlen + clen);
		memcpy(pskb_put(skb, trailer, alen), esp->auth.work_icv, alen);
	}

	ip_send_check(top_iph);

error:
	return err;
}

/*
 * Note: detecting truncated vs. non-truncated authentication data is very
 * expensive, so we only support truncated data, which is the recommended
 * and common case.
 */
static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct iphdr *iph;
	struct ip_esp_hdr *esph;
	struct esp_data *esp = x->data;
	struct crypto_blkcipher *tfm = esp->conf.tfm;
	struct blkcipher_desc desc = { .tfm = tfm };
	struct sk_buff *trailer;
	int blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
	int alen = esp->auth.icv_trunc_len;
	int elen = skb->len - sizeof(struct ip_esp_hdr) -
		   esp->conf.ivlen - alen;
	int nfrags;
	int ihl;
	u8 nexthdr[2];
	struct scatterlist *sg;
	int padlen;
	int err;

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr)))
		goto out;

	if (elen <= 0 || (elen & (blksize - 1)))
		goto out;

	/* If integrity check is required, do this. */
	if (esp->auth.icv_full_len) {
		u8 sum[alen];

		err = esp_mac_digest(esp, skb, 0, skb->len - alen);
		if (err)
			goto out;

		if (skb_copy_bits(skb, skb->len - alen, sum, alen))
			BUG();

		if (unlikely(memcmp(esp->auth.work_icv, sum, alen))) {
			x->stats.integrity_failed++;
			goto out;
		}
	}

	if ((nfrags = skb_cow_data(skb, 0, &trailer)) < 0)
		goto out;

	skb->ip_summed = CHECKSUM_NONE;

	esph = (struct ip_esp_hdr *)skb->data;

	/* Get ivec. This can be wrong; check against other impls. */
	if (esp->conf.ivlen)
		crypto_blkcipher_set_iv(tfm, esph->enc_data, esp->conf.ivlen);

	sg = &esp->sgbuf[0];

	if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
		sg = kmalloc(sizeof(struct scatterlist) * nfrags, GFP_ATOMIC);
		if (!sg)
			goto out;
	}
	skb_to_sgvec(skb, sg, sizeof(struct ip_esp_hdr) + esp->conf.ivlen,
		     elen);
	err = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
	if (unlikely(sg != &esp->sgbuf[0]))
		kfree(sg);
	if (unlikely(err))
		return err;

	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
		BUG();

	padlen = nexthdr[0];
	if (padlen + 2 >= elen)
		goto out;

	/* ... check padding bits here. Silly. :-) */

	iph = skb->nh.iph;
	ihl = iph->ihl * 4;

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct udphdr *uh = (void *)(skb->nh.raw + ihl);

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertise the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    SRC ports.
		 */
		if (iph->saddr != x->props.saddr.a4 ||
		    uh->source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			ipaddr.a4 = iph->saddr;
			km_new_mapping(x, &ipaddr, uh->source);

			/* XXX: perhaps add an extra
			 * policy check here, to see
			 * if we should allow or
			 * reject a packet from a
			 * different source
			 * address/port.
			 */
		}

		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06,
		 *    section 3.1.2
		 */
		if (x->props.mode == XFRM_MODE_TRANSPORT ||
		    x->props.mode == XFRM_MODE_BEET)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	iph->protocol = nexthdr[1];
	pskb_trim(skb, skb->len - alen - padlen - 2);
	skb->h.raw = __skb_pull(skb, sizeof(*esph) + esp->conf.ivlen) - ihl;

	return 0;

out:
	return -EINVAL;
}

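/*
 * The "check padding bits" step both ESP input paths above skip would
 * look roughly like this under RFC 2406's default self-describing
 * padding (1, 2, 3, ...); a sketch, not the in-tree implementation.
 * The trailer layout is [padding][pad-length byte][next-header byte][ICV].
 */
static int esp_check_padding_example(struct sk_buff *skb, int alen,
				     int padlen)
{
	u8 pad[255];
	int i;

	if (skb_copy_bits(skb, skb->len - alen - 2 - padlen, pad, padlen))
		return -EINVAL;

	for (i = 0; i < padlen; i++)
		if (pad[i] != i + 1)
			return -EINVAL;

	return 0;
}
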
static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	int hdr_len;
	struct ipv6hdr *top_iph;
	struct ipv6_esp_hdr *esph;
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct esp_data *esp;
	struct sk_buff *trailer;
	int blksize;
	int clen;
	int alen;
	int nfrags;

	esp = x->data;
	hdr_len = skb->h.raw - skb->data + sizeof(*esph) + esp->conf.ivlen;

	/* Strip IP+ESP header. */
	__skb_pull(skb, hdr_len);
	/* Now skb is pure payload to encrypt */

	err = -ENOMEM;

	/* Round to block size */
	clen = skb->len;

	alen = esp->auth.icv_trunc_len;
	tfm = esp->conf.tfm;
	desc.tfm = tfm;
	desc.flags = 0;
	blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
	clen = ALIGN(clen + 2, blksize);
	if (esp->conf.padlen)
		clen = ALIGN(clen, esp->conf.padlen);

	if ((nfrags = skb_cow_data(skb, clen - skb->len + alen, &trailer)) < 0)
		goto error;

	/* Fill padding... */
	do {
		int i;
		for (i = 0; i < clen - skb->len - 2; i++)
			*(u8 *)(trailer->tail + i) = i + 1;
	} while (0);
	*(u8 *)(trailer->tail + clen - skb->len - 2) = (clen - skb->len) - 2;
	pskb_put(skb, trailer, clen - skb->len);

	top_iph = (struct ipv6hdr *)__skb_push(skb, hdr_len);
	esph = (struct ipv6_esp_hdr *)skb->h.raw;
	top_iph->payload_len = htons(skb->len + alen - sizeof(*top_iph));
	*(u8 *)(trailer->tail - 1) = *skb->nh.raw;
	*skb->nh.raw = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(++x->replay.oseq);
	xfrm_aevent_doreplay(x);

	if (esp->conf.ivlen) {
		if (unlikely(!esp->conf.ivinitted)) {
			get_random_bytes(esp->conf.ivec, esp->conf.ivlen);
			esp->conf.ivinitted = 1;
		}
		crypto_blkcipher_set_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
	}

	do {
		struct scatterlist *sg = &esp->sgbuf[0];

		if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
			sg = kmalloc(sizeof(struct scatterlist) * nfrags,
				     GFP_ATOMIC);
			if (!sg)
				goto error;
		}
		skb_to_sgvec(skb, sg,
			     esph->enc_data + esp->conf.ivlen - skb->data,
			     clen);
		err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
		if (unlikely(sg != &esp->sgbuf[0]))
			kfree(sg);
	} while (0);

	if (unlikely(err))
		goto error;

	if (esp->conf.ivlen) {
		memcpy(esph->enc_data, esp->conf.ivec, esp->conf.ivlen);
		crypto_blkcipher_get_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
	}

	if (esp->auth.icv_full_len) {
		err = esp_mac_digest(esp, skb, (u8 *)esph - skb->data,
				     sizeof(*esph) + esp->conf.ivlen + clen);
		memcpy(pskb_put(skb, trailer, alen), esp->auth.work_icv, alen);
	}

error:
	return err;
}

static int tf_self_test_perform_blkcipher(
	const char *alg_name,
	const struct blkcipher_test_vector *tv,
	bool decrypt)
{
	struct blkcipher_desc desc = {0};
	struct scatterlist sg_in, sg_out;
	unsigned char *in = NULL;
	unsigned char *out = NULL;
	unsigned in_size, out_size;
	int error;

	desc.tfm = crypto_alloc_blkcipher(alg_name, 0, 0);
	if (IS_ERR_OR_NULL(desc.tfm)) {
		ERROR("crypto_alloc_blkcipher(%s) failed", alg_name);
		error = (desc.tfm == NULL ? -ENOMEM :
			 (int)PTR_ERR(desc.tfm));
		goto abort;
	}
	INFO("%s alg_name=%s driver_name=%s key_size=%u block_size=%u",
	     decrypt ? "decrypt" : "encrypt", alg_name,
	     crypto_tfm_alg_driver_name(crypto_blkcipher_tfm(desc.tfm)),
	     tv->key_length * 8,
	     crypto_blkcipher_blocksize(desc.tfm));

	in_size = tv->length;
	in = kmalloc(in_size, GFP_KERNEL);
	if (in == NULL) {
		ERROR("kmalloc(%u) failed", in_size);
		error = -ENOMEM;
		goto abort;
	}
	memcpy(in, decrypt ? tv->ciphertext : tv->plaintext, tv->length);

	out_size = tv->length + crypto_blkcipher_blocksize(desc.tfm);
	out = kmalloc(out_size, GFP_KERNEL);
	if (out == NULL) {
		ERROR("kmalloc(%u) failed", out_size);
		error = -ENOMEM;
		goto abort;
	}

	error = crypto_blkcipher_setkey(desc.tfm, tv->key, tv->key_length);
	if (error) {
		ERROR("crypto_blkcipher_setkey(%s) failed", alg_name);
		goto abort;
	}
	TF_TRACE_ARRAY(tv->key, tv->key_length);

	if (tv->iv != NULL) {
		unsigned iv_length = crypto_blkcipher_ivsize(desc.tfm);
		crypto_blkcipher_set_iv(desc.tfm, tv->iv, iv_length);
		TF_TRACE_ARRAY(tv->iv, iv_length);
	}

	sg_init_one(&sg_in, in, tv->length);
	sg_init_one(&sg_out, out, tv->length);
	TF_TRACE_ARRAY(in, tv->length);
	error = (decrypt ? crypto_blkcipher_decrypt : crypto_blkcipher_encrypt)
		(&desc, &sg_out, &sg_in, tv->length);
	if (error) {
		ERROR("crypto_blkcipher_%s(%s) failed",
		      decrypt ? "decrypt" : "encrypt", alg_name);
		goto abort;
	}
	TF_TRACE_ARRAY(out, tv->length);

	crypto_free_blkcipher(desc.tfm);

	if (memcmp((decrypt ? tv->plaintext : tv->ciphertext),
		   out, tv->length)) {
		ERROR("Wrong %s/%u %s result", alg_name, tv->key_length * 8,
		      decrypt ? "decryption" : "encryption");
		error = -EINVAL;
	} else {
		INFO("%s/%u: %s successful", alg_name, tv->key_length * 8,
		     decrypt ? "decryption" : "encryption");
		error = 0;
	}
	kfree(in);
	kfree(out);
	return error;

abort:
	if (!IS_ERR_OR_NULL(desc.tfm))
		crypto_free_blkcipher(desc.tfm);
	kfree(out);
	kfree(in);
	return error;
}

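/*
 * Example test vector for the self-test harness above. The field names
 * follow their usage in tf_self_test_perform_blkcipher() (the struct
 * definition itself is not shown in this section, so the layout is an
 * assumption); the bytes are the AES-128 example from FIPS-197
 * Appendix C.1 (ECB, so iv stays NULL).
 */
static const unsigned char aes128_key[16] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
};
static const unsigned char aes128_pt[16] = {
	0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
	0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff,
};
static const unsigned char aes128_ct[16] = {
	0x69, 0xc4, 0xe0, 0xd8, 0x6a, 0x7b, 0x04, 0x30,
	0xd8, 0xcd, 0xb7, 0x80, 0x70, 0xb4, 0xc5, 0x5a,
};
static const struct blkcipher_test_vector aes128_ecb_tv = {
	.key = aes128_key,
	.key_length = sizeof(aes128_key),
	.plaintext = aes128_pt,
	.ciphertext = aes128_ct,
	.length = sizeof(aes128_pt),
	.iv = NULL,	/* ECB takes no IV */
};
/* usage: tf_self_test_perform_blkcipher("ecb(aes)", &aes128_ecb_tv, false); */
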
static struct pdu_ser *pdu_serialize_gfp(gfp_t flags,
					 const struct serdes *instance,
					 struct pdu *pdu,
					 struct dup_config_entry *dup_conf,
					 struct crypto_blkcipher *blkcipher)
{
	struct pdu_ser *tmp;
	struct dt_cons *dt_cons;
	const void *buffer_data;
	const struct buffer *buffer;
	const struct pci *pci;
	size_t size;
	ssize_t buffer_size;
	ssize_t blk_size;
	ssize_t encrypted_size;
	ssize_t pci_size;
	char *data;
	pdu_type_t pdu_type;
	seq_num_t seq;
	struct buffer *buf;
	int i;

	if (!pdu_is_ok(pdu))
		return NULL;
	if (!instance)
		return NULL;

	dt_cons = instance->dt_cons;
	ASSERT(dt_cons);

	buffer = pdu_buffer_get_ro(pdu);
	if (!buffer)
		return NULL;

	buffer_data = buffer_data_ro(buffer);
	if (!buffer_data)
		return NULL;

	buffer_size = buffer_length(buffer);
	if (buffer_size <= 0)
		return NULL;

	pci = pdu_pci_get_ro(pdu);
	if (!pci)
		return NULL;

	pdu_type = pci_type(pci);
	if (!pdu_type_is_ok(pdu_type)) {
		LOG_ERR("Wrong PDU type");
		return NULL;
	}

	/* Base PCI size, fields present in all PDUs */
	pci_size = base_pci_size(dt_cons);

	/*
	 * These are the PDU types available in the stack at this point in
	 * time. Extend if necessary (e.g. NACKs, SACKs, ...).
	 * Set size in the first switch/case.
	 */
	switch (pdu_type) {
	case PDU_TYPE_MGMT:
	case PDU_TYPE_DT:
		size = pci_size + dt_cons->seq_num_length + buffer_size;
		if (size <= 0)
			return NULL;
		break;
	case PDU_TYPE_FC:
		size = pci_size + CTRL_SEQ_NR + fc_pci_size(dt_cons);
		if (size <= 0)
			return NULL;
		break;
	case PDU_TYPE_ACK:
		size = pci_size + CTRL_SEQ_NR + dt_cons->seq_num_length;
		if (size <= 0)
			return NULL;
		break;
	case PDU_TYPE_ACK_AND_FC:
		size = pci_size + CTRL_SEQ_NR + fc_pci_size(dt_cons) +
			dt_cons->seq_num_length;
		if (size <= 0)
			return NULL;
		break;
	case PDU_TYPE_CACK:
		size = pci_size + 2 * CTRL_SEQ_NR +
			4 * dt_cons->seq_num_length + RATE_LEN;
		if (size <= 0)
			return NULL;
		break;
	default:
		LOG_ERR("Unknown PDU type %02X", pdu_type);
		return NULL;
	}

	data = rkmalloc(size, flags);
	if (!data)
		return NULL;

	/* Needed for all PDUs */
	if (serialize_base_pci(instance, data, pci, size)) {
		LOG_ERR("Failed to serialize base PCI");
		rkfree(data);
		return NULL;
	}

	/* Do the actual serializing in the second switch/case */
	switch (pdu_type) {
	case PDU_TYPE_MGMT:
	case PDU_TYPE_DT:
		seq = pci_sequence_number_get(pci);
		memcpy(data + pci_size, &seq, dt_cons->seq_num_length);
		memcpy(data + pci_size + dt_cons->seq_num_length,
		       buffer_data, buffer_size);
		break;
	case PDU_TYPE_FC:
		if (serialize_ctrl_seq(instance, data, pci, pci_size) ||
		    serialize_fc_pci(instance, data, pci,
				     pci_size + CTRL_SEQ_NR)) {
			rkfree(data);
			return NULL;
		}
		break;
	case PDU_TYPE_ACK:
		if (serialize_ctrl_seq(instance, data, pci, pci_size) ||
		    serialize_ack_pci(instance, data, pci,
				      pci_size + CTRL_SEQ_NR)) {
			rkfree(data);
			return NULL;
		}
		break;
	case PDU_TYPE_ACK_AND_FC:
		if (serialize_ctrl_seq(instance, data, pci, pci_size) ||
		    serialize_ack_pci(instance, data, pci,
				      pci_size + CTRL_SEQ_NR) ||
		    serialize_fc_pci(instance, data, pci,
				     pci_size + CTRL_SEQ_NR +
				     dt_cons->seq_num_length)) {
			rkfree(data);
			return NULL;
		}
		break;
	case PDU_TYPE_CACK:
		if (serialize_ctrl_seq(instance, data, pci, pci_size) ||
		    serialize_cc_pci(instance, data, pci,
				     pci_size + CTRL_SEQ_NR)) {
			rkfree(data);
			return NULL;
		}
		break;
	default:
		LOG_ERR("Unknown PDU type %02X", pdu_type);
		rkfree(data);
		return NULL;
	}

	buf = buffer_create_with_gfp(flags, data, size);
	if (!buf) {
		rkfree(data);
		return NULL;
	}

	tmp = pdu_ser_create_buffer_with_gfp(flags, buf);
	if (!tmp) {
		rkfree(buf);
		return NULL;
	}

	/* FIXME: this should be moved to specific policy code */
	if (dup_conf != NULL && dup_conf->ttl_policy != NULL) {
		if (pdu_ser_head_grow_gfp(flags, tmp, sizeof(u8))) {
			LOG_ERR("Failed to grow ser PDU");
			pdu_ser_destroy(tmp);
			return NULL;
		}

		if (!dup_ttl_set(tmp, pci_ttl(pci))) {
			LOG_ERR("Could not set TTL");
			pdu_ser_destroy(tmp);
			return NULL;
		}

		if (dup_ttl_is_expired(tmp)) {
			LOG_DBG("TTL is expired, dropping PDU");
			pdu_ser_destroy(tmp);
			return NULL;
		}
	}

	/* FIXME: this should be moved to specific policy code */
	if (blkcipher != NULL && dup_conf != NULL &&
	    dup_conf->enable_encryption) {
		buf = pdu_ser_buffer(tmp);
		blk_size = crypto_blkcipher_blocksize(blkcipher);
		buffer_size = buffer_length(buf);
		encrypted_size = (buffer_size / blk_size + 1) * blk_size;

		if (pdu_ser_tail_grow_gfp(tmp, encrypted_size - buffer_size)) {
			LOG_ERR("Failed to grow ser PDU");
			pdu_ser_destroy(tmp);
			return NULL;
		}

		/* PADDING: every pad byte, data[buffer_size] through
		 * data[encrypted_size - 1], holds the pad length */
		data = buffer_data_rw(buf);
		for (i = encrypted_size - 1; i >= buffer_size; i--)
			data[i] = encrypted_size - buffer_size;

		/* Encrypt */
		if (!dup_encrypt_data(tmp, blkcipher)) {
			LOG_ERR("Failed to encrypt PDU");
			pdu_ser_destroy(tmp);
			return NULL;
		}
	}

	/* FIXME: this should be moved to specific policy code */
	if (dup_conf != NULL && dup_conf->error_check_policy != NULL) {
		/* Assuming CRC32 */
		if (pdu_ser_head_grow_gfp(flags, tmp, sizeof(u32))) {
			LOG_ERR("Failed to grow ser PDU");
			pdu_ser_destroy(tmp);
			return NULL;
		}

		if (!dup_chksum_set(tmp)) {
			LOG_ERR("Failed to add CRC");
			pdu_ser_destroy(tmp);
			return NULL;
		}

		ASSERT(dup_chksum_is_ok(tmp));
	}

	return tmp;
}

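/*
 * Worked example of the encryption padding above, assuming blk_size = 16:
 * a 20-byte serialized PDU grows to encrypted_size = (20/16 + 1) * 16 = 32,
 * and the 12 trailing bytes each hold the pad length (12), so the receiver
 * can strip data[len - 1] bytes after decrypting. Note the scheme always
 * pads, even when buffer_size is already a multiple of blk_size: a 16-byte
 * PDU grows to 32 with 16 bytes of padding.
 */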