static int crypto_cts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct crypto_cts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); int bsize = crypto_blkcipher_blocksize(desc->tfm); int tot_blocks = (nbytes + bsize - 1) / bsize; int cbc_blocks = tot_blocks > 2 ? tot_blocks - 2 : 0; struct blkcipher_desc lcldesc; int err; lcldesc.tfm = ctx->child; lcldesc.info = desc->info; lcldesc.flags = desc->flags; if (tot_blocks == 1) { err = crypto_blkcipher_encrypt_iv(&lcldesc, dst, src, bsize); } else if (nbytes <= bsize * 2) { err = cts_cbc_encrypt(ctx, desc, dst, src, 0, nbytes); } else { /* do normal function for tot_blocks - 2 */ err = crypto_blkcipher_encrypt_iv(&lcldesc, dst, src, cbc_blocks * bsize); if (err == 0) { /* do cts for final two blocks */ err = cts_cbc_encrypt(ctx, desc, dst, src, cbc_blocks * bsize, nbytes - (cbc_blocks * bsize)); } } return err; }
static int cts_cbc_encrypt(struct crypto_cts_ctx *ctx, struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int offset, unsigned int nbytes) { int bsize = crypto_blkcipher_blocksize(desc->tfm); u8 tmp[bsize], tmp2[bsize]; struct blkcipher_desc lcldesc; struct scatterlist sgsrc[1], sgdst[1]; int lastn = nbytes - bsize; u8 iv[bsize]; u8 s[bsize * 2], d[bsize * 2]; int err; if (lastn < 0) return -EINVAL; sg_init_table(sgsrc, 1); sg_init_table(sgdst, 1); memset(s, 0, sizeof(s)); scatterwalk_map_and_copy(s, src, offset, nbytes, 0); memcpy(iv, desc->info, bsize); lcldesc.tfm = ctx->child; lcldesc.info = iv; lcldesc.flags = desc->flags; sg_set_buf(&sgsrc[0], s, bsize); sg_set_buf(&sgdst[0], tmp, bsize); err = crypto_blkcipher_encrypt_iv(&lcldesc, sgdst, sgsrc, bsize); memcpy(d + bsize, tmp, lastn); lcldesc.info = tmp; sg_set_buf(&sgsrc[0], s + bsize, bsize); sg_set_buf(&sgdst[0], tmp2, bsize); err = crypto_blkcipher_encrypt_iv(&lcldesc, sgdst, sgsrc, bsize); memcpy(d, tmp2, bsize); scatterwalk_map_and_copy(d, dst, offset, nbytes, 1); memcpy(desc->info, tmp2, bsize); return err; }
u32 krb5_encrypt( struct crypto_blkcipher *tfm, void * iv, void * in, void * out, int length) { u32 ret = -EINVAL; struct scatterlist sg[1]; u8 local_iv[16] = {0}; struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv }; if (length % crypto_blkcipher_blocksize(tfm) != 0) goto out; if (crypto_blkcipher_ivsize(tfm) > 16) { dprintk("RPC: gss_k5encrypt: tfm iv size to large %d\n", crypto_blkcipher_ivsize(tfm)); goto out; } if (iv) memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm)); memcpy(out, in, length); sg_set_buf(sg, out, length); ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length); out: dprintk("RPC: krb5_encrypt returns %d\n", ret); return ret; }
static int crypto_rfc3686_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct crypto_blkcipher *tfm = desc->tfm; struct crypto_rfc3686_ctx *ctx = crypto_blkcipher_ctx(tfm); struct crypto_blkcipher *child = ctx->child; unsigned long alignmask = crypto_blkcipher_alignmask(tfm); u8 ivblk[CTR_RFC3686_BLOCK_SIZE + alignmask]; u8 *iv = PTR_ALIGN(ivblk + 0, alignmask + 1); u8 *info = desc->info; int err; /* set up counter block */ memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE); memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE); /* initialize counter portion of counter block */ *(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) = cpu_to_be32(1); desc->tfm = child; desc->info = iv; err = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes); desc->tfm = tfm; desc->info = info; return err; }
/*
 * Encrypt via the software fallback transform when the Geode hardware
 * cannot service the request.  desc->tfm is temporarily swapped to the
 * fallback blkcipher and restored before returning.
 *
 * Returns 0 on success or a negative errno from the fallback cipher.
 *
 * Fix: the result is a negative errno on failure, so hold it in a
 * signed int rather than unsigned int.
 */
static int fallback_blk_enc(struct blkcipher_desc *desc,
			    struct scatterlist *dst, struct scatterlist *src,
			    unsigned int nbytes)
{
	int ret;
	struct crypto_blkcipher *tfm;
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);

	tfm = desc->tfm;
	desc->tfm = op->fallback.blk;

	ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);

	desc->tfm = tfm;
	return ret;
}
/*
 * Encrypt via the software XTS fallback when the s390 hardware cannot
 * handle the request (e.g. unsupported key size).  desc->tfm is
 * temporarily swapped to the fallback transform and restored before
 * returning.
 *
 * Returns 0 on success or a negative errno from the fallback cipher.
 *
 * Fix: the result is a negative errno on failure, so hold it in a
 * signed int rather than unsigned int.
 */
static int xts_fallback_encrypt(struct blkcipher_desc *desc,
				struct scatterlist *dst,
				struct scatterlist *src, unsigned int nbytes)
{
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct crypto_blkcipher *tfm;
	int ret;

	tfm = desc->tfm;
	desc->tfm = xts_ctx->fallback;

	ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);

	desc->tfm = tfm;
	return ret;
}
int dek_aes_encrypt(struct crypto_blkcipher *sdp_tfm, char *src, char *dst, int len) { struct blkcipher_desc desc; struct scatterlist src_sg, dst_sg; int bsize = crypto_blkcipher_blocksize(sdp_tfm); u8 iv[bsize]; memset(&iv, 0, sizeof(iv)); desc.tfm = sdp_tfm; desc.info = iv; desc.flags = 0; sg_init_one(&src_sg, src, len); sg_init_one(&dst_sg, dst, len); return crypto_blkcipher_encrypt_iv(&desc, &dst_sg, &src_sg, len); }
/*
 * Run the XTS transform over one contiguous buffer.  The integer tweak
 * is expanded into a block-sized byte array (via initializeTweakBytes)
 * and passed to the cipher as the IV.
 *
 * NOTE(review): the crypto-layer return codes are discarded because
 * this helper returns void — consider propagating them to callers.
 */
static void AES_xts(struct crypto_blkcipher *cipher, int tweak,
		    const __u8 *input, __u8 *output, int length, int encrypt)
{
	struct blkcipher_desc desc;
	struct scatterlist sg_out[1];
	struct scatterlist sg_in[1];
	__u8 tweakBytes[AES_BLOCK_SIZE];

	initializeTweakBytes(tweakBytes, tweak);

	sg_init_table(sg_out, 1);
	sg_init_table(sg_in, 1);
	sg_set_buf(&sg_out[0], output, length);
	sg_set_buf(&sg_in[0], input, length);

	desc.tfm = cipher;
	desc.flags = 0;
	desc.info = tweakBytes;

	if (encrypt)
		crypto_blkcipher_encrypt_iv(&desc, &sg_out[0], &sg_in[0],
					    length);
	else
		crypto_blkcipher_decrypt_iv(&desc, &sg_out[0], &sg_in[0],
					    length);
}
/*
 * gcm_aes_nx_crypt - run one AES-GCM encrypt or decrypt on the NX unit.
 * @req: AEAD request carrying src/dst scatterlists, assoc data and lengths
 * @enc: non-zero to encrypt, zero to decrypt
 *
 * Programs the co-processor control block (csbcpb), issues the hcall,
 * and handles the auth tag: copied out after encryption, compared after
 * decryption.  Returns 0 on success, -EINVAL for oversized input,
 * -EBADMSG on tag mismatch, or a negative errno from the helpers.
 */
static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct blkcipher_desc desc;
	unsigned int nbytes = req->cryptlen;
	int rc = -EINVAL;

	/* hardware limit on a single operation's payload */
	if (nbytes > nx_ctx->ap->databytelen)
		goto out;

	desc.info = nx_ctx->priv.gcm.iv;
	/* initialize the counter */
	*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;

	/* For scenarios where the input message is zero length, AES CTR mode
	 * may be used. Set the source data to be a single block (16B) of all
	 * zeros, and set the input IV value to be the same as the GMAC IV
	 * value. - nx_wb 4.8.1.3 */
	if (nbytes == 0) {
		char src[AES_BLOCK_SIZE] = {};
		struct scatterlist sg;

		/* temporary software ctr(aes) transform just for this case */
		desc.tfm = crypto_alloc_blkcipher("ctr(aes)", 0, 0);
		if (IS_ERR(desc.tfm)) {
			rc = -ENOMEM;
			goto out;
		}

		/* key length derived from the CPB key-size field */
		crypto_blkcipher_setkey(desc.tfm, csbcpb->cpb.aes_gcm.key,
			NX_CPB_KEY_SIZE(csbcpb) == NX_KS_AES_128 ? 16 :
			NX_CPB_KEY_SIZE(csbcpb) == NX_KS_AES_192 ? 24 : 32);

		sg_init_one(&sg, src, AES_BLOCK_SIZE);
		if (enc)
			crypto_blkcipher_encrypt_iv(&desc, req->dst, &sg,
						    AES_BLOCK_SIZE);
		else
			crypto_blkcipher_decrypt_iv(&desc, req->dst, &sg,
						    AES_BLOCK_SIZE);
		crypto_free_blkcipher(desc.tfm);

		rc = 0;
		goto out;
	}

	desc.tfm = (struct crypto_blkcipher *)req->base.tfm;

	csbcpb->cpb.aes_gcm.bit_length_aad = req->assoclen * 8;

	/* hash the associated data first, if any */
	if (req->assoclen) {
		rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad);
		if (rc)
			goto out;
	}

	if (enc)
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	else
		/* on decrypt, the trailing auth tag is not payload */
		nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));

	csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;

	rc = nx_build_sg_lists(nx_ctx, &desc, req->dst, req->src, nbytes,
			       csbcpb->cpb.aes_gcm.iv_or_cnt);
	if (rc)
		goto out;

	/* issue the hardware operation and wait for completion */
	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->aes_ops));
	atomic64_add(csbcpb->csb.processed_byte_count,
		     &(nx_ctx->stats->aes_bytes));

	if (enc) {
		/* copy out the auth tag */
		scatterwalk_map_and_copy(csbcpb->cpb.aes_gcm.out_pat_or_mac,
				req->dst, nbytes,
				crypto_aead_authsize(crypto_aead_reqtfm(req)),
				SCATTERWALK_TO_SG);
	} else if (req->assoclen) {
		u8 *itag = nx_ctx->priv.gcm.iauth_tag;
		u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;

		/* fetch the tag from the message and compare */
		scatterwalk_map_and_copy(itag, req->dst, nbytes,
				crypto_aead_authsize(crypto_aead_reqtfm(req)),
				SCATTERWALK_FROM_SG);
		/* NOTE(review): memcmp is not constant-time; a
		 * constant-time comparison would be preferable here */
		rc = memcmp(itag, otag,
			    crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
		     -EBADMSG : 0;
	}
out:
	return rc;
}
char * decrypt_filename(const char *c, const unsigned char *k, int kl) { struct crypto_blkcipher *tfm = 0; struct scatterlist sgi[1], sgo[1]; unsigned char *work; unsigned char ivec[16]; int outlen = 0, rlen; unsigned int sum1, sum2; char *ret = 0, *rp = 0; struct blkcipher_desc desc[1]; int rc; tfm = crypto_alloc_blkcipher(ALG, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) { printk(KERN_ERR "decrypt_filename: crypto_alloc_blkcipher (%s) failed\n", ALG); ret = (void*)tfm; /* ERR_PTR(PTR_ERR(tfm)) */ tfm = 0; goto Done; } if (crypto_blkcipher_setkey(tfm, k, kl) < 0) { printk(KERN_ERR "decrypt_filename: crypto_blkcipher_setkey failed (bad key?)\n"); ret = ERR_PTR(-EIO); goto Done; } work = b92_decode(c, &outlen); if (!work || outlen < 5) { printk(KERN_ERR "decrypt_filename: bad filename %d/<%s> %d\n", (int)strlen(c), c, outlen); ret = ERR_PTR(-ENOMEM); goto Done; } if (!work) { printk(KERN_ERR "decrypt_filename: b92_decode failed!\n"); ret = ERR_PTR(-ENOMEM); goto Done; } memcpy(ivec, work, 4); memset(ivec+4, 0, sizeof ivec-5); ivec[sizeof ivec-1]=1; rlen = NAME_ALLOC_LEN(outlen-4); rp = kmalloc(rlen, GFP_KERNEL); if (!rp) { printk(KERN_ERR "decrypt_filename: can't allocate %d bytes\n", rlen-3); ret = ERR_PTR(-ENOMEM); goto Done; } rp[outlen-4] = 0; sg_init_table(sgi, 1); sg_init_table(sgo, 1); sg_set_buf(sgi, work+4, outlen-4); sg_set_buf(sgo, rp, outlen-4); memset(desc, 0, sizeof *desc); desc->info = ivec; desc->tfm = tfm; rc = crypto_blkcipher_encrypt_iv(desc, sgo, sgi, outlen-4); if (rc) { printk(KERN_ERR "decrypt_filename: encrypt failed code=%d\n", rc); ret = ERR_PTR(rc); goto Done; } sum1 = ~crc32_le(~0, rp, outlen-4); sum2 = (((unsigned int)work[0])<<24) | (((unsigned int)work[1])<<16) | (((unsigned int)work[2])<<8) | ((unsigned int)work[3]); if (sum1 != sum2) { printk(KERN_ERR "decrypt_filename: crc didn't match! 
(on %x) (got %x)\n", be32_to_cpu(sum1), be32_to_cpu(sum2)); ret = ERR_PTR(-EIO); goto Done; } ret = rp; rp = 0; Done: if (work) kfree(work); if (tfm) crypto_free_blkcipher(tfm); if (rp) kfree(rp); return ret; }
char * encrypt_filename(const char *c, const unsigned char *k, int kl) { struct crypto_blkcipher *tfm = 0; struct scatterlist sg[1]; unsigned char ivec[16]; int l, l2; unsigned int sum; char *ret = 0; char *work; struct blkcipher_desc desc[1]; int rc; tfm = crypto_alloc_blkcipher(ALG, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) { printk(KERN_ERR "encrypt_filename: crypto_alloc_blkcipher (%s) failed\n", ALG); ret = (void*)tfm; /* ERR_PTR(PTR_ERR(tfm)) */ tfm = 0; goto Failed; } if (crypto_blkcipher_setkey(tfm, k, kl) < 0) { printk(KERN_ERR "encrypt_filename: crypto_blkcipher_setkey failed (bad key?)\n"); ret = ERR_PTR(-EIO); goto Failed; } l = strlen(c); /* NOTE: bitrev(crc32_le != crc32_be( */ sum = ~crc32_le(~0, c, l); l2 = l + 4; work = kmalloc(l2, GFP_KERNEL); if (!work) { printk(KERN_ERR "encrypt_filename: can't allocate %d bytes\n", l2); ret = ERR_PTR(-ENOMEM); goto Failed; } work[0] = sum>>24; work[1] = sum>>16; work[2] = sum>>8; work[3] = sum; memcpy(work+4, c, l); memcpy(ivec, work, 4); memset(ivec+4, 0, sizeof ivec-5); ivec[sizeof ivec-1]=1; sg_init_table(sg, 1); sg_set_buf(sg, work+4, l); memset(desc, 0, sizeof *desc); desc->info = ivec; desc->tfm = tfm; rc = crypto_blkcipher_encrypt_iv(desc, sg, sg, l); if (rc) { printk(KERN_ERR "encrypt_filename: encrypt failed code=%d\n", rc); ret = ERR_PTR(rc); goto Failed; } ret = b92_encode(work, l2); Failed: if (work) kfree(work); if (tfm) crypto_free_blkcipher(tfm); return ret; }