static int cryptoloop_transfer(struct loop_device *lo, int cmd,
			       struct page *raw_page, unsigned raw_off,
			       struct page *loop_page, unsigned loop_off,
			       int size, sector_t IV)
{
	struct crypto_blkcipher *tfm = lo->key_data;
	struct blkcipher_desc desc = {
		.tfm = tfm,
		.flags = CRYPTO_TFM_REQ_MAY_SLEEP,
	};
	struct scatterlist sg_out = { NULL, };
	struct scatterlist sg_in = { NULL, };
	encdec_cbc_t encdecfunc;
	struct page *in_page, *out_page;
	unsigned in_offs, out_offs;
	int err;

	if (cmd == READ) {
		in_page = raw_page;
		in_offs = raw_off;
		out_page = loop_page;
		out_offs = loop_off;
		encdecfunc = crypto_blkcipher_crt(tfm)->decrypt;
	} else {
		in_page = loop_page;
		in_offs = loop_off;
		out_page = raw_page;
		out_offs = raw_off;
		encdecfunc = crypto_blkcipher_crt(tfm)->encrypt;
	}

	while (size > 0) {
		const int sz = min(size, LOOP_IV_SECTOR_SIZE);
		u32 iv[4] = { 0, };

		/* IV is the low 32 bits of the sector number, little endian */
		iv[0] = cpu_to_le32(IV & 0xffffffff);

		sg_in.page = in_page;
		sg_in.offset = in_offs;
		sg_in.length = sz;

		sg_out.page = out_page;
		sg_out.offset = out_offs;
		sg_out.length = sz;

		desc.info = iv;
		err = encdecfunc(&desc, &sg_out, &sg_in, sz);
		if (err)
			return err;

		IV++;
		size -= sz;
		in_offs += sz;
		out_offs += sz;
	}

	return 0;
}
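/*
 * Illustrative sketch (not part of the original driver): how a single
 * sector would be encrypted with the per-sector IV convention that
 * cryptoloop_transfer() relies on -- a zeroed 16-byte IV whose first
 * word is the little-endian sector number. Assumes the same legacy
 * blkcipher API and a 512-byte LOOP_IV_SECTOR_SIZE; the function name
 * is made up.
 */
static int example_encrypt_sector(struct crypto_blkcipher *tfm,
				  struct page *page, unsigned offs,
				  sector_t sector)
{
	struct blkcipher_desc desc = {
		.tfm = tfm,
		.flags = CRYPTO_TFM_REQ_MAY_SLEEP,
	};
	struct scatterlist sg = { NULL, };
	u32 iv[4] = { 0, };

	iv[0] = cpu_to_le32(sector & 0xffffffff);
	sg.page = page;
	sg.offset = offs;
	sg.length = LOOP_IV_SECTOR_SIZE;

	desc.info = iv;	/* the cipher reads the IV from desc.info */
	return crypto_blkcipher_crt(tfm)->encrypt(&desc, &sg, &sg,
						  LOOP_IV_SECTOR_SIZE);
}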
static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->decrypt);
}
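/*
 * The encrypt-side callback is symmetric to the decrypt one above; a
 * sketch assuming the same cryptd_blkcipher_crypt() helper, differing
 * only in which crt handler is passed down:
 */
static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->encrypt);
}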
int __ablk_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct blkcipher_desc desc;

	desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
	desc.info = req->info;
	desc.flags = 0;

	return crypto_blkcipher_crt(desc.tfm)->encrypt(
		&desc, req->dst, req->src, req->nbytes);
}
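/*
 * Sketch of the async entry point that would wrap __ablk_encrypt():
 * when SIMD is unusable the request is bounced to cryptd, otherwise
 * the synchronous path above runs directly. This follows the same
 * pattern as the ablk_decrypt() variants shown later in this section.
 */
int ablk_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (!may_use_simd()) {
		struct ablkcipher_request *cryptd_req =
			ablkcipher_request_ctx(req);

		*cryptd_req = *req;
		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);

		return crypto_ablkcipher_encrypt(cryptd_req);
	}

	return __ablk_encrypt(req);
}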
static int ablk_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (!irq_fpu_usable()) {
		/* FPU not usable in this context: queue to cryptd */
		struct ablkcipher_request *cryptd_req =
			ablkcipher_request_ctx(req);

		memcpy(cryptd_req, req, sizeof(*req));
		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);

		return crypto_ablkcipher_decrypt(cryptd_req);
	} else {
		struct blkcipher_desc desc;

		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
		desc.info = req->info;
		desc.flags = 0;

		return crypto_blkcipher_crt(desc.tfm)->decrypt(
			&desc, req->dst, req->src, req->nbytes);
	}
}
int ablk_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (!may_use_simd()) {
		struct ablkcipher_request *cryptd_req =
			ablkcipher_request_ctx(req);

		*cryptd_req = *req;
		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);

		return crypto_ablkcipher_decrypt(cryptd_req);
	} else {
		struct blkcipher_desc desc;

		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
		desc.info = req->info;
		desc.flags = 0;

		return crypto_blkcipher_crt(desc.tfm)->decrypt(
			&desc, req->dst, req->src, req->nbytes);
	}
}
static int aml_keybox_aes_encrypt(const void *key, int key_len,
				  const u8 *aes_iv, void *dst,
				  size_t *dst_len, const void *src,
				  size_t src_len)
{
	struct scatterlist sg_in[1], sg_out[1];
	struct crypto_blkcipher *tfm = aml_keybox_crypto_alloc_cipher();
	struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
	void *iv;
	int ivsize;
	int ret;

	if (IS_ERR(tfm)) {
		printk("%s:%d, crypto_alloc failed\n", __func__, __LINE__);
		return PTR_ERR(tfm);
	}
	if (src_len & 0x0f) {
		printk("%s:%d, src_len %zu is not 16-byte aligned\n",
		       __func__, __LINE__, src_len);
		crypto_free_blkcipher(tfm);
		return -EINVAL;
	}

	*dst_len = src_len;

	crypto_blkcipher_setkey((void *)tfm, key, key_len);
	sg_init_table(sg_in, 1);
	sg_set_buf(&sg_in[0], src, src_len);
	sg_init_table(sg_out, 1);
	sg_set_buf(sg_out, dst, *dst_len);

	iv = crypto_blkcipher_crt(tfm)->iv;
	ivsize = crypto_blkcipher_ivsize(tfm);
	memcpy(iv, aes_iv, ivsize);

	ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in, src_len);
	crypto_free_blkcipher(tfm);
	if (ret < 0) {
		printk("%s:%d, blkcipher encrypt failed %d\n",
		       __func__, __LINE__, ret);
		return ret;
	}
	return 0;
}

static int aml_keybox_aes_decrypt(const void *key, int key_len,
				  const u8 *aes_iv, void *dst,
				  size_t *dst_len, const void *src,
				  size_t src_len)
{
	struct scatterlist sg_in[1], sg_out[1];
	struct crypto_blkcipher *tfm = aml_keybox_crypto_alloc_cipher();
	struct blkcipher_desc desc = { .tfm = tfm };
	void *iv;
	int ivsize;
	int ret;

	if (IS_ERR(tfm)) {
		printk("%s:%d, crypto_alloc failed\n", __func__, __LINE__);
		return PTR_ERR(tfm);
	}
	if (src_len & 0x0f) {
		printk("%s:%d, src_len %zu is not 16-byte aligned\n",
		       __func__, __LINE__, src_len);
		crypto_free_blkcipher(tfm);
		return -EINVAL;
	}

	crypto_blkcipher_setkey((void *)tfm, key, key_len);
	sg_init_table(sg_in, 1);
	sg_init_table(sg_out, 1);
	sg_set_buf(sg_in, src, src_len);
	sg_set_buf(&sg_out[0], dst, *dst_len);

	iv = crypto_blkcipher_crt(tfm)->iv;
	ivsize = crypto_blkcipher_ivsize(tfm);
	memcpy(iv, aes_iv, ivsize);

	ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
	crypto_free_blkcipher(tfm);
	if (ret < 0) {
		printk("%s:%d, blkcipher decrypt failed %d\n",
		       __func__, __LINE__, ret);
		return ret;
	}

	*dst_len = src_len;
	return 0;
}

int aes_crypto_encrypt(void *dst, size_t *dst_len, const void *src,
		       size_t src_len)
{
	unsigned char iv_aes[16];
	unsigned char key_aes[32];

	memcpy(iv_aes, &default_AESkey[0], 16);
	memcpy(key_aes, &default_AESkey[16], 32);

	return aml_keybox_aes_encrypt(key_aes, sizeof(key_aes), iv_aes,
				      dst, dst_len, src, src_len);
}
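/*
 * Illustrative caller (not from the original file): the input length
 * must be a multiple of the AES block size, since
 * aml_keybox_aes_encrypt() rejects unaligned lengths, and the
 * destination buffer must be at least as large as the source.
 */
static int example_keybox_encrypt(void)
{
	u8 plain[16], cipher[16];
	size_t out_len = sizeof(cipher);

	memset(plain, 0xa5, sizeof(plain));
	return aes_crypto_encrypt(cipher, &out_len, plain, sizeof(plain));
}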
/*
 * CC-MAC function WUSB1.0[6.5]
 *
 * Take a data string and produce the encrypted CBC Counter-mode MIC.
 *
 * Note the names for most function arguments are made to (more or
 * less) match those used in the pseudo-function definition given in
 * WUSB1.0[6.5].
 *
 * @tfm_cbc: CBC(AES) blkcipher handle (initialized)
 *
 * @tfm_aes: AES cipher handle (initialized)
 *
 * @mic: buffer for placing the computed MIC (Message Integrity
 *       Code). This is exactly 8 bytes, and we expect the buffer to
 *       be at least eight bytes in length.
 *
 * @key: 128-bit symmetric key
 *
 * @n: CCM nonce
 *
 * @a: ASCII string, 14 bytes long (zero padded if needed; we use
 *     exactly 14 bytes).
 *
 * @b: data stream to be processed; cannot be a global or const local
 *     (it will confuse the scatterlists).
 *
 * @blen: size of @b.
 *
 * Still not very clear how this is done, but it looks like this: we
 * create block B0 (as WUSB1.0[6.5] says), then we AES-crypt it with
 * @key. We bytewise XOR B0 with B1 (1) and AES-crypt that. Then we
 * take the payload and divide it into 16-byte blocks, XOR each with
 * the previous crypto result and crypt it, repeating with the output
 * of the previous block -- which is what AES CBC mode does. So we use
 * the CBC(AES) blkcipher, which does precisely that. The IV (Initial
 * Vector) is 16 bytes and is set to zero.
 *
 * See RFC 3610. Linux crypto has a CBC implementation, but the
 * documentation is scarce, to say the least, and the example code is
 * so intricate that it is difficult to understand how things work.
 * Most of this is guesswork -- bite me.
 *
 * (1) Created as 6.5 says, again, using as l(a) 'Blen + 14' and
 *     using the 14 bytes of @a to fill up
 *     b1.{mac_header,e0,security_reserved,padding}.
 *
 * NOTE: The definition of l(a) in WUSB1.0[6.5] vs the definition of
 *       l(m) is orthogonal; they bear no relationship, so it is not
 *       in conflict with the parameter relation that WUSB1.0[6.4.2]
 *       defines.
 *
 * NOTE: WUSB1.0[A.1]: the Host Nonce is missing a nibble (1e); fixed
 *       in the first errata released on 2005/07.
 *
 * NOTE: we need to clear the IV to zero at each invocation to make
 *       sure we start with a fresh, empty Initial Vector, so that the
 *       CBC works correctly.
 *
 * NOTE: if blen is not aligned to the block size, we pad with zeros;
 *       that's what the fourth scatterlist entry (sg[3]) is for.
 *       Maybe there is a smarter way to do this.
 */
static int wusb_ccm_mac(struct crypto_blkcipher *tfm_cbc,
			struct crypto_cipher *tfm_aes, void *mic,
			const struct aes_ccm_nonce *n,
			const struct aes_ccm_label *a, const void *b,
			size_t blen)
{
	int result = 0;
	struct blkcipher_desc desc;
	struct aes_ccm_b0 b0;
	struct aes_ccm_b1 b1;
	struct aes_ccm_a ax;
	struct scatterlist sg[4], sg_dst;
	void *iv, *dst_buf;
	size_t ivsize, dst_size;
	const u8 bzero[16] = { 0 };
	size_t zero_padding;

	/*
	 * These checks should be compile-time optimized out;
	 * ensure @a fills b1's mac_header and following fields.
	 */
	WARN_ON(sizeof(*a) != sizeof(b1) - sizeof(b1.la));
	WARN_ON(sizeof(b0) != sizeof(struct aes_ccm_block));
	WARN_ON(sizeof(b1) != sizeof(struct aes_ccm_block));
	WARN_ON(sizeof(ax) != sizeof(struct aes_ccm_block));

	result = -ENOMEM;
	zero_padding = blen % sizeof(struct aes_ccm_block);
	if (zero_padding)
		zero_padding = sizeof(struct aes_ccm_block) - zero_padding;
	dst_size = blen + sizeof(b0) + sizeof(b1) + zero_padding;
	dst_buf = kzalloc(dst_size, GFP_KERNEL);
	if (dst_buf == NULL) {
		printk(KERN_ERR "E: can't alloc destination buffer\n");
		goto error_dst_buf;
	}

	iv = crypto_blkcipher_crt(tfm_cbc)->iv;
	ivsize = crypto_blkcipher_ivsize(tfm_cbc);
	memset(iv, 0, ivsize);

	/* Setup B0 */
	b0.flags = 0x59;	/* Format B0 */
	b0.ccm_nonce = *n;
	b0.lm = cpu_to_be16(0);	/* WUSB1.0[6.5] sez l(m) is 0 */

	/*
	 * Setup B1
	 *
	 * The WUSB spec is anything but clear! WUSB1.0[6.5] says to
	 * initialize B1 from A with 'l(a) = blen + 14' -- after
	 * clarification, it means to use A's contents for the MAC
	 * header, E0, security reserved and padding fields.
	 */
	b1.la = cpu_to_be16(blen + 14);
	memcpy(&b1.mac_header, a, sizeof(*a));

	sg_init_table(sg, ARRAY_SIZE(sg));
	sg_set_buf(&sg[0], &b0, sizeof(b0));
	sg_set_buf(&sg[1], &b1, sizeof(b1));
	sg_set_buf(&sg[2], b, blen);
	/* 0 if well behaved :) */
	sg_set_buf(&sg[3], bzero, zero_padding);
	sg_init_one(&sg_dst, dst_buf, dst_size);

	desc.tfm = tfm_cbc;
	desc.flags = 0;
	result = crypto_blkcipher_encrypt(&desc, &sg_dst, sg, dst_size);
	if (result < 0) {
		printk(KERN_ERR "E: can't compute CBC-MAC tag (MIC): %d\n",
		       result);
		goto error_cbc_crypt;
	}

	/*
	 * Now we crypt the MIC Tag (*iv) with Ax -- values per
	 * WUSB1.0[6.5]. The procedure is to AES-crypt the A0 block and
	 * XOR the MIC tag against it; we only do the first 8 bytes and
	 * place the result directly in the destination buffer.
	 *
	 * POS Crypto API: size is assumed to be AES's block size.
	 * Thanks for documenting it -- tip taken from airo.c.
	 */
	ax.flags = 0x01;	/* as per WUSB 1.0 spec */
	ax.ccm_nonce = *n;
	ax.counter = 0;
	crypto_cipher_encrypt_one(tfm_aes, (void *)&ax, (void *)&ax);
	bytewise_xor(mic, &ax, iv, 8);
	result = 8;

error_cbc_crypt:
	kfree(dst_buf);
error_dst_buf:
	return result;
}
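/*
 * bytewise_xor() is used above but not defined in this excerpt; a
 * minimal implementation consistent with its call site (XOR of two
 * possibly unaligned buffers, here the AES-crypted A0 block against
 * the last CBC block left in the IV buffer, into @mic) would be:
 */
static void bytewise_xor(void *_bo, const void *_bi1, const void *_bi2,
			 size_t size)
{
	u8 *bo = _bo;
	const u8 *bi1 = _bi1, *bi2 = _bi2;
	size_t itr;

	for (itr = 0; itr < size; itr++)
		bo[itr] = bi1[itr] ^ bi2[itr];
}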
static int ceph_aes_encrypt(const void *key, int key_len, void *dst,
			    size_t *dst_len, const void *src, size_t src_len)
{
	struct scatterlist sg_in[2], sg_out[1];
	struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
	struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
	int ret;
	void *iv;
	int ivsize;
	size_t zero_padding = (0x10 - (src_len & 0x0f));
	char pad[16];

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* PKCS#7-style pad: each pad byte holds the pad length (1..16) */
	memset(pad, zero_padding, zero_padding);

	*dst_len = src_len + zero_padding;

	crypto_blkcipher_setkey((void *)tfm, key, key_len);
	sg_init_table(sg_in, 2);
	sg_set_buf(&sg_in[0], src, src_len);
	sg_set_buf(&sg_in[1], pad, zero_padding);
	sg_init_table(sg_out, 1);
	sg_set_buf(sg_out, dst, *dst_len);

	iv = crypto_blkcipher_crt(tfm)->iv;
	ivsize = crypto_blkcipher_ivsize(tfm);
	memcpy(iv, aes_iv, ivsize);

	ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
				       src_len + zero_padding);
	crypto_free_blkcipher(tfm);
	if (ret < 0) {
		pr_err("ceph_aes_crypt failed %d\n", ret);
		return ret;
	}
	return 0;
}

static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
			     size_t *dst_len,
			     const void *src1, size_t src1_len,
			     const void *src2, size_t src2_len)
{
	struct scatterlist sg_in[3], sg_out[1];
	struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
	struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
	int ret;
	void *iv;
	int ivsize;
	size_t zero_padding = (0x10 - ((src1_len + src2_len) & 0x0f));
	char pad[16];

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	memset(pad, zero_padding, zero_padding);

	*dst_len = src1_len + src2_len + zero_padding;

	crypto_blkcipher_setkey((void *)tfm, key, key_len);
	sg_init_table(sg_in, 3);
	sg_set_buf(&sg_in[0], src1, src1_len);
	sg_set_buf(&sg_in[1], src2, src2_len);
	sg_set_buf(&sg_in[2], pad, zero_padding);
	sg_init_table(sg_out, 1);
	sg_set_buf(sg_out, dst, *dst_len);

	iv = crypto_blkcipher_crt(tfm)->iv;
	ivsize = crypto_blkcipher_ivsize(tfm);
	memcpy(iv, aes_iv, ivsize);

	ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
				       src1_len + src2_len + zero_padding);
	crypto_free_blkcipher(tfm);
	if (ret < 0) {
		pr_err("ceph_aes_crypt2 failed %d\n", ret);
		return ret;
	}
	return 0;
}

static int ceph_aes_decrypt(const void *key, int key_len, void *dst,
			    size_t *dst_len, const void *src, size_t src_len)
{
	struct scatterlist sg_in[1], sg_out[2];
	struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
	struct blkcipher_desc desc = { .tfm = tfm };
	char pad[16];
	void *iv;
	int ivsize;
	int ret;
	int last_byte;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	crypto_blkcipher_setkey((void *)tfm, key, key_len);
	sg_init_table(sg_in, 1);
	sg_init_table(sg_out, 2);
	sg_set_buf(sg_in, src, src_len);
	sg_set_buf(&sg_out[0], dst, *dst_len);
	sg_set_buf(&sg_out[1], pad, sizeof(pad));

	iv = crypto_blkcipher_crt(tfm)->iv;
	ivsize = crypto_blkcipher_ivsize(tfm);
	memcpy(iv, aes_iv, ivsize);

	ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
	crypto_free_blkcipher(tfm);
	if (ret < 0) {
		pr_err("ceph_aes_decrypt failed %d\n", ret);
		return ret;
	}

	/* The last plaintext byte holds the pad length, 1..16 */
	if (src_len <= *dst_len)
		last_byte = ((char *)dst)[src_len - 1];
	else
		last_byte = pad[src_len - *dst_len - 1];
	if (last_byte > 0 && last_byte <= 16 && src_len >= last_byte) {
		*dst_len = src_len - last_byte;
	} else {
		pr_err("ceph_aes_decrypt got bad padding %d on src len %d\n",
		       last_byte, (int)src_len);
		return -EPERM;	/* bad padding */
	}
	return 0;
}

static int ceph_aes_decrypt2(const void *key, int key_len,
			     void *dst1, size_t *dst1_len,
			     void *dst2, size_t *dst2_len,
			     const void *src, size_t src_len)
{
	struct scatterlist sg_in[1], sg_out[3];
	struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
	struct blkcipher_desc desc = { .tfm = tfm };
	char pad[16];
	void *iv;
	int ivsize;
	int ret;
	int last_byte;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	sg_init_table(sg_in, 1);
	sg_set_buf(sg_in, src, src_len);
	sg_init_table(sg_out, 3);
	sg_set_buf(&sg_out[0], dst1, *dst1_len);
	sg_set_buf(&sg_out[1], dst2, *dst2_len);
	sg_set_buf(&sg_out[2], pad, sizeof(pad));

	crypto_blkcipher_setkey((void *)tfm, key, key_len);
	iv = crypto_blkcipher_crt(tfm)->iv;
	ivsize = crypto_blkcipher_ivsize(tfm);
	memcpy(iv, aes_iv, ivsize);

	ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
	crypto_free_blkcipher(tfm);
	if (ret < 0) {
		pr_err("ceph_aes_decrypt2 failed %d\n", ret);
		return ret;
	}

	/* Read the pad length from the last plaintext byte, wherever
	 * it landed across dst1/dst2/pad */
	if (src_len <= *dst1_len)
		last_byte = ((char *)dst1)[src_len - 1];
	else if (src_len <= *dst1_len + *dst2_len)
		last_byte = ((char *)dst2)[src_len - *dst1_len - 1];
	else
		last_byte = pad[src_len - *dst1_len - *dst2_len - 1];
	if (last_byte > 0 && last_byte <= 16 && src_len >= last_byte) {
		src_len -= last_byte;
	} else {
		pr_err("ceph_aes_decrypt2 got bad padding %d on src len %d\n",
		       last_byte, (int)src_len);
		return -EPERM;	/* bad padding */
	}

	if (src_len < *dst1_len) {
		*dst1_len = src_len;
		*dst2_len = 0;
	} else {
		*dst2_len = src_len - *dst1_len;
	}
	return 0;
}

int ceph_decrypt(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
		 const void *src, size_t src_len)
{
	switch (secret->type) {
	case CEPH_CRYPTO_NONE:
		if (*dst_len < src_len)
			return -ERANGE;
		memcpy(dst, src, src_len);
		*dst_len = src_len;
		return 0;
	case CEPH_CRYPTO_AES:
		return ceph_aes_decrypt(secret->key, secret->len,
					dst, dst_len, src, src_len);
	default:
		return -EINVAL;
	}
}

int ceph_decrypt2(struct ceph_crypto_key *secret,
		  void *dst1, size_t *dst1_len,
		  void *dst2, size_t *dst2_len,
		  const void *src, size_t src_len)
{
	size_t t;

	switch (secret->type) {
	case CEPH_CRYPTO_NONE:
		if (*dst1_len + *dst2_len < src_len)
			return -ERANGE;
		t = min(*dst1_len, src_len);
		memcpy(dst1, src, t);
		*dst1_len = t;
		src += t;
		src_len -= t;
		if (src_len) {
			t = min(*dst2_len, src_len);
			memcpy(dst2, src, t);
			*dst2_len = t;
		}
		return 0;
	case CEPH_CRYPTO_AES:
		return ceph_aes_decrypt2(secret->key, secret->len,
					 dst1, dst1_len, dst2, dst2_len,
					 src, src_len);
	default:
		return -EINVAL;
	}
}

int ceph_encrypt(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
		 const void *src, size_t src_len)
{
	switch (secret->type) {
	case CEPH_CRYPTO_NONE:
		if (*dst_len < src_len)
			return -ERANGE;
		memcpy(dst, src, src_len);
		*dst_len = src_len;
		return 0;
	case CEPH_CRYPTO_AES:
		return ceph_aes_encrypt(secret->key, secret->len,
					dst, dst_len, src, src_len);
	default:
		return -EINVAL;
	}
}

int ceph_encrypt2(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
		  const void *src1, size_t src1_len,
		  const void *src2, size_t src2_len)
{
	switch (secret->type) {
	case CEPH_CRYPTO_NONE:
		if (*dst_len < src1_len + src2_len)
			return -ERANGE;
		memcpy(dst, src1, src1_len);
		memcpy(dst + src1_len, src2, src2_len);
		*dst_len = src1_len + src2_len;
		return 0;
	case CEPH_CRYPTO_AES:
		return ceph_aes_encrypt2(secret->key, secret->len,
					 dst, dst_len, src1, src1_len,
					 src2, src2_len);
	default:
		return -EINVAL;
	}
}

int ceph_key_instantiate(struct key *key, const void *data, size_t datalen)
{
	struct ceph_crypto_key *ckey;
	int ret;
	void *p;

	ret = -EINVAL;
	if (datalen <= 0 || datalen > 32767 || !data)
		goto err;

	ret = key_payload_reserve(key, datalen);
	if (ret < 0)
		goto err;

	ret = -ENOMEM;
	ckey = kmalloc(sizeof(*ckey), GFP_KERNEL);
	if (!ckey)
		goto err;

	p = (void *)data;
	ret = ceph_crypto_key_decode(ckey, &p, (char *)data + datalen);
	if (ret < 0)
		goto err_ckey;

	key->payload.data = ckey;
	return 0;

err_ckey:
	kfree(ckey);
err:
	return ret;
}

int ceph_key_match(const struct key *key, const void *description)
{
	return strcmp(key->description, description) == 0;
}

void ceph_key_destroy(struct key *key)
{
	struct ceph_crypto_key *ckey = key->payload.data;

	ceph_crypto_key_destroy(ckey);
	kfree(ckey);
}

struct key_type key_type_ceph = {
	.name		= "ceph",
	.instantiate	= ceph_key_instantiate,
	.match		= ceph_key_match,
	.destroy	= ceph_key_destroy,
};

int ceph_crypto_init(void)
{
	return register_key_type(&key_type_ceph);
}

void ceph_crypto_shutdown(void)
{
	unregister_key_type(&key_type_ceph);
}
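/*
 * Illustrative round trip (not from the original file): with
 * CEPH_CRYPTO_AES the ciphertext grows by 1..16 pad bytes, so the
 * destination buffer must be sized to src_len rounded up to the next
 * 16-byte boundary. Buffer sizes and names here are made up.
 */
static int example_ceph_roundtrip(struct ceph_crypto_key *secret)
{
	char plain[13] = "hello, world";	/* 13 bytes incl. NUL */
	char cipher[16];			/* 13 padded up to 16 */
	char check[16];
	size_t clen = sizeof(cipher), plen = sizeof(check);
	int ret;

	ret = ceph_encrypt(secret, cipher, &clen, plain, sizeof(plain));
	if (ret)
		return ret;

	/* plen comes back as 13 once the pad bytes are stripped */
	return ceph_decrypt(secret, check, &plen, cipher, clen);
}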
static int crypto_fpu_encrypt(struct blkcipher_desc *desc_in,
			      struct scatterlist *dst, struct scatterlist *src,
			      unsigned int nbytes)
{
	int err;
	struct crypto_fpu_ctx *ctx = crypto_blkcipher_ctx(desc_in->tfm);
	struct crypto_blkcipher *child = ctx->child;
	struct blkcipher_desc desc = {
		.tfm = child,
		.info = desc_in->info,
		.flags = desc_in->flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
	};

	kernel_fpu_begin();
	err = crypto_blkcipher_crt(desc.tfm)->encrypt(&desc, dst, src, nbytes);
	kernel_fpu_end();
	return err;
}

static int crypto_fpu_decrypt(struct blkcipher_desc *desc_in,
			      struct scatterlist *dst, struct scatterlist *src,
			      unsigned int nbytes)
{
	int err;
	struct crypto_fpu_ctx *ctx = crypto_blkcipher_ctx(desc_in->tfm);
	struct crypto_blkcipher *child = ctx->child;
	struct blkcipher_desc desc = {
		.tfm = child,
		.info = desc_in->info,
		.flags = desc_in->flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
	};

	kernel_fpu_begin();
	err = crypto_blkcipher_crt(desc.tfm)->decrypt(&desc, dst, src, nbytes);
	kernel_fpu_end();
	return err;
}

static int crypto_fpu_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
	struct crypto_fpu_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *cipher;

	cipher = crypto_spawn_blkcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	return 0;
}

static void crypto_fpu_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_fpu_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(ctx->child);
}

static struct crypto_instance *crypto_fpu_alloc(struct rtattr **tb)
{
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
	if (err)
		return ERR_PTR(err);

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return ERR_CAST(alg);

	inst = crypto_alloc_instance("fpu", alg);
	if (IS_ERR(inst))
		goto out_put_alg;

	inst->alg.cra_flags = alg->cra_flags;
	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;
	inst->alg.cra_type = alg->cra_type;
	inst->alg.cra_blkcipher.ivsize = alg->cra_blkcipher.ivsize;
	inst->alg.cra_blkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	inst->alg.cra_blkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
	inst->alg.cra_ctxsize = sizeof(struct crypto_fpu_ctx);
	inst->alg.cra_init = crypto_fpu_init_tfm;
	inst->alg.cra_exit = crypto_fpu_exit_tfm;
	inst->alg.cra_blkcipher.setkey = crypto_fpu_setkey;
	inst->alg.cra_blkcipher.encrypt = crypto_fpu_encrypt;
	inst->alg.cra_blkcipher.decrypt = crypto_fpu_decrypt;

out_put_alg:
	crypto_mod_put(alg);
	return inst;
}

static void crypto_fpu_free(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(inst);
}
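/*
 * The alloc/free hooks above are tied together by a crypto_template;
 * a registration sketch consistent with how such templates are wired
 * up (the crypto_fpu_init/crypto_fpu_exit names are assumed):
 */
static struct crypto_template crypto_fpu_tmpl = {
	.name = "fpu",
	.alloc = crypto_fpu_alloc,
	.free = crypto_fpu_free,
	.module = THIS_MODULE,
};

int __init crypto_fpu_init(void)
{
	return crypto_register_template(&crypto_fpu_tmpl);
}

void crypto_fpu_exit(void)
{
	crypto_unregister_template(&crypto_fpu_tmpl);
}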