int crypto_init_cipher_ops(struct crypto_tfm *tfm)
{
	struct cipher_tfm *ops = &tfm->crt_cipher;
	struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;

	ops->cit_setkey = setkey;
	ops->cit_encrypt_one = crypto_tfm_alg_alignmask(tfm) ?
		cipher_encrypt_unaligned : cipher->cia_encrypt;
	ops->cit_decrypt_one = crypto_tfm_alg_alignmask(tfm) ?
		cipher_decrypt_unaligned : cipher->cia_decrypt;

	return 0;
}
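/*
 * A minimal userspace sketch (not kernel code) of the dispatch above: when
 * the algorithm declares a nonzero alignmask, the per-block entry points are
 * routed through wrappers that can bounce misaligned buffers; otherwise the
 * raw cia_encrypt/cia_decrypt hooks are installed directly.  All toy_* names
 * below are hypothetical.
 */
#include <stdio.h>

typedef void (*crypt_fn)(unsigned char *dst, const unsigned char *src);

static void toy_encrypt(unsigned char *dst, const unsigned char *src)
{
	dst[0] = src[0] ^ 0xff;	/* stand-in for a real block cipher */
}

static void toy_encrypt_unaligned(unsigned char *dst, const unsigned char *src)
{
	/* a real wrapper would bounce misaligned buffers first */
	toy_encrypt(dst, src);
}

int main(void)
{
	unsigned long alignmask = 3;	/* e.g. cipher wants 4-byte alignment */
	crypt_fn encrypt_one = alignmask ? toy_encrypt_unaligned : toy_encrypt;
	unsigned char in = 0x00, out = 0x00;

	encrypt_one(&out, &in);
	printf("%#x\n", out);		/* prints 0xff */
	return 0;
}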
static int crypto_ccm_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct ccm_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_ccm_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_cipher *cipher;
	struct crypto_ablkcipher *ctr;
	unsigned long align;
	int err;

	cipher = crypto_spawn_cipher(&ictx->cipher);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctr = crypto_spawn_skcipher(&ictx->ctr);
	err = PTR_ERR(ctr);
	if (IS_ERR(ctr))
		goto err_free_cipher;

	ctx->cipher = cipher;
	ctx->ctr = ctr;

	align = crypto_tfm_alg_alignmask(tfm);
	align &= ~(crypto_tfm_ctx_alignment() - 1);
	tfm->crt_aead.reqsize = align +
				sizeof(struct crypto_ccm_req_priv_ctx) +
				crypto_ablkcipher_reqsize(ctr);

	return 0;

err_free_cipher:
	crypto_free_cipher(cipher);
	return err;
}
static void cipher_decrypt_unaligned(struct crypto_tfm *tfm,
				     u8 *dst, const u8 *src)
{
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;

	if (unlikely(((unsigned long)dst | (unsigned long)src) & alignmask)) {
		cipher_crypt_unaligned(cipher->cia_decrypt, tfm, dst, src);
		return;
	}

	cipher->cia_decrypt(tfm, dst, src);
}
static int update2(struct hash_desc *desc,
		   struct scatterlist *sg, unsigned int nbytes)
{
	struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
	unsigned int alignmask = crypto_tfm_alg_alignmask(tfm);

	if (!nbytes)
		return 0;

	for (;;) {
		struct page *pg = sg_page(sg);
		unsigned int offset = sg->offset;
		unsigned int l = sg->length;

		if (unlikely(l > nbytes))
			l = nbytes;
		nbytes -= l;

		do {
			unsigned int bytes_from_page = min(l, ((unsigned int)
							   PAGE_SIZE) -
							   offset);
			char *src = crypto_kmap(pg, 0);
			char *p = src + offset;

			if (unlikely(offset & alignmask)) {
				/* Hash just enough bytes to reach the next
				 * alignmask + 1 boundary, then resume with
				 * aligned updates. */
				unsigned int bytes =
					alignmask + 1 - (offset & alignmask);
				bytes = min(bytes, bytes_from_page);
				tfm->__crt_alg->cra_digest.dia_update(tfm, p,
								      bytes);
				p += bytes;
				bytes_from_page -= bytes;
				l -= bytes;
			}
			tfm->__crt_alg->cra_digest.dia_update(tfm, p,
							      bytes_from_page);
			crypto_kunmap(src, 0);
			crypto_yield(desc->flags);
			offset = 0;
			pg++;
			l -= bytes_from_page;
		} while (l > 0);

		if (!nbytes)
			break;
		sg = scatterwalk_sg_next(sg);
	}

	return 0;
}
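/*
 * Sketch (userspace, hypothetical values) of the head-fixup arithmetic in
 * update2(): when the data starts at a misaligned offset, hash just enough
 * bytes to reach the next alignmask + 1 boundary, then continue at full
 * speed from an aligned offset.
 */
#include <stdio.h>

int main(void)
{
	unsigned int alignmask = 7;	/* block code wants 8-byte alignment */
	unsigned int offset = 13;	/* misaligned start within the page */

	if (offset & alignmask) {
		unsigned int head = alignmask + 1 - (offset & alignmask);

		printf("hash %u head bytes, then resume at offset %u\n",
		       head, offset + head);	/* 3 bytes, offset 16 */
	}
	return 0;
}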
static void cipher_crypt_unaligned(void (*fn)(struct crypto_tfm *, u8 *,
					      const u8 *),
				   struct crypto_tfm *tfm,
				   u8 *dst, const u8 *src)
{
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	unsigned int size = crypto_tfm_alg_blocksize(tfm);
	/* Over-allocate so an aligned block-sized window always fits. */
	u8 buffer[size + alignmask];
	u8 *tmp = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);

	/* Bounce the block through the aligned stack buffer. */
	memcpy(tmp, src, size);
	fn(tfm, tmp, tmp);
	memcpy(dst, tmp, size);
}
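/*
 * Userspace sketch of the over-allocate-and-align trick used above: a
 * buffer of size + alignmask bytes always contains a size-byte window whose
 * start is aligned to alignmask + 1 (a power of two).  ALIGN() is redefined
 * here with the kernel's semantics for the demo.
 */
#include <stdio.h>
#include <stdint.h>

#define ALIGN(x, a) (((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

int main(void)
{
	unsigned long alignmask = 15;	/* want 16-byte alignment */
	uint8_t buffer[16 + 15];	/* one 16-byte block + slack */
	uint8_t *tmp = (uint8_t *)ALIGN((uintptr_t)buffer, alignmask + 1);

	printf("buffer=%p tmp=%p tmp %% 16 = %lu\n",
	       (void *)buffer, (void *)tmp,
	       (unsigned long)((uintptr_t)tmp % 16));	/* always 0 */
	return 0;
}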
static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}
static void cipher_encrypt_unaligned(struct crypto_tfm *tfm,
				     u8 *dst, const u8 *src)
{
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;

#ifdef CONFIG_CRYPTO_FIPS
	if (unlikely(in_fips_err()))
		return;
#endif

	if (unlikely(((unsigned long)dst | (unsigned long)src) & alignmask)) {
		cipher_crypt_unaligned(cipher->cia_encrypt, tfm, dst, src);
		return;
	}

	cipher->cia_encrypt(tfm, dst, src);
}
static void cipher_crypt_unaligned(void (*fn)(struct crypto_tfm *, u8 *,
					      const u8 *),
				   struct crypto_tfm *tfm,
				   u8 *dst, const u8 *src)
{
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	unsigned int size = crypto_tfm_alg_blocksize(tfm);
	u8 buffer[size + alignmask];
	u8 *tmp = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);

#ifdef CONFIG_CRYPTO_FIPS
	if (unlikely(in_fips_err()))
		return;
#endif

	memcpy(tmp, src, size);
	fn(tfm, tmp, tmp);
	memcpy(dst, tmp, size);
}
static unsigned int crypt_slow(const struct cipher_desc *desc,
			       struct scatter_walk *in,
			       struct scatter_walk *out, unsigned int bsize)
{
	unsigned long alignmask = crypto_tfm_alg_alignmask(desc->tfm);
	u8 buffer[bsize * 2 + alignmask];
	u8 *src = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	u8 *dst = src + bsize;
	unsigned int n;

	n = scatterwalk_copychunks(src, in, bsize, 0);
	scatterwalk_advance(in, n);

	desc->prfn(desc, dst, src, bsize);

	n = scatterwalk_copychunks(dst, out, bsize, 1);
	scatterwalk_advance(out, n);

	return bsize;
}
static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm)
{
	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
	unsigned long addr;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;

	addr = (unsigned long)crypto_tfm_ctx(tfm);
	addr = ALIGN(addr, align);
	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
	crt->iv = (void *)addr;

	return 0;
}
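/*
 * Worked example (hypothetical numbers) of the crt_iv address computation
 * above: the IV lives directly behind the tfm context, with both the
 * context start and the context size rounded up to the alignment the
 * algorithm asked for.
 */
#include <stdio.h>
#include <stdint.h>

#define ALIGN(x, a) (((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

int main(void)
{
	uintptr_t ctx = 0x1004;		/* assumed context address */
	unsigned long ctxsize = 52;	/* assumed cra_ctxsize */
	unsigned long align = 8;	/* alignmask 7 -> align 8 */
	uintptr_t addr;

	addr = ALIGN(ctx, align);	/* 0x1008 */
	addr += ALIGN(ctxsize, align);	/* + 56 = 0x1040 */
	printf("iv at %#lx\n", (unsigned long)addr);
	return 0;
}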
static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	/* Wipe the temporary key copy before freeing it. */
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}
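/*
 * Userspace analogue (malloc/free instead of kmalloc/kfree) of the
 * setkey_unaligned() pattern: over-allocate, align, copy the key in, call
 * the real setkey on the aligned copy, then wipe the copy before freeing so
 * key material does not linger on the heap.  real_setkey() is hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#define ALIGN(x, a) (((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

static int real_setkey(const uint8_t *key, unsigned int keylen)
{
	printf("setkey %u bytes at %p\n", keylen, (const void *)key);
	return 0;
}

int main(void)
{
	const uint8_t key[16] = { 1, 2, 3 };
	unsigned int keylen = sizeof(key);
	unsigned long alignmask = 15;
	uint8_t *buffer = malloc(keylen + alignmask);
	uint8_t *aligned;
	int ret;

	if (!buffer)
		return 1;
	aligned = (uint8_t *)ALIGN((uintptr_t)buffer, alignmask + 1);
	memcpy(aligned, key, keylen);
	ret = real_setkey(aligned, keylen);
	memset(aligned, 0, keylen);	/* wipe the temporary key copy */
	free(buffer);
	return ret;
}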
static int crypto_rfc3686_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct crypto_skcipher_spawn *spawn = crypto_instance_ctx(inst);
	struct crypto_rfc3686_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_ablkcipher *cipher;
	unsigned long align;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;

	align = crypto_tfm_alg_alignmask(tfm);
	align &= ~(crypto_tfm_ctx_alignment() - 1);
	tfm->crt_ablkcipher.reqsize = align +
				      sizeof(struct crypto_rfc3686_req_ctx) +
				      crypto_ablkcipher_reqsize(cipher);

	return 0;
}
static int crypto_gcm_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct gcm_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_ablkcipher *ctr;
	struct crypto_ahash *ghash;
	unsigned long align;
	int err;

	ghash = crypto_spawn_ahash(&ictx->ghash);
	if (IS_ERR(ghash))
		return PTR_ERR(ghash);

	ctr = crypto_spawn_skcipher(&ictx->ctr);
	err = PTR_ERR(ctr);
	if (IS_ERR(ctr))
		goto err_free_hash;

	ctx->ctr = ctr;
	ctx->ghash = ghash;

	align = crypto_tfm_alg_alignmask(tfm);
	align &= ~(crypto_tfm_ctx_alignment() - 1);
	tfm->crt_aead.reqsize = align +
		offsetof(struct crypto_gcm_req_priv_ctx, u) +
		max(sizeof(struct ablkcipher_request) +
		    crypto_ablkcipher_reqsize(ctr),
		    sizeof(struct ahash_request) +
		    crypto_ahash_reqsize(ghash));

	return 0;

err_free_hash:
	crypto_free_ahash(ghash);
	return err;
}
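/*
 * Worked example (made-up sizes) of the reqsize formula above: the GCM
 * private context is followed by a union large enough for either the CTR
 * sub-request or the GHASH sub-request, plus whatever alignment padding
 * exceeds what the context allocator already guarantees.  All values below
 * are assumptions for illustration.
 */
#include <stdio.h>

static unsigned long max_ul(unsigned long a, unsigned long b)
{
	return a > b ? a : b;
}

int main(void)
{
	unsigned long alignmask = 15;	/* assumed algorithm alignmask */
	unsigned long ctx_align = 8;	/* assumed crypto_tfm_ctx_alignment() */
	unsigned long priv = 128;	/* assumed offsetof(priv ctx, u) */
	unsigned long ctr_req = 96;	/* assumed CTR sub-request size */
	unsigned long ghash_req = 72;	/* assumed GHASH sub-request size */
	unsigned long align, reqsize;

	align = alignmask & ~(ctx_align - 1);		/* 15 & ~7 = 8 */
	reqsize = align + priv + max_ul(ctr_req, ghash_req);
	printf("reqsize = %lu\n", reqsize);		/* 8 + 128 + 96 = 232 */
	return 0;
}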
static int crypt_iv_unaligned(struct cipher_desc *desc,
			      struct scatterlist *dst,
			      struct scatterlist *src,
			      unsigned int nbytes)
{
	struct crypto_tfm *tfm = desc->tfm;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	u8 *iv = desc->info;

	if (unlikely(((unsigned long)iv & alignmask))) {
		unsigned int ivsize = tfm->crt_cipher.cit_ivsize;
		u8 buffer[ivsize + alignmask];
		u8 *tmp = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
		int err;

		desc->info = memcpy(tmp, iv, ivsize);
		err = crypt(desc, dst, src, nbytes);
		memcpy(iv, tmp, ivsize);

		return err;
	}

	return crypt(desc, dst, src, nbytes);
}
static int async_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}

static int async_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}

static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					     u32 mask)
{
	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
	unsigned int len = alg->cra_ctxsize;

	type ^= CRYPTO_ALG_ASYNC;
	mask &= CRYPTO_ALG_ASYNC;
	if ((type & mask) && cipher->ivsize) {
		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
		len += cipher->ivsize;
	}

	return len;
}

static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	crt->setkey = async_setkey;
	crt->encrypt = async_encrypt;
	crt->decrypt = async_decrypt;
	crt->ivsize = alg->ivsize;

	return 0;
}

static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
	unsigned long addr;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;

	addr = (unsigned long)crypto_tfm_ctx(tfm);
	addr = ALIGN(addr, align);
	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
	crt->iv = (void *)addr;

	return 0;
}

static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	type ^= CRYPTO_ALG_ASYNC;
	mask &= CRYPTO_ALG_ASYNC;
	if (type & mask)
		return crypto_init_blkcipher_ops_sync(tfm);
	else
		return crypto_init_blkcipher_ops_async(tfm);
}

static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : blkcipher\n");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
	seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
	seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
}

const struct crypto_type crypto_blkcipher_type = {
	.ctxsize = crypto_blkcipher_ctxsize,
	.init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_blkcipher_show,
#endif
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");
/*
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 */
static int crypt(const struct cipher_desc *desc,
		 struct scatterlist *dst,
		 struct scatterlist *src,
		 unsigned int nbytes)
{
	struct scatter_walk walk_in, walk_out;
	struct crypto_tfm *tfm = desc->tfm;
	const unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
	unsigned int alignmask = crypto_tfm_alg_alignmask(tfm);
	unsigned long buffer = 0;

	if (!nbytes)
		return 0;

	if (nbytes % bsize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return -EINVAL;
	}

	scatterwalk_start(&walk_in, src);
	scatterwalk_start(&walk_out, dst);

	for (;;) {
		unsigned int n = nbytes;
		u8 *tmp = NULL;

		if (!scatterwalk_aligned(&walk_in, alignmask) ||
		    !scatterwalk_aligned(&walk_out, alignmask)) {
			if (!buffer) {
				buffer = __get_free_page(GFP_ATOMIC);
				if (!buffer)
					n = 0;
			}
			tmp = (u8 *)buffer;
		}

		scatterwalk_map(&walk_in, 0);
		scatterwalk_map(&walk_out, 1);

		n = scatterwalk_clamp(&walk_in, n);
		n = scatterwalk_clamp(&walk_out, n);

		if (likely(n >= bsize))
			n = crypt_fast(desc, &walk_in, &walk_out, n, tmp);
		else
			n = crypt_slow(desc, &walk_in, &walk_out, bsize);

		nbytes -= n;

		scatterwalk_done(&walk_in, 0, nbytes);
		scatterwalk_done(&walk_out, 1, nbytes);

		if (!nbytes)
			break;

		crypto_yield(tfm);
	}

	if (buffer)
		free_page(buffer);

	return 0;
}
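/*
 * Simplified userspace sketch of the clamping idea behind the
 * scatterwalk_clamp() calls above: each pass can only process as many bytes
 * as remain both in the request and before the boundary of the current
 * mapping on each walk (the real code clamps to the scatterlist entry; this
 * demo uses a bare page boundary and fabricated offsets).
 */
#include <stdio.h>

static unsigned int clamp_to_page(unsigned int n, unsigned int offset,
				  unsigned int page_size)
{
	unsigned int left_in_page = page_size - offset;

	return n < left_in_page ? n : left_in_page;
}

int main(void)
{
	unsigned int nbytes = 1000, in_off = 4000, out_off = 100;
	unsigned int n = nbytes;

	n = clamp_to_page(n, in_off, 4096);	/* 96 bytes left in page */
	n = clamp_to_page(n, out_off, 4096);	/* still 96 */
	printf("process %u bytes this pass\n", n);
	return 0;
}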
static int async_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}

static int async_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}

static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					     u32 mask)
{
	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
	unsigned int len = alg->cra_ctxsize;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
	    cipher->ivsize) {
		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
		len += cipher->ivsize;
	}

	return len;
}

static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	crt->setkey = async_setkey;
	crt->encrypt = async_encrypt;
	crt->decrypt = async_decrypt;
	if (!alg->ivsize) {
		crt->givencrypt = skcipher_null_givencrypt;
		crt->givdecrypt = skcipher_null_givdecrypt;
	}
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
	unsigned long addr;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;

	addr = (unsigned long)crypto_tfm_ctx(tfm);
	addr = ALIGN(addr, align);
	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
	crt->iv = (void *)addr;

	return 0;
}

static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
		return crypto_init_blkcipher_ops_sync(tfm);
	else
		return crypto_init_blkcipher_ops_async(tfm);
}

static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : blkcipher\n");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
	seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
	seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
	seq_printf(m, "geniv        : %s\n", alg->cra_blkcipher.geniv ?:
					     "<default>");
}

const struct crypto_type crypto_blkcipher_type = {
	.ctxsize = crypto_blkcipher_ctxsize,
	.init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_blkcipher_show,
#endif
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);

static int crypto_grab_nivcipher(struct crypto_skcipher_spawn *spawn,
				 const char *name, u32 type, u32 mask)
{
	struct crypto_alg *alg;
	int err;

	type = crypto_skcipher_type(type);
	mask = crypto_skcipher_mask(mask) | CRYPTO_ALG_GENIV;

	alg = crypto_alg_mod_lookup(name, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
	crypto_mod_put(alg);
	return err;
}

struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl,
					     struct rtattr **tb, u32 type,
					     u32 mask)
{
	struct {
		int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
			      unsigned int keylen);
		int (*encrypt)(struct ablkcipher_request *req);
		int (*decrypt)(struct ablkcipher_request *req);

		unsigned int min_keysize;
		unsigned int max_keysize;
		unsigned int ivsize;

		const char *geniv;
	} balg;
	const char *name;
	struct crypto_skcipher_spawn *spawn;
	struct crypto_attr_type *algt;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	algt = crypto_get_attr_type(tb);
	err = PTR_ERR(algt);
	if (IS_ERR(algt))
		return ERR_PTR(err);

	if ((algt->type ^ (CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV)) &
	    algt->mask)
		return ERR_PTR(-EINVAL);

	name = crypto_attr_alg_name(tb[1]);
	err = PTR_ERR(name);
	if (IS_ERR(name))
		return ERR_PTR(err);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);

	spawn = crypto_instance_ctx(inst);

	/* Ignore async algorithms if necessary. */
	mask |= crypto_requires_sync(algt->type, algt->mask);

	crypto_set_skcipher_spawn(spawn, inst);
	err = crypto_grab_nivcipher(spawn, name, type, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(spawn);

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_BLKCIPHER) {
		balg.ivsize = alg->cra_blkcipher.ivsize;
		balg.min_keysize = alg->cra_blkcipher.min_keysize;
		balg.max_keysize = alg->cra_blkcipher.max_keysize;

		balg.setkey = async_setkey;
		balg.encrypt = async_encrypt;
		balg.decrypt = async_decrypt;

		balg.geniv = alg->cra_blkcipher.geniv;
	} else {
		balg.ivsize = alg->cra_ablkcipher.ivsize;
		balg.min_keysize = alg->cra_ablkcipher.min_keysize;
		balg.max_keysize = alg->cra_ablkcipher.max_keysize;

		balg.setkey = alg->cra_ablkcipher.setkey;
		balg.encrypt = alg->cra_ablkcipher.encrypt;
		balg.decrypt = alg->cra_ablkcipher.decrypt;

		balg.geniv = alg->cra_ablkcipher.geniv;
	}

	err = -EINVAL;
	if (!balg.ivsize)
		goto err_drop_alg;

	/*
	 * This is only true if we're constructing an algorithm with its
	 * default IV generator.  For the default generator we elide the
	 * template name and double-check the IV generator.
	 */
	if (algt->mask & CRYPTO_ALG_GENIV) {
		if (!balg.geniv)
			balg.geniv = crypto_default_geniv(alg);
		err = -EAGAIN;
		if (strcmp(tmpl->name, balg.geniv))
			goto err_drop_alg;

		memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
		memcpy(inst->alg.cra_driver_name, alg->cra_driver_name,
		       CRYPTO_MAX_ALG_NAME);
	} else {
		err = -ENAMETOOLONG;
		if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, alg->cra_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_drop_alg;
		if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, alg->cra_driver_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_drop_alg;
	}

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV;
	inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;
	inst->alg.cra_type = &crypto_givcipher_type;

	inst->alg.cra_ablkcipher.ivsize = balg.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = balg.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = balg.max_keysize;
	inst->alg.cra_ablkcipher.geniv = balg.geniv;

	inst->alg.cra_ablkcipher.setkey = balg.setkey;
	inst->alg.cra_ablkcipher.encrypt = balg.encrypt;
	inst->alg.cra_ablkcipher.decrypt = balg.decrypt;

out:
	return inst;

err_drop_alg:
	crypto_drop_skcipher(spawn);
err_free_inst:
	kfree(inst);
	inst = ERR_PTR(err);
	goto out;
}
EXPORT_SYMBOL_GPL(skcipher_geniv_alloc);

void skcipher_geniv_free(struct crypto_instance *inst)
{
	crypto_drop_skcipher(crypto_instance_ctx(inst));
	kfree(inst);
}
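/*
 * Userspace demo of the name-construction idiom in skcipher_geniv_alloc():
 * snprintf() returns the length the string would have had, so a return
 * value of CRYPTO_MAX_ALG_NAME or more means the "template(algorithm)" name
 * was truncated and must be rejected (-ENAMETOOLONG in the kernel).  The
 * template and algorithm names below are just examples.
 */
#include <stdio.h>

#define CRYPTO_MAX_ALG_NAME 64

int main(void)
{
	char name[CRYPTO_MAX_ALG_NAME];

	if (snprintf(name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     "eseqiv", "cbc(aes)") >= CRYPTO_MAX_ALG_NAME)
		return 1;	/* would be -ENAMETOOLONG */
	printf("instance name: %s\n", name);	/* eseqiv(cbc(aes)) */
	return 0;
}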
	return 0;
}

static int update(struct hash_desc *desc,
		  struct scatterlist *sg, unsigned int nbytes)
{
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;
	return update2(desc, sg, nbytes);
}

static int final(struct hash_desc *desc, u8 *out)
{
	struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	struct digest_alg *digest = &tfm->__crt_alg->cra_digest;

	if (unlikely((unsigned long)out & alignmask)) {
		unsigned long align = alignmask + 1;
		unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm);
		u8 *dst = (u8 *)ALIGN(addr, align) +
			  ALIGN(tfm->__crt_alg->cra_ctxsize, align);

		digest->dia_final(tfm, dst);
		memcpy(out, dst, digest->dia_digestsize);
	} else
		digest->dia_final(tfm, out);

	return 0;
}
int crypto_init_cipher_ops(struct crypto_tfm *tfm)
{
	int ret = 0;
	struct cipher_tfm *ops = &tfm->crt_cipher;

	ops->cit_setkey = setkey;

	switch (tfm->crt_cipher.cit_mode) {
	case CRYPTO_TFM_MODE_ECB:
		ops->cit_encrypt = ecb_encrypt;
		ops->cit_decrypt = ecb_decrypt;
		break;

	case CRYPTO_TFM_MODE_CBC:
		ops->cit_encrypt = cbc_encrypt;
		ops->cit_decrypt = cbc_decrypt;
		ops->cit_encrypt_iv = cbc_encrypt_iv;
		ops->cit_decrypt_iv = cbc_decrypt_iv;
		break;

	case CRYPTO_TFM_MODE_CFB:
		ops->cit_encrypt = nocrypt;
		ops->cit_decrypt = nocrypt;
		ops->cit_encrypt_iv = nocrypt_iv;
		ops->cit_decrypt_iv = nocrypt_iv;
		break;

	case CRYPTO_TFM_MODE_CTR:
		ops->cit_encrypt = nocrypt;
		ops->cit_decrypt = nocrypt;
		ops->cit_encrypt_iv = nocrypt_iv;
		ops->cit_decrypt_iv = nocrypt_iv;
		break;

	default:
		BUG();
	}

	if (ops->cit_mode == CRYPTO_TFM_MODE_CBC) {
		unsigned long align;
		unsigned long addr;

		switch (crypto_tfm_alg_blocksize(tfm)) {
		case 8:
			ops->cit_xor_block = xor_64;
			break;

		case 16:
			ops->cit_xor_block = xor_128;
			break;

		default:
			printk(KERN_WARNING "%s: block size %u not supported\n",
			       crypto_tfm_alg_name(tfm),
			       crypto_tfm_alg_blocksize(tfm));
			ret = -EINVAL;
			goto out;
		}

		ops->cit_ivsize = crypto_tfm_alg_blocksize(tfm);
		align = crypto_tfm_alg_alignmask(tfm) + 1;
		addr = (unsigned long)crypto_tfm_ctx(tfm);
		addr = ALIGN(addr, align);
		addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
		ops->cit_iv = (void *)addr;
#ifdef CONFIG_CRYPTO_XCBC
		ret = crypto_alloc_xcbc_block(tfm);
#endif
	}

out:
	return ret;
}
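/*
 * The xor_64/xor_128 helpers selected above are not shown in this section;
 * the sketch below is a plausible userspace equivalent of the 16-byte case
 * (the kernel versions XOR u32 words rather than bytes).  It demonstrates
 * the CBC building block: dst_block = dst_block XOR src_block.
 */
#include <stdio.h>
#include <stdint.h>

static void xor_128(uint8_t *a, const uint8_t *b)
{
	int i;

	for (i = 0; i < 16; i++)
		a[i] ^= b[i];	/* one CBC block XOR */
}

int main(void)
{
	uint8_t iv[16] = { 0xaa };	/* remaining bytes are zero */
	uint8_t block[16] = { 0x55 };

	xor_128(iv, block);
	printf("%#x\n", iv[0]);		/* 0xaa ^ 0x55 = 0xff */
	return 0;
}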