/*
 * blkcipher_aead_walk_virt_block - begin a virtual-address scatterwalk whose
 * geometry is taken from an AEAD transform, but whose walk step size is
 * supplied explicitly by the caller.
 *
 * @desc:      blkcipher descriptor driving the walk
 * @walk:      walk state to initialise
 * @tfm:       AEAD transform providing block size, IV size and alignmask
 * @blocksize: step size to use for the walk itself
 *
 * Returns whatever blkcipher_walk_first() returns for the first segment.
 */
int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk,
				   struct crypto_aead *tfm,
				   unsigned int blocksize)
{
	/* Virtual-address walk: make sure the PHYS flag is cleared. */
	walk->flags &= ~BLKCIPHER_WALK_PHYS;

	/*
	 * Cipher geometry comes from the AEAD transform; the walk block
	 * size alone is overridden by the caller's argument.
	 */
	walk->cipher_blocksize = crypto_aead_blocksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);
	walk->walk_blocksize = blocksize;

	return blkcipher_walk_first(desc, walk);
}
/* This function is to get mtu size Currently this function is not used. */ static u32 get_mtu_size(struct crypto_aead *aead, int mtu) { u32 mtu_size = 1456; //todo ASF_FP_LINUX_CRYPTO_FENTRY; #if 0 //struct crypto_aead *aead = x->data; u32 blksize = ALIGN(crypto_aead_blocksize(aead), 4); unsigned int net_adj; // Tunnel mode int x_props_mode = XFRM_MODE_TUNNEL; //1 int x_props_header_len = 44; ASF_FP_LINUX_CRYPTO_DEBUG("Entered Function get_mtu_size()"); //switch (x->props.mode) { switch (x_props_mode) { case XFRM_MODE_TRANSPORT: case XFRM_MODE_BEET: net_adj = sizeof(struct iphdr); break; case XFRM_MODE_TUNNEL: net_adj = 0; break; default: BUG(); } ASF_FP_LINUX_CRYPTO_DEBUG("About to Exit Function get_mtu_size()"); //return ((mtu - x->props.header_len - crypto_aead_authsize(aead) - //return ((mtu - x_props_header_len - crypto_aead_authsize(aead) - //net_adj) & ~(blksize - 1)) + net_adj - 2; /* Return MTU as 1400 */ #endif ASF_FP_LINUX_CRYPTO_FEXIT; return mtu_size; }
/*
 * cryptodev_cipher_init - allocate and key a cipher transform for @out.
 *
 * @out:      cipher state to populate (tfm handle, geometry, request,
 *            completion result).
 *            NOTE(review): the error path tests out->async.request /
 *            out->async.s (and the aead counterparts) before freeing, so it
 *            assumes the caller handed in a zeroed *out — confirm at call
 *            sites.
 * @alg_name: crypto API algorithm name to look up.
 * @keyp:     key material.
 * @keylen:   key length in bytes.
 * @stream:   stored verbatim in out->stream.
 * @aead:     0 selects the ablkcipher path, nonzero the AEAD path.
 *
 * Returns 0 on success; -EINVAL on lookup/keylen/setkey failure, -ENOMEM on
 * allocation failure.  On failure everything acquired here is released.
 */
int cryptodev_cipher_init(struct cipher_data *out, const char *alg_name,
			  uint8_t *keyp, size_t keylen, int stream, int aead)
{
	int ret;

	if (aead == 0) {
		struct ablkcipher_alg *alg;

		/* Plain (non-AEAD) cipher: allocate the ablkcipher tfm. */
		out->async.s = crypto_alloc_ablkcipher(alg_name, 0, 0);
		if (unlikely(IS_ERR(out->async.s))) {
			ddebug(1, "Failed to load cipher %s", alg_name);
			return -EINVAL;
		}

		alg = crypto_ablkcipher_alg(out->async.s);
		if (alg != NULL) {
			/* Was correct key length supplied? */
			/* max_keysize == 0 means "no limit advertised" and
			 * skips the range check entirely. */
			if (alg->max_keysize > 0 &&
			    unlikely((keylen < alg->min_keysize) ||
				     (keylen > alg->max_keysize))) {
				ddebug(1, "Wrong keylen '%zu' for algorithm '%s'. Use %u to %u.",
				       keylen, alg_name, alg->min_keysize,
				       alg->max_keysize);
				ret = -EINVAL;
				goto error;
			}
		}

		/* Cache the transform geometry for later request setup. */
		out->blocksize = crypto_ablkcipher_blocksize(out->async.s);
		out->ivsize = crypto_ablkcipher_ivsize(out->async.s);
		out->alignmask = crypto_ablkcipher_alignmask(out->async.s);

		ret = crypto_ablkcipher_setkey(out->async.s, keyp, keylen);
	} else {
		/* AEAD cipher: allocate the aead tfm instead. */
		out->async.as = crypto_alloc_aead(alg_name, 0, 0);
		if (unlikely(IS_ERR(out->async.as))) {
			ddebug(1, "Failed to load cipher %s", alg_name);
			return -EINVAL;
		}

		out->blocksize = crypto_aead_blocksize(out->async.as);
		out->ivsize = crypto_aead_ivsize(out->async.as);
		out->alignmask = crypto_aead_alignmask(out->async.as);

		ret = crypto_aead_setkey(out->async.as, keyp, keylen);
	}

	/* Both branches leave the setkey result in ret. */
	if (unlikely(ret)) {
		ddebug(1, "Setting key failed for %s-%zu.",
		       alg_name, keylen*8);
		ret = -EINVAL;
		goto error;
	}

	out->stream = stream;
	out->aead = aead;

	/* Completion object used by the async callback (cryptodev_complete). */
	out->async.result = kzalloc(sizeof(*out->async.result), GFP_KERNEL);
	if (unlikely(!out->async.result)) {
		ret = -ENOMEM;
		goto error;
	}
	init_completion(&out->async.result->completion);

	if (aead == 0) {
		out->async.request = ablkcipher_request_alloc(out->async.s,
							      GFP_KERNEL);
		if (unlikely(!out->async.request)) {
			derr(1, "error allocating async crypto request");
			ret = -ENOMEM;
			goto error;
		}
		ablkcipher_request_set_callback(out->async.request,
						CRYPTO_TFM_REQ_MAY_BACKLOG,
						cryptodev_complete,
						out->async.result);
	} else {
		out->async.arequest = aead_request_alloc(out->async.as,
							 GFP_KERNEL);
		if (unlikely(!out->async.arequest)) {
			derr(1, "error allocating async crypto request");
			ret = -ENOMEM;
			goto error;
		}
		aead_request_set_callback(out->async.arequest,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  cryptodev_complete,
					  out->async.result);
	}

	out->init = 1;
	return 0;

error:
	/* Unwind in reverse acquisition order; kfree(NULL) is a no-op. */
	if (aead == 0) {
		if (out->async.request)
			ablkcipher_request_free(out->async.request);
		if (out->async.s)
			crypto_free_ablkcipher(out->async.s);
	} else {
		if (out->async.arequest)
			aead_request_free(out->async.arequest);
		if (out->async.as)
			crypto_free_aead(out->async.as);
	}
	kfree(out->async.result);
	return ret;
}