/* Allocate per-key WEP state: two ecb(arc4) transforms and a random IV seed.
 * Returns the new state or NULL on allocation failure. */
static void *lib80211_wep_init(int keyidx)
{
	struct lib80211_wep_data *wep;

	wep = kzalloc(sizeof(*wep), GFP_ATOMIC);
	if (!wep)
		goto err;
	wep->key_idx = keyidx;

	wep->tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(wep->tx_tfm)) {
		wep->tx_tfm = NULL;
		goto err;
	}

	wep->rx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(wep->rx_tfm)) {
		wep->rx_tfm = NULL;
		goto err;
	}

	/* start WEP IV from a random value */
	get_random_bytes(&wep->iv, 4);

	return wep;

err:
	if (wep) {
		if (wep->tx_tfm)
			crypto_free_blkcipher(wep->tx_tfm);
		if (wep->rx_tfm)
			crypto_free_blkcipher(wep->rx_tfm);
		kfree(wep);
	}
	return NULL;
}
/* Release the WEP TX/RX transforms held by @local. The !IS_ERR checks
 * mirror how the allocation result is stored without NULL-ing on error. */
void ieee80211_wep_free(struct ieee80211_local *local)
{
	struct crypto_blkcipher *tx = local->wep_tx_tfm;
	struct crypto_blkcipher *rx = local->wep_rx_tfm;

	if (!IS_ERR(tx))
		crypto_free_blkcipher(tx);
	if (!IS_ERR(rx))
		crypto_free_blkcipher(rx);
}
int my_decrypt(char *input, int inputlen, char *output, int outputlen, char *key, int keylen) { struct crypto_blkcipher *tfm = NULL; struct blkcipher_desc desc; struct scatterlist src[1], dst[1]; unsigned int retval = 0; tfm = crypto_alloc_blkcipher("ctr(aes)", 0, 0); if (IS_ERR(tfm)) { printk(KERN_INFO "crypto_alloc_blkcipher failed\n"); return -EINVAL; } desc.tfm = tfm; desc.flags = 0; retval = crypto_blkcipher_setkey(tfm, key, keylen); if (retval) { printk(KERN_INFO "crypto_blkcipher_setkey failed\n"); crypto_free_blkcipher(tfm); return -EINVAL; } sg_init_table(src, 1); sg_set_buf(&src[0], input, inputlen); sg_init_table(dst, 1); sg_set_buf(dst, output, outputlen); retval = crypto_blkcipher_decrypt(&desc, dst, src, inputlen); crypto_free_blkcipher(tfm); return retval; }
/* Tear down a Kerberos GSS security context: free both block ciphers,
 * the copied mechanism OID, then the context structure itself. */
static void gss_delete_sec_context_kerberos(void *internal_ctx)
{
	struct krb5_ctx *ctx = internal_ctx;

	crypto_free_blkcipher(ctx->seq);
	crypto_free_blkcipher(ctx->enc);
	kfree(ctx->mech_used.data);
	kfree(ctx);
}
static void * prism2_wep_init(int keyidx) { struct prism2_wep_data *priv; priv = kmalloc(sizeof(*priv), GFP_ATOMIC); if (priv == NULL) goto fail; memset(priv, 0, sizeof(*priv)); priv->key_idx = keyidx; #if((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && (!OPENSUSE_SLED)) priv->tfm = crypto_alloc_tfm("arc4", 0); if (priv->tfm == NULL) { printk(KERN_DEBUG "ieee80211_crypt_wep: could not allocate " "crypto API arc4\n"); goto fail; } #else priv->tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(priv->tx_tfm)) { printk(KERN_DEBUG "ieee80211_crypt_wep: could not allocate " "crypto API arc4\n"); priv->tx_tfm = NULL; goto fail; } priv->rx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(priv->rx_tfm)) { printk(KERN_DEBUG "ieee80211_crypt_wep: could not allocate " "crypto API arc4\n"); priv->rx_tfm = NULL; goto fail; } #endif /* start WEP IV from a random value */ get_random_bytes(&priv->iv, 4); return priv; fail: #if((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && (!OPENSUSE_SLED)) if (priv) { if (priv->tfm) crypto_free_tfm(priv->tfm); kfree(priv); } #else if (priv) { if (priv->tx_tfm) crypto_free_blkcipher(priv->tx_tfm); if (priv->rx_tfm) crypto_free_blkcipher(priv->rx_tfm); kfree(priv); } #endif return NULL; }
static int encrypt_Cipher(char *key, char *src, char *dest, unsigned int len, int *written) { struct crypto_blkcipher *blkcipher = NULL; char *cipher = "cbc(aes)"; struct scatterlist sg_in[2]; struct scatterlist sg_out[1]; struct blkcipher_desc desc; unsigned int encrypted_datalen; unsigned int padlen; char pad[16]; char *iv=NULL; int ret = -EFAULT; encrypted_datalen = nearestRoundup(len); padlen = encrypted_datalen - len; blkcipher = crypto_alloc_blkcipher(cipher, 0, 0); if (IS_ERR(blkcipher)) { printk("could not allocate blkcipher handle for %s\n", cipher); return -PTR_ERR(blkcipher); } if (crypto_blkcipher_setkey(blkcipher, key, strlen(key))) { printk("key could not be set\n"); ret = -EAGAIN; goto out; } desc.flags = 0; desc.tfm = blkcipher; iv = (char *)kmalloc(crypto_blkcipher_ivsize(blkcipher) , GFP_KERNEL); if(iv==NULL) { printk("Initialisation vector not initialised\n"); ret = -ENOMEM; goto out; } memset(iv, 0, crypto_blkcipher_ivsize(blkcipher)); memset(pad, 0, sizeof pad); sg_init_table(sg_in, 2); sg_set_buf(&sg_in[0], src, len); sg_set_buf(&sg_in[1], pad, padlen); sg_init_table(sg_out, 1); sg_set_buf(sg_out, dest, encrypted_datalen); crypto_blkcipher_set_iv(blkcipher, iv, crypto_blkcipher_ivsize(blkcipher)); ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in, encrypted_datalen); (*written) = encrypted_datalen; printk("Cipher Encryption operation completed\n"); kfree(iv); crypto_free_blkcipher(blkcipher); return ret; out: if (blkcipher) crypto_free_blkcipher(blkcipher); if (iv) kfree(iv); return ret; }
/* Free the TX/RX arc4 transforms (if allocated) and the WEP state itself. */
static void lib80211_wep_deinit(void *priv)
{
	struct lib80211_wep_data *wep = priv;

	if (wep) {
		if (wep->tx_tfm)
			crypto_free_blkcipher(wep->tx_tfm);
		if (wep->rx_tfm)
			crypto_free_blkcipher(wep->rx_tfm);
	}
	kfree(priv);
}
/*
 * In-place cbc(aes) decryption of @src_len bytes at @read_buf using the
 * @keylen-byte key in @buf.  Returns 0 on success or a negative errno.
 *
 * NOTE(review): no IV is set, so the transform's current IV is used —
 * confirm callers rely on that.
 */
int aes_decrypt(char *buf, unsigned int keylen, void *read_buf, size_t src_len)
{
	struct scatterlist sg;
	struct blkcipher_desc desc;
	int ret;
	struct crypto_blkcipher *tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	desc.tfm = tfm;
	desc.flags = 0;

	/* no (void *) cast needed: tfm already has the expected type */
	ret = crypto_blkcipher_setkey(tfm, buf, keylen);
	if (ret)
		goto free_tfm;

	sg_set_buf(&sg, read_buf, src_len);
	ret = crypto_blkcipher_decrypt(&desc, &sg, &sg, src_len);
	/* fall through to cleanup; the old "if (ret) goto free_tfm;"
	 * immediately before the label was redundant */
free_tfm:
	crypto_free_blkcipher(tfm);
	return ret;
}
int wrapfs_decrypt_page(struct page *dst_page,struct page *src_page, char *key) { int ret = 0; struct crypto_blkcipher *tfm = NULL; struct blkcipher_desc desc; const char *algo = "ctr(aes)"; struct scatterlist src_sg, dst_sg; sg_init_table(&src_sg, 1); sg_init_table(&dst_sg, 1); sg_set_page(&src_sg, src_page, PAGE_CACHE_SIZE, 0); sg_set_page(&dst_sg, dst_page, PAGE_CACHE_SIZE, 0); tfm = crypto_alloc_blkcipher(algo,0,CRYPTO_ALG_ASYNC); if(IS_ERR(tfm)){ printk(KERN_ERR "AES: cipher: Failed to load transform for %ld\n",PTR_ERR(tfm)); return PTR_ERR(tfm); } desc.tfm = tfm; desc.flags = 0; ret = crypto_blkcipher_setkey(tfm,key,32); ret = crypto_blkcipher_decrypt(&desc, &dst_sg, &src_sg, PAGE_CACHE_SIZE); if (ret) { printk(KERN_ERR "Error encrypting\n"); goto out; } out: crypto_free_blkcipher(tfm); return ret; }
/*
 * Decrypt epayload->encrypted_data into epayload->decrypted_data using a
 * blkcipher keyed with @derived_key and the payload's stored IV.
 *
 * The ciphertext length is decrypted_datalen rounded up to the cipher
 * block size, so the output scatterlist gets a second, throw-away "pad"
 * segment to absorb the padding bytes beyond decrypted_datalen.
 *
 * Returns 0 on success or a negative errno from setup/decryption.
 * "blksize" and "ivsize" are file-scope values initialised elsewhere.
 */
static int derived_key_decrypt(struct encrypted_key_payload *epayload,
			       const u8 *derived_key,
			       unsigned int derived_keylen)
{
	struct scatterlist sg_in[1];
	struct scatterlist sg_out[2];
	struct blkcipher_desc desc;
	unsigned int encrypted_datalen;
	char pad[16];
	int ret;

	/* ciphertext is block-aligned; may exceed decrypted_datalen */
	encrypted_datalen = roundup(epayload->decrypted_datalen, blksize);
	ret = init_blkcipher_desc(&desc, derived_key, derived_keylen,
				  epayload->iv, ivsize);
	if (ret < 0)
		goto out;
	dump_encrypted_data(epayload, encrypted_datalen);

	memset(pad, 0, sizeof pad);
	sg_init_table(sg_in, 1);
	sg_init_table(sg_out, 2);
	sg_set_buf(sg_in, epayload->encrypted_data, encrypted_datalen);
	sg_set_buf(&sg_out[0], epayload->decrypted_data,
		   epayload->decrypted_datalen);
	/* overflow segment: padding lands here, not in decrypted_data */
	sg_set_buf(&sg_out[1], pad, sizeof pad);

	ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in,
				       encrypted_datalen);
	/* desc.tfm was allocated by init_blkcipher_desc(); release it */
	crypto_free_blkcipher(desc.tfm);
	if (ret < 0)
		goto out;
	dump_decrypted_data(epayload);
out:
	return ret;
}
/* tfm exit hook: drop the software-fallback blkcipher held by the
 * per-tfm context and clear the stale pointer. */
static void fallback_exit_blk(struct crypto_tfm *tfm)
{
	struct geode_aes_op *ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(ctx->fallback.blk);
	ctx->fallback.blk = NULL;
}
/*
 * One-shot AES-CBC helper: encrypts (@encrypt != 0) or decrypts
 * @inputLength bytes from @input into @output using @key and @iv.
 * Returns nothing, so crypto API failures cannot be reported to the
 * caller; on allocation failure we now bail out instead of crashing.
 */
static void AES_cbc(const __u8 *iv, int ivLength, const __u8 *key,
		    int keyLength, const __u8 *input, int inputLength,
		    __u8 *output, int encrypt)
{
	struct scatterlist src[1];
	struct scatterlist dst[1];
	struct blkcipher_desc desc;
	struct crypto_blkcipher *cipher =
		crypto_alloc_blkcipher("cbc(aes)", 0, 0);

	/* the original dereferenced an ERR_PTR when allocation failed */
	if (IS_ERR(cipher))
		return;

	crypto_blkcipher_setkey(cipher, key, keyLength);
	sg_init_table(dst, 1);
	sg_init_table(src, 1);
	sg_set_buf(&dst[0], output, inputLength);
	sg_set_buf(&src[0], input, inputLength);
	desc.tfm = cipher;
	desc.flags = 0;
	crypto_blkcipher_set_iv(cipher, iv, ivLength);
	if (encrypt)
		crypto_blkcipher_encrypt(&desc, dst, src, inputLength);
	else
		crypto_blkcipher_decrypt(&desc, dst, src, inputLength);
	crypto_free_blkcipher(cipher);
}
/* tfm exit hook for rfc4543: release the wrapped AEAD and the null cipher. */
static void crypto_rfc4543_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_rfc4543_ctx *rctx = crypto_tfm_ctx(tfm);

	crypto_free_aead(rctx->child);
	crypto_free_blkcipher(rctx->null);
}
static void prism2_wep_deinit(void *priv) { struct prism2_wep_data *_priv = priv; #if((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && (!OPENSUSE_SLED)) if (_priv && _priv->tfm) crypto_free_tfm(_priv->tfm); #else if (_priv) { if (_priv->tx_tfm) crypto_free_blkcipher(_priv->tx_tfm); if (_priv->rx_tfm) crypto_free_blkcipher(_priv->rx_tfm); } #endif kfree(priv); }
/* tfm exit hook: release the XTS software-fallback cipher and clear
 * the dangling pointer. */
static void xts_fallback_exit(struct crypto_tfm *tfm)
{
	struct s390_xts_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(ctx->fallback);
	ctx->fallback = NULL;
}
/* tfm exit hook: release the s390 AES software-fallback blkcipher. */
static void fallback_exit_blk(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(ctx->fallback.blk);
	ctx->fallback.blk = NULL;
}
int calc_seckey(struct cifs_ses *ses) { int rc; struct crypto_blkcipher *tfm_arc4; struct scatterlist sgin, sgout; struct blkcipher_desc desc; unsigned char sec_key[CIFS_SESS_KEY_SIZE]; /* a nonce */ get_random_bytes(sec_key, CIFS_SESS_KEY_SIZE); tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm_arc4)) { rc = PTR_ERR(tfm_arc4); cifs_dbg(VFS, "could not allocate crypto API arc4\n"); return rc; } desc.tfm = tfm_arc4; rc = crypto_blkcipher_setkey(tfm_arc4, ses->auth_key.response, CIFS_SESS_KEY_SIZE); if (rc) { cifs_dbg(VFS, "%s: Could not set response as a key\n", __func__); return rc; } sg_init_one(&sgin, sec_key, CIFS_SESS_KEY_SIZE); sg_init_one(&sgout, ses->ntlmssp->ciphertext, CIFS_CPHTXT_SIZE); rc = crypto_blkcipher_encrypt(&desc, &sgout, &sgin, CIFS_CPHTXT_SIZE); if (rc) { cifs_dbg(VFS, "could not encrypt session key rc: %d\n", rc); crypto_free_blkcipher(tfm_arc4); return rc; } /* make secondary_key/nonce as session key */ memcpy(ses->auth_key.response, sec_key, CIFS_SESS_KEY_SIZE); /* and make len as that of session key only */ ses->auth_key.len = CIFS_SESS_KEY_SIZE; crypto_free_blkcipher(tfm_arc4); return rc; }
/* tfm exit hook: drop the fallback cipher if one was allocated. */
static void p8_aes_cbc_exit(struct crypto_tfm *tfm)
{
	struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);

	if (!ctx->fallback)
		return;

	crypto_free_blkcipher(ctx->fallback);
	ctx->fallback = NULL;
}
/*
 * Parse an encryption key from the XDR buffer [p, end) and build a
 * keyed blkcipher for it in *res.
 *
 * Legacy DES enctypes are collapsed to ENCTYPE_DES_CBC_RAW before the
 * supported-enctype check.  On success returns the advanced parse
 * pointer; on failure returns an ERR_PTR (with *res left NULL when the
 * cipher could not be allocated).
 */
static inline const void *
get_key(const void *p, const void *end,
	struct krb5_ctx *ctx, struct crypto_blkcipher **res)
{
	struct xdr_netobj key;
	int alg;

	p = simple_get_bytes(p, end, &alg, sizeof(alg));
	if (IS_ERR(p))
		goto out_err;
	switch (alg) {
	case ENCTYPE_DES_CBC_CRC:
	case ENCTYPE_DES_CBC_MD4:
	case ENCTYPE_DES_CBC_MD5:
		/* Map all these key types to ENCTYPE_DES_CBC_RAW */
		alg = ENCTYPE_DES_CBC_RAW;
		break;
	}

	if (!supported_gss_krb5_enctype(alg)) {
		printk(KERN_WARNING "gss_kerberos_mech: unsupported "
			"encryption key algorithm %d\n", alg);
		p = ERR_PTR(-EINVAL);
		goto out_err;
	}
	/* key.data is allocated by simple_get_netobj() — freed below on
	 * every path */
	p = simple_get_netobj(p, end, &key);
	if (IS_ERR(p))
		goto out_err;

	*res = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0,
				      CRYPTO_ALG_ASYNC);
	if (IS_ERR(*res)) {
		printk(KERN_WARNING "gss_kerberos_mech: unable to initialize "
			"crypto algorithm %s\n", ctx->gk5e->encrypt_name);
		*res = NULL;
		goto out_err_free_key;
	}
	if (crypto_blkcipher_setkey(*res, key.data, key.len)) {
		printk(KERN_WARNING "gss_kerberos_mech: error setting key for "
			"crypto algorithm %s\n", ctx->gk5e->encrypt_name);
		goto out_err_free_tfm;
	}

	kfree(key.data);
	return p;

	/* error unwind in reverse order of acquisition */
out_err_free_tfm:
	crypto_free_blkcipher(*res);
out_err_free_key:
	kfree(key.data);
	p = ERR_PTR(-EINVAL);
out_err:
	return p;
}
static int encrypt_decrypt_file(char *buf, unsigned char *key, int len, int flag) { struct crypto_blkcipher *blkcipher = NULL; char *cipher = "ctr(aes)"; struct scatterlist sg; struct blkcipher_desc desc; int rc; blkcipher = crypto_alloc_blkcipher(cipher, 0, 0); if (IS_ERR(blkcipher)) { printk("could not allocate blkcipher handle for %s\n", cipher); rc= -PTR_ERR(blkcipher); goto out; } if (crypto_blkcipher_setkey(blkcipher, key, 16)) { printk("key could not be set\n"); rc = -EAGAIN; goto out; } desc.flags = 0; desc.tfm = blkcipher; sg_init_one(&sg, buf, len); /* encrypt data */ if(flag == 1) { rc = crypto_blkcipher_encrypt(&desc, &sg, &sg, len); if(rc){ printk("Encryption failed \n"); rc = -EFAULT; goto out; } } /* decrypt data */ else if(flag == 0) { rc = crypto_blkcipher_decrypt(&desc, &sg, &sg, len); if(rc){ printk("Decryption failed \n"); rc = -EFAULT; goto out; } } return 0; out: if (blkcipher) crypto_free_blkcipher(blkcipher); return rc; }
/*
 * WUSB Pseudo Random Function (WUSB1.0[6.5])
 *
 * @b: buffer to the source data; cannot be a global or const local
 *     (will confuse the scatterlists)
 *
 * Derives keying material into @out by running the CCM-based MAC
 * (wusb_ccm_mac()) repeatedly, bumping the SFN field of a local copy
 * of the nonce on every pass.  One pass per 64 units of @len
 * (presumably bits per the WUSB spec — confirm).  Returns the number
 * of bytes written or a negative errno.
 */
ssize_t wusb_prf(void *out, size_t out_size, const u8 key[16],
		 const struct aes_ccm_nonce *_n, const struct aes_ccm_label *a,
		 const void *b, size_t blen, size_t len)
{
	ssize_t result, bytes = 0, bitr;
	struct aes_ccm_nonce n = *_n;	/* local copy: sfn mutated per pass */
	struct crypto_blkcipher *tfm_cbc;
	struct crypto_cipher *tfm_aes;
	u64 sfn = 0;
	__le64 sfn_le;

	tfm_cbc = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm_cbc)) {
		result = PTR_ERR(tfm_cbc);
		printk(KERN_ERR "E: can't load CBC(AES): %d\n", (int)result);
		goto error_alloc_cbc;
	}
	result = crypto_blkcipher_setkey(tfm_cbc, key, 16);
	if (result < 0) {
		printk(KERN_ERR "E: can't set CBC key: %d\n", (int)result);
		goto error_setkey_cbc;
	}
	tfm_aes = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm_aes)) {
		result = PTR_ERR(tfm_aes);
		printk(KERN_ERR "E: can't load AES: %d\n", (int)result);
		goto error_alloc_aes;
	}
	result = crypto_cipher_setkey(tfm_aes, key, 16);
	if (result < 0) {
		printk(KERN_ERR "E: can't set AES key: %d\n", (int)result);
		goto error_setkey_aes;
	}

	for (bitr = 0; bitr < (len + 63) / 64; bitr++) {
		sfn_le = cpu_to_le64(sfn++);
		memcpy(&n.sfn, &sfn_le, sizeof(n.sfn));	/* n.sfn++... */
		result = wusb_ccm_mac(tfm_cbc, tfm_aes, out + bytes,
				      &n, a, b, blen);
		if (result < 0)
			goto error_ccm_mac;
		bytes += result;
	}
	result = bytes;
	/* unwind: both ciphers are freed in reverse acquisition order;
	 * the paired labels share a free because setkey failures still
	 * own the allocated tfm */
error_ccm_mac:
error_setkey_aes:
	crypto_free_cipher(tfm_aes);
error_alloc_aes:
error_setkey_cbc:
	crypto_free_blkcipher(tfm_cbc);
error_alloc_cbc:
	return result;
}
static int cryptoloop_release(struct loop_device *lo) { struct crypto_blkcipher *tfm = lo->key_data; if (tfm != NULL) { crypto_free_blkcipher(tfm); lo->key_data = NULL; return 0; } ; return -EINVAL; }
/*
 * Detach and free the cipher attached to @lo.  Returns 0 on success,
 * -EINVAL (with a log message) when no transform was attached.
 */
static int cryptoloop_release(struct loop_device *lo)
{
	struct crypto_blkcipher *tfm = lo->key_data;

	if (tfm == NULL) {
		printk(KERN_ERR "cryptoloop_release(): tfm == NULL?\n");
		return -EINVAL;
	}

	crypto_free_blkcipher(tfm);
	lo->key_data = NULL;
	return 0;
}
static void *prism2_wep_init(int keyidx) { struct prism2_wep_data *priv; priv = kzalloc(sizeof(*priv), GFP_ATOMIC); if (priv == NULL) goto fail; priv->key_idx = keyidx; priv->tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(priv->tx_tfm)) { printk(KERN_DEBUG "rtllib_crypt_wep: could not allocate " "crypto API arc4\n"); priv->tx_tfm = NULL; goto fail; } priv->rx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(priv->rx_tfm)) { printk(KERN_DEBUG "rtllib_crypt_wep: could not allocate " "crypto API arc4\n"); priv->rx_tfm = NULL; goto fail; } /* start WEP IV from a random value */ get_random_bytes(&priv->iv, 4); return priv; fail: if (priv) { if (priv->tx_tfm) crypto_free_blkcipher(priv->tx_tfm); if (priv->rx_tfm) crypto_free_blkcipher(priv->rx_tfm); kfree(priv); } return NULL; }
void smp_chan_destroy(struct l2cap_conn *conn) { struct smp_chan *smp = conn->smp_chan; clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend); if (smp->tfm) crypto_free_blkcipher(smp->tfm); kfree(smp); conn->smp_chan = NULL; hci_conn_put(conn->hcon); }
void smp_chan_destroy(struct l2cap_conn *conn) { struct smp_chan *smp = conn->smp_chan; BUG_ON(!smp); if (smp->tfm) crypto_free_blkcipher(smp->tfm); kfree(smp); conn->smp_chan = NULL; conn->hcon->smp_conn = NULL; hci_conn_drop(conn->hcon); }
/*
 * tfm exit hook for a cryptd-wrapped blkcipher.
 *
 * Under the cryptd state mutex, check that no requests for this tfm
 * are still sitting in the cryptd queue (tearing down with queued work
 * pending would be a use-after-free), then release the wrapped child
 * transform.
 */
static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct cryptd_state *state = cryptd_get_state(tfm);
	int active;

	mutex_lock(&state->mutex);
	active = ablkcipher_tfm_in_queue(__crypto_ablkcipher_cast(tfm));
	mutex_unlock(&state->mutex);

	/* queued requests at exit time indicate a refcounting bug */
	BUG_ON(active);

	crypto_free_blkcipher(ctx->child);
}
/* Probe the configured cipher once to cache its IV and block sizes in
 * the file-scope "ivsize"/"blksize" variables.  Returns 0 or a
 * negative errno. */
static int aes_get_sizes(void)
{
	struct crypto_blkcipher *tfm =
		crypto_alloc_blkcipher(blkcipher_alg, 0, CRYPTO_ALG_ASYNC);

	if (IS_ERR(tfm)) {
		pr_err("encrypted_key: failed to alloc_cipher (%ld)\n",
		       PTR_ERR(tfm));
		return PTR_ERR(tfm);
	}

	ivsize = crypto_blkcipher_ivsize(tfm);
	blksize = crypto_blkcipher_blocksize(tfm);
	crypto_free_blkcipher(tfm);
	return 0;
}
static inline void test2(void) { struct crypto_blkcipher *tfm; int rc; unsigned char *crap; struct blkcipher_desc desc; struct scatterlist in, out; printk(KERN_INFO "test in\n"); crap = kmalloc(4096, GFP_KERNEL); tfm = crypto_alloc_blkcipher("rsa", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) { printk(KERN_INFO "crypto_alloc_blkcipher()\n"); return; } rc = crypto_blkcipher_setkey(tfm, key, sizeof(key) - 1); printk(KERN_INFO "crypto_blkcipher_setkey = %d\n", rc); strcpy(crap, "AABC"); desc.tfm = tfm; desc.flags = 0; sg_init_table(&in, 1); sg_set_buf(&in, crap, 4); sg_init_table(&out, 1); sg_set_buf(&out, crap, 4096); rc = crypto_blkcipher_encrypt(&desc, &out, &in, 4); printk(KERN_INFO "crypto_blkcipher_encrypt RC %d %x %x %x\n", rc, crap[0], crap[1], crap[2]); sg_init_one(&in, crap, rc); sg_init_one(&out, crap, 4096); rc = crypto_blkcipher_decrypt(&desc, &out, &in, rc); printk(KERN_INFO "crypto_blkcipher_decrypt RC %d %x %x %x\n", rc, crap[0], crap[1], crap[2]); crypto_free_blkcipher(tfm); kfree(crap); printk(KERN_INFO "test out\n"); }
/*
 * AES-decrypt @src_len bytes of @src into @dst using the shared fixed
 * IV (aes_iv, defined elsewhere).  On entry *dst_len is the capacity
 * of @dst; on success it is set to the unpadded plaintext length.
 * Trailing pad bytes that do not fit in @dst land in the local "pad"
 * scratch buffer via the second output scatterlist segment.
 *
 * Returns 0 on success, a negative errno on cipher failure, or -EPERM
 * on malformed padding.
 */
int ceph_aes_decrypt(const void *key, int key_len,
		     void *dst, size_t *dst_len,
		     const void *src, size_t src_len)
{
	struct scatterlist sg_in[1], sg_out[2];
	struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
	struct blkcipher_desc desc = { .tfm = tfm };
	char pad[16];
	void *iv;
	int ivsize;
	int ret;
	int last_byte;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* NOTE(review): the setkey return value is ignored here */
	crypto_blkcipher_setkey((void *)tfm, key, key_len);
	sg_init_table(sg_in, 1);
	sg_init_table(sg_out, 2);
	sg_set_buf(sg_in, src, src_len);
	sg_set_buf(&sg_out[0], dst, *dst_len);
	/* overflow segment: absorbs padding beyond the caller's buffer */
	sg_set_buf(&sg_out[1], pad, sizeof(pad));

	iv = crypto_blkcipher_crt(tfm)->iv;
	ivsize = crypto_blkcipher_ivsize(tfm);
	memcpy(iv, aes_iv, ivsize);

	ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
	crypto_free_blkcipher(tfm);
	if (ret < 0) {
		pr_err("ceph_aes_decrypt failed %d\n", ret);
		return ret;
	}

	/* the final plaintext byte encodes the pad length; it may sit in
	 * dst or in the pad overflow segment */
	if (src_len <= *dst_len)
		last_byte = ((char *)dst)[src_len - 1];
	else
		last_byte = pad[src_len - *dst_len - 1];
	if (last_byte <= 16 && src_len >= last_byte) {
		*dst_len = src_len - last_byte;
	} else {
		pr_err("ceph_aes_decrypt got bad padding %d on src len %d\n",
		       last_byte, (int)src_len);
		return -EPERM;	/* bad padding */
	}
	return 0;
}