/*
 * Free the WEP TX/RX cipher handles allocated by ieee80211_wep_init().
 * The tfm fields hold ERR_PTR cookies (never plain NULL) when allocation
 * failed, so each one is freed only when !IS_ERR().
 */
void ieee80211_wep_free(struct ieee80211_local *local)
{
	if (!IS_ERR(local->wep_tx_tfm))
		crypto_free_cipher(local->wep_tx_tfm);
	if (!IS_ERR(local->wep_rx_tfm))
		crypto_free_cipher(local->wep_rx_tfm);
}
/* Set up per cpu cipher state */
static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
					     struct dm_target *ti,
					     u8 *salt, unsigned saltsize)
{
	struct crypto_cipher *essiv_tfm;
	int err;

	/* Setup the essiv_tfm with the given salt */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		return essiv_tfm;
	}

	/* The ESSIV cipher must produce blocks of exactly the data
	 * cipher's IV size, otherwise the generated IV is unusable. */
	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_ablkcipher_ivsize(any_tfm(cc))) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		err = -EINVAL;
		goto free_tfm;
	}

	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		goto free_tfm;
	}

	return essiv_tfm;

free_tfm:
	crypto_free_cipher(essiv_tfm);
	return ERR_PTR(err);
}
/*
 * (Re)initialise a PRNG context: install key/V/DT (or built-in defaults),
 * reallocate the AES tfm and key it. Returns 0 on success, -EINVAL on
 * failure; on failure ctx->tfm is always NULL so a later reset or
 * free_prng_context() never touches a freed tfm.
 */
static int reset_prng_context(struct prng_context *ctx,
			      unsigned char *key, size_t klen,
			      unsigned char *V, unsigned char *DT)
{
	int ret;
	int rc = -EINVAL;
	unsigned char *prng_key;

	spin_lock(&ctx->prng_lock);
	ctx->flags |= PRNG_NEED_RESET;

	/* Fall back to the built-in key when the caller supplies none. */
	prng_key = (key != NULL) ? key : (unsigned char *)DEFAULT_PRNG_KEY;
	if (!key)
		klen = DEFAULT_PRNG_KSZ;

	if (V)
		memcpy(ctx->V, V, DEFAULT_BLK_SZ);
	else
		memcpy(ctx->V, DEFAULT_V_SEED, DEFAULT_BLK_SZ);

	if (DT)
		memcpy(ctx->DT, DT, DEFAULT_BLK_SZ);
	else
		memset(ctx->DT, 0, DEFAULT_BLK_SZ);

	memset(ctx->rand_data, 0, DEFAULT_BLK_SZ);
	memset(ctx->last_rand_data, 0, DEFAULT_BLK_SZ);

	if (ctx->tfm)
		crypto_free_cipher(ctx->tfm);

	ctx->tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(ctx->tfm)) {
		dbgprint(KERN_CRIT "Failed to alloc tfm for context %p\n",
			 ctx);
		ctx->tfm = NULL;
		goto out;
	}

	ctx->rand_data_valid = DEFAULT_BLK_SZ;

	ret = crypto_cipher_setkey(ctx->tfm, prng_key, klen);
	if (ret) {
		dbgprint(KERN_CRIT "PRNG: setkey() failed flags=%x\n",
			 crypto_cipher_get_flags(ctx->tfm));
		crypto_free_cipher(ctx->tfm);
		/* Fix: clear the stale pointer — the original left a
		 * dangling ctx->tfm here, which a subsequent reset or
		 * free_prng_context() would free again. */
		ctx->tfm = NULL;
		goto out;
	}

	rc = 0;
	ctx->flags &= ~PRNG_NEED_RESET;
out:
	spin_unlock(&ctx->prng_lock);
	return rc;
}
/* RCU callback: release a fastopen context once no readers remain. */
static void tcp_fastopen_ctx_free(struct rcu_head *head)
{
	struct tcp_fastopen_context *octx;

	octx = container_of(head, struct tcp_fastopen_context, rcu);
	crypto_free_cipher(octx->tfm);
	kfree(octx);
}
/* Tear down the software fallback cipher attached to this tfm. */
static void fallback_exit_cip(struct crypto_tfm *tfm)
{
	struct geode_aes_op *ctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(ctx->fallback.cip);
	ctx->fallback.cip = NULL;
}
int tcp_fastopen_reset_cipher(void *key, unsigned int len) { int err; struct tcp_fastopen_context *ctx, *octx; ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->tfm = crypto_alloc_cipher("aes", 0, 0); if (IS_ERR(ctx->tfm)) { err = PTR_ERR(ctx->tfm); error: kfree(ctx); pr_err("TCP: TFO aes cipher alloc error: %d\n", err); return err; } err = crypto_cipher_setkey(ctx->tfm, key, len); if (err) { pr_err("TCP: TFO cipher key error: %d\n", err); crypto_free_cipher(ctx->tfm); goto error; } memcpy(ctx->key, key, len); spin_lock(&tcp_fastopen_ctx_lock); octx = rcu_dereference_protected(tcp_fastopen_ctx, lockdep_is_held(&tcp_fastopen_ctx_lock)); rcu_assign_pointer(tcp_fastopen_ctx, ctx); spin_unlock(&tcp_fastopen_ctx_lock); if (octx) call_rcu(&octx->rcu, tcp_fastopen_ctx_free); return err; }
/*
 * Compute the GCM hash subkey H = AES_K(0^128) in software and store it
 * (converted to little-endian 32-bit words) at hash_start.
 * Returns 0 on success or a negative errno.
 */
static int crypto4xx_compute_gcm_hash_key_sw(__le32 *hash_start,
					     const u8 *key,
					     unsigned int keylen)
{
	struct crypto_cipher *aes_tfm;
	uint8_t block[16] = { 0 };
	int rc;

	aes_tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC |
				      CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(aes_tfm)) {
		rc = PTR_ERR(aes_tfm);
		pr_warn("could not load aes cipher driver: %d\n", rc);
		return rc;
	}

	rc = crypto_cipher_setkey(aes_tfm, key, keylen);
	if (rc) {
		pr_err("setkey() failed: %d\n", rc);
		goto out;
	}

	/* Encrypt the all-zero block in place to obtain H. */
	crypto_cipher_encrypt_one(aes_tfm, block, block);
	crypto4xx_memcpy_to_le32(hash_start, block, 16);
out:
	crypto_free_cipher(aes_tfm);
	return rc;
}
static int crypto_ccm_init_tfm(struct crypto_tfm *tfm) { struct crypto_instance *inst = (void *)tfm->__crt_alg; struct ccm_instance_ctx *ictx = crypto_instance_ctx(inst); struct crypto_ccm_ctx *ctx = crypto_tfm_ctx(tfm); struct crypto_cipher *cipher; struct crypto_ablkcipher *ctr; unsigned long align; int err; cipher = crypto_spawn_cipher(&ictx->cipher); if (IS_ERR(cipher)) return PTR_ERR(cipher); ctr = crypto_spawn_skcipher(&ictx->ctr); err = PTR_ERR(ctr); if (IS_ERR(ctr)) goto err_free_cipher; ctx->cipher = cipher; ctx->ctr = ctr; align = crypto_tfm_alg_alignmask(tfm); align &= ~(crypto_tfm_ctx_alignment() - 1); tfm->crt_aead.reqsize = align + sizeof(struct crypto_ccm_req_priv_ctx) + crypto_ablkcipher_reqsize(ctr); return 0; err_free_cipher: crypto_free_cipher(cipher); return err; }
/* Per-tfm exit for the CCM template: drop both spawned transforms. */
static void crypto_ccm_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ccm_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(ctx->cipher);
	crypto_free_ablkcipher(ctx->ctr);
}
/* Undo the fallback cipher setup for this s390 AES tfm. */
static void fallback_exit_cip(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(sctx->fallback.cip);
	sctx->fallback.cip = NULL;
}
/*
 * Module teardown: free the cipher, then unwind the block-device setup
 * (disk, major number, queue, backing store) in reverse order of creation.
 */
static void __exit sbd_exit(void)
{
	crypto_free_cipher(crypt);
	del_gendisk(Device.gd);
	put_disk(Device.gd);
	unregister_blkdev(major_num, "sbd");
	blk_cleanup_queue(Queue);
	vfree(Device.data);
}
/*
 * Constructor for the ESSIV IV generator: allocates the digest (named by
 * 'opts'), a salt buffer sized to that digest, and the ESSIV cipher
 * itself, storing all three in cc->iv_gen_private.essiv.
 * Returns 0 on success or a negative errno with ti->error set.
 */
static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm = NULL;
	struct crypto_hash *hash_tfm = NULL;
	u8 *salt = NULL;
	int err;

	if (!opts) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Allocate hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		err = PTR_ERR(hash_tfm);
		goto bad;
	}

	/* Salt buffer is exactly one digest wide. */
	salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
	if (!salt) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		err = -ENOMEM;
		goto bad;
	}

	/* Allocate essiv_tfm */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		err = PTR_ERR(essiv_tfm);
		goto bad;
	}

	/* ESSIV output blocks become IVs, so sizes must match exactly. */
	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_ablkcipher_ivsize(cc->tfm)) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		err = -EINVAL;
		goto bad;
	}

	cc->iv_gen_private.essiv.salt = salt;
	cc->iv_gen_private.essiv.tfm = essiv_tfm;
	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;

	return 0;

bad:
	/* The IS_ERR guards keep us from freeing ERR_PTR cookies;
	 * kfree(NULL) is a no-op for the salt. */
	if (essiv_tfm && !IS_ERR(essiv_tfm))
		crypto_free_cipher(essiv_tfm);
	if (hash_tfm && !IS_ERR(hash_tfm))
		crypto_free_hash(hash_tfm);
	kfree(salt);
	return err;
}
/*
 * Free an fscrypt_info and its cipher handles. Safe to call with NULL;
 * the kernel crypto free helpers tolerate the individual tfm fields
 * being unset for modes that do not use them.
 */
static void put_crypt_info(struct fscrypt_info *ci)
{
	if (!ci)
		return;

	crypto_free_skcipher(ci->ci_ctfm);
	crypto_free_cipher(ci->ci_essiv_tfm);
	kmem_cache_free(fscrypt_info_cachep, ci);
}
/*
 * WUSB Pseudo Random Function (WUSB1.0[6.5])
 *
 * Derives 'len' bits of keying material into @out by running the CCM MAC
 * repeatedly with an incrementing secure frame number in the nonce.
 *
 * @b: buffer to the source data; cannot be a global or const local
 * (will confuse the scatterlists)
 */
ssize_t wusb_prf(void *out, size_t out_size,
		 const u8 key[16], const struct aes_ccm_nonce *_n,
		 const struct aes_ccm_label *a,
		 const void *b, size_t blen, size_t len)
{
	ssize_t result, bytes = 0, bitr;
	/* Local nonce copy: the SFN field is rewritten per iteration. */
	struct aes_ccm_nonce n = *_n;
	struct crypto_blkcipher *tfm_cbc;
	struct crypto_cipher *tfm_aes;
	u64 sfn = 0;
	__le64 sfn_le;

	tfm_cbc = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm_cbc)) {
		result = PTR_ERR(tfm_cbc);
		printk(KERN_ERR "E: can't load CBC(AES): %d\n", (int)result);
		goto error_alloc_cbc;
	}
	result = crypto_blkcipher_setkey(tfm_cbc, key, 16);
	if (result < 0) {
		printk(KERN_ERR "E: can't set CBC key: %d\n", (int)result);
		goto error_setkey_cbc;
	}

	tfm_aes = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm_aes)) {
		result = PTR_ERR(tfm_aes);
		printk(KERN_ERR "E: can't load AES: %d\n", (int)result);
		goto error_alloc_aes;
	}
	result = crypto_cipher_setkey(tfm_aes, key, 16);
	if (result < 0) {
		printk(KERN_ERR "E: can't set AES key: %d\n", (int)result);
		goto error_setkey_aes;
	}

	/* One MAC invocation per 64 bits requested (rounded up). */
	for (bitr = 0; bitr < (len + 63) / 64; bitr++) {
		sfn_le = cpu_to_le64(sfn++);
		memcpy(&n.sfn, &sfn_le, sizeof(n.sfn));	/* n.sfn++... */
		result = wusb_ccm_mac(tfm_cbc, tfm_aes, out + bytes,
				      &n, a, b, blen);
		if (result < 0)
			goto error_ccm_mac;
		bytes += result;
	}
	result = bytes;

	/* Stacked labels unwind in reverse order of acquisition. */
error_ccm_mac:
error_setkey_aes:
	crypto_free_cipher(tfm_aes);
error_alloc_aes:
error_setkey_cbc:
	crypto_free_blkcipher(tfm_cbc);
error_alloc_cbc:
	return result;
}
/*
 * Destructor for the ESSIV IV generator: releases the cipher, the digest
 * and the salt allocated by crypt_iv_essiv_ctr(), clearing each pointer
 * so a repeated dtr call cannot double-free. kzfree() also wipes the
 * salt (key-derived material) before freeing it.
 */
static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;

	crypto_free_cipher(essiv->tfm);
	essiv->tfm = NULL;
	crypto_free_hash(essiv->hash_tfm);
	essiv->hash_tfm = NULL;

	kzfree(essiv->salt);
	essiv->salt = NULL;
}
/*
 * Module teardown: unwind the block device (disk, major, queue), free the
 * cipher, remove the sysfs key attribute/device, then free backing store.
 */
static void __exit sbd_exit(void)
{
	del_gendisk(Device.gd);
	put_disk(Device.gd);
	unregister_blkdev(major_num, "sbd");
	blk_cleanup_queue(Queue);
	crypto_free_cipher(tfm);
	device_remove_file(&rd_root_dev, &dev_attr_key);
	device_unregister(&rd_root_dev);
	vfree(Device.data);
}
/*
 * Read 'size' bytes at 'offset' from the lower file into 'data',
 * decrypting in place when WRAPFS_CRYPTO is enabled.
 * Returns the byte count from vfs_read() on success or a negative errno.
 *
 * Fixes over the original:
 *  - set_fs(fs_save) is now restored on ALL paths (the original error
 *    gotos skipped it, leaving the kernel address limit widened);
 *  - allocation/cipher failures now return an errno instead of silently
 *    returning the read count with still-encrypted data in 'data'.
 */
int wrapfs_read_lower(char *data, loff_t offset, size_t size,
		      struct inode *wrapfs_inode, struct file *file)
{
	struct file *lower_file;
	mm_segment_t fs_save;
	ssize_t rc;
	mode_t previous_mode;
#ifdef WRAPFS_CRYPTO
	struct crypto_cipher *tfm;
	char *decrypted_page_buffer = NULL;
#endif

	lower_file = wrapfs_lower_file(file);
	if (!lower_file)
		return -EIO;

	fs_save = get_fs();
	set_fs(get_ds());
	previous_mode = lower_file->f_mode;
	lower_file->f_mode |= FMODE_READ;
	rc = vfs_read(lower_file, data, size, &offset);
	lower_file->f_mode = previous_mode;
	/* Restore the address limit unconditionally, before any
	 * error path can return. */
	set_fs(fs_save);

#ifdef WRAPFS_CRYPTO
	decrypted_page_buffer = kmalloc(size, GFP_KERNEL);
	if (decrypted_page_buffer == NULL) {
		rc = -ENOMEM;
		goto out;
	}
	memset(decrypted_page_buffer, 0, size);

	tfm = crypto_alloc_cipher("aes", 0, 16);
	if (IS_ERR(tfm)) {
		rc = PTR_ERR(tfm);
		goto out_free_buf;
	}
	crypto_cipher_setkey(tfm, WRAPFS_SB(file->f_dentry->d_sb)->key, 16);

	/* NOTE(review): decrypt_one handles a single cipher block; this
	 * mirrors the original behavior — confirm 'size' is one block. */
	crypto_cipher_decrypt_one(tfm, decrypted_page_buffer, data);
	memcpy(data, decrypted_page_buffer, size);

	crypto_free_cipher(tfm);
out_free_buf:
	kfree(decrypted_page_buffer);
out:
#endif
	return rc;
}
/*
 * Write 'size' bytes of 'page_for_lower' (starting at 'offset_in_page')
 * to the lower file, encrypting into a bounce buffer first when
 * WRAPFS_CRYPTO is enabled. Returns 0 on success or a negative errno.
 *
 * Fixes over the original:
 *  - kunmap() now runs on ALL paths (the original error gotos skipped
 *    it, leaving the page permanently kmapped);
 *  - allocation/cipher failures return a real errno instead of -1.
 */
int wrapfs_write_lower_page_segment(struct inode *wrapfs_inode,
				    struct page *page_for_lower,
				    size_t offset_in_page, size_t size,
				    struct file *file)
{
	char *virt;
	loff_t offset;
	int rc = -1;
#ifdef WRAPFS_CRYPTO
	unsigned char *encrypted_page_buffer = NULL;
	struct crypto_cipher *tfm;
#endif

	offset = ((((loff_t)page_for_lower->index) << PAGE_CACHE_SHIFT)
		  + offset_in_page);
	virt = kmap(page_for_lower);

#ifdef WRAPFS_CRYPTO
	encrypted_page_buffer = kmalloc(size, GFP_KERNEL);
	if (encrypted_page_buffer == NULL) {
		rc = -ENOMEM;
		goto out;
	}
	memset(encrypted_page_buffer, 0, size);

	tfm = crypto_alloc_cipher("aes", 0, 16);
	if (IS_ERR(tfm)) {
		rc = PTR_ERR(tfm);
		goto out_free_buf;
	}
	crypto_cipher_setkey(tfm, WRAPFS_SB(file->f_dentry->d_sb)->key, 16);

	/* NOTE(review): encrypt_one handles a single cipher block; this
	 * mirrors the original behavior — confirm 'size' is one block. */
	crypto_cipher_encrypt_one(tfm, encrypted_page_buffer, virt);
	rc = wrapfs_write_lower(wrapfs_inode, encrypted_page_buffer,
				offset, size, file);
	crypto_free_cipher(tfm);
out_free_buf:
	kfree(encrypted_page_buffer);
out:
#else
	rc = wrapfs_write_lower(wrapfs_inode, virt, offset, size, file);
#endif
	if (rc > 0)
		rc = 0;
	/* Always unmap, even on error paths. */
	kunmap(page_for_lower);
	return rc;
}
/* Replace the cipher instance, and free the old one when readers have * abandoned it. */ static void update_cipher(struct crypto_cipher **cipher_ptr, struct crypto_cipher *new_cipher) { struct crypto_cipher *old_cipher; /* Perform RCU update */ spin_lock(¶m_write_lock); old_cipher = *cipher_ptr; rcu_assign_pointer(*cipher_ptr, new_cipher); spin_unlock(¶m_write_lock); /* Free the old cipher when ready. */ synchronize_rcu(); crypto_free_cipher(old_cipher); }
static int param_set_cipher_key(const char *val, struct kernel_param *kp) { struct crypto_cipher *new_cipher; int key_len; u8 key[128]; int err = 0; /* Try to convert the user's key to raw bytes. */ key_len = parse_hex_string(val, key, ARRAY_SIZE(key)); if (key_len < 0) { printk(KERN_INFO "stubl: Can't parse key.\n"); return key_len; } /* If the key is empty, then clear it. */ if (key_len == 0) { printk(KERN_INFO "stubl: Clearing tunnel key.\n"); update_cipher(kp->arg, NULL); return 0; } printk(KERN_INFO "stubl: Setting tunnel key.\n"); /* Init a new cipher */ new_cipher = crypto_alloc_cipher("blowfish", 0, 0); if (IS_ERR(new_cipher)) { printk(KERN_INFO "stubl: Can't init cipher: %ld\n", PTR_ERR(new_cipher)); return PTR_ERR(new_cipher); } /* Set key */ err = crypto_cipher_setkey(new_cipher, key, key_len); if (err < 0) { printk(KERN_INFO "stubl: Can't set key: %d\n", err); crypto_free_cipher(new_cipher); return err; } /* Perform RCU update */ update_cipher(kp->arg, new_cipher); return 0; }
/*
 * ECB-style AES decrypt of 'len' bytes from cdata into pdata, one
 * AES_BLOCK_SIZE block at a time.
 *
 * Fix: the original never checked crypto_alloc_cipher() for failure and
 * would dereference an ERR_PTR inside crypto_cipher_decrypt_one().
 */
void aes_decrypt(u8 *cdata, u8 *pdata, u8 *key, int len)
{
	struct crypto_cipher *tfm;
	u8 *ptmp, *ctmp;
	int i;

	tfm = crypto_alloc_cipher("aes", 4, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return;

	/* FIXME(review): 'key' is accepted but never loaded into the tfm —
	 * crypto_cipher_setkey() is never called (same as the original).
	 * Confirm where the key schedule is supposed to come from. */

	ptmp = pdata;
	ctmp = cdata;
	for (i = 0; i < len; i += AES_BLOCK_SIZE) {
		crypto_cipher_decrypt_one(tfm, ptmp, ctmp);
		ptmp += AES_BLOCK_SIZE;
		ctmp += AES_BLOCK_SIZE;
	}
	crypto_free_cipher(tfm);
}
int ieee80211_wep_init(struct ieee80211_local *local) { /* start WEP IV from a random value */ get_random_bytes(&local->wep_iv, WEP_IV_LEN); local->wep_tx_tfm = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(local->wep_tx_tfm)) { local->wep_rx_tfm = ERR_PTR(-EINVAL); return PTR_ERR(local->wep_tx_tfm); } local->wep_rx_tfm = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(local->wep_rx_tfm)) { crypto_free_cipher(local->wep_tx_tfm); local->wep_tx_tfm = ERR_PTR(-EINVAL); return PTR_ERR(local->wep_rx_tfm); } return 0; }
/*
 * Self-test style helper: encrypts two AES blocks of pdata into cdata,
 * then decrypts them back into ndata, dumping each stage.
 *
 * Fix: the original never checked crypto_alloc_cipher() for failure and
 * would dereference an ERR_PTR in crypto_cipher_encrypt_one().
 */
void aes_encrypt(u8 *pdata, u8 *cdata, u8 *ndata, u8 *key)
{
	struct crypto_cipher *tfm;

	tfm = crypto_alloc_cipher("aes", 4, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return;

	/* FIXME(review): 'key' is accepted but never loaded into the tfm —
	 * crypto_cipher_setkey() is never called (same as the original).
	 * Confirm the intended key schedule. */

	crypto_cipher_encrypt_one(tfm, &cdata[0], &pdata[0]);
	crypto_cipher_encrypt_one(tfm, &cdata[16], &pdata[16]);
	dump("PlainText: ", pdata);
	dump("Crypted: ", cdata);

	crypto_cipher_decrypt_one(tfm, &ndata[0], &cdata[0]);
	crypto_cipher_decrypt_one(tfm, &ndata[16], &cdata[16]);
	dump("Decrypted: ", ndata);

	crypto_free_cipher(tfm);
	return;
}
/*
 * Derive the GHASH subkey H = AES_K(0^128) required by RFC 4106 GCM and
 * store it in hash_subkey. Returns 0 or a negative errno.
 */
static int rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key,
				   unsigned int key_len)
{
	struct crypto_cipher *tfm;
	int ret;

	tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_cipher_setkey(tfm, key, key_len);
	if (!ret) {
		/* H is the encryption of the all-zero block. */
		memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
		crypto_cipher_encrypt_one(tfm, hash_subkey, hash_subkey);
	}

	crypto_free_cipher(tfm);
	return ret;
}
/* Release the underlying cipher when a CBC tfm instance is destroyed. */
static void crypto_cbc_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(ctx->child);
}
/*
 * Core LRW walk for the blkcipher path: applies 'fn' (the raw cipher's
 * encrypt or decrypt) to each LRW_BLOCK_SIZE block while maintaining the
 * per-block tweak T incrementally. The "goto first" jumps into the loop
 * body because the initial T is computed outside the loop.
 */
static int crypt(struct blkcipher_desc *d,
		 struct blkcipher_walk *w, struct priv *ctx,
		 void (*fn)(struct crypto_tfm *, u8 *, const u8 *))
{
	int err;
	unsigned int avail;
	const int bs = LRW_BLOCK_SIZE;
	struct sinfo s = {
		.tfm = crypto_cipher_tfm(ctx->child),
		.fn = fn
	};
	be128 *iv;
	u8 *wsrc;
	u8 *wdst;

	err = blkcipher_walk_virt(d, w);
	if (!(avail = w->nbytes))
		return err;

	wsrc = w->src.virt.addr;
	wdst = w->dst.virt.addr;

	/* calculate first value of T */
	iv = (be128 *)w->iv;
	s.t = *iv;

	/* T <- I*Key2 */
	gf128mul_64k_bbe(&s.t, ctx->table.table);

	goto first;

	for (;;) {
		do {
			/* T <- I*Key2, using the optimization
			 * discussed in the specification */
			be128_xor(&s.t, &s.t,
				  &ctx->table.mulinc[get_index128(iv)]);
			inc(iv);

first:
			lrw_round(&s, wdst, wsrc);

			wsrc += bs;
			wdst += bs;
		} while ((avail -= bs) >= bs);

		err = blkcipher_walk_done(d, w, avail);
		if (!(avail = w->nbytes))
			break;

		wsrc = w->src.virt.addr;
		wdst = w->dst.virt.addr;
	}

	return err;
}

/* blkcipher encrypt entry point: walk and apply the child's cia_encrypt. */
static int encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		   struct scatterlist *src, unsigned int nbytes)
{
	struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk w;

	blkcipher_walk_init(&w, dst, src, nbytes);
	return crypt(desc, &w, ctx,
		     crypto_cipher_alg(ctx->child)->cia_encrypt);
}

/* blkcipher decrypt entry point: walk and apply the child's cia_decrypt. */
static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		   struct scatterlist *src, unsigned int nbytes)
{
	struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk w;

	blkcipher_walk_init(&w, dst, src, nbytes);
	return crypt(desc, &w, ctx,
		     crypto_cipher_alg(ctx->child)->cia_decrypt);
}

/*
 * Batched LRW helper: processes up to max_blks (= req->tbuflen / block)
 * blocks at a time, precomputing the tweaks into req->tbuf, calling the
 * caller-supplied crypt_fn on the whole run, then XORing the tweaks back.
 * The "goto first" mirrors crypt() above: the first tweak is seeded
 * outside the loop.
 */
int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
	      struct scatterlist *ssrc, unsigned int nbytes,
	      struct lrw_crypt_req *req)
{
	const unsigned int bsize = LRW_BLOCK_SIZE;
	const unsigned int max_blks = req->tbuflen / bsize;
	struct lrw_table_ctx *ctx = req->table_ctx;
	struct blkcipher_walk walk;
	unsigned int nblocks;
	be128 *iv, *src, *dst, *t;
	be128 *t_buf = req->tbuf;
	int err, i;

	BUG_ON(max_blks < 1);

	blkcipher_walk_init(&walk, sdst, ssrc, nbytes);

	err = blkcipher_walk_virt(desc, &walk);
	nbytes = walk.nbytes;
	if (!nbytes)
		return err;

	nblocks = min(walk.nbytes / bsize, max_blks);
	src = (be128 *)walk.src.virt.addr;
	dst = (be128 *)walk.dst.virt.addr;

	/* calculate first value of T */
	iv = (be128 *)walk.iv;
	t_buf[0] = *iv;

	/* T <- I*Key2 */
	gf128mul_64k_bbe(&t_buf[0], ctx->table);

	i = 0;
	goto first;

	for (;;) {
		do {
			for (i = 0; i < nblocks; i++) {
				/* T <- I*Key2, using the optimization
				 * discussed in the specification */
				be128_xor(&t_buf[i], t,
					  &ctx->mulinc[get_index128(iv)]);
				inc(iv);

first:
				t = &t_buf[i];

				/* PP <- T xor P */
				be128_xor(dst + i, t, src + i);
			}

			/* CC <- E(Key2,PP) */
			req->crypt_fn(req->crypt_ctx, (u8 *)dst,
				      nblocks * bsize);

			/* C <- T xor CC */
			for (i = 0; i < nblocks; i++)
				be128_xor(dst + i, dst + i, &t_buf[i]);

			src += nblocks;
			dst += nblocks;
			nbytes -= nblocks * bsize;
			nblocks = min(nbytes / bsize, max_blks);
		} while (nblocks > 0);

		err = blkcipher_walk_done(desc, &walk, nbytes);
		nbytes = walk.nbytes;
		if (!nbytes)
			break;

		nblocks = min(nbytes / bsize, max_blks);
		src = (be128 *)walk.src.virt.addr;
		dst = (be128 *)walk.dst.virt.addr;
	}

	return err;
}
EXPORT_SYMBOL_GPL(lrw_crypt);

/*
 * Per-tfm init: spawn the child cipher and reject any whose block size
 * is not LRW_BLOCK_SIZE (LRW's GF(2^128) math assumes 16-byte blocks).
 */
static int init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_cipher *cipher;
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
	struct priv *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	if (crypto_cipher_blocksize(cipher) != LRW_BLOCK_SIZE) {
		*flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		crypto_free_cipher(cipher);
		return -EINVAL;
	}

	ctx->child = cipher;
	return 0;
}

/* Per-tfm exit: free the multiplication table and the child cipher. */
static void exit_tfm(struct crypto_tfm *tfm)
{
	struct priv *ctx = crypto_tfm_ctx(tfm);

	lrw_free_table(&ctx->table);
	crypto_free_cipher(ctx->child);
}

/*
 * Template constructor: build an "lrw(<cipher>)" blkcipher instance.
 * The key is the cipher key followed by one extra block (the tweak key),
 * hence the +cra_blocksize on both key-size bounds.
 */
static struct crypto_instance *alloc(struct rtattr **tb)
{
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
	if (err)
		return ERR_PTR(err);

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return ERR_CAST(alg);

	inst = crypto_alloc_instance("lrw", alg);
	if (IS_ERR(inst))
		goto out_put_alg;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = alg->cra_blocksize;

	if (alg->cra_alignmask < 7)
		inst->alg.cra_alignmask = 7;
	else
		inst->alg.cra_alignmask = alg->cra_alignmask;
	inst->alg.cra_type = &crypto_blkcipher_type;

	if (!(alg->cra_blocksize % 4))
		inst->alg.cra_alignmask |= 3;
	inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
	inst->alg.cra_blkcipher.min_keysize =
		alg->cra_cipher.cia_min_keysize + alg->cra_blocksize;
	inst->alg.cra_blkcipher.max_keysize =
		alg->cra_cipher.cia_max_keysize + alg->cra_blocksize;

	inst->alg.cra_ctxsize = sizeof(struct priv);

	inst->alg.cra_init = init_tfm;
	inst->alg.cra_exit = exit_tfm;

	inst->alg.cra_blkcipher.setkey = setkey;
	inst->alg.cra_blkcipher.encrypt = encrypt;
	inst->alg.cra_blkcipher.decrypt = decrypt;

out_put_alg:
	crypto_mod_put(alg);
	return inst;
}

/* Template destructor: drop the spawned cipher and free the instance. */
static void free(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(inst);
}
/*
 * Validate the Management MIC IE (MMIE) of a received broadcast/multicast
 * management frame: checks element id/length, verifies the IPN is fresh,
 * recomputes the AES-128-CMAC MIC over AAD || body || MMIE and compares
 * it to the received MIC. On success the caller's IPN is advanced.
 * Returns VOS_TRUE when the MMIE is valid, VOS_FALSE otherwise.
 *
 * Fix: the MIC-mismatch branch now releases the cipher via the common
 * err_tfm path; the original returned directly and leaked 'tfm' on every
 * corrupted/forged frame.
 */
v_BOOL_t vos_is_mmie_valid(v_U8_t *igtk, v_U8_t *ipn,
			   v_U8_t* frm, v_U8_t* efrm)
{
	struct ieee80211_mmie *mmie;
	struct ieee80211_frame *wh;
	v_U8_t *rx_ipn, aad[AAD_LEN], mic[CMAC_TLEN], *input;
	v_U16_t nBytes = 0;
	int ret = 0;
	struct crypto_cipher *tfm;

	/* Check if frame is invalid length */
	if ((efrm < frm) || ((efrm - frm) < sizeof(*wh))) {
		VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
			  "Invalid frame length");
		return VOS_FALSE;
	}
	mmie = (struct ieee80211_mmie *)(efrm - sizeof(*mmie));

	/* Check Element ID */
	if ((mmie->element_id != IEEE80211_ELEMID_MMIE) ||
	    (mmie->length != (sizeof(*mmie)-2))) {
		VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
			  "IE is not Mgmt MIC IE or Invalid length");
		/* IE is not Mgmt MIC IE or invalid length */
		return VOS_FALSE;
	}

	/* Validate IPN */
	rx_ipn = mmie->sequence_number;
	if (OS_MEMCMP(rx_ipn, ipn, CMAC_IPN_LEN) <= 0) {
		/* Replay error */
		VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
			  "Replay error mmie ipn %02X %02X %02X %02X %02X %02X"
			  " drvr ipn %02X %02X %02X %02X %02X %02X",
			  rx_ipn[0], rx_ipn[1], rx_ipn[2], rx_ipn[3],
			  rx_ipn[4], rx_ipn[5], ipn[0], ipn[1], ipn[2],
			  ipn[3], ipn[4], ipn[5]);
		return VOS_FALSE;
	}

#if !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO))
	tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
#else
	tfm = wcnss_wlan_crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
#endif
	if (IS_ERR(tfm)) {
		ret = PTR_ERR(tfm);
		tfm = NULL;
		VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
			  "crypto_alloc_cipher failed (%d)", ret);
		goto err_tfm;
	}

	ret = crypto_cipher_setkey(tfm, igtk, AES_KEYSIZE_128);
	if (ret) {
		VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
			  "crypto_cipher_setkey failed (%d)", ret);
		goto err_tfm;
	}

	/* Construct AAD */
	wh = (struct ieee80211_frame *)frm;

	/* Generate BIP AAD: FC(masked) || A1 || A2 || A3 */

	/* FC type/subtype */
	aad[0] = wh->i_fc[0];
	/* Mask FC Retry, PwrMgt, MoreData flags to zero */
	aad[1] = wh->i_fc[1] & ~(IEEE80211_FC1_RETRY | IEEE80211_FC1_PWR_MGT |
				 IEEE80211_FC1_MORE_DATA);
	/* A1 || A2 || A3 */
	vos_mem_copy(aad + 2, wh->i_addr_all, 3 * IEEE80211_ADDR_LEN);

	/* MIC = AES-128-CMAC(IGTK, AAD || Management Frame Body || MMIE, 64) */
	nBytes = AAD_LEN + (efrm - (v_U8_t*)(wh+1));
	input = (v_U8_t *)vos_mem_malloc(nBytes);
	if (NULL == input) {
		VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
			  "Memory allocation failed");
		ret = VOS_STATUS_E_NOMEM;
		goto err_tfm;
	}

	/* Copy the AAD, MMIE with 8 bit MIC zeroed out */
	vos_mem_zero(input, nBytes);
	vos_mem_copy(input, aad, AAD_LEN);
	vos_mem_copy(input+AAD_LEN, (v_U8_t*)(wh+1),
		     nBytes - AAD_LEN - CMAC_TLEN);

#if !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO))
	cmac_calc_mic(tfm, input, nBytes, mic);
#else
	wcnss_wlan_cmac_calc_mic(tfm, input, nBytes, mic);
#endif
	vos_mem_free(input);

	VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
		  "CMAC(T)= %02X %02X %02X %02X %02X %02X %02X %02X",
		  mic[0], mic[1], mic[2], mic[3],
		  mic[4], mic[5], mic[6], mic[7]);

	if (OS_MEMCMP(mic, mmie->mic, CMAC_TLEN) != 0) {
		/* MMIE MIC mismatch */
		VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
			  "BC/MC MGMT frame MMIE MIC check Failed"
			  " rmic %02X %02X %02X %02X %02X %02X %02X %02X"
			  " cmic %02X %02X %02X %02X %02X %02X %02X %02X",
			  mmie->mic[0], mmie->mic[1], mmie->mic[2],
			  mmie->mic[3], mmie->mic[4], mmie->mic[5],
			  mmie->mic[6], mmie->mic[7], mic[0], mic[1],
			  mic[2], mic[3], mic[4], mic[5], mic[6], mic[7]);
		/* Fix: free the tfm via the common error path; the
		 * original returned here and leaked the cipher. */
		ret = -EINVAL;
		goto err_tfm;
	}

	/* Update IPN */
	vos_mem_copy(ipn, rx_ipn, CMAC_IPN_LEN);

err_tfm:
	if (tfm) {
#if !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO))
		crypto_free_cipher(tfm);
#else
		wcnss_wlan_crypto_free_cipher(tfm);
#endif
	}

	return !ret?VOS_TRUE:VOS_FALSE;
}
/*
 * Build and attach the Management MIC IE (MMIE) to an outgoing protected
 * broadcast/multicast management frame: fills element id/length/key id,
 * advances and stores the IPN, then computes the AES-128-CMAC MIC over
 * AAD || frame body || MMIE(MIC zeroed) and writes it into the MMIE.
 * On any failure the caller's IPN is rolled back and VOS_FALSE returned.
 */
v_BOOL_t vos_attach_mmie(v_U8_t *igtk, v_U8_t *ipn, u_int16_t key_id,
			 v_U8_t* frm, v_U8_t* efrm, u_int16_t frmLen)
{
	struct ieee80211_mmie *mmie;
	struct ieee80211_frame *wh;
	v_U8_t aad[AAD_LEN], mic[CMAC_TLEN], *input = NULL;
	v_U8_t previous_ipn[IEEE80211_MMIE_IPNLEN] = {0};
	v_U16_t nBytes = 0;
	int ret = 0;
	struct crypto_cipher *tfm;

	/* This is how received frame look like
	 *
	 *        <------------frmLen---------------------------->
	 *
	 *        +---------------+----------------------+-------+
	 *        | 802.11 HEADER | Management framebody | MMIE  |
	 *        +---------------+----------------------+-------+
	 *                                                       ^
	 *                                                       |
	 *                                                      efrm
	 * This is how MMIE from above frame look like
	 *
	 *
	 *        <------------ 18 Bytes----------------------------->
	 *        +--------+---------+---------+-----------+---------+
	 *        |Element | Length  | Key id  |   IPN     |  MIC    |
	 *        |   id   |         |         |           |         |
	 *        +--------+---------+---------+-----------+---------+
	 * Octet     1          1         2         6          8
	 *
	 */

	/* Check if frame is invalid length */
	if (((efrm - frm) != frmLen) || (frmLen < sizeof(*wh))) {
		VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
			  "%s: Invalid frame length", __func__);
		return VOS_FALSE;
	}

	mmie = (struct ieee80211_mmie *)(efrm - sizeof(*mmie));

	/* Copy Element id */
	mmie->element_id = IEEE80211_ELEMID_MMIE;

	/* Copy Length */
	mmie->length = sizeof(*mmie)-2;

	/* Copy Key id */
	mmie->key_id = key_id;

	/*
	 * In case of error, revert back to original IPN
	 * to do that copy the original IPN into previous_ipn
	 */
	vos_mem_copy(&previous_ipn[0], ipn, IEEE80211_MMIE_IPNLEN);
	vos_increase_seq(ipn);
	vos_mem_copy(mmie->sequence_number, ipn, IEEE80211_MMIE_IPNLEN);

	/*
	 * Calculate MIC and then copy
	 */
#if !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO))
	tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
#else
	tfm = wcnss_wlan_crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
#endif
	if (IS_ERR(tfm)) {
		ret = PTR_ERR(tfm);
		tfm = NULL;
		VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
			  "%s: crypto_alloc_cipher failed (%d)",
			  __func__, ret);
		goto err_tfm;
	}

	ret = crypto_cipher_setkey(tfm, igtk, AES_KEYSIZE_128);
	if (ret) {
		VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
			  "%s: crypto_cipher_setkey failed (%d)",
			  __func__, ret);
		goto err_tfm;
	}

	/* Construct AAD */
	wh = (struct ieee80211_frame *)frm;

	/* Generate BIP AAD: FC(masked) || A1 || A2 || A3 */

	/* FC type/subtype */
	aad[0] = wh->i_fc[0];
	/* Mask FC Retry, PwrMgt, MoreData flags to zero */
	aad[1] = wh->i_fc[1] & ~(IEEE80211_FC1_RETRY | IEEE80211_FC1_PWR_MGT |
				 IEEE80211_FC1_MORE_DATA);
	/* A1 || A2 || A3 */
	vos_mem_copy(aad + 2, wh->i_addr_all, 3 * IEEE80211_ADDR_LEN);

	/* MIC = AES-128-CMAC(IGTK, AAD || Management Frame Body || MMIE, 64) */
	nBytes = AAD_LEN + (frmLen - sizeof(struct ieee80211_frame));
	input = (v_U8_t *)vos_mem_malloc(nBytes);
	if (NULL == input) {
		VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
			  "%s: Memory allocation failed", __func__);
		ret = VOS_STATUS_E_NOMEM;
		goto err_tfm;
	}

	/*
	 * Copy the AAD, Management frame body, and
	 * MMIE with 8 bit MIC zeroed out
	 */
	vos_mem_zero(input, nBytes);
	vos_mem_copy(input, aad, AAD_LEN);
	/* Copy Management Frame Body and MMIE without MIC*/
	vos_mem_copy(input+AAD_LEN,
		     (v_U8_t*)(efrm-(frmLen-sizeof(struct ieee80211_frame))),
		     nBytes - AAD_LEN - CMAC_TLEN);

#if !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO))
	cmac_calc_mic(tfm, input, nBytes, mic);
#else
	wcnss_wlan_cmac_calc_mic(tfm, input, nBytes, mic);
#endif
	vos_mem_free(input);

	VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_INFO_HIGH,
		  "CMAC(T)= %02X %02X %02X %02X %02X %02X %02X %02X",
		  mic[0], mic[1], mic[2], mic[3],
		  mic[4], mic[5], mic[6], mic[7]);
	vos_mem_copy(mmie->mic, mic, IEEE80211_MMIE_MICLEN);

err_tfm:
	/* Roll the IPN back so a failed attach is side-effect free. */
	if (ret) {
		vos_mem_copy(ipn, previous_ipn, IEEE80211_MMIE_IPNLEN);
	}

	if (tfm)
#if !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO))
		crypto_free_cipher(tfm);
#else
		wcnss_wlan_crypto_free_cipher(tfm);
#endif
	return !ret?VOS_TRUE:VOS_FALSE;
}
/*
 * Module init: allocate the AES cipher, the backing store, the request
 * queue, register the block device, create the gendisk and the sysfs key
 * attribute. Returns 0 on success or a negative errno.
 *
 * Fixes over the original:
 *  - vmalloc failure no longer leaks the cipher tfm;
 *  - register_blkdev/alloc_disk/device_register failures now clean up
 *    the request queue (and disk) instead of leaking them;
 *  - each failure returns its real errno instead of a blanket -ENOMEM.
 */
static int __init sbd_init(void)
{
	/*
	 * Set up our internal device.
	 */
	int ret;

	tfm = crypto_alloc_cipher("aes", 0, 16);
	if (IS_ERR(tfm)) {
		printk(KERN_ERR "alg: cipher: Failed to load transform");
		return PTR_ERR(tfm);
	}

	Device.size = nsectors * logical_block_size;
	spin_lock_init(&Device.lock);
	Device.data = vmalloc(Device.size);
	if (Device.data == NULL) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	/*
	 * Get a request queue.
	 */
	Queue = blk_init_queue(sbd_request, &Device.lock);
	if (Queue == NULL) {
		ret = -ENOMEM;
		goto out_free_data;
	}
	blk_queue_logical_block_size(Queue, logical_block_size);

	/*
	 * Get registered.
	 */
	major_num = register_blkdev(major_num, "sbd");
	if (major_num < 0) {
		printk(KERN_WARNING "sbd: unable to get major number\n");
		ret = major_num;
		goto out_cleanup_queue;
	}

	/*
	 * And the gendisk structure.
	 */
	Device.gd = alloc_disk(16);
	if (!Device.gd) {
		ret = -ENOMEM;
		goto out_unregister;
	}
	Device.gd->major = major_num;
	Device.gd->first_minor = 0;
	Device.gd->fops = &sbd_ops;
	Device.gd->private_data = &Device;
	strcpy(Device.gd->disk_name, "sbd0");
	set_capacity(Device.gd, nsectors);
	Device.gd->queue = Queue;
	add_disk(Device.gd);

	ret = device_register(&rd_root_dev);
	if (ret < 0)
		goto out_del_disk;
	ret = device_create_file(&rd_root_dev, &dev_attr_key);
	if (ret < 0) {
		device_unregister(&rd_root_dev);
		goto out_del_disk;
	}

	return 0;

out_del_disk:
	del_gendisk(Device.gd);
	put_disk(Device.gd);
out_unregister:
	unregister_blkdev(major_num, "sbd");
out_cleanup_queue:
	blk_cleanup_queue(Queue);
out_free_data:
	vfree(Device.data);
out_free_tfm:
	crypto_free_cipher(tfm);
	return ret;
}
/*
 * Release the AES tfm held by a PRNG context.
 * NOTE(review): ctx->tfm may be NULL when reset_prng_context() failed to
 * allocate it — this assumes crypto_free_cipher() tolerates NULL on the
 * target kernel; confirm, or guard with `if (ctx->tfm)` as
 * reset_prng_context() does.
 */
static void free_prng_context(struct prng_context *ctx)
{
	crypto_free_cipher(ctx->tfm);
}