Ejemplo n.º 1
0
/*
 * Install a new TCP Fast Open AES key of @len bytes, atomically replacing
 * any previously installed context via RCU.  The old context (if any) is
 * freed after a grace period.  Returns 0 on success or a negative errno.
 *
 * Fix: the original placed the cleanup label inside the alloc-failure
 * branch, so a setkey failure re-printed the misleading "aes cipher alloc
 * error" message on its way out.
 */
int tcp_fastopen_reset_cipher(void *key, unsigned int len)
{
	int err;
	struct tcp_fastopen_context *ctx, *octx;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(ctx->tfm)) {
		err = PTR_ERR(ctx->tfm);
		pr_err("TCP: TFO aes cipher alloc error: %d\n", err);
		goto err_free_ctx;
	}

	err = crypto_cipher_setkey(ctx->tfm, key, len);
	if (err) {
		pr_err("TCP: TFO cipher key error: %d\n", err);
		goto err_free_tfm;
	}
	memcpy(ctx->key, key, len);

	spin_lock(&tcp_fastopen_ctx_lock);
	/* Swap the new context in under the lock; readers still using the
	 * old one are protected by RCU, so defer its release. */
	octx = rcu_dereference_protected(tcp_fastopen_ctx,
				lockdep_is_held(&tcp_fastopen_ctx_lock));
	rcu_assign_pointer(tcp_fastopen_ctx, ctx);
	spin_unlock(&tcp_fastopen_ctx_lock);

	if (octx)
		call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
	return 0;

err_free_tfm:
	crypto_free_cipher(ctx->tfm);
err_free_ctx:
	kfree(ctx);
	return err;
}
Ejemplo n.º 2
0
/*
 * Allocate an AES cipher for ESSIV IV generation, derive the salt from
 * @raw_key and key the cipher with it.  The tfm is stored in
 * ci->ci_essiv_tfm; the on-stack salt is wiped before returning.
 */
static int init_essiv_generator(struct fscrypt_info *ci, const u8 *raw_key,
				int keysize)
{
	struct crypto_cipher *essiv_tfm;
	u8 salt[SHA256_DIGEST_SIZE];
	int err;

	essiv_tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(essiv_tfm))
		return PTR_ERR(essiv_tfm);

	ci->ci_essiv_tfm = essiv_tfm;

	err = derive_essiv_salt(raw_key, keysize, salt);
	if (err)
		goto out;

	/*
	 * Using SHA256 to derive the salt/key will result in AES-256 being
	 * used for IV generation. File contents encryption will still use the
	 * configured keysize (AES-128) nevertheless.
	 */
	err = crypto_cipher_setkey(essiv_tfm, salt, sizeof(salt));
out:
	/* Never leave derived key material on the stack. */
	memzero_explicit(salt, sizeof(salt));
	return err;
}
Ejemplo n.º 3
0
/*
 * Software helper to derive the GCM hash subkey: encrypt an all-zero
 * 16-byte block with @key and store the result at @hash_start as LE32
 * words.  Returns 0 on success or a negative errno.
 */
static int crypto4xx_compute_gcm_hash_key_sw(__le32 *hash_start, const u8 *key,
					     unsigned int keylen)
{
	uint8_t zero_block[16] = { 0 };
	struct crypto_cipher *cipher;
	int rc;

	cipher = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(cipher)) {
		rc = PTR_ERR(cipher);
		pr_warn("could not load aes cipher driver: %d\n", rc);
		return rc;
	}

	rc = crypto_cipher_setkey(cipher, key, keylen);
	if (rc) {
		pr_err("setkey() failed: %d\n", rc);
		goto free_cipher;
	}

	/* Hash subkey = E_K(0^128). */
	crypto_cipher_encrypt_one(cipher, zero_block, zero_block);
	crypto4xx_memcpy_to_le32(hash_start, zero_block, 16);
free_cipher:
	crypto_free_cipher(cipher);
	return rc;
}
Ejemplo n.º 4
0
/* Set up per cpu cipher state: allocate an ESSIV cipher, verify its block
 * size matches the data cipher's IV size, and key it with @salt.
 * Returns the tfm on success or an ERR_PTR with ti->error set. */
static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
        struct dm_target *ti,
        u8 *salt, unsigned saltsize)
{
    struct crypto_cipher *tfm;
    int r;

    /* Setup the essiv_tfm with the given salt */
    tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
    if (IS_ERR(tfm)) {
        ti->error = "Error allocating crypto tfm for ESSIV";
        return tfm;
    }

    /* The IV is produced by encrypting a block, so the generator's block
     * size must equal the data cipher's IV size. */
    if (crypto_cipher_blocksize(tfm) !=
            crypto_ablkcipher_ivsize(any_tfm(cc))) {
        ti->error = "Block size of ESSIV cipher does "
                    "not match IV size of block cipher";
        crypto_free_cipher(tfm);
        return ERR_PTR(-EINVAL);
    }

    r = crypto_cipher_setkey(tfm, salt, saltsize);
    if (r) {
        ti->error = "Failed to set key for ESSIV cipher";
        crypto_free_cipher(tfm);
        return ERR_PTR(r);
    }

    return tfm;
}
/* tfm init hook: allocate the backing AES cipher and prime the PRNG
 * context with default material, then force a reset before use. */
static int cprng_init(struct crypto_tfm *tfm)
{
	struct prng_context *prng = crypto_tfm_ctx(tfm);

	spin_lock_init(&prng->prng_lock);

	prng->tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(prng->tfm)) {
		dbgprint(KERN_CRIT "Failed to alloc tfm for context %p\n",
				prng);
		return PTR_ERR(prng->tfm);
	}

	if (reset_prng_context(prng, NULL, DEFAULT_PRNG_KSZ, NULL, NULL) < 0)
		return -EINVAL;

	/*
	 * After allocation we always force the user to reset so they don't
	 * inadvertently use the insecure default values without specifying
	 * them intentionally.
	 */
	prng->flags |= PRNG_NEED_RESET;
	return 0;
}
Ejemplo n.º 6
0
/*
 * (Re)initialise a PRNG context under its lock: install key/V/DT material
 * (falling back to the insecure defaults when not supplied) and rebuild
 * the AES cipher.  Returns 0 on success, -EINVAL on failure; on failure
 * PRNG_NEED_RESET stays set and ctx->tfm is NULL.
 *
 * Fix: on setkey failure the freed ctx->tfm was left dangling, so a
 * subsequent reset or exit path could double-free it.
 */
static int reset_prng_context(struct prng_context *ctx,
			      unsigned char *key, size_t klen,
			      unsigned char *V, unsigned char *DT)
{
	int ret;
	int rc = -EINVAL;
	unsigned char *prng_key;

	spin_lock(&ctx->prng_lock);
	ctx->flags |= PRNG_NEED_RESET;

	prng_key = (key != NULL) ? key : (unsigned char *)DEFAULT_PRNG_KEY;

	if (!key)
		klen = DEFAULT_PRNG_KSZ;

	if (V)
		memcpy(ctx->V, V, DEFAULT_BLK_SZ);
	else
		memcpy(ctx->V, DEFAULT_V_SEED, DEFAULT_BLK_SZ);

	if (DT)
		memcpy(ctx->DT, DT, DEFAULT_BLK_SZ);
	else
		memset(ctx->DT, 0, DEFAULT_BLK_SZ);

	memset(ctx->rand_data, 0, DEFAULT_BLK_SZ);
	memset(ctx->last_rand_data, 0, DEFAULT_BLK_SZ);

	/* Drop any previous cipher before allocating a fresh one. */
	if (ctx->tfm)
		crypto_free_cipher(ctx->tfm);

	ctx->tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(ctx->tfm)) {
		dbgprint(KERN_CRIT "Failed to alloc tfm for context %p\n",
			ctx);
		ctx->tfm = NULL;
		goto out;
	}

	ctx->rand_data_valid = DEFAULT_BLK_SZ;

	ret = crypto_cipher_setkey(ctx->tfm, prng_key, klen);
	if (ret) {
		dbgprint(KERN_CRIT "PRNG: setkey() failed flags=%x\n",
			crypto_cipher_get_flags(ctx->tfm));
		crypto_free_cipher(ctx->tfm);
		/* Clear the stale pointer so a later reset/exit cannot
		 * double-free the cipher. */
		ctx->tfm = NULL;
		goto out;
	}

	rc = 0;
	ctx->flags &= ~PRNG_NEED_RESET;
out:
	spin_unlock(&ctx->prng_lock);

	return rc;

}
Ejemplo n.º 7
0
/*
 * ESSIV IV-generator constructor: allocate the digest used to derive the
 * salt, the salt buffer, and the cipher that turns sector numbers into
 * IVs, wiring all three into the crypt_config on success.
 */
static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm = NULL;
	struct crypto_hash *hash_tfm = NULL;
	u8 *salt = NULL;
	int ret;

	if (!opts) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Digest that maps the cipher key onto the ESSIV salt. */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		ret = PTR_ERR(hash_tfm);
		goto bad;
	}

	salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
	if (!salt) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		ret = -ENOMEM;
		goto bad;
	}

	/* Cipher used to encrypt the sector number into the IV. */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		ret = PTR_ERR(essiv_tfm);
		goto bad;
	}
	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_ablkcipher_ivsize(cc->tfm)) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		ret = -EINVAL;
		goto bad;
	}

	cc->iv_gen_private.essiv.salt = salt;
	cc->iv_gen_private.essiv.tfm = essiv_tfm;
	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;

	return 0;

bad:
	/* Either tfm pointer may hold an ERR_PTR here; test before freeing. */
	if (essiv_tfm && !IS_ERR(essiv_tfm))
		crypto_free_cipher(essiv_tfm);
	if (hash_tfm && !IS_ERR(hash_tfm))
		crypto_free_hash(hash_tfm);
	kfree(salt);
	return ret;
}
Ejemplo n.º 8
0
Archivo: crypto.c Proyecto: 7799/linux
/*
 * WUSB Pseudo Random Function (WUSB1.0[6.5])
 *
 * @b: buffer to the source data; cannot be a global or const local
 *     (will confuse the scatterlists)
 *
 * Generates @len bits of keystream into @out, one wusb_ccm_mac() round
 * per 64 bits, bumping the nonce's frame number each round.  Returns the
 * number of bytes produced or a negative errno.
 */
ssize_t wusb_prf(void *out, size_t out_size,
		 const u8 key[16], const struct aes_ccm_nonce *_n,
		 const struct aes_ccm_label *a,
		 const void *b, size_t blen, size_t len)
{
	ssize_t result, bytes = 0, bitr;
	/* Local copy of the nonce: the sfn field is rewritten per round. */
	struct aes_ccm_nonce n = *_n;
	struct crypto_blkcipher *tfm_cbc;
	struct crypto_cipher *tfm_aes;
	u64 sfn = 0;
	__le64 sfn_le;

	/* Both transforms are keyed with the same 16-byte key. */
	tfm_cbc = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm_cbc)) {
		result = PTR_ERR(tfm_cbc);
		printk(KERN_ERR "E: can't load CBC(AES): %d\n", (int)result);
		goto error_alloc_cbc;
	}
	result = crypto_blkcipher_setkey(tfm_cbc, key, 16);
	if (result < 0) {
		printk(KERN_ERR "E: can't set CBC key: %d\n", (int)result);
		goto error_setkey_cbc;
	}

	tfm_aes = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm_aes)) {
		result = PTR_ERR(tfm_aes);
		printk(KERN_ERR "E: can't load AES: %d\n", (int)result);
		goto error_alloc_aes;
	}
	result = crypto_cipher_setkey(tfm_aes, key, 16);
	if (result < 0) {
		printk(KERN_ERR "E: can't set AES key: %d\n", (int)result);
		goto error_setkey_aes;
	}

	/* (len + 63) / 64 rounds: 64 bits of output per CCM-MAC call. */
	for (bitr = 0; bitr < (len + 63) / 64; bitr++) {
		sfn_le = cpu_to_le64(sfn++);
		memcpy(&n.sfn, &sfn_le, sizeof(n.sfn));	/* n.sfn++... */
		result = wusb_ccm_mac(tfm_cbc, tfm_aes, out + bytes,
				      &n, a, b, blen);
		if (result < 0)
			goto error_ccm_mac;
		bytes += result;
	}
	result = bytes;
	/* The error ladder below doubles as the success cleanup path. */
error_ccm_mac:
error_setkey_aes:
	crypto_free_cipher(tfm_aes);
error_alloc_aes:
error_setkey_cbc:
	crypto_free_blkcipher(tfm_cbc);
error_alloc_cbc:
	return result;
}
Ejemplo n.º 9
0
/* Allocate the ARC4 transforms for WEP TX/RX and randomise the IV
 * counter.  On failure both tfm slots are left holding ERR_PTRs so
 * later code can detect the unusable state. */
int ieee80211_wep_init(struct ieee80211_local *local)
{
	struct crypto_cipher *tx, *rx;

	/* start WEP IV from a random value */
	get_random_bytes(&local->wep_iv, WEP_IV_LEN);

	tx = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC);
	local->wep_tx_tfm = tx;
	if (IS_ERR(tx)) {
		local->wep_rx_tfm = ERR_PTR(-EINVAL);
		return PTR_ERR(tx);
	}

	rx = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC);
	local->wep_rx_tfm = rx;
	if (IS_ERR(rx)) {
		crypto_free_cipher(tx);
		local->wep_tx_tfm = ERR_PTR(-EINVAL);
		return PTR_ERR(rx);
	}

	return 0;
}
Ejemplo n.º 10
0
/*
 * Read @size bytes at @offset from the lower file backing @file into
 * @data, temporarily forcing FMODE_READ and a kernel address limit.
 * With WRAPFS_CRYPTO the data is AES-decrypted in place afterwards.
 * Returns the vfs_read() result, or -EIO if there is no lower file.
 *
 * Fix: the crypto error paths jumped past set_fs(fs_save), leaving the
 * kernel address-limit override active on failure.
 */
int wrapfs_read_lower(char *data, loff_t offset, size_t size,
			struct inode *wrapfs_inode, struct file *file)
{
	struct file *lower_file;
	mm_segment_t fs_save;
	ssize_t rc;
	mode_t previous_mode;

#ifdef WRAPFS_CRYPTO
	struct crypto_cipher *tfm;
	char *decrypted_page_buffer = NULL;	/* freed on all exit paths */
#endif

	lower_file = wrapfs_lower_file(file);
	if (!lower_file)
		return -EIO;
	fs_save = get_fs();
	set_fs(get_ds());
	/* Temporarily grant read permission on the lower file. */
	previous_mode = lower_file->f_mode;
	lower_file->f_mode |= FMODE_READ;
	rc = vfs_read(lower_file, data, size, &offset);
	lower_file->f_mode = previous_mode;

#ifdef WRAPFS_CRYPTO
	decrypted_page_buffer = kmalloc(size, GFP_KERNEL);
	if (decrypted_page_buffer == NULL)
		goto out;
	memset(decrypted_page_buffer, 0, size);

	tfm = crypto_alloc_cipher("aes", 0, 16);
	if (IS_ERR(tfm))
		goto fail;
	crypto_cipher_setkey(tfm, WRAPFS_SB(file->f_dentry->d_sb)->key, 16);

	/* NOTE(review): only one AES block is decrypted regardless of
	 * @size — confirm callers only pass single-block reads. */
	crypto_cipher_decrypt_one(tfm, decrypted_page_buffer, data);

	memcpy(data, decrypted_page_buffer, size);
	crypto_free_cipher(tfm);
fail:
	kfree(decrypted_page_buffer);
out:
#endif
	/* Restore the address limit on every path, error or not. */
	set_fs(fs_save);
	return rc;
}
Ejemplo n.º 11
0
    /*
     * Module init: allocate the AES cipher, the backing store, the request
     * queue and the gendisk, then register the block device.
     *
     * Fix: the crypto_alloc_cipher() result was never checked, and the
     * cipher was leaked when vmalloc() failed.
     */
    static int __init sbd_init(void) {

        crypt = crypto_alloc_cipher("aes", 0, 0);
        if (IS_ERR(crypt)) {
            printk(KERN_WARNING "sbd: unable to allocate aes cipher\n");
            return PTR_ERR(crypt);
        }

        /*
         * Set up our internal device.
         */
        Device.size = nsectors * logical_block_size;
        spin_lock_init(&Device.lock);
        Device.data = vmalloc(Device.size);
        if (Device.data == NULL) {
            crypto_free_cipher(crypt);
            return -ENOMEM;
        }
        /*
         * Get a request queue.
         */
        Queue = blk_init_queue(sbd_request, &Device.lock);
        if (Queue == NULL)
            goto out;
        blk_queue_logical_block_size(Queue, logical_block_size);
        /*
         * Get registered.
         */
        major_num = register_blkdev(major_num, "sbd");
        if (major_num < 0) {
            printk(KERN_WARNING "sbd: unable to get major number\n");
            goto out;
        }
        /*
         * And the gendisk structure.
         */
        Device.gd = alloc_disk(16);
        if (!Device.gd)
            goto out_unregister;
        Device.gd->major = major_num;
        Device.gd->first_minor = 0;
        Device.gd->fops = &sbd_ops;
        Device.gd->private_data = &Device;
        strcpy(Device.gd->disk_name, "sbd0");
        set_capacity(Device.gd, nsectors);
        Device.gd->queue = Queue;
        add_disk(Device.gd);

        return 0;

out_unregister:
        unregister_blkdev(major_num, "sbd");
out:
        vfree(Device.data);
        crypto_free_cipher(crypt);
        return -ENOMEM;
    }
Ejemplo n.º 12
0
/*
 * Write a segment of @page_for_lower to the lower file at the offset the
 * page maps to.  With WRAPFS_CRYPTO the page contents are AES-encrypted
 * into a bounce buffer first.  Returns 0 on success, negative on error.
 *
 * Fix: the crypto error paths jumped past kunmap(page_for_lower),
 * leaking the kmap on failure.
 */
int wrapfs_write_lower_page_segment(struct inode *wrapfs_inode,
                                      struct page *page_for_lower,
                                      size_t offset_in_page, size_t size,
                                      struct file *file)
{
        char *virt;
        loff_t offset;
        int rc = -1;

#ifdef WRAPFS_CRYPTO
        unsigned char *encrypted_page_buffer = NULL; /* freed on all paths */
        struct crypto_cipher *tfm;
#endif

        /* Byte offset in the lower file this page segment maps to. */
        offset = ((((loff_t)page_for_lower->index) << PAGE_CACHE_SHIFT)
                  + offset_in_page);
        virt = kmap(page_for_lower);

#ifdef WRAPFS_CRYPTO
        encrypted_page_buffer = kmalloc(size, GFP_KERNEL);
        if (encrypted_page_buffer == NULL)
                goto out;
        memset(encrypted_page_buffer, 0, size);

        tfm = crypto_alloc_cipher("aes", 0, 16);
        if (IS_ERR(tfm))
                goto fail;
        crypto_cipher_setkey(tfm, WRAPFS_SB(file->f_dentry->d_sb)->key, 16);

        /* NOTE(review): only one AES block is encrypted regardless of
         * @size — confirm callers only pass single-block segments. */
        crypto_cipher_encrypt_one(tfm, encrypted_page_buffer, virt);

        rc = wrapfs_write_lower(wrapfs_inode, encrypted_page_buffer, offset, size, file);
        crypto_free_cipher(tfm);
fail:
        kfree(encrypted_page_buffer);
out:
#else
        rc = wrapfs_write_lower(wrapfs_inode, virt, offset, size, file);
#endif

        if (rc > 0)
                rc = 0;
        /* Unmap on every path, including the error gotos above. */
        kunmap(page_for_lower);
        return rc;
}
Ejemplo n.º 13
0
/* Allocate a software fallback cipher instance for this tfm, looked up
 * by the algorithm's cra_name with CRYPTO_ALG_NEED_FALLBACK set. */
static int fallback_init_cip(struct crypto_tfm *tfm)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
	const char *name = tfm->__crt_alg->cra_name;

	op->fallback.cip = crypto_alloc_cipher(name, 0,
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback.cip)) {
		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
		return PTR_ERR(op->fallback.cip);
	}

	return 0;
}
Ejemplo n.º 14
0
/* Allocate a software fallback cipher instance for this tfm, looked up
 * by the algorithm's cra_name with CRYPTO_ALG_NEED_FALLBACK set. */
static int fallback_init_cip(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	const char *name = tfm->__crt_alg->cra_name;

	sctx->fallback.cip = crypto_alloc_cipher(name, 0,
			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(sctx->fallback.cip)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.cip);
	}

	return 0;
}
Ejemplo n.º 15
0
/* Module-parameter setter: parse a hex key string, build a keyed
 * blowfish cipher, and swap it in via update_cipher().  An empty key
 * clears the cipher instead. */
static int param_set_cipher_key(const char *val, struct kernel_param *kp)
{
	struct crypto_cipher *cipher;
	u8 raw[128];
	int klen;
	int ret;

	/* Try to convert the user's key to raw bytes. */
	klen = parse_hex_string(val, raw, ARRAY_SIZE(raw));
	if (klen < 0) {
		printk(KERN_INFO "stubl: Can't parse key.\n");
		return klen;
	}

	/* An empty key disables the tunnel cipher. */
	if (klen == 0) {
		printk(KERN_INFO "stubl: Clearing tunnel key.\n");
		update_cipher(kp->arg, NULL);
		return 0;
	}

	printk(KERN_INFO "stubl: Setting tunnel key.\n");

	cipher = crypto_alloc_cipher("blowfish", 0, 0);
	if (IS_ERR(cipher)) {
		printk(KERN_INFO "stubl: Can't init cipher: %ld\n",
				PTR_ERR(cipher));
		return PTR_ERR(cipher);
	}

	ret = crypto_cipher_setkey(cipher, raw, klen);
	if (ret < 0) {
		printk(KERN_INFO "stubl: Can't set key: %d\n", ret);
		crypto_free_cipher(cipher);
		return ret;
	}

	/* Publish the new cipher to readers via RCU. */
	update_cipher(kp->arg, cipher);

	return 0;
}
Ejemplo n.º 16
0
/*
 * ECB-style AES decrypt of @len bytes from @cdata into @pdata, one block
 * at a time.
 *
 * Fix: the crypto_alloc_cipher() result was used without an IS_ERR
 * check, so an allocation failure dereferenced an ERR_PTR.
 *
 * NOTE(review): @key is accepted but never installed with
 * crypto_cipher_setkey(), so the transform decrypts with whatever key
 * state it has by default — confirm against callers whether a setkey
 * call was intended here.
 */
void aes_decrypt(u8 *cdata, u8 *pdata, u8 *key, int len)
{
	struct crypto_cipher *tfm;
	u8 *ptmp, *ctmp;
	int i;

	tfm = crypto_alloc_cipher("aes", 4, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return;

	ptmp = pdata;
	ctmp = cdata;
	for (i = 0; i < len; i += AES_BLOCK_SIZE) {
		crypto_cipher_decrypt_one(tfm, ptmp, ctmp);
		ptmp += AES_BLOCK_SIZE;
		ctmp += AES_BLOCK_SIZE;
	}

	crypto_free_cipher(tfm);
}
Ejemplo n.º 17
0
/*
 * Encrypt two AES blocks of @pdata into @cdata, then decrypt them back
 * into @ndata and dump each stage (looks like a round-trip self-test).
 *
 * Fix: the crypto_alloc_cipher() result was used without an IS_ERR
 * check, so an allocation failure dereferenced an ERR_PTR.
 *
 * NOTE(review): @key is never installed with crypto_cipher_setkey() —
 * confirm whether that is intentional.
 */
void aes_encrypt(u8 *pdata, u8 *cdata, u8 *ndata, u8 *key)
{
	struct crypto_cipher *tfm;

	tfm = crypto_alloc_cipher("aes", 4, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return;

	crypto_cipher_encrypt_one(tfm, &cdata[0], &pdata[0]);
	crypto_cipher_encrypt_one(tfm, &cdata[16], &pdata[16]);

	dump("PlainText: ", pdata);
	dump("Crypted:   ", cdata);

	crypto_cipher_decrypt_one(tfm, &ndata[0], &cdata[0]);
	crypto_cipher_decrypt_one(tfm, &ndata[16], &cdata[16]);

	dump("Decrypted: ", ndata);

	crypto_free_cipher(tfm);

	return;
}
Ejemplo n.º 18
0
/* tfm constructor: allocate the backing AES cipher, initialise the PRNG
 * context with its defaults, and demand an explicit reset before use. */
static int cprng_init(struct crypto_tfm *tfm)
{
	struct prng_context *ctx = crypto_tfm_ctx(tfm);

	spin_lock_init(&ctx->prng_lock);

	ctx->tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(ctx->tfm)) {
		dbgprint(KERN_CRIT "Failed to alloc tfm for context %p\n",
				ctx);
		return PTR_ERR(ctx->tfm);
	}

	if (reset_prng_context(ctx, NULL, DEFAULT_PRNG_KSZ, NULL, NULL) < 0)
		return -EINVAL;

	/*
	 * Force callers to perform their own reset so the insecure default
	 * values installed above are never used for real output.
	 */
	ctx->flags |= PRNG_NEED_RESET;
	return 0;
}
Ejemplo n.º 19
0
/* Derive the RFC4106 GCM hash subkey by encrypting an all-zero block
 * with the session key.  Returns 0 on success or a negative errno. */
static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
	struct crypto_cipher *cipher;
	int err;

	cipher = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	err = crypto_cipher_setkey(cipher, key, key_len);
	if (err)
		goto free_cipher;

	/* Clear the data in the hash sub key container to zero.
	 * We want to cipher all zeros to create the hash sub key. */
	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
	crypto_cipher_encrypt_one(cipher, hash_subkey, hash_subkey);

free_cipher:
	crypto_free_cipher(cipher);
	return err;
}
Ejemplo n.º 20
0
/*
 * Template constructor for ctr(<cipher>): allocate the underlying cipher
 * just long enough to validate its block size, then build a blkcipher
 * instance that runs it in counter mode.  Returns the new instance or an
 * ERR_PTR.
 */
static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb)
{
	struct crypto_instance *inst;
	const char *name;
	struct crypto_cipher *cipher;
	struct crypto_alg *alg;
	int err;

	err = crypto_check_attr_type(tb, NCRYPTO_ALG_TYPE_BLKCIPHER);
	if (err)
		return ERR_PTR(err);

	name = crypto_attr_alg_name(tb[1]);
	err = PTR_ERR(name);
	if (IS_ERR(name))
		return ERR_PTR(err);

	/* Allocated only to inspect the algorithm; freed before return. */
	cipher = crypto_alloc_cipher(name, 0, 0);
	err = PTR_ERR(cipher);
	if (IS_ERR(cipher))
		return ERR_PTR(err);

	alg = crypto_cipher_tfm(cipher)->__crt_alg;

	/* Block size must be >= 4 bytes. */
	err = -EINVAL;
	if (alg->cra_blocksize < 4)
		goto out_put_alg;

	/* If this is false we'd fail the alignment of crypto_inc. */
	if (alg->cra_blocksize % 4)
		goto out_put_alg;

	inst = ocrypto_alloc_instance("ctr", alg);
	if (IS_ERR(inst))
		goto out;

	/* CTR mode turns the block cipher into a stream cipher: the
	 * instance advertises blocksize 1 and an IV as wide as the
	 * underlying cipher's block. */
	inst->alg.cra_flags = NCRYPTO_ALG_TYPE_BLKCIPHER;
	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = 1;
	inst->alg.cra_alignmask = alg->cra_alignmask | (__alignof__(u32) - 1);
	inst->alg.cra_type = &crypto_blkcipher_type;

	inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
	inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
	inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize;

	inst->alg.cra_ctxsize = sizeof(struct crypto_ctr_ctx);

	inst->alg.cra_init = crypto_ctr_init_tfm;
	inst->alg.cra_exit = crypto_ctr_exit_tfm;

	inst->alg.cra_blkcipher.setkey = crypto_ctr_setkey;
	inst->alg.cra_blkcipher.encrypt = crypto_ctr_crypt;
	inst->alg.cra_blkcipher.decrypt = crypto_ctr_crypt;

out:
	/* The temporary tfm is released on success and failure alike. */
	crypto_free_cipher(cipher);
	return inst;

out_put_alg:
	inst = ERR_PTR(err);
	goto out;
}
/*
 * Validate the Management MIC IE (MMIE) on a received protected
 * broadcast/multicast management frame: check frame/IE lengths and the
 * element id, reject IPN replays, then recompute the AES-128-CMAC MIC
 * over AAD || frame body (MIC zeroed) and compare with the MMIE's MIC.
 * On success the driver IPN is advanced.  Returns VOS_TRUE when valid.
 *
 * Fix: the MIC-mismatch path returned directly, leaking @tfm; it now
 * routes through err_tfm so the cipher is always freed.
 */
v_BOOL_t vos_is_mmie_valid(v_U8_t *igtk, v_U8_t *ipn,
                           v_U8_t* frm, v_U8_t* efrm)
{
    struct ieee80211_mmie  *mmie;
    struct ieee80211_frame *wh;
    v_U8_t *rx_ipn, aad[AAD_LEN], mic[CMAC_TLEN], *input;
    v_U16_t nBytes = 0;
    int ret = 0;
    struct crypto_cipher *tfm = NULL;

    /* Check if frame is invalid length */
    if ((efrm < frm) || ((efrm - frm) < sizeof(*wh))) {
        VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
                "Invalid frame length");
        return VOS_FALSE;
    }

    /* The MMIE sits at the very end of the frame. */
    mmie = (struct ieee80211_mmie *)(efrm - sizeof(*mmie));

    /* Check Element ID */
    if ((mmie->element_id != IEEE80211_ELEMID_MMIE) ||
        (mmie->length != (sizeof(*mmie)-2))) {
        VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
                "IE is not Mgmt MIC IE or Invalid length");
        /* IE is not Mgmt MIC IE or invalid length */
        return VOS_FALSE;
    }

    /* Validate IPN */
    rx_ipn = mmie->sequence_number;
    if (OS_MEMCMP(rx_ipn, ipn, CMAC_IPN_LEN) <= 0)
    {
        /* Replay error */
        VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
            "Replay error mmie ipn %02X %02X %02X %02X %02X %02X"
            " drvr ipn %02X %02X %02X %02X %02X %02X",
            rx_ipn[0], rx_ipn[1], rx_ipn[2], rx_ipn[3], rx_ipn[4], rx_ipn[5],
            ipn[0], ipn[1], ipn[2], ipn[3], ipn[4], ipn[5]);
        return VOS_FALSE;
    }

#if  !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO))
    tfm = crypto_alloc_cipher( "aes", 0, CRYPTO_ALG_ASYNC);
#else
    tfm = wcnss_wlan_crypto_alloc_cipher( "aes", 0, CRYPTO_ALG_ASYNC);
#endif
    if (IS_ERR(tfm)) {
        ret = PTR_ERR(tfm);
        tfm = NULL;
        VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR,
             "crypto_alloc_cipher failed (%d)", ret);
        goto err_tfm;
    }

    ret = crypto_cipher_setkey(tfm, igtk, AES_KEYSIZE_128);
    if (ret) {
        VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR,
             "crypto_cipher_setkey failed (%d)", ret);
        goto err_tfm;
    }

    /* Construct AAD */
    wh = (struct ieee80211_frame *)frm;

    /* Generate BIP AAD: FC(masked) || A1 || A2 || A3 */

    /* FC type/subtype */
    aad[0] = wh->i_fc[0];
    /* Mask FC Retry, PwrMgt, MoreData flags to zero */
    aad[1] = wh->i_fc[1] & ~(IEEE80211_FC1_RETRY | IEEE80211_FC1_PWR_MGT |
                             IEEE80211_FC1_MORE_DATA);
    /* A1 || A2 || A3 */
    vos_mem_copy(aad + 2, wh->i_addr_all, 3 * IEEE80211_ADDR_LEN);

    /* MIC = AES-128-CMAC(IGTK, AAD || Management Frame Body || MMIE, 64) */
    nBytes = AAD_LEN + (efrm - (v_U8_t*)(wh+1));
    input = (v_U8_t *)vos_mem_malloc(nBytes);
    if (NULL == input)
    {
        VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
             "Memory allocation failed");
        ret = VOS_STATUS_E_NOMEM;
        goto err_tfm;
    }

    /* Copy the AAD, MMIE with 8 bit MIC zeroed out */
    vos_mem_zero(input, nBytes);
    vos_mem_copy(input, aad, AAD_LEN);
    vos_mem_copy(input+AAD_LEN, (v_U8_t*)(wh+1), nBytes - AAD_LEN - CMAC_TLEN);

#if  !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO))
    cmac_calc_mic(tfm, input, nBytes, mic);
#else
    wcnss_wlan_cmac_calc_mic(tfm, input, nBytes, mic);
#endif
    vos_mem_free(input);

    VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
            "CMAC(T)= %02X %02X %02X %02X %02X %02X %02X %02X",
            mic[0], mic[1], mic[2], mic[3],
            mic[4], mic[5], mic[6], mic[7]);

    if (OS_MEMCMP(mic, mmie->mic, CMAC_TLEN) != 0) {
        /* MMIE MIC mismatch */
        VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
                "BC/MC MGMT frame MMIE MIC check Failed"
                " rmic %02X %02X %02X %02X %02X %02X %02X %02X"
                " cmic %02X %02X %02X %02X %02X %02X %02X %02X",
                mmie->mic[0], mmie->mic[1], mmie->mic[2], mmie->mic[3],
                mmie->mic[4], mmie->mic[5], mmie->mic[6], mmie->mic[7],
                mic[0], mic[1], mic[2], mic[3],
                mic[4], mic[5], mic[6], mic[7]);
        /* Fall through to err_tfm (instead of returning directly) so the
         * cipher is freed; the nonzero ret yields VOS_FALSE below and the
         * IPN update is skipped. */
        ret = -EINVAL;
        goto err_tfm;
    }

    /* Update IPN */
    vos_mem_copy(ipn, rx_ipn, CMAC_IPN_LEN);

err_tfm:
    if (tfm)
#if  !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO))
        crypto_free_cipher(tfm);
#else
        wcnss_wlan_crypto_free_cipher(tfm);
#endif

    return !ret?VOS_TRUE:VOS_FALSE;
}
/*
 * Build and attach the MMIE (Management MIC IE) to an outgoing protected
 * broadcast/multicast management frame: fill in element id / length /
 * key id, advance the IPN, and compute the AES-128-CMAC MIC over
 * AAD || management frame body || MMIE (with the MIC field zeroed).
 * On any failure the previously saved IPN is restored.
 * Returns VOS_TRUE on success, VOS_FALSE on failure.
 */
v_BOOL_t
vos_attach_mmie(v_U8_t *igtk, v_U8_t *ipn, u_int16_t key_id,
                v_U8_t* frm, v_U8_t* efrm, u_int16_t frmLen)
{
    struct ieee80211_mmie  *mmie;
    struct ieee80211_frame *wh;
    v_U8_t aad[AAD_LEN], mic[CMAC_TLEN], *input = NULL;
    v_U8_t previous_ipn[IEEE80211_MMIE_IPNLEN] = {0};
    v_U16_t nBytes = 0;
    int ret = 0;
    struct crypto_cipher *tfm;

    /*  This is how received frame look like
     *
     *        <------------frmLen---------------------------->
     *
     *        +---------------+----------------------+-------+
     *        | 802.11 HEADER | Management framebody | MMIE  |
     *        +---------------+----------------------+-------+
     *                                                       ^
     *                                                       |
     *                                                      efrm
     *   This is how MMIE from above frame look like
     *
     *
     *        <------------ 18 Bytes----------------------------->
     *        +--------+---------+---------+-----------+---------+
     *        |Element | Length  | Key id  |   IPN     |  MIC    |
     *        |  id    |         |         |           |         |
     *        +--------+---------+---------+-----------+---------+
     * Octet     1         1         2         6            8
     *
     */

    /* Check if frame is invalid length */
    if (((efrm - frm) != frmLen) || (frmLen < sizeof(*wh)))
    {
        VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
                  "%s: Invalid frame length", __func__);
        return VOS_FALSE;
    }
    /* The MMIE occupies the tail of the frame. */
    mmie = (struct ieee80211_mmie *)(efrm - sizeof(*mmie));

    /* Copy Element id */
    mmie->element_id = IEEE80211_ELEMID_MMIE;

    /* Copy Length */
    mmie->length = sizeof(*mmie)-2;

    /* Copy Key id */
    mmie->key_id = key_id;

    /*
     * In case of error, revert back to original IPN
     * to do that copy the original IPN into previous_ipn
     */
    vos_mem_copy(&previous_ipn[0], ipn, IEEE80211_MMIE_IPNLEN);
    vos_increase_seq(ipn);
    vos_mem_copy(mmie->sequence_number, ipn, IEEE80211_MMIE_IPNLEN);

    /*
     * Calculate MIC and then copy
     */
#if  !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO))
    tfm = crypto_alloc_cipher( "aes", 0, CRYPTO_ALG_ASYNC);
#else
    tfm = wcnss_wlan_crypto_alloc_cipher( "aes", 0, CRYPTO_ALG_ASYNC);
#endif
    if (IS_ERR(tfm))
    {
        ret = PTR_ERR(tfm);
        tfm = NULL;
        VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR,
             "%s: crypto_alloc_cipher failed (%d)", __func__, ret);
        goto err_tfm;
    }

    ret = crypto_cipher_setkey(tfm, igtk, AES_KEYSIZE_128);
    if (ret) {
        VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR,
             "%s: crypto_cipher_setkey failed (%d)", __func__, ret);
        goto err_tfm;
    }

    /* Construct AAD */
    wh = (struct ieee80211_frame *)frm;

    /* Generate BIP AAD: FC(masked) || A1 || A2 || A3 */

    /* FC type/subtype */
    aad[0] = wh->i_fc[0];
    /* Mask FC Retry, PwrMgt, MoreData flags to zero */
    aad[1] = wh->i_fc[1] & ~(IEEE80211_FC1_RETRY | IEEE80211_FC1_PWR_MGT |
                             IEEE80211_FC1_MORE_DATA);
    /* A1 || A2 || A3 */
    vos_mem_copy(aad + 2, wh->i_addr_all, 3 * IEEE80211_ADDR_LEN);

    /* MIC = AES-128-CMAC(IGTK, AAD || Management Frame Body || MMIE, 64) */
    nBytes = AAD_LEN + (frmLen - sizeof(struct ieee80211_frame));
    input = (v_U8_t *)vos_mem_malloc(nBytes);
    if (NULL == input)
    {
        VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
                  "%s: Memory allocation failed", __func__);
        ret = VOS_STATUS_E_NOMEM;
        goto err_tfm;
    }

    /*
     * Copy the AAD, Management frame body, and
     * MMIE with 8 bit MIC zeroed out
     */
    vos_mem_zero(input, nBytes);
    vos_mem_copy(input, aad, AAD_LEN);
    /* Copy Management Frame Body and MMIE without MIC*/
    vos_mem_copy(input+AAD_LEN,
                (v_U8_t*)(efrm-(frmLen-sizeof(struct ieee80211_frame))),
                nBytes - AAD_LEN - CMAC_TLEN);

#if  !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO))
    cmac_calc_mic(tfm, input, nBytes, mic);
#else
    wcnss_wlan_cmac_calc_mic(tfm, input, nBytes, mic);
#endif
    vos_mem_free(input);

    VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_INFO_HIGH,
            "CMAC(T)= %02X %02X %02X %02X %02X %02X %02X %02X",
            mic[0], mic[1], mic[2], mic[3],
            mic[4], mic[5], mic[6], mic[7]);
    vos_mem_copy(mmie->mic, mic, IEEE80211_MMIE_MICLEN);


err_tfm:
    /* On failure, roll the IPN back to its pre-call value. */
    if (ret)
    {
       vos_mem_copy(ipn, previous_ipn, IEEE80211_MMIE_IPNLEN);
    }

    if (tfm)
#if  !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO))
       crypto_free_cipher(tfm);
#else
       wcnss_wlan_crypto_free_cipher(tfm);
#endif
    return !ret?VOS_TRUE:VOS_FALSE;
}
Ejemplo n.º 23
0
/*
 * Prepare the hardware key context for an IPsec GCM SA: split the 4-byte
 * nonce/salt off the AEAD key, classify the remaining AES key size,
 * store key + header in @sa_entry, and compute the GHASH subkey
 * H = AES-K(0^128) with a temporary software cipher.  Returns 0 on
 * success or a negative errno (enckey_len is zeroed on failure).
 */
static inline int chcr_ipsec_setkey(struct xfrm_state *x,
				    struct ipsec_sa_entry *sa_entry)
{
	struct crypto_cipher *cipher;
	int keylen = (x->aead->alg_key_len + 7) / 8;
	unsigned char *key = x->aead->alg_key;
	int ck_size, key_ctx_size = 0;
	unsigned char ghash_h[AEAD_H_SIZE];
	int ret = 0;

	if (keylen > 3) {
		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
		memcpy(sa_entry->salt, key + keylen, 4);
	}

	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("GCM: Invalid key length %d\n", keylen);
		ret = -EINVAL;
		goto out;
	}

	memcpy(sa_entry->key, key, keylen);
	sa_entry->enckey_len = keylen;
	/* Header + key rounded up to 16 bytes + room for the GHASH subkey. */
	key_ctx_size = sizeof(struct _key_ctx) +
			      ((DIV_ROUND_UP(keylen, 16)) << 4) +
			      AEAD_H_SIZE;

	sa_entry->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
						 CHCR_KEYCTX_MAC_KEY_SIZE_128,
						 0, 0,
						 key_ctx_size >> 4);

	/* Calculate the H = CIPH(K, 0 repeated 16 times).
	 * It will go in key context
	 */
	cipher = crypto_alloc_cipher("aes-generic", 0, 0);
	if (IS_ERR(cipher)) {
		sa_entry->enckey_len = 0;
		ret = -ENOMEM;
		goto out;
	}

	ret = crypto_cipher_setkey(cipher, key, keylen);
	if (ret) {
		sa_entry->enckey_len = 0;
		goto out1;
	}
	memset(ghash_h, 0, AEAD_H_SIZE);
	crypto_cipher_encrypt_one(cipher, ghash_h, ghash_h);
	/* Append H directly after the 16-byte-aligned cipher key. */
	memcpy(sa_entry->key + (DIV_ROUND_UP(sa_entry->enckey_len, 16) *
	       16), ghash_h, AEAD_H_SIZE);
	sa_entry->kctx_len = ((DIV_ROUND_UP(sa_entry->enckey_len, 16)) << 4) +
			      AEAD_H_SIZE;
out1:
	crypto_free_cipher(cipher);
out:
	return ret;
}
Ejemplo n.º 24
0
/*
 * Module init: allocate the AES cipher, the in-memory backing store, the
 * request queue and gendisk, register the block device and create the
 * key sysfs attribute.
 *
 * Fix: when vmalloc() failed the function returned without freeing the
 * cipher allocated just above, leaking the tfm.
 */
static int __init sbd_init(void) {
	/*
	 * Set up our internal device.
	 */

	int ret;

	tfm = crypto_alloc_cipher("aes", 0, 16);
	if (IS_ERR(tfm)){
		printk(KERN_ERR "alg: cipher: Failed to load transform");
		return PTR_ERR(tfm);
	}
	Device.size = nsectors * logical_block_size;
	spin_lock_init(&Device.lock);
	Device.data = vmalloc(Device.size);
	if (Device.data == NULL) {
		/* Free the cipher allocated above instead of leaking it. */
		crypto_free_cipher(tfm);
		return -ENOMEM;
	}
	/*
	 * Get a request queue.
	 */
	Queue = blk_init_queue(sbd_request, &Device.lock);
	if (Queue == NULL)
		goto out;
	blk_queue_logical_block_size(Queue, logical_block_size);
	/*
	 * Get registered.
	 */
	major_num = register_blkdev(major_num, "sbd");
	if (major_num < 0) {
		printk(KERN_WARNING "sbd: unable to get major number\n");
		goto out;
	}
	/*
	 * And the gendisk structure.
	 */
	Device.gd = alloc_disk(16);
	if (!Device.gd)
		goto out_unregister;
	Device.gd->major = major_num;
	Device.gd->first_minor = 0;
	Device.gd->fops = &sbd_ops;
	Device.gd->private_data = &Device;
	strcpy(Device.gd->disk_name, "sbd0");
	set_capacity(Device.gd, nsectors);
	Device.gd->queue = Queue;
	add_disk(Device.gd);

	ret = device_register(&rd_root_dev);
	if (ret < 0)
		goto out_unregister;

	ret = device_create_file(&rd_root_dev, &dev_attr_key);
	if (ret < 0) {
		device_unregister(&rd_root_dev);
		goto out_unregister;
	}

	return 0;

out_unregister:
	unregister_blkdev(major_num, "sbd");
out:
	vfree(Device.data);
	crypto_free_cipher(tfm);
	return -ENOMEM;
}
Ejemplo n.º 25
0
static int __init sbd_init(void) {
	/*
	 * Allocate a single-block AES cipher handle ("aes", type 0,
	 * mask 0) for sector encryption, then set up our internal
	 * block device.
	 *
	 * crypto_alloc_cipher() returns an ERR_PTR() on failure, so it
	 * must be checked with IS_ERR() before use.
	 */
	crypto = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(crypto)) {
		printk(KERN_ERR "sbd: failed to load aes transform\n");
		return PTR_ERR(crypto);
	}

	/*
	 * Set up our internal device.
	 */
	Device.size = nsectors * logical_block_size;
	spin_lock_init(&Device.lock);
	Device.data = vmalloc(Device.size);
	if (Device.data == NULL) {
		/* Don't leak the cipher handle on this early failure. */
		crypto_free_cipher(crypto);
		return -ENOMEM;
	}
	/*
	 * Get a request queue.
	 */
	Queue = blk_init_queue(sbd_request, &Device.lock);
	if (Queue == NULL)
		goto out;
	blk_queue_logical_block_size(Queue, logical_block_size);
	/*
	 * Get registered.
	 */
	major_num = register_blkdev(major_num, "sbd");
	if (major_num < 0) {
		printk(KERN_WARNING "sbd: unable to get major number\n");
		goto out;
	}
	/*
	 * And the gendisk structure.
	 */
	Device.gd = alloc_disk(16);
	if (!Device.gd)
		goto out_unregister;
	Device.gd->major = major_num;
	Device.gd->first_minor = 0;
	Device.gd->fops = &sbd_ops;
	Device.gd->private_data = &Device;
	strcpy(Device.gd->disk_name, "sbd0");
	set_capacity(Device.gd, nsectors);
	Device.gd->queue = Queue;
	add_disk(Device.gd);

	return 0;

out_unregister:
	unregister_blkdev(major_num, "sbd");
out:
	vfree(Device.data);
	/* Release the cipher on every failure path. */
	crypto_free_cipher(crypto);
	return -ENOMEM;
}
Ejemplo n.º 26
0
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	/*
	 * XTS encryption entry point: describe the operation (tweak key,
	 * data key and their callbacks) in an xts_crypt_req and run it
	 * inside an FPU section so the AES-NI routines may be used.
	 */
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 tweak_buf[8];
	struct xts_crypt_req req = {
		.tbuf = tweak_buf,
		.tbuflen = sizeof(tweak_buf),
	};
	int err;

	req.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx);
	req.tweak_fn = aesni_xts_tweak;
	req.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx);
	req.crypt_fn = lrw_xts_encrypt_callback;

	/* Sleeping is not allowed while FPU state is live. */
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	err = xts_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return err;
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	/*
	 * XTS decryption entry point; mirrors xts_encrypt() but with the
	 * decrypt callback wired into the request.
	 */
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 tweak_buf[8];
	struct xts_crypt_req req = {
		.tbuf = tweak_buf,
		.tbuflen = sizeof(tweak_buf),
	};
	int err;

	req.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx);
	req.tweak_fn = aesni_xts_tweak;
	req.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx);
	req.crypt_fn = lrw_xts_decrypt_callback;

	/* Sleeping is not allowed while FPU state is live. */
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	err = xts_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return err;
}

#endif

#ifdef CONFIG_X86_64
static int rfc4106_init(struct crypto_aead *aead)
{
	/*
	 * Bind this AEAD tfm to the internal cryptd-wrapped GCM
	 * implementation and inherit its request size.
	 */
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
	struct cryptd_aead *inner;

	inner = cryptd_alloc_aead("__driver-gcm-aes-aesni",
				  CRYPTO_ALG_INTERNAL, CRYPTO_ALG_INTERNAL);
	if (IS_ERR(inner))
		return PTR_ERR(inner);

	*ctx = inner;
	crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&inner->base));
	return 0;
}

static void rfc4106_exit(struct crypto_aead *aead)
{
	/* Release the cryptd handle allocated in rfc4106_init(). */
	struct cryptd_aead **pctx = crypto_aead_ctx(aead);

	cryptd_free_aead(*pctx);
}

static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
	/*
	 * Derive the GHASH subkey by encrypting an all-zero block with a
	 * temporary single-block AES cipher keyed with the session key.
	 */
	struct crypto_cipher *cipher;
	int err;

	cipher = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	err = crypto_cipher_setkey(cipher, key, key_len);
	if (!err) {
		/* Cipher a zeroed container to produce the subkey. */
		memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
		crypto_cipher_encrypt_one(cipher, hash_subkey, hash_subkey);
	}

	crypto_free_cipher(cipher);
	return err;
}

static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
				  unsigned int key_len)
{
	/*
	 * RFC 4106 keys carry a 4-byte nonce appended to the AES key:
	 * peel it off, expand the AES key, then derive the hash subkey.
	 */
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);
	int err;

	if (key_len < 4) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	/* Account for the 4 byte nonce at the end. */
	key_len -= 4;
	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));

	err = aes_set_key_common(crypto_aead_tfm(aead),
				 &ctx->aes_key_expanded, key, key_len);
	if (err)
		return err;

	return rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}

static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
			   unsigned int key_len)
{
	/* Forward the key to the underlying cryptd-wrapped AEAD. */
	struct cryptd_aead **ctx = crypto_aead_ctx(parent);
	struct cryptd_aead *child = *ctx;

	return crypto_aead_setkey(&child->base, key, key_len);
}

static int common_rfc4106_set_authsize(struct crypto_aead *aead,
				       unsigned int authsize)
{
	/* Only 8-, 12- and 16-byte authentication tags are accepted. */
	if (authsize != 8 && authsize != 12 && authsize != 16)
		return -EINVAL;

	return 0;
}

/* This is the Integrity Check Value (aka the authentication tag length and can
 * be 8, 12 or 16 bytes long. */
static int rfc4106_set_authsize(struct crypto_aead *parent,
				unsigned int authsize)
{
	/* Delegate tag-length validation to the wrapped cryptd AEAD. */
	struct cryptd_aead **ctx = crypto_aead_ctx(parent);

	return crypto_aead_setauthsize(&(*ctx)->base, authsize);
}

/*
 * Perform an RFC 4106 GCM encryption with the AES-NI assembler routine.
 * Builds the 16-byte IV from the 4-byte context nonce, the 8-byte
 * per-request IV and a big-endian block counter of 1, then runs
 * aesni_gcm_enc_tfm() inside an FPU region.  When both src and dst are
 * single scatterlist entries that fit in one page, the buffers are
 * mapped directly; otherwise a contiguous bounce buffer is allocated
 * and the result copied back.  Returns 0 on success or a negative errno.
 */
static int helper_rfc4106_encrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	struct scatter_walk src_sg_walk;
	struct scatter_walk dst_sg_walk = {};
	unsigned int i;

	/* Assuming we are supporting rfc4106 64-bit extended */
	/* sequence numbers We need to have the AAD length equal */
	/* to 16 or 20 bytes */
	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
		return -EINVAL;

	/* IV below built */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	/* Fast path: each of src/dst is one sg entry within a page. */
	if (sg_is_last(req->src) &&
	    req->src->offset + req->src->length <= PAGE_SIZE &&
	    sg_is_last(req->dst) &&
	    req->dst->offset + req->dst->length <= PAGE_SIZE) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		assoc = scatterwalk_map(&src_sg_walk);
		src = assoc + req->assoclen;
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
		}
	} else {
		/* Allocate memory for src, dst, assoc */
		assoc = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
			GFP_ATOMIC);
		if (unlikely(!assoc))
			return -ENOMEM;
		scatterwalk_map_and_copy(assoc, req->src, 0,
					 req->assoclen + req->cryptlen, 0);
		src = assoc + req->assoclen;
		dst = src;
	}

	/* assoclen - 8: the trailing 8 IV bytes are not part of the AAD. */
	kernel_fpu_begin();
	aesni_gcm_enc_tfm(aes_ctx, dst, src, req->cryptlen, iv,
			  ctx->hash_subkey, assoc, req->assoclen - 8,
			  dst + req->cryptlen, auth_tag_len);
	kernel_fpu_end();

	/* The authTag (aka the Integrity Check Value) needs to be written
	 * back to the packet. */
	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst - req->assoclen);
			scatterwalk_advance(&dst_sg_walk, req->dst->length);
			scatterwalk_done(&dst_sg_walk, 1, 0);
		}
		scatterwalk_unmap(assoc);
		scatterwalk_advance(&src_sg_walk, req->src->length);
		scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
					 req->cryptlen + auth_tag_len, 1);
		kfree(assoc);
	}
	return 0;
}

/*
 * Perform an RFC 4106 GCM decryption with the AES-NI assembler routine.
 * Mirrors helper_rfc4106_encrypt(): builds the IV from nonce + request
 * IV + counter, decrypts cryptlen minus the tag length, then compares
 * the computed tag against the received one with crypto_memneq().
 * Returns 0 on success, -EBADMSG on tag mismatch, or a negative errno.
 */
static int helper_rfc4106_decrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	unsigned long tempCipherLen = 0;
	__be32 counter = cpu_to_be32(1);
	int retval = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	u8 authTag[16];
	struct scatter_walk src_sg_walk;
	struct scatter_walk dst_sg_walk = {};
	unsigned int i;

	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
		return -EINVAL;

	/* Assuming we are supporting rfc4106 64-bit extended */
	/* sequence numbers We need to have the AAD length */
	/* equal to 16 or 20 bytes */

	/* Ciphertext proper excludes the trailing authentication tag. */
	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
	/* IV below built */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	/* Fast path: each of src/dst is one sg entry within a page. */
	if (sg_is_last(req->src) &&
	    req->src->offset + req->src->length <= PAGE_SIZE &&
	    sg_is_last(req->dst) &&
	    req->dst->offset + req->dst->length <= PAGE_SIZE) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		assoc = scatterwalk_map(&src_sg_walk);
		src = assoc + req->assoclen;
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
		}

	} else {
		/* Allocate memory for src, dst, assoc */
		assoc = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
		if (!assoc)
			return -ENOMEM;
		scatterwalk_map_and_copy(assoc, req->src, 0,
					 req->assoclen + req->cryptlen, 0);
		src = assoc + req->assoclen;
		dst = src;
	}

	/* assoclen - 8: the trailing 8 IV bytes are not part of the AAD. */
	kernel_fpu_begin();
	aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
			  ctx->hash_subkey, assoc, req->assoclen - 8,
			  authTag, auth_tag_len);
	kernel_fpu_end();

	/* Compare generated tag with passed in tag. */
	/* crypto_memneq() is constant-time to avoid timing side channels. */
	retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
		-EBADMSG : 0;

	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst - req->assoclen);
			scatterwalk_advance(&dst_sg_walk, req->dst->length);
			scatterwalk_done(&dst_sg_walk, 1, 0);
		}
		scatterwalk_unmap(assoc);
		scatterwalk_advance(&src_sg_walk, req->src->length);
		scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
					 tempCipherLen, 1);
		kfree(assoc);
	}
	return retval;
}
Ejemplo n.º 27
0
Archivo: wep.c Proyecto: UNwS/rtl8192su
struct crypto_cipher *ieee80211_wep_init(void)
{
	/*
	 * WEP is built on RC4.  Allocate a single-block "arc4" handle;
	 * masking with CRYPTO_ALG_ASYNC requests a synchronous
	 * implementation.  May return an ERR_PTR(), which the caller
	 * must check with IS_ERR().
	 */
	struct crypto_cipher *rc4;

	rc4 = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC);
	return rc4;
}