Example #1
/* Computes the fastopen cookie for the IP path.
 * The path is 128 bits long (pad with zeros for IPv4).
 *
 * The caller must check foc->len to determine whether a valid cookie
 * has been generated successfully.
 */
void tcp_fastopen_cookie_gen(__be32 src, __be32 dst,
			     struct tcp_fastopen_cookie *foc)
{
	__be32 path[4] = { src, dst, 0, 0 };
	struct tcp_fastopen_context *ctx;

	rcu_read_lock();
	ctx = rcu_dereference(tcp_fastopen_ctx);
	if (ctx) {
		crypto_cipher_encrypt_one(ctx->tfm, foc->val, (__u8 *)path);
		foc->len = TCP_FASTOPEN_COOKIE_SIZE;
	}
	rcu_read_unlock();
}
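All of the examples on this page share the same single-block pattern: allocate a cipher handle, set the key, encrypt exactly one block, free the handle. A minimal sketch of that pattern (demo_encrypt_block is a hypothetical helper, assuming an AES-128 key; it is not taken from any listing here):

static int demo_encrypt_block(const u8 *key, const u8 *in, u8 *out)
{
	struct crypto_cipher *tfm;
	int ret;

	/* "aes" is the raw single-block cipher, not a chaining mode */
	tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_cipher_setkey(tfm, key, 16);	/* AES-128 key assumed */
	if (!ret)
		crypto_cipher_encrypt_one(tfm, out, in);	/* one 16-byte block */

	crypto_free_cipher(tfm);
	return ret;
}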
Example #2
int wrapfs_write_lower_page_segment(struct inode *wrapfs_inode,
				    struct page *page_for_lower,
				    size_t offset_in_page, size_t size,
				    struct file *file)
{
	char *virt;
	loff_t offset;
	int rc = 0;

#ifdef WRAPFS_CRYPTO
	unsigned char *encrypted_page_buffer = NULL;
	struct crypto_cipher *tfm = NULL;
#endif

	offset = ((((loff_t)page_for_lower->index) << PAGE_CACHE_SHIFT)
		  + offset_in_page);
	virt = kmap(page_for_lower);

#ifdef WRAPFS_CRYPTO
	encrypted_page_buffer = kzalloc(size, GFP_KERNEL);
	if (encrypted_page_buffer == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(tfm)) {
		rc = PTR_ERR(tfm);
		tfm = NULL;
		goto out;
	}
	crypto_cipher_setkey(tfm, WRAPFS_SB(file->f_dentry->d_sb)->key, 16);

	/* Note: this encrypts only the first 16-byte block of the page;
	 * a complete implementation would loop over the buffer in
	 * cipher-block-sized steps. */
	crypto_cipher_encrypt_one(tfm, encrypted_page_buffer, virt);

	rc = wrapfs_write_lower(wrapfs_inode, encrypted_page_buffer, offset,
				size, file);
#else
	rc = wrapfs_write_lower(wrapfs_inode, virt, offset, size, file);
#endif

	if (rc > 0)
		rc = 0;
#ifdef WRAPFS_CRYPTO
out:
	if (tfm)
		crypto_free_cipher(tfm);
	kfree(encrypted_page_buffer);
#endif
	kunmap(page_for_lower);
	return rc;
}
Example #3
/*
 * Handle an I/O request.
 */
static void sbd_transfer(struct sbd_device *dev, sector_t sector,
        unsigned long nsect, char *buffer, int write) {
    unsigned long offset = sector * logical_block_size;
    unsigned long nbytes = nsect * logical_block_size;
    int i;
    char *mode[2];
    unsigned long len;
    u8 *dst;
    u8 *src;

    crypto_cipher_setkey(crypt, crypto_key, crypto_key_len);

    if ((offset + nbytes) > dev->size) {
        printk(KERN_NOTICE "sbd: Beyond-end write (%lu %lu)\n", offset, nbytes);
        return;
    }

    if (write) {
        mode[0] = "UNENCRYPTED";
        mode[1] = "ENCRYPTED";
        dst = dev->data + offset;
        src = buffer;
        
        for (i = 0; i < nbytes; i += crypto_cipher_blocksize(crypt)) {
            crypto_cipher_encrypt_one(crypt, dst + i, src + i);
        }

    } else {
        mode[0] = "ENCRYPTED";
        mode[1] = "UNENCRYPTED";
        dst = buffer;
        src = dev->data + offset;
        
        for (i = 0; i < nbytes; i += crypto_cipher_blocksize(crypt)) {
            crypto_cipher_decrypt_one(crypt, dst + i, src + i);
        }
    }


    len = nbytes;
    printk("%s:", mode[1]);
    while (len--)
        printk("%u", (unsigned) *src++);
    
    len = nbytes;
    printk("\n%s:", mode[2]);
    while (len--)
        printk("%u", (unsigned) *dst++);
    printk("\n");
}
Example #4
static bool __tcp_fastopen_cookie_gen(const void *path,
                                      struct tcp_fastopen_cookie *foc)
{
    struct tcp_fastopen_context *ctx;
    bool ok = false;

    rcu_read_lock();
    ctx = rcu_dereference(tcp_fastopen_ctx);
    if (ctx) {
        crypto_cipher_encrypt_one(ctx->tfm, foc->val, path);
        foc->len = TCP_FASTOPEN_COOKIE_SIZE;
        ok = true;
    }
    rcu_read_unlock();
    return ok;
}
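A hypothetical caller sketch, mirroring how Example #1 builds the 128-bit path for IPv4 (the path construction and the fallback handling are assumptions, not part of the listing):

	__be32 path[4] = { src, dst, 0, 0 };	/* pad IPv4 addresses to 128 bits */

	if (!__tcp_fastopen_cookie_gen(path, foc))
		foc->len = 0;	/* no fastopen context installed: no cookie */

The bool return makes the "no context" case explicit, where Example #1 relied on the caller inspecting foc->len.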
Example #5
/*
 * Handle an I/O request.
 */
static void sbd_transfer(struct sbd_device *dev, sector_t sector,
		unsigned long nsect, char *buffer, int write) {
	unsigned long offset = sector * logical_block_size;
	unsigned long nbytes = nsect * logical_block_size;

	int k;

	printk("Before decryption\n");			/* added */
	printing(buffer, nbytes);			/* added */
	printk("\n");
	key_size = strlen(key);				/* added */
	if (key_size == 0) {
		printk(KERN_INFO "no key set\n");
	} else {
		crypto_cipher_clear_flags(tfm, ~0);
		crypto_cipher_setkey(tfm, crypto_key, key_size);
	}

	if ((offset + nbytes) > dev->size) {
		printk(KERN_NOTICE "sbd: Beyond-end write (%lu %lu)\n", offset, nbytes);
		return;
	}
	if (write) {
		printing(buffer, nbytes);
		printk("\n");
		if (key_size != 0) {
			for (k = 0; k < nbytes; k += crypto_cipher_blocksize(tfm)) {
				crypto_cipher_encrypt_one(tfm, dev->data + offset + k, buffer + k);
			}
		} else {
			memcpy(dev->data + offset, buffer, nbytes);
		}
	} else {
		if (key_size != 0) {
			for (k = 0; k < nbytes; k += crypto_cipher_blocksize(tfm)) {
				crypto_cipher_decrypt_one(tfm, buffer + k, dev->data + offset + k);
			}
		} else {
			memcpy(buffer, dev->data + offset, nbytes);
		}
	}
	printk("Decrypted\n");
	printing(buffer, nbytes);
	printk("\n");
}
Example #6
File: ctr.c  Project: Anjali05/linux
static void crypto_ctr_crypt_final(struct skcipher_walk *walk,
				   struct crypto_cipher *tfm)
{
	unsigned int bsize = crypto_cipher_blocksize(tfm);
	unsigned long alignmask = crypto_cipher_alignmask(tfm);
	u8 *ctrblk = walk->iv;
	u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
	u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	crypto_cipher_encrypt_one(tfm, keystream, ctrblk);
	crypto_xor_cpy(dst, keystream, src, nbytes);

	crypto_inc(ctrblk, bsize);
}
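This is CTR mode's final partial block: the cipher only ever encrypts the counter block, so no plaintext padding is needed and the keystream is simply truncated to nbytes. A plain-C sketch of what crypto_xor_cpy() does above (hypothetical helper name):

static void xor_cpy_sketch(u8 *dst, const u8 *keystream,
			   const u8 *src, unsigned int n)
{
	while (n--)			/* dst = src XOR keystream, byte-wise */
		*dst++ = *keystream++ ^ *src++;
}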
Example #7
static int dek_encrypt_dek(int persona_id, dek *plainDek, dek *encDek) {
	int ret = 0;
	int key_arr_idx = PERSONA_KEY_ARR_IDX(persona_id);
	unsigned int i;
	unsigned int bsize;

	if (!dek_is_persona(persona_id)) {
		return -EFAULT;
	}
#if DEK_DEBUG
	printk("dek: plainDek from user: ");
#endif
	/* ... (the listing masks the string contents here; the key lookup
	 * and the actual encryption of plainDek into encDek are elided) ... */
		printk("dek: no encryption key for id: %d\n", persona_id);
		dek_add_to_log(persona_id, "encrypt failed, no key");
		return -EIO;
	}

	if (encDek->len <= 0 || encDek->len > DEK_DEK_ENC_LEN) {
		printk("dek: dek_encrypt_dek, incorrect len=%d\n", encDek->len);
		memset(encDek, 0, sizeof(dek));
		return -EFAULT;
	}
#if DEK_DEBUG
	else {
		printk("dek: encDek to user: ");
		dump(encDek->buf, encDek->len);
	}
#endif

	return ret;
}
Example #8
void aes_encrypt(u8 *pdata, u8 *cdata, u8 *key, int len)
{
	struct crypto_cipher *tfm;
	u8 *ptmp, *ctmp;
	int i;

	tfm = crypto_alloc_cipher("aes", 4, CRYPTO_ALG_ASYNC);

	ptmp = pdata;
	ctmp = cdata;
	for(i=0; i<len; i+=AES_BLOCK_SIZE)
	{
		crypto_cipher_encrypt_one(tfm, ctmp, ptmp);
		ptmp += AES_BLOCK_SIZE;
		ctmp += AES_BLOCK_SIZE;
	}

	crypto_free_cipher(tfm);
}
Example #9
static void
geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	if (unlikely(op->keylen != AES_KEYSIZE_128)) {
		crypto_cipher_encrypt_one(op->fallback.cip, out, in);
		return;
	}

	op->src = (void *) in;
	op->dst = (void *) out;
	op->mode = AES_MODE_ECB;
	op->flags = 0;
	op->len = AES_MIN_BLOCK_SIZE;
	op->dir = AES_DIR_ENCRYPT;

	geode_aes_crypt(op);
}
Example #10
static void get_data_to_compute(struct crypto_cipher *tfm,
			       struct crypto_ccm_req_priv_ctx *pctx,
			       struct scatterlist *sg, unsigned int len)
{
	struct scatter_walk walk;
	u8 *data_src;
	int n;

	scatterwalk_start(&walk, sg);

	while (len) {
		n = scatterwalk_clamp(&walk, len);
		if (!n) {
			scatterwalk_start(&walk, sg_next(walk.sg));
			n = scatterwalk_clamp(&walk, len);
		}
		data_src = scatterwalk_map(&walk);

		compute_mac(tfm, data_src, n, pctx);
		len -= n;

		scatterwalk_unmap(data_src);
		scatterwalk_advance(&walk, n);
		scatterwalk_done(&walk, 0, len);
		if (len)
			crypto_yield(pctx->flags);
	}

	/* any leftover needs padding and is then encrypted */
	if (pctx->ilen) {
		int padlen;
		u8 *odata = pctx->odata;
		u8 *idata = pctx->idata;

		padlen = 16 - pctx->ilen;
		memset(idata + pctx->ilen, 0, padlen);
		crypto_xor(odata, idata, 16);
		crypto_cipher_encrypt_one(tfm, odata, odata);
		pctx->ilen = 0;
	}
}
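The final branch is CBC-MAC finalization of a partial block: the ilen leftover bytes in idata are zero-padded to a full block, folded into the running MAC state, and encrypted once. The same step in isolation (a sketch with a hypothetical helper name; odata holds the running MAC):

static void cbcmac_final_sketch(struct crypto_cipher *tfm, u8 *odata,
				u8 *idata, unsigned int ilen)
{
	memset(idata + ilen, 0, 16 - ilen);	/* zero-pad to one AES block */
	crypto_xor(odata, idata, 16);		/* fold into the MAC state */
	crypto_cipher_encrypt_one(tfm, odata, odata);	/* odata = E_K(odata) */
}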
Example #11
/*
 * Basic transfer function called by other functions for transferring
 * data to and from the RAM disk block. Calls the appropriate encrypt and
 * decrypt functions from crypto. Also calls hexdump in order to
 * dump the entirety of the data to the kernel log.
 */
static void osurd_transfer(struct osurd_dev *dev, unsigned long sector,
                           unsigned long nsect, char *buffer, int write)
{
    unsigned long offset = sector * KERNEL_SECTOR_SIZE;
    unsigned long nbytes = nsect * KERNEL_SECTOR_SIZE;
    int i;

    if((offset + nbytes) > dev->size) {
        printk(KERN_NOTICE "Beyond-end write (%ld %ld)\n", offset, nbytes);
        return;
    }

    crypto_cipher_clear_flags(tfm, ~0);
    crypto_cipher_setkey(tfm, key, strlen(key));

    if(write) {
        printk("Writing to RAM disk\n");
        printk("Pre-encrypted data: ");
        hexdump(buffer, nbytes);

        for(i = 0; i < nbytes; i += crypto_cipher_blocksize(tfm)) {
            memset(dev->data + offset + i, 0, crypto_cipher_blocksize(tfm));
            crypto_cipher_encrypt_one(tfm, dev->data + offset + i, buffer + i);
        }

        printk("Encrypted data: ");
        hexdump(dev->data + offset, nbytes);
    }
    else {
        printk("Reading from RAM disk\n");
        printk("Encrypted data: ");
        hexdump(dev->data + offset, nbytes);

        for(i = 0; i < nbytes; i += crypto_cipher_blocksize(tfm)) {
            crypto_cipher_decrypt_one(tfm, buffer + i, dev->data + offset + i);
        }

        printk("Decrypted data: ");
        hexdump(buffer, nbytes);
    }
}
Example #12
File: pcbc.c  Project: AlexShiLucky/linux
static int crypto_pcbc_encrypt_segment(struct skcipher_request *req,
				       struct skcipher_walk *walk,
				       struct crypto_cipher *tfm)
{
	int bsize = crypto_cipher_blocksize(tfm);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	u8 *iv = walk->iv;

	do {
		crypto_xor(iv, src, bsize);
		crypto_cipher_encrypt_one(tfm, dst, iv);
		crypto_xor_cpy(iv, dst, src, bsize);

		src += bsize;
		dst += bsize;
	} while ((nbytes -= bsize) >= bsize);

	return nbytes;
}
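Written out per block i, the PCBC recurrence this loop implements is (E is the raw cipher, IV_0 the initial vector):

	C_i      = E(P_i XOR IV_i)
	IV_{i+1} = P_i XOR C_i

crypto_xor() folds the plaintext into the IV before encryption, and crypto_xor_cpy() rebuilds the next IV from plaintext and ciphertext. The in-place variant in the next example needs the tmpbuf copy only because encrypting overwrites P_i with C_i.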
Example #13
File: pcbc.c  Project: AlexShiLucky/linux
static int crypto_pcbc_encrypt_inplace(struct skcipher_request *req,
				       struct skcipher_walk *walk,
				       struct crypto_cipher *tfm)
{
	int bsize = crypto_cipher_blocksize(tfm);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	u8 *iv = walk->iv;
	u8 tmpbuf[MAX_CIPHER_BLOCKSIZE];

	do {
		memcpy(tmpbuf, src, bsize);
		crypto_xor(iv, src, bsize);
		crypto_cipher_encrypt_one(tfm, src, iv);
		crypto_xor_cpy(iv, tmpbuf, src, bsize);

		src += bsize;
	} while ((nbytes -= bsize) >= bsize);

	memcpy(walk->iv, iv, bsize);

	return nbytes;
}
Example #14
static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
	struct crypto_cipher *tfm;
	int ret;

	tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_cipher_setkey(tfm, key, key_len);
	if (ret)
		goto out_free_cipher;

	/* Clear the data in the hash sub key container to zero.*/
	/* We want to cipher all zeros to create the hash sub key. */
	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

	crypto_cipher_encrypt_one(tfm, hash_subkey, hash_subkey);

out_free_cipher:
	crypto_free_cipher(tfm);
	return ret;
}
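The zero-block encryption is how GCM derives its universal-hash key: H = E_K(0^128). A hedged usage sketch (the 16-byte buffer matches what RFC4106_HASH_SUBKEY_SIZE resolves to for AES-GCM):

	u8 h[16];
	int err = rfc4106_set_hash_subkey(h, key, key_len);
	/* on success, h holds H = AES_K(0^128), the GHASH subkey */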
Example #15
static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(need_fallback(sctx->key_len))) {
		crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
		return;
	}

	switch (sctx->key_len) {
	case 16:
		crypt_s390_km(KM_AES_128_ENCRYPT, &sctx->key, out, in,
			      AES_BLOCK_SIZE);
		break;
	case 24:
		crypt_s390_km(KM_AES_192_ENCRYPT, &sctx->key, out, in,
			      AES_BLOCK_SIZE);
		break;
	case 32:
		crypt_s390_km(KM_AES_256_ENCRYPT, &sctx->key, out, in,
			      AES_BLOCK_SIZE);
		break;
	}
}
Example #16
File: crypto.c  Project: 7799/linux
/*
 * CC-MAC function WUSB1.0[6.5]
 *
 * Take a data string and produce the encrypted CBC Counter-mode MIC
 *
 * Note the names for most function arguments are made to (more or
 * less) match those used in the pseudo-function definition given in
 * WUSB1.0[6.5].
 *
 * @tfm_cbc: CBC(AES) blkcipher handle (initialized)
 *
 * @tfm_aes: AES cipher handle (initialized)
 *
 * @mic: buffer for placing the computed MIC (Message Integrity
 *       Code). This is exactly 8 bytes, and we expect the buffer to
 *       be at least eight bytes in length.
 *
 * @key: 128 bit symmetric key
 *
 * @n: CCM nonce
 *
 * @a: ASCII string, 14 bytes long (I guess zero padded if needed;
 *     we use exactly 14 bytes).
 *
 * @b: data stream to be processed; cannot be a global or const local
 *     (will confuse the scatterlists)
 *
 * @blen: size of b...
 *
 * Still not very clear how this is done, but looks like this: we
 * create block B0 (as WUSB1.0[6.5] says), then we AES-crypt it with
 * @key. We bytewise xor B0 with B1 (1) and AES-crypt that. Then we
 * take the payload and divide it in blocks (16 bytes), xor them with
 * the previous crypto result (16 bytes) and crypt it, repeat the next
 * block with the output of the previous one, rinse wash (I guess this
 * is what AES CBC mode means...but I truly have no idea). So we use
 * the CBC(AES) blkcipher, that does precisely that. The IV (Initial
 * Vector) is 16 bytes and is set to zero, so
 *
 * See rfc3610. Linux crypto has a CBC implementation, but the
 * documentation is scarce, to say the least, and the example code is
 * so intricate that it is difficult to understand how things work. Most
 * of this is guesswork -- bite me.
 *
 * (1) Created as 6.5 says, again, using as l(a) 'Blen + 14', and
 *     using the 14 bytes of @a to fill up
 *     b1.{mac_header,e0,security_reserved,padding}.
 *
 * NOTE: The definition of l(a) in WUSB1.0[6.5] vs the definition of
 *       l(m) is orthogonal, they bear no relationship, so it is not
 *       in conflict with the parameter's relation that
 *       WUSB1.0[6.4.2]) defines.
 *
 * NOTE: WUSB1.0[A.1]: Host Nonce is missing a nibble? (1e); fixed in
 *       first errata released on 2005/07.
 *
 * NOTE: we need to clean IV to zero at each invocation to make sure
 *       we start with a fresh empty Initial Vector, so that the CBC
 *       works ok.
 *
 * NOTE: blen is not aligned to a block size, we'll pad zeros, that's
 *       what sg[4] is for. Maybe there is a smarter way to do this.
 */
static int wusb_ccm_mac(struct crypto_blkcipher *tfm_cbc,
			struct crypto_cipher *tfm_aes, void *mic,
			const struct aes_ccm_nonce *n,
			const struct aes_ccm_label *a, const void *b,
			size_t blen)
{
	int result = 0;
	struct blkcipher_desc desc;
	struct aes_ccm_b0 b0;
	struct aes_ccm_b1 b1;
	struct aes_ccm_a ax;
	struct scatterlist sg[4], sg_dst;
	void *iv, *dst_buf;
	size_t ivsize, dst_size;
	const u8 bzero[16] = { 0 };
	size_t zero_padding;

	/*
	 * These checks should be compile time optimized out
	 * ensure @a fills b1's mac_header and following fields
	 */
	WARN_ON(sizeof(*a) != sizeof(b1) - sizeof(b1.la));
	WARN_ON(sizeof(b0) != sizeof(struct aes_ccm_block));
	WARN_ON(sizeof(b1) != sizeof(struct aes_ccm_block));
	WARN_ON(sizeof(ax) != sizeof(struct aes_ccm_block));

	result = -ENOMEM;
	zero_padding = blen % sizeof(struct aes_ccm_block);
	if (zero_padding)
		zero_padding = sizeof(struct aes_ccm_block) - zero_padding;
	dst_size = blen + sizeof(b0) + sizeof(b1) + zero_padding;
	dst_buf = kzalloc(dst_size, GFP_KERNEL);
	if (dst_buf == NULL) {
		printk(KERN_ERR "E: can't alloc destination buffer\n");
		goto error_dst_buf;
	}

	iv = crypto_blkcipher_crt(tfm_cbc)->iv;
	ivsize = crypto_blkcipher_ivsize(tfm_cbc);
	memset(iv, 0, ivsize);

	/* Setup B0 */
	b0.flags = 0x59;	/* Format B0 */
	b0.ccm_nonce = *n;
	b0.lm = cpu_to_be16(0);	/* WUSB1.0[6.5] sez l(m) is 0 */

	/* Setup B1
	 *
	 * The WUSB spec is anything but clear! WUSB1.0[6.5]
	 * says that to initialize B1 from A with 'l(a) = blen +
	 * 14'--after clarification, it means to use A's contents
	 * for MAC Header, EO, sec reserved and padding.
	 */
	b1.la = cpu_to_be16(blen + 14);
	memcpy(&b1.mac_header, a, sizeof(*a));

	sg_init_table(sg, ARRAY_SIZE(sg));
	sg_set_buf(&sg[0], &b0, sizeof(b0));
	sg_set_buf(&sg[1], &b1, sizeof(b1));
	sg_set_buf(&sg[2], b, blen);
	/* 0 if well behaved :) */
	sg_set_buf(&sg[3], bzero, zero_padding);
	sg_init_one(&sg_dst, dst_buf, dst_size);

	desc.tfm = tfm_cbc;
	desc.flags = 0;
	result = crypto_blkcipher_encrypt(&desc, &sg_dst, sg, dst_size);
	if (result < 0) {
		printk(KERN_ERR "E: can't compute CBC-MAC tag (MIC): %d\n",
		       result);
		goto error_cbc_crypt;
	}

	/* Now we crypt the MIC Tag (*iv) with Ax -- values per WUSB1.0[6.5]
	 * The procedure is to AES crypt the A0 block and XOR the MIC
	 * Tag against it; we only do the first 8 bytes and place it
	 * directly in the destination buffer.
	 *
	 * POS Crypto API: size is assumed to be AES's block size.
	 * Thanks for documenting it -- tip taken from airo.c
	 */
	ax.flags = 0x01;		/* as per WUSB 1.0 spec */
	ax.ccm_nonce = *n;
	ax.counter = 0;
	crypto_cipher_encrypt_one(tfm_aes, (void *)&ax, (void *)&ax);
	bytewise_xor(mic, &ax, iv, 8);
	result = 8;
error_cbc_crypt:
	kfree(dst_buf);
error_dst_buf:
	return result;
}
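Condensed, the MIC computation above is (K is the AES key shared by both handles; all blocks are 16 bytes):

	tag = last block of CBC(AES_K, IV=0) over B0 || B1 || b || zero pad
	T   = E_K(A0)		/* A0 = { flags = 0x01, nonce, counter = 0 } */
	MIC = first 8 bytes of (tag XOR T)

The CBC pass leaves the raw CBC-MAC tag in the blkcipher's IV buffer; encrypting A0 and XORing is CCM's counter-mode encryption of that tag with the first keystream block.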
Example #17
static inline int chcr_ipsec_setkey(struct xfrm_state *x,
				    struct ipsec_sa_entry *sa_entry)
{
	struct crypto_cipher *cipher;
	int keylen = (x->aead->alg_key_len + 7) / 8;
	unsigned char *key = x->aead->alg_key;
	int ck_size, key_ctx_size = 0;
	unsigned char ghash_h[AEAD_H_SIZE];
	int ret = 0;

	if (keylen > 3) {
		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
		memcpy(sa_entry->salt, key + keylen, 4);
	}

	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("GCM: Invalid key length %d\n", keylen);
		ret = -EINVAL;
		goto out;
	}

	memcpy(sa_entry->key, key, keylen);
	sa_entry->enckey_len = keylen;
	key_ctx_size = sizeof(struct _key_ctx) +
			      ((DIV_ROUND_UP(keylen, 16)) << 4) +
			      AEAD_H_SIZE;

	sa_entry->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
						 CHCR_KEYCTX_MAC_KEY_SIZE_128,
						 0, 0,
						 key_ctx_size >> 4);

	/* Calculate the H = CIPH(K, 0 repeated 16 times).
	 * It will go in key context
	 */
	cipher = crypto_alloc_cipher("aes-generic", 0, 0);
	if (IS_ERR(cipher)) {
		sa_entry->enckey_len = 0;
		ret = -ENOMEM;
		goto out;
	}

	ret = crypto_cipher_setkey(cipher, key, keylen);
	if (ret) {
		sa_entry->enckey_len = 0;
		goto out1;
	}
	memset(ghash_h, 0, AEAD_H_SIZE);
	crypto_cipher_encrypt_one(cipher, ghash_h, ghash_h);
	memcpy(sa_entry->key + (DIV_ROUND_UP(sa_entry->enckey_len, 16) *
	       16), ghash_h, AEAD_H_SIZE);
	sa_entry->kctx_len = ((DIV_ROUND_UP(sa_entry->enckey_len, 16)) << 4) +
			      AEAD_H_SIZE;
out1:
	crypto_free_cipher(cipher);
out:
	return ret;
}
Example #18
/******************************************************************************
 * Handle an I/O request.
 *
 ******************************************************************************/
static void sbd_transfer(struct sbd_device *dev, sector_t sector,
						 unsigned long nsect, char *buffer, int write) {
	unsigned long offset = sector * logical_block_size;
	unsigned long nbytes = nsect * logical_block_size;
	
	int i;
	int encrypt_flag;
	int decrypt_flag;
	unsigned long length;
	u8 *dst;
	u8 *src;
	
	/* Setting the Key */
	crypto_cipher_setkey(crypt_cipher, key, key_len);
	
	if ((offset + nbytes) > dev->size) {
		printk(KERN_NOTICE "sbd: Beyond-end write (%lu %lu)\n", offset, nbytes);
		return;
	}
	
	if (write){
		decrypt_flag = 0;
		encrypt_flag = 1;
		dst = dev->data + offset;
		src = buffer;
		
		for (i = 0; i < nbytes; i += crypto_cipher_blocksize(crypt_cipher)) {
			crypto_cipher_encrypt_one(crypt_cipher, dst + i, src + i);
		}
	
	} else {
		decrypt_flag = 1;
		encrypt_flag = 0;
		dst = buffer;
		src = dev->data + offset;
		
		for (i = 0; i < nbytes; i += crypto_cipher_blocksize(crypt_cipher)) {
			crypto_cipher_decrypt_one(crypt_cipher, dst + i, src + i);
		}
	}
	
	length = nbytes;
	printk("\n\n");
	if (encrypt_flag && !decrypt_flag) {
		printk("Encrypt:\n");
	} else {
		printk("Decrypt:\n");
	}
	while (length--) {
		printk("%u ", (unsigned) *src++);
	}
	
	length = nbytes;
	printk("\n\n");
	if (encrypt_flag && !decrypt_flag) {
		printk("Decrypt:\n");
	} else {
		printk("Encrypt:\n");
	}
	while (length--) {
		printk("%u ", (unsigned) *dst++);
	}
	printk("\n");
}
Example #19
/*
 * Returns DEFAULT_BLK_SZ bytes of random data per call
 * returns 0 if generation succeeded, <0 if something went wrong
 */
static int _get_more_prng_bytes(struct prng_context *ctx, int cont_test)
{
	int i;
	unsigned char tmp[DEFAULT_BLK_SZ];
	unsigned char *output = NULL;


	dbgprint(KERN_CRIT "Calling _get_more_prng_bytes for context %p\n",
		ctx);

	hexdump("Input DT: ", ctx->DT, DEFAULT_BLK_SZ);
	hexdump("Input I: ", ctx->I, DEFAULT_BLK_SZ);
	hexdump("Input V: ", ctx->V, DEFAULT_BLK_SZ);

	/*
	 * This algorithm is a 3 stage state machine
	 */
	for (i = 0; i < 3; i++) {

		switch (i) {
		case 0:
			/*
			 * Start by encrypting the counter value
			 * This gives us an intermediate value I
			 */
			memcpy(tmp, ctx->DT, DEFAULT_BLK_SZ);
			output = ctx->I;
			hexdump("tmp stage 0: ", tmp, DEFAULT_BLK_SZ);
			break;
		case 1:

			/*
			 * Next xor I with our secret vector V
			 * encrypt that result to obtain our
			 * pseudo random data which we output
			 */
			xor_vectors(ctx->I, ctx->V, tmp, DEFAULT_BLK_SZ);
			hexdump("tmp stage 1: ", tmp, DEFAULT_BLK_SZ);
			output = ctx->rand_data;
			break;
		case 2:
			/*
			 * First check that we didn't produce the same
			 * random data that we did last time around through this
			 */
			if (!memcmp(ctx->rand_data, ctx->last_rand_data,
					DEFAULT_BLK_SZ)) {
				if (cont_test) {
					panic("cprng %p Failed repetition check!\n",
						ctx);
				}

				printk(KERN_ERR
					"ctx %p Failed repetition check!\n",
					ctx);

				ctx->flags |= PRNG_NEED_RESET;
				return -EINVAL;
			}
			memcpy(ctx->last_rand_data, ctx->rand_data,
				DEFAULT_BLK_SZ);

			/*
			 * Lastly xor the random data with I
			 * and encrypt that to obtain a new secret vector V
			 */
			xor_vectors(ctx->rand_data, ctx->I, tmp,
				DEFAULT_BLK_SZ);
			output = ctx->V;
			hexdump("tmp stage 2: ", tmp, DEFAULT_BLK_SZ);
			break;
		}


		/* do the encryption */
		crypto_cipher_encrypt_one(ctx->tfm, output, tmp);

	}

	/*
	 * Now update our DT value
	 */
	for (i = DEFAULT_BLK_SZ - 1; i >= 0; i--) {
		ctx->DT[i] += 1;
		if (ctx->DT[i] != 0)
			break;
	}

	dbgprint("Returning new block for context %p\n", ctx);
	ctx->rand_data_valid = 0;

	hexdump("Output DT: ", ctx->DT, DEFAULT_BLK_SZ);
	hexdump("Output I: ", ctx->I, DEFAULT_BLK_SZ);
	hexdump("Output V: ", ctx->V, DEFAULT_BLK_SZ);
	hexdump("New Random Data: ", ctx->rand_data, DEFAULT_BLK_SZ);

	return 0;
}
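The three stages implement the ANSI X9.31-style generator recurrence, with E_K the block cipher held in ctx->tfm:

	I  = E_K(DT)		/* stage 0: encrypt the timestamp/counter */
	R  = E_K(I XOR V)	/* stage 1: the output block, rand_data */
	V' = E_K(R XOR I)	/* stage 2: the next secret vector */

followed by a byte-wise increment of DT. The repetition check in stage 2 is the FIPS continuous self-test: two identical consecutive output blocks force a reset (or a panic under cont_test).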
Example #20
/*
 * Handle an I/O request.
 */
static void sbd_transfer(struct sbd_device *dev, sector_t sector,
		unsigned long nsect, char *buffer, int write) {
	unsigned long offset = sector * logical_block_size;
	unsigned long nbytes = nsect * logical_block_size;
	char *encrypts[2];
	u8 *dst, *src;
	unsigned long len;
	int i;

	//set up the key
	crypto_cipher_setkey(crypto, crypto_key, crypto_key_len);

	if ((offset + nbytes) > dev->size) {
		printk(KERN_NOTICE "sbd: Beyond-end write (%lu %lu)\n", offset, nbytes);
		return;
	}

	if (write) {
		encrypts[0] = "UNECRYPTED";
		encrypts[1] = "ENCRYPTED";
		dst = dev->data + offset;
		src = buffer;

		for (i = 0; i < nbytes; i += crypto_cipher_blocksize(crypto)) {
			/*
			* crypto_cipher_encrypt_one() - encrypt one block of plaintext
			* @tfm: cipher handle
			* @dst: points to the buffer that will be filled with the ciphertext
			* @src: buffer holding the plaintext to be encrypted
			*
			* Invoke the encryption operation of one block. The caller must ensure that
			* the plaintext and ciphertext buffers are at least one block in size.

			static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
														u8 *dst, const u8 *src)
			{
				crypto_cipher_crt(tfm)->cit_encrypt_one(crypto_cipher_tfm(tfm),
														dst, src);
			}
			*/
			crypto_cipher_encrypt_one(crypto, dst + i, src + i);
		}
	}
	else {
		//backwards from encrypt
		encrypts[0] = "ENCRYPTED";
		encrypts[1] = "UNENCRYPTED";
		dst = buffer;
		src = dev->data + offset;

		for (i = 0; i < nbytes; i += crypto_cipher_blocksize(crypto)) {
			/*
			* crypto_cipher_decrypt_one() - decrypt one block of ciphertext
			* @tfm: cipher handle
			* @dst: points to the buffer that will be filled with the plaintext
			* @src: buffer holding the ciphertext to be decrypted
			*
			* Invoke the decryption operation of one block. The caller must ensure that
			* the plaintext and ciphertext buffers are at least one block in size.

			static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
			                                      		u8 *dst, const u8 *src)
			{
			     crypto_cipher_crt(tfm)->cit_decrypt_one(crypto_cipher_tfm(tfm),
		                                             	dst, src);
			}
			*/
			crypto_cipher_decrypt_one(crypto, dst + i, src + i);
		}
	}

	// print out both buffers of encrypted and unencrypted
	// len = nbytes;
	// printk("%s:", encrypts[0]);
	// while (len--) {
 //    	printk("%u", (unsigned) *src++);
 //    }

	// len = nbytes;
	// printk("\n%s:", encrypts[1]);
	// while (len--) {
 //    	printk("%u", (unsigned) *dst++);
 //    }
	// printk("\n");
}
Example #21
static int wusb_ccm_mac(struct crypto_blkcipher *tfm_cbc,
			struct crypto_cipher *tfm_aes, void *mic,
			const struct aes_ccm_nonce *n,
			const struct aes_ccm_label *a, const void *b,
			size_t blen)
{
	int result = 0;
	struct blkcipher_desc desc;
	struct aes_ccm_b0 b0;
	struct aes_ccm_b1 b1;
	struct aes_ccm_a ax;
	struct scatterlist sg[4], sg_dst;
	void *iv, *dst_buf;
	size_t ivsize, dst_size;
	const u8 bzero[16] = { 0 };
	size_t zero_padding;

	WARN_ON(sizeof(*a) != sizeof(b1) - sizeof(b1.la));
	WARN_ON(sizeof(b0) != sizeof(struct aes_ccm_block));
	WARN_ON(sizeof(b1) != sizeof(struct aes_ccm_block));
	WARN_ON(sizeof(ax) != sizeof(struct aes_ccm_block));

	result = -ENOMEM;
	zero_padding = blen % sizeof(struct aes_ccm_block);
	if (zero_padding)
		zero_padding = sizeof(struct aes_ccm_block) - zero_padding;
	dst_size = blen + sizeof(b0) + sizeof(b1) + zero_padding;
	dst_buf = kzalloc(dst_size, GFP_KERNEL);
	if (dst_buf == NULL) {
		printk(KERN_ERR "E: can't alloc destination buffer\n");
		goto error_dst_buf;
	}

	iv = crypto_blkcipher_crt(tfm_cbc)->iv;
	ivsize = crypto_blkcipher_ivsize(tfm_cbc);
	memset(iv, 0, ivsize);

	/* Setup B0 */
	b0.flags = 0x59;	/* Format B0 */
	b0.ccm_nonce = *n;
	b0.lm = cpu_to_be16(0);	/* WUSB1.0[6.5] sez l(m) is 0 */

	b1.la = cpu_to_be16(blen + 14);
	memcpy(&b1.mac_header, a, sizeof(*a));

	sg_init_table(sg, ARRAY_SIZE(sg));
	sg_set_buf(&sg[0], &b0, sizeof(b0));
	sg_set_buf(&sg[1], &b1, sizeof(b1));
	sg_set_buf(&sg[2], b, blen);
	/* 0 if well behaved :) */
	sg_set_buf(&sg[3], bzero, zero_padding);
	sg_init_one(&sg_dst, dst_buf, dst_size);

	desc.tfm = tfm_cbc;
	desc.flags = 0;
	result = crypto_blkcipher_encrypt(&desc, &sg_dst, sg, dst_size);
	if (result < 0) {
		printk(KERN_ERR "E: can't compute CBC-MAC tag (MIC): %d\n",
		       result);
		goto error_cbc_crypt;
	}

	ax.flags = 0x01;		/* as per WUSB 1.0 spec */
	ax.ccm_nonce = *n;
	ax.counter = 0;
	crypto_cipher_encrypt_one(tfm_aes, (void *)&ax, (void *)&ax);
	bytewise_xor(mic, &ax, iv, 8);
	result = 8;
error_cbc_crypt:
	kfree(dst_buf);
error_dst_buf:
	return result;
}
Example #22
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
		.tweak_fn = aesni_xts_tweak,
		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
		.crypt_fn = lrw_xts_encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
		.tweak_fn = aesni_xts_tweak,
		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
		.crypt_fn = lrw_xts_decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

#endif

#ifdef CONFIG_X86_64
static int rfc4106_init(struct crypto_aead *aead)
{
	struct cryptd_aead *cryptd_tfm;
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);

	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
				       CRYPTO_ALG_INTERNAL,
				       CRYPTO_ALG_INTERNAL);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	*ctx = cryptd_tfm;
	crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
	return 0;
}

static void rfc4106_exit(struct crypto_aead *aead)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);

	cryptd_free_aead(*ctx);
}

static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
	struct crypto_cipher *tfm;
	int ret;

	tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_cipher_setkey(tfm, key, key_len);
	if (ret)
		goto out_free_cipher;

	/* Clear the data in the hash sub key container to zero.*/
	/* We want to cipher all zeros to create the hash sub key. */
	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

	crypto_cipher_encrypt_one(tfm, hash_subkey, hash_subkey);

out_free_cipher:
	crypto_free_cipher(tfm);
	return ret;
}

static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
				  unsigned int key_len)
{
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);

	if (key_len < 4) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	/*Account for 4 byte nonce at the end.*/
	key_len -= 4;

	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));

	return aes_set_key_common(crypto_aead_tfm(aead),
				  &ctx->aes_key_expanded, key, key_len) ?:
	       rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}

static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
			   unsigned int key_len)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(parent);
	struct cryptd_aead *cryptd_tfm = *ctx;

	return crypto_aead_setkey(&cryptd_tfm->base, key, key_len);
}

static int common_rfc4106_set_authsize(struct crypto_aead *aead,
				       unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* This is the Integrity Check Value (aka the authentication tag) length and
 * can be 8, 12 or 16 bytes long. */
static int rfc4106_set_authsize(struct crypto_aead *parent,
				unsigned int authsize)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(parent);
	struct cryptd_aead *cryptd_tfm = *ctx;

	return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
}

static int helper_rfc4106_encrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	struct scatter_walk src_sg_walk;
	struct scatter_walk dst_sg_walk = {};
	unsigned int i;

	/* Assuming we are supporting rfc4106 64-bit extended
	 * sequence numbers, we need the AAD length to equal
	 * 16 or 20 bytes. */
	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
		return -EINVAL;

	/* IV below built */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if (sg_is_last(req->src) &&
	    req->src->offset + req->src->length <= PAGE_SIZE &&
	    sg_is_last(req->dst) &&
	    req->dst->offset + req->dst->length <= PAGE_SIZE) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		assoc = scatterwalk_map(&src_sg_walk);
		src = assoc + req->assoclen;
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
		}
	} else {
		/* Allocate memory for src, dst, assoc */
		assoc = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
			GFP_ATOMIC);
		if (unlikely(!assoc))
			return -ENOMEM;
		scatterwalk_map_and_copy(assoc, req->src, 0,
					 req->assoclen + req->cryptlen, 0);
		src = assoc + req->assoclen;
		dst = src;
	}

	kernel_fpu_begin();
	aesni_gcm_enc_tfm(aes_ctx, dst, src, req->cryptlen, iv,
			  ctx->hash_subkey, assoc, req->assoclen - 8,
			  dst + req->cryptlen, auth_tag_len);
	kernel_fpu_end();

	/* The authTag (aka the Integrity Check Value) needs to be written
	 * back to the packet. */
	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst - req->assoclen);
			scatterwalk_advance(&dst_sg_walk, req->dst->length);
			scatterwalk_done(&dst_sg_walk, 1, 0);
		}
		scatterwalk_unmap(assoc);
		scatterwalk_advance(&src_sg_walk, req->src->length);
		scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
					 req->cryptlen + auth_tag_len, 1);
		kfree(assoc);
	}
	return 0;
}

static int helper_rfc4106_decrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	unsigned long tempCipherLen = 0;
	__be32 counter = cpu_to_be32(1);
	int retval = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	u8 authTag[16];
	struct scatter_walk src_sg_walk;
	struct scatter_walk dst_sg_walk = {};
	unsigned int i;

	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
		return -EINVAL;

	/* Assuming we are supporting rfc4106 64-bit extended
	 * sequence numbers, we need the AAD length to equal
	 * 16 or 20 bytes. */

	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
	/* IV below built */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if (sg_is_last(req->src) &&
	    req->src->offset + req->src->length <= PAGE_SIZE &&
	    sg_is_last(req->dst) &&
	    req->dst->offset + req->dst->length <= PAGE_SIZE) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		assoc = scatterwalk_map(&src_sg_walk);
		src = assoc + req->assoclen;
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
		}

	} else {
		/* Allocate memory for src, dst, assoc */
		assoc = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
		if (!assoc)
			return -ENOMEM;
		scatterwalk_map_and_copy(assoc, req->src, 0,
					 req->assoclen + req->cryptlen, 0);
		src = assoc + req->assoclen;
		dst = src;
	}

	kernel_fpu_begin();
	aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
			  ctx->hash_subkey, assoc, req->assoclen - 8,
			  authTag, auth_tag_len);
	kernel_fpu_end();

	/* Compare generated tag with passed in tag. */
	retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
		-EBADMSG : 0;

	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst - req->assoclen);
			scatterwalk_advance(&dst_sg_walk, req->dst->length);
			scatterwalk_done(&dst_sg_walk, 1, 0);
		}
		scatterwalk_unmap(assoc);
		scatterwalk_advance(&src_sg_walk, req->src->length);
		scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
					 tempCipherLen, 1);
		kfree(assoc);
	}
	return retval;
}
Example #23
static int _get_more_prng_bytes(struct prng_context *ctx, int cont_test)
{
	int i;
	unsigned char tmp[DEFAULT_BLK_SZ];
	unsigned char *output = NULL;


	dbgprint(KERN_CRIT "Calling _get_more_prng_bytes for context %p\n",
		ctx);

	hexdump("Input DT: ", ctx->DT, DEFAULT_BLK_SZ);
	hexdump("Input I: ", ctx->I, DEFAULT_BLK_SZ);
	hexdump("Input V: ", ctx->V, DEFAULT_BLK_SZ);

	/*
	 * This algorithm is a 3 stage state machine
	 */
	for (i = 0; i < 3; i++) {

		switch (i) {
		case 0:
			/*
			 * Start by encrypting the counter value
			 * This gives us an intermediate value I
			 */
			memcpy(tmp, ctx->DT, DEFAULT_BLK_SZ);
			output = ctx->I;
			hexdump("tmp stage 0: ", tmp, DEFAULT_BLK_SZ);
			break;
		case 1:

			/*
			 * Next xor I with our secret vector V
			 * encrypt that result to obtain our
			 * pseudo random data which we output
			 */
			xor_vectors(ctx->I, ctx->V, tmp, DEFAULT_BLK_SZ);
			hexdump("tmp stage 1: ", tmp, DEFAULT_BLK_SZ);
			output = ctx->rand_data;
			break;
		case 2:
			/*
			 * First check that we didn't produce the same
			 * random data that we did last time around through this
			 */
			if (!memcmp(ctx->rand_data, ctx->last_rand_data,
					DEFAULT_BLK_SZ)) {
				if (cont_test) {
					panic("cprng %p Failed repetition check!\n",
						ctx);
				}

				printk(KERN_ERR
					"ctx %p Failed repetition check!\n",
					ctx);

				ctx->flags |= PRNG_NEED_RESET;
				return -EINVAL;
			}
			memcpy(ctx->last_rand_data, ctx->rand_data,
				DEFAULT_BLK_SZ);

			/*
			 * Lastly xor the random data with I
			 * and encrypt that to obtain a new secret vector V
			 */
			xor_vectors(ctx->rand_data, ctx->I, tmp,
				DEFAULT_BLK_SZ);
			output = ctx->V;
			hexdump("tmp stage 2: ", tmp, DEFAULT_BLK_SZ);
			break;
		}


		/* do the encryption */
		crypto_cipher_encrypt_one(ctx->tfm, output, tmp);

	}

	/*
	 * Now update our DT value
	 */
	for (i = DEFAULT_BLK_SZ - 1; i >= 0; i--) {
		ctx->DT[i] += 1;
		if (ctx->DT[i] != 0)
			break;
	}

	dbgprint("Returning new block for context %p\n", ctx);
	ctx->rand_data_valid = 0;

	hexdump("Output DT: ", ctx->DT, DEFAULT_BLK_SZ);
	hexdump("Output I: ", ctx->I, DEFAULT_BLK_SZ);
	hexdump("Output V: ", ctx->V, DEFAULT_BLK_SZ);
	hexdump("New Random Data: ", ctx->rand_data, DEFAULT_BLK_SZ);

	return 0;
}