Example #1
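/*
 * Digest update over a scatterlist: each entry is mapped one page at
 * a time and fed to dia_update().  The version check covers the
 * 2.6.24 switch from the sg->page member to the sg_page() accessor.
 */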
static void update(struct crypto_tfm *tfm,
                   struct scatterlist *sg, unsigned int nsg)
{
	unsigned int i;

	for (i = 0; i < nsg; i++) {

	#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24))
		struct page *pg = sg[i].page;
	#else
		struct page *pg = sg_page(&sg[i]);
	#endif
		unsigned int offset = sg[i].offset;
		unsigned int l = sg[i].length;

		do {
			unsigned int bytes_from_page =
				min(l, (unsigned int)PAGE_SIZE - offset);
			char *p = crypto_kmap(pg, 0) + offset;

			tfm->__crt_alg->cra_digest.dia_update
					(crypto_tfm_ctx(tfm), p,
					 bytes_from_page);
			crypto_kunmap(p, 0);
			crypto_yield(tfm);
			offset = 0;
			pg++;
			l -= bytes_from_page;
		} while (l > 0);
	}
}
Example #2
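/*
 * Feed a scatterlist into the GHASH state using the scatter_walk
 * helpers, unmapping and yielding between chunks.
 */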
static void crypto_gcm_ghash_update_sg(struct crypto_gcm_ghash_ctx *ctx,
				       struct scatterlist *sg, int len)
{
	struct scatter_walk walk;
	u8 *src;
	int n;

	if (!len)
		return;

	scatterwalk_start(&walk, sg);

	while (len) {
		n = scatterwalk_clamp(&walk, len);

		if (!n) {
			scatterwalk_start(&walk, scatterwalk_sg_next(walk.sg));
			n = scatterwalk_clamp(&walk, len);
		}

		src = scatterwalk_map(&walk, 0);

		crypto_gcm_ghash_update(ctx, src, n);
		len -= n;

		scatterwalk_unmap(src, 0);
		scatterwalk_advance(&walk, n);
		scatterwalk_done(&walk, 0, len);
		if (len)
			crypto_yield(ctx->flags);
	}
}
Example #3
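/*
 * Same digest walk as Example #1, but mapping pages with plain
 * kmap_atomic()/kunmap_atomic() instead of the crypto_kmap() wrappers.
 */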
static void update(struct crypto_tfm *tfm,
		   struct scatterlist *sg, unsigned int nsg)
{
	unsigned int i;

	for (i = 0; i < nsg; i++) {

		struct page *pg = sg[i].page;
		unsigned int offset = sg[i].offset;
		unsigned int l = sg[i].length;

		do {
			unsigned int bytes_from_page =
				min(l, (unsigned int)PAGE_SIZE - offset);
			char *p = kmap_atomic(pg) + offset;

			tfm->__crt_alg->cra_digest.dia_update
					(crypto_tfm_ctx(tfm), p,
					 bytes_from_page);
			kunmap_atomic(p);
			crypto_yield(tfm);
			offset = 0;
			pg++;
			l -= bytes_from_page;
		} while (l > 0);
	}
}
Example #4
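/*
 * Digest update that honours the algorithm's alignment mask: any
 * misaligned head of a mapped chunk is passed to dia_update()
 * separately, so the rest of the chunk is processed aligned.
 */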
static int update2(struct hash_desc *desc,
		   struct scatterlist *sg, unsigned int nbytes)
{
	struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
	unsigned int alignmask = crypto_tfm_alg_alignmask(tfm);

	if (!nbytes)
		return 0;

	for (;;) {
		struct page *pg = sg_page(sg);
		unsigned int offset = sg->offset;
		unsigned int l = sg->length;

		if (unlikely(l > nbytes))
			l = nbytes;
		nbytes -= l;

		do {
			unsigned int bytes_from_page =
				min(l, (unsigned int)PAGE_SIZE - offset);
			char *src = crypto_kmap(pg, 0);
			char *p = src + offset;

			if (unlikely(offset & alignmask)) {
				unsigned int bytes =
					alignmask + 1 - (offset & alignmask);
				bytes = min(bytes, bytes_from_page);
				tfm->__crt_alg->cra_digest.dia_update(tfm, p,
								      bytes);
				p += bytes;
				bytes_from_page -= bytes;
				l -= bytes;
			}
			tfm->__crt_alg->cra_digest.dia_update(tfm, p,
							      bytes_from_page);
			crypto_kunmap(src, 0);
			crypto_yield(desc->flags);
			offset = 0;
			pg++;
			l -= bytes_from_page;
		} while (l > 0);

		if (!nbytes)
			break;
		sg = scatterwalk_sg_next(sg);
	}

	return 0;
}
Example #5
/*
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per block.
 */
static int crypt(struct crypto_tfm *tfm,
		 struct scatterlist *dst,
		 struct scatterlist *src,
		 unsigned int nbytes, cryptfn_t crfn,
		 procfn_t prfn, int enc, void *info)
{
	struct scatter_walk walk_in, walk_out;
	const unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
	u8 tmp_src[bsize];
	u8 tmp_dst[bsize];

	if (!nbytes)
		return 0;

	if (nbytes % bsize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return -EINVAL;
	}

	scatterwalk_start(&walk_in, src);
	scatterwalk_start(&walk_out, dst);

	for (;;) {
		u8 *src_p, *dst_p;
		int in_place;

		scatterwalk_map(&walk_in);
		scatterwalk_map(&walk_out);
		src_p = scatterwalk_whichbuf(&walk_in, bsize, tmp_src);
		dst_p = scatterwalk_whichbuf(&walk_out, bsize, tmp_dst);
		in_place = scatterwalk_samebuf(&walk_in, &walk_out,
					       src_p, dst_p);

		nbytes -= bsize;

		scatterwalk_copychunks(src_p, &walk_in, bsize, 0);

		prfn(tfm, dst_p, src_p, crfn, enc, info, in_place);

		scatterwalk_done(&walk_in, nbytes);

		scatterwalk_copychunks(dst_p, &walk_out, bsize, 1);
		scatterwalk_done(&walk_out, nbytes);

		if (!nbytes)
			return 0;

		crypto_yield(tfm);
	}
}
Example #6
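/*
 * Complete one step of a block-cipher walk: account for the bytes
 * just processed, continue the walk if data remains, and release the
 * walk's buffers at the end.  Fails early in FIPS error mode.
 */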
int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err)
{
	unsigned int nbytes = 0;

#ifdef CONFIG_CRYPTO_FIPS
	if (unlikely(in_fips_err()))
		return -EACCES;
#endif

	if (likely(err >= 0)) {
		unsigned int n = walk->nbytes - err;

		if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
			n = blkcipher_done_fast(walk, n);
		else if (WARN_ON(err)) {
			err = -EINVAL;
			goto err;
		} else
			n = blkcipher_done_slow(walk, n);

		nbytes = walk->total - n;
		err = 0;
	}

	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

err:
	walk->total = nbytes;
	walk->nbytes = nbytes;

	if (nbytes) {
		crypto_yield(desc->flags);
		return blkcipher_walk_next(desc, walk);
	}

	if (walk->iv != desc->info)
		memcpy(desc->info, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

	return err;
}
Example #7
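/*
 * Variant of the same function that takes the IV size from the tfm
 * via crypto_blkcipher_ivsize() rather than from the walk state, and
 * omits the FIPS check.
 */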
int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int nbytes = 0;

	if (likely(err >= 0)) {
		unsigned int n = walk->nbytes - err;

		if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
			n = blkcipher_done_fast(walk, n);
		else if (WARN_ON(err)) {
			err = -EINVAL;
			goto err;
		} else
			n = blkcipher_done_slow(tfm, walk, n);

		nbytes = walk->total - n;
		err = 0;
	}

	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

err:
	walk->total = nbytes;
	walk->nbytes = nbytes;

	if (nbytes) {
		crypto_yield(desc->flags);
		return blkcipher_walk_next(desc, walk);
	}

	if (walk->iv != desc->info)
		memcpy(desc->info, walk->iv, crypto_blkcipher_ivsize(tfm));
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

	return err;
}
Example #8
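/*
 * CCM MAC input walk: each mapped chunk goes to compute_mac(); a
 * partial final block left in pctx->idata is zero-padded, XORed into
 * pctx->odata, and encrypted in place.
 */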
static void get_data_to_compute(struct crypto_cipher *tfm,
			       struct crypto_ccm_req_priv_ctx *pctx,
			       struct scatterlist *sg, unsigned int len)
{
	struct scatter_walk walk;
	u8 *data_src;
	int n;

	scatterwalk_start(&walk, sg);

	while (len) {
		n = scatterwalk_clamp(&walk, len);
		if (!n) {
			scatterwalk_start(&walk, sg_next(walk.sg));
			n = scatterwalk_clamp(&walk, len);
		}
		data_src = scatterwalk_map(&walk);

		compute_mac(tfm, data_src, n, pctx);
		len -= n;

		scatterwalk_unmap(data_src);
		scatterwalk_advance(&walk, n);
		scatterwalk_done(&walk, 0, len);
		if (len)
			crypto_yield(pctx->flags);
	}

	/* any leftover data needs padding and is then encrypted */
	if (pctx->ilen) {
		int padlen;
		u8 *odata = pctx->odata;
		u8 *idata = pctx->idata;

		padlen = 16 - pctx->ilen;
		memset(idata + pctx->ilen, 0, padlen);
		crypto_xor(odata, idata, 16);
		crypto_cipher_encrypt_one(tfm, odata, odata);
		pctx->ilen = 0;
	}
}
Example #9
/* 
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 */
static int crypt(const struct cipher_desc *desc,
		 struct scatterlist *dst,
		 struct scatterlist *src,
		 unsigned int nbytes)
{
	struct scatter_walk walk_in, walk_out;
	struct crypto_tfm *tfm = desc->tfm;
	const unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
	unsigned int alignmask = crypto_tfm_alg_alignmask(tfm);
	unsigned long buffer = 0;

	if (!nbytes)
		return 0;

	if (nbytes % bsize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return -EINVAL;
	}

	scatterwalk_start(&walk_in, src);
	scatterwalk_start(&walk_out, dst);

	for (;;) {
		unsigned int n = nbytes;
		u8 *tmp = NULL;

		if (!scatterwalk_aligned(&walk_in, alignmask) ||
		    !scatterwalk_aligned(&walk_out, alignmask)) {
			if (!buffer) {
				buffer = __get_free_page(GFP_ATOMIC);
				if (!buffer)
					n = 0;
			}
			tmp = (u8 *)buffer;
		}

		scatterwalk_map(&walk_in, 0);
		scatterwalk_map(&walk_out, 1);

		n = scatterwalk_clamp(&walk_in, n);
		n = scatterwalk_clamp(&walk_out, n);

		if (likely(n >= bsize))
			n = crypt_fast(desc, &walk_in, &walk_out, n, tmp);
		else
			n = crypt_slow(desc, &walk_in, &walk_out, bsize);

		nbytes -= n;

		scatterwalk_done(&walk_in, 0, nbytes);
		scatterwalk_done(&walk_out, 1, nbytes);

		if (!nbytes)
			break;

		crypto_yield(tfm);
	}

	if (buffer)
		free_page(buffer);

	return 0;
}
Example #10
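/*
 * XCBC update: partial input is buffered in ops->prev; once a full
 * block is available it is XORed into the IV and encrypted in place,
 * and any trailing remainder is kept for the next call.
 */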
void crypto_xcbc_update(struct crypto_tfm *tfm, struct scatterlist *sg, unsigned int nsg)
{
	struct xcbc_ops *ops = (struct xcbc_ops*)tfm->crt_cipher.cit_xcbc_block;
	const unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
	unsigned int i;

	if (!(tfm->crt_cipher.cit_mode & CRYPTO_TFM_MODE_CBC))
		return;
 
	for (i = 0; i < nsg; i++) {

		struct page *pg = sg[i].page;
		unsigned int offset = sg[i].offset;
		unsigned int slen = sg[i].length;

		while (slen > 0) {
			unsigned int len = min(slen, (unsigned int)PAGE_SIZE - offset);
			char *p = crypto_kmap(pg, 0) + offset;

			/* check whether the data still fits in the current block */
			if ((ops->len + len) <= bsize) {
				memcpy(ops->prev + ops->len, p, len);
				ops->len += len;
				slen -= len;

				/* advance within the page, or on to the next one */
				if (len + offset >= PAGE_SIZE) {
					offset = 0;
					pg++;
				} else
					offset += len;

				crypto_kunmap(p, 0);
				crypto_yield(tfm);
				continue;
			}

			/* filling ops->prev with new data and encrypting it */
			memcpy(ops->prev + ops->len, p, bsize - ops->len);
			len -= bsize - ops->len;
			p += bsize - ops->len;
			tfm->crt_u.cipher.cit_xor_block(tfm->crt_cipher.cit_iv,
							ops->prev);
			tfm->__crt_alg->cra_cipher.cia_encrypt(
				crypto_tfm_ctx(tfm), tfm->crt_cipher.cit_iv,
				tfm->crt_cipher.cit_iv);

			/* clearing the length */
			ops->len = 0;

			/* encrypting the rest of the data */
			while (len > bsize) {
				tfm->crt_u.cipher.cit_xor_block(tfm->crt_cipher.cit_iv, p);
				tfm->__crt_alg->cra_cipher.cia_encrypt(
					crypto_tfm_ctx(tfm), tfm->crt_cipher.cit_iv,
					tfm->crt_cipher.cit_iv);
				p += bsize;
				len -= bsize;
			}

			/* keep the leftover (up to a full block) for later */
			if (len) {
				memcpy(ops->prev, p, len);
				ops->len = len;
			}
			crypto_kunmap(p, 0);
			crypto_yield(tfm);
			slen -= min(slen, (unsigned int)PAGE_SIZE - offset);
			offset = 0;
			pg++;
		}
	}
}