Example #1
static int sha512_init(struct shash_desc *desc)
{
	struct s390_sha_ctx *ctx = shash_desc_ctx(desc);

	*(__u64 *)&ctx->state[0] = 0x6a09e667f3bcc908ULL;
	*(__u64 *)&ctx->state[2] = 0xbb67ae8584caa73bULL;
	*(__u64 *)&ctx->state[4] = 0x3c6ef372fe94f82bULL;
	*(__u64 *)&ctx->state[6] = 0xa54ff53a5f1d36f1ULL;
	*(__u64 *)&ctx->state[8] = 0x510e527fade682d1ULL;
	*(__u64 *)&ctx->state[10] = 0x9b05688c2b3e6c1fULL;
	*(__u64 *)&ctx->state[12] = 0x1f83d9abfb41bd6bULL;
	*(__u64 *)&ctx->state[14] = 0x5be0cd19137e2179ULL;
	ctx->count = 0;
	ctx->func = KIMD_SHA_512;

	return 0;
}
static int sha256_init(struct shash_desc *desc)
{
	struct s390_sha_ctx *sctx = shash_desc_ctx(desc);

	sctx->state[0] = SHA256_H0;
	sctx->state[1] = SHA256_H1;
	sctx->state[2] = SHA256_H2;
	sctx->state[3] = SHA256_H3;
	sctx->state[4] = SHA256_H4;
	sctx->state[5] = SHA256_H5;
	sctx->state[6] = SHA256_H6;
	sctx->state[7] = SHA256_H7;
	sctx->count = 0;
	sctx->func = KIMD_SHA_256;

	return 0;
}
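Init handlers like the two above only seed the per-request state; they do useful work once they are wired into a struct shash_alg together with matching update/final handlers and registered with the crypto API. The sketch below is a hedged illustration of that registration step; it assumes the shared s390_sha_update/s390_sha_final helpers shown in later examples are reused, and the driver-name and priority values are made up rather than taken from the real driver.

/*
 * Illustrative registration sketch, not copied from the s390 driver:
 * it assumes the shared s390_sha_update/s390_sha_final helpers shown in
 * later examples, and the driver-name/priority values are made up.
 */
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <linux/module.h>
/* struct s390_sha_ctx and the handlers come from the driver's own headers */

static struct shash_alg example_sha256_alg = {
	.digestsize	= SHA256_DIGEST_SIZE,
	.init		= sha256_init,
	.update		= s390_sha_update,
	.final		= s390_sha_final,
	.descsize	= sizeof(struct s390_sha_ctx),
	.base		= {
		.cra_name	 = "sha256",
		.cra_driver_name = "sha256-example",
		.cra_priority	 = 300,
		.cra_blocksize	 = SHA256_BLOCK_SIZE,
		.cra_module	 = THIS_MODULE,
	}
};

static int __init example_sha256_mod_init(void)
{
	return crypto_register_shash(&example_sha256_alg);
}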
Example #3
static int sha384_init(struct shash_desc *desc)
{
	struct s390_sha_ctx *ctx = shash_desc_ctx(desc);

	*(__u64 *)&ctx->state[0] = 0xcbbb9d5dc1059ed8ULL;
	*(__u64 *)&ctx->state[2] = 0x629a292a367cd507ULL;
	*(__u64 *)&ctx->state[4] = 0x9159015a3070dd17ULL;
	*(__u64 *)&ctx->state[6] = 0x152fecd8f70e5939ULL;
	*(__u64 *)&ctx->state[8] = 0x67332667ffc00b31ULL;
	*(__u64 *)&ctx->state[10] = 0x8eb44a8768581511ULL;
	*(__u64 *)&ctx->state[12] = 0xdb0c2e0d64f98fa7ULL;
	*(__u64 *)&ctx->state[14] = 0x47b5481dbefa4fa4ULL;
	ctx->count = 0;
	ctx->func = KIMD_SHA_512;

	return 0;
}
Example #4
static int p8_ghash_update(struct shash_desc *desc,
        const u8 *src, unsigned int srclen)
{
    unsigned int len;
    struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
    struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);

    if (IN_INTERRUPT) {
        return crypto_shash_update(&dctx->fallback_desc, src, srclen);
    } else {
        if (dctx->bytes) {
            if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) {
                memcpy(dctx->buffer + dctx->bytes, src, srclen);
                dctx->bytes += srclen;
                return 0;
            }
            memcpy(dctx->buffer + dctx->bytes, src,
                    GHASH_DIGEST_SIZE - dctx->bytes);
            pagefault_disable();
            enable_kernel_altivec();
            enable_kernel_fp();
            gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer,
                    GHASH_DIGEST_SIZE);
            pagefault_enable();
            src += GHASH_DIGEST_SIZE - dctx->bytes;
            srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
            dctx->bytes = 0;
        }
        len = srclen & ~(GHASH_DIGEST_SIZE - 1);
        if (len) {
            pagefault_disable();
            enable_kernel_altivec();
            enable_kernel_fp();
            gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
            pagefault_enable();
            src += len;
            srclen -= len;
        }
        if (srclen) {
            memcpy(dctx->buffer, src, srclen);
            dctx->bytes = srclen;
        }
        return 0;
    }
}
static int ppc_spe_sha256_update(struct shash_desc *desc, const u8 *data,
			unsigned int len)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	const unsigned int offset = sctx->count & 0x3f;
	const unsigned int avail = 64 - offset;
	unsigned int bytes;
	const u8 *src = data;

	if (avail > len) {
		sctx->count += len;
		memcpy((char *)sctx->buf + offset, src, len);
		return 0;
	}

	sctx->count += len;

	if (offset) {
		memcpy((char *)sctx->buf + offset, src, avail);

		spe_begin();
		ppc_spe_sha256_transform(sctx->state, (const u8 *)sctx->buf, 1);
		spe_end();

		len -= avail;
		src += avail;
	}

	while (len > 63) {
		/* cut input data into smaller blocks */
		bytes = (len > MAX_BYTES) ? MAX_BYTES : len;
		bytes = bytes & ~0x3f;

		spe_begin();
		ppc_spe_sha256_transform(sctx->state, src, bytes >> 6);
		spe_end();

		src += bytes;
		len -= bytes;
	}

	memcpy((char *)sctx->buf, src, len);
	return 0;
}
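p8_ghash_update and ppc_spe_sha256_update above, like the s390 and Octeon update routines further down, all follow the same block-buffering discipline: top up any partially filled block, run the transform over as many full blocks as possible, and stash the tail for the next call. A minimal sketch of that pattern follows, with a hypothetical process_blocks() standing in for the hardware or SIMD transform and a made-up BLOCK_SIZE; it is an illustration of the shared pattern, not code from any of these drivers.

#include <string.h>

#define BLOCK_SIZE 64			/* assumed block size for this sketch */

struct buf_ctx {
	unsigned char buf[BLOCK_SIZE];	/* partial-block staging buffer */
	unsigned long long count;	/* total bytes seen so far */
};

/* stub standing in for the real compression function */
static void process_blocks(const unsigned char *data, unsigned int nblocks)
{
	(void)data;
	(void)nblocks;
}

static void buffered_update(struct buf_ctx *ctx, const unsigned char *data,
			    unsigned int len)
{
	unsigned int offset = ctx->count & (BLOCK_SIZE - 1);

	ctx->count += len;

	/* 1: not enough for a full block yet, just buffer the input */
	if (offset + len < BLOCK_SIZE) {
		memcpy(ctx->buf + offset, data, len);
		return;
	}

	/* 2: top up and flush the partially filled block */
	if (offset) {
		unsigned int fill = BLOCK_SIZE - offset;

		memcpy(ctx->buf + offset, data, fill);
		process_blocks(ctx->buf, 1);
		data += fill;
		len -= fill;
	}

	/* 3: process the remaining full blocks straight from the input */
	if (len >= BLOCK_SIZE) {
		process_blocks(data, len / BLOCK_SIZE);
		data += len & ~(BLOCK_SIZE - 1);
		len &= BLOCK_SIZE - 1;
	}

	/* 4: keep the tail for the next update or the final padding */
	memcpy(ctx->buf, data, len);
}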
Example #6
static int sha512_ssse3_update(struct shash_desc *desc, const u8 *data,
			       unsigned int len)
{
	struct sha512_state *sctx = shash_desc_ctx(desc);

	if (!irq_fpu_usable() ||
	    (sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE)
		return crypto_sha512_update(desc, data, len);

	/* make sure casting to sha512_block_fn() is safe */
	BUILD_BUG_ON(offsetof(struct sha512_state, state) != 0);

	kernel_fpu_begin();
	sha512_base_do_update(desc, data, len,
			      (sha512_block_fn *)sha512_transform_asm);
	kernel_fpu_end();

	return 0;
}
Example #7
static int rmd256_init(struct shash_desc *desc)
{
	struct rmd256_ctx *rctx = shash_desc_ctx(desc);

	rctx->byte_count = 0;

	rctx->state[0] = RMD_H0;
	rctx->state[1] = RMD_H1;
	rctx->state[2] = RMD_H2;
	rctx->state[3] = RMD_H3;
	rctx->state[4] = RMD_H5;
	rctx->state[5] = RMD_H6;
	rctx->state[6] = RMD_H7;
	rctx->state[7] = RMD_H8;

	memset(rctx->buffer, 0, sizeof(rctx->buffer));

	return 0;
}
Example #8
/*! \fn static int md5_final(struct shash_desc *desc, u8 *out)
 *  \ingroup IFX_MD5_FUNCTIONS
 *  \brief compute final md5 value
 *  \param desc linux crypto shash descriptor
 *  \param out final md5 output value
*/
static int md5_final(struct shash_desc *desc, u8 *out)
{
    struct md5_ctx *mctx = shash_desc_ctx(desc);
    const unsigned int offset = mctx->byte_count & 0x3f;
    char *p = (char *)mctx->block + offset;
    int padding = 56 - (offset + 1);
    volatile struct deu_hash_t *hashs = (struct deu_hash_t *) HASH_START;
    unsigned long flag;

    *p++ = 0x80;
    if (padding < 0) {
        memset(p, 0x00, padding + sizeof (u64));
        md5_transform_helper(mctx);
        p = (char *)mctx->block;
        padding = 56;
    }

    memset(p, 0, padding);
    mctx->block[14] = endian_swap(mctx->byte_count << 3);
    mctx->block[15] = endian_swap(mctx->byte_count >> 29);

#if 0
    le32_to_cpu_array(mctx->block, (sizeof(mctx->block) -
                      sizeof(u64)) / sizeof(u32));
#endif

    md5_transform(mctx, mctx->hash, mctx->block);

    CRTCL_SECT_START;

    *((u32 *) out + 0) = endian_swap (hashs->D1R);
    *((u32 *) out + 1) = endian_swap (hashs->D2R);
    *((u32 *) out + 2) = endian_swap (hashs->D3R);
    *((u32 *) out + 3) = endian_swap (hashs->D4R);

    CRTCL_SECT_END;

    // Wipe context
    memset(mctx, 0, sizeof(*mctx));

    return 0;
}
/* Add padding and return the message digest. */
static int sha256_neon_final(struct shash_desc *desc, u8 *out)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	unsigned int i, index, padlen;
	__be32 *dst = (__be32 *)out;
	__be64 bits;
	static const u8 padding[SHA256_BLOCK_SIZE] = { 0x80, };

	/* save number of bits */
	bits = cpu_to_be64(sctx->count << 3);

	/* Pad out to 56 mod 64 and append length */
	index = sctx->count % SHA256_BLOCK_SIZE;
	padlen = (index < 56) ? (56 - index) : ((SHA256_BLOCK_SIZE+56)-index);

	if (!may_use_simd()) {
		sha256_update(desc, padding, padlen);
		sha256_update(desc, (const u8 *)&bits, sizeof(bits));
	} else {
		kernel_neon_begin();
		/* We need to fill a whole block for __sha256_neon_update() */
		if (padlen <= 56) {
			sctx->count += padlen;
			memcpy(sctx->buf + index, padding, padlen);
		} else {
			__sha256_neon_update(desc, padding, padlen, index);
		}
		__sha256_neon_update(desc, (const u8 *)&bits,
					sizeof(bits), 56);
		kernel_neon_end();
	}

	/* Store state in digest */
	for (i = 0; i < 8; i++)
		dst[i] = cpu_to_be32(sctx->state[i]);

	/* Wipe context */
	memset(sctx, 0, sizeof(*sctx));

	return 0;
}
Example #10
static int nx_sha256_init(struct shash_desc *desc)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_sg *out_sg;
	int len;
	u32 max_sg_len;

	nx_ctx_init(nx_ctx, HCOP_FC_SHA);

	memset(sctx, 0, sizeof *sctx);

	nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256];

	NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256);

	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
	max_sg_len = min_t(u64, max_sg_len,
			nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	len = SHA256_DIGEST_SIZE;
	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
				  &len, max_sg_len);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	if (len != SHA256_DIGEST_SIZE)
		return -EINVAL;

	sctx->state[0] = __cpu_to_be32(SHA256_H0);
	sctx->state[1] = __cpu_to_be32(SHA256_H1);
	sctx->state[2] = __cpu_to_be32(SHA256_H2);
	sctx->state[3] = __cpu_to_be32(SHA256_H3);
	sctx->state[4] = __cpu_to_be32(SHA256_H4);
	sctx->state[5] = __cpu_to_be32(SHA256_H5);
	sctx->state[6] = __cpu_to_be32(SHA256_H6);
	sctx->state[7] = __cpu_to_be32(SHA256_H7);
	sctx->count = 0;

	return 0;
}
int s390_sha_final(struct shash_desc *desc, u8 *out)
{
	struct s390_sha_ctx *ctx = shash_desc_ctx(desc);
	unsigned int bsize = crypto_shash_blocksize(desc->tfm);
	u64 bits;
	unsigned int index, end, plen;
	int ret;

	/* SHA-512 uses 128 bit padding length */
	plen = (bsize > SHA256_BLOCK_SIZE) ? 16 : 8;

	/* must perform manual padding */
	index = ctx->count & (bsize - 1);
	end = (index < bsize - plen) ? bsize : (2 * bsize);

	/* start pad with 1 */
	ctx->buf[index] = 0x80;
	index++;

	/* pad with zeros */
	memset(ctx->buf + index, 0x00, end - index - 8);

	/*
	 * Append message length. Well, SHA-512 wants a 128 bit length value,
	 * nevertheless we use u64, should be enough for now...
	 */
	bits = ctx->count * 8;
	memcpy(ctx->buf + end - 8, &bits, sizeof(bits));

	ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, end);
	BUG_ON(ret != end);

	/* copy digest to out */
	memcpy(out, ctx->state, crypto_shash_digestsize(desc->tfm));
	/* wipe context */
	memset(ctx, 0, sizeof *ctx);

	return 0;
}
Example #12
static int octeon_sha1_final(struct shash_desc *desc, u8 *out)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	static const u8 padding[64] = { 0x80, };
	struct octeon_cop2_state state;
	__be32 *dst = (__be32 *)out;
	unsigned int pad_len;
	unsigned long flags;
	unsigned int index;
	__be64 bits;
	int i;

	/* Save number of bits. */
	bits = cpu_to_be64(sctx->count << 3);

	/* Pad out to 56 mod 64. */
	index = sctx->count & 0x3f;
	pad_len = (index < 56) ? (56 - index) : ((64+56) - index);

	flags = octeon_crypto_enable(&state);
	octeon_sha1_store_hash(sctx);

	__octeon_sha1_update(sctx, padding, pad_len);

	/* Append length (before padding). */
	__octeon_sha1_update(sctx, (const u8 *)&bits, sizeof(bits));

	octeon_sha1_read_hash(sctx);
	octeon_crypto_disable(&state, flags);

	/* Store state in digest */
	for (i = 0; i < 5; i++)
		dst[i] = cpu_to_be32(sctx->state[i]);

	/* Zeroize sensitive information. */
	memset(sctx, 0, sizeof(*sctx));

	return 0;
}
Example #13
static int octeon_md5_update(struct shash_desc *desc, const u8 *data,
			     unsigned int len)
{
	struct md5_state *mctx = shash_desc_ctx(desc);
	const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f);
	struct octeon_cop2_state state;
	unsigned long flags;

	mctx->byte_count += len;

	if (avail > len) {
		memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
		       data, len);
		return 0;
	}

	memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), data,
	       avail);

	flags = octeon_crypto_enable(&state);
	octeon_md5_store_hash(mctx);

	octeon_md5_transform(mctx->block);
	data += avail;
	len -= avail;

	while (len >= sizeof(mctx->block)) {
		octeon_md5_transform(data);
		data += sizeof(mctx->block);
		len -= sizeof(mctx->block);
	}

	octeon_md5_read_hash(mctx);
	octeon_crypto_disable(&state, flags);

	memcpy(mctx->block, data, len);

	return 0;
}
static int ghash_update(struct shash_desc *desc,
			 const u8 *src, unsigned int srclen)
{
	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
	struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
	u8 *dst = dctx->buffer;

	if (!ctx->gf128)
		return -ENOKEY;

	if (dctx->bytes) {
		int n = min(srclen, dctx->bytes);
		u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes);

		dctx->bytes -= n;
		srclen -= n;

		while (n--)
			*pos++ ^= *src++;

		if (!dctx->bytes)
			gf128mul_4k_lle((be128 *)dst, ctx->gf128);
	}

	while (srclen >= GHASH_BLOCK_SIZE) {
		crypto_xor(dst, src, GHASH_BLOCK_SIZE);
		gf128mul_4k_lle((be128 *)dst, ctx->gf128);
		src += GHASH_BLOCK_SIZE;
		srclen -= GHASH_BLOCK_SIZE;
	}

	if (srclen) {
		dctx->bytes = GHASH_BLOCK_SIZE - srclen;
		while (srclen--)
			*dst++ ^= *src++;
	}

	return 0;
}
Example #15
static int sha256_init(struct shash_desc *desc)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

#if defined(CONFIG_FIPS_FMP)
	if (unlikely(in_fmp_fips_err())) {
		printk(KERN_ERR "FMP sha256_init failed due to fips error\n");
		return -EPERM;
	}
#endif
	sctx->state[0] = SHA256_H0;
	sctx->state[1] = SHA256_H1;
	sctx->state[2] = SHA256_H2;
	sctx->state[3] = SHA256_H3;
	sctx->state[4] = SHA256_H4;
	sctx->state[5] = SHA256_H5;
	sctx->state[6] = SHA256_H6;
	sctx->state[7] = SHA256_H7;
	sctx->count = 0;

	return 0;
}
Example #16
static int ppc_spe_sha256_final(struct shash_desc *desc, u8 *out)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	const unsigned int offset = sctx->count & 0x3f;
	char *p = (char *)sctx->buf + offset;
	int padlen;
	__be64 *pbits = (__be64 *)(((char *)&sctx->buf) + 56);
	__be32 *dst = (__be32 *)out;

	padlen = 55 - offset;
	*p++ = 0x80;

	spe_begin();

	if (padlen < 0) {
		memset(p, 0x00, padlen + sizeof (u64));
		ppc_spe_sha256_transform(sctx->state, sctx->buf, 1);
		p = (char *)sctx->buf;
		padlen = 56;
	}

	memset(p, 0, padlen);
	*pbits = cpu_to_be64(sctx->count << 3);
	ppc_spe_sha256_transform(sctx->state, sctx->buf, 1);

	spe_end();

	dst[0] = cpu_to_be32(sctx->state[0]);
	dst[1] = cpu_to_be32(sctx->state[1]);
	dst[2] = cpu_to_be32(sctx->state[2]);
	dst[3] = cpu_to_be32(sctx->state[3]);
	dst[4] = cpu_to_be32(sctx->state[4]);
	dst[5] = cpu_to_be32(sctx->state[5]);
	dst[6] = cpu_to_be32(sctx->state[6]);
	dst[7] = cpu_to_be32(sctx->state[7]);

	ppc_sha256_clear_context(sctx);
	return 0;
}
Example #17
static int nx_sha512_init(struct shash_desc *desc)
{
	struct sha512_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	int len;
	int rc;

	nx_ctx_init(nx_ctx, HCOP_FC_SHA);

	memset(sctx, 0, sizeof *sctx);

	nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512];

	NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512);

	len = SHA512_DIGEST_SIZE;
	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
				  &nx_ctx->op.outlen,
				  &len,
				  (u8 *)sctx->state,
				  NX_DS_SHA512);

	if (rc || len != SHA512_DIGEST_SIZE)
		goto out;

	sctx->state[0] = __cpu_to_be64(SHA512_H0);
	sctx->state[1] = __cpu_to_be64(SHA512_H1);
	sctx->state[2] = __cpu_to_be64(SHA512_H2);
	sctx->state[3] = __cpu_to_be64(SHA512_H3);
	sctx->state[4] = __cpu_to_be64(SHA512_H4);
	sctx->state[5] = __cpu_to_be64(SHA512_H5);
	sctx->state[6] = __cpu_to_be64(SHA512_H6);
	sctx->state[7] = __cpu_to_be64(SHA512_H7);
	sctx->count[0] = 0;

out:
	return 0;
}
int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
{
	struct s390_sha_ctx *ctx = shash_desc_ctx(desc);
	unsigned int bsize = crypto_shash_blocksize(desc->tfm);
	unsigned int index;
	int ret;

	/* how much is already in the buffer? */
	index = ctx->count & (bsize - 1);
	ctx->count += len;

	if ((index + len) < bsize)
		goto store;

	/* process one stored block */
	if (index) {
		memcpy(ctx->buf + index, data, bsize - index);
		ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, bsize);
		BUG_ON(ret != bsize);
		data += bsize - index;
		len -= bsize - index;
		index = 0;
	}

	/* process as many blocks as possible */
	if (len >= bsize) {
		ret = crypt_s390_kimd(ctx->func, ctx->state, data,
				      len & ~(bsize - 1));
		BUG_ON(ret != (len & ~(bsize - 1)));
		data += ret;
		len -= ret;
	}
store:
	if (len)
		memcpy(ctx->buf + index, data, len);

	return 0;
}
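For context, callers never invoke handlers like s390_sha_update or s390_sha_final directly; they go through the generic shash wrappers. Below is a hedged caller-side sketch of that flow using standard crypto API calls, not taken from any driver in this listing; the desc->flags assignment assumes a kernel of the same vintage as these examples, where struct shash_desc still has a flags field.

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

/* Illustrative only: hash a buffer with whatever "sha256" driver is loaded. */
static int example_hash_buffer(const u8 *data, unsigned int len, u8 *digest)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int ret;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}
	desc->tfm = tfm;
	desc->flags = 0;	/* older kernels only; field absent in newer ones */

	ret = crypto_shash_init(desc);
	if (!ret)
		ret = crypto_shash_update(desc, data, len);
	if (!ret)
		ret = crypto_shash_final(desc, digest);

	kfree(desc);
	crypto_free_shash(tfm);
	return ret;
}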
Example #19
static int p8_ghash_final(struct shash_desc *desc, u8 *out)
{
    int i;
    struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
    struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);

    if (IN_INTERRUPT) {
        return crypto_shash_final(&dctx->fallback_desc, out);
    } else {
        if (dctx->bytes) {
            for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
                dctx->buffer[i] = 0;
            pagefault_disable();
            enable_kernel_altivec();
            enable_kernel_fp();
            gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer,
                    GHASH_DIGEST_SIZE);
            pagefault_enable();
            dctx->bytes = 0;
        }
        memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
        return 0;
    }
}
static int crc32c_pclmul_finup(struct shash_desc *desc, const u8 *data,
			      unsigned int len, u8 *out)
{
	return __crc32c_pclmul_finup(shash_desc_ctx(desc), data, len, out);
}
Example #21
static int adler32_finup(struct shash_desc *desc, const u8 *data,
			 unsigned int len, u8 *out)
{
	return __adler32_finup(shash_desc_ctx(desc), data, len, out);
}
Example #22
static int nx_sha512_final(struct shash_desc *desc, u8 *out)
{
	struct sha512_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
	u64 count0;
	unsigned long irq_flags;
	int rc;
	int len;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	/* final is represented by continuing the operation and indicating that
	 * this is not an intermediate operation */
	if (sctx->count[0] >= SHA512_BLOCK_SIZE) {
		/* we've hit the nx chip previously, now we're finalizing,
		 * so copy over the partial digest */
		memcpy(csbcpb->cpb.sha512.input_partial_digest, sctx->state,
							SHA512_DIGEST_SIZE);
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
	} else {
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
	}

	NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

	count0 = sctx->count[0] * 8;

	csbcpb->cpb.sha512.message_bit_length_lo = count0;

	len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1);
	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
				  &nx_ctx->op.inlen,
				  &len,
				  (u8 *)sctx->buf,
				  NX_DS_SHA512);

	if (rc || len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1)))
		goto out;

	len = SHA512_DIGEST_SIZE;
	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
				  &nx_ctx->op.outlen,
				  &len,
				  out,
				  NX_DS_SHA512);

	if (rc)
		goto out;

	if (!nx_ctx->op.outlen) {
		rc = -EINVAL;
		goto out;
	}

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->sha512_ops));
	atomic64_add(sctx->count[0], &(nx_ctx->stats->sha512_bytes));

	memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
Example #23
static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
			    unsigned int len)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
	struct nx_sg *in_sg;
	u64 to_process = 0, leftover, total;
	unsigned long irq_flags;
	int rc = 0;
	int data_len;
	u32 max_sg_len;
	u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE);

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	/* 2 cases for total data len:
	 *  1: < SHA256_BLOCK_SIZE: copy into state, return 0
	 *  2: >= SHA256_BLOCK_SIZE: process X blocks, copy in leftover
	 */
	total = (sctx->count % SHA256_BLOCK_SIZE) + len;
	if (total < SHA256_BLOCK_SIZE) {
		memcpy(sctx->buf + buf_len, data, len);
		sctx->count += len;
		goto out;
	}

	memcpy(csbcpb->cpb.sha256.message_digest, sctx->state, SHA256_DIGEST_SIZE);
	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

	in_sg = nx_ctx->in_sg;
	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
	max_sg_len = min_t(u64, max_sg_len,
			nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	do {
		/*
		 * to_process: the SHA256_BLOCK_SIZE data chunk to process in
		 * this update. This value is also restricted by the sg list
		 * limits.
		 */
		to_process = total - to_process;
		to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);

		if (buf_len) {
			data_len = buf_len;
			in_sg = nx_build_sg_list(nx_ctx->in_sg,
						 (u8 *) sctx->buf,
						 &data_len,
						 max_sg_len);

			if (data_len != buf_len) {
				rc = -EINVAL;
				goto out;
			}
		}

		data_len = to_process - buf_len;
		in_sg = nx_build_sg_list(in_sg, (u8 *) data,
					 &data_len, max_sg_len);

		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);

		to_process = (data_len + buf_len);
		leftover = total - to_process;

		/*
		 * we've hit the nx chip previously and we're updating
		 * again, so copy over the partial digest.
		 */
		memcpy(csbcpb->cpb.sha256.input_partial_digest,
			       csbcpb->cpb.sha256.message_digest,
			       SHA256_DIGEST_SIZE);

		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
			rc = -EINVAL;
			goto out;
		}

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		atomic_inc(&(nx_ctx->stats->sha256_ops));

		total -= to_process;
		data += to_process - buf_len;
		buf_len = 0;

	} while (leftover >= SHA256_BLOCK_SIZE);

	/* copy the leftover back into the state struct */
	if (leftover)
		memcpy(sctx->buf, data, leftover);

	sctx->count += len;
	memcpy(sctx->state, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
Example #24
static int skein512_final(struct shash_desc *desc, u8 *out)
{
	return skein_512_final((struct skein_512_ctx *)shash_desc_ctx(desc),
				out);
}
Example #25
static int skein256_init(struct shash_desc *desc)
{
	return skein_256_init((struct skein_256_ctx *)shash_desc_ctx(desc),
			SKEIN256_DIGEST_BIT_SIZE);
}
Example #26
static int nx_sha256_final(struct shash_desc *desc, u8 *out)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
	struct nx_sg *in_sg, *out_sg;
	unsigned long irq_flags;
	u32 max_sg_len;
	int rc = 0;
	int len;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
	max_sg_len = min_t(u64, max_sg_len,
			nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	/* final is represented by continuing the operation and indicating that
	 * this is not an intermediate operation */
	if (sctx->count >= SHA256_BLOCK_SIZE) {
		/* we've hit the nx chip previously, now we're finalizing,
		 * so copy over the partial digest */
		memcpy(csbcpb->cpb.sha256.input_partial_digest, sctx->state, SHA256_DIGEST_SIZE);
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
	} else {
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
	}

	csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8);

	len = sctx->count & (SHA256_BLOCK_SIZE - 1);
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) sctx->buf,
				 &len, max_sg_len);

	if (len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) {
		rc = -EINVAL;
		goto out;
	}

	len = SHA256_DIGEST_SIZE;
	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, max_sg_len);

	if (len != SHA256_DIGEST_SIZE) {
		rc = -EINVAL;
		goto out;
	}

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
	if (!nx_ctx->op.outlen) {
		rc = -EINVAL;
		goto out;
	}

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->sha256_ops));

	atomic64_add(sctx->count, &(nx_ctx->stats->sha256_bytes));
	memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
Example #27
static int skein1024_init(struct shash_desc *desc)
{
	return skein_1024_init((struct skein_1024_ctx *)shash_desc_ctx(desc),
				SKEIN1024_DIGEST_BIT_SIZE);
}
Example #28
static int padlock_sha_export(struct shash_desc *desc, void *out)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);

	return crypto_shash_export(&dctx->fallback, out);
}
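An export handler is only half of the state-serialization contract; the driver presumably pairs it with an import handler that forwards to the fallback in the same way. The sketch below shows that assumed counterpart, inferred from the symmetry of the crypto_shash_export()/crypto_shash_import() API rather than from code quoted in this listing.

/* Assumed counterpart to padlock_sha_export: restore the fallback state. */
static int padlock_sha_import(struct shash_desc *desc, const void *in)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);

	return crypto_shash_import(&dctx->fallback, in);
}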
Example #29
static int skein512_init(struct shash_desc *desc)
{
	return skein_512_init((struct skein_512_ctx *)shash_desc_ctx(desc),
				SKEIN512_DIGEST_BIT_SIZE);
}
Example #30
static int skein1024_update(struct shash_desc *desc, const u8 *data,
			unsigned int len)
{
	return skein_1024_update((struct skein_1024_ctx *)shash_desc_ctx(desc),
				data, len);
}