Example No. 1
/*
 * Incremental update: extends the checksum already held in *zcp
 * rather than resetting it, so callers can feed a stream in chunks.
 */
void
fletcher_4_incremental_native(const void *buf, uint64_t size,
    zio_cksum_t *zcp)
{
	ASSERT(IS_P2ALIGNED(size, sizeof (uint32_t)));

	fletcher_4_scalar_native(buf, size, zcp);
}
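The incremental entry points extend the checksum already stored in *zcp rather than resetting it, so a buffer can be fed in 4-byte-aligned chunks. A minimal self-contained sketch of why that composes, with a stand-in zio_cksum_t and a plain scalar Fletcher-4 update (illustrative names, not the ZFS source):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct { uint64_t zc_word[4]; } zio_cksum_t;	/* stand-in */

/* Plain Fletcher-4 scalar update; the running sums live in *zcp. */
static void
fletcher_4_scalar_update(const void *buf, uint64_t size, zio_cksum_t *zcp)
{
	const uint32_t *ip = buf;
	const uint32_t *ipend = ip + (size / sizeof (uint32_t));
	uint64_t a = zcp->zc_word[0], b = zcp->zc_word[1];
	uint64_t c = zcp->zc_word[2], d = zcp->zc_word[3];

	for (; ip < ipend; ip++) {
		a += ip[0];
		b += a;
		c += b;
		d += c;
	}
	zcp->zc_word[0] = a;
	zcp->zc_word[1] = b;
	zcp->zc_word[2] = c;
	zcp->zc_word[3] = d;
}

int
main(void)
{
	uint32_t data[16];
	zio_cksum_t whole = { { 0 } }, chunked = { { 0 } };

	for (int i = 0; i < 16; i++)
		data[i] = (uint32_t)i;

	/* One pass over the full 64-byte buffer ... */
	fletcher_4_scalar_update(data, 64, &whole);

	/* ... equals two incremental passes over 32-byte chunks. */
	fletcher_4_scalar_update(data, 32, &chunked);
	fletcher_4_scalar_update(data + 8, 32, &chunked);

	printf("%s\n", memcmp(&whole, &chunked, sizeof (whole)) == 0 ?
	    "match" : "mismatch");
	return (0);
}

Because the recurrence depends only on the four accumulated words, the chunked result matches the one-pass result, which is exactly what lets the incremental entry point delegate to the scalar kernel without extra state.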
Example No. 2
int
fletcher_4_incremental_native(void *buf, size_t size, void *data)
{
	zio_cksum_t *zcp = data;
	/* Use scalar impl to directly update cksum of small blocks */
	if (size < SPA_MINBLOCKSIZE)
		fletcher_4_scalar_native((fletcher_4_ctx_t *)zcp, buf, size);
	else
		/* B_TRUE selects native (non-byteswapped) byte order */
		fletcher_4_incremental_impl(B_TRUE, buf, size, zcp);
	return (0);
}
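In this newer variant the signature (void *buf, size_t size, void *data) with an int return matches the shape of a per-chunk iterator callback such as OpenZFS's abd_iterate_func(), and the unconditional return (0) keeps such a walk going. A hedged sketch of that callback pattern, with a hypothetical iterate_chunks() standing in for the real iterator:

#include <stddef.h>
#include <stdio.h>

typedef int (*iter_func_t)(void *buf, size_t len, void *priv);

/* Walk a buffer in fixed-size chunks, stopping on a nonzero return. */
static int
iterate_chunks(void *buf, size_t size, size_t chunk, iter_func_t func,
    void *priv)
{
	char *p = buf;

	while (size > 0) {
		size_t len = size < chunk ? size : chunk;
		int err = func(p, len, priv);
		if (err != 0)
			return (err);
		p += len;
		size -= len;
	}
	return (0);
}

/* Dummy callback with the same shape as fletcher_4_incremental_native. */
static int
count_bytes(void *buf, size_t len, void *priv)
{
	(void) buf;
	*(size_t *)priv += len;
	return (0);	/* 0 keeps the walk going, as in the ZFS function */
}

int
main(void)
{
	char data[100] = { 0 };
	size_t total = 0;

	(void) iterate_chunks(data, sizeof (data), 32, count_bytes, &total);
	printf("visited %zu bytes\n", total);
	return (0);
}

With the real API, the call would plausibly look like abd_iterate_func(abd, 0, size, fletcher_4_incremental_native, zcp), passing the checksum as the opaque private pointer.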
Example No. 3
/*ARGSUSED*/
void
fletcher_4_native(const void *buf, uint64_t size,
    const void *ctx_template, zio_cksum_t *zcp)
{
	/* Largest prefix that is a multiple of 64 bytes. */
	const uint64_t p2size = P2ALIGN(size, 64);

	ASSERT(IS_P2ALIGNED(size, sizeof (uint32_t)));

	if (size == 0 || p2size == 0) {
		ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0);

		/* Too small for the bulk path: scalar over the whole buffer. */
		if (size > 0)
			fletcher_4_scalar_native((fletcher_4_ctx_t *)zcp,
			    buf, size);
	} else {
		/* Bulk via the selected (possibly vectorized) implementation. */
		fletcher_4_native_impl(buf, p2size, zcp);

		/* Finish any 4-byte-aligned tail with the scalar kernel. */
		if (p2size < size)
			fletcher_4_scalar_native((fletcher_4_ctx_t *)zcp,
			    (char *)buf + p2size, size - p2size);
	}
}
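fletcher_4_native splits the work: the largest 64-byte-aligned prefix goes to the selected (possibly vectorized) implementation, and any remaining 4-byte-aligned tail is finished by the scalar kernel. A small sketch of the split arithmetic, with P2ALIGN redefined locally (assumed to match the definition in sys/sysmacros.h):

#include <stdint.h>
#include <stdio.h>

/* Assumption: mirrors P2ALIGN from sys/sysmacros.h. */
#define	P2ALIGN(x, align)	((x) & -(align))

int
main(void)
{
	uint64_t sizes[] = { 60, 64, 200, 4096 };

	for (size_t i = 0; i < sizeof (sizes) / sizeof (sizes[0]); i++) {
		uint64_t size = sizes[i];
		uint64_t p2size = P2ALIGN(size, 64);

		printf("size %4llu: impl %4llu bytes, scalar tail %llu\n",
		    (unsigned long long)size,
		    (unsigned long long)p2size,
		    (unsigned long long)(size - p2size));
	}
	return (0);
}

For size 200 this prints an impl portion of 192 bytes and a scalar tail of 8; for size 60 the prefix is 0, which is the case the function routes to the scalar-only branch.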
Example No. 4
void
fletcher_4_native_varsize(const void *buf, uint64_t size, zio_cksum_t *zcp)
{
	/* No alignment ASSERT: reset, then one scalar pass. */
	ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0);
	fletcher_4_scalar_native((fletcher_4_ctx_t *)zcp, buf, size);
}
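Unlike fletcher_4_native, the varsize entry point always resets the checksum and takes a single scalar pass, with no alignment ASSERT, so it serves buffers too small or oddly sized for the bulk path. A stand-in for the reset macro, assumed to match ZIO_SET_CHECKSUM in sys/zio.h (illustrative, not the ZFS source):

#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t zc_word[4]; } zio_cksum_t;	/* stand-in */

/* Assumption: mirrors ZIO_SET_CHECKSUM from sys/zio.h. */
#define	ZIO_SET_CHECKSUM(zcp, w0, w1, w2, w3)	\
	{					\
		(zcp)->zc_word[0] = (w0);	\
		(zcp)->zc_word[1] = (w1);	\
		(zcp)->zc_word[2] = (w2);	\
		(zcp)->zc_word[3] = (w3);	\
	}

int
main(void)
{
	zio_cksum_t zc = { { 1, 2, 3, 4 } };

	ZIO_SET_CHECKSUM(&zc, 0, 0, 0, 0);	/* fresh one-shot checksum */
	printf("%llu %llu %llu %llu\n",
	    (unsigned long long)zc.zc_word[0],
	    (unsigned long long)zc.zc_word[1],
	    (unsigned long long)zc.zc_word[2],
	    (unsigned long long)zc.zc_word[3]);
	return (0);
}

This reset is what distinguishes the one-shot entry points (Examples 3 and 4) from the incremental ones (Examples 1 and 2), which deliberately leave *zcp untouched so partial results accumulate.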