Example no. 1
/*
 * Generate the checksum.
 */
void
zio_checksum_compute(zio_t *zio, enum zio_checksum checksum,
    abd_t *abd, uint64_t size)
{
	blkptr_t *bp = zio->io_bp;
	uint64_t offset = zio->io_offset;
	zio_checksum_info_t *ci = &zio_checksum_table[checksum];
	zio_cksum_t cksum;
	spa_t *spa = zio->io_spa;

	ASSERT((uint_t)checksum < ZIO_CHECKSUM_FUNCTIONS);
	ASSERT(ci->ci_func[0] != NULL);

	zio_checksum_template_init(checksum, spa);

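	/*
	 * Embedded checksums (ZIL blocks, gang headers and labels) are
	 * stored in a zio_eck_t trailer inside the data itself rather
	 * than in the block pointer.
	 */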
	if (ci->ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
		zio_eck_t *eck;
		void *data = abd_to_buf(abd);

		if (checksum == ZIO_CHECKSUM_ZILOG2) {
			zil_chain_t *zilc = data;

			size = P2ROUNDUP_TYPED(zilc->zc_nused, ZIL_MIN_BLKSZ,
			    uint64_t);
			eck = &zilc->zc_eck;
		} else {
			eck = (zio_eck_t *)((char *)data + size) - 1;
		}
		if (checksum == ZIO_CHECKSUM_GANG_HEADER)
			zio_checksum_gang_verifier(&eck->zec_cksum, bp);
		else if (checksum == ZIO_CHECKSUM_LABEL)
			zio_checksum_label_verifier(&eck->zec_cksum, offset);
		else
			bp->blk_cksum = eck->zec_cksum;
		eck->zec_magic = ZEC_MAGIC;
		ci->ci_func[0](abd, size, spa->spa_cksum_tmpls[checksum],
		    &cksum);
		eck->zec_cksum = cksum;
	} else {
		ci->ci_func[0](abd, size, spa->spa_cksum_tmpls[checksum],
		    &bp->blk_cksum);
	}
}
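Example no. 1 stores the block's checksum in a zio_eck_t trailer embedded in the data itself: the trailer's checksum field is first seeded with a verifier (or its previous contents are copied into the block pointer), the checksum is then computed over the entire buffer, trailer included, and only afterwards is the result written back into the trailer. The minimal, self-contained C sketch below illustrates that ordering outside of ZFS; trailer_t, toy_cksum and embed_cksum are hypothetical names, not part of the OpenZFS API.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define	TRAILER_MAGIC	0x210da7abadULL

typedef struct trailer {
	uint64_t	magic;		/* identifies an embedded trailer */
	uint64_t	cksum;		/* checksum of the whole buffer */
} trailer_t;

/* Toy checksum: 64-bit byte sum over the buffer. */
static uint64_t
toy_cksum(const void *buf, size_t size)
{
	const uint8_t *p = buf;
	uint64_t sum = 0;

	for (size_t i = 0; i < size; i++)
		sum += p[i];
	return (sum);
}

/*
 * Compute an embedded checksum: seed the trailer with a verifier,
 * checksum the entire buffer (trailer included), then store the result.
 */
static void
embed_cksum(void *buf, size_t size, uint64_t verifier)
{
	trailer_t *t = (trailer_t *)((char *)buf + size) - 1;

	t->magic = TRAILER_MAGIC;
	t->cksum = verifier;			/* seeded before computing */
	t->cksum = toy_cksum(buf, size);	/* overwritten with result */
}

/* Verify by reproducing the same ordering and comparing. */
static int
check_cksum(void *buf, size_t size, uint64_t verifier)
{
	trailer_t *t = (trailer_t *)((char *)buf + size) - 1;
	uint64_t expected = t->cksum;
	uint64_t actual;

	t->cksum = verifier;
	actual = toy_cksum(buf, size);
	t->cksum = expected;			/* restore the buffer */

	return (t->magic == TRAILER_MAGIC && actual == expected);
}

int
main(void)
{
	size_t size = 4096;
	void *buf = calloc(1, size);

	memset(buf, 0xAA, size - sizeof (trailer_t));
	embed_cksum(buf, size, 0);
	printf("checksum ok: %d\n", check_cksum(buf, size, 0));
	free(buf);
	return (0);
}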
Example no. 2
void
vdev_raidz_math_init(void)
{
	raidz_impl_ops_t *curr_impl;
	zio_t *bench_zio = NULL;
	raidz_map_t *bench_rm = NULL;
	uint64_t bench_parity;
	int i, c, fn;

	/* move supported impl into raidz_supp_impl */
	for (i = 0, c = 0; i < ARRAY_SIZE(raidz_all_maths); i++) {
		curr_impl = (raidz_impl_ops_t *)raidz_all_maths[i];

		/* initialize impl */
		if (curr_impl->init)
			curr_impl->init();

		if (curr_impl->is_supported())
			raidz_supp_impl[c++] = (raidz_impl_ops_t *)curr_impl;
	}
	membar_producer();		/* complete raidz_supp_impl[] init */
	raidz_supp_impl_cnt = c;	/* number of supported impl */

#if !defined(_KERNEL)
	/* Skip benchmarking and use last implementation as fastest */
	memcpy(&vdev_raidz_fastest_impl,
	    raidz_supp_impl[raidz_supp_impl_cnt - 1],
	    sizeof (vdev_raidz_fastest_impl));
	strcpy(vdev_raidz_fastest_impl.name, "fastest");

	raidz_math_initialized = B_TRUE;

	/* Use 'cycle' math selection method for userspace */
	VERIFY0(vdev_raidz_impl_set("cycle"));
	return;
#endif

	/* Fake a zio and run the benchmark on a warmed-up buffer */
	bench_zio = kmem_zalloc(sizeof (zio_t), KM_SLEEP);
	bench_zio->io_offset = 0;
	bench_zio->io_size = BENCH_ZIO_SIZE; /* only data columns */
	bench_zio->io_abd = abd_alloc_linear(BENCH_ZIO_SIZE, B_TRUE);
	memset(abd_to_buf(bench_zio->io_abd), 0xAA, BENCH_ZIO_SIZE);

	/* Benchmark parity generation methods */
	for (fn = 0; fn < RAIDZ_GEN_NUM; fn++) {
		bench_parity = fn + 1;
		/* New raidz_map is needed for each generate_p/q/r */
		bench_rm = vdev_raidz_map_alloc(bench_zio, SPA_MINBLOCKSHIFT,
		    BENCH_D_COLS + bench_parity, bench_parity);

		benchmark_raidz_impl(bench_rm, fn, benchmark_gen_impl);

		vdev_raidz_map_free(bench_rm);
	}

	/* Benchmark data reconstruction methods */
	bench_rm = vdev_raidz_map_alloc(bench_zio, SPA_MINBLOCKSHIFT,
	    BENCH_COLS, PARITY_PQR);

	for (fn = 0; fn < RAIDZ_REC_NUM; fn++)
		benchmark_raidz_impl(bench_rm, fn, benchmark_rec_impl);

	vdev_raidz_map_free(bench_rm);

	/* cleanup the bench zio */
	abd_free(bench_zio->io_abd);
	kmem_free(bench_zio, sizeof (zio_t));

	/* install kstats for all impl */
	raidz_math_kstat = kstat_create("zfs", 0, "vdev_raidz_bench", "misc",
	    KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VIRTUAL);

	if (raidz_math_kstat != NULL) {
		raidz_math_kstat->ks_data = NULL;
		raidz_math_kstat->ks_ndata = UINT32_MAX;
		kstat_set_raw_ops(raidz_math_kstat,
		    raidz_math_kstat_headers,
		    raidz_math_kstat_data,
		    raidz_math_kstat_addr);
		kstat_install(raidz_math_kstat);
	}

	/* Finish initialization */
	atomic_swap_32(&zfs_vdev_raidz_impl, user_sel_impl);
	raidz_math_initialized = B_TRUE;
}
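Example no. 2 benchmarks every supported RAID-Z math implementation on a warmed-up buffer and remembers the fastest one, a common pattern for run-time dispatch between scalar and SIMD variants. The sketch below shows the same select-the-fastest idea in plain user-space C; impl_t, sum_scalar, sum_unrolled and the timing loop are hypothetical stand-ins for the real parity/reconstruction kernels, not OpenZFS code.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

#define	BENCH_SIZE	(1 << 16)	/* warmed-up benchmark buffer */
#define	BENCH_ITERS	1000

typedef struct impl {
	const char	*name;
	uint64_t	(*fn)(const uint8_t *, size_t);
} impl_t;

/* Two stand-in "implementations" of the same operation. */
static uint64_t
sum_scalar(const uint8_t *buf, size_t size)
{
	uint64_t s = 0;

	for (size_t i = 0; i < size; i++)
		s += buf[i];
	return (s);
}

static uint64_t
sum_unrolled(const uint8_t *buf, size_t size)
{
	uint64_t s0 = 0, s1 = 0, s2 = 0, s3 = 0;
	size_t i;

	for (i = 0; i + 4 <= size; i += 4) {
		s0 += buf[i];
		s1 += buf[i + 1];
		s2 += buf[i + 2];
		s3 += buf[i + 3];
	}
	for (; i < size; i++)
		s0 += buf[i];
	return (s0 + s1 + s2 + s3);
}

static impl_t all_impls[] = {
	{ "scalar",	sum_scalar },
	{ "unrolled",	sum_unrolled },
};

static uint64_t
now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ((uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec);
}

int
main(void)
{
	static uint8_t buf[BENCH_SIZE];
	const impl_t *fastest = NULL;
	uint64_t best = UINT64_MAX;
	volatile uint64_t sink = 0;

	memset(buf, 0xAA, sizeof (buf));	/* warm up the buffer */

	/* Time each implementation and remember the fastest. */
	for (size_t i = 0; i < sizeof (all_impls) / sizeof (all_impls[0]); i++) {
		uint64_t start = now_ns();

		for (int it = 0; it < BENCH_ITERS; it++)
			sink += all_impls[i].fn(buf, sizeof (buf));

		uint64_t elapsed = now_ns() - start;
		printf("%-10s %llu ns\n", all_impls[i].name,
		    (unsigned long long)elapsed);
		if (elapsed < best) {
			best = elapsed;
			fastest = &all_impls[i];
		}
	}
	printf("fastest: %s\n", fastest->name);
	return (0);
}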