Example #1
/*
 * Evict the specified entry from the cache.
 */
static void
vdev_cache_evict(vdev_cache_t *vc, vdev_cache_entry_t *ve)
{
	ASSERT(MUTEX_HELD(&vc->vc_lock));
	ASSERT3P(ve->ve_fill_io, ==, NULL);
	ASSERT3P(ve->ve_abd, !=, NULL);

	avl_remove(&vc->vc_lastused_tree, ve);
	avl_remove(&vc->vc_offset_tree, ve);
	abd_free(ve->ve_abd);
	kmem_free(ve, sizeof (vdev_cache_entry_t));
}
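The assertions at the top encode the calling contract: vc_lock must already be held, no fill I/O may be pending, and the entry's buffer must exist. A minimal sketch of a conforming caller (vdev_cache_trim() and its max_entries policy are hypothetical, not part of the source above):

/*
 * Hypothetical caller: evict least-recently-used entries while the
 * cache holds more than max_entries. Assumes the vdev_cache_t layout
 * from Example #1; the oldest entry sorts first in the last-used tree.
 */
static void
vdev_cache_trim(vdev_cache_t *vc, uint_t max_entries)
{
	mutex_enter(&vc->vc_lock);	/* satisfies MUTEX_HELD() in evict */
	while (avl_numnodes(&vc->vc_lastused_tree) > max_entries) {
		vdev_cache_entry_t *ve = avl_first(&vc->vc_lastused_tree);
		vdev_cache_evict(vc, ve);
	}
	mutex_exit(&vc->vc_lock);
}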
Example #2
/*
 * Done callback for an aggregated I/O: copy each parent read's
 * slice back out of the aggregate buffer, then free the buffer.
 */
static void
vdev_queue_agg_io_done(zio_t *aio)
{
	if (aio->io_type == ZIO_TYPE_READ) {
		zio_t *pio;
		while ((pio = zio_walk_parents(aio)) != NULL) {
			abd_copy_off(pio->io_data, aio->io_data, pio->io_size,
			    0, pio->io_offset - aio->io_offset);
		}
	}

	abd_free(aio->io_data, aio->io_size);
}
Example #3
/*
 * The same callback as Example #2, against the newer interface:
 * zio_walk_parents() takes a zio_link_t iterator, abd_copy_off()
 * orders its arguments as (dst, src, doff, soff, size), and
 * abd_free() no longer needs an explicit size.
 */
static void
vdev_queue_agg_io_done(zio_t *aio)
{
	if (aio->io_type == ZIO_TYPE_READ) {
		zio_t *pio;
		zio_link_t *zl = NULL;
		while ((pio = zio_walk_parents(aio, &zl)) != NULL) {
			abd_copy_off(pio->io_abd, aio->io_abd,
			    0, pio->io_offset - aio->io_offset, pio->io_size);
		}
	}

	abd_free(aio->io_abd);
}
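Examples #2 and #3 are the same completion callback across two generations of the interface: the older variant passes an explicit size to abd_free() and walks parents with no iterator, while the newer one threads a zio_link_t cursor through zio_walk_parents() and lets the abd_t track its own size. The copy-out arithmetic itself is simple enough to model with flat buffers; a self-contained sketch (agg_copy_out() is illustrative, not a ZFS function):

#include <assert.h>
#include <stdint.h>
#include <string.h>

/*
 * Model of the aggregated-read copy-out: each parent I/O receives
 * the slice of the aggregate buffer starting at (parent offset -
 * aggregate offset) and running for the parent's size.
 */
static void
agg_copy_out(char *pio_buf, size_t pio_size, uint64_t pio_offset,
    const char *aio_buf, uint64_t aio_offset)
{
	assert(pio_offset >= aio_offset);
	memcpy(pio_buf, aio_buf + (pio_offset - aio_offset), pio_size);
}

int
main(void)
{
	char agg[] = "ABCDEFG";		/* aggregate read at offset 100 */
	char parent[3];

	agg_copy_out(parent, sizeof (parent), 104, agg, 100);
	return (memcmp(parent, "EFG", sizeof (parent)));	/* 0 */
}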
Example #4
File: mmp.c Project: LLNL/zfs
/*
 * Done callback for a multihost (MMP) write: record how long the
 * write took via a decaying average and free the write's buffer.
 */
static void
mmp_write_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	vdev_t *vd = zio->io_vd;
	mmp_thread_t *mts = zio->io_private;

	mutex_enter(&mts->mmp_io_lock);
	vd->vdev_mmp_pending = 0;

	if (zio->io_error)
		goto unlock;

	/*
	 * Mmp writes are queued on a fixed schedule, but under many
	 * circumstances, such as a busy device or faulty hardware,
	 * the writes will complete at variable, much longer,
	 * intervals.  In these cases, another node checking for
	 * activity must wait longer to account for these delays.
	 *
	 * The mmp_delay is calculated as a decaying average of the interval
	 * between completed mmp writes.  This is used to predict how long
	 * the import must wait to detect activity in the pool, before
	 * concluding it is not in use.
	 *
	 * Do not set mmp_delay if the multihost property is not on,
	 * so as not to trigger an activity check on import.
	 */
	if (spa_multihost(spa)) {
		hrtime_t delay = gethrtime() - mts->mmp_last_write;

		if (delay > mts->mmp_delay)
			mts->mmp_delay = delay;
		else
			mts->mmp_delay = (delay + mts->mmp_delay * 127) /
			    128;
	} else {
		mts->mmp_delay = 0;
	}
	mts->mmp_last_write = gethrtime();

unlock:
	mutex_exit(&mts->mmp_io_lock);
	spa_config_exit(spa, SCL_STATE, FTAG);

	abd_free(zio->io_abd);
}
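The mmp_delay update above is an asymmetric exponential moving average: a slower-than-usual write raises the estimate immediately, while faster writes pull it down with weight 1/128. That arithmetic in isolation (the sample intervals below are made up):

#include <stdint.h>
#include <stdio.h>

typedef int64_t hrtime_t;	/* nanoseconds, as in the kernel */

/* Decaying average from Example #4: jump up, decay down at 1/128. */
static hrtime_t
mmp_delay_update(hrtime_t mmp_delay, hrtime_t delay)
{
	if (delay > mmp_delay)
		return (delay);
	return ((delay + mmp_delay * 127) / 128);
}

int
main(void)
{
	hrtime_t samples[] = { 1000000, 1200000, 8000000, 1000000 };
	hrtime_t d = 0;

	for (int i = 0; i < 4; i++) {
		d = mmp_delay_update(d, samples[i]);
		printf("interval %lld ns -> mmp_delay %lld ns\n",
		    (long long)samples[i], (long long)d);
	}
	return (0);
}

One slow 8 ms write pushes the estimate straight to 8 ms, but the fast write after it only drops the estimate to about 7.9 ms, so an importing node keeps waiting long enough to notice a laggy but live pool.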
Example #5
void
vdev_raidz_math_init(void)
{
	raidz_impl_ops_t *curr_impl;
	zio_t *bench_zio = NULL;
	raidz_map_t *bench_rm = NULL;
	uint64_t bench_parity;
	int i, c, fn;

	/* move supported impl into raidz_supp_impl */
	for (i = 0, c = 0; i < ARRAY_SIZE(raidz_all_maths); i++) {
		curr_impl = (raidz_impl_ops_t *)raidz_all_maths[i];

		/* initialize impl */
		if (curr_impl->init)
			curr_impl->init();

		if (curr_impl->is_supported())
			raidz_supp_impl[c++] = (raidz_impl_ops_t *)curr_impl;
	}
	membar_producer();		/* complete raidz_supp_impl[] init */
	raidz_supp_impl_cnt = c;	/* number of supported impl */

#if !defined(_KERNEL)
	/* Skip benchmarking and use last implementation as fastest */
	memcpy(&vdev_raidz_fastest_impl, raidz_supp_impl[raidz_supp_impl_cnt-1],
	    sizeof (vdev_raidz_fastest_impl));
	strcpy(vdev_raidz_fastest_impl.name, "fastest");

	raidz_math_initialized = B_TRUE;

	/* Use 'cycle' math selection method for userspace */
	VERIFY0(vdev_raidz_impl_set("cycle"));
	return;
#endif

	/* Fake a zio and run the benchmark on a warmed-up buffer */
	bench_zio = kmem_zalloc(sizeof (zio_t), KM_SLEEP);
	bench_zio->io_offset = 0;
	bench_zio->io_size = BENCH_ZIO_SIZE; /* only data columns */
	bench_zio->io_abd = abd_alloc_linear(BENCH_ZIO_SIZE, B_TRUE);
	memset(abd_to_buf(bench_zio->io_abd), 0xAA, BENCH_ZIO_SIZE);

	/* Benchmark parity generation methods */
	for (fn = 0; fn < RAIDZ_GEN_NUM; fn++) {
		bench_parity = fn + 1;
		/* New raidz_map is needed for each generate_p/q/r */
		bench_rm = vdev_raidz_map_alloc(bench_zio, SPA_MINBLOCKSHIFT,
		    BENCH_D_COLS + bench_parity, bench_parity);

		benchmark_raidz_impl(bench_rm, fn, benchmark_gen_impl);

		vdev_raidz_map_free(bench_rm);
	}

	/* Benchmark data reconstruction methods */
	bench_rm = vdev_raidz_map_alloc(bench_zio, SPA_MINBLOCKSHIFT,
	    BENCH_COLS, PARITY_PQR);

	for (fn = 0; fn < RAIDZ_REC_NUM; fn++)
		benchmark_raidz_impl(bench_rm, fn, benchmark_rec_impl);

	vdev_raidz_map_free(bench_rm);

	/* cleanup the bench zio */
	abd_free(bench_zio->io_abd);
	kmem_free(bench_zio, sizeof (zio_t));

	/* install kstats for all impl */
	raidz_math_kstat = kstat_create("zfs", 0, "vdev_raidz_bench", "misc",
	    KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VIRTUAL);

	if (raidz_math_kstat != NULL) {
		raidz_math_kstat->ks_data = NULL;
		raidz_math_kstat->ks_ndata = UINT32_MAX;
		kstat_set_raw_ops(raidz_math_kstat,
		    raidz_math_kstat_headers,
		    raidz_math_kstat_data,
		    raidz_math_kstat_addr);
		kstat_install(raidz_math_kstat);
	}

	/* Finish initialization */
	atomic_swap_32(&zfs_vdev_raidz_impl, user_sel_impl);
	raidz_math_initialized = B_TRUE;
}
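The overall shape of vdev_raidz_math_init() is benchmark-and-select: probe which implementations the CPU supports, time each one against a warmed buffer, publish the winner, and expose the numbers through a kstat. (Note the #if !defined(_KERNEL) branch: userspace skips the benchmark entirely and uses the "cycle" selection method instead.) A generic sketch of the pattern, with stand-in types rather than the raidz ops table:

#include <string.h>
#include <time.h>

typedef struct impl {
	const char *name;
	int (*is_supported)(void);
	void (*run)(char *buf, size_t len);	/* work to benchmark */
} impl_t;

/* Time one pass of imp->run over buf, in nanoseconds. */
static long long
bench_ns(const impl_t *imp, char *buf, size_t len)
{
	struct timespec a, b;

	clock_gettime(CLOCK_MONOTONIC, &a);
	imp->run(buf, len);
	clock_gettime(CLOCK_MONOTONIC, &b);
	return ((b.tv_sec - a.tv_sec) * 1000000000LL +
	    (b.tv_nsec - a.tv_nsec));
}

/* Pick the fastest supported implementation, as Example #5 does. */
static const impl_t *
select_fastest(const impl_t *all, int n, char *buf, size_t len)
{
	const impl_t *best = NULL;
	long long best_ns = 0;

	memset(buf, 0xAA, len);		/* warm the buffer, as in Example #5 */
	for (int i = 0; i < n; i++) {
		if (!all[i].is_supported())
			continue;
		long long t = bench_ns(&all[i], buf, len);
		if (best == NULL || t < best_ns) {
			best = &all[i];
			best_ns = t;
		}
	}
	return (best);
}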
Example #6
/* Free the data buffer backing an initialize block write. */
static void
vdev_initialize_block_free(abd_t *data)
{
	abd_free(data);
}
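Example #6 looks redundant, but a one-line named wrapper is a deliberate idiom: it gives abd_free() a stable symbol with exactly the shape a callback slot expects, and lets the underlying free routine change without touching registration sites. A generic sketch of the pattern (the callback type and registry here are illustrative, not from the source):

#include <stdlib.h>

typedef void (*block_free_fn_t)(void *);

/* Hypothetical registration point that wants a free callback. */
static block_free_fn_t block_free_cb;

/* Wrapper in the style of Example #6: adapt free(3) to the slot. */
static void
block_free(void *data)
{
	free(data);
}

static void
register_block_free(void)
{
	block_free_cb = block_free;
}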