Example #1
/*
 * Function sets desired raidz implementation.
 *
 * If we are called before init(), the user preference is saved in
 * user_sel_impl and applied in a later init() call. This happens when the
 * module parameter is specified on module load. Otherwise, update
 * zfs_vdev_raidz_impl directly.
 *
 * @val		Name of raidz implementation to use
 */
int
vdev_raidz_impl_set(const char *val)
{
	int err = -EINVAL;
	char req_name[RAIDZ_IMPL_NAME_MAX];
	uint32_t impl = RAIDZ_IMPL_READ(user_sel_impl);
	size_t i;

	/* sanitize input */
	i = strnlen(val, RAIDZ_IMPL_NAME_MAX);
	if (i == 0 || i == RAIDZ_IMPL_NAME_MAX)
		return (err);

	strlcpy(req_name, val, RAIDZ_IMPL_NAME_MAX);
	while (i > 0 && !!isspace(req_name[i-1]))
		i--;
	req_name[i] = '\0';

	/* Check mandatory options */
	for (i = 0; i < ARRAY_SIZE(math_impl_opts); i++) {
		if (strcmp(req_name, math_impl_opts[i].name) == 0) {
			impl = math_impl_opts[i].sel;
			err = 0;
			break;
		}
	}

	/* check all supported impl if init() was already called */
	if (err != 0 && raidz_math_initialized) {
		/* check all supported implementations */
		for (i = 0; i < raidz_supp_impl_cnt; i++) {
			if (strcmp(req_name, raidz_supp_impl[i]->name) == 0) {
				impl = i;
				err = 0;
				break;
			}
		}
	}

	if (err == 0) {
		if (raidz_math_initialized)
			atomic_swap_32(&zfs_vdev_raidz_impl, impl);
		else
			atomic_swap_32(&user_sel_impl, impl);
	}

	return (err);
}
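
For reference, a minimal usage sketch of the setter above. The selector names
used here ("fastest", "original") are assumptions; they only need to match an
entry in math_impl_opts or, after init(), a name in raidz_supp_impl[].

/* Sketch only: request an implementation by name, with a fallback. */
static void
choose_raidz_impl_sketch(void)
{
	if (vdev_raidz_impl_set("fastest") != 0)
		(void) vdev_raidz_impl_set("original");
}
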
Example #2
/*
 * Stop the thread that sets up the switching mode.
 */
void
vsw_setup_switching_stop(vsw_t *vswp)
{
	kt_did_t	tid = 0;

	/*
	 * Signal the setup_switching thread to stop and wait until it stops.
	 */
	mutex_enter(&vswp->sw_thr_lock);

	if (vswp->sw_thread != NULL) {
		tid = vswp->sw_thread->t_did;
		vswp->sw_thr_flags |= VSW_SWTHR_STOP;
		cv_signal(&vswp->sw_thr_cv);
	}

	mutex_exit(&vswp->sw_thr_lock);

	if (tid != 0)
		thread_join(tid);

	(void) atomic_swap_32(&vswp->switching_setup_done, B_FALSE);

	vswp->mac_open_retries = 0;
}
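
For context, a hedged sketch of the other side of this handshake (the function
name is assumed; the real vsw worker does more between waits): the thread
notices VSW_SWTHR_STOP under sw_thr_lock, clears its bookkeeping and exits,
which is what allows the thread_join() above to return.

/* Sketch only: worker side of the stop handshake (name assumed). */
static void
vsw_setup_sw_thread_sketch(void *arg)
{
	vsw_t	*vswp = arg;

	mutex_enter(&vswp->sw_thr_lock);
	while ((vswp->sw_thr_flags & VSW_SWTHR_STOP) == 0) {
		/* ... perform/retry the setup work, then wait ... */
		cv_wait(&vswp->sw_thr_cv, &vswp->sw_thr_lock);
	}
	vswp->sw_thr_flags &= ~VSW_SWTHR_STOP;
	vswp->sw_thread = NULL;
	mutex_exit(&vswp->sw_thr_lock);

	thread_exit();
}
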
Example #3
static void
fletcher_4_benchmark_impl(boolean_t native, char *data, uint64_t data_size)
{

	struct fletcher_4_kstat *fastest_stat =
	    &fletcher_4_stat_data[fletcher_4_supp_impls_cnt];
	hrtime_t start;
	uint64_t run_bw, run_time_ns, best_run = 0;
	zio_cksum_t zc;
	uint32_t i, l, sel_save = IMPL_READ(fletcher_4_impl_chosen);


	fletcher_checksum_func_t *fletcher_4_test = native ?
	    fletcher_4_native : fletcher_4_byteswap;

	for (i = 0; i < fletcher_4_supp_impls_cnt; i++) {
		struct fletcher_4_kstat *stat = &fletcher_4_stat_data[i];
		uint64_t run_count = 0;

		/* temporarily set an implementation */
		fletcher_4_impl_chosen = i;

		kpreempt_disable();
		start = gethrtime();
		do {
			for (l = 0; l < 32; l++, run_count++)
				fletcher_4_test(data, data_size, NULL, &zc);

			run_time_ns = gethrtime() - start;
		} while (run_time_ns < FLETCHER_4_BENCH_NS);
		kpreempt_enable();

		run_bw = data_size * run_count * NANOSEC;
		run_bw /= run_time_ns;	/* B/s */

		if (native)
			stat->native = run_bw;
		else
			stat->byteswap = run_bw;

		if (run_bw > best_run) {
			best_run = run_bw;

			if (native) {
				fastest_stat->native = i;
				FLETCHER_4_FASTEST_FN_COPY(native,
				    fletcher_4_supp_impls[i]);
			} else {
				fastest_stat->byteswap = i;
				FLETCHER_4_FASTEST_FN_COPY(byteswap,
				    fletcher_4_supp_impls[i]);
			}
		}
	}

	/* restore original selection */
	atomic_swap_32(&fletcher_4_impl_chosen, sel_save);
}
Example #4
static void
shmif_unlockbus(struct shmif_mem *busmem)
{
	unsigned int old;

	membar_exit();
	old = atomic_swap_32(&busmem->shm_lock, LOCK_UNLOCKED);
	KASSERT(old == LOCK_LOCKED);
}
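
The matching acquire side is worth sketching (assuming the same LOCK_LOCKED /
LOCK_UNLOCKED values; the real driver's lock routine may back off differently):
it spins with the same atomic_swap_32() and issues membar_enter() once it
observes the unlocked value, pairing with the membar_exit() above.

/* Sketch only: busy-wait acquire that pairs with shmif_unlockbus(). */
static void
shmif_lockbus_sketch(struct shmif_mem *busmem)
{
	while (atomic_swap_32(&busmem->shm_lock, LOCK_LOCKED) != LOCK_UNLOCKED)
		continue;	/* lock held by someone else; keep spinning */
	membar_enter();		/* pairs with membar_exit() in the unlock path */
}
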
Example #5
void BnxeTimerStart(um_device_t * pUM)
{
    atomic_swap_32(&pUM->timerEnabled, B_TRUE);

    pUM->lm_dev.vars.stats.stats_collect.timer_wakeup = 0; /* reset */

    pUM->timerID = timeout(BnxeTimer, (void *)pUM,
                           drv_usectohz(BNXE_TIMER_INTERVAL));
}
Example #6
void BnxeTimerStop(um_device_t * pUM)
{
    atomic_swap_32(&pUM->timerEnabled, B_FALSE);

    BNXE_LOCK_ENTER_TIMER(pUM);
    BNXE_LOCK_EXIT_TIMER(pUM);

    untimeout(pUM->timerID);
    pUM->timerID = 0;
}
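
A hedged sketch of the callback these two routines arm and disarm (the real
BnxeTimer body differs; only names visible in the snippets are reused): it
bails out once timerEnabled has been cleared and otherwise re-arms itself,
which is why BnxeTimerStop() clears the flag before calling untimeout().

/* Sketch only: periodic callback that re-arms while timerEnabled is set. */
static void
BnxeTimerSketch(void * arg)
{
    um_device_t * pUM = (um_device_t *)arg;

    BNXE_LOCK_ENTER_TIMER(pUM);

    if (!pUM->timerEnabled)
    {
        /* BnxeTimerStop() cleared the flag; do not re-arm */
        BNXE_LOCK_EXIT_TIMER(pUM);
        return;
    }

    /* ... periodic statistics collection would go here ... */

    pUM->timerID = timeout(BnxeTimerSketch, (void *)pUM,
                           drv_usectohz(BNXE_TIMER_INTERVAL));

    BNXE_LOCK_EXIT_TIMER(pUM);
}
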
Example #7
/*
 * Interrupt another CPU.
 * 	This is useful to make the other CPU go through a trap so that
 *	it recognizes an address space trap (AST) for preempting a thread.
 *
 *	It is possible to be preempted here and be resumed on the CPU
 *	being poked, so it isn't an error to poke the current CPU.
 *	We could check this and still get preempted after the check, so
 *	we don't bother.
 */
void
poke_cpu(int cpun)
{
	uint32_t *ptr = (uint32_t *)&cpu[cpun]->cpu_m.poke_cpu_outstanding;

	/*
	 * If panicstr is set or a poke_cpu is already pending,
	 * no need to send another one. Use atomic swap to protect
	 * against multiple CPUs sending redundant pokes.
	 */
	if (panicstr || *ptr == B_TRUE ||
	    atomic_swap_32(ptr, B_TRUE) == B_TRUE)
		return;

	xt_one(cpun, setsoftint_tl1, poke_cpu_inum, 0);
}
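
The flag is consumed on the poked CPU. A minimal sketch of that side (the
handler name and the exact reset point are assumptions, not the platform
code): it clears poke_cpu_outstanding so that subsequent pokes are sent again.

/* Sketch only: soft-interrupt handler on the poked CPU (names assumed). */
static void
poke_cpu_handler_sketch(void)
{
	CPU->cpu_m.poke_cpu_outstanding = B_FALSE;
	membar_producer();	/* make the reset visible to future pokers */
	/* simply taking the trap is enough for the pending AST to be noticed */
}
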
Example #8
int
fletcher_4_impl_set(const char *val)
{
	int err = -EINVAL;
	uint32_t impl = IMPL_READ(fletcher_4_impl_chosen);
	size_t i, val_len;

	val_len = strlen(val);
	while ((val_len > 0) && !!isspace(val[val_len-1])) /* trim '\n' */
		val_len--;

	/* check mandatory implementations */
	for (i = 0; i < ARRAY_SIZE(fletcher_4_impl_selectors); i++) {
		const char *name = fletcher_4_impl_selectors[i].fis_name;

		if (val_len == strlen(name) &&
		    strncmp(val, name, val_len) == 0) {
			impl = fletcher_4_impl_selectors[i].fis_sel;
			err = 0;
			break;
		}
	}

	if (err != 0 && fletcher_4_initialized) {
		/* check all supported implementations */
		for (i = 0; i < fletcher_4_supp_impls_cnt; i++) {
			const char *name = fletcher_4_supp_impls[i]->name;

			if (val_len == strlen(name) &&
			    strncmp(val, name, val_len) == 0) {
				impl = i;
				err = 0;
				break;
			}
		}
	}

	if (err == 0) {
		atomic_swap_32(&fletcher_4_impl_chosen, impl);
		membar_producer();
	}

	return (err);
}
Example #9
/*
 * Setup the required switching mode.
 * Returns:
 *  0 on success.
 *  EAGAIN if retry is needed.
 *  1 on all other failures.
 */
int
vsw_setup_switching(vsw_t *vswp)
{
	int	rv = 1;

	D1(vswp, "%s: enter", __func__);

	/*
	 * Select the best switching mode. This routine can also be called
	 * from the timeout handler to retry setting up a specific mode.
	 * Currently only the function that sets up layer2/promisc mode
	 * returns EAGAIN when the underlying network device is not yet
	 * available, which is what causes the retries.
	 */
	if (vswp->smode & VSW_LAYER2) {
		rv = vsw_setup_layer2(vswp);
	} else if (vswp->smode & VSW_LAYER3) {
		rv = vsw_setup_layer3(vswp);
	} else {
		DERR(vswp, "unknown switch mode");
		rv = 1;
	}

	if (rv && (rv != EAGAIN)) {
		cmn_err(CE_WARN, "!vsw%d: Unable to setup specified "
		    "switching mode", vswp->instance);
	} else if (rv == 0) {
		(void) atomic_swap_32(&vswp->switching_setup_done, B_TRUE);
	}

	D2(vswp, "%s: Operating in mode %d", __func__,
	    vswp->smode);

	D1(vswp, "%s: exit", __func__);

	return (rv);
}
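
As the header notes, an EAGAIN result is retried from a timeout handler. A
sketch of such a handler (the name and the one-second interval are
assumptions) calls vsw_setup_switching() again and re-arms itself while the
device remains unavailable:

/* Sketch only: timeout-driven retry of the switching setup (names assumed). */
static void
vsw_setup_switching_retry_sketch(void *arg)
{
	vsw_t	*vswp = arg;

	if (vsw_setup_switching(vswp) == EAGAIN) {
		/* underlying network device still missing; try again later */
		(void) timeout(vsw_setup_switching_retry_sketch, vswp,
		    drv_usectohz(1000000));
	}
}
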
Example #10
int
ipi_intr(void *v)
{
	struct cpu_info * const ci = curcpu();
	int cpu_id = cpu_index(ci);
	int msr;
	uint32_t ipi;

	ci->ci_ev_ipi.ev_count++;
	ipi = atomic_swap_32(&ci->ci_pending_ipis, 0);

	if (ipi == IPI_NOMESG)
		return 1;

	if (ipi & IPI_XCALL)
		xc_ipi_handler();

	if (ipi & IPI_GENERIC)
		ipi_cpu_handler();

	if (ipi & IPI_SUSPEND)
		cpu_pause(NULL);

	if (ipi & IPI_HALT) {
		struct cpuset_info * const csi = &cpuset_info;
		aprint_normal("halting CPU %d\n", cpu_id);
		kcpuset_set(csi->cpus_halted, cpu_id);
		msr = (mfmsr() & ~PSL_EE) | PSL_POW;
		for (;;) {
			__asm volatile ("sync; isync");
			mtmsr(msr);
		}
	}

	return 1;
}
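
The atomic_swap_32(&ci->ci_pending_ipis, 0) above atomically fetches and
clears the pending set, so it pairs naturally with senders that OR message
bits in. A sketch of the sending side (the delivery call is a placeholder;
the real code is machine-dependent):

/* Sketch only: post an IPI message to a remote CPU (delivery call assumed). */
static void
ipi_send_sketch(struct cpu_info *ci, uint32_t msg)
{
	/* publish the message bit before raising the interrupt */
	atomic_or_32(&ci->ci_pending_ipis, msg);

	/* placeholder for the machine-dependent interrupt/doorbell write */
	cpu_send_ipi_placeholder(ci);
}
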
Example #11
void
vdev_raidz_math_init(void)
{
	raidz_impl_ops_t *curr_impl;
	zio_t *bench_zio = NULL;
	raidz_map_t *bench_rm = NULL;
	uint64_t bench_parity;
	int i, c, fn;

	/* move supported impl into raidz_supp_impl */
	for (i = 0, c = 0; i < ARRAY_SIZE(raidz_all_maths); i++) {
		curr_impl = (raidz_impl_ops_t *)raidz_all_maths[i];

		/* initialize impl */
		if (curr_impl->init)
			curr_impl->init();

		if (curr_impl->is_supported())
			raidz_supp_impl[c++] = (raidz_impl_ops_t *)curr_impl;
	}
	membar_producer();		/* complete raidz_supp_impl[] init */
	raidz_supp_impl_cnt = c;	/* number of supported impl */

#if !defined(_KERNEL)
	/* Skip benchmarking and use last implementation as fastest */
	memcpy(&vdev_raidz_fastest_impl, raidz_supp_impl[raidz_supp_impl_cnt-1],
	    sizeof (vdev_raidz_fastest_impl));
	strcpy(vdev_raidz_fastest_impl.name, "fastest");

	raidz_math_initialized = B_TRUE;

	/* Use 'cycle' math selection method for userspace */
	VERIFY0(vdev_raidz_impl_set("cycle"));
	return;
#endif

	/* Fake a zio and run the benchmark on a warmed-up buffer */
	bench_zio = kmem_zalloc(sizeof (zio_t), KM_SLEEP);
	bench_zio->io_offset = 0;
	bench_zio->io_size = BENCH_ZIO_SIZE; /* only data columns */
	bench_zio->io_abd = abd_alloc_linear(BENCH_ZIO_SIZE, B_TRUE);
	memset(abd_to_buf(bench_zio->io_abd), 0xAA, BENCH_ZIO_SIZE);

	/* Benchmark parity generation methods */
	for (fn = 0; fn < RAIDZ_GEN_NUM; fn++) {
		bench_parity = fn + 1;
		/* New raidz_map is needed for each generate_p/q/r */
		bench_rm = vdev_raidz_map_alloc(bench_zio, SPA_MINBLOCKSHIFT,
		    BENCH_D_COLS + bench_parity, bench_parity);

		benchmark_raidz_impl(bench_rm, fn, benchmark_gen_impl);

		vdev_raidz_map_free(bench_rm);
	}

	/* Benchmark data reconstruction methods */
	bench_rm = vdev_raidz_map_alloc(bench_zio, SPA_MINBLOCKSHIFT,
	    BENCH_COLS, PARITY_PQR);

	for (fn = 0; fn < RAIDZ_REC_NUM; fn++)
		benchmark_raidz_impl(bench_rm, fn, benchmark_rec_impl);

	vdev_raidz_map_free(bench_rm);

	/* cleanup the bench zio */
	abd_free(bench_zio->io_abd);
	kmem_free(bench_zio, sizeof (zio_t));

	/* install kstats for all impl */
	raidz_math_kstat = kstat_create("zfs", 0, "vdev_raidz_bench", "misc",
	    KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VIRTUAL);

	if (raidz_math_kstat != NULL) {
		raidz_math_kstat->ks_data = NULL;
		raidz_math_kstat->ks_ndata = UINT32_MAX;
		kstat_set_raw_ops(raidz_math_kstat,
		    raidz_math_kstat_headers,
		    raidz_math_kstat_data,
		    raidz_math_kstat_addr);
		kstat_install(raidz_math_kstat);
	}

	/* Finish initialization */
	atomic_swap_32(&zfs_vdev_raidz_impl, user_sel_impl);
	raidz_math_initialized = B_TRUE;
}
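
Once initialization finishes, the selection stored in zfs_vdev_raidz_impl is
consumed at I/O time. A simplified sketch of that lookup (the real one also
handles the "cycle", "original" and "scalar" selectors; IMPL_FASTEST is
assumed to be the selector value behind the "fastest" entry):

/* Sketch only: map the current selection to an ops table (simplified). */
static const raidz_impl_ops_t *
raidz_current_ops_sketch(void)
{
	uint32_t impl = RAIDZ_IMPL_READ(zfs_vdev_raidz_impl);

	ASSERT(raidz_math_initialized);

	if (impl == IMPL_FASTEST)
		return (&vdev_raidz_fastest_impl);

	ASSERT3U(impl, <, raidz_supp_impl_cnt);
	return (raidz_supp_impl[impl]);
}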