Example #1
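This appears to be txg_quiesce() from OpenZFS's module/zfs/txg.c as extended by the txg-history instrumentation: it takes every per-CPU tc_lock so no new hold can be opened on the current txg, advances tx_open_txg, records how long the group was open in its history entry, releases the locks, and then waits for each CPU's outstanding hold count for this group to drain to zero.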
static void
txg_quiesce(dsl_pool_t *dp, uint64_t txg)
{
	hrtime_t start;
	txg_history_t *th;
	tx_state_t *tx = &dp->dp_tx;
	int g = txg & TXG_MASK;	/* index into the small ring of per-CPU hold counts */
	int c;

	/*
	 * Grab all tx_cpu locks so nobody else can get into this txg.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_enter(&tx->tx_cpu[c].tc_lock);

	ASSERT(txg == tx->tx_open_txg);
	tx->tx_open_txg++;

	/*
	 * Measure how long the txg was open and replace the kstat.
	 */
	th = dsl_pool_txg_history_get(dp, txg);
	th->th_kstat.open_time = gethrtime() - th->th_kstat.birth;
	th->th_kstat.state = TXG_STATE_QUIESCING;
	dsl_pool_txg_history_put(th);
	dsl_pool_txg_history_add(dp, tx->tx_open_txg);

	/*
	 * Now that we've incremented tx_open_txg, we can let threads
	 * enter the next transaction group.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_exit(&tx->tx_cpu[c].tc_lock);

	/*
	 * Quiesce the transaction group by waiting for everyone to txg_exit().
	 */
	start = gethrtime();

	for (c = 0; c < max_ncpus; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];
		mutex_enter(&tc->tc_lock);
		while (tc->tc_count[g] != 0)
			cv_wait(&tc->tc_cv[g], &tc->tc_lock);
		mutex_exit(&tc->tc_lock);
	}

	/*
	 * Measure how long the txg took to quiesce.
	 */
	th = dsl_pool_txg_history_get(dp, txg);
	th->th_kstat.quiesce_time = gethrtime() - start;
	dsl_pool_txg_history_put(th);
}
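The locking pattern is the interesting part: close the group under all per-CPU locks, then drain each CPU's hold count under its condition variable. Below is a minimal user-space sketch of that barrier using pthreads. Everything in it (slot_t, worker(), quiesce(), NCPUS) is hypothetical illustration, not ZFS code, and it omits the group indexing (txg & TXG_MASK) that lets the real code keep several groups in flight at once.

#include <pthread.h>
#include <stdio.h>

#define	NCPUS		4
#define	NWORKERS	16

/* Hypothetical stand-in for tx_cpu_t: a lock, a cv, and a hold count. */
typedef struct {
	pthread_mutex_t lock;
	pthread_cond_t cv;
	int count;
} slot_t;

static slot_t slots[NCPUS];

/* Roughly txg_hold_open()/txg_exit(): take a hold, work, drop it. */
static void *
worker(void *arg)
{
	slot_t *s = &slots[(long)arg % NCPUS];

	pthread_mutex_lock(&s->lock);
	s->count++;
	pthread_mutex_unlock(&s->lock);

	/* ... transactional work would happen here ... */

	pthread_mutex_lock(&s->lock);
	if (--s->count == 0)
		pthread_cond_broadcast(&s->cv);	/* like the cv in txg_exit() */
	pthread_mutex_unlock(&s->lock);
	return (NULL);
}

/*
 * Like txg_quiesce(): grab every slot lock so nothing new can enter,
 * then wait slot by slot for the outstanding holds to drain.
 */
static void
quiesce(void)
{
	int c;

	for (c = 0; c < NCPUS; c++)
		pthread_mutex_lock(&slots[c].lock);
	/* the real code bumps tx_open_txg and updates the kstat here */
	for (c = 0; c < NCPUS; c++)
		pthread_mutex_unlock(&slots[c].lock);

	for (c = 0; c < NCPUS; c++) {
		slot_t *s = &slots[c];

		pthread_mutex_lock(&s->lock);
		while (s->count != 0)
			pthread_cond_wait(&s->cv, &s->lock);
		pthread_mutex_unlock(&s->lock);
	}
}

int
main(void)
{
	pthread_t tids[NWORKERS];
	long i;

	for (i = 0; i < NCPUS; i++) {
		pthread_mutex_init(&slots[i].lock, NULL);
		pthread_cond_init(&slots[i].cv, NULL);
	}
	for (i = 0; i < NWORKERS; i++)
		pthread_create(&tids[i], NULL, worker, (void *)i);
	quiesce();
	for (i = 0; i < NWORKERS; i++)
		pthread_join(&tids[i], NULL);
	printf("all holds drained\n");
	return (0);
}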
Example #2
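The companion setup routine, presumably from module/zfs/dsl_pool.c: it creates the per-pool history list, seeds it with the first txg, and registers a virtual kstat named txgs-<pool> whose rows are produced on demand by dsl_pool_txg_history_update() (not shown in this excerpt).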
static void
dsl_pool_txg_history_init(dsl_pool_t *dp, uint64_t txg)
{
	char name[KSTAT_STRLEN];

	list_create(&dp->dp_txg_history, sizeof (txg_history_t),
	    offsetof(txg_history_t, th_link));
	dsl_pool_txg_history_add(dp, txg);

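	/*
	 * Register a virtual kstat named "txgs-<pool>".  KSTAT_FLAG_VIRTUAL
	 * means the kstat framework allocates no ks_data of its own; the
	 * ks_update callback supplies the data when the kstat is read.
	 */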
	(void) snprintf(name, KSTAT_STRLEN, "txgs-%s", spa_name(dp->dp_spa));
	dp->dp_txg_kstat = kstat_create("zfs", 0, name, "misc",
	    KSTAT_TYPE_TXG, 0, KSTAT_FLAG_VIRTUAL);
	if (dp->dp_txg_kstat) {
		dp->dp_txg_kstat->ks_data = NULL;
		dp->dp_txg_kstat->ks_private = dp;
		dp->dp_txg_kstat->ks_update = dsl_pool_txg_history_update;
		kstat_install(dp->dp_txg_kstat);
	}
}
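If kstat_create() succeeds, the history becomes readable like any other kstat; on a Linux (SPL) build it would presumably surface as /proc/spl/kstat/zfs/txgs-<pool> (path assumed here, not shown in the excerpt), one row per tracked txg carrying the birth, state, and open/quiesce timings recorded in Example #1.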