Example #1
/*
 * This sync task completes a condense, deleting the old mapping and
 * replacing it with the new one.
 */
static void
spa_condense_indirect_complete_sync(void *arg, dmu_tx_t *tx)
{
	spa_condensing_indirect_t *sci = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	spa_condensing_indirect_phys_t *scip =
	    &spa->spa_condensing_indirect_phys;
	vdev_t *vd = vdev_lookup_top(spa, scip->scip_vdev);
	vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
	objset_t *mos = spa->spa_meta_objset;
	vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
	uint64_t old_count = vdev_indirect_mapping_num_entries(old_mapping);
	uint64_t new_count =
	    vdev_indirect_mapping_num_entries(sci->sci_new_mapping);

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
	ASSERT3P(sci, ==, spa->spa_condensing_indirect);
	for (int i = 0; i < TXG_SIZE; i++) {
		ASSERT(list_is_empty(&sci->sci_new_mapping_entries[i]));
	}
	ASSERT(vic->vic_mapping_object != 0);
	ASSERT3U(vd->vdev_id, ==, scip->scip_vdev);
	ASSERT(scip->scip_next_mapping_object != 0);
	ASSERT(scip->scip_prev_obsolete_sm_object != 0);

	/*
	 * Reset vdev_indirect_mapping to refer to the new object.
	 */
	rw_enter(&vd->vdev_indirect_rwlock, RW_WRITER);
	vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
	vd->vdev_indirect_mapping = sci->sci_new_mapping;
	rw_exit(&vd->vdev_indirect_rwlock);

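	/*
	 * Free the old mapping object and point the vdev's indirect
	 * config at the new one.
	 */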
	sci->sci_new_mapping = NULL;
	vdev_indirect_mapping_free(mos, vic->vic_mapping_object, tx);
	vic->vic_mapping_object = scip->scip_next_mapping_object;
	scip->scip_next_mapping_object = 0;

	space_map_free_obj(mos, scip->scip_prev_obsolete_sm_object, tx);
	spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
	scip->scip_prev_obsolete_sm_object = 0;

	scip->scip_vdev = 0;

	VERIFY0(zap_remove(mos, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_CONDENSING_INDIRECT, tx));
	spa_condensing_indirect_destroy(spa->spa_condensing_indirect);
	spa->spa_condensing_indirect = NULL;

	zfs_dbgmsg("finished condense of vdev %llu in txg %llu: "
	    "new mapping object %llu has %llu entries "
	    "(was %llu entries)",
	    vd->vdev_id, dmu_tx_get_txg(tx), vic->vic_mapping_object,
	    new_count, old_count);

	vdev_config_dirty(spa->spa_root_vdev);
}
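
/*
 * For context, a hedged sketch of how a completion task like the one above
 * is typically dispatched: the condense thread hands the final swap to
 * syncing context via dsl_sync_task(). The function below is illustrative
 * (its name and the space-check argument are assumptions; they vary across
 * releases), not a verbatim caller.
 */
static void
spa_condense_indirect_complete_example(spa_t *spa,
    spa_condensing_indirect_t *sci)
{
	/*
	 * dsl_sync_task() blocks until the sync func has run in syncing
	 * context; the NULL checkfunc means there is no pre-check to fail.
	 */
	VERIFY0(dsl_sync_task(spa_name(spa), NULL,
	    spa_condense_indirect_complete_sync, sci, 0,
	    ZFS_SPACE_CHECK_EXTRA_RESERVED));
}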
Example #2
/*
 * Update all disk labels, generate a fresh config based on the current
 * in-core state, and sync the global config cache (do not sync the config
 * cache if this is a booting rootpool).
 */
void
spa_config_update_common(spa_t *spa, int what, boolean_t isroot)
{
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t txg;
	int c;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa_config_enter(spa, RW_WRITER, FTAG);
	txg = spa_last_synced_txg(spa) + 1;
	if (what == SPA_CONFIG_UPDATE_POOL) {
		vdev_config_dirty(rvd);
	} else {
		/*
		 * If we have top-level vdevs that were added but have
		 * not yet been prepared for allocation, do that now.
		 * (It's safe now because the config cache is up to date,
		 * so it will be able to translate the new DVAs.)
		 * See comments in spa_vdev_add() for full details.
		 */
		for (c = 0; c < rvd->vdev_children; c++) {
			vdev_t *tvd = rvd->vdev_child[c];
			if (tvd->vdev_ms_array == 0) {
				vdev_init(tvd, txg);
				vdev_config_dirty(tvd);
			}
		}
	}
	spa_config_exit(spa, FTAG);

	/*
	 * Wait for the mosconfig to be regenerated and synced.
	 */
	txg_wait_synced(spa->spa_dsl_pool, txg);

	/*
	 * Update the global config cache to reflect the new mosconfig.
	 */
	if (!isroot)
		spa_config_sync(spa, B_FALSE, what != SPA_CONFIG_UPDATE_POOL);

	if (what == SPA_CONFIG_UPDATE_POOL)
		spa_config_update_common(spa, SPA_CONFIG_UPDATE_VDEVS, isroot);
}
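
/*
 * A minimal sketch of the public wrapper one would expect around the
 * common routine above (assumed, not verbatim): ordinary callers do not
 * care about the booting-rootpool case, so they pass B_FALSE for isroot.
 */
void
spa_config_update(spa_t *spa, int what)
{
	spa_config_update_common(spa, what, B_FALSE);
}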
Example #3
/*
 * Transfer top-level vdev state from svd to tvd.
 */
static void
vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
{
	spa_t *spa = svd->vdev_spa;
	metaslab_t *msp;
	vdev_t *vd;
	int t;

	ASSERT(tvd == tvd->vdev_top);

	tvd->vdev_ms_array = svd->vdev_ms_array;
	tvd->vdev_ms_shift = svd->vdev_ms_shift;
	tvd->vdev_ms_count = svd->vdev_ms_count;

	svd->vdev_ms_array = 0;
	svd->vdev_ms_shift = 0;
	svd->vdev_ms_count = 0;

	tvd->vdev_mg = svd->vdev_mg;
	tvd->vdev_ms = svd->vdev_ms;

	svd->vdev_mg = NULL;
	svd->vdev_ms = NULL;

	if (tvd->vdev_mg != NULL)
		tvd->vdev_mg->mg_vd = tvd;

	tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc;
	tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space;
	tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace;

	svd->vdev_stat.vs_alloc = 0;
	svd->vdev_stat.vs_space = 0;
	svd->vdev_stat.vs_dspace = 0;

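	/*
	 * Migrate per-txg dirty state (metaslabs, DTL vdevs, and the
	 * spa's dirty-vdev list) so that pending syncs find it on tvd.
	 */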
	for (t = 0; t < TXG_SIZE; t++) {
		while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL)
			(void) txg_list_add(&tvd->vdev_ms_list, msp, t);
		while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL)
			(void) txg_list_add(&tvd->vdev_dtl_list, vd, t);
		if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t))
			(void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
	}

	if (list_link_active(&svd->vdev_dirty_node)) {
		vdev_config_clean(svd);
		vdev_config_dirty(tvd);
	}

	tvd->vdev_reopen_wanted = svd->vdev_reopen_wanted;
	svd->vdev_reopen_wanted = 0;

	tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio;
	svd->vdev_deflate_ratio = 0;
}
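
/*
 * Hedged usage sketch: a transfer like the one above runs whenever a new
 * vdev is interposed above (or collapsed out from under) a top-level vdev,
 * e.g. the mirror created by 'zpool attach'. The helper below is
 * illustrative only; its name is an assumption.
 */
static void
vdev_interpose_example(vdev_t *old_top, vdev_t *new_top)
{
	/* Only top-level vdevs carry metaslab and per-txg state. */
	if (new_top == new_top->vdev_top)
		vdev_top_transfer(old_top, new_top);
}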
Example #4
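/*
 * Set up a new scan (scrub or resilver) in syncing context: initialize the
 * on-disk scan state, dirty the labels so they record it, and create the
 * scan queue object.
 */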
static void
dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
	pool_scan_func_t *funcp = arg;
	dmu_object_type_t ot = 0;
	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;

	ASSERT(scn->scn_phys.scn_state != DSS_SCANNING);
	ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);
	bzero(&scn->scn_phys, sizeof (scn->scn_phys));
	scn->scn_phys.scn_func = *funcp;
	scn->scn_phys.scn_state = DSS_SCANNING;
	scn->scn_phys.scn_min_txg = 0;
	scn->scn_phys.scn_max_txg = tx->tx_txg;
	scn->scn_phys.scn_ddt_class_max = DDT_CLASSES - 1; /* the entire DDT */
	scn->scn_phys.scn_start_time = gethrestime_sec();
	scn->scn_phys.scn_errors = 0;
	scn->scn_phys.scn_to_examine = spa->spa_root_vdev->vdev_stat.vs_alloc;
	scn->scn_restart_txg = 0;
	spa_scan_stat_init(spa);

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		scn->scn_phys.scn_ddt_class_max = zfs_scrub_ddt_class_max;

		/* rewrite all disk labels */
		vdev_config_dirty(spa->spa_root_vdev);

		if (vdev_resilver_needed(spa->spa_root_vdev,
		    &scn->scn_phys.scn_min_txg, &scn->scn_phys.scn_max_txg)) {
			spa_event_notify(spa, NULL, ESC_ZFS_RESILVER_START);
		} else {
			spa_event_notify(spa, NULL, ESC_ZFS_SCRUB_START);
		}

		spa->spa_scrub_started = B_TRUE;
		/*
		 * If this is an incremental scrub, limit the DDT scrub phase
		 * to just the auto-ditto class (for correctness); the rest
		 * of the scrub should go faster using top-down pruning.
		 */
		if (scn->scn_phys.scn_min_txg > TXG_INITIAL)
			scn->scn_phys.scn_ddt_class_max = DDT_CLASS_DITTO;
	}

	/* back to the generic stuff */

	if (dp->dp_blkstats == NULL) {
		dp->dp_blkstats =
		    kmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
	}
	bzero(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));

	if (spa_version(spa) < SPA_VERSION_DSL_SCRUB)
		ot = DMU_OT_ZAP_OTHER;

	scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset,
	    ot ? ot : DMU_OT_SCAN_QUEUE, DMU_OT_NONE, 0, tx);

	dsl_scan_sync_state(scn, tx);

	spa_history_log_internal(spa, "scan setup", tx,
	    "func=%u mintxg=%llu maxtxg=%llu",
	    *funcp, scn->scn_phys.scn_min_txg, scn->scn_phys.scn_max_txg);
}
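
/*
 * Hedged sketch of the check/sync pairing: a setup sync task like the one
 * above is normally registered together with a check function that rejects
 * a second concurrent scan. The name below is an assumption and the exact
 * dispatch arguments differ across releases; it follows the general
 * dsl_sync_task() pattern.
 */
static int
dsl_scan_setup_check_example(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	/* Refuse to start a scan while one is already in progress. */
	if (scn->scn_phys.scn_state == DSS_SCANNING)
		return (SET_ERROR(EBUSY));
	return (0);
}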
Example #5
/* ARGSUSED */
static void
dsl_pool_scrub_setup_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_pool_t *dp = arg1;
	enum scrub_func *funcp = arg2;
	dmu_object_type_t ot = 0;
	boolean_t complete = B_FALSE;

	dsl_pool_scrub_cancel_sync(dp, &complete, cr, tx);

	ASSERT(dp->dp_scrub_func == SCRUB_FUNC_NONE);
	ASSERT(*funcp > SCRUB_FUNC_NONE);
	ASSERT(*funcp < SCRUB_FUNC_NUMFUNCS);

	dp->dp_scrub_min_txg = 0;
	dp->dp_scrub_max_txg = tx->tx_txg;
	dp->dp_scrub_ddt_class_max = zfs_scrub_ddt_class_max;

	if (*funcp == SCRUB_FUNC_CLEAN) {
		vdev_t *rvd = dp->dp_spa->spa_root_vdev;

		/* rewrite all disk labels */
		vdev_config_dirty(rvd);

		if (vdev_resilver_needed(rvd,
		    &dp->dp_scrub_min_txg, &dp->dp_scrub_max_txg)) {
			spa_event_notify(dp->dp_spa, NULL,
			    ESC_ZFS_RESILVER_START);
			dp->dp_scrub_max_txg = MIN(dp->dp_scrub_max_txg,
			    tx->tx_txg);
		} else {
			spa_event_notify(dp->dp_spa, NULL,
			    ESC_ZFS_SCRUB_START);
		}

		/* zero out the scrub stats in all vdev_stat_t's */
		vdev_scrub_stat_update(rvd,
		    dp->dp_scrub_min_txg ? POOL_SCRUB_RESILVER :
		    POOL_SCRUB_EVERYTHING, B_FALSE);

		/*
		 * If this is an incremental scrub, limit the DDT scrub phase
		 * to just the auto-ditto class (for correctness); the rest
		 * of the scrub should go faster using top-down pruning.
		 */
		if (dp->dp_scrub_min_txg > TXG_INITIAL)
			dp->dp_scrub_ddt_class_max = DDT_CLASS_DITTO;

		dp->dp_spa->spa_scrub_started = B_TRUE;
	}

	/* back to the generic stuff */

	if (dp->dp_blkstats == NULL) {
		dp->dp_blkstats =
		    kmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
	}
	bzero(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));

	if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB)
		ot = DMU_OT_ZAP_OTHER;

	dp->dp_scrub_func = *funcp;
	dp->dp_scrub_queue_obj = zap_create(dp->dp_meta_objset,
	    ot ? ot : DMU_OT_SCRUB_QUEUE, DMU_OT_NONE, 0, tx);
	bzero(&dp->dp_scrub_bookmark, sizeof (zbookmark_t));
	bzero(&dp->dp_scrub_ddt_bookmark, sizeof (ddt_bookmark_t));
	dp->dp_scrub_restart = B_FALSE;
	dp->dp_spa->spa_scrub_errors = 0;

	VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_FUNC, sizeof (uint32_t), 1,
	    &dp->dp_scrub_func, tx));
	VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_QUEUE, sizeof (uint64_t), 1,
	    &dp->dp_scrub_queue_obj, tx));
	VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_MIN_TXG, sizeof (uint64_t), 1,
	    &dp->dp_scrub_min_txg, tx));
	VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_MAX_TXG, sizeof (uint64_t), 1,
	    &dp->dp_scrub_max_txg, tx));
	VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_BOOKMARK, sizeof (uint64_t),
	    sizeof (dp->dp_scrub_bookmark) / sizeof (uint64_t),
	    &dp->dp_scrub_bookmark, tx));
	VERIFY(0 == zap_update(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_DDT_BOOKMARK, sizeof (uint64_t),
	    sizeof (dp->dp_scrub_ddt_bookmark) / sizeof (uint64_t),
	    &dp->dp_scrub_ddt_bookmark, tx));
	VERIFY(0 == zap_update(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_DDT_CLASS_MAX, sizeof (uint64_t), 1,
	    &dp->dp_scrub_ddt_class_max, tx));
	VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_ERRORS, sizeof (uint64_t), 1,
	    &dp->dp_spa->spa_scrub_errors, tx));

	spa_history_internal_log(LOG_POOL_SCRUB, dp->dp_spa, tx, cr,
	    "func=%u mintxg=%llu maxtxg=%llu",
	    *funcp, dp->dp_scrub_min_txg, dp->dp_scrub_max_txg);
}
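
/*
 * Hedged sketch: the state persisted by the zap_add()/zap_update() calls
 * above is read back the same way on pool open, e.g. recovering the scrub
 * func. The function name is an assumption; the zap_lookup() call simply
 * mirrors the corresponding zap_add() above.
 */
static int
dsl_pool_scrub_restore_example(dsl_pool_t *dp)
{
	return (zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_FUNC, sizeof (uint32_t), 1,
	    &dp->dp_scrub_func));
}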
Example #6
/* ARGSUSED */
static void
dsl_pool_scrub_setup_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_pool_t *dp = arg1;
	enum scrub_func *funcp = arg2;
	dmu_object_type_t ot = 0;
	boolean_t complete = B_FALSE;

	dsl_pool_scrub_cancel_sync(dp, &complete, cr, tx);

	ASSERT(dp->dp_scrub_func == SCRUB_FUNC_NONE);
	ASSERT(*funcp > SCRUB_FUNC_NONE);
	ASSERT(*funcp < SCRUB_FUNC_NUMFUNCS);

	dp->dp_scrub_min_txg = 0;
	dp->dp_scrub_max_txg = tx->tx_txg;

	if (*funcp == SCRUB_FUNC_CLEAN) {
		vdev_t *rvd = dp->dp_spa->spa_root_vdev;

		/* rewrite all disk labels */
		vdev_config_dirty(rvd);

		if (vdev_resilver_needed(rvd,
		    &dp->dp_scrub_min_txg, &dp->dp_scrub_max_txg)) {
			spa_event_notify(dp->dp_spa, NULL,
			    ESC_ZFS_RESILVER_START);
			dp->dp_scrub_max_txg = MIN(dp->dp_scrub_max_txg,
			    tx->tx_txg);
		}

		/* zero out the scrub stats in all vdev_stat_t's */
		vdev_scrub_stat_update(rvd,
		    dp->dp_scrub_min_txg ? POOL_SCRUB_RESILVER :
		    POOL_SCRUB_EVERYTHING, B_FALSE);

		dp->dp_spa->spa_scrub_started = B_TRUE;
	}

	/* back to the generic stuff */

	if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB)
		ot = DMU_OT_ZAP_OTHER;

	dp->dp_scrub_func = *funcp;
	dp->dp_scrub_queue_obj = zap_create(dp->dp_meta_objset,
	    ot ? ot : DMU_OT_SCRUB_QUEUE, DMU_OT_NONE, 0, tx);
	bzero(&dp->dp_scrub_bookmark, sizeof (zbookmark_t));
	dp->dp_scrub_restart = B_FALSE;
	dp->dp_spa->spa_scrub_errors = 0;

	VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_FUNC, sizeof (uint32_t), 1,
	    &dp->dp_scrub_func, tx));
	VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_QUEUE, sizeof (uint64_t), 1,
	    &dp->dp_scrub_queue_obj, tx));
	VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_MIN_TXG, sizeof (uint64_t), 1,
	    &dp->dp_scrub_min_txg, tx));
	VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_MAX_TXG, sizeof (uint64_t), 1,
	    &dp->dp_scrub_max_txg, tx));
	VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_BOOKMARK, sizeof (uint64_t), 4,
	    &dp->dp_scrub_bookmark, tx));
	VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_ERRORS, sizeof (uint64_t), 1,
	    &dp->dp_spa->spa_scrub_errors, tx));

	spa_history_internal_log(LOG_POOL_SCRUB, dp->dp_spa, tx, cr,
	    "func=%u mintxg=%llu maxtxg=%llu",
	    *funcp, dp->dp_scrub_min_txg, dp->dp_scrub_max_txg);
}