Example #1
0
/*
 * Create the MOS object that backs the pool's command-history log and
 * record its object number in the pool directory.  Runs in syncing
 * context under transaction tx; all failures here are programming
 * errors, hence VERIFY/ASSERT rather than error returns.
 */
void
spa_history_create_obj(spa_t *spa, dmu_tx_t *tx)
{
	dmu_buf_t *dbp;
	spa_history_phys_t *shpp;
	objset_t *mos = spa->spa_meta_objset;

	/* The history object must not exist yet. */
	ASSERT(spa->spa_history == 0);
	spa->spa_history = dmu_object_alloc(mos, DMU_OT_SPA_HISTORY,
	    SPA_MAXBLOCKSIZE, DMU_OT_SPA_HISTORY_OFFSETS,
	    sizeof (spa_history_phys_t), tx);

	/* Make the object findable via the pool directory ZAP. */
	VERIFY(zap_add(mos, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_HISTORY, sizeof (uint64_t), 1,
	    &spa->spa_history, tx) == 0);

	/*
	 * Hold the bonus buffer, which carries the history bookkeeping
	 * (spa_history_phys_t).  Comparison style normalized to match
	 * the zap_add() VERIFY above (was Yoda-style `0 == ...`).
	 */
	VERIFY(dmu_bonus_hold(mos, spa->spa_history, FTAG, &dbp) == 0);
	ASSERT(dbp->db_size >= sizeof (spa_history_phys_t));

	shpp = dbp->db_data;
	dmu_buf_will_dirty(dbp, tx);

	/*
	 * Figure out maximum size of history log.  We set it at
	 * 0.1% of pool size, with a max of 1G and min of 128KB.
	 */
	shpp->sh_phys_max_off =
	    metaslab_class_get_dspace(spa_normal_class(spa)) / 1000;
	shpp->sh_phys_max_off = MIN(shpp->sh_phys_max_off, 1<<30);
	shpp->sh_phys_max_off = MAX(shpp->sh_phys_max_off, 128<<10);

	dmu_buf_rele(dbp, FTAG);
}
Example #2
0
/*
 * Choose the storage class (special vs. normal) for a zio.
 *
 * The special class is only eligible when it exists and is enabled at
 * both the pool level (spa_usesc) and the request level (zp_usesc).
 * Even then, metadata and data placement may be further refined based
 * on runtime behavior before committing to the special class.
 */
metaslab_class_t *
spa_select_class(spa_t *spa, zio_prop_t *zp)
{
	spa_meta_placement_t *mp = &spa->spa_meta_policy;
	boolean_t to_special = B_FALSE;

	/* Bail out early unless the special class is usable at all. */
	if (!zp->zp_usesc || !spa_has_special(spa) || !spa->spa_usesc)
		return (spa_normal_class(spa));

	if (zp->zp_metadata) {
		to_special = !!(zp->zp_specflags & SPECIAL_FLAG_DATAMETA);
		if (to_special && mp->spa_enable_meta_placement_selection)
			to_special = spa_refine_meta_placement(spa,
			    zp->zp_zpl_meta_to_special, zp->zp_type);
	} else {
		to_special = !!(zp->zp_specflags & SPECIAL_FLAG_DATAUSER);
		if (to_special && spa_enable_data_placement_selection)
			to_special = spa_refine_data_placement(spa);
	}

	return (to_special ? spa_special_class(spa) :
	    spa_normal_class(spa));
}
Example #3
0
/*
 * Lua C-function wrapper around a ZFS channel-program synctask.
 *
 * Upvalue 1 is the zcp_synctask_info_t describing the task; upvalue 2
 * is a boolean selecting sync (commit) vs. check-only mode.  Returns
 * the number of Lua results pushed: the error code, plus an optional
 * error-details table when the task produced any.
 */
static int
zcp_synctask_wrapper(lua_State *state)
{
	int err;
	zcp_cleanup_handler_t *zch;
	int num_ret = 1;	/* always return at least the error code */
	nvlist_t *err_details = fnvlist_alloc();

	/*
	 * Make sure err_details is properly freed, even if a fatal error is
	 * thrown during the synctask.
	 */
	zch = zcp_register_cleanup(state,
	    (zcp_cleanup_t *)&fnvlist_free, err_details);

	zcp_synctask_info_t *info = lua_touserdata(state, lua_upvalueindex(1));
	boolean_t sync = lua_toboolean(state, lua_upvalueindex(2));

	zcp_run_info_t *ri = zcp_run_info(state);
	dsl_pool_t *dp = ri->zri_pool;

	/* MOS space is triple-dittoed, so we multiply by 3. */
	uint64_t funcspace = (info->blocks_modified << DST_AVG_BLKSHIFT) * 3;

	/* Validate/convert the Lua arguments per the task's signature. */
	zcp_parse_args(state, info->name, info->pargs, info->kwargs);

	err = 0;
	if (info->space_check != ZFS_SPACE_CHECK_NONE && funcspace > 0) {
		/*
		 * On-disk space check: estimated usage (including space
		 * already charged to this channel-program run) must fit
		 * under the pool's adjusted quota, net of deferred frees.
		 */
		uint64_t quota = dsl_pool_adjustedsize(dp,
		    info->space_check == ZFS_SPACE_CHECK_RESERVED) -
		    metaslab_class_get_deferred(spa_normal_class(dp->dp_spa));
		uint64_t used = dsl_dir_phys(dp->dp_root_dir)->dd_used_bytes +
		    ri->zri_space_used;

		if (used + funcspace > quota) {
			err = SET_ERROR(ENOSPC);
		}
	}

	if (err == 0) {
		err = info->func(state, sync, err_details);
	}

	/* Only charge the space once the task actually succeeded. */
	if (err == 0) {
		ri->zri_space_used += funcspace;
	}

	lua_pushnumber(state, (lua_Number)err);
	if (fnvlist_num_pairs(err_details) > 0) {
		(void) zcp_nvlist_to_lua(state, err_details, NULL, 0);
		num_ret++;
	}

	/* Normal exit: drop the fatal-error cleanup and free details here. */
	zcp_deregister_cleanup(state, zch);
	fnvlist_free(err_details);

	return (num_ret);
}
Example #4
0
/*
 * Execute a group of sync tasks in syncing context.
 *
 * First verifies there is enough on-disk space for the whole group,
 * then runs every task's checkfunc under the config lock; only if all
 * checks pass are the syncfuncs executed.  Any task's check error is
 * propagated to the group's dstg_err.
 */
void
dsl_sync_task_group_sync(dsl_sync_task_group_t *dstg, dmu_tx_t *tx)
{
	dsl_sync_task_t *dst;
	dsl_pool_t *dp = dstg->dstg_pool;
	uint64_t quota, used;

	ASSERT0(dstg->dstg_err);

	/*
	 * Check for sufficient space.  We just check against what's
	 * on-disk; we don't want any in-flight accounting to get in our
	 * way, because open context may have already used up various
	 * in-core limits (arc_tempreserve, dsl_pool_tempreserve).
	 */
	quota = dsl_pool_adjustedsize(dp, B_FALSE) -
	    metaslab_class_get_deferred(spa_normal_class(dp->dp_spa));
	used = dp->dp_root_dir->dd_phys->dd_used_bytes;
	/* MOS space is triple-dittoed, so we multiply by 3. */
	/* NOTE(review): later variants wrap this in SET_ERROR(); confirm
	 * whether SET_ERROR exists in this tree before changing. */
	if (dstg->dstg_space > 0 && used + dstg->dstg_space * 3 > quota) {
		dstg->dstg_err = ENOSPC;
		return;
	}

	/*
	 * Check for errors by calling checkfuncs.
	 */
	rw_enter(&dp->dp_config_rwlock, RW_WRITER);
	for (dst = list_head(&dstg->dstg_tasks); dst;
	    dst = list_next(&dstg->dstg_tasks, dst)) {
		dst->dst_err =
		    dst->dst_checkfunc(dst->dst_arg1, dst->dst_arg2, tx);
		if (dst->dst_err)
			dstg->dstg_err = dst->dst_err;
	}

	if (dstg->dstg_err == 0) {
		/*
		 * Execute sync tasks.  All checks passed, so every task
		 * in the group commits.
		 */
		for (dst = list_head(&dstg->dstg_tasks); dst;
		    dst = list_next(&dstg->dstg_tasks, dst)) {
			dst->dst_syncfunc(dst->dst_arg1, dst->dst_arg2, tx);
		}
	}
	rw_exit(&dp->dp_config_rwlock);

	/* Nobody is waiting on this group, so it is ours to destroy. */
	if (dstg->dstg_nowaiter)
		dsl_sync_task_group_destroy(dstg);
}
Example #5
0
/*
 * Called in syncing context to execute the synctask.
 *
 * Performs the caller-requested space check, then runs the task's
 * checkfunc and, on success, its syncfunc under the config rwlock.
 * If dst_nowaiter is set, nobody will wait on this task, so it is
 * freed here on every exit path.
 */
void
dsl_sync_task_sync(dsl_sync_task_t *dst, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dst->dst_pool;

	ASSERT0(dst->dst_error);

	/*
	 * Check for sufficient space.
	 *
	 * When the sync task was created, the caller specified the
	 * type of space checking required.  See the comment in
	 * zfs_space_check_t for details on the semantics of each
	 * type of space checking.
	 *
	 * We just check against what's on-disk; we don't want any
	 * in-flight accounting to get in our way, because open context
	 * may have already used up various in-core limits
	 * (arc_tempreserve, dsl_pool_tempreserve).
	 */
	if (dst->dst_space_check != ZFS_SPACE_CHECK_NONE) {
		uint64_t quota = dsl_pool_adjustedsize(dp,
		    dst->dst_space_check == ZFS_SPACE_CHECK_RESERVED) -
		    metaslab_class_get_deferred(spa_normal_class(dp->dp_spa));
		uint64_t used = dsl_dir_phys(dp->dp_root_dir)->dd_used_bytes;
		/* MOS space is triple-dittoed, so we multiply by 3. */
		if (dst->dst_space > 0 && used + dst->dst_space * 3 > quota) {
			dst->dst_error = SET_ERROR(ENOSPC);
			/* No waiter will ever see dst; free it now. */
			if (dst->dst_nowaiter)
				kmem_free(dst, sizeof (*dst));
			return;
		}
	}

	/*
	 * Check for errors by calling checkfunc.  The syncfunc only
	 * runs if the check succeeds, all under the config lock.
	 */
	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
	dst->dst_error = dst->dst_checkfunc(dst->dst_arg, tx);
	if (dst->dst_error == 0)
		dst->dst_syncfunc(dst->dst_arg, tx);
	rrw_exit(&dp->dp_config_rwlock, FTAG);
	if (dst->dst_nowaiter)
		kmem_free(dst, sizeof (*dst));
}
Example #6
0
/*
 * Called in syncing context to execute the synctask.
 *
 * Verifies on-disk space for the task, then runs its checkfunc and,
 * on success, its syncfunc under the config rwlock.  If dst_nowaiter
 * is set, nobody will wait on this task, so it is freed here on every
 * exit path.
 */
void
dsl_sync_task_sync(dsl_sync_task_t *dst, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dst->dst_pool;
	uint64_t quota, used;

	ASSERT0(dst->dst_error);

	/*
	 * Check for sufficient space.  We just check against what's
	 * on-disk; we don't want any in-flight accounting to get in our
	 * way, because open context may have already used up various
	 * in-core limits (arc_tempreserve, dsl_pool_tempreserve).
	 */
	quota = dsl_pool_adjustedsize(dp, B_FALSE) -
	    metaslab_class_get_deferred(spa_normal_class(dp->dp_spa));
	used = dp->dp_root_dir->dd_phys->dd_used_bytes;
	/* MOS space is triple-dittoed, so we multiply by 3. */
	if (dst->dst_space > 0 && used + dst->dst_space * 3 > quota) {
		dst->dst_error = SET_ERROR(ENOSPC);
		/* No waiter will ever see dst; free it now. */
		if (dst->dst_nowaiter)
			kmem_free(dst, sizeof (*dst));
		return;
	}

	/*
	 * Check for errors by calling checkfunc.  The syncfunc only
	 * runs if the check succeeds, all under the config lock.
	 */
	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
	dst->dst_error = dst->dst_checkfunc(dst->dst_arg, tx);
	if (dst->dst_error == 0)
		dst->dst_syncfunc(dst->dst_arg, tx);
	rrw_exit(&dp->dp_config_rwlock, FTAG);
	if (dst->dst_nowaiter)
		kmem_free(dst, sizeof (*dst));
}