Code example #1
File: properties.c  Project: amyvmiwei/bittern
static dm_percent_t _metadata_percent(const struct logical_volume *lv)
{
	dm_percent_t percent;
	struct lv_status_cache *status;

	if (lv_is_cache(lv) || lv_is_cache_pool(lv)) {
		if (!lv_cache_status(lv, &status)) {
			stack;
			return DM_PERCENT_INVALID;
		}
		percent = status->dirty_usage;
		dm_pool_destroy(status->mem);
		return percent;
	}

	if (lv_is_thin_pool(lv))
		return lv_thin_pool_percent(lv, 1, &percent) ? percent : DM_PERCENT_INVALID;

	return DM_PERCENT_INVALID;
}
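
The function above uses DM_PERCENT_INVALID as its error sentinel, so callers must test for it before converting the value for display. The standalone sketch below illustrates that convention using only public libdevmapper calls (dm_make_percent() and dm_percent_to_float()); it is an illustration, not code from the project, and assumes the libdevmapper development headers are available (compile with -ldevmapper).

#include <stdio.h>
#include <libdevmapper.h>

int main(void)
{
	/* dm_make_percent() encodes a used/total pair as a dm_percent_t. */
	dm_percent_t p = dm_make_percent(42, 100);

	/* Check the sentinel first, as callers of _metadata_percent() must. */
	if (p == DM_PERCENT_INVALID)
		fprintf(stderr, "percentage unavailable\n");
	else
		printf("dirty: %.2f%%\n", dm_percent_to_float(p));

	return 0;
}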
Code example #2
File: lvcreate.c  Project: akiradeveloper/lvm2
/*
 * Normal snapshot or thinly-provisioned snapshot?
 */
static int _determine_snapshot_type(struct volume_group *vg,
				  struct lvcreate_params *lp)
{
	struct lv_list *lvl;

	if (!(lvl = find_lv_in_vg(vg, lp->origin))) {
		log_error("Snapshot origin LV %s not found in Volume group %s.",
			  lp->origin, vg->name);
		return 0;
	}

	if (lv_is_cache(lvl->lv)) {
		log_error("Snapshot of cache LV is not yet supported.");
		return 0;
	}

	if (!arg_count(vg->cmd, extents_ARG) && !arg_count(vg->cmd, size_ARG)) {
		if (seg_is_thin(lp)) {
			if (!(lp->segtype = get_segtype_from_string(vg->cmd, "thin")))
				return_0;
			return 1;
		}

		if (!lv_is_thin_volume(lvl->lv)) {
			log_error("Please specify either size or extents with snapshots.");
			return 0;
		}

		if (!(lp->segtype = get_segtype_from_string(vg->cmd, "thin")))
			return_0;

		lp->pool = first_seg(lvl->lv)->pool_lv->name;
	}

	return 1;
}
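
The no-size branch above is the subtle part: when neither --extents nor --size is given, the snapshot can only be thin, either because the caller asked for a thin segtype or because the origin is itself a thin volume (in which case the origin's pool is reused); otherwise the command errors out, since an old-style COW snapshot needs a size. The toy predicate below is a hypothetical condensed restatement of that one branch; the enum and flag parameters are simplifications for illustration, not LVM2 types.

/* Hypothetical condensed form of the no-size branch above; not LVM2 code. */
enum snap_kind { SNAP_INVALID, SNAP_THIN };

static enum snap_kind classify_sizeless_snapshot(int thin_requested,
						 int origin_is_thin)
{
	if (thin_requested)
		return SNAP_THIN;	/* explicit thin segtype */
	if (origin_is_thin)
		return SNAP_THIN;	/* thin origin: its pool is reused */
	return SNAP_INVALID;		/* a COW snapshot would need a size */
}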
Code example #3
File: cache_manip.c  Project: vgmoose/lvm
/*
 * lv_cache_remove
 * @cache_lv
 *
 * Given a cache LV, remove the cache layer.  This will unlink
 * the origin and cache_pool, remove the cache LV layer, and promote
 * the origin to a usable non-cached LV of the same name as the
 * given cache_lv.
 *
 * Returns: 1 on success, 0 on failure
 */
int lv_cache_remove(struct logical_volume *cache_lv)
{
	struct cmd_context *cmd = cache_lv->vg->cmd;
	const char *policy_name;
	uint64_t dirty_blocks;
	struct lv_segment *cache_seg = first_seg(cache_lv);
	struct logical_volume *corigin_lv;
	struct logical_volume *cache_pool_lv;

	if (!lv_is_cache(cache_lv)) {
		log_error(INTERNAL_ERROR "LV %s is not cached.", cache_lv->name);
		return 0;
	}

	/* Active volume is needed (writeback only?) */
	if (!lv_is_active_locally(cache_lv) &&
	    !activate_lv_excl_local(cache_lv->vg->cmd, cache_lv)) {
		log_error("Failed to active cache locally %s.", cache_lv->name);
		return 0;
	}

	/*
	 * FIXME:
	 * Before the link can be broken, we must ensure that the
	 * cache has been flushed.  This may already be the case
	 * if the cache mode is writethrough (or the cleaner
	 * policy is in place from a previous half-finished attempt
	 * to remove the cache_pool).  It could take a long time to
	 * flush the cache - it should probably be done in the background.
	 *
	 * Also, if we do perform the flush in the background and we
	 * happen to also be removing the cache/origin LV, then we
	 * could check if the cleaner policy is in place and simply
	 * remove the cache_pool then without waiting for the flush to
	 * complete.
	 */
	if (!lv_cache_policy_info(cache_lv, &policy_name, NULL, NULL))
		return_0;

	if (strcmp(policy_name, "cleaner")) {
		/* We must swap in the cleaner to flush the cache */
		log_print_unless_silent("Flushing cache for %s.", cache_lv->name);

		/*
		 * Is there a clean way to free the memory for the name
		 * and argv when changing the policy?
		 */
		cache_seg->policy_name = "cleaner";
		cache_seg->policy_argc = 0;
		cache_seg->policy_argv = NULL;

		/* update the kernel to put the cleaner policy in place */
		if (!vg_write(cache_lv->vg))
			return_0;
		if (!suspend_lv(cmd, cache_lv))
			return_0;
		if (!vg_commit(cache_lv->vg))
			return_0;
		if (!resume_lv(cmd, cache_lv))
			return_0;
	}

	//FIXME: use polling to do this...
	do {
		if (!lv_cache_block_info(cache_lv, NULL,
					 &dirty_blocks, NULL, NULL))
			return_0;
		log_print_unless_silent("%" PRIu64 " blocks must still be flushed.",
					dirty_blocks);
		if (dirty_blocks)
			sleep(1);
	} while (dirty_blocks);

	cache_pool_lv = cache_seg->pool_lv;
	if (!detach_pool_lv(cache_seg))
		return_0;

	/* Regular LV which user may remove if there are problems */
	corigin_lv = seg_lv(cache_seg, 0);
	lv_set_visible(corigin_lv);
	if (!remove_layer_from_lv(cache_lv, corigin_lv))
		return_0;

	if (!vg_write(cache_lv->vg))
		return_0;

	/*
	 * suspend_lv on this cache LV suspends all components:
	 * - the top-level cache LV
	 * - the origin
	 * - the cache_pool _cdata and _cmeta
	 */
	if (!suspend_lv(cmd, cache_lv))
		return_0;

	if (!vg_commit(cache_lv->vg))
		return_0;

	/* resume_lv on this (former) cache LV will resume all */
	/*
	 * FIXME: currently we can't easily avoid execution of
	 * blkid on resumed error device
	 */
	if (!resume_lv(cmd, cache_lv))
		return_0;

	/*
	 * cleanup orphan devices
	 *
	 * FIXME:
	 * fix _add_dev() to support this case better
	 * since this should be handled internally by resume_lv(),
	 * which should autoremove any orphans
	 */
	if (!_cleanup_orphan_lv(corigin_lv))  /* _corig */
		return_0;
	if (!_cleanup_orphan_lv(seg_lv(first_seg(cache_pool_lv), 0))) /* _cdata */
		return_0;
	if (!_cleanup_orphan_lv(first_seg(cache_pool_lv)->metadata_lv)) /* _cmeta */
		return_0;

	if (!lv_remove(corigin_lv))
		return_0;

	return 1;
}
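
Two patterns recur in lv_cache_remove(): the metadata transaction (vg_write, suspend_lv, vg_commit, resume_lv) and the blocking wait for the dirty-block count to reach zero. The sketch below isolates the latter as a standalone function; get_dirty_blocks() is an assumed stand-in for lv_cache_block_info(), and, as the FIXME in the source notes, real code would poll asynchronously rather than sleep in a loop.

#include <inttypes.h>
#include <stdio.h>
#include <unistd.h>

/* Assumed stand-in for lv_cache_block_info(): returns the current
 * dirty-block count, or a negative value on error. */
extern int64_t get_dirty_blocks(void);

static int wait_for_clean_cache(void)
{
	int64_t dirty;

	do {
		if ((dirty = get_dirty_blocks()) < 0)
			return 0;		/* report failure, LVM-style */
		printf("%" PRId64 " blocks must still be flushed.\n", dirty);
		if (dirty)
			sleep(1);		/* crude wait; see the FIXME above */
	} while (dirty);

	return 1;
}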