Example No. 1
static int _detach_pvmove_mirror(struct cmd_context *cmd,
				 struct logical_volume *lv_mirr)
{
	uint32_t mimage_to_remove = 0;
	struct dm_list lvs_completed;
	struct lv_list *lvl;

	/* Update metadata to remove mirror segments and break dependencies */
	dm_list_init(&lvs_completed);

	if (arg_is_set(cmd, abort_ARG) &&
	    (seg_type(first_seg(lv_mirr), 0) == AREA_LV))
		mimage_to_remove = 1; /* remove the second mirror leg */

	if (!lv_remove_mirrors(cmd, lv_mirr, 1, 0,
			       _is_pvmove_image_removable,
			       &mimage_to_remove, PVMOVE) ||
	    !remove_layers_for_segments_all(cmd, lv_mirr, PVMOVE,
					    &lvs_completed)) {
		return 0;
	}

	dm_list_iterate_items(lvl, &lvs_completed)
		/* FIXME Assumes only one pvmove at a time! */
		lvl->lv->status &= ~LOCKED;

	return 1;
}
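
The fifth and sixth arguments to lv_remove_mirrors() above are a removability callback and its opaque baton (&mimage_to_remove here). The real _is_pvmove_image_removable() is not shown in this example, so the following is only an illustrative sketch of the shape such a callback takes; the body and name are assumptions:

/*
 * Sketch of a removability callback as consumed by lv_remove_mirrors()
 * above.  Illustrative only: the real _is_pvmove_image_removable() is
 * not part of this listing.
 */
static int _example_image_removable(struct logical_volume *mimage_lv,
				    void *baton)
{
	uint32_t *mimage_to_remove = baton;
	struct lv_segment *mirror_seg;

	/* Find the parent mirror segment and report whether this leg
	 * is the one the caller flagged for removal. */
	if (!(mirror_seg = get_only_segment_using_this_lv(mimage_lv)))
		return 0;

	return seg_lv(mirror_seg, *mimage_to_remove) == mimage_lv;
}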
Example No. 2
/*
 * Normal snapshot or thinly-provisioned snapshot?
 */
static int _determine_snapshot_type(struct volume_group *vg,
				    struct lvcreate_params *lp)
{
	struct lv_list *lvl;

	if (!(lvl = find_lv_in_vg(vg, lp->origin))) {
		log_error("Snapshot origin LV %s not found in Volume group %s.",
			  lp->origin, vg->name);
		return 0;
	}

	if (!arg_count(vg->cmd, extents_ARG) && !arg_count(vg->cmd, size_ARG)) {
		if (!lv_is_thin_volume(lvl->lv)) {
			log_error("Please specify either size or extents with snapshots.");
			return 0;
		}

		lp->thin = 1;
		if (!(lp->segtype = get_segtype_from_string(vg->cmd, "thin")))
			return_0;

		lp->pool = first_seg(lvl->lv)->pool_lv->name;
	}

	return 1;
}
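
When the origin turns out to be a thin volume and no size was given, the function switches lvcreate into thin-snapshot mode by setting lp->thin, lp->segtype, and lp->pool. A hypothetical caller (fragment, names assumed) might branch on the result like this:

	/* Hypothetical caller sketch: decide between a thin snapshot and
	 * an old-style COW snapshot after _determine_snapshot_type(). */
	if (!_determine_snapshot_type(vg, lp))
		return_0;

	if (lp->thin)
		/* Thin snapshot: allocated from lp->pool, no --size needed. */
		log_verbose("Creating thin snapshot of %s in pool %s.",
			    lp->origin, lp->pool);
	else
		/* COW snapshot: --size/--extents has already been validated. */
		log_verbose("Creating COW snapshot of %s.", lp->origin);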
Example No. 3
/*
 * lv_cache_create
 * @pool
 * @origin
 *
 * Given a cache_pool and an origin, link the two and create a
 * cached LV.
 *
 * Returns: cache LV on success, NULL on failure
 */
struct logical_volume *lv_cache_create(struct logical_volume *pool,
				       struct logical_volume *origin)
{
	const struct segment_type *segtype;
	struct cmd_context *cmd = pool->vg->cmd;
	struct logical_volume *cache_lv;
	struct lv_segment *seg;

	if (!lv_is_cache_pool(pool)) {
		log_error(INTERNAL_ERROR
			  "%s is not a cache_pool LV", pool->name);
		return NULL;
	}

	if (!dm_list_empty(&pool->segs_using_this_lv)) {
		seg = get_only_segment_using_this_lv(pool);
		log_error("%s is already in use by %s",
			  pool->name, seg ? seg->lv->name : "another LV");
		return NULL;
	}

	if (lv_is_cache_type(origin)) {
		/*
		 * FIXME: We can layer caches, insert_layer_for_lv() would
		 * have to do a better job renaming the LVs in the stack
		 * first so that there isn't a name collision with <name>_corig.
		 * The origin under the origin would become *_corig_corig
		 * before renaming the origin above to *_corig.
		 */
		log_error(INTERNAL_ERROR
			  "The origin, %s, cannot be of cache type",
			  origin->name);
		return NULL;
	}

	if (!(segtype = get_segtype_from_string(cmd, "cache")))
		return_NULL;

	cache_lv = origin;
	if (!(origin = insert_layer_for_lv(cmd, cache_lv, CACHE, "_corig")))
		return_NULL;

	seg = first_seg(cache_lv);
	seg->segtype = segtype;

	if (!attach_pool_lv(seg, pool, NULL, NULL))
		return_NULL;

	return cache_lv;
}
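
A minimal usage sketch for lv_cache_create(), assuming a caller that already holds the pool and origin LVs; the helper name is hypothetical and error paths are shortened:

/*
 * Usage sketch (assumed caller, not from the source above): link an
 * existing cache pool to an origin and persist the new metadata.
 */
static int _example_cache_it(struct volume_group *vg,
			     struct logical_volume *origin,
			     struct logical_volume *pool)
{
	struct logical_volume *cache_lv;

	if (!(cache_lv = lv_cache_create(pool, origin)))
		return_0;

	/* Commit the metadata so the new cache stack survives reboot. */
	if (!vg_write(vg) || !vg_commit(vg))
		return_0;

	return 1;
}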
Example No. 4
static int _is_converting(struct logical_volume *lv)
{
	struct lv_segment *seg;

	if (lv->status & MIRRORED) {
		seg = first_seg(lv);
		/* Can't use is_temporary_mirror() because the metadata for
		 * seg_lv may not be read in and flags may not be set yet. */
		if (seg_type(seg, 0) == AREA_LV &&
		    strstr(seg_lv(seg, 0)->name, MIRROR_SYNC_LAYER))
			return 1;
	}

	return 0;
}
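
The test hinges on the temporary resync layer carrying a fixed name suffix, MIRROR_SYNC_LAYER. A standalone sketch of the same string check, assuming the suffix is "_mimagetmp":

#include <stdio.h>
#include <string.h>

/* Assumed value of MIRROR_SYNC_LAYER, for illustration only. */
#define MIRROR_SYNC_LAYER "_mimagetmp"

int main(void)
{
	/* A converting mirror's first image carries the sync-layer suffix. */
	const char *converting = "lvol0_mimagetmp_0";
	const char *steady = "lvol0_mimage_0";

	printf("%s -> %d\n", converting,
	       strstr(converting, MIRROR_SYNC_LAYER) != NULL); /* 1 */
	printf("%s -> %d\n", steady,
	       strstr(steady, MIRROR_SYNC_LAYER) != NULL);     /* 0 */
	return 0;
}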
Example No. 5
int parse_vdo_pool_status(struct dm_pool *mem, const struct logical_volume *vdo_pool_lv,
			  const char *params, struct lv_status_vdo *status)
{
	struct dm_vdo_status_parse_result result;
	char *dm_name;

	status->usage = DM_PERCENT_INVALID;
	status->saving = DM_PERCENT_INVALID;
	status->data_usage = DM_PERCENT_INVALID;

	if (!(dm_name = dm_build_dm_name(mem, vdo_pool_lv->vg->name,
					 vdo_pool_lv->name, NULL))) {
		log_error("Failed to build VDO DM name %s.",
			  display_lvname(vdo_pool_lv));
		return 0;
	}

	if (!dm_vdo_status_parse(mem, params, &result)) {
		log_error("Cannot parse %s VDO pool status %s.",
			  display_lvname(vdo_pool_lv), result.error);
		return 0;
	}

	status->vdo = result.status;

	if (result.status->operating_mode == DM_VDO_MODE_NORMAL) {
		if (!_sysfs_get_kvdo_value(dm_name, "statistics/data_blocks_used",
					   &status->data_blocks_used))
			return_0;

		if (!_sysfs_get_kvdo_value(dm_name, "statistics/logical_blocks_used",
					   &status->logical_blocks_used))
			return_0;

		status->usage = dm_make_percent(result.status->used_blocks,
						result.status->total_blocks);
		status->saving = dm_make_percent(status->logical_blocks_used - status->data_blocks_used,
						 status->logical_blocks_used);
		status->data_usage = dm_make_percent(status->data_blocks_used * DM_VDO_BLOCK_SIZE,
						     first_seg(vdo_pool_lv)->vdo_pool_virtual_extents *
						     (uint64_t) vdo_pool_lv->vg->extent_size);
	}

	return 1;
}
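
The saving percentage above is (logical_blocks_used - data_blocks_used) / logical_blocks_used, i.e. the share of logical blocks that deduplication and compression avoided storing. A standalone sketch of the same arithmetic, with dm_make_percent() replaced by plain floating point:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t logical_blocks_used = 1000;
	uint64_t data_blocks_used = 250;	/* deduped/compressed down */

	/* Same ratio parse_vdo_pool_status() hands to dm_make_percent(). */
	double saving = (double) (logical_blocks_used - data_blocks_used) /
			(double) logical_blocks_used;

	printf("VDO saving: %.1f%%\n", saving * 100.0);	/* 75.0% */
	return 0;
}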
Example No. 6
/*
 * convert_vdo_pool_lv
 * @data_lv
 * @vtp
 * @virtual_extents
 *
 * Convert given data LV and its target parameters into a VDO LV with VDO pool.
 *
 * Returns: old data LV on success (passed data LV becomes VDO LV), NULL on failure
 */
struct logical_volume *convert_vdo_pool_lv(struct logical_volume *data_lv,
					   const struct dm_vdo_target_params *vtp,
					   uint32_t *virtual_extents)
{
	const uint64_t header_size = DEFAULT_VDO_POOL_HEADER_SIZE;
	const uint32_t extent_size = data_lv->vg->extent_size;
	struct cmd_context *cmd = data_lv->vg->cmd;
	struct logical_volume *vdo_pool_lv = data_lv;
	const struct segment_type *vdo_pool_segtype;
	struct lv_segment *vdo_pool_seg;
	uint64_t vdo_logical_size = 0;
	uint64_t adjust;

	if (!(vdo_pool_segtype = get_segtype_from_string(cmd, SEG_TYPE_NAME_VDO_POOL)))
		return_NULL;

	adjust = (*virtual_extents * (uint64_t) extent_size) % DM_VDO_BLOCK_SIZE;
	if (adjust) {
		*virtual_extents += (DM_VDO_BLOCK_SIZE - adjust) / extent_size;
		log_print_unless_silent("Rounding size up to 4,00 KiB VDO logical extent boundary: %s.",
					display_size(data_lv->vg->cmd, *virtual_extents * (uint64_t) extent_size));
	}

	if (*virtual_extents)
		vdo_logical_size =
			_get_virtual_size(*virtual_extents, extent_size, header_size);

	if (!dm_vdo_validate_target_params(vtp, vdo_logical_size))
		return_NULL;

	/* Format data LV as VDO volume */
	if (!_format_vdo_pool_data_lv(data_lv, vtp, &vdo_logical_size)) {
		log_error("Cannot format VDO pool volume %s.", display_lvname(data_lv));
		return NULL;
	}

	if (!deactivate_lv(data_lv->vg->cmd, data_lv)) {
		log_error("Aborting. Manual intervention required.");
		return NULL;
	}

	vdo_logical_size -= 2 * header_size;

	if (vdo_logical_size < extent_size) {
		if (!*virtual_extents)
			/* User has not specified size and at least 1 extent is necessary */
			log_error("Cannot create fully fitting VDO volume, "
				  "--virtualsize has to be specified.");

		log_error("Size %s for VDO volume cannot be smaller then extent size %s.",
			  display_size(data_lv->vg->cmd, vdo_logical_size),
			  display_size(data_lv->vg->cmd, extent_size));
		return NULL;
	}

	*virtual_extents = vdo_logical_size / extent_size;

	/* Move segments from existing data_lv into LV_vdata */
	if (!(data_lv = insert_layer_for_lv(cmd, vdo_pool_lv, 0, "_vdata")))
		return_NULL;

	vdo_pool_seg = first_seg(vdo_pool_lv);
	vdo_pool_seg->segtype = vdo_pool_segtype;
	vdo_pool_seg->vdo_params = *vtp;
	vdo_pool_seg->vdo_pool_header_size = DEFAULT_VDO_POOL_HEADER_SIZE;
	vdo_pool_seg->vdo_pool_virtual_extents = *virtual_extents;

	vdo_pool_lv->status |= LV_VDO_POOL;
	data_lv->status |= LV_VDO_POOL_DATA;

	return data_lv;
}
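
The rounding step pads the requested size up to the next VDO logical block boundary. A standalone sketch of the same arithmetic, assuming DM_VDO_BLOCK_SIZE counts 512-byte sectors (8 sectors = 4 KiB):

#include <stdint.h>
#include <stdio.h>

/* Assumed for illustration: VDO block size in 512-byte sectors (4 KiB). */
#define DM_VDO_BLOCK_SIZE 8

int main(void)
{
	uint32_t extent_size = 2;	/* 1 KiB extents, in sectors */
	uint32_t virtual_extents = 7;	/* 7 KiB requested */
	uint64_t adjust;

	/* Same rounding as convert_vdo_pool_lv(): pad the request up to
	 * the next 4 KiB VDO logical block boundary. */
	adjust = (virtual_extents * (uint64_t) extent_size) % DM_VDO_BLOCK_SIZE;
	if (adjust)
		virtual_extents += (DM_VDO_BLOCK_SIZE - adjust) / extent_size;

	printf("rounded to %u extents (%u KiB)\n",
	       virtual_extents, virtual_extents * extent_size / 2); /* 8, 8 */
	return 0;
}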
Example No. 7
static int lvchange_resync(struct cmd_context *cmd,
			   struct logical_volume *lv)
{
	int active = 0;
	int monitored;
	struct lvinfo info;
	struct logical_volume *log_lv;

	if (!(lv->status & MIRRORED)) {
		log_error("Unable to resync %s because it is not mirrored.",
			  lv->name);
		return 1;
	}

	if (lv->status & PVMOVE) {
		log_error("Unable to resync pvmove volume %s", lv->name);
		return 0;
	}

	if (lv->status & LOCKED) {
		log_error("Unable to resync locked volume %s", lv->name);
		return 0;
	}

	if (lv_info(cmd, lv, 0, &info, 1, 0)) {
		if (info.open_count) {
			log_error("Can't resync open logical volume \"%s\"",
				  lv->name);
			return 0;
		}

		if (info.exists) {
			if (!arg_count(cmd, yes_ARG) &&
			    yes_no_prompt("Do you really want to deactivate "
					  "logical volume %s to resync it? [y/n]: ",
					  lv->name) == 'n') {
				log_error("Logical volume \"%s\" not resynced",
					  lv->name);
				return 0;
			}

			if (sigint_caught())
				return 0;

			active = 1;
		}
	}

	/* Activate exclusively to ensure no nodes still have LV active */
	monitored = dmeventd_monitor_mode();
	init_dmeventd_monitor(0);

	if (!deactivate_lv(cmd, lv)) {
		log_error("Unable to deactivate %s for resync", lv->name);
		return 0;
	}

	if (vg_is_clustered(lv->vg) && lv_is_active(lv)) {
		log_error("Can't get exclusive access to clustered volume %s",
			  lv->name);
		return 0;
	}

	init_dmeventd_monitor(monitored);

	log_lv = first_seg(lv)->log_lv;

	log_very_verbose("Starting resync of %s%s%s mirror \"%s\"",
			 (active) ? "active " : "",
			 vg_is_clustered(lv->vg) ? "clustered " : "",
			 (log_lv) ? "disk-logged" : "core-logged",
			 lv->name);

	/*
	 * If this mirror has a core log (i.e. !log_lv),
	 * then simply deactivating/activating will cause
	 * it to reset the sync status.  We only need to
	 * worry about persistent logs.
	 */
	if (!log_lv && !(lv->status & LV_NOTSYNCED)) {
		if (active && !activate_lv(cmd, lv)) {
			log_error("Failed to reactivate %s to resynchronize "
				  "mirror", lv->name);
			return 0;
		}
		return 1;
	}

	lv->status &= ~LV_NOTSYNCED;

	if (log_lv) {
		/* Separate mirror log so we can clear it */
		detach_mirror_log(first_seg(lv));

		if (!vg_write(lv->vg)) {
			log_error("Failed to write intermediate VG metadata.");
			if (!attach_mirror_log(first_seg(lv), log_lv))
				stack;
			if (active && !activate_lv(cmd, lv))
				stack;
			return 0;
		}

		if (!vg_commit(lv->vg)) {
			log_error("Failed to commit intermediate VG metadata.");
			if (!attach_mirror_log(first_seg(lv), log_lv))
				stack;
			if (active && !activate_lv(cmd, lv))
				stack;
			return 0;
		}

		backup(lv->vg);

		if (!activate_lv(cmd, log_lv)) {
			log_error("Unable to activate %s for mirror log resync",
				  log_lv->name);
			return 0;
		}

		log_very_verbose("Clearing log device %s", log_lv->name);
		if (!set_lv(cmd, log_lv, log_lv->size, 0)) {
			log_error("Unable to reset sync status for %s", lv->name);
			if (!deactivate_lv(cmd, log_lv))
				log_error("Failed to deactivate log LV after "
					  "wiping failed");
			return 0;
		}

		if (!deactivate_lv(cmd, log_lv)) {
			log_error("Unable to deactivate log LV %s after wiping "
				  "for resync", log_lv->name);
			return 0;
		}

		/* Put mirror log back in place */
		if (!attach_mirror_log(first_seg(lv), log_lv))
			stack;
	}

	log_very_verbose("Updating logical volume \"%s\" on disk(s)", lv->name);
	if (!vg_write(lv->vg) || !vg_commit(lv->vg)) {
		log_error("Failed to update metadata on disk.");
		return 0;
	}

	if (active && !activate_lv(cmd, lv)) {
		log_error("Failed to reactivate %s after resync", lv->name);
		return 0;
	}

	return 1;
}
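
As the comment in the middle of the function notes, a core-logged mirror (no log_lv) resyncs simply by being deactivated and reactivated, since the in-memory log is rebuilt from scratch on activation. Condensed into a sketch (assumed helper, not from the source):

/* Condensed sketch of the core-log fast path above (assumed helper). */
static int _example_resync_core_log(struct cmd_context *cmd,
				    struct logical_volume *lv, int active)
{
	if (!deactivate_lv(cmd, lv))
		return_0;

	/* No persistent log to wipe: reactivation alone restarts the
	 * full resynchronization of the mirror. */
	if (active && !activate_lv(cmd, lv))
		return_0;

	return 1;
}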
Example No. 8
struct logical_volume *find_cow(const struct logical_volume *snap)
{
	return first_seg(snap)->cow;
}
Example No. 9
static uint32_t _raidmaxrecoveryrate(const struct logical_volume *lv)
{
	return first_seg(lv)->max_recovery_rate;
}
Example No. 10
static uint32_t _raidwritebehind(const struct logical_volume *lv)
{
	return first_seg(lv)->writebehind;
}
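
Examples No. 8-10 follow one pattern: one-line accessors that read a field off the LV's first segment via first_seg(). A hypothetical caller in the same source file (the accessors are static, so this is file-local) could combine them:

/* Hypothetical caller: report RAID tunables via the accessors above. */
static void _example_report_raid(const struct logical_volume *lv)
{
	log_verbose("LV %s: writebehind=%u max_recovery_rate=%u",
		    lv->name, _raidwritebehind(lv),
		    _raidmaxrecoveryrate(lv));
}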
Example No. 11
/*
 * lv_cache_remove
 * @cache_lv
 *
 * Given a cache LV, remove the cache layer.  This will unlink
 * the origin and cache_pool, remove the cache LV layer, and promote
 * the origin to a usable non-cached LV of the same name as the
 * given cache_lv.
 *
 * Returns: 1 on success, 0 on failure
 */
int lv_cache_remove(struct logical_volume *cache_lv)
{
	struct cmd_context *cmd = cache_lv->vg->cmd;
	const char *policy_name;
	uint64_t dirty_blocks;
	struct lv_segment *cache_seg = first_seg(cache_lv);
	struct logical_volume *corigin_lv;
	struct logical_volume *cache_pool_lv;

	if (!lv_is_cache(cache_lv)) {
		log_error(INTERNAL_ERROR "LV %s is not cached.", cache_lv->name);
		return 0;
	}

	/* Active volume is needed (writeback only?) */
	if (!lv_is_active_locally(cache_lv) &&
	    !activate_lv_excl_local(cache_lv->vg->cmd, cache_lv)) {
		log_error("Failed to active cache locally %s.", cache_lv->name);
		return 0;
	}

	/*
	 * FIXME:
	 * Before the link can be broken, we must ensure that the
	 * cache has been flushed.  This may already be the case
	 * if the cache mode is writethrough (or the cleaner
	 * policy is in place from a previous half-finished attempt
	 * to remove the cache_pool).  It could take a long time to
	 * flush the cache - it should probably be done in the background.
	 *
	 * Also, if we do perform the flush in the background and we
	 * happen to also be removing the cache/origin LV, then we
	 * could check if the cleaner policy is in place and simply
	 * remove the cache_pool then without waiting for the flush to
	 * complete.
	 */
	if (!lv_cache_policy_info(cache_lv, &policy_name, NULL, NULL))
		return_0;

	if (strcmp(policy_name, "cleaner")) {
		/* We must swap in the cleaner to flush the cache */
		log_print_unless_silent("Flushing cache for %s.", cache_lv->name);

		/*
		 * Is there a clean way to free the memory for the name
		 * and argv when changing the policy?
		 */
		cache_seg->policy_name = "cleaner";
		cache_seg->policy_argc = 0;
		cache_seg->policy_argv = NULL;

		/* update the kernel to put the cleaner policy in place */
		if (!vg_write(cache_lv->vg))
			return_0;
		if (!suspend_lv(cmd, cache_lv))
			return_0;
		if (!vg_commit(cache_lv->vg))
			return_0;
		if (!resume_lv(cmd, cache_lv))
			return_0;
	}

	/* FIXME: use polling to do this... */
	do {
		if (!lv_cache_block_info(cache_lv, NULL,
					 &dirty_blocks, NULL, NULL))
			return_0;
		log_print_unless_silent("%" PRIu64 " blocks must still be flushed.",
					dirty_blocks);
		if (dirty_blocks)
			sleep(1);
	} while (dirty_blocks);

	cache_pool_lv = cache_seg->pool_lv;
	if (!detach_pool_lv(cache_seg))
		return_0;

	/* Regular LV which user may remove if there are problems */
	corigin_lv = seg_lv(cache_seg, 0);
	lv_set_visible(corigin_lv);
	if (!remove_layer_from_lv(cache_lv, corigin_lv))
		return_0;

	if (!vg_write(cache_lv->vg))
		return_0;

	/*
	 * suspend_lv on this cache LV suspends all components:
	 * - the top-level cache LV
	 * - the origin
	 * - the cache_pool _cdata and _cmeta
	 */
	if (!suspend_lv(cmd, cache_lv))
		return_0;

	if (!vg_commit(cache_lv->vg))
		return_0;

	/* resume_lv on this (former) cache LV will resume all */
	/*
	 * FIXME: currently we can't easily avoid execution of
	 * blkid on resumed error device
	 */
	if (!resume_lv(cmd, cache_lv))
		return_0;

	/*
	 * Cleanup orphan devices
	 *
	 * FIXME:
	 * Fix _add_dev() to support this case better,
	 * since these should be handled internally by resume_lv(),
	 * which should autoremove any orphans.
	 */
	if (!_cleanup_orphan_lv(corigin_lv))  /* _corig */
		return_0;
	if (!_cleanup_orphan_lv(seg_lv(first_seg(cache_pool_lv), 0))) /* _cdata */
		return_0;
	if (!_cleanup_orphan_lv(first_seg(cache_pool_lv)->metadata_lv)) /* _cmeta */
		return_0;

	if (!lv_remove(corigin_lv))
		return_0;

	return 1;
}
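
The FIXME above asks for proper polling instead of the open-coded sleep loop. A bounded variant of the same wait, with an assumed timeout policy rather than LVM's real polling framework:

	/* Sketch: bounded variant of the flush wait above (assumed
	 * timeout policy; the real fix would use the polling daemon). */
	unsigned retries = 300;	/* give up after ~5 minutes */

	do {
		if (!lv_cache_block_info(cache_lv, NULL,
					 &dirty_blocks, NULL, NULL))
			return_0;
		if (!dirty_blocks)
			break;
		log_print_unless_silent("%" PRIu64 " blocks must still be flushed.",
					dirty_blocks);
		sleep(1);
	} while (--retries);

	if (dirty_blocks) {
		log_error("Flush of %s timed out.", cache_lv->name);
		return 0;
	}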
Example No. 12
static int lvchange_pool_update(struct cmd_context *cmd,
				struct logical_volume *lv)
{
	int r = 0;
	int update = 0;
	unsigned val;
	thin_discards_t discards;

	if (!lv_is_thin_pool(lv)) {
		log_error("Logical volume \"%s\" is not a thin pool.", lv->name);
		return 0;
	}

	if (arg_count(cmd, discards_ARG)) {
		discards = (thin_discards_t) arg_uint_value(cmd, discards_ARG, THIN_DISCARDS_IGNORE);
		if (discards != first_seg(lv)->discards) {
			if ((discards != THIN_DISCARDS_IGNORE) &&
			    (first_seg(lv)->chunk_size &
			     (first_seg(lv)->chunk_size - 1)))
				log_error("Cannot change discards state for "
					  "logical volume \"%s\" "
					  "with non-power-of-2 chunk size.", lv->name);
			else if (((discards == THIN_DISCARDS_IGNORE) ||
			     (first_seg(lv)->discards == THIN_DISCARDS_IGNORE)) &&
			    lv_is_active(lv))
				log_error("Cannot change discards state for active "
					  "logical volume \"%s\".", lv->name);
			else {
				first_seg(lv)->discards = discards;
				update++;
			}
		} else
			log_error("Logical volume \"%s\" already uses --discards %s.",
				  lv->name, get_pool_discards_name(discards));
	}

	if (arg_count(cmd, zero_ARG)) {
		val = arg_uint_value(cmd, zero_ARG, 1);
		if (val != first_seg(lv)->zero_new_blocks) {
			first_seg(lv)->zero_new_blocks = val;
			update++;
		} else
			log_error("Logical volume \"%s\" already %szero new blocks.",
				  lv->name, val ? "" : "does not ");
	}

	if (!update)
		return 0;

	log_very_verbose("Updating logical volume \"%s\" on disk(s).", lv->name);
	if (!vg_write(lv->vg))
		return_0;

	if (!suspend_lv_origin(cmd, lv)) {
		log_error("Failed to update active %s/%s (deactivation is needed).",
			  lv->vg->name, lv->name);
		vg_revert(lv->vg);
		goto out;
	}

	if (!vg_commit(lv->vg)) {
		if (!resume_lv_origin(cmd, lv))
			stack;
		goto_out;
	}

	if (!resume_lv_origin(cmd, lv)) {
		log_error("Problem reactivating %s.", lv->name);
		goto out;
	}

	r = 1;
out:
	backup(lv->vg);
	return r;
}
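
The chunk-size guard uses the classic bit trick: x & (x - 1) clears the lowest set bit, so the result is zero exactly when x is a power of 2. A standalone illustration:

#include <stdint.h>
#include <stdio.h>

/* x & (x - 1) drops the lowest set bit; the result is 0 only for
 * powers of 2, which is what the discards guard above tests. */
static int _is_power_of_2(uint32_t x)
{
	return x && !(x & (x - 1));
}

int main(void)
{
	printf("%d %d %d\n", _is_power_of_2(64), _is_power_of_2(96),
	       _is_power_of_2(128));	/* prints: 1 0 1 */
	return 0;
}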