/* * lv_cache_create * @pool * @origin * * Given a cache_pool and an origin, link the two and create a * cached LV. * * Returns: cache LV on success, NULL on failure */ struct logical_volume *lv_cache_create(struct logical_volume *pool, struct logical_volume *origin) { const struct segment_type *segtype; struct cmd_context *cmd = pool->vg->cmd; struct logical_volume *cache_lv; struct lv_segment *seg; if (!lv_is_cache_pool(pool)) { log_error(INTERNAL_ERROR "%s is not a cache_pool LV", pool->name); return NULL; } if (!dm_list_empty(&pool->segs_using_this_lv)) { seg = get_only_segment_using_this_lv(pool); log_error("%s is already in use by %s", pool->name, seg ? seg->lv->name : "another LV"); return NULL; } if (lv_is_cache_type(origin)) { /* * FIXME: We can layer caches, insert_layer_for_lv() would * have to do a better job renaming the LVs in the stack * first so that there isn't a name collision with <name>_corig. * The origin under the origin would become *_corig_corig * before renaming the origin above to *_corig. */ log_error(INTERNAL_ERROR "The origin, %s, cannot be of cache type", origin->name); return NULL; } if (!(segtype = get_segtype_from_string(cmd, "cache"))) return_NULL; cache_lv = origin; if (!(origin = insert_layer_for_lv(cmd, cache_lv, CACHE, "_corig"))) return_NULL; seg = first_seg(cache_lv); seg->segtype = segtype; if (!attach_pool_lv(seg, pool, NULL, NULL)) return_NULL; return cache_lv; }
/*
 * convert_vdo_pool_lv
 * @data_lv
 * @vtp
 * @virtual_extents
 *
 * Convert given data LV and its target parameters into a VDO LV with VDO pool.
 *
 * Returns: old data LV on success (passed data LV becomes VDO LV), NULL on failure
 */
struct logical_volume *convert_vdo_pool_lv(struct logical_volume *data_lv,
					   const struct dm_vdo_target_params *vtp,
					   uint32_t *virtual_extents)
{
	const uint64_t header_size = DEFAULT_VDO_POOL_HEADER_SIZE;
	const uint32_t extent_size = data_lv->vg->extent_size;
	struct cmd_context *cmd = data_lv->vg->cmd;
	struct logical_volume *vdo_pool_lv = data_lv;
	const struct segment_type *vdo_pool_segtype;
	struct lv_segment *vdo_pool_seg;
	uint64_t vdo_logical_size = 0;
	uint64_t adjust;

	if (!(vdo_pool_segtype = get_segtype_from_string(cmd, SEG_TYPE_NAME_VDO_POOL)))
		return_NULL;

	/* Virtual size must be a whole multiple of the VDO block size; round up. */
	adjust = (*virtual_extents * (uint64_t) extent_size) % DM_VDO_BLOCK_SIZE;
	if (adjust) {
		*virtual_extents += (DM_VDO_BLOCK_SIZE - adjust) / extent_size;
		/* Fixed message typo: "4,00" -> "4.00" (decimal point) */
		log_print_unless_silent("Rounding size up to 4.00 KiB VDO logical extent boundary: %s.",
					display_size(cmd, *virtual_extents * (uint64_t) extent_size));
	}

	if (*virtual_extents)
		vdo_logical_size =
			_get_virtual_size(*virtual_extents, extent_size, header_size);

	/* Was return_0: this function returns a pointer, so use return_NULL. */
	if (!dm_vdo_validate_target_params(vtp, vdo_logical_size))
		return_NULL;

	/* Format data LV as VDO volume */
	if (!_format_vdo_pool_data_lv(data_lv, vtp, &vdo_logical_size)) {
		log_error("Cannot format VDO pool volume %s.",
			  display_lvname(data_lv));
		return NULL;
	}

	if (!deactivate_lv(cmd, data_lv)) {
		log_error("Aborting. Manual intervention required.");
		return NULL;
	}

	/* Account for the VDO headers at both ends of the data device. */
	vdo_logical_size -= 2 * header_size;

	if (vdo_logical_size < extent_size) {
		if (!*virtual_extents)
			/* User has not specified size and at least 1 extent is necessary */
			log_error("Cannot create fully fitting VDO volume, "
				  "--virtualsize has to be specified.");

		/* Fixed grammar: "smaller then" -> "smaller than" */
		log_error("Size %s for VDO volume cannot be smaller than extent size %s.",
			  display_size(cmd, vdo_logical_size),
			  display_size(cmd, extent_size));
		return NULL;
	}

	*virtual_extents = vdo_logical_size / extent_size;

	/* Move segments from existing data_lv into LV_vdata */
	if (!(data_lv = insert_layer_for_lv(cmd, vdo_pool_lv, 0, "_vdata")))
		return_NULL;

	vdo_pool_seg = first_seg(vdo_pool_lv);
	vdo_pool_seg->segtype = vdo_pool_segtype;
	vdo_pool_seg->vdo_params = *vtp;
	vdo_pool_seg->vdo_pool_header_size = DEFAULT_VDO_POOL_HEADER_SIZE;
	vdo_pool_seg->vdo_pool_virtual_extents = *virtual_extents;

	vdo_pool_lv->status |= LV_VDO_POOL;
	data_lv->status |= LV_VDO_POOL_DATA;

	return data_lv;
}