/*
 * Decide whether the pvmove temporary LV must be activated exclusively.
 *
 * In a clustered VG, non-exclusive activation is only possible when the
 * required pvmove target is present everywhere; _pvmove_target_present()
 * is probed (second argument presumably selects the clustered variant —
 * NOTE(review): confirm against its definition).
 *
 * Returns 1 when exclusive activation is required, 0 otherwise.
 */
static unsigned _pvmove_is_exclusive(struct cmd_context *cmd,
				     struct volume_group *vg)
{
	if (!vg_is_clustered(vg))
		return 0;

	return _pvmove_target_present(cmd, 1) ? 0 : 1;
}
/* Check if given LV is usable as snapshot origin LV */ int validate_snapshot_origin(const struct logical_volume *origin_lv) { const char *err = NULL; /* For error string */ if (lv_is_cow(origin_lv)) err = "snapshots"; else if (lv_is_locked(origin_lv)) err = "locked volumes"; else if (lv_is_pvmove(origin_lv)) err = "pvmoved volumes"; else if (!lv_is_visible(origin_lv)) err = "hidden volumes"; else if (lv_is_merging_origin(origin_lv)) err = "an origin that has a merging snapshot"; else if (lv_is_cache_type(origin_lv) && !lv_is_cache(origin_lv)) err = "cache type volumes"; else if (lv_is_thin_type(origin_lv) && !lv_is_thin_volume(origin_lv)) err = "thin pool type volumes"; else if (lv_is_mirror_type(origin_lv)) { if (!lv_is_mirror(origin_lv)) err = "mirror subvolumes"; else { log_warn("WARNING: Snapshots of mirrors can deadlock under rare device failures."); log_warn("WARNING: Consider using the raid1 mirror type to avoid this."); log_warn("WARNING: See global/mirror_segtype_default in lvm.conf."); } } else if (lv_is_raid_type(origin_lv) && !lv_is_raid(origin_lv)) err = "raid subvolumes"; if (err) { log_error("Snapshots of %s are not supported.", err); return 0; } if (vg_is_clustered(origin_lv->vg) && lv_is_active(origin_lv) && !lv_is_active_exclusive_locally(origin_lv)) { log_error("Snapshot origin must be active exclusively."); return 0; } return 1; }
/*
 * Implement "lvchange --resync": force a full resynchronization of a
 * mirrored LV.  The LV is deactivated and, for mirrors with a persistent
 * (on-disk) log, the log LV is temporarily detached, wiped and
 * reattached so the mirror is treated as out-of-sync on reactivation.
 *
 * Returns 1 on success (including the not-mirrored case, which is
 * treated as a soft no-op) and 0 on failure.
 */
static int lvchange_resync(struct cmd_context *cmd, struct logical_volume *lv)
{
	int active = 0;		/* was the LV active before we deactivated it? */
	int monitored;		/* saved dmeventd monitoring mode to restore */
	struct lvinfo info;
	struct logical_volume *log_lv;

	/* NOTE: intentionally returns 1 (success) here, not 0. */
	if (!(lv->status & MIRRORED)) {
		log_error("Unable to resync %s because it is not mirrored.", lv->name);
		return 1;
	}

	if (lv->status & PVMOVE) {
		log_error("Unable to resync pvmove volume %s", lv->name);
		return 0;
	}

	if (lv->status & LOCKED) {
		log_error("Unable to resync locked volume %s", lv->name);
		return 0;
	}

	/* Refuse when the device is open; prompt before deactivating. */
	if (lv_info(cmd, lv, 0, &info, 1, 0)) {
		if (info.open_count) {
			log_error("Can't resync open logical volume \"%s\"", lv->name);
			return 0;
		}

		if (info.exists) {
			if (!arg_count(cmd, yes_ARG) &&
			    yes_no_prompt("Do you really want to deactivate "
					  "logical volume %s to resync it? [y/n]: ",
					  lv->name) == 'n') {
				log_error("Logical volume \"%s\" not resynced",
					  lv->name);
				return 0;
			}

			if (sigint_caught())
				return 0;

			active = 1;	/* remember to reactivate afterwards */
		}
	}

	/* Activate exclusively to ensure no nodes still have LV active */
	/* Monitoring is disabled around the deactivate/wipe sequence. */
	monitored = dmeventd_monitor_mode();
	init_dmeventd_monitor(0);

	if (!deactivate_lv(cmd, lv)) {
		log_error("Unable to deactivate %s for resync", lv->name);
		return 0;
	}

	/* In a clustered VG another node may still hold it active. */
	if (vg_is_clustered(lv->vg) && lv_is_active(lv)) {
		log_error("Can't get exclusive access to clustered volume %s",
			  lv->name);
		return 0;
	}

	init_dmeventd_monitor(monitored);

	log_lv = first_seg(lv)->log_lv;

	log_very_verbose("Starting resync of %s%s%s mirror \"%s\"",
			 (active) ? "active " : "",
			 vg_is_clustered(lv->vg) ? "clustered " : "",
			 (log_lv) ? "disk-logged" : "core-logged",
			 lv->name);

	/*
	 * If this mirror has a core log (i.e. !log_lv),
	 * then simply deactivating/activating will cause
	 * it to reset the sync status. We only need to
	 * worry about persistent logs.
	 */
	if (!log_lv && !(lv->status & LV_NOTSYNCED)) {
		if (active && !activate_lv(cmd, lv)) {
			log_error("Failed to reactivate %s to resynchronize "
				  "mirror", lv->name);
			return 0;
		}
		return 1;
	}

	lv->status &= ~LV_NOTSYNCED;

	if (log_lv) {
		/* Separate mirror log so we can clear it */
		detach_mirror_log(first_seg(lv));

		/* On metadata failure, reattach the log and restore state. */
		if (!vg_write(lv->vg)) {
			log_error("Failed to write intermediate VG metadata.");
			if (!attach_mirror_log(first_seg(lv), log_lv))
				stack;
			if (active && !activate_lv(cmd, lv))
				stack;
			return 0;
		}

		if (!vg_commit(lv->vg)) {
			log_error("Failed to commit intermediate VG metadata.");
			if (!attach_mirror_log(first_seg(lv), log_lv))
				stack;
			if (active && !activate_lv(cmd, lv))
				stack;
			return 0;
		}

		backup(lv->vg);

		/* Activate the now-standalone log LV so it can be wiped. */
		if (!activate_lv(cmd, log_lv)) {
			log_error("Unable to activate %s for mirror log resync",
				  log_lv->name);
			return 0;
		}

		log_very_verbose("Clearing log device %s", log_lv->name);
		/* Zero the whole log device to force a full resync. */
		if (!set_lv(cmd, log_lv, log_lv->size, 0)) {
			log_error("Unable to reset sync status for %s", lv->name);
			if (!deactivate_lv(cmd, log_lv))
				log_error("Failed to deactivate log LV after "
					  "wiping failed");
			return 0;
		}

		if (!deactivate_lv(cmd, log_lv)) {
			log_error("Unable to deactivate log LV %s after wiping "
				  "for resync", log_lv->name);
			return 0;
		}

		/* Put mirror log back in place */
		if (!attach_mirror_log(first_seg(lv), log_lv))
			stack;
	}

	log_very_verbose("Updating logical volume \"%s\" on disk(s)", lv->name);
	if (!vg_write(lv->vg) || !vg_commit(lv->vg)) {
		log_error("Failed to update metadata on disk.");
		return 0;
	}

	if (active && !activate_lv(cmd, lv)) {
		log_error("Failed to reactivate %s after resync", lv->name);
		return 0;
	}

	return 1;
}
static int lvchange_permission(struct cmd_context *cmd, struct logical_volume *lv) { uint32_t lv_access; struct lvinfo info; int r = 0; lv_access = arg_uint_value(cmd, permission_ARG, 0); if ((lv_access & LVM_WRITE) && (lv->status & LVM_WRITE)) { log_error("Logical volume \"%s\" is already writable", lv->name); return 0; } if (!(lv_access & LVM_WRITE) && !(lv->status & LVM_WRITE)) { log_error("Logical volume \"%s\" is already read only", lv->name); return 0; } if ((lv->status & MIRRORED) && (vg_is_clustered(lv->vg)) && lv_info(cmd, lv, 0, &info, 0, 0) && info.exists) { log_error("Cannot change permissions of mirror \"%s\" " "while active.", lv->name); return 0; } /* Not allowed to change permissions on RAID sub-LVs directly */ if ((lv->status & RAID_META) || (lv->status & RAID_IMAGE)) { log_error("Cannot change permissions of RAID %s \"%s\"", (lv->status & RAID_IMAGE) ? "image" : "metadata area", lv->name); return 0; } if (lv_access & LVM_WRITE) { lv->status |= LVM_WRITE; log_verbose("Setting logical volume \"%s\" read/write", lv->name); } else { lv->status &= ~LVM_WRITE; log_verbose("Setting logical volume \"%s\" read-only", lv->name); } log_very_verbose("Updating logical volume \"%s\" on disk(s)", lv->name); if (!vg_write(lv->vg)) return_0; if (!suspend_lv(cmd, lv)) { log_error("Failed to lock %s", lv->name); vg_revert(lv->vg); goto out; } if (!vg_commit(lv->vg)) { if (!resume_lv(cmd, lv)) stack; goto_out; } log_very_verbose("Updating permissions for \"%s\" in kernel", lv->name); if (!resume_lv(cmd, lv)) { log_error("Problem reactivating %s", lv->name); goto out; } r = 1; out: backup(lv->vg); return r; }
/*
 * Remove the snapshot relationship of a COW LV: detach it from its
 * origin, delete the internal snapshot LV, and commit the change,
 * suspending/resuming the origin so the kernel tables are refreshed.
 *
 * Returns 1 on success, 0 on failure.
 */
int vg_remove_snapshot(struct logical_volume *cow)
{
	int merging_snapshot = 0;	/* removing an actively merging snapshot? */
	struct logical_volume *origin = origin_from_cow(cow);
	int is_origin_active = lv_is_active(origin);

	/* Virtual origins are deactivated first; no resume is needed later. */
	if (is_origin_active &&
	    lv_is_virtual_origin(origin)) {
		if (!deactivate_lv(origin->vg->cmd, origin)) {
			log_error("Failed to deactivate logical volume \"%s\"",
				  origin->name);
			return 0;
		}
		is_origin_active = 0;
	}

	/* Unhook this COW from the origin's snapshot list. */
	dm_list_del(&cow->snapshot->origin_list);
	origin->origin_count--;

	if (lv_is_merging_origin(origin) &&
	    (find_snapshot(origin) == find_snapshot(cow))) {
		clear_snapshot_merge(origin);
		/*
		 * preload origin IFF "snapshot-merge" target is active
		 * - IMPORTANT: avoids preload if inactive merge is pending
		 */
		if (lv_has_target_type(origin->vg->vgmem, origin, NULL,
				       TARGET_NAME_SNAPSHOT_MERGE)) {
			/*
			 * preload origin to:
			 * - allow proper release of -cow
			 * - avoid allocations with other devices suspended
			 *   when transitioning from "snapshot-merge" to
			 *   "snapshot-origin" after a merge completes.
			 */
			merging_snapshot = 1;
		}
	}

	if (!lv_remove(cow->snapshot->lv)) {
		log_error("Failed to remove internal snapshot LV %s",
			  cow->snapshot->lv->name);
		return 0;
	}

	cow->snapshot = NULL;
	lv_set_visible(cow);

	/* format1 must do the change in one step, with the commit last. */
	if (!(origin->vg->fid->fmt->features & FMT_MDAS)) {
		/* Get the lock for COW volume */
		if (is_origin_active &&
		    !activate_lv(cow->vg->cmd, cow)) {
			log_error("Unable to activate logical volume \"%s\"",
				  cow->name);
			return 0;
		}
		return 1;
	}

	if (!vg_write(origin->vg))
		return_0;

	/* Skip call suspend, if device is not active */
	if (is_origin_active &&
	    !suspend_lv(origin->vg->cmd, origin)) {
		log_error("Failed to refresh %s without snapshot.",
			  origin->name);
		vg_revert(origin->vg);
		return 0;
	}

	if (!vg_commit(origin->vg))
		return_0;

	if (is_origin_active) {
		/*
		 * If the snapshot was active and the COW LV is taken away
		 * the LV lock on cluster has to be grabbed, so use
		 * activate_lv() which resumes suspended cow device.
		 */
		if (!merging_snapshot &&
		    !activate_lv(cow->vg->cmd, cow)) {
			log_error("Failed to activate %s.", cow->name);
			return 0;
		}

		if (!resume_lv(origin->vg->cmd, origin)) {
			log_error("Failed to resume %s.", origin->name);
			return 0;
		}

		/*
		 * For merged snapshot and clustered VG activate cow LV so
		 * the following call to deactivate_lv() can clean-up table
		 * entries. For this clustered lock need to be held.
		 */
		if (vg_is_clustered(cow->vg) &&
		    merging_snapshot &&
		    !activate_lv(cow->vg->cmd, cow)) {
			log_error("Failed to activate %s.", cow->name);
			return 0;
		}
	}

	return 1;
}
/*
 * The vgcreate command: create a new volume group from the PVs given on
 * the command line, applying extent size, limits, allocation policy,
 * clustering and metadata-copy settings from the arguments.
 *
 * argv[0] is the VG name; the remaining arguments are PV paths.
 * Returns ECMD_PROCESSED on success, EINVALID_CMD_LINE or ECMD_FAILED
 * on error.
 */
int vgcreate(struct cmd_context *cmd, int argc, char **argv)
{
	struct vgcreate_params vp_new;
	struct vgcreate_params vp_def;	/* defaults, seeded with the VG name */
	struct volume_group *vg;
	const char *tag;
	const char *clustered_message = "";
	char *vg_name;
	struct pvcreate_params pp;

	if (!argc) {
		log_error("Please provide volume group name and "
			  "physical volumes");
		return EINVALID_CMD_LINE;
	}

	vg_name = argv[0];
	argc--;
	argv++;	/* remaining arguments are the PVs to include */

	pvcreate_params_set_defaults(&pp);

	/*
	 * NOTE(review): pvcreate_params_validate() fails on zero return,
	 * while vgcreate_params_set_from_args()/vgcreate_params_validate()
	 * below fail on NON-zero return — the conventions differ; confirm
	 * against their definitions before changing any of these tests.
	 */
	if (!pvcreate_params_validate(cmd, argc, argv, &pp)) {
		return EINVALID_CMD_LINE;
	}

	vgcreate_params_set_defaults(&vp_def, NULL);
	vp_def.vg_name = vg_name;
	if (vgcreate_params_set_from_args(cmd, &vp_new, &vp_def))
		return EINVALID_CMD_LINE;

	if (vgcreate_params_validate(cmd, &vp_new))
		return EINVALID_CMD_LINE;

	/* Create the new VG */
	vg = vg_create(cmd, vp_new.vg_name);
	if (vg_read_error(vg)) {
		if (vg_read_error(vg) == FAILED_EXIST)
			log_error("A volume group called %s already exists.",
				  vp_new.vg_name);
		else
			log_error("Can't get lock for %s.", vp_new.vg_name);
		vg_release(vg);
		return ECMD_FAILED;
	}

	/* Apply the requested VG properties before adding any PVs. */
	if (!vg_set_extent_size(vg, vp_new.extent_size) ||
	    !vg_set_max_lv(vg, vp_new.max_lv) ||
	    !vg_set_max_pv(vg, vp_new.max_pv) ||
	    !vg_set_alloc_policy(vg, vp_new.alloc) ||
	    !vg_set_clustered(vg, vp_new.clustered) ||
	    !vg_set_mda_copies(vg, vp_new.vgmetadatacopies))
		goto bad_orphan;

	/* Orphan lock taken after the VG lock (held via vg_create above). */
	if (!lock_vol(cmd, VG_ORPHANS, LCK_VG_WRITE)) {
		log_error("Can't get lock for orphan PVs");
		goto bad_orphan;
	}

	/* attach the pv's */
	if (!vg_extend(vg, argc, argv, &pp))
		goto_bad;

	/* Warn if the stored limits were adjusted from what was asked for. */
	if (vp_new.max_lv != vg->max_lv)
		log_warn("WARNING: Setting maxlogicalvolumes to %d "
			 "(0 means unlimited)", vg->max_lv);

	if (vp_new.max_pv != vg->max_pv)
		log_warn("WARNING: Setting maxphysicalvolumes to %d "
			 "(0 means unlimited)", vg->max_pv);

	if (arg_count(cmd, addtag_ARG)) {
		if (!(tag = arg_str_value(cmd, addtag_ARG, NULL))) {
			log_error("Failed to get tag");
			goto bad;
		}

		if (!vg_change_tag(vg, tag, 1))
			goto_bad;
	}

	if (vg_is_clustered(vg)) {
		clustered_message = "Clustered ";
	} else {
		if (locking_is_clustered())
			clustered_message = "Non-clustered ";
	}

	if (!archive(vg))
		goto_bad;

	/* Store VG on disk(s) */
	if (!vg_write(vg) || !vg_commit(vg))
		goto_bad;

	unlock_vg(cmd, VG_ORPHANS);
	unlock_vg(cmd, vp_new.vg_name);

	backup(vg);

	/*
	 * %c prints 'v' after a cluster prefix ("Clustered volume group...")
	 * or 'V' when there is no prefix ("Volume group...").
	 */
	log_print("%s%colume group \"%s\" successfully created",
		  clustered_message, *clustered_message ? 'v' : 'V', vg->name);

	vg_release(vg);
	return ECMD_PROCESSED;

bad:
	/* Reached only after the orphan lock was taken. */
	unlock_vg(cmd, VG_ORPHANS);
bad_orphan:
	vg_release(vg);
	unlock_vg(cmd, vp_new.vg_name);
	return ECMD_FAILED;
}