Example #1
0
int wipe_known_signatures(struct cmd_context *cmd, struct device *dev,
			  const char *name, uint32_t types_to_exclude,
			  uint32_t types_no_prompt, int yes, force_t force,
			  int *wiped)
{
#ifdef BLKID_WIPING_SUPPORT
	if (find_config_tree_bool(cmd, allocation_use_blkid_wiping_CFG, NULL))
		return _wipe_known_signatures_with_blkid(dev, name,
							 types_to_exclude,
							 types_no_prompt,
							 yes, force, wiped);
#endif
	return _wipe_known_signatures_with_lvm(dev, name,
					       types_to_exclude,
					       types_no_prompt,
					       yes, force, wiped);
}
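The function above picks between a blkid-based wiper and LVM's native one: a compile-time guard (BLKID_WIPING_SUPPORT) decides whether the blkid path even exists, and a runtime configuration lookup decides whether to take it. Below is a minimal, self-contained sketch of that same guard-plus-runtime-switch pattern, including the "requested but not compiled in" warning that Example #4 adds; every identifier in it (HAVE_FANCY_WIPE, wipe_fancy, wipe_native) is hypothetical and exists only for illustration.

#include <stdio.h>

/* Hypothetical wipe back-ends standing in for the blkid and native paths. */
int wipe_fancy(const char *name)  { printf("fancy wipe of %s\n", name);  return 1; }
int wipe_native(const char *name) { printf("native wipe of %s\n", name); return 1; }

int wipe(const char *name, int use_fancy)
{
#ifdef HAVE_FANCY_WIPE
	/* Feature compiled in: honour the runtime setting. */
	if (use_fancy)
		return wipe_fancy(name);
#else
	/* Feature compiled out: warn if the runtime setting asked for it. */
	if (use_fancy)
		fprintf(stderr, "fancy wiping requested but not compiled in, "
			"falling back to native wiping\n");
#endif
	return wipe_native(name);
}

int main(void)
{
	return wipe("/dev/sdx", 1) ? 0 : 1;
}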
Example #2
0
/* Stop memory getting swapped out */
static void _lock_mem(struct cmd_context *cmd)
{
	_allocate_memory();
	(void)strerror(0);		/* Force libc.mo load */
	(void)dm_udev_get_sync_support(); /* udev is initialized */
	log_very_verbose("Locking memory");

	/*
	 * For a daemon we need to use mlockall() so that even a future
	 * addition of a thread, which may not even use the lvm library,
	 * will not block the memory-locked thread.
	 * Note: assumes _memlock_count_daemon is updated before _memlock_count.
	 */
	_use_mlockall = _memlock_count_daemon ? 1 :
		find_config_tree_bool(cmd, activation_use_mlockall_CFG, NULL);

	if (!_use_mlockall) {
		if (!*_procselfmaps &&
		    dm_snprintf(_procselfmaps, sizeof(_procselfmaps),
				"%s" SELF_MAPS, cmd->proc_dir) < 0) {
			log_error("proc_dir too long");
			return;
		}

		if ((_maps_fd = open(_procselfmaps, O_RDONLY)) < 0) {
			log_sys_error("open", _procselfmaps);
			return;
		}

		if (!_disable_mmap())
			stack;
	}

	if (!_memlock_maps(cmd, LVM_MLOCK, &_mstats))
		stack;

	errno = 0;
	if (((_priority = getpriority(PRIO_PROCESS, 0)) == -1) && errno)
		log_sys_error("getpriority", "");
	else
		if (setpriority(PRIO_PROCESS, 0, _default_priority))
			log_error("setpriority %d failed: %s",
				  _default_priority, strerror(errno));
}
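The routine above either pins the entire address space with mlockall() or locks individual mappings parsed from /proc/self/maps, then raises the process priority while memory stays locked. Below is a minimal, self-contained sketch of the mlockall() path only, using standard POSIX calls; the priority value -18 and the reduced error handling are illustrative choices, not LVM's configured defaults.

#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>

int main(void)
{
	/* Pin current and future mappings so nothing gets swapped out. */
	if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0)
		perror("mlockall");

	/* Remember the old priority, then try to raise it (-18 is illustrative). */
	errno = 0;
	int old_priority = getpriority(PRIO_PROCESS, 0);
	int have_old = !(old_priority == -1 && errno);
	if (!have_old)
		perror("getpriority");
	else if (setpriority(PRIO_PROCESS, 0, -18) != 0)
		fprintf(stderr, "setpriority -18 failed: %s\n", strerror(errno));

	/* ... memory-locked critical section would run here ... */

	/* Restore the previous priority and unlock memory again. */
	if (have_old)
		(void) setpriority(PRIO_PROCESS, 0, old_priority);
	munlockall();
	return 0;
}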
Example #3
0
/*
 * Initial sanity checking of recovery-related command-line arguments.
 * These args are: --restorefile, --uuid, and --physicalvolumesize
 *
 * Output arguments:
 * pp: structure allocated by caller, fields written / validated here
 */
static int _pvcreate_restore_params_from_args(struct cmd_context *cmd, int argc,
					      struct pvcreate_params *pp)
{
	pp->restorefile = arg_str_value(cmd, restorefile_ARG, NULL);

	if (arg_is_set(cmd, restorefile_ARG) && !arg_is_set(cmd, uuidstr_ARG)) {
		log_error("--uuid is required with --restorefile");
		return 0;
	}

	if (!arg_is_set(cmd, restorefile_ARG) && arg_is_set(cmd, uuidstr_ARG)) {
		if (!arg_is_set(cmd, norestorefile_ARG) &&
		    find_config_tree_bool(cmd, devices_require_restorefile_with_uuid_CFG, NULL)) {
			log_error("--restorefile is required with --uuid");
			return 0;
		}
	}

	if (arg_is_set(cmd, uuidstr_ARG) && argc != 1) {
		log_error("Can only set uuid on one volume at once");
		return 0;
	}

	if (arg_is_set(cmd, uuidstr_ARG)) {
		pp->uuid_str = arg_str_value(cmd, uuidstr_ARG, "");
		if (!id_read_format(&pp->pva.id, pp->uuid_str))
			return 0;
		pp->pva.idp = &pp->pva.id;
	}

	if (arg_sign_value(cmd, setphysicalvolumesize_ARG, SIGN_NONE) == SIGN_MINUS) {
		log_error("Physical volume size may not be negative");
		return 0;
	}
	pp->pva.size = arg_uint64_value(cmd, setphysicalvolumesize_ARG, UINT64_C(0));

	if (arg_is_set(cmd, restorefile_ARG) || arg_is_set(cmd, uuidstr_ARG))
		pp->zero = 0;
	return 1;
}
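The checks above enforce that --restorefile and --uuid are given together (subject to configuration), that a UUID is only set on a single volume, and that the requested size is not negative. Below is a minimal, self-contained sketch of that cross-option validation, with option parsing reduced to plain booleans and a signed size; the function name and the simplified rules are hypothetical reductions of the real checks.

#include <stdio.h>

/*
 * Hypothetical reduction of the checks above: have_restorefile and have_uuid
 * stand in for arg_is_set() on --restorefile/--uuid, size for the sign check.
 */
int validate_restore_args(int have_restorefile, int have_uuid, long long size)
{
	if (have_restorefile && !have_uuid) {
		fprintf(stderr, "--uuid is required with --restorefile\n");
		return 0;
	}
	if (!have_restorefile && have_uuid) {
		fprintf(stderr, "--restorefile is required with --uuid\n");
		return 0;
	}
	if (size < 0) {
		fprintf(stderr, "Physical volume size may not be negative\n");
		return 0;
	}
	return 1;
}

int main(void)
{
	/* --restorefile given without --uuid: validation fails, exit non-zero. */
	return validate_restore_args(1, 0, 0) ? 0 : 1;
}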
Example #4
0
int wipe_known_signatures(struct cmd_context *cmd, struct device *dev,
			  const char *name, uint32_t types_to_exclude,
			  uint32_t types_no_prompt, int yes, force_t force,
			  int *wiped)
{
	int blkid_wiping_enabled = find_config_tree_bool(cmd, allocation_use_blkid_wiping_CFG, NULL);

#ifdef BLKID_WIPING_SUPPORT
	if (blkid_wiping_enabled)
		return _wipe_known_signatures_with_blkid(dev, name,
							 types_to_exclude,
							 types_no_prompt,
							 yes, force, wiped);
#endif
	if (blkid_wiping_enabled) {
		log_warn("allocation/use_blkid_wiping=1 configuration setting is set "
			 "while LVM is not compiled with blkid wiping support.");
		log_warn("Falling back to native LVM signature detection.");
	}
	return _wipe_known_signatures_with_lvm(dev, name,
					       types_to_exclude,
					       types_no_prompt,
					       yes, force, wiped);
}
Example #5
0
/*
 * Initial sanity checking of recovery-related command-line arguments.
 * These args are: --restorefile, --uuid, and --physicalvolumesize
 *
 * Output arguments:
 * pp: structure allocated by caller, fields written / validated here
 */
static int pvcreate_restore_params_validate(struct cmd_context *cmd,
					    int argc, char **argv,
					    struct pvcreate_params *pp)
{
	const char *uuid = NULL;
	struct volume_group *vg;
	struct pv_list *existing_pvl;

	if (arg_count(cmd, restorefile_ARG) && !arg_count(cmd, uuidstr_ARG)) {
		log_error("--uuid is required with --restorefile");
		return 0;
	}

	if (!arg_count(cmd, restorefile_ARG) && arg_count(cmd, uuidstr_ARG)) {
		if (!arg_count(cmd, norestorefile_ARG) &&
		    find_config_tree_bool(cmd, devices_require_restorefile_with_uuid_CFG, NULL)) {
			log_error("--restorefile is required with --uuid");
			return 0;
		}
	}

	if (arg_count(cmd, uuidstr_ARG) && argc != 1) {
		log_error("Can only set uuid on one volume at once");
		return 0;
	}

	if (arg_count(cmd, uuidstr_ARG)) {
		uuid = arg_str_value(cmd, uuidstr_ARG, "");
		if (!id_read_format(&pp->rp.id, uuid))
			return 0;
		pp->rp.idp = &pp->rp.id;
		lvmcache_seed_infos_from_lvmetad(cmd); /* need to check for UUID dups */
	}

	if (arg_count(cmd, restorefile_ARG)) {
		pp->rp.restorefile = arg_str_value(cmd, restorefile_ARG, "");
		/* The uuid won't already exist */
		if (!(vg = backup_read_vg(cmd, NULL, pp->rp.restorefile))) {
			log_error("Unable to read volume group from %s",
				  pp->rp.restorefile);
			return 0;
		}
		if (!(existing_pvl = find_pv_in_vg_by_uuid(vg, pp->rp.idp))) {
			release_vg(vg);
			log_error("Can't find uuid %s in backup file %s",
				  uuid, pp->rp.restorefile);
			return 0;
		}
		pp->rp.ba_start = pv_ba_start(existing_pvl->pv);
		pp->rp.ba_size = pv_ba_size(existing_pvl->pv);
		pp->rp.pe_start = pv_pe_start(existing_pvl->pv);
		pp->rp.extent_size = pv_pe_size(existing_pvl->pv);
		pp->rp.extent_count = pv_pe_count(existing_pvl->pv);

		release_vg(vg);
	}

	if (arg_sign_value(cmd, physicalvolumesize_ARG, SIGN_NONE) == SIGN_MINUS) {
		log_error("Physical volume size may not be negative");
		return 0;
	}
	pp->size = arg_uint64_value(cmd, physicalvolumesize_ARG, UINT64_C(0));

	if (arg_count(cmd, restorefile_ARG) || arg_count(cmd, uuidstr_ARG))
		pp->zero = 0;
	return 1;
}
Example #6
0
/*
 * Select a locking type
 * type: locking type; if < 0, then read config tree value
 */
int init_locking(int type, struct cmd_context *cmd, int suppress_messages)
{
	if (getenv("LVM_SUPPRESS_LOCKING_FAILURE_MESSAGES"))
		suppress_messages = 1;

	if (type < 0)
		type = find_config_tree_int(cmd, global_locking_type_CFG, NULL);

	_blocking_supported = find_config_tree_bool(cmd, global_wait_for_locks_CFG, NULL);

	switch (type) {
	case 0:
		init_no_locking(&_locking, cmd, suppress_messages);
		log_warn("WARNING: Locking disabled. Be careful! "
			  "This could corrupt your metadata.");
		return 1;

	case 1:
		log_very_verbose("%sFile-based locking selected.",
				 _blocking_supported ? "" : "Non-blocking ");

		if (!init_file_locking(&_locking, cmd, suppress_messages)) {
			log_error_suppress(suppress_messages,
					   "File-based locking initialisation failed.");
			break;
		}
		return 1;

#ifdef HAVE_LIBDL
	case 2:
		if (!is_static()) {
			log_very_verbose("External locking selected.");
			if (init_external_locking(&_locking, cmd, suppress_messages))
				return 1;
		}
		if (!find_config_tree_bool(cmd, global_fallback_to_clustered_locking_CFG, NULL)) {
			log_error_suppress(suppress_messages, "External locking initialisation failed.");
			break;
		}
#endif

#ifdef CLUSTER_LOCKING_INTERNAL
		log_very_verbose("Falling back to internal clustered locking.");
		/* Fall through */

	case 3:
		log_very_verbose("Cluster locking selected.");
		if (!init_cluster_locking(&_locking, cmd, suppress_messages)) {
			log_error_suppress(suppress_messages,
					   "Internal cluster locking initialisation failed.");
			break;
		}
		return 1;
#endif

	case 4:
		log_verbose("Read-only locking selected. "
			    "Only read operations permitted.");
		if (!init_readonly_locking(&_locking, cmd, suppress_messages))
			break;
		return 1;

	default:
		log_error("Unknown locking type requested.");
		return 0;
	}

	if ((type == 2 || type == 3) &&
	    find_config_tree_bool(cmd, global_fallback_to_local_locking_CFG, NULL)) {
		log_warn_suppress(suppress_messages, "WARNING: Falling back to local file-based locking.");
		log_warn_suppress(suppress_messages,
				  "Volume Groups with the clustered attribute will "
				  "be inaccessible.");
		if (init_file_locking(&_locking, cmd, suppress_messages))
			return 1;
		else
			log_error_suppress(suppress_messages,
					   "File-based locking initialisation failed.");
	}

	if (!ignorelockingfailure())
		return 0;

	log_verbose("Locking disabled - only read operations permitted.");
	init_readonly_locking(&_locking, cmd, suppress_messages);

	return 1;
}
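init_locking() above selects a locking backend from an integer type and, when the requested backend fails to initialise, can degrade to file-based or read-only locking. Below is a minimal, self-contained sketch of that selection-with-fallback shape; the backends and init_* helpers are hypothetical stand-ins, not LVM's locking modules.

#include <stdio.h>

/* Hypothetical backends; the first "works", the second "fails". */
int init_file_backend(void)     { return 1; }
int init_external_backend(void) { return 0; }
int init_readonly_backend(void) { return 1; }

int init_backend(int type, int allow_fallback)
{
	switch (type) {
	case 1:
		if (init_file_backend())
			return 1;
		fprintf(stderr, "File-based backend initialisation failed.\n");
		break;
	case 2:
		if (init_external_backend())
			return 1;
		fprintf(stderr, "External backend initialisation failed.\n");
		break;
	default:
		fprintf(stderr, "Unknown backend type requested.\n");
		return 0;
	}

	/* The requested backend failed: optionally degrade to read-only. */
	if (allow_fallback && init_readonly_backend()) {
		fprintf(stderr, "Falling back to read-only backend.\n");
		return 1;
	}
	return 0;
}

int main(void)
{
	return init_backend(2, 1) ? 0 : 1;
}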
Example #7
0
int config_def_check(struct cmd_context *cmd, struct cft_check_handle *handle)
{
	cfg_def_item_t *def;
	struct dm_config_node *cn;
	char *vp = _cfg_path, rp[CFG_PATH_MAX_LEN];
	size_t rplen;
	int id, r = 1;

	/*
	 * vp = virtual path, it might contain substitutes for variable parts
	 * 	of the path, used while working with the hash
	 * rp = real path, the real path of the config element as found in the
	 *      configuration, used for message output
	 */

	/*
	 * If the check has already been done and 'skip_if_checked' is set,
	 * skip the actual check and use last result if available.
	 * If not available, we must do the check. The global status
	 * is stored in root node.
	 */
	if (handle->skip_if_checked && (handle->status[root_CFG_SECTION] & CFG_USED))
		return handle->status[root_CFG_SECTION] & CFG_VALID;

	/* Nothing to do if checks are disabled and also not forced. */
	if (!handle->force_check && !find_config_tree_bool(cmd, config_checks_CFG, NULL))
		return 1;

	/* Clear 'used' and 'valid' status flags. */
	for (id = 0; id < CFG_COUNT; id++)
		handle->status[id] &= ~(CFG_USED | CFG_VALID);

	/*
	 * Create a hash of all possible configuration
	 * sections and settings with full path as a key.
	 * If section name is variable, use '#' as a substitute.
	 */
	if (!cmd->cft_def_hash) {
		if (!(cmd->cft_def_hash = dm_hash_create(64))) {
			log_error("Failed to create configuration definition hash.");
			r = 0; goto out;
		}
		for (id = 1; id < CFG_COUNT; id++) {
			def = cfg_def_get_item_p(id);
			if (!cfg_def_get_path(def)) {
				dm_hash_destroy(cmd->cft_def_hash);
				cmd->cft_def_hash = NULL;
				r = 0; goto out;
			}
			if (!dm_hash_insert(cmd->cft_def_hash, vp, def)) {
				log_error("Failed to insert configuration to hash.");
				r = 0;
				goto out;
			}
		}
	}

	/*
	 * Mark this handle as used so next time we know that the check
	 * has already been done and so we can just reuse the previous
	 * status instead of running this whole check again.
	 */
	handle->status[root_CFG_SECTION] |= CFG_USED;

	/*
	 * Allow only sections as top-level elements.
	 * Iterate top-level sections and dive deeper.
	 * If any of subsequent checks fails, the whole check fails.
	 */
	for (cn = handle->cft->root; cn; cn = cn->sib) {
		if (!cn->v) {
			/* top level node: vp=vp, rp=rp */
			if (!_config_def_check_node(handle, vp, vp, rp, rp,
						    CFG_PATH_MAX_LEN,
						    cn, cmd->cft_def_hash)) {
				r = 0; continue;
			}
			rplen = strlen(rp);
			if (!_config_def_check_tree(handle,
						    vp, vp + strlen(vp),
						    rp, rp + rplen,
						    CFG_PATH_MAX_LEN - rplen,
						    cn, cmd->cft_def_hash))
				r = 0;
		} else {
			log_error_suppress(handle->suppress_messages,
				"Configuration setting \"%s\" invalid. "
				"It's not part of any section.", cn->key);
			r = 0;
		}
	}
out:
	if (r)
		handle->status[root_CFG_SECTION] |= CFG_VALID;
	else
		handle->status[root_CFG_SECTION] &= ~CFG_VALID;

	return r;
}
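config_def_check() above validates the parsed configuration tree against a hash of all known setting definitions, keyed by the full "section/setting" path. Below is a minimal, self-contained sketch of the same idea with the hash replaced by a linear scan over a small static table; all paths and input keys here are hypothetical examples, not the real definition set.

#include <stdio.h>
#include <string.h>

/* Hypothetical table of known "section/setting" paths. */
static const char *_known_paths[] = {
	"devices/dir",
	"activation/use_mlockall",
	"allocation/use_blkid_wiping",
};

static int _path_is_known(const char *path)
{
	size_t i;

	for (i = 0; i < sizeof(_known_paths) / sizeof(_known_paths[0]); i++)
		if (!strcmp(_known_paths[i], path))
			return 1;
	return 0;
}

int main(void)
{
	/* Hypothetical keys as they might come from a parsed config tree. */
	const char *input[] = { "devices/dir", "devices/typo_setting" };
	size_t i;
	int r = 1;

	for (i = 0; i < sizeof(input) / sizeof(input[0]); i++)
		if (!_path_is_known(input[i])) {
			fprintf(stderr, "Configuration setting \"%s\" unknown.\n",
				input[i]);
			r = 0;
		}

	return r ? 0 : 1;
}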