/* Get kernel driver protocol version and compare it with library version. */
static int _check_version(char *version, size_t size)
{
    struct dm_task *task;
    int r;

    if (!(task = dm_task_create(DM_DEVICE_VERSION))) {
        log_error("Failed to get device-mapper version");
        version[0] = '\0';
        return 0;
    }

    r = dm_task_run(task);
    dm_task_get_driver_version(task, version, size);
    dm_task_destroy(task);

    return r;
}
static int virStorageBackendIsMultipath(const char *devname)
{
    int ret = 0;
    struct dm_task *dmt = NULL;
    void *next = NULL;
    uint64_t start, length;
    char *target_type = NULL;
    char *params = NULL;

    dmt = dm_task_create(DM_DEVICE_TABLE);
    if (dmt == NULL) {
        ret = -1;
        goto out;
    }

    if (dm_task_set_name(dmt, devname) == 0) {
        ret = -1;
        goto out;
    }

    dm_task_no_open_count(dmt);

    if (!dm_task_run(dmt)) {
        ret = -1;
        goto out;
    }

    dm_get_next_target(dmt, next, &start, &length, &target_type, &params);

    if (target_type == NULL) {
        ret = -1;
        goto out;
    }

    if (STREQ(target_type, "multipath")) {
        ret = 1;
    }

 out:
    if (dmt != NULL) {
        dm_task_destroy(dmt);
    }
    return ret;
}
int dm_status_device(const char *name)
{
    struct dm_task *dmt;
    struct dm_info dmi;
    uint64_t start, length;
    char *target_type, *params;
    void *next = NULL;
    int r = -EINVAL;

    if (!(dmt = dm_task_create(DM_DEVICE_STATUS)))
        return -EINVAL;

    if (!dm_task_set_name(dmt, name)) {
        r = -EINVAL;
        goto out;
    }

    if (!dm_task_run(dmt)) {
        r = -EINVAL;
        goto out;
    }

    if (!dm_task_get_info(dmt, &dmi)) {
        r = -EINVAL;
        goto out;
    }

    if (!dmi.exists) {
        r = -ENODEV;
        goto out;
    }

    next = dm_get_next_target(dmt, next, &start, &length, &target_type, &params);
    if (!target_type || strcmp(target_type, DM_CRYPT_TARGET) != 0 ||
        start != 0 || next)
        r = -EINVAL;
    else
        r = (dmi.open_count > 0);
out:
    if (dmt)
        dm_task_destroy(dmt);

    return r;
}
static int dm_device_remove_byname(const char *dev_name)
{
    struct dm_task *dmt;
    int ret = 0;

    BL_LOG_INFO("%s: %s\n", __func__, dev_name);

    dmt = dm_task_create(DM_DEVICE_REMOVE);
    if (!dmt)
        return 0;

    ret = dm_task_set_name(dmt, dev_name) && dm_task_run(dmt);

    dm_task_update_nodes();
    dm_task_destroy(dmt);

    return ret;
}
void dm_check_version(void)
{
    char *saveptr, *version;
    struct dm_task *task = dm_task_create(DM_DEVICE_VERSION);

    if (!task) {
        die("dm_task_create");
    }

    dm_task_run(task);

    version = steg_malloc(1024);
    dm_task_get_driver_version(task, version, 1024);
    version[1023] = 0;

    strtok_r(version, ".", &saveptr);
    if (strncmp(version, MESG_STR("4"))) {
        die("unknown device mapper version");
    }

    steg_free(version);
    dm_task_destroy(task);
}
int lv_has_target_type(struct dm_pool *mem, struct logical_volume *lv,
                       const char *layer, const char *target_type)
{
    int r = 0;
    char *dlid;
    struct dm_task *dmt;
    struct dm_info info;
    void *next = NULL;
    uint64_t start, length;
    char *type = NULL;
    char *params = NULL;

    if (!(dlid = build_dm_uuid(mem, lv->lvid.s, layer)))
        return_0;

    if (!(dmt = _setup_task(NULL, dlid, 0, DM_DEVICE_STATUS, 0, 0)))
        return_0;

    if (!dm_task_no_open_count(dmt))
        log_error("Failed to disable open_count");

    if (!dm_task_run(dmt))
        goto_out;

    if (!dm_task_get_info(dmt, &info) || !info.exists)
        goto_out;

    do {
        next = dm_get_next_target(dmt, next, &start, &length, &type, &params);
        if (type && strncmp(type, target_type, strlen(target_type)) == 0) {
            if (info.live_table)
                r = 1;
            break;
        }
    } while (next);

out:
    dm_task_destroy(dmt);

    return r;
}
int dm_umount_aspect(char *path)
{
    int e = -EIO;
    struct dm_task *task;

    if (!(task = dm_task_create(DM_DEVICE_REMOVE))) {
        goto fail;
    }
    if (!dm_task_set_name(task, path)) {
        goto fail;
    }
    if (!dm_task_run(task)) {
        e = -errno;
        goto fail;
    }
    e = 0;
fail:
    if (task)    /* dm_task_create() may have failed and left task NULL */
        dm_task_destroy(task);
    return e;
}
static int dm_drv_version(unsigned int *version, char *str)
{
    int r = 2;
    struct dm_task *dmt;
    struct dm_versions *target;
    struct dm_versions *last_target;
    unsigned int *v;

    if (!(dmt = dm_task_create(DM_DEVICE_LIST_VERSIONS)))
        return 1;

    dm_task_no_open_count(dmt);

    if (!dm_task_run(dmt)) {
        condlog(0, "Can not communicate with kernel DM");
        goto out;
    }

    target = dm_task_get_versions(dmt);

    do {
        last_target = target;
        if (!strncmp(str, target->name, strlen(str))) {
            r = 1;
            break;
        }
        target = (void *) target + target->next;
    } while (last_target != target);

    if (r == 2) {
        condlog(0, "DM %s kernel driver not loaded", str);
        goto out;
    }

    v = target->version;
    version[0] = v[0];
    version[1] = v[1];
    version[2] = v[2];
    r = 0;
out:
    dm_task_destroy(dmt);
    return r;
}
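/*
 * A minimal usage sketch for dm_drv_version() above (not taken from the
 * original source): query the version of the "multipath" kernel target into a
 * three-element array.  The target name and the logging call are assumptions
 * chosen for illustration.
 */
static void example_log_multipath_version(void)
{
    unsigned int v[3];

    if (dm_drv_version(v, "multipath") == 0)
        condlog(3, "dm-multipath target version %u.%u.%u", v[0], v[1], v[2]);
}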
int dm_get_info(char *mapname, struct dm_info **dmi)
{
    int r = 1;
    struct dm_task *dmt = NULL;

    if (!mapname)
        return 1;

    if (!*dmi)
        *dmi = alloc_dminfo();

    if (!*dmi)
        return 1;

    if (!(dmt = dm_task_create(DM_DEVICE_INFO)))
        goto out;

    if (!dm_task_set_name(dmt, mapname))
        goto out;

    dm_task_no_open_count(dmt);

    if (!dm_task_run(dmt))
        goto out;

    if (!dm_task_get_info(dmt, *dmi))
        goto out;

    r = 0;
out:
    if (r) {
        memset(*dmi, 0, sizeof(struct dm_info));
        FREE(*dmi);
        *dmi = NULL;
    }
    if (dmt)
        dm_task_destroy(dmt);
    return r;
}
/*
 * Create a device via device mapper.
 * Returns 0 when creation fails, otherwise the device number of the
 * created device.
 */
static uint64_t dm_device_create_mapped(const char *dev_name, struct bl_dm_table *p)
{
    struct dm_task *dmt;
    struct dm_info dminfo;
    int ret = 0;

    dmt = dm_task_create(DM_DEVICE_CREATE);
    if (!dmt) {
        BL_LOG_ERR("Create dm_task for %s failed\n", dev_name);
        return 0;
    }

    ret = dm_task_set_name(dmt, dev_name);
    if (!ret)
        goto err_out;

    while (p) {
        ret = dm_task_add_target(dmt, p->offset, p->size,
                                 p->target_type, p->params);
        if (!ret)
            goto err_out;
        p = p->next;
    }

    ret = dm_task_run(dmt) && dm_task_get_info(dmt, &dminfo) && dminfo.exists;
    if (!ret)
        goto err_out;

    dm_task_update_nodes();

err_out:
    dm_task_destroy(dmt);
    if (!ret) {
        BL_LOG_ERR("Create device %s failed\n", dev_name);
        return 0;
    }

    return MKDEV(dminfo.major, dminfo.minor);
}
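/*
 * For reference, a standalone sketch of the same create sequence using
 * libdevmapper directly: one hypothetical "linear" target mapping the first
 * 2048 sectors of the new map onto /dev/sdb.  The device name, size and
 * backing device are made up for illustration; sectors are 512 bytes.
 */
#include <libdevmapper.h>

static int example_create_linear_map(void)
{
    struct dm_task *dmt;
    struct dm_info info;
    int ok = 0;

    if (!(dmt = dm_task_create(DM_DEVICE_CREATE)))
        return 0;

    /* set the map name, add one target line, then run the ioctl */
    ok = dm_task_set_name(dmt, "example-linear") &&
         dm_task_add_target(dmt, 0, 2048, "linear", "/dev/sdb 0") &&
         dm_task_run(dmt) &&
         dm_task_get_info(dmt, &info) &&
         info.exists;

    dm_task_update_nodes();   /* sync /dev/mapper nodes, as the helper above does */
    dm_task_destroy(dmt);
    return ok;
}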
/* Look up a device by uuid, name or major:minor and return its info task. */
static struct dm_task *_get_device_info(const struct dm_event_handler *dmevh)
{
    struct dm_task *dmt;
    struct dm_info info;

    if (!(dmt = dm_task_create(DM_DEVICE_INFO))) {
        log_error("_get_device_info: dm_task creation for info failed");
        return NULL;
    }

    if (dmevh->uuid)
        dm_task_set_uuid(dmt, dmevh->uuid);
    else if (dmevh->dev_name)
        dm_task_set_name(dmt, dmevh->dev_name);
    else if (dmevh->major && dmevh->minor) {
        dm_task_set_major(dmt, dmevh->major);
        dm_task_set_minor(dmt, dmevh->minor);
    }

    /* FIXME Add name or uuid or devno to messages */
    if (!dm_task_run(dmt)) {
        log_error("_get_device_info: dm_task_run() failed");
        goto failed;
    }

    if (!dm_task_get_info(dmt, &info)) {
        log_error("_get_device_info: failed to get info for device");
        goto failed;
    }

    if (!info.exists) {
        log_error("_get_device_info: device not found");
        goto failed;
    }

    return dmt;

failed:
    dm_task_destroy(dmt);
    return NULL;
}
extern int dm_is_mpath(const char *name)
{
    int r = 0;
    struct dm_task *dmt;
    struct dm_info info;
    uint64_t start, length;
    char *target_type = NULL;
    char *params;
    const char *uuid;

    if (!(dmt = dm_task_create(DM_DEVICE_TABLE)))
        return 0;

    if (!dm_task_set_name(dmt, name))
        goto out;

    dm_task_no_open_count(dmt);

    if (!dm_task_run(dmt))
        goto out;

    if (!dm_task_get_info(dmt, &info) || !info.exists)
        goto out;

    uuid = dm_task_get_uuid(dmt);

    if (!uuid || strncmp(uuid, UUID_PREFIX, UUID_PREFIX_LEN) != 0)
        goto out;

    /* Fetch 1st target */
    dm_get_next_target(dmt, NULL, &start, &length, &target_type, &params);

    if (!target_type || strcmp(target_type, TGT_MPATH) != 0)
        goto out;

    r = 1;
out:
    dm_task_destroy(dmt);
    return r;
}
static int _dm_message(const char *name, const char *msg)
{
    int r = 0;
    struct dm_task *dmt;

    if (!(dmt = dm_task_create(DM_DEVICE_TARGET_MSG)))
        return 0;

    if (name && !dm_task_set_name(dmt, name))
        goto out;

    if (!dm_task_set_sector(dmt, (uint64_t) 0))
        goto out;

    if (!dm_task_set_message(dmt, msg))
        goto out;

    r = dm_task_run(dmt);
out:
    dm_task_destroy(dmt);
    return r;
}
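/*
 * The message string handed to _dm_message() is interpreted by the target
 * mapped at the given sector, so its contents are target-specific.  As a
 * hedged example only: dm-multipath accepts "fail_if_no_path" and
 * "queue_if_no_path", so a caller could toggle queueing on a map roughly like
 * this (the map name "mpatha" is made up for illustration).
 */
static int example_disable_queueing(void)
{
    /* returns non-zero on success, 0 if the message was rejected */
    return _dm_message("mpatha", "fail_if_no_path");
}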
static int dm_simplecmd(int task, const char *name, int no_flush, int need_sync,
                        uint16_t udev_flags, int deferred_remove)
{
    int r = 0;
    int udev_wait_flag = (need_sync && (task == DM_DEVICE_RESUME ||
                                        task == DM_DEVICE_REMOVE));
    uint32_t cookie = 0;
    struct dm_task *dmt;

    if (!(dmt = dm_task_create(task)))
        return 0;

    if (!dm_task_set_name(dmt, name))
        goto out;

    dm_task_no_open_count(dmt);
    dm_task_skip_lockfs(dmt);      /* for DM_DEVICE_RESUME */
#ifdef LIBDM_API_FLUSH
    if (no_flush)
        dm_task_no_flush(dmt);     /* for DM_DEVICE_SUSPEND/RESUME */
#endif
#ifdef LIBDM_API_DEFERRED
    if (do_deferred(deferred_remove))
        dm_task_deferred_remove(dmt);
#endif
    if (udev_wait_flag &&
        !dm_task_set_cookie(dmt, &cookie,
                            DM_UDEV_DISABLE_LIBRARY_FALLBACK | udev_flags))
        goto out;

    r = dm_task_run(dmt);

    if (udev_wait_flag)
        dm_udev_wait(cookie);
out:
    dm_task_destroy(dmt);
    return r;
}
static int dm_map_present(char *str, char **uuid)
{
    int r = 0;
    struct dm_task *dmt;
    const char *uuidtmp;
    struct dm_info info;

    if (uuid)
        *uuid = NULL;

    if (!(dmt = dm_task_create(DM_DEVICE_INFO)))
        return 0;

    if (!dm_task_set_name(dmt, str))
        goto out;

    dm_task_no_open_count(dmt);

    if (!dm_task_run(dmt))
        goto out;

    if (!dm_task_get_info(dmt, &info))
        goto out;

    if (!info.exists)
        goto out;

    r = 1;
    if (uuid) {
        uuidtmp = dm_task_get_uuid(dmt);
        if (uuidtmp && strlen(uuidtmp))
            *uuid = strdup(uuidtmp);
    }
out:
    dm_task_destroy(dmt);
    return r;
}
int dm_simplecmd(int task, const char *name, int no_flush, uint16_t udev_flags)
{
    int r = 0;
    int udev_wait_flag = (task == DM_DEVICE_RESUME || task == DM_DEVICE_REMOVE);
#ifdef LIBDM_API_COOKIE
    uint32_t cookie = 0;
#endif
    struct dm_task *dmt;

    if (!(dmt = dm_task_create(task)))
        return 0;

    if (!dm_task_set_name(dmt, name))
        goto out;

    dm_task_no_open_count(dmt);
    dm_task_skip_lockfs(dmt);

    if (no_flush)
        dm_task_no_flush(dmt);

#ifdef LIBDM_API_COOKIE
    if (!udev_sync)
        udev_flags |= DM_UDEV_DISABLE_LIBRARY_FALLBACK;
    if (udev_wait_flag && !dm_task_set_cookie(dmt, &cookie, udev_flags))
        goto out;
#endif
    r = dm_task_run(dmt);

#ifdef LIBDM_API_COOKIE
    if (udev_wait_flag)
        dm_udev_wait(cookie);
#endif
out:
    dm_task_destroy(dmt);
    return r;
}
/* Check if a device exists and fill in its name and major:minor. */
static int _fill_device_data(struct thread_status *ts)
{
    struct dm_task *dmt;
    struct dm_info dmi;

    if (!ts->device.uuid)
        return 0;

    ts->device.name = NULL;
    ts->device.major = ts->device.minor = 0;

    dmt = dm_task_create(DM_DEVICE_INFO);
    if (!dmt)
        return 0;

    dm_task_set_uuid(dmt, ts->device.uuid);
    if (!dm_task_run(dmt))
        goto fail;

    ts->device.name = dm_strdup(dm_task_get_name(dmt));
    if (!ts->device.name)
        goto fail;

    if (!dm_task_get_info(dmt, &dmi))
        goto fail;

    ts->device.major = dmi.major;
    ts->device.minor = dmi.minor;
    dm_task_destroy(dmt);

    return 1;

fail:
    dm_task_destroy(dmt);
    dm_free(ts->device.name);

    return 0;
}
static int virStorageBackendGetMaps(virStoragePoolObjPtr pool)
{
    int retval = 0;
    struct dm_task *dmt = NULL;
    struct dm_names *names = NULL;

    if (!(dmt = dm_task_create(DM_DEVICE_LIST))) {
        retval = 1;
        goto out;
    }

    dm_task_no_open_count(dmt);

    if (!dm_task_run(dmt)) {
        retval = 1;
        goto out;
    }

    if (!(names = dm_task_get_names(dmt))) {
        retval = 1;
        goto out;
    }

    if (!names->dev) {
        /* No devices found */
        goto out;
    }

    virStorageBackendCreateVols(pool, names);

 out:
    if (dmt != NULL) {
        dm_task_destroy(dmt);
    }
    return retval;
}
extern int dm_get_map(const char *name, unsigned long long *size, char *outparams)
{
    int r = 1;
    struct dm_task *dmt;
    uint64_t start, length;
    char *target_type = NULL;
    char *params = NULL;

    if (!(dmt = dm_task_create(DM_DEVICE_TABLE)))
        return 1;

    if (!dm_task_set_name(dmt, name))
        goto out;

    dm_task_no_open_count(dmt);

    if (!dm_task_run(dmt))
        goto out;

    /* Fetch 1st target */
    dm_get_next_target(dmt, NULL, &start, &length, &target_type, &params);

    if (size)
        *size = length;

    if (!outparams) {
        r = 0;
        goto out;
    }

    /* snprintf() returning PARAMS_SIZE or more means the params were truncated */
    if (snprintf(outparams, PARAMS_SIZE, "%s", params) < PARAMS_SIZE)
        r = 0;
out:
    dm_task_destroy(dmt);
    return r;
}
int dm_prereq(char *str, int x, int y, int z)
{
    int r = 1;
    struct dm_task *dmt;
    struct dm_versions *target;
    struct dm_versions *last_target;

    if (!(dmt = dm_task_create(DM_DEVICE_LIST_VERSIONS)))
        return 1;

    dm_task_no_open_count(dmt);

    if (!dm_task_run(dmt))
        goto out;

    target = dm_task_get_versions(dmt);

    /* Fetch targets and print 'em */
    do {
        last_target = target;

        if (!strncmp(str, target->name, strlen(str)) &&
            /* dummy prereq on multipath version */
            target->version[0] >= x &&
            target->version[1] >= y &&
            target->version[2] >= z)
            r = 0;

        target = (void *) target + target->next;
    } while (last_target != target);

out:
    dm_task_destroy(dmt);
    return r;
}
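/*
 * A hedged usage sketch for dm_prereq(): require that the "multipath" kernel
 * target is loaded and reports at least version 1.0.3.  The target name and
 * version numbers are illustrative only.  Note that the per-component ">="
 * comparison above is only a rough check, as the "dummy prereq" comment
 * admits: a target reporting 2.0.0 would fail a 1.0.3 requirement here.
 */
static int example_require_multipath_target(void)
{
    /* returns 0 when the requirement is met, 1 otherwise */
    return dm_prereq("multipath", 1, 0, 3);
}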
static int dm_get_deferred_remove(char *mapname)
{
    int r = -1;
    struct dm_task *dmt;
    struct dm_info info;

    if (!(dmt = dm_task_create(DM_DEVICE_INFO)))
        return -1;

    if (!dm_task_set_name(dmt, mapname))
        goto out;

    if (!dm_task_run(dmt))
        goto out;

    if (!dm_task_get_info(dmt, &info))
        goto out;

    r = info.deferred_remove;
out:
    dm_task_destroy(dmt);
    return r;
}
/**
 * bd_mpath_is_mpath_member:
 * @device: device to test
 * @error: (out): place to store error (if any)
 *
 * Returns: %TRUE if the device is a multipath member, %FALSE if not or an error
 * appeared when queried (@error is set in those cases)
 */
gboolean bd_mpath_is_mpath_member (const gchar *device, GError **error) {
    struct dm_task *task_names = NULL;
    struct dm_names *names = NULL;
    gchar *symlink = NULL;
    guint64 next = 0;
    gchar **deps = NULL;
    gchar **dev_name = NULL;
    gboolean ret = FALSE;

    if (geteuid () != 0) {
        g_set_error (error, BD_MPATH_ERROR, BD_MPATH_ERROR_NOT_ROOT,
                     "Not running as root, cannot query DM maps");
        return FALSE;
    }

    /* we check if the 'device' is a dependency of any multipath map */
    /* get maps */
    task_names = dm_task_create(DM_DEVICE_LIST);
    if (!task_names) {
        g_warning ("Failed to create DM task");
        g_set_error (error, BD_MPATH_ERROR, BD_MPATH_ERROR_DM_ERROR,
                     "Failed to create DM task");
        return FALSE;
    }
    dm_task_run(task_names);
    names = dm_task_get_names(task_names);

    if (!names || !names->dev) {
        /* no maps to check; destroy the task instead of leaking it */
        dm_task_destroy (task_names);
        return FALSE;
    }

    /* in case the device is a symlink, we need to resolve it because the
       maps' deps are devices and not their symlinks */
    if (g_str_has_prefix (device, "/dev/mapper/") || g_str_has_prefix (device, "/dev/md/")) {
        symlink = g_file_read_link (device, error);
        if (!symlink) {
            /* the device doesn't exist and thus is not an mpath member */
            g_clear_error (error);
            dm_task_destroy (task_names);
            return FALSE;
        }

        /* the symlink starts with "../" */
        device = symlink + 3;
    }
    if (g_str_has_prefix (device, "/dev/"))
        device += 5;

    /* check all maps */
    do {
        names = (void *)names + next;
        next = names->next;
        /* we are only interested in multipath maps */
        if (map_is_multipath (names->name, error)) {
            deps = get_map_deps (names->name, NULL, error);
            if (*error) {
                g_prefix_error (error, "Failed to determine deps for '%s'", names->name);
                g_free (symlink);
                dm_task_destroy (task_names);
                return FALSE;
            }
            for (dev_name = deps; !ret && *dev_name; dev_name++)
                ret = (g_strcmp0 (*dev_name, device) == 0);
            g_strfreev (deps);
        } else if (*error) {
            g_prefix_error (error, "Failed to determine map's target for '%s'", names->name);
            g_free (symlink);
            dm_task_destroy (task_names);
            return FALSE;
        }
    } while (!ret && next);

    g_free (symlink);
    dm_task_destroy (task_names);
    return ret;
}
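/*
 * A minimal caller sketch for bd_mpath_is_mpath_member(), following the GLib
 * error convention described in the doc comment above.  The device path is
 * arbitrary; %FALSE with no error set simply means "not a member".
 */
static void example_check_member (void)
{
    GError *error = NULL;
    gboolean member;

    member = bd_mpath_is_mpath_member ("/dev/sda", &error);
    if (error) {
        g_warning ("mpath query failed: %s", error->message);
        g_clear_error (&error);
        return;
    }
    g_print ("/dev/sda %s a multipath member\n", member ? "is" : "is not");
}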
static int _dm_task_run_v1(struct dm_task *dmt)
{
    struct dm_ioctl_v1 *dmi;
    unsigned int command;

    dmi = _flatten_v1(dmt);
    if (!dmi) {
        log_error("Couldn't create ioctl argument.");
        return 0;
    }

    if (!_open_control())
        return 0;

    if ((unsigned) dmt->type >= (sizeof(_cmd_data_v1) / sizeof(*_cmd_data_v1))) {
        log_error(INTERNAL_ERROR "unknown device-mapper task %d", dmt->type);
        goto bad;
    }

    command = _cmd_data_v1[dmt->type].cmd;

    if (dmt->type == DM_DEVICE_TABLE)
        dmi->flags |= DM_STATUS_TABLE_FLAG;

    if (dmt->new_uuid) {
        log_error("Changing UUID is not supported by kernel.");
        goto bad;
    }

    log_debug("dm %s %s %s%s%s [%u]", _cmd_data_v1[dmt->type].name,
              dmi->name, dmi->uuid, dmt->newname ? " " : "",
              dmt->newname ? dmt->newname : "", dmi->data_size);
    if (dmt->type == DM_DEVICE_LIST) {
        if (!_dm_names_v1(dmi))
            goto bad;
    }
#ifdef DM_IOCTLS
    else if (ioctl(_control_fd, command, dmi) < 0) {
        if (_log_suppress)
            log_verbose("device-mapper: %s ioctl failed: %s",
                        _cmd_data_v1[dmt->type].name, strerror(errno));
        else
            log_error("device-mapper: %s ioctl failed: %s",
                      _cmd_data_v1[dmt->type].name, strerror(errno));
        goto bad;
    }
#else /* Userspace alternative for testing */
#endif

    if (dmi->flags & DM_BUFFER_FULL_FLAG)
        /* FIXME Increase buffer size and retry operation (if query) */
        log_error("WARNING: libdevmapper buffer too small for data");

    switch (dmt->type) {
    case DM_DEVICE_CREATE:
        add_dev_node(dmt->dev_name, MAJOR(dmi->dev), MINOR(dmi->dev),
                     dmt->uid, dmt->gid, dmt->mode, 0);
        break;

    case DM_DEVICE_REMOVE:
        rm_dev_node(dmt->dev_name, 0);
        break;

    case DM_DEVICE_RENAME:
        rename_dev_node(dmt->dev_name, dmt->newname, 0);
        break;

    case DM_DEVICE_MKNODES:
        if (dmi->flags & DM_EXISTS_FLAG)
            add_dev_node(dmt->dev_name, MAJOR(dmi->dev), MINOR(dmi->dev),
                         dmt->uid, dmt->gid, dmt->mode, 0);
        else
            rm_dev_node(dmt->dev_name, 0);
        break;

    case DM_DEVICE_STATUS:
    case DM_DEVICE_TABLE:
        if (!_unmarshal_status_v1(dmt, dmi))
            goto bad;
        break;

    case DM_DEVICE_SUSPEND:
    case DM_DEVICE_RESUME:
        dmt->type = DM_DEVICE_INFO;
        if (!dm_task_run(dmt))
            goto bad;
        _dm_zfree_dmi_v1(dmi);    /* We'll use what info returned */
        return 1;
    }

    dmt->dmi.v1 = dmi;
    return 1;

bad:
    _dm_zfree_dmi_v1(dmi);
    return 0;
}
static int _create_and_load_v4(struct dm_task *dmt)
{
    struct dm_task *task;
    int r;
    uint32_t cookie;

    /* Use new task struct to create the device */
    if (!(task = dm_task_create(DM_DEVICE_CREATE))) {
        log_error("Failed to create device-mapper task struct");
        _udev_complete(dmt);
        return 0;
    }

    /* Copy across relevant fields */
    if (dmt->dev_name && !dm_task_set_name(task, dmt->dev_name)) {
        dm_task_destroy(task);
        _udev_complete(dmt);
        return 0;
    }

    if (dmt->uuid && !dm_task_set_uuid(task, dmt->uuid)) {
        dm_task_destroy(task);
        _udev_complete(dmt);
        return 0;
    }

    task->major = dmt->major;
    task->minor = dmt->minor;
    task->uid = dmt->uid;
    task->gid = dmt->gid;
    task->mode = dmt->mode;
    /* FIXME: Just for udev_check in dm_task_run. Can we avoid this? */
    task->event_nr = dmt->event_nr & DM_UDEV_FLAGS_MASK;
    task->cookie_set = dmt->cookie_set;

    r = dm_task_run(task);
    dm_task_destroy(task);

    if (!r) {
        _udev_complete(dmt);
        return 0;
    }

    /* Next load the table */
    if (!(task = dm_task_create(DM_DEVICE_RELOAD))) {
        log_error("Failed to create device-mapper task struct");
        _udev_complete(dmt);
        r = 0;
        goto revert;
    }

    /* Copy across relevant fields */
    if (dmt->dev_name && !dm_task_set_name(task, dmt->dev_name)) {
        dm_task_destroy(task);
        _udev_complete(dmt);
        r = 0;
        goto revert;
    }

    task->read_only = dmt->read_only;
    task->head = dmt->head;
    task->tail = dmt->tail;

    r = dm_task_run(task);

    task->head = NULL;
    task->tail = NULL;
    dm_task_destroy(task);

    if (!r) {
        _udev_complete(dmt);
        goto revert;
    }

    /* Use the original structure last so the info will be correct */
    dmt->type = DM_DEVICE_RESUME;
    dm_free(dmt->uuid);
    dmt->uuid = NULL;

    r = dm_task_run(dmt);

    if (r)
        return r;

revert:
    dmt->type = DM_DEVICE_REMOVE;
    dm_free(dmt->uuid);
    dmt->uuid = NULL;

    /*
     * Also udev-synchronize "remove" dm task that is a part of this revert!
     * But only if the original dm task was supposed to be synchronized.
     */
    if (dmt->cookie_set) {
        cookie = (dmt->event_nr & ~DM_UDEV_FLAGS_MASK) |
                 (DM_COOKIE_MAGIC << DM_UDEV_FLAGS_SHIFT);
        dm_task_set_cookie(dmt, &cookie,
                           (dmt->event_nr & DM_UDEV_FLAGS_MASK) >>
                           DM_UDEV_FLAGS_SHIFT);
    }

    if (!dm_task_run(dmt))
        log_error("Failed to revert device creation.");

    return r;
}
void grub_util_iterate_devices (int NESTED_FUNC_ATTR (*hook) (const char *, int),
                                int floppy_disks)
{
  int i;

  clear_seen_devices ();

  /* Floppies.  */
  for (i = 0; i < floppy_disks; i++)
    {
      char name[16];
      struct stat st;

      get_floppy_disk_name (name, i);
      if (stat (name, &st) < 0)
        break;
      /* In floppies, write the map, whether check_device_readable_unique
         succeeds or not, because the user just may not insert floppies.  */
      if (hook (name, 1))
        goto out;
    }

#ifdef __linux__
  {
    DIR *dir = opendir ("/dev/disk/by-id");

    if (dir)
      {
        struct dirent *entry;
        struct device *devs;
        size_t devs_len = 0, devs_max = 1024, i;

        devs = xmalloc (devs_max * sizeof (*devs));

        /* Dump all the directory entries into names, resizing if
           necessary.  */
        for (entry = readdir (dir); entry; entry = readdir (dir))
          {
            /* Skip current and parent directory entries.  */
            if (strcmp (entry->d_name, ".") == 0
                || strcmp (entry->d_name, "..") == 0)
              continue;
            /* Skip partition entries.  */
            if (strstr (entry->d_name, "-part"))
              continue;
            /* Skip device-mapper entries; we'll handle the ones we want
               later.  */
            if (strncmp (entry->d_name, "dm-", sizeof ("dm-") - 1) == 0)
              continue;
            /* Skip RAID entries; they are handled by upper layers.  */
            if (strncmp (entry->d_name, "md-", sizeof ("md-") - 1) == 0)
              continue;
            if (devs_len >= devs_max)
              {
                devs_max *= 2;
                devs = xrealloc (devs, devs_max * sizeof (*devs));
              }
            devs[devs_len].stable
              = xasprintf ("/dev/disk/by-id/%s", entry->d_name);
            devs[devs_len].kernel
              = canonicalize_file_name (devs[devs_len].stable);
            devs_len++;
          }

        qsort (devs, devs_len, sizeof (*devs), &compare_devices);

        closedir (dir);

        /* Now add all the devices in sorted order.  */
        for (i = 0; i < devs_len; ++i)
          {
            if (check_device_readable_unique (devs[i].stable))
              {
                if (hook (devs[i].stable, 0))
                  goto out;
              }
            free (devs[i].stable);
            free (devs[i].kernel);
          }
        free (devs);
      }
  }

  if (have_devfs ())
    {
      i = 0;
      while (1)
        {
          char discn[32];
          char name[PATH_MAX];
          struct stat st;

          /* Linux creates symlinks "/dev/discs/discN" for convenience.
             The way to number disks is the same as GRUB's.  */
          sprintf (discn, "/dev/discs/disc%d", i++);
          if (stat (discn, &st) < 0)
            break;

          if (realpath (discn, name))
            {
              strcat (name, "/disc");
              if (hook (name, 0))
                goto out;
            }
        }
      goto out;
    }
#endif /* __linux__ */

  /* IDE disks.  */
  for (i = 0; i < 96; i++)
    {
      char name[16];

      get_ide_disk_name (name, i);
      if (check_device_readable_unique (name))
        {
          if (hook (name, 0))
            goto out;
        }
    }

#ifdef __FreeBSD_kernel__
  /* IDE disks using ATA Direct Access driver.  */
  if (get_kfreebsd_version () >= 800000)
    for (i = 0; i < 96; i++)
      {
        char name[16];

        get_ada_disk_name (name, i);
        if (check_device_readable_unique (name))
          {
            if (hook (name, 0))
              goto out;
          }
      }

  /* ATARAID disks.  */
  for (i = 0; i < 8; i++)
    {
      char name[20];

      get_ataraid_disk_name (name, i);
      if (check_device_readable_unique (name))
        {
          if (hook (name, 0))
            goto out;
        }
    }
#endif

#ifdef __linux__
  /* Virtio disks.  */
  for (i = 0; i < 26; i++)
    {
      char name[16];

      get_virtio_disk_name (name, i);
      if (check_device_readable_unique (name))
        {
          if (hook (name, 0))
            goto out;
        }
    }

  /* ATARAID disks.  */
  for (i = 0; i < 8; i++)
    {
      char name[20];

      get_ataraid_disk_name (name, i);
      if (check_device_readable_unique (name))
        {
          if (hook (name, 0))
            goto out;
        }
    }

  /* Xen virtual block devices.  */
  for (i = 0; i < 26; i++)
    {
      char name[16];

      get_xvd_disk_name (name, i);
      if (check_device_readable_unique (name))
        {
          if (hook (name, 0))
            goto out;
        }
    }
#endif /* __linux__ */

  /* The rest is SCSI disks.  */
  for (i = 0; i < 48; i++)
    {
      char name[16];

      get_scsi_disk_name (name, i);
      if (check_device_readable_unique (name))
        {
          if (hook (name, 0))
            goto out;
        }
    }

#ifdef __linux__
  /* This is for DAC960 - we have
     /dev/rd/c<controller>d<logical drive>p<partition>.
     DAC960 driver currently supports up to 8 controllers, 32 logical
     drives, and 7 partitions.  */
  {
    int controller, drive;

    for (controller = 0; controller < 8; controller++)
      {
        for (drive = 0; drive < 15; drive++)
          {
            char name[24];

            get_dac960_disk_name (name, controller, drive);
            if (check_device_readable_unique (name))
              {
                if (hook (name, 0))
                  goto out;
              }
          }
      }
  }

  /* This is for Mylex Acceleraid - we have
     /dev/rd/c<controller>d<logical drive>p<partition>.  */
  {
    int controller, drive;

    for (controller = 0; controller < 8; controller++)
      {
        for (drive = 0; drive < 15; drive++)
          {
            char name[24];

            get_acceleraid_disk_name (name, controller, drive);
            if (check_device_readable_unique (name))
              {
                if (hook (name, 0))
                  goto out;
              }
          }
      }
  }

  /* This is for CCISS - we have
     /dev/cciss/c<controller>d<logical drive>p<partition>.  */
  {
    int controller, drive;

    for (controller = 0; controller < 3; controller++)
      {
        for (drive = 0; drive < 16; drive++)
          {
            char name[24];

            get_cciss_disk_name (name, controller, drive);
            if (check_device_readable_unique (name))
              {
                if (hook (name, 0))
                  goto out;
              }
          }
      }
  }

  /* This is for Compaq Intelligent Drive Array - we have
     /dev/ida/c<controller>d<logical drive>p<partition>.  */
  {
    int controller, drive;

    for (controller = 0; controller < 3; controller++)
      {
        for (drive = 0; drive < 16; drive++)
          {
            char name[24];

            get_ida_disk_name (name, controller, drive);
            if (check_device_readable_unique (name))
              {
                if (hook (name, 0))
                  goto out;
              }
          }
      }
  }

  /* This is for I2O - we have /dev/i2o/hd<logical drive><partition>  */
  {
    char unit;

    for (unit = 'a'; unit < 'f'; unit++)
      {
        char name[24];

        get_i2o_disk_name (name, unit);
        if (check_device_readable_unique (name))
          {
            if (hook (name, 0))
              goto out;
          }
      }
  }

  /* MultiMediaCard (MMC).  */
  for (i = 0; i < 10; i++)
    {
      char name[16];

      get_mmc_disk_name (name, i);
      if (check_device_readable_unique (name))
        {
          if (hook (name, 0))
            goto out;
        }
    }

# ifdef HAVE_DEVICE_MAPPER
# define dmraid_check(cond, ...) \
  if (! (cond)) \
    { \
      grub_dprintf ("deviceiter", __VA_ARGS__); \
      goto dmraid_end; \
    }

  /* DM-RAID.  */
  if (grub_device_mapper_supported ())
    {
      struct dm_tree *tree = NULL;
      struct dm_task *task = NULL;
      struct dm_names *names = NULL;
      unsigned int next = 0;
      void *top_handle, *second_handle;
      struct dm_tree_node *root, *top, *second;

      /* Build DM tree for all devices.  */
      tree = dm_tree_create ();
      dmraid_check (tree, "dm_tree_create failed\n");

      task = dm_task_create (DM_DEVICE_LIST);
      dmraid_check (task, "dm_task_create failed\n");
      dmraid_check (dm_task_run (task), "dm_task_run failed\n");
      names = dm_task_get_names (task);
      dmraid_check (names, "dm_task_get_names failed\n");
      dmraid_check (names->dev, "No DM devices found\n");

      do
        {
          names = (void *) names + next;
          dmraid_check (dm_tree_add_dev (tree, MAJOR (names->dev),
                                         MINOR (names->dev)),
                        "dm_tree_add_dev (%s) failed\n", names->name);
          next = names->next;
        }
      while (next);

      /* Walk the second-level children of the inverted tree; that is,
         devices which are directly composed of non-DM devices such as
         hard disks.  This class includes all DM-RAID disks and excludes
         all DM-RAID partitions.  */
      root = dm_tree_find_node (tree, 0, 0);
      top_handle = NULL;
      top = dm_tree_next_child (&top_handle, root, 1);
      while (top)
        {
          second_handle = NULL;
          second = dm_tree_next_child (&second_handle, top, 1);
          while (second)
            {
              const char *node_name, *node_uuid;
              char *name;

              node_name = dm_tree_node_get_name (second);
              dmraid_check (node_name, "dm_tree_node_get_name failed\n");
              node_uuid = dm_tree_node_get_uuid (second);
              dmraid_check (node_uuid, "dm_tree_node_get_uuid failed\n");
              if (strncmp (node_uuid, "DMRAID-", 7) != 0)
                {
                  grub_dprintf ("deviceiter", "%s is not DM-RAID\n",
                                node_name);
                  goto dmraid_next_child;
                }

              name = xasprintf ("/dev/mapper/%s", node_name);
              if (check_device_readable_unique (name))
                {
                  if (hook (name, 0))
                    {
                      free (name);
                      if (task)
                        dm_task_destroy (task);
                      if (tree)
                        dm_tree_free (tree);
                      goto out;
                    }
                }
              free (name);

            dmraid_next_child:
              second = dm_tree_next_child (&second_handle, top, 1);
            }
          top = dm_tree_next_child (&top_handle, root, 1);
        }

    dmraid_end:
      if (task)
        dm_task_destroy (task);
      if (tree)
        dm_tree_free (tree);
    }
# endif /* HAVE_DEVICE_MAPPER */
#endif /* __linux__ */

out:
  clear_seen_devices ();
}
static int _percent_run(struct dev_manager *dm, const char *name,
                        const char *dlid,
                        const char *target_type, int wait,
                        const struct logical_volume *lv, float *percent,
                        percent_range_t *overall_percent_range,
                        uint32_t *event_nr, int fail_if_percent_unsupported)
{
    int r = 0;
    struct dm_task *dmt;
    struct dm_info info;
    void *next = NULL;
    uint64_t start, length;
    char *type = NULL;
    char *params = NULL;
    const struct dm_list *segh = &lv->segments;
    struct lv_segment *seg = NULL;
    struct segment_type *segtype;
    percent_range_t percent_range = 0, combined_percent_range = 0;
    int first_time = 1;
    uint64_t total_numerator = 0, total_denominator = 0;

    *percent = -1;
    *overall_percent_range = PERCENT_INVALID;

    if (!(dmt = _setup_task(name, dlid, event_nr,
                            wait ? DM_DEVICE_WAITEVENT : DM_DEVICE_STATUS, 0, 0)))
        return_0;

    if (!dm_task_no_open_count(dmt))
        log_error("Failed to disable open_count");

    if (!dm_task_run(dmt))
        goto_out;

    if (!dm_task_get_info(dmt, &info) || !info.exists)
        goto_out;

    if (event_nr)
        *event_nr = info.event_nr;

    do {
        next = dm_get_next_target(dmt, next, &start, &length, &type, &params);
        if (lv) {
            if (!(segh = dm_list_next(&lv->segments, segh))) {
                log_error("Number of segments in active LV %s "
                          "does not match metadata", lv->name);
                goto out;
            }
            seg = dm_list_item(segh, struct lv_segment);
        }

        if (!type || !params)
            continue;

        if (!(segtype = get_segtype_from_string(dm->cmd, target_type)))
            continue;

        if (strcmp(type, target_type)) {
            /* If kernel's type isn't an exact match is it compatible? */
            if (!segtype->ops->target_status_compatible ||
                !segtype->ops->target_status_compatible(type))
                continue;
        }

        if (segtype->ops->target_percent &&
            !segtype->ops->target_percent(&dm->target_state, &percent_range,
                                          dm->mem, dm->cmd, seg, params,
                                          &total_numerator, &total_denominator))
            goto_out;

        if (first_time) {
            combined_percent_range = percent_range;
            first_time = 0;
        } else
            combined_percent_range =
                _combine_percent_ranges(combined_percent_range, percent_range);
    } while (next);

    if (lv && (segh = dm_list_next(&lv->segments, segh))) {
        log_error("Number of segments in active LV %s does not "
                  "match metadata", lv->name);
        goto out;
    }

    if (total_denominator) {
        *percent = (float) total_numerator * 100 / total_denominator;
        *overall_percent_range = combined_percent_range;
    } else {
        *percent = 100;
        if (first_time) {
            /* above ->target_percent() was not executed! */
            /* FIXME why return PERCENT_100 et. al. in this case? */
            *overall_percent_range = PERCENT_100;
            if (fail_if_percent_unsupported)
                goto_out;
        } else
            *overall_percent_range = combined_percent_range;
    }

    log_debug("LV percent: %f", *percent);
    r = 1;

out:
    dm_task_destroy(dmt);
    return r;
}
int device_is_usable(struct device *dev)
{
    struct dm_task *dmt;
    struct dm_info info;
    const char *name, *uuid;
    uint64_t start, length;
    char *target_type = NULL;
    char *params, *vgname = NULL, *lvname, *layer;
    void *next = NULL;
    int r = 0;

    if (!(dmt = dm_task_create(DM_DEVICE_STATUS))) {
        log_error("Failed to allocate dm_task struct to check dev status");
        return 0;
    }

    if (!dm_task_set_major_minor(dmt, MAJOR(dev->dev), MINOR(dev->dev), 1))
        goto_out;

    if (!dm_task_run(dmt)) {
        log_error("Failed to get state of mapped device");
        goto out;
    }

    if (!dm_task_get_info(dmt, &info))
        goto_out;

    if (!info.exists || info.suspended)
        goto out;

    name = dm_task_get_name(dmt);
    uuid = dm_task_get_uuid(dmt);

    /* FIXME Also check for mirror block_on_error and mpath no paths */
    /* For now, we exclude all mirrors */

    do {
        next = dm_get_next_target(dmt, next, &start, &length,
                                  &target_type, &params);
        /* Skip if target type doesn't match */
        if (target_type && !strcmp(target_type, "mirror")) {
            log_debug("%s: Mirror device not usable.", dev_name(dev));
            goto out;
        }
    } while (next);

    /* FIXME Also check dependencies? */

    /* Check internal lvm devices */
    if (uuid && !strncmp(uuid, UUID_PREFIX, sizeof(UUID_PREFIX) - 1)) {
        if (!(vgname = dm_strdup(name)) ||
            !dm_split_lvm_name(NULL, NULL, &vgname, &lvname, &layer))
            goto_out;

        if (lvname && (is_reserved_lvname(lvname) || *layer)) {
            log_debug("%s: Reserved internal LV device %s/%s%s%s not usable.",
                      dev_name(dev), vgname, lvname, *layer ? "-" : "", layer);
            goto out;
        }
    }

    r = 1;

out:
    dm_free(vgname);
    dm_task_destroy(dmt);
    return r;
}
int era_dm_info(const char *name,
                const char *uuid,
                struct era_dm_info *info,
                size_t name_size, char *name_ptr,
                size_t uuid_size, char *uuid_ptr)
{
    struct dm_task *dmt;
    struct dm_info dmi;
    int rc = -1;

    if (!(dmt = dm_task_create(DM_DEVICE_INFO)))
        return -1;

    if (name && !dm_task_set_name(dmt, name))
        goto out;

    if (uuid && !dm_task_set_uuid(dmt, uuid))
        goto out;

    if (!dm_task_run(dmt))
        goto out;

    if (!dm_task_get_info(dmt, &dmi))
        goto out;

    if (info) {
        info->target_count = dmi.target_count;
        info->open_count = dmi.open_count;
        info->suspended = dmi.suspended;
        info->exists = dmi.exists;
        info->major = dmi.major;
        info->minor = dmi.minor;
    }

    if (dmi.exists && name_size > 0 && name_ptr) {
        const char *dm_name;
        size_t dm_name_len;

        dm_name = dm_task_get_name(dmt);
        if (!dm_name)
            goto out;

        dm_name_len = strlen(dm_name);
        if (dm_name_len >= name_size) {
            /* buffer too small: report the size needed (including NUL) */
            rc = dm_name_len + 1;
            goto out;
        }

        strcpy(name_ptr, dm_name);
    }

    if (dmi.exists && uuid_size > 0 && uuid_ptr) {
        const char *dm_uuid;
        size_t dm_uuid_len;

        dm_uuid = dm_task_get_uuid(dmt);
        if (!dm_uuid)
            goto out;

        dm_uuid_len = strlen(dm_uuid);
        if (dm_uuid_len >= uuid_size) {
            rc = dm_uuid_len + 1;
            goto out;
        }

        strcpy(uuid_ptr, dm_uuid);
    }

    rc = 0;
out:
    dm_task_destroy(dmt);
    return rc;
}
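/*
 * A hedged usage sketch for era_dm_info(): a positive return value is the
 * buffer size the caller should retry with (name or uuid did not fit), 0 is
 * success and -1 is failure.  The uuid string and the fixed-size name buffer
 * here are illustrative only.
 */
static int example_query_info(void)
{
    struct era_dm_info info;
    char name[64];
    int rc;

    rc = era_dm_info(NULL, "ERA-example-uuid", &info,
                     sizeof(name), name, 0, NULL);
    if (rc < 0)
        return -1;            /* ioctl or lookup failed          */
    if (rc > 0)
        return -1;            /* name needs at least rc bytes    */
    if (!info.exists)
        return 0;             /* no such device                  */

    return info.open_count;   /* e.g. consume a field from info  */
}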
static int _first_status(int task, const char *name, const char *uuid,
                         uint64_t *start, uint64_t *length,
                         size_t target_size, char *target_ptr,
                         size_t params_size, char *params_ptr)
{
    uint64_t a, b;
    struct dm_task *dmt;
    struct dm_info dmi;
    char *tgt, *prm;
    int rc = -1;

    if (!(dmt = dm_task_create(task)))
        return -1;

    if (name && !dm_task_set_name(dmt, name))
        goto out;

    if (uuid && !dm_task_set_uuid(dmt, uuid))
        goto out;

    if (!dm_task_run(dmt))
        goto out;

    if (!dm_task_get_info(dmt, &dmi))
        goto out;

    if (!dmi.exists) {
        error(0, "target %s does not exist",
              name ? name : (uuid ? uuid : "<NULL>"));
        goto out;
    }

    (void) dm_get_next_target(dmt, NULL, &a, &b, &tgt, &prm);

    if (start)
        *start = a;

    if (length)
        *length = b;

    if (target_size > 0 && target_ptr) {
        if (strlen(tgt) >= target_size) {
            error(0, "target name too long");
            goto out;
        }
        strcpy(target_ptr, tgt);
    }

    if (params_size > 0 && params_ptr) {
        if (strlen(prm) >= params_size) {
            error(0, "target params too long");
            goto out;
        }
        strcpy(params_ptr, prm);
    }

    rc = 0;
out:
    dm_task_destroy(dmt);
    return rc;
}
/*
 * returns the reschedule delay
 * negative means *stop*
 */
int waiteventloop(struct event_thread *waiter)
{
    sigset_t set;
    int event_nr;
    int r;

    if (!waiter->event_nr)
        waiter->event_nr = dm_geteventnr(waiter->mapname);

    if (!(waiter->dmt = dm_task_create(DM_DEVICE_WAITEVENT))) {
        condlog(0, "%s: devmap event #%i dm_task_create error",
                waiter->mapname, waiter->event_nr);
        return 1;
    }

    if (!dm_task_set_name(waiter->dmt, waiter->mapname)) {
        condlog(0, "%s: devmap event #%i dm_task_set_name error",
                waiter->mapname, waiter->event_nr);
        dm_task_destroy(waiter->dmt);
        return 1;
    }

    if (waiter->event_nr &&
        !dm_task_set_event_nr(waiter->dmt, waiter->event_nr)) {
        condlog(0, "%s: devmap event #%i dm_task_set_event_nr error",
                waiter->mapname, waiter->event_nr);
        dm_task_destroy(waiter->dmt);
        return 1;
    }

    dm_task_no_open_count(waiter->dmt);

    /* accept wait interruption */
    set = unblock_signals();

    /* wait */
    r = dm_task_run(waiter->dmt);

    /* wait is over : event or interrupt */
    pthread_sigmask(SIG_SETMASK, &set, NULL);

    if (!r)    /* wait interrupted by signal */
        return -1;

    dm_task_destroy(waiter->dmt);
    waiter->dmt = NULL;
    waiter->event_nr++;

    /*
     * upon event ...
     */
    while (1) {
        condlog(3, "%s: devmap event #%i",
                waiter->mapname, waiter->event_nr);

        /*
         * event might be :
         *
         * 1) a table reload, which means our mpp structure is
         *    obsolete : refresh it through update_multipath()
         * 2) a path failed by DM : mark as such through
         *    update_multipath()
         * 3) map has gone away : stop the thread.
         * 4) a path reinstate : nothing to do
         * 5) a switch group : nothing to do
         */
        pthread_cleanup_push(cleanup_lock, &waiter->vecs->lock);
        lock(waiter->vecs->lock);
        r = update_multipath(waiter->vecs, waiter->mapname);
        lock_cleanup_pop(waiter->vecs->lock);

        if (r) {
            condlog(2, "%s: event checker exit", waiter->mapname);
            return -1; /* stop the thread */
        }

        event_nr = dm_geteventnr(waiter->mapname);

        if (waiter->event_nr == event_nr)
            return 1; /* upon problem reschedule 1s later */

        waiter->event_nr = event_nr;
    }

    return -1; /* never reach there */
}