/*
 * Spawn a shell running 'cmd' with its standard handles redirected to the
 * three pipe handles in 'handles' (stdin, stdout, stderr).  On success the
 * global proc_handle is set to the child's process handle; on any failure
 * it remains BAD_PROC_HANDLE.  Returns proc_handle either way.
 *
 * hide_child: nonzero starts the child with a hidden window (GUI build only).
 */
static HANDLE
exec_shell(char *cmd, HANDLE *handles, int hide_child)
{
    W32_CHAR *w32_cmdstr = 0;
    char *cmdstr;
    int freestr;    /* set by mk_shell_cmd_str() -- presumably flags heap ownership of cmdstr; TODO confirm */
    PROCESS_INFORMATION pi;
    STARTUPINFO si;

    proc_handle = BAD_PROC_HANDLE;    /* in case of failure */
    TRACE((T_CALLED "exec_shell %s\n", cmd));
    if ((cmdstr = mk_shell_cmd_str(cmd, &freestr, TRUE)) == NULL) {
        /* heap exhausted! */
        no_memory("exec_shell");

        /* Give user a chance to read message--more will surely follow. */
        Sleep(3000);
    } else if ((w32_cmdstr = w32_charstring(cmdstr)) == 0) {
        /* Wide-character conversion of the command string failed. */
        no_memory("exec_shell");
    } else {
        /* Hand the child our pipe handles as its standard handles. */
        memset(&si, 0, sizeof(si));
        si.cb = sizeof(si);
        si.dwFlags = STARTF_USESTDHANDLES;
        si.hStdInput = handles[0];
        si.hStdOutput = handles[1];
        si.hStdError = handles[2];
#if DISP_NTWIN
        /* In the windowed build, optionally keep the shell window hidden. */
        if (hide_child) {
            si.dwFlags |= STARTF_USESHOWWINDOW;
            si.wShowWindow = SW_HIDE;
        }
#endif
        TRACE(("CreateProcess %s (pipe)\n", cmdstr));
        if (CreateProcess(NULL,
                          w32_cmdstr,
                          NULL,
                          NULL,
                          TRUE,    /* Inherit handles */
                          0,
                          NULL,
                          NULL,
                          &si,
                          &pi)) {
            /* Success: keep only the process handle; the thread handle
             * is not needed. */
            w32_close_handle(pi.hThread);
            proc_handle = pi.hProcess;
            TRACE(("...created proc_handle %#x\n", proc_handle));
        }
    }
    FreeIfNeeded(cmdstr);
    FreeIfNeeded(w32_cmdstr);
    returnPtr(proc_handle);
}
/*
 * This function takes the raw DSL properties, and filters out the user-defined
 * properties into a separate nvlist.
 */
static nvlist_t *
process_user_props(zfs_handle_t *zhp, nvlist_t *props)
{
	libzfs_handle_t *hdl = zhp->zfs_hdl;
	nvlist_t *user_props;
	nvlist_t *val;
	nvpair_t *pair;

	if (nvlist_alloc(&user_props, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	/* Copy only the user-defined ("name:value" style) properties. */
	for (pair = nvlist_next_nvpair(props, NULL); pair != NULL;
	    pair = nvlist_next_nvpair(props, pair)) {
		if (!zfs_prop_user(nvpair_name(pair)))
			continue;

		verify(nvpair_value_nvlist(pair, &val) == 0);
		if (nvlist_add_nvlist(user_props, nvpair_name(pair),
		    val) != 0) {
			nvlist_free(user_props);
			return (NULL);
		}
	}

	return (user_props);
}
/*
 * Read the entire contents of 'filename' into a freshly allocated,
 * NUL-terminated buffer and return it.  Never returns NULL: allocation
 * failure is reported through no_memory(), and any I/O failure through
 * gbasm_error(), which does not return.
 */
static char*
read_file(const char *filename)
{
    FILE *f;
    char *r;
    long size;

    f = fopen(filename, "r");
    if (f == NULL)
        gbasm_error("'%s' cannot be opened: %s", filename, strerror(errno));
    if (fseek(f, 0, SEEK_END) != 0)
        goto error;
    size = ftell(f);
    if (size == -1)
        goto error;
    r = malloc(size + 1);
    if (r == NULL)
        no_memory();
    /*
     * BUG FIX: an empty file is valid.  fread(r, 0, 1, f) returns 0,
     * which the old "< 1" test treated as a read failure, so only demand
     * a successful read when there is data to read.
     */
    if ((fseek(f, 0, SEEK_SET) != 0)
        || (size > 0 && fread(r, size, 1, f) < 1)
        || ferror(f))
        goto error;
    fclose(f);
    r[size] = 0;
    return r;

error:
    {
        /* fclose() may clobber errno; preserve it for the message. */
        int saved_errno = errno;
        fclose(f);
        errno = saved_errno;
    }
    gbasm_error("'%s' was not read successfully: %s", filename,
                strerror(errno));
    return NULL;    /* not reached: gbasm_error() does not return */
}
/*
 * Unpacks an nvlist from the ZFS ioctl command structure.
 */
int
zcmd_read_dst_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, nvlist_t **nvlp)
{
	int rc;

	rc = nvlist_unpack((void *)(uintptr_t)zc->zc_nvlist_dst,
	    zc->zc_nvlist_dst_size, nvlp, 0);
	if (rc != 0)
		return (no_memory(hdl));

	return (0);
}
/*
 * A safe form of malloc() which will die if the allocation fails.
 * The returned memory is zero-filled.
 */
void *
zfs_alloc(libzfs_handle_t *hdl, size_t size)
{
	void *data = calloc(1, size);

	if (data == NULL)
		(void) no_memory(hdl);

	return (data);
}
/*
 * A safe form of strdup() which will die if the allocation fails.
 */
char *
zfs_strdup(libzfs_handle_t *hdl, const char *str)
{
	char *copy = strdup(str);

	if (copy == NULL)
		(void) no_memory(hdl);

	return (copy);
}
/*
 * Probe one device node (taskq callback) for a ZFS label.
 *
 * Opens rn->rn_name relative to the directory fd rn->rn_dfd, filters out
 * nodes that cannot hold a pool (vanished symlinks, non-file/dev nodes,
 * regular files smaller than SPA_MINDEVSIZE), and on success stores the
 * label config in rn->rn_config.  A zpool_read_label() failure is treated
 * as out-of-memory.
 */
static void
zpool_open_func(void *arg)
{
	rdsk_node_t *rn = arg;
	struct stat64 statbuf;
	nvlist_t *config;
	int fd;

	/* An earlier probe already decided this slice has no pool. */
	if (rn->rn_nozpool)
		return;
	if ((fd = openat64(rn->rn_dfd, rn->rn_name, O_RDONLY)) < 0) {
		/* symlink to a device that's no longer there */
		if (errno == ENOENT)
			nozpool_all_slices(rn->rn_avl, rn->rn_name);
		return;
	}
	/*
	 * Ignore failed stats. We only want regular
	 * files, character devs and block devs.
	 */
	if (fstat64(fd, &statbuf) != 0 ||
	    (!S_ISREG(statbuf.st_mode) &&
	    !S_ISCHR(statbuf.st_mode) &&
	    !S_ISBLK(statbuf.st_mode))) {
		(void) close(fd);
		return;
	}
	/* this file is too small to hold a zpool */
	if (S_ISREG(statbuf.st_mode) &&
	    statbuf.st_size < SPA_MINDEVSIZE) {
		(void) close(fd);
		return;
	} else if (!S_ISREG(statbuf.st_mode)) {
		/*
		 * Try to read the disk label first so we don't have to
		 * open a bunch of minor nodes that can't have a zpool.
		 */
		check_slices(rn->rn_avl, fd, rn->rn_name);
	}

	if ((zpool_read_label(fd, &config)) != 0) {
		(void) close(fd);
		(void) no_memory(rn->rn_hdl);
		return;
	}
	(void) close(fd);

	rn->rn_config = config;
	if (config != NULL) {
		assert(rn->rn_nozpool == B_FALSE);
	}
}
/*
 * Allocate a new AREGION, reporting (but not aborting on) allocation
 * failure.  Returns NULL when out of memory.
 */
static AREGION *
alloc_AREGION(void)
{
    AREGION *result;

    beginDisplay();
    result = typealloc(AREGION);
    if (result == NULL)
        (void) no_memory("AREGION");
    endofDisplay();

    return result;
}
/*
 * Set the named pool property to 'propval'.  The property is validated
 * against the pool version before the ioctl is issued.  Returns 0 on
 * success, or -1 / a libzfs error code with the error state set.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = { 0 };
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	/* Pool properties only exist at or above ZFS_VERSION_BOOTFS. */
	if (zpool_get_version(zhp) < ZFS_VERSION_BOOTFS) {
		zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to support pool properties"));
		return (zfs_error(zhp->zpool_hdl, EZFS_BADVERSION, errbuf));
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp))
		return (zfs_error(zhp->zpool_hdl, EZFS_POOLPROPS, errbuf));

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0 ||
	    nvlist_add_string(nvl, propname, propval) != 0) {
		/* BUG FIX: nvl leaked when only nvlist_add_string() failed. */
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	if ((realprops = zfs_validate_properties(zhp->zpool_hdl, ZFS_TYPE_POOL,
	    zhp->zpool_name, nvl, 0, NULL, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl, NULL) != 0) {
		/* BUG FIX: the validated nvlist leaked on this path. */
		nvlist_free(nvl);
		return (-1);
	}

	ret = ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	/* BUG FIX: the validated nvlist was never freed on success either. */
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);

	return (ret);
}
/*
 * A safe form of realloc(), which also zeroes newly allocated space.
 * Returns NULL (after reporting) if the allocation fails; the original
 * buffer is left untouched in that case.
 */
void *
zfs_realloc(libzfs_handle_t *hdl, void *ptr, size_t oldsize, size_t newsize)
{
	void *grown = realloc(ptr, newsize);

	if (grown == NULL) {
		(void) no_memory(hdl);
		return (NULL);
	}

	/* Zero only the tail that realloc() left uninitialized. */
	bzero((char *)grown + oldsize, (newsize - oldsize));
	return (grown);
}
/*
 * Accessor for the $cryptkey state variable.
 *
 * rp != NULL: "read" -- the key is write-only, so report WRITE_ONLY.
 * vp != NULL: "write" -- (re)allocate the global cryptkey buffer and
 *             derive the encryption key from the supplied value.
 * Otherwise:  no-op, return FALSE.
 *
 * NOTE(review): this block appears truncated in this chunk -- the closing
 * braces of the else branch and function are not visible here.
 */
int var_CRYPTKEY(TBUFF **rp, const char *vp)
{
    if (rp) {
        /* The key is never exposed; reads yield a placeholder. */
        tb_scopy(rp, WRITE_ONLY);
        return TRUE;
    } else if (vp) {
        beginDisplay();
        FreeIfNeeded(cryptkey);
        cryptkey = typeallocn(char, NKEYLEN);
        endofDisplay();
        if (cryptkey == 0)
            return no_memory("var_CRYPTKEY");
        vl_make_encrypt_key(cryptkey, vp);
        return TRUE;
    } else {
        return FALSE;
/*
 * printf()-style formatting into a freshly allocated string.  Returns the
 * new string, or NULL (after reporting out-of-memory) on failure.  The
 * caller is responsible for freeing the result.
 */
/*PRINTFLIKE2*/
char *
zfs_asprintf(libzfs_handle_t *hdl, const char *fmt, ...)
{
	va_list ap;
	char *ret;
	int err;

	va_start(ap, fmt);

	err = vasprintf(&ret, fmt, ap);

	va_end(ap);

	/*
	 * BUG FIX: vasprintf() leaves 'ret' indeterminate on failure; make
	 * sure callers get NULL rather than an uninitialized pointer.
	 */
	if (err < 0) {
		(void) no_memory(hdl);
		ret = NULL;
	}

	return (ret);
}
/*
 * Load the file allocation table from disk into fs->fat.
 * Returns 1 on success, 0 on allocation or read failure
 * (fs->fat is left NULL on failure).
 */
static int
fs_load_fat(FSInfo *fs)
{
    int fat_offset = 256 * fs->block_size;
    int fat_bytes = 768 * fs->block_size;

    fs->fat = malloc(fat_bytes);
    if (fs->fat == 0) {
        no_memory("fs_load_fat");
        return 0;
    }

    if (!fs_read(fs, fs->fat, -1, fat_offset, fat_bytes)) {
        free(fs->fat);
        fs->fat = 0;
        return 0;
    }

    return 1;
}
/*
 * Walk the FAT chain beginning at 'start_cluster' and return a malloc()ed
 * array of its clusters, or NULL on failure.  On entry *cluster_count is
 * the caller's expected chain length; it is updated to the actual length
 * found.  'filesize' bounds the bytes attributed to the final cluster.
 * The caller owns (and must free) the returned array.
 */
Cluster *
fs_fat_chain(FSInfo *fs, int start_cluster, int *cluster_count,
             uint64_t filesize)
{
    Cluster *clusters;
    int num_clusters;

    /* Lazily load the FAT on first use. */
    if (!fs->fat && !fs_load_fat(fs))
        return 0;

    /* First pass: just count the chain length. */
    if ((num_clusters = fs_fat_each_cluster(fs, start_cluster, 0, 0)) < 0)
        return 0;

    // printf("from cluster %d found %d clusters\n", start_cluster, num_clusters);
    if (num_clusters != *cluster_count) {
        /*
         * BUG FIX: the expected count must be dereferenced -- the original
         * passed the int pointer itself to the %d conversion (undefined
         * behavior, garbage in the warning).
         */
        fs_warn("found %d clusters in chain starting from cluster %d, "
                "was expecting %d clusters",
                num_clusters, start_cluster, *cluster_count);
        *cluster_count = num_clusters;
    }

    if ((clusters = malloc(num_clusters * sizeof(Cluster))) == 0) {
        no_memory("fs_fat_chain");
        return 0;
    }

    /* Second pass: record each cluster, tracking remaining file bytes. */
    fs_fat_remaining_bytes = filesize;
    if (fs_fat_each_cluster(fs, start_cluster, fs_fat_record_cluster_fn,
                            clusters) < 0) {
        free(clusters);
        return 0;
    }

    return clusters;
}
/*
 * Create the named pool, using the provided vdev list.  It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    const char *altroot)
{
	zfs_cmd_t zc = { 0 };
	char *packed;
	size_t len;
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	/* An alternate root must be an absolute path. */
	if (altroot != NULL && altroot[0] != '/')
		return (zfs_error(hdl, EZFS_BADPATH,
		    dgettext(TEXT_DOMAIN, "bad alternate root '%s'"), altroot));

	/* Pack the vdev tree into a contiguous buffer for the ioctl. */
	if (nvlist_size(nvroot, &len, NV_ENCODE_NATIVE) != 0)
		return (no_memory(hdl));

	if ((packed = zfs_alloc(hdl, len)) == NULL)
		return (-1);

	if (nvlist_pack(nvroot, &packed, &len,
	    NV_ENCODE_NATIVE, 0) != 0) {
		free(packed);
		return (no_memory(hdl));
	}

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
	zc.zc_config_src = (uint64_t)(uintptr_t)packed;
	zc.zc_config_src_size = len;

	if (altroot != NULL)
		(void) strlcpy(zc.zc_root, altroot, sizeof (zc.zc_root));

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_CREATE, &zc) != 0) {
		free(packed);

		/* Translate the kernel errno into a user-facing diagnosis. */
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

	free(packed);

	/*
	 * If this is an alternate root pool, then we automatically set the
	 * mountpoint of the root dataset to be '/'.
	 */
	if (altroot != NULL) {
		zfs_handle_t *zhp;

		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_ANY)) != NULL);
		verify(zfs_prop_set(zhp, ZFS_PROP_MOUNTPOINT, "/") == 0);

		zfs_close(zhp);
	}

	return (0);
}
/*
 * Probe one device node (taskq callback) for ZFS labels, Linux/macOS port.
 *
 * Filters out device names known to have open() side effects, stats the
 * node, skips optical media and undersized regular files, then reads the
 * vdev labels.  On success rn->rn_config and rn->rn_num_labels are filled
 * in.  On macOS an alarm()/longjmp guard bounds label reads to 20 seconds
 * to survive wedged devices.
 */
static void
zpool_open_func(void *arg)
{
	rdsk_node_t *rn = arg;
#ifdef __APPLE__
	struct stat statbuf;
#else
	struct stat64 statbuf;
#endif
	nvlist_t *config;
	int num_labels;
	int fd;

	/* An earlier probe already decided this slice has no pool. */
	if (rn->rn_nozpool)
		return;

#if defined (__linux__) || defined (__APPLE__)
	/*
	 * Skip devices with well known prefixes there can be side effects
	 * when opening devices which need to be avoided.
	 *
	 * core     - Symlink to /proc/kcore
	 * fd*      - Floppy interface.
	 * fuse     - Fuse control device.
	 * hpet     - High Precision Event Timer
	 * lp*      - Printer interface.
	 * parport* - Parallel port interface.
	 * ppp      - Generic PPP driver.
	 * random   - Random device
	 * rtc      - Real Time Clock
	 * tty*     - Generic serial interface.
	 * urandom  - Random device.
	 * usbmon*  - USB IO monitor.
	 * vcs*     - Virtual console memory.
	 * watchdog - Watchdog must be closed in a special way.
	 */
	if ((strncmp(rn->rn_name, "core", 4) == 0) ||
	    (strncmp(rn->rn_name, "fd", 2) == 0) ||
	    (strncmp(rn->rn_name, "fuse", 4) == 0) ||
	    (strncmp(rn->rn_name, "hpet", 4) == 0) ||
	    (strncmp(rn->rn_name, "lp", 2) == 0) ||
	    (strncmp(rn->rn_name, "parport", 7) == 0) ||
	    (strncmp(rn->rn_name, "ppp", 3) == 0) ||
	    (strncmp(rn->rn_name, "random", 6) == 0) ||
	    (strncmp(rn->rn_name, "rtc", 3) == 0) ||
	    (strncmp(rn->rn_name, "tty", 3) == 0) ||
	    (strncmp(rn->rn_name, "urandom", 7) == 0) ||
	    (strncmp(rn->rn_name, "usbmon", 6) == 0) ||
	    (strncmp(rn->rn_name, "vcs", 3) == 0) ||
#ifdef __APPLE__
	    (strncmp(rn->rn_name, "pty", 3) == 0) ||	// lots, skip for speed
	    (strncmp(rn->rn_name, "com", 3) == 0) ||	// /dev/com_digidesign_semiface
#endif
	    (strncmp(rn->rn_name, "watchdog", 8) == 0))
		return;

	/*
	 * Ignore failed stats.  We only want regular files and block devices.
	 */
	if (fstatat64(rn->rn_dfd, rn->rn_name, &statbuf, 0) != 0 ||
	    (!S_ISREG(statbuf.st_mode) &&
	    !S_ISBLK(statbuf.st_mode)))
		return;

#ifdef __APPLE__
	/* It is desirable to skip optical media as well, as they are
	 * also called /dev/diskX
	 */
	if (is_optical_media((char *)rn->rn_name))
		return;
#endif

	if ((fd = openat64(rn->rn_dfd, rn->rn_name, O_RDONLY)) < 0) {
		/* symlink to a device that's no longer there */
		if (errno == ENOENT)
			nozpool_all_slices(rn->rn_avl, rn->rn_name);
		return;
	}
#else /* LINUX, APPLE -> IllumOS */
	if ((fd = openat64(rn->rn_dfd, rn->rn_name, O_RDONLY)) < 0) {
		/* symlink to a device that's no longer there */
		if (errno == ENOENT)
			nozpool_all_slices(rn->rn_avl, rn->rn_name);
		return;
	}
	/*
	 * Ignore failed stats.  We only want regular
	 * files, character devs and block devs.
	 */
	if (fstat64(fd, &statbuf) != 0 ||
	    (!S_ISREG(statbuf.st_mode) &&
	    !S_ISCHR(statbuf.st_mode) &&
	    !S_ISBLK(statbuf.st_mode))) {
		(void) close(fd);
		return;
	}
#endif
	/* this file is too small to hold a zpool */
	if (S_ISREG(statbuf.st_mode) &&
	    statbuf.st_size < SPA_MINDEVSIZE) {
		(void) close(fd);
		return;
	} else if (!S_ISREG(statbuf.st_mode)) {
		/*
		 * Try to read the disk label first so we don't have to
		 * open a bunch of minor nodes that can't have a zpool.
		 */
		check_slices(rn->rn_avl, fd, rn->rn_name);
	}

#ifdef __APPLE__
	/* A block device that cannot report its block size is unusable. */
	int32_t blksz = 0;
	if (S_ISBLK(statbuf.st_mode) &&
	    (ioctl(fd, DKIOCGETBLOCKSIZE, &blksz) || blksz == 0)) {
		if (strncmp(rn->rn_name, "vn", 2) != 0)
			fprintf(stderr, "device '%s' failed to report blocksize -- skipping\r\n",
			    rn->rn_name);
		close(fd);
		return;
	}

	/*
	 * Guard the label read with a 20-second alarm; a wedged device
	 * longjmp()s back here via the SIGALRM handler.
	 */
	struct sigaction sact;
	sigemptyset(&sact.sa_mask);
	sact.sa_flags = 0;
	sact.sa_handler = signal_alarm;
	sigaction(SIGALRM, &sact, NULL);

	if (setjmp(buffer) != 0) {
		printf("ZFS: Warning, timeout reading device '%s'\n", rn->rn_name);
		close(fd);
		return;
	}

	alarm(20);
#endif

	if ((zpool_read_label(fd, &config, &num_labels)) != 0) {
#ifdef __APPLE__
		alarm(0);
#endif
		(void) close(fd);
		(void) no_memory(rn->rn_hdl);
		return;
	}

#ifdef __APPLE__
	alarm(0);
#endif

	/* A readable device with zero intact labels holds no pool. */
	if (num_labels == 0) {
		(void) close(fd);
		nvlist_free(config);
		return;
	}

	(void) close(fd);

	rn->rn_config = config;
	rn->rn_num_labels = num_labels;
}
/*
 * Given a list of directories to search, find all pools stored on disk.  This
 * includes partial pools which are not available to import.  If no args are
 * given (argc is 0), then the default directory (/dev/dsk) is searched.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
static nvlist_t *
zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
{
	int i, dirs = iarg->paths;
	DIR *dirp = NULL;
	struct dirent64 *dp;
	char path[MAXPATHLEN];
	char *end, **dir = iarg->path;
	size_t pathleft;
	struct stat64 statbuf;
	nvlist_t *ret = NULL, *config;
	static char *default_dir = DISK_ROOT;
	int fd;
	pool_list_t pools = { 0 };
	pool_entry_t *pe, *penext;
	vdev_entry_t *ve, *venext;
	config_entry_t *ce, *cenext;
	name_entry_t *ne, *nenext;

	/* The caller may target a pool by name or guid, never both. */
	verify(iarg->poolname == NULL || iarg->guid == 0);

	if (dirs == 0) {
#ifdef HAVE_LIBBLKID
		/* Use libblkid to scan all device for their type */
		if (zpool_find_import_blkid(hdl, &pools) == 0)
			goto skip_scanning;

		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid failure falling back "
		    "to manual probing"));
#endif /* HAVE_LIBBLKID */
		dirs = 1;
		dir = &default_dir;
	}

	/*
	 * Go through and read the label configuration information from every
	 * possible device, organizing the information according to pool GUID
	 * and toplevel GUID.
	 */
	for (i = 0; i < dirs; i++) {
		char *rdsk;
		int dfd;

		/* use realpath to normalize the path */
		if (realpath(dir[i], path) == 0) {
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);
			goto error;
		}
		end = &path[strlen(path)];
		*end++ = '/';
		*end = 0;
		pathleft = &path[sizeof (path)] - end;

		/*
		 * Using raw devices instead of block devices when we're
		 * reading the labels skips a bunch of slow operations during
		 * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
		 */
		if (strcmp(path, "/dev/dsk/") == 0)
			rdsk = "/dev/rdsk/";
		else
			rdsk = path;

		if ((dfd = open64(rdsk, O_RDONLY)) < 0 ||
		    (dirp = fdopendir(dfd)) == NULL) {
			zfs_error_aux(hdl, strerror(errno));
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
			    rdsk);
			goto error;
		}

		/*
		 * This is not MT-safe, but we have no MT consumers of libzfs
		 */
		while ((dp = readdir64(dirp)) != NULL) {
			const char *name = dp->d_name;
			/* Skip "." and ".." entries. */
			if (name[0] == '.' &&
			    (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
				continue;

			/*
			 * Skip checking devices with well known prefixes:
			 * watchdog - A special close is required to avoid
			 *            triggering it and resetting the system.
			 * fuse     - Fuse control device.
			 * ppp      - Generic PPP driver.
			 * tty*     - Generic serial interface.
			 * vcs*     - Virtual console memory.
			 * parport* - Parallel port interface.
			 * lp*      - Printer interface.
			 * fd*      - Floppy interface.
			 * hpet     - High Precision Event Timer, crashes qemu
			 *            when accessed from a virtual machine.
			 * core     - Symlink to /proc/kcore, causes a crash
			 *            when access from Xen dom0.
			 */
			if ((strncmp(name, "watchdog", 8) == 0) ||
			    (strncmp(name, "fuse", 4) == 0) ||
			    (strncmp(name, "ppp", 3) == 0) ||
			    (strncmp(name, "tty", 3) == 0) ||
			    (strncmp(name, "vcs", 3) == 0) ||
			    (strncmp(name, "parport", 7) == 0) ||
			    (strncmp(name, "lp", 2) == 0) ||
			    (strncmp(name, "fd", 2) == 0) ||
			    (strncmp(name, "hpet", 4) == 0) ||
			    (strncmp(name, "core", 4) == 0))
				continue;

			/*
			 * Ignore failed stats.  We only want regular
			 * files and block devices.
			 */
			if ((fstatat64(dfd, name, &statbuf, 0) != 0) ||
			    (!S_ISREG(statbuf.st_mode) &&
			    !S_ISBLK(statbuf.st_mode)))
				continue;

			if ((fd = openat64(dfd, name, O_RDONLY)) < 0)
				continue;

			/* Label read failure here means out of memory. */
			if ((zpool_read_label(fd, &config)) != 0) {
				(void) close(fd);
				(void) no_memory(hdl);
				goto error;
			}

			(void) close(fd);

			if (config != NULL) {
				boolean_t matched = B_TRUE;

				/* Honor a caller-requested name or guid filter. */
				if (iarg->poolname != NULL) {
					char *pname;

					matched = nvlist_lookup_string(config,
					    ZPOOL_CONFIG_POOL_NAME,
					    &pname) == 0 &&
					    strcmp(iarg->poolname, pname) == 0;
				} else if (iarg->guid != 0) {
					uint64_t this_guid;

					matched = nvlist_lookup_uint64(config,
					    ZPOOL_CONFIG_POOL_GUID,
					    &this_guid) == 0 &&
					    iarg->guid == this_guid;
				}
				if (!matched) {
					nvlist_free(config);
					config = NULL;
					continue;
				}
				/* use the non-raw path for the config */
				(void) strlcpy(end, name, pathleft);
				if (add_config(hdl, &pools, path, config) != 0)
					goto error;
			}
		}

		(void) closedir(dirp);
		dirp = NULL;
	}

#ifdef HAVE_LIBBLKID
skip_scanning:
#endif
	ret = get_configs(hdl, &pools, iarg->can_be_active);

error:
	/* Tear down the intermediate pool/vdev/config/name lists. */
	for (pe = pools.pools; pe != NULL; pe = penext) {
		penext = pe->pe_next;
		for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
			venext = ve->ve_next;
			for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
				cenext = ce->ce_next;
				if (ce->ce_config)
					nvlist_free(ce->ce_config);
				free(ce);
			}
			free(ve);
		}
		free(pe);
	}

	for (ne = pools.names; ne != NULL; ne = nenext) {
		nenext = ne->ne_next;
		if (ne->ne_name)
			free(ne->ne_name);
		free(ne);
	}

	if (dirp)
		(void) closedir(dirp);

	return (ret);
}
/*
 * Retrieve the persistent error log, uniquify the members, and return to the
 * caller.  The result is cached on the handle; repeat calls return the
 * cached array.  When 'list' is NULL only the unique count is stored in
 * *nelem.  Returns 0 on success, -1 or the no_memory() result on failure.
 */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t ***list, size_t *nelem)
{
	zfs_cmd_t zc = { 0 };
	uint64_t count;
	zbookmark_t *zb;
	int i, j;

	/* Serve repeat requests from the cached copy. */
	if (zhp->zpool_error_log != NULL) {
		*list = zhp->zpool_error_log;
		*nelem = zhp->zpool_error_count;
		return (0);
	}

	/*
	 * Retrieve the raw error list from the kernel.  If the number of errors
	 * has increased, allocate more space and continue until we get the
	 * entire list.
	 */
	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
	    &count) == 0);
	if ((zc.zc_config_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
		return (-1);
	zc.zc_config_dst_size = count;
	(void) strcpy(zc.zc_name, zhp->zpool_name);
	for (;;) {
		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
		    &zc) != 0) {
			free((void *)(uintptr_t)zc.zc_config_dst);
			if (errno == ENOMEM) {
				/*
				 * The log grew; retry with the size the
				 * kernel reported back in zc_config_dst_size.
				 */
				if ((zc.zc_config_dst = (uintptr_t)
				    zfs_alloc(zhp->zpool_hdl,
				    zc.zc_config_dst_size)) == (uintptr_t)NULL)
					return (-1);
			} else {
				return (-1);
			}
		} else {
			break;
		}
	}

	/*
	 * Sort the resulting bookmarks.  This is a little confusing due to the
	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
	 * to first, and 'zc_config_dst_size' indicates the number of bookmarks
	 * _not_ copied as part of the process.  So we point the start of our
	 * array appropriate and decrement the total number of elements.
	 */
	zb = ((zbookmark_t *)(uintptr_t)zc.zc_config_dst) +
	    zc.zc_config_dst_size;
	count -= zc.zc_config_dst_size;

	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);

	/*
	 * Count the number of unique elements
	 */
	j = 0;
	for (i = 0; i < count; i++) {
		if (i > 0 && memcmp(&zb[i - 1], &zb[i],
		    sizeof (zbookmark_t)) == 0)
			continue;
		j++;
	}

	/*
	 * If the user has only requested the number of items, return it now
	 * without bothering with the extra work.
	 */
	if (list == NULL) {
		*nelem = j;
		free((void *)(uintptr_t)zc.zc_config_dst);
		return (0);
	}

	zhp->zpool_error_count = j;

	/*
	 * Allocate an array of nvlists to hold the results
	 */
	if ((zhp->zpool_error_log = zfs_alloc(zhp->zpool_hdl,
	    j * sizeof (nvlist_t *))) == NULL) {
		free((void *)(uintptr_t)zc.zc_config_dst);
		return (-1);
	}

	/*
	 * Fill in the results with names from the kernel.
	 */
	j = 0;
	for (i = 0; i < count; i++) {
		char buf[64];
		nvlist_t *nv;

		if (i > 0 && memcmp(&zb[i - 1], &zb[i],
		    sizeof (zbookmark_t)) == 0)
			continue;

		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		zhp->zpool_error_log[j] = nv;

		zc.zc_bookmark = zb[i];
		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_BOOKMARK_NAME,
		    &zc) == 0) {
			if (nvlist_add_string(nv, ZPOOL_ERR_DATASET,
			    zc.zc_prop_name) != 0 ||
			    nvlist_add_string(nv, ZPOOL_ERR_OBJECT,
			    zc.zc_prop_value) != 0 ||
			    nvlist_add_string(nv, ZPOOL_ERR_RANGE,
			    zc.zc_filename) != 0)
				goto nomem;
		} else {
			/* Name lookup failed; fall back to raw numbers. */
			(void) snprintf(buf, sizeof (buf), "%llx",
			    zb[i].zb_objset);
			if (nvlist_add_string(nv, ZPOOL_ERR_DATASET,
			    buf) != 0)
				goto nomem;
			(void) snprintf(buf, sizeof (buf), "%llx",
			    zb[i].zb_object);
			if (nvlist_add_string(nv, ZPOOL_ERR_OBJECT,
			    buf) != 0)
				goto nomem;
			(void) snprintf(buf, sizeof (buf), "lvl=%u blkid=%llu",
			    (int)zb[i].zb_level, (long long)zb[i].zb_blkid);
			if (nvlist_add_string(nv, ZPOOL_ERR_RANGE,
			    buf) != 0)
				goto nomem;
		}

		j++;
	}

	*list = zhp->zpool_error_log;
	*nelem = zhp->zpool_error_count;

	free((void *)(uintptr_t)zc.zc_config_dst);

	return (0);

nomem:
	free((void *)(uintptr_t)zc.zc_config_dst);
	for (i = 0; i < zhp->zpool_error_count; i++) {
		/*
		 * BUG FIX: the entries are nvlists allocated via
		 * nvlist_alloc(); releasing them with free() leaked their
		 * internal allocations.  Use nvlist_free().
		 */
		if (zhp->zpool_error_log[i])
			nvlist_free(zhp->zpool_error_log[i]);
	}
	free(zhp->zpool_error_log);
	zhp->zpool_error_log = NULL;
	return (no_memory(zhp->zpool_hdl));
}
DevInfo * blkio_open(char *path) { DevInfo *dev; struct stat dev_stat; uint64_t dev_size; if (!path) { error("blkio_open", "path is null"); return 0; } if ((dev = malloc(sizeof(DevInfo))) == 0) { no_memory("blkio_open"); return 0; } dev->path = path; if ((dev->fd = open(path, O_RDONLY)) == -1) { sys_error("blkio_open", "could not open '%s'", path); free(dev); return 0; } if (fstat(dev->fd, &dev_stat) == -1) { error("blkio_open", "fstat failed"); free(dev); return 0; } if (S_ISREG(dev_stat.st_mode)) { uint64_t size = size_override? size_override : dev_stat.st_size; dev->block_size = DEFAULT_BLOCK_SIZE; dev->blocks = size/dev->block_size; dev->bytes = size; return dev; } else if (S_ISBLK(dev_stat.st_mode)) { if (ioctl(dev->fd, BLKSSZGET, &dev->block_size) == -1) { error("blkio_open", "ioctl(BLKSSZGET) failed"); free(dev); return 0; } if (size_override) { dev->blocks = size_override/dev->block_size; dev->bytes = size_override; } else { if (ioctl(dev->fd, BLKGETSIZE64, &dev_size) == -1) { error("blkio_open", "ioctl(BLKGETSIZE64) failed"); free(dev); return 0; } dev->blocks = dev_size/dev->block_size; dev->bytes = dev_size; } return dev; } error("blkio_open", "not a file or block device"); free(dev); return 0; }
/*
 * Retrieve the persistent error log, uniquify the members, and return to the
 * caller.  The result is an nvlist (*nverrlistp) whose entries each carry
 * the dataset and object number of one errored block; duplicates that
 * differ only in level/blkid are collapsed.  Returns 0 on success, or the
 * no_memory() result on allocation failure.
 */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
{
	zfs_cmd_t zc = { 0 };
	uint64_t count;
	zbookmark_t *zb = NULL;
	int i;

	/*
	 * Retrieve the raw error list from the kernel.  If the number of errors
	 * has increased, allocate more space and continue until we get the
	 * entire list.
	 */
	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
	    &count) == 0);
	/* If there are no errors to get, then return success */
	if (count == 0)
		return (0);
	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
		return (-1);
	zc.zc_nvlist_dst_size = count;
	(void) strcpy(zc.zc_name, zhp->zpool_name);
	for (;;) {
		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
		    &zc) != 0) {
			free((void *)(uintptr_t)zc.zc_nvlist_dst);
			if (errno == ENOMEM) {
				/* Log grew since we asked; grow and retry. */
				count = zc.zc_nvlist_dst_size;
				if ((zc.zc_nvlist_dst = (uintptr_t)
				    zfs_alloc(zhp->zpool_hdl, count *
				    sizeof (zbookmark_t))) == (uintptr_t)NULL)
					return (-1);
			} else {
				return (-1);
			}
		} else {
			break;
		}
	}

	/*
	 * Sort the resulting bookmarks.  This is a little confusing due to the
	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
	 * _not_ copied as part of the process.  So we point the start of our
	 * array appropriate and decrement the total number of elements.
	 */
	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
	    zc.zc_nvlist_dst_size;
	count -= zc.zc_nvlist_dst_size;

	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);

	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);

	/*
	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
	 */
	for (i = 0; i < count; i++) {
		nvlist_t *nv;

		/* ignoring zb_blkid and zb_level for now */
		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
		    zb[i-1].zb_object == zb[i].zb_object)
			continue;

		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
			goto nomem;
		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
		    zb[i].zb_objset) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
		    zb[i].zb_object) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		/* nvlist_add_nvlist() copies nv, so free our local one after. */
		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		nvlist_free(nv);
	}

	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (0);

nomem:
	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (no_memory(zhp->zpool_hdl));
}
/*
 * Probe one device node (taskq callback) for ZFS labels, Linux port.
 *
 * Filters out device names known to have open() side effects, stats the
 * node, skips undersized regular files, then reads the vdev labels.  On
 * success rn->rn_config and rn->rn_num_labels are filled in; a readable
 * device with zero intact labels yields neither.
 */
static void
zpool_open_func(void *arg)
{
	rdsk_node_t *rn = arg;
	struct stat64 statbuf;
	nvlist_t *config;
	int num_labels;
	int fd;

	/* An earlier probe already decided this slice has no pool. */
	if (rn->rn_nozpool)
		return;

#ifdef __linux__
	/*
	 * Skip devices with well known prefixes there can be side effects
	 * when opening devices which need to be avoided.
	 *
	 * core     - Symlink to /proc/kcore
	 * fd*      - Floppy interface.
	 * fuse     - Fuse control device.
	 * hpet     - High Precision Event Timer
	 * lp*      - Printer interface.
	 * parport* - Parallel port interface.
	 * ppp      - Generic PPP driver.
	 * random   - Random device
	 * rtc      - Real Time Clock
	 * tty*     - Generic serial interface.
	 * urandom  - Random device.
	 * usbmon*  - USB IO monitor.
	 * vcs*     - Virtual console memory.
	 * watchdog - Watchdog must be closed in a special way.
	 */
	if ((strncmp(rn->rn_name, "core", 4) == 0) ||
	    (strncmp(rn->rn_name, "fd", 2) == 0) ||
	    (strncmp(rn->rn_name, "fuse", 4) == 0) ||
	    (strncmp(rn->rn_name, "hpet", 4) == 0) ||
	    (strncmp(rn->rn_name, "lp", 2) == 0) ||
	    (strncmp(rn->rn_name, "parport", 7) == 0) ||
	    (strncmp(rn->rn_name, "ppp", 3) == 0) ||
	    (strncmp(rn->rn_name, "random", 6) == 0) ||
	    (strncmp(rn->rn_name, "rtc", 3) == 0) ||
	    (strncmp(rn->rn_name, "tty", 3) == 0) ||
	    (strncmp(rn->rn_name, "urandom", 7) == 0) ||
	    (strncmp(rn->rn_name, "usbmon", 6) == 0) ||
	    (strncmp(rn->rn_name, "vcs", 3) == 0) ||
	    (strncmp(rn->rn_name, "watchdog", 8) == 0))
		return;

	/*
	 * Ignore failed stats.  We only want regular files and block devices.
	 */
	if (fstatat64(rn->rn_dfd, rn->rn_name, &statbuf, 0) != 0 ||
	    (!S_ISREG(statbuf.st_mode) &&
	    !S_ISBLK(statbuf.st_mode)))
		return;

	if ((fd = openat64(rn->rn_dfd, rn->rn_name, O_RDONLY)) < 0) {
		/* symlink to a device that's no longer there */
		if (errno == ENOENT)
			nozpool_all_slices(rn->rn_avl, rn->rn_name);
		return;
	}
#else
	if ((fd = openat64(rn->rn_dfd, rn->rn_name, O_RDONLY)) < 0) {
		/* symlink to a device that's no longer there */
		if (errno == ENOENT)
			nozpool_all_slices(rn->rn_avl, rn->rn_name);
		return;
	}
	/*
	 * Ignore failed stats.  We only want regular
	 * files, character devs and block devs.
	 */
	if (fstat64(fd, &statbuf) != 0 ||
	    (!S_ISREG(statbuf.st_mode) &&
	    !S_ISCHR(statbuf.st_mode) &&
	    !S_ISBLK(statbuf.st_mode))) {
		(void) close(fd);
		return;
	}
#endif
	/* this file is too small to hold a zpool */
	if (S_ISREG(statbuf.st_mode) &&
	    statbuf.st_size < SPA_MINDEVSIZE) {
		(void) close(fd);
		return;
	} else if (!S_ISREG(statbuf.st_mode)) {
		/*
		 * Try to read the disk label first so we don't have to
		 * open a bunch of minor nodes that can't have a zpool.
		 */
		check_slices(rn->rn_avl, fd, rn->rn_name);
	}

	/* A label read failure here means out of memory. */
	if ((zpool_read_label(fd, &config, &num_labels)) != 0) {
		(void) close(fd);
		(void) no_memory(rn->rn_hdl);
		return;
	}

	/* No intact labels: not a pool member. */
	if (num_labels == 0) {
		(void) close(fd);
		nvlist_free(config);
		return;
	}

	(void) close(fd);

	rn->rn_config = config;
	rn->rn_num_labels = num_labels;
}
/*
 * jpeginfo entry point: parse options, then iterate over the file
 * arguments (or a list file with -f), printing per-image information
 * and optionally verifying (-c), checksumming (-5) or deleting (-d)
 * each JPEG.  Returns 1 if any errors were found, 0 otherwise.
 *
 * Error recovery uses setjmp/longjmp: my_error_exit() longjmps back to
 * the setjmp() below, so `i`, which must survive the jump, is volatile.
 */
int main(int argc, char **argv)
{
  JSAMPARRAY buf = malloc(sizeof(JSAMPROW)*BUF_LINES);
  jpeg_saved_marker_ptr exif_marker, cmarker;
  MD5_CTX *MD5 = malloc(sizeof(MD5_CTX));
  volatile int i;                 /* survives longjmp (see setjmp below) */
  int c,j,lines_read, err_count;  /* NOTE(review): lines_read and err_count
                                   * are written but never read */
  char ch;
  char namebuf[1024];
  long fs;
  char *md5buf,digest[16],digest_text[33];

  global_total_errors=0;
  if (rcsid);  /* no-op reference so the compiler doesn't warn that
                * rcsid is unused */

  /* Install our longjmp-based error handlers before any decompression. */
  cinfo.err = jpeg_std_error(&jerr.pub);
  jpeg_create_decompress(&cinfo);
  jerr.pub.error_exit=my_error_exit;
  jerr.pub.output_message=my_output_message;

  if (!buf || !MD5) no_memory();
  if (argc<2) {
    if (quiet_mode < 2) fprintf(stderr,"jpeginfo: file arguments missing\n"
                                "Try 'jpeginfo "
                                "--help"
                                "' for more information.\n");
    exit(1);
  }

  /* parse command line parameters */
  while(1) {
    opt_index=0;
    if ( (c=getopt_long(argc,argv,"livVdcChqm:f:5",
                        long_options,&opt_index)) == -1)
      break;
    switch (c) {
    case 'm':  /* delete mode: all files vs. erroneous files only */
      if (!strcasecmp(optarg,"all")) del_mode=0;
      else if (!strcasecmp(optarg,"erronly")) del_mode=1;
      else if (!quiet_mode)
        fprintf(stderr,"Unknown parameter for -m, --mode.\n");
      break;
    case 'f':  /* read file names from a list file ("-" = stdin) */
      if (!strcmp(optarg,"-")) listfile=stdin;
      else if ((listfile=fopen(optarg,"r"))==NULL) {
        fprintf(stderr,"Cannot open file '%s'.\n",optarg);
        exit(2);
      }
      input_from_file=1;
      break;
    case 'v':
      verbose_mode=1;
      break;
    case 'V':
      fprintf(stderr,"jpeginfo v" VERSION "  " HOST_TYPE
              "\nCopyright (c) Timo Kokkonen, 1995-2002.\n");
      exit(0);
    case 'd':
      delete_mode=1;
      break;
    case 'c':
      check_mode=1;
      break;
    case 'h':
      p_usage();
      break;
    case 'q':
      quiet_mode++;
      break;
    case 'l':
      list_mode=1;
      break;
    case 'i':
      longinfo_mode=1;
      break;
    case '5':
      md5_mode=1;
      break;
    case 'C':
      com_mode=1;
      break;
    case '?':
      break;
    default:
      if (!quiet_mode)
        fprintf(stderr,"jpeginfo: error parsing parameters.\n");
    }
  }

  if (delete_mode && verbose_mode && !quiet_mode)
    fprintf(stderr,"jpeginfo: delete mode enabled (%s)\n",
            !del_mode?"normal":"errors only");

  /* Main loop: one iteration per input file. */
  i=1;
  do {
    if (input_from_file) {
      if (!fgetstr(namebuf,sizeof(namebuf),listfile)) break;
      current=namebuf;
    }
    else current=argv[i];

    if (current[0]==0) continue;
    /* skip option-looking argv entries already consumed by getopt */
    if (current[0]=='-' && !input_from_file) continue;

    /*
     * longjmp target: any libjpeg fatal error during the calls below
     * lands here with infile open (fopen succeeded before decompress).
     */
    if (setjmp(jerr.setjmp_buffer)) {
      jpeg_abort_decompress(&cinfo);
      fclose(infile);
      if (list_mode && quiet_mode < 2) printf(" %s",current);
      if (quiet_mode < 2) printf(" [ERROR]\n");
      if (delete_mode) delete_file(current,verbose_mode,quiet_mode);
      continue;
    }

    if ((infile=fopen(current,"r"))==NULL) {
      if (!quiet_mode) fprintf(stderr, "jpeginfo: can't open '%s'\n", current);
      continue;
    }
    if (is_dir(infile)) {
      fclose(infile);
      if (verbose_mode) printf("directory: %s  skipped\n",current);
      continue;
    }

    fs=filesize(infile);

    if (md5_mode) {
      /* Slurp the whole file to compute its MD5 digest. */
      md5buf=malloc(fs);
      if (!md5buf) no_memory();
      /* NOTE(review): fread return value unchecked — a short read would
       * hash uninitialized memory.  Also fs==0 gives malloc(0). */
      fread(md5buf,1,fs,infile);
      rewind(infile);

      MD5Init(MD5);
      MD5Update(MD5,md5buf,fs);
      MD5Final(digest,MD5);
      md2str(digest,digest_text);

      free(md5buf);
    }

    if (!list_mode && quiet_mode < 2) printf("%s ",current);

    global_error_counter=0;
    err_count=jerr.pub.num_warnings;
    if (com_mode) jpeg_save_markers(&cinfo, JPEG_COM, 0xffff);
    jpeg_save_markers(&cinfo, EXIF_JPEG_MARKER, 0xffff);
    jpeg_stdio_src(&cinfo, infile);
    jpeg_read_header(&cinfo, TRUE);

    /* check for Exif marker */
    exif_marker=NULL;
    cmarker=cinfo.marker_list;
    while (cmarker) {
      if (cmarker->marker == EXIF_JPEG_MARKER) {
        if (!memcmp(cmarker->data,EXIF_IDENT_STRING,6)) exif_marker=cmarker;
      }
      cmarker=cmarker->next;
    }

    if (quiet_mode < 2) {
      /* dimensions, bit depth and container type */
      printf("%4d x %-4d %2dbit ",(int)cinfo.image_width,
             (int)cinfo.image_height,(int)cinfo.num_components*8);

      if (exif_marker) printf("Exif  ");
      else if (cinfo.saw_JFIF_marker) printf("JFIF  ");
      else if (cinfo.saw_Adobe_marker) printf("Adobe ");
      else printf("n/a   ");

      if (longinfo_mode) {
        printf("%s %s",(cinfo.progressive_mode?"Progressive":"Normal"),
               (cinfo.arith_code?"Arithmetic":"Huffman") );

        if (cinfo.density_unit==1||cinfo.density_unit==2)
          printf(",%ddp%c",MIN(cinfo.X_density,cinfo.Y_density),
                 (cinfo.density_unit==1?'i':'c') );

        if (cinfo.CCIR601_sampling) printf(",CCIR601");
        printf(" %7ld ",fs);
      } else printf("%c %7ld ",(cinfo.progressive_mode?'P':'N'),fs);

      if (md5_mode) printf("%s ",digest_text);
      if (list_mode) printf("%s ",current);

      if (com_mode) {
        /* Print each COM marker, filtering out control characters. */
        cmarker=cinfo.marker_list;
        while (cmarker) {
          if (cmarker->marker == JPEG_COM) {
            printf("\"");
            for (j=0;j<cmarker->data_length;j++) {
              ch = cmarker->data[j];
              /* NOTE(review): iscntrl() on a possibly-negative plain
               * char is UB; cast to unsigned char would be safer. */
              if (ch < 32 || iscntrl(ch)) continue;
              printf("%c",cmarker->data[j]);
            }
            printf("\" ");
          }
          cmarker=cmarker->next;
        }
      }
    }

    if (check_mode) {
      /* Fully decode (downscaled grayscale) to surface data errors. */
      cinfo.out_color_space=JCS_GRAYSCALE; /* to speed up the process... */
      cinfo.scale_denom = 8;
      jpeg_start_decompress(&cinfo);

      for (j=0;j<BUF_LINES;j++) {
        buf[j]=malloc(sizeof(JSAMPLE)*cinfo.output_width*
                      cinfo.out_color_components);
        if (!buf[j]) no_memory();
      }

      while (cinfo.output_scanline < cinfo.output_height) {
        lines_read = jpeg_read_scanlines(&cinfo, buf,BUF_LINES);
      }

      jpeg_finish_decompress(&cinfo);

      for(j=0;j<BUF_LINES;j++) free(buf[j]);

      if (!global_error_counter) {
        if (quiet_mode < 2) printf(" [OK]\n");
      }
      else {
        if (quiet_mode < 2) printf(" [WARNING]\n");
        if (delete_mode && !del_mode)
          delete_file(current,verbose_mode,quiet_mode);
      }
    }
    else { /* !check_mode */
      if (quiet_mode < 2) printf("\n");
      jpeg_abort_decompress(&cinfo);
    }

    fclose(infile);

  } while (++i<argc || input_from_file);

  jpeg_destroy_decompress(&cinfo);
  free(buf);
  free(MD5);

  return (global_total_errors>0?1:0); /* return 1 if any errors found file(s)
                                         we checked */
}
/*
 * Loads the pool namespace, or re-loads it if the cache has changed.
 *
 * On first use this initializes the AVL pool/tree that caches one
 * config_node_t per pool.  It then asks the kernel for the current pool
 * configs, retrying with a larger destination buffer on ENOMEM, and
 * rebuilds the AVL tree from the returned nvlist.  Returns 0 on success
 * (including "nothing changed"), -1 on failure.
 */
static int
namespace_reload(libzfs_handle_t *hdl)
{
	nvlist_t *config;
	config_node_t *cn;
	nvpair_t *elem;
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	void *cookie;

	if (hdl->libzfs_ns_gen == 0) {
		/*
		 * This is the first time we've accessed the configuration
		 * cache.  Initialize the AVL tree and then fall through to the
		 * common code.
		 */
		if ((hdl->libzfs_ns_avlpool = uu_avl_pool_create("config_pool",
		    sizeof (config_node_t),
		    offsetof(config_node_t, cn_avl),
		    config_node_compare, UU_DEFAULT)) == NULL)
			return (no_memory(hdl));

		if ((hdl->libzfs_ns_avl = uu_avl_create(hdl->libzfs_ns_avlpool,
		    NULL, UU_DEFAULT)) == NULL)
			return (no_memory(hdl));
	}

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	/* Retry loop: grow the dst buffer until the kernel's reply fits. */
	for (;;) {
		/* pass our generation so the kernel can say "unchanged" */
		zc.zc_cookie = hdl->libzfs_ns_gen;
		//if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_CONFIGS, &zc) != 0) {
		if (zfs_ioctl(hdl, ZFS_IOC_POOL_CONFIGS, &zc) != 0) {
			switch (errno) {
			case EEXIST:
				/*
				 * The namespace hasn't changed.
				 */
				zcmd_free_nvlists(&zc);
				return (0);

			case ENOMEM:
				/* reply didn't fit: enlarge and retry */
				if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
					zcmd_free_nvlists(&zc);
					return (-1);
				}
				break;

			default:
				zcmd_free_nvlists(&zc);
				return (zfs_standard_error(hdl, errno,
				    dgettext(TEXT_DOMAIN, "failed to read "
				    "pool configuration")));
			}
		} else {
			/* success: remember the new generation number */
			hdl->libzfs_ns_gen = zc.zc_cookie;
			break;
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &config) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	/*
	 * Clear out any existing configuration information.
	 */
	cookie = NULL;
	while ((cn = uu_avl_teardown(hdl->libzfs_ns_avl, &cookie)) != NULL) {
		nvlist_free(cn->cn_config);
		free(cn->cn_name);
		free(cn);
	}

	/* Rebuild: one AVL node per pool in the kernel's reply. */
	elem = NULL;
	while ((elem = nvlist_next_nvpair(config, elem)) != NULL) {
		nvlist_t *child;
		uu_avl_index_t where;

		if ((cn = zfs_alloc(hdl, sizeof (config_node_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}

		if ((cn->cn_name = zfs_strdup(hdl,
		    nvpair_name(elem))) == NULL) {
			free(cn);
			nvlist_free(config);
			return (-1);
		}

		verify(nvpair_value_nvlist(elem, &child) == 0);
		/* deep-copy the per-pool config so it outlives `config` */
		if (nvlist_dup(child, &cn->cn_config, 0) != 0) {
			free(cn->cn_name);
			free(cn);
			nvlist_free(config);
			return (no_memory(hdl));
		}
		/* names are unique, so the find must fail before insert */
		verify(uu_avl_find(hdl->libzfs_ns_avl, cn, NULL, &where)
		    == NULL);

		uu_avl_insert(hdl->libzfs_ns_avl, cn, where);
	}

	nvlist_free(config);
	return (0);
}
/* * Use libblkid to quickly search for zfs devices */ static int zpool_find_import_blkid(libzfs_handle_t *hdl, pool_list_t *pools) { blkid_cache cache; blkid_dev_iterate iter; blkid_dev dev; const char *devname; nvlist_t *config; int fd, err, num_labels; err = blkid_get_cache(&cache, NULL); if (err != 0) { (void) zfs_error_fmt(hdl, EZFS_BADCACHE, dgettext(TEXT_DOMAIN, "blkid_get_cache() %d"), err); goto err_blkid1; } err = blkid_probe_all(cache); if (err != 0) { (void) zfs_error_fmt(hdl, EZFS_BADCACHE, dgettext(TEXT_DOMAIN, "blkid_probe_all() %d"), err); goto err_blkid2; } iter = blkid_dev_iterate_begin(cache); if (iter == NULL) { (void) zfs_error_fmt(hdl, EZFS_BADCACHE, dgettext(TEXT_DOMAIN, "blkid_dev_iterate_begin()")); goto err_blkid2; } err = blkid_dev_set_search(iter, "TYPE", "zfs_member"); if (err != 0) { (void) zfs_error_fmt(hdl, EZFS_BADCACHE, dgettext(TEXT_DOMAIN, "blkid_dev_set_search() %d"), err); goto err_blkid3; } while (blkid_dev_next(iter, &dev) == 0) { devname = blkid_dev_devname(dev); if ((fd = open(devname, O_RDONLY)) < 0) continue; err = zpool_read_label(fd, &config, &num_labels); (void) close(fd); if (err != 0) { (void) no_memory(hdl); goto err_blkid3; } if (config != NULL) { err = add_config(hdl, pools, devname, 0, num_labels, config); if (err != 0) goto err_blkid3; } } err_blkid3: blkid_dev_iterate_end(iter); err_blkid2: blkid_put_cache(cache); err_blkid1: return (err); }
/*
 * Determines if the pool is in use.  If so, it returns true and the state of
 * the pool as well as the name of the pool.  Both strings are allocated and
 * must be freed by the caller.
 *
 * Reads the vdev label from `fd`, then classifies the device by the pool
 * state recorded in the label.  On return: *inuse says whether the device
 * is in use, and when it is, *namestr (caller frees) and *state describe
 * the owning pool.  Returns 0 on success, -1 on error.
 */
int
zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state,
    char **namestr, boolean_t *inuse)
{
	nvlist_t *config;
	char *name;
	boolean_t ret;
	uint64_t guid, vdev_guid;
	zpool_handle_t *zhp;
	nvlist_t *pool_config;
	uint64_t stateval, isspare;
	aux_cbdata_t cb = { 0 };
	boolean_t isactive;

	*inuse = B_FALSE;

	if (zpool_read_label(fd, &config, NULL) != 0 && errno == ENOMEM) {
		(void) no_memory(hdl);
		return (-1);
	}

	/* no label at all: device is not a pool member */
	if (config == NULL)
		return (0);

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) == 0);

	/*
	 * Spare/l2cache labels carry no pool name/guid; `name` and `guid`
	 * are only valid below when stateval is one of the other states.
	 */
	if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);
	}

	switch (stateval) {
	case POOL_STATE_EXPORTED:
		/*
		 * A pool with an exported state may in fact be imported
		 * read-only, so check the in-core state to see if it's
		 * active and imported read-only.  If it is, set
		 * its state to active.
		 */
		if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&
		    (zhp = zpool_open_canfail(hdl, name)) != NULL) {
			if (zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))
				stateval = POOL_STATE_ACTIVE;

			/*
			 * All we needed the zpool handle for is the
			 * readonly prop check.
			 */
			zpool_close(zhp);
		}

		ret = B_TRUE;
		break;

	case POOL_STATE_ACTIVE:
		/*
		 * For an active pool, we have to determine if it's really part
		 * of a currently active pool (in which case the pool will exist
		 * and the guid will be the same), or whether it's part of an
		 * active pool that was disconnected without being explicitly
		 * exported.
		 */
		if (pool_active(hdl, name, guid, &isactive) != 0) {
			nvlist_free(config);
			return (-1);
		}

		if (isactive) {
			/*
			 * Because the device may have been removed while
			 * offlined, we only report it as active if the vdev is
			 * still present in the config.  Otherwise, pretend like
			 * it's not in use.
			 */
			if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
			    (pool_config = zpool_get_config(zhp, NULL))
			    != NULL) {
				nvlist_t *nvroot;

				verify(nvlist_lookup_nvlist(pool_config,
				    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
				ret = find_guid(nvroot, vdev_guid);
			} else {
				ret = B_FALSE;
			}

			/*
			 * If this is an active spare within another pool, we
			 * treat it like an unused hot spare.  This allows the
			 * user to create a pool with a hot spare that currently
			 * in use within another pool.  Since we return B_TRUE,
			 * libdiskmgt will continue to prevent generic consumers
			 * from using the device.
			 */
			if (ret && nvlist_lookup_uint64(config,
			    ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
				stateval = POOL_STATE_SPARE;

			if (zhp != NULL)
				zpool_close(zhp);
		} else {
			stateval = POOL_STATE_POTENTIALLY_ACTIVE;
			ret = B_TRUE;
		}
		break;

	case POOL_STATE_SPARE:
		/*
		 * For a hot spare, it can be either definitively in use, or
		 * potentially active.  To determine if it's in use, we iterate
		 * over all pools in the system and search for one with a spare
		 * with a matching guid.
		 *
		 * Due to the shared nature of spares, we don't actually report
		 * the potentially active case as in use.  This means the user
		 * can freely create pools on the hot spares of exported pools,
		 * but to do otherwise makes the resulting code complicated, and
		 * we end up having to deal with this case anyway.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_SPARES;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	case POOL_STATE_L2CACHE:

		/*
		 * Check if any pool is currently using this l2cache device.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_L2CACHE;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	default:
		ret = B_FALSE;
	}


	if (ret) {
		/* report the owning pool's name and (possibly updated) state */
		if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
			if (cb.cb_zhp)
				zpool_close(cb.cb_zhp);
			nvlist_free(config);
			return (-1);
		}
		*state = (pool_state_t)stateval;
	}

	if (cb.cb_zhp)
		zpool_close(cb.cb_zhp);

	nvlist_free(config);
	*inuse = ret;
	return (0);
}
/*
 * Given a cache file, return the contents as a list of importable pools.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 *
 * Reads and unpacks the packed-nvlist cache file, then filters out pools
 * that are already active, refreshing each remaining config from the
 * kernel before adding it to the returned nvlist.  Returns NULL on error
 * (error already reported on `hdl`).
 */
nvlist_t *
zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
    char *poolname, uint64_t guid)
{
	char *buf;
	int fd;
	struct stat statbuf;
	nvlist_t *raw, *src, *dst;
	nvlist_t *pools;
	nvpair_t *elem;
	char *name;
	uint64_t this_guid;
	boolean_t active;

	/* caller must filter by name OR guid, never both */
	verify(poolname == NULL || guid == 0);

	if ((fd = open(cachefile, O_RDONLY)) < 0) {
		zfs_error_aux(hdl, "%s", strerror(errno));
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to open cache file"));
		return (NULL);
	}

	if (fstat(fd, &statbuf) != 0) {
		zfs_error_aux(hdl, "%s", strerror(errno));
		(void) close(fd);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
		return (NULL);
	}

	if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {
		(void) close(fd);
		return (NULL);
	}

	/* slurp the whole file in one read */
	if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
		(void) close(fd);
		free(buf);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "failed to read cache file contents"));
		return (NULL);
	}

	(void) close(fd);

	if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
		free(buf);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "invalid or corrupt cache file contents"));
		return (NULL);
	}

	free(buf);

	/*
	 * Go through and get the current state of the pools and refresh their
	 * state.
	 */
	if (nvlist_alloc(&pools, 0, 0) != 0) {
		(void) no_memory(hdl);
		nvlist_free(raw);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
		src = fnvpair_value_nvlist(elem);

		name = fnvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME);
		if (poolname != NULL && strcmp(poolname, name) != 0)
			continue;

		this_guid = fnvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID);
		if (guid != 0 && guid != this_guid)
			continue;

		if (pool_active(hdl, name, this_guid, &active) != 0) {
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		/* already-imported pools can't be imported again: skip */
		if (active)
			continue;

		/* remember which cachefile this config came from */
		if (nvlist_add_string(src, ZPOOL_CONFIG_CACHEFILE,
		    cachefile) != 0) {
			(void) no_memory(hdl);
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		/* ask the kernel for the up-to-date view of this config */
		if ((dst = refresh_config(hdl, src)) == NULL) {
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
			(void) no_memory(hdl);
			nvlist_free(dst);
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}
		nvlist_free(dst);
	}

	nvlist_free(raw);
	return (pools);
}
/*
 * zfs_crypto_zckey
 *
 * Called for creating new filesystems and clones and receiving.
 *
 * For encryption != off get the key material.
 *
 * Works out the effective encryption/keysource for the new dataset
 * (explicit property, inherited from the parent, or the stream value on
 * receive), then either records which ancestor's wrapping key to inherit
 * (ZFS_IOC_CRYPTO_KEY_INHERIT) or gathers key material and requests a
 * key load (ZFS_IOC_CRYPTO_KEY_LOAD).  Returns 0 on success, -1 on
 * failure with the error reported on `hdl` (except the no-prompt case,
 * which sets errno = ENOTTY for the caller).
 *
 * Fixes vs. previous revision:
 *  - NULL-check the handle returned by make_dataset_handle() in the
 *    receive-into-existing-dataset path before dereferencing it.
 *  - The "unable to prompt" path now releases pzhp/keysource instead of
 *    returning directly (resource leak); errno is still ENOTTY on return.
 *  - %llu format arguments are cast to u_longlong_t per convention.
 */
int
zfs_crypto_zckey(libzfs_handle_t *hdl, zfs_crypto_zckey_t cmd,
    nvlist_t *props, zfs_cmd_t *zc)
{
	uint64_t crypt = ZIO_CRYPT_INHERIT, pcrypt = ZIO_CRYPT_DEFAULT;
	char *keysource = NULL;
	int ret = 0;
	int keystatus;
	zfs_handle_t *pzhp = NULL;
	boolean_t inherit_crypt = B_TRUE;
	boolean_t inherit_keysource = B_TRUE;
	boolean_t recv_existing = B_FALSE;
	boolean_t recv_clone = B_FALSE;
	boolean_t keysource_free = B_FALSE;
	zprop_source_t propsrctype = ZPROP_SRC_DEFAULT;
	char propsrc[ZFS_MAXNAMELEN];
	char errbuf[1024];
	char target[MAXNAMELEN];
	char parent[MAXNAMELEN];
	char *strval;

	zfs_cmd_target_dsname(zc, cmd, target, sizeof (target));
	if (zfs_parent_name(target, parent, sizeof (parent)) != 0)
		parent[0] = '\0';

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot create '%s'"), target);

	/*
	 * An explicit encryption/keysource in props overrides inheritance.
	 * encryption may arrive as a string (user input) or as an index.
	 */
	if (props != NULL) {
		if (nvlist_lookup_string(props,
		    zfs_prop_to_name(ZFS_PROP_ENCRYPTION), &strval) == 0) {
			(void) zfs_prop_string_to_index(ZFS_PROP_ENCRYPTION,
			    strval, &crypt);
			inherit_crypt = B_FALSE;
		} else if (nvlist_lookup_uint64(props,
		    zfs_prop_to_name(ZFS_PROP_ENCRYPTION), &crypt) == 0) {
			inherit_crypt = B_FALSE;
		} else {
			inherit_crypt = B_TRUE;
		}
		if (nvlist_lookup_string(props,
		    zfs_prop_to_name(ZFS_PROP_KEYSOURCE), &keysource) == 0) {
			inherit_keysource = B_FALSE;
		}
	}

	/* Locate the dataset whose properties we inherit from. */
	if (cmd == ZFS_CRYPTO_CREATE) {
		pzhp = make_dataset_handle(hdl, parent);
	} else if (cmd == ZFS_CRYPTO_CLONE) {
		/* a clone's encryption comes from its origin snapshot */
		zfs_handle_t *szhp = make_dataset_handle(hdl, zc->zc_value);

		if (szhp == NULL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "parent not found"));
			(void) zfs_error(hdl, EZFS_NOENT, errbuf);
			ret = -1;
			goto out;
		}
		crypt = zfs_prop_get_int(szhp, ZFS_PROP_ENCRYPTION);
		zfs_close(szhp);
		pzhp = make_dataset_handle(hdl, parent);
	} else if (cmd == ZFS_CRYPTO_RECV) {
		if (zfs_dataset_exists(hdl, target, ZFS_TYPE_DATASET)) {
			/*
			 * Receiving into an existing dataset: the stream's
			 * encryption must match what is already on disk.
			 */
			pzhp = make_dataset_handle(hdl, target);
			if (pzhp == NULL) {
				/* FIX: was dereferenced without a check */
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "parent not found"));
				(void) zfs_error(hdl, EZFS_NOENT, errbuf);
				ret = -1;
				goto out;
			}
			pcrypt = zfs_prop_get_int(pzhp, ZFS_PROP_ENCRYPTION);
			if (crypt != pcrypt && crypt != ZIO_CRYPT_INHERIT) {
				const char *stream_crypt_str = NULL;
				const char *pcrypt_str = NULL;

				(void) zfs_prop_index_to_string(
				    ZFS_PROP_ENCRYPTION, pcrypt,
				    &pcrypt_str);
				(void) zfs_prop_index_to_string(
				    ZFS_PROP_ENCRYPTION, crypt,
				    &stream_crypt_str);
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "stream encryption '%s'(%llu) differs "
				    "from receiving dataset value '%s'(%llu)"),
				    stream_crypt_str, (u_longlong_t)crypt,
				    pcrypt_str, (u_longlong_t)pcrypt);
				ret = -1;
				goto out;
			}
			inherit_crypt = B_TRUE;
			inherit_keysource = B_TRUE;
			recv_existing = B_TRUE;
		} else {
			/* zc_string carries the clone origin, if any */
			if (strlen(zc->zc_string) != 0) {
				pzhp = make_dataset_handle(hdl, zc->zc_string);
				recv_clone = B_TRUE;
			} else {
				pzhp = make_dataset_handle(hdl, parent);
			}
		}
	}

	if (cmd != ZFS_CRYPTO_PCREATE) {
		if (pzhp == NULL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "parent not found"));
			(void) zfs_error(hdl, EZFS_NOENT, errbuf);
			ret = -1;
			goto out;
		}
		pcrypt = zfs_prop_get_int(pzhp, ZFS_PROP_ENCRYPTION);
	}

	/* Can't turn encryption off below an encrypted parent. */
	if (pcrypt != ZIO_CRYPT_OFF && crypt == ZIO_CRYPT_OFF) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
		    "encryption value. dataset must be encrypted."));
		(void) zfs_error(hdl, EZFS_KEYERR, errbuf);
		ret = -1;
		goto out;
	}

	if (crypt == ZIO_CRYPT_INHERIT) {
		crypt = pcrypt;
	}

	/*
	 * If we have nothing to do then bail out, but make one last check
	 * that keysource wasn't specified when there is no crypto going on.
	 */
	if (crypt == ZIO_CRYPT_OFF && !inherit_keysource) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "keysource "
		    "can not be specified when encryption is off."));
		(void) zfs_error(hdl, EZFS_KEYERR, errbuf);
		ret = -1;
		goto out;
	} else if (crypt == ZIO_CRYPT_OFF) {
		ret = 0;
		goto out;
	}

	/*
	 * Need to pass down the inherited crypt value so that
	 * dsl_crypto_key_gen() can see the same that we saw.
	 */
	zc->zc_crypto.zic_crypt = crypt;
	zc->zc_crypto.zic_clone_newkey = hdl->libzfs_crypt.zc_clone_newkey;

	/*
	 * Here we have encryption on so we need to find a valid keysource
	 * property.
	 *
	 * Now lets see if we have an explicit setting for keysource and
	 * we have validate it; otherwise, if we inherit then it is already
	 * validated.
	 */
	if (!inherit_keysource) {
		if (!zfs_valid_keysource(keysource)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid keysource \"%s\""), keysource);
			(void) zfs_error(hdl, EZFS_KEYERR, errbuf);
			ret = -1;
			goto out;
		}
		/*
		 * If keysource is local then encryption has to be as well
		 * otherwise we could end up with the wrong sized keys.
		 */
		if (inherit_crypt) {
			VERIFY(nvlist_add_uint64(props,
			    zfs_prop_to_name(ZFS_PROP_ENCRYPTION),
			    crypt) == 0);
			VERIFY(nvlist_add_uint64(props,
			    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
			    ZIO_CHECKSUM_SHA256_MAC) == 0);
		}
	} else {
		/* Get the already validated keysource from our parent */
		keysource = zfs_alloc(hdl, ZFS_MAXNAMELEN);
		if (keysource == NULL) {
			ret = no_memory(hdl);
			goto out;
		}
		keysource_free = B_TRUE;
		if (pzhp != NULL && zfs_prop_get(pzhp, ZFS_PROP_KEYSOURCE,
		    keysource, ZFS_MAXNAMELEN, &propsrctype, propsrc,
		    sizeof (propsrc), FALSE) != 0) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "keysource must be provided."));
			(void) zfs_error(hdl, EZFS_KEYERR, errbuf);
			ret = -1;
			goto out;
		}

		/*
		 * Work out which dataset actually owns the wrapping key we
		 * will inherit; propsrc ends up naming that dataset.
		 */
		if (recv_existing) {
			(void) strlcpy(propsrc, target, sizeof (propsrc));
		} else if (recv_clone) {
			(void) strlcpy(propsrc, zc->zc_string,
			    sizeof (propsrc));
		} else if (propsrctype == ZPROP_SRC_LOCAL ||
		    propsrctype == ZPROP_SRC_RECEIVED) {
			(void) strlcpy(propsrc, parent, sizeof (propsrc));
		} else if (propsrctype == ZPROP_SRC_DEFAULT &&
		    pcrypt == ZIO_CRYPT_OFF) {
			/*
			 * "Default" to "passphrase,prompt".  The obvious
			 * thing to do would be to set this in zfs_prop.c
			 * as the property default.  However that doesn't
			 * work here because we don't want keysource set
			 * for datasets that have encryption=off.  If we
			 * ever change the default to encryption=on then
			 * the default of keysource can change too.
			 * This is needed because of how inheritance happens
			 * with defaulted properties, they show up as
			 * "default" not "inherit" but we need "inherit"
			 * to find the wrapping key if we are actually
			 * inheriting keysource.
			 */
			inherit_keysource = B_FALSE;
			if (props == NULL) {
				VERIFY(0 == nvlist_alloc(&props,
				    NV_UNIQUE_NAME, 0));
			}
			(void) strlcpy(keysource, "passphrase,prompt",
			    ZFS_MAXNAMELEN);
			VERIFY(nvlist_add_string(props,
			    zfs_prop_to_name(ZFS_PROP_KEYSOURCE),
			    keysource) == 0);
			VERIFY(nvlist_add_uint64(props,
			    zfs_prop_to_name(ZFS_PROP_ENCRYPTION),
			    crypt) == 0);
			VERIFY(nvlist_add_uint64(props,
			    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
			    ZIO_CHECKSUM_SHA256_MAC) == 0);
			goto load_key;
		} else if (propsrctype == ZPROP_SRC_DEFAULT &&
		    pcrypt != ZIO_CRYPT_OFF) {
			/* encrypted parent with a defaulted keysource is
			 * an invariant violation */
			abort();
#if 0 // FIXME
		} else if (strcmp(propsrc, ZONE_INVISIBLE_SOURCE) == 0) {
			/*
			 * Assume key is available and handle failure ioctl
			 * ENOKEY errors later.
			 */
			zc->zc_crypto.zic_cmd = ZFS_IOC_CRYPTO_KEY_INHERIT;
			(void) strlcpy(zc->zc_crypto.zic_inherit_dsname,
			    propsrc,
			    sizeof (zc->zc_crypto.zic_inherit_dsname));
			ret = 0;
			goto out;
#endif
		} else if (propsrctype != ZPROP_SRC_DEFAULT) {
			/* re-point pzhp at the keysource owner */
			if (pzhp != NULL)
				zfs_close(pzhp);
			VERIFY((pzhp = make_dataset_handle(hdl,
			    propsrc)) != 0);
		}

		keystatus = zfs_prop_get_int(pzhp, ZFS_PROP_KEYSTATUS);

		/*
		 * AVAILABLE we are done other than filling in who we
		 * are inheriting the wrapping key from.
		 *
		 * UNAVAILABLE we need to load the key of a higher level
		 * dataset.
		 */
		if (keystatus == ZFS_CRYPT_KEY_AVAILABLE) {
			zc->zc_crypto.zic_cmd = ZFS_IOC_CRYPTO_KEY_INHERIT;
			(void) strlcpy(zc->zc_crypto.zic_inherit_dsname,
			    propsrc,
			    sizeof (zc->zc_crypto.zic_inherit_dsname));
			ret = 0;
			goto out;
		} else if (keystatus == ZFS_CRYPT_KEY_UNAVAILABLE) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "zfs key -l %s required."), parent);
			(void) zfs_error(hdl, EZFS_KEYERR, errbuf);
			ret = -1;
			goto out;
		}
	}

load_key:
	if (!zfs_can_prompt_if_needed(keysource)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "unable to prompt for key material keysource = \"%s\"\n"),
		    keysource);
		/*
		 * FIX: release resources (was a direct return that leaked
		 * pzhp and keysource).  Clean up before setting errno so
		 * the caller still sees ENOTTY.
		 */
		if (pzhp)
			zfs_close(pzhp);
		if (keysource_free)
			free(keysource);
		errno = ENOTTY;
		return (-1);
	}

	ret = key_hdl_to_zc(hdl, NULL, keysource, crypt, zc, cmd);
	if (ret != 0) {
		ret = -1;
		(void) zfs_error(hdl, EZFS_KEYERR, errbuf);
		goto out;
	}

	zc->zc_crypto.zic_cmd = ZFS_IOC_CRYPTO_KEY_LOAD;
	ret = 0;

out:
	if (pzhp)
		zfs_close(pzhp);
	if (keysource_free)
		free(keysource);

	return (ret);
}
/*
 * Convert our list of pools into the definitive set of configurations.  We
 * start by picking the best config for each toplevel vdev.  Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool.  We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 *
 * Returns an nvlist keyed by pool name (caller frees), or NULL after
 * reporting the failure on `hdl`.
 */
static nvlist_t *
get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok,
    nvlist_t *policy)
{
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	nvlist_t *ret = NULL, *config = NULL, *tmp = NULL, *nvtop, *nvroot;
	nvlist_t **spares, **l2cache;
	uint_t i, nspares, nl2cache;
	boolean_t config_seen;
	uint64_t best_txg;
	char *name, *hostname = NULL;
	uint64_t guid;
	uint_t children = 0;
	nvlist_t **child = NULL;
	uint_t holes;
	uint64_t *hole_array, max_id;
	uint_t c;
	boolean_t isactive;
	uint64_t hostid;
	nvlist_t *nvl;
	boolean_t valid_top_config = B_FALSE;

	if (nvlist_alloc(&ret, 0, 0) != 0)
		goto nomem;

	/* One pass per pool found during scanning. */
	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		uint64_t id, max_txg = 0;

		if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		config_seen = B_FALSE;

		/*
		 * Iterate over all toplevel vdevs.  Grab the pool configuration
		 * from the first one we find, and then go through the rest and
		 * add them as necessary to the 'vdevs' member of the config.
		 */
		for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {

			/*
			 * Determine the best configuration for this vdev by
			 * selecting the config with the latest transaction
			 * group.
			 */
			best_txg = 0;
			for (ce = ve->ve_configs; ce != NULL;
			    ce = ce->ce_next) {

				if (ce->ce_txg > best_txg) {
					tmp = ce->ce_config;
					best_txg = ce->ce_txg;
				}
			}

			/*
			 * We rely on the fact that the max txg for the
			 * pool will contain the most up-to-date information
			 * about the valid top-levels in the vdev namespace.
			 */
			if (best_txg > max_txg) {
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_VDEV_CHILDREN,
				    DATA_TYPE_UINT64);
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_HOLE_ARRAY,
				    DATA_TYPE_UINT64_ARRAY);

				max_txg = best_txg;
				hole_array = NULL;
				holes = 0;
				max_id = 0;
				valid_top_config = B_FALSE;

				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VDEV_CHILDREN,
				    &max_id) == 0) {
					verify(nvlist_add_uint64(config,
					    ZPOOL_CONFIG_VDEV_CHILDREN,
					    max_id) == 0);
					valid_top_config = B_TRUE;
				}

				if (nvlist_lookup_uint64_array(tmp,
				    ZPOOL_CONFIG_HOLE_ARRAY, &hole_array,
				    &holes) == 0) {
					verify(nvlist_add_uint64_array(config,
					    ZPOOL_CONFIG_HOLE_ARRAY,
					    hole_array, holes) == 0);
				}
			}

			if (!config_seen) {
				/*
				 * Copy the relevant pieces of data to the pool
				 * configuration:
				 *
				 *	version
				 *	pool guid
				 *	name
				 *	pool txg (if available)
				 *	comment (if available)
				 *	pool state
				 *	hostid (if available)
				 *	hostname (if available)
				 */
				uint64_t state, version, pool_txg;
				char *comment = NULL;

				version = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VERSION);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_VERSION, version);
				guid = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_GUID);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_GUID, guid);
				name = fnvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_POOL_NAME);
				fnvlist_add_string(config,
				    ZPOOL_CONFIG_POOL_NAME, name);

				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_TXG, &pool_txg) == 0)
					fnvlist_add_uint64(config,
					    ZPOOL_CONFIG_POOL_TXG, pool_txg);

				if (nvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_COMMENT, &comment) == 0)
					fnvlist_add_string(config,
					    ZPOOL_CONFIG_COMMENT, comment);

				state = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_STATE);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_STATE, state);

				hostid = 0;
				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
					fnvlist_add_uint64(config,
					    ZPOOL_CONFIG_HOSTID, hostid);
					hostname = fnvlist_lookup_string(tmp,
					    ZPOOL_CONFIG_HOSTNAME);
					fnvlist_add_string(config,
					    ZPOOL_CONFIG_HOSTNAME, hostname);
				}

				config_seen = B_TRUE;
			}

			/*
			 * Add this top-level vdev to the child array.
			 */
			verify(nvlist_lookup_nvlist(tmp,
			    ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
			verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
			    &id) == 0);

			/* grow the child array on demand, index == vdev id */
			if (id >= children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (id + 1) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = id + 1;
			}
			if (nvlist_dup(nvtop, &child[id], 0) != 0)
				goto nomem;

		}

		/*
		 * If we have information about all the top-levels then
		 * clean up the nvlist which we've constructed. This
		 * means removing any extraneous devices that are
		 * beyond the valid range or adding devices to the end
		 * of our array which appear to be missing.
		 */
		if (valid_top_config) {
			if (max_id < children) {
				for (c = max_id; c < children; c++)
					nvlist_free(child[c]);
				children = max_id;
			} else if (max_id > children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (max_id) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = max_id;
			}
		}

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		/*
		 * The vdev namespace may contain holes as a result of
		 * device removal. We must add them back into the vdev
		 * tree before we process any missing devices.
		 */
		if (holes > 0) {
			ASSERT(valid_top_config);

			for (c = 0; c < children; c++) {
				nvlist_t *holey;

				if (child[c] != NULL ||
				    !vdev_is_hole(hole_array, holes, c))
					continue;

				if (nvlist_alloc(&holey, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;

				/*
				 * Holes in the namespace are treated as
				 * "hole" top-level vdevs and have a
				 * special flag set on them.
				 */
				if (nvlist_add_string(holey,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_HOLE) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(holey);
					goto nomem;
				}
				child[c] = holey;
			}
		}

		/*
		 * Look for any missing top-level vdevs.  If this is the case,
		 * create a faked up 'missing' vdev as a placeholder.  We cannot
		 * simply compress the child array, because the kernel performs
		 * certain checks to make sure the vdev IDs match their location
		 * in the configuration.
		 */
		for (c = 0; c < children; c++) {
			if (child[c] == NULL) {
				nvlist_t *missing;

				if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;

				if (nvlist_add_string(missing,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_MISSING) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(missing);
					goto nomem;
				}
				child[c] = missing;
			}
		}

		/*
		 * Put all of this pool's top-level vdevs into a root vdev.
		 */
		if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
		    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
		    child, children) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		/* child nvlists were copied into nvroot; release originals */
		for (c = 0; c < children; c++)
			nvlist_free(child[c]);
		free(child);
		children = 0;
		child = NULL;

		/*
		 * Go through and fix up any paths and/or devids based on our
		 * known list of vdev GUID -> path mappings.
		 */
		if (fix_paths(nvroot, pl->names) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		/*
		 * Add the root vdev to this pool's configuration.
		 */
		if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    nvroot) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}
		nvlist_free(nvroot);

		/*
		 * zdb uses this path to report on active pools that were
		 * imported or created using -R.
		 */
		if (active_ok)
			goto add_pool;

		/*
		 * Determine if this pool is currently active, in which case we
		 * can't actually import it.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		if (pool_active(hdl, name, guid, &isactive) != 0)
			goto error;

		if (isactive) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		if (policy != NULL) {
			if (nvlist_add_nvlist(config, ZPOOL_LOAD_POLICY,
			    policy) != 0)
				goto nomem;
		}

		/* ask the kernel for a validated view; skip pool on failure */
		if ((nvl = refresh_config(hdl, config)) == NULL) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		nvlist_free(config);
		config = nvl;

		/*
		 * Go through and update the paths for spares, now that we have
		 * them.
		 */
		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &nvroot) == 0);
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &spares, &nspares) == 0) {
			for (i = 0; i < nspares; i++) {
				if (fix_paths(spares[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Update the paths for l2cache devices.
		 */
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2cache, &nl2cache) == 0) {
			for (i = 0; i < nl2cache; i++) {
				if (fix_paths(l2cache[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Restore the original information read from the actual label.
		 * (hostid/hostname carry the values captured from this pool's
		 * best label earlier in this iteration.)
		 */
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
		    DATA_TYPE_UINT64);
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
		    DATA_TYPE_STRING);
		if (hostid != 0) {
			verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
			    hostid) == 0);
			verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
			    hostname) == 0);
		}

add_pool:
		/*
		 * Add this pool to the list of configs.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		if (nvlist_add_nvlist(ret, name, config) != 0)
			goto nomem;

		nvlist_free(config);
		config = NULL;
	}

	return (ret);

nomem:
	(void) no_memory(hdl);
error:
	nvlist_free(config);
	nvlist_free(ret);
	for (c = 0; c < children; c++)
		nvlist_free(child[c]);
	free(child);

	return (NULL);
}
/*
 * Given a list of directories to search, find all pools stored on disk.  This
 * includes partial pools which are not available to import.  If no args are
 * given (argc is 0), then the default directory (/dev/dsk) is searched.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 *
 * Returns an nvlist of discovered pool configs (keyed by pool name), or
 * NULL on error (error already reported via the libzfs handle).  The caller
 * owns the returned nvlist and must free it with nvlist_free().
 */
static nvlist_t *
zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
{
	int i, num_labels, dirs = iarg->paths;
	DIR *dirp = NULL;
	struct dirent *dp;
	char path[MAXPATHLEN];
	char *end, **dir = iarg->path;
	size_t pathleft;
	struct stat statbuf;
	nvlist_t *ret = NULL, *config;
	int fd;
	pool_list_t pools = { 0 };
	pool_entry_t *pe, *penext;
	vdev_entry_t *ve, *venext;
	config_entry_t *ce, *cenext;
	name_entry_t *ne, *nenext;

	/* Caller may target a pool by name or by guid, but never both. */
	verify(iarg->poolname == NULL || iarg->guid == 0);

	if (dirs == 0) {
#ifdef HAVE_LIBBLKID
		/* Use libblkid to scan all device for their type */
		if (zpool_find_import_blkid(hdl, &pools) == 0)
			goto skip_scanning;

		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid failure falling back "
		    "to manual probing"));
#endif /* HAVE_LIBBLKID */
		/* Fall back to the built-in default search path list. */
		dir = zpool_default_import_path;
		dirs = DEFAULT_IMPORT_PATH_SIZE;
	}

	/*
	 * Go through and read the label configuration information from every
	 * possible device, organizing the information according to pool GUID
	 * and toplevel GUID.
	 */
	for (i = 0; i < dirs; i++) {
		char *rdsk;
		int dfd;

		/* use realpath to normalize the path */
		if (realpath(dir[i], path) == 0) {
			/* it is safe to skip missing search paths */
			if (errno == ENOENT)
				continue;

			zfs_error_aux(hdl, strerror(errno));
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);
			goto error;
		}
		/* Append a trailing '/' so device names can be tacked on. */
		end = &path[strlen(path)];
		*end++ = '/';
		*end = 0;
		pathleft = &path[sizeof (path)] - end;

		/*
		 * Using raw devices instead of block devices when we're
		 * reading the labels skips a bunch of slow operations during
		 * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
		 */
		if (strcmp(path, "/dev/dsk/") == 0)
			rdsk = "/dev/rdsk/";
		else
			rdsk = path;

		if ((dfd = open(rdsk, O_RDONLY)) < 0 ||
		    (dirp = fdopendir(dfd)) == NULL) {
			zfs_error_aux(hdl, strerror(errno));
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
			    rdsk);
			goto error;
		}

		/*
		 * This is not MT-safe, but we have no MT consumers of libzfs
		 */
		while ((dp = readdir(dirp)) != NULL) {
			const char *name = dp->d_name;

			/* Skip "." and ".." entries. */
			if (name[0] == '.' &&
			    (name[1] == 0 ||
			    (name[1] == '.' && name[2] == 0)))
				continue;

			/*
			 * Skip checking devices with well known prefixes:
			 * watchdog - A special close is required to avoid
			 *            triggering it and resetting the system.
			 * fuse     - Fuse control device.
			 * ppp      - Generic PPP driver.
			 * tty*     - Generic serial interface.
			 * vcs*     - Virtual console memory.
			 * parport* - Parallel port interface.
			 * lp*      - Printer interface.
			 * fd*      - Floppy interface.
			 * hpet     - High Precision Event Timer, crashes qemu
			 *            when accessed from a virtual machine.
			 * core     - Symlink to /proc/kcore, causes a crash
			 *            when access from Xen dom0.
			 */
			if ((strncmp(name, "watchdog", 8) == 0) ||
			    (strncmp(name, "fuse", 4) == 0) ||
			    (strncmp(name, "ppp", 3) == 0) ||
			    (strncmp(name, "tty", 3) == 0) ||
			    (strncmp(name, "vcs", 3) == 0) ||
			    (strncmp(name, "parport", 7) == 0) ||
			    (strncmp(name, "lp", 2) == 0) ||
			    (strncmp(name, "fd", 2) == 0) ||
			    (strncmp(name, "hpet", 4) == 0) ||
#ifdef __APPLE__
			    (strncmp(name, "pty", 3) == 0) || // lots, skip for speed
			    (strncmp(name, "com", 3) == 0) || // /dev/com_digidesign_semiface
#endif
			    (strncmp(name, "core", 4) == 0))
				continue;

			/*
			 * Ignore failed stats.  We only want regular
			 * files and block devices.
			 */
			if ((fstatat64(dfd, name, &statbuf, 0) != 0) ||
			    (!S_ISREG(statbuf.st_mode) &&
			    !S_ISBLK(statbuf.st_mode)))
				continue;

#ifdef __APPLE__
			/* It is desirable to skip optical media as well, as they are
			 * also called /dev/diskX */
			if (is_optical_media((char *)name))
				continue;
#endif

			if ((fd = openat64(dfd, name, O_RDONLY)) < 0)
				continue;

			/*
			 * A block device that cannot report its block size
			 * (or reports zero) cannot hold a readable label;
			 * skip it rather than feed it to zpool_read_label().
			 */
			int32_t blksz = 0;
			if (S_ISBLK(statbuf.st_mode) &&
			    (ioctl(fd, DKIOCGETBLOCKSIZE, &blksz) ||
			    blksz == 0)) {
				if (strncmp(name, "vn", 2) != 0)
					fprintf(stderr,
					    "device '%s' failed to report blocksize -- skipping\r\n",
					    name);
				close(fd);
				continue;
			}

#ifdef __APPLE__
			/*
			 * Some devices hang indefinitely on read; arm a
			 * 20-second alarm and longjmp out of the label read
			 * if it fires, skipping the offending device.
			 */
			struct sigaction sact;
			sigemptyset(&sact.sa_mask);
			sact.sa_flags = 0;
			sact.sa_handler = signal_alarm;
			sigaction(SIGALRM, &sact, NULL);

			if (setjmp(buffer) != 0) {
				printf("ZFS: Warning, timeout reading device '%s'\n", name);
				close(fd);
				continue;
			}

			alarm(20);
#endif

			/*
			 * BUG FIX: the label count must be captured here --
			 * the previous code passed NULL and then handed the
			 * uninitialized 'num_labels' to add_config() below,
			 * which is undefined behavior and corrupts the
			 * best-config selection ordering.
			 */
			if ((zpool_read_label(fd, &config, &num_labels)) != 0) {
#ifdef __APPLE__
				alarm(0);
#endif
				(void) close(fd);
				(void) no_memory(hdl);
				goto error;
			}
#ifdef __APPLE__
			alarm(0);
#endif

			(void) close(fd);

			if (config != NULL) {
				boolean_t matched = B_TRUE;
				char *pname;

				/*
				 * When the caller asked for a specific pool
				 * (by name or guid), drop configs that do
				 * not match.
				 */
				if ((iarg->poolname != NULL) &&
				    (nvlist_lookup_string(config,
				    ZPOOL_CONFIG_POOL_NAME, &pname) == 0)) {
					if (strcmp(iarg->poolname, pname))
						matched = B_FALSE;
				} else if (iarg->guid != 0) {
					uint64_t this_guid;

					matched = nvlist_lookup_uint64(config,
					    ZPOOL_CONFIG_POOL_GUID,
					    &this_guid) == 0 &&
					    iarg->guid == this_guid;
				}
				if (!matched) {
					nvlist_free(config);
					config = NULL;
					continue;
				}
				/* use the non-raw path for the config */
				(void) strlcpy(end, name, pathleft);
				if (add_config(hdl, &pools, path, i+1,
				    num_labels, config))
					goto error;
			}
		}

		(void) closedir(dirp);
		dirp = NULL;
	}

#ifdef HAVE_LIBBLKID
skip_scanning:
#endif
	/* Assemble the per-device labels into complete pool configs. */
	ret = get_configs(hdl, &pools, iarg->can_be_active);

error:
	/* Tear down the intermediate pool/vdev/config/name lists. */
	for (pe = pools.pools; pe != NULL; pe = penext) {
		penext = pe->pe_next;
		for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
			venext = ve->ve_next;
			for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
				cenext = ce->ce_next;
				if (ce->ce_config)
					nvlist_free(ce->ce_config);
				free(ce);
			}
			free(ve);
		}
		free(pe);
	}

	for (ne = pools.names; ne != NULL; ne = nenext) {
		nenext = ne->ne_next;
		if (ne->ne_name)
			free(ne->ne_name);
		free(ne);
	}

	if (dirp)
		(void) closedir(dirp);

	return (ret);
}