/*
 * Print the pool's log vdevs.
 * Logs are recorded as top-level vdevs in the main pool child array
 * but with "is_log" set to 1.  We use either
 * lzwu_zpool_print_status_config() or lzwu_print_import_config() to
 * print the top-level logs, and any log children (e.g. mirrored slogs)
 * are printed recursively - which works because only the top-level
 * vdev is marked "is_log".
 */
void lzwu_print_logs(libzfs_handle_t *p_zhd, zpool_handle_t *zhp, nvlist_t *nv,
    int namewidth, boolean_t verbose)
{
	nvlist_t **ppnv_kids;
	uint_t n_kids;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &ppnv_kids, &n_kids) != 0)
		return;

	printf("\tlogs\n");

	for (uint_t i = 0; i < n_kids; i++) {
		uint64_t is_log = B_FALSE;

		(void) nvlist_lookup_uint64(ppnv_kids[i], ZPOOL_CONFIG_IS_LOG,
		    &is_log);
		if (!is_log)
			continue;

		char *psz_name = zpool_vdev_name(p_zhd, zhp, ppnv_kids[i],
		    B_TRUE);
		if (verbose)
			lzwu_zpool_print_status_config(p_zhd, zhp, psz_name,
			    ppnv_kids[i], namewidth, 2, B_FALSE);
		else
			lzwu_print_import_config(p_zhd, psz_name, ppnv_kids[i],
			    namewidth, 2);
		free(psz_name);
	}
}
/* * given the path to a zvol, return the cXtYdZ name * returns < 0 on error, 0 if it isn't a zvol, > 1 on success */ static int ztop(char *arg, char *diskname) { zpool_handle_t *zpool_handle; nvlist_t *config, *nvroot; nvlist_t **child; uint_t children; libzfs_handle_t *lzfs; char *vname; char *p; char pool_name[MAXPATHLEN]; if (strncmp(arg, "/dev/zvol/dsk/", 14)) { return (0); } arg += 14; (void) strncpy(pool_name, arg, MAXPATHLEN); if ((p = strchr(pool_name, '/')) != NULL) *p = '\0'; STRCPYLIM(new_cc.cf_fs, p + 1, "statefile path"); if ((lzfs = libzfs_init()) == NULL) { mesg(MERR, "failed to initialize ZFS library\n"); return (-1); } if ((zpool_handle = zpool_open(lzfs, pool_name)) == NULL) { mesg(MERR, "couldn't open pool '%s'\n", pool_name); libzfs_fini(lzfs); return (-1); } config = zpool_get_config(zpool_handle, NULL); if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) != 0) { zpool_close(zpool_handle); libzfs_fini(lzfs); return (-1); } verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &child, &children) == 0); if (children != 1) { mesg(MERR, "expected one vdev, got %d\n", children); zpool_close(zpool_handle); libzfs_fini(lzfs); return (-1); } vname = zpool_vdev_name(lzfs, zpool_handle, child[0], B_FALSE); if (vname == NULL) { mesg(MERR, "couldn't determine vdev name\n"); zpool_close(zpool_handle); libzfs_fini(lzfs); return (-1); } (void) strcpy(diskname, "/dev/dsk/"); (void) strcat(diskname, vname); free(vname); zpool_close(zpool_handle); libzfs_fini(lzfs); return (1); }
/*
 * Print the pool's hot spares, one status line per spare device.
 * Quietly does nothing when the pool has no spares configured.
 */
void lzwu_print_spares(libzfs_handle_t *p_zhd, zpool_handle_t *zhp,
    nvlist_t **spares, uint_t nspares, int namewidth)
{
	if (nspares == 0)
		return;

	printf("\tspares\n");

	for (uint_t i = 0; i < nspares; i++) {
		char *psz_name;

		psz_name = zpool_vdev_name(p_zhd, zhp, spares[i], B_FALSE);
		/* isspare == B_TRUE selects the INUSE/AVAIL state labels */
		lzwu_zpool_print_status_config(p_zhd, zhp, psz_name,
		    spares[i], namewidth, 2, B_TRUE);
		free(psz_name);
	}
}
/*
 * Print the pool's L2ARC cache devices, one status line per device.
 * Quietly does nothing when the pool has no cache devices configured.
 */
void lzwu_print_l2cache(libzfs_handle_t *p_zhd, zpool_handle_t *zhp,
    nvlist_t **l2cache, uint_t nl2cache, int namewidth)
{
	if (nl2cache == 0)
		return;

	printf("\tcache\n");

	for (uint_t i = 0; i < nl2cache; i++) {
		char *psz_name;

		psz_name = zpool_vdev_name(p_zhd, zhp, l2cache[i], B_FALSE);
		lzwu_zpool_print_status_config(p_zhd, zhp, psz_name,
		    l2cache[i], namewidth, 2, B_FALSE);
		free(psz_name);
	}
}
/*
 * Given a vdev configuration, determine the maximum width needed for the
 * device name column.  A vdev's width is the length of its display name
 * plus its indentation depth; spare, L2 cache and regular children are
 * all visited recursively at depth + 2.
 *
 * Returns the running maximum (never less than the incoming 'max').
 */
int lzwu_zpool_max_width(libzfs_handle_t *p_zhd, zpool_handle_t *p_zpool,
    nvlist_t *nv, int depth, int max)
{
	/* The three child arrays a vdev nvlist may carry. */
	static const char *child_arrays[] = {
		ZPOOL_CONFIG_SPARES,
		ZPOOL_CONFIG_L2CACHE,
		ZPOOL_CONFIG_CHILDREN
	};
	char *psz_zpool = zpool_vdev_name(p_zhd, p_zpool, nv, B_TRUE);
	nvlist_t **ppnv_child;
	uint_t i_children;

	/*
	 * Compute the width in a signed int before comparing: the original
	 * compared strlen() + depth (size_t) against the signed 'max',
	 * silently promoting 'max' to unsigned.
	 */
	int width = (int)strlen(psz_zpool) + depth;
	if (width > max)
		max = width;
	free(psz_zpool);

	for (size_t a = 0;
	    a < sizeof (child_arrays) / sizeof (child_arrays[0]); a++) {
		if (nvlist_lookup_nvlist_array(nv, child_arrays[a],
		    &ppnv_child, &i_children) != 0)
			continue;
		for (uint_t c = 0; c < i_children; c++) {
			int ret = lzwu_zpool_max_width(p_zhd, p_zpool,
			    ppnv_child[c], depth + 2, max);
			if (ret > max)
				max = ret;
		}
	}
	return max;
}
/*
 * Function: be_do_installgrub
 * Description:	This function runs installgrub using the grub loader files
 *		from the BE we're activating and installing them on the
 *		pool the BE lives in.
 *
 * Parameters:
 *		bt - The transaction data for the BE we're activating.
 * Return:
 *		BE_SUCCESS - Success
 *		be_errno_t - Failure
 *
 * Scope:
 *		Private
 */
static int
be_do_installgrub(be_transaction_data_t *bt)
{
	zpool_handle_t *zphp = NULL;
	zfs_handle_t *zhp = NULL;
	nvlist_t **child, *nv, *config;
	uint_t c, children = 0;
	char *tmp_mntpt = NULL;
	char *pool_mntpnt = NULL;
	char *ptmp_mntpnt = NULL;
	char *orig_mntpnt = NULL;
	FILE *cap_fp = NULL;
	FILE *zpool_cap_fp = NULL;
	char line[BUFSIZ];
	char cap_file[MAXPATHLEN];
	char zpool_cap_file[MAXPATHLEN];
	char stage1[MAXPATHLEN];
	char stage2[MAXPATHLEN];
	char installgrub_cmd[MAXPATHLEN];
	char *vname;
	char be_run_cmd_errbuf[BUFSIZ];
	int ret = BE_SUCCESS;
	int err = 0;
	boolean_t be_mounted = B_FALSE;
	boolean_t pool_mounted = B_FALSE;

	if (!be_has_grub()) {
		be_print_err(gettext("be_do_installgrub: Not supported "
		    "on this architecture\n"));
		return (BE_ERR_NOTSUP);
	}

	if ((zhp = zfs_open(g_zfs, bt->obe_root_ds, ZFS_TYPE_FILESYSTEM)) ==
	    NULL) {
		be_print_err(gettext("be_do_installgrub: failed to "
		    "open BE root dataset (%s): %s\n"), bt->obe_root_ds,
		    libzfs_error_description(g_zfs));
		ret = zfs_err_to_be_err(g_zfs);
		return (ret);
	}

	/* Make sure the BE is mounted so we can reach its grub files. */
	if (!zfs_is_mounted(zhp, &tmp_mntpt)) {
		if ((ret = _be_mount(bt->obe_name, &tmp_mntpt,
		    BE_MOUNT_FLAG_NO_ZONES)) != BE_SUCCESS) {
			be_print_err(gettext("be_do_installgrub: failed to "
			    "mount BE (%s)\n"), bt->obe_name);
			ZFS_CLOSE(zhp);
			return (ret);
		}
		be_mounted = B_TRUE;
	}
	ZFS_CLOSE(zhp);

	(void) snprintf(stage1, sizeof (stage1), "%s%s", tmp_mntpt,
	    BE_STAGE_1);
	(void) snprintf(stage2, sizeof (stage2), "%s%s", tmp_mntpt,
	    BE_STAGE_2);

	if ((zphp = zpool_open(g_zfs, bt->obe_zpool)) == NULL) {
		be_print_err(gettext("be_do_installgrub: failed to open "
		    "pool (%s): %s\n"), bt->obe_zpool,
		    libzfs_error_description(g_zfs));
		ret = zfs_err_to_be_err(g_zfs);
		if (be_mounted)
			(void) _be_unmount(bt->obe_name, 0);
		free(tmp_mntpt);
		return (ret);
	}

	if ((config = zpool_get_config(zphp, NULL)) == NULL) {
		be_print_err(gettext("be_do_installgrub: failed to get zpool "
		    "configuration information. %s\n"),
		    libzfs_error_description(g_zfs));
		ret = zfs_err_to_be_err(g_zfs);
		goto done;
	}

	/*
	 * Get the vdev tree
	 */
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nv) != 0) {
		be_print_err(gettext("be_do_installgrub: failed to get vdev "
		    "tree: %s\n"), libzfs_error_description(g_zfs));
		ret = zfs_err_to_be_err(g_zfs);
		goto done;
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) != 0) {
		be_print_err(gettext("be_do_installgrub: failed to traverse "
		    "the vdev tree: %s\n"), libzfs_error_description(g_zfs));
		ret = zfs_err_to_be_err(g_zfs);
		goto done;
	}

	/* Run installgrub on every disk that backs the root pool. */
	for (c = 0; c < children; c++) {
		uint_t i, nchildren = 0;
		nvlist_t **nvchild;

		vname = zpool_vdev_name(g_zfs, zphp, child[c], B_FALSE);
		if (vname == NULL) {
			be_print_err(gettext(
			    "be_do_installgrub: "
			    "failed to get device name: %s\n"),
			    libzfs_error_description(g_zfs));
			ret = zfs_err_to_be_err(g_zfs);
			goto done;
		}

		if (strcmp(vname, "mirror") == 0 || vname[0] != 'c') {
			/*
			 * Composite vdev (e.g. mirror): install on each leaf
			 * device instead.  Free the top-level name here - the
			 * original code overwrote it in the inner loop,
			 * leaking it.
			 */
			free(vname);
			vname = NULL;

			if (nvlist_lookup_nvlist_array(child[c],
			    ZPOOL_CONFIG_CHILDREN, &nvchild, &nchildren) != 0) {
				be_print_err(gettext("be_do_installgrub: "
				    "failed to traverse the vdev tree: %s\n"),
				    libzfs_error_description(g_zfs));
				ret = zfs_err_to_be_err(g_zfs);
				goto done;
			}

			for (i = 0; i < nchildren; i++) {
				vname = zpool_vdev_name(g_zfs, zphp,
				    nvchild[i], B_FALSE);
				if (vname == NULL) {
					be_print_err(gettext(
					    "be_do_installgrub: "
					    "failed to get device name: %s\n"),
					    libzfs_error_description(g_zfs));
					ret = zfs_err_to_be_err(g_zfs);
					goto done;
				}

				(void) snprintf(installgrub_cmd,
				    sizeof (installgrub_cmd),
				    "%s %s %s /dev/rdsk/%s",
				    BE_INSTALL_GRUB, stage1, stage2, vname);
				if (be_run_cmd(installgrub_cmd,
				    be_run_cmd_errbuf, BUFSIZ, NULL, 0) !=
				    BE_SUCCESS) {
					be_print_err(gettext(
					    "be_do_installgrub: installgrub "
					    "failed for device %s.\n"), vname);
					/* Assume localized cmd err output. */
					be_print_err(gettext(
					    "  Command: \"%s\"\n"),
					    installgrub_cmd);
					be_print_err("%s", be_run_cmd_errbuf);
					free(vname);
					ret = BE_ERR_BOOTFILE_INST;
					goto done;
				}
				free(vname);
			}
		} else {
			(void) snprintf(installgrub_cmd,
			    sizeof (installgrub_cmd), "%s %s %s /dev/rdsk/%s",
			    BE_INSTALL_GRUB, stage1, stage2, vname);
			if (be_run_cmd(installgrub_cmd, be_run_cmd_errbuf,
			    BUFSIZ, NULL, 0) != BE_SUCCESS) {
				be_print_err(gettext(
				    "be_do_installgrub: installgrub "
				    "failed for device %s.\n"), vname);
				/* Assume localized cmd err output. */
				be_print_err(gettext(" Command: \"%s\"\n"),
				    installgrub_cmd);
				be_print_err("%s", be_run_cmd_errbuf);
				free(vname);
				ret = BE_ERR_BOOTFILE_INST;
				goto done;
			}
			free(vname);
		}
	}

	/*
	 * Copy the grub capability file from the BE we're activating into
	 * the root pool.
	 */
	(void) snprintf(cap_file, sizeof (cap_file), "%s%s", tmp_mntpt,
	    BE_CAP_FILE);

	if ((zhp = zfs_open(g_zfs, bt->obe_zpool, ZFS_TYPE_FILESYSTEM)) ==
	    NULL) {
		be_print_err(gettext("be_do_installgrub: zfs_open "
		    "failed: %s\n"), libzfs_error_description(g_zfs));
		ret = zfs_err_to_be_err(g_zfs);
		/*
		 * The original code returned here without unmounting the BE
		 * or freeing tmp_mntpt; clean up like the zpool_open failure
		 * path above.
		 */
		if (be_mounted)
			(void) _be_unmount(bt->obe_name, 0);
		free(tmp_mntpt);
		zpool_close(zphp);
		return (ret);
	}

	/*
	 * Check to see if the pool's dataset is mounted. If it isn't we'll
	 * attempt to mount it.
	 */
	if ((ret = be_mount_pool(zhp, &ptmp_mntpnt, &orig_mntpnt,
	    &pool_mounted)) != BE_SUCCESS) {
		be_print_err(gettext("be_do_installgrub: pool dataset "
		    "(%s) could not be mounted\n"), bt->obe_zpool);
		ZFS_CLOSE(zhp);
		/* Same cleanup fix as the zfs_open failure above. */
		if (be_mounted)
			(void) _be_unmount(bt->obe_name, 0);
		free(tmp_mntpt);
		zpool_close(zphp);
		return (ret);
	}

	/*
	 * Get the mountpoint for the root pool dataset.
	 */
	if (!zfs_is_mounted(zhp, &pool_mntpnt)) {
		be_print_err(gettext("be_do_installgrub: pool "
		    "dataset (%s) is not mounted. Can't check the grub "
		    "version from the grub capability file.\n"),
		    bt->obe_zpool);
		ret = BE_ERR_NO_MENU;
		goto done;
	}

	(void) snprintf(zpool_cap_file, sizeof (zpool_cap_file), "%s%s",
	    pool_mntpnt, BE_CAP_FILE);

	free(pool_mntpnt);
	pool_mntpnt = NULL;

	if ((cap_fp = fopen(cap_file, "r")) == NULL) {
		err = errno;
		be_print_err(gettext("be_do_installgrub: failed to open grub "
		    "capability file\n"));
		ret = errno_to_be_err(err);
		goto done;
	}
	if ((zpool_cap_fp = fopen(zpool_cap_file, "w")) == NULL) {
		err = errno;
		be_print_err(gettext("be_do_installgrub: failed to open new "
		    "grub capability file\n"));
		ret = errno_to_be_err(err);
		(void) fclose(cap_fp);
		goto done;
	}

	while (fgets(line, BUFSIZ, cap_fp)) {
		(void) fputs(line, zpool_cap_fp);
	}

	(void) fclose(zpool_cap_fp);
	(void) fclose(cap_fp);

done:
	if (pool_mounted) {
		int iret = 0;
		iret = be_unmount_pool(zhp, ptmp_mntpnt, orig_mntpnt);
		/* Don't let a cleanup failure mask an earlier error. */
		if (ret == BE_SUCCESS)
			ret = iret;
		free(orig_mntpnt);
		free(ptmp_mntpnt);
	}
	ZFS_CLOSE(zhp);
	if (be_mounted)
		(void) _be_unmount(bt->obe_name, 0);
	zpool_close(zphp);
	free(tmp_mntpt);
	return (ret);
}
/* * Print the configuration of an exported pool. Iterate over all vdevs in the * pool, printing out the name and status for each one. */ static void lzwu_print_import_config(libzfs_handle_t *p_zhd, const char *name, nvlist_t *nv, int namewidth, int depth) { nvlist_t **child; uint_t c, children; vdev_stat_t *vs; char *type, *vname; verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0); if (strcmp(type, VDEV_TYPE_MISSING) == 0 || strcmp(type, VDEV_TYPE_HOLE) == 0) return; verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &c) == 0); (void) printf("\t%*s%-*s", depth, "", namewidth - depth, name); (void) printf(" %s", zpool_state_to_name(vs->vs_state, vs->vs_aux)); if (vs->vs_aux != 0) { (void) printf(" "); switch (vs->vs_aux) { case VDEV_AUX_OPEN_FAILED: printf("cannot open"); break; case VDEV_AUX_BAD_GUID_SUM: printf("missing device"); break; case VDEV_AUX_NO_REPLICAS: printf("insufficient replicas"); break; case VDEV_AUX_VERSION_NEWER: printf("newer version"); break; case VDEV_AUX_ERR_EXCEEDED: printf("too many errors"); break; default: printf("corrupted data"); break; } } (void) printf("\n"); if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) return; for (c = 0; c < children; c++) { uint64_t is_log = B_FALSE; (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, &is_log); if (is_log) continue; vname = zpool_vdev_name(p_zhd, NULL, child[c], B_TRUE); lzwu_print_import_config(p_zhd, vname, child[c], namewidth, depth + 2); free(vname); } if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, &child, &children) == 0) { printf("\tcache\n"); for (c = 0; c < children; c++) { vname = zpool_vdev_name(p_zhd, NULL, child[c], B_FALSE); (void) printf("\t %s\n", vname); free(vname); } } if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, &child, &children) == 0) { printf("\tspares\n"); for (c = 0; c < children; c++) { vname = zpool_vdev_name(p_zhd, NULL, child[c], B_FALSE); (void) printf("\t %s\n", 
vname); free(vname); } } }
/*
 * Print out configuration state as requested by status_callback.
 *
 * Emits one status line for the vdev 'name' (state, read/write/checksum
 * error counts, and any auxiliary detail), then recurses over its
 * non-log, non-hole children at depth + 2.  When isspare is set, the
 * hot-spare state labels INUSE/AVAIL are used and the error-count
 * columns are suppressed.
 */
void lzwu_zpool_print_status_config(libzfs_handle_t *p_zhd, zpool_handle_t *zhp, const char *name, nvlist_t *nv, int namewidth, int depth, boolean_t isspare)
{
	nvlist_t **child;
	uint_t children;
	unsigned c;
	vdev_stat_t *vs;
	char rbuf[6], wbuf[6], cbuf[6], repaired[7];
	char *vname;
	uint64_t notpresent;
	spare_cbdata_t cb;
	char *state;

	verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_STATS,
	    (uint64_t **)&vs, &c) == 0);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		children = 0;

	state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
	if(isspare)
	{
		/*
		 * For hot spares, we use the terms 'INUSE' and 'AVAILABLE' for
		 * online drives.
		 */
		if(vs->vs_aux == VDEV_AUX_SPARED)
			state = "INUSE";
		else if(vs->vs_state == VDEV_STATE_HEALTHY)
			state = "AVAIL";
	}

	printf("\t%*s%-*s %-8s", depth, "", namewidth - depth, name, state);

	if(!isspare)
	{
		zfs_nicenum(vs->vs_read_errors, rbuf, sizeof (rbuf));
		zfs_nicenum(vs->vs_write_errors, wbuf, sizeof (wbuf));
		zfs_nicenum(vs->vs_checksum_errors, cbuf, sizeof (cbuf));
		printf(" %5s %5s %5s", rbuf, wbuf, cbuf);
	}

	/*
	 * BUGFIX: the third argument was mis-encoded as "¬present"
	 * (HTML-entity corruption of "&notpresent"), which is not valid C.
	 */
	if(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &notpresent) == 0)
	{
		/* Device went missing; show the path it used to have. */
		char *path;
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0);
		printf(" was %s", path);
	}
	else if(vs->vs_aux != 0)
	{
		printf(" ");
		switch (vs->vs_aux)
		{
		case VDEV_AUX_OPEN_FAILED:
			printf("cannot open");
			break;
		case VDEV_AUX_BAD_GUID_SUM:
			printf("missing device");
			break;
		case VDEV_AUX_NO_REPLICAS:
			printf("insufficient replicas");
			break;
		case VDEV_AUX_VERSION_NEWER:
			printf("newer version");
			break;
		case VDEV_AUX_SPARED:
			/* Report which pool (if any) is using this spare. */
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
			    &cb.cb_guid) == 0);
			if(zpool_iter(p_zhd, lzwu_find_spare, &cb) == 1)
			{
				if(strcmp(zpool_get_name(cb.cb_zhp),
				    zpool_get_name(zhp)) == 0)
					printf("currently in use");
				else
					printf("in use by pool '%s'",
					    zpool_get_name(cb.cb_zhp));
				zpool_close(cb.cb_zhp);
			}
			else
				printf("currently in use");
			break;
		case VDEV_AUX_ERR_EXCEEDED:
			printf("too many errors");
			break;
		case VDEV_AUX_IO_FAILURE:
			printf("experienced I/O failures");
			break;
		case VDEV_AUX_BAD_LOG:
			printf("bad intent log");
			break;
		case VDEV_AUX_EXTERNAL:
			printf("external device fault");
			break;
		case VDEV_AUX_SPLIT_POOL:
			printf("split into new pool");
			break;
		default:
			printf("corrupted data");
			break;
		}
	}
	else if(vs->vs_scrub_repaired != 0 && children == 0)
	{
		/*
		 * Report bytes resilvered/repaired on leaf devices.
		 */
		zfs_nicenum(vs->vs_scrub_repaired, repaired,
		    sizeof (repaired));
		printf(" %s %s", repaired,
		    (vs->vs_scrub_type == POOL_SCRUB_RESILVER) ?
		    "resilvered" : "repaired");
	}

	printf("\n");

	for(unsigned c = 0; c < children; c++)
	{
		uint64_t islog = B_FALSE, ishole = B_FALSE;

		/* Don't print logs or holes here */
		nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, &islog);
		nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, &ishole);
		if(islog || ishole)
			continue;
		vname = zpool_vdev_name(p_zhd, zhp, child[c], B_TRUE);
		lzwu_zpool_print_status_config(p_zhd, zhp, vname, child[c],
		    namewidth, depth + 2, isspare);
		free(vname);
	}
}