/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}
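/*
 * Usage sketch (not part of the library): a caller that requires a healthy
 * pool uses zpool_open(), while diagnostic tools that must inspect faulted
 * pools call zpool_open_canfail() directly.  print_pool_state() is a
 * hypothetical example function.
 */
static int
print_pool_state(libzfs_handle_t *hdl, const char *poolname)
{
	zpool_handle_t *zhp;

	/* Fails (reporting EZFS_POOLUNAVAIL) if the pool is faulted. */
	if ((zhp = zpool_open(hdl, poolname)) == NULL)
		return (-1);

	(void) printf("%s: state %d\n", zpool_get_name(zhp),
	    zpool_get_state(zhp));
	zpool_close(zhp);
	return (0);
}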
/*
 * This function handles the ESC_ZFS_config_sync event.  It iterates over
 * the pool's vdevs and updates the FRU property on each of them.
 */
int
zfs_deliver_sync(nvlist_t *nvl)
{
	dev_data_t dd = { 0 };
	char *pname;
	zpool_handle_t *zhp;
	nvlist_t *config, *vdev;

	if (nvlist_lookup_string(nvl, "pool_name", &pname) != 0) {
		syseventd_print(9, "zfs_deliver_sync: no pool name\n");
		return (-1);
	}

	/*
	 * If this event was triggered by a pool export or destroy we cannot
	 * open the pool.  This is not an error, just return 0 as we don't care
	 * about these events.
	 */
	zhp = zpool_open_canfail(g_zfshdl, pname);
	if (zhp == NULL)
		return (0);

	config = zpool_get_config(zhp, NULL);
	if (config == NULL) {
		syseventd_print(9, "zfs_deliver_sync: "
		    "failed to get pool config for %s\n", pname);
		zpool_close(zhp);
		return (-1);
	}

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &vdev) != 0) {
		syseventd_print(0, "zfs_deliver_sync: "
		    "failed to get vdev tree for %s\n", pname);
		zpool_close(zhp);
		return (-1);
	}

	libzfs_fru_refresh(g_zfshdl);

	dd.dd_func = zfs_sync_vdev_fru;
	zfs_iter_vdev(zhp, vdev, &dd);

	zpool_close(zhp);
	return (0);
}
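/*
 * For illustration, a sketch of the recursive walk that zfs_iter_vdev()
 * performs over the tree handed to it above.  Only the documented nvlist
 * API is assumed; walk_vdev_tree() and its callback type are hypothetical
 * stand-ins, not the syseventd module's actual helper.
 */
typedef void (*leaf_vdev_func_t)(zpool_handle_t *, nvlist_t *, void *);

static void
walk_vdev_tree(zpool_handle_t *zhp, nvlist_t *nvl, leaf_vdev_func_t func,
    void *data)
{
	nvlist_t **child;
	uint_t c, children;

	/* Interior vdevs keep their children under ZPOOL_CONFIG_CHILDREN. */
	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			walk_vdev_tree(zhp, child[c], func, data);
		return;
	}

	/* Leaf vdev (disk or file): apply the caller's function. */
	func(zhp, nvl, data);
}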
/*
 * Collects zpool stats for the pool configured in the configData var.
 * It opens a handle to the pool, loads the vdev tree, and then reads the
 * stats of the root vdev.  These stats are saved temporarily and then
 * inserted into the SQL database.
 */
int
collectZpoolStats()
{
	// Open a handle to the zfs filesystems
	libzfs_handle_t *g_zfs = libzfs_init();
	if (g_zfs == NULL)
		return 1;

	// Open a handle to the configured zpool; bail out if it cannot be
	// opened at all.
	zpool_handle_t *zhp;
	zhp = zpool_open_canfail(g_zfs, configData.poolname->value);
	if (zhp == NULL) {
		libzfs_fini(g_zfs);
		return 1;
	}

	// Declarations and such...
	nvlist_t *configuration, *vdevroot;
	vdev_stat_t *vdevstats;
	iostatcollection statisticscollection;
	uint_t nelem;

	// The config nvlist is owned by the pool handle, so the handle (and
	// the library) must stay open until we are done reading from it.
	configuration = zpool_get_config(zhp, NULL);

	// Put the vdev tree belonging to the pool into vdevroot.
	verify(nvlist_lookup_nvlist(configuration, ZPOOL_CONFIG_VDEV_TREE,
	    &vdevroot) == 0);

	// Load the root vdev stats into vdevstats.
	verify(nvlist_lookup_uint64_array(vdevroot, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vdevstats, &nelem) == 0);

	// Place the collected statistics in the collection
	statisticscollection.readops = vdevstats->vs_ops[ZIO_TYPE_READ];
	statisticscollection.writeops = vdevstats->vs_ops[ZIO_TYPE_WRITE];
	statisticscollection.readbytes = vdevstats->vs_bytes[ZIO_TYPE_READ];
	statisticscollection.writebytes = vdevstats->vs_bytes[ZIO_TYPE_WRITE];
	statisticscollection.checksum_errors = vdevstats->vs_checksum_errors;
	statisticscollection.state = vdevstats->vs_state;
	statisticscollection.space_alloc = vdevstats->vs_alloc;
	statisticscollection.space = vdevstats->vs_space;

	// All stats have been copied out, so the handles can be released now.
	// Note: zpool_close(), not free(); the handle owns more than the
	// struct itself.
	zpool_close(zhp);
	libzfs_fini(g_zfs);

	// Create the query and post it to MySQL
	char queryString[1024];
	snprintf(queryString, 1024, "INSERT INTO io_stats (date, %s, %s, %s, "
	    "%s, %s, %s, %s, %s) VALUES (NOW(), '%llu', '%llu', '%llu', "
	    "'%llu', '%llu', '%llu', '%llu', '%llu')",
	    "iop_read", "iop_write", "bandwidth_read", "bandwidth_write",
	    "space", "space_alloc", "checksum_errors", "state",
	    statisticscollection.readops, statisticscollection.writeops,
	    statisticscollection.readbytes, statisticscollection.writebytes,
	    statisticscollection.space, statisticscollection.space_alloc,
	    statisticscollection.checksum_errors, statisticscollection.state);

	if (executeQuery(queryString))
		return 1;

	return 0;
}
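/*
 * Note that vs_ops[] and vs_bytes[] are cumulative counters, so a single
 * sample only yields totals.  A sketch of turning two successive samples
 * into a per-second rate (the sample struct and polling scheme are
 * hypothetical; assumes <time.h>):
 */
typedef struct io_sample {
	uint64_t readops;	// copy of vs_ops[ZIO_TYPE_READ]
	time_t when;		// when the sample was taken
} io_sample_t;

static double
read_iops(const io_sample_t *prev, const io_sample_t *cur)
{
	double seconds = difftime(cur->when, prev->when);

	if (seconds <= 0)
		return 0.0;

	// Counter delta divided by elapsed time gives the interval rate.
	return (double)(cur->readops - prev->readops) / seconds;
}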
/*
 * Create a list of pools based on the given arguments.  If we're given no
 * arguments, then iterate over all pools in the system and add them to the
 * AVL tree.  Otherwise, add only those pools explicitly specified on the
 * command line.
 */
zpool_list_t *
pool_list_get(int argc, char **argv, zprop_list_t **proplist, int *err)
{
	zpool_list_t *zlp;

	zlp = safe_malloc(sizeof (zpool_list_t));

	zlp->zl_pool = uu_avl_pool_create("zfs_pool", sizeof (zpool_node_t),
	    offsetof(zpool_node_t, zn_avlnode), zpool_compare, UU_DEFAULT);

	if (zlp->zl_pool == NULL)
		zpool_no_memory();

	if ((zlp->zl_avl = uu_avl_create(zlp->zl_pool, NULL,
	    UU_DEFAULT)) == NULL)
		zpool_no_memory();

	zlp->zl_proplist = proplist;

	if (argc == 0) {
		(void) zpool_iter(g_zfs, add_pool, zlp);
		zlp->zl_findall = B_TRUE;
	} else {
		int i;

		for (i = 0; i < argc; i++) {
			zpool_handle_t *zhp;

			if ((zhp = zpool_open_canfail(g_zfs,
			    argv[i])) != NULL) {
				if (add_pool(zhp, zlp) != 0)
					*err = B_TRUE;
			} else {
				*err = B_TRUE;
			}
		}
	}

	return (zlp);
}
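/*
 * Typical caller pattern (a sketch): build the list, walk it with
 * pool_list_iter(), then release it with pool_list_free() -- the companion
 * functions declared alongside pool_list_get() in zpool_util.h.
 * show_pool() and do_show() are hypothetical.
 */
static int
show_pool(zpool_handle_t *zhp, void *data)
{
	(void) printf("%s\n", zpool_get_name(zhp));
	return (0);
}

static int
do_show(int argc, char **argv)
{
	zpool_list_t *list;
	int err = 0;

	list = pool_list_get(argc, argv, NULL, &err);

	/* B_TRUE: include pools that are present but unavailable. */
	err |= pool_list_iter(list, B_TRUE, show_pool, NULL);
	pool_list_free(list);

	return (err != 0);
}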
/*
 * Determines if the pool is in use.  If so, it returns true and the state of
 * the pool as well as the name of the pool.  The name string is allocated and
 * must be freed by the caller.
 */
int
zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
    boolean_t *inuse)
{
	nvlist_t *config;
	char *name;
	boolean_t ret;
	uint64_t guid, vdev_guid;
	zpool_handle_t *zhp;
	nvlist_t *pool_config;
	uint64_t stateval, isspare;
	aux_cbdata_t cb = { 0 };
	boolean_t isactive;

	*inuse = B_FALSE;

	if (zpool_read_label(fd, &config, NULL) != 0 && errno == ENOMEM) {
		(void) no_memory(hdl);
		return (-1);
	}

	if (config == NULL)
		return (0);

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) == 0);

	if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);
	}

	switch (stateval) {
	case POOL_STATE_EXPORTED:
		/*
		 * A pool with an exported state may in fact be imported
		 * read-only, so check the in-core state to see if it's
		 * active and imported read-only.  If it is, set
		 * its state to active.
		 */
		if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&
		    (zhp = zpool_open_canfail(hdl, name)) != NULL) {
			if (zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))
				stateval = POOL_STATE_ACTIVE;

			/*
			 * All we needed the zpool handle for is the
			 * readonly prop check.
			 */
			zpool_close(zhp);
		}

		ret = B_TRUE;
		break;

	case POOL_STATE_ACTIVE:
		/*
		 * For an active pool, we have to determine if it's really part
		 * of a currently active pool (in which case the pool will
		 * exist and the guid will be the same), or whether it's part
		 * of an active pool that was disconnected without being
		 * explicitly exported.
		 */
		if (pool_active(hdl, name, guid, &isactive) != 0) {
			nvlist_free(config);
			return (-1);
		}

		if (isactive) {
			/*
			 * Because the device may have been removed while
			 * offlined, we only report it as active if the vdev is
			 * still present in the config.  Otherwise, pretend
			 * like it's not in use.
			 */
			if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
			    (pool_config = zpool_get_config(zhp, NULL))
			    != NULL) {
				nvlist_t *nvroot;

				verify(nvlist_lookup_nvlist(pool_config,
				    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
				ret = find_guid(nvroot, vdev_guid);
			} else {
				ret = B_FALSE;
			}

			/*
			 * If this is an active spare within another pool, we
			 * treat it like an unused hot spare.  This allows the
			 * user to create a pool with a hot spare that is
			 * currently in use within another pool.  Since we
			 * return B_TRUE, libdiskmgt will continue to prevent
			 * generic consumers from using the device.
			 */
			if (ret && nvlist_lookup_uint64(config,
			    ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
				stateval = POOL_STATE_SPARE;

			if (zhp != NULL)
				zpool_close(zhp);
		} else {
			stateval = POOL_STATE_POTENTIALLY_ACTIVE;
			ret = B_TRUE;
		}
		break;

	case POOL_STATE_SPARE:
		/*
		 * For a hot spare, it can be either definitively in use, or
		 * potentially active.  To determine if it's in use, we iterate
		 * over all pools in the system and search for one with a spare
		 * with a matching guid.
		 *
		 * Due to the shared nature of spares, we don't actually report
		 * the potentially active case as in use.  This means the user
		 * can freely create pools on the hot spares of exported pools,
		 * but to do otherwise makes the resulting code complicated,
		 * and we end up having to deal with this case anyway.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_SPARES;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	case POOL_STATE_L2CACHE:
		/*
		 * Check if any pool is currently using this l2cache device.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_L2CACHE;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	default:
		ret = B_FALSE;
	}

	if (ret) {
		if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
			if (cb.cb_zhp)
				zpool_close(cb.cb_zhp);
			nvlist_free(config);
			return (-1);
		}
		*state = (pool_state_t)stateval;
	}

	if (cb.cb_zhp)
		zpool_close(cb.cb_zhp);

	nvlist_free(config);
	*inuse = ret;

	return (0);
}
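/*
 * Caller sketch (hypothetical; assumes <fcntl.h> and <unistd.h>): before
 * handing a disk to another consumer, check whether it carries a pool
 * label that is still considered in use.
 */
static boolean_t
device_in_use(libzfs_handle_t *hdl, const char *path)
{
	pool_state_t state;
	boolean_t inuse = B_FALSE;
	char *name = NULL;
	int fd;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (B_FALSE);

	if (zpool_in_use(hdl, fd, &state, &name, &inuse) == 0 && inuse) {
		(void) printf("%s is part of pool '%s' (state %d)\n",
		    path, name, state);
		free(name);		/* allocated by zpool_in_use() */
	}

	(void) close(fd);
	return (inuse);
}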
/*
 * Perform the import for the given configuration.  This passes the heavy
 * lifting off to zpool_import_props(), and then mounts the datasets contained
 * within the pool.
 */
static int
do_import(nvlist_t *config, const char *newname, const char *mntopts,
    nvlist_t *props, int flags)
{
	zpool_handle_t *zhp;
	char *name;
	uint64_t state;
	uint64_t version;

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &name) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &state) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
	    &version) == 0);

	if (!SPA_VERSION_IS_SUPPORTED(version)) {
		printf("cannot import '%s': pool is formatted using an "
		    "unsupported ZFS version\n", name);
		return (1);
	} else if (state != POOL_STATE_EXPORTED &&
	    !(flags & ZFS_IMPORT_ANY_HOST)) {
		uint64_t hostid;

		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID,
		    &hostid) == 0) {
			unsigned long system_hostid = gethostid() & 0xffffffff;

			if ((unsigned long)hostid != system_hostid) {
				char *hostname;
				uint64_t timestamp;
				time_t t;

				verify(nvlist_lookup_string(config,
				    ZPOOL_CONFIG_HOSTNAME, &hostname) == 0);
				verify(nvlist_lookup_uint64(config,
				    ZPOOL_CONFIG_TIMESTAMP, &timestamp) == 0);
				t = timestamp;
				printf("cannot import '%s': pool may be in "
				    "use from another system, it was last "
				    "accessed by %s (hostid: 0x%lx) on %s\n",
				    name, hostname, (unsigned long)hostid,
				    asctime(localtime(&t)));
				printf("use '-f' to import anyway\n");
				return (1);
			}
		} else {
			printf("cannot import '%s': pool may be in use from "
			    "another system\n", name);
			printf("use '-f' to import anyway\n");
			return (1);
		}
	}

	if (zpool_import_props(g_zfs, config, newname, props, flags) != 0)
		return (1);

	if (newname != NULL)
		name = (char *)newname;

	if ((zhp = zpool_open_canfail(g_zfs, name)) == NULL)
		return (1);

	if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL &&
	    !(flags & ZFS_IMPORT_ONLY) &&
	    zpool_enable_datasets(zhp, mntopts, 0) != 0) {
		zpool_close(zhp);
		return (1);
	}

	zpool_close(zhp);
	return (0);
}
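/*
 * Minimal driver sketch: once a candidate config nvlist has been found
 * (the discovery step is elided here; the lookup APIs vary across
 * releases), a forced import that skips the hostid check above could look
 * like this.  import_forced() is hypothetical.
 */
static int
import_forced(nvlist_t *config, const char *newname)
{
	/* ZFS_IMPORT_ANY_HOST bypasses the hostid comparison in do_import(). */
	return (do_import(config, newname, NULL, NULL, ZFS_IMPORT_ANY_HOST));
}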
/*
 * Active pool health status.
 *
 * To determine the status for a pool, we make several passes over the config,
 * picking the most egregious error we find.  In order of importance, we do the
 * following:
 *
 *	- Check for a complete and valid configuration
 *	- Look for any faulted or missing devices in a non-replicated config
 *	- Check for any data errors
 *	- Check for any faulted or missing devices in a replicated config
 *	- Look for any devices showing errors
 *	- Check for any resilvering devices
 *
 * There can obviously be multiple errors within a single pool, so this routine
 * only picks the most damaging of all the current errors to report.
 */
static zpool_status_t
check_status(zpool_handle_t *zhp, nvlist_t *config, boolean_t isimport)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;
	uint64_t nerr;
	uint64_t version;
	uint64_t stateval;
	uint64_t hostid = 0;

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
	    &version) == 0);
	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
	    (uint64_t **)&vs, &vsc) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);
	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID, &hostid);

	/*
	 * Pool last accessed by another system.
	 */
	if (hostid != 0 && (unsigned long)hostid != gethostid() &&
	    stateval == POOL_STATE_ACTIVE)
		return (ZPOOL_STATUS_HOSTID_MISMATCH);

	/*
	 * Newer on-disk version.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_VERSION_NEWER)
		return (ZPOOL_STATUS_VERSION_NEWER);

	/*
	 * Check that the config is complete.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_BAD_GUID_SUM)
		return (ZPOOL_STATUS_BAD_GUID_SUM);

	/*
	 * Pool has experienced failed I/O.
	 */
	if (stateval == POOL_STATE_IO_FAILURE) {
		zpool_handle_t *tmp_zhp = NULL;
		libzfs_handle_t *hdl = NULL;
		char property[ZPOOL_MAXPROPLEN];
		char *failmode = NULL;

		if (zhp == NULL) {
			char *poolname;

			verify(nvlist_lookup_string(config,
			    ZPOOL_CONFIG_POOL_NAME, &poolname) == 0);

			if ((hdl = libzfs_init()) == NULL)
				return (ZPOOL_STATUS_IO_FAILURE_WAIT);

			tmp_zhp = zpool_open_canfail(hdl, poolname);
			if (tmp_zhp == NULL) {
				libzfs_fini(hdl);
				return (ZPOOL_STATUS_IO_FAILURE_WAIT);
			}
		}

		if (zpool_get_prop(zhp ? zhp : tmp_zhp, ZPOOL_PROP_FAILUREMODE,
		    property, sizeof (property), NULL) == 0)
			failmode = property;

		if (tmp_zhp != NULL)
			zpool_close(tmp_zhp);
		if (hdl != NULL)
			libzfs_fini(hdl);

		if (failmode == NULL)
			return (ZPOOL_STATUS_IO_FAILURE_WAIT);

		if (strncmp(failmode, "continue", strlen("continue")) == 0)
			return (ZPOOL_STATUS_IO_FAILURE_CONTINUE);
		else
			return (ZPOOL_STATUS_IO_FAILURE_WAIT);
	}

	/*
	 * Could not read a log.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_BAD_LOG) {
		return (ZPOOL_STATUS_BAD_LOG);
	}

	/*
	 * Bad devices in non-replicated config.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_faulted))
		return (ZPOOL_STATUS_FAULTED_DEV_NR);

	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_missing))
		return (ZPOOL_STATUS_MISSING_DEV_NR);

	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_broken))
		return (ZPOOL_STATUS_CORRUPT_LABEL_NR);

	/*
	 * Corrupted pool metadata
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_CORRUPT_DATA)
		return (ZPOOL_STATUS_CORRUPT_POOL);

	/*
	 * Persistent data errors.
	 */
	if (!isimport) {
		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
		    &nerr) == 0 && nerr != 0)
			return (ZPOOL_STATUS_CORRUPT_DATA);
	}

	/*
	 * Missing devices in a replicated config.
	 */
	if (find_vdev_problem(nvroot, vdev_faulted))
		return (ZPOOL_STATUS_FAULTED_DEV_R);
	if (find_vdev_problem(nvroot, vdev_missing))
		return (ZPOOL_STATUS_MISSING_DEV_R);
	if (find_vdev_problem(nvroot, vdev_broken))
		return (ZPOOL_STATUS_CORRUPT_LABEL_R);

	/*
	 * Devices with errors
	 */
	if (!isimport && find_vdev_problem(nvroot, vdev_errors))
		return (ZPOOL_STATUS_FAILING_DEV);

	/*
	 * Offlined devices
	 */
	if (find_vdev_problem(nvroot, vdev_offlined))
		return (ZPOOL_STATUS_OFFLINE_DEV);

	/*
	 * Currently resilvering
	 */
	if (!vs->vs_scrub_complete && vs->vs_scrub_type == POOL_SCRUB_RESILVER)
		return (ZPOOL_STATUS_RESILVERING);

	/*
	 * Outdated, but usable, version
	 */
	if (version < SPA_VERSION)
		return (ZPOOL_STATUS_VERSION_OLDER);

	return (ZPOOL_STATUS_OK);
}
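/*
 * check_status() is not called by consumers directly; libzfs exposes it
 * through public wrappers.  A usage sketch against the two-argument form
 * of zpool_get_status() (newer releases add further out-parameters):
 */
static void
report_resilver(zpool_handle_t *zhp)
{
	char *msgid;

	if (zpool_get_status(zhp, &msgid) == ZPOOL_STATUS_RESILVERING)
		(void) printf("pool '%s' is resilvering\n",
		    zpool_get_name(zhp));
}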