Example 1
/*
 * Retrieves a list of enabled features and their refcounts and caches it in
 * the pool handle.
 */
nvlist_t *
zpool_get_features(zpool_handle_t *zhp)
{
	nvlist_t *config, *features;

	config = zpool_get_config(zhp, NULL);

	if (config == NULL || !nvlist_exists(config,
	    ZPOOL_CONFIG_FEATURE_STATS)) {
		int error;
		boolean_t missing = B_FALSE;

		error = zpool_refresh_stats(zhp, &missing);

		if (error != 0 || missing)
			return (NULL);

		config = zpool_get_config(zhp, NULL);
	}

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
	    &features) == 0);

	return (features);
}
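A minimal caller sketch (not part of the original example; the helper name is hypothetical). The cached nvlist maps each enabled feature's GUID string to its refcount, so a plain nvlist_lookup_uint64() is enough to test whether a feature is enabled:

/* Hypothetical helper: 'feature_guid' is the feature's GUID string. */
static boolean_t
pool_feature_is_enabled(zpool_handle_t *zhp, const char *feature_guid)
{
	nvlist_t *features = zpool_get_features(zhp);
	uint64_t refcount;

	if (features == NULL)
		return (B_FALSE);

	return (nvlist_lookup_uint64(features, feature_guid, &refcount) == 0);
}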
Example 2
static int
zpool_find_load_time(zpool_handle_t *zhp, void *arg)
{
	struct load_time_arg *lta = arg;
	uint64_t pool_guid;
	uint64_t *tod;
	nvlist_t *config;
	uint_t nelem;

	if (lta->lt_found)
		return (0);

	pool_guid = zpool_get_prop_int(zhp, ZPOOL_PROP_GUID, NULL);
	if (pool_guid != lta->lt_guid)
		return (0);

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		zpool_close(zhp);
		return (-1);
	}

	if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_LOADED_TIME,
	    &tod, &nelem) == 0 && nelem == 2) {
		lta->lt_found = B_TRUE;
		lta->lt_time->ertv_sec = tod[0];
		lta->lt_time->ertv_nsec = tod[1];
	}

	return (0);
}
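A sketch of one plausible driver for this callback (not from the original excerpt; the struct load_time_arg definition below is inferred from the fields the callback uses):

struct load_time_arg {
	uint64_t	lt_guid;	/* pool GUID to search for */
	er_timeval_t	*lt_time;	/* where the load time is stored */
	boolean_t	lt_found;
};

static boolean_t
lookup_pool_load_time(libzfs_handle_t *hdl, uint64_t guid, er_timeval_t *when)
{
	struct load_time_arg lta;

	lta.lt_guid = guid;
	lta.lt_time = when;
	lta.lt_found = B_FALSE;

	(void) zpool_iter(hdl, zpool_find_load_time, &lta);

	return (lta.lt_found);
}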
Example 3
/*ARGSUSED*/
static int
zfs_mark_pool(zpool_handle_t *zhp, void *unused)
{
	zfs_case_t *zcp;
	uint64_t pool_guid;
	nvlist_t *config, *vd;
	int ret;

	pool_guid = zpool_get_prop_int(zhp, ZPOOL_PROP_GUID, NULL);
	/*
	 * Mark any cases associated with just this pool.
	 */
	for (zcp = uu_list_first(zfs_cases); zcp != NULL;
	    zcp = uu_list_next(zfs_cases, zcp)) {
		if (zcp->zc_data.zc_pool_guid == pool_guid &&
		    zcp->zc_data.zc_vdev_guid == 0)
			zcp->zc_present = B_TRUE;
	}

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		zpool_close(zhp);
		return (-1);
	}

	ret = nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &vd);
	assert(ret == 0);

	zfs_mark_vdev(pool_guid, vd);

	zpool_close(zhp);

	return (0);
}
Example 4
/*
 * Given the path to a zvol, return the cXtYdZ name.
 * Returns < 0 on error, 0 if it isn't a zvol, > 0 on success.
 */
static int
ztop(char *arg, char *diskname)
{
	zpool_handle_t *zpool_handle;
	nvlist_t *config, *nvroot;
	nvlist_t **child;
	uint_t children;
	libzfs_handle_t *lzfs;
	char *vname;
	char *p;
	char pool_name[MAXPATHLEN];

	if (strncmp(arg, "/dev/zvol/dsk/", 14)) {
		return (0);
	}
	arg += 14;
	(void) strncpy(pool_name, arg, MAXPATHLEN);
	if ((p = strchr(pool_name, '/')) != NULL) {
		*p = '\0';
		STRCPYLIM(new_cc.cf_fs, p + 1, "statefile path");
	}

	if ((lzfs = libzfs_init()) == NULL) {
		mesg(MERR, "failed to initialize ZFS library\n");
		return (-1);
	}
	if ((zpool_handle = zpool_open(lzfs, pool_name)) == NULL) {
		mesg(MERR, "couldn't open pool '%s'\n", pool_name);
		libzfs_fini(lzfs);
		return (-1);
	}
	config = zpool_get_config(zpool_handle, NULL);
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) != 0) {
		zpool_close(zpool_handle);
		libzfs_fini(lzfs);
		return (-1);
	}
	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0);
	if (children != 1) {
		mesg(MERR, "expected one vdev, got %d\n", children);
		zpool_close(zpool_handle);
		libzfs_fini(lzfs);
		return (-1);
	}
	vname = zpool_vdev_name(lzfs, zpool_handle, child[0], B_FALSE);
	if (vname == NULL) {
		mesg(MERR, "couldn't determine vdev name\n");
		zpool_close(zpool_handle);
		libzfs_fini(lzfs);
		return (-1);
	}
	(void) strcpy(diskname, "/dev/dsk/");
	(void) strcat(diskname, vname);
	free(vname);
	zpool_close(zpool_handle);
	libzfs_fini(lzfs);
	return (1);
}
Example 5
static nvlist_t *
find_vdev(zpool_handle_t *zhp, uint64_t guid)
{
	nvlist_t *config, *nvroot;

	config = zpool_get_config(zhp, NULL);

	(void) nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot);

	return (find_vdev_iter(nvroot, guid));
}
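find_vdev_iter() itself is not part of this excerpt; the following is a hypothetical sketch of the usual recursive walk over ZPOOL_CONFIG_CHILDREN that such a helper performs:

static nvlist_t *
find_vdev_iter(nvlist_t *nv, uint64_t guid)
{
	uint64_t cur_guid;
	nvlist_t **child;
	uint_t c, children;
	nvlist_t *ret;

	/* Return this vdev if its GUID matches. */
	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &cur_guid) == 0 &&
	    cur_guid == guid)
		return (nv);

	/* Leaf vdevs have no children. */
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = find_vdev_iter(child[c], guid)) != NULL)
			return (ret);
	}

	return (NULL);
}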
Example 6
/*ARGSUSED*/
static int
zfs_mark_pool(zpool_handle_t *zhp, void *unused)
{
	zfs_case_t *zcp;
	uint64_t pool_guid;
	uint64_t *tod;
	er_timeval_t loaded = { 0 };
	nvlist_t *config, *vd;
	uint_t nelem = 0;
	int ret;

	pool_guid = zpool_get_prop_int(zhp, ZPOOL_PROP_GUID, NULL);
	/*
	 * Mark any cases associated with just this pool.
	 */
	for (zcp = uu_list_first(zfs_cases); zcp != NULL;
	    zcp = uu_list_next(zfs_cases, zcp)) {
		if (zcp->zc_data.zc_pool_guid == pool_guid &&
		    zcp->zc_data.zc_vdev_guid == 0)
			zcp->zc_present = B_TRUE;
	}

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		zpool_close(zhp);
		return (-1);
	}

	(void) nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_LOADED_TIME,
	    &tod, &nelem);
	if (nelem == 2) {
		loaded.ertv_sec = tod[0];
		loaded.ertv_nsec = tod[1];
		for (zcp = uu_list_first(zfs_cases); zcp != NULL;
		    zcp = uu_list_next(zfs_cases, zcp)) {
			if (zcp->zc_data.zc_pool_guid == pool_guid &&
			    zcp->zc_data.zc_vdev_guid == 0) {
				zcp->zc_when = loaded;
			}
		}
	}

	ret = nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &vd);
	if (ret) {
		zpool_close(zhp);
		return (-1);
	}

	zfs_mark_vdev(pool_guid, vd, &loaded);

	zpool_close(zhp);

	return (0);
}
Example 7
static int
zfs_toplevel_state(zpool_handle_t *zhp)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	unsigned int c;

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &c) == 0);
	return (vs->vs_state);
}
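The iterator examples below use this helper to decide whether a pool is usable; a minimal sketch of that check (the wrapper name is hypothetical):

/* Degraded or healthy top-level vdevs count as usable. */
static boolean_t
pool_is_usable(zpool_handle_t *zhp)
{
	return (zfs_toplevel_state(zhp) >= VDEV_STATE_DEGRADED);
}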
Example 8
static char *
find_root_device_from_libzfs (const char *dir)
{
  char *device;
  char *poolname;
  char *poolfs;

  grub_find_zpool_from_dir (dir, &poolname, &poolfs);
  if (! poolname)
    return NULL;

  {
    zpool_handle_t *zpool;
    libzfs_handle_t *libzfs;
    nvlist_t *nvlist;
    nvlist_t **nvlist_array;
    unsigned int nvlist_count;

    libzfs = grub_get_libzfs_handle ();
    if (! libzfs)
      return NULL;

    zpool = zpool_open (libzfs, poolname);
    nvlist = zpool_get_config (zpool, NULL);

    if (nvlist_lookup_nvlist (nvlist, "vdev_tree", &nvlist) != 0)
      error (1, errno, "nvlist_lookup_nvlist (\"vdev_tree\")");

    if (nvlist_lookup_nvlist_array (nvlist, "children", &nvlist_array, &nvlist_count) != 0)
      error (1, errno, "nvlist_lookup_nvlist_array (\"children\")");

    do
      {
	assert (nvlist_count > 0);
      } while (nvlist_lookup_nvlist_array (nvlist_array[0], "children",
					   &nvlist_array, &nvlist_count) == 0);

    if (nvlist_lookup_string (nvlist_array[0], "path", &device) != 0)
      error (1, errno, "nvlist_lookup_string (\"path\")");

    zpool_close (zpool);
  }

  free (poolname);
  if (poolfs)
    free (poolfs);

  return device;
}
Example 9
File: zfs_mod.c Project: pyavdr/zfs
static int
zfs_iter_pool(zpool_handle_t *zhp, void *data)
{
    nvlist_t *config, *nvl;
    dev_data_t *dp = data;
    uint64_t pool_guid;
    unavailpool_t *pool;

    zed_log_msg(LOG_INFO, "zfs_iter_pool: evaluating vdevs on %s (by %s)",
                zpool_get_name(zhp), dp->dd_vdev_guid ? "GUID" : dp->dd_prop);

    /*
     * For each vdev in this pool, look for a match to apply dd_func
     */
    if ((config = zpool_get_config(zhp, NULL)) != NULL) {
        if (dp->dd_pool_guid == 0 ||
                (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
                                      &pool_guid) == 0 && pool_guid == dp->dd_pool_guid)) {
            (void) nvlist_lookup_nvlist(config,
                                        ZPOOL_CONFIG_VDEV_TREE, &nvl);
            zfs_iter_vdev(zhp, nvl, data);
        }
    }

    /*
     * if this pool was originally unavailable,
     * then enable its datasets asynchronously
     */
    if (g_enumeration_done)  {
        for (pool = list_head(&g_pool_list); pool != NULL;
                pool = list_next(&g_pool_list, pool)) {

            if (pool->uap_enable_tid != 0)
                continue;	/* entry already processed */
            if (strcmp(zpool_get_name(zhp),
                       zpool_get_name(pool->uap_zhp)))
                continue;
            if (zfs_toplevel_state(zhp) >= VDEV_STATE_DEGRADED) {
                /* send to a background thread; keep on list */
                (void) pthread_create(&pool->uap_enable_tid,
                                      NULL, zfs_enable_ds, pool);
                break;
            }
        }
    }

    zpool_close(zhp);
    return (dp->dd_found);	/* cease iteration after a match */
}
Example 10
/*
 * This function handles the ESC_ZFS_config_sync event. It iterates over the
 * pool's vdevs to update the FRU property.
 */
int
zfs_deliver_sync(nvlist_t *nvl)
{
	dev_data_t dd = { 0 };
	char *pname;
	zpool_handle_t *zhp;
	nvlist_t *config, *vdev;

	if (nvlist_lookup_string(nvl, "pool_name", &pname) != 0) {
		syseventd_print(9, "zfs_deliver_sync: no pool name\n");
		return (-1);
	}

	/*
	 * If this event was triggered by a pool export or destroy we cannot
	 * open the pool. This is not an error, just return 0 as we don't care
	 * about these events.
	 */
	zhp = zpool_open_canfail(g_zfshdl, pname);
	if (zhp == NULL)
		return (0);

	config = zpool_get_config(zhp, NULL);
	if (config == NULL) {
		syseventd_print(9, "zfs_deliver_sync: "
		    "failed to get pool config for %s\n", pname);
		zpool_close(zhp);
		return (-1);
	}

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &vdev) != 0) {
		syseventd_print(0, "zfs_deliver_sync: "
		    "failed to get vdev tree for %s\n", pname);
		zpool_close(zhp);
		return (-1);
	}

	libzfs_fru_refresh(g_zfshdl);

	dd.dd_func = zfs_sync_vdev_fru;
	zfs_iter_vdev(zhp, vdev, &dd);

	zpool_close(zhp);
	return (0);
}
Example 11
int lzwu_find_spare(zpool_handle_t *p_zpool, void *data)
{
        spare_cbdata_t *cbp = (spare_cbdata_t*)data;
        nvlist_t *config, *pnv_root;

        config = zpool_get_config(p_zpool, NULL);
        verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
            &pnv_root) == 0);

        if(lzwu_zpool_find_vdev(pnv_root, cbp->cb_guid))
        {
                cbp->cb_zhp = p_zpool;
                return 1;
        }

        zpool_close(p_zpool);
        return 0;
}
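A hypothetical driver for the callback above (not from the original source; spare_cbdata_t's cb_guid and cb_zhp fields are inferred from how the callback uses them):

static zpool_handle_t *
lzwu_spare_owner(libzfs_handle_t *hdl, uint64_t spare_guid)
{
        spare_cbdata_t cb;

        cb.cb_guid = spare_guid;
        cb.cb_zhp = NULL;

        /*
         * The callback leaves the matching pool handle open in cb_zhp;
         * the caller must close it when done.
         */
        (void) zpool_iter(hdl, lzwu_find_spare, &cb);

        return (cb.cb_zhp);
}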
Example 12
/*
 *	Collects zpool stats for the pool configured in the configData var.
 *	It opens a handle to the pool, loads its vdev tree and then the
 *	stats of the root vdev. These stats are saved temporarily and then
 *	inserted into the SQL database.
 */
int collectZpoolStats() {
	// Open a handle to the zfs filesystems
	libzfs_handle_t *g_zfs = libzfs_init();

	// Open a handle to the defined zpool
	zpool_handle_t *zhp;
	zhp = zpool_open_canfail(g_zfs, configData.poolname->value);

	// Declarations and such...
	nvlist_t *configuration, *vdevroot;
	vdev_stat_t *vdevstats;
	iostatcollection statisticscollection;
	uint_t nelem;
	configuration = zpool_get_config(zhp, NULL);

	// The config nvlist (and the vdev stats inside it) is owned by the
	// zpool handle, so keep the handle and the library open until the
	// stats have been copied out below.

	// Put the vdev tree belonging to the pool in vdevroot.
	verify(nvlist_lookup_nvlist(configuration, ZPOOL_CONFIG_VDEV_TREE, &vdevroot) == 0);

	// Load the vdev stats in vdevstats.
	verify(nvlist_lookup_uint64_array(vdevroot, ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vdevstats, &nelem) == 0);

	// Place the collected statistics in the collection
	statisticscollection.readops = vdevstats->vs_ops[ZIO_TYPE_READ];
	statisticscollection.writeops = vdevstats->vs_ops[ZIO_TYPE_WRITE];
	statisticscollection.readbytes = vdevstats->vs_bytes[ZIO_TYPE_READ];
	statisticscollection.writebytes = vdevstats->vs_bytes[ZIO_TYPE_WRITE];
	statisticscollection.checksum_errors = vdevstats->vs_checksum_errors;
	statisticscollection.state = vdevstats->vs_state;
	statisticscollection.space_alloc = vdevstats->vs_alloc;
	statisticscollection.space = vdevstats->vs_space;

	// Done with the config; now release the zpool handle and the library.
	zpool_close(zhp);
	libzfs_fini(g_zfs);

	// Create the query and post it to MySQL
	char queryString[1024];
	snprintf(queryString, 1024,
	    "INSERT INTO io_stats (date, %s, %s, %s, %s, %s, %s, %s, %s) "
	    "VALUES (NOW(), '%llu', '%llu', '%llu', '%llu', '%llu', '%llu', "
	    "'%llu', '%llu')",
	    "iop_read", "iop_write", "bandwidth_read", "bandwidth_write",
	    "space", "space_alloc", "checksum_errors", "state",
	    statisticscollection.readops, statisticscollection.writeops,
	    statisticscollection.readbytes, statisticscollection.writebytes,
	    statisticscollection.space, statisticscollection.space_alloc,
	    statisticscollection.checksum_errors, statisticscollection.state);
	if (executeQuery(queryString)) return 1;
	return 0;
}
Example 13
int
zpool_get_stats(zpool_handle_t * zhp, void * data) {
	config_t * cnf = (config_t *)data;
	uint_t c;
	boolean_t missing;

	nvlist_t * nv, * config;
	vdev_stat_t * vs;

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return 1;
	}

	config = zpool_get_config(zhp, NULL);

	if (nvlist_lookup_nvlist(config,
	    ZPOOL_CONFIG_VDEV_TREE, &nv) != 0) {
		zpool_close(zhp);
		return 1;
	}

	if (nvlist_lookup_uint64_array(nv,
	    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &c) != 0) {
		zpool_close(zhp);
		return 1;
	}

	if (!strcmp(zpool_get_name(zhp), cnf->zname)) {
		cnf->zpool.read_ops  = vs->vs_ops[ZIO_TYPE_READ];			
		cnf->zpool.write_ops = vs->vs_ops[ZIO_TYPE_WRITE];
		cnf->zpool.read_bts  = vs->vs_bytes[ZIO_TYPE_READ];
		cnf->zpool.write_bts = vs->vs_bytes[ZIO_TYPE_WRITE];
		cnf->zpool.alloc = vs->vs_alloc;
		cnf->zpool.free = vs->vs_space - vs->vs_alloc;
		cnf->zpool.health = zpool_get_health(zhp);
		cnf->zpool.dedupratio = zpool_get_dedupratio(zhp);
		cnf->zpool.name = zpool_get_poolname(zhp);
		cnf->zpool.ddt_memory = get_dedup_stats(config);
	}

	zpool_close(zhp);
	return 0;
}
Example 14
static int
zfs_iter_pool(zpool_handle_t *zhp, void *data)
{
	nvlist_t *config, *nvl;
	dev_data_t *dp = data;
	uint64_t pool_guid;
	unavailpool_t *pool;

	if ((config = zpool_get_config(zhp, NULL)) != NULL) {
		if (dp->dd_pool_guid == 0 ||
		    (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &pool_guid) == 0 && pool_guid == dp->dd_pool_guid)) {
			(void) nvlist_lookup_nvlist(config,
			    ZPOOL_CONFIG_VDEV_TREE, &nvl);
			zfs_iter_vdev(zhp, nvl, data);
		}
	}
	if (g_enumeration_done)  {
		for (pool = list_head(&g_pool_list); pool != NULL;
		    pool = list_next(&g_pool_list, pool)) {

			if (strcmp(zpool_get_name(zhp),
			    zpool_get_name(pool->uap_zhp)))
				continue;
			if (zfs_toplevel_state(zhp) >= VDEV_STATE_DEGRADED) {
				list_remove(&g_pool_list, pool);
				(void) tpool_dispatch(g_tpool, zfs_enable_ds,
				    pool);
				break;
			}
		}
	}

	zpool_close(zhp);
	return (0);
}
Example 15
/*
 * Determines if the pool is in use.  If so, it returns true and the state of
 * the pool as well as the name of the pool.  The name string is allocated and
 * must be freed by the caller.
 */
int
zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
    boolean_t *inuse)
{
	nvlist_t *config;
	char *name;
	boolean_t ret;
	uint64_t guid, vdev_guid;
	zpool_handle_t *zhp;
	nvlist_t *pool_config;
	uint64_t stateval, isspare;
	aux_cbdata_t cb = { 0 };
	boolean_t isactive;

	*inuse = B_FALSE;

	if (zpool_read_label(fd, &config, NULL) != 0 && errno == ENOMEM) {
		(void) no_memory(hdl);
		return (-1);
	}

	if (config == NULL)
		return (0);

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) == 0);

	if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);
	}

	switch (stateval) {
	case POOL_STATE_EXPORTED:
		/*
		 * A pool with an exported state may in fact be imported
		 * read-only, so check the in-core state to see if it's
		 * active and imported read-only.  If it is, set
		 * its state to active.
		 */
		if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&
		    (zhp = zpool_open_canfail(hdl, name)) != NULL) {
			if (zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))
				stateval = POOL_STATE_ACTIVE;

			/*
			 * All we needed the zpool handle for is the
			 * readonly prop check.
			 */
			zpool_close(zhp);
		}

		ret = B_TRUE;
		break;

	case POOL_STATE_ACTIVE:
		/*
		 * For an active pool, we have to determine if it's really part
		 * of a currently active pool (in which case the pool will exist
		 * and the guid will be the same), or whether it's part of an
		 * active pool that was disconnected without being explicitly
		 * exported.
		 */
		if (pool_active(hdl, name, guid, &isactive) != 0) {
			nvlist_free(config);
			return (-1);
		}

		if (isactive) {
			/*
			 * Because the device may have been removed while
			 * offlined, we only report it as active if the vdev is
			 * still present in the config.  Otherwise, pretend like
			 * it's not in use.
			 */
			if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
			    (pool_config = zpool_get_config(zhp, NULL))
			    != NULL) {
				nvlist_t *nvroot;

				verify(nvlist_lookup_nvlist(pool_config,
				    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
				ret = find_guid(nvroot, vdev_guid);
			} else {
				ret = B_FALSE;
			}

			/*
			 * If this is an active spare within another pool, we
			 * treat it like an unused hot spare.  This allows the
			 * user to create a pool with a hot spare that is
			 * currently in use within another pool.  Since we
			 * return B_TRUE,
			 * libdiskmgt will continue to prevent generic consumers
			 * from using the device.
			 */
			if (ret && nvlist_lookup_uint64(config,
			    ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
				stateval = POOL_STATE_SPARE;

			if (zhp != NULL)
				zpool_close(zhp);
		} else {
			stateval = POOL_STATE_POTENTIALLY_ACTIVE;
			ret = B_TRUE;
		}
		break;

	case POOL_STATE_SPARE:
		/*
		 * For a hot spare, it can be either definitively in use, or
		 * potentially active.  To determine if it's in use, we iterate
		 * over all pools in the system and search for one with a spare
		 * with a matching guid.
		 *
		 * Due to the shared nature of spares, we don't actually report
		 * the potentially active case as in use.  This means the user
		 * can freely create pools on the hot spares of exported pools,
		 * but to do otherwise makes the resulting code complicated, and
		 * we end up having to deal with this case anyway.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_SPARES;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	case POOL_STATE_L2CACHE:

		/*
		 * Check if any pool is currently using this l2cache device.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_L2CACHE;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	default:
		ret = B_FALSE;
	}


	if (ret) {
		if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
			if (cb.cb_zhp)
				zpool_close(cb.cb_zhp);
			nvlist_free(config);
			return (-1);
		}
		*state = (pool_state_t)stateval;
	}

	if (cb.cb_zhp)
		zpool_close(cb.cb_zhp);

	nvlist_free(config);
	*inuse = ret;
	return (0);
}
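A hypothetical caller sketch (not from the original source) showing the expected ownership of the returned name string:

static void
report_device_usage(libzfs_handle_t *hdl, int fd)
{
	pool_state_t state;
	char *name = NULL;
	boolean_t inuse = B_FALSE;

	if (zpool_in_use(hdl, fd, &state, &name, &inuse) == 0 && inuse) {
		(void) printf("device belongs to pool '%s' (state %d)\n",
		    name, (int)state);
		free(name);	/* allocated by zpool_in_use() for the caller */
	}
}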
Example 16
/*
 * Attach new_disk (fully described by nvroot) to old_disk.
 * If 'replacing' is specified, the new disk will replace the old one.
 */
int
zpool_vdev_attach(zpool_handle_t *zhp,
    const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	char *packed;
	int ret;
	size_t len;
	nvlist_t *tgt;
	boolean_t avail_spare;
	uint64_t val;
	char *path;
	nvlist_t **child;
	uint_t children;
	nvlist_t *config_root;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	if (replacing)
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot replace %s with %s"), old_disk, new_disk);
	else
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot attach %s to %s"), new_disk, old_disk);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
	zc.zc_cookie = replacing;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0 || children != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
	}

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);

	/*
	 * If the target is a hot spare that has been swapped in, we can only
	 * replace it with another hot spare.
	 */
	if (replacing &&
	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
	    (zpool_find_vdev(zhp, path, &avail_spare) == NULL ||
	    !avail_spare) && is_replacing_spare(config_root, tgt, 1)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "can only be replaced by another hot spare"));
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	/*
	 * If we are attempting to replace a spare, it cannot be applied to an
	 * already spared device.
	 */
	if (replacing &&
	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
	    zpool_find_vdev(zhp, path, &avail_spare) != NULL && avail_spare &&
	    is_replacing_spare(config_root, tgt, 0)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device has already been replaced with a spare"));
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	verify(nvlist_size(nvroot, &len, NV_ENCODE_NATIVE) == 0);

	if ((packed = zfs_alloc(zhp->zpool_hdl, len)) == NULL)
		return (-1);

	verify(nvlist_pack(nvroot, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);

	zc.zc_config_src = (uint64_t)(uintptr_t)packed;
	zc.zc_config_src_size = len;

	ret = ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ATTACH, &zc);

	free(packed);

	if (ret == 0)
		return (0);

	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't attach to or replace this type of vdev.
		 */
		if (replacing)
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cannot replace a replacing device"));
		else
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "can only attach to mirrors and top-level "
			    "disks"));
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EINVAL:
		/*
		 * The new device must be a single disk.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
		break;

	case EBUSY:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
		    new_disk);
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EOVERFLOW:
		/*
		 * The new device is too small.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device is too small"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EDOM:
		/*
		 * The new device has a different alignment requirement.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "devices have different sector alignment"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case ENAMETOOLONG:
		/*
		 * The resulting top-level vdev spec won't fit in the label.
		 */
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}
Example 17
/*
 * Function:	be_do_installgrub
 * Description:	This function runs installgrub using the grub loader files
 *              from the BE we're activating and installing them on the
 *              pool the BE lives in.
 *
 * Parameters:
 *              bt - The transaction data for the BE we're activating.
 * Return:
 *		BE_SUCCESS - Success
 *		be_errno_t - Failure
 *
 * Scope:
 *		Private
 */
static int
be_do_installgrub(be_transaction_data_t *bt)
{
	zpool_handle_t  *zphp = NULL;
	zfs_handle_t	*zhp = NULL;
	nvlist_t **child, *nv, *config;
	uint_t c, children = 0;
	char *tmp_mntpt = NULL;
	char *pool_mntpnt = NULL;
	char *ptmp_mntpnt = NULL;
	char *orig_mntpnt = NULL;
	FILE *cap_fp = NULL;
	FILE *zpool_cap_fp = NULL;
	char line[BUFSIZ];
	char cap_file[MAXPATHLEN];
	char zpool_cap_file[MAXPATHLEN];
	char stage1[MAXPATHLEN];
	char stage2[MAXPATHLEN];
	char installgrub_cmd[MAXPATHLEN];
	char *vname;
	char be_run_cmd_errbuf[BUFSIZ];
	int ret = BE_SUCCESS;
	int err = 0;
	boolean_t be_mounted = B_FALSE;
	boolean_t pool_mounted = B_FALSE;

	if (!be_has_grub()) {
		be_print_err(gettext("be_do_installgrub: Not supported "
		    "on this architecture\n"));
		return (BE_ERR_NOTSUP);
	}

	if ((zhp = zfs_open(g_zfs, bt->obe_root_ds, ZFS_TYPE_FILESYSTEM)) ==
	    NULL) {
		be_print_err(gettext("be_do_installgrub: failed to "
		    "open BE root dataset (%s): %s\n"), bt->obe_root_ds,
		    libzfs_error_description(g_zfs));
		ret = zfs_err_to_be_err(g_zfs);
		return (ret);
	}
	if (!zfs_is_mounted(zhp, &tmp_mntpt)) {
		if ((ret = _be_mount(bt->obe_name, &tmp_mntpt,
		    BE_MOUNT_FLAG_NO_ZONES)) != BE_SUCCESS) {
			be_print_err(gettext("be_do_installgrub: failed to "
			    "mount BE (%s)\n"), bt->obe_name);
			ZFS_CLOSE(zhp);
			return (ret);
		}
		be_mounted = B_TRUE;
	}
	ZFS_CLOSE(zhp);

	(void) snprintf(stage1, sizeof (stage1), "%s%s", tmp_mntpt, BE_STAGE_1);
	(void) snprintf(stage2, sizeof (stage2), "%s%s", tmp_mntpt, BE_STAGE_2);

	if ((zphp = zpool_open(g_zfs, bt->obe_zpool)) == NULL) {
		be_print_err(gettext("be_do_installgrub: failed to open "
		    "pool (%s): %s\n"), bt->obe_zpool,
		    libzfs_error_description(g_zfs));
		ret = zfs_err_to_be_err(g_zfs);
		if (be_mounted)
			(void) _be_unmount(bt->obe_name, 0);
		free(tmp_mntpt);
		return (ret);
	}

	if ((config = zpool_get_config(zphp, NULL)) == NULL) {
		be_print_err(gettext("be_do_installgrub: failed to get zpool "
		    "configuration information. %s\n"),
		    libzfs_error_description(g_zfs));
		ret = zfs_err_to_be_err(g_zfs);
		goto done;
	}

	/*
	 * Get the vdev tree
	 */
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nv) != 0) {
		be_print_err(gettext("be_do_installgrub: failed to get vdev "
		    "tree: %s\n"), libzfs_error_description(g_zfs));
		ret = zfs_err_to_be_err(g_zfs);
		goto done;
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) != 0) {
		be_print_err(gettext("be_do_installgrub: failed to traverse "
		    "the vdev tree: %s\n"), libzfs_error_description(g_zfs));
		ret = zfs_err_to_be_err(g_zfs);
		goto done;
	}
	for (c = 0; c < children; c++) {
		uint_t i, nchildren = 0;
		nvlist_t **nvchild;
		vname = zpool_vdev_name(g_zfs, zphp, child[c], B_FALSE);
		if (vname == NULL) {
			be_print_err(gettext(
			    "be_do_installgrub: "
			    "failed to get device name: %s\n"),
			    libzfs_error_description(g_zfs));
			ret = zfs_err_to_be_err(g_zfs);
			goto done;
		}
		if (strcmp(vname, "mirror") == 0 || vname[0] != 'c') {

			if (nvlist_lookup_nvlist_array(child[c],
			    ZPOOL_CONFIG_CHILDREN, &nvchild, &nchildren) != 0) {
				be_print_err(gettext("be_do_installgrub: "
				    "failed to traverse the vdev tree: %s\n"),
				    libzfs_error_description(g_zfs));
				ret = zfs_err_to_be_err(g_zfs);
				goto done;
			}

			for (i = 0; i < nchildren; i++) {
				vname = zpool_vdev_name(g_zfs, zphp,
				    nvchild[i], B_FALSE);
				if (vname == NULL) {
					be_print_err(gettext(
					    "be_do_installgrub: "
					    "failed to get device name: %s\n"),
					    libzfs_error_description(g_zfs));
					ret = zfs_err_to_be_err(g_zfs);
					goto done;
				}

				(void) snprintf(installgrub_cmd,
				    sizeof (installgrub_cmd),
				    "%s %s %s /dev/rdsk/%s",
				    BE_INSTALL_GRUB, stage1, stage2, vname);
				if (be_run_cmd(installgrub_cmd,
				    be_run_cmd_errbuf, BUFSIZ, NULL, 0) !=
				    BE_SUCCESS) {
					be_print_err(gettext(
					    "be_do_installgrub: installgrub "
					    "failed for device %s.\n"), vname);
					/* Assume localized cmd err output. */
					be_print_err(gettext(
					    "  Command: \"%s\"\n"),
					    installgrub_cmd);
					be_print_err("%s", be_run_cmd_errbuf);
					free(vname);
					ret = BE_ERR_BOOTFILE_INST;
					goto done;
				}
				free(vname);
			}
		} else {
			(void) snprintf(installgrub_cmd,
			    sizeof (installgrub_cmd), "%s %s %s /dev/rdsk/%s",
			    BE_INSTALL_GRUB, stage1, stage2, vname);
			if (be_run_cmd(installgrub_cmd, be_run_cmd_errbuf,
			    BUFSIZ, NULL, 0) != BE_SUCCESS) {
				be_print_err(gettext(
				    "be_do_installgrub: installgrub "
				    "failed for device %s.\n"), vname);
				/* Assume localized cmd err output. */
				be_print_err(gettext("  Command: \"%s\"\n"),
				    installgrub_cmd);
				be_print_err("%s", be_run_cmd_errbuf);
				free(vname);
				ret = BE_ERR_BOOTFILE_INST;
				goto done;
			}
			free(vname);
		}
	}

	/*
	 * Copy the grub capability file from the BE we're activating into
	 * the root pool.
	 */
	(void) snprintf(cap_file, sizeof (cap_file), "%s%s", tmp_mntpt,
	    BE_CAP_FILE);

	if ((zhp = zfs_open(g_zfs, bt->obe_zpool, ZFS_TYPE_FILESYSTEM)) ==
	    NULL) {
		be_print_err(gettext("be_do_installgrub: zfs_open "
		    "failed: %s\n"), libzfs_error_description(g_zfs));
		zpool_close(zphp);
		return (zfs_err_to_be_err(g_zfs));
	}

	/*
	 * Check to see if the pool's dataset is mounted. If it isn't we'll
	 * attempt to mount it.
	 */
	if ((ret = be_mount_pool(zhp, &ptmp_mntpnt,
	    &orig_mntpnt, &pool_mounted)) != BE_SUCCESS) {
		be_print_err(gettext("be_do_installgrub: pool dataset "
		    "(%s) could not be mounted\n"), bt->obe_zpool);
		ZFS_CLOSE(zhp);
		zpool_close(zphp);
		return (ret);
	}

	/*
	 * Get the mountpoint for the root pool dataset.
	 */
	if (!zfs_is_mounted(zhp, &pool_mntpnt)) {
		be_print_err(gettext("be_do_installgrub: pool "
		    "dataset (%s) is not mounted. Can't check the grub "
		    "version from the grub capability file.\n"), bt->obe_zpool);
		ret = BE_ERR_NO_MENU;
		goto done;
	}

	(void) snprintf(zpool_cap_file, sizeof (zpool_cap_file), "%s%s",
	    pool_mntpnt, BE_CAP_FILE);

	free(pool_mntpnt);
	pool_mntpnt = NULL;

	if ((cap_fp = fopen(cap_file, "r")) == NULL) {
		err = errno;
		be_print_err(gettext("be_do_installgrub: failed to open grub "
		    "capability file\n"));
		ret = errno_to_be_err(err);
		goto done;
	}
	if ((zpool_cap_fp = fopen(zpool_cap_file, "w")) == NULL) {
		err = errno;
		be_print_err(gettext("be_do_installgrub: failed to open new "
		    "grub capability file\n"));
		ret = errno_to_be_err(err);
		(void) fclose(cap_fp);
		goto done;
	}

	while (fgets(line, BUFSIZ, cap_fp)) {
		(void) fputs(line, zpool_cap_fp);
	}

	(void) fclose(zpool_cap_fp);
	(void) fclose(cap_fp);

done:
	if (pool_mounted) {
		int iret = 0;
		iret = be_unmount_pool(zhp, ptmp_mntpnt, orig_mntpnt);
		if (ret == BE_SUCCESS)
			ret = iret;
		free(orig_mntpnt);
		free(ptmp_mntpnt);
	}
	ZFS_CLOSE(zhp);
	if (be_mounted)
		(void) _be_unmount(bt->obe_name, 0);
	zpool_close(zphp);
	free(tmp_mntpt);
	return (ret);
}
Example 18
static char *
find_root_device_from_libzfs (const char *dir)
{
  char *device = NULL;
  char *poolname;
  char *poolfs;

  grub_find_zpool_from_dir (dir, &poolname, &poolfs);
  if (! poolname)
    return NULL;

  {
    zpool_handle_t *zpool;
    libzfs_handle_t *libzfs;
    nvlist_t *config, *vdev_tree;
    nvlist_t **children;
    unsigned int nvlist_count;
    unsigned int i;

    libzfs = grub_get_libzfs_handle ();
    if (! libzfs)
      return NULL;

    zpool = zpool_open (libzfs, poolname);
    config = zpool_get_config (zpool, NULL);

    if (nvlist_lookup_nvlist (config, "vdev_tree", &vdev_tree) != 0)
      error (1, errno, "nvlist_lookup_nvlist (\"vdev_tree\")");

    if (nvlist_lookup_nvlist_array (vdev_tree, "children", &children, &nvlist_count) != 0)
      error (1, errno, "nvlist_lookup_nvlist_array (\"children\")");
    assert (nvlist_count > 0);

    while (nvlist_lookup_nvlist_array (children[0], "children",
				       &children, &nvlist_count) == 0)
      assert (nvlist_count > 0);

    for (i = 0; i < nvlist_count; i++)
      {
	if (nvlist_lookup_string (children[i], "path", &device) != 0)
	  error (1, errno, "nvlist_lookup_string (\"path\")");

	struct stat st;
	if (stat (device, &st) == 0)
	  {
	    device = xstrdup (device);
	    break;
	  }

	device = NULL;
      }

    zpool_close (zpool);
  }

  free (poolname);
  if (poolfs)
    free (poolfs);

  return device;
}