Example #1
static void
dsl_dataset_user_release_onexit(void *arg)
{
	zfs_hold_cleanup_arg_t *ca = arg;
	spa_t *spa;
	int error;

	error = spa_open(ca->zhca_spaname, &spa, FTAG);
	if (error != 0) {
		zfs_dbgmsg("couldn't release holds on pool=%s "
		    "because pool is no longer loaded",
		    ca->zhca_spaname);
		return;
	}
	if (spa_load_guid(spa) != ca->zhca_spa_load_guid) {
		zfs_dbgmsg("couldn't release holds on pool=%s "
		    "because pool is no longer loaded (guid doesn't match)",
		    ca->zhca_spaname);
		spa_close(spa, FTAG);
		return;
	}

	(void) dsl_dataset_user_release_tmp(spa_get_dsl(spa), ca->zhca_holds);
	fnvlist_free(ca->zhca_holds);
	kmem_free(ca, sizeof (zfs_hold_cleanup_arg_t));
	spa_close(spa, FTAG);
}
Example #2
File: zvol.c Project: alek-p/zfs
/*
 * Sanity check volume block size.
 */
int
zvol_check_volblocksize(const char *name, uint64_t volblocksize)
{
	/* Record sizes above 128k need the feature to be enabled */
	if (volblocksize > SPA_OLD_MAXBLOCKSIZE) {
		spa_t *spa;
		int error;

		if ((error = spa_open(name, &spa, FTAG)) != 0)
			return (error);

		if (!spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
			spa_close(spa, FTAG);
			return (SET_ERROR(ENOTSUP));
		}

		/*
		 * We don't allow setting the property above 1MB,
		 * unless the tunable has been changed.
		 */
		if (volblocksize > zfs_max_recordsize) {
			spa_close(spa, FTAG);
			return (SET_ERROR(EDOM));
		}

		spa_close(spa, FTAG);
	}

	if (volblocksize < SPA_MINBLOCKSIZE ||
	    volblocksize > SPA_MAXBLOCKSIZE ||
	    !ISP2(volblocksize))
		return (SET_ERROR(EDOM));

	return (0);
}
Example #3
/**
 * Add a vdev to a given zpool
 * @param psz_name: zpool name
 * @param pnv_root: the root tree
 * @return 0 in case of success, the error code otherwise
 */
int libzfs_zpool_vdev_add(const char *psz_name, nvlist_t *pnv_root)
{
        spa_t *p_spa;
        nvlist_t **pnv_l2cache, **pnv_spares;
        uint_t i_l2cache = 0, i_spares = 0;
        int i_error;

        if((i_error = spa_open(psz_name, &p_spa, FTAG)))
                return i_error;

        nvlist_lookup_nvlist_array(pnv_root, ZPOOL_CONFIG_L2CACHE, &pnv_l2cache, &i_l2cache);
        nvlist_lookup_nvlist_array(pnv_root, ZPOOL_CONFIG_SPARES,  &pnv_spares, &i_spares);

        /*
         * A root pool with concatenated devices is not supported.
         * Thus, we cannot add a device to a root pool.
         *
         * An intent log device cannot be added to a root pool either:
         * the ZIL is replayed during mountroot, and a separate log
         * device cannot be accessed at that time.
         *
         * l2cache and spare devices are fine to add to a root pool.
         */
        if(spa_bootfs(p_spa) != 0 && i_l2cache == 0 && i_spares == 0)
        {
                spa_close(p_spa, FTAG);
                return EDOM;
        }

        //TODO: get the error message
        i_error = spa_vdev_add(p_spa, pnv_root);
        spa_close(p_spa, FTAG);

        return i_error;
}
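
For context on the pnv_root argument above: spa_vdev_add() expects a vdev tree, i.e. a root nvlist whose ZPOOL_CONFIG_CHILDREN array lists the leaf vdevs to add. The sketch below only illustrates that shape and is not part of the project; the function name and device path are placeholders, and real callers normally build and label the tree through the libzfs helpers (ashift, whole-disk handling, etc. are omitted).

/*
 * Minimal sketch only: builds a root nvlist with a single disk leaf, the
 * shape spa_vdev_add() expects for pnv_root.  Placeholder names throughout.
 */
static nvlist_t *
example_build_single_disk_root(const char *psz_dev_path)
{
        nvlist_t *pnv_leaf = fnvlist_alloc();
        nvlist_t *pnv_root = fnvlist_alloc();

        /* One leaf vdev describing the new disk. */
        fnvlist_add_string(pnv_leaf, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK);
        fnvlist_add_string(pnv_leaf, ZPOOL_CONFIG_PATH, psz_dev_path);
        fnvlist_add_uint64(pnv_leaf, ZPOOL_CONFIG_IS_LOG, 0);

        /* Wrap the leaf in a root node holding the children array. */
        fnvlist_add_string(pnv_root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
        fnvlist_add_nvlist_array(pnv_root, ZPOOL_CONFIG_CHILDREN, &pnv_leaf, 1);

        /* The array is copied into pnv_root, so the leaf can be freed. */
        fnvlist_free(pnv_leaf);
        return pnv_root;
}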
Example #4
/**
 * Attach a vdev to a given zpool
 * @return 0 in case of success, the error code otherwise
 */
int libzfs_zpool_vdev_attach(zpool_handle_t *p_zpool, const char *psz_current_dev, nvlist_t *pnv_root, int i_replacing, const char **ppsz_error)
{
        spa_t *p_spa;
        nvlist_t *pnv_tgt;
        boolean_t avail_spare, l2cache;
        uint64_t guid;
        int i_error;

        if((pnv_tgt = zpool_find_vdev(p_zpool, psz_current_dev,
                                      &avail_spare, &l2cache, NULL)) == 0)
        {
                *ppsz_error = "no vdev corresponding to the one given";
                return ENOENT;
        }
        assert(nvlist_lookup_uint64(pnv_tgt, ZPOOL_CONFIG_GUID, &guid) == 0);

        // Do not attach hot spares or L2 cache
        if(avail_spare)
        {
                *ppsz_error = "could not attach hot spares";
                return EINVAL;
        }
        if(l2cache)
        {
                *ppsz_error = "could not attach to a device actually used as a cache";
                return EINVAL;
        }


        if((i_error = spa_open(p_zpool->zpool_name, &p_spa, FTAG)))
                return i_error;

        i_error = spa_vdev_attach(p_spa, guid, pnv_root, i_replacing);
        spa_close(p_spa, FTAG);

        switch(i_error)
        {
        case 0:
                break;
        case ENOTSUP:
                *ppsz_error = "can only attach to mirror and top-level disks";
                break;
        case EINVAL:
                *ppsz_error = "new device must be a single disk";
                break;
        case EBUSY:
                *ppsz_error = "the device is busy";
                break;
        case EOVERFLOW:
                *ppsz_error = "devices is too small";
                break;
        case EDOM:
                *ppsz_error = "devices have different sector alignment";
                break;
        default:
                *ppsz_error ="unable to attach the new device";
        }

        return i_error;
}
Example #5
int
dmu_objset_snapshot(char *fsname, char *snapname, boolean_t recursive)
{
	dsl_sync_task_t *dst;
	struct osnode *osn;
	struct snaparg sn = { 0 };
	spa_t *spa;
	int err;

	(void) strcpy(sn.failed, fsname);

	err = spa_open(fsname, &spa, FTAG);
	if (err)
		return (err);

	sn.dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
	sn.snapname = snapname;
	list_create(&sn.objsets, sizeof (struct osnode),
	    offsetof(struct osnode, node));

	if (recursive) {
		sn.checkperms = B_TRUE;
		err = dmu_objset_find(fsname,
		    dmu_objset_snapshot_one, &sn, DS_FIND_CHILDREN);
	} else {
		sn.checkperms = B_FALSE;
		err = dmu_objset_snapshot_one(fsname, &sn);
	}

	if (err)
		goto out;

	err = dsl_sync_task_group_wait(sn.dstg);

	for (dst = list_head(&sn.dstg->dstg_tasks); dst;
	    dst = list_next(&sn.dstg->dstg_tasks, dst)) {
		dsl_dataset_t *ds = dst->dst_arg1;
		if (dst->dst_err)
			dsl_dataset_name(ds, sn.failed);
	}

out:
	while ((osn = list_head(&sn.objsets)) != NULL) {
		list_remove(&sn.objsets, osn);
		zil_resume(dmu_objset_zil(osn->os));
		dmu_objset_close(osn->os);
		kmem_free(osn, sizeof (struct osnode));
	}
	list_destroy(&sn.objsets);

	if (err)
		(void) strcpy(fsname, sn.failed);
	dsl_sync_task_group_destroy(sn.dstg);
	spa_close(spa, FTAG);
	return (err);
}
Example #6
/*
 * Called from open context to perform a callback in syncing context.  Waits
 * for the operation to complete.
 *
 * The checkfunc will be called from open context as a preliminary check
 * which can quickly fail.  If it succeeds, it will be called again from
 * syncing context.  The checkfunc should generally be designed to work
 * properly in either context, but if necessary it can check
 * dmu_tx_is_syncing(tx).
 *
 * The synctask infrastructure enforces proper locking strategy with respect
 * to the dp_config_rwlock -- the lock will always be held when the callbacks
 * are called.  It will be held for read during the open-context (preliminary)
 * call to the checkfunc, and then held for write from syncing context during
 * the calls to the check and sync funcs.
 *
 * A dataset or pool name can be passed as the first argument.  Typically,
 * the check func will hold, check the return value of the hold, and then
 * release the dataset.  The sync func will VERIFY0(hold()) the dataset.
 * This is safe because no changes can be made between the check and sync funcs,
 * and the sync func will only be called if the check func successfully opened
 * the dataset.
 */
int
dsl_sync_task(const char *pool, dsl_checkfunc_t *checkfunc,
    dsl_syncfunc_t *syncfunc, void *arg,
    int blocks_modified, zfs_space_check_t space_check)
{
	spa_t *spa;
	dmu_tx_t *tx;
	int err;
	dsl_sync_task_t dst = { { { NULL } } };
	dsl_pool_t *dp;

	err = spa_open(pool, &spa, FTAG);
	if (err != 0)
		return (err);
	dp = spa_get_dsl(spa);

top:
	tx = dmu_tx_create_dd(dp->dp_mos_dir);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));

	dst.dst_pool = dp;
	dst.dst_txg = dmu_tx_get_txg(tx);
	dst.dst_space = blocks_modified << DST_AVG_BLKSHIFT;
	dst.dst_space_check = space_check;
	dst.dst_checkfunc = checkfunc != NULL ? checkfunc : dsl_null_checkfunc;
	dst.dst_syncfunc = syncfunc;
	dst.dst_arg = arg;
	dst.dst_error = 0;
	dst.dst_nowaiter = B_FALSE;

	dsl_pool_config_enter(dp, FTAG);
	err = dst.dst_checkfunc(arg, tx);
	dsl_pool_config_exit(dp, FTAG);

	if (err != 0) {
		dmu_tx_commit(tx);
		spa_close(spa, FTAG);
		return (err);
	}

	VERIFY(txg_list_add_tail(&dp->dp_sync_tasks, &dst, dst.dst_txg));

	dmu_tx_commit(tx);

	txg_wait_synced(dp, dst.dst_txg);

	if (dst.dst_error == EAGAIN) {
		txg_wait_synced(dp, dst.dst_txg + TXG_DEFER_SIZE);
		goto top;
	}

	spa_close(spa, FTAG);
	return (dst.dst_error);
}
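
The block comment above describes the check/sync convention; as a hedged illustration (not taken from the source), a minimal check/sync pair following that convention might look like the sketch below. The example_* names and the argument struct are assumptions made up for this sketch: the checkfunc holds, validates and releases the dataset, while the syncfunc VERIFY0()s the hold before applying the change.

typedef struct example_arg {
	const char *ea_dsname;	/* dataset to modify; illustrative only */
} example_arg_t;

/* Preliminary check: runs in open context, then again in syncing context. */
static int
example_check(void *arg, dmu_tx_t *tx)
{
	example_arg_t *ea = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	int err;

	err = dsl_dataset_hold(dp, ea->ea_dsname, FTAG, &ds);
	if (err != 0)
		return (err);
	/* ... validate the requested change against ds here ... */
	dsl_dataset_rele(ds, FTAG);
	return (0);
}

/* Sync func: only called if the check succeeded, so the hold must succeed. */
static void
example_sync(void *arg, dmu_tx_t *tx)
{
	example_arg_t *ea = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;

	VERIFY0(dsl_dataset_hold(dp, ea->ea_dsname, FTAG, &ds));
	/* ... apply the change in syncing context ... */
	dsl_dataset_rele(ds, FTAG);
}

/*
 * Caller (pool and dataset names are placeholders):
 *	example_arg_t ea = { .ea_dsname = "tank/fs" };
 *	err = dsl_sync_task("tank", example_check, example_sync,
 *	    &ea, 3, ZFS_SPACE_CHECK_NORMAL);
 */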
Example #7
/**
 * Detach the given vdev from the given pool
 * @param p_zpool: the zpool handler
 * @param psz_device: the device name
 * @param ppsz_error: the error message if any
 * @return 0 in case of success, the error code otherwise
 */
int libzfs_zpool_vdev_detach(zpool_handle_t *p_zpool, const char *psz_device, const char **ppsz_error)
{
        spa_t *p_spa;
        nvlist_t *pnv_tgt;
        boolean_t avail_spare, l2cache;
        uint64_t guid;
        int i_error;

        if((pnv_tgt = zpool_find_vdev(p_zpool, psz_device,
                                      &avail_spare, &l2cache, NULL)) == 0)
        {
                *ppsz_error = "no vdev corresponding to the one given";
                return ENOENT;
        }

        // Do not detach hot spares or L2 cache
        if(avail_spare)
        {
                *ppsz_error = "could not detach hot spares";
                return EINVAL;
        }
        if(l2cache)
        {
                *ppsz_error = "could not detach device actually used as a cache";
                return EINVAL;
        }

        assert(nvlist_lookup_uint64(pnv_tgt, ZPOOL_CONFIG_GUID, &guid) == 0);

        if((i_error = spa_open(p_zpool->zpool_name, &p_spa, FTAG)))
        {
                *ppsz_error = "unable to open the given zpool";
                return i_error;
        }

        if((i_error = spa_vdev_detach(p_spa, guid, 0, 0)))
        {
                switch(i_error)
                {
                case ENOTSUP:
                        *ppsz_error = "'detach' is only applicable to mirror and to replace vdevs";
                        break;
                case EBUSY:
                        *ppsz_error = "the device is actually in use";
                        break;
                default:
                        *ppsz_error = "unable to detach the given vdev";
                }
        }
        spa_close(p_spa, FTAG);

        return i_error;
}
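
A hedged usage sketch for the wrapper above, assuming the caller obtains its zpool_handle_t through the standard libzfs entry points (libzfs_init()/zpool_open()); the pool and device names are placeholders and only illustrate the call sequence.

#include <stdio.h>
#include <errno.h>
#include <libzfs.h>

static int
example_detach(const char *psz_pool, const char *psz_dev)
{
        libzfs_handle_t *p_libzfs;
        zpool_handle_t *p_zpool;
        const char *psz_error = NULL;
        int i_error;

        if((p_libzfs = libzfs_init()) == NULL)
                return ENOMEM;
        if((p_zpool = zpool_open(p_libzfs, psz_pool)) == NULL)
        {
                libzfs_fini(p_libzfs);
                return ENOENT;
        }

        i_error = libzfs_zpool_vdev_detach(p_zpool, psz_dev, &psz_error);
        if(i_error != 0)
                fprintf(stderr, "detach failed: %s\n", psz_error);

        zpool_close(p_zpool);
        libzfs_fini(p_libzfs);
        return i_error;
}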
Example #8
int
dmu_objset_snapshot(char *fsname, char *snapname,
    nvlist_t *props, boolean_t recursive)
{
	dsl_sync_task_t *dst;
	struct snaparg *sn;
	spa_t *spa;
	int err;

	sn = kmem_alloc(sizeof (struct snaparg), KM_SLEEP);
	(void) strcpy(sn->failed, fsname);

	err = spa_open(fsname, &spa, FTAG);
	if (err) {
		kmem_free(sn, sizeof (struct snaparg));
		return (err);
	}

	sn->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
	sn->snapname = snapname;
	sn->props = props;

	if (recursive) {
		sn->checkperms = B_TRUE;
		err = dmu_objset_find(fsname,
		    dmu_objset_snapshot_one, sn, DS_FIND_CHILDREN);
	} else {
		sn->checkperms = B_FALSE;
		err = dmu_objset_snapshot_one(fsname, sn);
	}

	if (err == 0)
		err = dsl_sync_task_group_wait(sn->dstg);

	for (dst = list_head(&sn->dstg->dstg_tasks); dst;
	    dst = list_next(&sn->dstg->dstg_tasks, dst)) {
		objset_t *os = dst->dst_arg1;
		dsl_dataset_t *ds = os->os->os_dsl_dataset;
		if (dst->dst_err)
			dsl_dataset_name(ds, sn->failed);
		zil_resume(dmu_objset_zil(os));
		dmu_objset_close(os);
	}

	if (err)
		(void) strcpy(fsname, sn->failed);
	dsl_sync_task_group_destroy(sn->dstg);
	spa_close(spa, FTAG);
	kmem_free(sn, sizeof (struct snaparg));
	return (err);
}
Example #9
int sp_process_spa(sp_data *sp, void *ud, void (*callback)(sp_data *, void *))
{
    sp_audio spa;
    if(spa_open(sp, &spa, sp->filename, SPA_WRITE) == SP_NOT_OK) {
        fprintf(stderr, "Error: could not open file %s.\n", sp->filename);    
    }
    while(sp->len > 0) {
        callback(sp, ud);
        spa_write_buf(sp, &spa, sp->out, sp->nchan);
        sp->len--;
        sp->pos++;
    }
    spa_close(&spa);
    return SP_OK;
}
Example #10
int sp_spa_init(sp_data *sp, sp_spa *p, const char *filename)
{
    if(spa_open(sp, &p->spa, filename, SPA_READ) != SP_OK) {
        return SP_NOT_OK;
    }
    
    p->pos = 0;

    p->bufsize = SPA_BUFSIZE;
    sp_auxdata_alloc(&p->aux, sizeof(SPFLOAT) * p->bufsize);

    p->buf = p->aux.ptr;

    return SP_OK;
}
Example #11
int sp_ftbl_loadspa(sp_data *sp, sp_ftbl **ft, const char *filename)
{
    *ft = malloc(sizeof(sp_ftbl));
    sp_ftbl *ftp = *ft;

    sp_audio spa;

    spa_open(sp, &spa, filename, SPA_READ);

    size_t size = spa.header.len;

    ftp->tbl = malloc(sizeof(SPFLOAT) * (size + 1));
    sp_ftbl_init(sp, ftp, size);

    spa_read_buf(sp, &spa, ftp->tbl, ftp->size);
    spa_close(&spa);
    return SP_OK;
}
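
A brief, hedged usage sketch for sp_ftbl_loadspa() above; the file name is a placeholder and error handling is kept minimal.

static void example_load_spa_table(void)
{
    sp_data *sp;
    sp_ftbl *ft;
    size_t i;

    sp_create(&sp);
    sp_ftbl_loadspa(sp, &ft, "sample.spa");

    /* ft->tbl now holds ft->size samples read from the file. */
    for(i = 0; i < ft->size; i++) {
        /* ... process ft->tbl[i] ... */
    }

    sp_ftbl_destroy(&ft);
    sp_destroy(&sp);
}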
Example #12
static void
zhack_spa_open(const char *target, boolean_t readonly, void *tag, spa_t **spa)
{
	int err;

	import_pool(target, readonly);

	zfeature_checks_disable = B_TRUE;
	err = spa_open(target, spa, tag);
	zfeature_checks_disable = B_FALSE;

	if (err != 0)
		fatal("cannot open '%s': %s", target, strerror(err));
	if (spa_version(*spa) < SPA_VERSION_FEATURES) {
		fatal("'%s' has version %d, features not enabled", target,
		    (int)spa_version(*spa));
	}
}
Example #13
/**
 * Remove the given vdev from the pool
 * @param p_zpool: the zpool handler
 * @param psz_name: the name of the device to remove
 * @param ppsz_error: the error message if any
 * @return 0 in case of success, the error code otherwise
 */
int libzfs_zpool_vdev_remove(zpool_handle_t *p_zpool, const char *psz_name, const char **ppsz_error)
{
        nvlist_t *pnv_tgt;
        spa_t *p_spa;
        boolean_t avail_spare, l2cache, islog;
        uint64_t guid;
        int i_error;

        if((pnv_tgt = zpool_find_vdev(p_zpool, psz_name,
                                      &avail_spare, &l2cache, &islog)) == 0)
        {
                *ppsz_error = "no vdev corresponding to the one given";
                return ENOENT;
        }

        assert(nvlist_lookup_uint64(pnv_tgt, ZPOOL_CONFIG_GUID, &guid) == 0);

        if((i_error = spa_open(p_zpool->zpool_name, &p_spa, FTAG)))
        {
                *ppsz_error = "unable to open the spa";
                return i_error;
        }
        i_error = spa_vdev_remove(p_spa, guid, B_FALSE);
        spa_close(p_spa, FTAG);

        switch(i_error)
        {
        case 0:
                return 0;
        case ENOTSUP:
                *ppsz_error = "only spares, slogs, and level 2 ARC devices can be removed";
                break;
        case ENOENT:
                *ppsz_error = "no vdev corresponding to the one given";
                break;
        default:
                *ppsz_error = "unable to remove the given vdev";
        }

        return i_error;
}
Example #14
int
dsl_crypto_key_change(char *dsname, zcrypt_key_t *newkey, nvlist_t *props)
{
    struct wkey_change_arg *ca;
    struct kcnode *kcn;
    dsl_dataset_t *ds;
    dsl_props_arg_t pa;
    spa_t *spa;
    int err;
    //dsl_sync_task_group_t *dstg;
    zcrypt_key_t *oldkey;
    dsl_pool_t *dp;

    ASSERT(newkey != NULL);
    ASSERT(dsname != NULL);

    err = dsl_pool_hold(dsname, FTAG, &dp);
    if (err != 0)
        return (err);

    if ((err = dsl_dataset_hold(dp, dsname, FTAG, &ds)) != 0) {
        dsl_pool_rele(dp, FTAG);
        return (err);
    }

    /*
     * Take the spa lock here so that new datasets can't get
     * created below us while we are doing a wrapping key change.
     * This is to avoid them being created with the wrong inherited
     * wrapping key.
     */
    err = spa_open(dsname, &spa, FTAG);
    if (err != 0) {
        dsl_dataset_rele(ds, FTAG);
        dsl_pool_rele(dp, FTAG);
        return (err);
    }

    oldkey = zcrypt_key_copy(zcrypt_keystore_find_wrappingkey(spa,
                             ds->ds_object));
    if (oldkey == NULL) {
        dsl_dataset_rele(ds, FTAG);
        dsl_pool_rele(dp, FTAG);
        spa_close(spa, FTAG);
        return (ENOENT);
    }
    ca = kmem_alloc(sizeof (struct wkey_change_arg), KM_SLEEP);
    ca->ca_new_key = newkey;
    ca->ca_old_key = oldkey;
    ca->ca_parent = dsname;
    ca->ca_props = props;

    list_create(&ca->ca_nodes, sizeof (struct kcnode),
                offsetof(struct kcnode, kc_node));

    zcrypt_key_hold(ca->ca_old_key, FTAG);
    zcrypt_key_hold(ca->ca_new_key, FTAG);

    //ca->ca_ds = dsl_sync_task_group_create(spa_get_dsl(spa));

    err = dmu_objset_find(dsname, dsl_crypto_key_change_find,
                          ca, DS_FIND_CHILDREN);

    /*
     * If this is the "top" dataset in this keychange it gets
     * the keysource and salt properties updated.
     */
    pa.pa_props = props;
    pa.pa_source = ZPROP_SRC_LOCAL;
    //pa.pa_flags = 0;
    //pa.pa_zone = curzone;
    //dsl_sync_task_create(ca->ca_dstg, NULL, dsl_props_set_sync, ds, &pa, 2);

    dsl_props_set(dsname, ZPROP_SRC_LOCAL, props);

    //if (err == 0)
    //err = dsl_sync_task_group_wait(dstg);

    while ((kcn = list_head(&ca->ca_nodes))) {
        list_remove(&ca->ca_nodes, kcn);
        dsl_dataset_rele(kcn->kc_ds, kcn);
        kmem_free(kcn, sizeof (struct kcnode));
    }

    //dsl_sync_task_group_destroy(ca->ca_dstg);

    /*
     * We are finished so release and free both the old and new keys.
     * We can free even the new key because everyone got a copy of it
     * not a reference to this one.
     */
    zcrypt_key_release(ca->ca_old_key, FTAG);
    zcrypt_key_free(ca->ca_old_key);
    zcrypt_key_release(ca->ca_new_key, FTAG);
    zcrypt_key_free(ca->ca_new_key);

    kmem_free(ca, sizeof (struct wkey_change_arg));
    dsl_dataset_rele(ds, FTAG);
    dsl_pool_rele(dp, FTAG);

    spa_close(spa, FTAG);

    return (err);
}
Example #15
/*
 * same as dsl_dir_open, ignore the first component of name and use the
 * spa instead
 */
int
dsl_dir_open_spa(spa_t *spa, const char *name, void *tag,
    dsl_dir_t **ddp, const char **tailp)
{
	char buf[MAXNAMELEN];
	const char *next, *nextnext = NULL;
	int err;
	dsl_dir_t *dd;
	dsl_pool_t *dp;
	uint64_t ddobj;
	int openedspa = FALSE;

	dprintf("%s\n", name);

	err = getcomponent(name, buf, &next);
	if (err)
		return (err);
	if (spa == NULL) {
		err = spa_open(buf, &spa, FTAG);
		if (err) {
			dprintf("spa_open(%s) failed\n", buf);
			return (err);
		}
		openedspa = TRUE;

		/* XXX this assertion belongs in spa_open */
		ASSERT(!dsl_pool_sync_context(spa_get_dsl(spa)));
	}

	dp = spa_get_dsl(spa);

	rw_enter(&dp->dp_config_rwlock, RW_READER);
	err = dsl_dir_open_obj(dp, dp->dp_root_dir_obj, NULL, tag, &dd);
	if (err) {
		rw_exit(&dp->dp_config_rwlock);
		if (openedspa)
			spa_close(spa, FTAG);
		return (err);
	}

	while (next != NULL) {
		dsl_dir_t *child_ds;
		err = getcomponent(next, buf, &nextnext);
		if (err)
			break;
		ASSERT(next[0] != '\0');
		if (next[0] == '@')
			break;
		dprintf("looking up %s in obj%lld\n",
		    buf, dd->dd_phys->dd_child_dir_zapobj);

		err = zap_lookup(dp->dp_meta_objset,
		    dd->dd_phys->dd_child_dir_zapobj,
		    buf, sizeof (ddobj), 1, &ddobj);
		if (err) {
			if (err == ENOENT)
				err = 0;
			break;
		}

		err = dsl_dir_open_obj(dp, ddobj, buf, tag, &child_ds);
		if (err)
			break;
		dsl_dir_close(dd, tag);
		dd = child_ds;
		next = nextnext;
	}
	rw_exit(&dp->dp_config_rwlock);

	if (err) {
		dsl_dir_close(dd, tag);
		if (openedspa)
			spa_close(spa, FTAG);
		return (err);
	}

	/*
	 * It's an error if there's more than one component left, or
	 * tailp==NULL and there's any component left.
	 */
	if (next != NULL &&
	    (tailp == NULL || (nextnext && nextnext[0] != '\0'))) {
		/* bad path name */
		dsl_dir_close(dd, tag);
		dprintf("next=%p (%s) tail=%p\n", next, next?next:"", tailp);
		err = ENOENT;
	}
	if (tailp)
		*tailp = next;
	if (openedspa)
		spa_close(spa, FTAG);
	*ddp = dd;
	return (err);
}
Example #16
/*
 * Target is the dataset whose pool we want to open.
 */
static void
import_pool(const char *target, boolean_t readonly)
{
	nvlist_t *config;
	nvlist_t *pools;
	int error;
	char *sepp;
	spa_t *spa;
	nvpair_t *elem;
	nvlist_t *props;
	char *name;

	kernel_init(readonly ? FREAD : (FREAD | FWRITE));
	g_zfs = libzfs_init();
	ASSERT(g_zfs != NULL);

	dmu_objset_register_type(DMU_OST_ZFS, space_delta_cb);

	g_readonly = readonly;

	/*
	 * If we only want readonly access, it's OK if we find
	 * a potentially-active (ie, imported into the kernel) pool from the
	 * default cachefile.
	 */
	if (readonly && spa_open(target, &spa, FTAG) == 0) {
		spa_close(spa, FTAG);
		return;
	}

	g_importargs.unique = B_TRUE;
	g_importargs.can_be_active = readonly;
	g_pool = strdup(target);
	if ((sepp = strpbrk(g_pool, "/@")) != NULL)
		*sepp = '\0';
	g_importargs.poolname = g_pool;
	pools = zpool_search_import(g_zfs, &g_importargs);

	if (nvlist_empty(pools)) {
		if (!g_importargs.can_be_active) {
			g_importargs.can_be_active = B_TRUE;
			if (zpool_search_import(g_zfs, &g_importargs) != NULL ||
			    spa_open(target, &spa, FTAG) == 0) {
				fatal(spa, FTAG, "cannot import '%s': pool is "
				    "active; run " "\"zpool export %s\" "
				    "first\n", g_pool, g_pool);
			}
		}

		fatal(NULL, FTAG, "cannot import '%s': no such pool "
		    "available\n", g_pool);
	}

	elem = nvlist_next_nvpair(pools, NULL);
	name = nvpair_name(elem);
	VERIFY(nvpair_value_nvlist(elem, &config) == 0);

	props = NULL;
	if (readonly) {
		VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
		VERIFY(nvlist_add_uint64(props,
		    zpool_prop_to_name(ZPOOL_PROP_READONLY), 1) == 0);
	}

	zfeature_checks_disable = B_TRUE;
	error = spa_import(name, config, props, ZFS_IMPORT_NORMAL);
	zfeature_checks_disable = B_FALSE;
	if (error == EEXIST)
		error = 0;

	if (error)
		fatal(NULL, FTAG, "can't import '%s': %s", name,
		    strerror(error));
}