Example #1
void
sa_tear_down(objset_t *os)
{
	sa_os_t *sa = os->os_sa;
	sa_lot_t *layout;
	void *cookie;

	kmem_free(sa->sa_user_table, sa->sa_user_table_sz);

	/* Free up attr table */

	sa_free_attr_table(sa);

	cookie = NULL;
	while ((layout = avl_destroy_nodes(&sa->sa_layout_hash_tree,
	    &cookie)) != NULL) {
		sa_idx_tab_t *tab;
		while ((tab = list_head(&layout->lot_idx_tab)) != NULL) {
			ASSERT(refcount_count(&tab->sa_refcount));
			sa_idx_tab_rele(os, tab);
		}
	}

	cookie = NULL;
	while ((layout = avl_destroy_nodes(&sa->sa_layout_num_tree,
	    &cookie)) != NULL) {
		kmem_free(layout->lot_attrs,
		    sizeof (sa_attr_type_t) * layout->lot_attr_count);
		kmem_free(layout, sizeof (sa_lot_t));
	}

	avl_destroy(&sa->sa_layout_hash_tree);
	avl_destroy(&sa->sa_layout_num_tree);

	kmem_free(sa, sizeof (sa_os_t));
	os->os_sa = NULL;
}
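
All of the examples in this collection follow the same illumos libavl lifecycle: avl_create() with a comparator, avl_add() to populate, then an avl_destroy_nodes() loop driven by a NULL-initialized cookie, and finally avl_destroy(). A minimal, self-contained sketch of that lifecycle; item_t, item_compare(), and item_tree_example() are hypothetical, while the avl_*() calls are the real <sys/avl.h> interfaces:

#include <sys/avl.h>
#include <stddef.h>
#include <stdlib.h>

typedef struct item {
	avl_node_t i_node;	/* embedded AVL linkage */
	int i_key;
} item_t;

/* illumos AVL comparators must return -1, 0, or 1 */
static int
item_compare(const void *a, const void *b)
{
	const item_t *ia = a, *ib = b;

	if (ia->i_key < ib->i_key)
		return (-1);
	return (ia->i_key > ib->i_key);
}

static void
item_tree_example(void)
{
	avl_tree_t tree;
	item_t *it;
	void *cookie = NULL;
	int i;

	avl_create(&tree, item_compare, sizeof (item_t),
	    offsetof(item_t, i_node));

	for (i = 0; i < 10; i++) {
		it = malloc(sizeof (item_t));	/* error handling elided */
		it->i_key = i;
		avl_add(&tree, it);
	}

	/*
	 * avl_destroy_nodes() hands back one node per call until the
	 * tree is empty, then returns NULL; avl_destroy() then tears
	 * down the (now empty) tree itself.  Each traversal needs its
	 * own cookie, initialized to NULL.
	 */
	while ((it = avl_destroy_nodes(&tree, &cookie)) != NULL)
		free(it);
	avl_destroy(&tree);
}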
Example #2
void
zfs_fuid_table_destroy(avl_tree_t *idx_tree, avl_tree_t *domain_tree)
{
	fuid_domain_t *domnode;
	void *cookie;

	cookie = NULL;
	while ((domnode = avl_destroy_nodes(domain_tree, &cookie)) != NULL)
		ksiddomain_rele(domnode->f_ksid);

	avl_destroy(domain_tree);
	cookie = NULL;
	while ((domnode = avl_destroy_nodes(idx_tree, &cookie)) != NULL)
		kmem_free(domnode, sizeof (fuid_domain_t));
	avl_destroy(idx_tree);
}
Example #3
void
zcrypt_keystore_fini(spa_t *spa)
{
	void *cookie;
	avl_tree_t *tree;
	zcrypt_keystore_node_t *node;

	if (spa->spa_keystore == NULL)
		return;

	rw_enter(&spa->spa_keystore->sk_lock, RW_WRITER);
	/*
	 * Note we don't bother with the refcnt of the keys in here,
	 * because this function can't return failure; we just need to
	 * destroy everything.
	 */
	cookie = NULL;
	tree = &spa->spa_keystore->sk_dslkeys;
	while ((node = avl_destroy_nodes(tree, &cookie)) != NULL) {
		mutex_enter(&node->skn_lock);
		(void) zcrypt_keychain_fini(node->skn_keychain);
		zcrypt_key_free(node->skn_wrapkey);
		mutex_exit(&node->skn_lock);
		bzero(node, sizeof (zcrypt_keystore_node_t));
		kmem_free(node, sizeof (zcrypt_keystore_node_t));
	}
	avl_destroy(tree);

	rw_exit(&spa->spa_keystore->sk_lock);
	rw_destroy(&spa->spa_keystore->sk_lock);
	kmem_free(spa->spa_keystore, sizeof (zcrypt_keystore_t));
	spa->spa_keystore = NULL;
}
Example #4
static void
trim_map_vdev_commit_done(spa_t *spa, vdev_t *vd)
{
	trim_map_t *tm = vd->vdev_trimmap;
	trim_seg_t *ts;
	list_t pending_writes;
	zio_t *zio;
	uint64_t start, size;
	void *cookie;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	if (tm == NULL)
		return;

	mutex_enter(&tm->tm_lock);
	if (!avl_is_empty(&tm->tm_inflight_frees)) {
		cookie = NULL;
		while ((ts = avl_destroy_nodes(&tm->tm_inflight_frees,
		    &cookie)) != NULL) {
			kmem_free(ts, sizeof (*ts));
		}
	}
	list_create(&pending_writes, sizeof (zio_t), offsetof(zio_t,
	    io_trim_link));
	list_move_tail(&pending_writes, &tm->tm_pending_writes);
	mutex_exit(&tm->tm_lock);

	while ((zio = list_remove_head(&pending_writes)) != NULL) {
		zio_vdev_io_reissue(zio);
		zio_execute(zio);
	}
	list_destroy(&pending_writes);
}
Example #5
/*
 * AVL related
 */
static void
fuse_avl_destroy(avl_tree_t *tree_p)
{
	void *cookie = NULL;
	fuse_avl_cache_node_t *node;
	while ((node = avl_destroy_nodes(tree_p, &cookie)) != NULL) {
		fuse_avl_cache_node_destroy(node);
	}
	avl_destroy(tree_p);
}
Example #6
/*
 * Discard memory associated with the inverted fragments tree created
 * by report_dups() via invert_frags().
 */
static void
free_invert_frags(avl_tree_t *tree)
{
	void *outer = NULL;	/* traversal cookie */
	void *inner;		/* traversal cookie */
	inode_dup_t *inode_dup;
	reference_t *ref_dup;

	while ((inode_dup = avl_destroy_nodes(tree, &outer)) != NULL) {
		inner = NULL;
		while ((ref_dup = avl_destroy_nodes(&inode_dup->id_fragments,
		    &inner)) != NULL) {
			free((void *)ref_dup);
		}
		avl_destroy(&inode_dup->id_fragments);
		free((void *)inode_dup);
	}
	avl_destroy(tree);
}
Example #7
/*
 * Discard all memory allocations associated with the current duplicates
 * table.
 */
void
free_dup_state(void)
{
	void *dup_cookie = NULL;
	void *claim_cookie;
	fragment_t *fragv;
	claimant_t *claimv;

	while ((fragv = avl_destroy_nodes(&dup_frags, &dup_cookie)) != NULL) {
		claim_cookie = NULL;
		while ((claimv = avl_destroy_nodes(&fragv->fr_claimants,
		    &claim_cookie)) != NULL) {
			free((void *)claimv);
		}
		avl_destroy(&fragv->fr_claimants);
		free((void *)fragv);
	}
	avl_destroy(&dup_frags);
}
Example #8
static void
mze_destroy(zap_t *zap)
{
	mzap_ent_t *mze;
	void *avlcookie = NULL;

	while ((mze = avl_destroy_nodes(&zap->zap_m.zap_avl,
	    &avlcookie)) != NULL)
		kmem_free(mze, sizeof (mzap_ent_t));
	avl_destroy(&zap->zap_m.zap_avl);
}
Example #9
File: zil.c Project: harshada/zfs
static void
zil_dva_tree_fini(avl_tree_t *t)
{
	zil_dva_node_t *zn;
	void *cookie = NULL;

	while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
		kmem_free(zn, sizeof (zil_dva_node_t));

	avl_destroy(t);
}
Example #10
/*
 * free all data associated with an ace_list
 */
static void
ace_list_free(ace_list_t *al)
{
	acevals_t *node;
	void *cookie;

	if (al == NULL)
		return;

	cookie = NULL;
	while ((node = avl_destroy_nodes(&al->user, &cookie)) != NULL)
		cacl_free(node, sizeof (acevals_t));
	cookie = NULL;
	while ((node = avl_destroy_nodes(&al->group, &cookie)) != NULL)
		cacl_free(node, sizeof (acevals_t));

	avl_destroy(&al->user);
	avl_destroy(&al->group);

	/* free the container itself */
	cacl_free(al, sizeof (ace_list_t));
}
Example #11
static void
tzcheck_free_tze_avl(avl_tree_t *t)
{
	void *ck = NULL;
	tzent_t *tze;

	while ((tze = avl_destroy_nodes(t, &ck)) != NULL) {
		free(tze->tze_target);
		free(tze->tze_path);
		free(tze);
	}
	avl_destroy(t);
}
Example #12
/*
 * Removes and frees all the cache entries
 */
static void
smb_cache_destroy_nodes(smb_cache_t *chandle)
{
	void *cookie = NULL;
	smb_cache_node_t *cnode;
	avl_tree_t *cache;

	cache = &chandle->ch_cache;
	while ((cnode = avl_destroy_nodes(cache, &cookie)) != NULL) {
		if (chandle->ch_free)
			chandle->ch_free(cnode->cn_data);
		free(cnode);
	}
}
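
Unlike most examples here, smb_cache_destroy_nodes() drains the tree but never calls avl_destroy(). The comments in illumos avl.h note that once avl_destroy_nodes() has been used, no routine other than avl_destroy_nodes() and avl_destroy() may be used on that tree, so a caller is presumably expected to finish the teardown. A hypothetical pairing:

	smb_cache_destroy_nodes(chandle);
	avl_destroy(&chandle->ch_cache);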
Example #13
/*
 * smb_lucache_flush
 *
 * Removes and frees all the cache entries
 */
static void
smb_lucache_flush(void)
{
	void *cookie = NULL;
	smb_ucnode_t *ucnode;

	(void) rw_wrlock(&smb_uch.uc_cache_lck);
	while ((ucnode = avl_destroy_nodes(&smb_uch.uc_cache, &cookie))
	    != NULL) {
		free(ucnode->cn_user.su_name);
		free(ucnode->cn_user.su_fullname);
		free(ucnode->cn_user.su_desc);
		free(ucnode);
	}
	(void) rw_unlock(&smb_uch.uc_cache_lck);
}
Example #14
/*
 * Destroy a search tree.
 */
void
destroy_tree(avl_tree_t *stree)
{
	void *cookie;
	tree_node_t	*tnode;

	if (stree != NULL) {

		cookie = NULL;
		while ((tnode = avl_destroy_nodes(stree, &cookie)) != NULL) {
			free(tnode);
		}
		avl_destroy(stree);
		free(stree);
	}
}
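
destroy_tree() also free()s the avl_tree_t container itself, implying the tree was heap-allocated rather than embedded in a larger structure as in most of these examples. A hypothetical constructor matching that convention (tnode_compare() and the tree_node_t layout are assumptions, not part of the original code):

static avl_tree_t *
create_tree(void)
{
	avl_tree_t *stree = malloc(sizeof (avl_tree_t));

	if (stree != NULL)
		avl_create(stree, tnode_compare, sizeof (tree_node_t),
		    offsetof(tree_node_t, tn_node));
	return (stree);
}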
Example #15
/*
 * Destroy the string container [zsp] and all strings within.
 */
void
zed_strings_destroy(zed_strings_t *zsp)
{
	void *cookie;
	zed_strings_node_t *np;

	if (!zsp)
		return;

	cookie = NULL;
	while ((np = avl_destroy_nodes(&zsp->tree, &cookie)) != NULL)
		free(np);

	avl_destroy(&zsp->tree);
	free(zsp);
}
Example #16
void
range_tree_vacate(range_tree_t *rt, range_tree_func_t *func, void *arg)
{
	range_seg_t *rs;
	void *cookie = NULL;

	if (rt->rt_ops != NULL)
		rt->rt_ops->rtop_vacate(rt, rt->rt_arg);

	while ((rs = avl_destroy_nodes(&rt->rt_root, &cookie)) != NULL) {
		if (func != NULL)
			func(arg, rs->rs_start, rs->rs_end - rs->rs_start);
		kmem_cache_free(range_seg_cache, rs);
	}

	bzero(rt->rt_histogram, sizeof (rt->rt_histogram));
	rt->rt_space = 0;
}
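
Because the callback runs on each segment before it is freed, range_tree_vacate() doubles as a bulk-move primitive. Two usage sketches, assuming the range_tree_add(void *arg, uint64_t start, uint64_t size) signature so it can serve directly as the callback:

	/* move every segment of src into dst, leaving src empty */
	range_tree_vacate(src, range_tree_add, dst);

	/* or just empty the tree, freeing segments, with no callback */
	range_tree_vacate(rt, NULL, NULL);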
Example #17
/*
 * semexit - Called by exit() to clean up on process exit.
 */
void
semexit(proc_t *pp)
{
    avl_tree_t	*tree;
    struct sem_undo	*undo;
    void		*cookie = NULL;

    mutex_enter(&pp->p_lock);
    tree = pp->p_semacct;
    pp->p_semacct = NULL;
    mutex_exit(&pp->p_lock);

    while ((undo = avl_destroy_nodes(tree, &cookie)) != NULL) {
        ksemid_t *sp = undo->un_sp;
        size_t size = SEM_UNDOSZ(sp->sem_nsems);
        int i;

        (void) ipc_lock(sem_svc, sp->sem_perm.ipc_id);
        if (!IPC_FREE(&sp->sem_perm)) {
            for (i = 0; i < sp->sem_nsems; i++) {
                int adj = undo->un_aoe[i];
                if (adj) {
                    struct sem *semp = &sp->sem_base[i];
                    int v = (int)semp->semval + adj;

                    if (v < 0 || v > USHRT_MAX)
                        continue;
                    semp->semval = (ushort_t)v;
                    if (v == 0 && semp->semzcnt)
                        cv_broadcast(&semp->semzcnt_cv);
                    if (adj > 0 && semp->semncnt)
                        cv_broadcast(&semp->semncnt_cv);
                }
            }
            list_remove(&sp->sem_undos, undo);
        }
        ipc_rele(sem_svc, (kipc_perm_t *)sp);
        kmem_free(undo, size);
    }

    avl_destroy(tree);
    kmem_free(tree, sizeof (avl_tree_t));
}
Example #18
static int
zcrypt_keychain_fini(avl_tree_t keychain)
{
	void *cookie = NULL;
	zcrypt_keychain_node_t *node = NULL;

#if 0
	while (AVL_NEXT(&keychain, node) != NULL) {
		if (!refcount_is_zero(&node->dkn_key->zk_refcnt))
			return (EBUSY);
	}
#endif

	while ((node = avl_destroy_nodes(&keychain, &cookie)) != NULL) {
		zcrypt_key_free(node->dkn_key);
		kmem_free(node, sizeof (zcrypt_keychain_node_t));
	}
	avl_destroy(&keychain);

	return (0);
}
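
Note that zcrypt_keychain_fini() takes the avl_tree_t by value: the copied root pointer still reaches the shared nodes, so the drain works, but avl_destroy() only sanity-checks and resets the local copy. A pointer-taking variant (a sketch, not the original interface) is the more conventional shape:

static int
zcrypt_keychain_fini_byref(avl_tree_t *keychain)
{
	void *cookie = NULL;
	zcrypt_keychain_node_t *node;

	while ((node = avl_destroy_nodes(keychain, &cookie)) != NULL) {
		zcrypt_key_free(node->dkn_key);
		kmem_free(node, sizeof (zcrypt_keychain_node_t));
	}
	avl_destroy(keychain);
	return (0);
}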
Example #19
int
zfs_iter_snapshots_sorted(zfs_handle_t *zhp, zfs_iter_f callback, void *data)
{
	int ret = 0;
	zfs_node_t *node;
	avl_tree_t avl;
	void *cookie = NULL;

	avl_create(&avl, zfs_snapshot_compare,
	    sizeof (zfs_node_t), offsetof(zfs_node_t, zn_avlnode));

	ret = zfs_iter_snapshots(zhp, B_FALSE, zfs_sort_snaps, &avl);

	for (node = avl_first(&avl); node != NULL; node = AVL_NEXT(&avl, node))
		ret |= callback(node->zn_handle, data);

	while ((node = avl_destroy_nodes(&avl, &cookie)) != NULL)
		free(node);

	avl_destroy(&avl);

	return (ret);
}
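
The zfs_iter_f callback receives each snapshot's handle plus the opaque data pointer, in sorted order. A minimal callback sketch; print_snap_cb() is hypothetical, zfs_get_name() is the real libzfs accessor, and the sketch assumes the usual libzfs convention that the callback closes the handle when done with it:

static int
print_snap_cb(zfs_handle_t *zhp, void *data)
{
	FILE *out = data;

	(void) fprintf(out, "%s\n", zfs_get_name(zhp));
	zfs_close(zhp);
	return (0);
}

/* usage */
int err = zfs_iter_snapshots_sorted(zhp, print_snap_cb, stdout);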
Example #20
File: zil.c Project: harshada/zfs
void
zil_flush_vdevs(zilog_t *zilog)
{
	spa_t *spa = zilog->zl_spa;
	avl_tree_t *t = &zilog->zl_vdev_tree;
	void *cookie = NULL;
	zil_vdev_node_t *zv;
	zio_t *zio;

	ASSERT(zilog->zl_writer);

	/*
	 * We don't need zl_vdev_lock here because we're the zl_writer,
	 * and all zl_get_data() callbacks are done.
	 */
	if (avl_numnodes(t) == 0)
		return;

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

	zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);

	while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) {
		vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev);
		if (vd != NULL)
			zio_flush(zio, vd);
		kmem_free(zv, sizeof (*zv));
	}

	/*
	 * Wait for all the flushes to complete.  Not all devices actually
	 * support the DKIOCFLUSHWRITECACHE ioctl, so it's OK if it fails.
	 */
	(void) zio_wait(zio);

	spa_config_exit(spa, SCL_STATE, FTAG);
}
Example #21
/*
 * Given a list of directories to search, find all pools stored on disk.  This
 * includes partial pools which are not available to import.  If no args are
 * given (argc is 0), then the default directory (/dev/dsk) is searched.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
static nvlist_t *
zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
{
	int i, dirs = iarg->paths;
	struct dirent *dp;
	char path[MAXPATHLEN];
	char *end, **dir = iarg->path;
	size_t pathleft;
	nvlist_t *ret = NULL;
	pool_list_t pools = { 0 };
	pool_entry_t *pe, *penext;
	vdev_entry_t *ve, *venext;
	config_entry_t *ce, *cenext;
	name_entry_t *ne, *nenext;
	avl_tree_t slice_cache;
	rdsk_node_t *slice;
	void *cookie;

	verify(iarg->poolname == NULL || iarg->guid == 0);

	if (dirs == 0) {
#ifdef HAVE_LIBBLKID
		/* Use libblkid to scan all devices for their type */
		if (zpool_find_import_blkid(hdl, &pools) == 0)
			goto skip_scanning;

		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid failure falling back "
		    "to manual probing"));
#endif /* HAVE_LIBBLKID */

		dir = zpool_default_import_path;
		dirs = DEFAULT_IMPORT_PATH_SIZE;
	}

	/*
	 * Go through and read the label configuration information from every
	 * possible device, organizing the information according to pool GUID
	 * and toplevel GUID.
	 */
	for (i = 0; i < dirs; i++) {
		taskq_t *t;
		char rdsk[MAXPATHLEN];
		int dfd;
		boolean_t config_failed = B_FALSE;
		DIR *dirp;

		/* use realpath to normalize the path */
		if (realpath(dir[i], path) == 0) {

			/* it is safe to skip missing search paths */
			if (errno == ENOENT)
				continue;

			zfs_error_aux(hdl, strerror(errno));
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);
			goto error;
		}
		end = &path[strlen(path)];
		*end++ = '/';
		*end = 0;
		pathleft = &path[sizeof (path)] - end;

		/*
		 * Using raw devices instead of block devices when we're
		 * reading the labels skips a bunch of slow operations during
		 * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
		 */
		if (strcmp(path, ZFS_DISK_ROOTD) == 0)
			(void) strlcpy(rdsk, ZFS_RDISK_ROOTD, sizeof (rdsk));
		else
			(void) strlcpy(rdsk, path, sizeof (rdsk));

		if ((dfd = open(rdsk, O_RDONLY)) < 0 ||
		    (dirp = fdopendir(dfd)) == NULL) {
			if (dfd >= 0)
				(void) close(dfd);
			zfs_error_aux(hdl, strerror(errno));
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
			    rdsk);
			goto error;
		}

		avl_create(&slice_cache, slice_cache_compare,
		    sizeof (rdsk_node_t), offsetof(rdsk_node_t, rn_node));

		/*
		 * This is not MT-safe, but we have no MT consumers of libzfs
		 */
		while ((dp = readdir(dirp)) != NULL) {
			const char *name = dp->d_name;
			if (name[0] == '.' &&
			    (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
				continue;

			slice = zfs_alloc(hdl, sizeof (rdsk_node_t));
			slice->rn_name = zfs_strdup(hdl, name);
			slice->rn_avl = &slice_cache;
			slice->rn_dfd = dfd;
			slice->rn_hdl = hdl;
			slice->rn_nozpool = B_FALSE;
			avl_add(&slice_cache, slice);
		}

		/*
		 * create a thread pool to do all of this in parallel;
		 * rn_nozpool is not protected, so this is racy in that
		 * multiple tasks could decide that the same slice can
		 * not hold a zpool, which is benign.  Also choose
		 * double the number of processors; we hold a lot of
		 * locks in the kernel, so going beyond this doesn't
		 * buy us much.
		 */
		t = taskq_create("z_import", 2 * max_ncpus, defclsyspri,
		    2 * max_ncpus, INT_MAX, TASKQ_PREPOPULATE);
		for (slice = avl_first(&slice_cache); slice;
		    (slice = avl_walk(&slice_cache, slice,
		    AVL_AFTER)))
			(void) taskq_dispatch(t, zpool_open_func, slice,
			    TQ_SLEEP);
		taskq_wait(t);
		taskq_destroy(t);

		cookie = NULL;
		while ((slice = avl_destroy_nodes(&slice_cache,
		    &cookie)) != NULL) {
			if (slice->rn_config != NULL && !config_failed) {
				nvlist_t *config = slice->rn_config;
				boolean_t matched = B_TRUE;

				if (iarg->poolname != NULL) {
					char *pname;

					matched = nvlist_lookup_string(config,
					    ZPOOL_CONFIG_POOL_NAME,
					    &pname) == 0 &&
					    strcmp(iarg->poolname, pname) == 0;
				} else if (iarg->guid != 0) {
					uint64_t this_guid;

					matched = nvlist_lookup_uint64(config,
					    ZPOOL_CONFIG_POOL_GUID,
					    &this_guid) == 0 &&
					    iarg->guid == this_guid;
				}
				if (!matched) {
					nvlist_free(config);
				} else {
					/*
					 * use the non-raw path for the config
					 */
					(void) strlcpy(end, slice->rn_name,
					    pathleft);
					if (add_config(hdl, &pools, path, i+1,
					    slice->rn_num_labels, config) != 0)
						config_failed = B_TRUE;
				}
			}
			free(slice->rn_name);
			free(slice);
		}
		avl_destroy(&slice_cache);

		(void) closedir(dirp);

		if (config_failed)
			goto error;
	}

#ifdef HAVE_LIBBLKID
skip_scanning:
#endif
	ret = get_configs(hdl, &pools, iarg->can_be_active, iarg->policy);

error:
	for (pe = pools.pools; pe != NULL; pe = penext) {
		penext = pe->pe_next;
		for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
			venext = ve->ve_next;
			for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
				cenext = ce->ce_next;
				if (ce->ce_config)
					nvlist_free(ce->ce_config);
				free(ce);
			}
			free(ve);
		}
		free(pe);
	}

	for (ne = pools.names; ne != NULL; ne = nenext) {
		nenext = ne->ne_next;
		free(ne->ne_name);
		free(ne);
	}

	return (ret);
}
Example #22
/*
 * Given a list of directories to search, find all pools stored on disk.  This
 * includes partial pools which are not available to import.  If no args are
 * given (argc is 0), then the default directory (/dev/dsk) is searched.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
static nvlist_t *
zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
{
	int i, dirs = iarg->paths;
	struct dirent64 *dp;
	char path[MAXPATHLEN];
	char *end, **dir = iarg->path;
	size_t pathleft;
	nvlist_t *ret = NULL;
	static char *default_dir = "/dev/dsk";
	pool_list_t pools = { 0 };
	pool_entry_t *pe, *penext;
	vdev_entry_t *ve, *venext;
	config_entry_t *ce, *cenext;
	name_entry_t *ne, *nenext;
	avl_tree_t slice_cache;
	rdsk_node_t *slice;
	void *cookie;

	if (dirs == 0) {
		dirs = 1;
		dir = &default_dir;
	}

	/*
	 * Go through and read the label configuration information from every
	 * possible device, organizing the information according to pool GUID
	 * and toplevel GUID.
	 */
	for (i = 0; i < dirs; i++) {
		tpool_t *t;
		char *rdsk;
		int dfd;
		boolean_t config_failed = B_FALSE;
		DIR *dirp;

		/* use realpath to normalize the path */
		if (realpath(dir[i], path) == 0) {
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);
			goto error;
		}
		end = &path[strlen(path)];
		*end++ = '/';
		*end = 0;
		pathleft = &path[sizeof (path)] - end;

		/*
		 * Using raw devices instead of block devices when we're
		 * reading the labels skips a bunch of slow operations during
		 * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
		 */
		if (strcmp(path, "/dev/dsk/") == 0)
			rdsk = "/dev/rdsk/";
		else
			rdsk = path;

		if ((dfd = open64(rdsk, O_RDONLY)) < 0 ||
		    (dirp = fdopendir(dfd)) == NULL) {
			if (dfd >= 0)
				(void) close(dfd);
			zfs_error_aux(hdl, strerror(errno));
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
			    rdsk);
			goto error;
		}

		avl_create(&slice_cache, slice_cache_compare,
		    sizeof (rdsk_node_t), offsetof(rdsk_node_t, rn_node));
		/*
		 * This is not MT-safe, but we have no MT consumers of libzfs
		 */
		while ((dp = readdir64(dirp)) != NULL) {
			const char *name = dp->d_name;
			if (name[0] == '.' &&
			    (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
				continue;

			slice = zfs_alloc(hdl, sizeof (rdsk_node_t));
			slice->rn_name = zfs_strdup(hdl, name);
			slice->rn_avl = &slice_cache;
			slice->rn_dfd = dfd;
			slice->rn_hdl = hdl;
			slice->rn_nozpool = B_FALSE;
			avl_add(&slice_cache, slice);
		}
		/*
		 * create a thread pool to do all of this in parallel;
		 * rn_nozpool is not protected, so this is racy in that
		 * multiple tasks could decide that the same slice can
		 * not hold a zpool, which is benign.  Also choose
		 * double the number of processors; we hold a lot of
		 * locks in the kernel, so going beyond this doesn't
		 * buy us much.
		 */
		t = tpool_create(1, 2 * sysconf(_SC_NPROCESSORS_ONLN),
		    0, NULL);
		for (slice = avl_first(&slice_cache); slice;
		    (slice = avl_walk(&slice_cache, slice,
		    AVL_AFTER)))
			(void) tpool_dispatch(t, zpool_open_func, slice);
		tpool_wait(t);
		tpool_destroy(t);

		cookie = NULL;
		while ((slice = avl_destroy_nodes(&slice_cache,
		    &cookie)) != NULL) {
			if (slice->rn_config != NULL && !config_failed) {
				nvlist_t *config = slice->rn_config;
				boolean_t matched = B_TRUE;

				if (iarg->poolname != NULL) {
					char *pname;

					matched = nvlist_lookup_string(config,
					    ZPOOL_CONFIG_POOL_NAME,
					    &pname) == 0 &&
					    strcmp(iarg->poolname, pname) == 0;
				} else if (iarg->guid != 0) {
					uint64_t this_guid;

					matched = nvlist_lookup_uint64(config,
					    ZPOOL_CONFIG_POOL_GUID,
					    &this_guid) == 0 &&
					    iarg->guid == this_guid;
				}
				if (!matched) {
					nvlist_free(config);
				} else {
					/*
					 * use the non-raw path for the config
					 */
					(void) strlcpy(end, slice->rn_name,
					    pathleft);
					if (add_config(hdl, &pools, path,
					    config) != 0)
						config_failed = B_TRUE;
				}
			}
			free(slice->rn_name);
			free(slice);
		}
		avl_destroy(&slice_cache);

		(void) closedir(dirp);

		if (config_failed)
			goto error;
	}

	ret = get_configs(hdl, &pools, iarg->can_be_active);

error:
	for (pe = pools.pools; pe != NULL; pe = penext) {
		penext = pe->pe_next;
		for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
			venext = ve->ve_next;
			for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
				cenext = ce->ce_next;
				if (ce->ce_config)
					nvlist_free(ce->ce_config);
				free(ce);
			}
			free(ve);
		}
		free(pe);
	}

	for (ne = pools.names; ne != NULL; ne = nenext) {
		nenext = ne->ne_next;
		free(ne->ne_name);
		free(ne);
	}

	return (ret);
}
Example #23
/*
 * Check if user has requested permission.
 */
int
dsl_deleg_access(const char *ddname, const char *perm, cred_t *cr)
{
	dsl_dir_t *dd, *startdd;
	dsl_pool_t *dp;
	void *cookie;
	int	error;
	char	checkflag = ZFS_DELEG_LOCAL;
	const char *tail;
	objset_t *mos;
	avl_tree_t permsets;
	perm_set_t *setnode;

	/*
	 * Use tail so that zfs_ioctl() code doesn't always have
	 * to figure out the parent name in order to do the
	 * access check; for example, when renaming a snapshot.
	 */
	error = dsl_dir_open(ddname, FTAG, &startdd, &tail);
	if (error)
		return (error);

	if (tail && tail[0] != '@') {
		dsl_dir_close(startdd, FTAG);
		return (ENOENT);
	}
	dp = startdd->dd_pool;
	mos = dp->dp_meta_objset;

	if (dsl_delegation_on(mos) == B_FALSE) {
		dsl_dir_close(startdd, FTAG);
		return (ECANCELED);
	}

	if (spa_version(dmu_objset_spa(dp->dp_meta_objset)) <
	    SPA_VERSION_DELEGATED_PERMS) {
		dsl_dir_close(startdd, FTAG);
		return (EPERM);
	}

	avl_create(&permsets, perm_set_compare, sizeof (perm_set_t),
	    offsetof(perm_set_t, p_node));

	rw_enter(&dp->dp_config_rwlock, RW_READER);
	for (dd = startdd; dd != NULL; dd = dd->dd_parent,
	    checkflag = ZFS_DELEG_DESCENDENT) {
		uint64_t zapobj;
		boolean_t expanded;

		/*
		 * If not in global zone then make sure
		 * the zoned property is set
		 */
		if (!INGLOBALZONE(curproc)) {
			uint64_t zoned;

			if (dsl_prop_get_ds_locked(dd,
			    zfs_prop_to_name(ZFS_PROP_ZONED),
			    8, 1, &zoned, NULL) != 0)
				break;
			if (!zoned)
				break;
		}
		zapobj = dd->dd_phys->dd_deleg_zapobj;

		if (zapobj == 0)
			continue;

		dsl_load_user_sets(mos, zapobj, &permsets, checkflag, cr);
again:
		expanded = B_FALSE;
		for (setnode = avl_first(&permsets); setnode;
		    setnode = AVL_NEXT(&permsets, setnode)) {
			if (setnode->p_matched == B_TRUE)
				continue;

			/* See if this set directly grants this permission */
			error = dsl_check_access(mos, zapobj,
			    ZFS_DELEG_NAMED_SET, 0, setnode->p_setname, perm);
			if (error == 0)
				goto success;
			if (error == EPERM)
				setnode->p_matched = B_TRUE;

			/* See if this set includes other sets */
			error = dsl_load_sets(mos, zapobj,
			    ZFS_DELEG_NAMED_SET_SETS, 0,
			    setnode->p_setname, &permsets);
			if (error == 0)
				setnode->p_matched = expanded = B_TRUE;
		}
		/*
		 * If we expanded any sets, that will define more sets,
		 * which we need to check.
		 */
		if (expanded)
			goto again;

		error = dsl_check_user_access(mos, zapobj, perm, checkflag, cr);
		if (error == 0)
			goto success;
	}
	error = EPERM;
success:
	rw_exit(&dp->dp_config_rwlock);
	dsl_dir_close(startdd, FTAG);

	cookie = NULL;
	while ((setnode = avl_destroy_nodes(&permsets, &cookie)) != NULL)
		kmem_free(setnode, sizeof (perm_set_t));

	return (error);
}
Example #24
/*
 * Check if user has requested permission.
 */
int
dsl_deleg_access(const char *dsname, const char *perm, cred_t *cr)
{
	dsl_dataset_t *ds;
	dsl_dir_t *dd;
	dsl_pool_t *dp;
	void *cookie;
	int	error;
	char	checkflag;
	objset_t *mos;
	avl_tree_t permsets;
	perm_set_t *setnode;

	error = dsl_dataset_hold(dsname, FTAG, &ds);
	if (error)
		return (error);

	dp = ds->ds_dir->dd_pool;
	mos = dp->dp_meta_objset;

	if (dsl_delegation_on(mos) == B_FALSE) {
		dsl_dataset_rele(ds, FTAG);
		return (ECANCELED);
	}

	if (spa_version(dmu_objset_spa(dp->dp_meta_objset)) <
	    SPA_VERSION_DELEGATED_PERMS) {
		dsl_dataset_rele(ds, FTAG);
		return (EPERM);
	}

	if (dsl_dataset_is_snapshot(ds)) {
		/*
		 * Snapshots are treated as descendants only;
		 * local permissions do not apply.
		 */
		checkflag = ZFS_DELEG_DESCENDENT;
	} else {
		checkflag = ZFS_DELEG_LOCAL;
	}

	avl_create(&permsets, perm_set_compare, sizeof (perm_set_t),
	    offsetof(perm_set_t, p_node));

	rw_enter(&dp->dp_config_rwlock, RW_READER);
	for (dd = ds->ds_dir; dd != NULL; dd = dd->dd_parent,
	    checkflag = ZFS_DELEG_DESCENDENT) {
		uint64_t zapobj;
		boolean_t expanded;

		/*
		 * If not in global zone then make sure
		 * the zoned property is set
		 */
		if (!INGLOBALZONE(curproc)) {
			uint64_t zoned;

			if (dsl_prop_get_dd(dd,
			    zfs_prop_to_name(ZFS_PROP_ZONED),
			    8, 1, &zoned, NULL) != 0)
				break;
			if (!zoned)
				break;
		}
		zapobj = dd->dd_phys->dd_deleg_zapobj;

		if (zapobj == 0)
			continue;

		dsl_load_user_sets(mos, zapobj, &permsets, checkflag, cr);
again:
		expanded = B_FALSE;
		for (setnode = avl_first(&permsets); setnode;
		    setnode = AVL_NEXT(&permsets, setnode)) {
			if (setnode->p_matched == B_TRUE)
				continue;

			/* See if this set directly grants this permission */
			error = dsl_check_access(mos, zapobj,
			    ZFS_DELEG_NAMED_SET, 0, setnode->p_setname, perm);
			if (error == 0)
				goto success;
			if (error == EPERM)
				setnode->p_matched = B_TRUE;

			/* See if this set includes other sets */
			error = dsl_load_sets(mos, zapobj,
			    ZFS_DELEG_NAMED_SET_SETS, 0,
			    setnode->p_setname, &permsets);
			if (error == 0)
				setnode->p_matched = expanded = B_TRUE;
		}
		/*
		 * If we expanded any sets, that will define more sets,
		 * which we need to check.
		 */
		if (expanded)
			goto again;

		error = dsl_check_user_access(mos, zapobj, perm, checkflag, cr);
		if (error == 0)
			goto success;
	}
	error = EPERM;
success:
	rw_exit(&dp->dp_config_rwlock);
	dsl_dataset_rele(ds, FTAG);

	cookie = NULL;
	while ((setnode = avl_destroy_nodes(&permsets, &cookie)) != NULL)
		kmem_free(setnode, sizeof (perm_set_t));

	return (error);
}
Example #25
/*
 * smb_lucache_do_update
 *
 * This function takes care of updating the AVL tree.
 * If an entry has been updated, it'll be modified in place.
 *
 * New entries will be added to a temporary AVL tree, then the
 * password file is unlocked and all the new entries are
 * transferred to the main cache from the temporary tree.
 *
 * This function MUST NOT be called directly
 */
static int
smb_lucache_do_update(void)
{
	avl_tree_t tmp_cache;
	smb_pwbuf_t pwbuf;
	smb_passwd_t smbpw;
	smb_ucnode_t uc_node;
	smb_ucnode_t *uc_newnode;
	smb_luser_t *user;
	smb_sid_t *sid;
	idmap_stat idm_stat;
	int rc = SMB_PWE_SUCCESS;
	void *cookie = NULL;
	FILE *fp;

	if ((rc = smb_pwd_lock()) != SMB_PWE_SUCCESS) {
		syslog(LOG_WARNING, "smb_pwdutil: lock failed, err=%d", rc);
		return (rc);
	}

	if ((fp = fopen(SMB_PASSWD, "rF")) == NULL) {
		syslog(LOG_WARNING, "smb_pwdutil: open failed, %m");
		(void) smb_pwd_unlock();
		return (SMB_PWE_OPEN_FAILED);
	}

	avl_create(&tmp_cache, smb_lucache_cmp,
	    sizeof (smb_ucnode_t), offsetof(smb_ucnode_t, cn_link));

	bzero(&pwbuf, sizeof (smb_pwbuf_t));
	pwbuf.pw_pwd = &smbpw;

	(void) rw_rdlock(&smb_uch.uc_cache_lck);

	while (smb_pwd_fgetent(fp, &pwbuf, SMB_PWD_GETF_NOPWD) != NULL) {
		uc_node.cn_user.su_name = smbpw.pw_name;
		uc_newnode = avl_find(&smb_uch.uc_cache, &uc_node, NULL);
		if (uc_newnode) {
			/* update the node info */
			uc_newnode->cn_user.su_ctrl = smbpw.pw_flags;
			continue;
		}

		/* create a new node */
		if ((uc_newnode = malloc(sizeof (smb_ucnode_t))) == NULL) {
			rc = SMB_PWE_NO_MEMORY;
			break;
		}

		bzero(uc_newnode, sizeof (smb_ucnode_t));
		user = &uc_newnode->cn_user;
		user->su_ctrl = smbpw.pw_flags;

		idm_stat = smb_idmap_getsid(smbpw.pw_uid, SMB_IDMAP_USER, &sid);
		if (idm_stat != IDMAP_SUCCESS) {
			syslog(LOG_WARNING, "smb_pwdutil: couldn't obtain SID "
			    "for uid=%u (%d)", smbpw.pw_uid, idm_stat);
			free(uc_newnode);
			continue;
		}
		(void) smb_sid_getrid(sid, &user->su_rid);
		smb_sid_free(sid);

		user->su_name = strdup(smbpw.pw_name);
		if (user->su_name == NULL) {
			rc = SMB_PWE_NO_MEMORY;
			free(uc_newnode);
			break;
		}

		avl_add(&tmp_cache, uc_newnode);
	}

	(void) rw_unlock(&smb_uch.uc_cache_lck);
	(void) fclose(fp);
	(void) smb_pwd_unlock();

	/* Destroy the temporary list */
	(void) rw_wrlock(&smb_uch.uc_cache_lck);
	while ((uc_newnode = avl_destroy_nodes(&tmp_cache, &cookie)) != NULL) {
		avl_add(&smb_uch.uc_cache, uc_newnode);
	}
	(void) rw_unlock(&smb_uch.uc_cache_lck);

	avl_destroy(&tmp_cache);

	return (rc);
}
Example #26
/*
 * Return amount of space required for the string table.
 */
size_t
st_getstrtab_sz(Str_tbl *stp)
{
	assert(stp->st_fullstrsize > 0);

	if ((stp->st_flags & FLG_STTAB_COMPRESS) == 0) {
		stp->st_flags |= FLG_STTAB_COOKED;
		return (stp->st_fullstrsize);
	}

	if ((stp->st_flags & FLG_STTAB_COOKED) == 0) {
		LenNode		*lnp;
		void		*cookie;

		stp->st_flags |= FLG_STTAB_COOKED;
		/*
		 * allocate a hash table about the size of # of
		 * strings input.
		 */
		stp->st_hbckcnt = findprime(stp->st_strcnt);
		if ((stp->st_hashbcks =
		    calloc(sizeof (Str_hash), stp->st_hbckcnt)) == NULL)
			return (0);

		/*
		 * We now walk all of the strings in the list, from shortest to
		 * longest, and insert them into the hashtable.
		 */
		if ((lnp = avl_first(stp->st_lentree)) == NULL) {
			/*
			 * It is possible we have an empty string table; if so,
			 * the table still contains '\0', so return the size.
			 */
			if (avl_numnodes(stp->st_lentree) == 0) {
				assert(stp->st_strsize == 1);
				return (stp->st_strsize);
			}
			return (0);
		}

		while (lnp) {
			StrNode	*snp;

			/*
			 * Walk the string lists and insert them into the hash
			 * list.  Once a string is inserted we no longer need
			 * its entry, so the string can be freed.
			 */
			for (snp = avl_first(lnp->ln_strtree); snp;
			    snp = AVL_NEXT(lnp->ln_strtree, snp)) {
				if (st_hash_insert(stp, snp->sn_str,
				    lnp->ln_strlen) == -1)
					return (0);
			}

			/*
			 * Now that the strings have been copied, walk the
			 * StrNode tree and free all the AVL nodes.  Note,
			 * avl_destroy_nodes() beats avl_remove() as the
			 * latter balances the nodes as they are removed.
			 * We just want to tear the whole thing down fast.
			 */
			cookie = NULL;
			while ((snp = avl_destroy_nodes(lnp->ln_strtree,
			    &cookie)) != NULL)
				free(snp);
			avl_destroy(lnp->ln_strtree);
			free(lnp->ln_strtree);
			lnp->ln_strtree = NULL;

			/*
			 * Move on to the next LenNode.
			 */
			lnp = AVL_NEXT(stp->st_lentree, lnp);
		}

		/*
		 * Now that all of the strings have been freed, walk the
		 * LenNode tree and free all of the AVL nodes.  Note,
		 * avl_destroy_nodes() beats avl_remove() as the latter
		 * balances the nodes as they are removed. We just want to
		 * tear the whole thing down fast.
		 */
		cookie = NULL;
		while ((lnp = avl_destroy_nodes(stp->st_lentree,
		    &cookie)) != NULL)
			free(lnp);
		avl_destroy(stp->st_lentree);
		free(stp->st_lentree);
		stp->st_lentree = 0;
	}

	assert(stp->st_strsize > 0);
	assert(stp->st_fullstrsize >= stp->st_strsize);

	return (stp->st_strsize);
}
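
The comments in this last example spell out why every teardown in this collection uses avl_destroy_nodes(): avl_remove() rebalances the tree after each deletion, which is wasted work when the whole tree is being discarded. For contrast, the slower equivalent (a sketch over a hypothetical node_t):

static void
slow_teardown(avl_tree_t *tree)
{
	node_t *np;

	/* correct, but each avl_remove() rebalances the tree */
	while ((np = avl_first(tree)) != NULL) {
		avl_remove(tree, np);
		free(np);
	}
	avl_destroy(tree);
}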