Example #1
static void
vdev_queue_io_add(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	spa_stats_history_t *ssh = &spa->spa_stats.io_history;

	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	avl_add(vdev_queue_class_tree(vq, zio->io_priority), zio);
	avl_add(vdev_queue_type_tree(vq, zio->io_type), zio);

	if (ssh->kstat != NULL) {
		mutex_enter(&ssh->lock);
		kstat_waitq_enter(ssh->kstat->ks_data);
		mutex_exit(&ssh->lock);
	}
}
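
All of the trees in these examples must first be set up with avl_create(), passing a comparator that defines a total order over the nodes; avl_add() then asserts that the element is not already present. A minimal comparator sketch under those assumptions (the function name is hypothetical; real ZFS comparators break offset ties, e.g. by address, so that distinct zios never compare equal):

static int
example_zio_offset_compare(const void *x1, const void *x2)
{
	const zio_t *z1 = x1;
	const zio_t *z2 = x2;

	/* Order primarily by device offset. */
	if (z1->io_offset < z2->io_offset)
		return (-1);
	if (z1->io_offset > z2->io_offset)
		return (1);

	/* Break ties by address so distinct nodes never compare equal. */
	if (z1 < z2)
		return (-1);
	if (z1 > z2)
		return (1);
	return (0);
}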
Example #2
int
zfsctl_unmount_snapshot(zfs_sb_t *zsb, char *name, int flags)
{
	zfs_snapentry_t search;
	zfs_snapentry_t *sep;
	int error = 0;

	mutex_enter(&zsb->z_ctldir_lock);

	search.se_name = name;
	sep = avl_find(&zsb->z_ctldir_snaps, &search, NULL);
	if (sep) {
		avl_remove(&zsb->z_ctldir_snaps, sep);
		error = __zfsctl_unmount_snapshot(sep, flags);
		if (error == EBUSY)
			avl_add(&zsb->z_ctldir_snaps, sep);
		else
			zfsctl_sep_free(sep);
	} else {
		error = ENOENT;
	}

	mutex_exit(&zsb->z_ctldir_lock);
	ASSERT3S(error, >=, 0);

	return (error);
}
Example #3
/*
 * Traverse all mounted snapshots and attempt to unmount them.  This
 * is best effort; on failure EEXIST is returned and count will be set
 * to the number of snapshots which could not be unmounted.
 */
int
zfsctl_unmount_snapshots(zfs_sb_t *zsb, int flags, int *count)
{
	zfs_snapentry_t *sep, *next;
	int error = 0;

	*count = 0;

	ASSERT(zsb->z_ctldir != NULL);
	mutex_enter(&zsb->z_ctldir_lock);

	sep = avl_first(&zsb->z_ctldir_snaps);
	while (sep != NULL) {
		next = AVL_NEXT(&zsb->z_ctldir_snaps, sep);
		avl_remove(&zsb->z_ctldir_snaps, sep);
		error = __zfsctl_unmount_snapshot(sep, flags);
		if (error == EBUSY) {
			avl_add(&zsb->z_ctldir_snaps, sep);
			(*count)++;
		} else {
			zfsctl_sep_free(sep);
		}

		sep = next;
	}

	mutex_exit(&zsb->z_ctldir_lock);

	return ((*count > 0) ? EEXIST : 0);
}
Example #4
int32_t init_container_resource(container_handle_t **ct, const char *ct_name)
{
    container_handle_t *tmp_ct = NULL;

    ASSERT(ct != NULL);
    ASSERT(ct_name != NULL);

    tmp_ct = (container_handle_t *)OS_MALLOC(sizeof(container_handle_t));
    if (!tmp_ct)
    {
        LOG_ERROR("Allocate memory failed. size(%d)\n", (uint32_t)sizeof(container_handle_t));
        return -INDEX_ERR_ALLOCATE_MEMORY;
    }

    memset(tmp_ct, 0, sizeof(container_handle_t));
    strncpy(tmp_ct->name, ct_name, OFS_NAME_SIZE - 1); /* buffer zeroed above, so the name stays NUL-terminated */
    OS_RWLOCK_INIT(&tmp_ct->ct_lock);
    OS_RWLOCK_INIT(&tmp_ct->metadata_cache_lock);
    tmp_ct->ref_cnt = 1;
    avl_create(&tmp_ct->obj_info_list, (int (*)(const void *, const void*))compare_object1, sizeof(object_info_t),
        OS_OFFSET(object_info_t, entry));
    avl_create(&tmp_ct->metadata_cache, (int (*)(const void *, const void*))compare_cache1, sizeof(ofs_block_cache_t),
        OS_OFFSET(ofs_block_cache_t, fs_entry));
    avl_add(g_container_list, tmp_ct);

    *ct = tmp_ct;

    return 0;
}
Example #5
static void
trim_map_vdev_commit(spa_t *spa, zio_t *zio, vdev_t *vd)
{
	trim_map_t *tm = vd->vdev_trimmap;
	trim_seg_t *ts;
	uint64_t start, size, txglimit;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	if (tm == NULL)
		return;

	txglimit = MIN(spa->spa_syncing_txg, spa_freeze_txg(spa)) -
	    trim_txg_limit;

	mutex_enter(&tm->tm_lock);
	/*
	 * Loop until we send all frees up to the txglimit.
	 */
	while ((ts = trim_map_first(tm, txglimit)) != NULL) {
		list_remove(&tm->tm_head, ts);
		avl_remove(&tm->tm_queued_frees, ts);
		avl_add(&tm->tm_inflight_frees, ts);
		zio_nowait(zio_trim(zio, spa, vd, ts->ts_start,
		    ts->ts_end - ts->ts_start));
	}
	mutex_exit(&tm->tm_lock);
}
Example #6
static int
zfs_sort_snaps(zfs_handle_t *zhp, void *data)
{
	avl_tree_t *avl = data;
	zfs_node_t *node;
	zfs_node_t search;

	search.zn_handle = zhp;
	node = avl_find(avl, &search, NULL);
	if (node) {
		/*
		 * If this snapshot was renamed while we were creating the
		 * AVL tree, it's possible that we already inserted it under
		 * its old name. Remove the old handle before adding the new
		 * one.
		 */
		zfs_close(node->zn_handle);
		avl_remove(avl, node);
		free(node);
	}

	node = zfs_alloc(zhp->zfs_hdl, sizeof (zfs_node_t));
	node->zn_handle = zhp;
	avl_add(avl, node);

	return (0);
}
Example #7
void
storechar(tchar c, int row, int col) {
	long s = cbits(c);
	if (s != DASH && s != BAR) return;
	if (bst_srch(&coords, XY2KEY(row, col), NULL))
		if (avl_add(&coords, XY2KEY(row, col), BST_VAL(c)))
			fprintf(stderr,
			    "BST: Unexpected internal error\n");
}
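
Note that this example (and Example #9 below) uses a different avl_add() from the Solaris/ZFS one seen elsewhere on this page: here it is a key/value BST interface that returns nonzero on failure instead of asserting, which is why the return value is checked.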
Example #8
static void
trim_map_vdev_commit(spa_t *spa, zio_t *zio, vdev_t *vd)
{
	trim_map_t *tm = vd->vdev_trimmap;
	trim_seg_t *ts;
	uint64_t size, offset, txgtarget, txgsafe;
	int64_t hard, soft;
	hrtime_t timelimit;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	if (tm == NULL)
		return;

	timelimit = gethrtime() - (hrtime_t)trim_timeout * NANOSEC;
	if (vd->vdev_isl2cache) {
		txgsafe = UINT64_MAX;
		txgtarget = UINT64_MAX;
	} else {
		txgsafe = MIN(spa_last_synced_txg(spa), spa_freeze_txg(spa));
		if (txgsafe > trim_txg_delay)
			txgtarget = txgsafe - trim_txg_delay;
		else
			txgtarget = 0;
	}

	mutex_enter(&tm->tm_lock);
	hard = 0;
	if (tm->tm_pending > trim_vdev_max_pending)
		hard = (tm->tm_pending - trim_vdev_max_pending) / 4;
	soft = P2ROUNDUP(hard + tm->tm_pending / trim_timeout + 1, 64);
	/* Loop until we have sent all outstanding frees */
	while (soft > 0 &&
	    (ts = trim_map_first(tm, txgtarget, txgsafe, timelimit, hard > 0))
	    != NULL) {
		TRIM_MAP_REM(tm, ts);
		avl_remove(&tm->tm_queued_frees, ts);
		avl_add(&tm->tm_inflight_frees, ts);
		size = ts->ts_end - ts->ts_start;
		offset = ts->ts_start;
		/*
		 * We drop the lock while we call zio_nowait as the IO
		 * scheduler can result in a different IO being run e.g.
		 * a write which would result in a recursive lock.
		 */
		mutex_exit(&tm->tm_lock);

		zio_nowait(zio_trim(zio, spa, vd, offset, size));

		soft -= TRIM_MAP_SEGS(size);
		hard -= TRIM_MAP_SEGS(size);
		mutex_enter(&tm->tm_lock);
	}
	mutex_exit(&tm->tm_lock);
}
Example #9
void
char_open(void) {
	char *b;
	ssize_t s;
	if (!(b = file2ram(FNTDIR "/devhtml/CHAR", &s))) exit(1);
	while (1) {
		char *w, *l;
		size_t n;
		int t;
		if (!(s = lineskip(&b, s))) return;
		if (!(w = get_word(&b, &s, &n, &t))) return;
		if (!(l = get_line(&b, &s, NULL))) return;
		if (n == 1)
			avl_add(&chrdat, I2BST(*w), S2BST(l));
		else if (n != 2 && !(t & ~1))
			avl_add(&numdat, I2BST(atoi(w)), S2BST(l));
		else
			avl_add(&namdat, S2BST(w), S2BST(l));
	}
}
Example #10
static void
vdev_queue_io_add(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	avl_tree_t *qtt;
	ASSERT(MUTEX_HELD(&vq->vq_lock));
	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	avl_add(vdev_queue_class_tree(vq, zio->io_priority), zio);
	qtt = vdev_queue_type_tree(vq, zio->io_type);
	if (qtt)
		avl_add(qtt, zio);

#ifdef illumos
	mutex_enter(&spa->spa_iokstat_lock);
	spa->spa_queue_stats[zio->io_priority].spa_queued++;
	if (spa->spa_iokstat != NULL)
		kstat_waitq_enter(spa->spa_iokstat->ks_data);
	mutex_exit(&spa->spa_iokstat_lock);
#endif
}
Example #11
/* ARGSUSED */
static int
zfsctl_snapdir_remove(vnode_t *dvp, char *name, vnode_t *cwd, cred_t *cr,
    caller_context_t *ct, int flags)
{
	zfsctl_snapdir_t *sdp = dvp->v_data;
	zfs_snapentry_t *sep;
	zfs_snapentry_t search;
	zfsvfs_t *zfsvfs;
	char snapname[MAXNAMELEN];
	char real[MAXNAMELEN];
	int err;

	zfsvfs = dvp->v_vfsp->vfs_data;
	ZFS_ENTER(zfsvfs);

	if ((flags & FIGNORECASE) || zfsvfs->z_case == ZFS_CASE_INSENSITIVE) {
		err = dmu_snapshot_realname(zfsvfs->z_os, name, real,
		    MAXNAMELEN, NULL);
		if (err == 0) {
			name = real;
		} else if (err != ENOTSUP) {
			ZFS_EXIT(zfsvfs);
			return (err);
		}
	}

	ZFS_EXIT(zfsvfs);

	err = zfsctl_snapshot_zname(dvp, name, MAXNAMELEN, snapname);
	if (!err)
		err = zfs_secpolicy_destroy_perms(snapname, cr);
	if (err)
		return (err);

	mutex_enter(&sdp->sd_lock);

	search.se_name = name;
	sep = avl_find(&sdp->sd_snaps, &search, NULL);
	if (sep) {
		avl_remove(&sdp->sd_snaps, sep);
		err = zfsctl_unmount_snap(sep, MS_FORCE, cr);
		if (err)
			avl_add(&sdp->sd_snaps, sep);
		else
			err = dmu_objset_destroy(snapname, B_FALSE);
	} else {
		err = ENOENT;
	}

	mutex_exit(&sdp->sd_lock);

	return (err);
}
Example #12
static void
vdev_queue_pending_add(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	avl_add(&vq->vq_pending_tree, zio);
	if (spa->spa_iokstat != NULL) {
		mutex_enter(&spa->spa_iokstat_lock);
		kstat_runq_enter(spa->spa_iokstat->ks_data);
		mutex_exit(&spa->spa_iokstat_lock);
	}
}
Example #13
/*
 * pppt_disable_svc
 *
 * clean up all existing sessions and deregister targets from STMF
 */
static void
pppt_disable_svc(void)
{
	pppt_tgt_t	*tgt, *next_tgt;
	avl_tree_t	delete_target_list;

	ASSERT(pppt_global.global_svc_state == PSS_DISABLING);

	avl_create(&delete_target_list,
	    pppt_tgt_avl_compare, sizeof (pppt_tgt_t),
	    offsetof(pppt_tgt_t, target_global_ln));

	PPPT_GLOBAL_LOCK();
	for (tgt = avl_first(&pppt_global.global_target_list);
	    tgt != NULL;
	    tgt = next_tgt) {
		next_tgt = AVL_NEXT(&pppt_global.global_target_list, tgt);
		avl_remove(&pppt_global.global_target_list, tgt);
		avl_add(&delete_target_list, tgt);
		pppt_tgt_async_delete(tgt);
	}
	PPPT_GLOBAL_UNLOCK();

	for (tgt = avl_first(&delete_target_list);
	    tgt != NULL;
	    tgt = next_tgt) {
		next_tgt = AVL_NEXT(&delete_target_list, tgt);
		mutex_enter(&tgt->target_mutex);
		while ((tgt->target_refcount > 0) ||
		    (tgt->target_state != TS_DELETING)) {
			cv_wait(&tgt->target_cv, &tgt->target_mutex);
		}
		mutex_exit(&tgt->target_mutex);

		avl_remove(&delete_target_list, tgt);
		pppt_tgt_destroy(tgt);
	}

	taskq_destroy(pppt_global.global_sess_taskq);

	taskq_destroy(pppt_global.global_dispatch_taskq);

	avl_destroy(&pppt_global.global_sess_list);
	avl_destroy(&pppt_global.global_target_list);

	(void) stmf_deregister_port_provider(pppt_global.global_pp);

	stmf_free(pppt_global.global_dbuf_store);
	pppt_global.global_dbuf_store = NULL;

	stmf_free(pppt_global.global_pp);
	pppt_global.global_pp = NULL;
}
Example #14
static void
vdev_queue_io_add(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	avl_add(&vq->vq_class[zio->io_priority].vqc_queued_tree, zio);

	mutex_enter(&spa->spa_iokstat_lock);
	spa->spa_queue_stats[zio->io_priority].spa_queued++;
	if (spa->spa_iokstat != NULL)
		kstat_waitq_enter(spa->spa_iokstat->ks_data);
	mutex_exit(&spa->spa_iokstat_lock);
}
Example #15
void
zcrypt_keychain_insert(avl_tree_t *keychain, uint64_t txg,
    zcrypt_key_t *key)
{
	zcrypt_keychain_node_t *dkn;

	dkn = kmem_alloc(sizeof (zcrypt_keychain_node_t), KM_PUSHPAGE); /* may sleep */

	dkn->dkn_txg = txg;
	dkn->dkn_key = key;

	avl_add(keychain, dkn);
}
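
The insert above keeps the keychain ordered by dkn_txg, so the natural lookup is "newest key at or before a given txg". A hedged sketch of that lookup using the stock avl_find()/avl_nearest() pair (the helper name and the comparator ordering by dkn_txg are assumptions, not the project's actual code):

static zcrypt_key_t *
zcrypt_keychain_lookup(avl_tree_t *keychain, uint64_t txg)
{
	zcrypt_keychain_node_t search, *dkn;
	avl_index_t where;

	search.dkn_txg = txg;
	dkn = avl_find(keychain, &search, &where);
	if (dkn == NULL)	/* no exact match; take the nearest lower txg */
		dkn = avl_nearest(keychain, where, AVL_BEFORE);

	return (dkn != NULL ? dkn->dkn_key : NULL);
}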
Example #16
static void
vdev_queue_pending_add(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	spa_stats_history_t *ssh = &spa->spa_stats.io_history;

	avl_add(&vq->vq_pending_tree, zio);

	if (ssh->kstat != NULL) {
		mutex_enter(&ssh->lock);
		kstat_runq_enter(ssh->kstat->ks_data);
		mutex_exit(&ssh->lock);
	}
}
Example #17
/*
 * Allocate an entry in the cache.  At the point we don't have the data,
 * we're just creating a placeholder so that multiple threads don't all
 * go off and read the same blocks.
 */
static vdev_cache_entry_t *
vdev_cache_allocate(zio_t *zio)
{
	vdev_cache_t *vc = &zio->io_vd->vdev_cache;
	uint64_t offset = P2ALIGN(zio->io_offset, VCBS);
	vdev_cache_entry_t *ve;

	ASSERT(MUTEX_HELD(&vc->vc_lock));

	if (zfs_vdev_cache_size == 0)
		return (NULL);

	/*
	 * If adding a new entry would exceed the cache size,
	 * evict the oldest entry (LRU).
	 */
	if ((avl_numnodes(&vc->vc_lastused_tree) << zfs_vdev_cache_bshift) >
	    zfs_vdev_cache_size) {
		ve = avl_first(&vc->vc_lastused_tree);
		if (ve->ve_fill_io != NULL) {
			dprintf("can't evict in %p, still filling\n", vc);
			return (NULL);
		}
		ASSERT(ve->ve_hits != 0);
		vdev_cache_evict(vc, ve);
	}

	ve = kmem_zalloc(sizeof (vdev_cache_entry_t), KM_SLEEP);
	ve->ve_offset = offset;
	ve->ve_lastused = lbolt;
	ve->ve_data = zio_buf_alloc(VCBS);

	avl_add(&vc->vc_offset_tree, ve);
	avl_add(&vc->vc_lastused_tree, ve);

	return (ve);
}
Example #18
static void
vdev_queue_pending_add(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	ASSERT(MUTEX_HELD(&vq->vq_lock));
	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	vq->vq_class[zio->io_priority].vqc_active++;
	avl_add(&vq->vq_active_tree, zio);

	mutex_enter(&spa->spa_iokstat_lock);
	spa->spa_queue_stats[zio->io_priority].spa_active++;
	if (spa->spa_iokstat != NULL)
		kstat_runq_enter(spa->spa_iokstat->ks_data);
	mutex_exit(&spa->spa_iokstat_lock);
}
Example #19
static void
mze_insert(zap_t *zap, int chunkid, uint64_t hash, mzap_ent_phys_t *mzep)
{
	mzap_ent_t *mze;

	ASSERT(zap->zap_ismicro);
	ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
	ASSERT(mzep->mze_cd < ZAP_MAXCD);
	ASSERT3U(zap_hash(zap, mzep->mze_name), ==, hash);

	mze = kmem_alloc(sizeof (mzap_ent_t), KM_SLEEP);
	mze->mze_chunkid = chunkid;
	mze->mze_hash = hash;
	mze->mze_phys = *mzep;
	avl_add(&zap->zap_m.zap_avl, mze);
}
Example #20
static void
vdev_cache_hit(vdev_cache_t *vc, vdev_cache_entry_t *ve, zio_t *zio)
{
	uint64_t cache_phase = P2PHASE(zio->io_offset, VCBS);

	ASSERT(MUTEX_HELD(&vc->vc_lock));
	ASSERT(ve->ve_fill_io == NULL);

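	/*
	 * An AVL node's sort key (here ve_lastused) must never change while
	 * the node is linked into a tree, so the entry is removed, updated,
	 * and re-inserted rather than modified in place.
	 */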
	if (ve->ve_lastused != lbolt) {
		avl_remove(&vc->vc_lastused_tree, ve);
		ve->ve_lastused = lbolt;
		avl_add(&vc->vc_lastused_tree, ve);
	}

	ve->ve_hits++;
	bcopy(ve->ve_data + cache_phase, zio->io_data, zio->io_size);
}
Example #21
boolean_t
trim_map_write_start(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	trim_map_t *tm = vd->vdev_trimmap;
	trim_seg_t tsearch, *ts;
	boolean_t left_over, right_over;
	uint64_t start, end;

	if (!zfs_trim_enabled || vd->vdev_notrim || tm == NULL)
		return (B_TRUE);

	start = zio->io_offset;
	end = TRIM_ZIO_END(zio->io_vd, start, zio->io_size);
	tsearch.ts_start = start;
	tsearch.ts_end = end;

	mutex_enter(&tm->tm_lock);

	/*
	 * Checking for colliding in-flight frees.
	 */
	ts = avl_find(&tm->tm_inflight_frees, &tsearch, NULL);
	if (ts != NULL) {
		list_insert_tail(&tm->tm_pending_writes, zio);
		mutex_exit(&tm->tm_lock);
		return (B_FALSE);
	}

	ts = avl_find(&tm->tm_queued_frees, &tsearch, NULL);
	if (ts != NULL) {
		/*
		 * Loop until all overlapping segments are removed.
		 */
		do {
			trim_map_segment_remove(tm, ts, start, end);
			ts = avl_find(&tm->tm_queued_frees, &tsearch, NULL);
		} while (ts != NULL);
	}
	avl_add(&tm->tm_inflight_writes, zio);

	mutex_exit(&tm->tm_lock);

	return (B_TRUE);
}
Example #22
/**
 * Add a new field to a table index
 * @param idx {ACL_MDT_IDX*} the table index
 * @param key {const char*} value of the table's index field
 * @param rec {ACL_MDT_REC*} the record being indexed
 */
static void mdt_idx_add(ACL_MDT_IDX *idx, const char *key, ACL_MDT_REC *rec)
{
	ACL_MDT_IDX_AVL *idx_avl = (ACL_MDT_IDX_AVL*) idx;
	TREE_NODE *pnode;

	if (idx_avl->slice)
		pnode = (TREE_NODE*) acl_slice_alloc(idx_avl->slice);
	else
		pnode = (TREE_NODE*) acl_mymalloc(sizeof(TREE_NODE));
	if (idx->flag & ACL_MDT_FLAG_KMR)
		pnode->key.c_key = key;
	else
		pnode->key.key = acl_mystrdup(key);

	pnode->rec = rec;
	avl_add(&idx_avl->avl, pnode);
	rec->key = pnode->key.c_key;
}
Example #23
static void
vdev_queue_io_add(vdev_queue_t *vq, zio_t *zio)
{
#ifdef LINUX
	spa_t *spa = zio->io_spa;
	spa_stats_history_t *ssh = &spa->spa_stats.io_history;
#endif

	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	avl_add(&vq->vq_class[zio->io_priority].vqc_queued_tree, zio);

#ifdef LINUX
	if (ssh->kstat != NULL) {
		mutex_enter(&ssh->lock);
		kstat_waitq_enter(ssh->kstat->ks_data);
		mutex_exit(&ssh->lock);
	}
#endif
}
Example #24
/*
 * Add a copy of the string [s] to the container [zsp].
 * Return 0 on success, or -1 on error.
 *
 * FIXME: Handle dup strings.
 */
int
zed_strings_add(zed_strings_t *zsp, const char *s)
{
	size_t len;
	zed_strings_node_t *np;

	if (!zsp || !s) {
		errno = EINVAL;
		return (-1);
	}
	len = sizeof (zed_strings_node_t) + strlen(s) + 1;
	np = malloc(len);
	if (!np)
		return (-1);

	memset(np, 0, len);
	assert((char *) np->string + strlen(s) < (char *) np + len);
	(void) strcpy(np->string, s);
	avl_add(&zsp->tree, np);
	return (0);
}
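
Because avl_add() asserts that the node is not already in the tree, the FIXME above is real: inserting a duplicate string would trip that assertion. A hedged sketch of a guard that could precede the avl_add() call, assuming the tree's comparator orders nodes by their string (illustrative only, not the upstream fix):

	if (avl_find(&zsp->tree, np, NULL) != NULL) {
		free(np);	/* already present; treat the add as a no-op */
		return (0);
	}
	avl_add(&zsp->tree, np);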
Example #25
static void
trim_map_vdev_commit(spa_t *spa, zio_t *zio, vdev_t *vd)
{
	trim_map_t *tm = vd->vdev_trimmap;
	trim_seg_t *ts;
	uint64_t size, txgtarget, txgsafe;
	hrtime_t timelimit;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	if (tm == NULL)
		return;

	timelimit = gethrtime() - trim_timeout * NANOSEC;
	if (vd->vdev_isl2cache) {
		txgsafe = UINT64_MAX;
		txgtarget = UINT64_MAX;
	} else {
		txgsafe = MIN(spa_last_synced_txg(spa), spa_freeze_txg(spa));
		if (txgsafe > trim_txg_delay)
			txgtarget = txgsafe - trim_txg_delay;
		else
			txgtarget = 0;
	}

	mutex_enter(&tm->tm_lock);
	/* Loop until we have sent all outstanding frees */
	while ((ts = trim_map_first(tm, txgtarget, txgsafe, timelimit))
	    != NULL) {
		list_remove(&tm->tm_head, ts);
		avl_remove(&tm->tm_queued_frees, ts);
		avl_add(&tm->tm_inflight_frees, ts);
		size = ts->ts_end - ts->ts_start;
		zio_nowait(zio_trim(zio, spa, vd, ts->ts_start, size));
		TRIM_MAP_SDEC(tm, size);
		TRIM_MAP_QDEC(tm);
	}
	mutex_exit(&tm->tm_lock);
}
Example #26
int
zfs_fuid_find_by_domain(zfsvfs_t *zfsvfs, const char *domain, char **retdomain,
    dmu_tx_t *tx)
{
	fuid_domain_t searchnode, *findnode;
	avl_index_t loc;

	/*
	 * If this is the dummy "nobody" domain, return an index of 0
	 * to cause the created FUID to be a standard POSIX id
	 * for the user nobody.
	 */
	if (domain[0] == '\0') {
		*retdomain = "";
		return (0);
	}

	searchnode.f_ksid = ksid_lookupdomain(domain);
	if (retdomain) {
		*retdomain = searchnode.f_ksid->kd_name;
	}
	if (!zfsvfs->z_fuid_loaded)
		zfs_fuid_init(zfsvfs, tx);

	rw_enter(&zfsvfs->z_fuid_lock, RW_READER);
	findnode = avl_find(&zfsvfs->z_fuid_domain, &searchnode, &loc);
	rw_exit(&zfsvfs->z_fuid_lock);

	if (findnode) {
		ksiddomain_rele(searchnode.f_ksid);
		return (findnode->f_idx);
	} else {
		fuid_domain_t *domnode;
		nvlist_t *nvp;
		nvlist_t **fuids;
		uint64_t retidx;
		size_t nvsize = 0;
		char *packed;
		dmu_buf_t *db;
		int i = 0;

		domnode = kmem_alloc(sizeof (fuid_domain_t), KM_SLEEP);
		domnode->f_ksid = searchnode.f_ksid;

		rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER);
		retidx = domnode->f_idx = avl_numnodes(&zfsvfs->z_fuid_idx) + 1;

		avl_add(&zfsvfs->z_fuid_domain, domnode);
		avl_add(&zfsvfs->z_fuid_idx, domnode);
		/*
		 * Now resync the on-disk nvlist.
		 */
		VERIFY(nvlist_alloc(&nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);

		domnode = avl_first(&zfsvfs->z_fuid_domain);
		fuids = kmem_alloc(retidx * sizeof (void *), KM_SLEEP);
		while (domnode) {
			VERIFY(nvlist_alloc(&fuids[i],
			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
			VERIFY(nvlist_add_uint64(fuids[i], FUID_IDX,
			    domnode->f_idx) == 0);
			VERIFY(nvlist_add_uint64(fuids[i],
			    FUID_OFFSET, 0) == 0);
			VERIFY(nvlist_add_string(fuids[i++], FUID_DOMAIN,
			    domnode->f_ksid->kd_name) == 0);
			domnode = AVL_NEXT(&zfsvfs->z_fuid_domain, domnode);
		}
		VERIFY(nvlist_add_nvlist_array(nvp, FUID_NVP_ARRAY,
		    fuids, retidx) == 0);
		for (i = 0; i != retidx; i++)
			nvlist_free(fuids[i]);
		kmem_free(fuids, retidx * sizeof (void *));
		VERIFY(nvlist_size(nvp, &nvsize, NV_ENCODE_XDR) == 0);
		packed = kmem_alloc(nvsize, KM_SLEEP);
		VERIFY(nvlist_pack(nvp, &packed, &nvsize,
		    NV_ENCODE_XDR, KM_SLEEP) == 0);
		nvlist_free(nvp);
		zfsvfs->z_fuid_size = nvsize;
		dmu_write(zfsvfs->z_os, zfsvfs->z_fuid_obj, 0,
		    zfsvfs->z_fuid_size, packed, tx);
		kmem_free(packed, zfsvfs->z_fuid_size);
		VERIFY(0 == dmu_bonus_hold(zfsvfs->z_os, zfsvfs->z_fuid_obj,
		    FTAG, &db));
		dmu_buf_will_dirty(db, tx);
		*(uint64_t *)db->db_data = zfsvfs->z_fuid_size;
		dmu_buf_rele(db, FTAG);

		rw_exit(&zfsvfs->z_fuid_lock);
		return (retidx);
	}
}
Example #27
/*
 * Create a pseudo export entry
 *
 * This is an export entry that's created as the
 * side-effect of a "real" export.  As a part of
 * a real export, the pathname to the export is
 * checked to see if all the directory components
 * are accessible via an NFSv4 client, i.e. are
 * exported.  If treeclimb_export() finds an unexported
 * mountpoint along the path, then it calls this
 * function to export it.
 *
 * This pseudo export differs from a real export in that
 * it only allows read-only access.  A "visible" list of
 * directories is added to filter lookup and readdir results
 * to only contain dirnames which lead to descendant shares.
 *
 * A visible list has a per-file-system scope.  Any exportinfo
 * struct (real or pseudo) can have a visible list as long as
 * a) its export root is VROOT
 * b) a descendant of the export root is shared
 */
struct exportinfo *
pseudo_exportfs(vnode_t *vp, fid_t *fid, struct exp_visible *vis_head,
	    struct exportdata *exdata)
{
	struct exportinfo *exi;
	struct exportdata *kex;
	fsid_t fsid;
	int vpathlen;
	int i;

	ASSERT(RW_WRITE_HELD(&exported_lock));

	fsid = vp->v_vfsp->vfs_fsid;
	exi = kmem_zalloc(sizeof (*exi), KM_SLEEP);
	exi->exi_fsid = fsid;
	exi->exi_fid = *fid;
	exi->exi_vp = vp;
	VN_HOLD(exi->exi_vp);
	exi->exi_visible = vis_head;
	exi->exi_count = 1;
	exi->exi_volatile_dev = (vfssw[vp->v_vfsp->vfs_fstype].vsw_flag &
	    VSW_VOLATILEDEV) ? 1 : 0;
	mutex_init(&exi->exi_lock, NULL, MUTEX_DEFAULT, NULL);

	/*
	 * Build up the template fhandle
	 */
	exi->exi_fh.fh_fsid = fsid;
	ASSERT(exi->exi_fid.fid_len <= sizeof (exi->exi_fh.fh_xdata));
	exi->exi_fh.fh_xlen = exi->exi_fid.fid_len;
	bcopy(exi->exi_fid.fid_data, exi->exi_fh.fh_xdata,
	    exi->exi_fid.fid_len);
	exi->exi_fh.fh_len = sizeof (exi->exi_fh.fh_data);

	kex = &exi->exi_export;
	kex->ex_flags = EX_PSEUDO;

	vpathlen = vp->v_path ? strlen(vp->v_path) : 0;
	kex->ex_pathlen = vpathlen + strlen(PSEUDOFS_SUFFIX);
	kex->ex_path = kmem_alloc(kex->ex_pathlen + 1, KM_SLEEP);

	if (vpathlen)
		(void) strcpy(kex->ex_path, vp->v_path);
	(void) strcpy(kex->ex_path + vpathlen, PSEUDOFS_SUFFIX);

	/* Transfer the secinfo data from exdata to this new pseudo node */
	if (exdata)
		srv_secinfo_exp2pseu(&exi->exi_export, exdata);

	/*
	 * Initialize auth cache and auth cache lock
	 */
	for (i = 0; i < AUTH_TABLESIZE; i++) {
		exi->exi_cache[i] = kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
		avl_create(exi->exi_cache[i], nfsauth_cache_clnt_compar,
		    sizeof (struct auth_cache_clnt),
		    offsetof(struct auth_cache_clnt, authc_link));
	}
	rw_init(&exi->exi_cache_lock, NULL, RW_DEFAULT, NULL);

	/*
	 * Insert the new entry at the front of the export list
	 */
	export_link(exi);

	/*
	 * Initialize exi_id and exi_kstats
	 */
	exi->exi_id = exi_id_get_next();
	avl_add(&exi_id_tree, exi);
	exi->exi_kstats = exp_kstats_init(getzoneid(), exi->exi_id,
	    exi->exi_export.ex_path);

	return (exi);
}
Example #28
/*
 * Given a list of directories to search, find all pools stored on disk.  This
 * includes partial pools which are not available to import.  If no args are
 * given (argc is 0), then the default directory (/dev/dsk) is searched.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
static nvlist_t *
zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
{
	int i, dirs = iarg->paths;
	struct dirent *dp;
	char path[MAXPATHLEN];
	char *end, **dir = iarg->path;
	size_t pathleft;
	nvlist_t *ret = NULL;
	pool_list_t pools = { 0 };
	pool_entry_t *pe, *penext;
	vdev_entry_t *ve, *venext;
	config_entry_t *ce, *cenext;
	name_entry_t *ne, *nenext;
	avl_tree_t slice_cache;
	rdsk_node_t *slice;
	void *cookie;

	verify(iarg->poolname == NULL || iarg->guid == 0);

	if (dirs == 0) {
#ifdef HAVE_LIBBLKID
		/* Use libblkid to scan all device for their type */
		if (zpool_find_import_blkid(hdl, &pools) == 0)
			goto skip_scanning;

		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid failure falling back "
		    "to manual probing"));
#endif /* HAVE_LIBBLKID */

		dir = zpool_default_import_path;
		dirs = DEFAULT_IMPORT_PATH_SIZE;
	}

	/*
	 * Go through and read the label configuration information from every
	 * possible device, organizing the information according to pool GUID
	 * and toplevel GUID.
	 */
	for (i = 0; i < dirs; i++) {
		taskq_t *t;
		char rdsk[MAXPATHLEN];
		int dfd;
		boolean_t config_failed = B_FALSE;
		DIR *dirp;

		/* use realpath to normalize the path */
		if (realpath(dir[i], path) == 0) {

			/* it is safe to skip missing search paths */
			if (errno == ENOENT)
				continue;

			zfs_error_aux(hdl, strerror(errno));
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);
			goto error;
		}
		end = &path[strlen(path)];
		*end++ = '/';
		*end = 0;
		pathleft = &path[sizeof (path)] - end;

		/*
		 * Using raw devices instead of block devices when we're
		 * reading the labels skips a bunch of slow operations during
		 * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
		 */
		if (strcmp(path, ZFS_DISK_ROOTD) == 0)
			(void) strlcpy(rdsk, ZFS_RDISK_ROOTD, sizeof (rdsk));
		else
			(void) strlcpy(rdsk, path, sizeof (rdsk));

		if ((dfd = open(rdsk, O_RDONLY)) < 0 ||
		    (dirp = fdopendir(dfd)) == NULL) {
			if (dfd >= 0)
				(void) close(dfd);
			zfs_error_aux(hdl, strerror(errno));
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
			    rdsk);
			goto error;
		}

		avl_create(&slice_cache, slice_cache_compare,
		    sizeof (rdsk_node_t), offsetof(rdsk_node_t, rn_node));

		/*
		 * This is not MT-safe, but we have no MT consumers of libzfs
		 */
		while ((dp = readdir(dirp)) != NULL) {
			const char *name = dp->d_name;
			if (name[0] == '.' &&
			    (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
				continue;

			slice = zfs_alloc(hdl, sizeof (rdsk_node_t));
			slice->rn_name = zfs_strdup(hdl, name);
			slice->rn_avl = &slice_cache;
			slice->rn_dfd = dfd;
			slice->rn_hdl = hdl;
			slice->rn_nozpool = B_FALSE;
			avl_add(&slice_cache, slice);
		}

		/*
		 * create a thread pool to do all of this in parallel;
		 * rn_nozpool is not protected, so this is racy in that
		 * multiple tasks could decide that the same slice can
		 * not hold a zpool, which is benign.  Also choose
		 * double the number of processors; we hold a lot of
		 * locks in the kernel, so going beyond this doesn't
		 * buy us much.
		 */
		t = taskq_create("z_import", 2 * max_ncpus, defclsyspri,
		    2 * max_ncpus, INT_MAX, TASKQ_PREPOPULATE);
		for (slice = avl_first(&slice_cache); slice;
		    (slice = avl_walk(&slice_cache, slice,
		    AVL_AFTER)))
			(void) taskq_dispatch(t, zpool_open_func, slice,
			    TQ_SLEEP);
		taskq_wait(t);
		taskq_destroy(t);

		cookie = NULL;
		while ((slice = avl_destroy_nodes(&slice_cache,
		    &cookie)) != NULL) {
			if (slice->rn_config != NULL && !config_failed) {
				nvlist_t *config = slice->rn_config;
				boolean_t matched = B_TRUE;

				if (iarg->poolname != NULL) {
					char *pname;

					matched = nvlist_lookup_string(config,
					    ZPOOL_CONFIG_POOL_NAME,
					    &pname) == 0 &&
					    strcmp(iarg->poolname, pname) == 0;
				} else if (iarg->guid != 0) {
					uint64_t this_guid;

					matched = nvlist_lookup_uint64(config,
					    ZPOOL_CONFIG_POOL_GUID,
					    &this_guid) == 0 &&
					    iarg->guid == this_guid;
				}
				if (!matched) {
					nvlist_free(config);
				} else {
					/*
					 * use the non-raw path for the config
					 */
					(void) strlcpy(end, slice->rn_name,
					    pathleft);
					if (add_config(hdl, &pools, path, i+1,
					    slice->rn_num_labels, config) != 0)
						config_failed = B_TRUE;
				}
			}
			free(slice->rn_name);
			free(slice);
		}
		avl_destroy(&slice_cache);

		(void) closedir(dirp);

		if (config_failed)
			goto error;
	}

#ifdef HAVE_LIBBLKID
skip_scanning:
#endif
	ret = get_configs(hdl, &pools, iarg->can_be_active, iarg->policy);

error:
	for (pe = pools.pools; pe != NULL; pe = penext) {
		penext = pe->pe_next;
		for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
			venext = ve->ve_next;
			for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
				cenext = ce->ce_next;
				if (ce->ce_config)
					nvlist_free(ce->ce_config);
				free(ce);
			}
			free(ve);
		}
		free(pe);
	}

	for (ne = pools.names; ne != NULL; ne = nenext) {
		nenext = ne->ne_next;
		free(ne->ne_name);
		free(ne);
	}

	return (ret);
}
Example #29
/* New session */
pppt_sess_t *
pppt_sess_lookup_create(scsi_devid_desc_t *lport_devid,
    scsi_devid_desc_t *rport_devid, stmf_remote_port_t *rport,
    uint64_t session_id, stmf_status_t *statusp)
{
	pppt_tgt_t		*tgt;
	pppt_sess_t		*ps;
	stmf_scsi_session_t	*ss;
	pppt_sess_t		tmp_ps;
	stmf_scsi_session_t	tmp_ss;
	*statusp = STMF_SUCCESS;

	PPPT_GLOBAL_LOCK();

	/*
	 * Look for existing session for this ID
	 */
	ps = pppt_sess_lookup_locked(session_id, lport_devid, rport);

	if (ps != NULL) {
		PPPT_GLOBAL_UNLOCK();
		return (ps);
	}

	/*
	 * No session with that ID, look for another session corresponding
	 * to the same IT nexus.
	 */
	tgt = pppt_tgt_lookup_locked(lport_devid);
	if (tgt == NULL) {
		*statusp = STMF_NOT_FOUND;
		PPPT_GLOBAL_UNLOCK();
		return (NULL);
	}

	mutex_enter(&tgt->target_mutex);
	if (tgt->target_state != TS_STMF_ONLINE) {
		*statusp = STMF_NOT_FOUND;
		mutex_exit(&tgt->target_mutex);
		PPPT_GLOBAL_UNLOCK();
		/* Can't create session to offline target */
		return (NULL);
	}

	bzero(&tmp_ps, sizeof (tmp_ps));
	bzero(&tmp_ss, sizeof (tmp_ss));
	tmp_ps.ps_stmf_sess = &tmp_ss;
	tmp_ss.ss_rport = rport;

	/*
	 * Look for an existing session on this IT nexus
	 */
	ps = avl_find(&tgt->target_sess_list, &tmp_ps, NULL);

	if (ps != NULL) {
		/*
		 * Now check the session ID.  It should not match because if
		 * it did we would have found it on the global session list.
		 * If the session ID in the command is higher than the existing
		 * session ID then we need to tear down the existing session.
		 */
		mutex_enter(&ps->ps_mutex);
		ASSERT(ps->ps_session_id != session_id);
		if (ps->ps_session_id > session_id) {
			/* Invalid session ID */
			mutex_exit(&ps->ps_mutex);
			mutex_exit(&tgt->target_mutex);
			PPPT_GLOBAL_UNLOCK();
			*statusp = STMF_INVALID_ARG;
			return (NULL);
		} else {
			/* Existing session needs to be invalidated */
			if (!ps->ps_closed) {
				pppt_sess_close_locked(ps);
			}
		}
		mutex_exit(&ps->ps_mutex);

		/* Fallthrough and create new session */
	}

	/*
	 * Allocate and fill in pppt_session_t with the appropriate data
	 * for the protocol.
	 */
	ps = kmem_zalloc(sizeof (*ps), KM_SLEEP);

	/* Fill in session fields */
	ps->ps_target = tgt;
	ps->ps_session_id = session_id;

	ss = stmf_alloc(STMF_STRUCT_SCSI_SESSION, 0, 0);
	if (ss == NULL) {
		mutex_exit(&tgt->target_mutex);
		PPPT_GLOBAL_UNLOCK();
		kmem_free(ps, sizeof (*ps));
		*statusp = STMF_ALLOC_FAILURE;
		return (NULL);
	}

	ss->ss_rport_id = kmem_zalloc(sizeof (scsi_devid_desc_t) +
	    rport_devid->ident_length + 1, KM_SLEEP);
	bcopy(rport_devid, ss->ss_rport_id,
	    sizeof (scsi_devid_desc_t) + rport_devid->ident_length + 1);

	ss->ss_lport = tgt->target_stmf_lport;

	ss->ss_rport = stmf_remote_port_alloc(rport->rport_tptid_sz);
	bcopy(rport->rport_tptid, ss->ss_rport->rport_tptid,
	    rport->rport_tptid_sz);

	if (stmf_register_scsi_session(tgt->target_stmf_lport, ss) !=
	    STMF_SUCCESS) {
		mutex_exit(&tgt->target_mutex);
		PPPT_GLOBAL_UNLOCK();
		kmem_free(ss->ss_rport_id,
		    sizeof (scsi_devid_desc_t) + rport_devid->ident_length + 1);
		stmf_remote_port_free(ss->ss_rport);
		stmf_free(ss);
		kmem_free(ps, sizeof (*ps));
		*statusp = STMF_TARGET_FAILURE;
		return (NULL);
	}

	ss->ss_port_private = ps;
	mutex_init(&ps->ps_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&ps->ps_cv, NULL, CV_DEFAULT, NULL);
	avl_create(&ps->ps_task_list, pppt_task_avl_compare,
	    sizeof (pppt_task_t), offsetof(pppt_task_t, pt_sess_ln));
	ps->ps_refcnt = 1;
	ps->ps_stmf_sess = ss;
	avl_add(&tgt->target_sess_list, ps);
	avl_add(&pppt_global.global_sess_list, ps);
	mutex_exit(&tgt->target_mutex);
	PPPT_GLOBAL_UNLOCK();
	stmf_trace("pppt", "New session %p", (void *)ps);

	return (ps);
}
Example #30
File: zfs_ctldir.c  Project: EW1/zfs
int
zfsctl_mount_snapshot(struct path *path, int flags)
{
	struct dentry *dentry = path->dentry;
	struct inode *ip = dentry->d_inode;
	zfs_sb_t *zsb = ITOZSB(ip);
	char *full_name, *full_path;
	zfs_snapentry_t *sep;
	zfs_snapentry_t search;
	char *argv[] = { "/bin/sh", "-c", NULL, NULL };
	char *envp[] = { NULL };
	int error;

	ZFS_ENTER(zsb);

	full_name = kmem_zalloc(MAXNAMELEN, KM_SLEEP);
	full_path = kmem_zalloc(PATH_MAX, KM_SLEEP);

	error = zfsctl_snapshot_zname(ip, dname(dentry), MAXNAMELEN, full_name);
	if (error)
		goto error;

	error = zfsctl_snapshot_zpath(path, PATH_MAX, full_path);
	if (error)
		goto error;

	/*
	 * Attempt to mount the snapshot from user space.  Normally this
	 * would be done using the vfs_kern_mount() function, however that
	 * function is marked GPL-only and cannot be used.  On error we
	 * careful to log the real error to the console and return EISDIR
	 * to safely abort the automount.  This should be very rare.
	 */
	argv[2] = kmem_asprintf(SET_MOUNT_CMD, full_name, full_path);
	error = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
	strfree(argv[2]);
	if (error) {
		printk("ZFS: Unable to automount %s at %s: %d\n",
		    full_name, full_path, error);
		error = EISDIR;
		goto error;
	}

	mutex_enter(&zsb->z_ctldir_lock);

	/*
	 * Ensure a previous entry does not exist; if it does, safely remove
	 * it and cancel the outstanding expiration.  This can occur when a
	 * snapshot is manually unmounted and then an automount is triggered.
	 */
	search.se_name = full_name;
	sep = avl_find(&zsb->z_ctldir_snaps, &search, NULL);
	if (sep) {
		avl_remove(&zsb->z_ctldir_snaps, sep);
		taskq_cancel_id(zfs_expire_taskq, sep->se_taskqid);
		zfsctl_sep_free(sep);
	}

	sep = zfsctl_sep_alloc();
	sep->se_name = full_name;
	sep->se_path = full_path;
	sep->se_inode = ip;
	avl_add(&zsb->z_ctldir_snaps, sep);

	sep->se_taskqid = taskq_dispatch_delay(zfs_expire_taskq,
	    zfsctl_expire_snapshot, sep, TQ_SLEEP,
	    ddi_get_lbolt() + zfs_expire_snapshot * HZ);

	mutex_exit(&zsb->z_ctldir_lock);
error:
	if (error) {
		kmem_free(full_name, MAXNAMELEN);
		kmem_free(full_path, PATH_MAX);
	}

	ZFS_EXIT(zsb);

	return (error);
}