Example #1
/*
 * Register interest in the named property.  We'll call the callback
 * once to notify it of the current property value, and again each time
 * the property changes, until this callback is unregistered.
 *
 * Return 0 on success, errno if the prop is not an integer value.
 */
int
dsl_prop_register(dsl_dataset_t *ds, const char *propname,
    dsl_prop_changed_cb_t *callback, void *cbarg)
{
	dsl_dir_t *dd = ds->ds_dir;
	uint64_t value;
	dsl_prop_cb_record_t *cbr;
	int err;
	ASSERTV(dsl_pool_t *dp = dd->dd_pool);

	ASSERT(dsl_pool_config_held(dp));

	err = dsl_prop_get_int_ds(ds, propname, &value);
	if (err != 0)
		return (err);

	cbr = kmem_alloc(sizeof (dsl_prop_cb_record_t), KM_PUSHPAGE);
	cbr->cbr_ds = ds;
	cbr->cbr_propname = kmem_alloc(strlen(propname)+1, KM_PUSHPAGE);
	(void) strcpy((char *)cbr->cbr_propname, propname);
	cbr->cbr_func = callback;
	cbr->cbr_arg = cbarg;
	mutex_enter(&dd->dd_lock);
	list_insert_head(&dd->dd_prop_cbs, cbr);
	mutex_exit(&dd->dd_lock);

	cbr->cbr_func(cbr->cbr_arg, value);
	return (0);
}
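
ASSERTV comes from the SPL (Solaris Porting Layer): roughly, it keeps its argument in debug builds and drops it otherwise, so a variable that is referenced only inside assertions does not trigger unused-variable warnings once assertions compile away. A simplified standalone sketch of the idea follows; the macro bodies here are illustrative, not the SPL's exact definitions.

#include <assert.h>
#include <stdio.h>

/*
 * Illustrative stand-ins: under a debug build the declaration survives,
 * otherwise both the declaration and the ASSERT that uses it vanish,
 * so the compiler never sees an unused variable.
 */
#ifdef DEBUG
#define	ASSERTV(x)	x
#define	ASSERT(cond)	assert(cond)
#else
#define	ASSERTV(x)
#define	ASSERT(cond)	((void)0)
#endif

struct pool {
	int configured;
};

static void
do_work(struct pool *p)
{
	/* Declared only in debug builds; referenced only by the ASSERT. */
	ASSERTV(int cfg = p->configured);

	ASSERT(cfg == 1);
	printf("working\n");
}

int
main(void)
{
	struct pool p = { .configured = 1 };

	do_work(&p);
	return (0);
}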
Example #2
/*
 * Register interest in the named property.  We'll call the callback
 * once to notify it of the current property value, and again each time
 * the property changes, until this callback is unregistered.
 *
 * Return 0 on success, errno if the prop is not an integer value.
 */
int
dsl_prop_register(dsl_dataset_t *ds, const char *propname,
    dsl_prop_changed_cb_t *callback, void *cbarg)
{
	dsl_dir_t *dd = ds->ds_dir;
	uint64_t value;
	dsl_prop_record_t *pr;
	dsl_prop_cb_record_t *cbr;
	int err;
	ASSERTV(dsl_pool_t *dp = dd->dd_pool);

	ASSERT(dsl_pool_config_held(dp));

	err = dsl_prop_get_int_ds(ds, propname, &value);
	if (err != 0)
		return (err);

	cbr = kmem_alloc(sizeof (dsl_prop_cb_record_t), KM_SLEEP);
	cbr->cbr_ds = ds;
	cbr->cbr_func = callback;
	cbr->cbr_arg = cbarg;

	mutex_enter(&dd->dd_lock);
	pr = dsl_prop_record_find(dd, propname);
	if (pr == NULL)
		pr = dsl_prop_record_create(dd, propname);
	cbr->cbr_pr = pr;
	list_insert_head(&pr->pr_cbs, cbr);
	list_insert_head(&ds->ds_prop_cbs, cbr);
	mutex_exit(&dd->dd_lock);

	cbr->cbr_func(cbr->cbr_arg, value);
	return (0);
}
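
The block comment describes the contract: the callback fires once immediately with the current value and again on every subsequent change until it is unregistered. Below is a minimal standalone sketch of that same register-then-notify pattern, using hypothetical names (prop_register, prop_set, print_cb) rather than the ZFS API, and no locking.

#include <stdint.h>
#include <stdio.h>

#define	MAX_CBS	8

typedef void (prop_changed_cb_t)(void *arg, uint64_t newval);

/* One integer property with a small, fixed callback table. */
static uint64_t prop_value;
static struct {
	prop_changed_cb_t *func;
	void *arg;
} cbs[MAX_CBS];
static int ncbs;

/* Register a callback and immediately notify it of the current value. */
static int
prop_register(prop_changed_cb_t *func, void *arg)
{
	if (ncbs == MAX_CBS)
		return (-1);
	cbs[ncbs].func = func;
	cbs[ncbs].arg = arg;
	ncbs++;
	func(arg, prop_value);
	return (0);
}

/* Change the property and notify every registered callback. */
static void
prop_set(uint64_t newval)
{
	int i;

	prop_value = newval;
	for (i = 0; i < ncbs; i++)
		cbs[i].func(cbs[i].arg, newval);
}

static void
print_cb(void *arg, uint64_t newval)
{
	printf("%s -> %llu\n", (char *)arg, (unsigned long long)newval);
}

int
main(void)
{
	(void) prop_register(print_cb, "compression");	/* prints current value */
	prop_set(2);					/* prints again on change */
	return (0);
}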
Example #3
File: dsl_dir.c Project: Lezval/zfs
/* ARGSUSED */
static void
dsl_dir_evict(dmu_buf_t *db, void *arg)
{
	dsl_dir_t *dd = arg;
	int t;
	ASSERTV(dsl_pool_t *dp = dd->dd_pool);

	for (t = 0; t < TXG_SIZE; t++) {
		ASSERT(!txg_list_member(&dp->dp_dirty_dirs, dd, t));
		ASSERT(dd->dd_tempreserved[t] == 0);
		ASSERT(dd->dd_space_towrite[t] == 0);
	}

	if (dd->dd_parent)
		dsl_dir_rele(dd->dd_parent, dd);

	spa_close(dd->dd_pool->dp_spa, dd);

	/*
	 * The props callback list should have been cleaned up by
	 * objset_evict().
	 */
	list_destroy(&dd->dd_prop_cbs);
	mutex_destroy(&dd->dd_lock);
	kmem_free(dd, sizeof (dsl_dir_t));
}
Example #4
/* ARGSUSED */
static void
ready(zio_t *zio, arc_buf_t *abuf, void *arg)
{
	int i;

	blkptr_t *bp = zio->io_bp;
	objset_impl_t *os = arg;
	dnode_phys_t *dnp = &os->os_phys->os_meta_dnode;
	ASSERTV(blkptr_t *bp_orig = &zio->io_bp_orig);

	ASSERT(bp == os->os_rootbp);
	ASSERT(BP_GET_TYPE(bp) == DMU_OT_OBJSET);
	ASSERT(BP_GET_LEVEL(bp) == 0);

	/*
	 * Update rootbp fill count: it should be the number of objects
	 * allocated in the object set (not counting the "special"
	 * objects that are stored in the objset_phys_t -- the meta
	 * dnode and user/group accounting objects).
	 */
	bp->blk_fill = 0;
	for (i = 0; i < dnp->dn_nblkptr; i++)
		bp->blk_fill += dnp->dn_blkptr[i].blk_fill;

	if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
		ASSERT(DVA_EQUAL(BP_IDENTITY(bp), BP_IDENTITY(bp_orig)));
	} else {
		if (zio->io_bp_orig.blk_birth == os->os_synctx->tx_txg)
			(void) dsl_dataset_block_kill(os->os_dsl_dataset,
			    &zio->io_bp_orig, zio, os->os_synctx);
		dsl_dataset_block_born(os->os_dsl_dataset, bp, os->os_synctx);
	}
}
Example #5
static boolean_t
vdev_indirect_mapping_verify(vdev_indirect_mapping_t *vim)
{
	ASSERT(vim != NULL);

	ASSERT(vim->vim_object != 0);
	ASSERT(vim->vim_objset != NULL);
	ASSERT(vim->vim_phys != NULL);
	ASSERT(vim->vim_dbuf != NULL);

	EQUIV(vim->vim_phys->vimp_num_entries > 0,
	    vim->vim_entries != NULL);
	if (vim->vim_phys->vimp_num_entries > 0) {
		ASSERTV(vdev_indirect_mapping_entry_phys_t *last_entry =
		    &vim->vim_entries[vim->vim_phys->vimp_num_entries - 1]);
		ASSERTV(uint64_t offset =
		    DVA_MAPPING_GET_SRC_OFFSET(last_entry));
		ASSERTV(uint64_t size = DVA_GET_ASIZE(&last_entry->vimep_dst));

		ASSERT3U(vim->vim_phys->vimp_max_offset, >=, offset + size);
	}

	return (B_TRUE);
}
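
EQUIV(A, B) asserts that the two conditions have the same truth value, which is how the code ties "there are entries" to "the entries array is mapped". A simplified standalone approximation is shown below; ZFS's real macro is built on its own VERIFY/ASSERT machinery and reports file and line on failure.

#include <assert.h>
#include <stddef.h>

/* Simplified: assert that A and B are both true or both false. */
#define	EQUIV(A, B)	assert(!!(A) == !!(B))

struct mapping {
	size_t num_entries;
	int *entries;
};

int
main(void)
{
	struct mapping m = { .num_entries = 0, .entries = NULL };

	/* Holds: zero entries and a NULL array agree. */
	EQUIV(m.num_entries > 0, m.entries != NULL);
	return (0);
}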
Example #6
static int
feature_get_enabled_txg(spa_t *spa, zfeature_info_t *feature, uint64_t *res)
{
	ASSERTV(uint64_t enabled_txg_obj = spa->spa_feat_enabled_txg_obj);

	ASSERT(zfeature_depends_on(feature->fi_feature,
	    SPA_FEATURE_ENABLED_TXG));

	if (!spa_feature_is_enabled(spa, feature->fi_feature)) {
		return (SET_ERROR(ENOTSUP));
	}

	ASSERT(enabled_txg_obj != 0);

	VERIFY0(zap_lookup(spa->spa_meta_objset, spa->spa_feat_enabled_txg_obj,
	    feature->fi_guid, sizeof (uint64_t), 1, res));

	return (0);
}
Example #7
/*
 * Read data from the cache.  Returns B_TRUE on a cache hit, B_FALSE on a miss.
 */
boolean_t
vdev_cache_read(zio_t *zio)
{
	vdev_cache_t *vc = &zio->io_vd->vdev_cache;
	vdev_cache_entry_t *ve, *ve_search;
	uint64_t cache_offset = P2ALIGN(zio->io_offset, VCBS);
	zio_t *fio;
	ASSERTV(uint64_t cache_phase = P2PHASE(zio->io_offset, VCBS));

	ASSERT(zio->io_type == ZIO_TYPE_READ);

	if (zio->io_flags & ZIO_FLAG_DONT_CACHE)
		return (B_FALSE);

	if (zio->io_size > zfs_vdev_cache_max)
		return (B_FALSE);

	/*
	 * If the I/O straddles two or more cache blocks, don't cache it.
	 */
	if (P2BOUNDARY(zio->io_offset, zio->io_size, VCBS))
		return (B_FALSE);

	ASSERT(cache_phase + zio->io_size <= VCBS);

	mutex_enter(&vc->vc_lock);

	ve_search = kmem_alloc(sizeof (vdev_cache_entry_t), KM_SLEEP);
	ve_search->ve_offset = cache_offset;
	ve = avl_find(&vc->vc_offset_tree, ve_search, NULL);
	kmem_free(ve_search, sizeof (vdev_cache_entry_t));

	if (ve != NULL) {
		if (ve->ve_missed_update) {
			mutex_exit(&vc->vc_lock);
			return (B_FALSE);
		}

		if ((fio = ve->ve_fill_io) != NULL) {
			zio_vdev_io_bypass(zio);
			zio_add_child(zio, fio);
			mutex_exit(&vc->vc_lock);
			VDCSTAT_BUMP(vdc_stat_delegations);
			return (B_TRUE);
		}

		vdev_cache_hit(vc, ve, zio);
		zio_vdev_io_bypass(zio);

		mutex_exit(&vc->vc_lock);
		VDCSTAT_BUMP(vdc_stat_hits);
		return (B_TRUE);
	}

	ve = vdev_cache_allocate(zio);

	if (ve == NULL) {
		mutex_exit(&vc->vc_lock);
		return (B_FALSE);
	}

	fio = zio_vdev_delegated_io(zio->io_vd, cache_offset,
	    ve->ve_data, VCBS, ZIO_TYPE_READ, ZIO_PRIORITY_NOW,
	    ZIO_FLAG_DONT_CACHE, vdev_cache_fill, ve);

	ve->ve_fill_io = fio;
	zio_vdev_io_bypass(zio);
	zio_add_child(zio, fio);

	mutex_exit(&vc->vc_lock);
	zio_nowait(fio);
	VDCSTAT_BUMP(vdc_stat_misses);

	return (B_TRUE);
}
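
The straddle check relies on the power-of-two helpers from the Solaris sysmacros.h family: P2ALIGN rounds an offset down to a cache-block boundary, P2PHASE gives the offset within the block, and P2BOUNDARY tells whether [offset, offset + size) crosses a block boundary. A standalone sketch of the arithmetic follows, using an illustrative 64 KB block size in place of VCBS; the macro bodies mirror the sysmacros.h definitions and require a power-of-two alignment.

#include <stdint.h>
#include <stdio.h>

/* Power-of-two helpers; "align" must be a power of two (as VCBS is). */
#define	P2ALIGN(x, align)	((x) & -(align))
#define	P2PHASE(x, align)	((x) & ((align) - 1))
#define	P2BOUNDARY(off, len, align) \
	(((off) ^ ((off) + (len) - 1)) > (align) - 1)

int
main(void)
{
	uint64_t vcbs = 1ULL << 16;	/* illustrative 64 KB cache block */
	uint64_t off = 70 * 1024;	/* 70 KB into the device */
	uint64_t size = 8 * 1024;	/* 8 KB read */

	printf("block start: %llu\n",
	    (unsigned long long)P2ALIGN(off, vcbs));	/* 65536 */
	printf("phase:       %llu\n",
	    (unsigned long long)P2PHASE(off, vcbs));	/* 6144 */
	printf("straddles:   %d\n",
	    P2BOUNDARY(off, size, vcbs));		/* 0: fits in one block */
	return (0);
}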