Example 1
int
dmu_buf_hold_noread(objset_t *os, uint64_t object, uint64_t offset,
    void *tag, dmu_buf_t **dbp)
{
	dnode_t *dn;
	uint64_t blkid;
	dmu_buf_impl_t *db;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);
	blkid = dbuf_whichblock(dn, offset);
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold(dn, blkid, tag);
	rw_exit(&dn->dn_struct_rwlock);
	dnode_rele(dn, FTAG);

	if (db == NULL) {
		*dbp = NULL;
		return (SET_ERROR(EIO));
	}

	*dbp = &db->db;
	return (err);
}
Example 2
int
dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
    void *tag, dmu_buf_t **dbp, int flags)
{
	dnode_t *dn;
	uint64_t blkid;
	dmu_buf_impl_t *db;
	int err;
	int db_flags = DB_RF_CANFAIL;

	if (flags & DMU_READ_NO_PREFETCH)
		db_flags |= DB_RF_NOPREFETCH;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);
	blkid = dbuf_whichblock(dn, offset);
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold(dn, blkid, tag);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL) {
		err = EIO;
	} else {
		err = dbuf_read(db, NULL, db_flags);
		if (err) {
			dbuf_rele(db, tag);
			db = NULL;
		}
	}

	dnode_rele(dn, FTAG);
	*dbp = &db->db; /* NULL db plus first field offset is NULL */
	return (err);
}
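For context, a minimal caller sketch of the hold/read/release pattern that dmu_buf_hold() implements. It is illustrative only: it assumes the public DMU consumer API declared in sys/dmu.h (dmu_buf_hold(), dmu_buf_rele(), the DMU_READ_* flags), an already-open objset, and a hypothetical object number supplied by the caller.

#include <sys/dmu.h>

/*
 * Illustrative sketch only: hold the buffer covering offset 0 of an
 * existing object, consume its contents, and drop the hold again.
 * The objset and object number are assumed to come from the caller.
 */
static int
read_first_block(objset_t *os, uint64_t object)
{
	dmu_buf_t *db;
	int err;

	/* Take a hold and read the block in; prefetching is allowed. */
	err = dmu_buf_hold(os, object, 0, FTAG, &db, DMU_READ_PREFETCH);
	if (err != 0)
		return (err);

	/* db->db_data now points at db->db_size bytes of object data. */
	/* ... consume the data here ... */

	/* Release the hold with the same tag it was taken under. */
	dmu_buf_rele(db, FTAG);
	return (0);
}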
Example 3
int
dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
    void *tag, dmu_buf_t **dbp)
{
	dnode_t *dn;
	uint64_t blkid;
	dmu_buf_impl_t *db;
	int err;

	err = dnode_hold(os->os, object, FTAG, &dn);
	if (err)
		return (err);
	blkid = dbuf_whichblock(dn, offset);
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold(dn, blkid, tag);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL) {
		err = EIO;
	} else {
		err = dbuf_read(db, NULL, DB_RF_CANFAIL);
		if (err) {
			dbuf_rele(db, tag);
			db = NULL;
		}
	}

	dnode_rele(dn, FTAG);
	*dbp = &db->db;
	return (err);
}
Example 4
/*
 * returns ENOENT, EIO, or 0.
 */
int
dmu_bonus_hold(objset_t *os, uint64_t object, void *tag, dmu_buf_t **dbp)
{
	dnode_t *dn;
	dmu_buf_impl_t *db;
	int error;

	error = dnode_hold(os->os, object, FTAG, &dn);
	if (error)
		return (error);

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (dn->dn_bonus == NULL) {
		rw_exit(&dn->dn_struct_rwlock);
		rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
		if (dn->dn_bonus == NULL)
			dbuf_create_bonus(dn);
	}
	db = dn->dn_bonus;
	rw_exit(&dn->dn_struct_rwlock);

	/* as long as the bonus buf is held, the dnode will be held */
	if (refcount_add(&db->db_holds, tag) == 1)
		VERIFY(dnode_add_ref(dn, db));

	dnode_rele(dn, FTAG);

	VERIFY(0 == dbuf_read(db, NULL, DB_RF_MUST_SUCCEED));

	*dbp = &db->db;
	return (0);
}
Example 5
int
dmu_rm_spill(objset_t *os, uint64_t object, dmu_tx_t *tx)
{
	dnode_t *dn;
	int error;

	error = dnode_hold(os, object, FTAG, &dn);
	if (error)
		return (error);	/* dn is only valid when dnode_hold() succeeds */
	dbuf_rm_spill(dn, tx);
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	dnode_rm_spill(dn, tx);
	rw_exit(&dn->dn_struct_rwlock);
	dnode_rele(dn, FTAG);
	return (error);
}
Example 6
static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
	dnode_t *dn = NULL;
	dmu_tx_hold_t *txh;
	int err;

	if (object != DMU_NEW_OBJECT) {
		err = dnode_hold(os, object, FTAG, &dn);
		if (err != 0) {
			tx->tx_err = err;
			return (NULL);
		}
	}
	txh = dmu_tx_hold_dnode_impl(tx, dn, type, arg1, arg2);
	if (dn != NULL)
		dnode_rele(dn, FTAG);
	return (txh);
}
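The helper above is static; callers normally reach it through the public dmu_tx_hold_*() wrappers. Below is a hedged sketch of that typical call path, assuming the standard transaction API (dmu_tx_create(), dmu_tx_hold_bonus(), dmu_tx_assign(), dmu_tx_commit()/dmu_tx_abort()) and a hypothetical object number.

#include <sys/dmu.h>

/*
 * Illustrative sketch: declare a hold on an object's bonus buffer,
 * assign the transaction to a txg, and commit.  dmu_tx_hold_bonus()
 * is one of the wrappers that ends up in dmu_tx_hold_object_impl().
 */
static int
update_object_metadata(objset_t *os, uint64_t object)
{
	dmu_tx_t *tx = dmu_tx_create(os);
	int err;

	dmu_tx_hold_bonus(tx, object);

	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}

	/* ... dirty the bonus buffer within this transaction ... */

	dmu_tx_commit(tx);
	return (0);
}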
Example 7
/*
 * returns ENOENT, EIO, or 0.
 */
int
dmu_bonus_hold(objset_t *os, uint64_t object, void *tag, dmu_buf_t **dbp)
{
	dnode_t *dn;
	dmu_buf_impl_t *db;
	int error;

	error = dnode_hold(os, object, FTAG, &dn);
	if (error)
		return (error);

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (dn->dn_bonus == NULL) {
		rw_exit(&dn->dn_struct_rwlock);
		rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
		if (dn->dn_bonus == NULL)
			dbuf_create_bonus(dn);
	}
	db = dn->dn_bonus;

	/* as long as the bonus buf is held, the dnode will be held */
	if (refcount_add(&db->db_holds, tag) == 1) {
		VERIFY(dnode_add_ref(dn, db));
		atomic_inc_32(&dn->dn_dbufs_count);
	}

	/*
	 * Wait to drop dn_struct_rwlock until after adding the bonus dbuf's
	 * hold and incrementing the dbuf count to ensure that dnode_move() sees
	 * a dnode hold for every dbuf.
	 */
	rw_exit(&dn->dn_struct_rwlock);

	dnode_rele(dn, FTAG);

	VERIFY(0 == dbuf_read(db, NULL, DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH));

	*dbp = &db->db;
	return (0);
}
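A minimal consumer sketch for dmu_bonus_hold(), again assuming the public API from sys/dmu.h and an open objset; access through db_data is illustrative, since real callers interpret the bonus area according to the object's bonus type.

#include <sys/dmu.h>

/*
 * Illustrative sketch: hold an object's bonus buffer, inspect its
 * contents, and release it.  Propagates whatever dmu_bonus_hold()
 * returns (ENOENT, EIO, or 0, per the comment above).
 */
static int
inspect_bonus(objset_t *os, uint64_t object)
{
	dmu_buf_t *db;
	int err;

	err = dmu_bonus_hold(os, object, FTAG, &db);
	if (err != 0)
		return (err);

	/* db->db_data points at db->db_size bytes of per-object metadata. */
	/* ... interpret according to the object's bonus type ... */

	dmu_buf_rele(db, FTAG);
	return (0);
}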
Example 8
int
dmu_objset_evict_dbufs(objset_t *os)
{
	dnode_t *dn;

	mutex_enter(&os->os_lock);

	/* process the mdn last, since the other dnodes have holds on it */
	list_remove(&os->os_dnodes, DMU_META_DNODE(os));
	list_insert_tail(&os->os_dnodes, DMU_META_DNODE(os));

	/*
	 * Find the first dnode with holds.  We have to do this dance
	 * because dnode_add_ref() only works if you already have a
	 * hold.  If there are no holds then it has no dbufs so OK to
	 * skip.
	 */
	for (dn = list_head(&os->os_dnodes);
	    dn && !dnode_add_ref(dn, FTAG);
	    dn = list_next(&os->os_dnodes, dn))
		continue;

	while (dn) {
		dnode_t *next_dn = dn;

		do {
			next_dn = list_next(&os->os_dnodes, next_dn);
		} while (next_dn && !dnode_add_ref(next_dn, FTAG));

		mutex_exit(&os->os_lock);
		dnode_evict_dbufs(dn);
		dnode_rele(dn, FTAG);
		mutex_enter(&os->os_lock);
		dn = next_dn;
	}
	dn = list_head(&os->os_dnodes);
	mutex_exit(&os->os_lock);
	return (dn != DMU_META_DNODE(os));
}
Example 9
void
dmu_objset_evict_dbufs(objset_t *os)
{
	dnode_t dn_marker;
	dnode_t *dn;

	mutex_enter(&os->os_lock);
	dn = list_head(&os->os_dnodes);
	while (dn != NULL) {
		/*
		 * Skip dnodes without holds.  We have to do this dance
		 * because dnode_add_ref() only works if there is already a
		 * hold.  If the dnode has no holds, then it has no dbufs.
		 */
		if (dnode_add_ref(dn, FTAG)) {
			list_insert_after(&os->os_dnodes, dn, &dn_marker);
			mutex_exit(&os->os_lock);

			dnode_evict_dbufs(dn);
			dnode_rele(dn, FTAG);

			mutex_enter(&os->os_lock);
			dn = list_next(&os->os_dnodes, &dn_marker);
			list_remove(&os->os_dnodes, &dn_marker);
		} else {
			dn = list_next(&os->os_dnodes, dn);
		}
	}
	mutex_exit(&os->os_lock);

	if (DMU_USERUSED_DNODE(os) != NULL) {
		dnode_evict_dbufs(DMU_GROUPUSED_DNODE(os));
		dnode_evict_dbufs(DMU_USERUSED_DNODE(os));
	}
	dnode_evict_dbufs(DMU_META_DNODE(os));
}
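The dn_marker dance above is what lets dmu_objset_evict_dbufs() drop os_lock while evicting a dnode's dbufs and still resume the list scan exactly where it left off. Below is a self-contained, non-ZFS sketch of that marker-node pattern (plain C, simplified intrusive list, no real locking).

#include <stdio.h>

/* Minimal intrusive circular doubly linked list, illustrative only. */
struct node {
	struct node *prev, *next;
	int value;
};

static void
insert_after(struct node *pos, struct node *n)
{
	n->prev = pos;
	n->next = pos->next;
	pos->next->prev = n;
	pos->next = n;
}

static void
remove_node(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

int
main(void)
{
	struct node head = { &head, &head, 0 };
	struct node items[3];
	struct node marker = { NULL, NULL, -1 };

	for (int i = 0; i < 3; i++) {
		items[i].value = i + 1;
		insert_after(head.prev, &items[i]);
	}

	/*
	 * Walk the list with a marker: park the marker after the current
	 * element, do the long-running per-element work (in ZFS this is
	 * where os_lock is dropped), then resume from the marker.  Nodes
	 * inserted or removed elsewhere in the list while the "lock" is
	 * dropped do not invalidate our position.
	 */
	for (struct node *n = head.next; n != &head; ) {
		insert_after(n, &marker);
		printf("visiting %d\n", n->value);	/* per-element work */
		n = marker.next;			/* resume after the marker */
		remove_node(&marker);
	}
	return (0);
}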
Example 10
void
dmu_objset_do_userquota_callbacks(objset_impl_t *os, dmu_tx_t *tx)
{
	dnode_t *dn;
	list_t *list = &os->os_synced_dnodes;
	ASSERTV(static const char zerobuf[DN_MAX_BONUSLEN] = {0});

	ASSERT(list_head(list) == NULL || dmu_objset_userused_enabled(os));

	while ((dn = list_head(list))) {
		dmu_object_type_t bonustype;

		ASSERT(!DMU_OBJECT_IS_SPECIAL(dn->dn_object));
		ASSERT(dn->dn_oldphys);
		ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE ||
		    dn->dn_phys->dn_flags &
		    DNODE_FLAG_USERUSED_ACCOUNTED);

		/* Allocate the user/groupused objects if necessary. */
		if (os->os_userused_dnode->dn_type == DMU_OT_NONE) {
			VERIFY(0 == zap_create_claim(&os->os,
			    DMU_USERUSED_OBJECT,
			    DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
			VERIFY(0 == zap_create_claim(&os->os,
			    DMU_GROUPUSED_OBJECT,
			    DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
		}

		/*
		 * If the object was not previously
		 * accounted, pretend that it was free.
		 */
		if (!(dn->dn_oldphys->dn_flags &
		    DNODE_FLAG_USERUSED_ACCOUNTED)) {
			bzero(dn->dn_oldphys, sizeof (dnode_phys_t));
		}

		/*
		 * If the object was freed, use the previous bonustype.
		 */
		bonustype = dn->dn_phys->dn_bonustype ?
		    dn->dn_phys->dn_bonustype : dn->dn_oldphys->dn_bonustype;
		ASSERT(dn->dn_phys->dn_type != 0 ||
		    (bcmp(DN_BONUS(dn->dn_phys), zerobuf,
		    DN_MAX_BONUSLEN) == 0 &&
		    DN_USED_BYTES(dn->dn_phys) == 0));
		ASSERT(dn->dn_oldphys->dn_type != 0 ||
		    (bcmp(DN_BONUS(dn->dn_oldphys), zerobuf,
		    DN_MAX_BONUSLEN) == 0 &&
		    DN_USED_BYTES(dn->dn_oldphys) == 0));
		used_cbs[os->os_phys->os_type](&os->os, bonustype,
		    DN_BONUS(dn->dn_oldphys), DN_BONUS(dn->dn_phys),
		    DN_USED_BYTES(dn->dn_oldphys),
		    DN_USED_BYTES(dn->dn_phys), tx);

		/*
		 * The mutex is needed here for interlock with dnode_allocate.
		 */
		mutex_enter(&dn->dn_mtx);
		zio_buf_free(dn->dn_oldphys, sizeof (dnode_phys_t));
		dn->dn_oldphys = NULL;
		mutex_exit(&dn->dn_mtx);

		list_remove(list, dn);
		dnode_rele(dn, list);
	}
}