Example #1
void
dmu_objset_evict(dsl_dataset_t *ds, void *arg)
{
	objset_impl_t *osi = arg;
	objset_t os;
	int i;

	for (i = 0; i < TXG_SIZE; i++) {
		ASSERT(list_head(&osi->os_dirty_dnodes[i]) == NULL);
		ASSERT(list_head(&osi->os_free_dnodes[i]) == NULL);
	}

	if (ds) {
		if (!dsl_dataset_is_snapshot(ds)) {
			VERIFY(0 == dsl_prop_unregister(ds, "checksum",
			    checksum_changed_cb, osi));
			VERIFY(0 == dsl_prop_unregister(ds, "compression",
			    compression_changed_cb, osi));
			VERIFY(0 == dsl_prop_unregister(ds, "copies",
			    copies_changed_cb, osi));
		}
		VERIFY(0 == dsl_prop_unregister(ds, "primarycache",
		    primary_cache_changed_cb, osi));
		VERIFY(0 == dsl_prop_unregister(ds, "secondarycache",
		    secondary_cache_changed_cb, osi));
	}

	/*
	 * We should need only a single pass over the dnode list, since
	 * nothing can be added to the list at this point.
	 */
	os.os = osi;
	(void) dmu_objset_evict_dbufs(&os);

	dnode_special_close(osi->os_meta_dnode);
	if (osi->os_userused_dnode) {
		dnode_special_close(osi->os_userused_dnode);
		dnode_special_close(osi->os_groupused_dnode);
	}
	zil_free(osi->os_zil);

	ASSERT3P(list_head(&osi->os_dnodes), ==, NULL);

	VERIFY(arc_buf_remove_ref(osi->os_phys_buf, &osi->os_phys_buf) == 1);
	mutex_destroy(&osi->os_lock);
	mutex_destroy(&osi->os_obj_lock);
	mutex_destroy(&osi->os_user_ptr_lock);
	kmem_free(osi, sizeof (objset_impl_t));
}
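The os.os = osi handle dance above reflects the split, in this older code base, between the public objset_t handle and the objset_impl_t implementation. A minimal sketch of the assumed relationship (the field name is taken from the usage above; the exact struct layout varied by release):

typedef struct objset {
	struct objset_impl *os;	/* implementation this handle points at */
} objset_t;

Examples #4 and #5 below show the later code base, where the two types were merged into a single objset_t and the temporary stack handle is no longer needed.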
Example #2
static void
traverse_zil(traverse_data_t *td, zil_header_t *zh)
{
	uint64_t claim_txg = zh->zh_claim_txg;
	zilog_t *zilog;

	/*
	 * We only want to visit blocks that have been claimed but not yet
	 * replayed; plus, in read-only mode, blocks that are already stable.
	 */
	if (claim_txg == 0 && spa_writeable(td->td_spa))
		return;

	zilog = zil_alloc(spa_get_dsl(td->td_spa)->dp_meta_objset, zh);

	(void) zil_parse(zilog, traverse_zil_block, traverse_zil_record, td,
	    claim_txg);

	zil_free(zilog);
}
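zil_parse() drives the walk here: it invokes one callback per intent-log block and one per log record, starting from the zil_header_t the transient zilog was allocated against. A hedged sketch of the callback shapes this call site implies (the my_* names are hypothetical; in this era both callbacks return int, with 0 meaning continue):

static int
my_zil_block_cb(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
	/* Visit one ZIL block; return 0 to keep parsing. */
	return (0);
}

static int
my_zil_record_cb(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg)
{
	/* Visit one log record within a block. */
	return (0);
}

The surrounding pattern is exactly what the example shows: zil_alloc() a throwaway zilog, zil_parse() with the callback pair and claim_txg, then zil_free() the handle.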
Example #3
static void
dsl_scan_zil(dsl_pool_t *dp, zil_header_t *zh)
{
	uint64_t claim_txg = zh->zh_claim_txg;
	zil_scan_arg_t zsa = { dp, zh };
	zilog_t *zilog;

	/*
	 * We only want to visit blocks that have been claimed but not yet
	 * replayed (or, in read-only mode, blocks that *would* be claimed).
	 */
	if (claim_txg == 0 && spa_writeable(dp->dp_spa))
		return;

	zilog = zil_alloc(dp->dp_meta_objset, zh);

	(void) zil_parse(zilog, dsl_scan_zil_block, dsl_scan_zil_record, &zsa,
	    claim_txg);

	zil_free(zilog);
}
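The zsa aggregate initialized at the top of dsl_scan_zil() bundles the two values the scan callbacks need. Its definition does not appear on this page; a sketch consistent with the { dp, zh } initializer (the field names are an assumption):

typedef struct zil_scan_arg {
	dsl_pool_t	*zsa_dp;	/* pool being scanned (assumed name) */
	zil_header_t	*zsa_zh;	/* header of the ZIL being walked (assumed name) */
} zil_scan_arg_t;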
Example #4
void
dmu_objset_evict(objset_t *os)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;
	int t;

	for (t = 0; t < TXG_SIZE; t++)
		ASSERT(!dmu_objset_is_dirty(os, t));

	if (ds) {
		if (!dsl_dataset_is_snapshot(ds)) {
			VERIFY(0 == dsl_prop_unregister(ds, "checksum",
			    checksum_changed_cb, os));
			VERIFY(0 == dsl_prop_unregister(ds, "compression",
			    compression_changed_cb, os));
			VERIFY(0 == dsl_prop_unregister(ds, "copies",
			    copies_changed_cb, os));
			VERIFY(0 == dsl_prop_unregister(ds, "dedup",
			    dedup_changed_cb, os));
			VERIFY(0 == dsl_prop_unregister(ds, "logbias",
			    logbias_changed_cb, os));
			VERIFY(0 == dsl_prop_unregister(ds, "sync",
			    sync_changed_cb, os));
		}
		VERIFY(0 == dsl_prop_unregister(ds, "primarycache",
		    primary_cache_changed_cb, os));
		VERIFY(0 == dsl_prop_unregister(ds, "secondarycache",
		    secondary_cache_changed_cb, os));
        VERIFY(0 == dsl_prop_unregister(ds, "encryption",
                                        crypt_changed_cb, os));
	}

	if (os->os_sa)
		sa_tear_down(os);

	/*
	 * We should need only a single pass over the dnode list, since
	 * nothing can be added to the list at this point.
	 */
	(void) dmu_objset_evict_dbufs(os);

	dnode_special_close(&os->os_meta_dnode);
	if (DMU_USERUSED_DNODE(os)) {
		dnode_special_close(&os->os_userused_dnode);
		dnode_special_close(&os->os_groupused_dnode);
	}
	zil_free(os->os_zil);

	ASSERT3P(list_head(&os->os_dnodes), ==, NULL);

	VERIFY(arc_buf_remove_ref(os->os_phys_buf, &os->os_phys_buf) == 1);

	/*
	 * This is a barrier to prevent the objset from going away in
	 * dnode_move() until we can safely ensure that the objset is still in
	 * use. We consider the objset valid before the barrier and invalid
	 * after the barrier.
	 */
	rw_enter(&os_lock, RW_READER);
	rw_exit(&os_lock);

	mutex_destroy(&os->os_lock);
	mutex_destroy(&os->os_obj_lock);
	mutex_destroy(&os->os_user_ptr_lock);
	kmem_free(os, sizeof (objset_t));
}
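The empty reader critical section near the end is a pure synchronization barrier, not a data lock. A minimal model of the interaction it settles, assuming dnode_move() holds the global os_lock as writer while it dereferences a dnode's objset (that writer side is not shown on this page):

	/* Assumed shape of the other side, in dnode_move(): */
	rw_enter(&os_lock, RW_WRITER);
	/* ... may dereference dn->dn_objset here ... */
	rw_exit(&os_lock);

Because a reader cannot enter while a writer holds the lock, the enter/exit pair in dmu_objset_evict() cannot complete until any mover that might still see this objset has finished, after which it is safe to tear the objset down and free it.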
Example #5
void
dmu_objset_evict(objset_t *os)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;

	for (int t = 0; t < TXG_SIZE; t++)
		ASSERT(!dmu_objset_is_dirty(os, t));

	if (ds) {
		if (!dsl_dataset_is_snapshot(ds)) {
			VERIFY0(dsl_prop_unregister(ds,
			    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
			    checksum_changed_cb, os));
			VERIFY0(dsl_prop_unregister(ds,
			    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
			    compression_changed_cb, os));
			VERIFY0(dsl_prop_unregister(ds,
			    zfs_prop_to_name(ZFS_PROP_COPIES),
			    copies_changed_cb, os));
			VERIFY0(dsl_prop_unregister(ds,
			    zfs_prop_to_name(ZFS_PROP_DEDUP),
			    dedup_changed_cb, os));
			VERIFY0(dsl_prop_unregister(ds,
			    zfs_prop_to_name(ZFS_PROP_LOGBIAS),
			    logbias_changed_cb, os));
			VERIFY0(dsl_prop_unregister(ds,
			    zfs_prop_to_name(ZFS_PROP_SYNC),
			    sync_changed_cb, os));
		}
		VERIFY0(dsl_prop_unregister(ds,
		    zfs_prop_to_name(ZFS_PROP_PRIMARYCACHE),
		    primary_cache_changed_cb, os));
		VERIFY0(dsl_prop_unregister(ds,
		    zfs_prop_to_name(ZFS_PROP_SECONDARYCACHE),
		    secondary_cache_changed_cb, os));
	}

	if (os->os_sa)
		sa_tear_down(os);

	dmu_objset_evict_dbufs(os);

	dnode_special_close(&os->os_meta_dnode);
	if (DMU_USERUSED_DNODE(os)) {
		dnode_special_close(&os->os_userused_dnode);
		dnode_special_close(&os->os_groupused_dnode);
	}
	zil_free(os->os_zil);

	ASSERT3P(list_head(&os->os_dnodes), ==, NULL);

	VERIFY(arc_buf_remove_ref(os->os_phys_buf, &os->os_phys_buf));

	/*
	 * This is a barrier to prevent the objset from going away in
	 * dnode_move() until we can safely ensure that the objset is still in
	 * use. We consider the objset valid before the barrier and invalid
	 * after the barrier.
	 */
	rw_enter(&os_lock, RW_READER);
	rw_exit(&os_lock);

	mutex_destroy(&os->os_lock);
	mutex_destroy(&os->os_obj_lock);
	mutex_destroy(&os->os_user_ptr_lock);
	kmem_free(os, sizeof (objset_t));
}
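Relative to Example #4, this version swaps the VERIFY(0 == ...) comparisons for VERIFY0(), looks property names up via zfs_prop_to_name() instead of hard-coding strings, and drops the explicit == 1 check on arc_buf_remove_ref(), whose return value is treated as a boolean here. Conceptually, the macro behaves like the following (the real definition varies by release and also reports the offending value on failure):

#define	VERIFY0(x)	VERIFY((x) == 0)	/* conceptual sketch, not the actual macro */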