Example #1
int
__dbuf_stats_hash_table_data(char *buf, size_t size, dmu_buf_impl_t *db)
{
	arc_buf_info_t abi = { 0 };
	dmu_object_info_t doi = { 0 };
	dnode_t *dn = DB_DNODE(db);
	size_t nwritten;

	if (db->db_buf)
		arc_buf_info(db->db_buf, &abi, zfs_dbuf_state_index);

	if (dn)
		__dmu_object_info_from_dnode(dn, &doi);

	nwritten = snprintf(buf, size,
	    "%-16s %-8llu %-8lld %-8lld %-8lld %-8llu %-8llu %-5d %-5d %-5lu | "
	    "%-5d %-5d %-6lld 0x%-6x %-6lu %-8llu %-12llu "
	    "%-6lu %-6lu %-6lu %-6lu %-6lu %-8llu %-8llu %-8d %-5lu | "
	    "%-6d %-6d %-8lu %-8lu %-6llu %-6lu %-5lu %-8llu %-8llu\n",
	    /* dmu_buf_impl_t */
	    spa_name(dn->dn_objset->os_spa),
	    (u_longlong_t)dmu_objset_id(db->db_objset),
	    (longlong_t)db->db.db_object,
	    (longlong_t)db->db_level,
	    (longlong_t)db->db_blkid,
	    (u_longlong_t)db->db.db_offset,
	    (u_longlong_t)db->db.db_size,
	    !!dbuf_is_metadata(db),
	    db->db_state,
	    (ulong_t)refcount_count(&db->db_holds),
	    /* arc_buf_info_t */
	    abi.abi_state_type,
	    abi.abi_state_contents,
	    (longlong_t)abi.abi_state_index,
	    abi.abi_flags,
	    (ulong_t)abi.abi_datacnt,
	    (u_longlong_t)abi.abi_size,
	    (u_longlong_t)abi.abi_access,
	    (ulong_t)abi.abi_mru_hits,
	    (ulong_t)abi.abi_mru_ghost_hits,
	    (ulong_t)abi.abi_mfu_hits,
	    (ulong_t)abi.abi_mfu_ghost_hits,
	    (ulong_t)abi.abi_l2arc_hits,
	    (u_longlong_t)abi.abi_l2arc_dattr,
	    (u_longlong_t)abi.abi_l2arc_asize,
	    abi.abi_l2arc_compress,
	    (ulong_t)abi.abi_holds,
	    /* dmu_object_info_t */
	    doi.doi_type,
	    doi.doi_bonus_type,
	    (ulong_t)doi.doi_data_block_size,
	    (ulong_t)doi.doi_metadata_block_size,
	    (u_longlong_t)doi.doi_bonus_size,
	    (ulong_t)doi.doi_indirection,
	    (ulong_t)refcount_count(&dn->dn_holds),
	    (u_longlong_t)doi.doi_fill_count,
	    (u_longlong_t)doi.doi_max_offset);

	/* snprintf() may report more than was written; clamp to the buffer. */
	return (MIN(nwritten, size));
}
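Every example on this page uses refcount_count() to read the current number
of holds. For reference, the accessor boils down to returning rc_count; the
sketch below is illustrative only (in non-debug builds it is typically just a
macro, and debug builds track individual holders separately):

int64_t
refcount_count(refcount_t *rc)
{
	/* rc_count is maintained by refcount_add()/refcount_remove(). */
	return (rc->rc_count);
}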
Example #2
void
rrw_enter_write(rrwlock_t *rrl)
{
	mutex_enter(&rrl->rr_lock);
	ASSERT(rrl->rr_writer != curthread);

	/* Wait until no anonymous or linked readers and no writer remain. */
	while (refcount_count(&rrl->rr_anon_rcount) > 0 ||
	    refcount_count(&rrl->rr_linked_rcount) > 0 ||
	    rrl->rr_writer != NULL) {
		rrl->rr_writer_wanted = B_TRUE;
		cv_wait(&rrl->rr_cv, &rrl->rr_lock);
	}
	rrl->rr_writer_wanted = B_FALSE;
	rrl->rr_writer = curthread;
	mutex_exit(&rrl->rr_lock);
}
Example #3
void
rrw_enter_read(rrwlock_t *rrl, void *tag)
{
	mutex_enter(&rrl->rr_lock);
#if !defined(DEBUG) && defined(_KERNEL)
	/*
	 * Non-debug kernel fast path: if no writer holds or wants the lock
	 * and per-reader tracking is off, just bump the anonymous count.
	 */
	if (rrl->rr_writer == NULL && !rrl->rr_writer_wanted &&
	    !rrl->rr_track_all) {
		rrl->rr_anon_rcount.rc_count++;
		mutex_exit(&rrl->rr_lock);
		return;
	}
	DTRACE_PROBE(zfs__rrwfastpath__rdmiss);
#endif
	ASSERT(rrl->rr_writer != curthread);
	ASSERT(refcount_count(&rrl->rr_anon_rcount) >= 0);

	while (rrl->rr_writer != NULL || (rrl->rr_writer_wanted &&
	    refcount_is_zero(&rrl->rr_anon_rcount) &&
	    rrn_find(rrl) == NULL))
		cv_wait(&rrl->rr_cv, &rrl->rr_lock);

	if (rrl->rr_writer_wanted || rrl->rr_track_all) {
		/* may or may not be a re-entrant enter */
		rrn_add(rrl, tag);
		(void) refcount_add(&rrl->rr_linked_rcount, tag);
	} else {
		(void) refcount_add(&rrl->rr_anon_rcount, tag);
	}
	ASSERT(rrl->rr_writer == NULL);
	mutex_exit(&rrl->rr_lock);
}
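Examples #2 and #3 only show the two enter paths. A minimal usage sketch,
assuming the companion rrw_init()/rrw_exit() entry points declared alongside
them in rrwlock.h (illustrative only; error handling omitted):

static void
example_rrw_usage(rrwlock_t *rrl)
{
	rrw_enter_read(rrl, FTAG);	/* take a re-entrant read hold */
	/* ... read state protected by rrl ... */
	rrw_exit(rrl, FTAG);		/* release with the same tag */

	rrw_enter_write(rrl);		/* blocks until all readers drain */
	/* ... modify state protected by rrl ... */
	rrw_exit(rrl, FTAG);		/* release the write hold */
}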
Example #4
void
sa_tear_down(objset_t *os)
{
	sa_os_t *sa = os->os_sa;
	sa_lot_t *layout;
	void *cookie;

	kmem_free(sa->sa_user_table, sa->sa_user_table_sz);

	/* Free up attr table */

	sa_free_attr_table(sa);

	cookie = NULL;
	while ((layout = avl_destroy_nodes(&sa->sa_layout_hash_tree,
	    &cookie)) != NULL) {
		sa_idx_tab_t *tab;
		while ((tab = list_head(&layout->lot_idx_tab)) != NULL) {
			ASSERT(refcount_count(&tab->sa_refcount));
			sa_idx_tab_rele(os, tab);
		}
	}

	cookie = NULL;
	while ((layout = avl_destroy_nodes(&sa->sa_layout_num_tree,
	    &cookie)) != NULL) {
		kmem_free(layout->lot_attrs,
		    sizeof (sa_attr_type_t) * layout->lot_attr_count);
		kmem_free(layout, sizeof (sa_lot_t));
	}

	avl_destroy(&sa->sa_layout_hash_tree);
	avl_destroy(&sa->sa_layout_num_tree);

	kmem_free(sa, sizeof (sa_os_t));
	os->os_sa = NULL;
}
Example #5
/*
 * Look up this thread's tracking node for 'rrl', if one exists.
 */
static rrw_node_t *
rrn_find(rrwlock_t *rrl)
{
	rrw_node_t *rn;

	if (refcount_count(&rrl->rr_linked_rcount) == 0)
		return (NULL);

	for (rn = tsd_get(rrw_tsd_key); rn != NULL; rn = rn->rn_next) {
		if (rn->rn_rrl == rrl)
			return (rn);
	}
	return (NULL);
}
Example #6
/*
 * If a node is found for 'rrl', then remove the node from this
 * thread's list and return TRUE; otherwise return FALSE.
 */
static boolean_t
rrn_find_and_remove(rrwlock_t *rrl, void *tag)
{
	rrw_node_t *rn;
	rrw_node_t *prev = NULL;

	if (refcount_count(&rrl->rr_linked_rcount) == 0)
		return (B_FALSE);

	for (rn = tsd_get(rrw_tsd_key); rn != NULL; rn = rn->rn_next) {
		if (rn->rn_rrl == rrl && rn->rn_tag == tag) {
			if (prev)
				prev->rn_next = rn->rn_next;
			else
				VERIFY(tsd_set(rrw_tsd_key, rn->rn_next) == 0);
			kmem_free(rn, sizeof (*rn));
			return (B_TRUE);
		}
		prev = rn;
	}
	return (B_FALSE);
}
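rrn_find() and rrn_find_and_remove() both walk a singly linked, per-thread
list hung off rrw_tsd_key. For completeness, here is a sketch of what the
matching rrn_add() helper looks like, inferred from the fields used above
(treat it as illustrative rather than a verbatim copy of rrwlock.c):

static void
rrn_add(rrwlock_t *rrl, void *tag)
{
	rrw_node_t *rn;

	rn = kmem_alloc(sizeof (*rn), KM_SLEEP);
	rn->rn_rrl = rrl;
	rn->rn_tag = tag;
	rn->rn_next = tsd_get(rrw_tsd_key);	/* push onto this thread's list */
	VERIFY(tsd_set(rrw_tsd_key, rn) == 0);
}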
Example #7
/* ARGSUSED */
static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	int err = 0;

	if (len == 0)
		return;

	(void) refcount_add_many(&txh->txh_space_towrite, len, FTAG);

	if (refcount_count(&txh->txh_space_towrite) > 2 * DMU_MAX_ACCESS)
		err = SET_ERROR(EFBIG);

	if (dn == NULL)
		return;

	/*
	 * For i/o error checking, read the blocks that will be needed
	 * to perform the write: the first and last level-0 blocks (if
	 * they are not aligned, i.e. if they are partial-block writes),
	 * and all the level-1 blocks.
	 */
	if (dn->dn_maxblkid == 0) {
		if (off < dn->dn_datablksz &&
		    (off > 0 || len < dn->dn_datablksz)) {
			err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
			if (err != 0) {
				txh->txh_tx->tx_err = err;
			}
		}
	} else {
		zio_t *zio = zio_root(dn->dn_objset->os_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);

		/* first level-0 block */
		uint64_t start = off >> dn->dn_datablkshift;
		if (P2PHASE(off, dn->dn_datablksz) || len < dn->dn_datablksz) {
			err = dmu_tx_check_ioerr(zio, dn, 0, start);
			if (err != 0) {
				txh->txh_tx->tx_err = err;
			}
		}

		/* last level-0 block */
		uint64_t end = (off + len - 1) >> dn->dn_datablkshift;
		if (end != start && end <= dn->dn_maxblkid &&
		    P2PHASE(off + len, dn->dn_datablksz)) {
			err = dmu_tx_check_ioerr(zio, dn, 0, end);
			if (err != 0) {
				txh->txh_tx->tx_err = err;
			}
		}

		/* level-1 blocks */
		if (dn->dn_nlevels > 1) {
			int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			for (uint64_t i = (start >> shft) + 1;
			    i < end >> shft; i++) {
				err = dmu_tx_check_ioerr(zio, dn, 1, i);
				if (err != 0) {
					txh->txh_tx->tx_err = err;
				}
			}
		}

		err = zio_wait(zio);
		if (err != 0) {
			txh->txh_tx->tx_err = err;
		}
	}
}
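dmu_tx_count_write() is an internal helper; callers reach it indirectly
through the public transaction interfaces. A hedged caller-side sketch,
assuming the standard dmu_tx_create()/dmu_tx_hold_write()/dmu_tx_assign()
API (the actual write step is elided):

static int
example_tx_write(objset_t *os, uint64_t object, uint64_t off, int len)
{
	dmu_tx_t *tx = dmu_tx_create(os);
	int err;

	/* Declare the write so txh_space_towrite gets charged up front. */
	dmu_tx_hold_write(tx, object, off, len);

	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}

	/* ... perform the write against the assigned txg ... */

	dmu_tx_commit(tx);
	return (0);
}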