/*
 * Charge the tx hold for (over)writing one block of the object's block
 * tree at (level, blkid), then recurse upward through the parents so
 * that every indirect block above it is charged exactly once.
 *
 * txh      - the write hold whose towrite/tooverwrite/tounref refcounts
 *            are updated (side effect only; no return value).
 * dn       - dnode being written.
 * db       - the dbuf for this block, or NULL once recursion rises above
 *            the deepest cached dbuf.
 * level    - tree level of this block (0 = data block).
 * blkid    - block id at this level.
 * freeable - B_TRUE if a child of this block was freeable; a block is
 *            treated as an overwrite (tooverwrite) rather than a fresh
 *            write (towrite) when it or a descendant is freeable.
 * history  - per-level array of the last blkid counted, used to avoid
 *            double-counting shared ancestors across calls.
 */
static void
dmu_tx_count_twig(dmu_tx_hold_t *txh, dnode_t *dn, dmu_buf_impl_t *db,
    int level, uint64_t blkid, boolean_t freeable, uint64_t *history)
{
	objset_t *os = dn->dn_objset;
	dsl_dataset_t *ds = os->os_dsl_dataset;
	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	dmu_buf_impl_t *parent = NULL;
	blkptr_t *bp = NULL;
	uint64_t space;

	/*
	 * Stop when we rise above the root of the tree, or when this
	 * block was already counted by a previous call (history dedup).
	 */
	if (level >= dn->dn_nlevels || history[level] == blkid)
		return;

	history[level] = blkid;

	/* Level-0 blocks are data-sized; all others are indirect-sized. */
	space = (level == 0) ? dn->dn_datablksz : (1ULL << dn->dn_indblkshift);

	if (db == NULL || db == dn->dn_dbuf) {
		/* No cached dbuf for this level; only legal above level 0. */
		ASSERT(level != 0);
		db = NULL;
	} else {
		ASSERT(DB_DNODE(db) == dn);
		ASSERT(db->db_level == level);
		ASSERT(db->db.db_size == space);
		ASSERT(db->db_blkid == blkid);
		bp = db->db_blkptr;
		parent = db->db_parent;
	}

	/*
	 * A block is freeable only if it actually exists on disk (bp) and
	 * either a child already was, or the dataset says its birth txg
	 * makes it freeable.
	 */
	freeable = (bp && (freeable ||
	    dsl_dataset_block_freeable(ds, bp, bp->blk_birth)));

	/* Freeable blocks count as overwrites; otherwise as new writes. */
	if (freeable) {
		(void) refcount_add_many(&txh->txh_space_tooverwrite,
		    space, FTAG);
	} else {
		(void) refcount_add_many(&txh->txh_space_towrite,
		    space, FTAG);
	}

	/* Existing blocks will be unreferenced when overwritten. */
	if (bp) {
		(void) refcount_add_many(&txh->txh_space_tounref,
		    bp_get_dsize(os->os_spa, bp), FTAG);
	}

	/* Recurse to the parent indirect block. */
	dmu_tx_count_twig(txh, dn, parent, level + 1,
	    blkid >> epbs, freeable, history);
}
/*
 * Add a single reference to rc on behalf of holder.
 * Thin convenience wrapper around refcount_add_many() with a count of
 * one; returns the updated reference count.
 */
int64_t
refcount_add(refcount_t *rc, void *holder)
{
	int64_t count;

	count = refcount_add_many(rc, 1, holder);
	return (count);
}
/*
 * NOTE(review): this legacy variant of dmu_tx_count_write() appears
 * truncated in this file -- it jumps to an "out:" label that is never
 * defined in the visible text, and the function's closing brace is
 * missing (the braces below close only the while loop and the "if (dn)"
 * block).  A rewritten dmu_tx_count_write() with the same signature
 * follows later in the file; having both definitions will not compile.
 * Confirm which version is intended and remove the other.
 *
 * Purpose (as written): estimate the space consumed by a write of len
 * bytes at offset off into the held dnode -- charging the hold's
 * towrite/tooverwrite/tounref/fudge refcounts -- and pre-read the
 * blocks the write will need so any i/o errors are reported now
 * (via tx_err) rather than inside the assigned txg.
 */
/* ARGSUSED */
static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	uint64_t start, end, i;
	int min_bs, max_bs, min_ibs, max_ibs, epbs, bits;
	int err = 0;

	if (len == 0)
		return;

	/* Worst-case shifts, refined below once the dnode is consulted. */
	min_bs = SPA_MINBLOCKSHIFT;
	max_bs = highbit64(txh->txh_tx->tx_objset->os_recordsize) - 1;
	min_ibs = DN_MIN_INDBLKSHIFT;
	max_ibs = DN_MAX_INDBLKSHIFT;

	if (dn) {
		uint64_t history[DN_MAX_LEVELS];
		int nlvls = dn->dn_nlevels;
		int delta;

		/*
		 * For i/o error checking, read the first and last level-0
		 * blocks (if they are not aligned), and all the level-1
		 * blocks.
		 */
		if (dn->dn_maxblkid == 0) {
			/* Single-block object: at most block 0 to read. */
			delta = dn->dn_datablksz;
			start = (off < dn->dn_datablksz) ? 0 : 1;
			end = (off+len <= dn->dn_datablksz) ? 0 : 1;
			if (start == 0 && (off > 0 || len < dn->dn_datablksz)) {
				/* Partial overwrite of block 0. */
				err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
				if (err)
					goto out;
				delta -= off;
			}
		} else {
			zio_t *zio = zio_root(dn->dn_objset->os_spa,
			    NULL, NULL, ZIO_FLAG_CANFAIL);

			/* first level-0 block */
			start = off >> dn->dn_datablkshift;
			if (P2PHASE(off, dn->dn_datablksz) ||
			    len < dn->dn_datablksz) {
				err = dmu_tx_check_ioerr(zio, dn, 0, start);
				if (err)
					goto out;
			}

			/* last level-0 block */
			end = (off+len-1) >> dn->dn_datablkshift;
			if (end != start && end <= dn->dn_maxblkid &&
			    P2PHASE(off+len, dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(zio, dn, 0, end);
				if (err)
					goto out;
			}

			/* level-1 blocks */
			if (nlvls > 1) {
				int shft =
				    dn->dn_indblkshift - SPA_BLKPTRSHIFT;
				for (i = (start>>shft)+1; i < end>>shft; i++) {
					err = dmu_tx_check_ioerr(zio, dn, 1, i);
					if (err)
						goto out;
				}
			}

			err = zio_wait(zio);
			if (err)
				goto out;
			delta = P2NPHASE(off, dn->dn_datablksz);
		}

		min_ibs = max_ibs = dn->dn_indblkshift;
		if (dn->dn_maxblkid > 0) {
			/*
			 * The blocksize can't change,
			 * so we can make a more precise estimate.
			 */
			ASSERT(dn->dn_datablkshift != 0);
			min_bs = max_bs = dn->dn_datablkshift;
		} else {
			/*
			 * The blocksize can increase up to the recordsize,
			 * or if it is already more than the recordsize,
			 * up to the next power of 2.
			 */
			min_bs = highbit64(dn->dn_datablksz - 1);
			max_bs = MAX(max_bs, highbit64(dn->dn_datablksz - 1));
		}

		/*
		 * If this write is not off the end of the file
		 * we need to account for overwrites/unref.
		 */
		if (start <= dn->dn_maxblkid) {
			/* -1 == "no block counted yet" at each level. */
			for (int l = 0; l < DN_MAX_LEVELS; l++)
				history[l] = -1ULL;
		}
		while (start <= dn->dn_maxblkid) {
			dmu_buf_impl_t *db;

			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			err = dbuf_hold_impl(dn, 0, start,
			    FALSE, FALSE, FTAG, &db);
			rw_exit(&dn->dn_struct_rwlock);
			if (err) {
				/* A hold failure poisons the whole tx. */
				txh->txh_tx->tx_err = err;
				return;
			}

			/* Charge this block and its ancestors. */
			dmu_tx_count_twig(txh, dn, db, 0, start,
			    B_FALSE, history);
			dbuf_rele(db, FTAG);
			if (++start > end) {
				/*
				 * Account for new indirects appearing
				 * before this IO gets assigned into a txg.
				 */
				bits = 64 - min_bs;
				epbs = min_ibs - SPA_BLKPTRSHIFT;
				for (bits -= epbs * (nlvls - 1);
				    bits >= 0; bits -= epbs) {
					(void) refcount_add_many(
					    &txh->txh_fudge,
					    1ULL << max_ibs, FTAG);
				}
				goto out;
			}
			/* Advance by the remainder of the current block. */
			off += delta;
			if (len >= delta)
				len -= delta;
			delta = dn->dn_datablksz;
		}
	}
/* ARGSUSED */ static void dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len) { dnode_t *dn = txh->txh_dnode; int err = 0; if (len == 0) return; (void) refcount_add_many(&txh->txh_space_towrite, len, FTAG); if (refcount_count(&txh->txh_space_towrite) > 2 * DMU_MAX_ACCESS) err = SET_ERROR(EFBIG); if (dn == NULL) return; /* * For i/o error checking, read the blocks that will be needed * to perform the write: the first and last level-0 blocks (if * they are not aligned, i.e. if they are partial-block writes), * and all the level-1 blocks. */ if (dn->dn_maxblkid == 0) { if (off < dn->dn_datablksz && (off > 0 || len < dn->dn_datablksz)) { err = dmu_tx_check_ioerr(NULL, dn, 0, 0); if (err != 0) { txh->txh_tx->tx_err = err; } } } else { zio_t *zio = zio_root(dn->dn_objset->os_spa, NULL, NULL, ZIO_FLAG_CANFAIL); /* first level-0 block */ uint64_t start = off >> dn->dn_datablkshift; if (P2PHASE(off, dn->dn_datablksz) || len < dn->dn_datablksz) { err = dmu_tx_check_ioerr(zio, dn, 0, start); if (err != 0) { txh->txh_tx->tx_err = err; } } /* last level-0 block */ uint64_t end = (off + len - 1) >> dn->dn_datablkshift; if (end != start && end <= dn->dn_maxblkid && P2PHASE(off + len, dn->dn_datablksz)) { err = dmu_tx_check_ioerr(zio, dn, 0, end); if (err != 0) { txh->txh_tx->tx_err = err; } } /* level-1 blocks */ if (dn->dn_nlevels > 1) { int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT; for (uint64_t i = (start >> shft) + 1; i < end >> shft; i++) { err = dmu_tx_check_ioerr(zio, dn, 1, i); if (err != 0) { txh->txh_tx->tx_err = err; } } } err = zio_wait(zio); if (err != 0) { txh->txh_tx->tx_err = err; } }