/*
 * Return (in *dasizep) the amount of space on the deadlist which is:
 * mintxg < blk_birth <= maxtxg
 */
int
bplist_space_birthrange(bplist_t *bpl, uint64_t mintxg, uint64_t maxtxg,
    uint64_t *dasizep)
{
	uint64_t total = 0;
	uint64_t cursor = 0;
	blkptr_t blk;
	int err;

	/*
	 * As an optimization, a query covering the whole txg range is
	 * answered straight from the cached byte count in the phys block
	 * rather than iterating over the bps.
	 */
	if (maxtxg == UINT64_MAX && mintxg < TXG_INITIAL) {
		mutex_enter(&bpl->bpl_lock);
		err = bplist_hold(bpl);
		if (err == 0)
			*dasizep = bpl->bpl_phys->bpl_bytes;
		mutex_exit(&bpl->bpl_lock);
		return (err);
	}

	/* Walk every entry, accumulating those born in (mintxg, maxtxg]. */
	for (;;) {
		err = bplist_iterate(bpl, &cursor, &blk);
		if (err != 0)
			break;
		if (blk.blk_birth <= mintxg || blk.blk_birth > maxtxg)
			continue;
		total += bp_get_dasize(dmu_objset_spa(bpl->bpl_mos), &blk);
	}

	/* ENOENT from the iterator just means "end of list". */
	if (err == ENOENT)
		err = 0;
	*dasizep = total;
	return (err);
}
/*
 * Append *bp to the end of the bplist, dirtying the affected buffers
 * and updating the entry/byte (and, when available, compressed /
 * uncompressed) accounting in the bplist's phys block.
 *
 * Returns 0 on success or an errno from bplist_hold()/bplist_cache().
 * bpl_lock is taken and released internally on every path.
 */
int
bplist_enqueue(bplist_t *bpl, blkptr_t *bp, dmu_tx_t *tx)
{
	uint64_t blk, off;
	blkptr_t *bparray;
	int err;

	ASSERT(!BP_IS_HOLE(bp));
	mutex_enter(&bpl->bpl_lock);
	err = bplist_hold(bpl);
	if (err) {
		/*
		 * FIX: previously returned here with bpl_lock still held,
		 * deadlocking the next caller.  Drop the lock as the
		 * bplist_cache() error path below already does.
		 */
		mutex_exit(&bpl->bpl_lock);
		return (err);
	}

	/* Locate the block and offset of the next free entry slot. */
	blk = bpl->bpl_phys->bpl_entries >> bpl->bpl_bpshift;
	off = P2PHASE(bpl->bpl_phys->bpl_entries, 1ULL << bpl->bpl_bpshift);

	err = bplist_cache(bpl, blk);
	if (err) {
		mutex_exit(&bpl->bpl_lock);
		return (err);
	}

	dmu_buf_will_dirty(bpl->bpl_cached_dbuf, tx);
	bparray = bpl->bpl_cached_dbuf->db_data;
	bparray[off] = *bp;

	/* We never need the fill count. */
	bparray[off].blk_fill = 0;

	/* The bplist will compress better if we can leave off the checksum */
	bzero(&bparray[off].blk_cksum, sizeof (bparray[off].blk_cksum));

	dmu_buf_will_dirty(bpl->bpl_dbuf, tx);
	bpl->bpl_phys->bpl_entries++;
	bpl->bpl_phys->bpl_bytes +=
	    bp_get_dasize(dmu_objset_spa(bpl->bpl_mos), bp);
	if (bpl->bpl_havecomp) {
		bpl->bpl_phys->bpl_comp += BP_GET_PSIZE(bp);
		bpl->bpl_phys->bpl_uncomp += BP_GET_UCSIZE(bp);
	}
	mutex_exit(&bpl->bpl_lock);

	return (0);
}