/*
 * Release the dbuf and range lock held for a log write, record the
 * block for later vdev flushing on success, and free the zgd.
 */
static void
zvol_get_done(zgd_t *zgd, int error)
{
	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	zfs_range_unlock(zgd->zgd_rl);

	if (error == 0 && zgd->zgd_bp)
		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

	kmem_free(zgd, sizeof (zgd_t));
}
/*
 * Start a log block write and advance to the next log block.
 * Calls are serialized.
 */
static lwb_t *
zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb)
{
	lwb_t *nlwb;
	zil_trailer_t *ztp = (zil_trailer_t *)(lwb->lwb_buf + lwb->lwb_sz) - 1;
	spa_t *spa = zilog->zl_spa;
	blkptr_t *bp = &ztp->zit_next_blk;
	uint64_t txg;
	uint64_t zil_blksz;
	int error;

	ASSERT(lwb->lwb_nused <= ZIL_BLK_DATA_SZ(lwb));

	/*
	 * Allocate the next block and save its address in this block
	 * before writing it in order to establish the log chain.
	 * Note that if the allocation of nlwb synced before we wrote
	 * the block that points at it (lwb), we'd leak it if we crashed.
	 * Therefore, we don't do txg_rele_to_sync() until zil_lwb_write_done().
	 */
	txg = txg_hold_open(zilog->zl_dmu_pool, &lwb->lwb_txgh);
	txg_rele_to_quiesce(&lwb->lwb_txgh);

	/*
	 * Pick a ZIL block size. We request a size that is the
	 * maximum of the previous used size, the current used size and
	 * the amount waiting in the queue.
	 */
	zil_blksz = MAX(zilog->zl_prev_used,
	    zilog->zl_cur_used + sizeof (*ztp));
	zil_blksz = MAX(zil_blksz, zilog->zl_itx_list_sz + sizeof (*ztp));
	zil_blksz = P2ROUNDUP_TYPED(zil_blksz, ZIL_MIN_BLKSZ, uint64_t);
	if (zil_blksz > ZIL_MAX_BLKSZ)
		zil_blksz = ZIL_MAX_BLKSZ;

	BP_ZERO(bp);
	/* pass the old blkptr in order to spread log blocks across devs */
	error = zio_alloc_blk(spa, zil_blksz, bp, &lwb->lwb_blk, txg);
	if (error) {
		dmu_tx_t *tx = dmu_tx_create_assigned(zilog->zl_dmu_pool, txg);

		/*
		 * We dirty the dataset to ensure that zil_sync() will
		 * be called to remove this lwb from our zl_lwb_list.
		 * Failing to do so may leave an lwb with a NULL lwb_buf
		 * hanging around on the zl_lwb_list.
		 */
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		dmu_tx_commit(tx);

		/*
		 * Since we've just experienced an allocation failure,
		 * terminate the current lwb and send it on its way.
		 */
		ztp->zit_pad = 0;
		ztp->zit_nused = lwb->lwb_nused;
		ztp->zit_bt.zbt_cksum = lwb->lwb_blk.blk_cksum;
		zio_nowait(lwb->lwb_zio);

		/*
		 * By returning NULL the caller will call txg_wait_synced().
		 */
		return (NULL);
	}

	ASSERT3U(bp->blk_birth, ==, txg);
	ztp->zit_pad = 0;
	ztp->zit_nused = lwb->lwb_nused;
	ztp->zit_bt.zbt_cksum = lwb->lwb_blk.blk_cksum;
	bp->blk_cksum = lwb->lwb_blk.blk_cksum;
	bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;

	/*
	 * Allocate a new log write buffer (lwb).
	 */
	nlwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
	nlwb->lwb_zilog = zilog;
	nlwb->lwb_blk = *bp;
	nlwb->lwb_nused = 0;
	nlwb->lwb_sz = BP_GET_LSIZE(&nlwb->lwb_blk);
	nlwb->lwb_buf = zio_buf_alloc(nlwb->lwb_sz);
	nlwb->lwb_max_txg = txg;
	nlwb->lwb_zio = NULL;

	/*
	 * Put the new lwb at the end of the log chain.
	 */
	mutex_enter(&zilog->zl_lock);
	list_insert_tail(&zilog->zl_lwb_list, nlwb);
	mutex_exit(&zilog->zl_lock);

	/* Record the block for later vdev flushing */
	zil_add_block(zilog, &lwb->lwb_blk);

	/*
	 * Kick off the write for the old log block.
	 */
	dprintf_bp(&lwb->lwb_blk, "lwb %p txg %llu: ", lwb, txg);
	ASSERT(lwb->lwb_zio);
	zio_nowait(lwb->lwb_zio);

	return (nlwb);
}
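/*
 * Worked example for the size selection in zil_lwb_write_start()
 * (illustrative only, not part of the original source; assumes
 * ZIL_MIN_BLKSZ is 4K): if the largest of zl_prev_used,
 * zl_cur_used + sizeof (*ztp), and zl_itx_list_sz + sizeof (*ztp)
 * is 9000 bytes, then P2ROUNDUP_TYPED(9000, 4096, uint64_t)
 * rounds up to the next 4K multiple, 12288, so a 12K log block is
 * allocated, subject to the ZIL_MAX_BLKSZ cap.
 */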