/*
 * Clear a dataset's log chain by zeroing out the log header block
 * pointer, discarding any log records it refers to.
 */
/* ARGSUSED */
int
zil_clear_log_chain(char *osname, void *txarg)
{
	zilog_t *zilog;
	zil_header_t *zh;
	objset_t *os;
	dmu_tx_t *tx;
	int error;

	error = dmu_objset_open(osname, DMU_OST_ANY, DS_MODE_USER, &os);
	if (error) {
		cmn_err(CE_WARN, "can't open objset for %s", osname);
		return (0);
	}

	zilog = dmu_objset_zil(os);
	tx = dmu_tx_create(zilog->zl_os);
	(void) dmu_tx_assign(tx, TXG_WAIT);
	zh = zil_header_in_syncing_context(zilog);
	BP_ZERO(&zh->zh_log);
	dsl_dataset_dirty(dmu_objset_ds(os), tx);
	dmu_tx_commit(tx);
	dmu_objset_close(os);

	return (0);
}
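/*
 * For illustration: zil_clear_log_chain() above is a dmu_objset_find()
 * callback, applied to every dataset in the pool when its intent logs are
 * being discarded.  A minimal sketch of such a call site (the surrounding
 * SPA code is an assumption, not taken from this file):
 *
 *	(void) dmu_objset_find(spa_name(spa), zil_clear_log_chain, NULL,
 *	    DS_FIND_CHILDREN);
 */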
int
zil_claim(char *osname, void *txarg)
{
	dmu_tx_t *tx = txarg;
	uint64_t first_txg = dmu_tx_get_txg(tx);
	zilog_t *zilog;
	zil_header_t *zh;
	objset_t *os;
	int error;

	error = dmu_objset_open(osname, DMU_OST_ANY, DS_MODE_USER, &os);
	if (error) {
		cmn_err(CE_WARN, "can't open objset for %s", osname);
		return (0);
	}

	zilog = dmu_objset_zil(os);
	zh = zil_header_in_syncing_context(zilog);

	/*
	 * Record here whether the zil has any records to replay.
	 * If the header block pointer is null or the block points
	 * to the stubby then we know there are no valid log records.
	 * We use the header to store this state as the zilog gets
	 * freed later in dmu_objset_close().
	 * The flags (and the rest of the header fields) are cleared in
	 * zil_sync() as a result of a zil_destroy(), after replaying the log.
	 *
	 * Note, the intent log can be empty but still need the
	 * stubby to be claimed.
	 */
	if (!zil_empty(zilog))
		zh->zh_flags |= ZIL_REPLAY_NEEDED;

	/*
	 * Claim all log blocks if we haven't already done so, and remember
	 * the highest claimed sequence number.  This ensures that if we can
	 * read only part of the log now (e.g. due to a missing device),
	 * but we can read the entire log later, we will not try to replay
	 * or destroy beyond the last block we successfully claimed.
	 */
	ASSERT3U(zh->zh_claim_txg, <=, first_txg);
	if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
		zh->zh_claim_txg = first_txg;
		zh->zh_claim_seq = zil_parse(zilog, zil_claim_log_block,
		    zil_claim_log_record, tx, first_txg);
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
	}

	ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
	dmu_objset_close(os);
	return (0);
}
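/*
 * For reference, zil_claim() is driven from pool load, one dataset at a
 * time, with a transaction assigned to the pool's first txg.  A minimal
 * sketch of the spa_load() call site (exact surrounding code is assumed):
 *
 *	tx = dmu_tx_create_assigned(spa_get_dsl(spa), spa_first_txg(spa));
 *	(void) dmu_objset_find(spa_name(spa), zil_claim, tx,
 *	    DS_FIND_CHILDREN);
 *	dmu_tx_commit(tx);
 */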
/*
 * Walk the log chain from the header, verifying that each block in the
 * chain can be read.  A checksum mismatch is how the end of the chain is
 * normally detected; any other error means the chain is broken.
 */
/* ARGSUSED */
int
zil_check_log_chain(char *osname, void *txarg)
{
	zilog_t *zilog;
	zil_header_t *zh;
	blkptr_t blk;
	arc_buf_t *abuf;
	objset_t *os;
	char *lrbuf;
	zil_trailer_t *ztp;
	int error;

	error = dmu_objset_open(osname, DMU_OST_ANY, DS_MODE_USER, &os);
	if (error) {
		cmn_err(CE_WARN, "can't open objset for %s", osname);
		return (0);
	}

	zilog = dmu_objset_zil(os);
	zh = zil_header_in_syncing_context(zilog);
	blk = zh->zh_log;
	if (BP_IS_HOLE(&blk)) {
		dmu_objset_close(os);
		return (0);	/* no chain */
	}

	for (;;) {
		error = zil_read_log_block(zilog, &blk, &abuf);
		if (error)
			break;
		lrbuf = abuf->b_data;
		ztp = (zil_trailer_t *)(lrbuf + BP_GET_LSIZE(&blk)) - 1;
		blk = ztp->zit_next_blk;
		VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
	}
	dmu_objset_close(os);
	if (error == ECKSUM)
		return (0);	/* normal end of chain */
	return (error);
}
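/*
 * For illustration: zil_check_log_chain() is another dmu_objset_find()
 * callback, used when checking whether the pool's intent logs are intact
 * (e.g. after a log device has gone missing).  A sketch of such a caller,
 * with the SPA-side details assumed rather than taken from this file:
 *
 *	error = dmu_objset_find(spa->spa_name, zil_check_log_chain, NULL,
 *	    DS_FIND_CHILDREN);
 *
 * A nonzero result here means at least one dataset's chain is unreadable.
 */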
/*
 * Called in syncing context to free committed log blocks and update log header.
 */
void
zil_sync(zilog_t *zilog, dmu_tx_t *tx)
{
	zil_header_t *zh = zil_header_in_syncing_context(zilog);
	uint64_t txg = dmu_tx_get_txg(tx);
	spa_t *spa = zilog->zl_spa;
	lwb_t *lwb;

	/*
	 * We don't zero out zl_destroy_txg, so make sure we don't try
	 * to destroy it twice.
	 */
	if (spa_sync_pass(spa) != 1)
		return;

	mutex_enter(&zilog->zl_lock);

	ASSERT(zilog->zl_stop_sync == 0);

	zh->zh_replay_seq = zilog->zl_replayed_seq[txg & TXG_MASK];

	if (zilog->zl_destroy_txg == txg) {
		blkptr_t blk = zh->zh_log;

		ASSERT(list_head(&zilog->zl_lwb_list) == NULL);

		bzero(zh, sizeof (zil_header_t));
		bzero(zilog->zl_replayed_seq, sizeof (zilog->zl_replayed_seq));

		if (zilog->zl_keep_first) {
			/*
			 * If this block was part of a log chain that couldn't
			 * be claimed because a device was missing during
			 * zil_claim(), but that device later returns,
			 * then this block could erroneously appear valid.
			 * To guard against this, assign a new GUID to the new
			 * log chain so it doesn't matter what blk points to.
			 */
			zil_init_log_chain(zilog, &blk);
			zh->zh_log = blk;
		}
	}

	while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
		zh->zh_log = lwb->lwb_blk;
		if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
			break;
		list_remove(&zilog->zl_lwb_list, lwb);
		zio_free_blk(spa, &lwb->lwb_blk, txg);
		kmem_cache_free(zil_lwb_cache, lwb);

		/*
		 * If we don't have anything left in the lwb list then
		 * we've had an allocation failure and we need to zero
		 * out the zil_header blkptr so that we don't end
		 * up freeing the same block twice.
		 */
		if (list_head(&zilog->zl_lwb_list) == NULL)
			BP_ZERO(&zh->zh_log);
	}

	mutex_exit(&zilog->zl_lock);
}
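/*
 * For reference, zil_sync() is invoked once per dataset per txg from
 * dmu_objset_sync(), roughly as follows (a sketch; the surrounding
 * syncing-context code is assumed):
 *
 *	zil_sync(os->os_zil, tx);
 */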