/*
 * Walk the journal's checkpoint-transaction list, pushing each
 * transaction's checkpointed buffers out to disk and cleaning up
 * transactions whose buffers are all written.  Returns 0 once the
 * checkpoint list is empty.
 *
 * NOTE(review): `nblocks' is unused here — presumably a target for how
 * much log space the caller wants freed; confirm against callers.
 */
int log_do_checkpoint (journal_t *journal, int nblocks)
{
	transaction_t *transaction, *last_transaction, *next_transaction;
	int batch_count = 0;
	void *bhs[64];

repeat:
	transaction = journal->j_checkpoint_transactions;
	if (transaction == ((void *)0))
		return 0;

	last_transaction = transaction->t_cpprev;
	next_transaction = transaction;
	do {
		struct journal_head *jh, *last_jh, *next_jh;
		int drop_count = 0;
		int cleanup_ret, retry = 0;

		transaction = next_transaction;
		next_transaction = transaction->t_cpnext;
		jh = transaction->t_checkpoint_list;
		last_jh = jh->b_cpprev;
		next_jh = jh;
		do {
			jh = next_jh;
			next_jh = jh->b_cpnext;
			retry = __flush_buffer(journal, jh, bhs,
						&batch_count, &drop_count);
		} while (jh != last_jh && !retry);

		/*
		 * BUG FIX: __flush_buffer() only submits the batch once it
		 * fills completely.  A partially-filled batch left over at
		 * the end of the walk was never written out, leaving those
		 * buffers queued in bhs[] indefinitely.  Submit any
		 * leftovers before deciding whether to rescan.
		 */
		if (batch_count) {
			__flush_batch(bhs, &batch_count);
			goto repeat;
		}
		if (retry)
			goto repeat;

		/*
		 * Nothing left to write for this transaction: try to
		 * retire it from the checkpoint list entirely.
		 */
		cleanup_ret = __cleanup_transaction(journal, transaction);
		goto repeat;	/* __cleanup may have dropped the lock */
	} while (transaction != last_transaction);

	/* Not reachable (the loop always restarts via `repeat'), but a
	 * non-void function must not fall off its end. */
	return 0;
}
/* @@@ `nblocks' is unused. Should it be used? */
/*
 * Perform a checkpoint pass: first retire any transactions that no
 * longer need checkpointing, then write out checkpointed buffers in
 * batches and clean up finished transactions, and finally trim the
 * journal tail again.
 *
 * Returns cleanup_journal_tail()'s error code (<0) on failure,
 * 0 on success.
 */
int log_do_checkpoint (journal_t *journal, int nblocks)
{
	transaction_t *transaction, *last_transaction, *next_transaction;
	int result;
	int batch_count = 0;
	struct buffer_head *bhs[NR_BATCH];

	jbd_debug(1, "Start checkpoint\n");

	/*
	 * First thing: if there are any transactions in the log which
	 * don't need checkpointing, just eliminate them from the
	 * journal straight away.
	 */
	result = cleanup_journal_tail(journal);
	jbd_debug(1, "cleanup_journal_tail returned %d\n", result);
	if (result <= 0)
		return result;

	/*
	 * OK, we need to start writing disk blocks.
	 *
	 * (A `target' of a quarter of the log used to be computed here
	 * but was never consulted — dead code, removed.)
	 */
	/*
	 * AKPM: check this code.  I had a feeling a while back that it
	 * degenerates into a busy loop at unmount time.
	 */
	spin_lock(&journal_datalist_lock);
repeat:
	transaction = journal->j_checkpoint_transactions;
	if (transaction == NULL)
		goto done;

	last_transaction = transaction->t_cpprev;
	next_transaction = transaction;
	do {
		struct journal_head *jh, *last_jh, *next_jh;
		int drop_count = 0;
		int cleanup_ret, retry = 0;

		transaction = next_transaction;
		next_transaction = transaction->t_cpnext;
		jh = transaction->t_checkpoint_list;
		last_jh = jh->b_cpprev;
		next_jh = jh;
		do {
			jh = next_jh;
			next_jh = jh->b_cpnext;
			retry = __flush_buffer(journal, jh, bhs,
						&batch_count, &drop_count);
		} while (jh != last_jh && !retry);

		/* Submit any partially-filled batch before rescanning. */
		if (batch_count) {
			__flush_batch(bhs, &batch_count);
			goto repeat;
		}
		if (retry)
			goto repeat;
		/*
		 * We have walked the whole transaction list without
		 * finding anything to write to disk.  We had better be
		 * able to make some progress or we are in trouble.
		 */
		cleanup_ret = __cleanup_transaction(journal, transaction);
		J_ASSERT(drop_count != 0 || cleanup_ret != 0);
		goto repeat;	/* __cleanup may have dropped lock */
	} while (transaction != last_transaction);

done:
	spin_unlock(&journal_datalist_lock);
	result = cleanup_journal_tail(journal);
	if (result < 0)
		return result;
	return 0;
}
/*
 * Perform an actual checkpoint. We don't write out only enough to
 * satisfy the current blocked requests: rather we submit a reasonably
 * sized chunk of the outstanding data to disk at once for
 * efficiency. __log_wait_for_space() will retry if we didn't free enough.
 *
 * However, we _do_ take into account the amount requested so that once
 * the IO has been queued, we can return as soon as enough of it has
 * completed to disk.
 *
 * The journal should be locked before calling this function.
 */
int log_do_checkpoint(journal_t *journal)
{
	int result;
	int batch_count = 0;
	struct buffer_head *bhs[NR_BATCH];

	jbd_debug(1, "Start checkpoint\n");

	/*
	 * First thing: if there are any transactions in the log which
	 * don't need checkpointing, just eliminate them from the
	 * journal straight away.
	 */
	result = cleanup_journal_tail(journal);
	jbd_debug(1, "cleanup_journal_tail returned %d\n", result);
	if (result <= 0)
		return result;

	/*
	 * OK, we need to start writing disk blocks.  Try to free up a
	 * quarter of the log in a single checkpoint if we can.
	 */
	/*
	 * AKPM: check this code.  I had a feeling a while back that it
	 * degenerates into a busy loop at unmount time.
	 */
	spin_lock(&journal->j_list_lock);
	while (journal->j_checkpoint_transactions) {
		transaction_t *transaction;
		struct journal_head *jh, *last_jh, *next_jh;
		int drop_count = 0;
		int cleanup_ret, retry = 0;
		tid_t this_tid;

		/*
		 * Remember which transaction (and its tid) we are working
		 * on: several paths below drop j_list_lock, so we must be
		 * able to detect afterwards whether someone else retired
		 * or replaced it while we slept.
		 */
		transaction = journal->j_checkpoint_transactions;
		this_tid = transaction->t_tid;
		jh = transaction->t_checkpoint_list;
		last_jh = jh->b_cpprev;
		next_jh = jh;
		do {
			struct buffer_head *bh;

			jh = next_jh;
			next_jh = jh->b_cpnext;
			bh = jh2bh(jh);
			if (!jbd_trylock_bh_state(bh)) {
				/*
				 * Buffer state is contended: wait for it
				 * instead of spinning.  NOTE(review):
				 * jbd_sync_bh() appears to drop
				 * j_list_lock (we re-take it here) —
				 * confirm against its definition.
				 */
				jbd_sync_bh(journal, bh);
				spin_lock(&journal->j_list_lock);
				retry = 1;
				break;
			}
			retry = __flush_buffer(journal, jh, bhs,
						&batch_count, &drop_count);
			/* Yield the lock/CPU if needed; list may have
			 * changed under us, so rescan from the top. */
			if (cond_resched_lock(&journal->j_list_lock)) {
				retry = 1;
				break;
			}
		} while (jh != last_jh && !retry);

		/* Submit any partially-filled batch, then rescan. */
		if (batch_count) {
			__flush_batch(journal, bhs, &batch_count);
			retry = 1;
		}

		/*
		 * If someone cleaned up this transaction while we slept, we're
		 * done
		 */
		if (journal->j_checkpoint_transactions != transaction)
			break;
		if (retry)
			continue;
		/*
		 * Maybe it's a new transaction, but it fell at the same
		 * address
		 */
		if (transaction->t_tid != this_tid)
			continue;
		/*
		 * We have walked the whole transaction list without
		 * finding anything to write to disk.  We had better be
		 * able to make some progress or we are in trouble.
		 */
		cleanup_ret = __cleanup_transaction(journal, transaction);
		J_ASSERT(drop_count != 0 || cleanup_ret != 0);
		/* __cleanup_transaction may also have dropped the lock;
		 * recheck before touching the (possibly stale) pointer. */
		if (journal->j_checkpoint_transactions != transaction)
			break;
	}
	spin_unlock(&journal->j_list_lock);
	result = cleanup_journal_tail(journal);
	if (result < 0)
		return result;
	return 0;
}