/*
 * Clean up transaction's list of buffers submitted for io.
 * We wait for any pending IO to complete and remove any clean
 * buffers. Note that we take the buffers in the opposite ordering
 * from the one in which they were submitted for IO.
 *
 * Called with j_list_lock held.
 */
static void __wait_cp_io(journal_t *journal, transaction_t *transaction)
{
	struct journal_head *jh;
	struct buffer_head *bh;
	tid_t this_tid;
	int released = 0;

	this_tid = transaction->t_tid;
restart:
	/* Did somebody clean up the transaction in the meanwhile? */
	if (journal->j_checkpoint_transactions != transaction ||
	    transaction->t_tid != this_tid)
		return;
	while (!released && transaction->t_checkpoint_io_list) {
		jh = transaction->t_checkpoint_io_list;
		bh = jh2bh(jh);
		if (!jbd_trylock_bh_state(bh)) {
			jbd_sync_bh(journal, bh);
			spin_lock(&journal->j_list_lock);
			goto restart;
		}
		if (buffer_locked(bh)) {
			atomic_inc(&bh->b_count);
			spin_unlock(&journal->j_list_lock);
			jbd_unlock_bh_state(bh);
			wait_on_buffer(bh);
			/* the journal_head may have gone by now */
			BUFFER_TRACE(bh, "brelse");
			__brelse(bh);
			spin_lock(&journal->j_list_lock);
			goto restart;
		}
		/*
		 * Now in whatever state the buffer currently is, we know
		 * that it has been written out and so we can drop it from
		 * the list
		 */
		released = __journal_remove_checkpoint(jh);
		jbd_unlock_bh_state(bh);
		journal_remove_journal_head(bh);
		__brelse(bh);
	}
}
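/*
 * For reference, a minimal sketch of the jbd_sync_bh() helper assumed
 * by the trylock-failure paths in this file (a sketch of the behaviour
 * the callers rely on, not a verified copy of the implementation): drop
 * j_list_lock, then wait out whoever holds the buffer's state lock by
 * taking and releasing it.  A temporary reference keeps the buffer from
 * vanishing while the locks are dropped; the caller re-takes
 * j_list_lock itself.
 */
static void jbd_sync_bh(journal_t *journal, struct buffer_head *bh)
{
	get_bh(bh);			/* pin bh across the unlock */
	spin_unlock(&journal->j_list_lock);
	jbd_lock_bh_state(bh);		/* blocks until the holder is done */
	jbd_unlock_bh_state(bh);
	put_bh(bh);			/* drop the temporary reference */
}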
/*
 * Perform an actual checkpoint. We don't write out only enough to
 * satisfy the current blocked requests: rather we submit a reasonably
 * sized chunk of the outstanding data to disk at once for
 * efficiency. __log_wait_for_space() will retry if we didn't free enough.
 *
 * However, we _do_ take into account the amount requested so that once
 * the IO has been queued, we can return as soon as enough of it has
 * completed to disk.
 *
 * The journal should be locked before calling this function.
 */
int log_do_checkpoint(journal_t *journal)
{
	int result;
	int batch_count = 0;
	struct buffer_head *bhs[NR_BATCH];

	jbd_debug(1, "Start checkpoint\n");

	/*
	 * First thing: if there are any transactions in the log which
	 * don't need checkpointing, just eliminate them from the
	 * journal straight away.
	 */
	result = cleanup_journal_tail(journal);
	jbd_debug(1, "cleanup_journal_tail returned %d\n", result);
	if (result <= 0)
		return result;

	/*
	 * OK, we need to start writing disk blocks. Try to free up a
	 * quarter of the log in a single checkpoint if we can.
	 */
	/*
	 * AKPM: check this code. I had a feeling a while back that it
	 * degenerates into a busy loop at unmount time.
	 */
	spin_lock(&journal->j_list_lock);
	while (journal->j_checkpoint_transactions) {
		transaction_t *transaction;
		struct journal_head *jh, *last_jh, *next_jh;
		int drop_count = 0;
		int cleanup_ret, retry = 0;
		tid_t this_tid;

		transaction = journal->j_checkpoint_transactions;
		this_tid = transaction->t_tid;
		jh = transaction->t_checkpoint_list;
		last_jh = jh->b_cpprev;
		next_jh = jh;
		do {
			struct buffer_head *bh;

			jh = next_jh;
			next_jh = jh->b_cpnext;
			bh = jh2bh(jh);
			if (!jbd_trylock_bh_state(bh)) {
				jbd_sync_bh(journal, bh);
				spin_lock(&journal->j_list_lock);
				retry = 1;
				break;
			}
			retry = __flush_buffer(journal, jh, bhs,
					       &batch_count, &drop_count);
			if (cond_resched_lock(&journal->j_list_lock)) {
				retry = 1;
				break;
			}
		} while (jh != last_jh && !retry);

		if (batch_count) {
			__flush_batch(journal, bhs, &batch_count);
			retry = 1;
		}

		/*
		 * If someone cleaned up this transaction while we slept,
		 * we're done
		 */
		if (journal->j_checkpoint_transactions != transaction)
			break;
		if (retry)
			continue;
		/*
		 * Maybe it's a new transaction, but it fell at the same
		 * address
		 */
		if (transaction->t_tid != this_tid)
			continue;
		/*
		 * We have walked the whole transaction list without
		 * finding anything to write to disk. We had better be
		 * able to make some progress or we are in trouble.
		 */
		cleanup_ret = __cleanup_transaction(journal, transaction);
		J_ASSERT(drop_count != 0 || cleanup_ret != 0);
		if (journal->j_checkpoint_transactions != transaction)
			break;
	}
	spin_unlock(&journal->j_list_lock);
	result = cleanup_journal_tail(journal);
	if (result < 0)
		return result;
	return 0;
}
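/*
 * A hedged sketch of the per-buffer helper driving the loop above.  The
 * real __flush_buffer() is not shown in this excerpt, so this is
 * reconstructed from its call site (entered with j_list_lock and the bh
 * state lock held; names such as BJ_None and __try_to_free_cp_buf() are
 * assumptions): a dirty, unpinned buffer is queued into bhs[] for a
 * batched write-out, anything else is released from the checkpoint list
 * if it is clean.  A nonzero return tells the caller to restart its
 * scan.  __flush_batch() is assumed to drop j_list_lock internally
 * around the blocking submission, matching how the function above can
 * call it with the lock held.
 */
static int __flush_buffer(journal_t *journal, struct journal_head *jh,
			  struct buffer_head **bhs, int *batch_count,
			  int *drop_count)
{
	struct buffer_head *bh = jh2bh(jh);
	int ret = 0;

	if (buffer_dirty(bh) && !buffer_locked(bh) && jh->b_jlist == BJ_None) {
		/*
		 * Queue the buffer for write-out and mark it so the
		 * transaction logic leaves it alone until the IO is done.
		 */
		get_bh(bh);
		set_buffer_jwrite(bh);
		bhs[*batch_count] = bh;
		jbd_unlock_bh_state(bh);
		(*batch_count)++;
		if (*batch_count == NR_BATCH) {
			/* Batch full: submit it now. */
			__flush_batch(journal, bhs, batch_count);
			ret = 1;
		}
	} else {
		int last_buffer = 0;

		if (jh->b_cpnext == jh) {
			/* Last buffer: dropping it may kill the transaction. */
			last_buffer = 1;
		}
		/* Releases the buffer if it is clean; drops the state
		 * lock either way. */
		if (__try_to_free_cp_buf(jh)) {
			(*drop_count)++;
			ret = last_buffer;
		}
	}
	return ret;
}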
/*
 * Clean up a transaction's checkpoint list.
 *
 * We wait for any pending IO to complete and make sure any clean
 * buffers are removed from the transaction.
 *
 * Return 1 if we performed any actions which might have destroyed the
 * checkpoint. (journal_remove_checkpoint() deletes the transaction when
 * the last checkpoint buffer is cleansed)
 *
 * Called with j_list_lock held.
 */
static int __cleanup_transaction(journal_t *journal, transaction_t *transaction)
{
	struct journal_head *jh, *next_jh, *last_jh;
	struct buffer_head *bh;
	int ret = 0;

	assert_spin_locked(&journal->j_list_lock);
	jh = transaction->t_checkpoint_list;
	if (!jh)
		return 0;

	last_jh = jh->b_cpprev;
	next_jh = jh;
	do {
		jh = next_jh;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			atomic_inc(&bh->b_count);
			spin_unlock(&journal->j_list_lock);
			wait_on_buffer(bh);
			/* the journal_head may have gone by now */
			BUFFER_TRACE(bh, "brelse");
			__brelse(bh);
			goto out_return_1;
		}

		/*
		 * This is foul
		 */
		if (!jbd_trylock_bh_state(bh)) {
			jbd_sync_bh(journal, bh);
			goto out_return_1;
		}

		if (jh->b_transaction != NULL) {
			transaction_t *t = jh->b_transaction;
			tid_t tid = t->t_tid;

			spin_unlock(&journal->j_list_lock);
			jbd_unlock_bh_state(bh);
			log_start_commit(journal, tid);
			log_wait_commit(journal, tid);
			goto out_return_1;
		}

		/*
		 * AKPM: I think the buffer_jbddirty test is redundant - it
		 * shouldn't have NULL b_transaction?
		 */
		next_jh = jh->b_cpnext;
		if (!buffer_dirty(bh) && !buffer_jbddirty(bh)) {
			BUFFER_TRACE(bh, "remove from checkpoint");
			__journal_remove_checkpoint(jh);
			jbd_unlock_bh_state(bh);
			journal_remove_journal_head(bh);
			__brelse(bh);
			ret = 1;
		} else {
			jbd_unlock_bh_state(bh);
		}
	} while (jh != last_jh);

	return ret;
out_return_1:
	spin_lock(&journal->j_list_lock);
	return 1;
}
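/*
 * For context, a minimal sketch of the contract the callers above
 * assume of __journal_remove_checkpoint().  The field and helper names
 * b_cp_transaction, __buffer_unlink() and __journal_drop_transaction()
 * are assumptions, and the real function has more to check (for
 * instance whether the transaction has actually committed): unlink the
 * buffer from its checkpoint transaction, and if that was the last
 * checkpoint buffer, drop the transaction itself and report it via a
 * nonzero return value.
 */
static int __journal_remove_checkpoint(struct journal_head *jh)
{
	transaction_t *transaction = jh->b_cp_transaction;
	int ret = 0;

	if (transaction == NULL)
		return 0;		/* not on a checkpoint list */

	__buffer_unlink(jh);		/* take jh off the list */
	jh->b_cp_transaction = NULL;

	if (transaction->t_checkpoint_list == NULL) {
		/* Last buffer gone: the whole transaction can go. */
		__journal_drop_transaction(transaction->t_journal,
					   transaction);
		ret = 1;		/* tell the caller the list changed */
	}
	return ret;
}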
/*
 * Perform an actual checkpoint. We take the first transaction on the
 * list of transactions to be checkpointed and send all its buffers
 * to disk. For efficiency we submit the buffers in larger chunks at
 * once.
 *
 * The journal should be locked before calling this function.
 */
int log_do_checkpoint(journal_t *journal)
{
	transaction_t *transaction;
	tid_t this_tid;
	int result;

	jbd_debug(1, "Start checkpoint\n");

	/*
	 * First thing: if there are any transactions in the log which
	 * don't need checkpointing, just eliminate them from the
	 * journal straight away.
	 */
	result = cleanup_journal_tail(journal);
	jbd_debug(1, "cleanup_journal_tail returned %d\n", result);
	if (result <= 0)
		return result;

	/*
	 * OK, we need to start writing disk blocks. Take one transaction
	 * and write it.
	 */
	spin_lock(&journal->j_list_lock);
	if (!journal->j_checkpoint_transactions)
		goto out;
	transaction = journal->j_checkpoint_transactions;
	this_tid = transaction->t_tid;
restart:
	/*
	 * If someone cleaned up this transaction while we slept, we're
	 * done (maybe it's a new transaction, but it fell at the same
	 * address).
	 */
	if (journal->j_checkpoint_transactions == transaction &&
	    transaction->t_tid == this_tid) {
		int batch_count = 0;
		struct buffer_head *bhs[NR_BATCH];
		struct journal_head *jh;
		int retry = 0;

		while (!retry && transaction->t_checkpoint_list) {
			struct buffer_head *bh;

			jh = transaction->t_checkpoint_list;
			bh = jh2bh(jh);
			if (!jbd_trylock_bh_state(bh)) {
				jbd_sync_bh(journal, bh);
				retry = 1;
				break;
			}
			retry = __process_buffer(journal, jh, bhs,
						 &batch_count);
			if (!retry &&
			    lock_need_resched(&journal->j_list_lock)) {
				spin_unlock(&journal->j_list_lock);
				retry = 1;
				break;
			}
		}

		if (batch_count) {
			if (!retry) {
				spin_unlock(&journal->j_list_lock);
				retry = 1;
			}
			__flush_batch(journal, bhs, &batch_count);
		}

		if (retry) {
			spin_lock(&journal->j_list_lock);
			goto restart;
		}
		/*
		 * Now we have cleaned up the first transaction's checkpoint
		 * list. Let's clean up the second one (the list of buffers
		 * already submitted for IO).
		 */
		__wait_cp_io(journal, transaction);
	}
out:
	spin_unlock(&journal->j_list_lock);
	result = cleanup_journal_tail(journal);
	if (result < 0)
		return result;
	return 0;
}
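/*
 * A hedged sketch of the per-buffer helper called from the loop above,
 * reconstructed from its call site rather than copied from a known
 * source: it is entered with j_list_lock and the bh state lock held,
 * and a nonzero return tells the caller that the locks were dropped and
 * the scan must restart.  __buffer_relink_io() is an assumed helper
 * moving the buffer onto t_checkpoint_io_list, and the __flush_batch()
 * used here is assumed to leave j_list_lock handling to its callers,
 * matching how the function above drops the lock before flushing.
 */
static int __process_buffer(journal_t *journal, struct journal_head *jh,
			    struct buffer_head **bhs, int *batch_count)
{
	struct buffer_head *bh = jh2bh(jh);
	int ret = 0;

	if (buffer_locked(bh)) {
		/* Write already in flight: drop the locks and wait. */
		get_bh(bh);
		spin_unlock(&journal->j_list_lock);
		jbd_unlock_bh_state(bh);
		wait_on_buffer(bh);
		/* the journal_head may have gone by now */
		__brelse(bh);
		ret = 1;
	} else if (jh->b_transaction != NULL) {
		/*
		 * Still attached to a running transaction: force and wait
		 * for its commit instead of writing the buffer ourselves.
		 */
		tid_t tid = jh->b_transaction->t_tid;

		spin_unlock(&journal->j_list_lock);
		jbd_unlock_bh_state(bh);
		log_start_commit(journal, tid);
		log_wait_commit(journal, tid);
		ret = 1;
	} else if (!buffer_dirty(bh)) {
		/* Clean: it can leave the checkpoint list right away. */
		__journal_remove_checkpoint(jh);
		spin_unlock(&journal->j_list_lock);
		jbd_unlock_bh_state(bh);
		journal_remove_journal_head(bh);
		__brelse(bh);
		ret = 1;
	} else {
		/*
		 * Dirty: queue it for a batched write-out and move it to
		 * the io list so __wait_cp_io() will find it later.
		 */
		get_bh(bh);
		set_buffer_jwrite(bh);
		bhs[*batch_count] = bh;
		__buffer_relink_io(jh);
		jbd_unlock_bh_state(bh);
		(*batch_count)++;
		if (*batch_count == NR_BATCH) {
			/* Batch full: drop the list lock and submit it. */
			spin_unlock(&journal->j_list_lock);
			__flush_batch(journal, bhs, batch_count);
			ret = 1;
		}
	}
	return ret;
}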