Example no. 1
0
int journal_stop (handle_t *handle)
{
	transaction_t *transaction;
	journal_t *journal;
	int force_sync;

	/* A NULL handle is a no-op; check it before touching any fields. */
	if (!handle)
		return 0;

	transaction = handle->h_transaction;
	journal = transaction->t_journal;

	J_ASSERT (transaction->t_updates > 0);
	J_ASSERT (current->j_handle == handle);
	
	if (--handle->h_ref > 0)
		return 0;

	jfs_debug(4, "Handle %p going down\n", handle);
	
	current->j_handle = NULL;
	// current->fs_locks--;
	transaction->t_outstanding_credits -= handle->h_buffer_credits;
	transaction->t_updates--;
	if (!transaction->t_updates) {
		wake_up(&journal->j_wait_updates);
		if (journal->j_barrier_count)
			wake_up(&journal->j_wait_transaction_locked);
	}

	/* 
	 * If the journal is marked SYNC, we need to set another commit
	 * going!  We also want to force a commit if the current
	 * transaction is occupying too much of the log, or if the
	 * transaction is too old now.
	 */

	force_sync = (journal->j_flags & JFS_SYNC) || handle->h_sync;
	
	if (force_sync ||
	    transaction->t_outstanding_credits > journal->j_max_transaction_buffers ||
	    time_after_eq(jiffies, transaction->t_expires)) {
		tid_t tid = transaction->t_tid;
		
		jfs_debug(2, "transaction too old, requesting commit for handle %p\n", handle);
		log_start_commit(journal, transaction);
		
		/*
		 * Special case: JFS_SYNC synchronous updates require us
		 * to wait for the commit to complete.  
		 */
		if (force_sync) 
			log_wait_commit(journal, tid);
	}
	
	kfree(handle);
	
	return 0;
}
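For context, journal_stop() is the release half of a handle's lifetime. The sketch below is a hypothetical caller, assuming the standard JBD entry points journal_start(), journal_get_write_access() and journal_dirty_metadata(); update_some_metadata() itself is a made-up name used only for illustration.

/* Hypothetical caller sketch: reserve one credit, modify a metadata buffer
 * under the handle, then release the handle (which may force a commit). */
static int update_some_metadata(journal_t *journal, struct buffer_head *bh)
{
	handle_t *handle = journal_start(journal, 1);	/* reserve 1 credit */
	int err;

	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = journal_get_write_access(handle, bh);
	if (!err) {
		/* ... modify bh->b_data here ... */
		err = journal_dirty_metadata(handle, bh);
	}
	journal_stop(handle);	/* drops the last reference */
	return err;
}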
Example no. 2
0
void journal_sync_buffer(struct buffer_head *bh)
{
	transaction_t *transaction;
	journal_t *journal;
	long sequence;
	
	/* If the buffer isn't journaled, this is easy: just sync it to
	 * disk.  */

	if (bh->b_transaction == NULL) {
		/* If the buffer has already been journaled, then this
		 * is a noop. */
		if (bh->b_cp_transaction == NULL) 
			return;
		ll_rw_block (WRITE, 1, &bh);
		wait_on_buffer (bh);
		return;
	}
	
	/* Otherwise, just wait until the transaction is synced to disk. */
	transaction = bh->b_transaction;
	journal = transaction->t_journal;
	sequence = transaction->t_tid;
	
	jfs_debug(2, "requesting commit for bh %p\n", bh);
	log_start_commit (journal, transaction);
	
	while (tid_gt(sequence, journal->j_commit_sequence)) {
		wake_up(&journal->j_wait_done_commit);
		sleep_on(&journal->j_wait_done_commit);
	}
}
Example no. 3
0
struct buffer_head *getblk(kdev_t kdev, blk64_t blocknr, int blocksize)
{
	struct buffer_head *bh;
	int bufsize = sizeof(*bh) + kdev->k_ctx->fs->blocksize -
		sizeof(bh->b_data);

	bh = e2fsck_allocate_memory(kdev->k_ctx, bufsize, "block buffer");
	if (!bh)
		return NULL;

#ifdef CONFIG_JBD_DEBUG
	if (journal_enable_debug >= 3)
		bh_count++;
#endif
	jfs_debug(4, "getblk for block %llu (%d bytes)(total %d)\n",
		  (unsigned long long) blocknr, blocksize, bh_count);

	bh->b_ctx = kdev->k_ctx;
	if (kdev->k_dev == K_DEV_FS)
		bh->b_io = kdev->k_ctx->fs->io;
	else
		bh->b_io = kdev->k_ctx->journal_io;
	bh->b_size = blocksize;
	bh->b_blocknr = blocknr;

	return bh;
}
Example no. 4
0
struct buffer_head *getblk(kdev_t kdev, blk64_t blocknr, int blocksize)
{
	struct buffer_head *bh;
	int bufsize = sizeof(*bh) + kdev->k_fs->blocksize -
		sizeof(bh->b_data);
	errcode_t retval;

	retval = ext2fs_get_memzero(bufsize, &bh);
	if (retval)
		return NULL;

#ifdef CONFIG_JBD_DEBUG
	if (journal_enable_debug >= 3)
		bh_count++;
#endif
	jfs_debug(4, "getblk for block %llu (%d bytes)(total %d)\n",
		  (unsigned long long) blocknr, blocksize, bh_count);

	bh->b_fs = kdev->k_fs;
	if (kdev->k_dev == K_DEV_FS)
		bh->b_io = kdev->k_fs->io;
	else
		bh->b_io = kdev->k_fs->journal_io;
	bh->b_size = blocksize;
	bh->b_blocknr = blocknr;

	return bh;
}
Example no. 5
0
int journal_skip_recovery(journal_t *journal)
{
	int			err;
	journal_superblock_t *	sb;

	struct recovery_info	info;
	
	memset(&info, 0, sizeof(info));
	sb = journal->j_superblock;
	
	err = do_one_pass(journal, &info, PASS_SCAN);

	if (err) {
		printk(KERN_ERR "JFS: error %d scanning journal\n", err);
		++journal->j_transaction_sequence;
	} else {
		int dropped = info.end_transaction - ntohl(sb->s_sequence);
		
		jfs_debug(0, 
			  "JFS: ignoring %d transaction%s from the journal.\n",
			  dropped, (dropped == 1) ? "" : "s");
		journal->j_transaction_sequence = ++info.end_transaction;
	}

	journal->j_tail = 0;
	
	return err;
}
Example no. 6
0
void brelse(struct buffer_head *bh)
{
	if (bh->b_dirty)
		ll_rw_block(WRITE, 1, &bh);
	jfs_debug(3, "freeing block %lu/%p (total %d)\n",
		  (unsigned long) bh->b_blocknr, (void *) bh, --bh_count);
	ext2fs_free_mem(&bh);
}
Example no. 7
0
void ll_rw_block(int rw, int nr, struct buffer_head *bhp[])
{
	int retval;
	struct buffer_head *bh;

	for (; nr > 0; --nr) {
		bh = *bhp++;
		if (rw == READ && !bh->b_uptodate) {
			jfs_debug(3, "reading block %llu/%p\n",
				  bh->b_blocknr, (void *) bh);
			retval = io_channel_read_blk64(bh->b_io,
						     bh->b_blocknr,
						     1, bh->b_data);
			if (retval) {
				com_err(bh->b_ctx->device_name, retval,
					"while reading block %llu\n",
					bh->b_blocknr);
				bh->b_err = retval;
				continue;
			}
			bh->b_uptodate = 1;
		} else if (rw == WRITE && bh->b_dirty) {
			jfs_debug(3, "writing block %llu/%p\n",
				  bh->b_blocknr,
				  (void *) bh);
			retval = io_channel_write_blk64(bh->b_io,
						      bh->b_blocknr,
						      1, bh->b_data);
			if (retval) {
				com_err(bh->b_ctx->device_name, retval,
					"while writing block %llu\n",
					bh->b_blocknr);
				bh->b_err = retval;
				continue;
			}
			bh->b_dirty = 0;
			bh->b_uptodate = 1;
		} else {
			jfs_debug(3, "no-op %s for block %llu\n",
				  rw == READ ? "read" : "write",
				  bh->b_blocknr);
		}
	}
}
Example no. 8
0
int journal_recover(journal_t *journal)
{
	int			err;
	journal_superblock_t *	sb;

	struct recovery_info	info;
	
	memset(&info, 0, sizeof(info));
	sb = journal->j_superblock;
	
	/* 
	 * The journal superblock's s_start field (the current log head)
	 * is always zero if, and only if, the journal was cleanly
	 * unmounted.  
	 */

	if (!sb->s_start) {
		jfs_debug(1, "No recovery required, last transaction %d\n",
			  ntohl(sb->s_sequence));
		journal->j_transaction_sequence = ntohl(sb->s_sequence) + 1;
		return 0;
	}
	

	err = do_one_pass(journal, &info, PASS_SCAN);
	if (!err)
		err = do_one_pass(journal, &info, PASS_REVOKE);
	if (!err)
		err = do_one_pass(journal, &info, PASS_REPLAY);

	jfs_debug(0, "JFS: recovery, exit status %d, "
		  "recovered transactions %u to %u\n",
		  err, info.start_transaction, info.end_transaction);
	jfs_debug(0, "JFS: Replayed %d and revoked %d/%d blocks\n", 
		  info.nr_replays, info.nr_revoke_hits, info.nr_revokes);

	/* Restart the log at the next transaction ID, thus invalidating
	 * any existing commit records in the log. */
	journal->j_transaction_sequence = ++info.end_transaction;
		
	journal_clear_revoke(journal);
	fsync_dev(journal->j_dev);
	return err;
}
Example no. 9
0
int journal_extend (handle_t *handle, int nblocks)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
	int result = 1;
	int wanted;
	
	lock_journal (journal);

	/* Don't extend a locked-down transaction! */
	if (handle->h_transaction->t_state != T_RUNNING) {
		jfs_debug(3, "denied handle %p %d blocks: "
			  "transaction not running\n", handle, nblocks);
		goto error_out;
	}
	
	wanted = transaction->t_outstanding_credits + nblocks;
	
	if (wanted > journal->j_max_transaction_buffers) {
		jfs_debug(3, "denied handle %p %d blocks: "
			  "transaction too large\n", handle, nblocks);
		goto error_out;
	}

	if (wanted > log_space_left(journal)) {
		jfs_debug(3, "denied handle %p %d blocks: "
			  "insufficient log space\n", handle, nblocks);
		goto error_out;
	}
	
	handle->h_buffer_credits += nblocks;
	transaction->t_outstanding_credits += nblocks;
	result = 0;

	jfs_debug(3, "extended handle %p by %d\n", handle, nblocks);
	
error_out:
	unlock_journal (journal);
	return result;
}
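journal_extend() returns non-zero when the running transaction cannot grow, and callers conventionally fall back to journal_restart() (example no. 11). The sketch below shows that pattern; extend_or_restart() is a made-up wrapper name, while journal_extend() and journal_restart() are the functions shown in this corpus.

/* Hypothetical sketch: try to extend the current handle; if the running
 * transaction is full, close it and chain a new handle with the full
 * credit count via journal_restart(). */
static int extend_or_restart(handle_t *handle, int nblocks)
{
	int err = journal_extend(handle, nblocks);

	if (err == 0)
		return 0;	/* extension granted */

	/* Could not extend: commit the old transaction and attach the
	 * handle to a new one.  Callers must re-acquire write access to
	 * any buffers they still intend to modify. */
	return journal_restart(handle, nblocks);
}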
Example no. 10
0
struct buffer_head *getblk(kdev_t kdev, blk_t blocknr, int blocksize)
{
	struct buffer_head *bh;

	bh = e2fsck_allocate_memory(kdev->k_ctx, sizeof(*bh), "block buffer");
	if (!bh)
		return NULL;

	jfs_debug(4, "getblk for block %lu (%d bytes)(total %d)\n",
		  (unsigned long) blocknr, blocksize, ++bh_count);

	bh->b_ctx = kdev->k_ctx;
	if (kdev->k_dev == K_DEV_FS)
		bh->b_io = kdev->k_ctx->fs->io;
	else 
		bh->b_io = kdev->k_ctx->journal_io;
	bh->b_size = blocksize;
	bh->b_blocknr = blocknr;

	return bh;
}
Example no. 11
0
int journal_restart(handle_t *handle, int nblocks)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
		
	/* First unlink the handle from its current transaction, and
	 * start the commit on that. */
	
	J_ASSERT (transaction->t_updates > 0);
	J_ASSERT (current->j_handle == handle);

	transaction->t_outstanding_credits -= handle->h_buffer_credits;
	transaction->t_updates--;

	if (!transaction->t_updates)
		wake_up(&journal->j_wait_updates);

	jfs_debug(2, "restarting handle %p\n", handle);
	log_start_commit(journal, transaction);

	handle->h_buffer_credits = nblocks;
	return start_this_handle(journal, handle);
}
Example no. 12
0
int journal_get_create_access (handle_t *handle, struct buffer_head *bh) 
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;

	jfs_debug(5, "buffer_head %p\n", bh);
	lock_journal(journal);

	/* The buffer may already belong to this transaction due to
           pre-zeroing in the filesystem's new_block code */
	J_ASSERT (bh->b_transaction == transaction || bh->b_transaction == NULL);
	J_ASSERT (bh->b_next_transaction == NULL);
	J_ASSERT (buffer_locked(bh));

	J_ASSERT(handle->h_buffer_credits > 0);
	handle->h_buffer_credits--;
	
	bh->b_transaction = transaction;
	journal_file_buffer(bh, transaction, BJ_Reserved);
	refile_buffer(bh);
	unlock_journal(journal);
	return 0;
}
Example no. 13
0
int journal_dirty_metadata (handle_t *handle, struct buffer_head *bh)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;

	jfs_debug(5, "buffer_head %p\n", bh);

	lock_journal(journal);
	mark_buffer_jdirty(bh);

	J_ASSERT(bh->b_transaction != NULL);
	
	/* 
	 * Metadata already on the current transaction list doesn't
	 * need to be filed.  Metadata on another transaction's list must
	 * be committing, and will be refiled once the commit completes:
	 * leave it alone for now. 
	 */

	if (bh->b_transaction != transaction) {
		J_ASSERT (bh->b_transaction == journal->j_committing_transaction);
		J_ASSERT (bh->b_next_transaction == transaction);
		/* And this case is illegal: we can't reuse another
		 * transaction's data buffer, ever. */
		J_ASSERT (bh->b_jlist != BJ_Data);
		goto done;
	}
	
	/* That test should have eliminated the following case: */
	J_ASSERT (bh->b_frozen_data == 0);

	journal_file_buffer (bh, handle->h_transaction, BJ_Metadata);

 done:
	unlock_journal(journal);
	return 0;
}
Example no. 14
0
int do_get_write_access (handle_t *handle, struct buffer_head *bh, 
			 int force_copy) 
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
	int error = 0;
	char *frozen_buffer = NULL;

	jfs_debug(5, "buffer_head %p, force_copy %d\n", bh, force_copy);

repeat:
	/* @@@ Need to check for errors here at some point. */

	/* The caller must make sure that we enter here with the buffer
	 * unlocked (probably by calling lock_journal_bh_wait).  If we
	 * sleep in this function, we have to wait again for the buffer
	 * to make sure it is still unlocked.  We cannot journal a
	 * buffer if somebody else may already be in the process of
	 * writing it to disk! */

	J_ASSERT (buffer_uptodate(bh));
	J_ASSERT (!buffer_locked(bh));
	
	/* The buffer is already part of this transaction if
	 * b_transaction or b_next_transaction points to it. */

	if (bh->b_transaction == transaction ||
	    bh->b_next_transaction == transaction)
		goto done;

	/* If there is already a copy-out version of this buffer, then
	 * we don't need to make another one. */

	if (bh->b_frozen_data) {
		J_ASSERT(bh->b_next_transaction == NULL);
		bh->b_next_transaction = transaction;

		J_ASSERT(handle->h_buffer_credits > 0);
		handle->h_buffer_credits--;
		goto done;
	}
	
	/* Is there data here we need to preserve? */
	
	if (bh->b_transaction && bh->b_transaction != transaction) {
		J_ASSERT (bh->b_next_transaction == NULL);
		J_ASSERT (bh->b_transaction == journal->j_committing_transaction);
		J_ASSERT (bh->b_list == BUF_JOURNAL);

		/* There is one case we have to be very careful about.
		 * If the committing transaction is currently writing
		 * this buffer out to disk and has NOT made a copy-out,
		 * then we cannot modify the buffer contents at all
		 * right now.  The essence of copy-out is that it is the
		 * extra copy, not the primary copy, which gets
		 * journaled.  If the primary copy is already going to
		 * disk then we cannot do copy-out here. */

		if (bh->b_jlist == BJ_Shadow) {
			unlock_journal(journal);
			/* commit wakes up all shadow buffers after IO */
			sleep_on(&bh->b_wait);
			lock_journal_bh_wait(bh, journal);
			goto repeat;
		}
			
		/* Only do the copy if the currently-owning transaction
		 * still needs it.  If it is on the Forget list, the
		 * committing transaction is past that stage.  The
		 * buffer had better remain locked during the kmalloc,
		 * but that should be true --- we hold the journal lock
		 * still and the buffer is already on the BUF_JOURNAL
		 * list so won't be flushed. 
		 *
		 * Subtle point, though: if this is a get_undo_access,
		 * then we will be relying on the frozen_data to contain
		 * the new value of the committed_data record after the
		 * transaction, so we HAVE to force the frozen_data copy
		 * in that case. */

		if (bh->b_jlist != BJ_Forget || force_copy) {
			if (bh->b_jlist == BJ_Data)
				J_ASSERT(test_bit(BH_QuickFree, &bh->b_state));

			if (!frozen_buffer) {
				unlock_journal(journal);
				frozen_buffer = kmalloc(bh->b_size,GFP_KERNEL);
				lock_journal_bh_wait(bh, journal);
				if (!frozen_buffer) {
					error = -ENOMEM;
					goto done;
				}
				goto repeat;
			}
			
			bh->b_frozen_data = frozen_buffer;
			frozen_buffer = NULL;
			
			memcpy (bh->b_frozen_data, bh->b_data, bh->b_size);
		}

		bh->b_next_transaction = transaction;


	}

	J_ASSERT(handle->h_buffer_credits > 0);
	handle->h_buffer_credits--;
	
	/* Finally, if the buffer is not journaled right now, we need to
	 * make sure it doesn't get written to disk before the caller
	 * actually commits the new data. */

	if (!bh->b_transaction) {
		J_ASSERT (!bh->b_next_transaction);
		bh->b_transaction = transaction;
		journal_file_buffer(bh, transaction, BJ_Reserved);
	}
	
 done:
	if (bh->b_list != BUF_JOURNAL)
		refile_buffer(bh);
	clear_bit(BH_QuickFree, &bh->b_state);

	/* If we are about to journal a buffer, then any revoke pending
           on it is no longer valid. */
	journal_cancel_revoke(handle, bh);
	
	if (frozen_buffer)
		kfree(frozen_buffer);

	return error;
}
Example no. 15
0
static errcode_t e2fsck_get_journal(e2fsck_t ctx, journal_t **ret_journal)
{
	struct ext2_super_block *sb = ctx->fs->super;
	struct ext2_super_block jsuper;
	struct problem_context	pctx;
	struct buffer_head 	*bh;
	struct inode		*j_inode = NULL;
	struct kdev_s		*dev_fs = NULL, *dev_journal;
	const char		*journal_name = 0;
	journal_t		*journal = NULL;
	errcode_t		retval = 0;
	io_manager		io_ptr = 0;
	unsigned long		start = 0;
	blk_t			blk;
	int			ext_journal = 0;
	int			tried_backup_jnl = 0;
	int			i;
		
	clear_problem_context(&pctx);
	
	journal = e2fsck_allocate_memory(ctx, sizeof(journal_t), "journal");
	if (!journal) {
		return EXT2_ET_NO_MEMORY;
	}

	dev_fs = e2fsck_allocate_memory(ctx, 2*sizeof(struct kdev_s), "kdev");
	if (!dev_fs) {
		retval = EXT2_ET_NO_MEMORY;
		goto errout;
	}
	dev_journal = dev_fs+1;
	
	dev_fs->k_ctx = dev_journal->k_ctx = ctx;
	dev_fs->k_dev = K_DEV_FS;
	dev_journal->k_dev = K_DEV_JOURNAL;
	
	journal->j_dev = dev_journal;
	journal->j_fs_dev = dev_fs;
	journal->j_inode = NULL;
	journal->j_blocksize = ctx->fs->blocksize;

	if (uuid_is_null(sb->s_journal_uuid)) {
		if (!sb->s_journal_inum) {
			retval = EXT2_ET_BAD_INODE_NUM;
			goto errout;
		}
		j_inode = e2fsck_allocate_memory(ctx, sizeof(*j_inode),
						 "journal inode");
		if (!j_inode) {
			retval = EXT2_ET_NO_MEMORY;
			goto errout;
		}

		j_inode->i_ctx = ctx;
		j_inode->i_ino = sb->s_journal_inum;
		
		if ((retval = ext2fs_read_inode(ctx->fs,
						sb->s_journal_inum,
						&j_inode->i_ext2))) {
		try_backup_journal:
			if (sb->s_jnl_backup_type != EXT3_JNL_BACKUP_BLOCKS ||
			    tried_backup_jnl)
				goto errout;
			memset(&j_inode->i_ext2, 0, sizeof(struct ext2_inode));
			memcpy(&j_inode->i_ext2.i_block[0], sb->s_jnl_blocks, 
			       EXT2_N_BLOCKS*4);
			j_inode->i_ext2.i_size = sb->s_jnl_blocks[16];
			j_inode->i_ext2.i_links_count = 1;
			j_inode->i_ext2.i_mode = LINUX_S_IFREG | 0600;
			tried_backup_jnl++;
		}
		if (!j_inode->i_ext2.i_links_count ||
		    !LINUX_S_ISREG(j_inode->i_ext2.i_mode)) {
			retval = EXT2_ET_NO_JOURNAL;
			goto try_backup_journal;
		}
		if (j_inode->i_ext2.i_size / journal->j_blocksize <
		    JFS_MIN_JOURNAL_BLOCKS) {
			retval = EXT2_ET_JOURNAL_TOO_SMALL;
			goto try_backup_journal;
		}
		for (i=0; i < EXT2_N_BLOCKS; i++) {
			blk = j_inode->i_ext2.i_block[i];
			if (!blk) {
				if (i < EXT2_NDIR_BLOCKS) {
					retval = EXT2_ET_JOURNAL_TOO_SMALL;
					goto try_backup_journal;
				}
				continue;
			}
			if (blk < sb->s_first_data_block ||
			    blk >= sb->s_blocks_count) {
				retval = EXT2_ET_BAD_BLOCK_NUM;
				goto try_backup_journal;
			}
		}
		journal->j_maxlen = j_inode->i_ext2.i_size / journal->j_blocksize;

#ifdef USE_INODE_IO
		retval = ext2fs_inode_io_intern2(ctx->fs, sb->s_journal_inum,
						 &j_inode->i_ext2,
						 &journal_name);
		if (retval)
			goto errout;

		io_ptr = inode_io_manager;
#else
		journal->j_inode = j_inode;
		ctx->journal_io = ctx->fs->io;
		if ((retval = journal_bmap(journal, 0, &start)) != 0)
			goto errout;
#endif
	} else {
		ext_journal = 1;
		if (!ctx->journal_name) {
			char uuid[37];

			uuid_unparse(sb->s_journal_uuid, uuid);
			ctx->journal_name = blkid_get_devname(ctx->blkid,
							      "UUID", uuid);
			if (!ctx->journal_name)
				ctx->journal_name = blkid_devno_to_devname(sb->s_journal_dev);
		}
		journal_name = ctx->journal_name;
			
		if (!journal_name) {
			fix_problem(ctx, PR_0_CANT_FIND_JOURNAL, &pctx);
			retval = EXT2_ET_LOAD_EXT_JOURNAL;
			goto errout;
		}
		
		jfs_debug(1, "Using journal file %s\n", journal_name);
		io_ptr = unix_io_manager;
	}

#if 0
	test_io_backing_manager = io_ptr;
	io_ptr = test_io_manager;
#endif
#ifndef USE_INODE_IO
	if (ext_journal)
#endif
		retval = io_ptr->open(journal_name, IO_FLAG_RW,
				      &ctx->journal_io);
	if (retval)
		goto errout;

	io_channel_set_blksize(ctx->journal_io, ctx->fs->blocksize);

	if (ext_journal) {
		if (ctx->fs->blocksize == 1024)
			start = 1;
		bh = getblk(dev_journal, start, ctx->fs->blocksize);
		if (!bh) {
			retval = EXT2_ET_NO_MEMORY;
			goto errout;
		}
		ll_rw_block(READ, 1, &bh);
		if ((retval = bh->b_err) != 0) {
			brelse(bh);
			goto errout;
		}
		memcpy(&jsuper, start ? bh->b_data :  bh->b_data + 1024,
		       sizeof(jsuper));
		brelse(bh);
#ifdef EXT2FS_ENABLE_SWAPFS
		if (jsuper.s_magic == ext2fs_swab16(EXT2_SUPER_MAGIC)) 
			ext2fs_swap_super(&jsuper);
#endif
		if (jsuper.s_magic != EXT2_SUPER_MAGIC ||
		    !(jsuper.s_feature_incompat & EXT3_FEATURE_INCOMPAT_JOURNAL_DEV)) {
			fix_problem(ctx, PR_0_EXT_JOURNAL_BAD_SUPER, &pctx);
			retval = EXT2_ET_LOAD_EXT_JOURNAL;
			goto errout;
		}
		/* Make sure the journal UUID is correct */
		if (memcmp(jsuper.s_uuid, ctx->fs->super->s_journal_uuid,
			   sizeof(jsuper.s_uuid))) {
			fix_problem(ctx, PR_0_JOURNAL_BAD_UUID, &pctx);
			retval = EXT2_ET_LOAD_EXT_JOURNAL;
			goto errout;
		}
		
		journal->j_maxlen = jsuper.s_blocks_count;
		start++;
	}

	if (!(bh = getblk(dev_journal, start, journal->j_blocksize))) {
		retval = EXT2_ET_NO_MEMORY;
		goto errout;
	}
	
	journal->j_sb_buffer = bh;
	journal->j_superblock = (journal_superblock_t *)bh->b_data;
	
#ifdef USE_INODE_IO
	if (j_inode)
		ext2fs_free_mem(&j_inode);
#endif

	*ret_journal = journal;
	return 0;

errout:
	if (dev_fs)
		ext2fs_free_mem(&dev_fs);
	if (j_inode)
		ext2fs_free_mem(&j_inode);
	if (journal)
		ext2fs_free_mem(&journal);
	return retval;
	
}
Example no. 16
0
static int do_one_pass(journal_t *journal, struct recovery_info *info,
		       enum passtype pass)
{
	
	unsigned int		first_commit_ID, next_commit_ID;
	unsigned long		next_log_block;
	int			err, success = 0;
	journal_superblock_t *	sb;
	journal_header_t * 	tmp;
	struct buffer_head *	bh;
	unsigned int		sequence;
	int			blocktype;
	
	/* Precompute the maximum metadata descriptors in a descriptor block */
	int			MAX_BLOCKS_PER_DESC;
	MAX_BLOCKS_PER_DESC = ((journal->j_blocksize-sizeof(journal_header_t))
			       / sizeof(journal_block_tag_t));

	/* 
	 * First thing is to establish what we expect to find in the log
	 * (in terms of transaction IDs), and where (in terms of log
	 * block offsets): query the superblock.  
	 */

	sb = journal->j_superblock;
	next_commit_ID = ntohl(sb->s_sequence);
	next_log_block = ntohl(sb->s_start);

	first_commit_ID = next_commit_ID;
	if (pass == PASS_SCAN)
		info->start_transaction = first_commit_ID;
	
	jfs_debug(1, "Starting recovery pass %d\n", pass);
	
	/*
	 * Now we walk through the log, transaction by transaction,
	 * making sure that each transaction has a commit block in the
	 * expected place.  Each complete transaction gets replayed back
	 * into the main filesystem. 
	 */

	while (1) {
		int			flags;
		char *			tagp;
		journal_block_tag_t *	tag;
		struct buffer_head *	obh;
		struct buffer_head *	nbh;
		
		/* If we already know where to stop the log traversal,
		 * check right now that we haven't gone past the end of
		 * the log. */
		
		if (pass != PASS_SCAN)
			if (tid_geq(next_commit_ID, info->end_transaction))
				break;
		
		jfs_debug(2, "Scanning for sequence ID %u at %lu/%lu\n",
			  next_commit_ID, next_log_block, journal->j_last);

		/* Skip over each chunk of the transaction looking for
		 * either the next descriptor block or the final commit
		 * record. */
		
		jfs_debug(3, "JFS: checking block %ld\n", next_log_block);
		err = jread(&bh, journal, next_log_block);
		if (err)
			goto failed;
		
		next_log_block++;
		wrap(journal, next_log_block);
		
		/* What kind of buffer is it? 
		 * 
		 * If it is a descriptor block, check that it has the
		 * expected sequence number.  Otherwise, we're all done
		 * here. */

		tmp = (journal_header_t *) bh->b_data;
		
		if (tmp->h_magic != htonl(JFS_MAGIC_NUMBER)) {
			brelse(bh);
			break;
		}
		
		blocktype = ntohl(tmp->h_blocktype);
		sequence = ntohl(tmp->h_sequence);
		jfs_debug(3, "Found magic %d, sequence %d\n", 
			  blocktype, sequence);
		
		if (sequence != next_commit_ID) {
			brelse(bh);
			break;
		}
		
		/* OK, we have a valid descriptor block which matches
		 * all of the sequence number checks.  What are we going
		 * to do with it?  That depends on the pass... */
		
		switch(blocktype) {
		case JFS_DESCRIPTOR_BLOCK:
			/* If it is a valid descriptor block, replay it
			 * in pass REPLAY; otherwise, just skip over the
			 * blocks it describes. */
			if (pass != PASS_REPLAY) {
				next_log_block += count_tags(bh, journal->j_blocksize);
				wrap(journal, next_log_block);
				brelse(bh);
				continue;
			}

			/* A descriptor block: we can now write all of
			 * the data blocks.  Yay, useful work is finally
			 * getting done here! */

			tagp = &bh->b_data[sizeof(journal_header_t)];
			while ((tagp - bh->b_data +sizeof(journal_block_tag_t))
			       <= journal->j_blocksize) {
				unsigned long io_block;
				
				tag = (journal_block_tag_t *) tagp;
				flags = ntohl(tag->t_flags);
				
				io_block = next_log_block++;
				wrap(journal, next_log_block);
				err = jread(&obh, journal, io_block);
				if (err) {
					/* Recover what we can, but
					 * report failure at the end. */
					success = err;
					printk (KERN_ERR 
						"JFS: IO error %d recovering "
						"block %ld in log\n",
						err, io_block);
				} else {
					unsigned long blocknr;
					
					J_ASSERT(obh != NULL);
					blocknr = ntohl(tag->t_blocknr);

					/* If the block has been
					 * revoked, then we're all done
					 * here. */
					if (journal_test_revoke
					    (journal, blocknr, 
					     next_commit_ID)) {
						brelse(obh);
						++info->nr_revoke_hits;
						goto skip_write;
					}
								
					/* Find a buffer for the new
					 * data being restored */
					nbh = getblk(journal->j_dev, blocknr,
						     journal->j_blocksize);
					if (nbh == NULL) {
						printk(KERN_ERR 
						       "JFS: Out of memory "
						       "during recovery.\n");
						err = -ENOMEM;
						brelse(bh);
						brelse(obh);
						goto failed;
					}

					memcpy(nbh->b_data, obh->b_data, 
					       journal->j_blocksize);
					if (flags & JFS_FLAG_ESCAPE) {
						/* Un-escape: put back the
						 * magic number that was
						 * overwritten when the block
						 * was escaped into the log. */
						* ((unsigned int *) nbh->b_data) =
							htonl(JFS_MAGIC_NUMBER);
					}
					
					mark_buffer_dirty(nbh, 1);
					mark_buffer_uptodate(nbh, 1);
					++info->nr_replays;
					/* ll_rw_block(WRITE, 1, &nbh); */
					brelse(obh);
					brelse(nbh);
				}
				
			skip_write:
				tagp += sizeof(journal_block_tag_t);
				if (!(flags & JFS_FLAG_SAME_UUID))
					tagp += 16;

				if (flags & JFS_FLAG_LAST_TAG)
					break;
			}
			
			brelse(bh);
			continue;
				
		case JFS_COMMIT_BLOCK:
			/* Found an expected commit block: not much to
			 * do other than move on to the next sequence
			 * number. */
			brelse(bh);
			next_commit_ID++;
			continue;

		case JFS_REVOKE_BLOCK:
			/* If we aren't in the REVOKE pass, then we can
			 * just skip over this block. */
			if (pass != PASS_REVOKE) {
				brelse(bh);
				continue;
			}

			err = scan_revoke_records(journal, bh, 
						  next_commit_ID, info);
			brelse(bh);
			if (err)
				goto failed;
			continue;

		default:
			jfs_debug(3, "Unrecognised magic %d, end of scan.\n",
				  blocktype);
			goto done;
		}
	}

 done:
	/* 
	 * We broke out of the log scan loop: either we came to the
	 * known end of the log or we found an unexpected block in the
	 * log.  If the latter happened, then we know that the "current"
	 * transaction marks the end of the valid log.
	 */
	
	if (pass == PASS_SCAN)
		info->end_transaction = next_commit_ID;
	else {
		/* It's really bad news if different passes end up at
		 * different places (but possible due to IO errors). */
		if (info->end_transaction != next_commit_ID) {
			printk (KERN_ERR "JFS: recovery pass %d ended at "
				"transaction %u, expected %u\n",
				pass, next_commit_ID, info->end_transaction);
			if (!success)
				success = -EIO;
		}
	}

	return success;

 failed:
	return err;
}
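The replay loop above relies on two helpers that are not shown in this example: count_tags(), which walks the tags of a descriptor block, and wrap(), which keeps a log block number inside the circular journal area. A minimal sketch of both follows, consistent with the tag layout walked in the REPLAY case (one journal_block_tag_t per block, plus 16 UUID bytes when JFS_FLAG_SAME_UUID is not set, terminated by JFS_FLAG_LAST_TAG); the real implementations may differ in detail.

/* Count the number of block tags described by one descriptor block. */
static int count_tags(struct buffer_head *bh, int size)
{
	char *tagp = &bh->b_data[sizeof(journal_header_t)];
	journal_block_tag_t *tag;
	int nr = 0;

	while ((tagp - bh->b_data + sizeof(journal_block_tag_t)) <= size) {
		tag = (journal_block_tag_t *) tagp;

		nr++;
		tagp += sizeof(journal_block_tag_t);
		if (!(ntohl(tag->t_flags) & JFS_FLAG_SAME_UUID))
			tagp += 16;
		if (ntohl(tag->t_flags) & JFS_FLAG_LAST_TAG)
			break;
	}
	return nr;
}

/* Keep a log block number inside the circular journal area. */
#define wrap(journal, var)						\
do {									\
	if (var >= (journal)->j_last)					\
		var -= ((journal)->j_last - (journal)->j_first);	\
} while (0)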
Example no. 17
0
static errcode_t ext2fs_get_journal(ext2_filsys fs, journal_t **ret_journal)
{
	struct process_block_struct pb;
	struct ext2_super_block *sb = fs->super;
	struct ext2_super_block jsuper;
	struct buffer_head	*bh;
	struct inode		*j_inode = NULL;
	struct kdev_s		*dev_fs = NULL, *dev_journal;
	const char		*journal_name = 0;
	journal_t		*journal = NULL;
	errcode_t		retval = 0;
	io_manager		io_ptr = 0;
	unsigned long long	start = 0;
	int			ext_journal = 0;
	int			tried_backup_jnl = 0;

	retval = ext2fs_get_memzero(sizeof(journal_t), &journal);
	if (retval)
		return retval;

	retval = ext2fs_get_memzero(2 * sizeof(struct kdev_s), &dev_fs);
	if (retval)
		goto errout;
	dev_journal = dev_fs+1;

	dev_fs->k_fs = dev_journal->k_fs = fs;
	dev_fs->k_dev = K_DEV_FS;
	dev_journal->k_dev = K_DEV_JOURNAL;

	journal->j_dev = dev_journal;
	journal->j_fs_dev = dev_fs;
	journal->j_inode = NULL;
	journal->j_blocksize = fs->blocksize;

	if (uuid_is_null(sb->s_journal_uuid)) {
		if (!sb->s_journal_inum) {
			retval = EXT2_ET_BAD_INODE_NUM;
			goto errout;
		}
		retval = ext2fs_get_memzero(sizeof(*j_inode), &j_inode);
		if (retval)
			goto errout;

		j_inode->i_fs = fs;
		j_inode->i_ino = sb->s_journal_inum;

		retval = ext2fs_read_inode(fs, sb->s_journal_inum,
					   &j_inode->i_ext2);
		if (retval) {
try_backup_journal:
			if (sb->s_jnl_backup_type != EXT3_JNL_BACKUP_BLOCKS ||
			    tried_backup_jnl)
				goto errout;
			memset(&j_inode->i_ext2, 0, sizeof(struct ext2_inode));
			memcpy(&j_inode->i_ext2.i_block[0], sb->s_jnl_blocks,
			       EXT2_N_BLOCKS*4);
			j_inode->i_ext2.i_size_high = sb->s_jnl_blocks[15];
			j_inode->i_ext2.i_size = sb->s_jnl_blocks[16];
			j_inode->i_ext2.i_links_count = 1;
			j_inode->i_ext2.i_mode = LINUX_S_IFREG | 0600;
			tried_backup_jnl++;
		}
		if (!j_inode->i_ext2.i_links_count ||
		    !LINUX_S_ISREG(j_inode->i_ext2.i_mode)) {
			retval = EXT2_ET_NO_JOURNAL;
			goto try_backup_journal;
		}
		if (EXT2_I_SIZE(&j_inode->i_ext2) / journal->j_blocksize <
		    JFS_MIN_JOURNAL_BLOCKS) {
			retval = EXT2_ET_JOURNAL_TOO_SMALL;
			goto try_backup_journal;
		}
		pb.last_block = -1;
		retval = ext2fs_block_iterate3(fs, j_inode->i_ino,
					       BLOCK_FLAG_HOLE, 0,
					       process_journal_block, &pb);
		if ((pb.last_block + 1) * fs->blocksize <
		    (int) EXT2_I_SIZE(&j_inode->i_ext2)) {
			retval = EXT2_ET_JOURNAL_TOO_SMALL;
			goto try_backup_journal;
		}
		if (tried_backup_jnl && (fs->flags & EXT2_FLAG_RW)) {
			retval = ext2fs_write_inode(fs, sb->s_journal_inum,
						    &j_inode->i_ext2);
			if (retval)
				goto errout;
		}

		journal->j_maxlen = EXT2_I_SIZE(&j_inode->i_ext2) /
			journal->j_blocksize;

#ifdef USE_INODE_IO
		retval = ext2fs_inode_io_intern2(fs, sb->s_journal_inum,
						 &j_inode->i_ext2,
						 &journal_name);
		if (retval)
			goto errout;

		io_ptr = inode_io_manager;
#else
		journal->j_inode = j_inode;
		fs->journal_io = fs->io;
		retval = (errcode_t)journal_bmap(journal, 0, &start);
		if (retval)
			goto errout;
#endif
	} else {
		ext_journal = 1;
		if (!fs->journal_name) {
			char uuid[37];
			blkid_cache blkid;

			blkid_get_cache(&blkid, NULL);
			uuid_unparse(sb->s_journal_uuid, uuid);
			fs->journal_name = blkid_get_devname(blkid,
							      "UUID", uuid);
			if (!fs->journal_name)
				fs->journal_name = blkid_devno_to_devname(sb->s_journal_dev);
			blkid_put_cache(blkid);
		}
		journal_name = fs->journal_name;

		if (!journal_name) {
			retval = EXT2_ET_LOAD_EXT_JOURNAL;
			goto errout;
		}

		jfs_debug(1, "Using journal file %s\n", journal_name);
		io_ptr = unix_io_manager;
	}

#if 0
	test_io_backing_manager = io_ptr;
	io_ptr = test_io_manager;
#endif
#ifndef USE_INODE_IO
	if (ext_journal)
#endif
	{
		retval = io_ptr->open(journal_name, fs->flags & EXT2_FLAG_RW,
				      &fs->journal_io);
	}
	if (retval)
		goto errout;

	io_channel_set_blksize(fs->journal_io, fs->blocksize);

	if (ext_journal) {
		blk64_t maxlen;

		start = ext2fs_journal_sb_start(fs->blocksize) - 1;
		bh = getblk(dev_journal, start, fs->blocksize);
		if (!bh) {
			retval = EXT2_ET_NO_MEMORY;
			goto errout;
		}
		ll_rw_block(READ, 1, &bh);
		retval = bh->b_err;
		if (retval) {
			brelse(bh);
			goto errout;
		}
		memcpy(&jsuper, start ? bh->b_data :
				bh->b_data + SUPERBLOCK_OFFSET,
		       sizeof(jsuper));
#ifdef WORDS_BIGENDIAN
		if (jsuper.s_magic == ext2fs_swab16(EXT2_SUPER_MAGIC))
			ext2fs_swap_super(&jsuper);
#endif
		if (jsuper.s_magic != EXT2_SUPER_MAGIC ||
		    !ext2fs_has_feature_journal_dev(&jsuper)) {
			retval = EXT2_ET_LOAD_EXT_JOURNAL;
			brelse(bh);
			goto errout;
		}
		/* Make sure the journal UUID is correct */
		if (memcmp(jsuper.s_uuid, fs->super->s_journal_uuid,
			   sizeof(jsuper.s_uuid))) {
			retval = EXT2_ET_LOAD_EXT_JOURNAL;
			brelse(bh);
			goto errout;
		}

		/* Check the superblock checksum */
		if (ext2fs_has_feature_metadata_csum(&jsuper)) {
			struct struct_ext2_filsys fsx;
			struct ext2_super_block	superx;
			void *p;

			p = start ? bh->b_data : bh->b_data + SUPERBLOCK_OFFSET;
			memcpy(&fsx, fs, sizeof(fsx));
			memcpy(&superx, fs->super, sizeof(superx));
			fsx.super = &superx;
			ext2fs_set_feature_metadata_csum(fsx.super);
			if (!ext2fs_superblock_csum_verify(&fsx, p)) {
				retval = EXT2_ET_LOAD_EXT_JOURNAL;
				brelse(bh);
				goto errout;
			}
		}
		brelse(bh);

		maxlen = ext2fs_blocks_count(&jsuper);
		journal->j_maxlen = (maxlen < 1ULL << 32) ? maxlen :
				    (1ULL << 32) - 1;
		start++;
	}

	bh = getblk(dev_journal, start, journal->j_blocksize);
	if (!bh) {
		retval = EXT2_ET_NO_MEMORY;
		goto errout;
	}

	journal->j_sb_buffer = bh;
	journal->j_superblock = (journal_superblock_t *)bh->b_data;

#ifdef USE_INODE_IO
	if (j_inode)
		ext2fs_free_mem(&j_inode);
#endif

	*ret_journal = journal;
	return 0;

errout:
	if (dev_fs)
		ext2fs_free_mem(&dev_fs);
	if (j_inode)
		ext2fs_free_mem(&j_inode);
	if (journal)
		ext2fs_free_mem(&journal);
	return retval;
}
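Both this example and the next pass a process_journal_block() callback to ext2fs_block_iterate3() in order to fill pb.last_block, but the callback itself is not shown. Below is a hedged sketch of what it might look like, assuming a minimal process_block_struct that only tracks the highest logical block seen; the real e2fsprogs callback may carry additional state.

/* Sketch only: record the highest logical block of the journal inode so
 * the caller can compare (last_block + 1) * blocksize against the inode
 * size, and abort on obviously bad physical block numbers. */
struct process_block_struct {
	e2_blkcnt_t	last_block;
};

static int process_journal_block(ext2_filsys fs,
				 blk64_t *block_nr,
				 e2_blkcnt_t blockcnt,
				 blk64_t ref_block,
				 int ref_offset,
				 void *priv_data)
{
	struct process_block_struct *p = priv_data;
	blk64_t blk = *block_nr;

	if (!blk || blk < fs->super->s_first_data_block ||
	    blk >= ext2fs_blocks_count(fs->super))
		return BLOCK_ABORT;

	/* Metadata blocks have a negative blockcnt and are ignored. */
	if (blockcnt >= 0)
		p->last_block = blockcnt;
	return 0;
}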
Example no. 18
0
static errcode_t e2fsck_get_journal(e2fsck_t ctx, journal_t **ret_journal)
{
	struct process_block_struct pb;
	struct ext2_super_block *sb = ctx->fs->super;
	struct ext2_super_block jsuper;
	struct problem_context	pctx;
	struct buffer_head 	*bh;
	struct inode		*j_inode = NULL;
	struct kdev_s		*dev_fs = NULL, *dev_journal;
	const char		*journal_name = 0;
	journal_t		*journal = NULL;
	errcode_t		retval = 0;
	io_manager		io_ptr = 0;
	unsigned long long	start = 0;
	int			ext_journal = 0;
	int			tried_backup_jnl = 0;

	clear_problem_context(&pctx);

	journal = e2fsck_allocate_memory(ctx, sizeof(journal_t), "journal");
	if (!journal) {
		return EXT2_ET_NO_MEMORY;
	}

	dev_fs = e2fsck_allocate_memory(ctx, 2*sizeof(struct kdev_s), "kdev");
	if (!dev_fs) {
		retval = EXT2_ET_NO_MEMORY;
		goto errout;
	}
	dev_journal = dev_fs+1;

	dev_fs->k_ctx = dev_journal->k_ctx = ctx;
	dev_fs->k_dev = K_DEV_FS;
	dev_journal->k_dev = K_DEV_JOURNAL;

	journal->j_dev = dev_journal;
	journal->j_fs_dev = dev_fs;
	journal->j_inode = NULL;
	journal->j_blocksize = ctx->fs->blocksize;

	if (uuid_is_null(sb->s_journal_uuid)) {
		if (!sb->s_journal_inum) {
			retval = EXT2_ET_BAD_INODE_NUM;
			goto errout;
		}
		j_inode = e2fsck_allocate_memory(ctx, sizeof(*j_inode),
						 "journal inode");
		if (!j_inode) {
			retval = EXT2_ET_NO_MEMORY;
			goto errout;
		}

		j_inode->i_ctx = ctx;
		j_inode->i_ino = sb->s_journal_inum;

		if ((retval = ext2fs_read_inode(ctx->fs,
						sb->s_journal_inum,
						&j_inode->i_ext2))) {
		try_backup_journal:
			if (sb->s_jnl_backup_type != EXT3_JNL_BACKUP_BLOCKS ||
			    tried_backup_jnl)
				goto errout;
			memset(&j_inode->i_ext2, 0, sizeof(struct ext2_inode));
			memcpy(&j_inode->i_ext2.i_block[0], sb->s_jnl_blocks,
			       EXT2_N_BLOCKS*4);
			j_inode->i_ext2.i_size_high = sb->s_jnl_blocks[15];
			j_inode->i_ext2.i_size = sb->s_jnl_blocks[16];
			j_inode->i_ext2.i_links_count = 1;
			j_inode->i_ext2.i_mode = LINUX_S_IFREG | 0600;
			e2fsck_use_inode_shortcuts(ctx, 1);
			ctx->stashed_ino = j_inode->i_ino;
			ctx->stashed_inode = &j_inode->i_ext2;
			tried_backup_jnl++;
		}
		if (!j_inode->i_ext2.i_links_count ||
		    !LINUX_S_ISREG(j_inode->i_ext2.i_mode)) {
			retval = EXT2_ET_NO_JOURNAL;
			goto try_backup_journal;
		}
		if (EXT2_I_SIZE(&j_inode->i_ext2) / journal->j_blocksize <
		    JFS_MIN_JOURNAL_BLOCKS) {
			retval = EXT2_ET_JOURNAL_TOO_SMALL;
			goto try_backup_journal;
		}
		pb.last_block = -1;
		retval = ext2fs_block_iterate3(ctx->fs, j_inode->i_ino,
					       BLOCK_FLAG_HOLE, 0,
					       process_journal_block, &pb);
		if ((pb.last_block + 1) * ctx->fs->blocksize <
		    (int) EXT2_I_SIZE(&j_inode->i_ext2)) {
			retval = EXT2_ET_JOURNAL_TOO_SMALL;
			goto try_backup_journal;
		}
		if (tried_backup_jnl && !(ctx->options & E2F_OPT_READONLY)) {
			retval = ext2fs_write_inode(ctx->fs, sb->s_journal_inum,
						    &j_inode->i_ext2);
			if (retval)
				goto errout;
		}

		journal->j_maxlen = EXT2_I_SIZE(&j_inode->i_ext2) /
			journal->j_blocksize;

#ifdef USE_INODE_IO
		retval = ext2fs_inode_io_intern2(ctx->fs, sb->s_journal_inum,
						 &j_inode->i_ext2,
						 &journal_name);
		if (retval)
			goto errout;

		io_ptr = inode_io_manager;
#else
		journal->j_inode = j_inode;
		ctx->journal_io = ctx->fs->io;
		if ((retval = (errcode_t) journal_bmap(journal, 0, &start)) != 0)
			goto errout;
#endif
	} else {
		ext_journal = 1;
		if (!ctx->journal_name) {
			char uuid[37];

			uuid_unparse(sb->s_journal_uuid, uuid);
			ctx->journal_name = blkid_get_devname(ctx->blkid,
							      "UUID", uuid);
			if (!ctx->journal_name)
				ctx->journal_name = blkid_devno_to_devname(sb->s_journal_dev);
		}
		journal_name = ctx->journal_name;

		if (!journal_name) {
			fix_problem(ctx, PR_0_CANT_FIND_JOURNAL, &pctx);
			retval = EXT2_ET_LOAD_EXT_JOURNAL;
			goto errout;
		}

		jfs_debug(1, "Using journal file %s\n", journal_name);
		io_ptr = unix_io_manager;
	}

#if 0
	test_io_backing_manager = io_ptr;
	io_ptr = test_io_manager;
#endif
#ifndef USE_INODE_IO
	if (ext_journal)
#endif
	{
		int flags = IO_FLAG_RW;
		if (!(ctx->mount_flags & EXT2_MF_ISROOT &&
		      ctx->mount_flags & EXT2_MF_READONLY))
			flags |= IO_FLAG_EXCLUSIVE;
		if ((ctx->mount_flags & EXT2_MF_READONLY) &&
		    (ctx->options & E2F_OPT_FORCE))
			flags &= ~IO_FLAG_EXCLUSIVE;


		retval = io_ptr->open(journal_name, flags,
				      &ctx->journal_io);
	}
	if (retval)
		goto errout;

	io_channel_set_blksize(ctx->journal_io, ctx->fs->blocksize);

	if (ext_journal) {
		blk64_t maxlen;

		start = ext2fs_journal_sb_start(ctx->fs->blocksize) - 1;
		bh = getblk(dev_journal, start, ctx->fs->blocksize);
		if (!bh) {
			retval = EXT2_ET_NO_MEMORY;
			goto errout;
		}
		ll_rw_block(READ, 1, &bh);
		if ((retval = bh->b_err) != 0) {
			brelse(bh);
			goto errout;
		}
		memcpy(&jsuper, start ? bh->b_data :  bh->b_data + SUPERBLOCK_OFFSET,
		       sizeof(jsuper));
#ifdef WORDS_BIGENDIAN
		if (jsuper.s_magic == ext2fs_swab16(EXT2_SUPER_MAGIC))
			ext2fs_swap_super(&jsuper);
#endif
		if (jsuper.s_magic != EXT2_SUPER_MAGIC ||
		    !(jsuper.s_feature_incompat & EXT3_FEATURE_INCOMPAT_JOURNAL_DEV)) {
			fix_problem(ctx, PR_0_EXT_JOURNAL_BAD_SUPER, &pctx);
			retval = EXT2_ET_LOAD_EXT_JOURNAL;
			brelse(bh);
			goto errout;
		}
		/* Make sure the journal UUID is correct */
		if (memcmp(jsuper.s_uuid, ctx->fs->super->s_journal_uuid,
			   sizeof(jsuper.s_uuid))) {
			fix_problem(ctx, PR_0_JOURNAL_BAD_UUID, &pctx);
			retval = EXT2_ET_LOAD_EXT_JOURNAL;
			brelse(bh);
			goto errout;
		}

		/* Check the superblock checksum */
		if (jsuper.s_feature_ro_compat &
		    EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) {
			struct struct_ext2_filsys fsx;
			struct ext2_super_block	superx;
			void *p;

			p = start ? bh->b_data : bh->b_data + SUPERBLOCK_OFFSET;
			memcpy(&fsx, ctx->fs, sizeof(fsx));
			memcpy(&superx, ctx->fs->super, sizeof(superx));
			fsx.super = &superx;
			fsx.super->s_feature_ro_compat |=
					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM;
			if (!ext2fs_superblock_csum_verify(&fsx, p) &&
			    fix_problem(ctx, PR_0_EXT_JOURNAL_SUPER_CSUM_INVALID,
					&pctx)) {
				ext2fs_superblock_csum_set(&fsx, p);
				mark_buffer_dirty(bh);
			}
		}
		brelse(bh);

		maxlen = ext2fs_blocks_count(&jsuper);
		journal->j_maxlen = (maxlen < 1ULL << 32) ? maxlen : (1ULL << 32) - 1;
		start++;
	}

	if (!(bh = getblk(dev_journal, start, journal->j_blocksize))) {
		retval = EXT2_ET_NO_MEMORY;
		goto errout;
	}

	journal->j_sb_buffer = bh;
	journal->j_superblock = (journal_superblock_t *)bh->b_data;

#ifdef USE_INODE_IO
	if (j_inode)
		ext2fs_free_mem(&j_inode);
#endif

	*ret_journal = journal;
	e2fsck_use_inode_shortcuts(ctx, 0);
	return 0;

errout:
	e2fsck_use_inode_shortcuts(ctx, 0);
	if (dev_fs)
		ext2fs_free_mem(&dev_fs);
	if (j_inode)
		ext2fs_free_mem(&j_inode);
	if (journal)
		ext2fs_free_mem(&journal);
	return retval;
}
Example no. 19
0
static int start_this_handle(journal_t *journal, handle_t *handle)
{
	transaction_t *transaction;
	int needed;
	int nblocks = handle->h_buffer_credits;
	
	jfs_debug(4, "New handle %p going live.\n", handle);

repeat:

	lock_journal(journal);

	if ((journal->j_flags & JFS_ABORT) ||
	    (journal->j_errno != 0 && !(journal->j_flags & JFS_ACK_ERR))) {
		unlock_journal(journal);
		return -EROFS; 
	}

	/* Wait on the journal's transaction barrier if necessary */
	if (journal->j_barrier_count) {
		unlock_journal(journal);
		sleep_on(&journal->j_wait_transaction_locked);
		goto repeat;
	}
	
repeat_locked:
	if (!journal->j_running_transaction)
		get_transaction(journal);
	/* @@@ Error? */
	J_ASSERT(journal->j_running_transaction);
	
	transaction = journal->j_running_transaction;

	/* If the current transaction is locked down for commit, wait
	 * for the lock to be released. */

	if (transaction->t_state == T_LOCKED) {
		unlock_journal(journal);
		jfs_debug(3, "Handle %p stalling...\n", handle);
		sleep_on(&journal->j_wait_transaction_locked);
		goto repeat;
	}
	
	/* If there is not enough space left in the log to write all
	 * potential buffers requested by this operation, we need to
	 * stall pending a log checkpoint to free some more log
	 * space. */

	needed = transaction->t_outstanding_credits + nblocks;

	if (needed > journal->j_max_transaction_buffers) {
		/* If the current transaction is already too large, then
		 * start to commit it: we can then go back and attach
		 * this handle to a new transaction. */
		
		jfs_debug(2, "Handle %p starting new commit...\n", handle);
		log_start_commit(journal, transaction);
		unlock_journal(journal);
		sleep_on(&journal->j_wait_transaction_locked);
		lock_journal(journal);
		goto repeat_locked;
	}

	/* 
	 * The commit code assumes that it can get enough log space
	 * without forcing a checkpoint.  This is *critical* for
	 * correctness: a checkpoint of a buffer which is also
	 * associated with a committing transaction creates a deadlock,
	 * so commit simply cannot force through checkpoints.
	 *
	 * We must therefore ensure the necessary space in the journal
	 * *before* starting to dirty potentially checkpointed buffers
	 * in the new transaction. 
	 *
	 * The worst part is, any transaction currently committing can
	 * reduce the free space arbitrarily.  Be careful to account for
	 * those buffers when checkpointing.
	 */

	needed = journal->j_max_transaction_buffers;
	if (journal->j_committing_transaction) 
		needed += journal->j_committing_transaction->t_outstanding_credits;
	
	if (log_space_left(journal) < needed) {
		jfs_debug(2, "Handle %p waiting for checkpoint...\n", handle);
		log_wait_for_space(journal, needed);
		goto repeat_locked;
	}

	/* OK, account for the buffers that this operation expects to
	 * use and add the handle to the running transaction. */

	handle->h_transaction = transaction;
	transaction->t_outstanding_credits += nblocks;
	transaction->t_updates++;
	jfs_debug(4, "Handle %p given %d credits (total %d, free %d)\n",
		  handle, nblocks, transaction->t_outstanding_credits,
		  log_space_left(journal));

	unlock_journal(journal);
	
	return 0;
}
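start_this_handle() is normally reached through a journal_start() entry point that allocates the handle (or takes another reference on the task's existing one) and attaches it to a running transaction. The sketch below is a hypothetical version consistent with the handle fields used elsewhere in this corpus (h_ref, h_buffer_credits, current->j_handle, kfree() in journal_stop()); the real implementation may differ.

/* Hedged sketch of journal_start(): allocate and initialise a handle,
 * then bind it to a running transaction via start_this_handle(). */
handle_t *journal_start(journal_t *journal, int nblocks)
{
	handle_t *handle = current->j_handle;
	int err;

	if (!journal)
		return ERR_PTR(-EROFS);

	if (handle) {
		/* Nested journal_start(): just take another reference. */
		J_ASSERT(handle->h_transaction->t_journal == journal);
		handle->h_ref++;
		return handle;
	}

	handle = kmalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	memset(handle, 0, sizeof(*handle));

	handle->h_buffer_credits = nblocks;
	handle->h_ref = 1;
	current->j_handle = handle;

	err = start_this_handle(journal, handle);
	if (err < 0) {
		current->j_handle = NULL;
		kfree(handle);
		return ERR_PTR(err);
	}
	return handle;
}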