Example 1
static int nilfs_palloc_get_block(struct inode *inode, unsigned long blkoff,
				  int create,
				  void (*init_block)(struct inode *,
						     struct buffer_head *,
						     void *),
				  struct buffer_head **bhp,
				  struct nilfs_bh_assoc *prev,
				  spinlock_t *lock)
{
	int ret;

	spin_lock(lock);
	if (prev->bh && blkoff == prev->blkoff) {
		get_bh(prev->bh);
		*bhp = prev->bh;
		spin_unlock(lock);
		return 0;
	}
	spin_unlock(lock);

	ret = nilfs_mdt_get_block(inode, blkoff, create, init_block, bhp);
	if (!ret) {
		spin_lock(lock);
		/*
		 * The following code must be safe for change of the
		 * cache contents during the get block call.
		 */
		brelse(prev->bh);
		get_bh(*bhp);
		prev->bh = *bhp;
		prev->blkoff = blkoff;
		spin_unlock(lock);
	}
	return ret;
}
Example 2
static int
nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff,
		       int mode, struct buffer_head **out_bh)
{
	struct buffer_head *bh;
	unsigned long blknum = 0;
	int ret = -ENOMEM;

	bh = nilfs_grab_buffer(inode, inode->i_mapping, blkoff, 0);
	if (unlikely(!bh))
		goto failed;

	ret = -EEXIST; /* internal code */
	if (buffer_uptodate(bh))
		goto out;

	if (mode == READA) {
		if (!trylock_buffer(bh)) {
			ret = -EBUSY;
			goto failed_bh;
		}
	} else /* mode == READ */
		lock_buffer(bh);

	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		goto out;
	}
	if (!buffer_mapped(bh)) { /* unused buffer */
		ret = nilfs_bmap_lookup(NILFS_I(inode)->i_bmap, blkoff,
					&blknum);
		if (unlikely(ret)) {
			unlock_buffer(bh);
			goto failed_bh;
		}
		bh->b_bdev = NILFS_MDT(inode)->mi_nilfs->ns_bdev;
		bh->b_blocknr = blknum;
		set_buffer_mapped(bh);
	}

	bh->b_end_io = end_buffer_read_sync;
	get_bh(bh);
	submit_bh(mode, bh);
	ret = 0;
 out:
	get_bh(bh);
	*out_bh = bh;

 failed_bh:
	unlock_page(bh->b_page);
	page_cache_release(bh->b_page);
	brelse(bh);
 failed:
	return ret;
}
Example 3
static int __process_buffer(journal_t *journal, struct journal_head *jh,
                            int *batch_count, transaction_t *transaction)
{
    struct buffer_head *bh = jh2bh(jh);
    int ret = 0;

    if (buffer_locked(bh)) {
        get_bh(bh);
        spin_unlock(&journal->j_list_lock);
        wait_on_buffer(bh);

        BUFFER_TRACE(bh, "brelse");
        __brelse(bh);
        ret = 1;
    } else if (jh->b_transaction != NULL) {
        transaction_t *t = jh->b_transaction;
        tid_t tid = t->t_tid;

        transaction->t_chp_stats.cs_forced_to_close++;
        spin_unlock(&journal->j_list_lock);
        if (unlikely(journal->j_flags & JBD2_UNMOUNT))
            printk(KERN_ERR "JBD2: %s: "
                   "Waiting for Godot: block %llu\n",
                   journal->j_devname,
                   (unsigned long long) bh->b_blocknr);
        jbd2_log_start_commit(journal, tid);
        jbd2_log_wait_commit(journal, tid);
        ret = 1;
    } else if (!buffer_dirty(bh)) {
        ret = 1;
        if (unlikely(buffer_write_io_error(bh)))
            ret = -EIO;
        get_bh(bh);
        BUFFER_TRACE(bh, "remove from checkpoint");
        __jbd2_journal_remove_checkpoint(jh);
        spin_unlock(&journal->j_list_lock);
        __brelse(bh);
    } else {
        BUFFER_TRACE(bh, "queue");
        get_bh(bh);
        J_ASSERT_BH(bh, !buffer_jwrite(bh));
        journal->j_chkpt_bhs[*batch_count] = bh;
        __buffer_relink_io(jh);
        transaction->t_chp_stats.cs_written++;
        (*batch_count)++;
        if (*batch_count == JBD2_NR_BATCH) {
            spin_unlock(&journal->j_list_lock);
            __flush_batch(journal, batch_count);
            ret = 1;
        }
    }
    return ret;
}
Example 4
File: mdt.c Project: AK101111/linux
static int
nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff,
		       int mode, int mode_flags, struct buffer_head **out_bh)
{
	struct buffer_head *bh;
	__u64 blknum = 0;
	int ret = -ENOMEM;

	bh = nilfs_grab_buffer(inode, inode->i_mapping, blkoff, 0);
	if (unlikely(!bh))
		goto failed;

	ret = -EEXIST; /* internal code */
	if (buffer_uptodate(bh))
		goto out;

	if (mode_flags & REQ_RAHEAD) {
		if (!trylock_buffer(bh)) {
			ret = -EBUSY;
			goto failed_bh;
		}
	} else /* mode == READ */
		lock_buffer(bh);

	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		goto out;
	}

	ret = nilfs_bmap_lookup(NILFS_I(inode)->i_bmap, blkoff, &blknum);
	if (unlikely(ret)) {
		unlock_buffer(bh);
		goto failed_bh;
	}
	map_bh(bh, inode->i_sb, (sector_t)blknum);

	bh->b_end_io = end_buffer_read_sync;
	get_bh(bh);
	submit_bh(mode, mode_flags, bh);
	ret = 0;

	trace_nilfs2_mdt_submit_block(inode, inode->i_ino, blkoff, mode);
 out:
	get_bh(bh);
	*out_bh = bh;

 failed_bh:
	unlock_page(bh->b_page);
	put_page(bh->b_page);
	brelse(bh);
 failed:
	return ret;
}
Example 5
int nilfs_load_inode_block(struct nilfs_sb_info *sbi, struct inode *inode,
			   struct buffer_head **pbh)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	spin_lock(&sbi->s_inode_lock);
	/* Caller of this function MUST lock s_inode_lock */
	if (ii->i_bh == NULL) {
		spin_unlock(&sbi->s_inode_lock);
		err = nilfs_ifile_get_inode_block(sbi->s_ifile, inode->i_ino,
						  pbh);
		if (unlikely(err))
			return err;
		spin_lock(&sbi->s_inode_lock);
		if (ii->i_bh == NULL)
			ii->i_bh = *pbh;
		else {
			brelse(*pbh);
			*pbh = ii->i_bh;
		}
	} else
		*pbh = ii->i_bh;

	get_bh(*pbh);
	spin_unlock(&sbi->s_inode_lock);
	return 0;
}
Example 6
int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	spin_lock(&nilfs->ns_inode_lock);
	if (ii->i_bh == NULL) {
		spin_unlock(&nilfs->ns_inode_lock);
		err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
						  inode->i_ino, pbh);
		if (unlikely(err))
			return err;
		spin_lock(&nilfs->ns_inode_lock);
		if (ii->i_bh == NULL)
			ii->i_bh = *pbh;
		else {
			brelse(*pbh);
			*pbh = ii->i_bh;
		}
	} else
		*pbh = ii->i_bh;

	get_bh(*pbh);
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}
Example 7
/*
 * Write the MMP block using WRITE_SYNC to try to get the block on-disk
 * faster.
 */
static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
{
	struct mmp_struct *mmp = (struct mmp_struct *)(bh->b_data);

	/*
	 * We protect against freezing so that we don't create dirty buffers
	 * on frozen filesystem.
	 */
	sb_start_write(sb);
	ext4_mmp_csum_set(sb, mmp);
	mark_buffer_dirty(bh);
	lock_buffer(bh);
	bh->b_end_io = end_buffer_write_sync;
	get_bh(bh);
#ifdef FEATURE_STORAGE_META_LOG
	if( bh && bh->b_bdev && bh->b_bdev->bd_disk)
		set_metadata_rw_status(bh->b_bdev->bd_disk->first_minor, WAIT_WRITE_CNT);
#endif
	submit_bh(WRITE_SYNC | REQ_META | REQ_PRIO, bh);
	wait_on_buffer(bh);
	sb_end_write(sb);
	if (unlikely(!buffer_uptodate(bh)))
		return 1;

	return 0;
}
Example 8
static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
			  ext4_fsblk_t mmp_block)
{
	struct mmp_struct *mmp;

	if (*bh)
		clear_buffer_uptodate(*bh);

	if (!*bh)
		*bh = sb_getblk(sb, mmp_block);
	if (!*bh)
		return -ENOMEM;
	if (*bh) {
		get_bh(*bh);
		lock_buffer(*bh);
		(*bh)->b_end_io = end_buffer_read_sync;
		submit_bh(READ_SYNC, *bh);
		wait_on_buffer(*bh);
		if (!buffer_uptodate(*bh)) {
			brelse(*bh);
			*bh = NULL;
		}
	}
	if (!*bh) {
		ext4_warning(sb, "Error while reading MMP block %llu",
			     mmp_block);
		return -EIO;
	}

	mmp = (struct mmp_struct *)((*bh)->b_data);
	if (le32_to_cpu(mmp->mmp_magic) != EXT4_MMP_MAGIC)
		return -EINVAL;

	return 0;
}
Example 9
int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
		   struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct buffer_head *bh;

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	*bhp = bh = gfs2_getbuf(gl, blkno, CREATE);

	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}
	bh->b_end_io = end_buffer_read_sync;
	get_bh(bh);
	submit_bh(READ_SYNC | (1 << BIO_RW_META), bh);
	if (!(flags & DIO_WAIT))
		return 0;

	wait_on_buffer(bh);
	if (unlikely(!buffer_uptodate(bh))) {
		struct gfs2_trans *tr = current->journal_info;
		if (tr && tr->tr_touched)
			gfs2_io_error_bh(sdp, bh);
		brelse(bh);
		return -EIO;
	}

	return 0;
}
Example 10
int reiserfs_remove_entry (reiserfs_filsys_t fs, struct key * key)
{
    struct path path;
    struct tree_balance tb;
    struct item_head * ih;
    struct reiserfs_de_head * deh;

    if (_search_by_entry_key (fs, key, &path) != POSITION_FOUND) {
	pathrelse (&path);
	return 1;
    }

    ih = get_ih (&path);
    if (ih_entry_count (ih) == 1) {
	_init_tb_struct (&tb, fs, &path, -(IH_SIZE + ih_item_len (ih)));
	if (fix_nodes (M_DELETE, &tb, 0) != CARRY_ON) {
	    unfix_nodes (&tb);
	    return 1;
	}
	do_balance (&tb, 0, 0, M_DELETE, 0);
	return 0;
    }

    deh = B_I_DEH (get_bh (&path), ih) + path.pos_in_item;
    _init_tb_struct (&tb, fs, &path, -(DEH_SIZE + entry_length (ih, deh, path.pos_in_item)));
    if (fix_nodes (M_CUT, &tb, 0) != CARRY_ON) {
	unfix_nodes (&tb);
	return 1;
    }
    do_balance (&tb, 0, 0, M_CUT, 0);
    return 0;
}
Example 11
static int nilfs_mdt_create_block(struct inode *inode, unsigned long block,
				  struct buffer_head **out_bh,
				  void (*init_block)(struct inode *,
						     struct buffer_head *,
						     void *))
{
	struct the_nilfs *nilfs = NILFS_MDT(inode)->mi_nilfs;
	struct super_block *sb = inode->i_sb;
	struct nilfs_transaction_info ti;
	struct buffer_head *bh;
	int err;

	if (!sb) {
		/*
		 * Make sure this function is not called from any
		 * read-only context.
		 */
		if (!nilfs->ns_writer) {
			WARN_ON(1);
			err = -EROFS;
			goto out;
		}
		sb = nilfs->ns_writer->s_super;
	}

	nilfs_transaction_begin(sb, &ti, 0);

	err = -ENOMEM;
	bh = nilfs_grab_buffer(inode, inode->i_mapping, block, 0);
	if (unlikely(!bh))
		goto failed_unlock;

	err = -EEXIST;
	if (buffer_uptodate(bh))
		goto failed_bh;

	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		goto failed_bh;

	bh->b_bdev = nilfs->ns_bdev;
	err = nilfs_mdt_insert_new_block(inode, block, bh, init_block);
	if (likely(!err)) {
		get_bh(bh);
		*out_bh = bh;
	}

 failed_bh:
	unlock_page(bh->b_page);
	page_cache_release(bh->b_page);
	brelse(bh);

 failed_unlock:
	if (likely(!err))
		err = nilfs_transaction_commit(sb);
	else
		nilfs_transaction_abort(sb);
 out:
	return err;
}
Example 12
/*
 * We were unable to perform jbd_trylock_bh_state() inside j_list_lock.
 * The caller must restart a list walk.  Wait for someone else to run
 * jbd_unlock_bh_state().
 */
static void jbd_sync_bh(journal_t *journal, struct buffer_head *bh)
{
	get_bh(bh);
	spin_unlock(&journal->j_list_lock);
	jbd_lock_bh_state(bh);
	jbd_unlock_bh_state(bh);
	put_bh(bh);
}
Example 13
/*
 * Try to flush one buffer from the checkpoint list to disk.
 *
 * Return 1 if something happened which requires us to abort the current
 * scan of the checkpoint list.  
 *
 * Called with j_list_lock held and drops it if 1 is returned
 * Called under jbd_lock_bh_state(jh2bh(jh)), and drops it
 */
static int __process_buffer(journal_t *journal, struct journal_head *jh,
			struct buffer_head **bhs, int *batch_count)
{
	struct buffer_head *bh = jh2bh(jh);
	int ret = 0;

	if (buffer_locked(bh)) {
		atomic_inc(&bh->b_count);
		spin_unlock(&journal->j_list_lock);
		jbd_unlock_bh_state(bh);
		wait_on_buffer(bh);
		/* the journal_head may have gone by now */
		BUFFER_TRACE(bh, "brelse");
		__brelse(bh);
		ret = 1;
	} else if (jh->b_transaction != NULL) {
		transaction_t *t = jh->b_transaction;
		tid_t tid = t->t_tid;

		spin_unlock(&journal->j_list_lock);
		jbd_unlock_bh_state(bh);
		log_start_commit(journal, tid);
		log_wait_commit(journal, tid);
		ret = 1;
	} else if (!buffer_dirty(bh)) {
		J_ASSERT_JH(jh, !buffer_jbddirty(bh));
		BUFFER_TRACE(bh, "remove from checkpoint");
		__journal_remove_checkpoint(jh);
		spin_unlock(&journal->j_list_lock);
		jbd_unlock_bh_state(bh);
		journal_remove_journal_head(bh);
		__brelse(bh);
		ret = 1;
	} else {
		/*
		 * Important: we are about to write the buffer, and
		 * possibly block, while still holding the journal lock.
		 * We cannot afford to let the transaction logic start
		 * messing around with this buffer before we write it to
		 * disk, as that would break recoverability.  
		 */
		BUFFER_TRACE(bh, "queue");
		get_bh(bh);
		J_ASSERT_BH(bh, !buffer_jwrite(bh));
		set_buffer_jwrite(bh);
		bhs[*batch_count] = bh;
		__buffer_relink_io(jh);
		jbd_unlock_bh_state(bh);
		(*batch_count)++;
		if (*batch_count == NR_BATCH) {
			spin_unlock(&journal->j_list_lock);
			__flush_batch(journal, bhs, batch_count);
			ret = 1;
		}
	}
	return ret;
}
Example 14
static int ufs_fmp_run(struct device *dev, uint32_t mode, uint8_t *data,
			uint32_t len, uint32_t write)
{
	int ret = 0;
	struct ufs_hba *hba;
	struct ufs_fmp_work *work;
	struct Scsi_Host *host;
	static struct buffer_head *bh;

	work = dev_get_drvdata(dev);
	if (!work) {
		dev_err(dev, "Fail to get work from platform device\n");
		return -ENODEV;
	}
	host = work->host;
	hba = shost_priv(host);
	hba->self_test_mode = mode;

	bh = __getblk(work->bdev, work->sector, FMP_BLK_SIZE);
	if (!bh) {
		dev_err(dev, "Fail to get block from bdev\n");
		return -ENODEV;
	}
	hba->self_test_bh = bh;

	get_bh(bh);
	if (write == WRITE_MODE) {
		memcpy(bh->b_data, data, len);
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
		if (buffer_req(bh) && !buffer_uptodate(bh)) {
			dev_err(dev, "IO error syncing for FMP fips write\n");
			ret = -EIO;
			goto out;
		}
		memset(bh->b_data, 0, FMP_BLK_SIZE);
	} else {
		lock_buffer(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(READ_SYNC, bh);
		wait_on_buffer(bh);
		if (unlikely(!buffer_uptodate(bh))) {
			ret = -EIO;
			goto out;
		}
		memcpy(data, bh->b_data, len);
	}
out:
	hba->self_test_mode = 0;
	hba->self_test_bh = NULL;
	put_bh(bh);

	return ret;
}
Example 15
int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
			      sector_t pblocknr, struct buffer_head **pbh)
{
	struct buffer_head *bh;
	struct inode *inode = NILFS_BTNC_I(btnc);
	int err;

	bh = nilfs_grab_buffer(inode, btnc, blocknr, 1 << BH_NILFS_Node);
	if (unlikely(!bh))
		return -ENOMEM;

	err = -EEXIST; /* internal code */

	if (buffer_uptodate(bh) || buffer_dirty(bh))
		goto found;

	if (pblocknr == 0) {
		pblocknr = blocknr;
		if (inode->i_ino != NILFS_DAT_INO) {
			struct inode *dat =
				nilfs_dat_inode(NILFS_I_NILFS(inode));

			/* blocknr is a virtual block number */
			err = nilfs_dat_translate(dat, blocknr, &pblocknr);
			if (unlikely(err)) {
				brelse(bh);
				goto out_locked;
			}
		}
	}
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		err = -EEXIST; /* internal code */
		goto found;
	}
	set_buffer_mapped(bh);
	bh->b_bdev = NILFS_I_NILFS(inode)->ns_bdev;
	bh->b_blocknr = pblocknr; /* set block address for read */
	bh->b_end_io = end_buffer_read_sync;
	get_bh(bh);
	submit_bh(READ, bh);
	bh->b_blocknr = blocknr; /* set back to the given block address */
	err = 0;
found:
	*pbh = bh;

out_locked:
	unlock_page(bh->b_page);
	page_cache_release(bh->b_page);
	return err;
}
Example 16
File: acl.c Project: acton393/linux
/*
 * Helper function to set i_mode in memory and disk. Some call paths
 * will not have di_bh or a journal handle to pass, in which case it
 * will create it's own.
 */
static int ocfs2_acl_set_mode(struct inode *inode, struct buffer_head *di_bh,
			      handle_t *handle, umode_t new_mode)
{
	int ret, commit_handle = 0;
	struct ocfs2_dinode *di;

	if (di_bh == NULL) {
		ret = ocfs2_read_inode_block(inode, &di_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	} else
		get_bh(di_bh);

	if (handle == NULL) {
		handle = ocfs2_start_trans(OCFS2_SB(inode->i_sb),
					   OCFS2_INODE_UPDATE_CREDITS);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			mlog_errno(ret);
			goto out_brelse;
		}

		commit_handle = 1;
	}

	di = (struct ocfs2_dinode *)di_bh->b_data;
	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	inode->i_mode = new_mode;
	inode->i_ctime = current_time(inode);
	di->i_mode = cpu_to_le16(inode->i_mode);
	di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ocfs2_update_inode_fsync_trans(handle, inode, 0);

	ocfs2_journal_dirty(handle, di_bh);

out_commit:
	if (commit_handle)
		ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
out_brelse:
	brelse(di_bh);
out:
	return ret;
}
Example 17
void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh)
{
	struct buffer_head *bh = jh2bh(jh);

	/* Get a reference so the buffer cannot be freed before we unlock it */
	get_bh(bh);
	jbd_lock_bh_state(bh);
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_refile_buffer(jh);
	jbd_unlock_bh_state(bh);
	spin_unlock(&journal->j_list_lock);
	__brelse(bh);
}
Example 18
/*
 * Write the MMP block using WRITE_SYNC to try to get the block on-disk
 * faster.
 */
static int write_mmp_block(struct buffer_head *bh)
{
	mark_buffer_dirty(bh);
	lock_buffer(bh);
	bh->b_end_io = end_buffer_write_sync;
	get_bh(bh);
	submit_bh(WRITE_SYNC, bh);
	wait_on_buffer(bh);
	if (unlikely(!buffer_uptodate(bh)))
		return 1;

	return 0;
}
Example 19
/*
 * nilfs_gccache_submit_read_data() - add data buffer and submit read request
 * @inode - gc inode
 * @blkoff - dummy offset treated as the key for the page cache
 * @pbn - physical block number of the block
 * @vbn - virtual block number of the block, 0 for non-virtual block
 * @out_bh - indirect pointer to a buffer_head struct to receive the results
 *
 * Description: nilfs_gccache_submit_read_data() registers the data buffer
 * specified by @pbn to the GC pagecache with the key @blkoff.
 * This function sets @vbn (@pbn if @vbn is zero) in b_blocknr of the buffer.
 *
 * Return Value: On success, 0 is returned. On Error, one of the following
 * negative error code is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - The block specified with @pbn does not exist.
 */
int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
				   sector_t pbn, __u64 vbn,
				   struct buffer_head **out_bh)
{
	struct buffer_head *bh;
	int err;

	bh = nilfs_grab_buffer(inode, inode->i_mapping, blkoff, 0);
	if (unlikely(!bh))
		return -ENOMEM;

	if (buffer_uptodate(bh))
		goto out;

	if (pbn == 0) {
		struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

		err = nilfs_dat_translate(nilfs->ns_dat, vbn, &pbn);
		if (unlikely(err)) { /* -EIO, -ENOMEM, -ENOENT */
			brelse(bh);
			goto failed;
		}
	}

	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		goto out;
	}

	if (!buffer_mapped(bh)) {
		bh->b_bdev = inode->i_sb->s_bdev;
		set_buffer_mapped(bh);
	}
	bh->b_blocknr = pbn;
	bh->b_end_io = end_buffer_read_sync;
	get_bh(bh);
	submit_bh(READ, bh);
	if (vbn)
		bh->b_blocknr = vbn;
 out:
	err = 0;
	*out_bh = bh;

 failed:
	unlock_page(bh->b_page);
	page_cache_release(bh->b_page);
	return err;
}
Example 20
static int __try_to_free_cp_buf(struct journal_head *jh)
{
    int ret = 0;
    struct buffer_head *bh = jh2bh(jh);

    if (jh->b_transaction == NULL && !buffer_locked(bh) &&
            !buffer_dirty(bh) && !buffer_write_io_error(bh)) {
        get_bh(bh);
        JBUFFER_TRACE(jh, "remove from checkpoint list");
        ret = __jbd2_journal_remove_checkpoint(jh) + 1;
        BUFFER_TRACE(bh, "release");
        __brelse(bh);
    }
    return ret;
}
Example 21
int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
		      struct inode *inode)
{
	int ret = 0;

	mlog_entry("(bh->b_blocknr = %llu, inode=%p)\n",
		   (unsigned long long)bh->b_blocknr, inode);

	BUG_ON(bh->b_blocknr < OCFS2_SUPER_BLOCK_BLKNO);
	BUG_ON(buffer_jbd(bh));

	/* No need to check for a soft readonly file system here. non
	 * journalled writes are only ever done on system files which
	 * can get modified during recovery even if read-only. */
	if (ocfs2_is_hard_readonly(osb)) {
		ret = -EROFS;
		goto out;
	}

	mutex_lock(&OCFS2_I(inode)->ip_io_mutex);

	lock_buffer(bh);
	set_buffer_uptodate(bh);

	/* remove from dirty list before I/O. */
	clear_buffer_dirty(bh);

	get_bh(bh); /* for end_buffer_write_sync() */
	bh->b_end_io = end_buffer_write_sync;
	submit_bh(WRITE, bh);

	wait_on_buffer(bh);

	if (buffer_uptodate(bh)) {
		ocfs2_set_buffer_uptodate(inode, bh);
	} else {
		/* We don't need to remove the clustered uptodate
		 * information for this bh as it's not marked locally
		 * uptodate. */
		ret = -EIO;
		put_bh(bh);
	}

	mutex_unlock(&OCFS2_I(inode)->ip_io_mutex);
out:
	mlog_exit(ret);
	return ret;
}
Example 22
static int ufs_fmp_run(struct device *dev, uint32_t mode, uint8_t *data,
			uint32_t len, uint32_t write)
{
	struct ufs_hba *hba;
	struct ufs_fmp_work *work;
	struct Scsi_Host *host;
	static struct buffer_head *bh;

	work = dev_get_drvdata(dev);
	if (!work) {
		dev_err(dev, "Fail to get work from platform device\n");
		return -ENODEV;
	}
	host = work->host;
	hba = shost_priv(host);
	hba->self_test_mode = mode;

	bh = __getblk(work->bdev, work->sector, FMP_BLK_SIZE);
	if (!bh) {
		dev_err(dev, "Fail to get block from bdev\n");
		return -ENODEV;
	}
	hba->self_test_bh = bh;

	get_bh(bh);
	if (write == WRITE_MODE) {
		memcpy(bh->b_data, data, len);
		bh->b_state &= ~(1 << BH_Uptodate);
		bh->b_state &= ~(1 << BH_Lock);
		ll_rw_block(WRITE_FLUSH_FUA, 1, &bh);
		wait_on_buffer(bh);

		memset(bh->b_data, 0, FMP_BLK_SIZE);
	} else {
		bh->b_state &= ~(1 << BH_Uptodate);
		bh->b_state &= ~(1 << BH_Lock);
		ll_rw_block(READ_SYNC, 1, &bh);
		wait_on_buffer(bh);

		memcpy(data, bh->b_data, len);
	}
	put_bh(bh);

	hba->self_test_mode = 0;
	hba->self_test_bh = NULL;

	return 0;
}
Example 23
int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
		      struct ocfs2_caching_info *ci)
{
	int ret = 0;

	trace_ocfs2_write_block((unsigned long long)bh->b_blocknr, ci);

	BUG_ON(bh->b_blocknr < OCFS2_SUPER_BLOCK_BLKNO);
	BUG_ON(buffer_jbd(bh));

	/* No need to check for a soft readonly file system here. non
	 * journalled writes are only ever done on system files which
	 * can get modified during recovery even if read-only. */
	if (ocfs2_is_hard_readonly(osb)) {
		ret = -EROFS;
		mlog_errno(ret);
		goto out;
	}

	ocfs2_metadata_cache_io_lock(ci);

	lock_buffer(bh);
	set_buffer_uptodate(bh);

	/* remove from dirty list before I/O. */
	clear_buffer_dirty(bh);

	get_bh(bh); /* for end_buffer_write_sync() */
	bh->b_end_io = end_buffer_write_sync;
	submit_bh(REQ_OP_WRITE, 0, bh);

	wait_on_buffer(bh);

	if (buffer_uptodate(bh)) {
		ocfs2_set_buffer_uptodate(ci, bh);
	} else {
		/* We don't need to remove the clustered uptodate
		 * information for this bh as it's not marked locally
		 * uptodate. */
		ret = -EIO;
		mlog_errno(ret);
	}

	ocfs2_metadata_cache_io_unlock(ci);
out:
	return ret;
}
Example 24
static int nilfs_mdt_create_block(struct inode *inode, unsigned long block,
				  struct buffer_head **out_bh,
				  void (*init_block)(struct inode *,
						     struct buffer_head *,
						     void *))
{
	struct super_block *sb = inode->i_sb;
	struct nilfs_transaction_info ti;
	struct buffer_head *bh;
	int err;

	nilfs_transaction_begin(sb, &ti, 0);

	err = -ENOMEM;
	bh = nilfs_grab_buffer(inode, inode->i_mapping, block, 0);
	if (unlikely(!bh))
		goto failed_unlock;

	err = -EEXIST;
	if (buffer_uptodate(bh))
		goto failed_bh;

	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		goto failed_bh;

	bh->b_bdev = sb->s_bdev;
	err = nilfs_mdt_insert_new_block(inode, block, bh, init_block);
	if (likely(!err)) {
		get_bh(bh);
		*out_bh = bh;
	}

 failed_bh:
	unlock_page(bh->b_page);
	page_cache_release(bh->b_page);
	brelse(bh);

 failed_unlock:
	if (likely(!err))
		err = nilfs_transaction_commit(sb);
	else
		nilfs_transaction_abort(sb);

	return err;
}
Example 25
File: mmp.c Project: 020gzh/linux
/*
 * Read the MMP block. It _must_ be read from disk and hence we clear the
 * uptodate flag on the buffer.
 */
static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
			  ext4_fsblk_t mmp_block)
{
	struct mmp_struct *mmp;
	int ret;

	if (*bh)
		clear_buffer_uptodate(*bh);

	/* This would be sb_bread(sb, mmp_block), except we need to be sure
	 * that the MD RAID device cache has been bypassed, and that the read
	 * is not blocked in the elevator. */
	if (!*bh) {
		*bh = sb_getblk(sb, mmp_block);
		if (!*bh) {
			ret = -ENOMEM;
			goto warn_exit;
		}
	}

	get_bh(*bh);
	lock_buffer(*bh);
	(*bh)->b_end_io = end_buffer_read_sync;
	submit_bh(READ_SYNC | REQ_META | REQ_PRIO, *bh);
	wait_on_buffer(*bh);
	if (!buffer_uptodate(*bh)) {
		ret = -EIO;
		goto warn_exit;
	}
	mmp = (struct mmp_struct *)((*bh)->b_data);
	if (le32_to_cpu(mmp->mmp_magic) != EXT4_MMP_MAGIC) {
		ret = -EFSCORRUPTED;
		goto warn_exit;
	}
	if (!ext4_mmp_csum_verify(sb, mmp)) {
		ret = -EFSBADCRC;
		goto warn_exit;
	}
	return 0;
warn_exit:
	brelse(*bh);
	*bh = NULL;
	ext4_warning(sb, "Error %d while reading MMP block %llu",
		     ret, mmp_block);
	return ret;
}
Example 26
/*
 * Write the MMP block using WRITE_SYNC to try to get the block on-disk
 * faster.
 */
static int write_mmp_block(struct buffer_head *bh)
{
	mark_buffer_dirty(bh);
	lock_buffer(bh);
	bh->b_end_io = end_buffer_write_sync;
	get_bh(bh);
#ifdef FEATURE_STORAGE_META_LOG
	set_metadata_rw_status(bh->b_bdev->bd_disk->first_minor, WAIT_WRITE_CNT);
#endif

	submit_bh(WRITE_SYNC, bh);
	wait_on_buffer(bh);
	if (unlikely(!buffer_uptodate(bh)))
		return 1;

	return 0;
}
Example 27
static int __try_to_free_cp_buf(struct journal_head *jh)
{
	int ret = 0;
	struct buffer_head *bh = jh2bh(jh);

	if (jh->b_jlist == BJ_None && !buffer_locked(bh) &&
	    !buffer_dirty(bh) && !buffer_write_io_error(bh)) {
		get_bh(bh);
		JBUFFER_TRACE(jh, "remove from checkpoint list");
		ret = __journal_remove_checkpoint(jh) + 1;
		jbd_unlock_bh_state(bh);
		BUFFER_TRACE(bh, "release");
		__brelse(bh);
	} else {
		jbd_unlock_bh_state(bh);
	}
	return ret;
}
Example 28
struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
{
	struct address_space *mapping = gl->gl_aspace->i_mapping;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct page *page;
	struct buffer_head *bh;
	unsigned int shift;
	unsigned long index;
	unsigned int bufnum;

	shift = PAGE_CACHE_SHIFT - sdp->sd_sb.sb_bsize_shift;
	index = blkno >> shift;             /* convert block to page */
	bufnum = blkno - (index << shift);  /* block buf index within page */

	if (create) {
		for (;;) {
			page = grab_cache_page(mapping, index);
			if (page)
				break;
			yield();
		}
	} else {
		page = find_lock_page(mapping, index);
		if (!page)
			return NULL;
	}

	if (!page_has_buffers(page))
		create_empty_buffers(page, sdp->sd_sb.sb_bsize, 0);

	/* Locate header for our buffer within our page */
	for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page)
		/* Do nothing */;
	get_bh(bh);

	if (!buffer_mapped(bh))
		map_bh(bh, sdp->sd_vfs, blkno);

	unlock_page(page);
	mark_page_accessed(page);
	page_cache_release(page);

	return bh;
}
Example 29
/*
 * Try to flush one buffer from the checkpoint list to disk.
 *
 * Return 1 if something happened which requires us to abort the current
 * scan of the checkpoint list.  
 *
 * Called with j_list_lock held.
 * Called under jbd_lock_bh_state(jh2bh(jh)), and drops it
 */
static int __flush_buffer(journal_t *journal, struct journal_head *jh,
			struct buffer_head **bhs, int *batch_count,
			int *drop_count)
{
	struct buffer_head *bh = jh2bh(jh);
	int ret = 0;

	if (buffer_dirty(bh) && !buffer_locked(bh) && jh->b_jlist == BJ_None) {
		J_ASSERT_JH(jh, jh->b_transaction == NULL);

		/*
		 * Important: we are about to write the buffer, and
		 * possibly block, while still holding the journal lock.
		 * We cannot afford to let the transaction logic start
		 * messing around with this buffer before we write it to
		 * disk, as that would break recoverability.  
		 */
		BUFFER_TRACE(bh, "queue");
		get_bh(bh);
		J_ASSERT_BH(bh, !buffer_jwrite(bh));
		set_buffer_jwrite(bh);
		bhs[*batch_count] = bh;
		jbd_unlock_bh_state(bh);
		(*batch_count)++;
		if (*batch_count == NR_BATCH) {
			__flush_batch(journal, bhs, batch_count);
			ret = 1;
		}
	} else {
		int last_buffer = 0;
		if (jh->b_cpnext == jh) {
			/* We may be about to drop the transaction.  Tell the
			 * caller that the lists have changed.
			 */
			last_buffer = 1;
		}
		if (__try_to_free_cp_buf(jh)) {
			(*drop_count)++;
			ret = last_buffer;
		}
	}
	return ret;
}
Example 30
/*
 * Read the MMP block. It _must_ be read from disk and hence we clear the
 * uptodate flag on the buffer.
 */
static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
			  ext4_fsblk_t mmp_block)
{
	struct mmp_struct *mmp;

	if (*bh)
		clear_buffer_uptodate(*bh);

	/* This would be sb_bread(sb, mmp_block), except we need to be sure
	 * that the MD RAID device cache has been bypassed, and that the read
	 * is not blocked in the elevator. */
	if (!*bh)
		*bh = sb_getblk(sb, mmp_block);
	if (!*bh)
		return -ENOMEM;
	if (*bh) {
		get_bh(*bh);
		lock_buffer(*bh);
		(*bh)->b_end_io = end_buffer_read_sync;
#ifdef FEATURE_STORAGE_META_LOG
		if( (*bh) && (*bh)->b_bdev && (*bh)->b_bdev->bd_disk)
			set_metadata_rw_status((*bh)->b_bdev->bd_disk->first_minor, WAIT_READ_CNT);
#endif
		submit_bh(READ_SYNC | REQ_META | REQ_PRIO, *bh);
		wait_on_buffer(*bh);
		if (!buffer_uptodate(*bh)) {
			brelse(*bh);
			*bh = NULL;
		}
	}
	if (unlikely(!*bh)) {
		ext4_warning(sb, "Error while reading MMP block %llu",
			     mmp_block);
		return -EIO;
	}

	mmp = (struct mmp_struct *)((*bh)->b_data);
	if (le32_to_cpu(mmp->mmp_magic) != EXT4_MMP_MAGIC ||
	    !ext4_mmp_csum_verify(sb, mmp))
		return -EINVAL;

	return 0;
}