Example #1
static int
adfs_fplus_sync(struct adfs_dir *dir)
{
	int err = 0;
	int i;

	for (i = dir->nr_buffers - 1; i >= 0; i--) {
		struct buffer_head *bh = dir->bh[i];
		sync_dirty_buffer(bh);
		if (buffer_req(bh) && !buffer_uptodate(bh))
			err = -EIO;
	}

	return err;
}
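Examples 4, 11, 16, 18, 24, 26 and 27 below repeat the write-and-verify idiom used here: sync_dirty_buffer() submits the buffer and waits, and a buffer that saw an I/O request (buffer_req()) yet lost its uptodate bit hit a write error. A minimal sketch of the idiom as a standalone helper, assuming a mapped buffer_head (sync_bh_and_check() is a hypothetical name, not a kernel API):

static int sync_bh_and_check(struct buffer_head *bh)
{
	sync_dirty_buffer(bh);		/* submit the write and wait for it */
	/* buffer_req() shows I/O was actually issued; a request that
	 * completed without leaving the buffer uptodate failed. */
	if (buffer_req(bh) && !buffer_uptodate(bh))
		return -EIO;
	return 0;
}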
Example #2
int nilfs_gccache_wait_and_mark_dirty(struct buffer_head *bh)
{
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh))
		return -EIO;
	if (buffer_dirty(bh))
		return -EEXIST;

	if (buffer_nilfs_node(bh) && nilfs_btree_broken_node_block(bh)) {
		clear_buffer_uptodate(bh);
		return -EIO;
	}
	mark_buffer_dirty(bh);
	return 0;
}
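The read-side counterpart of that check appears in Examples 8, 14, 15 and 22: after a read has been submitted, wait_on_buffer() sleeps until the buffer is unlocked, and a buffer that is still not uptodate afterwards signals a failed read. A minimal sketch, assuming the read was already submitted (wait_for_bh() is a hypothetical helper):

static int wait_for_bh(struct buffer_head *bh)
{
	wait_on_buffer(bh);		/* wait for the read to complete */
	if (!buffer_uptodate(bh))	/* end_io never validated the data */
		return -EIO;
	return 0;
}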
Example #3
int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
		      struct inode *inode)
{
	int ret = 0;

	mlog_entry("(bh->b_blocknr = %llu, inode=%p)\n",
		   (unsigned long long)bh->b_blocknr, inode);

	BUG_ON(bh->b_blocknr < OCFS2_SUPER_BLOCK_BLKNO);
	BUG_ON(buffer_jbd(bh));

	/* No need to check for a soft readonly file system here. non
	 * journalled writes are only ever done on system files which
	 * can get modified during recovery even if read-only. */
	if (ocfs2_is_hard_readonly(osb)) {
		ret = -EROFS;
		goto out;
	}

	mutex_lock(&OCFS2_I(inode)->ip_io_mutex);

	lock_buffer(bh);
	set_buffer_uptodate(bh);

	/* remove from dirty list before I/O. */
	clear_buffer_dirty(bh);

	get_bh(bh); /* for end_buffer_write_sync() */
	bh->b_end_io = end_buffer_write_sync;
	submit_bh(WRITE, bh);

	wait_on_buffer(bh);

	if (buffer_uptodate(bh)) {
		ocfs2_set_buffer_uptodate(inode, bh);
	} else {
		/* We don't need to remove the clustered uptodate
		 * information for this bh as it's not marked locally
		 * uptodate. */
		ret = -EIO;
		put_bh(bh);
	}

	mutex_unlock(&OCFS2_I(inode)->ip_io_mutex);
out:
	mlog_exit(ret);
	return ret;
}
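Examples 3 and 6 are two revisions of the same ocfs2 function, and both follow the stock sequence for a synchronous, non-journalled buffer write. Condensed into a hypothetical helper (the submit_bh() signature differs between kernel versions, so the two-argument form below is an assumption):

static int write_bh_sync(struct buffer_head *bh)
{
	lock_buffer(bh);
	set_buffer_uptodate(bh);	/* the contents we hold are what goes to disk */
	clear_buffer_dirty(bh);		/* remove from the dirty list before I/O */
	get_bh(bh);			/* reference dropped by end_buffer_write_sync() */
	bh->b_end_io = end_buffer_write_sync;
	submit_bh(REQ_OP_WRITE, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}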
Example #4
int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
				 handle_t *handle, struct inode *inode,
				 struct buffer_head *bh)
{
	int err = 0;

	might_sleep();

	set_buffer_meta(bh);
	set_buffer_prio(bh);
	if (ext4_handle_valid(handle)) {
		err = jbd2_journal_dirty_metadata(handle, bh);
		/* Errors can only happen if there is a bug */
		if (WARN_ON_ONCE(err)) {
			ext4_journal_abort_handle(where, line, __func__, bh,
						  handle, err);
			ext4_error_inode(inode, where, line,
					 bh->b_blocknr,
					 "journal_dirty_metadata failed: "
					 "handle type %u started at line %u, "
					 "credits %u/%u, errcode %d",
					 handle->h_type,
					 handle->h_line_no,
					 handle->h_requested_credits,
					 handle->h_buffer_credits, err);
		}
	} else {
		if (inode)
			mark_buffer_dirty_inode(bh, inode);
		else
			mark_buffer_dirty(bh);
		if (inode && inode_needs_sync(inode)) {
			sync_dirty_buffer(bh);
			if (buffer_req(bh) && !buffer_uptodate(bh)) {
				struct ext4_super_block *es;

				es = EXT4_SB(inode->i_sb)->s_es;
				es->s_last_error_block =
					cpu_to_le64(bh->b_blocknr);
				ext4_error_inode(inode, where, line,
						 bh->b_blocknr,
					"IO error syncing itable block");
				err = -EIO;
			}
		}
	}
	return err;
}
Example #5
void *hpfs_get_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp)
{
	struct buffer_head *bh;
	/*return hpfs_map_sector(s, secno, bhp, 0);*/

	cond_resched();

	if ((*bhp = bh = sb_getblk(s, secno)) != NULL) {
		if (!buffer_uptodate(bh)) wait_on_buffer(bh);
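		/* sb_getblk() does not read the block from disk; marking it
		 * uptodate here assumes the caller rewrites the whole sector
		 * (hpfs_map_sector(), commented out above, is the reading
		 * variant). */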
		set_buffer_uptodate(bh);
		return bh->b_data;
	} else {
		printk("HPFS: hpfs_get_sector: getblk failed\n");
		return NULL;
	}
}
Example #6
int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
		      struct ocfs2_caching_info *ci)
{
	int ret = 0;

	trace_ocfs2_write_block((unsigned long long)bh->b_blocknr, ci);

	BUG_ON(bh->b_blocknr < OCFS2_SUPER_BLOCK_BLKNO);
	BUG_ON(buffer_jbd(bh));

	/* No need to check for a soft readonly file system here. non
	 * journalled writes are only ever done on system files which
	 * can get modified during recovery even if read-only. */
	if (ocfs2_is_hard_readonly(osb)) {
		ret = -EROFS;
		mlog_errno(ret);
		goto out;
	}

	ocfs2_metadata_cache_io_lock(ci);

	lock_buffer(bh);
	set_buffer_uptodate(bh);

	/* remove from dirty list before I/O. */
	clear_buffer_dirty(bh);

	get_bh(bh); /* for end_buffer_write_sync() */
	bh->b_end_io = end_buffer_write_sync;
	submit_bh(REQ_OP_WRITE, 0, bh);

	wait_on_buffer(bh);

	if (buffer_uptodate(bh)) {
		ocfs2_set_buffer_uptodate(ci, bh);
	} else {
		/* We don't need to remove the clustered uptodate
		 * information for this bh as it's not marked locally
		 * uptodate. */
		ret = -EIO;
		mlog_errno(ret);
	}

	ocfs2_metadata_cache_io_unlock(ci);
out:
	return ret;
}
Example #7
File: mmp.c Project: 020gzh/linux
/*
 * Read the MMP block. It _must_ be read from disk and hence we clear the
 * uptodate flag on the buffer.
 */
static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
			  ext4_fsblk_t mmp_block)
{
	struct mmp_struct *mmp;
	int ret;

	if (*bh)
		clear_buffer_uptodate(*bh);

	/* This would be sb_bread(sb, mmp_block), except we need to be sure
	 * that the MD RAID device cache has been bypassed, and that the read
	 * is not blocked in the elevator. */
	if (!*bh) {
		*bh = sb_getblk(sb, mmp_block);
		if (!*bh) {
			ret = -ENOMEM;
			goto warn_exit;
		}
	}

	get_bh(*bh);
	lock_buffer(*bh);
	(*bh)->b_end_io = end_buffer_read_sync;
	submit_bh(READ_SYNC | REQ_META | REQ_PRIO, *bh);
	wait_on_buffer(*bh);
	if (!buffer_uptodate(*bh)) {
		ret = -EIO;
		goto warn_exit;
	}
	mmp = (struct mmp_struct *)((*bh)->b_data);
	if (le32_to_cpu(mmp->mmp_magic) != EXT4_MMP_MAGIC) {
		ret = -EFSCORRUPTED;
		goto warn_exit;
	}
	if (!ext4_mmp_csum_verify(sb, mmp)) {
		ret = -EFSBADCRC;
		goto warn_exit;
	}
	return 0;
warn_exit:
	brelse(*bh);
	*bh = NULL;
	ext4_warning(sb, "Error %d while reading MMP block %llu",
		     ret, mmp_block);
	return ret;
}
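read_mmp_block() is the read half of ext4's multi-mount protection: callers re-read the block after a delay and compare sequence numbers to detect another node touching the filesystem. A hypothetical call-site sketch, assuming seq holds the previously observed sequence value:

	struct buffer_head *bh = NULL;
	struct mmp_struct *mmp;
	int retval;

	retval = read_mmp_block(sb, &bh, mmp_block);
	if (retval)
		goto failed;
	mmp = (struct mmp_struct *)(bh->b_data);
	if (le32_to_cpu(mmp->mmp_seq) != seq) {
		/* another node updated the block since our last read */
	}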
Example #8
int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
		   struct buffer_head **bhp)
{
	*bhp = gfs2_getbuf(gl, blkno, CREATE);
	if (!buffer_uptodate(*bhp)) {
		ll_rw_block(READ_META, 1, bhp);
		if (flags & DIO_WAIT) {
			int error = gfs2_meta_wait(gl->gl_sbd, *bhp);
			if (error) {
				brelse(*bhp);
				return error;
			}
		}
	}

	return 0;
}
Example #9
/* Clear buffer dirty for I/O (Caller must remove buffer from list) */
static void tux3_clear_buffer_dirty_for_io(struct buffer_head *buffer,
					   struct sb *sb, block_t block)
{
	assert(list_empty(&buffer->b_assoc_buffers));
	assert(buffer_dirty(buffer));	/* Who cleared the dirty? */
	/* If buffer was hole and dirtied, it can be !buffer_mapped() */
	/*assert(buffer_mapped(buffer));*/
	assert(buffer_uptodate(buffer));

	/* Set up buffer for I/O. FIXME: need? */
	map_bh(buffer, vfs_sb(sb), block);
	clear_buffer_delay(buffer);

	/*buffer->b_assoc_map = NULL;*/	/* FIXME: hack for *_for_io_hack */
	tux3_clear_bufdelta(buffer);	/* FIXME: hack for save delta */
	clear_buffer_dirty(buffer);
}
Example #10
/*
 * Write the MMP block using WRITE_SYNC to try to get the block on-disk
 * faster.
 */
static int write_mmp_block(struct buffer_head *bh)
{
	mark_buffer_dirty(bh);
	lock_buffer(bh);
	bh->b_end_io = end_buffer_write_sync;
	get_bh(bh);
#ifdef FEATURE_STORAGE_META_LOG
	set_metadata_rw_status(bh->b_bdev->bd_disk->first_minor, WAIT_WRITE_CNT);
#endif

	submit_bh(WRITE_SYNC, bh);
	wait_on_buffer(bh);
	if (unlikely(!buffer_uptodate(bh)))
		return 1;

	return 0;
}
Example #11
static int bfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct bfs_sb_info *info = BFS_SB(inode->i_sb);
	unsigned int ino = (u16)inode->i_ino;
	unsigned long i_sblock;
	struct bfs_inode *di;
	struct buffer_head *bh;
	int err = 0;

	dprintf("ino=%08x\n", ino);

	di = find_inode(inode->i_sb, ino, &bh);
	if (IS_ERR(di))
		return PTR_ERR(di);

	mutex_lock(&info->bfs_lock);

	if (ino == BFS_ROOT_INO)
		di->i_vtype = cpu_to_le32(BFS_VDIR);
	else
		di->i_vtype = cpu_to_le32(BFS_VREG);

	di->i_ino = cpu_to_le16(ino);
	di->i_mode = cpu_to_le32(inode->i_mode);
	di->i_uid = cpu_to_le32(inode->i_uid);
	di->i_gid = cpu_to_le32(inode->i_gid);
	di->i_nlink = cpu_to_le32(inode->i_nlink);
	di->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
	di->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
	di->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
	i_sblock = BFS_I(inode)->i_sblock;
	di->i_sblock = cpu_to_le32(i_sblock);
	di->i_eblock = cpu_to_le32(BFS_I(inode)->i_eblock);
	di->i_eoffset = cpu_to_le32(i_sblock * BFS_BSIZE + inode->i_size - 1);

	mark_buffer_dirty(bh);
	if (wbc->sync_mode == WB_SYNC_ALL) {
		sync_dirty_buffer(bh);
		if (buffer_req(bh) && !buffer_uptodate(bh))
			err = -EIO;
	}
	brelse(bh);
	mutex_unlock(&info->bfs_lock);
	return err;
}
Example #12
/* Warning: even if it returns true, this does *not* guarantee that
 * the block is stored in our inode metadata cache. 
 * 
 * This can be called under lock_buffer()
 */
int ocfs2_buffer_uptodate(struct inode *inode,
			  struct buffer_head *bh)
{
	/* Doesn't matter if the bh is in our cache or not -- if it's
	 * not marked uptodate then we know it can't have correct
	 * data. */
	if (!buffer_uptodate(bh))
		return 0;

	/* OCFS2 does not allow multiple nodes to be changing the same
	 * block at the same time. */
	if (buffer_jbd(bh))
		return 1;

	/* Ok, locally the buffer is marked as up to date, now search
	 * our cache to see if we can trust that. */
	return ocfs2_buffer_cached(OCFS2_I(inode), bh);
}
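The comment above carries the real lesson: on ocfs2 a locally uptodate buffer is only trustworthy if the cluster-aware metadata cache also vouches for it. A hypothetical caller sketch of the resulting read-path pattern:

	if (!ocfs2_buffer_uptodate(inode, bh)) {
		/* the local uptodate bit cannot be trusted here;
		 * re-read the block from disk before using it */
		lock_buffer(bh);
		/* ... submit a read, wait, then re-check ... */
	}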
Example #13
void *hpfs_get_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp)
{
	struct buffer_head *bh;
	/*return hpfs_map_sector(s, secno, bhp, 0);*/

	hpfs_lock_assert(s);

	cond_resched();

	if ((*bhp = bh = sb_getblk(s, secno)) != NULL) {
		if (!buffer_uptodate(bh)) wait_on_buffer(bh);
		set_buffer_uptodate(bh);
		return bh->b_data;
	} else {
		pr_err("%s(): getblk failed\n", __func__);
		return NULL;
	}
}
Example #14
int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	wait_on_buffer(bh);

	if (!buffer_uptodate(bh)) {
		struct gfs2_trans *tr = current->journal_info;
		if (tr && tr->tr_touched)
			gfs2_io_error_bh(sdp, bh);
		return -EIO;
	}
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	return 0;
}
Example #15
static int nilfs_mdt_read_block(struct inode *inode, unsigned long block,
				int readahead, struct buffer_head **out_bh)
{
	struct buffer_head *first_bh, *bh;
	unsigned long blkoff;
	int i, nr_ra_blocks = NILFS_MDT_MAX_RA_BLOCKS;
	int err;

	err = nilfs_mdt_submit_block(inode, block, READ, &first_bh);
	if (err == -EEXIST) /* internal code */
		goto out;

	if (unlikely(err))
		goto failed;

	if (readahead) {
		blkoff = block + 1;
		for (i = 0; i < nr_ra_blocks; i++, blkoff++) {
			err = nilfs_mdt_submit_block(inode, blkoff, READA, &bh);
			if (likely(!err || err == -EEXIST))
				brelse(bh);
			else if (err != -EBUSY)
				break;
				/* abort readahead if bmap lookup failed */
			if (!buffer_locked(first_bh))
				goto out_no_wait;
		}
	}

	wait_on_buffer(first_bh);

 out_no_wait:
	err = -EIO;
	if (!buffer_uptodate(first_bh))
		goto failed_bh;
 out:
	*out_bh = first_bh;
	return 0;

 failed_bh:
	brelse(first_bh);
 failed:
	return err;
}
Example #16
/*
 * write back super block information to disk
 *
 * @vsb: the VFS super block structure
 * @wait: whether to wait for the super block to be synced to disk
 *
 * return: 0 on success, error code otherwise
 */
int wtfs_sync_super(struct super_block * vsb, int wait)
{
	struct wtfs_sb_info * sbi = WTFS_SB_INFO(vsb);
	struct wtfs_super_block * sb = NULL;
	struct buffer_head * bh = NULL;
	int ret = -EIO;

	if ((bh = sb_bread(vsb, WTFS_RB_SUPER)) == NULL) {
		wtfs_error("unable to read the super block\n");
		goto error;
	}

	sb = (struct wtfs_super_block *)bh->b_data;
	sb->version = cpu_to_wtfs64(sbi->version);
	sb->magic = cpu_to_wtfs64(sbi->magic);
	sb->block_size = cpu_to_wtfs64(sbi->block_size);
	sb->block_count = cpu_to_wtfs64(sbi->block_count);
	sb->inode_table_first = cpu_to_wtfs64(sbi->inode_table_first);
	sb->inode_table_count = cpu_to_wtfs64(sbi->inode_table_count);
	sb->block_bitmap_first = cpu_to_wtfs64(sbi->block_bitmap_first);
	sb->block_bitmap_count = cpu_to_wtfs64(sbi->block_bitmap_count);
	sb->inode_bitmap_first = cpu_to_wtfs64(sbi->inode_bitmap_first);
	sb->inode_bitmap_count = cpu_to_wtfs64(sbi->inode_bitmap_count);
	sb->inode_count = cpu_to_wtfs64(sbi->inode_count);
	sb->free_block_count = cpu_to_wtfs64(sbi->free_block_count);

	mark_buffer_dirty(bh);
	if (wait) {
		sync_dirty_buffer(bh);
		if (buffer_req(bh) && !buffer_uptodate(bh)) {
			wtfs_error("super block sync failed\n");
			goto error;
		}
	}
	brelse(bh);

	return 0;

error:
	if (bh != NULL) {
		brelse(bh);
	}
	return ret;
}
Example #17
/*
 * Read the MMP block. It _must_ be read from disk and hence we clear the
 * uptodate flag on the buffer.
 */
static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
			  ext4_fsblk_t mmp_block)
{
	struct mmp_struct *mmp;

	if (*bh)
		clear_buffer_uptodate(*bh);

	/* This would be sb_bread(sb, mmp_block), except we need to be sure
	 * that the MD RAID device cache has been bypassed, and that the read
	 * is not blocked in the elevator. */
	if (!*bh)
		*bh = sb_getblk(sb, mmp_block);
	if (!*bh)
		return -ENOMEM;
	if (*bh) {
		get_bh(*bh);
		lock_buffer(*bh);
		(*bh)->b_end_io = end_buffer_read_sync;
#ifdef FEATURE_STORAGE_META_LOG
		if( (*bh) && (*bh)->b_bdev && (*bh)->b_bdev->bd_disk)
			set_metadata_rw_status((*bh)->b_bdev->bd_disk->first_minor, WAIT_READ_CNT);
#endif
		submit_bh(READ_SYNC | REQ_META | REQ_PRIO, *bh);
		wait_on_buffer(*bh);
		if (!buffer_uptodate(*bh)) {
			brelse(*bh);
			*bh = NULL;
		}
	}
	if (unlikely(!*bh)) {
		ext4_warning(sb, "Error while reading MMP block %llu",
			     mmp_block);
		return -EIO;
	}

	mmp = (struct mmp_struct *)((*bh)->b_data);
	if (le32_to_cpu(mmp->mmp_magic) != EXT4_MMP_MAGIC ||
	    !ext4_mmp_csum_verify(sb, mmp))
		return -EINVAL;

	return 0;
}
Example #18
int sysv_sync_inode(struct inode *inode)
{
	int err = 0;
	struct buffer_head *bh;

	bh = sysv_update_inode(inode);
	if (bh && buffer_dirty(bh)) {
		sync_dirty_buffer(bh);
		if (buffer_req(bh) && !buffer_uptodate(bh)) {
			printk("IO error syncing sysv inode [%s:%08lx]\n",
			       inode->i_sb->s_id, inode->i_ino);
			err = -1;
		}
	} else if (!bh)
		err = -1;
	brelse(bh);
	return err;
}
Example #19
static void reiserfsck_check_cached_tree (int dev, int block, int size)
{
    struct buffer_head * bh;
    int what_node;

    bh = find_buffer(dev, block, size);
    if (bh == 0)
	return;
    if (!buffer_uptodate (bh)) {
	die ("reiserfsck_check_cached_tree: found notuptodate buffer");
    }

    bh->b_count++;

    if (!B_IS_IN_TREE (bh)) {
	die ("reiserfsck_check_cached_tree: buffer (%b %z) not in tree", bh, bh);
    }

    what_node = who_is_this (bh->b_data, bh->b_size);
    if ((what_node != THE_LEAF && what_node != THE_INTERNAL) ||
	!is_block_used (bh->b_blocknr) ||
	(is_leaf_node (bh) && is_leaf_bad (bh)) ||
	(is_internal_node(bh) && is_internal_bad (bh)))
	die ("reiserfsck_check_cached_tree: bad node in the tree");
    if (is_internal_node (bh)) {
	int i;
	struct disk_child * dc;
	
	dc = B_N_CHILD (bh, 0);
	for (i = 0; i <= B_NR_ITEMS (bh); i ++, dc ++) {
	    reiserfsck_check_cached_tree (dev, dc_block_number(dc), size);
	    g_dkey = B_N_PDELIM_KEY (bh, i);
	}
    } else if (is_leaf_node (bh)) {
	brelse (bh);
	return;
    } else {
	reiserfs_panic ("reiserfsck_check_cached_tree: block %lu has bad block type (%b)",
			bh->b_blocknr, bh);
    }
    brelse (bh);
}
Example #20
/* Get delimiting key of the buffer by looking for it in the buffers in the path, starting from the bottom
   of the path, and going upwards.  We must check the path's validity at each step.  If the key is not in
   the path, there is no delimiting key in the tree (buffer is first or last buffer in tree), and in this
   case we return a special key, either MIN_KEY or MAX_KEY. */
static inline const struct reiserfs_key *get_lkey(const struct treepath *chk_path,
						  const struct super_block *sb)
{
	int position, path_offset = chk_path->path_length;
	struct buffer_head *parent;

	RFALSE(path_offset < FIRST_PATH_ELEMENT_OFFSET,
	       "PAP-5010: invalid offset in the path");

	/* While not higher in path than first element. */
	while (path_offset-- > FIRST_PATH_ELEMENT_OFFSET) {

		RFALSE(!buffer_uptodate
		       (PATH_OFFSET_PBUFFER(chk_path, path_offset)),
		       "PAP-5020: parent is not uptodate");

		/* Parent at the path is not in the tree now. */
		if (!B_IS_IN_TREE
		    (parent =
		     PATH_OFFSET_PBUFFER(chk_path, path_offset)))
			return &MAX_KEY;
		/* Check whether position in the parent is correct. */
		if ((position =
		     PATH_OFFSET_POSITION(chk_path,
					  path_offset)) >
		    B_NR_ITEMS(parent))
			return &MAX_KEY;
		/* Check whether parent at the path really points to the child. */
		if (B_N_CHILD_NUM(parent, position) !=
		    PATH_OFFSET_PBUFFER(chk_path,
					path_offset + 1)->b_blocknr)
			return &MAX_KEY;
		/* Return delimiting key if position in the parent is not equal to zero. */
		if (position)
			return B_N_PDELIM_KEY(parent, position - 1);
	}
	/* Return MIN_KEY if we are in the root of the buffer tree. */
	if (PATH_OFFSET_PBUFFER(chk_path, FIRST_PATH_ELEMENT_OFFSET)->
	    b_blocknr == SB_ROOT_BLOCK(sb))
		return &MIN_KEY;
	return &MAX_KEY;
}
Example #21
File: buffer.c Project: foolsh/elks
struct buffer_head *getblk(kdev_t dev, block_t block)
{
    register struct buffer_head *bh;


    /* If there are too many dirty buffers, we wake up the update process
     * now so as to ensure that there are still clean buffers available
     * for user processes to use (and dirty) */

    do {
	bh = get_hash_table(dev, block);
	if (bh != NULL) {
	    if (buffer_clean(bh) && buffer_uptodate(bh))
		put_last_lru(bh);
	    return bh;
	}

	/* I think the following check is redundant
	 * So I will remove it for now
	 */

    } while(find_buffer(dev, block));

    /*
     *      Create a buffer for this job.
     */
    bh = get_free_buffer();

/* OK, FINALLY we know that this buffer is the only one of its kind,
 * and that it's unused (b_count=0), unlocked (buffer_locked=0), and clean
 */

    bh->b_count = 1;
    bh->b_dirty = 0;
    bh->b_lock = 0;
    bh->b_uptodate = 0;
    bh->b_dev = dev;
    bh->b_blocknr = block;
    bh->b_seg = kernel_ds;

    return bh;
}
Example #22
int nilfs_btnode_get(struct address_space *btnc, __u64 blocknr,
		     sector_t pblocknr, struct buffer_head **pbh, int newblk)
{
	struct buffer_head *bh;
	int err;

	err = nilfs_btnode_submit_block(btnc, blocknr, pblocknr, pbh, newblk);
	if (err == -EEXIST) /* internal code (cache hit) */
		return 0;
	if (unlikely(err))
		return err;

	bh = *pbh;
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh)) {
		brelse(bh);
		return -EIO;
	}
	return 0;
}
Example #23
/* Get delimiting key of the buffer at the path and its right neighbor. */
inline	struct  key * get_rkey  (
    struct path         * p_s_chk_path,
    struct super_block  * p_s_sb
    ) {
    int                   n_position,
	n_path_offset = p_s_chk_path->path_length;
    struct buffer_head  * p_s_parent;

#ifdef CONFIG_REISERFS_CHECK
    if ( n_path_offset < FIRST_PATH_ELEMENT_OFFSET )
	reiserfs_panic(p_s_sb,"PAP-5030: get_rkey: illegal offset in the path");
#endif

    while ( n_path_offset-- > FIRST_PATH_ELEMENT_OFFSET ) {

#ifdef CONFIG_REISERFS_CHECK
	if ( ! buffer_uptodate(PATH_OFFSET_PBUFFER(p_s_chk_path, n_path_offset)) )
	    reiserfs_panic(p_s_sb, "PAP-5040: get_rkey: parent is not uptodate");
#endif

	/* Parent at the path is not in the tree now. */
	if ( ! B_IS_IN_TREE(p_s_parent = PATH_OFFSET_PBUFFER(p_s_chk_path, n_path_offset)) )
	    return &MIN_KEY;
	/* Check whether position in the parent is correct. */
	if ( (n_position = PATH_OFFSET_POSITION(p_s_chk_path, n_path_offset)) > B_NR_ITEMS(p_s_parent) )
	    return &MIN_KEY;
	/* Check whether parent at the path really points to the child. */
	if ( B_N_CHILD_NUM(p_s_parent, n_position) !=
	     PATH_OFFSET_PBUFFER(p_s_chk_path, n_path_offset + 1)->b_blocknr )
	    return &MIN_KEY;
	/* Return delimiting key if position in the parent is not the last one. */
	if ( n_position != B_NR_ITEMS(p_s_parent) )
	    return B_N_PDELIM_KEY(p_s_parent, n_position);
    }
    /* Return MAX_KEY if we are in the root of the buffer tree. */
    if ( PATH_OFFSET_PBUFFER(p_s_chk_path, FIRST_PATH_ELEMENT_OFFSET)->b_blocknr ==
	 SB_ROOT_BLOCK (p_s_sb) )
	return &MAX_KEY;
    return  &MIN_KEY;
}
Example #24
int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
				 handle_t *handle, struct inode *inode,
				 struct buffer_head *bh)
{
	int err = 0;

	might_sleep();

	set_buffer_meta(bh);
	set_buffer_prio(bh);
	if (ext4_handle_valid(handle)) {
		err = jbd2_journal_dirty_metadata(handle, bh);
		if (err) {
			/* Errors can only happen if there is a bug */
			handle->h_err = err;
			__ext4_journal_stop(where, line, handle);
		}
	} else {
		if (inode)
			mark_buffer_dirty_inode(bh, inode);
		else
			mark_buffer_dirty(bh);
		if (inode && inode_needs_sync(inode)) {
			sync_dirty_buffer(bh);
			if (buffer_req(bh) && !buffer_uptodate(bh)) {
				struct ext4_super_block *es;

				es = EXT4_SB(inode->i_sb)->s_es;
				es->s_last_error_block =
					cpu_to_le64(bh->b_blocknr);
				ext4_error_inode(inode, where, line,
						 bh->b_blocknr,
					"IO error syncing itable block");
				err = -EIO;
			}
		}
	}
	return err;
}
Example #25
static inline void fat_dir_readahead(struct inode *dir, sector_t iblock,
				     sector_t phys)
{
	struct super_block *sb = dir->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	struct buffer_head *bh;
	int sec;

	/* This is not a first sector of cluster, or sec_per_clus == 1 */
	if ((iblock & (sbi->sec_per_clus - 1)) || sbi->sec_per_clus == 1)
		return;
	/* root dir of FAT12/FAT16 */
	if ((sbi->fat_bits != 32) && (dir->i_ino == MSDOS_ROOT_INO))
		return;

	bh = sb_find_get_block(sb, phys);
	if (bh == NULL || !buffer_uptodate(bh)) {
		for (sec = 0; sec < sbi->sec_per_clus; sec++)
			sb_breadahead(sb, phys + sec);
	}
	brelse(bh);
}
Example #26
static int minix_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int err = 0;
	struct buffer_head *bh;

	if (INODE_VERSION(inode) == MINIX_V1)
		bh = V1_minix_update_inode(inode);
	else
		bh = V2_minix_update_inode(inode);
	if (!bh)
		return -EIO;
	if (wbc->sync_mode == WB_SYNC_ALL && buffer_dirty(bh)) {
		sync_dirty_buffer(bh);
		if (buffer_req(bh) && !buffer_uptodate(bh)) {
			printk("IO error syncing minix inode [%s:%08lx]\n",
				inode->i_sb->s_id, inode->i_ino);
			err = -EIO;
		}
	}
	brelse (bh);
	return err;
}
Example #27
File: inode.c Project: nosway/sfs
int sfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int err = 0;
	struct buffer_head *bh;

	pr_debug("Enter: sfs_write_inode (ino = %ld)\n", inode->i_ino);
	bh = sfs_update_inode(inode);
	if (!bh)
		return -EIO;
	
	if (wbc->sync_mode == WB_SYNC_ALL && buffer_dirty(bh)) {
		sync_dirty_buffer(bh);
		if (buffer_req(bh) && !buffer_uptodate(bh)) {
			pr_debug("IO error syncing sfs inode 0x%lx\n", 
				inode->i_ino);
			err = -EIO;
		}
	}
	pr_debug("Leave: sfs_write_inode (ino = %ld)\n", inode->i_ino);
	brelse(bh);
	return err;
}
Example #28
/*
 * Lookup the desired type of offset from the given page.
 *
 * On success, return true and the offset argument will point to the
 * start of the region that was found.  Otherwise this function will
 * return false and keep the offset argument unchanged.
 */
STATIC bool
xfs_lookup_buffer_offset(
	struct page		*page,
	loff_t			*offset,
	unsigned int		type)
{
	loff_t			lastoff = page_offset(page);
	bool			found = false;
	struct buffer_head	*bh, *head;

	bh = head = page_buffers(page);
	do {
		/*
		 * Unwritten extents that have data in the page
		 * cache covering them can be identified by the
		 * BH_Unwritten state flag.  Pages with multiple
		 * buffers might have a mix of holes, data and
		 * unwritten extents - any buffer with valid
		 * data in it should have BH_Uptodate flag set
		 * on it.
		 */
		if (buffer_unwritten(bh) ||
		    buffer_uptodate(bh)) {
			if (type == DATA_OFF)
				found = true;
		} else {
			if (type == HOLE_OFF)
				found = true;
		}

		if (found) {
			*offset = lastoff;
			break;
		}
		lastoff += bh->b_size;
	} while ((bh = bh->b_this_page) != head);

	return found;
}
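A hypothetical caller sketch, scanning a locked page for cached data while servicing SEEK_DATA (DATA_OFF is the lookup type assumed by the function above):

	loff_t offset = page_offset(page);

	if (page_has_buffers(page) &&
	    xfs_lookup_buffer_offset(page, &offset, DATA_OFF)) {
		/* offset now points at the first uptodate or
		 * unwritten buffer in this page */
	}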
Example #29
/*
 * Read the MMP block. It _must_ be read from disk and hence we clear the
 * uptodate flag on the buffer.
 */
static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
			  ext4_fsblk_t mmp_block)
{
	struct mmp_struct *mmp;

	if (*bh)
		clear_buffer_uptodate(*bh);

	/* This would be sb_bread(sb, mmp_block), except we need to be sure
	 * that the MD RAID device cache has been bypassed, and that the read
	 * is not blocked in the elevator. */
	if (!*bh)
		*bh = sb_getblk(sb, mmp_block);
	if (!*bh)
		return -ENOMEM;
	if (*bh) {
		get_bh(*bh);
		lock_buffer(*bh);
		(*bh)->b_end_io = end_buffer_read_sync;
		submit_bh(READ_SYNC, *bh);
		wait_on_buffer(*bh);
		if (!buffer_uptodate(*bh)) {
			brelse(*bh);
			*bh = NULL;
		}
	}
	if (unlikely(!*bh)) {
		ext4_warning(sb, "Error while reading MMP block %llu",
			     mmp_block);
		return -EIO;
	}

	mmp = (struct mmp_struct *)((*bh)->b_data);
	if (le32_to_cpu(mmp->mmp_magic) != EXT4_MMP_MAGIC ||
	    !ext4_mmp_csum_verify(sb, mmp))
		return -EINVAL;

	return 0;
}
Example #30
/*
 * Write the MMP block using WRITE_SYNC to try to get the block on-disk
 * faster.
 */
static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
{
	struct mmp_struct *mmp = (struct mmp_struct *)(bh->b_data);

	/*
	 * We protect against freezing so that we don't create dirty buffers
	 * on frozen filesystem.
	 */
	sb_start_write(sb);
	ext4_mmp_csum_set(sb, mmp);
	mark_buffer_dirty(bh);
	lock_buffer(bh);
	bh->b_end_io = end_buffer_write_sync;
	get_bh(bh);
	submit_bh(WRITE_SYNC | REQ_META | REQ_PRIO, bh);
	wait_on_buffer(bh);
	sb_end_write(sb);
	if (unlikely(!buffer_uptodate(bh)))
		return 1;

	return 0;
}
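Example 30 refines Example 10 in one important way: the dirty-and-write sequence is bracketed with sb_start_write()/sb_end_write() so that no dirty buffer is created on a frozen filesystem. As a hypothetical fragment, assuming sb and a mapped bh:

	sb_start_write(sb);	/* blocks a concurrent filesystem freeze */
	mark_buffer_dirty(bh);
	/* ... submit synchronously and wait, as in the examples above ... */
	sb_end_write(sb);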