Example #1
s32 bdev_mwrite(struct super_block *sb, u32 secno, struct buffer_head *bh, u32 num_secs, s32 sync)
{
	s32 count;
	struct buffer_head *bh2;
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
#ifdef CONFIG_SDFAT_DBG_IOCTL
	struct sdfat_sb_info *sbi = SDFAT_SB(sb);
	long flags = sbi->debug_flags;

	if (flags & SDFAT_DEBUGFLAGS_ERROR_RW)	
		return -EIO;
#endif /* CONFIG_SDFAT_DBG_IOCTL */

	if (!fsi->bd_opened) 
		return -EIO;

	if (secno == bh->b_blocknr) {
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		if (sync && (sync_dirty_buffer(bh) != 0))
			return -EIO;
	} else {
		count = num_secs << sb->s_blocksize_bits;

		bh2 = __getblk(sb->s_bdev, secno, count);

		if (!bh2)
			goto no_bh;

		lock_buffer(bh2);
		memcpy(bh2->b_data, bh->b_data, count);
		set_buffer_uptodate(bh2);
		mark_buffer_dirty(bh2);
		unlock_buffer(bh2);
		if (sync && (sync_dirty_buffer(bh2) != 0)) {
			__brelse(bh2);
			goto no_bh;
		}
		__brelse(bh2);
	}

	return 0;

no_bh:
	/*
	 * patch 1.2.4: issue this warning message only ONCE per volume.
	 */
	if (!(fsi->prev_eio & SDFAT_EIO_WRITE)) {
		fsi->prev_eio |= SDFAT_EIO_WRITE;
		sdfat_log_msg(sb, KERN_ERR, "%s: No bh. I/O error.", __func__);
#ifdef CONFIG_SDFAT_DEBUG
		sdfat_debug_warn_on(1);
#endif
	}

	return -EIO;
}
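
Note: Examples #1 and #2 share one pattern: when the target sector is not the one the caller's bh already maps, grab a second buffer_head aliasing the destination, copy into it, mark it uptodate and dirty, and optionally sync it. A condensed sketch of that sequence (my_write_sector is a hypothetical name, not from any of the filesystems above; error handling trimmed to the essentials):

#include <linux/buffer_head.h>
#include <linux/string.h>

static int my_write_sector(struct super_block *sb, sector_t secno,
			   struct buffer_head *src, int sync)
{
	struct buffer_head *bh = __getblk(sb->s_bdev, secno, src->b_size);

	if (!bh)
		return -EIO;

	lock_buffer(bh);
	memcpy(bh->b_data, src->b_data, src->b_size);
	set_buffer_uptodate(bh);	/* in-memory contents are now valid */
	mark_buffer_dirty(bh);		/* queue for writeback */
	unlock_buffer(bh);

	if (sync && sync_dirty_buffer(bh) != 0) {
		__brelse(bh);
		return -EIO;
	}
	__brelse(bh);
	return 0;
}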
Example #2
s32 bdev_write(struct super_block *sb, u32 secno, struct buffer_head *bh, u32 num_secs, s32 sync)
{
	s32 count;
	struct buffer_head *bh2;
	BD_INFO_T *p_bd = &(EXFAT_SB(sb)->bd_info);
	FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info);
#ifdef CONFIG_EXFAT_KERNEL_DEBUG
	struct exfat_sb_info *sbi = EXFAT_SB(sb);
	long flags = sbi->debug_flags;

	if (flags & EXFAT_DEBUGFLAGS_ERROR_RW)
		return FFS_MEDIAERR;
#endif /* CONFIG_EXFAT_KERNEL_DEBUG */

	if (!p_bd->opened)
		return FFS_MEDIAERR;

	if (secno == bh->b_blocknr) {
		lock_buffer(bh);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		if (sync && (sync_dirty_buffer(bh) != 0))
			return FFS_MEDIAERR;
	} else {
		count = num_secs << p_bd->sector_size_bits;

		bh2 = __getblk(sb->s_bdev, secno, count);

		if (bh2 == NULL)
			goto no_bh;

		lock_buffer(bh2);
		memcpy(bh2->b_data, bh->b_data, count);
		set_buffer_uptodate(bh2);
		mark_buffer_dirty(bh2);
		unlock_buffer(bh2);
		if (sync && (sync_dirty_buffer(bh2) != 0)) {
			__brelse(bh2);
			goto no_bh;
		}
		__brelse(bh2);
	}

	return FFS_SUCCESS;

no_bh:
	WARN(!p_fs->dev_ejected,
		"[EXFAT] No bh, device seems wrong or to be ejected.\n");

	return FFS_MEDIAERR;
}
Example #3
File: mdt.c Project: Ale1ster/kerneldir
static int
nilfs_mdt_insert_new_block(struct inode *inode, unsigned long block,
			   struct buffer_head *bh,
			   void (*init_block)(struct inode *,
					      struct buffer_head *, void *))
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	void *kaddr;
	int ret;

	/* Caller exclude read accesses using page lock */

	/* set_buffer_new(bh); */
	bh->b_blocknr = 0;

	ret = nilfs_bmap_insert(ii->i_bmap, block, (unsigned long)bh);
	if (unlikely(ret))
		return ret;

	set_buffer_mapped(bh);

	kaddr = kmap_atomic(bh->b_page, KM_USER0);
	memset(kaddr + bh_offset(bh), 0, 1 << inode->i_blkbits);
	if (init_block)
		init_block(inode, bh, kaddr);
	flush_dcache_page(bh->b_page);
	kunmap_atomic(kaddr, KM_USER0);

	set_buffer_uptodate(bh);
	nilfs_mark_buffer_dirty(bh);
	nilfs_mdt_mark_dirty(inode);
	return 0;
}
Example #4
static int lfs_readpage(struct file *file, struct page *page)
{	
	struct inode *inode = page->mapping->host;
	sector_t iblock, block;
	unsigned int blocksize;
	struct buffer_head *bh, *head;

	blocksize = 1 << inode->i_blkbits;
	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);
	head = page_buffers(page);
	bh = head;

	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	do {
		struct buffer_head *bh_temp;
		block = lfs_disk_block(inode, iblock);

		dprintk("searching for block %Lu in segments: ", (long long unsigned int)block);
		bh_temp = lfs_read_block(inode, iblock);
		if (bh_temp) {
			dprintk("FOUND\n");

			memcpy(bh->b_data, bh_temp->b_data, LFS_BSIZE);
			set_buffer_uptodate(bh);
			brelse(bh_temp);
		} else
			dprintk("NOT FOUND\n");
	} while (iblock++, (bh = bh->b_this_page) != head);
	return block_read_full_page(page, lfs_map_block);
}
Example #5
File: ialloc.c Project: 274914765/C
/*
 * Nullify a new chunk of inodes.
 * BSD people also set the ui_gen field of the inode during
 * nullification, but we do not care about that because
 * Linux UFS does not support NFS.
 */
static void ufs2_init_inodes_chunk(struct super_block *sb,
                   struct ufs_cg_private_info *ucpi,
                   struct ufs_cylinder_group *ucg)
{
    struct buffer_head *bh;
    struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
    sector_t beg = uspi->s_sbbase +
        ufs_inotofsba(ucpi->c_cgx * uspi->s_ipg +
                  fs32_to_cpu(sb, ucg->cg_u.cg_u2.cg_initediblk));
    sector_t end = beg + uspi->s_fpb;

    UFSD("ENTER cgno %d\n", ucpi->c_cgx);

    for (; beg < end; ++beg) {
        bh = sb_getblk(sb, beg);
        lock_buffer(bh);
        memset(bh->b_data, 0, sb->s_blocksize);
        set_buffer_uptodate(bh);
        mark_buffer_dirty(bh);
        unlock_buffer(bh);
        if (sb->s_flags & MS_SYNCHRONOUS)
            sync_dirty_buffer(bh);
        brelse(bh);
    }

    fs32_add(sb, &ucg->cg_u.cg_u2.cg_initediblk, uspi->s_inopb);
    ubh_mark_buffer_dirty(UCPI_UBH(ucpi));
    if (sb->s_flags & MS_SYNCHRONOUS) {
        ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
        ubh_wait_on_buffer(UCPI_UBH(ucpi));
    }

    UFSD("EXIT\n");
}
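
Note: Example #5 shows the standard zero-fill idiom: because the block is about to be overwritten entirely, it is cleared in memory and flagged uptodate, so the stale on-disk contents are never read. A minimal sketch under the same assumptions (my_zero_block is illustrative; a real caller would propagate the allocation failure):

#include <linux/buffer_head.h>
#include <linux/string.h>

static void my_zero_block(struct super_block *sb, sector_t blkno, int sync)
{
	struct buffer_head *bh = sb_getblk(sb, blkno);

	if (!bh)
		return;
	lock_buffer(bh);
	memset(bh->b_data, 0, sb->s_blocksize);
	set_buffer_uptodate(bh);	/* no need to read the old data */
	mark_buffer_dirty(bh);
	unlock_buffer(bh);
	if (sync)
		sync_dirty_buffer(bh);
	brelse(bh);
}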
Example #6
File: super.c Project: RepoBackups/UBER-L
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
{
	struct buffer_head *sbh = sbi->raw_super_buf;
	sector_t block = sbh->b_blocknr;
	int err;

	/* write back-up superblock first */
	sbh->b_blocknr = block ? 0 : 1;
	mark_buffer_dirty(sbh);
	err = sync_dirty_buffer(sbh);

	sbh->b_blocknr = block;

	/* if we are in recovery path, skip writing valid superblock */
	if (recover || err)
		goto out;

	/* write current valid superblock */
	mark_buffer_dirty(sbh);
	err = sync_dirty_buffer(sbh);
out:
	clear_buffer_write_io_error(sbh);
	set_buffer_uptodate(sbh);
	return err;
}
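
Note: Example #6 relies on b_blocknr retargeting: the cached superblock buffer is pointed at the backup location, written out, then restored. Just that trick, in isolation (my_write_backup_sb is a hypothetical helper):

#include <linux/buffer_head.h>

static int my_write_backup_sb(struct buffer_head *sbh, sector_t backup_blk)
{
	sector_t saved = sbh->b_blocknr;
	int err;

	sbh->b_blocknr = backup_blk;	/* temporarily retarget the buffer */
	mark_buffer_dirty(sbh);
	err = sync_dirty_buffer(sbh);
	sbh->b_blocknr = saved;		/* restore the real location */
	return err;
}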
Example #7
/*
 * If a ramdisk page has buffers, some may be uptodate and some may be not.
 * To bring the page uptodate we zero out the non-uptodate buffers.  The
 * page must be locked.
 */
static void make_page_uptodate(struct page *page)
{
	if (page_has_buffers(page)) {
		struct buffer_head *bh = page_buffers(page);
		struct buffer_head *head = bh;

		do {
			if (!buffer_uptodate(bh)) {
				memset(bh->b_data, 0, bh->b_size);
				/*
				 * akpm: I'm totally undecided about this.  The
				 * buffer has just been magically brought "up to
				 * date", but nobody should want to be reading
				 * it anyway, because it hasn't been used for
				 * anything yet.  It is still in a "not read
				 * from disk yet" state.
				 *
				 * But non-uptodate buffers against an uptodate
				 * page are against the rules.  So do it anyway.
				 */
				set_buffer_uptodate(bh);
			}
		} while ((bh = bh->b_this_page) != head);
	} else {
		memset(page_address(page), 0, PAGE_CACHE_SIZE);
	}
	flush_dcache_page(page);
	SetPageUptodate(page);
}
Example #8
File: buffer.c Project: OGAWAHirofumi/tux3
/*
 * Caller must hold lock_page() or backend (otherwise, you may race
 * with buffer fork or clear dirty)
 */
int tux3_set_buffer_dirty_list(struct address_space *mapping,
			       struct buffer_head *buffer, int delta,
			       struct list_head *head)
{
	/* FIXME: we better to set this by caller? */
	if (!buffer_uptodate(buffer))
		set_buffer_uptodate(buffer);

	/*
	 * Basically, open code of mark_buffer_dirty() without mark
	 * inode dirty.  Caller decides whether dirty inode or not.
	 */
	if (!test_set_buffer_dirty(buffer)) {
		struct page *page = buffer->b_page;

		/* Mark dirty for delta, then add buffer to our dirty list */
		__tux3_set_buffer_dirty_list(mapping, buffer, delta, head);

		if (!TestSetPageDirty(page)) {
			struct address_space *mapping = page->mapping;
			if (mapping)
				__tux3_set_page_dirty(page, mapping, 0);
			return 1;
		}
	}
	return 0;
}
Example #9
struct buffer_head *
nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
{
	struct inode *inode = NILFS_BTNC_I(btnc);
	struct buffer_head *bh;

	bh = nilfs_grab_buffer(inode, btnc, blocknr, 1 << BH_NILFS_Node);
	if (unlikely(!bh))
		return NULL;

	if (unlikely(buffer_mapped(bh) || buffer_uptodate(bh) ||
		     buffer_dirty(bh))) {
		brelse(bh);
		BUG();
	}
	memset(bh->b_data, 0, 1 << inode->i_blkbits);
	bh->b_bdev = inode->i_sb->s_bdev;
	bh->b_blocknr = blocknr;
	set_buffer_mapped(bh);
	set_buffer_uptodate(bh);

	unlock_page(bh->b_page);
	page_cache_release(bh->b_page);
	return bh;
}
Example #10
static void init_dinode(struct gfs2_inode *dip, struct gfs2_inode *ip,
			const char *symname)
{
	struct gfs2_dinode *di;
	struct buffer_head *dibh;

	dibh = gfs2_meta_new(ip->i_gl, ip->i_no_addr);
	gfs2_trans_add_meta(ip->i_gl, dibh);
	di = (struct gfs2_dinode *)dibh->b_data;
	gfs2_dinode_out(ip, di);

	di->di_major = cpu_to_be32(MAJOR(ip->i_inode.i_rdev));
	di->di_minor = cpu_to_be32(MINOR(ip->i_inode.i_rdev));
	di->__pad1 = 0;
	di->__pad2 = 0;
	di->__pad3 = 0;
	memset(&di->__pad4, 0, sizeof(di->__pad4));
	memset(&di->di_reserved, 0, sizeof(di->di_reserved));
	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));

	switch(ip->i_inode.i_mode & S_IFMT) {
	case S_IFDIR:
		gfs2_init_dir(dibh, dip);
		break;
	case S_IFLNK:
		memcpy(dibh->b_data + sizeof(struct gfs2_dinode), symname, ip->i_inode.i_size);
		break;
	}

	set_buffer_uptodate(dibh);
	brelse(dibh);
}
Example #11
/* Done it all: now write the commit record.  We should have
 * cleaned up our previous buffers by now, so if we are in abort
 * mode we can now just skip the rest of the journal write
 * entirely.
 *
 * Returns 1 if the journal needs to be aborted or 0 on success
 */
static int journal_write_commit_record(journal_t *journal,
					transaction_t *commit_transaction)
{
	struct journal_head *descriptor;
	struct buffer_head *bh;
	int i, ret;
	int barrier_done = 0;

	if (is_journal_aborted(journal))
		return 0;

	descriptor = jbd2_journal_get_descriptor_buffer(journal);
	if (!descriptor)
		return 1;

	bh = jh2bh(descriptor);

	/* Stamp a commit header into each 512-byte sector of the block. */
	for (i = 0; i < bh->b_size; i += 512) {
		journal_header_t *tmp = (journal_header_t *)(bh->b_data + i);
		tmp->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
		tmp->h_blocktype = cpu_to_be32(JBD2_COMMIT_BLOCK);
		tmp->h_sequence = cpu_to_be32(commit_transaction->t_tid);
	}

	JBUFFER_TRACE(descriptor, "write commit block");
	set_buffer_dirty(bh);
	if (journal->j_flags & JBD2_BARRIER) {
		set_buffer_ordered(bh);
		barrier_done = 1;
	}
	ret = sync_dirty_buffer(bh);
	/* is it possible for another commit to fail at roughly
	 * the same time as this one?  If so, we don't want to
	 * trust the barrier flag in the super, but instead want
	 * to remember if we sent a barrier request
	 */
	if (ret == -EOPNOTSUPP && barrier_done) {
		char b[BDEVNAME_SIZE];

		printk(KERN_WARNING
			"JBD: barrier-based sync failed on %s - "
			"disabling barriers\n",
			bdevname(journal->j_dev, b));
		spin_lock(&journal->j_state_lock);
		journal->j_flags &= ~JBD2_BARRIER;
		spin_unlock(&journal->j_state_lock);

		/* And try again, without the barrier */
		clear_buffer_ordered(bh);
		set_buffer_uptodate(bh);
		set_buffer_dirty(bh);
		ret = sync_dirty_buffer(bh);
	}
	put_bh(bh);		/* One for getblk() */
	jbd2_journal_put_journal_head(descriptor);

	return (ret == -EIO);
}
Example #12
int ext4_bio_write_page(struct ext4_io_submit *io,
			struct page *page,
			int len,
			struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	unsigned block_start, block_end, blocksize;
	struct ext4_io_page *io_page;
	struct buffer_head *bh, *head;
	int ret = 0;

	blocksize = 1 << inode->i_blkbits;

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);
	ClearPageError(page);

	io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS);
	if (!io_page) {
		set_page_dirty(page);
		unlock_page(page);
		return -ENOMEM;
	}
	io_page->p_page = page;
	atomic_set(&io_page->p_count, 1);
	get_page(page);

	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_start >= len) {
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}
		ret = io_submit_add_bh(io, io_page, inode, wbc, bh);
		if (ret) {
			/*
			 * We only get here on ENOMEM.  Not much else
			 * we can do but mark the page as dirty, and
			 * better luck next time.
			 */
			set_page_dirty(page);
			break;
		}
	}
	unlock_page(page);
	/*
	 * If the page was truncated before we could do the writeback,
	 * or we had a memory allocation error while trying to write
	 * the first buffer head, we won't have submitted any pages for
	 * I/O.  In that case we need to make sure we've cleared the
	 * PageWriteback bit from the page to prevent the system from
	 * wedging later on.
	 */
	put_io_page(io_page);
	return ret;
}
Example #13
/* Done it all: now write the commit record.  We should have
 * cleaned up our previous buffers by now, so if we are in abort
 * mode we can now just skip the rest of the journal write
 * entirely.
 *
 * Returns 1 if the journal needs to be aborted or 0 on success
 */
static int journal_write_commit_record(journal_t *journal,
					transaction_t *commit_transaction)
{
	struct journal_head *descriptor;
	struct buffer_head *bh;
	journal_header_t *header;
	int ret;

	if (is_journal_aborted(journal))
		return 0;

	descriptor = journal_get_descriptor_buffer(journal);
	if (!descriptor)
		return 1;

	bh = jh2bh(descriptor);

	header = (journal_header_t *)(bh->b_data);
	header->h_magic = cpu_to_be32(JFS_MAGIC_NUMBER);
	header->h_blocktype = cpu_to_be32(JFS_COMMIT_BLOCK);
	header->h_sequence = cpu_to_be32(commit_transaction->t_tid);

	JBUFFER_TRACE(descriptor, "write commit block");
	set_buffer_dirty(bh);

	if (journal->j_flags & JFS_BARRIER) {
		ret = __sync_dirty_buffer(bh, WRITE_SYNC | WRITE_BARRIER);

		/*
		 * Is it possible for another commit to fail at roughly
		 * the same time as this one?  If so, we don't want to
		 * trust the barrier flag in the super, but instead want
		 * to remember if we sent a barrier request
		 */
		if (ret == -EOPNOTSUPP) {
			char b[BDEVNAME_SIZE];

			printk(KERN_WARNING
				"JBD: barrier-based sync failed on %s - "
				"disabling barriers\n",
				bdevname(journal->j_dev, b));
			spin_lock(&journal->j_state_lock);
			journal->j_flags &= ~JFS_BARRIER;
			spin_unlock(&journal->j_state_lock);

			/* And try again, without the barrier */
			set_buffer_uptodate(bh);
			set_buffer_dirty(bh);
			ret = sync_dirty_buffer(bh);
		}
	} else {
		ret = sync_dirty_buffer(bh);
	}

	put_bh(bh);		/* One for getblk() */
	journal_put_journal_head(descriptor);

	return (ret == -EIO);
}
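
Note: Examples #11 and #13 both handle a device that rejects barrier writes: on -EOPNOTSUPP the journal drops the barrier flag, restores the buffer state (a failed sync clears the uptodate bit), and retries a plain write. A sketch of just the retry, against the same old-kernel API (__sync_dirty_buffer and the WRITE_* flags match the era of these snippets, not current kernels):

#include <linux/buffer_head.h>
#include <linux/fs.h>

static int my_sync_commit_block(struct buffer_head *bh, int use_barrier)
{
	int ret;

	if (use_barrier) {
		ret = __sync_dirty_buffer(bh, WRITE_SYNC | WRITE_BARRIER);
		if (ret != -EOPNOTSUPP)
			return ret;
		/* no barrier support: redo the write without one */
		set_buffer_uptodate(bh);
		set_buffer_dirty(bh);
	}
	return sync_dirty_buffer(bh);
}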
Example #14
static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
			const struct gfs2_inum_host *inum, unsigned int mode,
			unsigned int uid, unsigned int gid,
			const u64 *generation, dev_t dev, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct gfs2_dinode *di;
	struct buffer_head *dibh;
	struct timespec tv = CURRENT_TIME;

	dibh = gfs2_meta_new(gl, inum->no_addr);
	gfs2_trans_add_bh(gl, dibh, 1);
	gfs2_metatype_set(dibh, GFS2_METATYPE_DI, GFS2_FORMAT_DI);
	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
	di = (struct gfs2_dinode *)dibh->b_data;

	di->di_num.no_formal_ino = cpu_to_be64(inum->no_formal_ino);
	di->di_num.no_addr = cpu_to_be64(inum->no_addr);
	di->di_mode = cpu_to_be32(mode);
	di->di_uid = cpu_to_be32(uid);
	di->di_gid = cpu_to_be32(gid);
	di->di_nlink = 0;
	di->di_size = 0;
	di->di_blocks = cpu_to_be64(1);
	di->di_atime = di->di_mtime = di->di_ctime = cpu_to_be64(tv.tv_sec);
	di->di_major = cpu_to_be32(MAJOR(dev));
	di->di_minor = cpu_to_be32(MINOR(dev));
	di->di_goal_meta = di->di_goal_data = cpu_to_be64(inum->no_addr);
	di->di_generation = cpu_to_be64(*generation);
	di->di_flags = 0;

	if (S_ISREG(mode)) {
		if ((dip->i_diskflags & GFS2_DIF_INHERIT_JDATA) ||
		    gfs2_tune_get(sdp, gt_new_files_jdata))
			di->di_flags |= cpu_to_be32(GFS2_DIF_JDATA);
	} else if (S_ISDIR(mode)) {
		di->di_flags |= cpu_to_be32(dip->i_diskflags &
					    GFS2_DIF_INHERIT_JDATA);
	}

	di->__pad1 = 0;
	di->di_payload_format = cpu_to_be32(S_ISDIR(mode) ? GFS2_FORMAT_DE : 0);
	di->di_height = 0;
	di->__pad2 = 0;
	di->__pad3 = 0;
	di->di_depth = 0;
	di->di_entries = 0;
	memset(&di->__pad4, 0, sizeof(di->__pad4));
	di->di_eattr = 0;
	di->di_atime_nsec = cpu_to_be32(tv.tv_nsec);
	di->di_mtime_nsec = cpu_to_be32(tv.tv_nsec);
	di->di_ctime_nsec = cpu_to_be32(tv.tv_nsec);
	memset(&di->di_reserved, 0, sizeof(di->di_reserved));
	
	set_buffer_uptodate(dibh);

	*bhp = dibh;
}
Example #15
/*
 * Default IO end handler for temporary BJ_IO buffer_heads.
 */
static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
	BUFFER_TRACE(bh, "");
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
	unlock_buffer(bh);
}
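
Note: Example #15 is the completion side of the API: the end_io callback records the I/O result by setting or clearing the uptodate bit before unlocking. A sketch of submitting a buffer with such a handler (my_end_io and my_submit_write are hypothetical; end_buffer_write_sync is the stock equivalent when no extra bookkeeping is needed):

#include <linux/buffer_head.h>

static void my_end_io(struct buffer_head *bh, int uptodate)
{
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);	/* force a future re-read */
	unlock_buffer(bh);
	put_bh(bh);			/* drop the submit-time reference */
}

static void my_submit_write(struct buffer_head *bh)
{
	lock_buffer(bh);
	get_bh(bh);			/* held until my_end_io runs */
	bh->b_end_io = my_end_io;
	submit_bh(WRITE, bh);
}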
Example #16
void sysv_free_block(struct super_block * sb, sysv_zone_t nr)
{
	struct sysv_sb_info * sbi = SYSV_SB(sb);
	struct buffer_head * bh;
	sysv_zone_t *blocks = sbi->s_bcache;
	unsigned count;
	unsigned block = fs32_to_cpu(sbi, nr);

	/*
	 * This code does not work at all for AFS (it has a bitmap
	 * free list).  As AFS is supposed to be read-only no one
	 * should call this for an AFS filesystem anyway...
	 */
	if (sbi->s_type == FSTYPE_AFS)
		return;

	if (block < sbi->s_firstdatazone || block >= sbi->s_nzones) {
		printk("sysv_free_block: trying to free block not in datazone\n");
		return;
	}

	mutex_lock(&sbi->s_lock);
	count = fs16_to_cpu(sbi, *sbi->s_bcache_count);

	if (count > sbi->s_flc_size) {
	  printk("sysv_free_block: flc_count %d > flc_size %d\n", count, sbi->s_flc_size);
		mutex_unlock(&sbi->s_lock);
		return;
	}
	/* If the free list head in super-block is full, it is copied
	 * into this block being freed, ditto if it's completely empty
	 * (applies only on Coherent).
	 */
	if (count == sbi->s_flc_size || count == 0) {
		block += sbi->s_block_base;
		bh = sb_getblk(sb, block);
		if (!bh) {
			printk("sysv_free_block: getblk() failed\n");
			mutex_unlock(&sbi->s_lock);
			return;
		}
		memset(bh->b_data, 0, sb->s_blocksize);
		*(__fs16*)bh->b_data = cpu_to_fs16(sbi, count);
		memcpy(get_chunk(sb,bh), blocks, count * sizeof(sysv_zone_t));
		mark_buffer_dirty(bh);
		set_buffer_uptodate(bh);
		brelse(bh);
		count = 0;
	}
	sbi->s_bcache[count++] = nr;

	*sbi->s_bcache_count = cpu_to_fs16(sbi, count);
	fs32_add(sbi, sbi->s_free_blocks, 1);
	dirty_sb(sb);
	mutex_unlock(&sbi->s_lock);
}
Example #17
static void meta_prep_new(struct buffer_head *bh)
{
	struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;

	lock_buffer(bh);
	clear_buffer_dirty(bh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
}
Example #18
/* Write to quotafile */
static ssize_t ext2_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	sector_t blk = off >> EXT2_BLOCK_SIZE_BITS(sb);
	int err = 0;
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t towrite = len;
	struct buffer_head tmp_bh;
	struct buffer_head *bh;

	mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
	while (towrite > 0) {
		tocopy = sb->s_blocksize - offset < towrite ?
				sb->s_blocksize - offset : towrite;

		tmp_bh.b_state = 0;
		err = ext2_get_block(inode, blk, &tmp_bh, 1);
		if (err < 0)
			goto out;
		if (offset || tocopy != EXT2_BLOCK_SIZE(sb))
			bh = sb_bread(sb, tmp_bh.b_blocknr);
		else
			bh = sb_getblk(sb, tmp_bh.b_blocknr);
		if (!bh) {
			err = -EIO;
			goto out;
		}
		lock_buffer(bh);
		memcpy(bh->b_data+offset, data, tocopy);
		flush_dcache_page(bh->b_page);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		brelse(bh);
		offset = 0;
		towrite -= tocopy;
		data += tocopy;
		blk++;
	}
out:
	if (len == towrite) {
		mutex_unlock(&inode->i_mutex);
		return err;
	}
	if (inode->i_size < off+len-towrite)
		i_size_write(inode, off+len-towrite);
	inode->i_version++;
	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(inode);
	mutex_unlock(&inode->i_mutex);
	return len - towrite;
}
Example #19
static int ufs_alloc_lastblock(struct inode *inode)
{
    int err = 0;
    struct address_space *mapping = inode->i_mapping;
    struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
    unsigned lastfrag, i, end;
    struct page *lastpage;
    struct buffer_head *bh;

    lastfrag = (i_size_read(inode) + uspi->s_fsize - 1) >> uspi->s_fshift;

    if (!lastfrag)
        goto out;

    lastfrag--;

    lastpage = ufs_get_locked_page(mapping, lastfrag >>
                                   (PAGE_CACHE_SHIFT - inode->i_blkbits));
    if (IS_ERR(lastpage)) {
        err = -EIO;
        goto out;
    }

    end = lastfrag & ((1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1);
    bh = page_buffers(lastpage);
    for (i = 0; i < end; ++i)
        bh = bh->b_this_page;


    err = ufs_getfrag_block(inode, lastfrag, bh, 1);

    if (unlikely(err))
        goto out_unlock;

    if (buffer_new(bh)) {
        clear_buffer_new(bh);
        unmap_underlying_metadata(bh->b_bdev,
                                  bh->b_blocknr);
        /*
         * We do not zero the fragment: if it is mapped to a hole,
         * it already contains zeroes.
         */
        set_buffer_uptodate(bh);
        mark_buffer_dirty(bh);
        set_page_dirty(lastpage);
    }

out_unlock:
    ufs_put_locked_page(lastpage);
out:
    return err;
}
Example #20
File: util.c Project: AK101111/linux
void ubh_mark_buffer_uptodate (struct ufs_buffer_head * ubh, int flag)
{
	unsigned i;
	if (!ubh)
		return;
	if (flag) {
		for ( i = 0; i < ubh->count; i++ )
			set_buffer_uptodate (ubh->bh[i]);
	} else {
		for ( i = 0; i < ubh->count; i++ )
			clear_buffer_uptodate (ubh->bh[i]);
	}
}
Example #21
int ext4_bio_write_page(struct ext4_io_submit *io,
			struct page *page,
			int len,
			struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	unsigned block_start, block_end, blocksize;
	struct ext4_io_page *io_page;
	struct buffer_head *bh, *head;
	int ret = 0;

	blocksize = 1 << inode->i_blkbits;

	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));

	io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS);
	if (!io_page) {
		set_page_dirty(page);
		unlock_page(page);
		return -ENOMEM;
	}
	io_page->p_page = page;
	atomic_set(&io_page->p_count, 1);
	get_page(page);
	set_page_writeback(page);
	ClearPageError(page);

	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {

		block_end = block_start + blocksize;
		if (block_start >= len) {
			zero_user_segment(page, block_start, block_end);
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}
		clear_buffer_dirty(bh);
		ret = io_submit_add_bh(io, io_page, inode, wbc, bh);
		if (ret) {
			set_page_dirty(page);
			break;
		}
	}
	unlock_page(page);
	put_io_page(io_page);
	return ret;
}
Example #22
static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
			       u64 block, struct page *page)
{
	struct inode *inode = &ip->i_inode;
	struct buffer_head *bh;
	int release = 0;

	if (!page || page->index) {
		page = grab_cache_page(inode->i_mapping, 0);
		if (!page)
			return -ENOMEM;
		release = 1;
	}

	if (!PageUptodate(page)) {
		void *kaddr = kmap(page);
		u64 dsize = i_size_read(inode);
 
		if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
			dsize = dibh->b_size - sizeof(struct gfs2_dinode);

		memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
		memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize);
		kunmap(page);

		SetPageUptodate(page);
	}

	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits,
				     (1 << BH_Uptodate));

	bh = page_buffers(page);

	if (!buffer_mapped(bh))
		map_bh(bh, inode->i_sb, block);

	set_buffer_uptodate(bh);
	if (!gfs2_is_jdata(ip))
		mark_buffer_dirty(bh);
	if (!gfs2_is_writeback(ip))
		gfs2_trans_add_bh(ip->i_gl, bh, 0);

	if (release) {
		unlock_page(page);
		page_cache_release(page);
	}

	return 0;
}
Example #23
static int ext4_finish_convert_inline_dir(handle_t *handle,
					  struct inode *inode,
					  struct buffer_head *dir_block,
					  void *buf,
					  int inline_size)
{
	int err, csum_size = 0, header_size = 0;
	struct ext4_dir_entry_2 *de;
	struct ext4_dir_entry_tail *t;
	void *target = dir_block->b_data;

	/*
	 * First create "." and ".." and then copy the dir information
	 * back to the block.
	 */
	de = (struct ext4_dir_entry_2 *)target;
	de = ext4_init_dot_dotdot(inode, de,
		inode->i_sb->s_blocksize, csum_size,
		le32_to_cpu(((struct ext4_dir_entry_2 *)buf)->inode), 1);
	header_size = (void *)de - target;

	memcpy((void *)de, buf + EXT4_INLINE_DOTDOT_SIZE,
		inline_size - EXT4_INLINE_DOTDOT_SIZE);

	if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		csum_size = sizeof(struct ext4_dir_entry_tail);

	inode->i_size = inode->i_sb->s_blocksize;
	i_size_write(inode, inode->i_sb->s_blocksize);
	EXT4_I(inode)->i_disksize = inode->i_sb->s_blocksize;
	ext4_update_final_de(dir_block->b_data,
			inline_size - EXT4_INLINE_DOTDOT_SIZE + header_size,
			inode->i_sb->s_blocksize - csum_size);

	if (csum_size) {
		t = EXT4_DIRENT_TAIL(dir_block->b_data,
				     inode->i_sb->s_blocksize);
		initialize_dirent_tail(t, inode->i_sb->s_blocksize);
	}
	set_buffer_uptodate(dir_block);
	err = ext4_handle_dirty_dirent_node(handle, inode, dir_block);
	if (err)
		goto out;
	set_buffer_verified(dir_block);
out:
	return err;
}
Example #24
int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
		      struct inode *inode)
{
	int ret = 0;

	mlog_entry("(bh->b_blocknr = %llu, inode=%p)\n",
		   (unsigned long long)bh->b_blocknr, inode);

	BUG_ON(bh->b_blocknr < OCFS2_SUPER_BLOCK_BLKNO);
	BUG_ON(buffer_jbd(bh));

	/* No need to check for a soft readonly file system here. non
	 * journalled writes are only ever done on system files which
	 * can get modified during recovery even if read-only. */
	if (ocfs2_is_hard_readonly(osb)) {
		ret = -EROFS;
		goto out;
	}

	mutex_lock(&OCFS2_I(inode)->ip_io_mutex);

	lock_buffer(bh);
	set_buffer_uptodate(bh);

	/* remove from dirty list before I/O. */
	clear_buffer_dirty(bh);

	get_bh(bh); /* for end_buffer_write_sync() */
	bh->b_end_io = end_buffer_write_sync;
	submit_bh(WRITE, bh);

	wait_on_buffer(bh);

	if (buffer_uptodate(bh)) {
		ocfs2_set_buffer_uptodate(inode, bh);
	} else {
		/* We don't need to remove the clustered uptodate
		 * information for this bh as it's not marked locally
		 * uptodate. */
		ret = -EIO;
		put_bh(bh);
	}

	mutex_unlock(&OCFS2_I(inode)->ip_io_mutex);
out:
	mlog_exit(ret);
	return ret;
}
Example #25
void *hpfs_get_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp)
{
	struct buffer_head *bh;
	/*return hpfs_map_sector(s, secno, bhp, 0);*/

	cond_resched();

	if ((*bhp = bh = sb_getblk(s, secno)) != NULL) {
		if (!buffer_uptodate(bh)) wait_on_buffer(bh);
		set_buffer_uptodate(bh);
		return bh->b_data;
	} else {
		printk("HPFS: hpfs_get_sector: getblk failed\n");
		return NULL;
	}
}
Example #26
int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
		      struct ocfs2_caching_info *ci)
{
	int ret = 0;

	trace_ocfs2_write_block((unsigned long long)bh->b_blocknr, ci);

	BUG_ON(bh->b_blocknr < OCFS2_SUPER_BLOCK_BLKNO);
	BUG_ON(buffer_jbd(bh));

	/* No need to check for a soft readonly file system here. non
	 * journalled writes are only ever done on system files which
	 * can get modified during recovery even if read-only. */
	if (ocfs2_is_hard_readonly(osb)) {
		ret = -EROFS;
		mlog_errno(ret);
		goto out;
	}

	ocfs2_metadata_cache_io_lock(ci);

	lock_buffer(bh);
	set_buffer_uptodate(bh);

	/* remove from dirty list before I/O. */
	clear_buffer_dirty(bh);

	get_bh(bh); /* for end_buffer_write_sync() */
	bh->b_end_io = end_buffer_write_sync;
	submit_bh(REQ_OP_WRITE, 0, bh);

	wait_on_buffer(bh);

	if (buffer_uptodate(bh)) {
		ocfs2_set_buffer_uptodate(ci, bh);
	} else {
		/* We don't need to remove the clustered uptodate
		 * information for this bh as it's not marked locally
		 * uptodate. */
		ret = -EIO;
		mlog_errno(ret);
	}

	ocfs2_metadata_cache_io_unlock(ci);
out:
	return ret;
}
Example #27
static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
				   unsigned int from, unsigned int to)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from || start >= to)
			continue;
		if (gfs2_is_jdata(ip))
			set_buffer_uptodate(bh);
		gfs2_trans_add_bh(ip->i_gl, bh, 0);
	}
}
Example #28
File: buffer.c Project: 19Dan01/linux
void *hpfs_get_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp)
{
	struct buffer_head *bh;
	/*return hpfs_map_sector(s, secno, bhp, 0);*/

	hpfs_lock_assert(s);

	cond_resched();

	if ((*bhp = bh = sb_getblk(s, secno)) != NULL) {
		if (!buffer_uptodate(bh)) wait_on_buffer(bh);
		set_buffer_uptodate(bh);
		return bh->b_data;
	} else {
		pr_err("%s(): getblk failed\n", __func__);
		return NULL;
	}
}
Example #29
static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
			    int sync)
{
	struct buffer_head *bh;
	sector_t end = beg + n;

	for (; beg < end; ++beg) {
		bh = sb_getblk(inode->i_sb, beg);
		lock_buffer(bh);
		memset(bh->b_data, 0, inode->i_sb->s_blocksize);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		if (IS_SYNC(inode) || sync)
			sync_dirty_buffer(bh);
		brelse(bh);
	}
}
Example #30
File: super.c Project: helicopter3/wl500g
/* Write to quotafile */
static ssize_t ufs_quota_write(struct super_block *sb, int type,
                               const char *data, size_t len, loff_t off)
{
    struct inode *inode = sb_dqopt(sb)->files[type];
    sector_t blk = off >> sb->s_blocksize_bits;
    int err = 0;
    int offset = off & (sb->s_blocksize - 1);
    int tocopy;
    size_t towrite = len;
    struct buffer_head *bh;

    mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
    while (towrite > 0) {
        tocopy = sb->s_blocksize - offset < towrite ?
                 sb->s_blocksize - offset : towrite;

        bh = ufs_bread(inode, blk, 1, &err);
        if (!bh)
            goto out;
        lock_buffer(bh);
        memcpy(bh->b_data+offset, data, tocopy);
        flush_dcache_page(bh->b_page);
        set_buffer_uptodate(bh);
        mark_buffer_dirty(bh);
        unlock_buffer(bh);
        brelse(bh);
        offset = 0;
        towrite -= tocopy;
        data += tocopy;
        blk++;
    }
out:
    if (len == towrite) {
        mutex_unlock(&inode->i_mutex);
        return err;
    }
    if (inode->i_size < off+len-towrite)
        i_size_write(inode, off+len-towrite);
    inode->i_version++;
    inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
    mark_inode_dirty(inode);
    mutex_unlock(&inode->i_mutex);
    return len - towrite;
}