Example #1
/*
 * Support function for mpage_readpages.  The fs-supplied get_block might
 * return an up-to-date buffer.  This is used to map that buffer into
 * the page, which allows readpage to avoid triggering a duplicate call
 * to get_block.
 *
 * The idea is to avoid adding buffers to pages that don't already have
 * them.  So when the buffer is up to date and the page size == block size,
 * this marks the page up to date instead of adding new buffers.
 */
static void
map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
{
	struct _inode *inode = tx_cache_get_inode_ro(page->mapping->host);
	struct buffer_head *page_bh, *head;
	int block = 0;

	if (!page_has_buffers(page)) {
		/*
		 * don't make any buffers if there is only one buffer on
		 * the page and the page just needs to be set up to date
		 */
		if (inode->i_blkbits == PAGE_CACHE_SHIFT &&
		    buffer_uptodate(bh)) {
			SetPageUptodate(page);
			return;
		}
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);
	}
	head = page_buffers(page);
	page_bh = head;
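	/*
	 * Walk the circular list of buffers on this page; when we reach
	 * the buffer at index page_block, copy the mapping state into it.
	 */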
	do {
		if (block == page_block) {
			page_bh->b_state = bh->b_state;
			page_bh->b_bdev = bh->b_bdev;
			page_bh->b_blocknr = bh->b_blocknr;
			break;
		}
		page_bh = page_bh->b_this_page;
		block++;
	} while (page_bh != head);
}
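For context, here is a minimal sketch, not from the source, of how a caller along the lines of do_mpage_readpage might use map_buffer_to_page: when the fs-supplied get_block hands back an already up-to-date buffer, the mapping is copied into the page instead of issuing new I/O. The function name and error handling below are illustrative assumptions.

static int example_map_step(struct inode *inode, struct page *page,
			    sector_t block_in_file, get_block_t *get_block)
{
	struct buffer_head map_bh;

	map_bh.b_state = 0;
	map_bh.b_size = 1 << inode->i_blkbits;	/* map a single block */
	if (get_block(inode, block_in_file, &map_bh, 0))
		return -EIO;			/* illustrative error path */

	if (buffer_uptodate(&map_bh)) {
		/* Reuse the mapped data; avoids a duplicate get_block call. */
		map_buffer_to_page(page, &map_bh, 0);
		return 0;
	}
	return 1;	/* caller would fall back to a real block read */
}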
Example #2
long do_fsync(struct file *file, int datasync)
{
	int ret;
	int err;
	struct address_space *mapping = file->f_mapping;
	
	if (live_transaction()) {
		/* DEP 5/27/10 - Defer fsync until commit. */
		struct deferred_object_operation *def_op;
		txobj_thread_list_node_t *list_node = workset_has_object(&file->f_mapping->host->xobj);

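		/*
		 * If the inode is not yet in the transaction's workset, add
		 * the file and inode (read-only) so a workset node exists to
		 * hang the deferred operation on.
		 */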
		if (!list_node) {
			tx_cache_get_file_ro(file);
			tx_cache_get_inode_ro(file->f_mapping->host);
			list_node = workset_has_object(&file->f_mapping->host->xobj);
		}

		def_op = alloc_deferred_object_operation();
		INIT_LIST_HEAD(&def_op->list);
		def_op->type = DEFERRED_TYPE_FSYNC;
		def_op->u.fsync.datasync = datasync;
		def_op->u.fsync.file = file;

		/* DEP: Pin the file until the sync is executed */
		tx_atomic_inc_not_zero(&file->f_count);

		/* XXX: Could probably use something finer-grained here. */
		WORKSET_LOCK(current->transaction);
		list_add(&def_op->list, &list_node->deferred_operations);
		WORKSET_UNLOCK(current->transaction);
		return 0;
	}

	if (!file->f_op || !file->f_op->fsync) {
		/* Why?  We can still call filemap_fdatawrite */
		ret = -EINVAL;
		goto out;
	}

	ret = filemap_fdatawrite(mapping);

	/*
	 * We need to protect against concurrent writers, which could cause
	 * livelocks in fsync_buffers_list().
	 */
	if (!committing_transaction())
		mutex_lock(&mapping->host->i_mutex);
	err = file->f_op->fsync(file, file_get_dentry(file), datasync);
	if (!ret)
		ret = err;
	if (!committing_transaction())
		mutex_unlock(&mapping->host->i_mutex);
	err = filemap_fdatawait(mapping);
	if (!ret)
		ret = err;
out:
	return ret;
}
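To make the deferral concrete, here is a hedged sketch of how commit might drain the deferred_operations list that do_fsync populates above, replaying each recorded fsync and dropping the pinned file reference. The function name and the free_deferred_object_operation helper are assumptions, not from the source.

static void replay_deferred_fsyncs(txobj_thread_list_node_t *list_node)
{
	struct deferred_object_operation *def_op, *tmp;

	list_for_each_entry_safe(def_op, tmp,
				 &list_node->deferred_operations, list) {
		if (def_op->type != DEFERRED_TYPE_FSYNC)
			continue;
		/* Run the sync that was deferred at transaction time. */
		do_fsync(def_op->u.fsync.file, def_op->u.fsync.datasync);
		fput(def_op->u.fsync.file);	/* drop the pinned reference */
		list_del(&def_op->list);
		free_deferred_object_operation(def_op);	/* assumed helper */
	}
}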
Example #3
/*
 * Generic function to fsync a file.
 *
 * filp may be NULL if called via the msync of a vma.
 */
int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
{
	struct inode *inode = tx_cache_get_dentry(dentry)->d_inode;
	struct super_block *sb;
	int ret, err;

	/* sync the inode to buffers */
	ret = write_inode_now(inode, 0);

	/* sync the superblock to buffers */
	sb = tx_cache_get_inode_ro(inode)->i_sb;
	lock_super(sb);
	if (sb->s_op->write_super)
		sb->s_op->write_super(sb);
	unlock_super(sb);

	/* .. finally sync the buffers to disk */
	err = sync_blockdev(sb->s_bdev);
	if (!ret)
		ret = err;
	return ret;
}
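As a usage note, file_fsync served as the stock .fsync method for simple block-based filesystems in kernels of this era; below is a minimal sketch of wiring it into a file_operations table (the struct name is illustrative).

static const struct file_operations examplefs_file_ops = {
	.read	= do_sync_read,		/* generic synchronous read */
	.write	= do_sync_write,	/* generic synchronous write */
	.fsync	= file_fsync,		/* inode + superblock + blockdev sync */
};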