Example no. 1
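/*
 * Obtain a buffer for a newly allocated b-tree node block from the
 * btnode cache of the bmap inode, and mark the buffer as volatile.
 */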
int nilfs_bmap_get_new_block(const struct nilfs_bmap *bmap, __u64 ptr,
			     struct buffer_head **bhp)
{
	int ret;

	ret = nilfs_btnode_get(&NILFS_BMAP_I(bmap)->i_btnode_cache,
			       ptr, 0, bhp, 1);
	if (ret < 0)
		return ret;
	set_buffer_nilfs_volatile(*bhp);
	return 0;
}
/**
 * nilfs_btnode_prepare_change_key - prepare to move the contents of the
 *	block at the old key to the new key.
 * @btnc: btnode cache (address space of the b-tree node blocks)
 * @ctxt: change-key context holding the old/new keys and buffers
 *
 * The old buffer is not removed; it may be reused as the new buffer.
 * Returns zero on success, -ENOMEM on memory allocation failure, or
 * -EIO on disk read error.
 */
int nilfs_btnode_prepare_change_key(struct address_space *btnc,
				    struct nilfs_btnode_chkey_ctxt *ctxt)
{
	struct buffer_head *obh, *nbh;
	struct inode *inode = NILFS_BTNC_I(btnc);
	__u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
	int err;

	if (oldkey == newkey)
		return 0;

	obh = ctxt->bh;
	ctxt->newbh = NULL;

	if (inode->i_blkbits == PAGE_CACHE_SHIFT) {
		lock_page(obh->b_page);
#if HAVE_EXPORTED_RADIX_TREE_PRELOAD
		/*
		 * radix_tree_preload() cannot be called on kernels older
		 * than 2.6.23 because it is not exported to modules.
		 */
		err = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
		if (err)
			goto failed_unlock;
#endif
		/* BUG_ON(oldkey != obh->b_page->index); */
		if (unlikely(oldkey != obh->b_page->index))
			NILFS_PAGE_BUG(obh->b_page,
				       "invalid oldkey %lld (newkey=%lld)",
				       (unsigned long long)oldkey,
				       (unsigned long long)newkey);

retry:
		WRITE_LOCK_IRQ(&btnc->tree_lock);
		err = radix_tree_insert(&btnc->page_tree, newkey, obh->b_page);
		WRITE_UNLOCK_IRQ(&btnc->tree_lock);
		/*
		 * Note: page->index will not change to newkey until
		 * nilfs_btnode_commit_change_key() is called.  The page
		 * lock is held to protect the page in this intermediate
		 * state.
		 */
#if HAVE_EXPORTED_RADIX_TREE_PRELOAD
		radix_tree_preload_end();
#endif
		if (!err)
			return 0;
		else if (err != -EEXIST)
			goto failed_unlock;

		err = invalidate_inode_pages2_range(btnc, newkey, newkey);
		if (!err)
			goto retry;
		/* fall back to copy mode */
		unlock_page(obh->b_page);
	}

	err = nilfs_btnode_get(btnc, newkey, 0, &nbh, 1);
	if (likely(!err)) {
		BUG_ON(nbh == obh);
		ctxt->newbh = nbh;
	}
	return err;

 failed_unlock:
	unlock_page(obh->b_page);
	return err;
}
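The function above only performs the prepare step; the comment inside it refers to nilfs_btnode_commit_change_key() for finishing the move. The following is a minimal, hypothetical caller sketch (not taken from the original source): it assumes the companion commit helper takes the same (btnc, ctxt) pair, and that the caller updates its own references to the key between the two calls.

/*
 * Hypothetical caller sketch: assumes nilfs_btnode_commit_change_key()
 * exists with a (btnc, ctxt) signature, as suggested by the comment in
 * nilfs_btnode_prepare_change_key().
 */
static int nilfs_btnode_move_key(struct address_space *btnc,
				 struct buffer_head *bh,
				 __u64 oldkey, __u64 newkey)
{
	struct nilfs_btnode_chkey_ctxt ctxt = {
		.oldkey = oldkey,
		.newkey = newkey,
		.bh = bh,
	};
	int err;

	err = nilfs_btnode_prepare_change_key(btnc, &ctxt);
	if (err)		/* -ENOMEM or -EIO, see the kernel-doc above */
		return err;

	/* ... update b-tree pointers that refer to oldkey here ... */

	nilfs_btnode_commit_change_key(btnc, &ctxt);
	return 0;
}

The page lock taken in the prepare step is what keeps the page safe while it is reachable in an intermediate state, before commit updates page->index to the new key.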
Example no. 3
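/*
 * Read the b-tree node block designated by ptr through the btnode cache
 * of the bmap inode.  Unlike nilfs_bmap_get_new_block(), the block is an
 * existing one and its buffer is not marked volatile.
 */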
int nilfs_bmap_get_block(const struct nilfs_bmap *bmap, __u64 ptr,
			 struct buffer_head **bhp)
{
	return nilfs_btnode_get(&NILFS_BMAP_I(bmap)->i_btnode_cache,
				ptr, 0, bhp, 0);
}