Example 1
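/*
 * Propagate dirtiness of a data buffer attached to a direct bmap.
 * Only bmaps using virtual block numbers (translated through the DAT)
 * need any work here; others return immediately.
 */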
static int nilfs_direct_propagate(struct nilfs_bmap *bmap,
				  struct buffer_head *bh)
{
	struct nilfs_palloc_req oldreq, newreq;
	struct inode *dat;
	__u64 key;
	__u64 ptr;
	int ret;

	if (!NILFS_BMAP_USE_VBN(bmap))
		return 0;

	dat = nilfs_bmap_get_dat(bmap);
	key = nilfs_bmap_data_get_key(bmap, bh);
	ptr = nilfs_direct_get_ptr(bmap, key);
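	/*
	 * A buffer not yet flagged "volatile" still points at a virtual
	 * block number committed on disk, so retire the old DAT entry
	 * and allocate a fresh one before the block is rewritten.  A
	 * buffer already flagged volatile has a pending address, so
	 * marking its DAT entry dirty is enough.
	 */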
	if (!buffer_nilfs_volatile(bh)) {
		oldreq.pr_entry_nr = ptr;
		newreq.pr_entry_nr = ptr;
		ret = nilfs_dat_prepare_update(dat, &oldreq, &newreq);
		if (ret < 0)
			return ret;
		nilfs_dat_commit_update(dat, &oldreq, &newreq,
					bmap->b_ptr_type == NILFS_BMAP_PTR_VS);
		set_buffer_nilfs_volatile(bh);
		nilfs_direct_set_ptr(bmap, key, newreq.pr_entry_nr);
	} else
		ret = nilfs_dat_mark_dirty(dat, ptr);

	return ret;
}
Example 2
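/*
 * Insert the block for @key into the direct mapping.  @ptr carries the
 * buffer head of the new block (see the cast below); the pointer that
 * is actually stored comes from nilfs_bmap_prepare_alloc_ptr().
 */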
static int nilfs_direct_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
{
	union nilfs_bmap_ptr_req req;
	struct inode *dat = NULL;
	struct buffer_head *bh;
	int ret;

	if (key > NILFS_DIRECT_KEY_MAX)
		return -ENOENT;
	if (nilfs_direct_get_ptr(bmap, key) != NILFS_BMAP_INVALID_PTR)
		return -EEXIST;

	if (NILFS_BMAP_USE_VBN(bmap)) {
		req.bpr_ptr = nilfs_direct_find_target_v(bmap, key);
		dat = nilfs_bmap_get_dat(bmap);
	}
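	/*
	 * Reserve the block pointer to store; with virtual block
	 * numbers this allocates a new DAT entry near the hint chosen
	 * above.
	 */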
	ret = nilfs_bmap_prepare_alloc_ptr(bmap, &req, dat);
	if (!ret) {
		/* ptr must be a pointer to a buffer head. */
		bh = (struct buffer_head *)((unsigned long)ptr);
		set_buffer_nilfs_volatile(bh);

		nilfs_bmap_commit_alloc_ptr(bmap, &req, dat);
		nilfs_direct_set_ptr(bmap, key, req.bpr_ptr);

		if (!nilfs_bmap_dirty(bmap))
			nilfs_bmap_set_dirty(bmap);

		if (NILFS_BMAP_USE_VBN(bmap))
			nilfs_bmap_set_target_v(bmap, key, req.bpr_ptr);

		nilfs_inode_add_blocks(bmap->b_inode, 1);
	}
	return ret;
}
Example 3
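/*
 * Virtual-block-number variant of the propagation path: a buffer not
 * yet flagged "volatile" is given a fresh virtual block number via the
 * prepare/commit update helpers, while an already volatile buffer only
 * has its entry marked dirty.
 */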
static int nilfs_direct_propagate_v(struct nilfs_direct *direct,
				    struct buffer_head *bh)
{
	union nilfs_bmap_ptr_req oldreq, newreq;
	__u64 key;
	__u64 ptr;
	int ret;

	key = nilfs_bmap_data_get_key(&direct->d_bmap, bh);
	ptr = nilfs_direct_get_ptr(direct, key);
	if (!buffer_nilfs_volatile(bh)) {
		oldreq.bpr_ptr = ptr;
		newreq.bpr_ptr = ptr;
		ret = nilfs_bmap_prepare_update_v(&direct->d_bmap, &oldreq,
						  &newreq);
		if (ret < 0)
			return ret;
		nilfs_bmap_commit_update_v(&direct->d_bmap, &oldreq, &newreq);
		set_buffer_nilfs_volatile(bh);
		nilfs_direct_set_ptr(direct, key, newreq.bpr_ptr);
	} else
		ret = nilfs_bmap_mark_dirty(&direct->d_bmap, ptr);

	return ret;
}
Example 4
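/*
 * Create a buffer for a newly allocated block in the btnode cache of
 * the bmap's inode and flag it "volatile", marking that its disk
 * location is not settled yet.
 */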
int nilfs_bmap_get_new_block(const struct nilfs_bmap *bmap, __u64 ptr,
			     struct buffer_head **bhp)
{
	int ret;

	ret = nilfs_btnode_get(&NILFS_BMAP_I(bmap)->i_btnode_cache,
			       ptr, 0, bhp, 1);
	if (ret < 0)
		return ret;
	set_buffer_nilfs_volatile(*bhp);
	return 0;
}
Example 5
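/*
 * Commit phase of a direct-bmap insertion: mark the new block's buffer
 * volatile, commit the pointer reserved by the prepare phase, store it
 * in the key's slot, dirty the bmap, and remember the pointer as the
 * next allocation target when virtual block numbers are in use.
 */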
static void nilfs_direct_commit_insert(struct nilfs_direct *direct,
				       union nilfs_bmap_ptr_req *req,
				       __u64 key, __u64 ptr)
{
	struct buffer_head *bh;

	/* ptr must be a pointer to a buffer head. */
	bh = (struct buffer_head *)((unsigned long)ptr);
	set_buffer_nilfs_volatile(bh);

	nilfs_bmap_commit_alloc_ptr(&direct->d_bmap, req);
	nilfs_direct_set_ptr(direct, key, req->bpr_ptr);

	if (!nilfs_bmap_dirty(&direct->d_bmap))
		nilfs_bmap_set_dirty(&direct->d_bmap);

	if (NILFS_BMAP_USE_VBN(&direct->d_bmap))
		nilfs_direct_set_target_v(direct, key, req->bpr_ptr);
}