Example #1
static void ileaf_dump(struct btree *btree, vleaf *vleaf)
{
	struct sb *sb = btree->sb;
	struct ileaf *leaf = vleaf;
	inum_t inum = ibase(leaf);
	be_u16 *dict = vleaf + sb->blocksize;
	unsigned offset = 0;
	printf("inode table block 0x%Lx/%i (%x bytes free)\n", (L)ibase(leaf), icount(leaf), ileaf_free(btree, leaf));
	//hexdump(dict - icount(leaf), icount(leaf) * 2);
	for (int i = -1; -i <= icount(leaf); i--, inum++) {
		int limit = from_be_u16(dict[i]), size = limit - offset;
		if (!size)
			continue;
		printf("  0x%Lx: ", (L)inum);
		//printf("[%x] ", offset);
		if (size < 0)
			printf("<corrupt>\n");
		else if (!size)
			printf("<empty>\n");
		else {
			/* FIXME: this doesn't work in kernel */
			struct inode inode = { .i_sb = vfs_sb(btree->sb) };
			unsigned xsize = decode_xsize(&inode, leaf->table + offset, size);
			tux_inode(&inode)->xcache = xsize ? new_xcache(xsize) : NULL;
			decode_attrs(&inode, leaf->table + offset, size);
			dump_attrs(&inode);
			xcache_dump(&inode);
			free(tux_inode(&inode)->xcache);
		}
		offset = limit;
	}
}

void *ileaf_lookup(struct btree *btree, inum_t inum, struct ileaf *leaf, unsigned *result)
{
	assert(inum >= ibase(leaf));
	assert(inum < ibase(leaf) + btree->entries_per_leaf);
	unsigned at = inum - ibase(leaf), size = 0;
	void *attrs = NULL;
	printf("lookup inode 0x%Lx, %Lx + %x\n", (L)inum, (L)ibase(leaf), at);
	if (at < icount(leaf)) {
		be_u16 *dict = (void *)leaf + btree->sb->blocksize;
		unsigned offset = atdict(dict, at);
		if ((size = from_be_u16(*(dict - at - 1)) - offset))
			attrs = leaf->table + offset;
	}
	*result = size;
	return attrs;
}
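
The lookup above relies on the ileaf layout: attribute bytes are packed into leaf->table growing upward from the start of the block, while a dictionary of 16-bit end offsets grows downward from the top of the block, so entry "at" spans the range between the previous entry's end offset and *(dict - at - 1). Below is a minimal, self-contained sketch of that reverse-indexed dictionary. It is an illustration only: toy_leaf, toy_dict, toy_end and toy_lookup are invented names, and the on-disk big-endian conversion (from_be_u16) is omitted.

/* Standalone illustration of the end-offset dictionary; not tux3 code. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCKSIZE 256

struct toy_leaf {
	/* Attribute bytes grow up from block[0]; 16-bit end offsets grow
	 * down from block[BLOCKSIZE]. Keep the block 2-byte aligned so
	 * the dictionary accesses are well defined. */
	_Alignas(uint16_t) unsigned char block[BLOCKSIZE];
};

static uint16_t *toy_dict(struct toy_leaf *leaf)
{
	return (uint16_t *)(leaf->block + BLOCKSIZE);
}

/* End offset of entry "at"; dict[-1] belongs to entry 0, dict[-2] to entry 1... */
static unsigned toy_end(struct toy_leaf *leaf, int at)
{
	return at < 0 ? 0 : toy_dict(leaf)[-(at + 1)];
}

static void *toy_lookup(struct toy_leaf *leaf, int at, unsigned *size)
{
	unsigned start = toy_end(leaf, at - 1), end = toy_end(leaf, at);

	*size = end - start;
	return *size ? leaf->block + start : NULL;
}

int main(void)
{
	struct toy_leaf leaf = { };
	unsigned size;

	/* Two packed entries: "abc" (entry 0) and "defgh" (entry 1). */
	memcpy(leaf.block, "abcdefgh", 8);
	toy_dict(&leaf)[-1] = 3;	/* entry 0 ends at offset 3 */
	toy_dict(&leaf)[-2] = 8;	/* entry 1 ends at offset 8 */

	void *attrs = toy_lookup(&leaf, 1, &size);
	printf("entry 1: %.*s (%u bytes)\n", (int)size, (char *)attrs, size);
	return 0;
}

The subtraction of adjacent dictionary slots is the same arithmetic ileaf_lookup performs with atdict() and *(dict - at - 1); a zero difference is what makes it report no attributes for that inum.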
/*
 * Read the iattrs, then clear the iattr dirty state so that no
 * further iattr fork is needed.
 *
 * Caller must hold tuxnode->lock.
 */
static void tux3_iattr_read_and_clear(struct inode *inode,
				      struct tux3_iattr_data *result,
				      unsigned delta)
{
	struct tux3_inode *tuxnode = tux_inode(inode);
	unsigned long flags;

	trace("inum %Lu, delta %u", tuxnode->inum, delta);

	/*
	 * If the delta is the same, the iattrs are available in the
	 * inode. If not, the iattrs were forked.
	 */
	flags = tuxnode->flags;
	if (!tux3_iattrsta_has_delta(flags) ||
	    tux3_iattrsta_get_delta(flags) == tux3_delta(delta)) {
		/*
		 * If only the btree is dirty, or if it is dirty and
		 * not forked, use the inode.
		 */
		idata_copy(inode, result);
		tuxnode->flags = tux3_iattrsta_clear(flags);
	} else {
		/* If dirty and forked, use copy */
		struct tux3_iattr_data *idata =
			&tux3_inode_ddc(inode, delta)->idata;
		assert(idata->present != TUX3_INVALID_PRESENT);
		*result = *idata;
	}

	/* For debugging, set ->present to an invalid value after reading */
	tux3_inode_ddc(inode, delta)->idata.present = TUX3_INVALID_PRESENT;
}
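
tuxnode->flags packs the iattr dirty state: tux3_iattrsta_has_delta() says whether the iattrs were dirtied at all, tux3_iattrsta_get_delta() returns the delta at which that happened, and tux3_iattrsta_clear() resets the state once the backend has read the iattrs. The helpers themselves are not shown in these examples, so the sketch below assumes a hypothetical encoding (one valid bit plus a small delta field, matching the way the code compares against tux3_delta(delta)); the real tux3 bit layout may differ.

/* Hypothetical encoding, for illustration only; the real tux3_iattrsta_*
 * helpers may pack the state differently. */
#include <assert.h>

#define IATTR_DELTA_MASK	0x3u	/* assumed: low bits of the delta counter */
#define IATTR_HAS_DELTA		0x4u	/* assumed: "iattrs dirtied" bit */

static inline unsigned iattrsta_update(unsigned flags, unsigned delta)
{
	/* Record "dirtied at this delta", keeping unrelated flag bits. */
	return (flags & ~(IATTR_HAS_DELTA | IATTR_DELTA_MASK))
		| IATTR_HAS_DELTA | (delta & IATTR_DELTA_MASK);
}

static inline int iattrsta_has_delta(unsigned flags)
{
	return !!(flags & IATTR_HAS_DELTA);
}

static inline unsigned iattrsta_get_delta(unsigned flags)
{
	return flags & IATTR_DELTA_MASK;
}

static inline unsigned iattrsta_clear(unsigned flags)
{
	return flags & ~(IATTR_HAS_DELTA | IATTR_DELTA_MASK);
}

int main(void)
{
	unsigned flags = 0;

	flags = iattrsta_update(flags, 5);	/* frontend dirties at delta 5 */
	assert(iattrsta_has_delta(flags));
	assert(iattrsta_get_delta(flags) == (5 & IATTR_DELTA_MASK));

	flags = iattrsta_clear(flags);		/* backend read the iattrs back */
	assert(!iattrsta_has_delta(flags));
	return 0;
}

Whatever the actual encoding, the protocol is the point: the frontend records the delta when it dirties the iattrs, and the backend compares that recorded delta with the delta it is committing to decide whether to read the live inode or the forked copy in tux3_inode_ddc().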
Example #3
static int __tux_add_dirent(struct inode *dir, struct dentry *dentry,
			    struct inode *inode)
{
	if (DEBUG_MODE_K == 1) {
		printf("\t\t\t\t%25s[K]  %25s  %4d  #in\n",
		       __FILE__, __func__, __LINE__);
	}
	return tux_create_dirent(dir, &dentry->d_name, tux_inode(inode)->inum,
				 inode->i_mode);
}
Example #4
File: dir.c Project: Zkin/tux3
/*
 * NOTE: We don't have ".." for now; in any case, this shouldn't be
 * used for "..". rename() usually shouldn't update ->mtime for "..".
 */
void tux_update_dirent(struct inode *dir, struct buffer_head *buffer,
		       tux_dirent *entry, struct inode *new_inode)
{
	inum_t new_inum = tux_inode(new_inode)->inum;

	tux_update_entry(buffer, entry, new_inum, new_inode->i_mode);

	tux3_iattrdirty(dir);
	dir->i_mtime = dir->i_ctime = gettime();
	tux3_mark_inode_dirty(dir);
}
void tux3_iattrdirty(struct inode *inode)
{
	struct tux3_inode *tuxnode = tux_inode(inode);
	unsigned delta = tux3_inode_delta(inode);
	unsigned flags = tuxnode->flags;

	/* If dirtied on this delta, nothing to do */
	if (tux3_iattrsta_has_delta(flags) &&
	    tux3_iattrsta_get_delta(flags) == tux3_delta(delta))
		return;

	trace("inum %Lu, delta %u", tuxnode->inum, delta);

	spin_lock(&tuxnode->lock);
	flags = tuxnode->flags;
	if (S_ISREG(inode->i_mode) || tux3_iattrsta_has_delta(flags)) {
		unsigned old_delta;

		/*
		 * For a regular file, even if the iattrs are clean,
		 * we have to provide stable idata for the backend.
		 *
		 * The backend may be committing data pages. If so, it
		 * has to check idata->i_size and may save the dtree
		 * root, but the previous delta doesn't have stable
		 * iattrs.
		 *
		 * So this provides stable iattrs for regular files,
		 * even if the previous delta is clean.
		 *
		 * Other types don't have this problem, because they:
		 * - never dirty iattrs (e.g. volmap), i.e. the iattrs
		 *   are always stable, or
		 * - dirty iattrs together with data, e.g. a directory
		 *   updates its timestamps along with its data blocks.
		 */
		if (S_ISREG(inode->i_mode) && !tux3_iattrsta_has_delta(flags))
			old_delta = tux3_delta(delta - 1);
		else
			old_delta = tux3_iattrsta_get_delta(flags);

		/* If the delta differs, the iattrs were stabilized. Copy them. */
		if (old_delta != tux3_delta(delta)) {
			struct tux3_iattr_data *idata =
				&tux3_inode_ddc(inode, old_delta)->idata;
			idata_copy(inode, idata);
		}
	}
	/* Update iattr state to current delta */
	tuxnode->flags = tux3_iattrsta_update(flags, delta);
	spin_unlock(&tuxnode->lock);
}
Example #6
/* Test basic low level functions */
static void test01(struct sb *sb)
{
	char attrs[1000] = { };
	struct xattr *xattr;
	int err;

	change_begin_atomic(sb);

	/* Test positive and negative refcount carry */
	atom_t atom;
	err = make_atom(sb->atable, "foo", 3, &atom);
	test_assert(!err);
	err = atomref(sb->atable, atom, 1 << 15);
	test_assert(!err);
	err = atomref(sb->atable, atom, 1 << 15);
	test_assert(!err);
	err = atomref(sb->atable, atom, -(1 << 15));
	test_assert(!err);
	err = atomref(sb->atable, atom, -(1 << 15));
	test_assert(!err);

	atom_t atom1, atom2, atom3;
	/* Test atom table */
	err = make_atom(sb->atable, "foo", 3, &atom1);
	test_assert(!err);
	err = make_atom(sb->atable, "foo", 3, &atom2);
	test_assert(!err);
	test_assert(atom1 == atom2);

	err = make_atom(sb->atable, "bar", 3, &atom1);
	test_assert(!err);
	err = make_atom(sb->atable, "foo", 3, &atom2);
	test_assert(!err);
	test_assert(atom1 != atom2);
	err = make_atom(sb->atable, "bar", 3, &atom3);
	test_assert(!err);
	test_assert(atom1 == atom3);

	change_end_atomic(sb);

	struct inode *inode;
	struct tux3_inode *tuxnode;
	struct tux_iattr iattr = { .mode = S_IFREG, };
	inode = tuxcreate(sb->rootdir, "foo", 3, &iattr);
	test_assert(inode);
	tuxnode = tux_inode(inode);

	struct xcache_data data[] = {
		{ .buf = "hello ", .len = strlen("hello "), .atom = 0x666, },
		{ .buf = "world!", .len = strlen("world!"), .atom = 0x777, },

/* Mark inode dirty to delete. (called from ->drop_inode()). */
static void __tux3_mark_inode_to_delete(struct inode *inode, unsigned delta)
{
	struct tux3_inode *tuxnode = tux_inode(inode);
	unsigned flags;

	trace("mark as dead: inum %Lu, delta %d", tuxnode->inum, delta);

	spin_lock(&tuxnode->lock);
	flags = tuxnode->flags;
	assert(!tux3_deadsta_has_delta(flags));
	/* Mark inode dirty to delete on this delta */
	tuxnode->flags |= tux3_deadsta_delta(delta);
	spin_unlock(&tuxnode->lock);
}
/* Caller must hold tuxnode->lock. */
static void idata_copy(struct inode *inode, struct tux3_iattr_data *idata)
{
	idata->present		= tux_inode(inode)->present;
	idata->i_mode		= inode->i_mode;
	idata->i_uid		= i_uid_read(inode);
	idata->i_gid		= i_gid_read(inode);
	idata->i_nlink		= inode->i_nlink;
	idata->i_rdev		= inode->i_rdev;
	idata->i_size		= i_size_read(inode);
//	idata->i_atime		= inode->i_atime;
	idata->i_mtime		= inode->i_mtime;
	idata->i_ctime		= inode->i_ctime;
	idata->i_version	= inode->i_version;
}
/*
 * Check whether the inode was dead, then clear the dead delta state
 * so that no further fork is needed.
 */
static void tux3_dead_read_and_clear(struct inode *inode,
				     unsigned *deleted,
				     unsigned delta)
{
	struct tux3_inode *tuxnode = tux_inode(inode);
	unsigned flags = tuxnode->flags;

	*deleted = 0;

	if (tux3_deadsta_has_delta(flags) &&
	    tux3_deadsta_get_delta(flags) == tux3_delta(delta)) {
		*deleted = 1;
		flags |= TUX3_INODE_DEAD;
		tuxnode->flags = tux3_deadsta_clear(flags);
	}
}
Example #10
static void __check_xcache(struct inode *inode, struct xcache_data *data,
			   int nr_data)
{
	for (int i = 0; i < nr_data; i++) {
		struct xattr *xattr;
		xattr = xcache_lookup(tux_inode(inode)->xcache, data[i].atom);
		if (data[i].len == -1)
			test_assert(IS_ERR(xattr));
		else {
			test_assert(!IS_ERR(xattr));
			test_assert(xattr->atom == data[i].atom);
			test_assert(xattr->size == data[i].len);
			test_assert(!memcmp(xattr->body, data[i].buf,
					    xattr->size));
		}
	}
}
/*
 * Mark inode dirty to delete. (called from ->drop_inode()).
 * Caller must hold inode->i_lock.
 */
void tux3_mark_inode_to_delete(struct inode *inode)
{
	struct sb *sb = tux_sb(inode->i_sb);
	struct tux3_inode *tuxnode = tux_inode(inode);
	unsigned delta;

	/* inode has dead mark already */
	if (tux3_inode_is_dead(tuxnode))
		return;

	change_begin_atomic(sb);

	delta = tux3_inode_delta(inode);
	__tux3_mark_inode_to_delete(inode, delta);

	/*
	 * Hack: this is called under inode->i_lock, so we have to
	 * release inode->i_lock to call mark_inode_dirty_sync().
	 *
	 * FIXME: we want to set I_DIRTY_SYNC (I_DIRTY_SYNC prevents
	 * the inode from being freed) and wake up the flusher if
	 * needed, while preventing the inode from being freed. Need a
	 * better way to do this.
	 */
	if (!(tux3_dirty_flags(inode, delta) & I_DIRTY_SYNC)) {
		/* FIXME: I_REFERENCED can't prevent this completely */
		//inode->i_state |= I_REFERENCED;
		/* FIXME: I_WILL_FREE interferes with igrab() taking a reference */
		inode->i_state |= I_WILL_FREE;
		spin_unlock(&inode->i_lock);

		/* Tell the backend about the dead inode by marking it dirty. */
		tux3_mark_inode_dirty_sync(inode);

		spin_lock(&inode->i_lock);
		inode->i_state &= ~I_WILL_FREE;
#ifdef __KERNEL__
		wake_up_bit(&inode->i_state, __I_NEW);
#endif
	}

	change_end_atomic(sb);
}
Example #12
File: dir.c Project: Zkin/tux3
int tux_dir_is_empty(struct inode *dir)
{
	struct sb *sb = tux_sb(dir->i_sb);
	block_t block, blocks = dir->i_size >> sb->blockbits;
	__be64 self = cpu_to_be64(tux_inode(dir)->inum);
	struct buffer_head *buffer;

	for (block = 0; block < blocks; block++) {
		buffer = blockread(mapping(dir), block);
		if (!buffer)
			return -EIO;

		tux_dirent *entry = bufdata(buffer);
		tux_dirent *limit = bufdata(buffer) + sb->blocksize - TUX_REC_LEN(1);
		for (; entry <= limit; entry = next_entry(entry)) {
			if (!entry->rec_len) {
				blockput(buffer);
				tux_zero_len_error(dir, block);
				return -EIO;
			}
			if (is_deleted(entry))
				continue;
			if (entry->name[0] != '.')
				goto not_empty;
			if (entry->name_len > 2)
				goto not_empty;
			if (entry->name_len < 2) {
				if (entry->inum != self)
					goto not_empty;
			} else if (entry->name[1] != '.')
				goto not_empty;
		}
		blockput(buffer);
	}
	return 0;
not_empty:
	blockput(buffer);
	return -ENOTEMPTY;
}
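
tux_dir_is_empty() walks each directory block as a chain of variable-length records: rec_len gives the distance to the next entry, deleted entries are skipped, and anything other than "." and ".." makes the directory non-empty. Here is a small self-contained sketch of that ext2-style walk over a single in-memory block. The toy_dirent layout and its deleted-entry convention are invented for illustration and do not match tux3's on-disk tux_dirent.

/* Toy variable-length dirent walk; field layout is made up. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct toy_dirent {
	uint16_t rec_len;	/* bytes from this record to the next one */
	uint8_t  name_len;	/* 0 plays the "deleted entry" role here */
	char     name[];
};

static struct toy_dirent *next_entry(struct toy_dirent *entry)
{
	return (struct toy_dirent *)((char *)entry + entry->rec_len);
}

/* Return 1 if the block holds nothing but ".", ".." and deleted slots,
 * 0 if it has a real entry, -1 if a zero rec_len would loop forever. */
static int block_is_empty(unsigned char *block, unsigned blocksize)
{
	struct toy_dirent *entry = (struct toy_dirent *)block;
	unsigned char *limit = block + blocksize;

	while ((unsigned char *)entry < limit) {
		if (!entry->rec_len)
			return -1;
		if (entry->name_len) {
			if (entry->name[0] != '.' || entry->name_len > 2 ||
			    (entry->name_len == 2 && entry->name[1] != '.'))
				return 0;
		}
		entry = next_entry(entry);
	}
	return 1;
}

int main(void)
{
	_Alignas(uint16_t) unsigned char block[64] = { };
	struct toy_dirent *e = (struct toy_dirent *)block;

	e->rec_len = 16; e->name_len = 1; memcpy(e->name, ".", 1);
	e = next_entry(e);
	e->rec_len = 48; e->name_len = 2; memcpy(e->name, "..", 2);

	printf("empty? %d\n", block_is_empty(block, sizeof(block)));
	return 0;
}

The zero rec_len check mirrors the tux_zero_len_error() branch above: without it a corrupt record would stall the walk in an infinite loop.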
/*
 * DATA_BTREE_BIT is not set in the normal state; we set it only when
 * flushing the inode. So this is called when the inode is flushed.
 */
static void tux3_iattr_adjust_for_btree(struct inode *inode,
					struct tux3_iattr_data *idata)
{
	if (has_root(&tux_inode(inode)->btree))
		idata->present |= DATA_BTREE_BIT;
}
Example #14
static void tux3_destroy_inode(struct inode *inode)
{
	kmem_cache_free(tux_inode_cachep, tux_inode(inode));
}
Example #15
/* Test basic operations */
static void test01(struct sb *sb, struct inode *inode)
{
	/*
	 * FIXME: map_region() doesn't support reading segments from
	 * multiple leaves at once.
	 */
#define CAN_HANDLE_A_LEAF	1

	/* Create by ascending order */
	if (test_start("test01.1")) {
		struct block_segment seg;
		int err, segs;

		/* Set fake backend mark to modify backend objects. */
		tux3_start_backend(sb);

		for (int i = 0, j = 0; i < 30; i++, j++) {
			segs = d_map_region(inode, 2*i, 1, &seg, 1, MAP_WRITE);
			test_assert(segs == 1);
		}
#ifdef CAN_HANDLE_A_LEAF
		for (int i = 0; i < 30; i++) {
			segs = check_map_region(inode, 2*i, 1, &seg, 1);
			test_assert(segs == 1);
		}
#else
		segs = check_map_region(inode, 0, 30*2, seg, ARRAY_SIZE(seg));
		test_assert(segs == 30*2);
#endif

		/* btree_chop and dleaf_chop test */
		int index = 31*2;
		while (index--) {
			err = btree_chop(&tux_inode(inode)->btree, index,
					 TUXKEY_LIMIT);
			test_assert(!err);
#ifdef CAN_HANDLE_A_LEAF
			for (int i = 0; i < 30; i++) {
				if (index <= i*2)
					break;
				segs = check_map_region(inode, 2*i, 1, &seg, 1);
				test_assert(segs == 1);
			}
#else
			segs = check_map_region(inode, 0, 30*2, seg,
						ARRAY_SIZE(seg));
			test_assert(segs == i*2);
#endif
		}

		/* Check that everything was truncated */
		segs = map_region(inode, 0, INT_MAX, &seg, 1, MAP_READ);
		test_assert(segs == 1);
		test_assert(seg.count == INT_MAX);
		test_assert(seg.state == BLOCK_SEG_HOLE);

		tux3_end_backend();

		test_assert(force_delta(sb) == 0);
		clean_main(sb, inode);
	}
	test_end();

	/* Create by descending order */
	if (test_start("test01.2")) {
		struct block_segment seg;
		int err, segs;

		/* Set fake backend mark to modify backend objects. */
		tux3_start_backend(sb);

		for (int i = 30; i >= 0; i--) {
			segs = d_map_region(inode, 2*i, 1, &seg, 1, MAP_WRITE);
			test_assert(segs == 1);
		}
#ifdef CAN_HANDLE_A_LEAF
		for (int i = 30; i >= 0; i--) {
			segs = check_map_region(inode, 2*i, 1, &seg, 1);
			test_assert(segs == 1);
		}
#else
		segs = check_map_region(inode, 0, 30*2, seg, ARRAY_SIZE(seg));
		test_assert(segs == i*2);
#endif

		err = btree_chop(&tux_inode(inode)->btree, 0, TUXKEY_LIMIT);
		test_assert(!err);

		/* Check that everything was truncated */
		segs = map_region(inode, 0, INT_MAX, &seg, 1, MAP_READ);
		test_assert(segs == 1);
		test_assert(seg.count == INT_MAX);
		test_assert(seg.state == BLOCK_SEG_HOLE);

		tux3_end_backend();

		test_assert(force_delta(sb) == 0);
		clean_main(sb, inode);
	}
	test_end();

	test_assert(force_delta(sb) == 0);
	clean_main(sb, inode);
}
Example #16
static int tux3_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *old_inode = old_dentry->d_inode;
	struct inode *new_inode = new_dentry->d_inode;
	struct buffer_head *old_buffer, *new_buffer;
	tux_dirent *old_entry, *new_entry;
	int err, new_subdir = 0;

	old_entry = tux_find_dirent(old_dir, old_dentry->d_name.name,
				    old_dentry->d_name.len, &old_buffer);
	if (IS_ERR(old_entry))
		return PTR_ERR(old_entry);

	/* FIXME: is this needed? */
	BUG_ON(from_be_u64(old_entry->inum) != tux_inode(old_inode)->inum);

	change_begin(tux_sb(old_inode->i_sb));
	if (new_inode) {
		int old_is_dir = S_ISDIR(old_inode->i_mode);
		if (old_is_dir) {
			err = tux_dir_is_empty(new_inode);
			if (err)
				goto error;
		}

		new_entry = tux_find_dirent(new_dir, new_dentry->d_name.name,
					new_dentry->d_name.len, &new_buffer);
		if (IS_ERR(new_entry)) {
			BUG_ON(PTR_ERR(new_entry) == -ENOENT);
			err = PTR_ERR(new_entry);
			goto error;
		}
		/* this releases new_buffer */
		tux_update_dirent(new_buffer, new_entry, old_inode);
		new_inode->i_ctime = new_dir->i_ctime;
		if (old_is_dir)
			drop_nlink(new_inode);
		inode_dec_link_count(new_inode);
	} else {
		new_subdir = S_ISDIR(old_inode->i_mode) && new_dir != old_dir;
		if (new_subdir) {
			if (new_dir->i_nlink >= TUX_LINK_MAX) {
				err = -EMLINK;
				goto error;
			}
		}
		err = __tux_add_dirent(new_dir, new_dentry, old_inode);
		if (err)
			goto error;
		if (new_subdir)
			inode_inc_link_count(new_dir);
	}
	old_inode->i_ctime = new_dir->i_ctime;
	mark_inode_dirty(old_inode);

	err = tux_delete_dirent(old_buffer, old_entry);
	if (err) {
		printk(KERN_ERR "TUX3: %s: couldn't delete old entry (%Lu)\n",
		       __func__, (L)tux_inode(old_inode)->inum);
		/* FIXME: at this point we have a hard link even if it's a directory. */
		inode_inc_link_count(old_inode);
	}
	if (!err && new_subdir)
		inode_dec_link_count(old_dir);

	change_end(tux_sb(old_inode->i_sb));
	return err;

error:
	change_end(tux_sb(old_inode->i_sb));
	brelse(old_buffer);
	return err;
}
Example #17
static int tux3_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct sb *sb = tux_sb(inode->i_sb);
	struct page *clone, *page = vmf->page;
	void *ptr;
	int ret;

	sb_start_pagefault(inode->i_sb);

retry:
	down_read(&tux_inode(inode)->truncate_lock);
	lock_page(page);
	if (page->mapping != mapping(inode)) {
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	/*
	 * page fault can be happened while holding change_begin/end()
	 * (e.g. copy of user data between ->write_begin and
	 * ->write_end for write(2)).
	 *
	 * So, we use nested version here.
	 */
	change_begin_atomic_nested(sb, &ptr);

	/*
	 * FIXME: The caller releases vmf->page (old_page)
	 * unconditionally, so take an additional refcount to work
	 * around it.
	 */
	if (vmf->page == page)
		page_cache_get(page);

	clone = pagefork_for_blockdirty(page, tux3_get_current_delta());
	if (IS_ERR(clone)) {
		/* Someone did page fork */
		pgoff_t index = page->index;

		change_end_atomic_nested(sb, ptr);
		unlock_page(page);
		page_cache_release(page);
		up_read(&tux_inode(inode)->truncate_lock);

		switch (PTR_ERR(clone)) {
		case -EAGAIN:
			page = find_get_page(inode->i_mapping, index);
			assert(page);
			goto retry;
		case -ENOMEM:
			ret = VM_FAULT_OOM;
			break;
		default:
			ret = VM_FAULT_SIGBUS;
			break;
		}

		goto out;
	}

	file_update_time(vma->vm_file);

	/* Assign buffers to dirty */
	if (!page_has_buffers(clone))
		create_empty_buffers(clone, sb->blocksize, 0);

	/*
	 * We mark the page dirty already here so that when freeze is in
	 * progress, we are guaranteed that writeback during freezing will
	 * see the dirty page and writeprotect it again.
	 */
	tux3_set_page_dirty(clone);
#if 1
	/* FIXME: Caller doesn't see the changed vmf->page */
	vmf->page = clone;

	change_end_atomic_nested(sb, ptr);
	/* FIXME: caller doesn't know about pagefork */
	unlock_page(clone);
	page_cache_release(clone);
	ret = 0;
//	ret = VM_FAULT_LOCKED;
#endif
out:
	up_read(&tux_inode(inode)->truncate_lock);
	sb_end_pagefault(inode->i_sb);

	return ret;
}
Example #18
File: namei.c Project: Zkin/tux3
static int __tux_add_dirent(struct inode *dir, struct dentry *dentry,
			    struct inode *inode)
{
	return tux_create_dirent(dir, &dentry->d_name, tux_inode(inode)->inum,
				 inode->i_mode);
}
Example #19
int main(int argc, char *argv[])
{
	unsigned abits = DATA_BTREE_BIT|CTIME_SIZE_BIT|MODE_OWNER_BIT|LINK_COUNT_BIT|MTIME_BIT;
	struct dev *dev = &(struct dev){ .bits = 8, .fd = open(argv[1], O_CREAT|O_RDWR, S_IRUSR|S_IWUSR) };
	assert(!ftruncate(dev->fd, 1 << 24));
	init_buffers(dev, 1 << 20, 0);
	struct sb *sb = rapid_sb(dev,
		.version = 0,
		.atomref_base = 1 << 10,
		.unatom_base = 1 << 11,
		.atomgen = 1);
	struct inode *inode = rapid_open_inode(sb, NULL, S_IFDIR | 0x666,
		.present = abits, .i_uid = 0x12121212, .i_gid = 0x34343434,
		.i_ctime = spectime(0xdec0debeadULL),
		.i_mtime = spectime(0xbadfaced00dULL));
	inode->btree = (struct btree){
		.root = { .block = 0xcaba1f00dULL, .depth = 3 }
	};
	sb->atable = inode;

	for (int i = 0; i < 2; i++) {
		struct buffer_head *buffer = blockget(mapping(inode), tux_sb(inode->i_sb)->atomref_base + i);
		memset(bufdata(buffer), 0, sb->blocksize);
		blockput_dirty(buffer);
	}

	if (1) {
		warn("---- test positive and negative refcount carry ----");
		use_atom(inode, 6, 1 << 15);
		use_atom(inode, 6, (1 << 15));
		use_atom(inode, 6, -(1 << 15));
		use_atom(inode, 6, -(1 << 15));
	}

	warn("---- test atom table ----");
	printf("atom = %Lx\n", (L)make_atom(inode, "foo", 3));
	printf("atom = %Lx\n", (L)make_atom(inode, "foo", 3));
	printf("atom = %Lx\n", (L)make_atom(inode, "bar", 3));
	printf("atom = %Lx\n", (L)make_atom(inode, "foo", 3));
	printf("atom = %Lx\n", (L)make_atom(inode, "bar", 3));

	warn("---- test inode xattr cache ----");
	int err;
	err = xcache_update(inode, 0x666, "hello", 5, 0);
	if (err)
		printf("err %d\n", err);
	err = xcache_update(inode, 0x777, "world!", 6, 0);
	if (err)
		printf("err %d\n", err);
	xcache_dump(inode);
	struct xattr *xattr = xcache_lookup(tux_inode(inode)->xcache, 0x777);
	if (!IS_ERR(xattr))
		printf("atom %x => %.*s\n", xattr->atom, xattr->size, xattr->body);
	err = xcache_update(inode, 0x111, "class", 5, 0);
	if (err)
		printf("err %d\n", err);
	err = xcache_update(inode, 0x666, NULL, 0, 0);
	if (err)
		printf("err %d\n", err);
	err = xcache_update(inode, 0x222, "boooyah", 7, 0);
	if (err)
		printf("err %d\n", err);
	xcache_dump(inode);

	warn("---- test xattr inode table encode and decode ----");
	char attrs[1000] = { };
	char *top = encode_xattrs(inode, attrs, sizeof(attrs));
	hexdump(attrs, top - attrs);
	printf("predicted size = %x, encoded size = %Lx\n", encode_xsize(inode), (L)(top - attrs));
	inode->xcache->size = offsetof(struct xcache, xattrs);
	char *newtop = decode_attrs(inode, attrs, top - attrs);
	printf("predicted size = %x, xcache size = %x\n", decode_xsize(inode, attrs, top - attrs), inode->xcache->size);
	assert(top == newtop);
	xcache_dump(inode);
	free(inode->xcache);
	inode->xcache = NULL;
	warn("---- xattr update ----");
	set_xattr(inode, "hello", 5, "world!", 6, 0);
	set_xattr(inode, "empty", 5, "zot", 0, 0);
	set_xattr(inode, "foo", 3, "foobar", 6, 0);
	xcache_dump(inode);
	warn("---- xattr remove ----");
//	del_xattr(inode, "hello", 5);
	xcache_dump(inode);
	warn("---- xattr lookup ----");
	for (int i = 0, len; i < 3; i++) {
		char *namelist[] = { "hello", "foo", "world" }, *name = namelist[i];
		char data[100];
		int size = get_xattr(inode, name, len = strlen(name), data, sizeof(data));
		if (size < 0)
			printf("xattr %.*s not found (%s)\n", len, name, strerror(-size));
		else
			printf("found xattr %.*s => %.*s\n", len, name, size, data);
	}
	warn("---- list xattrs ----");
	int len = xattr_list(inode, attrs, sizeof(attrs));
	printf("xattr list length = %i\n", xattr_list(inode, NULL, 0));
	hexdump(attrs, len);

	warn("---- atom reverse map ----");
	for (int i = 0; i < 5; i++) {
		unsigned atom = i, offset;
		struct buffer_head *buffer = blockread_unatom(inode, atom, &offset);
		loff_t where = from_be_u64(((be_u64 *)bufdata(buffer))[offset]);
		blockput_dirty(buffer);
		buffer = blockread(mapping(inode), where >> sb->blockbits);
		printf("atom %.3Lx at dirent %.4Lx, ", (L)atom, (L)where);
		hexdump(bufdata(buffer) + (where & sb->blockmask), 16);
		blockput(buffer);
	}
	warn("---- atom recycle ----");
	set_xattr(inode, "hello", 5, NULL, 0, 0);
	show_freeatoms(sb);
	printf("got free atom %x\n", get_freeatom(inode));
	printf("got free atom %x\n", get_freeatom(inode));
	printf("got free atom %x\n", get_freeatom(inode));

	warn("---- dump atom table ----");
	dump_atoms(inode);
	show_buffers(inode->map);
	exit(0);
}
Example #20
static int tux3_symlink(struct inode *dir, struct dentry *dentry,
			const char *symname)
{
	struct tux_iattr iattr = {
		.uid	= current_fsuid(),
		.gid	= current_fsgid(),
		.mode	= S_IFLNK | S_IRWXUGO,
	};

	return __tux3_symlink(dir, dentry, &iattr, symname);
}
#endif /* !__KERNEL__ */

static int tux3_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	struct sb *sb = tux_sb(inode->i_sb);

	change_begin(sb);
	int err = tux_del_dirent(dir, dentry);
	if (!err) {
		tux3_iattrdirty(inode);
		inode->i_ctime = dir->i_ctime;
		/* FIXME: we shouldn't write inode for i_nlink = 0? */
		inode_dec_link_count(inode);
	}
	change_end(sb);

	return err;
}

static int tux3_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct sb *sb = tux_sb(dir->i_sb);
	struct inode *inode = dentry->d_inode;
	int err = tux_dir_is_empty(inode);

	if (!err) {
		change_begin(sb);
		err = tux_del_dirent(dir, dentry);
		if (!err) {
			tux3_iattrdirty(inode);
			inode->i_ctime = dir->i_ctime;
			/* FIXME: we need to do this for POSIX? */
			/* inode->i_size = 0; */
			clear_nlink(inode);
			tux3_mark_inode_dirty_sync(inode);

			inode_dec_link_count(dir);
		}
		change_end(sb);
	}
	return err;
}

static int tux3_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *old_inode = old_dentry->d_inode;
	struct inode *new_inode = new_dentry->d_inode;
	struct sb *sb = tux_sb(old_inode->i_sb);
	struct buffer_head *old_buffer, *new_buffer, *clone;
	tux_dirent *old_entry, *new_entry;
	void *olddata;
	int err, new_subdir = 0;
	unsigned delta;

	old_entry = tux_find_dirent(old_dir, &old_dentry->d_name, &old_buffer);
	if (IS_ERR(old_entry))
		return PTR_ERR(old_entry);

	/* FIXME: is this needed? */
	assert(be64_to_cpu(old_entry->inum) == tux_inode(old_inode)->inum);

	change_begin(sb);
	delta = tux3_get_current_delta();

	if (new_inode) {
		int old_is_dir = S_ISDIR(old_inode->i_mode);
		if (old_is_dir) {
			err = tux_dir_is_empty(new_inode);
			if (err)
				goto error;
		}

		new_entry = tux_find_dirent(new_dir, &new_dentry->d_name,
					    &new_buffer);
		if (IS_ERR(new_entry)) {
			assert(PTR_ERR(new_entry) != -ENOENT);
			err = PTR_ERR(new_entry);
			goto error;
		}

		/*
		 * The directory is protected by i_mutex.
		 * blockdirty() should never return -EAGAIN.
		 */
		olddata = bufdata(new_buffer);
		clone = blockdirty(new_buffer, delta);
		if (IS_ERR(clone)) {
			assert(PTR_ERR(clone) != -EAGAIN);
			blockput(new_buffer);
			err = PTR_ERR(clone);
			goto error;
		}
		new_entry = ptr_redirect(new_entry, olddata, bufdata(clone));

		/* this releases new_buffer */
		tux_update_dirent(new_dir, clone, new_entry, old_inode);

		tux3_iattrdirty(new_inode);
		new_inode->i_ctime = new_dir->i_ctime;
		if (old_is_dir)
			drop_nlink(new_inode);
		inode_dec_link_count(new_inode);
	} else {
		new_subdir = S_ISDIR(old_inode->i_mode) && new_dir != old_dir;
		if (new_subdir) {
			if (new_dir->i_nlink >= TUX_LINK_MAX) {
				err = -EMLINK;
				goto error;
			}
		}
		err = tux_create_dirent(new_dir, &new_dentry->d_name,
					old_inode);
		if (err)
			goto error;
		if (new_subdir)
			inode_inc_link_count(new_dir);
	}
	tux3_iattrdirty(old_inode);
	old_inode->i_ctime = new_dir->i_ctime;
	tux3_mark_inode_dirty(old_inode);

	/*
	 * The new entry can be in the same buffer as old_buffer, and
	 * the path above may have forked that buffer. So if old_buffer
	 * is a forked buffer, update old_buffer here.
	 */
	if (buffer_forked(old_buffer)) {
		clone = blockget(mapping(old_dir), bufindex(old_buffer));
		assert(clone);
		old_entry = ptr_redirect(old_entry, bufdata(old_buffer),
					 bufdata(clone));
		blockput(old_buffer);
		old_buffer = clone;
	}
	err = tux_delete_dirent(old_dir, old_buffer, old_entry);
	if (err) {
		tux3_fs_error(sb, "couldn't delete old entry (%Lu)",
			      tux_inode(old_inode)->inum);
		/* FIXME: at this point we have a hard link even if it's a directory. */
		inode_inc_link_count(old_inode);
	}
	if (!err && new_subdir)
		inode_dec_link_count(old_dir);

	change_end(sb);
	return err;

error:
	change_end(sb);
	blockput(old_buffer);
	return err;
}

#ifdef __KERNEL__
const struct file_operations tux_dir_fops = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.readdir	= tux_readdir,
	.fsync		= tux3_sync_file,
};

const struct inode_operations tux_dir_iops = {
	.create		= tux3_create,
	.lookup		= tux3_lookup,
	.link		= tux3_link,
	.unlink		= tux3_unlink,
	.symlink	= tux3_symlink,
	.mkdir		= tux3_mkdir,
	.rmdir		= tux3_rmdir,
	.mknod		= tux3_mknod,
	.rename		= tux3_rename,
	.setattr	= tux3_setattr,
	.getattr	= tux3_getattr
//	.setxattr	= generic_setxattr,
//	.getxattr	= generic_getxattr,
//	.listxattr	= ext3_listxattr,
//	.removexattr	= generic_removexattr,
//	.permission	= ext3_permission,
	/* FIXME: why doesn't ext4 support this for directory? */
//	.fallocate	= ext4_fallocate,
//	.fiemap		= ext4_fiemap,
};
Example #21
static void tux3_destroy_inode(struct inode *inode)
{
	BUG_ON(!list_empty(&tux_inode(inode)->alloc_list));
	kmem_cache_free(tux_inode_cachep, tux_inode(inode));
}