Example 1
static struct dentry *fuse_get_dentry(struct super_block *sb, void *vobjp)
{
	__u32 *objp = vobjp;
	unsigned long nodeid = objp[0];
	__u32 generation = objp[1];
	struct inode *inode;
	struct dentry *entry;

	if (nodeid == 0)
		return ERR_PTR(-ESTALE);

	/*
	 * ilookup5() only searches the inode cache: it returns a referenced
	 * inode on a hit and NULL on a miss; it never reads an inode in.
	 */
	inode = ilookup5(sb, nodeid, fuse_inode_eq, &nodeid);
	if (!inode)
		return ERR_PTR(-ESTALE);
	if (inode->i_generation != generation) {
		iput(inode);
		return ERR_PTR(-ESTALE);
	}

	entry = d_alloc_anon(inode);
	if (!entry) {
		iput(inode);
		return ERR_PTR(-ENOMEM);
	}

	return entry;
}
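
The test callback fuse_inode_eq() is not shown in any of these excerpts. As a minimal sketch consistent with the call sites (ilookup5() forwards its last argument, here &nodeid, to the callback and treats a nonzero return as a match; get_node_id() is fuse's accessor for the per-inode node id, and the node-id width follows the caller, u64 in the later examples):

static int fuse_inode_eq(struct inode *inode, void *_nodeidp)
{
	/* _nodeidp points at the nodeid the caller handed to ilookup5(). */
	u64 nodeid = *(u64 *) _nodeidp;

	return get_node_id(inode) == nodeid;
}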
Example 2
struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr, int non_block)
{
	unsigned long hash = (unsigned long)no_addr;
	struct gfs2_skip_data data;

	data.no_addr = no_addr;
	data.skipped = 0;
	data.non_block = non_block;
	return ilookup5(sb, hash, iget_test, &data);
}
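
struct gfs2_skip_data is not defined in this excerpt, but its fields can be read off the assignments above. A sketch, with field types assumed from the u64 block address and the flag-style usage:

struct gfs2_skip_data {
	u64 no_addr;	/* disk address of the inode being looked up */
	int skipped;	/* presumably set by iget_test when it skips an inode */
	int non_block;	/* nonzero: do not block on inodes being torn down */
};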
Example 3
struct inode *ocfs2_ilookup(struct super_block *sb, u64 blkno)
{
	struct ocfs2_find_inode_args args;

	args.fi_blkno = blkno;
	args.fi_flags = 0;
	args.fi_ino = ino_from_blkno(sb, blkno);
	args.fi_sysfile_type = 0;

	return ilookup5(sb, blkno, ocfs2_find_actor, &args);
}
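
Note that the hash handed to ilookup5() here is the raw block number while the actual identity check lives in ocfs2_find_actor(); the hash only selects a chain, so it does not need to equal i_ino. A sketch of the argument structure, with field types assumed from the assignments above:

struct ocfs2_find_inode_args {
	u64		fi_blkno;	/* block holding the on-disk inode */
	unsigned long	fi_ino;		/* inode number derived from fi_blkno */
	unsigned int	fi_flags;
	unsigned int	fi_sysfile_type;
};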
Example 4
static struct dentry *fuse_get_dentry(struct super_block *sb,
				      struct fuse_inode_handle *handle)
{
	struct fuse_conn *fc = get_fuse_conn_super(sb);
	struct inode *inode;
	struct dentry *entry;
	int err = -ESTALE;

	if (handle->nodeid == 0)
		goto out_err;

	inode = ilookup5(sb, handle->nodeid, fuse_inode_eq, &handle->nodeid);
	if (!inode) {
		struct fuse_entry_out outarg;
		struct qstr name;

		if (!fc->export_support)
			goto out_err;

		name.len = 1;
		name.name = ".";
		err = fuse_lookup_name(sb, handle->nodeid, &name, &outarg,
				       &inode);
		if (err && err != -ENOENT)
			goto out_err;
		if (err || !inode) {
			err = -ESTALE;
			goto out_err;
		}
		/*
		 * The server resolved the "." lookup, but to an inode whose
		 * nodeid differs from the handle: a protocol error, not ESTALE.
		 */
		err = -EIO;
		if (get_node_id(inode) != handle->nodeid)
			goto out_iput;
	}
	err = -ESTALE;
	if (inode->i_generation != handle->generation)
		goto out_iput;

	entry = d_obtain_alias(inode);
	if (!IS_ERR(entry) && get_node_id(inode) != FUSE_ROOT_ID) {
		entry->d_op = &fuse_dentry_operations;
		fuse_invalidate_entry_cache(entry);
	}

	return entry;

 out_iput:
	iput(inode);
 out_err:
	return ERR_PTR(err);
}
Example 5
struct inode *search_inode_for_lustre(struct super_block *sb,
				      const struct lu_fid *fid)
{
	struct ll_sb_info     *sbi = ll_s2sbi(sb);
	struct ptlrpc_request *req = NULL;
	struct inode	  *inode = NULL;
	int		   eadatalen = 0;
	unsigned long	      hash = cl_fid_build_ino(fid,
						      ll_need_32bit_api(sbi));
	struct  md_op_data    *op_data;
	int		   rc;

	CDEBUG(D_INFO, "searching inode for:(%lu,"DFID")\n", hash, PFID(fid));

	inode = ilookup5(sb, hash, ll_nfs_test_inode, (void *)fid);
	if (inode)
		return inode;

	rc = ll_get_default_mdsize(sbi, &eadatalen);
	if (rc)
		return ERR_PTR(rc);

	/* Because inode is NULL, ll_prep_md_op_data can not
	 * be used here. So we allocate op_data ourselves */
	op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
	if (!op_data)
		return ERR_PTR(-ENOMEM);

	op_data->op_fid1 = *fid;
	op_data->op_mode = eadatalen;
	op_data->op_valid = OBD_MD_FLEASIZE;

	/* mds_fid2dentry ignores f_type */
	rc = md_getattr(sbi->ll_md_exp, op_data, &req);
	kfree(op_data);
	if (rc) {
		CERROR("can't get object attrs, fid "DFID", rc %d\n",
		       PFID(fid), rc);
		return ERR_PTR(rc);
	}
	rc = ll_prep_inode(&inode, req, sb, NULL);
	ptlrpc_req_finished(req);
	if (rc)
		return ERR_PTR(rc);

	return inode;
}
Example 6
struct inode *coda_fid_to_inode(struct CodaFid *fid, struct super_block *sb)
{
	struct inode *inode;
	unsigned long hash = coda_f2i(fid);

	if ( !sb ) {
		printk("coda_fid_to_inode: no sb!\n");
		return NULL;
	}

	inode = ilookup5(sb, hash, coda_test_inode, fid);
	if ( !inode )
		return NULL;

	BUG_ON(inode->i_state & I_NEW);

	return inode;
}
Example 7
/* convert a fid to an inode. */
struct inode *coda_fid_to_inode(struct CodaFid *fid, struct super_block *sb)
{
	struct inode *inode;
	unsigned long hash = coda_f2i(fid);

	if ( !sb ) {
		printk("coda_fid_to_inode: no sb!\n");
		return NULL;
	}

	inode = ilookup5(sb, hash, coda_test_inode, fid);
	if ( !inode )
		return NULL;

	/* we should never see newly created inodes because we intentionally
	 * fail in the initialization callback */
	BUG_ON(inode->i_state & I_NEW);

	return inode;
}
Example 8
int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid,
			     loff_t offset, loff_t len)
{
	struct inode *inode;
	pgoff_t pg_start;
	pgoff_t pg_end;

	inode = ilookup5(sb, nodeid, fuse_inode_eq, &nodeid);
	if (!inode)
		return -ENOENT;

	fuse_invalidate_attr(inode);
	if (offset >= 0) {
		pg_start = offset >> PAGE_CACHE_SHIFT;
		if (len <= 0)
			pg_end = -1;
		else
			pg_end = (offset + len - 1) >> PAGE_CACHE_SHIFT;
		invalidate_inode_pages2_range(inode->i_mapping,
					      pg_start, pg_end);
	}
	iput(inode);
	return 0;
}
Example 9
int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid,
			     struct qstr *name)
{
	int err = -ENOTDIR;
	struct inode *parent;
	struct dentry *dir;
	struct dentry *entry;

	parent = ilookup5(sb, parent_nodeid, fuse_inode_eq, &parent_nodeid);
	if (!parent)
		return -ENOENT;

	mutex_lock(&parent->i_mutex);
	if (!S_ISDIR(parent->i_mode))
		goto unlock;

	err = -ENOENT;
	dir = d_find_alias(parent);
	if (!dir)
		goto unlock;

	entry = d_lookup(dir, name);
	dput(dir);
	if (!entry)
		goto unlock;

	fuse_invalidate_attr(parent);
	fuse_invalidate_entry(entry);
	dput(entry);
	err = 0;

 unlock:
	mutex_unlock(&parent->i_mutex);
	iput(parent);
	return err;
}
Example 10
struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
			    unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return ilookup5(sb, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
				unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
}

struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
			 unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = nilfs_iget_locked(sb, root, ino);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = __nilfs_read_inode(sb, root, ino, inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
				__u64 cno)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = NULL, .cno = cno, .for_gc = 1
	};
	struct inode *inode;
	int err;

	inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = nilfs_init_gcinode(inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

void nilfs_write_inode_common(struct inode *inode,
			      struct nilfs_inode *raw_inode, int has_bmap)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	raw_inode->i_uid = cpu_to_le32(inode->i_uid);
	raw_inode->i_gid = cpu_to_le32(inode->i_gid);
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le64(inode->i_size);
	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);

	if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
		struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

		/* zero-fill unused portion in the case of super root block */
		raw_inode->i_xattr = 0;
		raw_inode->i_pad = 0;
		memset((void *)raw_inode + sizeof(*raw_inode), 0,
		       nilfs->ns_inode_size - sizeof(*raw_inode));
	}

	if (has_bmap)
		nilfs_bmap_write(ii->i_bmap, raw_inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_device_code =
			cpu_to_le64(huge_encode_dev(inode->i_rdev));
	/* When extending inode, nilfs->ns_inode_size should be checked
	   for substitutions of appended fields */
}

void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh)
{
	ino_t ino = inode->i_ino;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *ifile = ii->i_root->ifile;
	struct nilfs_inode *raw_inode;

	raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);

	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
		memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
	set_bit(NILFS_I_INODE_DIRTY, &ii->i_state);

	nilfs_write_inode_common(inode, raw_inode, 0);
		/* XXX: call with has_bmap = 0 is a workaround to avoid
		   deadlock of bmap. This delays update of i_bmap to just
		   before writing */
	nilfs_ifile_unmap_inode(ifile, ino, ibh);
}

#define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */

static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
				unsigned long from)
{
	unsigned long b;
	int ret;

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
repeat:
	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
	if (ret == -ENOENT)
		return;
	else if (ret < 0)
		goto failed;

	if (b < from)
		return;

	/* Truncate at most NILFS_MAX_TRUNCATE_BLOCKS per pass so that lock
	 * pressure can be relaxed between passes (see the call below). */
	b -= min_t(unsigned long, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
	ret = nilfs_bmap_truncate(ii->i_bmap, b);
	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
	if (!ret || (ret == -ENOMEM &&
		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
		goto repeat;

failed:
	nilfs_warning(ii->vfs_inode.i_sb, __func__,
		      "failed to truncate bmap (ino=%lu, err=%d)",
		      ii->vfs_inode.i_ino, ret);
}

void nilfs_truncate(struct inode *inode)
{
	unsigned long blkoff;
	unsigned int blocksize;
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	blocksize = sb->s_blocksize;
	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

	nilfs_truncate_bmap(ii, blkoff);

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);

	nilfs_mark_inode_dirty(inode);
	nilfs_set_file_dirty(inode, 0);
	nilfs_transaction_commit(sb);
	/* May construct a logical segment and may fail in sync mode.
	   But truncate has no return value. */
}
Example 11
int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid,
			     u64 child_nodeid, struct qstr *name)
{
	int err = -ENOTDIR;
	struct inode *parent;
	struct dentry *dir;
	struct dentry *entry;

	parent = ilookup5(sb, parent_nodeid, fuse_inode_eq, &parent_nodeid);
	if (!parent)
		return -ENOENT;

	mutex_lock(&parent->i_mutex);
	if (!S_ISDIR(parent->i_mode))
		goto unlock;

	err = -ENOENT;
	dir = d_find_alias(parent);
	if (!dir)
		goto unlock;

	entry = d_lookup(dir, name);
	dput(dir);
	if (!entry)
		goto unlock;

	fuse_invalidate_attr(parent);
	fuse_invalidate_entry(entry);

	/*
	 * A child_nodeid of 0 means the caller did not name a specific
	 * child; only the dcache entry above is invalidated in that case.
	 */
	if (child_nodeid != 0 && entry->d_inode) {
		mutex_lock(&entry->d_inode->i_mutex);
		if (get_node_id(entry->d_inode) != child_nodeid) {
			err = -ENOENT;
			goto badentry;
		}
		if (d_mountpoint(entry)) {
			err = -EBUSY;
			goto badentry;
		}
		if (S_ISDIR(entry->d_inode->i_mode)) {
			shrink_dcache_parent(entry);
			if (!simple_empty(entry)) {
				err = -ENOTEMPTY;
				goto badentry;
			}
			entry->d_inode->i_flags |= S_DEAD;
		}
		dont_mount(entry);
		clear_nlink(entry->d_inode);
		err = 0;
 badentry:
		mutex_unlock(&entry->d_inode->i_mutex);
		if (!err)
			d_delete(entry);
	} else {
		err = 0;
	}
	dput(entry);

 unlock:
	mutex_unlock(&parent->i_mutex);
	iput(parent);
	return err;
}
Example 12
struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr)
{
	unsigned long hash = (unsigned long)no_addr;
	return ilookup5(sb, hash, iget_test, &no_addr);
}
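Unlike Example 2, this version hands iget_test a bare u64 rather than a gfs2_skip_data, so the callback reduces to an address comparison. A sketch consistent with that call (GFS2_I() being gfs2's container accessor for the in-core inode):

static int iget_test(struct inode *inode, void *opaque)
{
	u64 no_addr = *(u64 *)opaque;

	/* Match on the filesystem-private disk address, not i_ino. */
	return GFS2_I(inode)->i_no_addr == no_addr;
}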
Example 13
struct inode *f2fs_iget_nowait(struct super_block *sb, unsigned long ino)
{
	struct f2fs_iget_args args = {
		.ino = ino,
		.on_free = 0
	};
	struct inode *inode = ilookup5(sb, ino, f2fs_iget_test, &args);

	if (inode)
		return inode;
	if (!args.on_free)
		return f2fs_iget(sb, ino);
	return ERR_PTR(-ENOENT);
}
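
Neither f2fs_iget_args nor f2fs_iget_test() appears in this excerpt. The on_free flag only makes sense if the callback refuses to match an inode that is being freed (returning 0 there keeps ilookup5() from waiting on it) while recording the encounter, so the caller can tell "never cached" apart from "being evicted". A sketch under that assumption:

struct f2fs_iget_args {
	unsigned long ino;
	int on_free;
};

static int f2fs_iget_test(struct inode *inode, void *data)
{
	struct f2fs_iget_args *args = data;

	if (inode->i_ino != args->ino)
		return 0;

	/*
	 * Declining to match a dying inode avoids blocking in ilookup5();
	 * on_free tells the caller the miss was due to eviction, so
	 * f2fs_iget_nowait() returns -ENOENT instead of re-reading it.
	 */
	if (inode->i_state & (I_FREEING | I_WILL_FREE)) {
		args->on_free = 1;
		return 0;
	}
	return 1;
}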

static int do_read_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct page *node_page;
	struct f2fs_node *rn;
	struct f2fs_inode *ri;

	/* Check if ino is within scope */
	check_nid_range(sbi, inode->i_ino);

	node_page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	rn = page_address(node_page);
	ri = &(rn->i);

	inode->i_mode = le16_to_cpu(ri->i_mode);
	i_uid_write(inode, le32_to_cpu(ri->i_uid));
	i_gid_write(inode, le32_to_cpu(ri->i_gid));
	set_nlink(inode, le32_to_cpu(ri->i_links));
	inode->i_size = le64_to_cpu(ri->i_size);
	inode->i_blocks = le64_to_cpu(ri->i_blocks);

	inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(ri->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
	inode->i_generation = le32_to_cpu(ri->i_generation);

	fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
	fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
	fi->i_flags = le32_to_cpu(ri->i_flags);
	fi->flags = 0;
	fi->data_version = le64_to_cpu(F2FS_CKPT(sbi)->checkpoint_ver) - 1;
	fi->i_advise = ri->i_advise;
	fi->i_pino = le32_to_cpu(ri->i_pino);
	get_extent_info(&fi->ext, ri->i_ext);
	f2fs_put_page(node_page, 1);
	return 0;
}

struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;
	int ret;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;
	/* The node and meta inodes are internal to f2fs and have no
	 * on-disk inode block to read. */
	if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi))
		goto make_now;

	ret = do_read_inode(inode);
	if (ret)
		goto bad_inode;

	if (!sbi->por_doing && inode->i_nlink == 0) {
		ret = -ENOENT;
		goto bad_inode;
	}

make_now:
	if (ino == F2FS_NODE_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_node_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
	} else if (ino == F2FS_META_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_meta_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
	} else if (S_ISREG(inode->i_mode)) {
		inode->i_op = &f2fs_file_inode_operations;
		inode->i_fop = &f2fs_file_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &f2fs_dir_inode_operations;
		inode->i_fop = &f2fs_dir_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER_MOVABLE |
				__GFP_ZERO);
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &f2fs_symlink_inode_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &f2fs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	} else {
		ret = -EIO;
		goto bad_inode;
	}
	unlock_new_inode(inode);

	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(ret);
}

void update_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_node *rn;
	struct f2fs_inode *ri;

	wait_on_page_writeback(node_page);

	rn = page_address(node_page);
	ri = &(rn->i);

	ri->i_mode = cpu_to_le16(inode->i_mode);
	ri->i_advise = F2FS_I(inode)->i_advise;
	ri->i_uid = cpu_to_le32(i_uid_read(inode));
	ri->i_gid = cpu_to_le32(i_gid_read(inode));
	ri->i_links = cpu_to_le32(inode->i_nlink);
	ri->i_size = cpu_to_le64(i_size_read(inode));
	ri->i_blocks = cpu_to_le64(inode->i_blocks);
	set_raw_extent(&F2FS_I(inode)->ext, &ri->i_ext);

	ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
	ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ri->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	ri->i_current_depth = cpu_to_le32(F2FS_I(inode)->i_current_depth);
	ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
	ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
	ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
	ri->i_generation = cpu_to_le32(inode->i_generation);
	set_cold_node(inode, node_page);
	set_page_dirty(node_page);
}

int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *node_page;
	bool need_lock = false;

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return 0;

	if (wbc)
		f2fs_balance_fs(sbi);

	node_page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	if (!PageDirty(node_page)) {
		need_lock = true;
		f2fs_put_page(node_page, 1);
		mutex_lock(&sbi->write_inode);
		node_page = get_node_page(sbi, inode->i_ino);
		if (IS_ERR(node_page)) {
			mutex_unlock(&sbi->write_inode);
			return PTR_ERR(node_page);
		}
	}
	update_inode(inode, node_page);
	f2fs_put_page(node_page, 1);
	if (need_lock)
		mutex_unlock(&sbi->write_inode);
	return 0;
}

/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

	truncate_inode_pages(&inode->i_data, 0);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		goto no_delete;

	BUG_ON(atomic_read(&F2FS_I(inode)->dirty_dents));
	remove_dirty_dir_inode(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	set_inode_flag(F2FS_I(inode), FI_NO_ALLOC);
	i_size_write(inode, 0);

	if (F2FS_HAS_BLOCKS(inode))
		f2fs_truncate(inode);

	remove_inode_page(inode);
no_delete:
	clear_inode(inode);
}
Example 14
File: dir.c Project: Anjali05/linux
int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid,
			     u64 child_nodeid, struct qstr *name)
{
	int err = -ENOTDIR;
	struct inode *parent;
	struct dentry *dir;
	struct dentry *entry;

	parent = ilookup5(sb, parent_nodeid, fuse_inode_eq, &parent_nodeid);
	if (!parent)
		return -ENOENT;

	inode_lock(parent);
	if (!S_ISDIR(parent->i_mode))
		goto unlock;

	err = -ENOENT;
	dir = d_find_alias(parent);
	if (!dir)
		goto unlock;

	name->hash = full_name_hash(dir, name->name, name->len);
	entry = d_lookup(dir, name);
	dput(dir);
	if (!entry)
		goto unlock;

	fuse_dir_changed(parent);
	fuse_invalidate_entry(entry);

	if (child_nodeid != 0 && d_really_is_positive(entry)) {
		inode_lock(d_inode(entry));
		if (get_node_id(d_inode(entry)) != child_nodeid) {
			err = -ENOENT;
			goto badentry;
		}
		if (d_mountpoint(entry)) {
			err = -EBUSY;
			goto badentry;
		}
		if (d_is_dir(entry)) {
			shrink_dcache_parent(entry);
			if (!simple_empty(entry)) {
				err = -ENOTEMPTY;
				goto badentry;
			}
			d_inode(entry)->i_flags |= S_DEAD;
		}
		dont_mount(entry);
		clear_nlink(d_inode(entry));
		err = 0;
 badentry:
		inode_unlock(d_inode(entry));
		if (!err)
			d_delete(entry);
	} else {
		err = 0;
	}
	dput(entry);

 unlock:
	inode_unlock(parent);
	iput(parent);
	return err;
}
Example 15
/**
 * ntfs_mft_writepage - check if a metadata page contains dirty mft records
 * @page:	metadata page possibly containing dirty mft records
 * @wbc:	writeback control structure
 *
 * This is called from the VM when it wants to have a dirty $MFT/$DATA metadata
 * page cache page cleaned.  The VM has already locked the page and marked it
 * clean.  Instead of writing the page as a conventional ->writepage function
 * would do, we check if the page still contains any dirty mft records (it must
 * have done at some point in the past since the page was marked dirty) and if
 * none are found, i.e. all mft records are clean, we unlock the page and
 * return.  The VM is then free to do with the page as it pleases.  If on the
 * other hand we do find any dirty mft records in the page, we redirty the page
 * before unlocking it and returning so the VM knows that the page is still
 * busy and cannot be thrown out.
 *
 * Note, we do not actually write any dirty mft records here because they are
 * dirty inodes and hence will be written by the VFS inode dirty code paths.
 * There is no need to write them from the VM page dirty code paths, too and in
 * fact once we implement journalling it would be a complete nightmare having
 * two code paths leading to mft record writeout.
 */
static int ntfs_mft_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *mft_vi = page->mapping->host;
	struct super_block *sb = mft_vi->i_sb;
	ntfs_volume *vol = NTFS_SB(sb);
	u8 *maddr;
	MFT_RECORD *m;
	ntfs_inode **extent_nis;
	unsigned long mft_no;
	int nr, i, j;
	BOOL is_dirty = FALSE;

	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));
	BUG_ON(mft_vi != vol->mft_ino);
	/* The first mft record number in the page. */
	mft_no = page->index << (PAGE_CACHE_SHIFT - vol->mft_record_size_bits);
	/* Number of mft records in the page. */
	nr = PAGE_CACHE_SIZE >> vol->mft_record_size_bits;
	BUG_ON(!nr);
	ntfs_debug("Entering for %i inodes starting at 0x%lx.", nr, mft_no);
	/* Iterate over the mft records in the page looking for a dirty one. */
	maddr = (u8*)kmap(page);
	for (i = 0; i < nr; ++i, ++mft_no, maddr += vol->mft_record_size) {
		struct inode *vi;
		ntfs_inode *ni, *eni;
		ntfs_attr na;

		na.mft_no = mft_no;
		na.name = NULL;
		na.name_len = 0;
		na.type = AT_UNUSED;
		/*
		 * Check if the inode corresponding to this mft record is in
		 * the VFS inode cache and obtain a reference to it if it is.
		 */
		ntfs_debug("Looking for inode 0x%lx in icache.", mft_no);
		/*
		 * For inode 0, i.e. $MFT itself, we cannot use ilookup5() from
		 * here or we deadlock because the inode is already locked by
		 * the kernel (fs/fs-writeback.c::__sync_single_inode()) and
		 * ilookup5() waits until the inode is unlocked before
		 * returning it and it never gets unlocked because
		 * ntfs_mft_writepage() never returns.  )-:  Fortunately, we
		 * have inode 0 pinned in icache for the duration of the mount
		 * so we can access it directly.
		 */
		if (!mft_no) {
			/* Balance the below iput(). */
			vi = igrab(mft_vi);
			BUG_ON(vi != mft_vi);
		} else
			vi = ilookup5(sb, mft_no, (test_t)ntfs_test_inode, &na);
		if (vi) {
			ntfs_debug("Inode 0x%lx is in icache.", mft_no);
			/* The inode is in icache.  Check if it is dirty. */
			ni = NTFS_I(vi);
			if (!NInoDirty(ni)) {
				/* The inode is not dirty, skip this record. */
				ntfs_debug("Inode 0x%lx is not dirty, "
						"continuing search.", mft_no);
				iput(vi);
				continue;
			}
			ntfs_debug("Inode 0x%lx is dirty, aborting search.",
					mft_no);
			/* The inode is dirty, no need to search further. */
			iput(vi);
			is_dirty = TRUE;
			break;
		}
		ntfs_debug("Inode 0x%lx is not in icache.", mft_no);
		/* The inode is not in icache. */
		/* Skip the record if it is not a mft record (type "FILE"). */
		if (!ntfs_is_mft_recordp(maddr)) {
			ntfs_debug("Mft record 0x%lx is not a FILE record, "
					"continuing search.", mft_no);
			continue;
		}
		m = (MFT_RECORD*)maddr;
		/*
		 * Skip the mft record if it is not in use.  FIXME:  What about
		 * deleted/deallocated (extent) inodes?  (AIA)
		 */
		if (!(m->flags & MFT_RECORD_IN_USE)) {
			ntfs_debug("Mft record 0x%lx is not in use, "
					"continuing search.", mft_no);
			continue;
		}
		/* Skip the mft record if it is a base inode. */
		if (!m->base_mft_record) {
			ntfs_debug("Mft record 0x%lx is a base record, "
					"continuing search.", mft_no);
			continue;
		}
		/*
		 * This is an extent mft record.  Check if the inode
		 * corresponding to its base mft record is in icache.
		 */
		na.mft_no = MREF_LE(m->base_mft_record);
		ntfs_debug("Mft record 0x%lx is an extent record.  Looking "
				"for base inode 0x%lx in icache.", mft_no,
				na.mft_no);
		vi = ilookup5(sb, na.mft_no, (test_t)ntfs_test_inode,
				&na);
		if (!vi) {
			/*
			 * The base inode is not in icache.  Skip this extent
			 * mft record.
			 */
			ntfs_debug("Base inode 0x%lx is not in icache, "
					"continuing search.", na.mft_no);
			continue;
		}
		ntfs_debug("Base inode 0x%lx is in icache.", na.mft_no);
		/*
		 * The base inode is in icache.  Check if it has the extent
		 * inode corresponding to this extent mft record attached.
		 */
		ni = NTFS_I(vi);
		down(&ni->extent_lock);
		if (ni->nr_extents <= 0) {
			/*
			 * The base inode has no attached extent inodes.  Skip
			 * this extent mft record.
			 */
			up(&ni->extent_lock);
			iput(vi);
			continue;
		}
		/* Iterate over the attached extent inodes. */
		extent_nis = ni->ext.extent_ntfs_inos;
		for (eni = NULL, j = 0; j < ni->nr_extents; ++j) {
			if (mft_no == extent_nis[j]->mft_no) {
				/*
				 * Found the extent inode corresponding to this
				 * extent mft record.
				 */
				eni = extent_nis[j];
				break;
			}
		}
		/*
		 * If the extent inode was not attached to the base inode, skip
		 * this extent mft record.
		 */
		if (!eni) {
			up(&ni->extent_lock);
			iput(vi);
			continue;
		}
		/*
		 * Found the extent inode corresponding to this extent mft
		 * record.  If it is dirty, no need to search further.
		 */
		if (NInoDirty(eni)) {
			up(&ni->extent_lock);
			iput(vi);
			is_dirty = TRUE;
			break;
		}
		/* The extent inode is not dirty, so do the next record. */
		up(&ni->extent_lock);
		iput(vi);
	}
	kunmap(page);
	/* If a dirty mft record was found, redirty the page. */
	if (is_dirty) {
		ntfs_debug("Inode 0x%lx is dirty.  Redirtying the page "
				"starting at inode 0x%lx.", mft_no,
				page->index << (PAGE_CACHE_SHIFT -
				vol->mft_record_size_bits));
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
	} else {
		/*
		 * Keep the VM happy.  This must be done otherwise the
		 * radix-tree tag PAGECACHE_TAG_DIRTY remains set even though
		 * the page is clean.
		 */
		BUG_ON(PageWriteback(page));
		set_page_writeback(page);
		unlock_page(page);
		end_page_writeback(page);
	}
	ntfs_debug("Done.");
	return 0;
}