Example #1
static int ocfs2_get_inode_attr(struct inode *inode, unsigned *flags)
{
	int status;

	status = ocfs2_inode_lock(inode, NULL, 0);
	if (status < 0) {
		mlog_errno(status);
		return status;
	}
	ocfs2_get_inode_flags(OCFS2_I(inode));
	*flags = OCFS2_I(inode)->ip_attr;
	ocfs2_inode_unlock(inode, 0);

	mlog_exit(status);
	return status;
}
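
A minimal caller sketch (not part of the listing above): this is roughly how a GETFLAGS-style ioctl handler could consume ocfs2_get_inode_attr(); the wrapper name and argument handling are illustrative only.

static long example_ioc_getflags(struct inode *inode, unsigned int __user *argp)
{
	unsigned int flags;
	int status;

	/* Takes and drops the cluster lock internally; see above. */
	status = ocfs2_get_inode_attr(inode, &flags);
	if (status < 0)
		return status;

	return put_user(flags, argp);
}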
Example #2
static char *ocfs2_fast_symlink_getlink(struct inode *inode,
					struct buffer_head **bh)
{
	int status;
	char *link = NULL;
	struct ocfs2_dinode *fe;

	mlog_entry_void();

	status = ocfs2_read_block(OCFS2_SB(inode->i_sb),
				  OCFS2_I(inode)->ip_blkno,
				  bh,
				  OCFS2_BH_CACHED,
				  inode);
	if (status < 0) {
		mlog_errno(status);
		link = ERR_PTR(status);
		goto bail;
	}

	fe = (struct ocfs2_dinode *) (*bh)->b_data;
	link = (char *) fe->id2.i_symlink;
bail:
	mlog_exit(status);

	return link;
}
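
Hypothetical usage sketch: a readlink-style caller would copy the inline target out of the dinode and then release the buffer_head handed back by ocfs2_fast_symlink_getlink(). The function name and the assumption that the on-disk target is NUL-terminated are mine.

static int example_read_fast_symlink(struct inode *inode, char *buf, int buflen)
{
	struct buffer_head *bh = NULL;
	char *link;
	int len;

	link = ocfs2_fast_symlink_getlink(inode, &bh);
	if (IS_ERR(link))
		return PTR_ERR(link);

	len = strlen(link);		/* assumes a NUL-terminated inline target */
	if (len > buflen - 1)
		len = buflen - 1;
	memcpy(buf, link, len);
	buf[len] = '\0';

	brelse(bh);
	return len;
}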
static struct dentry *ocfs2_get_parent(struct dentry *child)
{
	int status;
	u64 blkno;
	struct dentry *parent;
	struct inode *dir = child->d_inode;

	trace_ocfs2_get_parent(child, child->d_name.len, child->d_name.name,
			       (unsigned long long)OCFS2_I(dir)->ip_blkno);

	status = ocfs2_inode_lock(dir, NULL, 0);
	if (status < 0) {
		if (status != -ENOENT)
			mlog_errno(status);
		parent = ERR_PTR(status);
		goto bail;
	}

	status = ocfs2_lookup_ino_from_name(dir, "..", 2, &blkno);
	if (status < 0) {
		parent = ERR_PTR(-ENOENT);
		goto bail_unlock;
	}

	parent = d_obtain_alias(ocfs2_iget(OCFS2_SB(dir->i_sb), blkno, 0, 0));

bail_unlock:
	ocfs2_inode_unlock(dir, 0);

bail:
	trace_ocfs2_get_parent_end(parent);

	return parent;
}
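
For context, a get_parent callback like the one above is normally hooked up through the generic exportfs table; a minimal wiring sketch, with the other callbacks omitted:

static const struct export_operations example_export_ops = {
	.get_parent	= ocfs2_get_parent,
	/* .fh_to_dentry, .fh_to_parent, etc. would also be set here */
};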
/* Called from locking and called from ocfs2_clear_inode. Dump the
 * cache for a given inode.
 *
 * This function is a few more lines longer than necessary due to some
 * accounting done here, but I think it's worth tracking down those
 * bugs sooner -- Mark */
void ocfs2_metadata_cache_purge(struct inode *inode)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	unsigned int tree, to_purge, purged;
	struct ocfs2_caching_info *ci = &oi->ip_metadata_cache;
	struct rb_root root = RB_ROOT;

	spin_lock(&oi->ip_lock);
	tree = !(oi->ip_flags & OCFS2_INODE_CACHE_INLINE);
	to_purge = ci->ci_num_cached;

	mlog(0, "Purge %u %s items from Inode %llu\n", to_purge,
	     tree ? "array" : "tree", (unsigned long long)oi->ip_blkno);

	/* If we're a tree, save off the root so that we can safely
	 * initialize the cache. We do the work to free tree members
	 * without the spinlock. */
	if (tree)
		root = ci->ci_cache.ci_tree;

	ocfs2_metadata_cache_init(inode);
	spin_unlock(&oi->ip_lock);

	purged = ocfs2_purge_copied_metadata_tree(&root);
	/* If possible, track the number wiped so that we can more
	 * easily detect counting errors. Unfortunately, this is only
	 * meaningful for trees. */
	if (tree && purged != to_purge)
		mlog(ML_ERROR, "Inode %llu, count = %u, purged = %u\n",
		     (unsigned long long)oi->ip_blkno, to_purge, purged);
}
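
The tree-drain helper is not part of this listing; below is a rough sketch of the pattern it implements (walking and freeing a detached rb_root outside the spinlock), using a hypothetical item type rather than the real ocfs2 cache item.

struct example_cache_item {
	struct rb_node	c_node;
	sector_t	c_block;
};

static unsigned int example_purge_copied_tree(struct rb_root *root)
{
	unsigned int purged = 0;
	struct rb_node *node;
	struct example_cache_item *item;

	/* The root was copied and re-initialized under ip_lock above, so
	 * this walk can take its time without holding the spinlock. */
	while ((node = rb_last(root)) != NULL) {
		item = rb_entry(node, struct example_cache_item, c_node);
		rb_erase(&item->c_node, root);
		kfree(item);
		purged++;
	}

	return purged;
}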
Example #5
static int ocfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	struct buffer_head *di_bh = NULL;
	sigset_t blocked, oldset;
	int ret, ret2;

	ret = ocfs2_vm_op_block_sigs(&blocked, &oldset);
	if (ret < 0) {
		mlog_errno(ret);
		return ret;
	}

	/*
	 * The cluster locks taken will block a truncate from another
	 * node. Taking the data lock will also ensure that we don't
	 * attempt page truncation as part of a downconvert.
	 */
	ret = ocfs2_inode_lock(inode, &di_bh, 1);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * The alloc sem should be enough to serialize with
	 * ocfs2_truncate_file() changing i_size as well as any thread
	 * modifying the inode btree.
	 */
	down_write(&OCFS2_I(inode)->ip_alloc_sem);

	ret = __ocfs2_page_mkwrite(inode, di_bh, page);

	up_write(&OCFS2_I(inode)->ip_alloc_sem);

	brelse(di_bh);
	ocfs2_inode_unlock(inode, 1);

out:
	ret2 = ocfs2_vm_op_unblock_sigs(&oldset);
	if (ret2 < 0)
		mlog_errno(ret2);
	if (ret)
		ret = VM_FAULT_SIGBUS;
	return ret;
}
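
Both ocfs2_page_mkwrite() and the fault handler shown later in this listing are typically exposed through a vm_operations_struct; a minimal wiring sketch using the generic field names:

static const struct vm_operations_struct example_file_vm_ops = {
	.fault		= ocfs2_fault,
	.page_mkwrite	= ocfs2_page_mkwrite,
};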
void ocfs2_metadata_cache_init(struct inode *inode)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_caching_info *ci = &oi->ip_metadata_cache;

	oi->ip_flags |= OCFS2_INODE_CACHE_INLINE;
	ci->ci_num_cached = 0;
}
static int ocfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	trace_ocfs2_writepage(
		(unsigned long long)OCFS2_I(page->mapping->host)->ip_blkno,
		page->index);

	return block_write_full_page(page, ocfs2_get_block, wbc);
}
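
ocfs2_writepage() and ocfs2_bmap() (shown below) belong in the inode's address_space_operations; a trimmed-down sketch of that hookup, with the remaining methods omitted:

static const struct address_space_operations example_aops = {
	.writepage	= ocfs2_writepage,
	.bmap		= ocfs2_bmap,
	/* .readpage, .write_begin/.write_end, etc. omitted */
};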
Example #8
static struct dentry *ocfs2_get_parent(struct dentry *child)
{
	int status;
	u64 blkno;
	struct dentry *parent;
	struct inode *inode;
	struct inode *dir = child->d_inode;
	struct buffer_head *dirent_bh = NULL;
	struct ocfs2_dir_entry *dirent;

	mlog_entry("(0x%p, '%.*s')\n", child,
		   child->d_name.len, child->d_name.name);

	mlog(0, "find parent of directory %llu\n",
	     (unsigned long long)OCFS2_I(dir)->ip_blkno);

	status = ocfs2_meta_lock(dir, NULL, 0);
	if (status < 0) {
		if (status != -ENOENT)
			mlog_errno(status);
		parent = ERR_PTR(status);
		goto bail;
	}

	status = ocfs2_find_files_on_disk("..", 2, &blkno, dir, &dirent_bh,
					  &dirent);
	if (status < 0) {
		parent = ERR_PTR(-ENOENT);
		goto bail_unlock;
	}

	inode = ocfs2_iget(OCFS2_SB(dir->i_sb), blkno, 0);
	if (IS_ERR(inode)) {
		mlog(ML_ERROR, "Unable to create inode %llu\n",
		     (unsigned long long)blkno);
		parent = ERR_PTR(-EACCES);
		goto bail_unlock;
	}

	parent = d_alloc_anon(inode);
	if (!parent) {
		iput(inode);
		parent = ERR_PTR(-ENOMEM);
		goto bail_unlock;
	}

	parent->d_op = &ocfs2_dentry_ops;

bail_unlock:
	ocfs2_meta_unlock(dir, 0);

	if (dirent_bh)
		brelse(dirent_bh);

bail:
	mlog_exit_ptr(parent);

	return parent;
}
Example #9
static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
{
	sector_t status;
	u64 p_blkno = 0;
	int err = 0;
	struct inode *inode = mapping->host;

	mlog_entry("(block = %llu)\n", (unsigned long long)block);

	/* We don't need to lock journal system files, since they aren't
	 * accessed concurrently from multiple nodes.
	 */
	if (!INODE_JOURNAL(inode)) {
		err = ocfs2_inode_lock(inode, NULL, 0);
		if (err) {
			if (err != -ENOENT)
				mlog_errno(err);
			goto bail;
		}
		down_read(&OCFS2_I(inode)->ip_alloc_sem);
	}

	if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
		err = ocfs2_extent_map_get_blocks(inode, block, &p_blkno, NULL,
						  NULL);

	if (!INODE_JOURNAL(inode)) {
		up_read(&OCFS2_I(inode)->ip_alloc_sem);
		ocfs2_inode_unlock(inode, 0);
	}

	if (err) {
		mlog(ML_ERROR, "get_blocks() failed, block = %llu\n",
		     (unsigned long long)block);
		mlog_errno(err);
		goto bail;
	}

bail:
	status = err ? 0 : p_blkno;

	mlog_exit((int)status);

	return status;
}
Example #10
int ocfs2_extent_map_init(struct inode *inode)
{
	struct ocfs2_extent_map *em = &OCFS2_I(inode)->ip_map;

	em->em_extents = RB_ROOT;
	em->em_clusters = 0;

	return 0;
}
Example #11
static int ocfs2_block_group_fill(handle_t *handle,
				  struct inode *alloc_inode,
				  struct buffer_head *bg_bh,
				  u64 group_blkno,
				  u16 my_chain,
				  struct ocfs2_chain_list *cl)
{
	int status = 0;
	struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
	struct super_block * sb = alloc_inode->i_sb;

	mlog_entry_void();

	if (((unsigned long long) bg_bh->b_blocknr) != group_blkno) {
		ocfs2_error(alloc_inode->i_sb, "group block (%llu) != "
			    "b_blocknr (%llu)",
			    (unsigned long long)group_blkno,
			    (unsigned long long) bg_bh->b_blocknr);
		status = -EIO;
		goto bail;
	}

	status = ocfs2_journal_access(handle,
				      alloc_inode,
				      bg_bh,
				      OCFS2_JOURNAL_ACCESS_CREATE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	memset(bg, 0, sb->s_blocksize);
	strcpy(bg->bg_signature, OCFS2_GROUP_DESC_SIGNATURE);
	bg->bg_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation);
	bg->bg_size = cpu_to_le16(ocfs2_group_bitmap_size(sb));
	bg->bg_bits = cpu_to_le16(ocfs2_bits_per_group(cl));
	bg->bg_chain = cpu_to_le16(my_chain);
	bg->bg_next_group = cl->cl_recs[my_chain].c_blkno;
	bg->bg_parent_dinode = cpu_to_le64(OCFS2_I(alloc_inode)->ip_blkno);
	bg->bg_blkno = cpu_to_le64(group_blkno);
	/* set the 1st bit in the bitmap to account for the descriptor block */
	ocfs2_set_bit(0, (unsigned long *)bg->bg_bitmap);
	bg->bg_free_bits_count = cpu_to_le16(le16_to_cpu(bg->bg_bits) - 1);

	status = ocfs2_journal_dirty(handle, bg_bh);
	if (status < 0)
		mlog_errno(status);

	/* There is no need to zero out or otherwise initialize the
	 * other blocks in a group - All valid FS metadata in a block
	 * group stores the superblock fs_generation value at
	 * allocation time. */

bail:
	mlog_exit(status);
	return status;
}
Example #12
/* Support function for ocfs2_delete_inode. Will help us keep the
 * inode data in a consistent state for clear_inode. Always truncates
 * pages, optionally syncs them first. */
static void ocfs2_cleanup_delete_inode(struct inode *inode,
				       int sync_data)
{
	mlog(0, "Cleanup inode %llu, sync = %d\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno, sync_data);
	if (sync_data)
		write_inode_now(inode, 1);
	truncate_inode_pages(&inode->i_data, 0);
}
Example #13
/* Support function for ocfs2_delete_inode. Will help us keep the
 * inode data in a consistent state for clear_inode. Always truncates
 * pages, optionally syncs them first. */
static void ocfs2_cleanup_delete_inode(struct inode *inode,
				       int sync_data)
{
	trace_ocfs2_cleanup_delete_inode(
		(unsigned long long)OCFS2_I(inode)->ip_blkno, sync_data);
	if (sync_data)
		filemap_write_and_wait(inode->i_mapping);
	truncate_inode_pages(&inode->i_data, 0);
}
Example #14
/*
 * TODO: this should probably be merged into ocfs2_get_block
 *
 * However, you now need to pay attention to the cont_prepare_write()
 * stuff in ocfs2_get_block (that is, ocfs2_get_block pretty much
 * expects never to extend).
 */
struct buffer_head *ocfs2_bread(struct inode *inode,
				int block, int *err, int reada)
{
	struct buffer_head *bh = NULL;
	int tmperr;
	u64 p_blkno;
	int readflags = OCFS2_BH_CACHED;

	if (reada)
		readflags |= OCFS2_BH_READAHEAD;

	if (((u64)block << inode->i_sb->s_blocksize_bits) >=
	    i_size_read(inode)) {
		BUG_ON(!reada);
		return NULL;
	}

	down_read(&OCFS2_I(inode)->ip_alloc_sem);
	tmperr = ocfs2_extent_map_get_blocks(inode, block, &p_blkno, NULL,
					     NULL);
	up_read(&OCFS2_I(inode)->ip_alloc_sem);
	if (tmperr < 0) {
		mlog_errno(tmperr);
		goto fail;
	}

	tmperr = ocfs2_read_block(OCFS2_SB(inode->i_sb), p_blkno, &bh,
				  readflags, inode);
	if (tmperr < 0)
		goto fail;

	tmperr = 0;

	*err = 0;
	return bh;

fail:
	if (bh) {
		brelse(bh);
		bh = NULL;
	}
	*err = -EIO;
	return NULL;
}
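
Hypothetical caller sketch for ocfs2_bread(): reading one logical block of a directory and logging the error code it hands back. The wrapper name is illustrative only.

static struct buffer_head *example_read_dir_block(struct inode *dir, int v_block)
{
	int err = 0;
	struct buffer_head *bh;

	bh = ocfs2_bread(dir, v_block, &err, 0 /* no readahead */);
	if (!bh && err)
		mlog_errno(err);

	return bh;
}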
Example #15
static struct inode *_ocfs2_get_system_file_inode(struct ocfs2_super *osb,
						  int type,
						  u32 slot)
{
	char namebuf[40];
	struct inode *inode = NULL;
	u64 blkno;
	int status = 0;

	ocfs2_sprintf_system_inode_name(namebuf,
					sizeof(namebuf),
					type, slot);

	status = ocfs2_lookup_ino_from_name(osb->sys_root_inode, namebuf,
					    strlen(namebuf), &blkno);
	if (status < 0)
		goto bail;

	inode = ocfs2_iget(osb, blkno, OCFS2_FI_FLAG_SYSFILE, type);
	if (IS_ERR(inode)) {
		mlog_errno(PTR_ERR(inode));
		inode = NULL;
		goto bail;
	}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (type == LOCAL_USER_QUOTA_SYSTEM_INODE ||
	    type == LOCAL_GROUP_QUOTA_SYSTEM_INODE ||
	    type == JOURNAL_SYSTEM_INODE) {
		/* Ignore inode lock on these inodes as the lock does not
		 * really belong to any process and lockdep cannot handle
		 * that */
		OCFS2_I(inode)->ip_inode_lockres.l_lockdep_map.key = NULL;
	} else {
		lockdep_init_map(&OCFS2_I(inode)->ip_inode_lockres.
				 l_lockdep_map,
				 ocfs2_system_inodes[type].si_name,
				 &ocfs2_sysfile_cluster_lock_key[type], 0);
	}
#endif
bail:

	return inode;
}
Example #16
void ocfs2_evict_inode(struct inode *inode)
{
	if (!inode->i_nlink ||
	    (OCFS2_I(inode)->ip_flags & OCFS2_INODE_MAYBE_ORPHANED)) {
		ocfs2_delete_inode(inode);
	} else {
		truncate_inode_pages(&inode->i_data, 0);
	}
	ocfs2_clear_inode(inode);
}
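
evict_inode handlers are registered in the filesystem's super_operations; a minimal sketch of how the function above would typically be wired up:

static const struct super_operations example_super_ops = {
	.evict_inode	= ocfs2_evict_inode,
	/* .alloc_inode, .drop_inode, .put_super, etc. omitted */
};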
Example #17
static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
{
	sector_t status;
	u64 p_blkno = 0;
	int err = 0;
	struct inode *inode = mapping->host;

	trace_ocfs2_bmap((unsigned long long)OCFS2_I(inode)->ip_blkno,
			 (unsigned long long)block);

	if (!INODE_JOURNAL(inode)) {
		err = ocfs2_inode_lock(inode, NULL, 0);
		if (err) {
			if (err != -ENOENT)
				mlog_errno(err);
			goto bail;
		}
		down_read(&OCFS2_I(inode)->ip_alloc_sem);
	}

	if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
		err = ocfs2_extent_map_get_blocks(inode, block, &p_blkno, NULL,
						  NULL);

	if (!INODE_JOURNAL(inode)) {
		up_read(&OCFS2_I(inode)->ip_alloc_sem);
		ocfs2_inode_unlock(inode, 0);
	}

	if (err) {
		mlog(ML_ERROR, "get_blocks() failed, block = %llu\n",
		     (unsigned long long)block);
		mlog_errno(err);
		goto bail;
	}

bail:
	status = err ? 0 : p_blkno;

	return status;
}
Example #18
/*
 * initialize the new inode, but don't do anything that would cause
 * us to sleep.
 * return 0 on success, 1 on failure
 */
static int ocfs2_init_locked_inode(struct inode *inode, void *opaque)
{
	struct ocfs2_find_inode_args *args = opaque;

	mlog_entry("inode = %p, opaque = %p\n", inode, opaque);

	inode->i_ino = args->fi_ino;
	OCFS2_I(inode)->ip_blkno = args->fi_blkno;

	mlog_exit(0);
	return 0;
}
Example #19
struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 blkno, unsigned flags,
			 int sysfile_type)
{
	struct inode *inode = NULL;
	struct super_block *sb = osb->sb;
	struct ocfs2_find_inode_args args;

	trace_ocfs2_iget_begin((unsigned long long)blkno, flags,
			       sysfile_type);

	/* Ok. By now we've either got the offsets passed to us by the
	 * caller, or we just pulled them off the bh. Let's do some
	 * sanity checks to make sure they're OK. */
	if (blkno == 0) {
		inode = ERR_PTR(-EINVAL);
		mlog_errno(PTR_ERR(inode));
		goto bail;
	}

	args.fi_blkno = blkno;
	args.fi_flags = flags;
	args.fi_ino = ino_from_blkno(sb, blkno);
	args.fi_sysfile_type = sysfile_type;

	inode = iget5_locked(sb, args.fi_ino, ocfs2_find_actor,
			     ocfs2_init_locked_inode, &args);
	/* inode was *not* in the inode cache. 2.6.x requires
	 * us to do our own read_inode call and unlock it
	 * afterwards. */
	if (inode == NULL) {
		inode = ERR_PTR(-ENOMEM);
		mlog_errno(PTR_ERR(inode));
		goto bail;
	}
	trace_ocfs2_iget5_locked(inode->i_state);
	if (inode->i_state & I_NEW) {
		ocfs2_read_locked_inode(inode, &args);
		unlock_new_inode(inode);
	}
	if (is_bad_inode(inode)) {
		iput(inode);
		inode = ERR_PTR(-ESTALE);
		goto bail;
	}

bail:
	if (!IS_ERR(inode)) {
		trace_ocfs2_iget_end(inode, 
			(unsigned long long)OCFS2_I(inode)->ip_blkno);
	}

	return inode;
}
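
Illustrative caller sketch: an NFS fh_to_dentry-style lookup could turn a block number into a dentry via ocfs2_iget() and d_obtain_alias(). The surrounding function is hypothetical.

static struct dentry *example_blkno_to_dentry(struct super_block *sb, u64 blkno)
{
	struct inode *inode;

	inode = ocfs2_iget(OCFS2_SB(sb), blkno, 0, 0);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	return d_obtain_alias(inode);
}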
Example #20
/*
 * This is called from our getattr.
 */
int ocfs2_inode_revalidate(struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	int status = 0;

	trace_ocfs2_inode_revalidate(inode,
		inode ? (unsigned long long)OCFS2_I(inode)->ip_blkno : 0ULL,
		inode ? (unsigned long long)OCFS2_I(inode)->ip_flags : 0);

	if (!inode) {
		status = -ENOENT;
		goto bail;
	}

	spin_lock(&OCFS2_I(inode)->ip_lock);
	if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) {
		spin_unlock(&OCFS2_I(inode)->ip_lock);
		status = -ENOENT;
		goto bail;
	}
	spin_unlock(&OCFS2_I(inode)->ip_lock);

	/* Let ocfs2_inode_lock do the work of updating our struct
	 * inode for us. */
	status = ocfs2_inode_lock(inode, NULL, 0);
	if (status < 0) {
		if (status != -ENOENT)
			mlog_errno(status);
		goto bail;
	}
	ocfs2_inode_unlock(inode, 0);
bail:
	return status;
}
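
Sketch of the getattr-side caller this comment refers to: revalidate first, then fill the kstat from the now-current in-memory inode. generic_fillattr() is the stock VFS helper; the wrapper and its simplified signature are illustrative.

static int example_getattr(struct dentry *dentry, struct kstat *stat)
{
	int err;

	err = ocfs2_inode_revalidate(dentry);
	if (err)
		return err;

	generic_fillattr(dentry->d_inode, stat);
	return 0;
}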
Example #21
/*
 * Remove all entries past new_clusters and also clip any extent
 * straddling new_clusters, if there is one.  This does not check
 * or modify ip_clusters
 */
int ocfs2_extent_map_trunc(struct inode *inode, u32 new_clusters)
{
	struct rb_node *free_head = NULL;
	struct ocfs2_extent_map_entry *ent = NULL;

	spin_lock(&OCFS2_I(inode)->ip_lock);

	__ocfs2_extent_map_drop(inode, new_clusters, &free_head, &ent);

	if (ent)
		ent->e_rec.e_clusters = cpu_to_le32(new_clusters -
					       le32_to_cpu(ent->e_rec.e_cpos));

	OCFS2_I(inode)->ip_map.em_clusters = new_clusters;

	spin_unlock(&OCFS2_I(inode)->ip_lock);

	if (free_head)
		__ocfs2_extent_map_drop_cleanup(free_head);

	return 0;
}
Example #22
static int ocfs2_fault(struct vm_area_struct *area, struct vm_fault *vmf)
{
	sigset_t oldset;
	int ret;

	ocfs2_block_signals(&oldset);
	ret = filemap_fault(area, vmf);
	ocfs2_unblock_signals(&oldset);

	trace_ocfs2_fault(OCFS2_I(area->vm_file->f_mapping->host)->ip_blkno,
			  area, vmf->page, vmf->pgoff);
	return ret;
}
Example #23
struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 blkno, int flags)
{
	struct inode *inode = NULL;
	struct super_block *sb = osb->sb;
	struct ocfs2_find_inode_args args;

	mlog_entry("(blkno = %llu)\n", (unsigned long long)blkno);

	/* Ok. By now we've either got the offsets passed to us by the
	 * caller, or we just pulled them off the bh. Let's do some
	 * sanity checks to make sure they're OK. */
	if (blkno == 0) {
		inode = ERR_PTR(-EINVAL);
		mlog_errno(PTR_ERR(inode));
		goto bail;
	}

	args.fi_blkno = blkno;
	args.fi_flags = flags;
	args.fi_ino = ino_from_blkno(sb, blkno);

	inode = iget5_locked(sb, args.fi_ino, ocfs2_find_actor,
			     ocfs2_init_locked_inode, &args);
	/* inode was *not* in the inode cache. 2.6.x requires
	 * us to do our own read_inode call and unlock it
	 * afterwards. */
	if (inode && inode->i_state & I_NEW) {
		mlog(0, "Inode was not in inode cache, reading it.\n");
		ocfs2_read_locked_inode(inode, &args);
		unlock_new_inode(inode);
	}
	if (inode == NULL) {
		inode = ERR_PTR(-ENOMEM);
		mlog_errno(PTR_ERR(inode));
		goto bail;
	}
	if (is_bad_inode(inode)) {
		iput(inode);
		inode = ERR_PTR(-ESTALE);
		goto bail;
	}

bail:
	if (!IS_ERR(inode)) {
		mlog(0, "returning inode with number %llu\n",
		     (unsigned long long)OCFS2_I(inode)->ip_blkno);
		mlog_exit_ptr(inode);
	}

	return inode;
}
Example #24
/* There is a series of simple checks that should be done before a
 * vote is even considered. Encapsulate those in this function. */
static int ocfs2_inode_is_valid_to_delete(struct inode *inode)
{
	int ret = 0;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	/* We shouldn't be getting here for the root directory
	 * inode.. */
	if (inode == osb->root_inode) {
		mlog(ML_ERROR, "Skipping delete of root inode.\n");
		goto bail;
	}

	/* If we're coming from process_vote we can't go into our own
	 * voting [hello, deadlock city!], so unfortunately we just
	 * have to skip deleting this guy. That's OK though because
	 * the node who's doing the actual deleting should handle it
	 * anyway. */
	if (current == osb->vote_task) {
		mlog(0, "Skipping delete of %lu because we're currently "
		     "in process_vote\n", inode->i_ino);
		goto bail;
	}

	spin_lock(&oi->ip_lock);
	/* OCFS2 *never* deletes system files. This should technically
	 * never get here as system file inodes should always have a
	 * positive link count. */
	if (oi->ip_flags & OCFS2_INODE_SYSTEM_FILE) {
		mlog(ML_ERROR, "Skipping delete of system file %llu\n",
		     (unsigned long long)oi->ip_blkno);
		goto bail_unlock;
	}

	/* If we have voted "yes" on the wipe of this inode for
	 * another node, it will be marked here so we can safely skip
	 * it. Recovery will clean up any inodes we might inadvertently
	 * skip here. */
	if (oi->ip_flags & OCFS2_INODE_SKIP_DELETE) {
		mlog(0, "Skipping delete of %lu because another node "
		     "has done this for us.\n", inode->i_ino);
		goto bail_unlock;
	}

	ret = 1;
bail_unlock:
	spin_unlock(&oi->ip_lock);
bail:
	return ret;
}
Example #25
File: file.c  Project: Mr-Aloof/wl500g
static int ocfs2_write_remove_suid(struct inode *inode)
{
	int ret;
	struct buffer_head *bh = NULL;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	handle_t *handle;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_dinode *di;

	mlog_entry("(Inode %llu, mode 0%o)\n",
		   (unsigned long long)oi->ip_blkno, inode->i_mode);

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (handle == NULL) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_read_block(osb, oi->ip_blkno, &bh, OCFS2_BH_CACHED, inode);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_trans;
	}

	ret = ocfs2_journal_access(handle, inode, bh,
				   OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_bh;
	}

	inode->i_mode &= ~S_ISUID;
	if ((inode->i_mode & S_ISGID) && (inode->i_mode & S_IXGRP))
		inode->i_mode &= ~S_ISGID;

	di = (struct ocfs2_dinode *) bh->b_data;
	di->i_mode = cpu_to_le16(inode->i_mode);

	ret = ocfs2_journal_dirty(handle, bh);
	if (ret < 0)
		mlog_errno(ret);
out_bh:
	brelse(bh);
out_trans:
	ocfs2_commit_trans(osb, handle);
out:
	mlog_exit(ret);
	return ret;
}
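
Hedged caller sketch: in the write path a helper like this is invoked only when the VFS says the set[ug]id bits need clearing. should_remove_suid() is the stock kernel helper; the wrapper itself is illustrative.

static int example_prepare_inode_for_write(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	int ret = 0;

	if (should_remove_suid(dentry))
		ret = ocfs2_write_remove_suid(inode);

	return ret;
}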
Example #26
/*
 * initialize the new inode, but don't do anything that would cause
 * us to sleep.
 * return 0 on success, 1 on failure
 */
static int ocfs2_init_locked_inode(struct inode *inode, void *opaque)
{
	struct ocfs2_find_inode_args *args = opaque;
	static struct lock_class_key ocfs2_quota_ip_alloc_sem_key,
				     ocfs2_file_ip_alloc_sem_key;

	inode->i_ino = args->fi_ino;
	OCFS2_I(inode)->ip_blkno = args->fi_blkno;
	if (args->fi_sysfile_type != 0)
		lockdep_set_class(&inode->i_mutex,
			&ocfs2_sysfile_lock_key[args->fi_sysfile_type]);
	if (args->fi_sysfile_type == USER_QUOTA_SYSTEM_INODE ||
	    args->fi_sysfile_type == GROUP_QUOTA_SYSTEM_INODE ||
	    args->fi_sysfile_type == LOCAL_USER_QUOTA_SYSTEM_INODE ||
	    args->fi_sysfile_type == LOCAL_GROUP_QUOTA_SYSTEM_INODE)
		lockdep_set_class(&OCFS2_I(inode)->ip_alloc_sem,
				  &ocfs2_quota_ip_alloc_sem_key);
	else
		lockdep_set_class(&OCFS2_I(inode)->ip_alloc_sem,
				  &ocfs2_file_ip_alloc_sem_key);

	return 0;
}
Example #27
File: aops.c  Project: Tigrouzen/k1099
static int ocfs2_readpage_inline(struct inode *inode, struct page *page)
{
	int ret;
	struct buffer_head *di_bh = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	BUG_ON(!PageLocked(page));
	BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));

	ret = ocfs2_read_block(osb, OCFS2_I(inode)->ip_blkno, &di_bh,
			       OCFS2_BH_CACHED, inode);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_read_inline_data(inode, page, di_bh);
out:
	unlock_page(page);

	brelse(di_bh);
	return ret;
}
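
Sketch of the dispatch that leads here: the readpage path checks the inline-data flag and hands inline inodes to ocfs2_readpage_inline() instead of the block-mapping path. The surrounding function is a simplified illustration, not the full ocfs2_readpage().

static int example_readpage(struct inode *inode, struct page *page)
{
	int ret;

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		ret = ocfs2_readpage_inline(inode, page);	/* unlocks the page */
	else
		ret = block_read_full_page(page, ocfs2_get_block);

	return ret;
}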
Example #28
/*
 * Remove all entries past new_clusters, inclusive of an entry that
 * contains new_clusters.  This is effectively a cache forget.
 *
 * If you want to also clip the last extent by some number of clusters,
 * you need to call ocfs2_extent_map_trunc().
 * This code does not check or modify ip_clusters.
 */
int ocfs2_extent_map_drop(struct inode *inode, u32 new_clusters)
{
	struct rb_node *free_head = NULL;
	struct ocfs2_extent_map *em = &OCFS2_I(inode)->ip_map;
	struct ocfs2_extent_map_entry *ent;

	spin_lock(&OCFS2_I(inode)->ip_lock);

	__ocfs2_extent_map_drop(inode, new_clusters, &free_head, &ent);

	if (ent) {
		rb_erase(&ent->e_node, &em->em_extents);
		ent->e_node.rb_right = free_head;
		free_head = &ent->e_node;
	}

	spin_unlock(&OCFS2_I(inode)->ip_lock);

	if (free_head)
		__ocfs2_extent_map_drop_cleanup(free_head);

	return 0;
}
Example #29
/*
 * This is called from our getattr.
 */
int ocfs2_inode_revalidate(struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	int status = 0;

	mlog_entry("(inode = 0x%p, ino = %llu)\n", inode,
		   inode ? (unsigned long long)OCFS2_I(inode)->ip_blkno : 0ULL);

	if (!inode) {
		mlog(0, "eep, no inode!\n");
		status = -ENOENT;
		goto bail;
	}

	spin_lock(&OCFS2_I(inode)->ip_lock);
	if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) {
		spin_unlock(&OCFS2_I(inode)->ip_lock);
		mlog(0, "inode deleted!\n");
		status = -ENOENT;
		goto bail;
	}
	spin_unlock(&OCFS2_I(inode)->ip_lock);

	/* Let ocfs2_meta_lock do the work of updating our struct
	 * inode for us. */
	status = ocfs2_meta_lock(inode, NULL, 0);
	if (status < 0) {
		if (status != -ENOENT)
			mlog_errno(status);
		goto bail;
	}
	ocfs2_meta_unlock(inode, 0);
bail:
	mlog_exit(status);

	return status;
}
static int __ocfs2_move_extents_range(struct buffer_head *di_bh,
				struct ocfs2_move_extents_context *context)
{
	int ret = 0, flags, do_defrag, skip = 0;
	u32 cpos, phys_cpos, move_start, len_to_move, alloc_size;
	u32 len_defraged = 0, defrag_thresh = 0, new_phys_cpos = 0;

	struct inode *inode = context->inode;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	struct ocfs2_move_extents *range = context->range;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if ((inode->i_size == 0) || (range->me_len == 0))
		return 0;

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		return 0;

	context->refcount_loc = le64_to_cpu(di->i_refcount_loc);

	ocfs2_init_dinode_extent_tree(&context->et, INODE_CACHE(inode), di_bh);
	ocfs2_init_dealloc_ctxt(&context->dealloc);

	/*
	 * TO-DO XXX:
	 *
	 * - xattr extents.
	 */

	do_defrag = context->auto_defrag;

	/*
	 * Extent moving happens in units of clusters; for the sake of
	 * simplicity, we may ignore the two partial clusters that
	 * 'byte_start' and 'byte_start + len' fall within.
	 */
	move_start = ocfs2_clusters_for_bytes(osb->sb, range->me_start);
	len_to_move = (range->me_start + range->me_len) >>
						osb->s_clustersize_bits;
	if (len_to_move >= move_start)
		len_to_move -= move_start;
	else
		len_to_move = 0;

	if (do_defrag) {
		defrag_thresh = range->me_threshold >> osb->s_clustersize_bits;
		if (defrag_thresh <= 1)
			goto done;
	} else