Example #1
STATIC void
xfs_inode_free_callback(
	struct rcu_head		*head)
{
	struct inode		*inode = container_of(head, struct inode, i_rcu);
	struct xfs_inode	*ip = XFS_I(inode);

	switch (VFS_I(ip)->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);
	if (ip->i_cowfp)
		xfs_idestroy_fork(ip, XFS_COW_FORK);

	if (ip->i_itemp) {
		ASSERT(!(ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL));
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	kmem_zone_free(xfs_inode_zone, ip);
}
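
The first statement of the callback in Example #1 relies on the container_of() idiom: given a pointer to the i_rcu member embedded in a struct inode, it recovers a pointer to the enclosing inode. Below is a minimal, runnable userspace sketch of that idiom; the struct definitions are simplified stand-ins, not the real kernel types.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins: the kernel's struct inode and struct rcu_head
 * are far richer, but the pointer arithmetic is identical. */
struct rcu_head { void (*func)(struct rcu_head *); };
struct inode { unsigned long i_ino; struct rcu_head i_rcu; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void demo_callback(struct rcu_head *head)
{
	/* Recover the enclosing inode from the embedded rcu_head,
	 * just as xfs_inode_free_callback() does above. */
	struct inode *inode = container_of(head, struct inode, i_rcu);
	printf("freeing inode %lu\n", inode->i_ino);
}

int main(void)
{
	struct inode ino = { .i_ino = 42 };
	demo_callback(&ino.i_rcu);	/* prints "freeing inode 42" */
	return 0;
}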
Example #2
void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);

	if (ip->i_itemp) {
		/*
		 * Only if we are shutting down the fs will we see an
		 * inode still in the AIL. If it is there, we should remove
		 * it to prevent a use-after-free from occurring.
		 */
		xfs_log_item_t	*lip = &ip->i_itemp->ili_item;
		struct xfs_ail	*ailp = lip->li_ailp;

		ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
				       XFS_FORCED_SHUTDOWN(ip->i_mount));
		if (lip->li_flags & XFS_LI_IN_AIL) {
			spin_lock(&ailp->xa_lock);
			if (lip->li_flags & XFS_LI_IN_AIL)
				xfs_trans_ail_delete(ailp, lip);
			else
				spin_unlock(&ailp->xa_lock);
		}
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));

	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state. The ip->i_flags_lock provides the barrier against lookup
	 * races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}
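
The comment above the final spin_lock() describes only the writer side of the barrier. The reader side is an RCU-protected lookup, which must re-validate the inode under the same i_flags_lock, because xfs_inode_free() zeroes i_ino and sets XFS_IRECLAIM before the RCU grace period expires. A hypothetical kernel-style sketch of that check follows; it is illustrative only, not the actual XFS lookup code, and example_lookup_validate() is an invented name.

/*
 * Hypothetical reader-side validation: an inode found via an RCU walk
 * may already have been passed to xfs_inode_free(), so check the inode
 * number and reclaim flag under i_flags_lock before trusting it.
 */
static bool
example_lookup_validate(
	struct xfs_inode	*ip,
	xfs_ino_t		ino)
{
	bool			valid = false;

	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino == ino && !(ip->i_flags & XFS_IRECLAIM))
		valid = true;
	spin_unlock(&ip->i_flags_lock);

	return valid;
}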
Example #3
void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);

	if (ip->i_itemp) {
		/*
		 * Only if we are shutting down the fs will we see an
		 * inode still in the AIL. If it is there, we should remove
		 * it to prevent a use-after-free from occurring.
		 */
		xfs_log_item_t	*lip = &ip->i_itemp->ili_item;
		struct xfs_ail	*ailp = lip->li_ailp;

		ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
		       XFS_FORCED_SHUTDOWN(ip->i_mount));
		if (lip->li_flags & XFS_LI_IN_AIL) {
			spin_lock(&ailp->xa_lock);
			if (lip->li_flags & XFS_LI_IN_AIL)
				xfs_trans_ail_delete(ailp, lip);
			else
				spin_unlock(&ailp->xa_lock);
		}
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));

	kmem_zone_free(xfs_inode_zone, ip);
}
Example #4
void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);

	if (ip->i_itemp) {
		xfs_log_item_t	*lip = &ip->i_itemp->ili_item;
		struct xfs_ail	*ailp = lip->li_ailp;

		ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
				       XFS_FORCED_SHUTDOWN(ip->i_mount));
		if (lip->li_flags & XFS_LI_IN_AIL) {
			spin_lock(&ailp->xa_lock);
			if (lip->li_flags & XFS_LI_IN_AIL)
				xfs_trans_ail_delete(ailp, lip);
			else
				spin_unlock(&ailp->xa_lock);
		}
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(!xfs_isiflocked(ip));

	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}
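
The AIL handling shared by Examples #2-#4 is a double-checked lock: the unlocked li_flags test keeps xa_lock off the common (not-in-AIL) path, the re-test under the lock closes the race with a concurrent removal, and the unlock is asymmetric because xfs_trans_ail_delete() drops xa_lock itself. A generic, runnable userspace sketch of the same shape, with hypothetical names throughout:

#include <pthread.h>

#define ON_LIST	0x1

/* Hypothetical item tracked on a lock-protected list. */
struct tracked_item {
	unsigned int		flags;		/* ON_LIST toggled under *list_lock */
	pthread_spinlock_t	*list_lock;
};

/* Stand-in for xfs_trans_ail_delete(): removes the item and, like the
 * real function, drops the list lock before returning. */
static void remove_locked(struct tracked_item *it)
{
	it->flags &= ~ON_LIST;
	pthread_spin_unlock(it->list_lock);
}

static void remove_if_listed(struct tracked_item *it)
{
	if (!(it->flags & ON_LIST))		/* unlocked fast path */
		return;
	pthread_spin_lock(it->list_lock);
	if (it->flags & ON_LIST)		/* re-check under the lock */
		remove_locked(it);		/* drops the lock for us */
	else
		pthread_spin_unlock(it->list_lock);	/* lost the race */
}

int main(void)
{
	pthread_spinlock_t lock;
	struct tracked_item it = { .flags = ON_LIST, .list_lock = &lock };

	pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
	remove_if_listed(&it);	/* locks, removes, and drops the lock */
	remove_if_listed(&it);	/* fast path: no longer listed */
	pthread_spin_destroy(&lock);
	return 0;
}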
Example #5
void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);

	if (ip->i_itemp) {
		ASSERT(!(ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL));
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state. The ip->i_flags_lock provides the barrier against lookup
	 * races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!xfs_isiflocked(ip));
	XFS_STATS_DEC(vn_active);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}
Example #6
/*
 * xfs_attr_inactive kills all traces of an attribute fork on an inode. It
 * removes both the on-disk and in-memory inode fork. Note that this also has to
 * handle the condition of inodes without attributes but with an attribute fork
 * configured, so we can't use xfs_inode_hasattr() here.
 *
 * The in-memory attribute fork is removed even on error.
 */
int
xfs_attr_inactive(
	struct xfs_inode	*dp)
{
	struct xfs_trans	*trans;
	struct xfs_mount	*mp;
	int			cancel_flags = 0;
	int			lock_mode = XFS_ILOCK_SHARED;
	int			error = 0;

	mp = dp->i_mount;
	ASSERT(!XFS_NOT_DQATTACHED(mp, dp));

	xfs_ilock(dp, lock_mode);
	if (!XFS_IFORK_Q(dp))
		goto out_destroy_fork;
	xfs_iunlock(dp, lock_mode);

	/*
	 * Start our first transaction of the day.
	 *
	 * All future transactions during this code must be "chained" off
	 * this one via the trans_dup() call.  All transactions will contain
	 * the inode, and the inode will always be marked with trans_ihold().
	 * Since the inode will be locked in all transactions, we must log
	 * the inode in every transaction to let it float upward through
	 * the log.
	 */
	lock_mode = 0;
	trans = xfs_trans_alloc(mp, XFS_TRANS_ATTRINVAL);
	error = xfs_trans_reserve(trans, &M_RES(mp)->tr_attrinval, 0, 0);
	if (error)
		goto out_cancel;

	lock_mode = XFS_ILOCK_EXCL;
	cancel_flags = XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT;
	xfs_ilock(dp, lock_mode);

	if (!XFS_IFORK_Q(dp))
		goto out_cancel;

	/*
	 * No need to make quota reservations here. We expect to release some
	 * blocks, not allocate, in the common case.
	 */
	xfs_trans_ijoin(trans, dp, 0);

	/*
	 * Invalidate and truncate the attribute fork extents. Make sure the
	 * fork actually has attributes as otherwise the invalidation has no
	 * blocks to read and returns an error. In this case, just do the fork
	 * removal below.
	 */
	if (xfs_inode_hasattr(dp) &&
	    dp->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) {
		error = xfs_attr3_root_inactive(&trans, dp);
		if (error)
			goto out_cancel;

		error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK, 0);
		if (error)
			goto out_cancel;
	}

	/* Reset the attribute fork - this also destroys the in-core fork */
	xfs_attr_fork_remove(dp, trans);

	error = xfs_trans_commit(trans, XFS_TRANS_RELEASE_LOG_RES);
	xfs_iunlock(dp, lock_mode);
	return error;

out_cancel:
	xfs_trans_cancel(trans, cancel_flags);
out_destroy_fork:
	/* kill the in-core attr fork before we drop the inode lock */
	if (dp->i_afp)
		xfs_idestroy_fork(dp, XFS_ATTR_FORK);
	if (lock_mode)
		xfs_iunlock(dp, lock_mode);
	return error;
}
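
Note the locking dance around the two XFS_IFORK_Q() checks in Example #6: the fork is first tested under the cheap shared ilock, the lock is dropped for the blocking transaction allocation and reservation, and the test is repeated under the exclusive ilock because the fork may have disappeared in the meantime. A generic, runnable sketch of that check / drop / recheck shape, with hypothetical names:

#include <pthread.h>
#include <stdbool.h>

/* Hypothetical object whose sub-resource can vanish concurrently. */
struct object {
	pthread_rwlock_t	lock;
	void			*resource;
};

static bool teardown_resource(struct object *obj)
{
	/* Cheap shared-lock check, analogous to the first ilock/IFORK_Q. */
	pthread_rwlock_rdlock(&obj->lock);
	if (!obj->resource) {
		pthread_rwlock_unlock(&obj->lock);
		return false;
	}
	pthread_rwlock_unlock(&obj->lock);

	/* ... blocking setup runs here with no lock held, analogous to
	 * the transaction allocation and reservation above ... */

	/* Exclusive lock and re-check: the resource may be gone by now. */
	pthread_rwlock_wrlock(&obj->lock);
	if (!obj->resource) {
		pthread_rwlock_unlock(&obj->lock);
		return false;
	}
	obj->resource = NULL;	/* the real teardown work goes here */
	pthread_rwlock_unlock(&obj->lock);
	return true;
}

int main(void)
{
	static int token;
	struct object obj = { .resource = &token };

	pthread_rwlock_init(&obj.lock, NULL);
	teardown_resource(&obj);	/* performs the teardown */
	teardown_resource(&obj);	/* bails at the shared-lock check */
	pthread_rwlock_destroy(&obj.lock);
	return 0;
}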