Example #1
/*
 * Purge a vnode from the cache.
 * At this point the vnode is guaranteed to have no references (vn_count == 0).
 * The caller has to make sure that there are no ways someone could
 * get a handle (via vn_get) on the vnode (usually done via a mount/vfs lock).
 */
void
vn_purge(
	struct vnode	*vp,
	vmap_t		*vmap)
{
	vn_trace_entry(vp, "vn_purge", (inst_t *)__return_address);

again:
	/*
	 * Check whether vp has already been reclaimed since our caller
	 * sampled its version while holding a filesystem cache lock that
	 * its VOP_RECLAIM function acquires.
	 */
	VN_LOCK(vp);
	if (vp->v_number != vmap->v_number) {
		VN_UNLOCK(vp, 0);
		return;
	}

	/*
	 * If vp is being reclaimed or inactivated, wait until it is inert,
	 * then proceed.  Can't assume that vnode is actually reclaimed
	 * just because the reclaimed flag is asserted -- a vn_alloc
	 * reclaim can fail.
	 */
	if (vp->v_flag & (VINACT | VRECLM)) {
		ASSERT(vn_count(vp) == 0);
		vp->v_flag |= VWAIT;
		sv_wait(vptosync(vp), PINOD, &vp->v_lock, 0);
		goto again;
	}

	/*
	 * Another process could have raced in and gotten this vnode...
	 */
	if (vn_count(vp) > 0) {
		VN_UNLOCK(vp, 0);
		return;
	}

	XFS_STATS_DEC(vn_active);
	vp->v_flag |= VRECLM;
	VN_UNLOCK(vp, 0);

	/*
	 * Call VOP_RECLAIM and clean vp. The FSYNC_INVAL flag tells
	 * vp's filesystem to flush and invalidate all cached resources.
	 * When vn_reclaim returns, vp should have no private data,
	 * either in a system cache or attached to v_data.
	 */
	if (vn_reclaim(vp) != 0)
		panic("vn_purge: cannot reclaim");

	/*
	 * Wake up anyone waiting for vp to be reclaimed.
	 */
	vn_wakeup(vp);
}
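
The v_number check at the top of vn_purge() only helps if the caller sampled the vnode's generation while holding a lock that the filesystem's VOP_RECLAIM also takes, as the comment inside the function notes. The following is a minimal caller sketch, not taken from the source: the example_purge name, the cache_lock parameter, and the assumption that vmap_t exposes a v_number field are purely illustrative.

/*
 * Hypothetical caller sketch (illustration only, not from the source):
 * snapshot the vnode's generation into a vmap_t under the same cache
 * lock that VOP_RECLAIM acquires, drop the lock, and let vn_purge()
 * re-validate v_number before doing the reclaim.
 */
static void
example_purge(
	struct vnode	*vp,
	spinlock_t	*cache_lock)	/* assumed: the fs cache lock */
{
	vmap_t		vmap;

	/*
	 * Sample the generation under the cache lock; a racing reclaim
	 * bumps v_number, so vn_purge() will notice and return early.
	 * The real vmap_t may carry more identifying fields than shown.
	 */
	spin_lock(cache_lock);
	vmap.v_number = vp->v_number;
	spin_unlock(cache_lock);

	vn_purge(vp, &vmap);
}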
Example #2
/*
 * Reference count of Linux inode if present, -1 if the xfs_inode
 * has no associated Linux inode.
 */
static inline int xfs_icount(struct xfs_inode *ip)
{
    bhv_vnode_t *vp = XFS_ITOV_NULL(ip);

    if (vp)
        return vn_count(vp);
    return -1;
}
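
A helper like this is mainly useful in assertions and debug tracing, where an xfs_inode may or may not have a Linux inode attached. Below is a minimal usage sketch, assuming only xfs_icount() itself and the kernel's ASSERT macro; the example function name is hypothetical.

/*
 * Illustrative use only: check that an xfs_inode we are about to tear
 * down has no remaining Linux-side references.  xfs_icount() returns -1
 * when no Linux inode was ever attached, which this check also accepts.
 */
static inline void example_assert_unreferenced(struct xfs_inode *ip)
{
	ASSERT(xfs_icount(ip) <= 0);
}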
Example #3
/*
 *  Call VOP_INACTIVE on last reference.
 */
void
vn_rele(
	struct vnode	*vp)
{
	int		vcnt;
	int		cache;

	XFS_STATS_INC(vn_rele);

	VN_LOCK(vp);

	vn_trace_entry(vp, "vn_rele", (inst_t *)__return_address);
	vcnt = vn_count(vp);

	/*
	 * Since we always get called from put_inode we know
	 * that i_count won't be decremented after we
	 * return.
	 */
	if (!vcnt) {
		/*
		 * As soon as we turn this on, no one can find us in vn_get
		 * until we turn off VINACT or VRECLM.
		 */
		vp->v_flag |= VINACT;
		VN_UNLOCK(vp, 0);

		/*
		 * Do not make the VOP_INACTIVE call if the vnode
		 * has no behaviors attached to call.
		 */
		if (vp->v_fbhv)
			VOP_INACTIVE(vp, NULL, cache);

		VN_LOCK(vp);
		if (vp->v_flag & VWAIT)
			sv_broadcast(vptosync(vp));

		vp->v_flag &= ~(VINACT|VWAIT|VRECLM|VMODIFIED);
	}

	VN_UNLOCK(vp, 0);

	vn_trace_exit(vp, "vn_rele", (inst_t *)__return_address);
}
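
Examples 4 and 5 below drop references through the VN_RELE() wrapper rather than calling vn_rele() directly. Its exact definition is not shown in these examples; a plausible minimal form, stated here only as an assumption, is:

/* Assumed definition, consistent with how Examples 4 and 5 use it. */
#define VN_RELE(vp)	(vn_rele(vp))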
Example #4
/*
 * xfs_unmount_flush implements a set of flush operations on special
 * inodes, which are kept as a separate set of operations so that
 * they can be called as part of the relocation process.
 */
int
xfs_unmount_flush(
	xfs_mount_t	*mp,		/* Mount structure we are getting
					   rid of. */
	int             relocation)	/* Called from vfs relocation. */
{
	xfs_inode_t	*rip = mp->m_rootip;
	xfs_inode_t	*rbmip;
	xfs_inode_t	*rsumip = NULL;
	vnode_t		*rvp = XFS_ITOV(rip);
	int		error;

	xfs_ilock(rip, XFS_ILOCK_EXCL);
	xfs_iflock(rip);

	/*
	 * Flush out the real time inodes.
	 */
	if ((rbmip = mp->m_rbmip) != NULL) {
		xfs_ilock(rbmip, XFS_ILOCK_EXCL);
		xfs_iflock(rbmip);
		error = xfs_iflush(rbmip, XFS_IFLUSH_SYNC);
		xfs_iunlock(rbmip, XFS_ILOCK_EXCL);

		if (error == EFSCORRUPTED)
			goto fscorrupt_out;

		ASSERT(vn_count(XFS_ITOV(rbmip)) == 1);

		rsumip = mp->m_rsumip;
		xfs_ilock(rsumip, XFS_ILOCK_EXCL);
		xfs_iflock(rsumip);
		error = xfs_iflush(rsumip, XFS_IFLUSH_SYNC);
		xfs_iunlock(rsumip, XFS_ILOCK_EXCL);

		if (error == EFSCORRUPTED)
			goto fscorrupt_out;

		ASSERT(vn_count(XFS_ITOV(rsumip)) == 1);
	}

	/*
	 * Synchronously flush root inode to disk
	 */
	error = xfs_iflush(rip, XFS_IFLUSH_SYNC);
	if (error == EFSCORRUPTED)
		goto fscorrupt_out2;

	if (vn_count(rvp) != 1 && !relocation) {
		xfs_iunlock(rip, XFS_ILOCK_EXCL);
		return XFS_ERROR(EBUSY);
	}

	/*
	 * Release the dquots that the root inode, rbmino and rsumino might be
	 * holding, then flush and purge the quota inodes.
	 */
	error = XFS_QM_UNMOUNT(mp);
	if (error == EFSCORRUPTED)
		goto fscorrupt_out2;

	if (rbmip) {
		VN_RELE(XFS_ITOV(rbmip));
		VN_RELE(XFS_ITOV(rsumip));
	}

	xfs_iunlock(rip, XFS_ILOCK_EXCL);
	return 0;

fscorrupt_out:
	xfs_ifunlock(rip);

fscorrupt_out2:
	xfs_iunlock(rip, XFS_ILOCK_EXCL);

	return XFS_ERROR(EFSCORRUPTED);
}
Example #5
STATIC int
xfs_unmount(
	bhv_desc_t	*bdp,
	int		flags,
	cred_t		*credp)
{
	struct vfs	*vfsp = bhvtovfs(bdp);
	xfs_mount_t	*mp = XFS_BHVTOM(bdp);
	xfs_inode_t	*rip;
	vnode_t		*rvp;
	int		unmount_event_wanted = 0;
	int		unmount_event_flags = 0;
	int		xfs_unmountfs_needed = 0;
	int		error;

	rip = mp->m_rootip;
	rvp = XFS_ITOV(rip);

	if (vfsp->vfs_flag & VFS_DMI) {
		error = XFS_SEND_PREUNMOUNT(mp, vfsp,
				rvp, DM_RIGHT_NULL, rvp, DM_RIGHT_NULL,
				NULL, NULL, 0, 0,
				(mp->m_dmevmask & (1<<DM_EVENT_PREUNMOUNT))?
					0:DM_FLAGS_UNWANTED);
		if (error)
			return XFS_ERROR(error);
		unmount_event_wanted = 1;
		unmount_event_flags = (mp->m_dmevmask & (1<<DM_EVENT_UNMOUNT))?
					0 : DM_FLAGS_UNWANTED;
	}

	/*
	 * First blow any referenced inode from this file system
	 * out of the reference cache, and delete the timer.
	 */
	xfs_refcache_purge_mp(mp);

	XFS_bflush(mp->m_ddev_targp);
	error = xfs_unmount_flush(mp, 0);
	if (error)
		goto out;

	ASSERT(vn_count(rvp) == 1);

	/*
	 * Drop the reference count
	 */
	VN_RELE(rvp);

	/*
	 * If we're forcing a shutdown, typically because of a media error,
	 * we want to make sure we invalidate dirty pages that belong to
	 * referenced vnodes as well.
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		error = xfs_sync(&mp->m_bhv,
			 (SYNC_WAIT | SYNC_CLOSE), credp);
		ASSERT(error != EFSCORRUPTED);
	}
	xfs_unmountfs_needed = 1;

out:
	/*	Send DMAPI event, if required.
	 *	Then do xfs_unmountfs() if needed.
	 *	Then return error (or zero).
	 */
	if (unmount_event_wanted) {
		/* Note: mp structure must still exist for
		 * XFS_SEND_UNMOUNT() call.
		 */
		XFS_SEND_UNMOUNT(mp, vfsp, error == 0 ? rvp : NULL,
			DM_RIGHT_NULL, 0, error, unmount_event_flags);
	}
	if (xfs_unmountfs_needed) {
		/*
		 * Call common unmount function to flush to disk
		 * and free the super block buffer & mount structures.
		 */
		xfs_unmountfs(mp, credp);
	}

	return XFS_ERROR(error);
}