Example #1
void
xfs_refcache_resize(int xfs_refcache_new_size)
{
	int		i;
	xfs_inode_t	*ip;
	int		iplist_index = 0;
	xfs_inode_t	**iplist;
	int		error;

	/*
	 * If the new size is smaller than the current size,
	 * purge entries to shrink the cache, and
	 * reposition the index if necessary.
	 * Don't bother if there's no refcache yet.
	 */
	if (xfs_refcache && (xfs_refcache_new_size < xfs_refcache_size)) {

		iplist = (xfs_inode_t **)kmem_zalloc(XFS_REFCACHE_SIZE_MAX *
				sizeof(xfs_inode_t *), KM_SLEEP);

		spin_lock(&xfs_refcache_lock);

		for (i = xfs_refcache_new_size; i < xfs_refcache_size; i++) {
			ip = xfs_refcache[i];
			if (ip != NULL) {
				xfs_refcache[i] = NULL;
				ip->i_refcache = NULL;
				xfs_refcache_count--;
				ASSERT(xfs_refcache_count >= 0);
				iplist[iplist_index] = ip;
				iplist_index++;
			}
		}

		xfs_refcache_size = xfs_refcache_new_size;

		/*
		 * Move index to beginning of cache if it's now past the end
		 */
		if (xfs_refcache_index >= xfs_refcache_new_size)
			xfs_refcache_index = 0;

		spin_unlock(&xfs_refcache_lock);

		/*
		 * Now drop the inodes we collected.
		 */
		for (i = 0; i < iplist_index; i++) {
			VOP_RELEASE(XFS_ITOV(iplist[i]), error);
			VN_RELE(XFS_ITOV(iplist[i]));
		}

		kmem_free(iplist, XFS_REFCACHE_SIZE_MAX *
				  sizeof(xfs_inode_t *));
	} else {
		spin_lock(&xfs_refcache_lock);
		xfs_refcache_size = xfs_refcache_new_size;
		spin_unlock(&xfs_refcache_lock);
	}
}
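
A pattern worth noting here (it recurs in Example #2): entries are only detached from the array while the spinlock is held, and the actual reference drops (VOP_RELEASE/VN_RELE, which may block) happen after the lock is released, since blocking while holding a spinlock is not allowed. Below is a minimal userspace sketch of the same two-phase idea, assuming pthreads; cache, cache_lock and release_entry are illustrative names, not XFS API.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define CACHE_SIZE 8

/* Hypothetical stand-ins for the kernel structures in the example. */
static void *cache[CACHE_SIZE];
static pthread_spinlock_t cache_lock;

/*
 * May block (the kernel analogue is VOP_RELEASE/VN_RELE), so it must
 * never be called while cache_lock is held.
 */
static void release_entry(void *p)
{
	printf("releasing %p\n", p);
	free(p);
}

static void cache_shrink(int new_size)
{
	void *victims[CACHE_SIZE];
	int nvictims = 0;
	int i;

	/* Phase 1: detach victims under the lock; no blocking calls here. */
	pthread_spin_lock(&cache_lock);
	for (i = new_size; i < CACHE_SIZE; i++) {
		if (cache[i] != NULL) {
			victims[nvictims++] = cache[i];
			cache[i] = NULL;
		}
	}
	pthread_spin_unlock(&cache_lock);

	/* Phase 2: drop the references with the lock released. */
	for (i = 0; i < nvictims; i++)
		release_entry(victims[i]);
}

int main(void)
{
	int i;

	pthread_spin_init(&cache_lock, PTHREAD_PROCESS_PRIVATE);
	for (i = 0; i < CACHE_SIZE; i++)
		cache[i] = malloc(16);
	cache_shrink(4);
	return 0;
}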
Example #2
/*
 * This is called from the XFS sync code to ensure that the refcache
 * is emptied out over time.  We purge a small number of entries with
 * each call.
 */
void
xfs_refcache_purge_some(xfs_mount_t *mp)
{
	int		error, i;
	xfs_inode_t	*ip;
	int		iplist_index;
	xfs_inode_t	**iplist;

	if ((xfs_refcache == NULL) || (xfs_refcache_count == 0)) {
		return;
	}

	iplist_index = 0;
	iplist = (xfs_inode_t **)kmem_zalloc(xfs_refcache_purge_count *
					  sizeof(xfs_inode_t *), KM_SLEEP);

	spin_lock(&xfs_refcache_lock);

	/*
	 * Store any inodes we find in the next several entries
	 * into the iplist array to be released after dropping
	 * the spinlock.  We always start looking from the currently
	 * oldest place in the cache.  We move the refcache index
	 * forward as we go so that we are sure to eventually clear
	 * out the entire cache when the system goes idle.
	 */
	for (i = 0; i < xfs_refcache_purge_count; i++) {
		ip = xfs_refcache[xfs_refcache_index];
		if (ip != NULL) {
			xfs_refcache[xfs_refcache_index] = NULL;
			ip->i_refcache = NULL;
			xfs_refcache_count--;
			ASSERT(xfs_refcache_count >= 0);
			iplist[iplist_index] = ip;
			iplist_index++;
		}
		xfs_refcache_index++;
		if (xfs_refcache_index == xfs_refcache_size) {
			xfs_refcache_index = 0;
		}
	}

	spin_unlock(&xfs_refcache_lock);

	/*
	 * Now drop the inodes we collected.
	 */
	for (i = 0; i < iplist_index; i++) {
		VOP_RELEASE(XFS_ITOV(iplist[i]), error);
		VN_RELE(XFS_ITOV(iplist[i]));
	}

	kmem_free(iplist, xfs_refcache_purge_count *
			  sizeof(xfs_inode_t *));
}
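
Note how the purge advances a persistent "clock hand" (xfs_refcache_index) by purge_count slots per call, wrapping it back to zero at the end of the array, so successive calls eventually sweep every slot even if no single call empties the cache. A standalone sketch of that wrap-around sweep, with hypothetical names:

#include <stdio.h>

#define CACHE_SIZE	10
#define PURGE_COUNT	4

static int hand;	/* persists across calls, like xfs_refcache_index */

/* Visit the next PURGE_COUNT slots, wrapping at CACHE_SIZE. */
static void purge_some(void)
{
	int i;

	for (i = 0; i < PURGE_COUNT; i++) {
		printf("visiting slot %d\n", hand);
		if (++hand == CACHE_SIZE)
			hand = 0;
	}
}

int main(void)
{
	int call;

	/* Three calls visit 12 slots, so every slot is seen at least once. */
	for (call = 0; call < 3; call++)
		purge_some();
	return 0;
}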
Example #3
STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	vnode_t		*vp = vn_from_inode(inode);
	int		error = 0;

	if (vp)
		VOP_RELEASE(vp, error);
	return -error;
}
Example #4
STATIC int
linvfs_release(
	struct inode	*inode,
	struct file	*filp)
{
	vnode_t		*vp = LINVFS_GET_VP(inode);
	int		error = 0;

	if (vp)
		VOP_RELEASE(vp, error);
	return -error;
}
Example #5
void
xfs_refcache_iunlock(
	xfs_inode_t	*ip,
	uint		lock_flags)
{
	xfs_inode_t	*release_ip;
	int		error;

	release_ip = ip->i_release;
	ip->i_release = NULL;

	xfs_iunlock(ip, lock_flags);

	if (release_ip != NULL) {
		VOP_RELEASE(XFS_ITOV(release_ip), error);
		VN_RELE(XFS_ITOV(release_ip));
	}
}
Example #6
/*
 * This is called from the XFS unmount code to purge all entries for the
 * given mount from the cache.  It uses the refcache busy counter to
 * make sure that new entries are not added to the cache as we purge them.
 */
void
xfs_refcache_purge_mp(
	xfs_mount_t	*mp)
{
	vnode_t		*vp;
	int		error, i;
	xfs_inode_t	*ip;

	if (xfs_refcache == NULL) {
		return;
	}

	spin_lock(&xfs_refcache_lock);
	/*
	 * Bumping the busy counter keeps new entries from being added
	 * to the cache.  We use a counter since multiple unmounts could
	 * be in here simultaneously.
	 */
	xfs_refcache_busy++;

	for (i = 0; i < xfs_refcache_size; i++) {
		ip = xfs_refcache[i];
		if ((ip != NULL) && (ip->i_mount == mp)) {
			xfs_refcache[i] = NULL;
			ip->i_refcache = NULL;
			xfs_refcache_count--;
			ASSERT(xfs_refcache_count >= 0);
			spin_unlock(&xfs_refcache_lock);
			vp = XFS_ITOV(ip);
			VOP_RELEASE(vp, error);
			VN_RELE(vp);
			spin_lock(&xfs_refcache_lock);
		}
	}

	xfs_refcache_busy--;
	ASSERT(xfs_refcache_busy >= 0);
	spin_unlock(&xfs_refcache_lock);
}
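
The busy counter here is a simple exclusion scheme: any nonzero value means at least one purge is in flight, and it is a counter rather than a flag so that concurrent unmounts can nest. The insertion path is not among these examples, but it presumably checks the same counter; a hypothetical userspace sketch of that check (cache_insert and the other names are illustrative, not XFS API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define CACHE_SIZE 8

static pthread_spinlock_t cache_lock;
static void *cache[CACHE_SIZE];
static int cache_next;
static int cache_busy;		/* >0 while one or more purgers are active */

/*
 * Hypothetical insertion path: refuse new entries while any purge is
 * in progress, mirroring the xfs_refcache_busy check.
 */
static bool cache_insert(void *entry)
{
	bool added = false;

	pthread_spin_lock(&cache_lock);
	if (cache_busy == 0 && cache_next < CACHE_SIZE) {
		cache[cache_next++] = entry;
		added = true;
	}
	pthread_spin_unlock(&cache_lock);
	return added;
}

int main(void)
{
	int x = 42;

	pthread_spin_init(&cache_lock, PTHREAD_PROCESS_PRIVATE);
	printf("insert while idle: %d\n", cache_insert(&x));

	/* Simulate a purge in progress; inserts are now rejected. */
	pthread_spin_lock(&cache_lock);
	cache_busy++;
	pthread_spin_unlock(&cache_lock);

	printf("insert while busy: %d\n", cache_insert(&x));
	return 0;
}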
Example #7
/*
 * If the given inode is in the reference cache, purge its entry and
 * release the reference on the vnode.
 */
void
xfs_refcache_purge_ip(
	xfs_inode_t	*ip)
{
	vnode_t	*vp;
	int	error;

	/*
	 * If we're not pointing to our entry in the cache, then
	 * we must not be in the cache.
	 */
	if (ip->i_refcache == NULL) {
		return;
	}

	spin_lock(&xfs_refcache_lock);
	if (ip->i_refcache == NULL) {
		spin_unlock(&xfs_refcache_lock);
		return;
	}

	/*
	 * Clear both our pointer to the cache entry and its pointer
	 * back to us.
	 */
	ASSERT(*(ip->i_refcache) == ip);
	*(ip->i_refcache) = NULL;
	ip->i_refcache = NULL;
	xfs_refcache_count--;
	ASSERT(xfs_refcache_count >= 0);
	spin_unlock(&xfs_refcache_lock);

	vp = XFS_ITOV(ip);
	/* ASSERT(vp->v_count > 1); */
	VOP_RELEASE(vp, error);
	VN_RELE(vp);
}
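
ip->i_refcache is checked twice in this example: once without the lock as a cheap fast path, and again under the lock in case another thread purged the entry in the meantime. A minimal userspace sketch of this double-checked pattern, with hypothetical names:

#include <pthread.h>
#include <stddef.h>

static pthread_spinlock_t cache_lock;

struct entry {
	struct entry **slot;	/* back-pointer into the cache, or NULL */
};

/*
 * Remove 'e' from the cache if present, double-checking under the lock
 * in case a concurrent purge already cleared it.
 */
static void purge_entry(struct entry *e)
{
	if (e->slot == NULL)		/* unlocked fast path */
		return;

	pthread_spin_lock(&cache_lock);
	if (e->slot == NULL) {		/* lost a race; someone else purged it */
		pthread_spin_unlock(&cache_lock);
		return;
	}
	*(e->slot) = NULL;		/* clear the cache's pointer to us */
	e->slot = NULL;			/* and our pointer to the cache */
	pthread_spin_unlock(&cache_lock);

	/* The blocking reference drop would happen here, outside the lock. */
}

int main(void)
{
	static struct entry *cache_slot;
	struct entry e = { &cache_slot };

	pthread_spin_init(&cache_lock, PTHREAD_PROCESS_PRIVATE);
	cache_slot = &e;
	purge_entry(&e);	/* removes the entry from the cache */
	purge_entry(&e);	/* second call takes the fast path */
	return 0;
}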