Example #1
/*
 * Clean a vnode of filesystem-specific data and prepare it for reuse.
 */
STATIC int
vn_reclaim(
	struct vnode	*vp)
{
	int		error;

	XFS_STATS_INC(vn_reclaim);
	vn_trace_entry(vp, "vn_reclaim", (inst_t *)__return_address);

	/*
	 * Only make the VOP_RECLAIM call if there are behaviors
	 * to call.
	 */
	if (vp->v_fbhv) {
		VOP_RECLAIM(vp, error);
		if (error)
			return -error;
	}
	ASSERT(vp->v_fbhv == NULL);

	VN_LOCK(vp);
	vp->v_flag &= (VRECLM|VWAIT);
	VN_UNLOCK(vp, 0);

	vp->v_type = VNON;
	vp->v_fbhv = NULL;

#ifdef XFS_VNODE_TRACE
	ktrace_free(vp->v_trace);
	vp->v_trace = NULL;
#endif

	return 0;
}
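The v_trace buffer freed under XFS_VNODE_TRACE above is set up when the vnode is first initialized. A minimal sketch of that allocation side, assuming the ktrace_alloc() helper and the VNODE_TRACE_SIZE constant from the same era's XFS support code:

#ifdef XFS_VNODE_TRACE
	/* Assumed counterpart to the free above: a sleeping allocation of
	 * the per-vnode trace buffer (VNODE_TRACE_SIZE and KM_SLEEP are
	 * taken from the historical XFS headers, not from this example). */
	vp->v_trace = ktrace_alloc(VNODE_TRACE_SIZE, KM_SLEEP);
#endif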
Example #2
/*
 * xfs_filestream_uninit() is called at xfs termination time to destroy the
 * memory zone that was used for filestream data structure allocation.
 */
void
xfs_filestream_uninit(void)
{
#ifdef XFS_FILESTREAMS_TRACE
	ktrace_free(xfs_filestreams_trace_buf);
#endif
	kmem_zone_destroy(item_zone);
}
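For symmetry, the init-time counterpart creates the item zone and, when tracing is compiled in, allocates the trace buffer. A hedged sketch modeled on the historical xfs_filestream_init(); fstrm_item_t and XFS_FSTRM_KTRACE_SIZE are assumptions carried over from that code:

int
xfs_filestream_init(void)
{
	/* Create the zone that xfs_filestream_uninit() destroys above. */
	item_zone = kmem_zone_init(sizeof(fstrm_item_t), "fstrm_item");
	if (!item_zone)
		return -ENOMEM;
#ifdef XFS_FILESTREAMS_TRACE
	/* Allocate the trace buffer that the uninit path frees. */
	xfs_filestreams_trace_buf = ktrace_alloc(XFS_FSTRM_KTRACE_SIZE,
						 KM_SLEEP);
#endif
	return 0;
}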
Example #3
/*
 * This is called to free all the memory associated with a dquot.
 */
void
xfs_qm_dqdestroy(
	xfs_dquot_t	*dqp)
{
	ASSERT(! XFS_DQ_IS_ON_FREELIST(dqp));

	mutex_destroy(&dqp->q_qlock);
	sv_destroy(&dqp->q_pinwait);

#ifdef XFS_DQUOT_TRACE
	if (dqp->q_trace)
		ktrace_free(dqp->q_trace);
	dqp->q_trace = NULL;
#endif
	kmem_zone_free(xfs_Gqm->qm_dqzone, dqp);
	atomic_dec(&xfs_Gqm->qm_totaldquots);
}
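The q_trace buffer destroyed here is normally attached when the dquot is constructed. A sketch of that side, patterned on the historical xfs_qm_dqinit(); DQUOT_TRACE_SIZE is an assumption from the same code base:

#ifdef XFS_DQUOT_TRACE
	/* Assumed allocation counterpart: give each new dquot its own
	 * trace buffer so xfs_qm_dqdestroy() has something to free. */
	dqp->q_trace = ktrace_alloc(DQUOT_TRACE_SIZE, KM_SLEEP);
#endif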
Example #4
/*
 * Free the memory zones and trace buffers created during XFS
 * initialization; called at xfs termination time.
 */
void
xfs_cleanup(void)
{
	extern kmem_zone_t	*xfs_bmap_free_item_zone;
	extern kmem_zone_t	*xfs_btree_cur_zone;
	extern kmem_zone_t	*xfs_inode_zone;
	extern kmem_zone_t	*xfs_trans_zone;
	extern kmem_zone_t	*xfs_da_state_zone;
	extern kmem_zone_t	*xfs_dabuf_zone;
	extern kmem_zone_t	*xfs_efd_zone;
	extern kmem_zone_t	*xfs_efi_zone;
	extern kmem_zone_t	*xfs_buf_item_zone;
	extern kmem_zone_t	*xfs_chashlist_zone;

	xfs_cleanup_procfs();
	xfs_sysctl_unregister();
	xfs_refcache_destroy();

#ifdef XFS_DIR2_TRACE
	ktrace_free(xfs_dir2_trace_buf);
#endif
#ifdef XFS_ATTR_TRACE
	ktrace_free(xfs_attr_trace_buf);
#endif
#ifdef XFS_DIR_TRACE
	ktrace_free(xfs_dir_trace_buf);
#endif
#ifdef XFS_BMBT_TRACE
	ktrace_free(xfs_bmbt_trace_buf);
#endif
#ifdef XFS_BMAP_TRACE
	ktrace_free(xfs_bmap_trace_buf);
#endif
#ifdef XFS_ALLOC_TRACE
	ktrace_free(xfs_alloc_trace_buf);
#endif

	kmem_cache_destroy(xfs_bmap_free_item_zone);
	kmem_cache_destroy(xfs_btree_cur_zone);
	kmem_cache_destroy(xfs_inode_zone);
	kmem_cache_destroy(xfs_trans_zone);
	kmem_cache_destroy(xfs_da_state_zone);
	kmem_cache_destroy(xfs_dabuf_zone);
	kmem_cache_destroy(xfs_buf_item_zone);
	kmem_cache_destroy(xfs_efd_zone);
	kmem_cache_destroy(xfs_efi_zone);
	kmem_cache_destroy(xfs_ifork_zone);
	kmem_cache_destroy(xfs_ili_zone);
	kmem_cache_destroy(xfs_chashlist_zone);
	_ACL_ZONE_DESTROY(xfs_acl_zone);
}
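Note that this example calls kmem_cache_destroy() directly where Examples #2 and #3 use kmem_zone_destroy(). In the Linux port of XFS, kmem_zone_t is typically a typedef over the kernel's kmem_cache, so the two spellings do the same work; a plausible shim, written here as an assumption rather than the actual header contents:

/* Hedged sketch: kmem_zone_t as a thin alias for struct kmem_cache,
 * making kmem_zone_destroy() a NULL-tolerant kmem_cache_destroy(). */
typedef struct kmem_cache kmem_zone_t;

static inline void
kmem_zone_destroy(kmem_zone_t *zone)
{
	if (zone)
		kmem_cache_destroy(zone);
}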
Example #5
/* Try to discard pages, in order to recycle a vcache entry.
 *
 * We first make some sanity checks: ref count, open count, held locks.
 *
 * We also do some non-VM-related chores, such as releasing the cred pointer
 * (for AIX and Solaris) and releasing the gnode (for AIX).
 *
 * Locking: afs_xvcache lock is held. It must not be dropped.
 */
int
osi_VM_FlushVCache(struct vcache *avc)
{
    int s, code;
    vnode_t *vp = &avc->v;

    if (avc->vrefCount != 0)
	return EBUSY;

    if (avc->opens != 0)
	return EBUSY;

    /*
     * If someone is still referring to the vnode, give up trying to
     * get rid of it.
     */
    if (CheckLock(&avc->lock) || LockWaiters(&avc->lock))
	return EBUSY;

    s = VN_LOCK(vp);

    /*
     * We just need to avoid the race in vn_rele between the ref count
     * going to 0 and VOP_INACTIVE finishing up.  Note that although we
     * checked the count above, we did not hold the lock then.
     */
    if (vp->v_count > 0 || (vp->v_flag & VINACT)) {
	VN_UNLOCK(vp, s);
	return EBUSY;
    }
    VN_UNLOCK(vp, s);

    /*
     * Since we store on last close and on VOP_INACTIVE, there should be
     * no dirty pages.  Note that we hold the xvcache lock the entire time.
     */
    AFS_GUNLOCK();
    PTOSSVP(vp, (off_t) 0, (off_t) MAXLONG);
    AFS_GLOCK();

    /* afs_chkpgoob will drop and re-acquire the global lock. */
    afs_chkpgoob(vp, 0);
    osi_Assert(!VN_GET_PGCNT(vp));
    osi_Assert(!AFS_VN_MAPPED(vp));
    osi_Assert(!AFS_VN_DIRTY(&avc->v));

#if defined(AFS_SGI65_ENV)
    if (vp->v_filocks)
	cleanlocks(vp, IGN_PID, 0);
    mutex_destroy(&vp->v_filocksem);
#else /* AFS_SGI65_ENV */
    if (vp->v_filocksem) {
	if (vp->v_filocks)
#ifdef AFS_SGI64_ENV
	    cleanlocks(vp, &curprocp->p_flid);
#else
	    cleanlocks(vp, IGN_PID, 0);
#endif
	osi_Assert(vp->v_filocks == NULL);
	mutex_destroy(vp->v_filocksem);
	kmem_free(vp->v_filocksem, sizeof *vp->v_filocksem);
	vp->v_filocksem = NULL;
    }
#endif /* AFS_SGI65_ENV */

    if (avc->vrefCount)
	osi_Panic("flushVcache: vm race");
#ifdef AFS_SGI64_ENV
    AFS_GUNLOCK();
    vnode_pcache_reclaim(vp);	/* this can sleep */
    vnode_pcache_free(vp);
    if (vp->v_op != &Afs_vnodeops) {
	VOP_RECLAIM(vp, FSYNC_WAIT, code);
    }
    AFS_GLOCK();
#ifdef AFS_SGI65_ENV
#ifdef VNODE_TRACING
    ktrace_free(vp->v_trace);
#endif /* VNODE_TRACING */
    vn_bhv_remove(VN_BHV_HEAD(vp), &(avc->vc_bhv_desc));
    vn_bhv_head_destroy(&(vp->v_bh));
    destroy_bitlock(&vp->v_pcacheflag);
    mutex_destroy(&vp->v_buf_lock);
#else
    bhv_remove(VN_BHV_HEAD(vp), &(avc->vc_bhv_desc));
    bhv_head_destroy(&(vp->v_bh));
#endif
    vp->v_flag = 0;		/* debug */
#if defined(DEBUG) && defined(VNODE_INIT_BITLOCK)
    destroy_bitlock(&vp->v_flag);
#endif
#ifdef INTR_KTHREADS
    AFS_VN_DESTROY_BUF_LOCK(vp);
#endif
#endif /* AFS_SGI64_ENV */

    return 0;
}
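Across all five examples the teardown idiom is the same: ktrace_free() the buffer, then clear the pointer where it lives in a long-lived structure. The historical ktrace_free() in XFS's support code checks for NULL itself, which makes the guard in Example #3 redundant but harmless; a hypothetical wrapper (ktrace_free_ptr() is not part of XFS or AFS) that captures the whole idiom:

/* Hypothetical helper: free a trace buffer through its slot and clear
 * the slot, mirroring the "ktrace_free(x); x = NULL;" pattern used in
 * Examples #1 and #3. */
static inline void
ktrace_free_ptr(ktrace_t **ktpp)
{
	if (*ktpp != NULL) {
		ktrace_free(*ktpp);
		*ktpp = NULL;
	}
}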