Example #1
/*
 * Clean a vnode of filesystem-specific data and prepare it for reuse.
 */
STATIC int
vn_reclaim(
	struct vnode	*vp)
{
	int		error;

	XFS_STATS_INC(vn_reclaim);
	vn_trace_entry(vp, "vn_reclaim", (inst_t *)__return_address);

	/*
	 * Only make the VOP_RECLAIM call if there are behaviors
	 * to call.
	 */
	if (vp->v_fbhv) {
		VOP_RECLAIM(vp, error);
		if (error)
			return -error;
	}
	ASSERT(vp->v_fbhv == NULL);

	VN_LOCK(vp);
	/* Keep only the reclaim/wait bits; everything else is stale. */
	vp->v_flag &= (VRECLM|VWAIT);
	VN_UNLOCK(vp, 0);

	vp->v_type = VNON;
	vp->v_fbhv = NULL;

#ifdef XFS_VNODE_TRACE
	ktrace_free(vp->v_trace);
	vp->v_trace = NULL;
#endif

	return 0;
}
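vn_reclaim() only succeeds once the filesystem behaviors have detached, which is what makes the vnode safe to hand out again. A minimal caller sketch, assuming a recycling path that simply gives up when the filesystem refuses; vn_recycle_sketch() and its bail-out policy are illustrative, not real XFS helpers:

/*
 * Illustrative only: a recycling path built on vn_reclaim() above.
 * The name and the bail-out policy are assumptions, not XFS code.
 */
static struct vnode *
vn_recycle_sketch(struct vnode *vp)
{
	if (vn_reclaim(vp) != 0) {
		/* The filesystem would not let go; leave the vnode cached. */
		return NULL;
	}
	/* v_type is VNON and v_fbhv is NULL here; safe to reinitialize. */
	return vp;
}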
Example #2
/*
 * Decrement refcount.
 * Called by VOP_DECREF.
 * Calls VOP_RECLAIM if the refcount hits zero.
 */
void
vnode_decref(struct vnode *vn)
{
	int result, do_it = 0;

	KASSERT(vn != NULL);

	lock_acquire(vn->vn_countlock);

	KASSERT(vn->vn_refcount > 0);
	if (vn->vn_refcount > 1) {
		vn->vn_refcount--;
	}
	else {
		/* Don't decrement; pass the reference to VOP_RECLAIM. */
		do_it = 1;
	}
	lock_release(vn->vn_countlock);
	if (do_it) {
		result = VOP_RECLAIM(vn);
		if (result != 0 && result != EBUSY) {
			// XXX: lame.
			kprintf("vfs: Warning: VOP_RECLAIM: %s\n",
				strerror(result));
		}
	}
}
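In OS/161 this function is normally reached through the VOP_DECREF macro rather than called directly. A hedged sketch of that mapping, following the customary vnode.h layout (verify against your own tree before relying on it):

/* Sketch of the usual OS/161 vnode.h mapping; verify against your tree. */
#define VOP_INCREF(vn)		vnode_incref(vn)
#define VOP_DECREF(vn)		vnode_decref(vn)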
Example #3
/*
 * Decrement refcount.
 * Called by VOP_DECREF.
 * Calls VOP_RECLAIM if the refcount hits zero.
 */
void
vnode_decref(struct vnode *vn)
{
	bool destroy;
	int result;

	KASSERT(vn != NULL);

	spinlock_acquire(&vn->vn_countlock);

	KASSERT(vn->vn_refcount > 0);
	if (vn->vn_refcount > 1) {
		vn->vn_refcount--;
		destroy = false;
	}
	else {
		/* Don't decrement; pass the reference to VOP_RECLAIM. */
		destroy = true;
	}
	spinlock_release(&vn->vn_countlock);

	if (destroy) {
		result = VOP_RECLAIM(vn);
		if (result != 0 && result != EBUSY) {
			// XXX: lame.
			kprintf("vfs: Warning: VOP_RECLAIM: %s\n",
				strerror(result));
		}
	}
}
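The refcount is deliberately left at 1 when VOP_RECLAIM is invoked, so the filesystem-side reclaim receives the last reference. A minimal sketch of what that filesystem side might look like in this style; the myfs_* names and the busy check are hypothetical, while vnode_cleanup() and kfree() are the usual OS/161 helpers:

/*
 * Hypothetical filesystem-side reclaim (myfs_* names are invented).
 * Returning EBUSY is the accepted way to refuse: vnode_decref() above
 * treats it as non-fatal and the vnode stays alive.
 */
static int
myfs_reclaim(struct vnode *vn)
{
	struct myfs_vnode *mv = vn->vn_data;

	if (mv->mv_busy) {		/* hypothetical "still in use" check */
		return EBUSY;
	}
	vnode_cleanup(vn);		/* tear down the abstract vnode */
	kfree(mv);			/* free the per-filesystem object */
	return 0;
}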
Example #4
int
RUMP_VOP_RECLAIM(struct vnode *vp)
{
	int error;

	rump_schedule();	/* schedule this thread onto a rump virtual CPU */
	error = VOP_RECLAIM(vp);
	rump_unschedule();	/* release the rump CPU again */

	return error;
}
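RUMP_VOP_RECLAIM() shows the standard rump bracketing: the calling host thread must be scheduled onto a rump kernel virtual CPU before it touches kernel data structures, and released afterwards. The same pattern, sketched around a placeholder operation; do_vnode_op() is a hypothetical stand-in, not a rump or NetBSD routine:

/*
 * Same bracketing pattern for any code that must run with the rump
 * kernel scheduled on the calling thread.  do_vnode_op() is hypothetical.
 */
int
rump_wrapped_call(struct vnode *vp)
{
	int error;

	rump_schedule();	/* borrow a rump virtual CPU for this thread */
	error = do_vnode_op(vp);
	rump_unschedule();	/* give the virtual CPU back */

	return error;
}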
Example #5
/* Try to discard pages, in order to recycle a vcache entry.
 *
 * We also make some sanity checks:  ref count, open count, held locks.
 *
 * We also do some non-VM-related chores, such as releasing the cred pointer
 * (for AIX and Solaris) and releasing the gnode (for AIX).
 *
 * Locking:  afs_xvcache lock is held. It must not be dropped.
 */
int
osi_VM_FlushVCache(struct vcache *avc)
{
    int s, code;
    vnode_t *vp = &avc->v;

    if (avc->vrefCount != 0)
	return EBUSY;

    if (avc->opens != 0)
	return EBUSY;

    /*
     * If someone is still referring to the vnode, give up trying to get
     * rid of it.
     */
    if (CheckLock(&avc->lock) || LockWaiters(&avc->lock))
	return EBUSY;

    s = VN_LOCK(vp);

    /*
     * We just need to avoid the race in vn_rele between the ref count
     * going to 0 and VOP_INACTIVE finishing up.  Note that although we
     * checked the count above, we did not hold the lock at that point.
     */
    if (vp->v_count > 0 || (vp->v_flag & VINACT)) {
	VN_UNLOCK(vp, s);
	return EBUSY;
    }
    VN_UNLOCK(vp, s);

    /*
     * Since we store on last close and on VOP_INACTIVE, there should be
     * NO dirty pages.  Note that we hold the xvcache lock the entire time.
     */
    AFS_GUNLOCK();
    PTOSSVP(vp, (off_t) 0, (off_t) MAXLONG);
    AFS_GLOCK();

    /* afs_chkpgoob will drop and re-acquire the global lock. */
    afs_chkpgoob(vp, 0);
    osi_Assert(!VN_GET_PGCNT(vp));
    osi_Assert(!AFS_VN_MAPPED(vp));
    osi_Assert(!AFS_VN_DIRTY(&avc->v));

#if defined(AFS_SGI65_ENV)
    if (vp->v_filocks)
	cleanlocks(vp, IGN_PID, 0);
    mutex_destroy(&vp->v_filocksem);
#else /* AFS_SGI65_ENV */
    if (vp->v_filocksem) {
	if (vp->v_filocks)
#ifdef AFS_SGI64_ENV
	    cleanlocks(vp, &curprocp->p_flid);
#else
	    cleanlocks(vp, IGN_PID, 0);
#endif
	osi_Assert(vp->v_filocks == NULL);
	mutex_destroy(vp->v_filocksem);
	kmem_free(vp->v_filocksem, sizeof *vp->v_filocksem);
	vp->v_filocksem = NULL;
    }
#endif /* AFS_SGI65_ENV */

    if (avc->vrefCount)
	osi_Panic("flushVcache: vm race");
#ifdef AFS_SGI64_ENV
    AFS_GUNLOCK();
    vnode_pcache_reclaim(vp);	/* this can sleep */
    vnode_pcache_free(vp);
    if (vp->v_op != &Afs_vnodeops) {
	VOP_RECLAIM(vp, FSYNC_WAIT, code);
    }
    AFS_GLOCK();
#ifdef AFS_SGI65_ENV
#ifdef VNODE_TRACING
    ktrace_free(vp->v_trace);
#endif /* VNODE_TRACING */
    vn_bhv_remove(VN_BHV_HEAD(vp), &(avc->vc_bhv_desc));
    vn_bhv_head_destroy(&(vp->v_bh));
    destroy_bitlock(&vp->v_pcacheflag);
    mutex_destroy(&vp->v_buf_lock);
#else
    bhv_remove(VN_BHV_HEAD(vp), &(avc->vc_bhv_desc));
    bhv_head_destroy(&(vp->v_bh));
#endif
    vp->v_flag = 0;		/* debug */
#if defined(DEBUG) && defined(VNODE_INIT_BITLOCK)
    destroy_bitlock(&vp->v_flag);
#endif
#ifdef INTR_KTHREADS
    AFS_VN_DESTROY_BUF_LOCK(vp);
#endif
#endif /* AFS_SGI64_ENV */

    return 0;
}
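The return values above form a simple contract: 0 means the vcache is fully detached from the VM layer and may be recycled, EBUSY means it must be left alone. A simplified caller sketch under that contract; try_recycle_vcache() is illustrative, and the real OpenAFS caller (afs_FlushVCache) does considerably more:

/*
 * Illustrative caller, not the real afs_FlushVCache: recycle only when
 * osi_VM_FlushVCache() reports success; the caller holds afs_xvcache.
 */
static int
try_recycle_vcache(struct vcache *avc)
{
	int code;

	code = osi_VM_FlushVCache(avc);
	if (code) {
		return code;	/* typically EBUSY: keep the entry */
	}
	/* Safe to strip the entry and move it to the free list. */
	return 0;
}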