Example #1
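This wrapper appears to come from the NetBSD rump kernel: rump_schedule()/rump_unschedule() bracket the call so that VOP_INACTIVE() executes with a rump kernel context bound to the calling thread.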
int
RUMP_VOP_INACTIVE(struct vnode *vp,
    bool *recycle)
{
	int error;

	rump_schedule();
	error = VOP_INACTIVE(vp, recycle);
	rump_unschedule();

	return error;
}
Example #2
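A Solaris-style vn_rele(): the vnode's v_lock protects the reference count, and when the caller holds the last reference the count is left at 1 and VOP_INACTIVE() is invoked instead of decrementing it.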
void vn_rele(vnode_t *vp)
{
    if (vp->v_count == 0)
        cmn_err(CE_PANIC, "vn_rele: vnode ref count 0");

    mutex_enter(&vp->v_lock);
    if (vp->v_count == 1) {
        mutex_exit(&vp->v_lock);
        /* fprintf(stderr, "VNode %p inactive\n", vp); */
        VOP_INACTIVE(vp, CRED(), NULL);
    } else {
        vp->v_count--;
        mutex_exit(&vp->v_lock);
    }
}
Example #3
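vn_rele() from the Linux port of XFS (note the XFS_STATS_INC counter and the v_fbhv behavior-chain check): the VINACT flag is set while VOP_INACTIVE() runs so that concurrent vn_get() calls cannot find the vnode, and any waiters are woken afterwards.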
/*
 *  Call VOP_INACTIVE on last reference.
 */
void
vn_rele(
	struct vnode	*vp)
{
	int		vcnt;
	int		cache;	/* note: passed uninitialized to VOP_INACTIVE() below */

	XFS_STATS_INC(vn_rele);

	VN_LOCK(vp);

	vn_trace_entry(vp, "vn_rele", (inst_t *)__return_address);
	vcnt = vn_count(vp);

	/*
	 * Since we always get called from put_inode we know
	 * that i_count won't be decremented after we
	 * return.
	 */
	if (!vcnt) {
		/*
		 * As soon as we turn this on, no one can find us in vn_get
		 * until we turn off VINACT or VRECLM
		 */
		vp->v_flag |= VINACT;
		VN_UNLOCK(vp, 0);

		/*
		 * Do not make the VOP_INACTIVE call if there
		 * are no behaviors attached to the vnode to call.
		 */
		if (vp->v_fbhv)
			VOP_INACTIVE(vp, NULL, cache);

		VN_LOCK(vp);
		if (vp->v_flag & VWAIT)
			sv_broadcast(vptosync(vp));

		vp->v_flag &= ~(VINACT|VWAIT|VRECLM|VMODIFIED);
	}

	VN_UNLOCK(vp, 0);

	vn_trace_exit(vp, "vn_rele", (inst_t *)__return_address);
}
Example #4
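vnode_terminate() appears to be from DragonFly BSD's vnode layer (KKASSERT, the VX lock, v_spin): it marks the vnode VINACTIVE, calls VOP_INACTIVE() at most once, and then moves the vnode to the inactive state under v_spin before releasing the VX lock.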
/*
 * This function is called on the 1->0 transition (which is actually
 * 1->VREF_TERMINATE) when VREF_FINALIZE is set, forcing deactivation
 * of the vnode.
 *
 * Additional vrefs are allowed to race but will not result in a reentrant
 * call to vnode_terminate() due to refcnt being VREF_TERMINATE.  This
 * prevents additional 1->0 transitions.
 *
 * ONLY A VGET() CAN REACTIVATE THE VNODE.
 *
 * Caller must hold the VX lock.
 *
 * NOTE: v_mount may be NULL due to assignment to dead_vnode_vops
 *
 * NOTE: The vnode may be marked inactive with dirty buffers
 *	 or dirty pages in its cached VM object still present.
 *
 * NOTE: VS_FREE should not be set on entry (the vnode was expected to
 *	 previously be active).  We lose control of the vnode the instant
 *	 it is placed on the free list.
 *
 *	 The VX lock is required when transitioning to VS_CACHED but is
 *	 not sufficient for the vshouldfree() interlocked test or when
 *	 transitioning away from VS_CACHED.  v_spin is also required for
 *	 those cases.
 */
static
void
vnode_terminate(struct vnode *vp)
{
	KKASSERT(vp->v_state == VS_ACTIVE);

	if ((vp->v_flag & VINACTIVE) == 0) {
		_vsetflags(vp, VINACTIVE);
		if (vp->v_mount)
			VOP_INACTIVE(vp);
		/* might deactivate page */
	}
	spin_lock(&vp->v_spin);
	_vinactive(vp);
	spin_unlock(&vp->v_spin);

	vx_unlock(vp);
}
Example #5
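mvfs_clear_inode() bridges the Linux clear_inode path to VOP_INACTIVE() for MVFS: if the inode belongs to an mnode-based vnode, the vnode is deactivated explicitly and any error is only logged; cleartext vnodes are freed through a separate path.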
void
mvfs_clear_inode(struct inode *inode_p)
{
    CALL_DATA_T cd;

    ASSERT(MDKI_INOISOURS(inode_p));

    if (MDKI_INOISMVFS(inode_p)) {
        /* If we're an mnode-based vnode, do all this stuff ... */

        VNODE_T *vp = ITOV(inode_p);
        int error;

        ASSERT(I_COUNT(inode_p) == 0);
        ASSERT(inode_p->i_state & I_FREEING);

        mdki_linux_init_call_data(&cd);

        /*
         * Do actual deactivation of the vnode/mnode
         */
        error = VOP_INACTIVE(vp, &cd);
        mdki_linux_destroy_call_data(&cd);

        if (error)
            MDKI_VFS_LOG(VFS_LOG_ERR, "mvfs_clear_inode: inactive error %d\n",
                     error);
    } else if (MDKI_INOISCLRVN(inode_p)) {
        /* cleartext vnode */
        vnlayer_linux_free_clrvnode(ITOV(inode_p));
    } else {
        MDKI_TRACE(TRACE_INACTIVE,"no work: inode_p=%p vp=%p cnt=%d\n", inode_p,
                  ITOV(inode_p), I_COUNT(inode_p));
    }
    MDKI_TRACE(TRACE_INACTIVE,"inode_p=%p vp=%p cnt=%d\n", inode_p,
               ITOV(inode_p), I_COUNT(inode_p));
}
Example #6
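A minimal helper that forwards to VOP_INACTIVE() with the caller's credentials, presumably invoked from a vn_rele() similar to the one in Example #2.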
static void vn_rele_inactive(vnode_t *vp)
{
    VOP_INACTIVE(vp, CRED(), NULL);
}
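For context, a minimal sketch of the other side of these calls: what a filesystem's inactive routine might look like on the Solaris-style interface used in Examples #2 and #6. The myfs_* names, the mn_deleted flag, and the overall structure are illustrative assumptions, not taken from any of the code above.

/*
 * Hypothetical VOP_INACTIVE handler for the Solaris-style interface
 * (vp, cred, caller_context) seen in Examples #2 and #6.  myfs_node_t,
 * myfs_free_node(), myfs_cache_node() and mn_deleted are made-up names.
 */
static void
myfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
{
	myfs_node_t *mnp = vp->v_data;

	/*
	 * vn_rele() calls this instead of dropping the count from 1 to 0,
	 * so no other thread can reach the vnode through a normal lookup.
	 */
	if (mnp->mn_deleted) {
		myfs_free_node(mnp);	/* release in-core and on-disk state */
		vn_free(vp);		/* return the vnode to the system */
	} else {
		myfs_cache_node(mnp);	/* keep it cached for a later lookup */
	}
}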