Example #1
/*
 * We need to process our own vnode unlock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
static int
null_unlock(struct vop_unlock_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int flags = ap->a_flags;
	int mtxlkflag = 0;
	struct null_node *nn;
	struct vnode *lvp;
	int error;

	if ((flags & LK_INTERLOCK) != 0)
		mtxlkflag = 1;	/* caller passed the interlock in */
	else if (mtx_owned(VI_MTX(vp)) == 0) {
		VI_LOCK(vp);
		mtxlkflag = 2;	/* we acquired the interlock ourselves */
	}
	nn = VTONULL(vp);
	if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
		VI_LOCK_FLAGS(lvp, MTX_DUPOK);	/* same lock class as vp's interlock */
		flags |= LK_INTERLOCK;
		vholdl(lvp);
		VI_UNLOCK(vp);
		error = VOP_UNLOCK(lvp, flags);
		vdrop(lvp);
		if (mtxlkflag == 0)
			VI_LOCK(vp);	/* caller owned the interlock; restore it */
	} else {
		if (mtxlkflag == 2)
			VI_UNLOCK(vp);	/* we took it, but LK_INTERLOCK isn't in ap */
		error = vop_stdunlock(ap);
	}

	return (error);
}
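These handlers are not called directly; FreeBSD dispatches them through the filesystem's struct vop_vector. A minimal sketch of how the methods in these examples would be registered (the name sample_vnodeops and the abbreviated field list are illustrative; the real nullfs table registers many more operations):

/*
 * Sketch: wiring stacked-filesystem lock methods into a vop_vector.
 * Operations not listed here fall through to vop_default.
 */
struct vop_vector sample_vnodeops = {
	.vop_default =		&default_vnodeops,	/* fallback for other VOPs */
	.vop_lock1 =		null_lock,		/* see Example #6 below */
	.vop_unlock =		null_unlock,		/* the handler above */
	.vop_getwritemount =	null_getwritemount,	/* see Example #5 below */
};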
Example #2
int
osi_TryEvictVCache(struct vcache *avc, int *slept, int defersleep)
{
    struct vnode *vp;
    int code;

    vp = AFSTOV(avc);

    if (!VI_TRYLOCK(vp))
	return 0;
    code = osi_fbsd_checkinuse(avc);
    if (code != 0) {
	VI_UNLOCK(vp);
	return 0;
    }

    if ((vp->v_iflag & VI_DOOMED) != 0) {
	VI_UNLOCK(vp);
	return 1;
    }

    /* must hold the vnode before calling vgone()
     * This code largely copied from vfs_subr.c:vlrureclaim() */
    vholdl(vp);
    AFS_GUNLOCK();
    *slept = 1;
    /* use the interlock while locking, so no one else can DOOM this */
    ma_vn_lock(vp, LK_INTERLOCK|LK_EXCLUSIVE|LK_RETRY, curthread);
    vgone(vp);
    MA_VOP_UNLOCK(vp, 0, curthread);
    vdrop(vp);

    AFS_GLOCK();
    return 1;
}
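A hypothetical caller sketch showing how the *slept out-parameter is meant to be consumed (the list-walking helpers vcache_first()/vcache_next() and the restart policy are placeholders for illustration, not the actual OpenAFS scanner):

/*
 * Hypothetical eviction loop.  osi_TryEvictVCache() sets *slept when
 * it drops AFS_GLOCK, so the iterator can no longer be trusted and
 * the scan must restart from the top.
 */
static void
evict_scan_sketch(void)
{
    struct vcache *avc, *next;
    int slept;

restart:
    for (avc = vcache_first(); avc != NULL; avc = next) {
	next = vcache_next(avc);	/* grab next before avc may vanish */
	slept = 0;
	(void)osi_TryEvictVCache(avc, &slept, 0);
	if (slept)
	    goto restart;		/* AFS_GLOCK was dropped; rescan */
    }
}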
Example #3
File: osi_vm.c Project: hwr/openafs
/* Try to discard pages, in order to recycle a vcache entry.
 *
 * We also make some sanity checks:  ref count, open count, held locks.
 *
 * We also do some non-VM-related chores, such as releasing the cred pointer
 * (for AIX and Solaris) and releasing the gnode (for AIX).
 *
 * Locking:  afs_xvcache lock is held.  If it is dropped and re-acquired,
 *   *slept should be set to warn the caller.
 *
 * Formerly, afs_xvcache was dropped and re-acquired for Solaris, but now it
 * is not dropped and re-acquired for any platform.  It may be that *slept is
 * therefore obsolescent.
 *
 */
int
osi_VM_FlushVCache(struct vcache *avc, int *slept)
{
    struct vnode *vp;
    int code;

    vp = AFSTOV(avc);

    if (!VI_TRYLOCK(vp))
	return EBUSY;
    code = osi_fbsd_checkinuse(avc);
    if (code) {
	VI_UNLOCK(vp);
	return code;
    }

    /* must hold the vnode before calling cache_purge()
     * This code largely copied from vfs_subr.c:vlrureclaim() */
    vholdl(vp);
    VI_UNLOCK(vp);

    AFS_GUNLOCK();
    cache_purge(vp);
    AFS_GLOCK();

    vdrop(vp);

    return 0;
}
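The hold/drop bracket in the middle of this function is a reusable idiom: convert interlock ownership into a hold reference before any call that can sleep. A minimal sketch of the idiom in isolation (hold_and_purge_sketch is an invented name; the FreeBSD calls are the same ones used above):

/*
 * Sketch of the idiom above: the caller holds the vnode interlock;
 * vholdl() takes a hold reference so the vnode cannot be freed,
 * which lets us drop the interlock across a sleepable operation.
 */
static void
hold_and_purge_sketch(struct vnode *vp)
{
    ASSERT_VI_LOCKED(vp, __func__);	/* interlock held on entry */
    vholdl(vp);				/* pin the vnode */
    VI_UNLOCK(vp);			/* now safe to sleep */
    cache_purge(vp);			/* flush name-cache entries */
    vdrop(vp);				/* release the hold reference */
}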
Example #4
/* Try to discard pages, in order to recycle a vcache entry.
 *
 * We also make some sanity checks:  ref count, open count, held locks.
 *
 * We also do some non-VM-related chores, such as releasing the cred pointer
 * (for AIX and Solaris) and releasing the gnode (for AIX).
 *
 * Locking:  afs_xvcache lock is held.  If it is dropped and re-acquired,
 *   *slept should be set to warn the caller.
 *
 * Formerly, afs_xvcache was dropped and re-acquired for Solaris, but now it
 * is not dropped and re-acquired for any platform.  It may be that *slept is
 * therefore obsolescent.
 *
 */
int
osi_VM_FlushVCache(struct vcache *avc, int *slept)
{
    struct vm_object *obj;
    struct vnode *vp = AFSTOV(avc);

    if (!VI_TRYLOCK(vp)) /* need interlock to check usecount */
	return EBUSY;

    if (vp->v_usecount > 0) {
	VI_UNLOCK(vp);
	return EBUSY;
    }

    /* XXX
     * At some point the value of avc->opens here was typically -1,
     * caused by incorrectly performing afs_close processing on
     * vnodes being recycled. */
    if (avc->opens) {
	VI_UNLOCK(vp);
	return EBUSY;
    }

    /* if a lock is held, give up */
    if (CheckLock(&avc->lock)) {
	VI_UNLOCK(vp);
	return EBUSY;
    }

    if ((vp->v_iflag & VI_DOOMED) != 0) {
	VI_UNLOCK(vp);
	return (0);
    }

    /* must hold the vnode before calling vgone()
     * This code largely copied from vfs_subr.c:vlrureclaim() */
    vholdl(vp);
    AFS_GUNLOCK();
    *slept = 1;
    /* use the interlock while locking, so no one else can DOOM this */
    ilock_vnode(vp);
    vgone(vp);
    unlock_vnode(vp);
    vdrop(vp);

    AFS_GLOCK();
    return 0;
}
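Where Examples #2 and #3 call osi_fbsd_checkinuse(), this variant performs the same tests inline. A sketch of such a helper, reconstructed from the inline checks above (the body is assembled from this example for illustration, not taken from the actual OpenAFS helper):

/*
 * Sketch of an in-use check equivalent to the inline tests above.
 * Returns 0 if the vcache looks evictable, EBUSY otherwise.  The
 * caller must hold the vnode interlock.
 */
static int
checkinuse_sketch(struct vcache *avc)
{
    struct vnode *vp = AFSTOV(avc);

    if (vp->v_usecount > 0)	/* vnode still referenced */
	return EBUSY;
    if (avc->opens)		/* still open at the AFS layer */
	return EBUSY;
    if (CheckLock(&avc->lock))	/* AFS vcache lock held */
	return EBUSY;
    return 0;
}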
Example #5
/* ARGSUSED */
static int
null_getwritemount(struct vop_getwritemount_args *ap)
{
	struct null_node *xp;
	struct vnode *lowervp;
	struct vnode *vp;

	vp = ap->a_vp;
	VI_LOCK(vp);
	xp = VTONULL(vp);
	if (xp && (lowervp = xp->null_lowervp)) {
		VI_LOCK_FLAGS(lowervp, MTX_DUPOK);
		VI_UNLOCK(vp);
		vholdl(lowervp);
		VI_UNLOCK(lowervp);
		VOP_GETWRITEMOUNT(lowervp, ap->a_mpp);
		vdrop(lowervp);
	} else {
		VI_UNLOCK(vp);
		*(ap->a_mpp) = NULL;
	}
	return (0);
}
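A hedged caller sketch for VOP_GETWRITEMOUNT (loosely modelled on how FreeBSD's vn_start_write() path resolves the mount a write should be charged to; writemount_of_sketch is an invented name):

/*
 * Sketch: resolving the mount to account a write against.  On a
 * stacked filesystem, null_getwritemount() above forwards the
 * query to the lower vnode, so the bottom-most mount is returned.
 */
static struct mount *
writemount_of_sketch(struct vnode *vp)
{
	struct mount *mp;

	if (VOP_GETWRITEMOUNT(vp, &mp) != 0)
		return (NULL);
	return (mp);	/* may legitimately be NULL */
}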
Example #6
/*
 * We need to process our own vnode lock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
static int
null_lock(struct vop_lock1_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int flags = ap->a_flags;
	struct null_node *nn;
	struct vnode *lvp;
	int error;

	if ((flags & LK_INTERLOCK) == 0) {
		VI_LOCK(vp);
		ap->a_flags = flags |= LK_INTERLOCK;
	}
	nn = VTONULL(vp);
	/*
	 * If we're still active we must ask the lower layer to
	 * lock as ffs has special lock considerations in its
	 * vop lock.
	 */
	if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
		VI_LOCK_FLAGS(lvp, MTX_DUPOK);
		VI_UNLOCK(vp);
		/*
		 * We have to hold the vnode here to solve a potential
		 * reclaim race.  If we're forcibly vgone'd while we
		 * still have refs, a thread could be sleeping inside
		 * the lowervp's vop_lock routine.  When we vgone we will
		 * drop our last ref to the lowervp, which would allow it
		 * to be reclaimed.  The lowervp could then be recycled,
		 * in which case it is not legal to be sleeping in its VOP.
		 * We prevent it from being recycled by holding the vnode
		 * here.
		 */
		vholdl(lvp);
		error = VOP_LOCK(lvp, flags);

		/*
		 * We might have slept to get the lock and someone might
		 * have cleaned our vnode already, switching the vnode
		 * lock from the one in lowervp to the v_lock in our own
		 * vnode structure.  Handle this case by reacquiring the
		 * correct lock in the requested mode.
		 */
		if (VTONULL(vp) == NULL && error == 0) {
			ap->a_flags &= ~(LK_TYPE_MASK | LK_INTERLOCK);
			switch (flags & LK_TYPE_MASK) {
			case LK_SHARED:
				ap->a_flags |= LK_SHARED;
				break;
			case LK_UPGRADE:
			case LK_EXCLUSIVE:
				ap->a_flags |= LK_EXCLUSIVE;
				break;
			default:
				panic("Unsupported lock request %d\n",
				    ap->a_flags);
			}
			VOP_UNLOCK(lvp, 0);
			error = vop_stdlock(ap);
		}
		vdrop(lvp);
	} else
		error = vop_stdlock(ap);

	return (error);
}
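Lock requests reach null_lock() through vn_lock() and the VOP dispatch; a hedged caller sketch, assuming the same era of the FreeBSD API as these examples (where VOP_UNLOCK still takes a flags argument):

/*
 * Sketch: a typical consumer of the stacked lock path.  vn_lock()
 * dispatches to the vop_lock1 method (null_lock above), which in
 * turn takes the lower vnode's lock.
 */
static int
locked_op_sketch(struct vnode *vp)
{
	int error;

	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (error != 0)
		return (error);
	/* ... operate on the exclusively locked vnode ... */
	VOP_UNLOCK(vp, 0);	/* two-arg form matches this code's era */
	return (0);
}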