Example #1
File: osi_vm.c Project: hwr/openafs
/* Try to discard pages, in order to recycle a vcache entry.
 *
 * We also make some sanity checks:  ref count, open count, held locks.
 *
 * We also do some non-VM-related chores, such as releasing the cred pointer
 * (for AIX and Solaris) and releasing the gnode (for AIX).
 *
 * Locking:  afs_xvcache lock is held.  If it is dropped and re-acquired,
 *   *slept should be set to warn the caller.
 *
 * Formerly, afs_xvcache was dropped and re-acquired for Solaris, but now it
 * is not dropped and re-acquired for any platform.  It may be that *slept is
 * therefore obsolescent.
 *
 */
int
osi_VM_FlushVCache(struct vcache *avc, int *slept)
{
    struct vnode *vp;
    int code;

    vp = AFSTOV(avc);

    if (!VI_TRYLOCK(vp))
	return EBUSY;
    code = osi_fbsd_checkinuse(avc);
    if (code) {
	VI_UNLOCK(vp);
	return code;
    }

    /* must hold the vnode before calling cache_purge()
     * This code largely copied from vfs_subr.c:vlrureclaim() */
    vholdl(vp);
    VI_UNLOCK(vp);

    AFS_GUNLOCK();
    cache_purge(vp);
    AFS_GLOCK();

    vdrop(vp);

    return 0;
}
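The pattern above is worth distilling: take the vnode interlock without sleeping, verify the vnode is idle, then pin it with vholdl() before dropping the interlock so it cannot be recycled while the real work runs unlocked. A minimal hypothetical sketch (not code from the project above; the in-use check is reduced to a usecount test):

static int
try_flush_vnode(struct vnode *vp)
{
	if (!VI_TRYLOCK(vp))		/* never sleep on the interlock here */
		return (EBUSY);
	if (vp->v_usecount > 0) {	/* vnode still in use: give up */
		VI_UNLOCK(vp);
		return (EBUSY);
	}
	vholdl(vp);			/* pin the vnode; interlock is held */
	VI_UNLOCK(vp);			/* safe to drop: the hold prevents recycling */
	cache_purge(vp);		/* the real work, done unlocked */
	vdrop(vp);			/* release the hold */
	return (0);
}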
Example #2
/*
 * Get the cached vnode.
 */
static struct vnode *
unionfs_get_cached_vnode(struct vnode *uvp, struct vnode *lvp,
			struct vnode *dvp, char *path)
{
	struct unionfs_node_hashhead *hd;
	struct unionfs_node *unp;
	struct vnode   *vp;

	KASSERT((uvp == NULLVP || uvp->v_type == VDIR),
	    ("unionfs_get_cached_vnode: v_type != VDIR"));
	KASSERT((lvp == NULLVP || lvp->v_type == VDIR),
	    ("unionfs_get_cached_vnode: v_type != VDIR"));

	VI_LOCK(dvp);
	hd = unionfs_get_hashhead(dvp, path);
	LIST_FOREACH(unp, hd, un_hash) {
		if (!strcmp(unp->un_path, path)) {
			vp = UNIONFSTOV(unp);
			VI_LOCK_FLAGS(vp, MTX_DUPOK);
			VI_UNLOCK(dvp);
			vp->v_iflag &= ~VI_OWEINACT;
			if ((vp->v_iflag & (VI_DOOMED | VI_DOINGINACT)) != 0) {
				VI_UNLOCK(vp);
				vp = NULLVP;
			} else
				VI_UNLOCK(vp);
			return (vp);
		}
	}
	VI_UNLOCK(dvp);

	return (NULLVP);
}
Example #3
int
osi_TryEvictVCache(struct vcache *avc, int *slept, int defersleep)
{
    struct vnode *vp;
    int code;

    vp = AFSTOV(avc);

    if (!VI_TRYLOCK(vp))
	return 0;
    code = osi_fbsd_checkinuse(avc);
    if (code != 0) {
	VI_UNLOCK(vp);
	return 0;
    }

    if ((vp->v_iflag & VI_DOOMED) != 0) {
	VI_UNLOCK(vp);
	return 1;
    }

    /* must hold the vnode before calling vgone()
     * This code largely copied from vfs_subr.c:vlrureclaim() */
    vholdl(vp);
    AFS_GUNLOCK();
    *slept = 1;
    /* use the interlock while locking, so no one else can DOOM this */
    ma_vn_lock(vp, LK_INTERLOCK|LK_EXCLUSIVE|LK_RETRY, curthread);
    vgone(vp);
    MA_VOP_UNLOCK(vp, 0, curthread);
    vdrop(vp);

    AFS_GLOCK();
    return 1;
}
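The key move here is the LK_INTERLOCK handoff: the interlock taken by VI_TRYLOCK() is passed into the vnode lock routine and released atomically with respect to the lock acquisition, so no other thread can doom the vnode between the in-use check and vgone(). A hypothetical sketch using the stock FreeBSD API rather than the AFS compatibility wrappers:

static void
evict_vnode(struct vnode *vp)
{
	VI_LOCK(vp);
	vholdl(vp);		/* keep vp alive across vgone() */
	/* the interlock is transferred, not dropped, by LK_INTERLOCK */
	vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE | LK_RETRY);
	vgone(vp);		/* reclaim the vnode */
	VOP_UNLOCK(vp, 0);
	vdrop(vp);
}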
Example #4
/*
 * We need to process our own vnode unlock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
static int
null_unlock(struct vop_unlock_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int flags = ap->a_flags;
	int mtxlkflag = 0;
	struct null_node *nn;
	struct vnode *lvp;
	int error;

	if ((flags & LK_INTERLOCK) != 0)
		mtxlkflag = 1;
	else if (mtx_owned(VI_MTX(vp)) == 0) {
		VI_LOCK(vp);
		mtxlkflag = 2;
	}
	nn = VTONULL(vp);
	if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
		VI_LOCK_FLAGS(lvp, MTX_DUPOK);
		flags |= LK_INTERLOCK;
		vholdl(lvp);
		VI_UNLOCK(vp);
		error = VOP_UNLOCK(lvp, flags);
		vdrop(lvp);
		if (mtxlkflag == 0)
			VI_LOCK(vp);
	} else {
		if (mtxlkflag == 2)
			VI_UNLOCK(vp);
		error = vop_stdunlock(ap);
	}

	return (error);
}
Example #5
/*
 * Allocate (or lookup) pager for a vnode.
 * Handle is a vnode pointer.
 *
 * MPSAFE
 */
vm_object_t
vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t offset, struct ucred *cred)
{
	vm_object_t object;
	struct vnode *vp;

	/*
	 * Pageout to vnode, no can do yet.
	 */
	if (handle == NULL)
		return (NULL);

	vp = (struct vnode *) handle;

	/*
	 * If the object is being terminated, wait for it to
	 * go away.
	 */
retry:
	while ((object = vp->v_object) != NULL) {
		VM_OBJECT_LOCK(object);
		if ((object->flags & OBJ_DEAD) == 0)
			break;
		vm_object_set_flag(object, OBJ_DISCONNECTWNT);
		msleep(object, VM_OBJECT_MTX(object), PDROP | PVM, "vadead", 0);
	}

	if (vp->v_usecount == 0)
		panic("vnode_pager_alloc: no vnode reference");

	if (object == NULL) {
		/*
		 * Add an object of the appropriate size
		 */
		object = vm_object_allocate(OBJT_VNODE, OFF_TO_IDX(round_page(size)));

		object->un_pager.vnp.vnp_size = size;
		object->un_pager.vnp.writemappings = 0;

		object->handle = handle;
		VI_LOCK(vp);
		if (vp->v_object != NULL) {
			/*
			 * Object has been created while we were sleeping
			 */
			VI_UNLOCK(vp);
			vm_object_destroy(object);
			goto retry;
		}
		vp->v_object = object;
		VI_UNLOCK(vp);
	} else {
		object->ref_count++;
		VM_OBJECT_UNLOCK(object);
	}
	vref(vp);
	return (object);
}
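The retry logic above is an instance of optimistic allocation: allocate the object with no locks held (allocation may sleep), then recheck v_object under the interlock and back out if another thread won the race. The same shape, as a hypothetical sketch with illustrative names (alloc_private()/free_private() are stand-ins, not kernel APIs):

static void *
get_or_create(struct vnode *vp)
{
	void *data;

	for (;;) {
		VI_LOCK(vp);
		if ((data = vp->v_data) != NULL) {
			VI_UNLOCK(vp);
			return (data);		/* already created by someone else */
		}
		VI_UNLOCK(vp);
		data = alloc_private();		/* may sleep; no locks held */
		VI_LOCK(vp);
		if (vp->v_data == NULL) {	/* recheck: did we race? */
			vp->v_data = data;
			VI_UNLOCK(vp);
			return (data);
		}
		VI_UNLOCK(vp);
		free_private(data);		/* lost the race: discard and retry */
	}
}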
Example #6
static int
msdosfs_sync(struct mount *mp, int waitfor)
{
	struct vnode *vp, *nvp;
	struct thread *td;
	struct denode *dep;
	struct msdosfsmount *pmp = VFSTOMSDOSFS(mp);
	int error, allerror = 0;

	td = curthread;

	/*
	 * If we ever switch to not updating all of the fats all the time,
	 * this would be the place to update them from the first one.
	 */
	if (pmp->pm_fmod != 0) {
		if (pmp->pm_flags & MSDOSFSMNT_RONLY)
			panic("msdosfs_sync: rofs mod");
		else {
			/* update fats here */
		}
	}
	/*
	 * Write back each (modified) denode.
	 */
	MNT_ILOCK(mp);
loop:
	MNT_VNODE_FOREACH(vp, mp, nvp) {
		VI_LOCK(vp);
		if (vp->v_type == VNON || (vp->v_iflag & VI_DOOMED)) {
			VI_UNLOCK(vp);
			continue;
		}
		MNT_IUNLOCK(mp);
		dep = VTODE(vp);
		if ((dep->de_flag &
		    (DE_ACCESS | DE_CREATE | DE_UPDATE | DE_MODIFIED)) == 0 &&
		    (vp->v_bufobj.bo_dirty.bv_cnt == 0 ||
		    waitfor == MNT_LAZY)) {
			VI_UNLOCK(vp);
			MNT_ILOCK(mp);
			continue;
		}
		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, td);
		if (error) {
			MNT_ILOCK(mp);
			if (error == ENOENT)
				goto loop;
			continue;
		}
		error = VOP_FSYNC(vp, waitfor, td);
		if (error)
			allerror = error;
		VOP_UNLOCK(vp, 0);
		vrele(vp);
		MNT_ILOCK(mp);
	}
Example #7
File: osi_vm.c Project: hwr/openafs
/* Try to invalidate pages, for "fs flush" or "fs flushv"; or
 * try to free pages, when deleting a file.
 *
 * Locking:  the vcache entry's lock is held.  It may be dropped and 
 * re-obtained.
 *
 * Since we drop and re-obtain the lock, we can't guarantee that there won't
 * be some pages around when we return, newly created by concurrent activity.
 */
void
osi_VM_TryToSmush(struct vcache *avc, afs_ucred_t *acred, int sync)
{
    struct vnode *vp;
    int tries, code;
    int islocked;

    vp = AFSTOV(avc);

    VI_LOCK(vp);
    if (vp->v_iflag & VI_DOOMED) {
	VI_UNLOCK(vp);
	return;
    }
    VI_UNLOCK(vp);

    islocked = islocked_vnode(vp);
    if (islocked == LK_EXCLOTHER)
	panic("Trying to Smush over someone else's lock");
    else if (islocked == LK_SHARED) {
	afs_warn("Trying to Smush with a shared lock");
	lock_vnode(vp, LK_UPGRADE);
    } else if (!islocked)
	lock_vnode(vp, LK_EXCLUSIVE);

    if (vp->v_bufobj.bo_object != NULL) {
	AFS_VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
	/*
	 * Do we really want OBJPC_SYNC?  OBJPC_INVAL would be
	 * faster, if invalidation is really what we are being
	 * asked to do.  (It would make more sense, too, since
	 * otherwise this function is practically identical to
	 * osi_VM_StoreAllSegments().)  -GAW
	 */

	/*
	 * Dunno.  We no longer resemble osi_VM_StoreAllSegments,
	 * though maybe that's wrong, now.  And OBJPC_SYNC is the
	 * common thing in 70 file systems, it seems.  Matt.
	 */

	vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
	AFS_VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
    }

    tries = 5;
    code = osi_vinvalbuf(vp, V_SAVE, PCATCH, 0);
    while (code && (tries > 0)) {
	afs_warn("TryToSmush retrying vinvalbuf");
	code = osi_vinvalbuf(vp, V_SAVE, PCATCH, 0);
	--tries;
    }
    if (islocked == LK_SHARED)
	lock_vnode(vp, LK_DOWNGRADE);
    else if (!islocked)
	unlock_vnode(vp);
}
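Note how the function records the caller's lock state up front and restores it on exit, upgrading a shared lock for the duration of the exclusive-only work. The idiom in isolation, as a hypothetical fragment:

	islocked = VOP_ISLOCKED(vp);
	if (islocked == LK_SHARED)
		vn_lock(vp, LK_UPGRADE);	/* may drop and reacquire */
	else if (islocked == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	/* ... work that requires the exclusive vnode lock ... */
	if (islocked == LK_SHARED)
		vn_lock(vp, LK_DOWNGRADE);
	else if (islocked == 0)
		VOP_UNLOCK(vp, 0);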
Example #8
/* Try to discard pages, in order to recycle a vcache entry.
 *
 * We also make some sanity checks:  ref count, open count, held locks.
 *
 * We also do some non-VM-related chores, such as releasing the cred pointer
 * (for AIX and Solaris) and releasing the gnode (for AIX).
 *
 * Locking:  afs_xvcache lock is held.  If it is dropped and re-acquired,
 *   *slept should be set to warn the caller.
 *
 * Formerly, afs_xvcache was dropped and re-acquired for Solaris, but now it
 * is not dropped and re-acquired for any platform.  It may be that *slept is
 * therefore obsolescent.
 *
 */
int
osi_VM_FlushVCache(struct vcache *avc, int *slept)
{
    struct vm_object *obj;
    struct vnode *vp = AFSTOV(avc);

    if (!VI_TRYLOCK(vp)) /* need interlock to check usecount */
	return EBUSY;

    if (vp->v_usecount > 0) {
	VI_UNLOCK(vp);
	return EBUSY;
    }

    /* XXX
     * The value of avc->opens here came to be, at some point,
     * typically -1.  This was caused by incorrectly performing afs_close
     * processing on vnodes being recycled */
    if (avc->opens) {
	VI_UNLOCK(vp);
	return EBUSY;
    }

    /* if a lock is held, give up */
    if (CheckLock(&avc->lock)) {
	VI_UNLOCK(vp);
	return EBUSY;
    }

    if ((vp->v_iflag & VI_DOOMED) != 0) {
	VI_UNLOCK(vp);
	return (0);
    }

    /* must hold the vnode before calling vgone()
     * This code largely copied from vfs_subr.c:vlrureclaim() */
    vholdl(vp);
    AFS_GUNLOCK();
    *slept = 1;
    /* use the interlock while locking, so no one else can DOOM this */
    ilock_vnode(vp);
    vgone(vp);
    unlock_vnode(vp);
    vdrop(vp);

    AFS_GLOCK();
    return 0;
}
Example #9
/*
 * Now, the VXLOCK is in force and we're free to destroy the null vnode.
 */
static int
null_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct null_node *xp = VTONULL(vp);
	struct vnode *lowervp = xp->null_lowervp;

	if (lowervp)
		null_hashrem(xp);
	/*
	 * Use the interlock to protect the clearing of v_data to
	 * prevent faults in null_lock().
	 */
	lockmgr(&vp->v_lock, LK_EXCLUSIVE, NULL);
	VI_LOCK(vp);
	vp->v_data = NULL;
	vp->v_object = NULL;
	vp->v_vnlock = &vp->v_lock;
	VI_UNLOCK(vp);
	if (lowervp)
		vput(lowervp);
	else
		panic("null_reclaim: reclaiming a node with no lowervp");
	free(xp, M_NULLFSNODE);

	return (0);
}
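Examples 9 and 21 share a reclaim idiom: under the interlock, detach the per-filesystem data and point v_vnlock back at the vnode's own embedded lock, so a racing null_lock()/unionfs_lock() sees consistent state instead of faulting on freed memory. The core of it, as a hypothetical fragment:

	VI_LOCK(vp);
	vp->v_data = NULL;		/* detach the fs-private node */
	vp->v_object = NULL;
	vp->v_vnlock = &vp->v_lock;	/* stop sharing the lower vnode's lock */
	VI_UNLOCK(vp);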
Example #10
void
ext2_itimes(struct vnode *vp)
{

	VI_LOCK(vp);
	ext2_itimes_locked(vp);
	VI_UNLOCK(vp);
}
Example #11
/* Is this vnode being recycled now? */
int vnode_isrecycled(vnode_t vp)
{
	int ret;

	VI_LOCK(vp);
	ret = (vp->v_iflag & VI_DOOMED) ? 1 : 0;
	VI_UNLOCK(vp);
	return (ret);
}
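A typical caller (hypothetical, for illustration) uses this as a cheap early-out before operating on a vnode:

	if (vnode_isrecycled(vp))
		return (ENOENT);	/* vnode is being reclaimed; don't touch it */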
Example #12
/*
 * Close called.
 *
 * Update the times on the inode.
 */
static int
ext2_close(struct vop_close_args *ap)
{
	struct vnode *vp = ap->a_vp;

	VI_LOCK(vp);
	if (vp->v_usecount > 1)
		ext2_itimes_locked(vp);
	VI_UNLOCK(vp);
	return (0);
}
Example #13
/* ARGSUSED */
static int
null_getwritemount(struct vop_getwritemount_args *ap)
{
	struct null_node *xp;
	struct vnode *lowervp;
	struct vnode *vp;

	vp = ap->a_vp;
	VI_LOCK(vp);
	xp = VTONULL(vp);
	if (xp && (lowervp = xp->null_lowervp)) {
		VI_LOCK_FLAGS(lowervp, MTX_DUPOK);
		VI_UNLOCK(vp);
		vholdl(lowervp);
		VI_UNLOCK(lowervp);
		VOP_GETWRITEMOUNT(lowervp, ap->a_mpp);
		vdrop(lowervp);
	} else {
		VI_UNLOCK(vp);
		*(ap->a_mpp) = NULL;
	}
	return (0);
}
Example #14
/*
 * Like vn_rele() except if we are going to call VOP_INACTIVE() then do it
 * asynchronously using a taskq. This can avoid deadlocks caused by re-entering
 * the file system as a result of releasing the vnode. Note, file systems
 * already have to handle the race where the vnode is incremented before the
 * inactive routine is called and does its locking.
 *
 * Warning: Excessive use of this routine can lead to performance problems.
 * This is because taskqs throttle back allocation if too many are created.
 */
void
vn_rele_async(vnode_t *vp, taskq_t *taskq)
{
	VERIFY(vp->v_count > 0);
	VI_LOCK(vp);
	if (vp->v_count == 1 && !(vp->v_iflag & VI_DOINGINACT)) {
		VI_UNLOCK(vp);
		VERIFY(taskq_dispatch((taskq_t *)taskq,
		    (task_func_t *)vn_rele_inactive, vp, TQ_SLEEP) != 0);
		return;
	}
	vp->v_usecount--;
	vdropl(vp);
}
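An illustrative use, assuming the Solaris-compatibility taskq layer that ZFS ships with (the queue name and priority below are assumptions, not values from the source above):

	taskq_t *vnrele_tq;

	vnrele_tq = taskq_create("vnrele_taskq", 1, minclsyspri, 1, INT_MAX, 0);
	/* ... later, from a context that must not re-enter the fs: */
	vn_rele_async(vp, vnrele_tq);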
Example #15
/*
 * Remove the vnode.
 */
static void
unionfs_rem_cached_vnode(struct unionfs_node *unp, struct vnode *dvp)
{
	KASSERT((unp != NULL), ("unionfs_rem_cached_vnode: null node"));
	KASSERT((dvp != NULLVP),
	    ("unionfs_rem_cached_vnode: null parent vnode"));
	KASSERT((unp->un_hash.le_prev != NULL),
	    ("unionfs_rem_cached_vnode: null hash"));

	VI_LOCK(dvp);
	LIST_REMOVE(unp, un_hash);
	unp->un_hash.le_next = NULL;
	unp->un_hash.le_prev = NULL;
	VI_UNLOCK(dvp);
}
Example #16
/*
 * Q_SYNC - sync quota files to disk.
 */
int
qsync(struct mount *mp)
{
	struct ufsmount *ump = VFSTOUFS(mp);
	struct thread *td = curthread;		/* XXX */
	struct vnode *vp, *mvp;
	struct dquot *dq;
	int i, error;

	/*
	 * Check if the mount point has any quotas.
	 * If not, simply return.
	 */
	UFS_LOCK(ump);
	for (i = 0; i < MAXQUOTAS; i++)
		if (ump->um_quotas[i] != NULLVP)
			break;
	UFS_UNLOCK(ump);
	if (i == MAXQUOTAS)
		return (0);
	/*
	 * Search vnodes associated with this mount point,
	 * synchronizing any modified dquot structures.
	 */
again:
	MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) {
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			continue;
		}
		error = vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td);
		if (error) {
			if (error == ENOENT) {
				MNT_VNODE_FOREACH_ACTIVE_ABORT(mp, mvp);
				goto again;
			}
			continue;
		}
		for (i = 0; i < MAXQUOTAS; i++) {
			dq = VTOI(vp)->i_dquot[i];
			if (dq != NODQUOT)
				dqsync(vp, dq);
		}
		vput(vp);
	}
	return (0);
}
Example #17
/* ARGSUSED */
static int
nfs_sync(struct mount *mp, int waitfor)
{
	struct vnode *vp, *mvp;
	struct thread *td;
	int error, allerror = 0;

	td = curthread;

	MNT_ILOCK(mp);
	/*
	 * If a forced dismount is in progress, return from here so that
	 * the umount(2) syscall doesn't get stuck in VFS_SYNC() before
	 * calling VFS_UNMOUNT().
	 */
	if ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) {
		MNT_IUNLOCK(mp);
		return (EBADF);
	}

	/*
	 * Force stale buffer cache information to be flushed.
	 */
loop:
	MNT_VNODE_FOREACH(vp, mp, mvp) {
		VI_LOCK(vp);
		MNT_IUNLOCK(mp);
		/* XXX Racy bv_cnt check. */
		if (VOP_ISLOCKED(vp) || vp->v_bufobj.bo_dirty.bv_cnt == 0 ||
		    waitfor == MNT_LAZY) {
			VI_UNLOCK(vp);
			MNT_ILOCK(mp);
			continue;
		}
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
			MNT_ILOCK(mp);
			MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
			goto loop;
		}
		error = VOP_FSYNC(vp, waitfor, td);
		if (error)
			allerror = error;
		VOP_UNLOCK(vp, 0);
		vrele(vp);

		MNT_ILOCK(mp);
	}
Example #18
/*
 * We need to process our own vnode lock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
static int
null_lock(struct vop_lock1_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int flags = ap->a_flags;
	struct null_node *nn;
	struct vnode *lvp;
	int error;


	if ((flags & LK_INTERLOCK) == 0) {
		VI_LOCK(vp);
		ap->a_flags = flags |= LK_INTERLOCK;
	}
	nn = VTONULL(vp);
	/*
	 * If we're still active we must ask the lower layer to
	 * lock as ffs has special lock considerations in its
	 * vop lock.
	 */
	if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
		VI_LOCK_FLAGS(lvp, MTX_DUPOK);
		VI_UNLOCK(vp);
		/*
		 * We have to hold the vnode here to solve a potential
		 * reclaim race.  If we're forcibly vgone'd while we
		 * still have refs, a thread could be sleeping inside
		 * the lowervp's vop_lock routine.  When we vgone we will
		 * drop our last ref to the lowervp, which would allow it
		 * to be reclaimed.  The lowervp could then be recycled,
		 * in which case it is not legal to be sleeping in its VOP.
		 * We prevent it from being recycled by holding the vnode
		 * here.
		 */
		vholdl(lvp);
		error = VOP_LOCK(lvp, flags);

		/*
		 * We might have slept to get the lock and someone might have
		 * cleaned our vnode already, switching the vnode lock from the
		 * one in lowervp to v_lock in our own vnode structure.  Handle
		 * this case by reacquiring the correct lock in the requested
		 * mode.
		 */
		if (VTONULL(vp) == NULL && error == 0) {
			ap->a_flags &= ~(LK_TYPE_MASK | LK_INTERLOCK);
			switch (flags & LK_TYPE_MASK) {
			case LK_SHARED:
				ap->a_flags |= LK_SHARED;
				break;
			case LK_UPGRADE:
			case LK_EXCLUSIVE:
				ap->a_flags |= LK_EXCLUSIVE;
				break;
			default:
				panic("Unsupported lock request %d\n",
				    ap->a_flags);
			}
			VOP_UNLOCK(lvp, 0);
			error = vop_stdlock(ap);
		}
		vdrop(lvp);
	} else
		error = vop_stdlock(ap);

	return (error);
}
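Examples 2, 4, 13 and 18 all lean on the same stacked-filesystem idiom: take the lower vnode's interlock with MTX_DUPOK (both interlocks belong to the same lock class, so the duplicate acquisition must be declared intentional), hold the lower vnode so it cannot be recycled while this thread sleeps in its VOP, and pass the interlock down with LK_INTERLOCK. A hypothetical distillation, not code from any of the projects above:

static int
forward_to_lower(struct vnode *vp, struct vnode *lvp, int flags)
{
	int error;

	VI_LOCK(vp);
	VI_LOCK_FLAGS(lvp, MTX_DUPOK);	/* second interlock of the same class */
	VI_UNLOCK(vp);
	vholdl(lvp);			/* keep lvp from being recycled */
	error = VOP_LOCK(lvp, flags | LK_INTERLOCK);
	vdrop(lvp);
	return (error);
}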
Example #19
static int
vfs_mountroot_shuffle(struct thread *td, struct mount *mpdevfs)
{
	struct nameidata nd;
	struct mount *mporoot, *mpnroot;
	struct vnode *vp, *vporoot, *vpdevfs;
	char *fspath;
	int error;

	mpnroot = TAILQ_NEXT(mpdevfs, mnt_list);

	/* Shuffle the mountlist. */
	mtx_lock(&mountlist_mtx);
	mporoot = TAILQ_FIRST(&mountlist);
	TAILQ_REMOVE(&mountlist, mpdevfs, mnt_list);
	if (mporoot != mpdevfs) {
		TAILQ_REMOVE(&mountlist, mpnroot, mnt_list);
		TAILQ_INSERT_HEAD(&mountlist, mpnroot, mnt_list);
	}
	TAILQ_INSERT_TAIL(&mountlist, mpdevfs, mnt_list);
	mtx_unlock(&mountlist_mtx);

	cache_purgevfs(mporoot);
	if (mporoot != mpdevfs)
		cache_purgevfs(mpdevfs);

	VFS_ROOT(mporoot, LK_EXCLUSIVE, &vporoot);

	VI_LOCK(vporoot);
	vporoot->v_iflag &= ~VI_MOUNT;
	VI_UNLOCK(vporoot);
	vporoot->v_mountedhere = NULL;
	mporoot->mnt_flag &= ~MNT_ROOTFS;
	mporoot->mnt_vnodecovered = NULL;
	vput(vporoot);

	/* Set up the new rootvnode, and purge the cache */
	mpnroot->mnt_vnodecovered = NULL;
	set_rootvnode();
	cache_purgevfs(rootvnode->v_mount);

	if (mporoot != mpdevfs) {
		/* Remount old root under /.mount or /mnt */
		fspath = "/.mount";
		NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
		    fspath, td);
		error = namei(&nd);
		if (error) {
			NDFREE(&nd, NDF_ONLY_PNBUF);
			fspath = "/mnt";
			NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
			    fspath, td);
			error = namei(&nd);
		}
		if (!error) {
			vp = nd.ni_vp;
			error = (vp->v_type == VDIR) ? 0 : ENOTDIR;
			if (!error)
				error = vinvalbuf(vp, V_SAVE, 0, 0);
			if (!error) {
				cache_purge(vp);
				mporoot->mnt_vnodecovered = vp;
				vp->v_mountedhere = mporoot;
				strlcpy(mporoot->mnt_stat.f_mntonname,
				    fspath, MNAMELEN);
				VOP_UNLOCK(vp, 0);
			} else
				vput(vp);
		}
		NDFREE(&nd, NDF_ONLY_PNBUF);

		if (error && bootverbose)
			printf("mountroot: unable to remount previous root "
			    "under /.mount or /mnt (error %d).\n", error);
	}

	/* Remount devfs under /dev */
	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, "/dev", td);
	error = namei(&nd);
	if (!error) {
		vp = nd.ni_vp;
		error = (vp->v_type == VDIR) ? 0 : ENOTDIR;
		if (!error)
			error = vinvalbuf(vp, V_SAVE, 0, 0);
		if (!error) {
			vpdevfs = mpdevfs->mnt_vnodecovered;
			if (vpdevfs != NULL) {
				cache_purge(vpdevfs);
				vpdevfs->v_mountedhere = NULL;
				vrele(vpdevfs);
			}
			mpdevfs->mnt_vnodecovered = vp;
			vp->v_mountedhere = mpdevfs;
			VOP_UNLOCK(vp, 0);
		} else
			vput(vp);
	}
	if (error && bootverbose)
		printf("mountroot: unable to remount devfs under /dev "
		    "(error %d).\n", error);
	NDFREE(&nd, NDF_ONLY_PNBUF);

	if (mporoot == mpdevfs) {
		vfs_unbusy(mpdevfs);
		/* Unlink the no longer needed /dev/dev -> / symlink */
		error = kern_unlink(td, "/dev/dev", UIO_SYSSPACE);
		if (error && bootverbose)
			printf("mountroot: unable to unlink /dev/dev "
			    "(error %d)\n", error);
	}

	return (0);
}
Example #20
/*
 * Main code to turn off disk quotas for a filesystem. Does not change
 * flags.
 */
static int
quotaoff1(struct thread *td, struct mount *mp, int type)
{
    struct vnode *vp;
    struct vnode *qvp, *mvp;
    struct ufsmount *ump;
    struct dquot *dq;
    struct inode *ip;
    struct ucred *cr;
    int error;

    ump = VFSTOUFS(mp);

    UFS_LOCK(ump);
    KASSERT((ump->um_qflags[type] & QTF_CLOSING) != 0,
            ("quotaoff1: flags are invalid"));
    if ((qvp = ump->um_quotas[type]) == NULLVP) {
        UFS_UNLOCK(ump);
        return (0);
    }
    cr = ump->um_cred[type];
    UFS_UNLOCK(ump);

    /*
     * Search vnodes associated with this mount point,
     * deleting any references to quota file being closed.
     */
again:
    MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
        if (vp->v_type == VNON) {
            VI_UNLOCK(vp);
            continue;
        }
        if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
            MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
            goto again;
        }
        ip = VTOI(vp);
        dq = ip->i_dquot[type];
        ip->i_dquot[type] = NODQUOT;
        dqrele(vp, dq);
        VOP_UNLOCK(vp, 0);
        vrele(vp);
    }

    error = dqflush(qvp);
    if (error != 0)
        return (error);

    /*
     * Clear um_quotas before closing the quota vnode to prevent
     * access to the closed vnode from dqget/dqsync
     */
    UFS_LOCK(ump);
    ump->um_quotas[type] = NULLVP;
    ump->um_cred[type] = NOCRED;
    UFS_UNLOCK(ump);

    vn_lock(qvp, LK_EXCLUSIVE | LK_RETRY);
    qvp->v_vflag &= ~VV_SYSTEM;
    VOP_UNLOCK(qvp, 0);
    error = vn_close(qvp, FREAD|FWRITE, td->td_ucred, td);
    crfree(cr);

    return (error);
}
Example #21
/*
 * Clean up the unionfs node.
 */
void
unionfs_noderem(struct vnode *vp, struct thread *td)
{
	int		count;
	struct unionfs_node *unp, *unp_t1, *unp_t2;
	struct unionfs_node_hashhead *hd;
	struct unionfs_node_status *unsp, *unsp_tmp;
	struct vnode   *lvp;
	struct vnode   *uvp;
	struct vnode   *dvp;

	/*
	 * Use the interlock to protect the clearing of v_data to
	 * prevent faults in unionfs_lock().
	 */
	VI_LOCK(vp);
	unp = VTOUNIONFS(vp);
	lvp = unp->un_lowervp;
	uvp = unp->un_uppervp;
	dvp = unp->un_dvp;
	unp->un_lowervp = unp->un_uppervp = NULLVP;
	vp->v_vnlock = &(vp->v_lock);
	vp->v_data = NULL;
	vp->v_object = NULL;
	VI_UNLOCK(vp);

	if (lvp != NULLVP)
		VOP_UNLOCK(lvp, LK_RELEASE);
	if (uvp != NULLVP)
		VOP_UNLOCK(uvp, LK_RELEASE);

	if (dvp != NULLVP && unp->un_hash.le_prev != NULL)
		unionfs_rem_cached_vnode(unp, dvp);

	if (lockmgr(vp->v_vnlock, LK_EXCLUSIVE, VI_MTX(vp)) != 0)
		panic("the lock for deletion is unacquirable.");

	if (lvp != NULLVP)
		vrele(lvp);
	if (uvp != NULLVP)
		vrele(uvp);
	if (dvp != NULLVP) {
		vrele(dvp);
		unp->un_dvp = NULLVP;
	}
	if (unp->un_path != NULL) {
		free(unp->un_path, M_UNIONFSPATH);
		unp->un_path = NULL;
	}

	if (unp->un_hashtbl != NULL) {
		for (count = 0; count <= unp->un_hashmask; count++) {
			hd = unp->un_hashtbl + count;
			LIST_FOREACH_SAFE(unp_t1, hd, un_hash, unp_t2) {
				LIST_REMOVE(unp_t1, un_hash);
				unp_t1->un_hash.le_next = NULL;
				unp_t1->un_hash.le_prev = NULL;
			}
		}
		hashdestroy(unp->un_hashtbl, M_UNIONFSHASH, unp->un_hashmask);
	}
Example #22
/*
 * Unmount the filesystem described by mp.
 */
static int
msdosfs_unmount(struct mount *mp, int mntflags)
{
	struct msdosfsmount *pmp;
	int error, flags;

	error = flags = 0;
	pmp = VFSTOMSDOSFS(mp);
	if ((pmp->pm_flags & MSDOSFSMNT_RONLY) == 0)
		error = msdosfs_sync(mp, MNT_WAIT);
	if ((mntflags & MNT_FORCE) != 0)
		flags |= FORCECLOSE;
	else if (error != 0)
		return (error);
	error = vflush(mp, 0, flags, curthread);
	if (error != 0 && error != ENXIO)
		return (error);
	if ((pmp->pm_flags & MSDOSFSMNT_RONLY) == 0) {
		error = markvoldirty(pmp, 0);
		if (error && error != ENXIO) {
			(void)markvoldirty(pmp, 1);
			return (error);
		}
	}
	if (pmp->pm_flags & MSDOSFSMNT_KICONV && msdosfs_iconv) {
		if (pmp->pm_w2u)
			msdosfs_iconv->close(pmp->pm_w2u);
		if (pmp->pm_u2w)
			msdosfs_iconv->close(pmp->pm_u2w);
		if (pmp->pm_d2u)
			msdosfs_iconv->close(pmp->pm_d2u);
		if (pmp->pm_u2d)
			msdosfs_iconv->close(pmp->pm_u2d);
	}

#ifdef MSDOSFS_DEBUG
	{
		struct vnode *vp = pmp->pm_devvp;
		struct bufobj *bo;

		bo = &vp->v_bufobj;
		BO_LOCK(bo);
		VI_LOCK(vp);
		vn_printf(vp,
		    "msdosfs_umount(): just before calling VOP_CLOSE()\n");
		printf("freef %p, freeb %p, mount %p\n",
		    TAILQ_NEXT(vp, v_actfreelist), vp->v_actfreelist.tqe_prev,
		    vp->v_mount);
		printf("cleanblkhd %p, dirtyblkhd %p, numoutput %ld, type %d\n",
		    TAILQ_FIRST(&vp->v_bufobj.bo_clean.bv_hd),
		    TAILQ_FIRST(&vp->v_bufobj.bo_dirty.bv_hd),
		    vp->v_bufobj.bo_numoutput, vp->v_type);
		VI_UNLOCK(vp);
		BO_UNLOCK(bo);
	}
#endif
	DROP_GIANT();
	if (pmp->pm_devvp->v_type == VCHR && pmp->pm_devvp->v_rdev != NULL)
		pmp->pm_devvp->v_rdev->si_mountpt = NULL;
	g_topology_lock();
	g_vfs_close(pmp->pm_cp);
	g_topology_unlock();
	PICKUP_GIANT();
	vrele(pmp->pm_devvp);
	dev_rel(pmp->pm_dev);
	free(pmp->pm_inusemap, M_MSDOSFSFAT);
	if (pmp->pm_flags & MSDOSFS_LARGEFS)
		msdosfs_fileno_free(mp);
	lockdestroy(&pmp->pm_fatlock);
	free(pmp, M_MSDOSFSMNT);
	mp->mnt_data = NULL;
	MNT_ILOCK(mp);
	mp->mnt_flag &= ~MNT_LOCAL;
	MNT_IUNLOCK(mp);
	return (error);
}
Example #23
/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked `MPBUSY'.
 */
static int
ext2_sync(struct mount *mp, int waitfor)
{
	struct vnode *mvp, *vp;
	struct thread *td;
	struct inode *ip;
	struct ext2mount *ump = VFSTOEXT2(mp);
	struct m_ext2fs *fs;
	int error, allerror = 0;

	td = curthread;
	fs = ump->um_e2fs;
	if (fs->e2fs_fmod != 0 && fs->e2fs_ronly != 0) {		/* XXX */
		printf("fs = %s\n", fs->e2fs_fsmnt);
		panic("ext2_sync: rofs mod");
	}

	/*
	 * Write back each (modified) inode.
	 */
loop:
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			continue;
		}
		ip = VTOI(vp);
		if ((ip->i_flag &
		    (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
		    (vp->v_bufobj.bo_dirty.bv_cnt == 0 ||
		    waitfor == MNT_LAZY)) {
			VI_UNLOCK(vp);
			continue;
		}
		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, td);
		if (error) {
			if (error == ENOENT) {
				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
				goto loop;
			}
			continue;
		}
		if ((error = VOP_FSYNC(vp, waitfor, td)) != 0)
			allerror = error;
		VOP_UNLOCK(vp, 0);
		vrele(vp);
	}

	/*
	 * Force stale file system control information to be flushed.
	 */
	if (waitfor != MNT_LAZY) {
		vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
		if ((error = VOP_FSYNC(ump->um_devvp, waitfor, td)) != 0)
			allerror = error;
		VOP_UNLOCK(ump->um_devvp, 0);
	}

	/*
	 * Write back modified superblock.
	 */
	if (fs->e2fs_fmod != 0) {
		fs->e2fs_fmod = 0;
		fs->e2fs->e2fs_wtime = time_second;
		if ((error = ext2_cgupdate(ump, waitfor)) != 0)
			allerror = error;
	}
	return (allerror);
}
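Examples 16, 20 and 23 walk a mount's vnode list with the same skeleton: the iterator hands back each vnode with its interlock held, the interlock is passed to vget() via LK_INTERLOCK, and ENOENT (the vnode was recycled underneath us) forces a restart of the whole walk. A hypothetical distillation of that shape:

static int
walk_mount_vnodes(struct mount *mp)
{
	struct vnode *vp, *mvp;
	int error;

loop:
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);		/* iterator returned it interlocked */
			continue;
		}
		error = vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, curthread);
		if (error != 0) {
			if (error == ENOENT) {	/* vnode recycled under us */
				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
				goto loop;	/* restart the whole walk */
			}
			continue;
		}
		/* ... per-vnode work, vnode locked and referenced ... */
		vput(vp);
	}
	return (0);
}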
Example #24
File: gfs.c Project: RJVB/zfs
/*
 * gfs_file_inactive()
 *
 * Called from the VOP_INACTIVE() routine.  If necessary, this routine will
 * remove the given vnode from the parent directory and clean up any references
 * in the VFS layer.
 *
 * If the vnode was not removed (due to a race with vget), then NULL is
 * returned.  Otherwise, a pointer to the private data is returned.
 */
void *
gfs_file_inactive(struct vnode *vp)
{
	int i;
	gfs_dirent_t *ge = NULL;
	gfs_file_t *fp = vnode_fsnode(vp);
	gfs_dir_t *dp = NULL;
	void *data;

	if (fp == NULL)
		return (NULL);

	if (fp->gfs_parent == NULL /*|| (vp->v_flag & V_XATTRDIR)*/)
		goto found;

	/*
	 * XXX cope with a FreeBSD-specific race wherein the parent's
	 * snapshot data can be freed before the parent is
	 */
	if ((dp = vnode_fsnode(fp->gfs_parent)) == NULL)
		return (NULL);

	/*
	 * First, see if this vnode is cached in the parent.
	 */
	gfs_dir_lock(dp);

	/*
	 * Find it in the set of static entries.
	 */
	for (i = 0; i < dp->gfsd_nstatic; i++)  {
		ge = &dp->gfsd_static[i];

		if (ge->gfse_vnode == vp)
			goto found;
	}

	/*
	 * If 'ge' is NULL, then it is a dynamic entry.
	 */
	ge = NULL;

found:
#ifdef TODO
	if (vp->v_flag & V_XATTRDIR)
		VI_LOCK(fp->gfs_parent);
#endif
	VN_HOLD(vp);
	/*
	 * Really remove this vnode
	 */
	data = vnode_fsnode(vp);
	if (ge != NULL) {
		/*
		 * If this was a statically cached entry, simply set the
		 * cached vnode to NULL.
		 */
		ge->gfse_vnode = NULL;
	}
	VN_RELE(vp);

	/*
	 * Free vnode and release parent
	 */
	dprintf("freeing vp %p and parent %p\n", vp, fp->gfs_parent);
	if (fp->gfs_parent) {
		if (dp)
			gfs_dir_unlock(dp);
		/* VOP_UNLOCK(vp, 0); */
		VN_RELE(fp->gfs_parent);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	} else {
		ASSERT(vp->v_vfsp != NULL);
		VFS_RELE(vp->v_vfsp);
	}
#ifdef TODO
	if (vp->v_flag & V_XATTRDIR)
		VI_UNLOCK(fp->gfs_parent);
#endif
	return (data);
}
Example #25
int
mount_snapshot(kthread_t *td, vnode_t **vpp, const char *fstype, char *fspath,
    char *fspec, int fsflags)
{
	struct vfsconf *vfsp;
	struct mount *mp;
	vnode_t *vp, *mvp;
	struct ucred *cr;
	int error;

	/*
	 * Be ultra-paranoid about making sure the type and fspath
	 * variables will fit in our mp buffers, including the
	 * terminating NUL.
	 */
	if (strlen(fstype) >= MFSNAMELEN || strlen(fspath) >= MNAMELEN)
		return (ENAMETOOLONG);

	vfsp = vfs_byname_kld(fstype, td, &error);
	if (vfsp == NULL)
		return (ENODEV);

	vp = *vpp;
	if (vp->v_type != VDIR)
		return (ENOTDIR);
	/*
	 * We need vnode lock to protect v_mountedhere and vnode interlock
	 * to protect v_iflag.
	 */
	vn_lock(vp, LK_SHARED | LK_RETRY);
	VI_LOCK(vp);
	if ((vp->v_iflag & VI_MOUNT) != 0 || vp->v_mountedhere != NULL) {
		VI_UNLOCK(vp);
		VOP_UNLOCK(vp, 0);
		return (EBUSY);
	}
	vp->v_iflag |= VI_MOUNT;
	VI_UNLOCK(vp);
	VOP_UNLOCK(vp, 0);

	/*
	 * Allocate and initialize the filesystem.
	 * We don't want regular user that triggered snapshot mount to be able
	 * to unmount it, so pass credentials of the parent mount.
	 */
	mp = vfs_mount_alloc(vp, vfsp, fspath, vp->v_mount->mnt_cred);

	mp->mnt_optnew = NULL;
	vfs_setmntopt(mp, "from", fspec, 0);
	mp->mnt_optnew = mp->mnt_opt;
	mp->mnt_opt = NULL;

	/*
	 * Set the mount level flags.
	 */
	mp->mnt_flag = fsflags & MNT_UPDATEMASK;
	/*
	 * Snapshots are always read-only.
	 */
	mp->mnt_flag |= MNT_RDONLY;
	/*
	 * We don't want snapshots to allow access to vulnerable setuid
	 * programs, so we turn off setuid when mounting snapshots.
	 */
	mp->mnt_flag |= MNT_NOSUID;
	/*
	 * We don't want snapshots to be visible in regular
	 * mount(8) and df(1) output.
	 */
	mp->mnt_flag |= MNT_IGNORE;
	/*
	 * XXX: This is evil, but we can't mount a snapshot as a regular user.
	 * XXX: Is it safe when the snapshot is mounted from within a jail?
	 */
	cr = td->td_ucred;
	td->td_ucred = kcred;
	error = VFS_MOUNT(mp);
	td->td_ucred = cr;

	if (error != 0) {
		VI_LOCK(vp);
		vp->v_iflag &= ~VI_MOUNT;
		VI_UNLOCK(vp);
		vrele(vp);
		vfs_unbusy(mp);
		vfs_mount_destroy(mp);
		*vpp = NULL;
		return (error);
	}

	if (mp->mnt_opt != NULL)
		vfs_freeopts(mp->mnt_opt);
	mp->mnt_opt = mp->mnt_optnew;
	(void)VFS_STATFS(mp, &mp->mnt_stat);

	/*
	 * Prevent external consumers of mount options from reading
	 * mnt_optnew.
	*/
	mp->mnt_optnew = NULL;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
#ifdef FREEBSD_NAMECACHE
	cache_purge(vp);
#endif
	VI_LOCK(vp);
	vp->v_iflag &= ~VI_MOUNT;
	VI_UNLOCK(vp);

	vp->v_mountedhere = mp;
	/* Put the new filesystem on the mount list. */
	mtx_lock(&mountlist_mtx);
	TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	mtx_unlock(&mountlist_mtx);
	vfs_event_signal(NULL, VQ_MOUNT, 0);
	if (VFS_ROOT(mp, LK_EXCLUSIVE, &mvp))
		panic("mount: lost mount");
	vput(vp);
	vfs_unbusy(mp);
	*vpp = mvp;
	return (0);
}
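The VI_MOUNT dance above is the standard way to serialize mount attempts on a covered vnode: the flag is set under the interlock, concurrent mounters back off with EBUSY, and the flag is cleared (again under the interlock) on both the success and failure paths. The claim step in isolation, as a hypothetical sketch:

static int
claim_covered_vnode(struct vnode *vp)
{
	VI_LOCK(vp);
	if ((vp->v_iflag & VI_MOUNT) != 0 || vp->v_mountedhere != NULL) {
		VI_UNLOCK(vp);
		return (EBUSY);		/* another mount is in progress here */
	}
	vp->v_iflag |= VI_MOUNT;	/* claim the covered vnode */
	VI_UNLOCK(vp);
	return (0);
}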
Example #26
static int
VMBlockVFSUnmount(struct mount *mp,    // IN: filesystem to unmount
                  int mntflags,        // IN: unmount(2) flags (ex: MNT_FORCE)
                  struct thread *td)   // IN: caller's kernel thread context
{
   struct VMBlockMount *xmp;
   struct vnode *vp;
   void *mntdata;
   int error;
   int flags = 0, removed = 0;

   VMBLOCKDEBUG("VMBlockVFSUnmount: mp = %p\n", (void *)mp);

   xmp = MNTTOVMBLOCKMNT(mp);
   vp = xmp->rootVnode;

   VI_LOCK(vp);

   /*
    * VMBlocks reference the root vnode.  This check returns EBUSY if
    * VMBlocks still exist & the user isn't forcing us out.
    */
   if ((vp->v_usecount > 1) && !(mntflags & MNT_FORCE)) {
      VI_UNLOCK(vp);
      return EBUSY;
   }

   /*
    * FreeBSD forbids acquiring sleepable locks (ex: sx locks) while holding
    * non-sleepable locks (ex: mutexes).  The vnode interlock acquired above
    * is a mutex, and the Block* routines involve sx locks, so we need to
    * yield the interlock.
    *
    * In order to do this safely, we trade up to locking the entire vnode,
    * and indicate to the lock routine that we hold the interlock.  The lock
    * transfer will happen atomically.  (Er, at least within the scope of
    * the vnode subsystem.)
    */
   COMPAT_VOP_LOCK(vp, LK_EXCLUSIVE|LK_RETRY|LK_INTERLOCK, compat_td);

   removed = BlockRemoveAllBlocks(OS_UNKNOWN_BLOCKER);

   VI_LOCK(vp);
   vp->v_usecount -= removed;
   VI_UNLOCK(vp);
   COMPAT_VOP_UNLOCK(vp, 0, compat_td);

   if (mntflags & MNT_FORCE) {
      flags |= FORCECLOSE;
   }

   /* There is 1 extra root vnode reference (xmp->rootVnode). */
   error = vflush(mp, 1, flags, compat_td);
   if (error) {
      return error;
   }

   /*
    * Finally, throw away the VMBlockMount structure
    */
   mntdata = mp->mnt_data;
   mp->mnt_data = 0;
   free(mntdata, M_VMBLOCKFSMNT);
   return 0;
}
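The comment in the middle of this function describes the general escape hatch for the mutex-vs-sleepable-lock restriction: trade the interlock up to the full vnode lock atomically. With the stock FreeBSD API (rather than the COMPAT_VOP_LOCK wrapper used above), the handoff looks like this hypothetical fragment:

	VI_LOCK(vp);
	/* ... checks that need the interlock ... */
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK);
	/* only the (sleepable) vnode lock is held now */
	VOP_UNLOCK(vp, 0);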
Example #27
/*
 * Look up a vnode/nfsnode by file handle.
 * Callers must check for mount points!!
 * In all cases, a pointer to a
 * nfsnode structure is returned.
 * This variant takes a "struct nfsfh *" as second argument and uses
 * that structure up, either by hanging off the nfsnode or FREEing it.
 */
int
nfscl_nget(struct mount *mntp, struct vnode *dvp, struct nfsfh *nfhp,
    struct componentname *cnp, struct thread *td, struct nfsnode **npp,
    void *stuff, int lkflags)
{
	struct nfsnode *np, *dnp;
	struct vnode *vp, *nvp;
	struct nfsv4node *newd, *oldd;
	int error;
	u_int hash;
	struct nfsmount *nmp;

	nmp = VFSTONFS(mntp);
	dnp = VTONFS(dvp);
	*npp = NULL;

	hash = fnv_32_buf(nfhp->nfh_fh, nfhp->nfh_len, FNV1_32_INIT);

	error = vfs_hash_get(mntp, hash, lkflags,
	    td, &nvp, newnfs_vncmpf, nfhp);
	if (error == 0 && nvp != NULL) {
		/*
		 * I believe there is a slight chance that vgonel() could
		 * get called on this vnode between when NFSVOPLOCK() drops
		 * the VI_LOCK() and vget() acquires it again, so that it
		 * hasn't yet had v_usecount incremented. If this were to
		 * happen, the VI_DOOMED flag would be set, so check for
		 * that here. Since we now have the v_usecount incremented,
		 * we should be ok until we vrele() it, if the VI_DOOMED
		 * flag isn't set now.
		 */
		VI_LOCK(nvp);
		if ((nvp->v_iflag & VI_DOOMED)) {
			VI_UNLOCK(nvp);
			vrele(nvp);
			error = ENOENT;
		} else {
			VI_UNLOCK(nvp);
		}
	}
	if (error) {
		FREE((caddr_t)nfhp, M_NFSFH);
		return (error);
	}
	if (nvp != NULL) {
		np = VTONFS(nvp);
		/*
		 * For NFSv4, check to see if it is the same name and
		 * replace the name, if it is different.
		 */
		oldd = newd = NULL;
		if ((nmp->nm_flag & NFSMNT_NFSV4) && np->n_v4 != NULL &&
		    nvp->v_type == VREG &&
		    (np->n_v4->n4_namelen != cnp->cn_namelen ||
		     NFSBCMP(cnp->cn_nameptr, NFS4NODENAME(np->n_v4),
		     cnp->cn_namelen) ||
		     dnp->n_fhp->nfh_len != np->n_v4->n4_fhlen ||
		     NFSBCMP(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
		     dnp->n_fhp->nfh_len))) {
		    MALLOC(newd, struct nfsv4node *,
			sizeof (struct nfsv4node) + dnp->n_fhp->nfh_len +
			+ cnp->cn_namelen - 1, M_NFSV4NODE, M_WAITOK);
		    NFSLOCKNODE(np);
		    if (newd != NULL && np->n_v4 != NULL && nvp->v_type == VREG
			&& (np->n_v4->n4_namelen != cnp->cn_namelen ||
			 NFSBCMP(cnp->cn_nameptr, NFS4NODENAME(np->n_v4),
			 cnp->cn_namelen) ||
			 dnp->n_fhp->nfh_len != np->n_v4->n4_fhlen ||
			 NFSBCMP(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
			 dnp->n_fhp->nfh_len))) {
			oldd = np->n_v4;
			np->n_v4 = newd;
			newd = NULL;
			np->n_v4->n4_fhlen = dnp->n_fhp->nfh_len;
			np->n_v4->n4_namelen = cnp->cn_namelen;
			NFSBCOPY(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
			    dnp->n_fhp->nfh_len);
			NFSBCOPY(cnp->cn_nameptr, NFS4NODENAME(np->n_v4),
			    cnp->cn_namelen);
		    }
		    NFSUNLOCKNODE(np);
		}
Example #28
/*
 * gfs_file_inactive()
 *
 * Called from the VOP_INACTIVE() routine.  If necessary, this routine will
 * remove the given vnode from the parent directory and clean up any references
 * in the VFS layer.
 *
 * If the vnode was not removed (due to a race with vget), then NULL is
 * returned.  Otherwise, a pointer to the private data is returned.
 */
void *
gfs_file_inactive(vnode_t *vp)
{
	int i;
	gfs_dirent_t *ge = NULL;
	gfs_file_t *fp = vp->v_data;
	gfs_dir_t *dp = NULL;
	void *data;

	if (fp->gfs_parent == NULL || (vp->v_flag & V_XATTRDIR))
		goto found;

	/*
	 * XXX cope with a FreeBSD-specific race wherein the parent's
	 * snapshot data can be freed before the parent is
	 */
	if ((dp = fp->gfs_parent->v_data) == NULL)
		return (NULL);
		
	/*
	 * First, see if this vnode is cached in the parent.
	 */
	gfs_dir_lock(dp);

	/*
	 * Find it in the set of static entries.
	 */
	for (i = 0; i < dp->gfsd_nstatic; i++)  {
		ge = &dp->gfsd_static[i];

		if (ge->gfse_vnode == vp)
			goto found;
	}

	/*
	 * If 'ge' is NULL, then it is a dynamic entry.
	 */
	ge = NULL;

found:
	if (vp->v_flag & V_XATTRDIR)
		VI_LOCK(fp->gfs_parent);
	VI_LOCK(vp);
	/*
	 * Really remove this vnode
	 */
	data = vp->v_data;
	if (ge != NULL) {
		/*
		 * If this was a statically cached entry, simply set the
		 * cached vnode to NULL.
		 */
		ge->gfse_vnode = NULL;
	}
	VI_UNLOCK(vp);

	/*
	 * Free vnode and release parent
	 */
	if (fp->gfs_parent) {
		if (dp)
			gfs_dir_unlock(dp);
		VI_LOCK(fp->gfs_parent);
		fp->gfs_parent->v_usecount--;
		VI_UNLOCK(fp->gfs_parent);
	} else {
		ASSERT(vp->v_vfsp != NULL);
		VFS_RELE(vp->v_vfsp);
	}
	if (vp->v_flag & V_XATTRDIR)
		VI_UNLOCK(fp->gfs_parent);

	return (data);
}