Example #1
static int
smbfs_node_alloc(struct mount *mp, struct vnode *dvp,
	const char *name, int nmlen, struct smbfattr *fap, struct vnode **vpp)
{
	struct thread *td = curthread;	/* XXX */
	struct smbmount *smp = VFSTOSMBFS(mp);
	struct smbnode_hashhead *nhpp;
	struct smbnode *np, *np2, *dnp;
	struct vnode *vp;
	u_long hashval;
	int error;

	*vpp = NULL;
	if (smp->sm_root != NULL && dvp == NULL) {
		SMBERROR("do not allocate root vnode twice!\n");
		return EINVAL;
	}
	if (nmlen == 2 && bcmp(name, "..", 2) == 0) {
		if (dvp == NULL)
			return EINVAL;
		vp = VTOSMB(dvp)->n_parent->n_vnode;
		error = vget(vp, LK_EXCLUSIVE, td);
		if (error == 0)
			*vpp = vp;
		return error;
	} else if (nmlen == 1 && name[0] == '.') {
		SMBERROR("do not call me with dot!\n");
		return EINVAL;
	}
	dnp = dvp ? VTOSMB(dvp) : NULL;
	if (dnp == NULL && dvp != NULL) {
		vprint("smbfs_node_alloc: dead parent vnode", dvp);
		return EINVAL;
	}
	hashval = smbfs_hash(name, nmlen);
retry:
	smbfs_hash_lock(smp, td);
loop:
	nhpp = SMBFS_NOHASH(smp, hashval);
	LIST_FOREACH(np, nhpp, n_hash) {
		vp = SMBTOV(np);
		if (np->n_parent != dnp ||
		    np->n_nmlen != nmlen || bcmp(name, np->n_name, nmlen) != 0)
			continue;
		VI_LOCK(vp);
		smbfs_hash_unlock(smp, td);
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td) != 0)
			goto retry;
		*vpp = vp;
		return 0;
	}
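The lookup idiom above recurs throughout this collection (Examples #1, #4, #8, #13 and #17): take the vnode interlock while the hash or mount lock is still held, drop that lock, and pass LK_INTERLOCK so vget() releases the interlock atomically while acquiring the vnode; if vget() fails, the vnode was reclaimed underneath us and the scan restarts. A condensed sketch of the idiom follows; the xx_* names, the key comparison, and the hash mutex are hypothetical placeholders, not taken from any of the file systems quoted here.

/*
 * Sketch only.  The interlock is taken before the hash lock is dropped
 * so the vnode cannot be freed in between, and LK_INTERLOCK tells
 * vget() to release that interlock as part of taking its reference.
 */
static int
xx_hash_lookup(struct xx_hashhead *head, const void *key, struct vnode **vpp)
{
	struct xx_node *np;
	struct vnode *vp;

retry:
	mtx_lock(&xx_hash_mtx);
	LIST_FOREACH(np, head, n_hash) {
		if (!xx_match(np, key))
			continue;
		vp = XXTOV(np);
		VI_LOCK(vp);
		mtx_unlock(&xx_hash_mtx);	/* hash lock first, interlock second */
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, curthread) != 0)
			goto retry;		/* vnode was reclaimed; rescan */
		*vpp = vp;
		return (0);
	}
	mtx_unlock(&xx_hash_mtx);
	*vpp = NULL;
	return (ENOENT);
}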
Example #2
/*
 * Like vn_rele() except if we are going to call VOP_INACTIVE() then do it
 * asynchronously using a taskq. This can avoid deadlocks caused by re-entering
 * the file system as a result of releasing the vnode. Note, file systems
 * already have to handle the race where the vnode is incremented before the
 * inactive routine is called and does its locking.
 *
 * Warning: Excessive use of this routine can lead to performance problems.
 * This is because taskqs throttle back allocation if too many are created.
 */
void
vn_rele_async(vnode_t *vp, taskq_t *taskq)
{
	VERIFY(vp->v_count > 0);
	VI_LOCK(vp);
	if (vp->v_count == 1 && !(vp->v_iflag & VI_DOINGINACT)) {
		VI_UNLOCK(vp);
		VERIFY(taskq_dispatch((taskq_t *)taskq,
		    (task_func_t *)vn_rele_inactive, vp, TQ_SLEEP) != 0);
		return;
	}
	vp->v_usecount--;
	vdropl(vp);
}
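A minimal usage sketch for vn_rele_async() above, assuming the caller has created a dedicated taskq elsewhere with taskq_create(); the function name, the taskq, and the calling context are illustrative, not part of the original source.

/*
 * Hypothetical caller that must not re-enter the file system while
 * dropping its reference: the final VOP_INACTIVE(), if any, runs later
 * from a taskq thread instead of in this thread's context.
 */
static void
xx_release_vnode_async(vnode_t *vp, taskq_t *vn_rele_taskq)
{
	vn_rele_async(vp, vn_rele_taskq);
}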
Example #3
/*
 * Remove the vnode.
 */
static void
unionfs_rem_cached_vnode(struct unionfs_node *unp, struct vnode *dvp)
{
	KASSERT((unp != NULL), ("unionfs_rem_cached_vnode: null node"));
	KASSERT((dvp != NULLVP),
	    ("unionfs_rem_cached_vnode: null parent vnode"));
	KASSERT((unp->un_hash.le_prev != NULL),
	    ("unionfs_rem_cached_vnode: null hash"));

	VI_LOCK(dvp);
	LIST_REMOVE(unp, un_hash);
	unp->un_hash.le_next = NULL;
	unp->un_hash.le_prev = NULL;
	VI_UNLOCK(dvp);
}
Example #4
/* ARGSUSED */
static int
nfs_sync(struct mount *mp, int waitfor)
{
	struct vnode *vp, *mvp;
	struct thread *td;
	int error, allerror = 0;

	td = curthread;

	MNT_ILOCK(mp);
	/*
	 * If a forced dismount is in progress, return from here so that
	 * the umount(2) syscall doesn't get stuck in VFS_SYNC() before
	 * calling VFS_UNMOUNT().
	 */
	if ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) {
		MNT_IUNLOCK(mp);
		return (EBADF);
	}

	/*
	 * Force stale buffer cache information to be flushed.
	 */
loop:
	MNT_VNODE_FOREACH(vp, mp, mvp) {
		VI_LOCK(vp);
		MNT_IUNLOCK(mp);
		/* XXX Racy bv_cnt check. */
		if (VOP_ISLOCKED(vp) || vp->v_bufobj.bo_dirty.bv_cnt == 0 ||
		    waitfor == MNT_LAZY) {
			VI_UNLOCK(vp);
			MNT_ILOCK(mp);
			continue;
		}
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
			MNT_ILOCK(mp);
			MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
			goto loop;
		}
		error = VOP_FSYNC(vp, waitfor, td);
		if (error)
			allerror = error;
		VOP_UNLOCK(vp, 0);
		vrele(vp);

		MNT_ILOCK(mp);
	}
Example #5
static int
ufs_lookup_upgrade_lock(struct vnode *vp)
{
	int error;

	ASSERT_VOP_LOCKED(vp, __FUNCTION__);
	if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE)
		return (0);

	error = 0;

	/*
	 * Upgrade vnode lock, since getinoquota()
	 * requires exclusive lock to modify inode.
	 */
	vhold(vp);
	vn_lock(vp, LK_UPGRADE | LK_RETRY);
	VI_LOCK(vp);
	if (vp->v_iflag & VI_DOOMED)
		error = ENOENT;
	vdropl(vp);
	return (error);
}
Example #6
/* ARGSUSED */
static int
null_getwritemount(struct vop_getwritemount_args *ap)
{
	struct null_node *xp;
	struct vnode *lowervp;
	struct vnode *vp;

	vp = ap->a_vp;
	VI_LOCK(vp);
	xp = VTONULL(vp);
	if (xp && (lowervp = xp->null_lowervp)) {
		VI_LOCK_FLAGS(lowervp, MTX_DUPOK);
		VI_UNLOCK(vp);
		vholdl(lowervp);
		VI_UNLOCK(lowervp);
		VOP_GETWRITEMOUNT(lowervp, ap->a_mpp);
		vdrop(lowervp);
	} else {
		VI_UNLOCK(vp);
		*(ap->a_mpp) = NULL;
	}
	return (0);
}
Example #7
/*
 * Add the new vnode into cache.
 */
static struct vnode *
unionfs_ins_cached_vnode(struct unionfs_node *uncp,
			struct vnode *dvp, char *path)
{
	struct unionfs_node_hashhead *hd;
	struct unionfs_node *unp;
	struct vnode   *vp;

	KASSERT((uncp->un_uppervp==NULLVP || uncp->un_uppervp->v_type==VDIR),
	    ("unionfs_ins_cached_vnode: v_type != VDIR"));
	KASSERT((uncp->un_lowervp==NULLVP || uncp->un_lowervp->v_type==VDIR),
	    ("unionfs_ins_cached_vnode: v_type != VDIR"));

	VI_LOCK(dvp);
	hd = unionfs_get_hashhead(dvp, path);
	LIST_FOREACH(unp, hd, un_hash) {
		if (!strcmp(unp->un_path, path)) {
			vp = UNIONFSTOV(unp);
			VI_LOCK_FLAGS(vp, MTX_DUPOK);
			vp->v_iflag &= ~VI_OWEINACT;
			if ((vp->v_iflag & (VI_DOOMED | VI_DOINGINACT)) != 0) {
				LIST_INSERT_HEAD(hd, uncp, un_hash);
				VI_UNLOCK(vp);
				vp = NULLVP;
			} else
				VI_UNLOCK(vp);
			VI_UNLOCK(dvp);
			return (vp);
		}
	}

	LIST_INSERT_HEAD(hd, uncp, un_hash);
	VI_UNLOCK(dvp);

	return (NULLVP);
}
Example #8
static int
smbfs_node_alloc(struct mount *mp, struct vnode *dvp,
	const char *name, int nmlen, struct smbfattr *fap, struct vnode **vpp)
{
	struct vattr vattr;
	struct thread *td = curthread;	/* XXX */
	struct smbmount *smp = VFSTOSMBFS(mp);
	struct smbnode_hashhead *nhpp;
	struct smbnode *np, *np2, *dnp;
	struct vnode *vp;
	u_long hashval;
	int error;

	*vpp = NULL;
	if (smp->sm_root != NULL && dvp == NULL) {
		SMBERROR("do not allocate root vnode twice!\n");
		return EINVAL;
	}
	if (nmlen == 2 && bcmp(name, "..", 2) == 0) {
		if (dvp == NULL)
			return EINVAL;
		vp = VTOSMB(VTOSMB(dvp)->n_parent)->n_vnode;
		error = vget(vp, LK_EXCLUSIVE, td);
		if (error == 0)
			*vpp = vp;
		return error;
	} else if (nmlen == 1 && name[0] == '.') {
		SMBERROR("do not call me with dot!\n");
		return EINVAL;
	}
	dnp = dvp ? VTOSMB(dvp) : NULL;
	if (dnp == NULL && dvp != NULL) {
		vprint("smbfs_node_alloc: dead parent vnode", dvp);
		return EINVAL;
	}
	hashval = smbfs_hash(name, nmlen);
retry:
	smbfs_hash_lock(smp);
loop:
	nhpp = SMBFS_NOHASH(smp, hashval);
	LIST_FOREACH(np, nhpp, n_hash) {
		vp = SMBTOV(np);
		if (np->n_parent != dvp ||
		    np->n_nmlen != nmlen || bcmp(name, np->n_name, nmlen) != 0)
			continue;
		VI_LOCK(vp);
		smbfs_hash_unlock(smp);
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td) != 0)
			goto retry;
		/* Force cached attributes to be refreshed if stale. */
		(void)VOP_GETATTR(vp, &vattr, td->td_ucred);
		/*
		 * If the file type on the server is inconsistent with
		 * what it was when we created the vnode, kill the
		 * bogus vnode now and fall through to the code below
		 * to create a new one with the right type.
		 */
		if ((vp->v_type == VDIR && (np->n_dosattr & SMB_FA_DIR) == 0) ||
		    (vp->v_type == VREG && (np->n_dosattr & SMB_FA_DIR) != 0)) {
			vgone(vp);
			vput(vp);
			break;
		}
		*vpp = vp;
		return 0;
	}
Example #9
static int
vfs_mountroot_shuffle(struct thread *td, struct mount *mpdevfs)
{
	struct nameidata nd;
	struct mount *mporoot, *mpnroot;
	struct vnode *vp, *vporoot, *vpdevfs;
	char *fspath;
	int error;

	mpnroot = TAILQ_NEXT(mpdevfs, mnt_list);

	/* Shuffle the mountlist. */
	mtx_lock(&mountlist_mtx);
	mporoot = TAILQ_FIRST(&mountlist);
	TAILQ_REMOVE(&mountlist, mpdevfs, mnt_list);
	if (mporoot != mpdevfs) {
		TAILQ_REMOVE(&mountlist, mpnroot, mnt_list);
		TAILQ_INSERT_HEAD(&mountlist, mpnroot, mnt_list);
	}
	TAILQ_INSERT_TAIL(&mountlist, mpdevfs, mnt_list);
	mtx_unlock(&mountlist_mtx);

	cache_purgevfs(mporoot);
	if (mporoot != mpdevfs)
		cache_purgevfs(mpdevfs);

	VFS_ROOT(mporoot, LK_EXCLUSIVE, &vporoot);

	VI_LOCK(vporoot);
	vporoot->v_iflag &= ~VI_MOUNT;
	VI_UNLOCK(vporoot);
	vporoot->v_mountedhere = NULL;
	mporoot->mnt_flag &= ~MNT_ROOTFS;
	mporoot->mnt_vnodecovered = NULL;
	vput(vporoot);

	/* Set up the new rootvnode, and purge the cache */
	mpnroot->mnt_vnodecovered = NULL;
	set_rootvnode();
	cache_purgevfs(rootvnode->v_mount);

	if (mporoot != mpdevfs) {
		/* Remount old root under /.mount or /mnt */
		fspath = "/.mount";
		NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
		    fspath, td);
		error = namei(&nd);
		if (error) {
			NDFREE(&nd, NDF_ONLY_PNBUF);
			fspath = "/mnt";
			NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
			    fspath, td);
			error = namei(&nd);
		}
		if (!error) {
			vp = nd.ni_vp;
			error = (vp->v_type == VDIR) ? 0 : ENOTDIR;
			if (!error)
				error = vinvalbuf(vp, V_SAVE, 0, 0);
			if (!error) {
				cache_purge(vp);
				mporoot->mnt_vnodecovered = vp;
				vp->v_mountedhere = mporoot;
				strlcpy(mporoot->mnt_stat.f_mntonname,
				    fspath, MNAMELEN);
				VOP_UNLOCK(vp, 0);
			} else
				vput(vp);
		}
		NDFREE(&nd, NDF_ONLY_PNBUF);

		if (error && bootverbose)
			printf("mountroot: unable to remount previous root "
			    "under /.mount or /mnt (error %d).\n", error);
	}

	/* Remount devfs under /dev */
	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, "/dev", td);
	error = namei(&nd);
	if (!error) {
		vp = nd.ni_vp;
		error = (vp->v_type == VDIR) ? 0 : ENOTDIR;
		if (!error)
			error = vinvalbuf(vp, V_SAVE, 0, 0);
		if (!error) {
			vpdevfs = mpdevfs->mnt_vnodecovered;
			if (vpdevfs != NULL) {
				cache_purge(vpdevfs);
				vpdevfs->v_mountedhere = NULL;
				vrele(vpdevfs);
			}
			mpdevfs->mnt_vnodecovered = vp;
			vp->v_mountedhere = mpdevfs;
			VOP_UNLOCK(vp, 0);
		} else
			vput(vp);
	}
	if (error && bootverbose)
		printf("mountroot: unable to remount devfs under /dev "
		    "(error %d).\n", error);
	NDFREE(&nd, NDF_ONLY_PNBUF);

	if (mporoot == mpdevfs) {
		vfs_unbusy(mpdevfs);
		/* Unlink the no longer needed /dev/dev -> / symlink */
		error = kern_unlink(td, "/dev/dev", UIO_SYSSPACE);
		if (error && bootverbose)
			printf("mountroot: unable to unlink /dev/dev "
			    "(error %d)\n", error);
	}

	return (0);
}
Example #10
File: gfs.c Project: RJVB/zfs
/*
 * gfs_file_inactive()
 *
 * Called from the VOP_INACTIVE() routine.  If necessary, this routine will
 * remove the given vnode from the parent directory and clean up any references
 * in the VFS layer.
 *
 * If the vnode was not removed (due to a race with vget), then NULL is
 * returned.  Otherwise, a pointer to the private data is returned.
 */
void *
gfs_file_inactive(struct vnode *vp)
{
	int i;
	gfs_dirent_t *ge = NULL;
	gfs_file_t *fp = vnode_fsnode(vp);
	gfs_dir_t *dp = NULL;
	void *data;

	if (fp == NULL)
		return (NULL);

	if (fp->gfs_parent == NULL /*|| (vp->v_flag & V_XATTRDIR)*/)
		goto found;

	/*
	 * XXX cope with a FreeBSD-specific race wherein the parent's
	 * snapshot data can be freed before the parent is
	 */
	if ((dp = vnode_fsnode(fp->gfs_parent)) == NULL)
		return (NULL);

	/*
	 * First, see if this vnode is cached in the parent.
	 */
	gfs_dir_lock(dp);

	/*
	 * Find it in the set of static entries.
	 */
	for (i = 0; i < dp->gfsd_nstatic; i++)  {
		ge = &dp->gfsd_static[i];

		if (ge->gfse_vnode == vp)
			goto found;
	}

	/*
	 * If 'ge' is NULL, then it is a dynamic entry.
	 */
	ge = NULL;

found:
#ifdef TODO
	if (vp->v_flag & V_XATTRDIR)
		VI_LOCK(fp->gfs_parent);
#endif
	VN_HOLD(vp);
	/*
	 * Really remove this vnode
	 */
	data = vnode_fsnode(vp);
	if (ge != NULL) {
		/*
		 * If this was a statically cached entry, simply set the
		 * cached vnode to NULL.
		 */
		ge->gfse_vnode = NULL;
	}
	VN_RELE(vp);

	/*
	 * Free vnode and release parent
	 */
    dprintf("freeing vp %p and parent %p\n", vp, fp->gfs_parent);
	if (fp->gfs_parent) {
		if (dp)
			gfs_dir_unlock(dp);
		//VOP_UNLOCK(vp, 0);
		VN_RELE(fp->gfs_parent);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	} else {
		ASSERT(vp->v_vfsp != NULL);
		VFS_RELE(vp->v_vfsp);
	}
#ifdef TODO
	if (vp->v_flag & V_XATTRDIR)
		VI_UNLOCK(fp->gfs_parent);
#endif
	return (data);
}
Example #11
/*
 * Look up a vnode/nfsnode by file handle.
 * Callers must check for mount points!!
 * In all cases, a pointer to a
 * nfsnode structure is returned.
 * This variant takes a "struct nfsfh *" as second argument and uses
 * that structure up, either by hanging off the nfsnode or FREEing it.
 */
int
nfscl_nget(struct mount *mntp, struct vnode *dvp, struct nfsfh *nfhp,
    struct componentname *cnp, struct thread *td, struct nfsnode **npp,
    void *stuff, int lkflags)
{
	struct nfsnode *np, *dnp;
	struct vnode *vp, *nvp;
	struct nfsv4node *newd, *oldd;
	int error;
	u_int hash;
	struct nfsmount *nmp;

	nmp = VFSTONFS(mntp);
	dnp = VTONFS(dvp);
	*npp = NULL;

	hash = fnv_32_buf(nfhp->nfh_fh, nfhp->nfh_len, FNV1_32_INIT);

	error = vfs_hash_get(mntp, hash, lkflags,
	    td, &nvp, newnfs_vncmpf, nfhp);
	if (error == 0 && nvp != NULL) {
		/*
		 * I believe there is a slight chance that vgonel() could
		 * get called on this vnode between when NFSVOPLOCK() drops
		 * the VI_LOCK() and vget() acquires it again, so that it
		 * hasn't yet had v_usecount incremented. If this were to
		 * happen, the VI_DOOMED flag would be set, so check for
		 * that here. Since we now have the v_usecount incremented,
		 * we should be ok until we vrele() it, if the VI_DOOMED
		 * flag isn't set now.
		 */
		VI_LOCK(nvp);
		if ((nvp->v_iflag & VI_DOOMED)) {
			VI_UNLOCK(nvp);
			vrele(nvp);
			error = ENOENT;
		} else {
			VI_UNLOCK(nvp);
		}
	}
	if (error) {
		FREE((caddr_t)nfhp, M_NFSFH);
		return (error);
	}
	if (nvp != NULL) {
		np = VTONFS(nvp);
		/*
		 * For NFSv4, check to see if it is the same name and
		 * replace the name, if it is different.
		 */
		oldd = newd = NULL;
		if ((nmp->nm_flag & NFSMNT_NFSV4) && np->n_v4 != NULL &&
		    nvp->v_type == VREG &&
		    (np->n_v4->n4_namelen != cnp->cn_namelen ||
		     NFSBCMP(cnp->cn_nameptr, NFS4NODENAME(np->n_v4),
		     cnp->cn_namelen) ||
		     dnp->n_fhp->nfh_len != np->n_v4->n4_fhlen ||
		     NFSBCMP(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
		     dnp->n_fhp->nfh_len))) {
		    MALLOC(newd, struct nfsv4node *,
			sizeof (struct nfsv4node) + dnp->n_fhp->nfh_len +
			+ cnp->cn_namelen - 1, M_NFSV4NODE, M_WAITOK);
		    NFSLOCKNODE(np);
		    if (newd != NULL && np->n_v4 != NULL && nvp->v_type == VREG
			&& (np->n_v4->n4_namelen != cnp->cn_namelen ||
			 NFSBCMP(cnp->cn_nameptr, NFS4NODENAME(np->n_v4),
			 cnp->cn_namelen) ||
			 dnp->n_fhp->nfh_len != np->n_v4->n4_fhlen ||
			 NFSBCMP(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
			 dnp->n_fhp->nfh_len))) {
			oldd = np->n_v4;
			np->n_v4 = newd;
			newd = NULL;
			np->n_v4->n4_fhlen = dnp->n_fhp->nfh_len;
			np->n_v4->n4_namelen = cnp->cn_namelen;
			NFSBCOPY(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
			    dnp->n_fhp->nfh_len);
			NFSBCOPY(cnp->cn_nameptr, NFS4NODENAME(np->n_v4),
			    cnp->cn_namelen);
		    }
		    NFSUNLOCKNODE(np);
		}
Example #12
/* Try to invalidate pages, for "fs flush" or "fs flushv"; or
 * try to free pages, when deleting a file.
 *
 * Locking:  the vcache entry's lock is held.  It may be dropped and 
 * re-obtained.
 *
 * Since we drop and re-obtain the lock, we can't guarantee that there won't
 * be some pages around when we return, newly created by concurrent activity.
 */
void
osi_VM_TryToSmush(struct vcache *avc, afs_ucred_t *acred, int sync)
{
    struct vnode *vp;
    int tries, code;
    int islocked;

    SPLVAR;

    vp = AFSTOV(avc);

    VI_LOCK(vp);
    if (vp->v_iflag & VI_DOOMED) {
	VI_UNLOCK(vp);
	USERPRI;
	return;
    }
    VI_UNLOCK(vp);

    islocked = VOP_ISLOCKED(vp);
    if (islocked == LK_EXCLOTHER)
	panic("Trying to Smush over someone else's lock");
    else if (islocked == LK_SHARED) {
	afs_warn("Trying to Smush with a shared lock");
	vn_lock(vp, LK_UPGRADE);
    } else if (!islocked)
	vn_lock(vp, LK_EXCLUSIVE);

    if (vp->v_bufobj.bo_object != NULL) {
	VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
	/*
	 * Do we really want OBJPC_SYNC?  OBJPC_INVAL would be
	 * faster, if invalidation is really what we are being
	 * asked to do.  (It would make more sense, too, since
	 * otherwise this function is practically identical to
	 * osi_VM_StoreAllSegments().)  -GAW
	 */

	/*
	 * Dunno.  We no longer resemble osi_VM_StoreAllSegments,
	 * though maybe that's wrong, now.  And OBJPC_SYNC is the
	 * common thing in 70 file systems, it seems.  Matt.
	 */

	vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
	VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
    }

    tries = 5;
    code = osi_vinvalbuf(vp, V_SAVE, PCATCH, 0);
    while (code && (tries > 0)) {
	afs_warn("TryToSmush retrying vinvalbuf");
	code = osi_vinvalbuf(vp, V_SAVE, PCATCH, 0);
	--tries;
    }
    if (islocked == LK_SHARED)
	vn_lock(vp, LK_DOWNGRADE);
    else if (!islocked)
	VOP_UNLOCK(vp, 0);
    USERPRI;
}
Example #13
int
fdesc_allocvp(fdntype ftype, unsigned fd_fd, int ix, struct mount *mp,
    struct vnode **vpp)
{
	struct fdescmount *fmp;
	struct fdhashhead *fc;
	struct fdescnode *fd, *fd2;
	struct vnode *vp, *vp2;
	struct thread *td;
	int error = 0;

	td = curthread;
	fc = FD_NHASH(ix);
loop:
	mtx_lock(&fdesc_hashmtx);
	/*
	 * If a forced unmount is progressing, we need to drop it. The flags are
	 * protected by the hashmtx.
	 */
	fmp = (struct fdescmount *)mp->mnt_data;
	if (fmp == NULL || fmp->flags & FMNT_UNMOUNTF) {
		mtx_unlock(&fdesc_hashmtx);
		return (-1);
	}

	LIST_FOREACH(fd, fc, fd_hash) {
		if (fd->fd_ix == ix && fd->fd_vnode->v_mount == mp) {
			/* Get reference to vnode in case it's being free'd */
			vp = fd->fd_vnode;
			VI_LOCK(vp);
			mtx_unlock(&fdesc_hashmtx);
			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td))
				goto loop;
			*vpp = vp;
			return (0);
		}
	}
	mtx_unlock(&fdesc_hashmtx);

	fd = malloc(sizeof(struct fdescnode), M_TEMP, M_WAITOK);

	error = getnewvnode("fdescfs", mp, &fdesc_vnodeops, &vp);
	if (error) {
		free(fd, M_TEMP);
		return (error);
	}
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	vp->v_data = fd;
	fd->fd_vnode = vp;
	fd->fd_type = ftype;
	fd->fd_fd = fd_fd;
	fd->fd_ix = ix;
	error = insmntque1(vp, mp, fdesc_insmntque_dtr, NULL);
	if (error != 0) {
		*vpp = NULLVP;
		return (error);
	}

	/* Make sure that someone didn't beat us when inserting the vnode. */
	mtx_lock(&fdesc_hashmtx);
	/*
	 * If a forced unmount is progressing, we need to drop it. The flags are
	 * protected by the hashmtx.
	 */
	fmp = (struct fdescmount *)mp->mnt_data;
	if (fmp == NULL || fmp->flags & FMNT_UNMOUNTF) {
		mtx_unlock(&fdesc_hashmtx);
		vgone(vp);
		vput(vp);
		*vpp = NULLVP;
		return (-1);
	}

	LIST_FOREACH(fd2, fc, fd_hash) {
		if (fd2->fd_ix == ix && fd2->fd_vnode->v_mount == mp) {
			/* Get reference to vnode in case it's being free'd */
			vp2 = fd2->fd_vnode;
			VI_LOCK(vp2);
			mtx_unlock(&fdesc_hashmtx);
			error = vget(vp2, LK_EXCLUSIVE | LK_INTERLOCK, td);
			/* Someone beat us, dec use count and wait for reclaim */
			vgone(vp);
			vput(vp);
			/* If we didn't get it, return no vnode. */
			if (error)
				vp2 = NULLVP;
			*vpp = vp2;
			return (error);
		}
	}

	/* If we came here, we can insert it safely. */
	LIST_INSERT_HEAD(fc, fd, fd_hash);
	mtx_unlock(&fdesc_hashmtx);
	*vpp = vp;
	return (0);
}
Example #14
int
mount_snapshot(kthread_t *td, vnode_t **vpp, const char *fstype, char *fspath,
    char *fspec, int fsflags)
{
	struct vfsconf *vfsp;
	struct mount *mp;
	vnode_t *vp, *mvp;
	struct ucred *cr;
	int error;

	/*
	 * Be ultra-paranoid about making sure the type and fspath
	 * variables will fit in our mp buffers, including the
	 * terminating NUL.
	 */
	if (strlen(fstype) >= MFSNAMELEN || strlen(fspath) >= MNAMELEN)
		return (ENAMETOOLONG);

	vfsp = vfs_byname_kld(fstype, td, &error);
	if (vfsp == NULL)
		return (ENODEV);

	vp = *vpp;
	if (vp->v_type != VDIR)
		return (ENOTDIR);
	/*
	 * We need vnode lock to protect v_mountedhere and vnode interlock
	 * to protect v_iflag.
	 */
	vn_lock(vp, LK_SHARED | LK_RETRY);
	VI_LOCK(vp);
	if ((vp->v_iflag & VI_MOUNT) != 0 || vp->v_mountedhere != NULL) {
		VI_UNLOCK(vp);
		VOP_UNLOCK(vp, 0);
		return (EBUSY);
	}
	vp->v_iflag |= VI_MOUNT;
	VI_UNLOCK(vp);
	VOP_UNLOCK(vp, 0);

	/*
	 * Allocate and initialize the filesystem.
	 * We don't want regular user that triggered snapshot mount to be able
	 * to unmount it, so pass credentials of the parent mount.
	 */
	mp = vfs_mount_alloc(vp, vfsp, fspath, vp->v_mount->mnt_cred);

	mp->mnt_optnew = NULL;
	vfs_setmntopt(mp, "from", fspec, 0);
	mp->mnt_optnew = mp->mnt_opt;
	mp->mnt_opt = NULL;

	/*
	 * Set the mount level flags.
	 */
	mp->mnt_flag = fsflags & MNT_UPDATEMASK;
	/*
	 * Snapshots are always read-only.
	 */
	mp->mnt_flag |= MNT_RDONLY;
	/*
	 * We don't want snapshots to allow access to vulnerable setuid
	 * programs, so we turn off setuid when mounting snapshots.
	 */
	mp->mnt_flag |= MNT_NOSUID;
	/*
	 * We don't want snapshots to be visible in regular
	 * mount(8) and df(1) output.
	 */
	mp->mnt_flag |= MNT_IGNORE;
	/*
	 * XXX: This is evil, but we can't mount a snapshot as a regular user.
	 * XXX: Is it safe when the snapshot is mounted from within a jail?
	 */
	cr = td->td_ucred;
	td->td_ucred = kcred;
	error = VFS_MOUNT(mp);
	td->td_ucred = cr;

	if (error != 0) {
		VI_LOCK(vp);
		vp->v_iflag &= ~VI_MOUNT;
		VI_UNLOCK(vp);
		vrele(vp);
		vfs_unbusy(mp);
		vfs_mount_destroy(mp);
		*vpp = NULL;
		return (error);
	}

	if (mp->mnt_opt != NULL)
		vfs_freeopts(mp->mnt_opt);
	mp->mnt_opt = mp->mnt_optnew;
	(void)VFS_STATFS(mp, &mp->mnt_stat);

	/*
	 * Prevent external consumers of mount options from reading
	 * mnt_optnew.
	 */
	mp->mnt_optnew = NULL;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
#ifdef FREEBSD_NAMECACHE
	cache_purge(vp);
#endif
	VI_LOCK(vp);
	vp->v_iflag &= ~VI_MOUNT;
	VI_UNLOCK(vp);

	vp->v_mountedhere = mp;
	/* Put the new filesystem on the mount list. */
	mtx_lock(&mountlist_mtx);
	TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	mtx_unlock(&mountlist_mtx);
	vfs_event_signal(NULL, VQ_MOUNT, 0);
	if (VFS_ROOT(mp, LK_EXCLUSIVE, &mvp))
		panic("mount: lost mount");
	vput(vp);
	vfs_unbusy(mp);
	*vpp = mvp;
	return (0);
}
Example #15
/*
 * Allocate (or lookup) pager for a vnode.
 * Handle is a vnode pointer.
 *
 * MPSAFE
 */
vm_object_t
vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t offset, struct ucred *cred)
{
	vm_object_t object;
	struct vnode *vp;

	/*
	 * Pageout to vnode, no can do yet.
	 */
	if (handle == NULL)
		return (NULL);

	vp = (struct vnode *) handle;

	/*
	 * If the object is being terminated, wait for it to
	 * go away.
	 */
retry:
	while ((object = vp->v_object) != NULL) {
		VM_OBJECT_WLOCK(object);
		if ((object->flags & OBJ_DEAD) == 0)
			break;
		vm_object_set_flag(object, OBJ_DISCONNECTWNT);
		VM_OBJECT_SLEEP(object, object, PDROP | PVM, "vadead", 0);
	}

	KASSERT(vp->v_usecount != 0, ("vnode_pager_alloc: no vnode reference"));

	if (object == NULL) {
		/*
		 * Add an object of the appropriate size
		 */
		object = vm_object_allocate(OBJT_VNODE, OFF_TO_IDX(round_page(size)));

		object->un_pager.vnp.vnp_size = size;
		object->un_pager.vnp.writemappings = 0;

		object->handle = handle;
		VI_LOCK(vp);
		if (vp->v_object != NULL) {
			/*
			 * Object has been created while we were sleeping
			 */
			VI_UNLOCK(vp);
			VM_OBJECT_WLOCK(object);
			KASSERT(object->ref_count == 1,
			    ("leaked ref %p %d", object, object->ref_count));
			object->type = OBJT_DEAD;
			object->ref_count = 0;
			VM_OBJECT_WUNLOCK(object);
			vm_object_destroy(object);
			goto retry;
		}
		vp->v_object = object;
		VI_UNLOCK(vp);
	} else {
		object->ref_count++;
#if VM_NRESERVLEVEL > 0
		vm_object_color(object, 0);
#endif
		VM_OBJECT_WUNLOCK(object);
	}
	vref(vp);
	return (object);
}
Example #16
VMBlockVFSUnmount(struct mount *mp,    // IN: filesystem to unmount
                  int mntflags,        // IN: unmount(2) flags (ex: MNT_FORCE)
                  struct thread *td)   // IN: caller's kernel thread context
#endif
{
   struct VMBlockMount *xmp;
   struct vnode *vp;
   void *mntdata;
   int error;
   int flags = 0, removed = 0;

   VMBLOCKDEBUG("VMBlockVFSUnmount: mp = %p\n", (void *)mp);

   xmp = MNTTOVMBLOCKMNT(mp);
   vp = xmp->rootVnode;

   VI_LOCK(vp);

   /*
    * VMBlocks reference the root vnode.  This check returns EBUSY if
    * VMBlocks still exist & the user isn't forcing us out.
    */
   if ((vp->v_usecount > 1) && !(mntflags & MNT_FORCE)) {
      VI_UNLOCK(vp);
      return EBUSY;
   }

   /*
    * FreeBSD forbids acquiring sleepable locks (ex: sx locks) while holding
    * non-sleepable locks (ex: mutexes).  The vnode interlock acquired above
    * is a mutex, and the Block* routines involve sx locks, so we need to
    * yield the interlock.
    *
    * In order to do this safely, we trade up to locking the entire vnode,
    * and indicate to the lock routine that we hold the interlock.  The lock
    * transfer will happen atomically.  (Er, at least within the scope of
    * the vnode subsystem.)
    */
   COMPAT_VOP_LOCK(vp, LK_EXCLUSIVE|LK_RETRY|LK_INTERLOCK, compat_td);

   removed = BlockRemoveAllBlocks(OS_UNKNOWN_BLOCKER);

   VI_LOCK(vp);
   vp->v_usecount -= removed;
   VI_UNLOCK(vp);
   COMPAT_VOP_UNLOCK(vp, 0, compat_td);

   if (mntflags & MNT_FORCE) {
      flags |= FORCECLOSE;
   }

   /* There is 1 extra root vnode reference (xmp->rootVnode). */
   error = vflush(mp, 1, flags, compat_td);
   if (error) {
      return error;
   }

   /*
    * Finally, throw away the VMBlockMount structure
    */
   mntdata = mp->mnt_data;
   mp->mnt_data = 0;
   free(mntdata, M_VMBLOCKFSMNT);
   return 0;
}
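The comment in VMBlockVFSUnmount() describes trading the vnode interlock up for the full vnode lock so that sleepable (sx) locks can then be taken safely. A minimal sketch of that handoff using the stock FreeBSD calls of this era, on the assumption that COMPAT_VOP_LOCK()/COMPAT_VOP_UNLOCK() are thin wrappers around the standard locking interface:

	VI_LOCK(vp);
	/* ... examine fields guarded by the interlock, e.g. v_usecount ... */
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK);
	/*
	 * LK_INTERLOCK made vn_lock() release the interlock atomically as
	 * part of acquiring the vnode lock; sleepable locks may be taken now.
	 */
	VOP_UNLOCK(vp, 0);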
Example #17
/*
 * Allocate a vnode
 */
int
pfs_vncache_alloc(struct mount *mp, struct vnode **vpp,
		  struct pfs_node *pn, pid_t pid)
{
	struct pfs_vdata *pvd, *pvd2;
	struct vnode *vp;
	int error;

	/*
	 * See if the vnode is in the cache.
	 * XXX linear search is not very efficient.
	 */
retry:
	mtx_lock(&pfs_vncache_mutex);
	for (pvd = pfs_vncache; pvd; pvd = pvd->pvd_next) {
		if (pvd->pvd_pn == pn && pvd->pvd_pid == pid &&
		    pvd->pvd_vnode->v_mount == mp) {
			vp = pvd->pvd_vnode;
			VI_LOCK(vp);
			mtx_unlock(&pfs_vncache_mutex);
			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, curthread) == 0) {
				++pfs_vncache_hits;
				*vpp = vp;
				/*
				 * Some callers cache_enter(vp) later, so
				 * we have to make sure it's not in the
				 * VFS cache so it doesn't get entered
				 * twice.  A better solution would be to
				 * make pfs_vncache_alloc() responsible
				 * for entering the vnode in the VFS
				 * cache.
				 */
				cache_purge(vp);
				return (0);
			}
			goto retry;
		}
	}
	mtx_unlock(&pfs_vncache_mutex);

	/* nope, get a new one */
	pvd = malloc(sizeof *pvd, M_PFSVNCACHE, M_WAITOK);
	pvd->pvd_next = pvd->pvd_prev = NULL;
	error = getnewvnode("pseudofs", mp, &pfs_vnodeops, vpp);
	if (error) {
		free(pvd, M_PFSVNCACHE);
		return (error);
	}
	pvd->pvd_pn = pn;
	pvd->pvd_pid = pid;
	(*vpp)->v_data = pvd;
	switch (pn->pn_type) {
	case pfstype_root:
		(*vpp)->v_vflag = VV_ROOT;
#if 0
		printf("root vnode allocated\n");
#endif
		/* fall through */
	case pfstype_dir:
	case pfstype_this:
	case pfstype_parent:
	case pfstype_procdir:
		(*vpp)->v_type = VDIR;
		break;
	case pfstype_file:
		(*vpp)->v_type = VREG;
		break;
	case pfstype_symlink:
		(*vpp)->v_type = VLNK;
		break;
	case pfstype_none:
		KASSERT(0, ("pfs_vncache_alloc called for null node\n"));
	default:
		panic("%s has unexpected type: %d", pn->pn_name, pn->pn_type);
	}
	/*
	 * Propagate flag through to vnode so users know it can change
	 * if the process changes (i.e. execve)
	 */
	if ((pn->pn_flags & PFS_PROCDEP) != 0)
		(*vpp)->v_vflag |= VV_PROCDEP;
	pvd->pvd_vnode = *vpp;
	vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
	VN_LOCK_AREC(*vpp);
	error = insmntque(*vpp, mp);
	if (error != 0) {
		free(pvd, M_PFSVNCACHE);
		*vpp = NULLVP;
		return (error);
	}
retry2:
	mtx_lock(&pfs_vncache_mutex);
	/*
	 * Other thread may race with us, creating the entry we are
	 * going to insert into the cache. Recheck after
	 * pfs_vncache_mutex is reacquired.
	 */
	for (pvd2 = pfs_vncache; pvd2; pvd2 = pvd2->pvd_next) {
		if (pvd2->pvd_pn == pn && pvd2->pvd_pid == pid &&
		    pvd2->pvd_vnode->v_mount == mp) {
			vp = pvd2->pvd_vnode;
			VI_LOCK(vp);
			mtx_unlock(&pfs_vncache_mutex);
			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, curthread) == 0) {
				++pfs_vncache_hits;
				vgone(*vpp);
				vput(*vpp);
				*vpp = vp;
				cache_purge(vp);
				return (0);
			}
			goto retry2;
		}
	}
	++pfs_vncache_misses;
	if (++pfs_vncache_entries > pfs_vncache_maxentries)
		pfs_vncache_maxentries = pfs_vncache_entries;
	pvd->pvd_prev = NULL;
	pvd->pvd_next = pfs_vncache;
	if (pvd->pvd_next)
		pvd->pvd_next->pvd_prev = pvd;
	pfs_vncache = pvd;
	mtx_unlock(&pfs_vncache_mutex);
	return (0);
}
Example #18
/*
 * Unmount the filesystem described by mp.
 */
static int
msdosfs_unmount(struct mount *mp, int mntflags)
{
	struct msdosfsmount *pmp;
	int error, flags;

	error = flags = 0;
	pmp = VFSTOMSDOSFS(mp);
	if ((pmp->pm_flags & MSDOSFSMNT_RONLY) == 0)
		error = msdosfs_sync(mp, MNT_WAIT);
	if ((mntflags & MNT_FORCE) != 0)
		flags |= FORCECLOSE;
	else if (error != 0)
		return (error);
	error = vflush(mp, 0, flags, curthread);
	if (error != 0 && error != ENXIO)
		return (error);
	if ((pmp->pm_flags & MSDOSFSMNT_RONLY) == 0) {
		error = markvoldirty(pmp, 0);
		if (error && error != ENXIO) {
			(void)markvoldirty(pmp, 1);
			return (error);
		}
	}
	if (pmp->pm_flags & MSDOSFSMNT_KICONV && msdosfs_iconv) {
		if (pmp->pm_w2u)
			msdosfs_iconv->close(pmp->pm_w2u);
		if (pmp->pm_u2w)
			msdosfs_iconv->close(pmp->pm_u2w);
		if (pmp->pm_d2u)
			msdosfs_iconv->close(pmp->pm_d2u);
		if (pmp->pm_u2d)
			msdosfs_iconv->close(pmp->pm_u2d);
	}

#ifdef MSDOSFS_DEBUG
	{
		struct vnode *vp = pmp->pm_devvp;
		struct bufobj *bo;

		bo = &vp->v_bufobj;
		BO_LOCK(bo);
		VI_LOCK(vp);
		vn_printf(vp,
		    "msdosfs_umount(): just before calling VOP_CLOSE()\n");
		printf("freef %p, freeb %p, mount %p\n",
		    TAILQ_NEXT(vp, v_actfreelist), vp->v_actfreelist.tqe_prev,
		    vp->v_mount);
		printf("cleanblkhd %p, dirtyblkhd %p, numoutput %ld, type %d\n",
		    TAILQ_FIRST(&vp->v_bufobj.bo_clean.bv_hd),
		    TAILQ_FIRST(&vp->v_bufobj.bo_dirty.bv_hd),
		    vp->v_bufobj.bo_numoutput, vp->v_type);
		VI_UNLOCK(vp);
		BO_UNLOCK(bo);
	}
#endif
	DROP_GIANT();
	if (pmp->pm_devvp->v_type == VCHR && pmp->pm_devvp->v_rdev != NULL)
		pmp->pm_devvp->v_rdev->si_mountpt = NULL;
	g_topology_lock();
	g_vfs_close(pmp->pm_cp);
	g_topology_unlock();
	PICKUP_GIANT();
	vrele(pmp->pm_devvp);
	dev_rel(pmp->pm_dev);
	free(pmp->pm_inusemap, M_MSDOSFSFAT);
	if (pmp->pm_flags & MSDOSFS_LARGEFS)
		msdosfs_fileno_free(mp);
	lockdestroy(&pmp->pm_fatlock);
	free(pmp, M_MSDOSFSMNT);
	mp->mnt_data = NULL;
	MNT_ILOCK(mp);
	mp->mnt_flag &= ~MNT_LOCAL;
	MNT_IUNLOCK(mp);
	return (error);
}
Example #19
/*
 * Clean up the unionfs node.
 */
void
unionfs_noderem(struct vnode *vp, struct thread *td)
{
	int		vfslocked;
	int		count;
	struct unionfs_node *unp, *unp_t1, *unp_t2;
	struct unionfs_node_hashhead *hd;
	struct unionfs_node_status *unsp, *unsp_tmp;
	struct vnode   *lvp;
	struct vnode   *uvp;
	struct vnode   *dvp;

	/*
	 * Use the interlock to protect the clearing of v_data to
	 * prevent faults in unionfs_lock().
	 */
	VI_LOCK(vp);
	unp = VTOUNIONFS(vp);
	lvp = unp->un_lowervp;
	uvp = unp->un_uppervp;
	dvp = unp->un_dvp;
	unp->un_lowervp = unp->un_uppervp = NULLVP;

	vp->v_vnlock = &(vp->v_lock);
	vp->v_data = NULL;
	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_INTERLOCK, VI_MTX(vp), td);
	if (lvp != NULLVP)
		VOP_UNLOCK(lvp, 0, td);
	if (uvp != NULLVP)
		VOP_UNLOCK(uvp, 0, td);
	vp->v_object = NULL;

	if (dvp != NULLVP && unp->un_hash.le_prev != NULL)
		unionfs_rem_cached_vnode(unp, dvp);

	if (lvp != NULLVP) {
		vfslocked = VFS_LOCK_GIANT(lvp->v_mount);
		vrele(lvp);
		VFS_UNLOCK_GIANT(vfslocked);
	}
	if (uvp != NULLVP) {
		vfslocked = VFS_LOCK_GIANT(uvp->v_mount);
		vrele(uvp);
		VFS_UNLOCK_GIANT(vfslocked);
	}
	if (dvp != NULLVP) {
		vfslocked = VFS_LOCK_GIANT(dvp->v_mount);
		vrele(dvp);
		VFS_UNLOCK_GIANT(vfslocked);
		unp->un_dvp = NULLVP;
	}
	if (unp->un_path != NULL) {
		free(unp->un_path, M_UNIONFSPATH);
		unp->un_path = NULL;
	}

	if (unp->un_hashtbl != NULL) {
		for (count = 0; count <= unp->un_hashmask; count++) {
			hd = unp->un_hashtbl + count;
			LIST_FOREACH_SAFE(unp_t1, hd, un_hash, unp_t2) {
				LIST_REMOVE(unp_t1, un_hash);
				unp_t1->un_hash.le_next = NULL;
				unp_t1->un_hash.le_prev = NULL;
			}
		}
		hashdestroy(unp->un_hashtbl, M_UNIONFSHASH, unp->un_hashmask);
	}
Example #20
/*
 * Clean up the unionfs node.
 */
void
unionfs_noderem(struct vnode *vp, struct thread *td)
{
	int		count;
	struct unionfs_node *unp, *unp_t1, *unp_t2;
	struct unionfs_node_hashhead *hd;
	struct unionfs_node_status *unsp, *unsp_tmp;
	struct vnode   *lvp;
	struct vnode   *uvp;
	struct vnode   *dvp;

	/*
	 * Use the interlock to protect the clearing of v_data to
	 * prevent faults in unionfs_lock().
	 */
	VI_LOCK(vp);
	unp = VTOUNIONFS(vp);
	lvp = unp->un_lowervp;
	uvp = unp->un_uppervp;
	dvp = unp->un_dvp;
	unp->un_lowervp = unp->un_uppervp = NULLVP;
	vp->v_vnlock = &(vp->v_lock);
	vp->v_data = NULL;
	vp->v_object = NULL;
	VI_UNLOCK(vp);

	if (lvp != NULLVP)
		VOP_UNLOCK(lvp, LK_RELEASE);
	if (uvp != NULLVP)
		VOP_UNLOCK(uvp, LK_RELEASE);

	if (dvp != NULLVP && unp->un_hash.le_prev != NULL)
		unionfs_rem_cached_vnode(unp, dvp);

	if (lockmgr(vp->v_vnlock, LK_EXCLUSIVE, VI_MTX(vp)) != 0)
		panic("the lock for deletion is unacquirable.");

	if (lvp != NULLVP)
		vrele(lvp);
	if (uvp != NULLVP)
		vrele(uvp);
	if (dvp != NULLVP) {
		vrele(dvp);
		unp->un_dvp = NULLVP;
	}
	if (unp->un_path != NULL) {
		free(unp->un_path, M_UNIONFSPATH);
		unp->un_path = NULL;
	}

	if (unp->un_hashtbl != NULL) {
		for (count = 0; count <= unp->un_hashmask; count++) {
			hd = unp->un_hashtbl + count;
			LIST_FOREACH_SAFE(unp_t1, hd, un_hash, unp_t2) {
				LIST_REMOVE(unp_t1, un_hash);
				unp_t1->un_hash.le_next = NULL;
				unp_t1->un_hash.le_prev = NULL;
			}
		}
		hashdestroy(unp->un_hashtbl, M_UNIONFSHASH, unp->un_hashmask);
	}
Example #21
/*
 * We need to process our own vnode lock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
static int
null_lock(struct vop_lock1_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int flags = ap->a_flags;
	struct null_node *nn;
	struct vnode *lvp;
	int error;


	if ((flags & LK_INTERLOCK) == 0) {
		VI_LOCK(vp);
		ap->a_flags = flags |= LK_INTERLOCK;
	}
	nn = VTONULL(vp);
	/*
	 * If we're still active we must ask the lower layer to
	 * lock, as ffs has special lock considerations in its
	 * vop lock.
	 */
	if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
		VI_LOCK_FLAGS(lvp, MTX_DUPOK);
		VI_UNLOCK(vp);
		/*
		 * We have to hold the vnode here to solve a potential
		 * reclaim race.  If we're forcibly vgone'd while we
		 * still have refs, a thread could be sleeping inside
		 * the lowervp's vop_lock routine.  When we vgone we will
		 * drop our last ref to the lowervp, which would allow it
		 * to be reclaimed.  The lowervp could then be recycled,
		 * in which case it is not legal to be sleeping in its VOP.
		 * We prevent it from being recycled by holding the vnode
		 * here.
		 */
		vholdl(lvp);
		error = VOP_LOCK(lvp, flags);

		/*
		 * We might have slept to get the lock and someone might have
		 * cleaned our vnode already, switching vnode lock from one in
		 * lowervp to v_lock in our own vnode structure.  Handle this
		 * case by reacquiring correct lock in requested mode.
		 */
		if (VTONULL(vp) == NULL && error == 0) {
			ap->a_flags &= ~(LK_TYPE_MASK | LK_INTERLOCK);
			switch (flags & LK_TYPE_MASK) {
			case LK_SHARED:
				ap->a_flags |= LK_SHARED;
				break;
			case LK_UPGRADE:
			case LK_EXCLUSIVE:
				ap->a_flags |= LK_EXCLUSIVE;
				break;
			default:
				panic("Unsupported lock request %d\n",
				    ap->a_flags);
			}
			VOP_UNLOCK(lvp, 0);
			error = vop_stdlock(ap);
		}
		vdrop(lvp);
	} else
		error = vop_stdlock(ap);

	return (error);
}
Example #22
/*
 * gfs_file_inactive()
 *
 * Called from the VOP_INACTIVE() routine.  If necessary, this routine will
 * remove the given vnode from the parent directory and clean up any references
 * in the VFS layer.
 *
 * If the vnode was not removed (due to a race with vget), then NULL is
 * returned.  Otherwise, a pointer to the private data is returned.
 */
void *
gfs_file_inactive(vnode_t *vp)
{
	int i;
	gfs_dirent_t *ge = NULL;
	gfs_file_t *fp = vp->v_data;
	gfs_dir_t *dp = NULL;
	void *data;

	if (fp->gfs_parent == NULL || (vp->v_flag & V_XATTRDIR))
		goto found;

	/*
	 * XXX cope with a FreeBSD-specific race wherein the parent's
	 * snapshot data can be freed before the parent is
	 */
	if ((dp = fp->gfs_parent->v_data) == NULL)
		return (NULL);
		
	/*
	 * First, see if this vnode is cached in the parent.
	 */
	gfs_dir_lock(dp);

	/*
	 * Find it in the set of static entries.
	 */
	for (i = 0; i < dp->gfsd_nstatic; i++)  {
		ge = &dp->gfsd_static[i];

		if (ge->gfse_vnode == vp)
			goto found;
	}

	/*
	 * If 'ge' is NULL, then it is a dynamic entry.
	 */
	ge = NULL;

found:
	if (vp->v_flag & V_XATTRDIR)
		VI_LOCK(fp->gfs_parent);
	VI_LOCK(vp);
	/*
	 * Really remove this vnode
	 */
	data = vp->v_data;
	if (ge != NULL) {
		/*
		 * If this was a statically cached entry, simply set the
		 * cached vnode to NULL.
		 */
		ge->gfse_vnode = NULL;
	}
	VI_UNLOCK(vp);

	/*
	 * Free vnode and release parent
	 */
	if (fp->gfs_parent) {
		if (dp)
			gfs_dir_unlock(dp);
		VI_LOCK(fp->gfs_parent);
		fp->gfs_parent->v_usecount--;
		VI_UNLOCK(fp->gfs_parent);
	} else {
		ASSERT(vp->v_vfsp != NULL);
		VFS_RELE(vp->v_vfsp);
	}
	if (vp->v_flag & V_XATTRDIR)
		VI_UNLOCK(fp->gfs_parent);

	return (data);
}