Example #1
/*
 * XXX: We cannot use this function as a cache constructor, because
 *      there is one global cache for all file systems and we would
 *      need to pass vfsp here, which is not possible: the 'cdrarg'
 *      argument is fixed at kmem_cache_create() time.
 */
static int
zfs_znode_cache_constructor(void *buf, void *arg, int kmflags)
{
	znode_t *zp = buf;
	vnode_t *vp;
	vfs_t *vfsp = arg;
	int error;

	POINTER_INVALIDATE(&zp->z_zfsvfs);
	ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs));

	if (vfsp != NULL) {
		error = getnewvnode("zfs", vfsp, &zfs_vnodeops, &vp);
		if (error != 0 && (kmflags & KM_NOSLEEP))
			return (-1);
		ASSERT(error == 0);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
		zp->z_vnode = vp;
		vp->v_data = (caddr_t)zp;
		VN_LOCK_AREC(vp);
	} else {
		zp->z_vnode = NULL;
	}

	list_link_init(&zp->z_link_node);

	mutex_init(&zp->z_lock, NULL, MUTEX_DEFAULT, NULL);
	rw_init(&zp->z_map_lock, NULL, RW_DEFAULT, NULL);
	rw_init(&zp->z_parent_lock, NULL, RW_DEFAULT, NULL);
	rw_init(&zp->z_name_lock, NULL, RW_DEFAULT, NULL);
	mutex_init(&zp->z_acl_lock, NULL, MUTEX_DEFAULT, NULL);

	mutex_init(&zp->z_range_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&zp->z_range_avl, zfs_range_compare,
	    sizeof (rl_t), offsetof(rl_t, r_node));

	zp->z_dbuf = NULL;
	zp->z_dirlocks = NULL;
	return (0);
}
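
For contrast, here is a minimal sketch of how a constructor is normally attached to a cache under the illumos/OpenSolaris kmem API that the XXX comment refers to (the zfs_znode_cache_init() wrapper name is hypothetical). The seventh argument, 'cdrarg', is fixed once per cache and is handed back as 'arg' on every constructor call, which is exactly why a per-mount vfsp cannot be routed through a single global cache and why the function above is instead called directly.

static kmem_cache_t *znode_cache;

static void
zfs_znode_cache_init(void)
{
	/*
	 * Sketch only: registering the constructor/destructor pair.  The
	 * 'cdrarg' slot (seventh argument, NULL here) is what later shows
	 * up as 'arg' in zfs_znode_cache_constructor(), so it cannot carry
	 * a per-filesystem vfsp.
	 */
	znode_cache = kmem_cache_create("zfs_znode_cache", sizeof (znode_t),
	    0, zfs_znode_cache_constructor, zfs_znode_cache_destructor,
	    NULL, NULL, NULL, 0);
}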
Example #2
/*
 * Q_QUOTAON - set up a quota file for a particular filesystem.
 */
int
quotaon(struct thread *td, struct mount *mp, int type, void *fname)
{
    struct ufsmount *ump;
    struct vnode *vp, **vpp;
    struct vnode *mvp;
    struct dquot *dq;
    int error, flags;
    struct nameidata nd;

    error = priv_check(td, PRIV_UFS_QUOTAON);
    if (error != 0) {
        vfs_unbusy(mp);
        return (error);
    }

    if ((mp->mnt_flag & MNT_RDONLY) != 0) {
        vfs_unbusy(mp);
        return (EROFS);
    }

    ump = VFSTOUFS(mp);
    dq = NODQUOT;

    NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, fname, td);
    flags = FREAD | FWRITE;
    vfs_ref(mp);
    vfs_unbusy(mp);
    error = vn_open(&nd, &flags, 0, NULL);
    if (error != 0) {
        vfs_rel(mp);
        return (error);
    }
    NDFREE(&nd, NDF_ONLY_PNBUF);
    vp = nd.ni_vp;
    error = vfs_busy(mp, MBF_NOWAIT);
    vfs_rel(mp);
    if (error == 0) {
        if (vp->v_type != VREG) {
            error = EACCES;
            vfs_unbusy(mp);
        }
    }
    if (error != 0) {
        VOP_UNLOCK(vp, 0);
        (void) vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
        return (error);
    }

    UFS_LOCK(ump);
    if ((ump->um_qflags[type] & (QTF_OPENING|QTF_CLOSING)) != 0) {
        UFS_UNLOCK(ump);
        VOP_UNLOCK(vp, 0);
        (void) vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
        vfs_unbusy(mp);
        return (EALREADY);
    }
    ump->um_qflags[type] |= QTF_OPENING|QTF_CLOSING;
    UFS_UNLOCK(ump);
    if ((error = dqopen(vp, ump, type)) != 0) {
        VOP_UNLOCK(vp, 0);
        UFS_LOCK(ump);
        ump->um_qflags[type] &= ~(QTF_OPENING|QTF_CLOSING);
        UFS_UNLOCK(ump);
        (void) vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
        vfs_unbusy(mp);
        return (error);
    }
    VOP_UNLOCK(vp, 0);
    MNT_ILOCK(mp);
    mp->mnt_flag |= MNT_QUOTA;
    MNT_IUNLOCK(mp);

    vpp = &ump->um_quotas[type];
    if (*vpp != vp)
        quotaoff1(td, mp, type);

    /*
     * When the directory vnode containing the quota file is
     * inactivated (because the shared lookup of the quota file
     * vput()s the dvp), the qsyncvp() call for the containing
     * directory would try to acquire the quota vnode lock
     * exclusively, while the lookup already holds that lock
     * shared.  Mark the quota vnode lock as allowing recursion
     * and as automatically converting shared lock requests to
     * exclusive ones.
     *
     * Also mark the quota vnode as a system vnode.
     */
    vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
    vp->v_vflag |= VV_SYSTEM;
    VN_LOCK_AREC(vp);
    VN_LOCK_DSHARE(vp);
    VOP_UNLOCK(vp, 0);
    *vpp = vp;
    /*
     * Save the credential of the process that turned on quotas.
     * Set up the time limits for this quota.
     */
    ump->um_cred[type] = crhold(td->td_ucred);
    ump->um_btime[type] = MAX_DQ_TIME;
    ump->um_itime[type] = MAX_IQ_TIME;
    if (dqget(NULLVP, 0, ump, type, &dq) == 0) {
        if (dq->dq_btime > 0)
            ump->um_btime[type] = dq->dq_btime;
        if (dq->dq_itime > 0)
            ump->um_itime[type] = dq->dq_itime;
        dqrele(NULLVP, dq);
    }
    /*
     * Allow the getdq from getinoquota below to read the quota
     * from file.
     */
    UFS_LOCK(ump);
    ump->um_qflags[type] &= ~QTF_CLOSING;
    UFS_UNLOCK(ump);
    /*
     * Search vnodes associated with this mount point,
     * adding references to quota file being opened.
     * NB: only need to add dquot's for inodes being modified.
     */
again:
    MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
        if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
            MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
            goto again;
        }
        if (vp->v_type == VNON || vp->v_writecount == 0) {
            VOP_UNLOCK(vp, 0);
            vrele(vp);
            continue;
        }
        error = getinoquota(VTOI(vp));
        VOP_UNLOCK(vp, 0);
        vrele(vp);
        if (error) {
            MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
            break;
        }
    }

    if (error)
        quotaoff_inchange(td, mp, type);
    UFS_LOCK(ump);
    ump->um_qflags[type] &= ~QTF_OPENING;
    KASSERT((ump->um_qflags[type] & QTF_CLOSING) == 0,
            ("quotaon: leaking flags"));
    UFS_UNLOCK(ump);

    vfs_unbusy(mp);
    return (error);
}
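
As a usage illustration, the sketch below is modeled on the way FreeBSD's UFS VFS_QUOTACTL handler dispatches Q_QUOTAON: the quotactl(2) command word is split into command and quota type, and quotaon() is handed the still-busied mount plus the user-space path of the quota file. The quotactl_dispatch() name and the reduced argument list are assumptions made for the sketch; quotaon() itself consumes the vfs_busy() reference, as its error paths above show.

static int
quotactl_dispatch(struct mount *mp, int cmds, void *arg)
{
    struct thread *td = curthread;
    int cmd = cmds >> SUBCMDSHIFT;      /* e.g. Q_QUOTAON */
    int type = cmds & SUBCMDMASK;       /* USRQUOTA or GRPQUOTA */

    if ((u_int)type >= MAXQUOTAS) {
        vfs_unbusy(mp);
        return (EINVAL);
    }
    if (cmd == Q_QUOTAON)
        /* quotaon() takes over the vfs_busy() reference on mp. */
        return (quotaon(td, mp, type, arg));
    vfs_unbusy(mp);
    return (EOPNOTSUPP);
}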
Example #3
/*
 * ONLY USED FOR THE ROOT DIRECTORY. nfscl_nget() does the rest. If this
 * function is going to be used to get Regular Files, code must be added
 * to fill in the "struct nfsv4node".
 * Look up a vnode/nfsnode by file handle.
 * Callers must check for mount points!!
 * In all cases, a pointer to an nfsnode structure is returned.
 */
int
ncl_nget(struct mount *mntp, u_int8_t *fhp, int fhsize, struct nfsnode **npp,
    int lkflags)
{
	struct thread *td = curthread;	/* XXX */
	struct nfsnode *np;
	struct vnode *vp;
	struct vnode *nvp;
	int error;
	u_int hash;
	struct nfsmount *nmp;
	struct nfsfh *nfhp;

	nmp = VFSTONFS(mntp);
	*npp = NULL;

	hash = fnv_32_buf(fhp, fhsize, FNV1_32_INIT);

	MALLOC(nfhp, struct nfsfh *, sizeof (struct nfsfh) + fhsize,
	    M_NFSFH, M_WAITOK);
	bcopy(fhp, &nfhp->nfh_fh[0], fhsize);
	nfhp->nfh_len = fhsize;
	error = vfs_hash_get(mntp, hash, lkflags,
	    td, &nvp, newnfs_vncmpf, nfhp);
	FREE(nfhp, M_NFSFH);
	if (error)
		return (error);
	if (nvp != NULL) {
		*npp = VTONFS(nvp);
		return (0);
	}
	np = uma_zalloc(newnfsnode_zone, M_WAITOK | M_ZERO);

	error = getnewvnode("newnfs", mntp, &newnfs_vnodeops, &nvp);
	if (error) {
		uma_zfree(newnfsnode_zone, np);
		return (error);
	}
	vp = nvp;
	KASSERT(vp->v_bufobj.bo_bsize != 0, ("ncl_nget: bo_bsize == 0"));
	vp->v_bufobj.bo_ops = &buf_ops_newnfs;
	vp->v_data = np;
	np->n_vnode = vp;
	/*
	 * Initialize the mutex even if the vnode is going to be a loser.
	 * This simplifies the logic in reclaim, which can then destroy the
	 * mutex unconditionally: no special casing is needed for the loser,
	 * or when hash_insert happens to return an error.
	 */
	mtx_init(&np->n_mtx, "NEWNFSnode lock", NULL, MTX_DEF | MTX_DUPOK);
	/*
	 * NFS supports recursive and shared locking.
	 */
	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
	VN_LOCK_AREC(vp);
	VN_LOCK_ASHARE(vp);
	/* 
	 * Are we getting the root? If so, make sure the vnode flags
	 * are correct 
	 */
	if ((fhsize == nmp->nm_fhsize) &&
	    !bcmp(fhp, nmp->nm_fh, fhsize)) {
		if (vp->v_type == VNON)
			vp->v_type = VDIR;
		vp->v_vflag |= VV_ROOT;
	}
	
	MALLOC(np->n_fhp, struct nfsfh *, sizeof (struct nfsfh) + fhsize,
	    M_NFSFH, M_WAITOK);
	bcopy(fhp, np->n_fhp->nfh_fh, fhsize);
	np->n_fhp->nfh_len = fhsize;
	error = insmntque(vp, mntp);
	if (error != 0) {
		*npp = NULL;
		FREE((caddr_t)np->n_fhp, M_NFSFH);
		mtx_destroy(&np->n_mtx);
		uma_zfree(newnfsnode_zone, np);
		return (error);
	}
	error = vfs_hash_insert(vp, hash, lkflags, 
	    td, &nvp, newnfs_vncmpf, np->n_fhp);
	if (error)
		return (error);
	if (nvp != NULL) {
		*npp = VTONFS(nvp);
		/* vfs_hash_insert() vput()'s the losing vnode */
		return (0);
	}
	*npp = np;

	return (0);
}
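
For a usage example, the sketch below follows the pattern of the NFS client's VFS_ROOT handler: the mount's own file handle is passed in, so the nm_fh/nm_fhsize comparison above matches and the returned vnode ends up typed VDIR and flagged VV_ROOT. The example_nfs_root() name is hypothetical, and the real handler does additional attribute work.

static int
example_nfs_root(struct mount *mp, int flags, struct vnode **vpp)
{
	struct nfsmount *nmp = VFSTONFS(mp);
	struct nfsnode *np;
	int error;

	/* Look up (or create) the nfsnode for the mount's root handle. */
	error = ncl_nget(mp, nmp->nm_fh, nmp->nm_fhsize, &np, LK_EXCLUSIVE);
	if (error != 0)
		return (error);
	*vpp = NFSTOV(np);	/* returned locked by the vfs_hash code */
	return (0);
}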
Example #4
/*
 * Look up a vnode/nfsnode by file handle.
 * Callers must check for mount points!!
 * In all cases, a pointer to an nfsnode structure is returned.
 */
int
nfs_nget(struct mount *mntp, nfsfh_t *fhp, int fhsize, struct nfsnode **npp, int flags)
{
	struct thread *td = curthread;	/* XXX */
	struct nfsnode *np;
	struct vnode *vp;
	struct vnode *nvp;
	int error;
	u_int hash;
	struct nfsmount *nmp;
	struct nfs_vncmp ncmp;

	nmp = VFSTONFS(mntp);
	*npp = NULL;

	hash = fnv_32_buf(fhp->fh_bytes, fhsize, FNV1_32_INIT);
	ncmp.fhsize = fhsize;
	ncmp.fh = fhp;

	error = vfs_hash_get(mntp, hash, flags,
	    td, &nvp, nfs_vncmpf, &ncmp);
	if (error)
		return (error);
	if (nvp != NULL) {
		*npp = VTONFS(nvp);
		return (0);
	}

	/*
	 * Allocate before getnewvnode since doing so afterward
	 * might cause a bogus v_data pointer to get dereferenced
	 * elsewhere if zalloc should block.
	 */
	np = uma_zalloc(nfsnode_zone, M_WAITOK | M_ZERO);

	error = getnewvnode("nfs", mntp, &nfs_vnodeops, &nvp);
	if (error) {
		uma_zfree(nfsnode_zone, np);
		return (error);
	}
	vp = nvp;
	vp->v_bufobj.bo_ops = &buf_ops_nfs;
	vp->v_data = np;
	np->n_vnode = vp;
	/*
	 * Initialize the mutex even if the vnode is going to be a loser.
	 * This simplifies the logic in reclaim, which can then destroy the
	 * mutex unconditionally: no special casing is needed for the loser,
	 * or when hash_insert happens to return an error.
	 */
	mtx_init(&np->n_mtx, "NFSnode lock", NULL, MTX_DEF);
	/*
	 * NFS supports recursive and shared locking.
	 */
	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
	VN_LOCK_AREC(vp);
	VN_LOCK_ASHARE(vp);
	if (fhsize > NFS_SMALLFH) {
		np->n_fhp = malloc(fhsize, M_NFSBIGFH, M_WAITOK);
	} else
		np->n_fhp = &np->n_fh;
	bcopy((caddr_t)fhp, (caddr_t)np->n_fhp, fhsize);
	np->n_fhsize = fhsize;
	error = insmntque(vp, mntp);
	if (error != 0) {
		*npp = NULL;
		if (np->n_fhsize > NFS_SMALLFH) {
			free((caddr_t)np->n_fhp, M_NFSBIGFH);
		}
		mtx_destroy(&np->n_mtx);
		uma_zfree(nfsnode_zone, np);
		return (error);
	}
	error = vfs_hash_insert(vp, hash, flags, 
	    td, &nvp, nfs_vncmpf, &ncmp);
	if (error)
		return (error);
	if (nvp != NULL) {
		*npp = VTONFS(nvp);
		/* vfs_hash_insert() vput()'s the losing vnode */
		return (0);
	}
	*npp = np;

	return (0);
}
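
The interesting moving part here is the comparator handed to vfs_hash_get() and vfs_hash_insert(). The sketch below shows the shape such a comparator takes (the example_vncmpf() name and exact body are illustrative, not the verbatim nfs_vncmpf()): it must return 0 when the candidate vnode on the hash chain carries the file handle being looked up, and nonzero otherwise.

static int
example_vncmpf(struct vnode *vp, void *arg)
{
	struct nfs_vncmp *a = arg;
	struct nfsnode *np = VTONFS(vp);

	/* Match on handle length first, then on the handle bytes. */
	if (np->n_fhsize != a->fhsize)
		return (1);
	return (bcmp(a->fh, np->n_fhp, a->fhsize));
}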
Example #5
/*
 * Allocate a vnode
 */
int
pfs_vncache_alloc(struct mount *mp, struct vnode **vpp,
		  struct pfs_node *pn, pid_t pid)
{
	struct pfs_vdata *pvd, *pvd2;
	struct vnode *vp;
	int error;

	/*
	 * See if the vnode is in the cache.
	 * XXX linear search is not very efficient.
	 */
retry:
	mtx_lock(&pfs_vncache_mutex);
	for (pvd = pfs_vncache; pvd; pvd = pvd->pvd_next) {
		if (pvd->pvd_pn == pn && pvd->pvd_pid == pid &&
		    pvd->pvd_vnode->v_mount == mp) {
			vp = pvd->pvd_vnode;
			VI_LOCK(vp);
			mtx_unlock(&pfs_vncache_mutex);
			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, curthread) == 0) {
				++pfs_vncache_hits;
				*vpp = vp;
				/*
				 * Some callers cache_enter(vp) later, so
				 * we have to make sure it's not in the
				 * VFS cache so it doesn't get entered
				 * twice.  A better solution would be to
				 * make pfs_vncache_alloc() responsible
				 * for entering the vnode in the VFS
				 * cache.
				 */
				cache_purge(vp);
				return (0);
			}
			goto retry;
		}
	}
	mtx_unlock(&pfs_vncache_mutex);

	/* nope, get a new one */
	pvd = malloc(sizeof *pvd, M_PFSVNCACHE, M_WAITOK);
	pvd->pvd_next = pvd->pvd_prev = NULL;
	error = getnewvnode("pseudofs", mp, &pfs_vnodeops, vpp);
	if (error) {
		free(pvd, M_PFSVNCACHE);
		return (error);
	}
	pvd->pvd_pn = pn;
	pvd->pvd_pid = pid;
	(*vpp)->v_data = pvd;
	switch (pn->pn_type) {
	case pfstype_root:
		(*vpp)->v_vflag = VV_ROOT;
#if 0
		printf("root vnode allocated\n");
#endif
		/* fall through */
	case pfstype_dir:
	case pfstype_this:
	case pfstype_parent:
	case pfstype_procdir:
		(*vpp)->v_type = VDIR;
		break;
	case pfstype_file:
		(*vpp)->v_type = VREG;
		break;
	case pfstype_symlink:
		(*vpp)->v_type = VLNK;
		break;
	case pfstype_none:
		KASSERT(0, ("pfs_vncache_alloc called for null node\n"));
	default:
		panic("%s has unexpected type: %d", pn->pn_name, pn->pn_type);
	}
	/*
	 * Propagate the flag through to the vnode so users know it can
	 * change if the process changes (e.g. on execve).
	 */
	if ((pn->pn_flags & PFS_PROCDEP) != 0)
		(*vpp)->v_vflag |= VV_PROCDEP;
	pvd->pvd_vnode = *vpp;
	vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
	VN_LOCK_AREC(*vpp);
	error = insmntque(*vpp, mp);
	if (error != 0) {
		free(pvd, M_PFSVNCACHE);
		*vpp = NULLVP;
		return (error);
	}
retry2:
	mtx_lock(&pfs_vncache_mutex);
	/*
	 * Other thread may race with us, creating the entry we are
	 * going to insert into the cache. Recheck after
	 * pfs_vncache_mutex is reacquired.
	 */
	for (pvd2 = pfs_vncache; pvd2; pvd2 = pvd2->pvd_next) {
		if (pvd2->pvd_pn == pn && pvd2->pvd_pid == pid &&
		    pvd2->pvd_vnode->v_mount == mp) {
			vp = pvd2->pvd_vnode;
			VI_LOCK(vp);
			mtx_unlock(&pfs_vncache_mutex);
			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, curthread) == 0) {
				++pfs_vncache_hits;
				vgone(*vpp);
				vput(*vpp);
				*vpp = vp;
				cache_purge(vp);
				return (0);
			}
			goto retry2;
		}
	}
	++pfs_vncache_misses;
	if (++pfs_vncache_entries > pfs_vncache_maxentries)
		pfs_vncache_maxentries = pfs_vncache_entries;
	pvd->pvd_prev = NULL;
	pvd->pvd_next = pfs_vncache;
	if (pvd->pvd_next)
		pvd->pvd_next->pvd_prev = pvd;
	pfs_vncache = pvd;
	mtx_unlock(&pfs_vncache_mutex);
	return (0);
}
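
As a usage note, callers reach this cache through small helpers; the sketch below is modeled on pseudofs's own VFS_ROOT handler (the example_pfs_root() name is hypothetical). Process-independent nodes such as the root pass NO_PID, while process-dependent directories pass the pid that distinguishes their cache entries.

static int
example_pfs_root(struct mount *mp, int flags, struct vnode **vpp)
{
	struct pfs_info *pi = (struct pfs_info *)mp->mnt_data;

	/* The root node is not tied to any process, hence NO_PID. */
	return (pfs_vncache_alloc(mp, vpp, pi->pi_root, NO_PID));
}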