Example #1
/*
 * Unmount a file descriptor from a node in the file system.
 * If the user is not the owner of the file and is not privileged,
 * the request is denied.
 * Otherwise, remove the namenode from the hash list.
 * If the mounted file descriptor was that of a stream and this
 * was the last mount of the stream, turn off the STRMOUNT flag.
 * If the rootvp is referenced other than through the mount,
 * nm_inactive will clean up.
 */
static int
nm_unmount(vfs_t *vfsp, int flag, cred_t *crp)
{
	struct namenode *nodep = (struct namenode *)vfsp->vfs_data;
	vnode_t *vp, *thisvp;
	struct file *fp = NULL;

	ASSERT((nodep->nm_flag & NMNMNT) == 0);

	/*
	 * Forced unmount is not supported by this file system,
	 * so return ENOTSUP.
	 */
	if (flag & MS_FORCE) {
		return (ENOTSUP);
	}

	vp = nodep->nm_filevp;
	mutex_enter(&nodep->nm_lock);
	if (secpolicy_vnode_owner(crp, nodep->nm_vattr.va_uid) != 0) {
		mutex_exit(&nodep->nm_lock);
		return (EPERM);
	}

	mutex_exit(&nodep->nm_lock);

	mutex_enter(&ntable_lock);
	nameremove(nodep);
	thisvp = NMTOV(nodep);
	mutex_enter(&thisvp->v_lock);
	/*
	 * Drop our hold.  If it was the last one, the namenode is
	 * already off the hash list, so it can be freed below.
	 */
	if (thisvp->v_count-- == 1) {
		fp = nodep->nm_filep;
		mutex_exit(&thisvp->v_lock);
		vn_invalid(thisvp);
		vn_free(thisvp);
		VFS_RELE(vfsp);
		namenodeno_free(nodep->nm_vattr.va_nodeid);
		kmem_free(nodep, sizeof (struct namenode));
	} else {
		thisvp->v_flag &= ~VROOT;
		mutex_exit(&thisvp->v_lock);
	}
	if (namefind(vp, NULLVP) == NULL && vp->v_stream) {
		struct stdata *stp = vp->v_stream;
		mutex_enter(&stp->sd_lock);
		stp->sd_flag &= ~STRMOUNT;
		mutex_exit(&stp->sd_lock);
	}
	mutex_exit(&ntable_lock);
	if (fp != NULL)
		(void) closef(fp);
	return (0);
}
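The teardown above relies on a common last-reference idiom: decrement the hold count under the vnode lock, and only the thread whose decrement took the count to zero performs the destruction, after dropping the lock. A minimal user-space sketch of that idiom, with pthread_mutex_t standing in for v_lock and a hypothetical node_t/node_teardown() in place of the vnode and the vn_invalid()/vn_free() sequence:

#include <pthread.h>
#include <stdlib.h>

/* Hypothetical refcounted node; models a vnode's v_lock/v_count. */
typedef struct node {
	pthread_mutex_t lock;
	int count;
} node_t;

static void
node_teardown(node_t *np)
{
	/* Stands in for the vn_invalid()/vn_free()/kmem_free() sequence. */
	pthread_mutex_destroy(&np->lock);
	free(np);
}

void
node_rele(node_t *np)
{
	pthread_mutex_lock(&np->lock);
	if (--np->count > 0) {
		/* Other holders remain; nothing to destroy. */
		pthread_mutex_unlock(&np->lock);
		return;
	}
	/*
	 * Ours was the last hold.  The node must already be out of
	 * any lookup table (as nameremove() ensures above), so no new
	 * hold can arrive; drop the lock before freeing the memory
	 * that contains it.
	 */
	pthread_mutex_unlock(&np->lock);
	node_teardown(np);
}

The same shape reappears in fdinactive() and nm_inactive() below.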
Example #2
/* ARGSUSED */
static void
fdinactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
{
	mutex_enter(&vp->v_lock);
	ASSERT(vp->v_count >= 1);
	if (--vp->v_count != 0) {
		mutex_exit(&vp->v_lock);
		return;
	}
	mutex_exit(&vp->v_lock);
	vn_invalid(vp);
	vn_free(vp);
}
void
sv_inactive(vnode_t *vp)
{
	svnode_t *svp;
	rnode4_t *rp;
	vnode_t *mvp;

	sv_stats.sv_inactive++;

	svp = VTOSV(vp);
	rp = VTOR4(vp);
	mvp = rp->r_vnode;

	ASSERT(mvp != vp);

	/*
	 * Remove the shadow vnode from the list.  The serialization
	 * is provided by the svnode list lock.  This could be done
	 * with the r_statelock, but that would require more locking
	 * in the activation path.
	 */

	mutex_enter(&rp->r_svlock);
	mutex_enter(&vp->v_lock);
	/* check if someone slipped in while locks were dropped */
	if (vp->v_count > 1) {
		vp->v_count--;
		mutex_exit(&vp->v_lock);
		mutex_exit(&rp->r_svlock);
		return;
	}
	remque(svp);
	mutex_exit(&vp->v_lock);
	mutex_exit(&rp->r_svlock);

	sv_uninit(svp);
	svp->sv_forw = svp->sv_back = NULL;
	kmem_cache_free(svnode_cache, svp);
	vn_invalid(vp);
	vn_free(vp);

	/* release the reference held by this shadow on the master */

	VN_RELE(mvp);
}
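The "someone slipped in" re-check is the key move in sv_inactive(): between the caller's last release and this routine taking r_svlock, another thread may have found the shadow through the list and taken a new hold. A sketch of that guard, assuming a hypothetical doubly linked shadow list protected by a list_lock:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical shadow node on a doubly linked list. */
typedef struct shadow {
	struct shadow *forw;
	struct shadow *back;
	pthread_mutex_t lock;
	int count;
} shadow_t;

void
shadow_inactive(shadow_t *sp)
{
	pthread_mutex_lock(&list_lock);
	pthread_mutex_lock(&sp->lock);
	/*
	 * Re-check the count now that both locks are held: a lookup
	 * may have taken a new hold while no locks were held.  If so,
	 * just drop ours and leave the node alone.
	 */
	if (sp->count > 1) {
		sp->count--;
		pthread_mutex_unlock(&sp->lock);
		pthread_mutex_unlock(&list_lock);
		return;
	}
	/* Last hold: unlink under list_lock, as remque() does above. */
	sp->back->forw = sp->forw;
	sp->forw->back = sp->back;
	pthread_mutex_unlock(&sp->lock);
	pthread_mutex_unlock(&list_lock);

	pthread_mutex_destroy(&sp->lock);
	free(sp);
}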
Example #4
static void
fnode_destructor(void *buf, void *cdrarg)
{
	fifodata_t *fdp = buf;
	fifolock_t *flp = &fdp->fifo_lock;
	fifonode_t *fnp = &fdp->fifo_fnode[0];
	size_t size = (uintptr_t)cdrarg;

	mutex_destroy(&flp->flk_lock);
	cv_destroy(&flp->flk_wait_cv);
	ASSERT(flp->flk_ocsync == 0);

	while ((char *)fnp < (char *)buf + size) {

		vnode_t *vp = FTOV(fnp);

		if (vp == NULL) {
			return; /* constructor failed here */
		}

		ASSERT(fnp->fn_mp == NULL);
		ASSERT(fnp->fn_count == 0);
		ASSERT(fnp->fn_lock == flp);
		ASSERT(fnp->fn_open == 0);
		ASSERT(fnp->fn_insync == 0);
		ASSERT(fnp->fn_rsynccnt == 0 && fnp->fn_wsynccnt == 0);
		ASSERT(fnp->fn_wwaitcnt == 0);
		ASSERT(fnp->fn_pcredp == NULL);
		ASSERT(vn_matchops(vp, fifo_vnodeops));
		ASSERT(vp->v_stream == NULL);
		ASSERT(vp->v_type == VFIFO);
		ASSERT(vp->v_data == (caddr_t)fnp);
		ASSERT((vp->v_flag & (VNOMAP|VNOSWAP)) == (VNOMAP|VNOSWAP));

		cv_destroy(&fnp->fn_wait_cv);
		vn_invalid(vp);
		vn_free(vp);

		fnp++;
	}
}
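fnode_destructor() must cope with a buffer whose constructor failed partway: construction proceeds front to back, so the first fnode with a NULL vnode marks where it stopped, and the destructor frees only the constructed prefix. A user-space sketch of the pairing, using a hypothetical elem_t and assuming, as the NULL check above implies, that a partially constructed buffer can still be handed to the destructor:

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

/* Hypothetical cached element; resource is NULL until constructed. */
typedef struct elem {
	void *resource;
} elem_t;

static int
elem_constructor(void *buf, void *cdrarg, int kmflags)
{
	size_t size = (uintptr_t)cdrarg;	/* cdrarg carries buffer size */
	elem_t *ep;

	(void) kmflags;		/* unused in this sketch */
	for (ep = buf; (char *)ep < (char *)buf + size; ep++) {
		if ((ep->resource = malloc(64)) == NULL)
			return (-1);	/* slot stays NULL; see destructor */
	}
	return (0);
}

static void
elem_destructor(void *buf, void *cdrarg)
{
	size_t size = (uintptr_t)cdrarg;
	elem_t *ep;

	for (ep = buf; (char *)ep < (char *)buf + size; ep++) {
		if (ep->resource == NULL)
			return;		/* constructor failed here */
		free(ep->resource);
	}
}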
/*
 * This routine destroys all the resources of an rnode
 * and finally the rnode itself.
 */
static void
destroy_rnode4(rnode4_t *rp)
{
	vnode_t *vp;
	vfs_t *vfsp;

	ASSERT(rp->r_deleg_type == OPEN_DELEGATE_NONE);

	vp = RTOV4(rp);
	vfsp = vp->v_vfsp;

	uninit_rnode4(rp);
	atomic_add_long((ulong_t *)&rnode4_new, -1);
#ifdef DEBUG
	clstat4_debug.nrnode.value.ui64--;
#endif
	kmem_cache_free(rnode4_cache, rp);
	vn_invalid(vp);
	vn_free(vp);
	VFS_RELE(vfsp);
}
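Note the ordering in destroy_rnode4(): it saves vp->v_vfsp first and performs VFS_RELE() only after the rnode and vnode are completely gone, so the filesystem cannot be unmounted or unloaded while any piece of the node still points into it. A sketch of that discipline, with a hypothetical fs_t hold count standing in for the vfs reference:

#include <stdlib.h>

/* Hypothetical container with a hold count; models vfs_t. */
typedef struct fs {
	int holds;
} fs_t;

typedef struct node {
	fs_t *fs;
	void *priv;
} node_t;

static void
fs_rele(fs_t *fsp)
{
	/* Once holds reaches zero the container may be torn down. */
	fsp->holds--;
}

void
node_destroy(node_t *np)
{
	fs_t *fsp = np->fs;	/* save before np is freed */

	free(np->priv);
	free(np);
	/*
	 * Release the container hold last, mirroring the trailing
	 * VFS_RELE() above: the container must outlive everything
	 * that points into it.
	 */
	fs_rele(fsp);
}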
Example #6
/* ARGSUSED */
static void
nm_inactive(vnode_t *vp, cred_t *crp, caller_context_t *ct)
{
	struct namenode *nodep = VTONM(vp);
	vfs_t *vfsp = vp->v_vfsp;

	mutex_enter(&vp->v_lock);
	ASSERT(vp->v_count >= 1);
	if (--vp->v_count != 0) {
		mutex_exit(&vp->v_lock);
		return;
	}
	mutex_exit(&vp->v_lock);
	if (!(nodep->nm_flag & NMNMNT)) {
		ASSERT(nodep->nm_filep->f_vnode == nodep->nm_filevp);
		(void) closef(nodep->nm_filep);
	}
	vn_invalid(vp);
	vn_free(vp);
	if (vfsp != &namevfs)
		VFS_RELE(vfsp);
	namenodeno_free(nodep->nm_vattr.va_nodeid);
	kmem_free(nodep, sizeof (struct namenode));
}
Example #7
void
pc_rele(struct pcnode *pcp)
{
	struct pcfs *fsp;
	struct vnode *vp;
	int err;

	vp = PCTOV(pcp);
	PC_DPRINTF1(8, "pc_rele vp=0x%p\n", (void *)vp);

	fsp = VFSTOPCFS(vp->v_vfsp);
	ASSERT(fsp->pcfs_flags & PCFS_LOCKED);

	rw_enter(&pcnodes_lock, RW_WRITER);
	pcp->pc_flags |= PC_RELEHOLD;

retry:
	if (vp->v_type != VDIR && (pcp->pc_flags & PC_INVAL) == 0) {
		/*
		 * If the file was removed while active it may be safely
		 * truncated now.
		 */

		if (pcp->pc_entry.pcd_filename[0] == PCD_ERASED) {
			(void) pc_truncate(pcp, 0);
		} else if (pcp->pc_flags & PC_CHG) {
			(void) pc_nodeupdate(pcp);
		}
		err = syncpcp(pcp, B_INVAL);
		if (err) {
			(void) syncpcp(pcp, B_INVAL | B_FORCE);
		}
	}
	if (vn_has_cached_data(vp)) {
		/*
		 * pvn_vplist_dirty will abort all old pages
		 */
		(void) pvn_vplist_dirty(vp, (u_offset_t)0,
		    pcfs_putapage, B_INVAL, (struct cred *)NULL);
	}

	(void) pc_syncfat(fsp);
	mutex_enter(&vp->v_lock);
	if (vn_has_cached_data(vp)) {
		mutex_exit(&vp->v_lock);
		goto retry;
	}
	ASSERT(!vn_has_cached_data(vp));

	vp->v_count--;  /* release our hold from vn_rele */
	if (vp->v_count > 0) { /* Is this check still needed? */
		PC_DPRINTF1(3, "pc_rele: pcp=0x%p HELD AGAIN!\n", (void *)pcp);
		mutex_exit(&vp->v_lock);
		pcp->pc_flags &= ~PC_RELEHOLD;
		rw_exit(&pcnodes_lock);
		return;
	}

	remque(pcp);
	rw_exit(&pcnodes_lock);
	/*
	 * XXX - old code had a check for !(pcp->pc_flags & PC_INVAL)
	 * here. Seems superfluous/incorrect, but then earlier on PC_INVAL
	 * was never set anywhere in PCFS. Now it is, and we _have_ to drop
	 * the file reference here. Else, we'd screw up umount/modunload.
	 */
	if (vp->v_type == VREG) {
		fsp->pcfs_frefs--;
	}
	fsp->pcfs_nrefs--;
	VFS_RELE(vp->v_vfsp);

	if (fsp->pcfs_nrefs < 0) {
		panic("pc_rele: nrefs count");
	}
	if (fsp->pcfs_frefs < 0) {
		panic("pc_rele: frefs count");
	}

	mutex_exit(&vp->v_lock);
	vn_invalid(vp);
	vn_free(vp);
	kmem_free(pcp, sizeof (struct pcnode));
}
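The retry loop exists because invalidating pages is not atomic with checking for them: while pc_rele() flushes, a page fault or write can create new pages against the vnode, so the check must be redone under v_lock and the flush repeated until the node is observed clean. A compressed sketch of that flush-and-recheck loop, with hypothetical has_cached_data()/flush_pages() standing in for vn_has_cached_data() and pvn_vplist_dirty():

#include <pthread.h>

/* Hypothetical node with a page count guarded by its lock. */
typedef struct node {
	pthread_mutex_t lock;
	int npages;
} node_t;

static int
has_cached_data(node_t *np)
{
	return (np->npages != 0);
}

static void
flush_pages(node_t *np)
{
	/* Models pvn_vplist_dirty(..., B_INVAL, ...). */
	pthread_mutex_lock(&np->lock);
	np->npages = 0;
	pthread_mutex_unlock(&np->lock);
}

void
node_drain(node_t *np)
{
	for (;;) {
		if (has_cached_data(np))
			flush_pages(np);
		/*
		 * Recheck under the lock: new pages may have been
		 * created between the flush and this check, which is
		 * why pc_rele() loops back to `retry'.
		 */
		pthread_mutex_lock(&np->lock);
		if (!has_cached_data(np))
			break;		/* clean; lock still held */
		pthread_mutex_unlock(&np->lock);
	}
	/* ... tear the node down while it is known to be clean ... */
	pthread_mutex_unlock(&np->lock);
}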
Example #8
/*
 * Remove a lnode from the table
 */
void
freelonode(lnode_t *lp)
{
	lnode_t *lt;
	lnode_t *ltprev = NULL;
	struct lfsnode *lfs, *nextlfs;
	struct vfs *vfsp;
	struct vnode *vp = ltov(lp);
	struct vnode *realvp = realvp(vp);
	struct loinfo *li = vtoli(vp->v_vfsp);

#ifdef LODEBUG
	lo_dprint(4, "freelonode lp %p hash %d\n",
	    lp, ltablehash(lp->lo_vp, li));
#endif
	TABLE_LOCK_ENTER(lp->lo_vp, li);

	mutex_enter(&vp->v_lock);
	if (vp->v_count > 1) {
		vp->v_count--;	/* release our hold from vn_rele */
		mutex_exit(&vp->v_lock);
		TABLE_LOCK_EXIT(lp->lo_vp, li);
		return;
	}
	mutex_exit(&vp->v_lock);

	for (lt = TABLE_BUCKET(lp->lo_vp, li); lt != NULL;
	    ltprev = lt, lt = lt->lo_next) {
		if (lt == lp) {
#ifdef LODEBUG
			lo_dprint(4, "freeing %p, vfsp %p\n",
			    vp, vp->v_vfsp);
#endif
			atomic_dec_32(&li->li_refct);
			vfsp = vp->v_vfsp;
			vn_invalid(vp);
			if (vfsp != li->li_mountvfs) {
				mutex_enter(&li->li_lfslock);
				/*
				 * Check for unused lfs
				 */
				lfs = li->li_lfs;
				while (lfs != NULL) {
					nextlfs = lfs->lfs_next;
					if (vfsp == &lfs->lfs_vfs) {
						lfs_rele(lfs, li);
						break;
					}
					if (lfs->lfs_vfs.vfs_count == 1) {
						/*
						 * Lfs is idle
						 */
						freelfsnode(lfs, li);
					}
					lfs = nextlfs;
				}
				mutex_exit(&li->li_lfslock);
			}
			if (ltprev == NULL) {
				TABLE_BUCKET(lt->lo_vp, li) = lt->lo_next;
			} else {
				ltprev->lo_next = lt->lo_next;
			}
			TABLE_COUNT(lt->lo_vp, li)--;
			TABLE_LOCK_EXIT(lt->lo_vp, li);
			kmem_cache_free(lnode_cache, lt);
			vn_free(vp);
			VN_RELE(realvp);
			return;
		}
	}
	panic("freelonode");
	/*NOTREACHED*/
}
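The bucket walk in freelonode() is the standard unlink from a singly linked hash chain: track the predecessor, splice at the head or in the middle accordingly, and treat "not found" as fatal since the node is known to be hashed. The same shape in isolation, using a hypothetical hnode_t (this sketch returns -1 where freelonode() panics):

#include <stddef.h>

/* Hypothetical hash-table node, singly linked per bucket. */
typedef struct hnode {
	struct hnode *next;
	int key;
} hnode_t;

int
bucket_remove(hnode_t **bucketp, hnode_t *np)
{
	hnode_t *p, *prev = NULL;

	for (p = *bucketp; p != NULL; prev = p, p = p->next) {
		if (p == np) {
			if (prev == NULL)
				*bucketp = p->next;	/* head of bucket */
			else
				prev->next = p->next;
			return (0);
		}
	}
	return (-1);	/* not in bucket */
}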
static vnode_t *
make_rnode4(nfs4_sharedfh_t *fh, r4hashq_t *rhtp, struct vfs *vfsp,
    struct vnodeops *vops,
    int (*putapage)(vnode_t *, page_t *, u_offset_t *, size_t *, int, cred_t *),
    int *newnode, cred_t *cr)
{
	rnode4_t *rp;
	rnode4_t *trp;
	vnode_t *vp;
	mntinfo4_t *mi;

	ASSERT(RW_READ_HELD(&rhtp->r_lock));

	mi = VFTOMI4(vfsp);

start:
	if ((rp = r4find(rhtp, fh, vfsp)) != NULL) {
		vp = RTOV4(rp);
		*newnode = 0;
		return (vp);
	}
	rw_exit(&rhtp->r_lock);

	mutex_enter(&rp4freelist_lock);

	if (rp4freelist != NULL && rnode4_new >= nrnode) {
		rp = rp4freelist;
		rp4_rmfree(rp);
		mutex_exit(&rp4freelist_lock);

		vp = RTOV4(rp);

		if (rp->r_flags & R4HASHED) {
			rw_enter(&rp->r_hashq->r_lock, RW_WRITER);
			mutex_enter(&vp->v_lock);
			if (vp->v_count > 1) {
				vp->v_count--;
				mutex_exit(&vp->v_lock);
				rw_exit(&rp->r_hashq->r_lock);
				rw_enter(&rhtp->r_lock, RW_READER);
				goto start;
			}
			mutex_exit(&vp->v_lock);
			rp4_rmhash_locked(rp);
			rw_exit(&rp->r_hashq->r_lock);
		}

		r4inactive(rp, cr);

		mutex_enter(&vp->v_lock);
		if (vp->v_count > 1) {
			vp->v_count--;
			mutex_exit(&vp->v_lock);
			rw_enter(&rhtp->r_lock, RW_READER);
			goto start;
		}
		mutex_exit(&vp->v_lock);
		vn_invalid(vp);

		/*
		 * destroy old locks before bzero'ing and
		 * recreating the locks below.
		 */
		uninit_rnode4(rp);

		/*
		 * Make sure that if rnode is recycled then
		 * VFS count is decremented properly before
		 * reuse.
		 */
		VFS_RELE(vp->v_vfsp);
		vn_reinit(vp);
	} else {
		vnode_t *new_vp;

		mutex_exit(&rp4freelist_lock);

		rp = kmem_cache_alloc(rnode4_cache, KM_SLEEP);
		new_vp = vn_alloc(KM_SLEEP);

		atomic_add_long((ulong_t *)&rnode4_new, 1);
#ifdef DEBUG
		clstat4_debug.nrnode.value.ui64++;
#endif
		vp = new_vp;
	}

	bzero(rp, sizeof (*rp));
	rp->r_vnode = vp;
	nfs_rw_init(&rp->r_rwlock, NULL, RW_DEFAULT, NULL);
	nfs_rw_init(&rp->r_lkserlock, NULL, RW_DEFAULT, NULL);
	mutex_init(&rp->r_svlock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&rp->r_statelock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&rp->r_statev4_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&rp->r_os_lock, NULL, MUTEX_DEFAULT, NULL);
	rp->created_v4 = 0;
	list_create(&rp->r_open_streams, sizeof (nfs4_open_stream_t),
	    offsetof(nfs4_open_stream_t, os_node));
	rp->r_lo_head.lo_prev_rnode = &rp->r_lo_head;
	rp->r_lo_head.lo_next_rnode = &rp->r_lo_head;
	cv_init(&rp->r_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&rp->r_commit.c_cv, NULL, CV_DEFAULT, NULL);
	rp->r_flags = R4READDIRWATTR;
	rp->r_fh = fh;
	rp->r_hashq = rhtp;
	sfh4_hold(rp->r_fh);
	rp->r_server = mi->mi_curr_serv;
	rp->r_deleg_type = OPEN_DELEGATE_NONE;
	rp->r_deleg_needs_recovery = OPEN_DELEGATE_NONE;
	nfs_rw_init(&rp->r_deleg_recall_lock, NULL, RW_DEFAULT, NULL);

	rddir4_cache_create(rp);
	rp->r_putapage = putapage;
	vn_setops(vp, vops);
	vp->v_data = (caddr_t)rp;
	vp->v_vfsp = vfsp;
	VFS_HOLD(vfsp);
	vp->v_type = VNON;
	if (isrootfh(fh, rp))
		vp->v_flag = VROOT;
	vn_exists(vp);

	/*
	 * There is a race window in which another thread may have
	 * allocated an rnode for this file handle while no locks were
	 * held, so check the hash table again and recover if one is
	 * found.
	 */
	rw_enter(&rhtp->r_lock, RW_WRITER);
	if ((trp = r4find(rhtp, fh, vfsp)) != NULL) {
		vp = RTOV4(trp);
		*newnode = 0;
		rw_exit(&rhtp->r_lock);
		rp4_addfree(rp, cr);
		rw_enter(&rhtp->r_lock, RW_READER);
		return (vp);
	}
	rp4_addhash(rp);
	*newnode = 1;
	return (vp);
}
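make_rnode4() is a lookup-or-create under a dropped lock: fail the read-locked lookup, release the lock to allocate (allocation can block), then retake the lock as writer and look again, discarding the new node if another thread won the race. A user-space sketch of that protocol, with a hypothetical single-bucket table and obj_t; the real code also recycles nodes from rp4freelist and parks the loser via rp4_addfree(), which the sketch simplifies to a free():

#include <pthread.h>
#include <stdlib.h>

/* Hypothetical keyed object on a single hash bucket. */
typedef struct obj {
	struct obj *next;
	int key;
} obj_t;

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;
static obj_t *table;

static obj_t *
find_locked(int key)
{
	obj_t *op;

	for (op = table; op != NULL; op = op->next)
		if (op->key == key)
			return (op);
	return (NULL);
}

/*
 * Called with table_lock held as reader, and returns with the lock
 * held, matching make_rnode4()'s contract.
 */
obj_t *
obj_findcreate(int key, int *newnode)
{
	obj_t *op, *top;

	if ((op = find_locked(key)) != NULL) {
		*newnode = 0;
		return (op);
	}
	pthread_rwlock_unlock(&table_lock);

	/* Allocate with no locks held; this may block. */
	if ((op = malloc(sizeof (*op))) == NULL) {
		pthread_rwlock_rdlock(&table_lock);
		return (NULL);
	}
	op->key = key;

	/*
	 * Re-check under the writer lock: another thread may have
	 * inserted the same key while the lock was dropped.  If so,
	 * discard our copy and return the winner's.
	 */
	pthread_rwlock_wrlock(&table_lock);
	if ((top = find_locked(key)) != NULL) {
		free(op);
		*newnode = 0;
		return (top);
	}
	op->next = table;
	table = op;
	*newnode = 1;
	return (op);
}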