Example #1
int
fdesc_allocvp(fdntype ftype, int ix, struct mount *mp, struct vnode **vpp)
{
	struct fdhashhead *fc;
	struct fdescnode *fd;
	int error = 0;

	fc = FD_NHASH(ix);
loop:
	LIST_FOREACH(fd, fc, fd_hash) {
		if (fd->fd_ix == ix && fd->fd_vnode->v_mount == mp) {
			if (vget(fd->fd_vnode, LK_EXCLUSIVE|LK_SLEEPFAIL))
				goto loop;
			*vpp = fd->fd_vnode;
			return (error);
		}
	}

	/*
	 * otherwise lock the array while we call getnewvnode
	 * since that can block.
	 */
	if (fdcache_lock & FDL_LOCKED) {
		fdcache_lock |= FDL_WANT;
		tsleep((caddr_t) &fdcache_lock, 0, "fdalvp", 0);
		goto loop;
	}
	fdcache_lock |= FDL_LOCKED;

	/*
	 * Do the MALLOC before the getnewvnode since doing so afterward
	 * might cause a bogus v_data pointer to get dereferenced
	 * elsewhere if MALLOC should block.
	 */
	fd = kmalloc(sizeof(struct fdescnode), M_TEMP, M_WAITOK);

	error = getnewvnode(VT_FDESC, mp, vpp, 0, 0);
	if (error) {
		kfree(fd, M_TEMP);
		goto out;
	}
	(*vpp)->v_data = fd;
	fd->fd_vnode = *vpp;
	fd->fd_type = ftype;
	fd->fd_fd = -1;
	fd->fd_ix = ix;
	LIST_INSERT_HEAD(fc, fd, fd_hash);
	vx_unlock(*vpp);

out:
	fdcache_lock &= ~FDL_LOCKED;

	if (fdcache_lock & FDL_WANT) {
		fdcache_lock &= ~FDL_WANT;
		wakeup((caddr_t) &fdcache_lock);
	}

	return (error);
}
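The fdcache_lock handling above is a hand-rolled sleep lock: a plain flag word (FDL_LOCKED/FDL_WANT) plus tsleep()/wakeup() serializes callers across the blocking getnewvnode() call. The sketch below is a minimal user-space analogue of that flag-and-wakeup pattern, with a pthread mutex/condvar standing in for the kernel's tsleep()/wakeup(); the names are reused only for comparison and this is not the kernel API.

/*
 * Illustrative user-space analogue of the FDL_LOCKED/FDL_WANT pattern:
 * one flag word protected by a mutex, with a condition variable playing
 * the role of tsleep()/wakeup().
 */
#include <pthread.h>

#define FDL_LOCKED	0x01
#define FDL_WANT	0x02

static int fdcache_lock;
static pthread_mutex_t fdl_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  fdl_cv  = PTHREAD_COND_INITIALIZER;

static void
fdl_acquire(void)
{
	pthread_mutex_lock(&fdl_mtx);
	while (fdcache_lock & FDL_LOCKED) {
		fdcache_lock |= FDL_WANT;
		pthread_cond_wait(&fdl_cv, &fdl_mtx);	/* tsleep() analogue */
	}
	fdcache_lock |= FDL_LOCKED;
	pthread_mutex_unlock(&fdl_mtx);
}

static void
fdl_release(void)
{
	pthread_mutex_lock(&fdl_mtx);
	fdcache_lock &= ~FDL_LOCKED;
	if (fdcache_lock & FDL_WANT) {
		fdcache_lock &= ~FDL_WANT;
		pthread_cond_broadcast(&fdl_cv);	/* wakeup() analogue */
	}
	pthread_mutex_unlock(&fdl_mtx);
}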
Example #2
/*
 * Create a new filesystem syncer vnode for the specified mount point.
 * This vnode is placed on the worklist and is responsible for sync'ing
 * the filesystem.
 *
 * NOTE: read-only mounts are also placed on the worklist.  The filesystem
 * sync code is also responsible for cleaning up vnodes.
 */
int
vfs_allocate_syncvnode(struct mount *mp)
{
	struct vnode *vp;
	static long start, incr, next;
	int error;

	/* Allocate a new vnode */
	error = getspecialvnode(VT_VFS, mp, &sync_vnode_vops_p, &vp, 0, 0);
	if (error) {
		mp->mnt_syncer = NULL;
		return (error);
	}
	vp->v_type = VNON;
	/*
	 * Place the vnode onto the syncer worklist. We attempt to
	 * scatter them about on the list so that they will go off
	 * at evenly distributed times even if all the filesystems
	 * are mounted at once.
	 */
	next += incr;
	if (next == 0 || next > SYNCER_MAXDELAY) {
		start /= 2;
		incr /= 2;
		if (start == 0) {
			start = SYNCER_MAXDELAY / 2;
			incr = SYNCER_MAXDELAY;
		}
		next = start;
	}

	/*
	 * Only put the syncer vnode onto the syncer list if we have a
	 * syncer thread.  Some VFSes (e.g. NULLFS) don't need a syncer
	 * thread.
	 */
	if (mp->mnt_syncer_ctx)
		vn_syncer_add(vp, syncdelay > 0 ? next % syncdelay : 0);

	/*
	 * The mnt_syncer field inherits the vnode reference, which is
	 * held until later decommissioning.
	 */
	mp->mnt_syncer = vp;
	vx_unlock(vp);
	return (0);
}
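The scattering arithmetic above steps next forward by incr each call and, whenever it runs past SYNCER_MAXDELAY (or hits zero), halves start and incr and re-seeds them at SYNCER_MAXDELAY/2 and SYNCER_MAXDELAY, so successive mounts land on well-separated slots of the syncer wheel. The standalone sketch below reproduces just that computation; the SYNCER_MAXDELAY and syncdelay values are assumptions chosen for illustration.

/*
 * Standalone sketch of the syncer slot-scattering logic.  The constants
 * are assumed values, not the kernel's configuration.
 */
#include <stdio.h>

#define SYNCER_MAXDELAY	32
static int syncdelay = 30;

static long start, incr, next;

static long
next_slot(void)
{
	next += incr;
	if (next == 0 || next > SYNCER_MAXDELAY) {
		start /= 2;
		incr /= 2;
		if (start == 0) {
			start = SYNCER_MAXDELAY / 2;
			incr = SYNCER_MAXDELAY;
		}
		next = start;
	}
	return (syncdelay > 0 ? next % syncdelay : 0);
}

int
main(void)
{
	for (int i = 0; i < 8; i++)
		printf("mount %d -> slot %ld\n", i, next_slot());
	return (0);
}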
Example #3
/*
 * Release a ref on an active or inactive vnode.
 *
 * Caller has no other requirements.
 *
 * If VREF_FINALIZE is set this will deactivate the vnode on the 1->0
 * transition, otherwise we leave the vnode in the active list and
 * do a lockless transition to 0, which is very important for the
 * critical path.
 *
 * (vrele() is not called when a vnode is being destroyed w/kfree)
 */
void
vrele(struct vnode *vp)
{
	for (;;) {
		int count = vp->v_refcnt;
		cpu_ccfence();
		KKASSERT((count & VREF_MASK) > 0);
		KKASSERT(vp->v_state == VS_ACTIVE ||
			 vp->v_state == VS_INACTIVE);

		/*
		 * 2+ case
		 */
		if ((count & VREF_MASK) > 1) {
			if (atomic_cmpset_int(&vp->v_refcnt, count, count - 1))
				break;
			continue;
		}

		/*
		 * 1->0 transition case must handle possible finalization.
		 * When finalizing we transition 1->0x40000000.  Note that
		 * cachedvnodes is only adjusted on transitions to ->0.
		 *
		 * WARNING! VREF_TERMINATE can be cleared at any point
		 *	    when the refcnt is non-zero (by vget()) and
		 *	    the vnode has not been reclaimed.  Thus
		 *	    transitions out of VREF_TERMINATE do not have
		 *	    to mess with cachedvnodes.
		 */
		if (count & VREF_FINALIZE) {
			vx_lock(vp);
			if (atomic_cmpset_int(&vp->v_refcnt,
					      count, VREF_TERMINATE)) {
				vnode_terminate(vp);
				break;
			}
			vx_unlock(vp);
		} else {
			if (atomic_cmpset_int(&vp->v_refcnt, count, 0)) {
				atomic_add_int(&mycpu->gd_cachedvnodes, 1);
				break;
			}
		}
		/* retry */
	}
}
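vrele() above is built around a lockless compare-and-swap loop: load the refcount, pick the transition, and retry if another CPU changed the count in between. The sketch below models only the common 2-or-more fast path in user space with C11 atomics; atomic_compare_exchange_weak() stands in for atomic_cmpset_int(), the VREF_MASK value is an assumed layout, and the VREF_FINALIZE/VREF_TERMINATE machinery is deliberately left out.

/*
 * User-space sketch of the lockless CAS decrement loop.  Finalization of
 * the 1->0 transition is omitted; this only drops the count.
 */
#include <stdatomic.h>
#include <assert.h>

#define VREF_MASK	0x3fffffff	/* assumed bit layout, for illustration */

static void
ref_release(atomic_uint *refcnt)
{
	for (;;) {
		unsigned int count = atomic_load(refcnt);

		assert((count & VREF_MASK) > 0);

		if ((count & VREF_MASK) > 1) {
			/* 2+ case: plain decrement, retry on interference */
			if (atomic_compare_exchange_weak(refcnt, &count,
							 count - 1))
				break;
			continue;
		}

		/*
		 * 1->0 case: a real implementation would decide here
		 * whether to finalize; this sketch simply drops to 0.
		 */
		if (atomic_compare_exchange_weak(refcnt, &count, 0))
			break;
	}
}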
Example #4
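/*
 * Pull up to n vnodes off the free list via cleanfreevnode(), free each
 * one, and adjust numvnodes.  Returns the number of vnodes actually freed.
 */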
int
freesomevnodes(int n)
{
	struct vnode *vp;
	int count = 0;

	while (n) {
		if ((vp = cleanfreevnode(n)) == NULL)
			break;
		vx_unlock(vp);
		--n;
		++count;
		kfree(vp, M_VNODE);
		atomic_add_int(&numvnodes, -1);
	}
	return(count);
}
Example #5
/*
 * This function is called on the 1->0 transition (which is actually
 * 1->VREF_TERMINATE) when VREF_FINALIZE is set, forcing deactivation
 * of the vnode.
 *
 * Additional vrefs are allowed to race but will not result in a reentrant
 * call to vnode_terminate() due to refcnt being VREF_TERMINATE.  This
 * prevents additional 1->0 transitions.
 *
 * ONLY A VGET() CAN REACTIVATE THE VNODE.
 *
 * Caller must hold the VX lock.
 *
 * NOTE: v_mount may be NULL due to assignment to dead_vnode_vops
 *
 * NOTE: The vnode may be marked inactive with dirty buffers
 *	 or dirty pages in its cached VM object still present.
 *
 * NOTE: VS_FREE should not be set on entry (the vnode was expected to
 *	 previously be active).  We lose control of the vnode the instant
 *	 it is placed on the free list.
 *
 *	 The VX lock is required when transitioning to VS_CACHED but is
 *	 not sufficient for the vshouldfree() interlocked test or when
 *	 transitioning away from VS_CACHED.  v_spin is also required for
 *	 those cases.
 */
static
void
vnode_terminate(struct vnode *vp)
{
	KKASSERT(vp->v_state == VS_ACTIVE);

	if ((vp->v_flag & VINACTIVE) == 0) {
		_vsetflags(vp, VINACTIVE);
		if (vp->v_mount)
			VOP_INACTIVE(vp);
		/* might deactivate page */
	}
	spin_lock(&vp->v_spin);
	_vinactive(vp);
	spin_unlock(&vp->v_spin);

	vx_unlock(vp);
}
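vnode_terminate() above marks the vnode inactive exactly once, runs the filesystem's VOP_INACTIVE() hook, and only then moves the vnode onto the inactive list while holding v_spin. The sketch below is a rough user-space analogue of that sequence; the structure, the hook, and the pthread spinlock are illustrative stand-ins rather than the kernel interfaces, and the caller is assumed to hold the object's exclusive lock just as vnode_terminate() requires the VX lock.

/*
 * Illustrative analogue of the deactivation sequence: flag the object
 * inactive once, run its inactive hook, then change state under the
 * object's spinlock.
 */
#include <pthread.h>

enum obj_state { OBJ_ACTIVE, OBJ_INACTIVE };

struct obj {
	int			flags;
#define OBJ_FLAG_INACTIVE	0x01
	enum obj_state		state;
	pthread_spinlock_t	spin;
	void			(*inactive_hook)(struct obj *);
};

static void
obj_terminate(struct obj *op)
{
	/* caller holds the object's exclusive (VX-style) lock */
	if ((op->flags & OBJ_FLAG_INACTIVE) == 0) {
		op->flags |= OBJ_FLAG_INACTIVE;
		if (op->inactive_hook)
			op->inactive_hook(op);	/* VOP_INACTIVE() analogue */
	}
	pthread_spin_lock(&op->spin);
	op->state = OBJ_INACTIVE;		/* _vinactive() analogue */
	pthread_spin_unlock(&op->spin);
}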
/*
 * vp is the current namei directory
 * cnp is the name to locate in that directory...
 *
 * portal_lookup(struct vnode *a_dvp, struct vnode **a_vpp,
 *		 struct componentname *a_cnp)
 */
static int
portal_lookup(struct vop_old_lookup_args *ap)
{
	struct componentname *cnp = ap->a_cnp;
	struct vnode **vpp = ap->a_vpp;
	struct vnode *dvp = ap->a_dvp;
	char *pname = cnp->cn_nameptr;
	struct portalnode *pt;
	int error;
	struct vnode *fvp = 0;
	char *path;
	int size;

	*vpp = NULLVP;

	if (cnp->cn_nameiop == NAMEI_DELETE || cnp->cn_nameiop == NAMEI_RENAME)
		return (EROFS);

	if (cnp->cn_namelen == 1 && *pname == '.') {
		*vpp = dvp;
		vref(dvp);
		return (0);
	}

	/*
	 * Do the MALLOC before the getnewvnode since doing so afterward
	 * might cause a bogus v_data pointer to get dereferenced
	 * elsewhere if MALLOC should block.
	 */
	pt = kmalloc(sizeof(struct portalnode), M_TEMP, M_WAITOK);

	error = getnewvnode(VT_PORTAL, dvp->v_mount, &fvp, 0, 0);
	if (error) {
		kfree(pt, M_TEMP);
		goto bad;
	}
	fvp->v_type = VREG;
	fvp->v_data = pt;

	/*
	 * Save all of the remaining pathname and
	 * advance the namei next pointer to the end
	 * of the string.
	 */
	for (size = 0, path = pname; *path; path++)
		size++;
	cnp->cn_consume = size - cnp->cn_namelen;

	pt->pt_arg = kmalloc(size+1, M_TEMP, M_WAITOK);
	pt->pt_size = size+1;
	bcopy(pname, pt->pt_arg, pt->pt_size);
	pt->pt_fileid = portal_fileid++;

	*vpp = fvp;
	vx_unlock(fvp);
	return (0);

bad:
	if (fvp)
		vrele(fvp);
	return (error);
}
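portal_lookup() deliberately swallows the entire remaining pathname: whatever follows the current component becomes the portal argument in pt_arg, and cn_consume tells the name lookup code how many characters beyond the component it consumed. The sketch below demonstrates just that arithmetic with plain libc calls; the pathname and component length are made-up values.

/*
 * Sketch of the "consume the rest of the path" arithmetic, with
 * illustrative values only.
 */
#include <stdio.h>
#include <string.h>

int
main(void)
{
	const char *pname = "tcp/localhost/smtp";	/* remaining pathname */
	size_t namelen = 3;				/* current component: "tcp" */
	size_t size = strlen(pname);
	size_t consume = size - namelen;		/* cn_consume analogue */

	printf("portal argument: \"%s\" (%zu bytes incl. NUL)\n",
	       pname, size + 1);
	printf("extra characters consumed: %zu\n", consume);
	return (0);
}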