Example #1
/*
 * NOTE: VAGE is always cleared when calling VOP_OPEN().
 */
int
vop_open(struct vop_ops *ops, struct vnode *vp, int mode, struct ucred *cred,
	struct file *fp)
{
	struct vop_open_args ap;
	VFS_MPLOCK_DECLARE;
	int error;

	/*
	 * Decrement 3-2-1-0.  Does not decrement beyond 0.
	 */
	if (vp->v_flag & VAGE0) {
		vclrflags(vp, VAGE0);
	} else if (vp->v_flag & VAGE1) {
		vclrflags(vp, VAGE1);
		vsetflags(vp, VAGE0);
	}

	ap.a_head.a_desc = &vop_open_desc;
	ap.a_head.a_ops = ops;
	ap.a_vp = vp;
	ap.a_fp = fp;
	ap.a_mode = mode;
	ap.a_cred = cred;

	VFS_MPLOCK1(vp->v_mount);
	DO_OPS(ops, error, &ap, vop_open);
	VFS_MPUNLOCK(vp->v_mount);
	return(error);
}
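
The VAGE0/VAGE1 pair above forms a two-bit aging counter that each open decrements toward zero. A minimal user-space sketch of that decrement, assuming the counter value is encoded as (VAGE1 << 1) | VAGE0; the flag values and helpers below are illustrative, not the kernel's:

#include <stdio.h>

#define VAGE0	0x0001		/* illustrative values, not the kernel's */
#define VAGE1	0x0002

static int
vage_decrement(int flags)
{
	if (flags & VAGE0) {
		flags &= ~VAGE0;	/* 3 -> 2, or 1 -> 0 */
	} else if (flags & VAGE1) {
		flags &= ~VAGE1;	/* 2 -> 1 */
		flags |= VAGE0;
	}				/* 0 stays 0 */
	return (flags);
}

int
main(void)
{
	int flags = VAGE1 | VAGE0;	/* counter starts at 3 */

	while (flags != 0) {
		flags = vage_decrement(flags);
		printf("counter now %d\n",
		    ((flags & VAGE1) ? 2 : 0) | ((flags & VAGE0) ? 1 : 0));
	}
	return (0);
}
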
Example #2
/*
 * Add a ref to a vnode's existing VM object, return the object or
 * NULL if the vnode did not have one.  This does not create the
 * object (we can't since we don't know what the proper blocksize/boff
 * is to match the VFS's use of the buffer cache).
 */
vm_object_t
vnode_pager_reference(struct vnode *vp)
{
	vm_object_t object;

	/*
	 * Prevent race condition when allocating the object. This
	 * can happen with NFS vnodes since the nfsnode isn't locked.
	 *
	 * Serialize potential vnode/object teardowns and interlocks
	 */
	lwkt_gettoken(&vp->v_token);
	while (vp->v_flag & VOLOCK) {
		vsetflags(vp, VOWANT);
		tsleep(vp, 0, "vnpobj", 0);
	}
	vsetflags(vp, VOLOCK);
	lwkt_reltoken(&vp->v_token);

	/*
	 * Prevent race conditions against deallocation of the VM
	 * object.
	 */
	while ((object = vp->v_object) != NULL) {
		vm_object_hold(object);
		if ((object->flags & OBJ_DEAD) == 0)
			break;
		vm_object_dead_sleep(object, "vadead");
		vm_object_drop(object);
	}

	/*
	 * The object is expected to exist; the caller will handle
	 * NULL returns if it does not.
	 */
	if (object) {
		object->ref_count++;
		vref(vp);
	}

	lwkt_gettoken(&vp->v_token);
	vclrflags(vp, VOLOCK);
	if (vp->v_flag & VOWANT) {
		vclrflags(vp, VOWANT);
		wakeup(vp);
	}
	lwkt_reltoken(&vp->v_token);
	if (object)
		vm_object_drop(object);

	return (object);
}
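
The VOLOCK/VOWANT sequence above is a classic flag-based interlock: sleep while the lock flag is held, set a wanted bit before sleeping, and have the unlocker wake sleepers only if someone asked. A hedged user-space analogue, with a pthread mutex standing in for vp->v_token and a condition variable standing in for tsleep()/wakeup(); all names here are illustrative:

#include <pthread.h>
#include <stdbool.h>

struct obj {
	pthread_mutex_t	mtx;	/* stands in for vp->v_token */
	pthread_cond_t	cv;	/* stands in for tsleep()/wakeup() on vp */
	bool		locked;	/* VOLOCK */
	bool		wanted;	/* VOWANT */
};

static struct obj o = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false, false
};

static void
obj_flag_lock(struct obj *op)
{
	pthread_mutex_lock(&op->mtx);
	while (op->locked) {
		op->wanted = true;
		pthread_cond_wait(&op->cv, &op->mtx);
	}
	op->locked = true;
	pthread_mutex_unlock(&op->mtx);
}

static void
obj_flag_unlock(struct obj *op)
{
	pthread_mutex_lock(&op->mtx);
	op->locked = false;
	if (op->wanted) {
		op->wanted = false;
		pthread_cond_broadcast(&op->cv);
	}
	pthread_mutex_unlock(&op->mtx);
}

int
main(void)
{
	obj_flag_lock(&o);	/* ... critical work ... */
	obj_flag_unlock(&o);
	return (0);
}
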
Example #3
/*
 * This opens /dev/tty.  Because multiple opens of /dev/tty only
 * generate a single open to the actual tty, the file modes are
 * locked to FREAD|FWRITE.
 */
static	int
cttyopen(struct dev_open_args *ap)
{
	struct proc *p = curproc;
	struct vnode *ttyvp;
	int error;

	KKASSERT(p);
retry:
	if ((ttyvp = cttyvp(p)) == NULL)
		return (ENXIO);
	if (ttyvp->v_flag & VCTTYISOPEN)
		return (0);

	/*
	 * Messy interlock: don't let the vnode go away while we try to
	 * lock it, and check for a race after we might have blocked.
	 */
	vhold(ttyvp);
	vn_lock(ttyvp, LK_EXCLUSIVE | LK_RETRY);
	if (ttyvp != cttyvp(p) || (ttyvp->v_flag & VCTTYISOPEN)) {
		kprintf("Warning: cttyopen: race avoided\n");
		vn_unlock(ttyvp);
		vdrop(ttyvp);
		goto retry;
	}
	vsetflags(ttyvp, VCTTYISOPEN);
	error = VOP_OPEN(ttyvp, FREAD|FWRITE, ap->a_cred, NULL);
	if (error)
		vclrflags(ttyvp, VCTTYISOPEN);
	vn_unlock(ttyvp);
	vdrop(ttyvp);
	return(error);
}
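
The open path above uses a hold/lock/recheck idiom: take a hold so the vnode cannot be freed, acquire the blocking lock, then re-test both the pointer and the flag because the world may have changed while we slept. A single-threaded sketch of that control flow, with a stub lock that simulates losing the race once; every name here is hypothetical:

#include <stdio.h>

struct tty_obj {
	int refs;
	int is_open;
};

static struct tty_obj tty_a, tty_b;
static struct tty_obj *current = &tty_a;
static int lock_calls;

static struct tty_obj *
lookup(void)
{
	return (current);		/* stands in for cttyvp(p) */
}

static void
lock_obj(struct tty_obj *o)
{
	(void)o;
	/* Simulate the controlling tty changing while we blocked. */
	if (lock_calls++ == 0)
		current = &tty_b;
}

int
main(void)
{
	struct tty_obj *o;
	int retries = 0;

retry:
	o = lookup();
	o->refs++;			/* "vhold": keep the object alive */
	lock_obj(o);			/* may block; recheck afterward */
	if (o != lookup() || o->is_open) {
		o->refs--;		/* "vdrop", then start over */
		retries++;
		goto retry;
	}
	o->is_open = 1;
	o->refs--;
	printf("opened after %d retries\n", retries);	/* 1 */
	return (0);
}
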
Example #4
void
vclrobjdirty(struct vnode *vp)
{
	vclrflags(vp, VOBJDIRTY);
	if (vp->v_flag & VONWORKLST)
		vn_syncer_remove(vp);
}
Example #5
/*
 * vnode must be locked
 */
void
vclrisdirty(struct vnode *vp)
{
	vclrflags(vp, VISDIRTY);
	if (vp->v_flag & VONWORKLST)
		vn_syncer_remove(vp, 0);
}
Example #6
/*
 * Change access and modification times of the given vnode.
 * Caller should execute tmpfs_update on vp after a successful execution.
 * The vnode must be locked on entry and remain locked on exit.
 */
int
tmpfs_chtimes(struct vnode *vp, struct timespec *atime, struct timespec *mtime,
	      int vaflags, struct ucred *cred)
{
	struct tmpfs_node *node;

	KKASSERT(vn_islocked(vp));

	node = VP_TO_TMPFS_NODE(vp);

	/* Disallow this operation if the file system is mounted read-only. */
	if (vp->v_mount->mnt_flag & MNT_RDONLY)
		return EROFS;

	/* Immutable or append-only files cannot be modified, either. */
	if (node->tn_flags & (IMMUTABLE | APPEND))
		return EPERM;

	TMPFS_NODE_LOCK(node);
	if (atime->tv_sec != VNOVAL && atime->tv_nsec != VNOVAL)
		node->tn_status |= TMPFS_NODE_ACCESSED;

	if (mtime->tv_sec != VNOVAL && mtime->tv_nsec != VNOVAL) {
		node->tn_status |= TMPFS_NODE_MODIFIED;
		vclrflags(vp, VLASTWRITETS);
	}

	TMPFS_NODE_UNLOCK(node);

	tmpfs_itimes(vp, atime, mtime);

	KKASSERT(vn_islocked(vp));

	return 0;
}
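
The VNOVAL comparisons above give utimes-style semantics: a timestamp holding the sentinel means "leave this field alone". A small self-contained model of the same check; TS_NOVAL and the status bits are stand-ins for VNOVAL and the TMPFS_NODE_* flags:

#include <stdio.h>

#define TS_NOVAL	(-1L)		/* stand-in for VNOVAL */
#define ST_ACCESSED	0x1		/* stand-in for TMPFS_NODE_ACCESSED */
#define ST_MODIFIED	0x2		/* stand-in for TMPFS_NODE_MODIFIED */

struct ts {
	long tv_sec;
	long tv_nsec;
};

static void
apply_times(const struct ts *atime, const struct ts *mtime, int *status)
{
	if (atime->tv_sec != TS_NOVAL && atime->tv_nsec != TS_NOVAL)
		*status |= ST_ACCESSED;
	if (mtime->tv_sec != TS_NOVAL && mtime->tv_nsec != TS_NOVAL)
		*status |= ST_MODIFIED;
}

int
main(void)
{
	struct ts at = { 100, 0 };		/* set atime */
	struct ts mt = { TS_NOVAL, TS_NOVAL };	/* leave mtime alone */
	int status = 0;

	apply_times(&at, &mt, &status);
	printf("status 0x%x\n", status);	/* 0x1: atime only */
	return (0);
}
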
Example #7
/*
 * Removes the vnode from the syncer list.  Since we might block while
 * acquiring the syncer_token we have to [re]check conditions to determine
 * that it is ok to remove the vnode.
 *
 * Force removal if force != 0.  This can only occur during a forced unmount.
 *
 * vp->v_token held on call
 */
void
vn_syncer_remove(struct vnode *vp, int force)
{
	struct syncer_ctx *ctx;

	ctx = vp->v_mount->mnt_syncer_ctx;
	lwkt_gettoken(&ctx->sc_token);

	if ((vp->v_flag & (VISDIRTY | VONWORKLST | VOBJDIRTY)) == VONWORKLST &&
	    RB_EMPTY(&vp->v_rbdirty_tree)) {
		vclrflags(vp, VONWORKLST);
		LIST_REMOVE(vp, v_synclist);
	} else if (force && (vp->v_flag & VONWORKLST)) {
		vclrflags(vp, VONWORKLST);
		LIST_REMOVE(vp, v_synclist);
	}

	lwkt_reltoken(&ctx->sc_token);
}
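
The first condition above relies on a mask-equality idiom: (v_flag & (VISDIRTY | VONWORKLST | VOBJDIRTY)) == VONWORKLST holds only when VONWORKLST is set and both dirty bits are clear, i.e. the vnode is on the worklist with no remaining reason to be. A minimal demonstration; the flag values are illustrative:

#include <stdio.h>

#define VISDIRTY	0x1	/* illustrative values, not the kernel's */
#define VONWORKLST	0x2
#define VOBJDIRTY	0x4

static int
only_on_worklist(int flags)
{
	return ((flags & (VISDIRTY | VONWORKLST | VOBJDIRTY)) == VONWORKLST);
}

int
main(void)
{
	printf("%d\n", only_on_worklist(VONWORKLST));		 /* 1 */
	printf("%d\n", only_on_worklist(VONWORKLST | VISDIRTY)); /* 0 */
	printf("%d\n", only_on_worklist(VOBJDIRTY));		 /* 0 */
	return (0);
}
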
Example #8
/*
 * Removes the vnode from the syncer list.  Since we might block while
 * acquiring the syncer_token we have to recheck conditions.
 *
 * vp->v_token held on call
 */
void
vn_syncer_remove(struct vnode *vp)
{
	struct syncer_ctx *ctx;

	ctx = vn_get_syncer(vp);

	lwkt_gettoken(&ctx->sc_token);

	if ((vp->v_flag & VONWORKLST) && RB_EMPTY(&vp->v_rbdirty_tree)) {
		vclrflags(vp, VONWORKLST);
		LIST_REMOVE(vp, v_synclist);
	}

	lwkt_reltoken(&ctx->sc_token);
}
Example #9
static void
vnode_pager_dealloc(vm_object_t object)
{
	struct vnode *vp = object->handle;

	if (vp == NULL)
		panic("vnode_pager_dealloc: pager already dealloced");

	vm_object_pip_wait(object, "vnpdea");

	object->handle = NULL;
	object->type = OBJT_DEAD;
	vp->v_object = NULL;
	vp->v_filesize = NOOFFSET;
	vclrflags(vp, VTEXT | VOBJBUF);
	swap_pager_freespace_all(object);
}
Example #10
/*
 * The syncer vnode is no longer needed and is being decommissioned.
 * This can only occur when the last reference has been released on
 * mp->mnt_syncer, so mp->mnt_syncer had better be NULL.
 *
 * Modifications to the worklist must be protected with a critical
 * section.
 *
 *	sync_reclaim { struct vnode *a_vp }
 */
static int
sync_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct syncer_ctx *ctx;

	ctx = vn_get_syncer(vp);

	lwkt_gettoken(&ctx->sc_token);
	KKASSERT(vp->v_mount->mnt_syncer != vp);
	if (vp->v_flag & VONWORKLST) {
		LIST_REMOVE(vp, v_synclist);
		vclrflags(vp, VONWORKLST);
	}
	lwkt_reltoken(&ctx->sc_token);

	return (0);
}
Example #11
/*
 * This closes /dev/tty.  Because multiple opens of /dev/tty only
 * generate a single open to the actual tty, the file modes are
 * locked to FREAD|FWRITE.
 */
static int
cttyclose(struct dev_close_args *ap)
{
	struct proc *p = curproc;
	struct vnode *ttyvp;
	int error;

	KKASSERT(p);
retry:
	/*
	 * The tty may have been TIOCNOTTY'd, don't return an
	 * error on close.  We just have nothing to do.
	 */
	if ((ttyvp = cttyvp(p)) == NULL)
		return(0);
	if (ttyvp->v_flag & VCTTYISOPEN) {
		/*
		 * Avoid a nasty race if we block while getting the lock.
		 */
		vref(ttyvp);
		error = vn_lock(ttyvp, LK_EXCLUSIVE | LK_RETRY |
				       LK_FAILRECLAIM);
		if (error) {
			vrele(ttyvp);
			goto retry;
		}
		if (ttyvp != cttyvp(p) || (ttyvp->v_flag & VCTTYISOPEN) == 0) {
			kprintf("Warning: cttyclose: race avoided\n");
			vn_unlock(ttyvp);
			vrele(ttyvp);
			goto retry;
		}
		vclrflags(ttyvp, VCTTYISOPEN);
		error = VOP_CLOSE(ttyvp, FREAD|FWRITE);
		vn_unlock(ttyvp);
		vrele(ttyvp);
	} else {
		error = 0;
	}
	return(error);
}
Example #12
int
ufs_quotaoff(struct mount *mp, int type)
{
	struct vnode *qvp;
	struct ufsmount *ump = VFSTOUFS(mp);
	int error;
	struct scaninfo scaninfo;

	if ((qvp = ump->um_quotas[type]) == NULLVP)
		return (0);
	ump->um_qflags[type] |= QTF_CLOSING;

	/*
	 * Search vnodes associated with this mount point,
	 * deleting any references to the quota file being closed.
	 */
	scaninfo.rescan = 1;
	scaninfo.type = type;
	while (scaninfo.rescan) {
		scaninfo.rescan = 0;
		vmntvnodescan(mp, VMSC_GETVP, NULL, ufs_quotaoff_scan, &scaninfo);
	}
	ufs_dqflush(qvp);
	vclrflags(qvp, VSYSTEM);
	error = vn_close(qvp, FREAD|FWRITE);
	ump->um_quotas[type] = NULLVP;
	crfree(ump->um_cred[type]);
	ump->um_cred[type] = NOCRED;
	ump->um_qflags[type] &= ~QTF_CLOSING;
	for (type = 0; type < MAXQUOTAS; type++) {
		if (ump->um_quotas[type] != NULLVP)
			break;
	}
	if (type == MAXQUOTAS)
		mp->mnt_flag &= ~MNT_QUOTA;
	return (error);
}
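
The while loop above is a restart pattern: the scan callback raises scaninfo.rescan whenever a pass may have been disturbed (for instance after blocking), and the caller repeats full passes until one finishes with the flag still clear. A hypothetical, self-contained model of the loop:

#include <stdio.h>

struct scaninfo {
	int rescan;
};

static int passes;

static void
scan_pass(struct scaninfo *info)
{
	/* Pretend the first pass was disturbed and must be redone. */
	if (passes++ == 0)
		info->rescan = 1;
}

int
main(void)
{
	struct scaninfo scaninfo;

	scaninfo.rescan = 1;
	while (scaninfo.rescan) {
		scaninfo.rescan = 0;
		scan_pass(&scaninfo);
	}
	printf("clean after %d passes\n", passes);	/* 2 */
	return (0);
}
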
Example #13
/*
 * Mount a remote root fs via NFS.  This depends on the info in the
 * nfs_diskless structure that has been filled in properly by some primary
 * bootstrap.
 * It goes something like this:
 * - do enough of "ifconfig" by calling ifioctl() so that the system
 *   can talk to the server
 * - If nfs_diskless.mygateway is filled in, use that address as
 *   a default gateway.
 * - build the rootfs mount point and call mountnfs() to do the rest.
 */
int
nfs_mountroot(struct mount *mp)
{
	struct mount  *swap_mp;
	struct nfsv3_diskless *nd = &nfsv3_diskless;
	struct socket *so;
	struct vnode *vp;
	struct thread *td = curthread;		/* XXX */
	int error, i;
	u_long l;
	char buf[128];

#if defined(BOOTP_NFSROOT) && defined(BOOTP)
	bootpc_init();		/* use bootp to get nfs_diskless filled in */
#endif

	/*
	 * XXX time must be non-zero when we init the interface or else
	 * the arp code will wedge...
	 */
	while (mycpu->gd_time_seconds == 0)
		tsleep(mycpu, 0, "arpkludge", 10);

	/*
	 * The boot code may have passed us a diskless structure.
	 */
	kprintf("DISKLESS %d\n", nfs_diskless_valid);
	if (nfs_diskless_valid == 1)
		nfs_convert_diskless();

	/*
	 * NFSv3 is required.
	 */
	nd->root_args.flags |= NFSMNT_NFSV3 | NFSMNT_RDIRPLUS;
	nd->swap_args.flags |= NFSMNT_NFSV3;

#define SINP(sockaddr)	((struct sockaddr_in *)(sockaddr))
	kprintf("nfs_mountroot: interface %s ip %s",
		nd->myif.ifra_name,
		inet_ntoa(SINP(&nd->myif.ifra_addr)->sin_addr));
	kprintf(" bcast %s",
		inet_ntoa(SINP(&nd->myif.ifra_broadaddr)->sin_addr));
	kprintf(" mask %s\n",
		inet_ntoa(SINP(&nd->myif.ifra_mask)->sin_addr));
#undef SINP

	/*
	 * XXX splnet, so networks will receive...
	 */
	crit_enter();

	/*
	 * BOOTP does not necessarily have to be compiled into the kernel
	 * for an NFS root to work.  If we inherited the network
	 * configuration for PXEBOOT then pxe_setup_nfsdiskless() has figured
	 * out our interface for us and all we need to do is ifconfig the
	 * interface.  We only do this if the interface has not already been
	 * ifconfig'd by e.g. BOOTP.
	 */
	error = socreate(nd->myif.ifra_addr.sa_family, &so, SOCK_DGRAM, 0, td);
	if (error) {
		panic("nfs_mountroot: socreate(%04x): %d",
			nd->myif.ifra_addr.sa_family, error);
	}

	error = ifioctl(so, SIOCAIFADDR, (caddr_t)&nd->myif, proc0.p_ucred);
	if (error)
		panic("nfs_mountroot: SIOCAIFADDR: %d", error);

	soclose(so, FNONBLOCK);

	/*
	 * If the gateway field is filled in, set it as the default route.
	 */
	if (nd->mygateway.sin_len != 0) {
		struct sockaddr_in mask, sin;

		bzero((caddr_t)&mask, sizeof(mask));
		sin = mask;
		sin.sin_family = AF_INET;
		sin.sin_len = sizeof(sin);
		kprintf("nfs_mountroot: gateway %s\n",
			inet_ntoa(nd->mygateway.sin_addr));
		error = rtrequest_global(RTM_ADD, (struct sockaddr *)&sin,
					(struct sockaddr *)&nd->mygateway,
					(struct sockaddr *)&mask,
					RTF_UP | RTF_GATEWAY);
		if (error)
			kprintf("nfs_mountroot: unable to set gateway, error %d, continuing anyway\n", error);
	}

	/*
	 * Create the rootfs mount point.
	 */
	nd->root_args.fh = nd->root_fh;
	nd->root_args.fhsize = nd->root_fhsize;
	l = ntohl(nd->root_saddr.sin_addr.s_addr);
	ksnprintf(buf, sizeof(buf), "%ld.%ld.%ld.%ld:%s",
		(l >> 24) & 0xff, (l >> 16) & 0xff,
		(l >>  8) & 0xff, (l >>  0) & 0xff, nd->root_hostnam);
	kprintf("NFS_ROOT: %s\n", buf);
	error = nfs_mountdiskless(buf, "/", MNT_RDONLY, &nd->root_saddr,
				  &nd->root_args, td, &vp, &mp);
	if (error) {
		mp->mnt_vfc->vfc_refcount--;
		crit_exit();
		return (error);
	}

	swap_mp = NULL;
	if (nd->swap_nblks) {

		/* Convert from kilobytes to DEV_BSIZE (512-byte) blocks */
		nd->swap_nblks *= 2;

		/*
		 * Create a fake mount point just for the swap vnode so that the
		 * swap file can be on a different server from the rootfs.
		 */
		nd->swap_args.fh = nd->swap_fh;
		nd->swap_args.fhsize = nd->swap_fhsize;
		l = ntohl(nd->swap_saddr.sin_addr.s_addr);
		ksnprintf(buf, sizeof(buf), "%ld.%ld.%ld.%ld:%s",
			(l >> 24) & 0xff, (l >> 16) & 0xff,
			(l >>  8) & 0xff, (l >>  0) & 0xff, nd->swap_hostnam);
		kprintf("NFS SWAP: %s\n", buf);
		error = nfs_mountdiskless(buf, "/swap", 0, &nd->swap_saddr,
					  &nd->swap_args, td, &vp, &swap_mp);
		if (error) {
			crit_exit();
			return (error);
		}
		vfs_unbusy(swap_mp);

		VTONFS(vp)->n_size = VTONFS(vp)->n_vattr.va_size =
				nd->swap_nblks * DEV_BSIZE;

		/*
		 * Since the swap file is not the root dir of a file system,
		 * hack it to a regular file.
		 */
		vclrflags(vp, VROOT);
		vref(vp);
		nfs_setvtype(vp, VREG);
		swaponvp(td, vp, nd->swap_nblks);
	}
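
The NFS_ROOT/NFS SWAP strings above are assembled by hand from a network-order IPv4 address: convert with ntohl(), then shift out each octet. A standalone sketch of the same formatting; the sample address and export path are made up:

#include <stdio.h>
#include <arpa/inet.h>

int
main(void)
{
	char buf[128];
	unsigned long l = ntohl(inet_addr("10.0.0.1"));

	snprintf(buf, sizeof(buf), "%lu.%lu.%lu.%lu:%s",
	    (l >> 24) & 0xff, (l >> 16) & 0xff,
	    (l >>  8) & 0xff, (l >>  0) & 0xff, "/export/root");
	printf("%s\n", buf);		/* 10.0.0.1:/export/root */
	return (0);
}
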
Example #14
/*
 * Common code for vnode open operations.  Check permissions, and call
 * the VOP_NOPEN or VOP_NCREATE routine.
 *
 * The caller is responsible for setting up nd with nlookup_init() and
 * for cleaning it up with nlookup_done(), whether we return an error
 * or not.
 *
 * On success nd->nl_open_vp will hold a referenced and, if requested,
 * locked vnode.  A locked vnode is requested via NLC_LOCKVP.  If fp
 * is non-NULL the vnode will be installed in the file pointer.
 *
 * NOTE: The vnode is referenced just once on return whether or not it
 * is also installed in the file pointer.
 */
int
vn_open(struct nlookupdata *nd, struct file *fp, int fmode, int cmode)
{
	struct vnode *vp;
	struct ucred *cred = nd->nl_cred;
	struct vattr vat;
	struct vattr *vap = &vat;
	int error;
	u_int flags;
	uint64_t osize;
	struct mount *mp;

	/*
	 * Certain combinations are illegal
	 */
	if ((fmode & (FWRITE | O_TRUNC)) == O_TRUNC)
		return(EACCES);

	/*
	 * Lookup the path and create or obtain the vnode.  After a
	 * successful lookup a locked nd->nl_nch will be returned.
	 *
	 * The result of this section should be a locked vnode.
	 *
	 * XXX with only a little work we should be able to avoid locking
	 * the vnode if FWRITE, O_CREAT, and O_TRUNC are *not* set.
	 */
	nd->nl_flags |= NLC_OPEN;
	if (fmode & O_APPEND)
		nd->nl_flags |= NLC_APPEND;
	if (fmode & O_TRUNC)
		nd->nl_flags |= NLC_TRUNCATE;
	if (fmode & FREAD)
		nd->nl_flags |= NLC_READ;
	if (fmode & FWRITE)
		nd->nl_flags |= NLC_WRITE;
	if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
		nd->nl_flags |= NLC_FOLLOW;

	if (fmode & O_CREAT) {
		/*
		 * CONDITIONAL CREATE FILE CASE
		 *
		 * Setting NLC_CREATE causes a negative hit to store
		 * the negative hit ncp and not return an error.  Then
		 * nc_error or nc_vp may be checked to see if the ncp 
		 * represents a negative hit.  NLC_CREATE also requires
		 * write permission on the governing directory or EPERM
		 * is returned.
		 */
		nd->nl_flags |= NLC_CREATE;
		nd->nl_flags |= NLC_REFDVP;
		bwillinode(1);
		error = nlookup(nd);
	} else {
		/*
		 * NORMAL OPEN FILE CASE
		 */
		error = nlookup(nd);
	}

	if (error)
		return (error);

	/*
	 * split case to allow us to re-resolve and retry the ncp in case
	 * we get ESTALE.
	 */
again:
	if (fmode & O_CREAT) {
		if (nd->nl_nch.ncp->nc_vp == NULL) {
			if ((error = ncp_writechk(&nd->nl_nch)) != 0)
				return (error);
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			error = VOP_NCREATE(&nd->nl_nch, nd->nl_dvp, &vp,
					    nd->nl_cred, vap);
			if (error)
				return (error);
			fmode &= ~O_TRUNC;
			/* locked vnode is returned */
		} else {
			if (fmode & O_EXCL) {
				error = EEXIST;
			} else {
				error = cache_vget(&nd->nl_nch, cred, 
						    LK_EXCLUSIVE, &vp);
			}
			if (error)
				return (error);
			fmode &= ~O_CREAT;
		}
	} else {
		error = cache_vget(&nd->nl_nch, cred, LK_EXCLUSIVE, &vp);
		if (error)
			return (error);
	}

	/*
	 * We have a locked vnode and ncp now.  Note that the ncp will
	 * be cleaned up by the caller if nd->nl_nch is left intact.
	 */
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			error = vn_writechk(vp, &nd->nl_nch);
			if (error) {
				/*
				 * Special stale handling, re-resolve the
				 * vnode.
				 */
				if (error == ESTALE) {
					vput(vp);
					vp = NULL;
					cache_setunresolved(&nd->nl_nch);
					error = cache_resolve(&nd->nl_nch, cred);
					if (error == 0)
						goto again;
				}
				goto bad;
			}
		}
	}
	if (fmode & O_TRUNC) {
		vn_unlock(vp);				/* XXX */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX */
		osize = vp->v_filesize;
		VATTR_NULL(vap);
		vap->va_size = 0;
		error = VOP_SETATTR(vp, vap, cred);
		if (error)
			goto bad;
		error = VOP_GETATTR(vp, vap);
		if (error)
			goto bad;
		mp = vq_vptomp(vp);
		VFS_ACCOUNT(mp, vap->va_uid, vap->va_gid, -osize);
	}

	/*
	 * Set or clear VSWAPCACHE on the vp based on nd->nl_nch.ncp->nc_flag.
	 * These particular bits are tracked all the way from the root.
	 *
	 * NOTE: Might not work properly on NFS servers due to the
	 * disconnected namecache.
	 */
	flags = nd->nl_nch.ncp->nc_flag;
	if ((flags & (NCF_UF_CACHE | NCF_UF_PCACHE)) &&
	    (flags & (NCF_SF_NOCACHE | NCF_SF_PNOCACHE)) == 0) {
		vsetflags(vp, VSWAPCACHE);
	} else {
		vclrflags(vp, VSWAPCACHE);
	}

	/*
	 * Setup the fp so VOP_OPEN can override it.  No descriptor has been
	 * associated with the fp yet so we own it clean.  
	 *
	 * f_nchandle inherits nl_nch.  This used to be necessary only for
	 * directories but now we do it unconditionally so f*() ops
	 * such as fchmod() can access the actual namespace that was
	 * used to open the file.
	 */
	if (fp) {
		if (nd->nl_flags & NLC_APPENDONLY)
			fmode |= FAPPENDONLY;
		fp->f_nchandle = nd->nl_nch;
		cache_zero(&nd->nl_nch);
		cache_unlock(&fp->f_nchandle);
	}

	/*
	 * Get rid of nl_nch.  vn_open does not return it (it returns the
	 * vnode or the file pointer).  Note: we can't leave nl_nch locked
	 * through the VOP_OPEN anyway since the VOP_OPEN may block, e.g.
	 * on /dev/ttyd0
	 */
	if (nd->nl_nch.ncp)
		cache_put(&nd->nl_nch);

	error = VOP_OPEN(vp, fmode, cred, fp);
	if (error) {
		/*
		 * Setting f_ops to &badfileops will prevent the descriptor
		 * code from trying to close and release the vnode; since
		 * the open failed we do not want to call close.
		 */
		if (fp) {
			fp->f_data = NULL;
			fp->f_ops = &badfileops;
		}
		goto bad;
	}

#if 0
	/*
	 * Assert that VREG files have been setup for vmio.
	 */
	KASSERT(vp->v_type != VREG || vp->v_object != NULL,
		("vn_open: regular file was not VMIO enabled!"));
#endif

	/*
	 * Return the vnode.  XXX needs some cleaning up.  The vnode is
	 * only returned in the fp == NULL case.
	 */
	if (fp == NULL) {
		nd->nl_open_vp = vp;
		nd->nl_vp_fmode = fmode;
		if ((nd->nl_flags & NLC_LOCKVP) == 0)
			vn_unlock(vp);
	} else {
		vput(vp);
	}
	return (0);
bad:
	if (vp)
		vput(vp);
	return (error);
}
Example #15
int
vfs_mountroot_devfs(void)
{
	struct vnode *vp;
	struct nchandle nch;
	struct nlookupdata nd;
	struct mount *mp;
	struct vfsconf *vfsp;
	int error;
	struct ucred *cred = proc0.p_ucred;
	const char *devfs_path, *init_chroot;
	char *dev_malloced = NULL;

	if ((init_chroot = kgetenv("init_chroot")) != NULL) {
		size_t l;

		l = strlen(init_chroot) + sizeof("/dev");
		dev_malloced = kmalloc(l, M_MOUNT, M_WAITOK);
		ksnprintf(dev_malloced, l, "%s/dev", init_chroot);
		devfs_path = dev_malloced;
	} else {
		devfs_path = "/dev";
	}
	/*
	 * Lookup the requested path and extract the nch and vnode.
	 */
	error = nlookup_init_raw(&nd,
	     devfs_path, UIO_SYSSPACE, NLC_FOLLOW,
	     cred, &rootnch);

	if (error == 0) {
		devfs_debug(DEVFS_DEBUG_DEBUG, "vfs_mountroot_devfs: nlookup_init is ok...\n");
		if ((error = nlookup(&nd)) == 0) {
			devfs_debug(DEVFS_DEBUG_DEBUG, "vfs_mountroot_devfs: nlookup is ok...\n");
			if (nd.nl_nch.ncp->nc_vp == NULL) {
				devfs_debug(DEVFS_DEBUG_SHOW, "vfs_mountroot_devfs: nlookup: simply not found\n");
				error = ENOENT;
			}
		}
	}
	if (dev_malloced != NULL) {
		kfree(dev_malloced, M_MOUNT);
		dev_malloced = NULL;
	}
	devfs_path = NULL;
	if (error) {
		nlookup_done(&nd);
		devfs_debug(DEVFS_DEBUG_SHOW, "vfs_mountroot_devfs: nlookup failed, error: %d\n", error);
		return (error);
	}

	/*
	 * Extract the locked+refd ncp and cleanup the nd structure
	 */
	nch = nd.nl_nch;
	cache_zero(&nd.nl_nch);
	nlookup_done(&nd);

	/*
	 * Now we have the locked, ref'd nch and an unreferenced vnode.
	 */
	vp = nch.ncp->nc_vp;
	if ((error = vget(vp, LK_EXCLUSIVE)) != 0) {
		cache_put(&nch);
		devfs_debug(DEVFS_DEBUG_SHOW, "vfs_mountroot_devfs: vget failed\n");
		return (error);
	}
	cache_unlock(&nch);

	if ((error = vinvalbuf(vp, V_SAVE, 0, 0)) != 0) {
		cache_drop(&nch);
		vput(vp);
		devfs_debug(DEVFS_DEBUG_SHOW, "vfs_mountroot_devfs: vinvalbuf failed\n");
		return (error);
	}
	if (vp->v_type != VDIR) {
		cache_drop(&nch);
		vput(vp);
		devfs_debug(DEVFS_DEBUG_SHOW, "vfs_mountroot_devfs: vp is not VDIR\n");
		return (ENOTDIR);
	}

	vfsp = vfsconf_find_by_name("devfs");
	vsetflags(vp, VMOUNT);

	/*
	 * Allocate and initialize the filesystem.
	 */
	mp = kmalloc(sizeof(struct mount), M_MOUNT, M_ZERO|M_WAITOK);
	mount_init(mp);
	vfs_busy(mp, LK_NOWAIT);
	mp->mnt_op = vfsp->vfc_vfsops;
	mp->mnt_vfc = vfsp;
	vfsp->vfc_refcount++;
	mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	mp->mnt_stat.f_owner = cred->cr_uid;
	vn_unlock(vp);

	/*
	 * Mount the filesystem.
	 */
	error = VFS_MOUNT(mp, "/dev", NULL, cred);

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	/*
	 * Put the new filesystem on the mount list after root.  The mount
	 * point gets its own mnt_ncmountpt (unless the VFS already set one
	 * up) which represents the root of the mount.  The lookup code
	 * detects the mount point going forward and checks the root of
	 * the mount going backwards.
	 *
	 * It is not necessary to invalidate or purge the vnode underneath
	 * because elements under the mount will be given their own glue
	 * namecache record.
	 */
	if (!error) {
		if (mp->mnt_ncmountpt.ncp == NULL) {
			/*
			 * allocate, then unlock, but leave the ref intact
			 */
			cache_allocroot(&mp->mnt_ncmountpt, mp, NULL);
			cache_unlock(&mp->mnt_ncmountpt);
		}
		mp->mnt_ncmounton = nch;		/* inherits ref */
		nch.ncp->nc_flag |= NCF_ISMOUNTPT;

		/* XXX get the root of the fs and cache_setvp(mnt_ncmountpt...) */
		vclrflags(vp, VMOUNT);
		mountlist_insert(mp, MNTINS_LAST);
		vn_unlock(vp);
		//checkdirs(&mp->mnt_ncmounton, &mp->mnt_ncmountpt);
		error = vfs_allocate_syncvnode(mp);
		if (error) {
			devfs_debug(DEVFS_DEBUG_SHOW, "vfs_mountroot_devfs: vfs_allocate_syncvnode failed\n");
		}
		vfs_unbusy(mp);
		error = VFS_START(mp, 0);
		vrele(vp);
	} else {
		vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_coherency_ops);
		vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_journal_ops);
		vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_norm_ops);
		vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_spec_ops);
		vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_fifo_ops);
		vclrflags(vp, VMOUNT);
		mp->mnt_vfc->vfc_refcount--;
		vfs_unbusy(mp);
		kfree(mp, M_MOUNT);
		cache_drop(&nch);
		vput(vp);
		devfs_debug(DEVFS_DEBUG_SHOW, "vfs_mountroot_devfs: mount failed\n");
	}

	devfs_debug(DEVFS_DEBUG_DEBUG, "rootmount_devfs done with error: %d\n", error);
	return (error);
}
Example #16
/*
 * Allocate a VM object for a vnode, typically a regular file vnode.
 *
 * Some additional information is required to generate a properly sized
 * object which covers the entire buffer cache buffer straddling the file
 * EOF.  Userland does not see the extra pages as the VM fault code tests
 * against v_filesize.
 */
vm_object_t
vnode_pager_alloc(void *handle, off_t length, vm_prot_t prot, off_t offset,
		  int blksize, int boff)
{
	vm_object_t object;
	struct vnode *vp;
	off_t loffset;
	vm_pindex_t lsize;

	/*
	 * Pageout to vnode, no can do yet.
	 */
	if (handle == NULL)
		return (NULL);

	/*
	 * XXX hack - This initialization should be put somewhere else.
	 */
	if (vnode_pbuf_freecnt < 0) {
	    vnode_pbuf_freecnt = nswbuf / 2 + 1;
	}

	/*
	 * Serialize potential vnode/object teardowns and interlocks
	 */
	vp = (struct vnode *)handle;
	lwkt_gettoken(&vp->v_token);

	/*
	 * Prevent race condition when allocating the object. This
	 * can happen with NFS vnodes since the nfsnode isn't locked.
	 */
	while (vp->v_flag & VOLOCK) {
		vsetflags(vp, VOWANT);
		tsleep(vp, 0, "vnpobj", 0);
	}
	vsetflags(vp, VOLOCK);
	lwkt_reltoken(&vp->v_token);

	/*
	 * If the object is being terminated, wait for it to
	 * go away.
	 */
	while ((object = vp->v_object) != NULL) {
		vm_object_hold(object);
		if ((object->flags & OBJ_DEAD) == 0)
			break;
		vm_object_dead_sleep(object, "vadead");
		vm_object_drop(object);
	}

	if (vp->v_sysref.refcnt <= 0)
		panic("vnode_pager_alloc: no vnode reference");

	/*
	 * Round up to the *next* block, then destroy the buffers in question.
	 * Since we are only removing some of the buffers we must rely on the
	 * scan count to determine whether a loop is necessary.
	 *
	 * Destroy any pages beyond the last buffer.
	 */
	if (boff < 0)
		boff = (int)(length % blksize);
	if (boff)
		loffset = length + (blksize - boff);
	else
		loffset = length;
	lsize = OFF_TO_IDX(round_page64(loffset));

	if (object == NULL) {
		/*
		 * Allocate an object of the appropriate size.
		 */
		object = vm_object_allocate_hold(OBJT_VNODE, lsize);
		object->handle = handle;
		vp->v_object = object;
		vp->v_filesize = length;
		if (vp->v_mount && (vp->v_mount->mnt_kern_flag & MNTK_NOMSYNC))
			vm_object_set_flag(object, OBJ_NOMSYNC);
	} else {
		object->ref_count++;
		if (object->size != lsize) {
			kprintf("vnode_pager_alloc: Warning, objsize "
				"mismatch %jd/%jd vp=%p obj=%p\n",
				(intmax_t)object->size,
				(intmax_t)lsize,
				vp, object);
		}
		if (vp->v_filesize != length) {
			kprintf("vnode_pager_alloc: Warning, filesize "
				"mismatch %jd/%jd vp=%p obj=%p\n",
				(intmax_t)vp->v_filesize,
				(intmax_t)length,
				vp, object);
		}
	}

	vref(vp);
	lwkt_gettoken(&vp->v_token);
	vclrflags(vp, VOLOCK);
	if (vp->v_flag & VOWANT) {
		vclrflags(vp, VOWANT);
		wakeup(vp);
	}
	lwkt_reltoken(&vp->v_token);

	vm_object_drop(object);

	return (object);
}
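
The sizing math above rounds the file length up to the end of the buffer-cache block straddling EOF, then converts bytes to pages. A worked, self-contained version of that arithmetic, assuming a 4K page; PGSIZE and round_page() here are illustrative stand-ins for the kernel's round_page64()/OFF_TO_IDX:

#include <stdio.h>
#include <stdint.h>

#define PGSIZE	4096UL			/* assumed page size */

static uint64_t
round_page(uint64_t x)
{
	return ((x + PGSIZE - 1) & ~(PGSIZE - 1));
}

int
main(void)
{
	uint64_t length = 10000;	/* file length in bytes */
	int blksize = 8192;		/* buffer cache block size */
	int boff = (int)(length % blksize);
	uint64_t loffset;

	/* Round up to the *next* block boundary, as in the code above. */
	if (boff)
		loffset = length + (blksize - boff);
	else
		loffset = length;

	printf("object covers %llu pages\n",
	    (unsigned long long)(round_page(loffset) / PGSIZE));  /* 4 */
	return (0);
}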