Example #1
/*
 * This opens /dev/tty.  Because multiple opens of /dev/tty only
 * generate a single open to the actual tty, the file modes are
 * locked to FREAD|FWRITE.
 */
static	int
cttyopen(struct dev_open_args *ap)
{
	struct proc *p = curproc;
	struct vnode *ttyvp;
	int error;

	KKASSERT(p);
retry:
	if ((ttyvp = cttyvp(p)) == NULL)
		return (ENXIO);
	if (ttyvp->v_flag & VCTTYISOPEN)
		return (0);

	/*
	 * Messy interlock, don't let the vnode go away while we try to
	 * lock it and check for race after we might have blocked.
	 */
	vhold(ttyvp);
	vn_lock(ttyvp, LK_EXCLUSIVE | LK_RETRY);
	if (ttyvp != cttyvp(p) || (ttyvp->v_flag & VCTTYISOPEN)) {
		kprintf("Warning: cttyopen: race avoided\n");
		vn_unlock(ttyvp);
		vdrop(ttyvp);
		goto retry;
	}
	vsetflags(ttyvp, VCTTYISOPEN);
	error = VOP_OPEN(ttyvp, FREAD|FWRITE, ap->a_cred, NULL);
	if (error)
		vclrflags(ttyvp, VCTTYISOPEN);
	vn_unlock(ttyvp);
	vdrop(ttyvp);
	return(error);
}
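For context, a minimal userland sketch of what reaches cttyopen(): any open(2) of /dev/tty issued by a process with a controlling terminal. The snippet is illustrative and not part of the kernel sources above; per the comment, the underlying tty is opened FREAD|FWRITE regardless of the mode the caller requests.

#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	/* This open(2) lands in cttyopen(); the kernel opens the real
	 * tty FREAD|FWRITE even though only read access is requested. */
	int fd = open("/dev/tty", O_RDONLY);

	if (fd >= 0)
		close(fd);
	return (0);
}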
Example #2
static int
iso_mountroot(struct mount *mp)
{
	struct iso_args args;
	struct vnode *rootvp;
	int error;

	if ((error = bdevvp(rootdev, &rootvp))) {
		kprintf("iso_mountroot: can't find rootvp\n");
		return (error);
	}
	args.flags = ISOFSMNT_ROOT;

	vn_lock(rootvp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_OPEN(rootvp, FREAD, FSCRED, NULL);
	vn_unlock(rootvp);
	if (error)
		return (error);

	args.ssector = iso_get_ssector(rootdev);

	vn_lock(rootvp, LK_EXCLUSIVE | LK_RETRY);
	VOP_CLOSE(rootvp, FREAD, NULL);
	vn_unlock(rootvp);

	if (bootverbose)
		kprintf("iso_mountroot(): using session at block %d\n",
		       args.ssector);
	if ((error = iso_mountfs(rootvp, mp, &args)) != 0)
		return (error);

	cd9660_statfs(mp, &mp->mnt_stat, proc0.p_ucred);
	return (0);
}
Example #3
/*
 * vop_compat_nresolve { struct nchandle *a_nch, struct vnode *a_dvp }
 * XXX STOPGAP FUNCTION
 *
 * XXX OLD API ROUTINE!  WHEN ALL VFSs HAVE BEEN CLEANED UP THIS PROCEDURE
 * WILL BE REMOVED.  This procedure exists for all VFSs which have not
 * yet implemented VOP_NRESOLVE().  It converts VOP_NRESOLVE() into a 
 * vop_old_lookup() and does appropriate translations.
 *
 * Resolve a ncp for VFSs which do not support the VOP.  Eventually all
 * VFSs will support this VOP and this routine can be removed, since
 * VOP_NRESOLVE() is far less complex than the older LOOKUP/CACHEDLOOKUP
 * API.
 *
 * A locked ncp is passed in to be resolved.  The NCP is resolved by
 * figuring out the vnode (if any) and calling cache_setvp() to attach the
 * vnode to the entry.  If the entry represents a non-existent node then
 * cache_setvp() is called with a NULL vnode to resolve the entry into a
 * negative cache entry.  No vnode locks are retained and the
 * ncp is left locked on return.
 *
 * The ncp will NEVER represent "", "." or "..", or contain any slashes.
 *
 * There is a potential directory and vnode interlock.   The lock order
 * requirement is: namecache, governing directory, resolved vnode.
 */
int
vop_compat_nresolve(struct vop_nresolve_args *ap)
{
	int error;
	struct vnode *dvp;
	struct vnode *vp;
	struct nchandle *nch;
	struct namecache *ncp;
	struct componentname cnp;

	nch = ap->a_nch;	/* locked namecache node */
	ncp = nch->ncp;
	dvp = ap->a_dvp;

	/*
	 * UFS currently stores all sorts of side effects, including a loop
	 * variable, in the directory inode.  That needs to be fixed and the
	 * other VFS's audited before we can switch to LK_SHARED.
	 */
	if ((error = vget(dvp, LK_EXCLUSIVE)) != 0) {
		kprintf("[diagnostic] vop_compat_resolve: EAGAIN on ncp %p %s\n",
			ncp, ncp->nc_name);
		return(EAGAIN);
	}

	bzero(&cnp, sizeof(cnp));
	cnp.cn_nameiop = NAMEI_LOOKUP;
	cnp.cn_flags = 0;
	cnp.cn_nameptr = ncp->nc_name;
	cnp.cn_namelen = ncp->nc_nlen;
	cnp.cn_cred = ap->a_cred;
	cnp.cn_td = curthread; /* XXX */

	/*
	 * vop_old_lookup() always returns vp locked.  dvp may or may not be
	 * left locked depending on CNP_PDIRUNLOCK.
	 */
	error = vop_old_lookup(ap->a_head.a_ops, dvp, &vp, &cnp);
	if (error == 0)
		vn_unlock(vp);
	if ((cnp.cn_flags & CNP_PDIRUNLOCK) == 0)
		vn_unlock(dvp);
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		/* was resolved by another process while we were unlocked */
		if (error == 0)
			vrele(vp);
	} else if (error == 0) {
		KKASSERT(vp != NULL);
		cache_setvp(nch, vp);
		vrele(vp);
	} else if (error == ENOENT) {
		KKASSERT(vp == NULL);
		if (cnp.cn_flags & CNP_ISWHITEOUT)
			ncp->nc_flag |= NCF_WHITEOUT;
		cache_setvp(nch, NULL);
	}
	vrele(dvp);
	return (error);
}
Example #4
/*
 * This opens /dev/tty.  Because multiple opens of /dev/tty only
 * generate a single open to the actual tty, the file modes are
 * locked to FREAD|FWRITE.
 */
static	int
cttyopen(struct dev_open_args *ap)
{
	struct proc *p = curproc;
	struct vnode *ttyvp;
	int error;

	KKASSERT(p);
retry:
	if ((ttyvp = cttyvp(p)) == NULL)
		return (ENXIO);
	if (ttyvp->v_flag & VCTTYISOPEN)
		return (0);

	/*
	 * Messy interlock, don't let the vnode go away while we try to
	 * lock it and check for race after we might have blocked.
	 *
	 * WARNING! The device open (devfs_spec_open()) temporarily
	 *	    releases the vnode lock on ttyvp when issuing the
	 *	    dev_dopen(), which means that the VCTTYISOPEN flag
	 *	    can race during the VOP_OPEN().
	 *
	 *	    If something does race we have to undo our potentially
	 *	    extra open.
	 */
	vhold(ttyvp);
	vn_lock(ttyvp, LK_EXCLUSIVE | LK_RETRY);
	if (ttyvp != cttyvp(p) || (ttyvp->v_flag & VCTTYISOPEN)) {
		kprintf("Warning: cttyopen: race-1 avoided\n");
		vn_unlock(ttyvp);
		vdrop(ttyvp);
		goto retry;
	}
	error = VOP_OPEN(ttyvp, FREAD|FWRITE, ap->a_cred, NULL);

	/*
	 * Race against ctty close or change.  This case has been validated
	 * and occurs every so often during synth builds.
	 */
	if (ttyvp != cttyvp(p) || (ttyvp->v_flag & VCTTYISOPEN)) {
		if (error == 0)
			VOP_CLOSE(ttyvp, FREAD|FWRITE, NULL);
		vn_unlock(ttyvp);
		vdrop(ttyvp);
		goto retry;
	}
	if (error == 0)
		vsetflags(ttyvp, VCTTYISOPEN);
	vn_unlock(ttyvp);
	vdrop(ttyvp);
	return(error);
}
Example #5
void
debug_vput(struct vnode *vp, const char *filename, int line)
{
	kprintf("vput(%p) %s:%d\n", vp, filename, line);
	vn_unlock(vp);
	vrele(vp);
}
Example #6
static int
devfs_spec_read(struct vop_read_args *ap)
{
	struct devfs_node *node;
	struct vnode *vp;
	struct uio *uio;
	cdev_t dev;
	int error;

	vp = ap->a_vp;
	dev = vp->v_rdev;
	uio = ap->a_uio;
	node = DEVFS_NODE(vp);

	if (dev == NULL)		/* device was revoked */
		return (EBADF);
	if (uio->uio_resid == 0)
		return (0);

	vn_unlock(vp);
	error = dev_dread(dev, uio, ap->a_ioflag, NULL);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	if (node)
		nanotime(&node->atime);

	return (error);
}
Example #7
/*
 * union_mkdir(struct vnode *a_dvp, struct vnode **a_vpp,
 *		struct componentname *a_cnp, struct vattr *a_vap)
 */
static int
union_mkdir(struct vop_old_mkdir_args *ap)
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_td;
	struct vnode *upperdvp;
	int error = EROFS;

	if ((upperdvp = union_lock_upper(dun, td)) != NULLVP) {
		struct vnode *vp;

		error = VOP_MKDIR(upperdvp, &vp, cnp, ap->a_vap);
		union_unlock_upper(upperdvp, td);

		if (error == 0) {
			vn_unlock(vp);
			UDEBUG(("ALLOCVP-2 FROM %p REFS %d\n", vp, vp->v_sysref.refcnt));
			error = union_allocvp(ap->a_vpp, ap->a_dvp->v_mount,
				ap->a_dvp, NULLVP, cnp, vp, NULLVP, 1);
			UDEBUG(("ALLOCVP-2B FROM %p REFS %d\n", *ap->a_vpp, vp->v_sysref.refcnt));
		}
	}
	return (error);
}
Example #8
/*
 * Package up an I/O request on a vnode into a uio and do it.
 *
 * MPSAFE
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
	off_t offset, enum uio_seg segflg, int ioflg, 
	struct ucred *cred, int *aresid)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = curthread;
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0)
		vn_unlock(vp);
	return (error);
}
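A minimal caller sketch for vn_rdwr(), illustrative only and not taken from the sources above: read the first 512 bytes of a file into a kernel buffer. The vnode is assumed to be referenced but unlocked, so IO_NODELOCKED is not passed and vn_rdwr() takes and releases the vnode lock itself; read_first_sector() and its parameters are hypothetical names.

static int
read_first_sector(struct vnode *vp, struct ucred *cred, char *buf)
{
	int resid;
	int error;

	/* Kernel-space read of 512 bytes at offset 0. */
	error = vn_rdwr(UIO_READ, vp, (caddr_t)buf, 512, (off_t)0,
			UIO_SYSSPACE, 0, cred, &resid);
	if (error == 0 && resid != 0)
		error = EIO;		/* short read */
	return (error);
}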
Example #9
/*
 * 	union_create:
 *
 * a_dvp is locked on entry and remains locked on return.  a_vpp is returned
 * locked if no error occurs, otherwise it is garbage.
 *
 * union_create(struct vnode *a_dvp, struct vnode **a_vpp,
 *		struct componentname *a_cnp, struct vattr *a_vap)
 */
static int
union_create(struct vop_old_create_args *ap)
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_td;
	struct vnode *dvp;
	int error = EROFS;

	if ((dvp = union_lock_upper(dun, td)) != NULL) {
		struct vnode *vp;
		struct mount *mp;

		error = VOP_CREATE(dvp, &vp, cnp, ap->a_vap);
		if (error == 0) {
			mp = ap->a_dvp->v_mount;
			vn_unlock(vp);
			UDEBUG(("ALLOCVP-1 FROM %p REFS %d\n", vp, vp->v_sysref.refcnt));
			error = union_allocvp(ap->a_vpp, mp, NULLVP, NULLVP,
				cnp, vp, NULLVP, 1);
			UDEBUG(("ALLOCVP-2B FROM %p REFS %d\n", *ap->a_vpp, vp->v_sysref.refcnt));
		}
		union_unlock_upper(dvp, td);
	}
	return (error);
}
Example #10
static int
tmpfs_nlookupdotdot(struct vop_nlookupdotdot_args *v)
{
	struct vnode *dvp = v->a_dvp;
	struct vnode **vpp = v->a_vpp;
	struct tmpfs_node *dnode = VP_TO_TMPFS_NODE(dvp);
	struct ucred *cred = v->a_cred;
	struct mount *mp;
	int error;

	*vpp = NULL;

	mp = dvp->v_mount;

	/* Check accessibility of requested node as a first step. */
	error = VOP_ACCESS(dvp, VEXEC, cred);
	if (error != 0)
		return error;

	if (dnode->tn_dir.tn_parent != NULL) {
		/* Allocate a new vnode on the matching entry. */
		error = tmpfs_alloc_vp(dvp->v_mount, dnode->tn_dir.tn_parent,
				       LK_EXCLUSIVE | LK_RETRY, vpp);

		if (*vpp)
			vn_unlock(*vpp);
	}
	return (*vpp == NULL) ? ENOENT : 0;
}
Example #11
/*
 * Vnode op for write
 *
 * spec_write(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
 *	      struct ucred *a_cred)
 */
static int
devfs_spec_write(struct vop_write_args *ap)
{
	struct devfs_node *node;
	struct vnode *vp;
	struct uio *uio;
	cdev_t dev;
	int error;

	vp = ap->a_vp;
	dev = vp->v_rdev;
	uio = ap->a_uio;
	node = DEVFS_NODE(vp);

	KKASSERT(uio->uio_segflg != UIO_NOCOPY);

	if (dev == NULL)		/* device was revoked */
		return (EBADF);

	vn_unlock(vp);
	error = dev_dwrite(dev, uio, ap->a_ioflag, NULL);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	if (node) {
		nanotime(&node->atime);
		nanotime(&node->mtime);
	}

	return (error);
}
Example #12
/*
 * File pointers can no longer get ripped up by revoke so
 * we don't need to lock access to the vp.
 *
 * f_offset updates are not guaranteed against multiple readers
 */
static int
vn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
		uio->uio_offset = vn_get_fpf_offset(fp);
	vn_lock(vp, LK_SHARED | LK_RETRY);
	ioflag |= sequential_heuristic(uio, fp);

	error = VOP_READ(vp, uio, ioflag, cred);
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
		vn_set_fpf_offset(fp, uio->uio_offset);
	return (error);
}
Example #13
/*
 * XXX: can't use callremove now because can't catch setbacks with
 * it due to lack of a pnode argument.
 */
static int
puffs_vnop_remove(struct vop_nremove_args *ap)
{
    PUFFS_MSG_VARS(vn, remove);
    struct vnode *dvp = ap->a_dvp;
    struct vnode *vp;
    struct puffs_node *dpn = VPTOPP(dvp);
    struct puffs_node *pn;
    struct nchandle *nch = ap->a_nch;
    struct namecache *ncp = nch->ncp;
    struct ucred *cred = ap->a_cred;
    struct mount *mp = dvp->v_mount;
    struct puffs_mount *pmp = MPTOPUFFSMP(mp);
    int error;

    if (!EXISTSOP(pmp, REMOVE))
        return EOPNOTSUPP;

    error = vget(dvp, LK_EXCLUSIVE);
    if (error != 0) {
        DPRINTF(("puffs_vnop_remove: EAGAIN on parent vnode %p %s\n",
                 dvp, ncp->nc_name));
        return EAGAIN;
    }

    error = cache_vget(nch, cred, LK_EXCLUSIVE, &vp);
    if (error != 0) {
        DPRINTF(("puffs_vnop_remove: cache_vget error: %p %s\n",
                 dvp, ncp->nc_name));
        vput(dvp);	/* drop the lock and ref taken by vget() above */
        return EAGAIN;
    }
    if (vp->v_type == VDIR) {
        error = EISDIR;
        goto out;
    }

    pn = VPTOPP(vp);
    PUFFS_MSG_ALLOC(vn, remove);
    remove_msg->pvnr_cookie_targ = VPTOPNC(vp);
    puffs_makecn(&remove_msg->pvnr_cn, &remove_msg->pvnr_cn_cred,
                 ncp, cred);
    puffs_msg_setinfo(park_remove, PUFFSOP_VN,
                      PUFFS_VN_REMOVE, VPTOPNC(dvp));

    puffs_msg_enqueue(pmp, park_remove);
    error = puffs_msg_wait2(pmp, park_remove, dpn, pn);

    PUFFS_MSG_RELEASE(remove);

    error = checkerr(pmp, error, __func__);

out:
    vput(dvp);
    vn_unlock(vp);
    if (error == 0)
        cache_unlink(nch);
    vrele(vp);
    return error;
}
Example #14
/*
 * Check access permission on the union vnode.
 * The access check being enforced is to check
 * against both the underlying vnode, and any
 * copied vnode.  This ensures that no additional
 * file permissions are given away simply because
 * the user caused an implicit file copy.
 *
 * union_access(struct vnode *a_vp, int a_mode,
 *		struct ucred *a_cred, struct thread *a_td)
 */
static int
union_access(struct vop_access_args *ap)
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct thread *td = ap->a_td;
	int error = EACCES;
	struct vnode *vp;

	/*
	 * Disallow write attempts on filesystems mounted read-only.
	 */
	if ((ap->a_mode & VWRITE) && 
	    (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)) {
		switch (ap->a_vp->v_type) {
		case VREG: 
		case VDIR:
		case VLNK:
			return (EROFS);
		default:
			break;
		}
	}

	if ((vp = union_lock_upper(un, td)) != NULLVP) {
		ap->a_head.a_ops = *vp->v_ops;
		ap->a_vp = vp;
		error = vop_access_ap(ap);
		union_unlock_upper(vp, td);
		return(error);
	}

	if ((vp = un->un_lowervp) != NULLVP) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		ap->a_head.a_ops = *vp->v_ops;
		ap->a_vp = vp;

		/*
		 * Remove VWRITE from a_mode if our mount point is RW, because
		 * we want to allow writes and lowervp may be read-only.
		 */
		if ((un->un_vnode->v_mount->mnt_flag & MNT_RDONLY) == 0)
			ap->a_mode &= ~VWRITE;

		error = vop_access_ap(ap);
		if (error == 0) {
			struct union_mount *um;

			um = MOUNTTOUNIONMOUNT(un->un_vnode->v_mount);

			if (um->um_op == UNMNT_BELOW) {
				ap->a_cred = um->um_cred;
				error = vop_access_ap(ap);
			}
		}
		vn_unlock(vp);
	}
	return(error);
}
Example #15
/*
 * Update the disk quota in the quota file.
 */
static int
ufs_dqsync(struct vnode *vp, struct ufs_dquot *dq)
{
	struct vnode *dqvp;
	struct iovec aiov;
	struct uio auio;
	int error;

	if (dq == NODQUOT)
		panic("dqsync: dquot");
	if ((dq->dq_flags & DQ_MOD) == 0)
		return (0);
	if ((dqvp = dq->dq_ump->um_quotas[dq->dq_type]) == NULLVP)
		panic("dqsync: file");
	if (vp != dqvp)
		vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY);
	while (dq->dq_flags & DQ_LOCK) {
		dq->dq_flags |= DQ_WANT;
		(void) tsleep((caddr_t)dq, 0, "dqsync", 0);
		if ((dq->dq_flags & DQ_MOD) == 0) {
			if (vp != dqvp)
				vn_unlock(dqvp);
			return (0);
		}
	}
	dq->dq_flags |= DQ_LOCK;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = (caddr_t)&dq->dq_dqb;
	aiov.iov_len = sizeof (struct ufs_dqblk);
	auio.uio_resid = sizeof (struct ufs_dqblk);
	auio.uio_offset = (off_t)(dq->dq_id * sizeof (struct ufs_dqblk));
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	auio.uio_td = NULL;
	error = VOP_WRITE(dqvp, &auio, 0, dq->dq_ump->um_cred[dq->dq_type]);
	if (auio.uio_resid && error == 0)
		error = EIO;
	if (dq->dq_flags & DQ_WANT)
		wakeup((caddr_t)dq);
	dq->dq_flags &= ~(DQ_MOD|DQ_LOCK|DQ_WANT);
	if (vp != dqvp)
		vn_unlock(dqvp);
	return (error);
}
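For comparison, a hedged sketch of how the hand-rolled uio above relates to vn_rdwr() from Example #8: the same write can be expressed as a single call, passing IO_NODELOCKED because dqvp is already locked at this point. This illustrates the relationship rather than a drop-in replacement (for instance, vn_rdwr() sets uio_td to curthread, while the code above leaves it NULL); dqsync_write_via_vn_rdwr() is a hypothetical helper.

static int
dqsync_write_via_vn_rdwr(struct vnode *dqvp, struct ufs_dquot *dq)
{
	/*
	 * dqvp is assumed to be locked by the caller, exactly as in
	 * ufs_dqsync() above, hence IO_NODELOCKED.  A NULL aresid makes
	 * vn_rdwr() turn a short write into EIO, matching the original.
	 */
	return (vn_rdwr(UIO_WRITE, dqvp, (caddr_t)&dq->dq_dqb,
			sizeof(struct ufs_dqblk),
			(off_t)(dq->dq_id * sizeof(struct ufs_dqblk)),
			UIO_SYSSPACE, IO_NODELOCKED,
			dq->dq_ump->um_cred[dq->dq_type], NULL));
}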
Example #16
static int
tmpfs_nresolve(struct vop_nresolve_args *v)
{
	struct vnode *dvp = v->a_dvp;
	struct vnode *vp = NULL;
	struct namecache *ncp = v->a_nch->ncp;
	struct tmpfs_node *tnode;
	struct mount *mp;
	struct tmpfs_dirent *de;
	struct tmpfs_node *dnode;
	int error;

	mp = dvp->v_mount;

	dnode = VP_TO_TMPFS_DIR(dvp);

	TMPFS_NODE_LOCK_SH(dnode);
	de = tmpfs_dir_lookup(dnode, NULL, ncp);
	if (de == NULL) {
		error = ENOENT;
	} else {
		/*
		 * Allocate a vnode for the node we found.
		 */
		tnode = de->td_node;
		error = tmpfs_alloc_vp(dvp->v_mount, tnode,
				       LK_EXCLUSIVE | LK_RETRY, &vp);
		if (error)
			goto out;
		KKASSERT(vp);
	}

out:
	TMPFS_NODE_UNLOCK(dnode);

	if ((dnode->tn_status & TMPFS_NODE_ACCESSED) == 0) {
		TMPFS_NODE_LOCK(dnode);
		dnode->tn_status |= TMPFS_NODE_ACCESSED;
		TMPFS_NODE_UNLOCK(dnode);
	}

	/*
	 * Store the result of this lookup in the cache.  Avoid this if the
	 * request was for creation, as it does not improve timings on
	 * empirical tests.
	 */
	if (vp) {
		vn_unlock(vp);
		cache_setvp(v->a_nch, vp);
		vrele(vp);
	} else if (error == ENOENT) {
		cache_setvp(v->a_nch, NULL);
	}
	return (error);
}
Example #17
/*
 * MPSAFE
 */
static int
vn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct ccms_lock ccms_lock;
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not p %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	   ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = vn_get_fpf_offset(fp);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	ioflag |= sequential_heuristic(uio, fp);
	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, uio);
	error = VOP_WRITE(vp, uio, ioflag, cred);
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	if ((flags & O_FOFFSET) == 0)
		vn_set_fpf_offset(fp, uio->uio_offset);
	return (error);
}
Example #18
/*
 * This closes /dev/tty.  Because multiple opens of /dev/tty only
 * generate a single open to the actual tty, the file modes are
 * locked to FREAD|FWRITE.
 */
static int
cttyclose(struct dev_close_args *ap)
{
	struct proc *p = curproc;
	struct vnode *ttyvp;
	int error;

	KKASSERT(p);
retry:
	/*
	 * The tty may have been TIOCNOTTY'd, don't return an
	 * error on close.  We just have nothing to do.
	 */
	if ((ttyvp = cttyvp(p)) == NULL)
		return(0);
	if (ttyvp->v_flag & VCTTYISOPEN) {
		/*
		 * Avoid a nasty race if we block while getting the lock.
		 */
		vref(ttyvp);
		error = vn_lock(ttyvp, LK_EXCLUSIVE | LK_RETRY |
				       LK_FAILRECLAIM);
		if (error) {
			vrele(ttyvp);
			goto retry;
		}
		if (ttyvp != cttyvp(p) || (ttyvp->v_flag & VCTTYISOPEN) == 0) {
			kprintf("Warning: cttyclose: race avoided\n");
			vn_unlock(ttyvp);
			vrele(ttyvp);
			goto retry;
		}
		vclrflags(ttyvp, VCTTYISOPEN);
		error = VOP_CLOSE(ttyvp, FREAD|FWRITE);
		vn_unlock(ttyvp);
		vrele(ttyvp);
	} else {
		error = 0;
	}
	return(error);
}
Example #19
static void
hammer_close_device(struct vnode **devvpp, int ronly)
{
	if (*devvpp) {
		vn_lock(*devvpp, LK_EXCLUSIVE | LK_RETRY);
		vinvalbuf(*devvpp, ronly ? 0 : V_SAVE, 0, 0);
		VOP_CLOSE(*devvpp, (ronly ? FREAD : FREAD|FWRITE), NULL);
		vn_unlock(*devvpp);
		vrele(*devvpp);
		*devvpp = NULL;
	}
}
Example #20
/*
 * Vnode close call
 *
 * MPSAFE
 */
int
vn_close(struct vnode *vp, int flags)
{
	int error;

	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (error == 0) {
		error = VOP_CLOSE(vp, flags);
		vn_unlock(vp);
	}
	vrele(vp);
	return (error);
}
Example #21
/*
 * Vnode close call
 *
 * MPSAFE
 */
int
vn_close(struct vnode *vp, int flags, struct file *fp)
{
	int error;

	error = vn_lock(vp, LK_SHARED | LK_RETRY | LK_FAILRECLAIM);
	if (error == 0) {
		error = VOP_CLOSE(vp, flags, fp);
		vn_unlock(vp);
	}
	vrele(vp);
	return (error);
}
Example #22
static int
devfs_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);

	*ap->a_vpp = NULL;
	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	if (dnode->parent != NULL) {
		devfs_allocv(ap->a_vpp, dnode->parent);
		vn_unlock(*ap->a_vpp);
	}
	lockmgr(&devfs_lock, LK_RELEASE);

	return ((*ap->a_vpp == NULL) ? ENOENT : 0);
}
Example #23
/*
 * Cleanup a nlookupdata structure after we are through with it.  This may
 * be called on any nlookupdata structure initialized with nlookup_init().
 * Calling nlookup_done() is mandatory in all cases except where nlookup_init()
 * returns an error, even if as a consumer you believe you have taken all
 * dynamic elements out of the nlookupdata structure.
 */
void
nlookup_done(struct nlookupdata *nd)
{
    if (nd->nl_nch.ncp) {
	if (nd->nl_flags & NLC_NCPISLOCKED) {
	    nd->nl_flags &= ~NLC_NCPISLOCKED;
	    cache_unlock(&nd->nl_nch);
	}
	if (nd->nl_flags & NLC_NCDIR) {
		cache_drop_ncdir(&nd->nl_nch);
		nd->nl_flags &= ~NLC_NCDIR;
	} else {
		cache_drop(&nd->nl_nch);	/* NULL's out the nch */
	}
    }
    if (nd->nl_rootnch.ncp)
	cache_drop_and_cache(&nd->nl_rootnch);
    if (nd->nl_jailnch.ncp)
	cache_drop_and_cache(&nd->nl_jailnch);
    if ((nd->nl_flags & NLC_HASBUF) && nd->nl_path) {
	objcache_put(namei_oc, nd->nl_path);
	nd->nl_path = NULL;
    }
    if (nd->nl_cred) {
	if ((nd->nl_flags & NLC_BORROWCRED) == 0)
	    crfree(nd->nl_cred);
	nd->nl_cred = NULL;
	nd->nl_flags &= ~NLC_BORROWCRED;
    }
    if (nd->nl_open_vp) {
	if (nd->nl_flags & NLC_LOCKVP) {
		vn_unlock(nd->nl_open_vp);
		nd->nl_flags &= ~NLC_LOCKVP;
	}
	vn_close(nd->nl_open_vp, nd->nl_vp_fmode, NULL);
	nd->nl_open_vp = NULL;
    }
    if (nd->nl_dvp) {
	vrele(nd->nl_dvp);
	nd->nl_dvp = NULL;
    }
    nd->nl_flags = 0;	/* clear remaining flags (just clear everything) */
}
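A hedged sketch of the usual lifecycle around nlookup_done(): initialize, resolve, extract a referenced vnode, then clean up, mirroring the pattern hammer_setup_device() uses below. path_to_vnode() is an illustrative name, not an existing kernel routine.

static int
path_to_vnode(const char *path, struct vnode **vpp)
{
	struct nlookupdata nd;
	int error;

	*vpp = NULL;
	error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = cache_vref(&nd.nl_nch, nd.nl_cred, vpp);
	nlookup_done(&nd);	/* same cleanup pattern as hammer_setup_device() */
	return (error);
}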
Example #24
/*
 * vm_contig_pg_clean:
 * 
 * Do a thorough cleanup of the specified 'queue', which can be either
 * PQ_ACTIVE or PQ_INACTIVE by doing a walkthrough.  If the page is not
 * marked dirty, it is shoved into the page cache, provided no one has
 * currently acquired it, otherwise localized action per object type
 * is taken for cleanup:
 *
 * 	In the OBJT_VNODE case, the whole page range is cleaned up
 * 	using the vm_object_page_clean() routine, by specifying a
 * 	start and end of '0'.
 *
 * 	Otherwise if the object is of any other type, the generic
 * 	pageout (daemon) flush routine is invoked.
 *
 * The caller must hold vm_token.
 */
static int
vm_contig_pg_clean(int queue)
{
	vm_object_t object;
	vm_page_t m, m_tmp, next;

	ASSERT_LWKT_TOKEN_HELD(&vm_token);

	for (m = TAILQ_FIRST(&vm_page_queues[queue].pl); m != NULL; m = next) {
		KASSERT(m->queue == queue,
			("vm_contig_clean: page %p's queue is not %d", 
			m, queue));
		next = TAILQ_NEXT(m, pageq);

		if (m->flags & PG_MARKER)
			continue;
		
		if (vm_page_sleep_busy(m, TRUE, "vpctw0"))
			return (TRUE);
		
		vm_page_test_dirty(m);
		if (m->dirty) {
			object = m->object;
			if (object->type == OBJT_VNODE) {
				vn_lock(object->handle, LK_EXCLUSIVE|LK_RETRY);
				vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
				vn_unlock(((struct vnode *)object->handle));
				return (TRUE);
			} else if (object->type == OBJT_SWAP ||
					object->type == OBJT_DEFAULT) {
				m_tmp = m;
				vm_pageout_flush(&m_tmp, 1, 0);
				return (TRUE);
			}
		}
		KKASSERT(m->busy == 0);
		if (m->dirty == 0 && m->hold_count == 0) {
			vm_page_busy(m);
			vm_page_cache(m);
		}
	}
	return (FALSE);
}
Example #25
static int
hammer_setup_device(struct vnode **devvpp, const char *dev_path, int ronly)
{
	int error;
	struct nlookupdata nd;

	/*
	 * Get the device vnode
	 */
	if (*devvpp == NULL) {
		error = nlookup_init(&nd, dev_path, UIO_SYSSPACE, NLC_FOLLOW);
		if (error == 0)
			error = nlookup(&nd);
		if (error == 0)
			error = cache_vref(&nd.nl_nch, nd.nl_cred, devvpp);
		nlookup_done(&nd);
	} else {
		error = 0;
	}

	if (error == 0) {
		if (vn_isdisk(*devvpp, &error)) {
			error = vfs_mountedon(*devvpp);
		}
	}
	if (error == 0 && vcount(*devvpp) > 0)
		error = EBUSY;
	if (error == 0) {
		vn_lock(*devvpp, LK_EXCLUSIVE | LK_RETRY);
		error = vinvalbuf(*devvpp, V_SAVE, 0, 0);
		if (error == 0) {
			error = VOP_OPEN(*devvpp,
					 (ronly ? FREAD : FREAD|FWRITE),
					 FSCRED, NULL);
		}
		vn_unlock(*devvpp);
	}
	if (error && *devvpp) {
		vrele(*devvpp);
		*devvpp = NULL;
	}
	return (error);
}
Example #26
/*
 * nwfs_readdir call
 *
 * nwfs_readdir(struct vnode *a_vp, struct uio *a_uio, struct ucred *a_cred,
 *		int *a_eofflag, off_t *a_cookies, int a_ncookies)
 */
static int
nwfs_readdir(struct vop_readdir_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	int error;

	if (vp->v_type != VDIR)
		return (EPERM);
	if (ap->a_ncookies) {
		kprintf("nwfs_readdir: no support for cookies now...");
		return (EOPNOTSUPP);
	}
	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_FAILRECLAIM);
	if (error)
		return (error);
	error = nwfs_readvnode(vp, uio, ap->a_cred);
	vn_unlock(vp);
	return error;
}
Example #27
int
vn_opendisk(const char *devname, int fmode, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	if (strncmp(devname, "/dev/", 5) == 0)
		devname += 5;
	if ((vp = getsynthvnode(devname)) == NULL) {
		error = ENODEV;
	} else {
		error = VOP_OPEN(vp, fmode, proc0.p_ucred, NULL);
		vn_unlock(vp);
		if (error) {
			vrele(vp);
			vp = NULL;
		}
	}
	*vpp = vp;
	return (error);
}
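A hypothetical caller sketch for vn_opendisk(): open a named disk device read-only, use it, then release it. The cleanup assumes the three-argument vn_close() shown in Example #21, which relocks the vnode, issues VOP_CLOSE() and drops the reference; probe_disk() is illustrative only.

static int
probe_disk(const char *devname)
{
	struct vnode *vp;
	int error;

	error = vn_opendisk(devname, FREAD, &vp);
	if (error)
		return (error);
	/* ... inspect the device here ... */
	vn_close(vp, FREAD, NULL);	/* assumed to balance the open above */
	return (0);
}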
Example #28
/*
 *	union_inactive:
 *
 *	Called with the vnode locked.  We are expected to unlock the vnode.
 *
 * union_inactive(struct vnode *a_vp, struct thread *a_td)
 */
static int
union_inactive(struct vop_inactive_args *ap)
{
	struct vnode *vp = ap->a_vp;
	/*struct thread *td = ap->a_td;*/
	struct union_node *un = VTOUNION(vp);
	struct vnode **vpp;

	/*
	 * Do nothing (and _don't_ bypass).
	 * Wait to vrele lowervp until reclaim,
	 * so that until then our union_node is in the
	 * cache and reusable.
	 *
	 * NEEDSWORK: Someday, consider inactive'ing
	 * the lowervp and then trying to reactivate it
	 * with capabilities (v_id)
	 * like they do in the name lookup cache code.
	 * That's too much work for now.
	 */

	if (un->un_dircache != 0) {
		for (vpp = un->un_dircache; *vpp != NULLVP; vpp++)
			vrele(*vpp);
		kfree (un->un_dircache, M_TEMP);
		un->un_dircache = 0;
	}

#if 0
	if ((un->un_flags & UN_ULOCK) && un->un_uppervp) {
		un->un_flags &= ~UN_ULOCK;
		vn_unlock(un->un_uppervp);
	}
#endif

	if ((un->un_flags & UN_CACHED) == 0)
		vgone_vxlocked(vp);

	return (0);
}
Example #29
/*
 * vop_compat_nlookupdotdot { struct vnode *a_dvp,
 *			struct vnode **a_vpp,
 *			struct ucred *a_cred }
 *
 * Lookup the vnode representing the parent directory of the specified
 * directory vnode.  a_dvp should not be locked.  If no error occurs *a_vpp
 * will contain the parent vnode, locked and refd, else *a_vpp will be NULL.
 *
 * This function is designed to aid NFS server-side operations and is
 * used by cache_fromdvp() to create a consistent, connected namecache
 * topology.
 *
 * As part of the NEW API work, VFSs will first split their CNP_ISDOTDOT
 * code out from their *_lookup() and create *_nlookupdotdot().  Then as time
 * permits VFSs will implement the remaining *_n*() calls and finally get
 * rid of their *_lookup() call.
 */
int
vop_compat_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
	struct componentname cnp;
	int error;

	/*
	 * UFS currently stores all sorts of side effects, including a loop
	 * variable, in the directory inode.  That needs to be fixed and the
	 * other VFS's audited before we can switch to LK_SHARED.
	 */
	*ap->a_vpp = NULL;
	if ((error = vget(ap->a_dvp, LK_EXCLUSIVE)) != 0)
		return (error);
	if (ap->a_dvp->v_type != VDIR) {
		vput(ap->a_dvp);
		return (ENOTDIR);
	}

	bzero(&cnp, sizeof(cnp));
	cnp.cn_nameiop = NAMEI_LOOKUP;
	cnp.cn_flags = CNP_ISDOTDOT;
	cnp.cn_nameptr = "..";
	cnp.cn_namelen = 2;
	cnp.cn_cred = ap->a_cred;
	cnp.cn_td = curthread; /* XXX */

	/*
	 * vop_old_lookup() always returns vp locked.  dvp may or may not be
	 * left locked depending on CNP_PDIRUNLOCK.
	 */
	error = vop_old_lookup(ap->a_head.a_ops, ap->a_dvp, ap->a_vpp, &cnp);
	if (error == 0)
		vn_unlock(*ap->a_vpp);
	if (cnp.cn_flags & CNP_PDIRUNLOCK)
		vrele(ap->a_dvp);
	else
		vput(ap->a_dvp);
	return (error);
}
Example #30
/*
 * readdir() returns directory entries from pfsnode (vp).
 *
 * We generate just one directory entry at a time, as it would probably
 * not pay off to buffer several entries locally to save uiomove calls.
 *
 * procfs_readdir(struct vnode *a_vp, struct uio *a_uio, struct ucred *a_cred,
 *		  int *a_eofflag, int *a_ncookies, off_t **a_cookies)
 */
static int
procfs_readdir(struct vop_readdir_args *ap)
{
	struct pfsnode *pfs;
	int error;

	if (ap->a_uio->uio_offset < 0 || ap->a_uio->uio_offset > INT_MAX)
		return (EINVAL);
	if ((error = vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY)) != 0)
		return (error);
	pfs = VTOPFS(ap->a_vp);

	switch (pfs->pfs_type) {
	case Pproc:
		/*
		 * this is for the process-specific sub-directories.
		 * all that is needed is to copy out all the entries
		 * from the procent[] table (top of this file).
		 */
		error = procfs_readdir_proc(ap);
		break;
	case Proot:
		/*
		 * this is for the root of the procfs filesystem
		 * what is needed is a special entry for "curproc"
		 * followed by an entry for each process on allproc
		 */
		error = procfs_readdir_root(ap);
		break;
	default:
		error = ENOTDIR;
		break;
	}

	vn_unlock(ap->a_vp);
	return (error);
}