Example #1
/*
 * This opens /dev/tty.  Because multiple opens of /dev/tty only
 * generate a single open to the actual tty, the file modes are
 * locked to FREAD|FWRITE.
 */
static	int
cttyopen(struct dev_open_args *ap)
{
	struct proc *p = curproc;
	struct vnode *ttyvp;
	int error;

	KKASSERT(p);
retry:
	if ((ttyvp = cttyvp(p)) == NULL)
		return (ENXIO);
	if (ttyvp->v_flag & VCTTYISOPEN)
		return (0);

	/*
	 * Messy interlock, don't let the vnode go away while we try to
	 * lock it and check for race after we might have blocked.
	 */
	vhold(ttyvp);
	vn_lock(ttyvp, LK_EXCLUSIVE | LK_RETRY);
	if (ttyvp != cttyvp(p) || (ttyvp->v_flag & VCTTYISOPEN)) {
		kprintf("Warning: cttyopen: race avoided\n");
		vn_unlock(ttyvp);
		vdrop(ttyvp);
		goto retry;
	}
	vsetflags(ttyvp, VCTTYISOPEN);
	error = VOP_OPEN(ttyvp, FREAD|FWRITE, ap->a_cred, NULL);
	if (error)
		vclrflags(ttyvp, VCTTYISOPEN);
	vn_unlock(ttyvp);
	vdrop(ttyvp);
	return(error);
}
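
A note on the entry point above: cttyopen() is what a userland open of /dev/tty ultimately reaches. As a rough, hypothetical illustration (not taken from any of the projects quoted here), the demo below opens /dev/tty and writes to it; a process without a controlling terminal hits the cttyvp(p) == NULL branch and gets ENXIO. Names and messages are illustrative only.

/* Hypothetical userland demo, not part of the driver sources above. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	const char msg[] = "hello from the controlling tty\n";
	int fd = open("/dev/tty", O_RDWR);

	if (fd < 0) {
		/* No controlling terminal: the ENXIO case in cttyopen(). */
		fprintf(stderr, "open /dev/tty: %s\n", strerror(errno));
		return (1);
	}
	write(fd, msg, sizeof(msg) - 1);
	close(fd);
	return (0);
}
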
Example #2
/*
 * This opens /dev/tty.  Because multiple opens of /dev/tty only
 * generate a single open to the actual tty, the file modes are
 * locked to FREAD|FWRITE.
 */
static	int
cttyopen(struct dev_open_args *ap)
{
	struct proc *p = curproc;
	struct vnode *ttyvp;
	int error;

	KKASSERT(p);
retry:
	if ((ttyvp = cttyvp(p)) == NULL)
		return (ENXIO);
	if (ttyvp->v_flag & VCTTYISOPEN)
		return (0);

	/*
	 * Messy interlock, don't let the vnode go away while we try to
	 * lock it and check for race after we might have blocked.
	 *
	 * WARNING! The device open (devfs_spec_open()) temporarily
	 *	    releases the vnode lock on ttyvp when issuing the
	 *	    dev_dopen(), which means that the VCTTYISOPEN flag
	 *	    can race during the VOP_OPEN().
	 *
	 *	    If something does race we have to undo our potentially
	 *	    extra open.
	 */
	vhold(ttyvp);
	vn_lock(ttyvp, LK_EXCLUSIVE | LK_RETRY);
	if (ttyvp != cttyvp(p) || (ttyvp->v_flag & VCTTYISOPEN)) {
		kprintf("Warning: cttyopen: race-1 avoided\n");
		vn_unlock(ttyvp);
		vdrop(ttyvp);
		goto retry;
	}
	error = VOP_OPEN(ttyvp, FREAD|FWRITE, ap->a_cred, NULL);

	/*
	 * Race against ctty close or change.  This case has been validated
	 * and occurs every so often during synth builds.
	 */
	if (ttyvp != cttyvp(p) || (ttyvp->v_flag & VCTTYISOPEN)) {
		if (error == 0)
			VOP_CLOSE(ttyvp, FREAD|FWRITE, NULL);
		vn_unlock(ttyvp);
		vdrop(ttyvp);
		goto retry;
	}
	if (error == 0)
		vsetflags(ttyvp, VCTTYISOPEN);
	vn_unlock(ttyvp);
	vdrop(ttyvp);
	return(error);
}
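
Example #2 adds a second race check: the open is issued first, and if the controlling tty changed or was opened by someone else while the vnode lock was temporarily dropped, the extra open is undone with a close. The sketch below is a self-contained pthread analogue of that shape, assuming nothing from the kernel sources: resource_open()/resource_close() stand in for VOP_OPEN/VOP_CLOSE, the 'opened' flag for VCTTYISOPEN, and the mutex for the vnode lock that the device open temporarily releases.

/* Illustrative analogue only; all names here are placeholders. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int opened;			/* plays the role of VCTTYISOPEN */

static void resource_open(void)  { printf("open issued\n"); }
static void resource_close(void) { printf("extra open undone\n"); }

static void *
opener(void *arg)
{
	int already, doclose = 0;

	(void)arg;
	pthread_mutex_lock(&lock);
	already = opened;
	pthread_mutex_unlock(&lock);

	if (!already) {
		resource_open();	/* lock not held: a race is possible */
		pthread_mutex_lock(&lock);
		if (opened)
			doclose = 1;	/* lost the race: undo below */
		else
			opened = 1;	/* won the race: keep the open */
		pthread_mutex_unlock(&lock);
		if (doclose)
			resource_close();
	}
	return (NULL);
}

int
main(void)
{
	pthread_t t[2];
	int i;

	for (i = 0; i < 2; i++)
		pthread_create(&t[i], NULL, opener, NULL);
	for (i = 0; i < 2; i++)
		pthread_join(t[i], NULL);
	return (0);
}
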
Example #3
/*ARGSUSED*/
int
cttyopen(dev_t dev, int flag, int mode, struct proc *p)
{
    struct vnode *ttyvp = cttyvp(p);
    int error;

    if (ttyvp == NULL)
        return (ENXIO);
    vn_lock(ttyvp, LK_EXCLUSIVE | LK_RETRY, p);
#ifdef PARANOID
    /*
     * Since group is tty and mode is 620 on most terminal lines
     * and since sessions protect terminals from processes outside
     * your session, this check is probably no longer necessary.
     * Since it inhibits setuid root programs that later switch
     * to another user from accessing /dev/tty, we have decided
     * to delete this test. (mckusick 5/93)
     */
    error = VOP_ACCESS(ttyvp,
                       (flag&FREAD ? VREAD : 0) | (flag&FWRITE ? VWRITE : 0), p->p_ucred, p);
    if (!error)
#endif /* PARANOID */
        error = VOP_OPEN(ttyvp, flag, NOCRED, p);
    VOP_UNLOCK(ttyvp, 0, p);
    return (error);
}
Example #4
static int
cttykqfilter(struct dev_kqfilter_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct proc *p = curproc;
	struct knote *kn = ap->a_kn;
	struct vnode *ttyvp;

	KKASSERT(p);
	ttyvp = cttyvp(p);

	if (ttyvp != NULL)
		return (VOP_KQFILTER(ttyvp, kn));

	ap->a_result = 0;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &cttyfiltops_read;
		kn->kn_hook = (caddr_t)dev;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &cttyfiltops_write;
		kn->kn_hook = (caddr_t)dev;
		break;
	default:
		ap->a_result = EOPNOTSUPP;
		return (0);
	}

	return (0);
}
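
Example #4 is the kernel half of kqueue support: when a controlling tty exists the knote is handed to VOP_KQFILTER(), otherwise the fallback ctty filter ops are attached. A minimal, hypothetical userland counterpart (BSD/macOS kqueue API) that exercises this path looks roughly like this:

/* Hypothetical kqueue demo; error handling trimmed for brevity. */
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fd = open("/dev/tty", O_RDONLY);
	int kq = kqueue();
	struct kevent change, event;

	if (fd < 0 || kq < 0)
		return (1);
	/* Register interest in readability; the driver forwards this
	 * to VOP_KQFILTER() on the controlling tty's vnode. */
	EV_SET(&change, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
	if (kevent(kq, &change, 1, NULL, 0, NULL) < 0)
		return (1);
	/* Block until the terminal has input. */
	if (kevent(kq, NULL, 0, &event, 1, NULL) == 1)
		printf("tty readable, %lld bytes pending\n",
		    (long long)event.data);
	close(kq);
	close(fd);
	return (0);
}
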
Example #5
int
cttykqfilter(dev_t dev, struct knote *kn)
{
	struct vnode *ttyvp = cttyvp(curproc);

	if (ttyvp == NULL)
		return (ENXIO);
	return (VOP_KQFILTER(ttyvp, kn));
}
/*ARGSUSED*/
static int
cttypoll(dev_t dev, int events, struct lwp *l)
{
	struct vnode *ttyvp = cttyvp(l->l_proc);

	if (ttyvp == NULL)
		return (seltrue(dev, events, l));
	return (VOP_POLL(ttyvp, events));
}
Example #7
/*ARGSUSED*/
int
cttypoll(dev_t dev, int events, struct proc *p)
{
    struct vnode *ttyvp = cttyvp(p);

    if (ttyvp == NULL)	/* try operation to get EOF/failure */
        return (seltrue(dev, events, p));
    return (VOP_POLL(ttyvp, events, p));
}
static int
cttykqfilter(dev_t dev, struct knote *kn)
{
	/* This is called from filt_fileattach() by the attaching process. */
	struct proc *p = curproc;
	struct vnode *ttyvp = cttyvp(p);

	if (ttyvp == NULL)
		return (1);
	return (VOP_KQFILTER(ttyvp, kn));
}
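
The cttypoll() variants in Examples #5 and #7 forward poll events to the controlling tty via VOP_POLL(), or fall back to seltrue() when there is none. A small, hypothetical caller-side sketch (the one-second timeout is arbitrary):

/* Hypothetical poll demo; not taken from the driver sources. */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct pollfd pfd;

	pfd.fd = open("/dev/tty", O_RDWR);
	if (pfd.fd < 0)
		return (1);
	pfd.events = POLLIN;

	/* Ends up in cttypoll()/VOP_POLL() for the controlling tty. */
	switch (poll(&pfd, 1, 1000)) {
	case -1:
		perror("poll");
		break;
	case 0:
		printf("no input within 1 second\n");
		break;
	default:
		printf("revents = 0x%x\n", (unsigned)pfd.revents);
		break;
	}
	close(pfd.fd);
	return (0);
}
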
Example #9
/*
 * This closes /dev/tty.  Because multiple opens of /dev/tty only
 * generate a single open to the actual tty, the file modes are
 * locked to FREAD|FWRITE.
 */
static int
cttyclose(struct dev_close_args *ap)
{
	struct proc *p = curproc;
	struct vnode *ttyvp;
	int error;

	KKASSERT(p);
retry:
	/*
	 * The tty may have been TIOCNOTTY'd, don't return an
	 * error on close.  We just have nothing to do.
	 */
	if ((ttyvp = cttyvp(p)) == NULL)
		return(0);
	if (ttyvp->v_flag & VCTTYISOPEN) {
		/*
		 * Avoid a nasty race if we block while getting the lock.
		 */
		vref(ttyvp);
		error = vn_lock(ttyvp, LK_EXCLUSIVE | LK_RETRY |
				       LK_FAILRECLAIM);
		if (error) {
			vrele(ttyvp);
			goto retry;
		}
		if (ttyvp != cttyvp(p) || (ttyvp->v_flag & VCTTYISOPEN) == 0) {
			kprintf("Warning: cttyclose: race avoided\n");
			vn_unlock(ttyvp);
			vrele(ttyvp);
			goto retry;
		}
		vclrflags(ttyvp, VCTTYISOPEN);
		error = VOP_CLOSE(ttyvp, FREAD|FWRITE);
		vn_unlock(ttyvp);
		vrele(ttyvp);
	} else {
		error = 0;
	}
	return(error);
}
Example #10
int
cttyopen(dev_t dev, int flag, int mode, struct proc *p)
{
	struct vnode *ttyvp = cttyvp(p);
	int error;

	if (ttyvp == NULL)
		return (ENXIO);
	vn_lock(ttyvp, LK_EXCLUSIVE | LK_RETRY, p);
	error = VOP_OPEN(ttyvp, flag, NOCRED);
	VOP_UNLOCK(ttyvp, 0);
	return (error);
}
/*ARGSUSED*/
static int
cttywrite(dev_t dev, struct uio *uio, int flag)
{
	struct vnode *ttyvp = cttyvp(curproc);
	int error;

	if (ttyvp == NULL)
		return (EIO);
	vn_lock(ttyvp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_WRITE(ttyvp, uio, flag, NOCRED);
	VOP_UNLOCK(ttyvp);
	return (error);
}
Example #12
/*ARGSUSED*/
int
cttywrite(dev_t dev, struct uio *uio, int flag)
{
    struct proc *p = uio->uio_procp;
    struct vnode *ttyvp = cttyvp(uio->uio_procp);
    int error;

    if (ttyvp == NULL)
        return (EIO);
    vn_lock(ttyvp, LK_EXCLUSIVE | LK_RETRY, p);
    error = VOP_WRITE(ttyvp, uio, flag, NOCRED);
    VOP_UNLOCK(ttyvp, 0, p);
    return (error);
}
Example #13
int
cttyselect(__unused dev_t dev, int flag, void* wql, __unused proc_t p)
{
	vnode_t ttyvp = cttyvp(current_proc());
	struct vfs_context context;
	int error;

	context.vc_thread = current_thread();
	context.vc_ucred = NOCRED;

	if (ttyvp == NULL)
		return (1);	/* try operation to get EOF/failure */
	error = VNOP_SELECT(ttyvp, flag, FREAD|FWRITE, wql, &context);
	vnode_put(ttyvp);
	return (error);
}
Example #14
/*
 * Write to the controlling terminal (/dev/tty).  The tty is refed as
 * of the cttyvp(), but the ref can get ripped out from under us if
 * the controlling terminal is revoked while we are blocked on the lock,
 * so use vget() instead of vn_lock().
 */
static	int
cttywrite(struct dev_write_args *ap)
{
	struct proc *p = curproc;
	struct vnode *ttyvp;
	int error;

	KKASSERT(p);
	ttyvp = cttyvp(p);
	if (ttyvp == NULL)
		return (EIO);
	if ((error = vget(ttyvp, LK_EXCLUSIVE | LK_RETRY)) == 0) {
		error = VOP_WRITE(ttyvp, ap->a_uio, ap->a_ioflag, NOCRED);
		vput(ttyvp);
	}
	return (error);
}
Example #15
int
cttywrite(__unused dev_t dev, struct uio *uio, int flag)
{
	vnode_t ttyvp = cttyvp(current_proc());
	struct vfs_context context;
	int error;

	if (ttyvp == NULL)
		return (EIO);

	context.vc_thread = current_thread();
	context.vc_ucred = NOCRED;

	error = VNOP_WRITE(ttyvp, uio, flag, &context);
	vnode_put(ttyvp);

	return (error);
}
Example #16
/*ARGSUSED*/
int
cttyioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
{
    struct vnode *ttyvp = cttyvp(p);

    if (ttyvp == NULL)
        return (EIO);
    if (cmd == TIOCSCTTY)		/* XXX */
        return (EINVAL);
    if (cmd == TIOCNOTTY) {
        if (!SESS_LEADER(p)) {
            atomic_clearbits_int(&p->p_flag, P_CONTROLT);
            return (0);
        } else
            return (EINVAL);
    }
    return (VOP_IOCTL(ttyvp, cmd, addr, flag, NOCRED, p));
}
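
The TIOCNOTTY branch in Example #16 clears P_CONTROLT for a caller that is not a session leader and returns EINVAL otherwise; TIOCSCTTY is rejected outright because /dev/tty cannot become its own controlling terminal (the "infinite recursion" noted in Examples #18 and #20). From userland, detaching looks roughly like the hypothetical helper below (the function name is made up):

/* Hypothetical helper, not part of the driver sources. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>

/*
 * Detach from the controlling terminal.  Returns 0 on success,
 * -1 if there is no controlling tty or the caller is a session
 * leader (the EINVAL case in cttyioctl() above).
 */
static int
detach_controlling_tty(void)
{
	int fd = open("/dev/tty", O_RDWR);

	if (fd < 0)
		return (-1);
	if (ioctl(fd, TIOCNOTTY, 0) < 0) {
		close(fd);
		return (-1);
	}
	close(fd);
	return (0);
}

int
main(void)
{
	return (detach_controlling_tty() == 0 ? 0 : 1);
}
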
Example #17
File: tty_tty.c Project: 0xffea/xnu
int
cttyopen(__unused dev_t dev, int flag, __unused int mode, proc_t p)
{
	vnode_t ttyvp = cttyvp(p);
	struct vfs_context context;
	int error;

	if (ttyvp == NULL)
		return (ENXIO);

	context.vc_thread = current_thread();
	context.vc_ucred = kauth_cred_proc_ref(p);

	error = VNOP_OPEN(ttyvp, flag, &context);
	vnode_put(ttyvp);
	kauth_cred_unref(&context.vc_ucred);

	return (error);
}
Example #18
/*ARGSUSED*/
static	int
cttyioctl(struct dev_ioctl_args *ap)
{
	struct vnode *ttyvp;
	struct proc *p = curproc;

	KKASSERT(p);
	lwkt_gettoken(&p->p_token);
	lwkt_gettoken(&proc_token);
	ttyvp = cttyvp(p);
	if (ttyvp == NULL) {
		lwkt_reltoken(&proc_token);
		lwkt_reltoken(&p->p_token);
		return (EIO);
	}
	/*
	 * Don't allow controlling tty to be set to the controlling tty
	 * (infinite recursion).
	 */
	if (ap->a_cmd == TIOCSCTTY) {
		lwkt_reltoken(&proc_token);
		lwkt_reltoken(&p->p_token);
		return EINVAL;
	}
	if (ap->a_cmd == TIOCNOTTY) {
		if (!SESS_LEADER(p)) {
			p->p_flags &= ~P_CONTROLT;
			lwkt_reltoken(&proc_token);
			lwkt_reltoken(&p->p_token);
			return (0);
		} else {
			lwkt_reltoken(&proc_token);
			lwkt_reltoken(&p->p_token);
			return (EINVAL);
		}
	}
	lwkt_reltoken(&proc_token);
	lwkt_reltoken(&p->p_token);

	return (VOP_IOCTL(ttyvp, ap->a_cmd, ap->a_data, ap->a_fflag,
			  ap->a_cred, ap->a_sysmsg));
}
/*ARGSUSED*/
static int
cttyioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
{
	struct vnode *ttyvp = cttyvp(l->l_proc);
	int rv;

	if (ttyvp == NULL)
		return (EIO);
	if (cmd == TIOCSCTTY)		/* XXX */
		return (EINVAL);
	if (cmd == TIOCNOTTY) {
		mutex_enter(proc_lock);
		if (!SESS_LEADER(l->l_proc)) {
			l->l_proc->p_lflag &= ~PL_CONTROLT;
			rv = 0;
		} else
			rv = EINVAL;
		mutex_exit(proc_lock);
		return (rv);
	}
	return (VOP_IOCTL(ttyvp, cmd, addr, flag, NOCRED));
}
Example #20
int
cttyioctl(__unused dev_t dev, u_long cmd, caddr_t addr, int flag, proc_t p)
{
	vnode_t ttyvp = cttyvp(current_proc());
	struct vfs_context context;
	struct session *sessp;
	int error = 0;

	if (ttyvp == NULL)
		return (EIO);
	if (cmd == TIOCSCTTY)  { /* don't allow controlling tty to be set    */
		error = EINVAL; /* to controlling tty -- infinite recursion */
		goto out;
	}
	if (cmd == TIOCNOTTY) {
		sessp = proc_session(p);
		if (!SESS_LEADER(p, sessp)) {
			OSBitAndAtomic(~((uint32_t)P_CONTROLT), &p->p_flag);
			if (sessp != SESSION_NULL)
				session_rele(sessp);
			error = 0;
			goto out;
		} else {
			if (sessp != SESSION_NULL)
				session_rele(sessp);
			error = EINVAL;
			goto out;
		}
	}
	context.vc_thread = current_thread();
	context.vc_ucred = NOCRED;

	error = VNOP_IOCTL(ttyvp, cmd, addr, flag, &context);
out:
	vnode_put(ttyvp);
	return (error);
}
Example #21
int
cttyopen(dev_t dev, int flag, __unused int mode, proc_t p)
{
	vnode_t ttyvp = cttyvp(p);
	struct vfs_context context;
	int error = 0;
	int cttyflag, doclose = 0;
	struct session *sessp;

	if (ttyvp == NULL)
		return (ENXIO);

	context.vc_thread = current_thread();
	context.vc_ucred = kauth_cred_proc_ref(p);

	sessp = proc_session(p);
	session_lock(sessp);
	cttyflag = sessp->s_flags & S_CTTYREF;	
	session_unlock(sessp);

	/*
	 * A little hack--this device, used by many processes,
	 * happens to do an open on another device, which can
	 * cause unhappiness if the second-level open blocks indefinitely
	 * (as could be the case if the master side has hung up).  Since
	 * we know that this driver doesn't care about serializing
	 * opens and closes, we can drop the lock.  To avoid an open
	 * count leak, open the vnode only on the first open.
	 */
	if (cttyflag == 0) {
		devsw_unlock(dev, S_IFCHR);
		error = VNOP_OPEN(ttyvp, flag, &context);
		devsw_lock(dev, S_IFCHR);

		if (error) 
			goto out;
	
		/*
		 * If S_CTTYREF is now set, some other thread did an open
		 * and set the flag first, so undo our extra open with a
		 * close below; otherwise set the flag ourselves.
		 */
		session_lock(sessp);
		if (cttyflag == (sessp->s_flags & S_CTTYREF))
			sessp->s_flags |= S_CTTYREF;
		else
			doclose = 1;
		session_unlock(sessp);

		/*
		 * We have to take a reference here to make sure a close
		 * gets called during revoke.  Note that once a controlling
		 * tty gets opened by this driver, the only way close will
		 * get called is when the session leader, whose controlling
		 * tty is ttyvp, exits and the vnode is revoked.  We cannot
		 * redirect close from this driver because the underlying
		 * controlling terminal might change and the close might get
		 * redirected to the wrong vnode, causing a panic.
		 */
		if (doclose) {
			devsw_unlock(dev, S_IFCHR);
			VNOP_CLOSE(ttyvp, flag, &context);
			devsw_lock(dev, S_IFCHR);
		} else {
			error = vnode_ref(ttyvp);
		}
	}
out:
	session_rele(sessp);

	vnode_put(ttyvp);
	kauth_cred_unref(&context.vc_ucred);

	return (error);
}
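
The long comment in Example #21 ties the extra vnode_ref() to revocation: the close is expected to arrive when the session leader exits and the controlling terminal's vnode is revoked. On BSD-derived systems revocation can also be driven from userland with revoke(2); a hypothetical sketch follows (it requires owning the terminal or appropriate privilege, and it will disrupt anything using that terminal):

/* Hypothetical revoke(2) demo; BSD/macOS only, use with care. */
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	char *name = ttyname(STDIN_FILENO);

	if (name == NULL) {
		fprintf(stderr, "stdin is not a terminal\n");
		return (1);
	}
	if (revoke(name) < 0) {
		perror("revoke");
		return (1);
	}
	printf("revoked %s\n", name);
	return (0);
}
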