Example #1
/*
 * Exit: deallocate address space and other resources, change proc state to
 * zombie, and unlink proc from allproc and parent's lists.  Save exit status
 * and rusage for wait().  Check for child processes and orphan them.
 */
void
exit1(struct thread *td, int rv)
{
	struct proc *p, *nq, *q;
	struct vnode *vtmp;
	struct vnode *ttyvp = NULL;
#ifdef KTRACE
	struct vnode *tracevp;
	struct ucred *tracecred;
#endif
	struct plimit *plim;
	int locked;

	mtx_assert(&Giant, MA_NOTOWNED);

	p = td->td_proc;
	/*
	 * XXX in case we're rebooting we just let init die in order to
	 * work around an unsolved stack overflow seen very late during
	 * shutdown on sparc64 when the gmirror worker process exits.
	 */
	if (p == initproc && rebooting == 0) {
		printf("init died (signal %d, exit %d)\n",
		    WTERMSIG(rv), WEXITSTATUS(rv));
		panic("Going nowhere without my init!");
	}

	/*
	 * MUST abort all other threads before proceeding past here.
	 */
	PROC_LOCK(p);
	while (p->p_flag & P_HADTHREADS) {
		/*
		 * First check if some other thread got here before us;
		 * if so, act appropriately (exit or suspend).
		 */
		thread_suspend_check(0);

		/*
		 * Kill off the other threads. This requires
		 * some co-operation from other parts of the kernel
		 * so it may not be instantaneous.  With this state set
		 * any thread entering the kernel from userspace will
		 * thread_exit() in trap().  Any thread attempting to
		 * sleep will return immediately with EINTR or EWOULDBLOCK
		 * which will hopefully force them to back out to userland
		 * freeing resources as they go.  Any thread attempting
		 * to return to userland will thread_exit() from userret().
		 * thread_exit() will unsuspend us when the last of the
		 * other threads exits.
		 * If another thread has already become the single-threading
		 * thread by the time we resume, calling thread_single()
		 * will fail; in that case, we just re-check all suspension
		 * requests: the thread should either be suspended there
		 * or exit.
		 */
		if (! thread_single(SINGLE_EXIT))
			break;

		/*
		 * All other activity in this process is now stopped.
		 * Threading support has been turned off.
		 */
	}
	KASSERT(p->p_numthreads == 1,
	    ("exit1: proc %p exiting with %d threads", p, p->p_numthreads));
	/*
	 * Wakeup anyone in procfs' PIOCWAIT.  They should have a hold
	 * on our vmspace, so we should block below until they have
	 * released their reference to us.  Note that if they have
	 * requested S_EXIT stops we will block here until they ack
	 * via PIOCCONT.
	 */
	_STOPEVENT(p, S_EXIT, rv);

	/*
	 * Note that we are exiting and do another wakeup of anyone in
	 * PIOCWAIT in case they aren't listening for S_EXIT stops or
	 * decided to wait again after we told them we are exiting.
	 */
	p->p_flag |= P_WEXIT;
	wakeup(&p->p_stype);

	/*
	 * Wait for any processes that have a hold on our vmspace to
	 * release their reference.
	 */
	while (p->p_lock > 0)
		msleep(&p->p_lock, &p->p_mtx, PWAIT, "exithold", 0);

	PROC_UNLOCK(p);
	/* Drain the limit callout while we don't have the proc locked */
	callout_drain(&p->p_limco);

#ifdef AUDIT
	/*
	 * The Sun BSM exit token contains two components: an exit status as
	 * passed to exit(), and a return value to indicate what sort of exit
	 * it was.  The exit status is WEXITSTATUS(rv), but it's not clear
	 * what the return value is.
	 */
	AUDIT_ARG_EXIT(WEXITSTATUS(rv), 0);
	AUDIT_SYSCALL_EXIT(0, td);
#endif

	/* Are we a task leader? */
	if (p == p->p_leader) {
		mtx_lock(&ppeers_lock);
		q = p->p_peers;
		while (q != NULL) {
			PROC_LOCK(q);
			psignal(q, SIGKILL);
			PROC_UNLOCK(q);
			q = q->p_peers;
		}
		while (p->p_peers != NULL)
			msleep(p, &ppeers_lock, PWAIT, "exit1", 0);
		mtx_unlock(&ppeers_lock);
	}

	/*
	 * Check if any loadable modules need anything done at process exit.
	 * E.g. SYSV IPC stuff
	 * XXX what if one of these generates an error?
	 */
	EVENTHANDLER_INVOKE(process_exit, p);

	/*
	 * If parent is waiting for us to exit or exec,
	 * P_PPWAIT is set; we will wakeup the parent below.
	 */
	PROC_LOCK(p);
	stopprofclock(p);
	p->p_flag &= ~(P_TRACED | P_PPWAIT);

	/*
	 * Stop the real interval timer.  If the handler is currently
	 * executing, prevent it from rearming itself and let it finish.
	 */
	if (timevalisset(&p->p_realtimer.it_value) &&
	    callout_stop(&p->p_itcallout) == 0) {
		timevalclear(&p->p_realtimer.it_interval);
		msleep(&p->p_itcallout, &p->p_mtx, PWAIT, "ritwait", 0);
		KASSERT(!timevalisset(&p->p_realtimer.it_value),
		    ("realtime timer is still armed"));
	}
	PROC_UNLOCK(p);

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pid.
	 */
	funsetownlst(&p->p_sigiolst);

	/*
	 * If this process has an nlminfo data area (for lockd), release it
	 */
	if (nlminfo_release_p != NULL && p->p_nlminfo != NULL)
		(*nlminfo_release_p)(p);

	/*
	 * Close open files and release open-file table.
	 * This may block!
	 */
	fdfree(td);

	/*
	 * If this thread tickled GEOM, we need to wait for the giggling to
	 * stop before we return to userland
	 */
	if (td->td_pflags & TDP_GEOM)
		g_waitidle();

	/*
	 * Remove ourself from our leader's peer list and wake our leader.
	 */
	mtx_lock(&ppeers_lock);
	if (p->p_leader->p_peers) {
		q = p->p_leader;
		while (q->p_peers != p)
			q = q->p_peers;
		q->p_peers = p->p_peers;
		wakeup(p->p_leader);
	}
	mtx_unlock(&ppeers_lock);

	vmspace_exit(td);

	sx_xlock(&proctree_lock);
	if (SESS_LEADER(p)) {
		struct session *sp = p->p_session;
		struct tty *tp;

		/*
		 * s_ttyp is not zero'd; we use this to indicate that
		 * the session once had a controlling terminal. (for
		 * logging and informational purposes)
		 */
		SESS_LOCK(sp);
		ttyvp = sp->s_ttyvp;
		tp = sp->s_ttyp;
		sp->s_ttyvp = NULL;
		sp->s_leader = NULL;
		SESS_UNLOCK(sp);

		/*
		 * Signal foreground pgrp and revoke access to
		 * controlling terminal if it has not been revoked
		 * already.
		 *
		 * Because the TTY may have been revoked in the mean
		 * time and could already have a new session associated
		 * with it, make sure we don't send a SIGHUP to a
		 * foreground process group that does not belong to this
		 * session.
		 */

		if (tp != NULL) {
			tty_lock(tp);
			if (tp->t_session == sp)
				tty_signal_pgrp(tp, SIGHUP);
			tty_unlock(tp);
		}

		if (ttyvp != NULL) {
			sx_xunlock(&proctree_lock);
			if (vn_lock(ttyvp, LK_EXCLUSIVE) == 0) {
				VOP_REVOKE(ttyvp, REVOKEALL);
				VOP_UNLOCK(ttyvp, 0);
			}
			sx_xlock(&proctree_lock);
		}
	}
	fixjobc(p, p->p_pgrp, 0);
	sx_xunlock(&proctree_lock);
	(void)acct_process(td);

	/* Release the TTY now we've unlocked everything. */
	if (ttyvp != NULL)
		vrele(ttyvp);
#ifdef KTRACE
	/*
	 * Disable tracing, then drain any pending records and release
	 * the trace file.
	 */
	if (p->p_traceflag != 0) {
		PROC_LOCK(p);
		mtx_lock(&ktrace_mtx);
		p->p_traceflag = 0;
		mtx_unlock(&ktrace_mtx);
		PROC_UNLOCK(p);
		ktrprocexit(td);
		PROC_LOCK(p);
		mtx_lock(&ktrace_mtx);
		tracevp = p->p_tracevp;
		p->p_tracevp = NULL;
		tracecred = p->p_tracecred;
		p->p_tracecred = NULL;
		mtx_unlock(&ktrace_mtx);
		PROC_UNLOCK(p);
		if (tracevp != NULL) {
			locked = VFS_LOCK_GIANT(tracevp->v_mount);
			vrele(tracevp);
			VFS_UNLOCK_GIANT(locked);
		}
		if (tracecred != NULL)
			crfree(tracecred);
	}
#endif
	/*
	 * Release reference to text vnode
	 */
	if ((vtmp = p->p_textvp) != NULL) {
		p->p_textvp = NULL;
		locked = VFS_LOCK_GIANT(vtmp->v_mount);
		vrele(vtmp);
		VFS_UNLOCK_GIANT(locked);
	}

	/*
	 * Release our limits structure.
	 */
	PROC_LOCK(p);
	plim = p->p_limit;
	p->p_limit = NULL;
	PROC_UNLOCK(p);
	lim_free(plim);

	/*
	 * Remove proc from allproc queue and pidhash chain.
	 * Place onto zombproc.  Unlink from parent's child list.
	 */
	sx_xlock(&allproc_lock);
	LIST_REMOVE(p, p_list);
	LIST_INSERT_HEAD(&zombproc, p, p_list);
	LIST_REMOVE(p, p_hash);
	sx_xunlock(&allproc_lock);

	/*
	 * Call machine-dependent code to release any
	 * machine-dependent resources other than the address space.
	 * The address space is released by "vmspace_exitfree(p)" in
	 * vm_waitproc().
	 */
	cpu_exit(td);

	WITNESS_WARN(WARN_PANIC, NULL, "process (pid %d) exiting", p->p_pid);

	/*
	 * Reparent all of our children to init.
	 */
	sx_xlock(&proctree_lock);
	q = LIST_FIRST(&p->p_children);
	if (q != NULL)		/* only need this if any child is S_ZOMB */
		wakeup(initproc);
	for (; q != NULL; q = nq) {
		nq = LIST_NEXT(q, p_sibling);
		PROC_LOCK(q);
		proc_reparent(q, initproc);
		q->p_sigparent = SIGCHLD;
		/*
		 * Traced processes are killed
		 * since their existence means someone is screwing up.
		 */
		if (q->p_flag & P_TRACED) {
			struct thread *temp;

			q->p_flag &= ~(P_TRACED | P_STOPPED_TRACE);
			FOREACH_THREAD_IN_PROC(q, temp)
				temp->td_dbgflags &= ~TDB_SUSPEND;
			psignal(q, SIGKILL);
		}
		PROC_UNLOCK(q);
	}

	/* Save exit status. */
	PROC_LOCK(p);
	p->p_xstat = rv;
	p->p_xthread = td;

	/* Tell the prison that we are gone. */
	prison_proc_free(p->p_ucred->cr_prison);

#ifdef KDTRACE_HOOKS
	/*
	 * Tell the DTrace fasttrap provider about the exit if it
	 * has declared an interest.
	 */
	if (dtrace_fasttrap_exit)
		dtrace_fasttrap_exit(p);
#endif

	/*
	 * Notify interested parties of our demise.
	 */
	KNOTE_LOCKED(&p->p_klist, NOTE_EXIT);

#ifdef KDTRACE_HOOKS
	int reason = CLD_EXITED;
	if (WCOREDUMP(rv))
		reason = CLD_DUMPED;
	else if (WIFSIGNALED(rv))
		reason = CLD_KILLED;
	SDT_PROBE(proc, kernel, , exit, reason, 0, 0, 0, 0);
#endif

	/*
	 * Just delete all entries in the p_klist. At this point we won't
	 * report any more events, and there are nasty race conditions that
	 * can beat us if we don't.
	 */
	knlist_clear(&p->p_klist, 1);

	/*
	 * Notify parent that we're gone.  If parent has the PS_NOCLDWAIT
	 * flag set, or if the handler is set to SIG_IGN, notify process
	 * 1 instead (and hope it will handle this situation).
	 */
	PROC_LOCK(p->p_pptr);
	mtx_lock(&p->p_pptr->p_sigacts->ps_mtx);
	if (p->p_pptr->p_sigacts->ps_flag & (PS_NOCLDWAIT | PS_CLDSIGIGN)) {
		struct proc *pp;

		mtx_unlock(&p->p_pptr->p_sigacts->ps_mtx);
		pp = p->p_pptr;
		PROC_UNLOCK(pp);
		proc_reparent(p, initproc);
		p->p_sigparent = SIGCHLD;
		PROC_LOCK(p->p_pptr);

		/*
		 * Notify parent, so in case he was wait(2)ing or
		 * executing waitpid(2) with our pid, he will
		 * continue.
		 */
		wakeup(pp);
	} else
		mtx_unlock(&p->p_pptr->p_sigacts->ps_mtx);

	if (p->p_pptr == initproc)
		psignal(p->p_pptr, SIGCHLD);
	else if (p->p_sigparent != 0) {
		if (p->p_sigparent == SIGCHLD)
			childproc_exited(p);
		else	/* LINUX thread */
			psignal(p->p_pptr, p->p_sigparent);
	}
	sx_xunlock(&proctree_lock);

	/*
	 * The state PRS_ZOMBIE prevents other processes from sending
	 * signals to the process.  To avoid a memory leak, we free the
	 * signal queue memory at the time when the state is set.
	 */
	sigqueue_flush(&p->p_sigqueue);
	sigqueue_flush(&td->td_sigqueue);

	/*
	 * We have to wait until after acquiring all locks before
	 * changing p_state.  We need to avoid all possible context
	 * switches (including ones from blocking on a mutex) while
	 * marked as a zombie.  We also have to set the zombie state
	 * before we release the parent process' proc lock to avoid
	 * a lost wakeup.  So, we first call wakeup, then we grab the
	 * sched lock, update the state, and release the parent process'
	 * proc lock.
	 */
	wakeup(p->p_pptr);
	cv_broadcast(&p->p_pwait);
	sched_exit(p->p_pptr, td);
	PROC_SLOCK(p);
	p->p_state = PRS_ZOMBIE;
	PROC_UNLOCK(p->p_pptr);

	/*
	 * Hopefully no one will try to deliver a signal to the process this
	 * late in the game.
	 */
	knlist_destroy(&p->p_klist);

	/*
	 * Save our children's rusage information in our exit rusage.
	 */
	ruadd(&p->p_ru, &p->p_rux, &p->p_stats->p_cru, &p->p_crux);

	/*
	 * Make sure the scheduler takes this thread out of its tables etc.
	 * This will also release this thread's reference to the ucred.
	 * Other thread parts to release include pcb bits and such.
	 */
	thread_exit();
}
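
A hedged sketch of how exit1() is reached: the exit(2) syscall handler of this FreeBSD vintage simply packs the user-supplied status with W_EXITCODE() and calls exit1(), which never returns. The wrapper and argument-struct names below are assumptions based on typical FreeBSD syscall glue.

/*
 * Sketch only: the exit(2) entry point. 'exit_args' and 'uap->rval'
 * are assumed names; exit1() does not return.
 */
void
exit(struct thread *td, struct exit_args *uap)
{

	exit1(td, W_EXITCODE(uap->rval, 0));
	/* NOTREACHED */
}
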
Example #2
File: km.c Project: 0xffea/xnu
/*
 * cdevsw interface to km driver.
 */
int 
kmopen(dev_t dev, int flag, __unused int devtype, proc_t pp)
{
	int unit;
	struct tty *tp;
	struct winsize *wp;
	int ret;
	
	unit = minor(dev);
	if(unit >= 1)
		return (ENXIO);

	tp = km_tty[unit];

	tty_lock(tp);

	tp->t_oproc = kmstart;
	tp->t_param = NULL;
	tp->t_dev = dev;
	
	if ( !(tp->t_state & TS_ISOPEN) ) {
		tp->t_iflag = TTYDEF_IFLAG;
		tp->t_oflag = TTYDEF_OFLAG;
		tp->t_cflag = (CREAD | CS8 | CLOCAL);
		tp->t_lflag = TTYDEF_LFLAG;
		tp->t_ispeed = tp->t_ospeed = TTYDEF_SPEED;
		termioschars(&tp->t_termios);
		ttsetwater(tp);
	} else if ((tp->t_state & TS_XCLUDE) && proc_suser(pp)) {
		ret = EBUSY;
		goto out;
	}

	tp->t_state |= TS_CARR_ON; /* lie and say carrier exists and is on. */

	ret = ((*linesw[tp->t_line].l_open)(dev, tp));
	{
		PE_Video video;
		wp = &tp->t_winsize;
		/*
		 * Magic numbers.  These are CHARWIDTH and CHARHEIGHT
		 * from osfmk/ppc/POWERMAC/video_console.c
		 */
		wp->ws_xpixel = 8;
		wp->ws_ypixel = 16;

		tty_unlock(tp);		/* XXX race window */

		if (flag & O_POPUP)
			PE_initialize_console(0, kPETextScreen);

		bzero(&video, sizeof(video));
		PE_current_console(&video);

		tty_lock(tp);

		if( video.v_width != 0 && video.v_height != 0 ) {
			wp->ws_col = video.v_width / wp->ws_xpixel;
			wp->ws_row = video.v_height / wp->ws_ypixel;
		} else {
			wp->ws_col = 100;
			wp->ws_row = 36;
		}
	}

out:
	tty_unlock(tp);

	return ret;
}
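
kmopen() is a character-device entry point; below is a hypothetical sketch of how the km driver's functions might be wired into XNU's cdevsw table. The field names follow struct cdevsw in bsd/sys/conf.h, but the companion entry points and the table's actual location are assumptions here.

/*
 * Hypothetical sketch only: a km cdevsw entry. Treat every name but
 * kmopen as an assumption; the real table is set up in conf.c.
 */
static struct cdevsw km_cdevsw = {
	.d_open  = kmopen,
	.d_close = kmclose,
	.d_read  = kmread,
	.d_write = kmwrite,
	.d_ioctl = kmioctl,
};
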
Example #3
int tty_set_ldisc(struct tty_struct *tty, int ldisc)
{
    int retval;
    struct tty_ldisc *o_ldisc, *new_ldisc;
    int work, o_work = 0;
    struct tty_struct *o_tty;

    new_ldisc = tty_ldisc_get(ldisc);
    if (IS_ERR(new_ldisc))
        return PTR_ERR(new_ldisc);

    tty_lock();
    /*
     *	We need to look at the tty locking here for pty/tty pairs
     *	when both sides try to change in parallel.
     */

    o_tty = tty->link;	/* o_tty is the pty side or NULL */


    /*
     *	Check the no-op case
     */

    if (tty->ldisc->ops->num == ldisc) {
        tty_unlock();
        tty_ldisc_put(new_ldisc);
        return 0;
    }

    tty_unlock();
    /*
     *	Problem: What do we do if this blocks ?
     *	We could deadlock here
     */

    tty_wait_until_sent(tty, 0);

    tty_lock();
    mutex_lock(&tty->ldisc_mutex);

    /*
     *	We could be midstream of another ldisc change which has
     *	dropped the lock during processing. If so we need to wait.
     */

    while (test_bit(TTY_LDISC_CHANGING, &tty->flags)) {
        mutex_unlock(&tty->ldisc_mutex);
        tty_unlock();
        wait_event(tty_ldisc_wait,
                   test_bit(TTY_LDISC_CHANGING, &tty->flags) == 0);
        tty_lock();
        mutex_lock(&tty->ldisc_mutex);
    }

    set_bit(TTY_LDISC_CHANGING, &tty->flags);

    /*
     *	No more input please, we are switching. The new ldisc
     *	will update this value in the ldisc open function
     */

    tty->receive_room = 0;

    o_ldisc = tty->ldisc;

    tty_unlock();
    /*
     *	Make sure we don't change while someone holds a
     *	reference to the line discipline. The TTY_LDISC bit
     *	prevents anyone taking a reference once it is clear.
     *	We need the lock to avoid racing reference takers.
     *
     *	We must clear the TTY_LDISC bit here to avoid a livelock
     *	with a userspace app continually trying to use the tty in
     *	parallel to the change and re-referencing the tty.
     */

    work = tty_ldisc_halt(tty);
    if (o_tty)
        o_work = tty_ldisc_halt(o_tty);

    /*
     * Wait for ->hangup_work and ->buf.work handlers to terminate.
     * We must drop the mutex here in case a hangup is also in process.
     */

    mutex_unlock(&tty->ldisc_mutex);

    tty_ldisc_flush_works(tty);

    retval = tty_ldisc_wait_idle(tty, 5 * HZ);

    tty_lock();
    mutex_lock(&tty->ldisc_mutex);

    /* handle wait idle failure locked */
    if (retval) {
        tty_ldisc_put(new_ldisc);
        goto enable;
    }

    if (test_bit(TTY_HUPPING, &tty->flags)) {
        /* We were raced by the hangup method. It will have stomped
           the ldisc data and closed the ldisc down */
        clear_bit(TTY_LDISC_CHANGING, &tty->flags);
        mutex_unlock(&tty->ldisc_mutex);
        tty_ldisc_put(new_ldisc);
        tty_unlock();
        return -EIO;
    }

    /* Shutdown the current discipline. */
    tty_ldisc_close(tty, o_ldisc);

    /* Now set up the new line discipline. */
    tty_ldisc_assign(tty, new_ldisc);
    tty_set_termios_ldisc(tty, ldisc);

    retval = tty_ldisc_open(tty, new_ldisc);
    if (retval < 0) {
        /* Back to the old one or N_TTY if we can't */
        tty_ldisc_put(new_ldisc);
        tty_ldisc_restore(tty, o_ldisc);
    }

    /* At this point we hold a reference to the new ldisc and a
       reference to the old ldisc. If we ended up flipping back
       to the existing ldisc we have two references to it */

    if (tty->ldisc->ops->num != o_ldisc->ops->num && tty->ops->set_ldisc)
        tty->ops->set_ldisc(tty);

    tty_ldisc_put(o_ldisc);

enable:
    /*
     *	Allow ldisc referencing to occur again
     */

    tty_ldisc_enable(tty);
    if (o_tty)
        tty_ldisc_enable(o_tty);

    /* Restart the work queue in case no characters kick it off. Safe if
       already running */
    if (work)
        schedule_work(&tty->buf.work);
    if (o_work)
        schedule_work(&o_tty->buf.work);
    mutex_unlock(&tty->ldisc_mutex);
    tty_unlock();
    return retval;
}
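
For context, tty_set_ldisc() is normally reached from the TIOCSETD ioctl; a minimal hedged sketch of that glue, simplified from the tty_io.c helper of this era.

/*
 * Sketch of the TIOCSETD path into tty_set_ldisc(); simplified, with
 * the helper name mirroring the kernel's tiocsetd().
 */
static int tiocsetd(struct tty_struct *tty, int __user *p)
{
    int ldisc;

    if (get_user(ldisc, p))
        return -EFAULT;
    return tty_set_ldisc(tty, ldisc);
}
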
Example #4
void tty_ldisc_hangup(struct tty_struct *tty)
{
    struct tty_ldisc *ld;
    int reset = tty->driver->flags & TTY_DRIVER_RESET_TERMIOS;
    int err = 0;

    /*
     * FIXME! What are the locking issues here? This may be overdoing
     * things... This question is especially important now that we've
     * removed the irqlock.
     */
    ld = tty_ldisc_ref(tty);
    if (ld != NULL) {
        /* We may have no line discipline at this point */
        if (ld->ops->flush_buffer)
            ld->ops->flush_buffer(tty);
        tty_driver_flush_buffer(tty);
        if ((test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) &&
                ld->ops->write_wakeup)
            ld->ops->write_wakeup(tty);
        if (ld->ops->hangup)
            ld->ops->hangup(tty);
        tty_ldisc_deref(ld);
    }
    /*
     * FIXME: Once we trust the LDISC code better we can wait here for
     * ldisc completion and fix the driver call race
     */
    wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
    wake_up_interruptible_poll(&tty->read_wait, POLLIN);
    /*
     * Shutdown the current line discipline, and reset it to
     * N_TTY if need be.
     *
     * Avoid racing set_ldisc or tty_ldisc_release
     */
    mutex_lock(&tty->ldisc_mutex);

    /*
     * this is like tty_ldisc_halt, but we need to give up
     * the BTM before calling cancel_work_sync, which may
     * need to wait for another function taking the BTM
     */
    clear_bit(TTY_LDISC, &tty->flags);
    tty_unlock();
    cancel_work_sync(&tty->buf.work);
    mutex_unlock(&tty->ldisc_mutex);
retry:
    tty_lock();
    mutex_lock(&tty->ldisc_mutex);

    /* At this point we have a closed ldisc and we want to
       reopen it. We could defer this to the next open but
       it means auditing a lot of other paths so this is
       a FIXME */
    if (tty->ldisc) {	/* Not yet closed */
        if (atomic_read(&tty->ldisc->users) != 1) {
            char cur_n[TASK_COMM_LEN], tty_n[64];
            long timeout = 3 * HZ;
            tty_unlock();

            while (tty_ldisc_wait_idle(tty, timeout) == -EBUSY) {
                timeout = MAX_SCHEDULE_TIMEOUT;
                printk_ratelimited(KERN_WARNING
                                   "%s: waiting (%s) for %s took too long, but we keep waiting...\n",
                                   __func__, get_task_comm(cur_n, current),
                                   tty_name(tty, tty_n));
            }
            mutex_unlock(&tty->ldisc_mutex);
            goto retry;
        }

        if (reset == 0) {

            if (!tty_ldisc_reinit(tty, tty->termios->c_line))
                err = tty_ldisc_open(tty, tty->ldisc);
            else
                err = 1;
        }
        /* If the re-open fails or we reset then go to N_TTY. The
           N_TTY open cannot fail */
        if (reset || err) {
            BUG_ON(tty_ldisc_reinit(tty, N_TTY));
            WARN_ON(tty_ldisc_open(tty, tty->ldisc));
        }
        tty_ldisc_enable(tty);
    }
    mutex_unlock(&tty->ldisc_mutex);
    if (reset)
        tty_reset_termios(tty);
}
Example #5
/*
 * ------------------------------------------------------------
 * rs_open() and friends
 * ------------------------------------------------------------
 */
static int block_til_ready(struct tty_struct *tty, struct file * filp,
			   struct m68k_serial *info)
{
	DECLARE_WAITQUEUE(wait, current);
	int		retval;
	int		do_clocal = 0;

	/*
	 * If the device is in the middle of being closed, then block
	 * until it's done, and then try again.
	 */
	if (info->flags & S_CLOSING) {
		interruptible_sleep_on(&info->close_wait);
#ifdef SERIAL_DO_RESTART
		if (info->flags & S_HUP_NOTIFY)
			return -EAGAIN;
		else
			return -ERESTARTSYS;
#else
		return -EAGAIN;
#endif
	}
	
	/*
	 * If non-blocking mode is set, or the port is not enabled,
	 * then make the check up front and then exit.
	 */
	if ((filp->f_flags & O_NONBLOCK) ||
	    (tty->flags & (1 << TTY_IO_ERROR))) {
		info->flags |= S_NORMAL_ACTIVE;
		return 0;
	}

	if (tty->termios->c_cflag & CLOCAL)
		do_clocal = 1;

	/*
	 * Block waiting for the carrier detect and the line to become
	 * free (i.e., not in use by the callout).  While we are in
	 * this loop, info->count is dropped by one, so that
	 * rs_close() knows when to free things.  We restore it upon
	 * exit, either normal or abnormal.
	 */
	retval = 0;
	add_wait_queue(&info->open_wait, &wait);

	info->count--;
	info->blocked_open++;
	while (1) {
		local_irq_disable();
		m68k_rtsdtr(info, 1);
		local_irq_enable();
		current->state = TASK_INTERRUPTIBLE;
		if (tty_hung_up_p(filp) ||
		    !(info->flags & S_INITIALIZED)) {
#ifdef SERIAL_DO_RESTART
			if (info->flags & S_HUP_NOTIFY)
				retval = -EAGAIN;
			else
				retval = -ERESTARTSYS;	
#else
			retval = -EAGAIN;
#endif
			break;
		}
		if (!(info->flags & S_CLOSING) && do_clocal)
			break;
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		tty_unlock();
		schedule();
		tty_lock();
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&info->open_wait, &wait);
	if (!tty_hung_up_p(filp))
		info->count++;
	info->blocked_open--;

	if (retval)
		return retval;
	info->flags |= S_NORMAL_ACTIVE;
	return 0;
}	
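
block_til_ready() is the tail of the open path; a hedged sketch of the rs_open() shape that calls it. The port lookup and startup() details are simplified and partly assumed.

/*
 * Sketch only: the open() entry that funnels into block_til_ready().
 * 'm68k_soft' and startup() follow the 68328 serial driver, but the
 * details here are assumptions.
 */
static int rs_open(struct tty_struct *tty, struct file *filp)
{
	struct m68k_serial *info = &m68k_soft[tty->index];
	int retval;

	info->count++;
	tty->driver_data = info;

	retval = startup(info);		/* enable the port */
	if (retval)
		return retval;

	return block_til_ready(tty, filp, info);
}
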
Example #6
int
ttyinq_read_uio(struct ttyinq *ti, struct tty *tp, struct uio *uio,
    size_t rlen, size_t flen)
{

	MPASS(rlen <= uio->uio_resid);

	while (rlen > 0) {
		int error;
		struct ttyinq_block *tib;
		size_t cbegin, cend, clen;

		/* See if there still is data. */
		if (ti->ti_begin == ti->ti_linestart)
			return (0);
		tib = ti->ti_firstblock;
		if (tib == NULL)
			return (0);

		/*
		 * The end address should be the lowest of these three:
		 * - The write pointer
		 * - The blocksize - we can't read beyond the block
		 * - The end address if we could perform the full read
		 */
		cbegin = ti->ti_begin;
		cend = MIN(MIN(ti->ti_linestart, ti->ti_begin + rlen),
		    TTYINQ_DATASIZE);
		clen = cend - cbegin;
		MPASS(clen >= flen);
		rlen -= clen;

		/*
		 * We can prevent buffering in some cases:
		 * - We need to read the block until the end.
		 * - We don't need to read the block until the end, but
		 *   there is no data beyond it, which allows us to move
		 *   the write pointer to a new block.
		 */
		if (cend == TTYINQ_DATASIZE || cend == ti->ti_end) {
			/*
			 * Fast path: zero copy. Remove the first block,
			 * so we can unlock the TTY temporarily.
			 */
			TTYINQ_REMOVE_HEAD(ti);
			ti->ti_begin = 0;

			/*
			 * Because we remove the first block, we must
			 * fix up the block offsets.
			 */
#define CORRECT_BLOCK(t) do {			\
	if (t <= TTYINQ_DATASIZE)		\
		t = 0;				\
	else					\
		t -= TTYINQ_DATASIZE;		\
} while (0)
			CORRECT_BLOCK(ti->ti_linestart);
			CORRECT_BLOCK(ti->ti_reprint);
			CORRECT_BLOCK(ti->ti_end);
#undef CORRECT_BLOCK

			/*
			 * Temporarily unlock and copy the data to
			 * userspace. We may need to flush trailing
			 * bytes, like EOF characters.
			 */
			tty_unlock(tp);
			error = uiomove(tib->tib_data + cbegin,
			    clen - flen, uio);
			tty_lock(tp);

			/* Block can now be readded to the list. */
			TTYINQ_RECYCLE(ti, tib);
		} else {
			char ob[TTYINQ_DATASIZE - 1];

			/*
			 * Slow path: store data in a temporary buffer.
			 */
			memcpy(ob, tib->tib_data + cbegin, clen - flen);
			ti->ti_begin += clen;
			MPASS(ti->ti_begin < TTYINQ_DATASIZE);

			/* Temporarily unlock and copy the data to userspace. */
			tty_unlock(tp);
			error = uiomove(ob, clen - flen, uio);
			tty_lock(tp);
		}

		if (error != 0)
			return (error);
		if (tty_gone(tp))
			return (ENXIO);
	}

	return (0);
}
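
A hedged sketch of how a line-discipline caller uses the rlen/flen pair: copy out one line of clen bytes, and when the line ends in a VEOF character consume it (flen = 1) without handing it to userspace. Everything except ttyinq_read_uio() is an assumption for illustration.

/*
 * Sketch: read one found line; a trailing VEOF is consumed but not
 * copied out. Caller holds the tty lock, as ttyinq_read_uio() expects.
 */
static int
read_one_line(struct tty *tp, struct uio *uio, size_t clen, int ends_in_veof)
{
	size_t flen = ends_in_veof ? 1 : 0;

	return (ttyinq_read_uio(&tp->t_inq, tp, uio, clen, flen));
}
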
Example #7
static int
ptsdev_ioctl(struct file *fp, u_long cmd, void *data,
    struct ucred *active_cred, struct thread *td)
{
	struct tty *tp = fp->f_data;
	struct pts_softc *psc = tty_softc(tp);
	int error = 0, sig;

	switch (cmd) {
	case FIONBIO:
		/* This device supports non-blocking operation. */
		return (0);
	case FIONREAD:
		tty_lock(tp);
		if (psc->pts_flags & PTS_FINISHED) {
			/* Force read() to be called. */
			*(int *)data = 1;
		} else {
			*(int *)data = ttydisc_getc_poll(tp);
		}
		tty_unlock(tp);
		return (0);
	case FIODGNAME: {
		struct fiodgname_arg *fgn;
		const char *p;
		int i;

		/* Reverse device name lookups, for ptsname() and ttyname(). */
		fgn = data;
		p = tty_devname(tp);
		i = strlen(p) + 1;
		if (i > fgn->len)
			return (EINVAL);
		return copyout(p, fgn->buf, i);
	}

	/*
	 * We need to implement TIOCGPGRP and TIOCGSID here again. When
	 * called on the pseudo-terminal master, it should not check if
	 * the terminal is the foreground terminal of the calling
	 * process.
	 *
	 * TIOCGETA is also implemented here. Various Linux PTY routines
	 * often call isatty(), which is implemented by tcgetattr().
	 */
#ifdef PTS_LINUX
	case TIOCGETA:
		/* Obtain terminal flags through tcgetattr(). */
		tty_lock(tp);
		*(struct termios*)data = tp->t_termios;
		tty_unlock(tp);
		return (0);
#endif /* PTS_LINUX */
	case TIOCSETAF:
	case TIOCSETAW:
		/*
		 * We must make sure we turn tcsetattr() calls of TCSAFLUSH and
		 * TCSADRAIN into something different. If an application would
		 * call TCSAFLUSH or TCSADRAIN on the master descriptor, it may
		 * deadlock waiting for all data to be read.
		 */
		cmd = TIOCSETA;
		break;
#if defined(PTS_COMPAT) || defined(PTS_LINUX)
	case TIOCGPTN:
		/*
		 * Get the device unit number.
		 */
		if (psc->pts_unit < 0)
			return (ENOTTY);
		*(unsigned int *)data = psc->pts_unit;
		return (0);
#endif /* PTS_COMPAT || PTS_LINUX */
	case TIOCGPGRP:
		/* Get the foreground process group ID. */
		tty_lock(tp);
		if (tp->t_pgrp != NULL)
			*(int *)data = tp->t_pgrp->pg_id;
		else
			*(int *)data = NO_PID;
		tty_unlock(tp);
		return (0);
	case TIOCGSID:
		/* Get the session leader process ID. */
		tty_lock(tp);
		if (tp->t_session == NULL)
			error = ENOTTY;
		else
			*(int *)data = tp->t_session->s_sid;
		tty_unlock(tp);
		return (error);
	case TIOCPTMASTER:
		/* Yes, we are a pseudo-terminal master. */
		return (0);
	case TIOCSIG:
		/* Signal the foreground process group. */
		sig = *(int *)data;
		if (sig < 1 || sig >= NSIG)
			return (EINVAL);

		tty_lock(tp);
		tty_signal_pgrp(tp, sig);
		tty_unlock(tp);
		return (0);
	case TIOCPKT:
		/* Enable/disable packet mode. */
		tty_lock(tp);
		if (*(int *)data)
			psc->pts_flags |= PTS_PKT;
		else
			psc->pts_flags &= ~PTS_PKT;
		tty_unlock(tp);
		return (0);
	}

	/* Just redirect this ioctl to the slave device. */
	tty_lock(tp);
	error = tty_ioctl(tp, cmd, data, fp->f_flag, td);
	tty_unlock(tp);
	if (error == ENOIOCTL)
		error = ENOTTY;

	return (error);
}
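
From userspace, the TIOCGPTN branch above is what a Linux-style ptsname() boils down to; a small hedged usage sketch, assuming a /dev/pts naming layout.

/*
 * Userspace sketch: ask the master for its unit number and build the
 * slave path from it.
 */
#include <stdio.h>
#include <sys/ioctl.h>

static int
pts_path(int mfd, char *buf, size_t len)
{
	unsigned int n;

	if (ioctl(mfd, TIOCGPTN, &n) == -1)
		return (-1);
	snprintf(buf, len, "/dev/pts/%u", n);
	return (0);
}
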
Example #8
static ssize_t r3964_write(struct tty_struct *tty, struct file *file,
			   const unsigned char *data, size_t count)
{
	struct r3964_info *pInfo = tty->disc_data;
	struct r3964_block_header *pHeader;
	struct r3964_client_info *pClient;
	unsigned char *new_data;

	TRACE_L("write request, %d characters", count);
/* 
 * Verify the pointers 
 */

	if (!pInfo)
		return -EIO;

/*
 * Ensure that the caller does not wish to send too much.
 */
	if (count > R3964_MTU) {
		if (pInfo->flags & R3964_DEBUG) {
			TRACE_L(KERN_WARNING "r3964_write: truncating user "
				"packet from %u to mtu %d", count, R3964_MTU);
		}
		count = R3964_MTU;
	}
/*
 * Allocate a buffer with the block header prepended and copy the
 * user data into it
 */
	new_data = kmalloc(count + sizeof(struct r3964_block_header),
			GFP_KERNEL);
	TRACE_M("r3964_write - kmalloc %p", new_data);
	if (new_data == NULL) {
		if (pInfo->flags & R3964_DEBUG) {
			printk(KERN_ERR "r3964_write: no memory\n");
		}
		return -ENOSPC;
	}

	pHeader = (struct r3964_block_header *)new_data;
	pHeader->data = new_data + sizeof(struct r3964_block_header);
	pHeader->length = count;
	pHeader->locks = 0;
	pHeader->owner = NULL;

	tty_lock();

	pClient = findClient(pInfo, task_pid(current));
	if (pClient) {
		pHeader->owner = pClient;
	}

	memcpy(pHeader->data, data, count);	/* We already verified this */

	if (pInfo->flags & R3964_DEBUG) {
		dump_block(pHeader->data, count);
	}

/*
 * Add buffer to transmit-queue:
 */
	add_tx_queue(pInfo, pHeader);
	trigger_transmit(pInfo);

	tty_unlock();

	return 0;
}
Example #9
static int
ptsdev_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct tty *tp = fp->f_data;
	struct pts_softc *psc = tty_softc(tp);
	int error = 0;
	char pkt;

	if (uio->uio_resid == 0)
		return (0);

	tty_lock(tp);

	for (;;) {
		/*
		 * Implement packet mode. When packet mode is turned on,
		 * the first byte contains a bitmask of events that
		 * occurred (start, stop, flush, window size, etc).
		 */
		if (psc->pts_flags & PTS_PKT && psc->pts_pkt) {
			pkt = psc->pts_pkt;
			psc->pts_pkt = 0;
			tty_unlock(tp);

			error = ureadc(pkt, uio);
			return (error);
		}

		/*
		 * Transmit regular data.
		 *
		 * XXX: We shouldn't use ttydisc_getc_poll()! Even
		 * though in this implementation, there is likely going
		 * to be data, we should just call ttydisc_getc_uio()
		 * and use its return value to sleep.
		 */
		if (ttydisc_getc_poll(tp)) {
			if (psc->pts_flags & PTS_PKT) {
				/*
				 * XXX: Small race. Fortunately PTY
				 * consumers aren't multithreaded.
				 */

				tty_unlock(tp);
				error = ureadc(TIOCPKT_DATA, uio);
				if (error)
					return (error);
				tty_lock(tp);
			}

			error = ttydisc_getc_uio(tp, uio);
			break;
		}

		/* Maybe the device isn't used anyway. */
		if (psc->pts_flags & PTS_FINISHED)
			break;

		/* Wait for more data. */
		if (fp->f_flag & O_NONBLOCK) {
			error = EWOULDBLOCK;
			break;
		}
		error = cv_wait_sig(&psc->pts_outwait, tp->t_mtx);
		if (error != 0)
			break;
	}

	tty_unlock(tp);

	return (error);
}
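
Packet mode, as handled above, prefixes every master-side read with a status byte; a hedged userspace sketch of consuming that byte (TIOCPKT_DATA means the remainder is ordinary slave output).

/*
 * Userspace sketch: enable packet mode once with
 *   int on = 1; ioctl(mfd, TIOCPKT, &on);
 * then strip the leading control byte from each read().
 */
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>

static ssize_t
pkt_read(int mfd, char *buf, size_t len)
{
	char pkt[512];
	ssize_t n = read(mfd, pkt, sizeof pkt);

	if (n <= 0)
		return (n);
	if (pkt[0] != TIOCPKT_DATA)
		return (0);	/* control event; inspect pkt[0] flags */
	n--;
	if ((size_t)n > len)
		n = len;
	memcpy(buf, pkt + 1, n);
	return (n);
}
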
Example #10
static int
ptsdev_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct tty *tp = fp->f_data;
	struct pts_softc *psc = tty_softc(tp);
	char ib[256], *ibstart;
	size_t iblen, rintlen;
	int error = 0;

	if (uio->uio_resid == 0)
		return (0);

	for (;;) {
		ibstart = ib;
		iblen = MIN(uio->uio_resid, sizeof ib);
		error = uiomove(ib, iblen, uio);

		tty_lock(tp);
		if (error != 0) {
			iblen = 0;
			goto done;
		}

		/*
		 * When possible, avoid the slow path: the bypass route
		 * taken by ttydisc_rint_simple() copies all input to the
		 * input queue at once.
		 */
		MPASS(iblen > 0);
		do {
			rintlen = ttydisc_rint_simple(tp, ibstart, iblen);
			ibstart += rintlen;
			iblen -= rintlen;
			if (iblen == 0) {
				/* All data written. */
				break;
			}

			/* Maybe the device isn't used anyway. */
			if (psc->pts_flags & PTS_FINISHED) {
				error = EIO;
				goto done;
			}

			/* Wait for more data. */
			if (fp->f_flag & O_NONBLOCK) {
				error = EWOULDBLOCK;
				goto done;
			}

			/* Wake up users on the slave side. */
			ttydisc_rint_done(tp);
			error = cv_wait_sig(&psc->pts_inwait, tp->t_mtx);
			if (error != 0)
				goto done;
		} while (iblen > 0);

		if (uio->uio_resid == 0)
			break;
		tty_unlock(tp);
	}

done:	ttydisc_rint_done(tp);
	tty_unlock(tp);

	/*
	 * Don't account for the part of the buffer that we couldn't
	 * pass to the TTY.
	 */
	uio->uio_resid += iblen;
	return (error);
}
Example #11
void
si_intr(void *arg)
{
	struct si_softc *sc;
	struct si_port *pp;
	volatile struct si_channel *ccbp;
	struct tty *tp;
	volatile caddr_t maddr;
	BYTE op, ip;
	int x, card, port, n, i, isopen;
	volatile BYTE *z;
	BYTE c;

	sc = arg;
	mtx_assert(&Giant, MA_OWNED);

	DPRINT((0, arg == NULL ? DBG_POLL:DBG_INTR, "si_intr\n"));

	/*
	 * When we get an int we poll all the channels and do ALL pending
	 * work, not just the first one we find. This allows all cards to
	 * share the same vector.
	 *
	 * XXX - But if we're sharing the vector with something that's NOT
	 * a SI/XIO/SX card, we may be making more work for ourselves.
	 */
	for (card = 0; card < si_numunits; card++) {
		sc = devclass_get_softc(si_devclass, card);
		if (sc == NULL || sc->sc_type == SIEMPTY)
			continue;

		/*
		 * First, clear the interrupt
		 */
		switch(sc->sc_type) {
		case SIHOST:
			maddr = sc->sc_maddr;
			((volatile struct si_reg *)maddr)->int_pending = 0;
							/* flag nothing pending */
			*(maddr+SIINTCL) = 0x00;	/* Set IRQ clear */
			*(maddr+SIINTCL_CL) = 0x00;	/* Clear IRQ clear */
			break;
		case SIHOST2:
			maddr = sc->sc_maddr;
			((volatile struct si_reg *)maddr)->int_pending = 0;
			*(maddr+SIPLIRQCLR) = 0x00;
			*(maddr+SIPLIRQCLR) = 0x10;
			break;
		case SIPCI:
			maddr = sc->sc_maddr;
			((volatile struct si_reg *)maddr)->int_pending = 0;
			*(maddr+SIPCIINTCL) = 0x0;
			break;
		case SIJETPCI:	/* fall through to JETISA case */
		case SIJETISA:
			maddr = sc->sc_maddr;
			((volatile struct si_reg *)maddr)->int_pending = 0;
			*(maddr+SIJETINTCL) = 0x0;
			break;
#ifdef DEV_EISA
		case SIEISA:
			maddr = sc->sc_maddr;
			((volatile struct si_reg *)maddr)->int_pending = 0;
			(void)inb(sc->sc_iobase + 3);
			break;
#endif
		case SIEMPTY:
		default:
			continue;
		}
		((volatile struct si_reg *)maddr)->int_scounter = 0;

		/*
		 * check each port
		 */
		for (pp = sc->sc_ports, port = 0; port < sc->sc_nport;
		     pp++, port++) {
			ccbp = pp->sp_ccb;
			tp = pp->sp_tty;
			tty_lock(tp);

			/*
			 * See if a command has completed ?
			 */
			if (ccbp->hi_stat != pp->sp_pend) {
				DPRINT((pp, DBG_INTR,
					"si_intr hi_stat = %s, pend = %s\n",
					si_cmdname(ccbp->hi_stat),
					si_cmdname(pp->sp_pend)));
				switch(pp->sp_pend) {
				case LOPEN:
				case MPEND:
				case MOPEN:
				case FCLOSE:
				case CONFIG:
				case SBREAK:
				case EBREAK:
					/* sleeping in si_command */
					DPRINT((pp, DBG_INTR, "do wakeup\n"));
					wakeup(&pp->sp_state);
					break;
				}
				pp->sp_pend = ccbp->hi_stat;
			}

			/*
			 * Continue on if it's closed
			 */
			if (ccbp->hi_stat == IDLE_CLOSE) {
				tty_unlock(tp);
				continue;
			}

			/*
			 * Do modem state change if not a local device
			 */
			si_modem_state(pp, tp, ccbp->hi_ip);

			/*
			 * Check to see if we should 'receive' characters.
			 */
			isopen = tty_opened(tp);

			/*
			 * Do input break processing
			 */
			if (ccbp->hi_state & ST_BREAK) {
				if (isopen)
					ttydisc_rint(tp, 0, TRE_BREAK);
				ccbp->hi_state &= ~ST_BREAK;   /* A Bit iffy this */
				DPRINT((pp, DBG_INTR, "si_intr break\n"));
			}

			/*
			 * Do RX stuff - if not open then dump any characters.
			 * XXX: This is VERY messy and needs to be cleaned up.
			 *
			 * XXX: can we leave data in the host adapter buffer
			 * when the clists are full?  That may be dangerous
			 * if the user cannot get an interrupt signal through.
			 */

	more_rx:

			if (!isopen) {
				DPRINT((pp, DBG_INTR, "intr1: not open\n"));
				ccbp->hi_rxopos = ccbp->hi_rxipos;
				goto end_rx;
			}

#if 0 /* XXXMPSAFETTY */
			/*
			 * If the tty input buffers are blocked, stop emptying
			 * the incoming buffers and let the auto flow control
			 * assert..
			 */
			if (tp->t_state & TS_TBLOCK)
				goto end_rx;
#endif

			/*
			 * Process read characters if not skipped above
			 */
			op = ccbp->hi_rxopos;
			ip = ccbp->hi_rxipos;
			c = ip - op;
			if (c == 0)
				goto end_rx;

			n = c & 0xff;
			if (n > 250)
				n = 250;

			DPRINT((pp, DBG_INTR, "n = %d, op = %d, ip = %d\n",
						n, op, ip));

			/*
			 * Suck characters out of host card buffer into the
			 * "input staging buffer" - so that we dont leave the
			 * host card in limbo while we're possibly echoing
			 * characters and possibly flushing input inside the
			 * ldisc l_rint() routine.
			 */
			if (n <= SI_BUFFERSIZE - op) {

				z = ccbp->hi_rxbuf + op;
				si_vbcopy(z, si_rxbuf, n);

				op += n;
			} else {
				x = SI_BUFFERSIZE - op;

				z = ccbp->hi_rxbuf + op;
				si_vbcopy(z, si_rxbuf, x);

				z = ccbp->hi_rxbuf;
				si_vbcopy(z, si_rxbuf + x, n - x);

				op += n;
			}

			/* clear collected characters from buffer */
			ccbp->hi_rxopos = op;

			/*
			 * at this point...
			 * n = number of chars placed in si_rxbuf
			 */

			if (0 && ttydisc_can_bypass(tp)) {

				i =  ttydisc_rint_bypass(tp, (char *)si_rxbuf, n);
				if (i < n)
					pp->sp_delta_overflows += (n - i);

			} else {
				/*
				 * It'd be nice to not have to go through the
				 * function call overhead for each char here.
				 * It'd be nice to block input, saving a
				 * loop here and the call/return overhead.
				 */
				for(x = 0; x < n; x++) {
					i = si_rxbuf[x];
					if (ttydisc_rint(tp, i, 0) == -1)
						pp->sp_delta_overflows++;
				}
			}
			goto more_rx;	/* try for more until RXbuf is empty */

	end_rx:

			ttydisc_rint_done(tp);

			/*
			 * Do TX stuff
			 */
			si_start(tp);
			tty_unlock(tp);

		} /* end of for (all ports on this controller) */
	} /* end of for (all controllers) */

	DPRINT((0, arg == NULL ? DBG_POLL:DBG_INTR, "end si_intr\n"));
}
Example #12
/*
 * An optimized version of ttyoutq_read() which can be used in pseudo
 * TTY drivers to directly copy data from the outq to userspace, instead
 * of buffering it.
 *
 * We can only copy data directly if we need to read the entire block
 * back to the user, because we temporarily remove the block from the
 * queue. Otherwise we need to copy it to a temporary buffer first, to
 * make sure data remains in the correct order.
 */
int
ttyoutq_read_uio(struct ttyoutq *to, struct tty *tp, struct uio *uio)
{

	while (uio->uio_resid > 0) {
		int error;
		struct ttyoutq_block *tob;
		size_t cbegin, cend, clen;

		/* See if there still is data. */
		if (to->to_begin == to->to_end)
			return (0);
		tob = to->to_firstblock;
		if (tob == NULL)
			return (0);

		/*
		 * The end address should be the lowest of these three:
		 * - The write pointer
		 * - The blocksize - we can't read beyond the block
		 * - The end address if we could perform the full read
		 */
		cbegin = to->to_begin;
		cend = MIN(MIN(to->to_end, to->to_begin + uio->uio_resid),
		    TTYOUTQ_DATASIZE);
		clen = cend - cbegin;

		/*
		 * We can prevent buffering in some cases:
		 * - We need to read the block until the end.
		 * - We don't need to read the block until the end, but
		 *   there is no data beyond it, which allows us to move
		 *   the write pointer to a new block.
		 */
		if (cend == TTYOUTQ_DATASIZE || cend == to->to_end) {
			/*
			 * Fast path: zero copy. Remove the first block,
			 * so we can unlock the TTY temporarily.
			 */
			TTYOUTQ_REMOVE_HEAD(to);
			to->to_begin = 0;
			if (to->to_end <= TTYOUTQ_DATASIZE)
				to->to_end = 0;
			else
				to->to_end -= TTYOUTQ_DATASIZE;

			/* Temporarily unlock and copy the data to userspace. */
			tty_unlock(tp);
			error = uiomove(tob->tob_data + cbegin, clen, uio);
			tty_lock(tp);

			/* Block can now be readded to the list. */
			TTYOUTQ_RECYCLE(to, tob);
		} else {
			char ob[TTYOUTQ_DATASIZE - 1];

			/*
			 * Slow path: store data in a temporary buffer.
			 */
			memcpy(ob, tob->tob_data + cbegin, clen);
			to->to_begin += clen;
			MPASS(to->to_begin < TTYOUTQ_DATASIZE);

			/* Temporarily unlock and copy the data to userspace. */
			tty_unlock(tp);
			error = uiomove(ob, clen, uio);
			tty_lock(tp);
		}

		if (error != 0)
			return (error);
	}

	return (0);
}
Example #13
static int set_serial_info(struct tty_struct *tty, struct serial_state *state,
			   struct serial_struct __user * new_info)
{
	struct tty_port *port = &state->tport;
	struct serial_struct new_serial;
	bool change_spd;
	int 			retval = 0;

	if (copy_from_user(&new_serial,new_info,sizeof(new_serial)))
		return -EFAULT;

	tty_lock(tty);
	change_spd = ((new_serial.flags ^ port->flags) & ASYNC_SPD_MASK) ||
		new_serial.custom_divisor != state->custom_divisor;
	if (new_serial.irq || new_serial.port != state->port ||
			new_serial.xmit_fifo_size != state->xmit_fifo_size) {
		tty_unlock(tty);
		return -EINVAL;
	}
  
	if (!serial_isroot()) {
		if ((new_serial.baud_base != state->baud_base) ||
		    (new_serial.close_delay != port->close_delay) ||
		    (new_serial.xmit_fifo_size != state->xmit_fifo_size) ||
		    ((new_serial.flags & ~ASYNC_USR_MASK) !=
		     (port->flags & ~ASYNC_USR_MASK))) {
			tty_unlock(tty);
			return -EPERM;
		}
		port->flags = ((port->flags & ~ASYNC_USR_MASK) |
			       (new_serial.flags & ASYNC_USR_MASK));
		state->custom_divisor = new_serial.custom_divisor;
		goto check_and_exit;
	}

	if (new_serial.baud_base < 9600) {
		tty_unlock(tty);
		return -EINVAL;
	}

	/*
	 * OK, past this point, all the error checking has been done.
	 * At this point, we start making changes.....
	 */

	state->baud_base = new_serial.baud_base;
	port->flags = ((port->flags & ~ASYNC_FLAGS) |
			(new_serial.flags & ASYNC_FLAGS));
	state->custom_divisor = new_serial.custom_divisor;
	port->close_delay = new_serial.close_delay * HZ/100;
	port->closing_wait = new_serial.closing_wait * HZ/100;
	port->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;

check_and_exit:
	if (tty_port_initialized(port)) {
		if (change_spd) {
			/* warn about deprecation unless clearing */
			if (new_serial.flags & ASYNC_SPD_MASK)
				dev_warn_ratelimited(tty->dev, "use of SPD flags is deprecated\n");
			change_speed(tty, state, NULL);
		}
	} else
		retval = startup(tty, state);
	tty_unlock(tty);
	return retval;
}
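
set_serial_info() is reached from the TIOCSSERIAL ioctl; a hedged sketch of that dispatch. The shape follows serial drivers of this era, but the function name and the driver_data recovery are assumptions.

/*
 * Sketch only: the ioctl branch that calls set_serial_info().
 */
static int
rs_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
{
	struct serial_state *state = tty->driver_data;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case TIOCSSERIAL:
		return set_serial_info(tty, state, argp);
	default:
		return -ENOIOCTLCMD;
	}
}
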
Example #14
void tty_ldisc_hangup(struct tty_struct *tty)
{
	struct tty_ldisc *ld;
	int reset = tty->driver->flags & TTY_DRIVER_RESET_TERMIOS;
	int err = 0;

	tty_ldisc_debug(tty, "closing ldisc: %p\n", tty->ldisc);

	ld = tty_ldisc_ref(tty);
	if (ld != NULL) {
		if (ld->ops->flush_buffer)
			ld->ops->flush_buffer(tty);
		tty_driver_flush_buffer(tty);
		if ((test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) &&
		    ld->ops->write_wakeup)
			ld->ops->write_wakeup(tty);
		if (ld->ops->hangup)
			ld->ops->hangup(tty);
		tty_ldisc_deref(ld);
	}

	wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
	wake_up_interruptible_poll(&tty->read_wait, POLLIN);

	tty_unlock(tty);

	/*
	 * Shutdown the current line discipline, and reset it to
	 * N_TTY if need be.
	 *
	 * Avoid racing set_ldisc or tty_ldisc_release
	 */
	tty_ldisc_lock_pair(tty, tty->link);
	tty_lock(tty);

	if (tty->ldisc) {

		/* At this point we have a halted ldisc; we want to close it and
		   reopen a new ldisc. We could defer the reopen to the next
		   open but it means auditing a lot of other paths so this is
		   a FIXME */
		if (reset == 0) {

			if (!tty_ldisc_reinit(tty, tty->termios.c_line))
				err = tty_ldisc_open(tty, tty->ldisc);
			else
				err = 1;
		}
		/* If the re-open fails or we reset then go to N_TTY. The
		   N_TTY open cannot fail */
		if (reset || err) {
			BUG_ON(tty_ldisc_reinit(tty, N_TTY));
			WARN_ON(tty_ldisc_open(tty, tty->ldisc));
		}
	}
	tty_ldisc_enable_pair(tty, tty->link);
	if (reset)
		tty_reset_termios(tty);

	tty_ldisc_debug(tty, "re-opened ldisc: %p\n", tty->ldisc);
}
Example #15
int tty_set_ldisc(struct tty_struct *tty, int ldisc)
{
	int retval;
	struct tty_ldisc *o_ldisc, *new_ldisc;
	struct tty_struct *o_tty = tty->link;

	new_ldisc = tty_ldisc_get(tty, ldisc);
	if (IS_ERR(new_ldisc))
		return PTR_ERR(new_ldisc);

	retval = tty_ldisc_lock_pair_timeout(tty, o_tty, 5 * HZ);
	if (retval) {
		tty_ldisc_put(new_ldisc);
		return retval;
	}

	/*
	 *	Check the no-op case
	 */

	if (tty->ldisc->ops->num == ldisc) {
		tty_ldisc_enable_pair(tty, o_tty);
		tty_ldisc_put(new_ldisc);
		return 0;
	}

	/* FIXME: why 'shutoff' input if the ldisc is locked? */
	tty->receive_room = 0;

	o_ldisc = tty->ldisc;
	tty_lock(tty);

	/* FIXME: for testing only */
	WARN_ON(test_bit(TTY_HUPPED, &tty->flags));

	if (test_bit(TTY_HUPPING, &tty->flags)) {
		/* We were raced by the hangup method. It will have stomped
		   the ldisc data and closed the ldisc down */
		tty_ldisc_enable_pair(tty, o_tty);
		tty_ldisc_put(new_ldisc);
		tty_unlock(tty);
		return -EIO;
	}

	/* Shutdown the current discipline. */
	tty_ldisc_close(tty, o_ldisc);

	/* Now set up the new line discipline. */
	tty->ldisc = new_ldisc;
	tty_set_termios_ldisc(tty, ldisc);

	retval = tty_ldisc_open(tty, new_ldisc);
	if (retval < 0) {
		/* Back to the old one or N_TTY if we can't */
		tty_ldisc_put(new_ldisc);
		tty_ldisc_restore(tty, o_ldisc);
	}

	/* At this point we hold a reference to the new ldisc and a
	   reference to the old ldisc. If we ended up flipping back
	   to the existing ldisc we have two references to it */

	if (tty->ldisc->ops->num != o_ldisc->ops->num && tty->ops->set_ldisc)
		tty->ops->set_ldisc(tty);

	tty_ldisc_put(o_ldisc);

	/*
	 *	Allow ldisc referencing to occur again
	 */
	tty_ldisc_enable_pair(tty, o_tty);

	/* Restart the work queue in case no characters kick it off. Safe if
	   already running */
	schedule_work(&tty->port->buf.work);
	if (o_tty)
		schedule_work(&o_tty->port->buf.work);

	tty_unlock(tty);
	return retval;
}