static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
{
        char buf[64];
        struct tty_ldisc new_ldisc;

        /* There is an outstanding reference here so this is safe */
        tty_ldisc_get(old->ops->num, old);
        tty_ldisc_assign(tty, old);
        tty_set_termios_ldisc(tty, old->ops->num);
        if (old->ops->open && (old->ops->open(tty) < 0)) {
                tty_ldisc_put(old->ops);
                /* This driver is always present */
                if (tty_ldisc_get(N_TTY, &new_ldisc) < 0)
                        panic("n_tty: get");
                tty_ldisc_assign(tty, &new_ldisc);
                tty_set_termios_ldisc(tty, N_TTY);
                if (new_ldisc.ops->open) {
                        int r = new_ldisc.ops->open(tty);
                        if (r < 0)
                                panic("Couldn't open N_TTY ldisc for "
                                      "%s --- error %d.",
                                      tty_name(tty, buf), r);
                }
        }
}
static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
{
        char buf[64];
        struct tty_ldisc *new_ldisc;
        int r;

        /* There is an outstanding reference here so this is safe */
        old = tty_ldisc_get(old->ops->num);
        WARN_ON(IS_ERR(old));
        tty_ldisc_assign(tty, old);
        tty_set_termios_ldisc(tty, old->ops->num);
        if (tty_ldisc_open(tty, old) < 0) {
                tty_ldisc_put(old);
                /* This driver is always present */
                new_ldisc = tty_ldisc_get(N_TTY);
                if (IS_ERR(new_ldisc))
                        panic("n_tty: get");
                tty_ldisc_assign(tty, new_ldisc);
                tty_set_termios_ldisc(tty, N_TTY);
                r = tty_ldisc_open(tty, new_ldisc);
                if (r < 0)
                        panic("Couldn't open N_TTY ldisc for "
                              "%s --- error %d.",
                              tty_name(tty, buf), r);
        }
}
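/*
 * Hedged sketch (not the real tty code): the reworked helpers above return a
 * pointer or an errno encoded with ERR_PTR(), which callers unpack with
 * IS_ERR()/PTR_ERR(), instead of the older "return int, fill a caller-supplied
 * struct" style.  struct example_ldisc and example_get() are made-up names
 * used only to illustrate that convention.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/errno.h>

struct example_ldisc {
        int num;
};

static struct example_ldisc *example_get(int num)
{
        struct example_ldisc *ld = kzalloc(sizeof(*ld), GFP_KERNEL);

        if (!ld)
                return ERR_PTR(-ENOMEM);        /* the error travels in the pointer */
        ld->num = num;
        return ld;
}

static int __init example_init(void)
{
        struct example_ldisc *ld = example_get(0);

        if (IS_ERR(ld))
                return PTR_ERR(ld);             /* decode the negative errno */
        pr_info("got example ldisc %d\n", ld->num);
        kfree(ld);
        return 0;
}

static void __exit example_exit(void)
{
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");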
void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty)
{
        unsigned long flags;
        struct tty_ldisc ld;

        /*
         * Prevent flush_to_ldisc() from rescheduling the work for later.  Then
         * kill any delayed work.  As this is the final close it does not
         * race with the set_ldisc code path.
         */
        clear_bit(TTY_LDISC, &tty->flags);
        cancel_delayed_work(&tty->buf.work);

        /*
         * Wait for ->hangup_work and ->buf.work handlers to terminate
         */
        flush_scheduled_work();

        /*
         * Wait for any short term users (we know they are just driver
         * side waiters as the file is closing so user count on the file
         * side is zero).
         */
        spin_lock_irqsave(&tty_ldisc_lock, flags);
        while (tty->ldisc.refcount) {
                spin_unlock_irqrestore(&tty_ldisc_lock, flags);
                wait_event(tty_ldisc_wait, tty->ldisc.refcount == 0);
                spin_lock_irqsave(&tty_ldisc_lock, flags);
        }
        spin_unlock_irqrestore(&tty_ldisc_lock, flags);

        /*
         * Shutdown the current line discipline, and reset it to N_TTY.
         *
         * FIXME: this MUST get fixed for the new reflocking
         */
        if (tty->ldisc.ops->close)
                (tty->ldisc.ops->close)(tty);
        tty_ldisc_put(tty->ldisc.ops);

        /*
         * Switch the line discipline back
         */
        WARN_ON(tty_ldisc_get(N_TTY, &ld));
        tty_ldisc_assign(tty, &ld);
        tty_set_termios_ldisc(tty, N_TTY);
        if (o_tty) {
                /* FIXME: could o_tty be in setldisc here ? */
                clear_bit(TTY_LDISC, &o_tty->flags);
                if (o_tty->ldisc.ops->close)
                        (o_tty->ldisc.ops->close)(o_tty);
                tty_ldisc_put(o_tty->ldisc.ops);
                WARN_ON(tty_ldisc_get(N_TTY, &ld));
                tty_ldisc_assign(o_tty, &ld);
                tty_set_termios_ldisc(o_tty, N_TTY);
        }
}
void tty_ldisc_init(struct tty_struct *tty)
{
        struct tty_ldisc ld;

        if (tty_ldisc_get(N_TTY, &ld) < 0)
                panic("n_tty: init_tty");
        tty_ldisc_assign(tty, &ld);
}
void tty_ldisc_init(struct tty_struct *tty)
{
        struct tty_ldisc *ld = tty_ldisc_get(N_TTY);

        if (IS_ERR(ld))
                panic("n_tty: init_tty");
        tty_ldisc_assign(tty, ld);
}
static void tty_ldisc_reinit(struct tty_struct *tty)
{
        struct tty_ldisc *ld;

        tty_ldisc_close(tty, tty->ldisc);
        tty_ldisc_put(tty->ldisc);
        tty->ldisc = NULL;
        /*
         * Switch the line discipline back
         */
        ld = tty_ldisc_get(N_TTY);
        BUG_ON(IS_ERR(ld));
        tty_ldisc_assign(tty, ld);
        tty_set_termios_ldisc(tty, N_TTY);
}
static int tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
{
        struct tty_ldisc *ld = tty_ldisc_get(ldisc);

        if (IS_ERR(ld))
                return -1;

        tty_ldisc_close(tty, tty->ldisc);
        tty_ldisc_put(tty->ldisc);
        tty->ldisc = NULL;
        /*
         * Switch the line discipline back
         */
        tty_ldisc_assign(tty, ld);
        tty_set_termios_ldisc(tty, ldisc);

        return 0;
}
#if 1
static int tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
#else
//static void tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
#endif
//20110402 [email protected] [LS855] handle exception for tty daemon [END]
{
//20110402 [email protected] [LS855] handle exception for tty daemon [START]
#if 1
        struct tty_ldisc *ld = tty_ldisc_get(ldisc);
#else
//      struct tty_ldisc *ld;
#endif
//20110402 [email protected] [LS855] handle exception for tty daemon [END]
        if (IS_ERR(ld))
                return -1;

        tty_ldisc_close(tty, tty->ldisc);
        tty_ldisc_put(tty->ldisc);
        tty->ldisc = NULL;
        /*
         * Switch the line discipline back
         */
//20110402 [email protected] [LS855] handle exception for tty daemon [START]
#if 0
        ld = tty_ldisc_get(ldisc);
        BUG_ON(IS_ERR(ld));
#endif
//20110402 [email protected] [LS855] handle exception for tty daemon [END]
        tty_ldisc_assign(tty, ld);
        tty_set_termios_ldisc(tty, ldisc);
        return 0;
//20110402 [email protected] [LS855] handle exception for tty daemon
}
int tty_set_ldisc(struct tty_struct *tty, int ldisc)
{
        int retval;
        struct tty_ldisc o_ldisc, new_ldisc;
        int work;
        unsigned long flags;
        struct tty_struct *o_tty;

restart:
        /* This is a bit ugly for now but means we can break the 'ldisc
           is part of the tty struct' assumption later */
        retval = tty_ldisc_get(ldisc, &new_ldisc);
        if (retval)
                return retval;

        /*
         * Problem: What do we do if this blocks ?
         */

        tty_wait_until_sent(tty, 0);

        if (tty->ldisc.ops->num == ldisc) {
                tty_ldisc_put(new_ldisc.ops);
                return 0;
        }

        /*
         * No more input please, we are switching. The new ldisc
         * will update this value in the ldisc open function
         */

        tty->receive_room = 0;

        o_ldisc = tty->ldisc;
        o_tty = tty->link;

        /*
         * Make sure we don't change while someone holds a
         * reference to the line discipline. The TTY_LDISC bit
         * prevents anyone taking a reference once it is clear.
         * We need the lock to avoid racing reference takers.
         *
         * We must clear the TTY_LDISC bit here to avoid a livelock
         * with a userspace app continually trying to use the tty in
         * parallel to the change and re-referencing the tty.
         */

        clear_bit(TTY_LDISC, &tty->flags);
        if (o_tty)
                clear_bit(TTY_LDISC, &o_tty->flags);

        spin_lock_irqsave(&tty_ldisc_lock, flags);
        if (tty->ldisc.refcount || (o_tty && o_tty->ldisc.refcount)) {
                if (tty->ldisc.refcount) {
                        /* Free the new ldisc we grabbed. Must drop the lock
                           first. */
                        spin_unlock_irqrestore(&tty_ldisc_lock, flags);
                        tty_ldisc_put(new_ldisc.ops);
                        /*
                         * There are several reasons we may be busy, including
                         * random momentary I/O traffic. We must therefore
                         * retry. We could distinguish between blocking ops
                         * and retries if we made tty_ldisc_wait() smarter.
                         * That is up for discussion.
                         */
                        if (wait_event_interruptible(tty_ldisc_wait,
                                        tty->ldisc.refcount == 0) < 0)
                                return -ERESTARTSYS;
                        goto restart;
                }
                if (o_tty && o_tty->ldisc.refcount) {
                        spin_unlock_irqrestore(&tty_ldisc_lock, flags);
                        tty_ldisc_put(o_tty->ldisc.ops);
                        if (wait_event_interruptible(tty_ldisc_wait,
                                        o_tty->ldisc.refcount == 0) < 0)
                                return -ERESTARTSYS;
                        goto restart;
                }
        }
        /*
         * If the TTY_LDISC_CHANGING bit is set, then we are racing against
         * another ldisc change
         */
        if (test_bit(TTY_LDISC_CHANGING, &tty->flags)) {
                struct tty_ldisc *ld;
                spin_unlock_irqrestore(&tty_ldisc_lock, flags);
                tty_ldisc_put(new_ldisc.ops);
                ld = tty_ldisc_ref_wait(tty);
                tty_ldisc_deref(ld);
                goto restart;
        }
        /*
         * This flag is used to avoid two parallel ldisc changes. Once
         * open and close are fine grained locked this may work better
         * as a mutex shared with the open/close/hup paths
         */
        set_bit(TTY_LDISC_CHANGING, &tty->flags);
        if (o_tty)
                set_bit(TTY_LDISC_CHANGING, &o_tty->flags);
        spin_unlock_irqrestore(&tty_ldisc_lock, flags);

        /*
         * From this point on we know nobody has an ldisc
         * usage reference, nor can they obtain one until
         * we say so later on.
         */

        work = cancel_delayed_work(&tty->buf.work);
        /*
         * Wait for ->hangup_work and ->buf.work handlers to terminate
         * MUST NOT hold locks here.
         */
        flush_scheduled_work();

        /* Shutdown the current discipline. */
        if (o_ldisc.ops->close)
                (o_ldisc.ops->close)(tty);

        /* Now set up the new line discipline. */
        tty_ldisc_assign(tty, &new_ldisc);
        tty_set_termios_ldisc(tty, ldisc);
        if (new_ldisc.ops->open)
                retval = (new_ldisc.ops->open)(tty);
        if (retval < 0) {
                tty_ldisc_put(new_ldisc.ops);
                tty_ldisc_restore(tty, &o_ldisc);
        }

        /* At this point we hold a reference to the new ldisc and a
           reference to the old ldisc. If we ended up flipping back
           to the existing ldisc we have two references to it */

        if (tty->ldisc.ops->num != o_ldisc.ops->num && tty->ops->set_ldisc)
                tty->ops->set_ldisc(tty);

        tty_ldisc_put(o_ldisc.ops);

        /*
         * Allow ldisc referencing to occur as soon as the driver
         * ldisc callback completes.
         */

        tty_ldisc_enable(tty);
        if (o_tty)
                tty_ldisc_enable(o_tty);

        /* Restart it in case no characters kick it off. Safe if
           already running */
        if (work)
                schedule_delayed_work(&tty->buf.work, 1);
        return retval;
}
int tty_set_ldisc(struct tty_struct *tty, int ldisc)
{
        int retval;
        struct tty_ldisc *o_ldisc, *new_ldisc;
        int work, o_work = 0;
        struct tty_struct *o_tty;

        new_ldisc = tty_ldisc_get(ldisc);
        if (IS_ERR(new_ldisc))
                return PTR_ERR(new_ldisc);

        /*
         * We need to look at the tty locking here for pty/tty pairs
         * when both sides try to change in parallel.
         */

        o_tty = tty->link;      /* o_tty is the pty side or NULL */

        /*
         * Check the no-op case
         */

        if (tty->ldisc->ops->num == ldisc) {
                tty_ldisc_put(new_ldisc);
                return 0;
        }

        /*
         * Problem: What do we do if this blocks ?
         * We could deadlock here
         */

        tty_wait_until_sent(tty, 0);

        mutex_lock(&tty->ldisc_mutex);

        /*
         * We could be midstream of another ldisc change which has
         * dropped the lock during processing. If so we need to wait.
         */

        while (test_bit(TTY_LDISC_CHANGING, &tty->flags)) {
                mutex_unlock(&tty->ldisc_mutex);
                wait_event(tty_ldisc_wait,
                        test_bit(TTY_LDISC_CHANGING, &tty->flags) == 0);
                mutex_lock(&tty->ldisc_mutex);
        }
        set_bit(TTY_LDISC_CHANGING, &tty->flags);

        /*
         * No more input please, we are switching. The new ldisc
         * will update this value in the ldisc open function
         */

        tty->receive_room = 0;

        o_ldisc = tty->ldisc;

        /*
         * Make sure we don't change while someone holds a
         * reference to the line discipline. The TTY_LDISC bit
         * prevents anyone taking a reference once it is clear.
         * We need the lock to avoid racing reference takers.
         *
         * We must clear the TTY_LDISC bit here to avoid a livelock
         * with a userspace app continually trying to use the tty in
         * parallel to the change and re-referencing the tty.
         */

        work = tty_ldisc_halt(tty);
        if (o_tty)
                o_work = tty_ldisc_halt(o_tty);

        /*
         * Wait for ->hangup_work and ->buf.work handlers to terminate.
         * We must drop the mutex here in case a hangup is also in process.
         */

        mutex_unlock(&tty->ldisc_mutex);

        flush_scheduled_work();

        mutex_lock(&tty->ldisc_mutex);
        if (test_bit(TTY_HUPPED, &tty->flags)) {
                /* We were raced by the hangup method. It will have stomped
                   the ldisc data and closed the ldisc down */
                clear_bit(TTY_LDISC_CHANGING, &tty->flags);
                mutex_unlock(&tty->ldisc_mutex);
                tty_ldisc_put(new_ldisc);
                return -EIO;
        }

        /* Shutdown the current discipline. */
        tty_ldisc_close(tty, o_ldisc);

        /* Now set up the new line discipline. */
        tty_ldisc_assign(tty, new_ldisc);
        tty_set_termios_ldisc(tty, ldisc);

        retval = tty_ldisc_open(tty, new_ldisc);
        if (retval < 0) {
                /* Back to the old one or N_TTY if we can't */
                tty_ldisc_put(new_ldisc);
                tty_ldisc_restore(tty, o_ldisc);
        }

        /* At this point we hold a reference to the new ldisc and a
           reference to the old ldisc. If we ended up flipping back
           to the existing ldisc we have two references to it */

        if (tty->ldisc->ops->num != o_ldisc->ops->num && tty->ops->set_ldisc)
                tty->ops->set_ldisc(tty);

        tty_ldisc_put(o_ldisc);

        /*
         * Allow ldisc referencing to occur again
         */

        tty_ldisc_enable(tty);
        if (o_tty)
                tty_ldisc_enable(o_tty);

        /* Restart the work queue in case no characters kick it off. Safe if
           already running */
        if (work)
                schedule_delayed_work(&tty->buf.work, 1);
        if (o_work)
                schedule_delayed_work(&o_tty->buf.work, 1);
        mutex_unlock(&tty->ldisc_mutex);
        return retval;
}
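/*
 * Hedged userspace sketch: the usual way tty_set_ldisc() above gets invoked
 * is through the TIOCSETD ioctl on an open tty.  The device path and the
 * choice of N_PPP are only examples (N_PPP needs the ppp_async discipline to
 * be available); this is not part of the kernel code being discussed.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/tty.h>          /* N_TTY, N_PPP line discipline numbers */

int main(void)
{
        int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);
        int ldisc = N_PPP;

        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* Ask the kernel to switch this tty's line discipline; on failure
           the kernel falls back to the previous ldisc (or N_TTY). */
        if (ioctl(fd, TIOCSETD, &ldisc) < 0)
                perror("TIOCSETD");

        /* Read back the discipline that is now in effect. */
        if (ioctl(fd, TIOCGETD, &ldisc) == 0)
                printf("current ldisc: %d\n", ldisc);

        close(fd);
        return 0;
}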
/**
 *      tty_ldisc_deinit        -       ldisc cleanup for new tty
 *      @tty: tty that was allocated recently
 *
 *      The tty structure must not be completely set up (tty_ldisc_setup) when
 *      this call is made.
 */

void tty_ldisc_deinit(struct tty_struct *tty)
{
        put_ldisc(tty->ldisc);
        tty_ldisc_assign(tty, NULL);
}