static void change_termios(struct tty_struct *tty, struct termios *new_termios)
{
	int canon_change;
	struct termios old_termios = *tty->termios;
	struct tty_ldisc *ld;

	/*
	 *	Perform the actual termios internal changes under lock.
	 */

	/* FIXME: we need to decide on some locking/ordering semantics for the
	   set_termios notification eventually */
	down(&tty->termios_sem);

	*tty->termios = *new_termios;
	unset_locked_termios(tty->termios, &old_termios, tty->termios_locked);
	canon_change = (old_termios.c_lflag ^ tty->termios->c_lflag) & ICANON;
	if (canon_change) {
		memset(&tty->read_flags, 0, sizeof tty->read_flags);
		tty->canon_head = tty->read_tail;
		tty->canon_data = 0;
		tty->erasing = 0;
	}

	if (canon_change && !L_ICANON(tty) && tty->read_cnt)
		/* Get characters left over from canonical mode. */
		wake_up_interruptible(&tty->read_wait);

	/* See if packet mode change of state. */
	if (tty->link && tty->link->packet) {
		int old_flow = ((old_termios.c_iflag & IXON) &&
				(old_termios.c_cc[VSTOP] == '\023') &&
				(old_termios.c_cc[VSTART] == '\021'));
		int new_flow = (I_IXON(tty) &&
				STOP_CHAR(tty) == '\023' &&
				START_CHAR(tty) == '\021');
		if (old_flow != new_flow) {
			tty->ctrl_status &= ~(TIOCPKT_DOSTOP | TIOCPKT_NOSTOP);
			if (new_flow)
				tty->ctrl_status |= TIOCPKT_DOSTOP;
			else
				tty->ctrl_status |= TIOCPKT_NOSTOP;
			wake_up_interruptible(&tty->link->read_wait);
		}
	}

	if (tty->driver->set_termios)
		(*tty->driver->set_termios)(tty, &old_termios);

	ld = tty_ldisc_ref(tty);
	if (ld != NULL) {
		if (ld->set_termios)
			(ld->set_termios)(tty, &old_termios);
		tty_ldisc_deref(ld);
	}
	up(&tty->termios_sem);
}
/*
 * Insert the contents of the selection buffer into the
 * queue of the tty associated with the current console.
 * Invoked by ioctl().
 */
int paste_selection(struct tty_struct *tty)
{
	struct vt_struct *vt = (struct vt_struct *) tty->driver_data;
	int pasted = 0, count;
	struct tty_ldisc *ld;
	DECLARE_WAITQUEUE(wait, current);

	acquire_console_sem();
	poke_blanked_console();
	release_console_sem();

	ld = tty_ldisc_ref_wait(tty);

	add_wait_queue(&vt->paste_wait, &wait);
	while (sel_buffer && sel_buffer_lth > pasted) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (test_bit(TTY_THROTTLED, &tty->flags)) {
			schedule();
			continue;
		}
		count = sel_buffer_lth - pasted;
		count = min(count, tty->ldisc.receive_room(tty));
		tty->ldisc.receive_buf(tty, sel_buffer + pasted, NULL, count);
		pasted += count;
	}
	remove_wait_queue(&vt->paste_wait, &wait);
	current->state = TASK_RUNNING;

	tty_ldisc_deref(ld);
	return 0;
}
void uart_handle_dcd_change(struct uart_port *uport, unsigned int status)
{
	struct uart_state *state = uport->state;
	struct tty_port *port = &state->port;
	struct tty_ldisc *ld = tty_ldisc_ref(port->tty);
	struct pps_event_time ts;

	if (ld && ld->ops->dcd_change)
		pps_get_ts(&ts);

	uport->icount.dcd++;
#ifdef CONFIG_HARD_PPS
	if ((uport->flags & UPF_HARDPPS_CD) && status)
		hardpps();
#endif

	if (port->flags & ASYNC_CHECK_CD) {
		if (status)
			wake_up_interruptible(&port->open_wait);
		else if (port->tty)
			tty_hangup(port->tty);
	}

	if (ld && ld->ops->dcd_change)
		ld->ops->dcd_change(port->tty, status, &ts);
	if (ld)
		tty_ldisc_deref(ld);
}
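For context on how the helper above is normally reached: a UART driver calls uart_handle_dcd_change() from its modem-status interrupt path once it has read the modem status register. A minimal sketch under that assumption, for an 8250-style register layout; the check_modem_status() name and the msr argument are hypothetical.

#include <linux/serial_core.h>
#include <linux/serial_reg.h>

/* Hypothetical caller: report a carrier-detect transition to the serial core. */
static void check_modem_status(struct uart_port *port, unsigned char msr)
{
	/* Only act when the delta-DCD bit says the carrier line changed. */
	if (msr & UART_MSR_DDCD)
		uart_handle_dcd_change(port, msr & UART_MSR_DCD);
}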
void tty_ldisc_hangup(struct tty_struct *tty)
{
	struct tty_ldisc *ld;
	int reset = tty->driver->flags & TTY_DRIVER_RESET_TERMIOS;
	int err = 0;

	/*
	 * FIXME! What are the locking issues here? This may be overdoing
	 * things... This question is especially important now that we've
	 * removed the irqlock.
	 */
	ld = tty_ldisc_ref(tty);
	if (ld != NULL) {
		/* We may have no line discipline at this point */
		if (ld->ops->flush_buffer)
			ld->ops->flush_buffer(tty);
		tty_driver_flush_buffer(tty);
		if ((test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) &&
		    ld->ops->write_wakeup)
			ld->ops->write_wakeup(tty);
		if (ld->ops->hangup)
			ld->ops->hangup(tty);
		tty_ldisc_deref(ld);
	}
	/*
	 * FIXME: Once we trust the LDISC code better we can wait here for
	 * ldisc completion and fix the driver call race
	 */
	wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
	wake_up_interruptible_poll(&tty->read_wait, POLLIN);
	/*
	 * Shutdown the current line discipline, and reset it to
	 * N_TTY if need be.
	 *
	 * Avoid racing set_ldisc or tty_ldisc_release
	 */
	mutex_lock(&tty->ldisc_mutex);
	tty_ldisc_halt(tty);
	/* At this point we have a closed ldisc and we want to
	   reopen it. We could defer this to the next open but
	   it means auditing a lot of other paths so this is
	   a FIXME */
	if (tty->ldisc) {	/* Not yet closed */
		if (reset == 0) {
			tty_ldisc_reinit(tty, tty->termios->c_line);
			err = tty_ldisc_open(tty, tty->ldisc);
		}
		/* If the re-open fails or we reset then go to N_TTY. The
		   N_TTY open cannot fail */
		if (reset || err) {
			tty_ldisc_reinit(tty, N_TTY);
			WARN_ON(tty_ldisc_open(tty, tty->ldisc));
		}
		tty_ldisc_enable(tty);
	}
	mutex_unlock(&tty->ldisc_mutex);
	if (reset)
		tty_reset_termios(tty);
}
static void flush_to_ldisc(struct work_struct *work)
{
	struct tty_struct *tty =
		container_of(work, struct tty_struct, buf.work);
	unsigned long flags;
	struct tty_ldisc *disc;

	disc = tty_ldisc_ref(tty);
	if (disc == NULL)	/*  !TTY_LDISC */
		return;

	spin_lock_irqsave(&tty->buf.lock, flags);

	if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) {
		struct tty_buffer *head;
		while ((head = tty->buf.head) != NULL) {
			int count;
			char *char_buf;
			unsigned char *flag_buf;

			count = head->commit - head->read;
			if (!count) {
				if (head->next == NULL)
					break;
				tty->buf.head = head->next;
				tty_buffer_free(tty, head);
				continue;
			}
			/* Ldisc or user is trying to flush the buffers
			   we are feeding to the ldisc, stop feeding the
			   line discipline as we want to empty the queue */
			if (test_bit(TTY_FLUSHPENDING, &tty->flags))
				break;
			if (!tty->receive_room)
				break;
			if (count > tty->receive_room)
				count = tty->receive_room;
			char_buf = head->char_buf_ptr + head->read;
			flag_buf = head->flag_buf_ptr + head->read;
			head->read += count;
			spin_unlock_irqrestore(&tty->buf.lock, flags);
			disc->ops->receive_buf(tty, char_buf,
						flag_buf, count);
			spin_lock_irqsave(&tty->buf.lock, flags);
		}
		clear_bit(TTY_FLUSHING, &tty->flags);
	}

	/* We may have a deferred request to flush the input buffer,
	   if so pull the chain under the lock and empty the queue */
	if (test_bit(TTY_FLUSHPENDING, &tty->flags)) {
		__tty_buffer_flush(tty);
		clear_bit(TTY_FLUSHPENDING, &tty->flags);
		wake_up(&tty->read_wait);
	}
	spin_unlock_irqrestore(&tty->buf.lock, flags);

	tty_ldisc_deref(disc);
}
/**
 *	tty_set_termios		-	update termios values
 *	@tty: tty to update
 *	@new_termios: desired new value
 *
 *	Perform updates to the termios values set on this terminal. There
 *	is a bit of layering violation here with n_tty in terms of the
 *	internal knowledge of this function.
 *
 *	Locking: termios_mutex
 */
int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios)
{
	struct ktermios old_termios;
	struct tty_ldisc *ld;
	unsigned long flags;

	/*
	 *	Perform the actual termios internal changes under lock.
	 */

	/* FIXME: we need to decide on some locking/ordering semantics for the
	   set_termios notification eventually */
	mutex_lock(&tty->termios_mutex);
	old_termios = *tty->termios;
	*tty->termios = *new_termios;
	unset_locked_termios(tty->termios, &old_termios, tty->termios_locked);

	/* See if packet mode change of state. */
	if (tty->link && tty->link->packet) {
		int extproc = (old_termios.c_lflag & EXTPROC) |
				(tty->termios->c_lflag & EXTPROC);
		int old_flow = ((old_termios.c_iflag & IXON) &&
				(old_termios.c_cc[VSTOP] == '\023') &&
				(old_termios.c_cc[VSTART] == '\021'));
		int new_flow = (I_IXON(tty) &&
				STOP_CHAR(tty) == '\023' &&
				START_CHAR(tty) == '\021');
		if ((old_flow != new_flow) || extproc) {
			spin_lock_irqsave(&tty->ctrl_lock, flags);
			if (old_flow != new_flow) {
				tty->ctrl_status &= ~(TIOCPKT_DOSTOP | TIOCPKT_NOSTOP);
				if (new_flow)
					tty->ctrl_status |= TIOCPKT_DOSTOP;
				else
					tty->ctrl_status |= TIOCPKT_NOSTOP;
			}
			if (extproc)
				tty->ctrl_status |= TIOCPKT_IOCTL;
			spin_unlock_irqrestore(&tty->ctrl_lock, flags);
			wake_up_interruptible(&tty->link->read_wait);
		}
	}

	if (tty->ops->set_termios)
		(*tty->ops->set_termios)(tty, &old_termios);
	else
		tty_termios_copy_hw(tty->termios, &old_termios);

	ld = tty_ldisc_ref(tty);
	if (ld != NULL) {
		if (ld->ops->set_termios)
			(ld->ops->set_termios)(tty, &old_termios);
		tty_ldisc_deref(ld);
	}
	mutex_unlock(&tty->termios_mutex);
	return 0;
}
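A small caller sketch for the function above, assuming this kernel generation where tty->termios is a pointer to struct ktermios: copy the current settings, modify them, and let tty_set_termios() apply the change under termios_mutex and notify the driver and line discipline. The force_ixon_off() helper name is hypothetical, and the caller is assumed to already hold a valid reference to the tty.

#include <linux/tty.h>

/* Hypothetical helper: turn off output flow control (IXON) on a tty. */
static int force_ixon_off(struct tty_struct *tty)
{
	struct ktermios new_termios = *tty->termios;	/* start from the current values */

	new_termios.c_iflag &= ~IXON;
	return tty_set_termios(tty, &new_termios);	/* applies the change under termios_mutex */
}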
void tty_ldisc_flush(struct tty_struct *tty)
{
	struct tty_ldisc *ld = tty_ldisc_ref(tty);
	if (ld) {
		if (ld->ops->flush_buffer)
			ld->ops->flush_buffer(tty);
		tty_ldisc_deref(ld);
	}
	tty_buffer_flush(tty);
}
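Every snippet in this set repeats the guard pattern that tty_ldisc_flush() shows most compactly: tty_ldisc_ref() may return NULL when no discipline is currently attached, each optional ops hook is tested before use, and the reference is always dropped with tty_ldisc_deref(). A minimal illustration of that pattern, assuming an ops-based struct tty_ldisc as in most of the versions collected here; push_to_ldisc() is a hypothetical name and receive_buf() is assumed to return void in this generation.

#include <linux/tty.h>
#include <linux/tty_ldisc.h>

/* Hypothetical illustration: hand bytes to the current line discipline, if any. */
static int push_to_ldisc(struct tty_struct *tty, const unsigned char *buf, int count)
{
	struct tty_ldisc *ld = tty_ldisc_ref(tty);
	int pushed = 0;

	if (ld == NULL)			/* no line discipline attached right now */
		return 0;
	if (ld->ops->receive_buf) {	/* the hook is optional, check before calling */
		ld->ops->receive_buf(tty, buf, NULL, count);
		pushed = count;
	}
	tty_ldisc_deref(ld);		/* always release the reference */
	return pushed;
}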
static void pty_flush_buffer(struct tty_struct *tty)
{
	struct tty_struct *to = tty->link;
	struct tty_ldisc *ld;

	if (!to)
		return;

	ld = tty_ldisc_ref(to);
	tty_buffer_flush(to, ld);
	if (ld)
		tty_ldisc_deref(ld);

	if (to->packet) {
		spin_lock_irq(&tty->ctrl_lock);
		tty->ctrl_status |= TIOCPKT_FLUSHWRITE;
		wake_up_interruptible(&to->read_wait);
		spin_unlock_irq(&tty->ctrl_lock);
	}
}
/**
 * usb_serial_handle_dcd_change - handle a change of carrier detect state
 * @usb_port: usb-serial port
 * @tty: tty for the port
 * @status: new carrier detect status, nonzero if active
 */
void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port,
			struct tty_struct *tty, unsigned int status)
{
	struct tty_port *port = &usb_port->port;

	dev_dbg(&usb_port->dev, "%s - status %d\n", __func__, status);

	if (tty) {
		struct tty_ldisc *ld = tty_ldisc_ref(tty);

		if (ld) {
			if (ld->ops->dcd_change)
				ld->ops->dcd_change(tty, status);
			tty_ldisc_deref(ld);
		}
	}

	if (status)
		wake_up_interruptible(&port->open_wait);
	else if (tty && !C_CLOCAL(tty))
		tty_hangup(tty);
}
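A typical caller sketch for the helper above, assuming a usb-serial driver that has just decoded a modem-status indication from the device; handle_modem_status() and prev_dcd are hypothetical names.

#include <linux/tty.h>
#include <linux/usb/serial.h>

/* Hypothetical driver path: forward DCD edges to usb_serial_handle_dcd_change(). */
static void handle_modem_status(struct usb_serial_port *port, bool dcd, bool *prev_dcd)
{
	struct tty_struct *tty = tty_port_tty_get(&port->port);

	if (dcd != *prev_dcd)
		usb_serial_handle_dcd_change(port, tty, dcd);
	*prev_dcd = dcd;
	tty_kref_put(tty);	/* tty_kref_put() tolerates a NULL tty */
}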
static int tty_port_default_receive_buf(struct tty_port *port,
					const unsigned char *p,
					const unsigned char *f, size_t count)
{
	int ret;
	struct tty_struct *tty;
	struct tty_ldisc *disc;

	tty = READ_ONCE(port->itty);
	if (!tty)
		return 0;

	disc = tty_ldisc_ref(tty);
	if (!disc)
		return 0;

	ret = tty_ldisc_receive_buf(disc, p, (char *)f, count);

	tty_ldisc_deref(disc);

	return ret;
}
static int set_termios(struct tty_struct *tty, void __user *arg, int opt)
{
	struct termios tmp_termios;
	struct tty_ldisc *ld;
	int retval = tty_check_change(tty);

	if (retval)
		return retval;

	if (opt & TERMIOS_TERMIO) {
		memcpy(&tmp_termios, tty->termios, sizeof(struct termios));
		if (user_termio_to_kernel_termios(&tmp_termios,
						(struct termio __user *)arg))
			return -EFAULT;
	} else {
		if (user_termios_to_kernel_termios(&tmp_termios,
						(struct termios __user *)arg))
			return -EFAULT;
	}

	ld = tty_ldisc_ref(tty);

	if (ld != NULL) {
		if ((opt & TERMIOS_FLUSH) && ld->flush_buffer)
			ld->flush_buffer(tty);
		tty_ldisc_deref(ld);
	}

	if (opt & TERMIOS_WAIT) {
		tty_wait_until_sent(tty, 0);
		if (signal_pending(current))
			return -EINTR;
	}

	change_termios(tty, &tmp_termios);
	return 0;
}
/** \<\<private\>\> Read function for TTY
 * @TODO: Why is this copied here from kernel?
 *
 * @param *file  - pointer to the file we want to read from
 * @param *buf   - pointer to a kernel-space buffer which will be filled with data
 * @param count  - buffer size
 * @param *ppos  - pointer to the file offset
 *
 * @return Number of bytes read, or an error code
 */
static ssize_t proxyfs_tty_real_file_tty_read(struct file *file, char *buf,
						size_t count, loff_t *ppos)
{
	int i;
	struct tty_struct *tty;
	struct inode *inode;
	struct tty_ldisc *ld = NULL;
	int state;

	tty = (struct tty_struct *)file->private_data;
	inode = file->f_path.dentry->d_inode;
	//if (tty_paranoia_check(tty, inode, "tty_read"))
	//	return -EIO;
	if (!tty || (test_bit(TTY_IO_ERROR, &tty->flags)))
		return -EIO;

	/* We do not want to wait for the line discipline to sort itself out
	   in this situation */
	ld = tty_ldisc_ref(tty);
	if (ld) {
		lock_kernel();
		if (ld->ops->read) {
			state = current->state;
			i = (ld->ops->read)(tty, file, buf, count);
			current->state = state;
		} else
			i = -EIO;
		tty_ldisc_deref(ld);
		unlock_kernel();
		if (i > 0)
			inode->i_atime = current_fs_time(inode->i_sb);
		return i;
	} else
		return -EAGAIN;
}
void tty_ldisc_hangup(struct tty_struct *tty)
{
	struct tty_ldisc *ld;
	int reset = tty->driver->flags & TTY_DRIVER_RESET_TERMIOS;
	int err = 0;

	/*
	 * FIXME! What are the locking issues here? This may be overdoing
	 * things... This question is especially important now that we've
	 * removed the irqlock.
	 */
	ld = tty_ldisc_ref(tty);
	if (ld != NULL) {
		/* We may have no line discipline at this point */
		if (ld->ops->flush_buffer)
			ld->ops->flush_buffer(tty);
		tty_driver_flush_buffer(tty);
		if ((test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) &&
		    ld->ops->write_wakeup)
			ld->ops->write_wakeup(tty);
		if (ld->ops->hangup)
			ld->ops->hangup(tty);
		tty_ldisc_deref(ld);
	}
	/*
	 * FIXME: Once we trust the LDISC code better we can wait here for
	 * ldisc completion and fix the driver call race
	 */
	wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
	wake_up_interruptible_poll(&tty->read_wait, POLLIN);
	/*
	 * Shutdown the current line discipline, and reset it to
	 * N_TTY if need be.
	 *
	 * Avoid racing set_ldisc or tty_ldisc_release
	 */
	mutex_lock(&tty->ldisc_mutex);

	/*
	 * this is like tty_ldisc_halt, but we need to give up
	 * the BTM before calling cancel_work_sync, which may
	 * need to wait for another function taking the BTM
	 */
	clear_bit(TTY_LDISC, &tty->flags);
	tty_unlock(tty);
	cancel_work_sync(&tty->port->buf.work);
	mutex_unlock(&tty->ldisc_mutex);
retry:
	tty_lock(tty);
	mutex_lock(&tty->ldisc_mutex);

	/* At this point we have a closed ldisc and we want to
	   reopen it. We could defer this to the next open but
	   it means auditing a lot of other paths so this is
	   a FIXME */
	if (tty->ldisc) {	/* Not yet closed */
		if (atomic_read(&tty->ldisc->users) != 1) {
			char cur_n[TASK_COMM_LEN], tty_n[64];
			long timeout = 3 * HZ;
			tty_unlock(tty);

			while (tty_ldisc_wait_idle(tty, timeout) == -EBUSY) {
				timeout = MAX_SCHEDULE_TIMEOUT;
				printk_ratelimited(KERN_WARNING
					"%s: waiting (%s) for %s took too long, but we keep waiting...\n",
					__func__, get_task_comm(cur_n, current),
					tty_name(tty, tty_n));
			}
			mutex_unlock(&tty->ldisc_mutex);
			goto retry;
		}

		if (reset == 0) {

			if (!tty_ldisc_reinit(tty, tty->termios.c_line))
				err = tty_ldisc_open(tty, tty->ldisc);
			else
				err = 1;
		}
		/* If the re-open fails or we reset then go to N_TTY. The
		   N_TTY open cannot fail */
		if (reset || err) {
			BUG_ON(tty_ldisc_reinit(tty, N_TTY));
			WARN_ON(tty_ldisc_open(tty, tty->ldisc));
		}
		tty_ldisc_enable(tty);
	}
	mutex_unlock(&tty->ldisc_mutex);
	if (reset)
		tty_reset_termios(tty);
}
int tty_set_ldisc(struct tty_struct *tty, int ldisc)
{
	int retval;
	struct tty_ldisc o_ldisc, new_ldisc;
	int work;
	unsigned long flags;
	struct tty_struct *o_tty;

restart:
	/* This is a bit ugly for now but means we can break the 'ldisc
	   is part of the tty struct' assumption later */
	retval = tty_ldisc_get(ldisc, &new_ldisc);
	if (retval)
		return retval;

	/*
	 *	Problem: What do we do if this blocks ?
	 */

	tty_wait_until_sent(tty, 0);

	if (tty->ldisc.ops->num == ldisc) {
		tty_ldisc_put(new_ldisc.ops);
		return 0;
	}

	/*
	 *	No more input please, we are switching. The new ldisc
	 *	will update this value in the ldisc open function
	 */

	tty->receive_room = 0;

	o_ldisc = tty->ldisc;
	o_tty = tty->link;

	/*
	 *	Make sure we don't change while someone holds a
	 *	reference to the line discipline. The TTY_LDISC bit
	 *	prevents anyone taking a reference once it is clear.
	 *	We need the lock to avoid racing reference takers.
	 *
	 *	We must clear the TTY_LDISC bit here to avoid a livelock
	 *	with a userspace app continually trying to use the tty in
	 *	parallel to the change and re-referencing the tty.
	 */

	clear_bit(TTY_LDISC, &tty->flags);
	if (o_tty)
		clear_bit(TTY_LDISC, &o_tty->flags);

	spin_lock_irqsave(&tty_ldisc_lock, flags);
	if (tty->ldisc.refcount || (o_tty && o_tty->ldisc.refcount)) {
		if (tty->ldisc.refcount) {
			/* Free the new ldisc we grabbed. Must drop the lock
			   first. */
			spin_unlock_irqrestore(&tty_ldisc_lock, flags);
			tty_ldisc_put(new_ldisc.ops);
			/*
			 * There are several reasons we may be busy, including
			 * random momentary I/O traffic. We must therefore
			 * retry. We could distinguish between blocking ops
			 * and retries if we made tty_ldisc_wait() smarter.
			 * That is up for discussion.
			 */
			if (wait_event_interruptible(tty_ldisc_wait, tty->ldisc.refcount == 0) < 0)
				return -ERESTARTSYS;
			goto restart;
		}
		if (o_tty && o_tty->ldisc.refcount) {
			spin_unlock_irqrestore(&tty_ldisc_lock, flags);
			tty_ldisc_put(o_tty->ldisc.ops);
			if (wait_event_interruptible(tty_ldisc_wait, o_tty->ldisc.refcount == 0) < 0)
				return -ERESTARTSYS;
			goto restart;
		}
	}
	/*
	 *	If the TTY_LDISC bit is set, then we are racing against
	 *	another ldisc change
	 */
	if (test_bit(TTY_LDISC_CHANGING, &tty->flags)) {
		struct tty_ldisc *ld;
		spin_unlock_irqrestore(&tty_ldisc_lock, flags);
		tty_ldisc_put(new_ldisc.ops);
		ld = tty_ldisc_ref_wait(tty);
		tty_ldisc_deref(ld);
		goto restart;
	}
	/*
	 *	This flag is used to avoid two parallel ldisc changes. Once
	 *	open and close are fine grained locked this may work better
	 *	as a mutex shared with the open/close/hup paths
	 */
	set_bit(TTY_LDISC_CHANGING, &tty->flags);
	if (o_tty)
		set_bit(TTY_LDISC_CHANGING, &o_tty->flags);
	spin_unlock_irqrestore(&tty_ldisc_lock, flags);

	/*
	 *	From this point on we know nobody has an ldisc
	 *	usage reference, nor can they obtain one until
	 *	we say so later on.
	 */

	work = cancel_delayed_work(&tty->buf.work);
	/*
	 * Wait for ->hangup_work and ->buf.work handlers to terminate
	 * MUST NOT hold locks here.
	 */
	flush_scheduled_work();
	/* Shutdown the current discipline. */
	if (o_ldisc.ops->close)
		(o_ldisc.ops->close)(tty);

	/* Now set up the new line discipline. */
	tty_ldisc_assign(tty, &new_ldisc);
	tty_set_termios_ldisc(tty, ldisc);
	if (new_ldisc.ops->open)
		retval = (new_ldisc.ops->open)(tty);
	if (retval < 0) {
		tty_ldisc_put(new_ldisc.ops);
		tty_ldisc_restore(tty, &o_ldisc);
	}
	/* At this point we hold a reference to the new ldisc and a
	   reference to the old ldisc. If we ended up flipping back
	   to the existing ldisc we have two references to it */

	if (tty->ldisc.ops->num != o_ldisc.ops->num && tty->ops->set_ldisc)
		tty->ops->set_ldisc(tty);

	tty_ldisc_put(o_ldisc.ops);

	/*
	 *	Allow ldisc referencing to occur as soon as the driver
	 *	ldisc callback completes.
	 */

	tty_ldisc_enable(tty);
	if (o_tty)
		tty_ldisc_enable(o_tty);

	/* Restart it in case no characters kick it off. Safe if
	   already running */
	if (work)
		schedule_delayed_work(&tty->buf.work, 1);
	return retval;
}
int n_tty_ioctl(struct tty_struct *tty, struct file *file,
		unsigned int cmd, unsigned long arg)
{
	struct tty_struct *real_tty;
	void __user *p = (void __user *)arg;
	int retval;
	struct tty_ldisc *ld;

	if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
	    tty->driver->subtype == PTY_TYPE_MASTER)
		real_tty = tty->link;
	else
		real_tty = tty;

	switch (cmd) {
#ifdef TIOCGETP
	case TIOCGETP:
		return get_sgttyb(real_tty, (struct sgttyb __user *) arg);
	case TIOCSETP:
	case TIOCSETN:
		return set_sgttyb(real_tty, (struct sgttyb __user *) arg);
#endif
#ifdef TIOCGETC
	case TIOCGETC:
		return get_tchars(real_tty, p);
	case TIOCSETC:
		return set_tchars(real_tty, p);
#endif
#ifdef TIOCGLTC
	case TIOCGLTC:
		return get_ltchars(real_tty, p);
	case TIOCSLTC:
		return set_ltchars(real_tty, p);
#endif
	case TCGETS:
		if (kernel_termios_to_user_termios((struct termios __user *)arg, real_tty->termios))
			return -EFAULT;
		return 0;
	case TCSETSF:
		return set_termios(real_tty, p, TERMIOS_FLUSH | TERMIOS_WAIT);
	case TCSETSW:
		return set_termios(real_tty, p, TERMIOS_WAIT);
	case TCSETS:
		return set_termios(real_tty, p, 0);
	case TCGETA:
		return get_termio(real_tty, p);
	case TCSETAF:
		return set_termios(real_tty, p, TERMIOS_FLUSH | TERMIOS_WAIT | TERMIOS_TERMIO);
	case TCSETAW:
		return set_termios(real_tty, p, TERMIOS_WAIT | TERMIOS_TERMIO);
	case TCSETA:
		return set_termios(real_tty, p, TERMIOS_TERMIO);
	case TCXONC:
		retval = tty_check_change(tty);
		if (retval)
			return retval;
		switch (arg) {
		case TCOOFF:
			if (!tty->flow_stopped) {
				tty->flow_stopped = 1;
				stop_tty(tty);
			}
			break;
		case TCOON:
			if (tty->flow_stopped) {
				tty->flow_stopped = 0;
				start_tty(tty);
			}
			break;
		case TCIOFF:
			if (STOP_CHAR(tty) != __DISABLED_CHAR)
				send_prio_char(tty, STOP_CHAR(tty));
			break;
		case TCION:
			if (START_CHAR(tty) != __DISABLED_CHAR)
				send_prio_char(tty, START_CHAR(tty));
			break;
		default:
			return -EINVAL;
		}
		return 0;
	case TCFLSH:
		retval = tty_check_change(tty);
		if (retval)
			return retval;

		ld = tty_ldisc_ref(tty);
		switch (arg) {
		case TCIFLUSH:
			if (ld->flush_buffer)
				ld->flush_buffer(tty);
			break;
		case TCIOFLUSH:
			if (ld->flush_buffer)
				ld->flush_buffer(tty);
			/* fall through */
		case TCOFLUSH:
			if (tty->driver->flush_buffer)
				tty->driver->flush_buffer(tty);
			break;
		default:
			tty_ldisc_deref(ld);
			return -EINVAL;
		}
		tty_ldisc_deref(ld);
		return 0;
	case TIOCOUTQ:
		return put_user(tty->driver->chars_in_buffer ?
				tty->driver->chars_in_buffer(tty) : 0,
				(int __user *) arg);
	case TIOCINQ:
		retval = tty->read_cnt;
		if (L_ICANON(tty))
			retval = inq_canon(tty);
		return put_user(retval, (unsigned int __user *) arg);
	case TIOCGLCKTRMIOS:
		if (kernel_termios_to_user_termios((struct termios __user *)arg, real_tty->termios_locked))
			return -EFAULT;
		return 0;
	case TIOCSLCKTRMIOS:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (user_termios_to_kernel_termios(real_tty->termios_locked, (struct termios __user *) arg))
			return -EFAULT;
		return 0;
	case TIOCPKT:
	{
		int pktmode;

		if (tty->driver->type != TTY_DRIVER_TYPE_PTY ||
		    tty->driver->subtype != PTY_TYPE_MASTER)
			return -ENOTTY;
		if (get_user(pktmode, (int __user *) arg))
			return -EFAULT;
		if (pktmode) {
			if (!tty->packet) {
				tty->packet = 1;
				tty->link->ctrl_status = 0;
			}
		} else
			tty->packet = 0;
		return 0;
	}
	case TIOCGSOFTCAR:
		return put_user(C_CLOCAL(tty) ? 1 : 0, (int __user *)arg);
	case TIOCSSOFTCAR:
		if (get_user(arg, (unsigned int __user *) arg))
			return -EFAULT;
		down(&tty->termios_sem);
		tty->termios->c_cflag =
			((tty->termios->c_cflag & ~CLOCAL) |
			 (arg ? CLOCAL : 0));
		up(&tty->termios_sem);
		return 0;
	default:
		return -ENOIOCTLCMD;
	}
}
static void flush_to_ldisc(struct work_struct *work)
{
	struct tty_struct *tty =
		container_of(work, struct tty_struct, buf.work.work);
	unsigned long flags;
	struct tty_ldisc *disc;

	disc = tty_ldisc_ref(tty);
	if (disc == NULL)	/*  !TTY_LDISC */
		return;

	spin_lock_irqsave(&tty->buf.lock, flags);

	if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) {
		struct tty_buffer *head, *tail = tty->buf.tail;
		int seen_tail = 0;
		while ((head = tty->buf.head) != NULL) {
			int count;
			char *char_buf;
			unsigned char *flag_buf;

			count = head->commit - head->read;
			if (!count) {
				if (head->next == NULL)
					break;
				/*
				  There's a possibility tty might get new buffer
				  added during the unlock window below. We could
				  end up spinning in here forever hogging the CPU
				  completely. To avoid this let's have a rest each
				  time we processed the tail buffer.
				*/
				if (tail == head)
					seen_tail = 1;
				tty->buf.head = head->next;
				tty_buffer_free(tty, head);
				continue;
			}
			/* Ldisc or user is trying to flush the buffers
			   we are feeding to the ldisc, stop feeding the
			   line discipline as we want to empty the queue */
			if (test_bit(TTY_FLUSHPENDING, &tty->flags))
				break;
			if (!tty->receive_room || seen_tail) {
				schedule_delayed_work(&tty->buf.work, 1);
				break;
			}
			if (count > tty->receive_room)
				count = tty->receive_room;
			char_buf = head->char_buf_ptr + head->read;
			flag_buf = head->flag_buf_ptr + head->read;
			head->read += count;
			spin_unlock_irqrestore(&tty->buf.lock, flags);
			disc->ops->receive_buf(tty, char_buf,
						flag_buf, count);
			spin_lock_irqsave(&tty->buf.lock, flags);
		}
		clear_bit(TTY_FLUSHING, &tty->flags);
	}

	/* We may have a deferred request to flush the input buffer,
	   if so pull the chain under the lock and empty the queue */
	if (test_bit(TTY_FLUSHPENDING, &tty->flags)) {
		__tty_buffer_flush(tty);
		clear_bit(TTY_FLUSHPENDING, &tty->flags);
		wake_up(&tty->read_wait);
	}
	spin_unlock_irqrestore(&tty->buf.lock, flags);

	tty_ldisc_deref(disc);
}
void tty_ldisc_hangup(struct tty_struct *tty)
{
	struct tty_ldisc *ld;
	int reset = tty->driver->flags & TTY_DRIVER_RESET_TERMIOS;
	int err = 0;

	tty_ldisc_debug(tty, "closing ldisc: %p\n", tty->ldisc);

	ld = tty_ldisc_ref(tty);
	if (ld != NULL) {
		if (ld->ops->flush_buffer)
			ld->ops->flush_buffer(tty);
		tty_driver_flush_buffer(tty);
		if ((test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) &&
		    ld->ops->write_wakeup)
			ld->ops->write_wakeup(tty);
		if (ld->ops->hangup)
			ld->ops->hangup(tty);
		tty_ldisc_deref(ld);
	}

	wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
	wake_up_interruptible_poll(&tty->read_wait, POLLIN);

	tty_unlock(tty);

	/*
	 * Shutdown the current line discipline, and reset it to
	 * N_TTY if need be.
	 *
	 * Avoid racing set_ldisc or tty_ldisc_release
	 */
	tty_ldisc_lock_pair(tty, tty->link);
	tty_lock(tty);

	if (tty->ldisc) {

		/* At this point we have a halted ldisc; we want to close it and
		   reopen a new ldisc. We could defer the reopen to the next
		   open but it means auditing a lot of other paths so this is
		   a FIXME */
		if (reset == 0) {

			if (!tty_ldisc_reinit(tty, tty->termios.c_line))
				err = tty_ldisc_open(tty, tty->ldisc);
			else
				err = 1;
		}
		/* If the re-open fails or we reset then go to N_TTY. The
		   N_TTY open cannot fail */
		if (reset || err) {
			BUG_ON(tty_ldisc_reinit(tty, N_TTY));
			WARN_ON(tty_ldisc_open(tty, tty->ldisc));
		}
	}
	tty_ldisc_enable_pair(tty, tty->link);
	if (reset)
		tty_reset_termios(tty);

	tty_ldisc_debug(tty, "re-opened ldisc: %p\n", tty->ldisc);
}
void tty_ldisc_hangup(struct tty_struct *tty)
{
	struct tty_ldisc *ld;
	int reset = tty->driver->flags & TTY_DRIVER_RESET_TERMIOS;
	int err = 0;

	ld = tty_ldisc_ref(tty);
	if (ld != NULL) {
		/* We may have no line discipline at this point */
		if (ld->ops->flush_buffer)
			ld->ops->flush_buffer(tty);
		tty_driver_flush_buffer(tty);
		if ((test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) &&
		    ld->ops->write_wakeup)
			ld->ops->write_wakeup(tty);
		if (ld->ops->hangup)
			ld->ops->hangup(tty);
		tty_ldisc_deref(ld);
	}

	wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
	wake_up_interruptible_poll(&tty->read_wait, POLLIN);

	/*
	 * Shutdown the current line discipline, and reset it to
	 * N_TTY if need be.
	 *
	 * Avoid racing set_ldisc or tty_ldisc_release
	 */
	mutex_lock(&tty->ldisc_mutex);

	/*
	 * this is like tty_ldisc_halt, but we need to give up
	 * the BTM before calling cancel_delayed_work_sync,
	 * which may need to wait for another function taking the BTM
	 */
	clear_bit(TTY_LDISC, &tty->flags);
	tty_unlock();
	cancel_delayed_work_sync(&tty->buf.work);
	mutex_unlock(&tty->ldisc_mutex);

	tty_lock();
	mutex_lock(&tty->ldisc_mutex);

	if (tty->ldisc) {	/* Not yet closed */
		if (reset == 0) {

			if (!tty_ldisc_reinit(tty, tty->termios->c_line))
				err = tty_ldisc_open(tty, tty->ldisc);
			else
				err = 1;
		}
		/* If the re-open fails or we reset then go to N_TTY. The
		   N_TTY open cannot fail */
		if (reset || err) {
			BUG_ON(tty_ldisc_reinit(tty, N_TTY));
			WARN_ON(tty_ldisc_open(tty, tty->ldisc));
		}
		tty_ldisc_enable(tty);
	}
	mutex_unlock(&tty->ldisc_mutex);
	if (reset)
		tty_reset_termios(tty);
}
void jsm_input(struct jsm_channel *ch)
{
	struct jsm_board *bd;
	struct tty_struct *tp;
	struct tty_ldisc *ld;
	u32 rmask;
	u16 head;
	u16 tail;
	int data_len;
	unsigned long lock_flags;
	int flip_len = 0;
	int len = 0;
	int n = 0;
	int s = 0;
	int i = 0;

	jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, "start\n");

	if (!ch)
		return;

	tp = ch->uart_port.info->tty;

	bd = ch->ch_bd;
	if (!bd)
		return;

	spin_lock_irqsave(&ch->ch_lock, lock_flags);

	/*
	 *	Figure the number of characters in the buffer.
	 *	Exit immediately if none.
	 */
	rmask = RQUEUEMASK;

	head = ch->ch_r_head & rmask;
	tail = ch->ch_r_tail & rmask;

	data_len = (head - tail) & rmask;
	if (data_len == 0) {
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
		return;
	}

	jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, "start\n");

	/*
	 *	If the device is not open, or CREAD is off, flush
	 *	input data and return immediately.
	 */
	if (!tp || !(tp->termios->c_cflag & CREAD)) {

		jsm_printk(READ, INFO, &ch->ch_bd->pci_dev,
			"input. dropping %d bytes on port %d...\n",
			data_len, ch->ch_portnum);
		ch->ch_r_head = tail;

		/* Force queue flow control to be released, if needed */
		jsm_check_queue_flow_control(ch);

		spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
		return;
	}

	/*
	 * If we are throttled, simply don't read any data.
	 */
	if (ch->ch_flags & CH_STOPI) {
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
		jsm_printk(READ, INFO, &ch->ch_bd->pci_dev,
			"Port %d throttled, not reading any data. head: %x tail: %x\n",
			ch->ch_portnum, head, tail);
		return;
	}

	jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, "start 2\n");

	/*
	 * If the rxbuf is empty and we are not throttled, put as much
	 * as we can directly into the linux TTY buffer.
	 *
	 */
	flip_len = TTY_FLIPBUF_SIZE;

	len = min(data_len, flip_len);
	len = min(len, (N_TTY_BUF_SIZE - 1) - tp->read_cnt);
	ld = tty_ldisc_ref(tp);

	/*
	 * If we were unable to get a reference to the ld,
	 * don't flush our buffer, and act like the ld doesn't
	 * have any space to put the data right now.
	 */
	if (!ld) {
		len = 0;
	} else {
		/*
		 * If ld doesn't have a pointer to a receive_buf function,
		 * flush the data, then act like the ld doesn't have any
		 * space to put the data right now.
		 */
		if (!ld->receive_buf) {
			ch->ch_r_head = ch->ch_r_tail;
			len = 0;
		}
	}

	if (len <= 0) {
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
		jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, "jsm_input 1\n");
		if (ld)
			tty_ldisc_deref(ld);
		return;
	}

	len = tty_buffer_request_room(tp, len);
	n = len;

	/*
	 * n now contains the most amount of data we can copy,
	 * bounded either by the flip buffer size or the amount
	 * of data the card actually has pending...
	 */
	while (n) {
		s = ((head >= tail) ? head : RQUEUESIZE) - tail;
		s = min(s, n);

		if (s <= 0)
			break;

		/*
		 * If conditions are such that ld needs to see all
		 * UART errors, we will have to walk each character
		 * and error byte and send them to the buffer one at
		 * a time.
		 */
		if (I_PARMRK(tp) || I_BRKINT(tp) || I_INPCK(tp)) {
			for (i = 0; i < s; i++) {
				/*
				 * Give the Linux ld the flags in the
				 * format it likes.
				 */
				if (*(ch->ch_equeue + tail + i) & UART_LSR_BI)
					tty_insert_flip_char(tp, *(ch->ch_rqueue + tail + i), TTY_BREAK);
				else if (*(ch->ch_equeue + tail + i) & UART_LSR_PE)
					tty_insert_flip_char(tp, *(ch->ch_rqueue + tail + i), TTY_PARITY);
				else if (*(ch->ch_equeue + tail + i) & UART_LSR_FE)
					tty_insert_flip_char(tp, *(ch->ch_rqueue + tail + i), TTY_FRAME);
				else
					tty_insert_flip_char(tp, *(ch->ch_rqueue + tail + i), TTY_NORMAL);
			}
		} else {
			tty_insert_flip_string(tp, ch->ch_rqueue + tail, s);
		}
		tail += s;
		n -= s;
		/* Flip queue if needed */
		tail &= rmask;
	}

	ch->ch_r_tail = tail & rmask;
	ch->ch_e_tail = tail & rmask;
	jsm_check_queue_flow_control(ch);
	spin_unlock_irqrestore(&ch->ch_lock, lock_flags);

	/* Tell the tty layer it's okay to "eat" the data now */
	tty_flip_buffer_push(tp);

	if (ld)
		tty_ldisc_deref(ld);

	jsm_printk(IOCTL, INFO, &ch->ch_bd->pci_dev, "finish\n");
}
static void flush_to_ldisc(struct work_struct *work)
{
	struct tty_struct *tty =
		container_of(work, struct tty_struct, buf.work);
	unsigned long flags;
	struct tty_ldisc *disc;

	disc = tty_ldisc_ref(tty);
	if (disc == NULL)	/*  !TTY_LDISC */
		return;

	spin_lock_irqsave(&tty->buf.lock, flags);

	if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) {
		struct tty_buffer *head;
		while ((head = tty->buf.head) != NULL) {
			int count;
			char *char_buf;
			unsigned char *flag_buf;
			unsigned int left = 0;
			unsigned int max_space;

			count = head->commit - head->read;
			if (!count) {
				if (head->next == NULL)
					break;
				tty->buf.head = head->next;
				tty_buffer_free(tty, head);
				continue;
			}
			/* Ldisc or user is trying to flush the buffers
			   we are feeding to the ldisc, stop feeding the
			   line discipline as we want to empty the queue */
			if (test_bit(TTY_FLUSHPENDING, &tty->flags))
				break;

			/* update receive room */
			spin_lock(&tty->read_lock);
			if (tty->update_room_in_ldisc) {
				if ((tty->read_cnt == N_TTY_BUF_SIZE - 1) &&
				    (tty->receive_room == N_TTY_BUF_SIZE - 1))
					tty->rr_bug++;
				left = N_TTY_BUF_SIZE - tty->read_cnt - 1;
			}
			spin_unlock(&tty->read_lock);

			if (!tty->receive_room)
				break;
			if (tty->update_room_in_ldisc && !left) {
				schedule_work(&tty->buf.work);
				break;
			}
			if (tty->update_room_in_ldisc)
				max_space = min(left, tty->receive_room);
			else
				max_space = tty->receive_room;
			if (count > max_space)
				count = max_space;
			char_buf = head->char_buf_ptr + head->read;
			flag_buf = head->flag_buf_ptr + head->read;
			head->read += count;
			spin_unlock_irqrestore(&tty->buf.lock, flags);
			if (tty->start_debug) {
				dbg_log_event(NULL, "f_t_l_d:head->read",
					head->read, "head->commit",
					head->commit, "receive_room ",
					tty->receive_room);
			}
			tty->ldisc_cnt += count;
			disc->ops->receive_buf(tty, char_buf,
						flag_buf, count);
			spin_lock_irqsave(&tty->buf.lock, flags);
		}
		clear_bit(TTY_FLUSHING, &tty->flags);
	}

	/* We may have a deferred request to flush the input buffer,
	   if so pull the chain under the lock and empty the queue */
	if (test_bit(TTY_FLUSHPENDING, &tty->flags)) {
		__tty_buffer_flush(tty);
		clear_bit(TTY_FLUSHPENDING, &tty->flags);
		wake_up(&tty->read_wait);
	}
	spin_unlock_irqrestore(&tty->buf.lock, flags);

	tty_ldisc_deref(disc);
}