/*
 * Read from TTY.
 * No locks may be held.
 * May run on any CPU - does not talk to device driver.
 *
 * Returns:
 *	D_IO_ERROR	carrier is down and delayed opens are not enabled
 *	D_WOULD_BLOCK	no carrier and caller asked for non-blocking I/O
 *	D_IO_QUEUED	no input available (or no carrier); the request was
 *			queued on t_delayed_read and char_read_done will
 *			complete it later
 *	KERN_SUCCESS	data was copied into ior->io_data; io_residual holds
 *			the number of requested bytes NOT transferred
 *	other		failure from device_read_alloc
 */
io_return_t char_read(
	register struct tty	*tp,
	register io_req_t	ior)
{
	spl_t		s;
	kern_return_t	rc;

	/*
	 * Allocate memory for read buffer.
	 */
	rc = device_read_alloc(ior, (vm_size_t)ior->io_count);
	if (rc != KERN_SUCCESS)
	    return rc;

	/* Interrupt protection first, then the tty lock. */
	s = spltty();
	simple_lock(&tp->t_lock);
	if ((tp->t_state & TS_CARR_ON) == 0) {
	    if ((tp->t_state & TS_ONDELAY) == 0) {
		/*
		 * No delayed writes - tell caller that device is down
		 */
		rc = D_IO_ERROR;
		goto out;
	    }
	    if (ior->io_mode & D_NOWAIT) {
		rc = D_WOULD_BLOCK;
		goto out;
	    }
	}
	/*
	 * Nothing to read right now (empty input queue, or we are waiting
	 * for carrier): park the request for char_read_done.
	 */
	if (tp->t_inq.c_cc <= 0 ||
	    (tp->t_state & TS_CARR_ON) == 0) {
	    ior->io_dev_ptr = (char *)tp;
	    queue_delayed_reply(&tp->t_delayed_read, ior, char_read_done);
	    rc = D_IO_QUEUED;
	    goto out;
	}

	/*
	 * Copy what is available; q_to_b returns the number of bytes
	 * actually moved, the shortfall becomes io_residual.
	 */
	ior->io_residual = ior->io_count - q_to_b(&tp->t_inq,
						  ior->io_data,
						  (int)ior->io_count);
	/*
	 * Input was drained - if we had dropped RTS to throttle the
	 * sender, raise it again now that there is room.
	 */
	if (tp->t_state & TS_RTS_DOWN) {
	    (*tp->t_mctl)(tp, TM_RTS, DMBIS);
	    tp->t_state &= ~TS_RTS_DOWN;
	}
out:
	simple_unlock(&tp->t_lock);
	splx(s);
	return rc;
}
/*
 * Read queued keyboard events into the caller's buffer.
 *
 * Returns:
 *	D_WOULD_BLOCK	queue empty and D_NOWAIT requested
 *	D_IO_QUEUED	queue empty; request parked on kbd_read_queue and
 *			completed by kbd_read_done when events arrive
 *	D_SUCCESS	one or more whole kd_event records copied;
 *			io_residual is the count of bytes NOT filled
 *	other		failure from device_read_alloc
 */
int kbdread(
	dev_t		dev,
	io_req_t	ior)
{
	register int	err, s, count;

	err = device_read_alloc(ior, (vm_size_t)ior->io_count);
	if (err != KERN_SUCCESS)
	    return (err);

	s = SPLKD();
	kdq_lock(&kbd_queue);
	if (kdq_empty(&kbd_queue)) {
	    if (ior->io_mode & D_NOWAIT) {
		kdq_unlock(&kbd_queue);
		splx(s);
		return (D_WOULD_BLOCK);
	    }
	    /* Park the request; kbd_read_done fires on the next event. */
	    ior->io_done = kbd_read_done;
	    mpenqueue_tail(&kbd_read_queue, (queue_entry_t) ior);
	    kdq_unlock(&kbd_queue);
	    splx(s);
	    return (D_IO_QUEUED);
	}
	count = 0;
	/*
	 * Copy only whole events, and only while a full kd_event still
	 * fits in the remaining buffer.  The original bound
	 * (count < io_count) let the final copy overrun io_data when
	 * io_count was not a multiple of sizeof(kd_event).
	 */
	while (!kdq_empty(&kbd_queue) &&
	       count + sizeof(kd_event) <= ior->io_count) {
	    register kd_event *ev;

	    ev = kdq_get(&kbd_queue);
	    *(kd_event *)(&ior->io_data[count]) = *ev;
	    count += sizeof(kd_event);
	}
	kdq_unlock(&kbd_queue);
	splx(s);
	ior->io_residual = ior->io_count - count;
	return (D_SUCCESS);
}
/*
 * Devops read routine.
 *
 * Looks up dev on the active-device list and, if the slot is free,
 * registers ior as the pending read and kicks datadev_request.
 *
 * Returns:
 *	D_INVALID_OPERATION	dev not on datadev_curr, or another read
 *				(datadev_ior) is already outstanding
 *	D_IO_QUEUED		asynchronous request accepted
 *	D_SUCCESS		synchronous request completed (after iowait)
 *	other			failure from device_read_alloc
 */
io_return_t datadev_read(
	dev_t		dev,
	io_req_t	ior)
{
	kern_return_t	rc;
	spl_t		s;
	datadev_t	ddp;

	rc = device_read_alloc(ior, (vm_size_t)ior->io_count);
	if (rc != KERN_SUCCESS)
	    return (rc);

	s = splsched();
	mutex_lock(&datadev_lock);
	queue_iterate(&datadev_curr, ddp, datadev_t, dd_chain)
	    if (ddp->dd_dev == dev)
		break;
	/*
	 * If the iteration ran off the end (queue_end true) the device
	 * was never found; also refuse if a read is already pending.
	 * BUG FIX: the test was inverted (!queue_end), which rejected
	 * every device that WAS on the list and accepted the list-head
	 * sentinel when the lookup failed.
	 */
	if (queue_end(&datadev_curr, (queue_entry_t)ddp) ||
	    datadev_ior != (io_req_t)0) {
	    mutex_unlock(&datadev_lock);
	    splx(s);
	    return (D_INVALID_OPERATION);
	}
	datadev_ior = ior;
	mutex_unlock(&datadev_lock);
	splx(s);

	/* Start (or restart) the transfer state machine. */
	datadev_request((datadev_t)0);

	if ((ior->io_op & IO_SYNC) == 0)
	    return (D_IO_QUEUED);

	/* Synchronous caller: block until the request completes. */
	iowait(ior);
	return (D_SUCCESS);
}