static int gs_start_io(struct gs_port *port)
{
	struct list_head	*head = &port->read_pool;
	struct usb_ep		*ep = port->port_usb->out;
	int			status;
	unsigned		started;

	status = gs_alloc_requests(ep, head, RX_QUEUE_SIZE, RX_BUF_SIZE,
			gs_read_complete);
	if (status)
		return status;

	status = gs_alloc_requests(port->port_usb->in, &port->write_pool,
			TX_QUEUE_SIZE, TX_BUF_SIZE, gs_write_complete);
	if (status) {
		gs_free_requests(ep, head);
		return status;
	}

	port->n_read = 0;
	started = gs_start_rx(port);

	if (started) {
		tty_wakeup(port->port_tty);
	} else {
		gs_free_requests(ep, head);
		gs_free_requests(port->port_usb->in, &port->write_pool);
		status = -EIO;
	}

	return status;
}
/**
 * gs_start_io - start USB I/O streams
 * @dev: encapsulates endpoints to use
 * Context: holding port_lock; port_tty and port_usb are non-null
 *
 * We only start I/O when something is connected to both sides of
 * this port.  If nothing is listening on the host side, we may
 * be pointlessly filling up our TX buffers and FIFO.
 */
static int gs_start_io(struct gs_port *port)
{
	struct list_head	*head;
	struct usb_ep		*ep;
	int			status;
	unsigned		started;

	if (!port || !port->port_usb) {
		pr_err("Error - port or port->usb is NULL.");
		return -EIO;
	}

	head = &port->read_pool;
	ep = port->port_usb->out;

	/* Allocate RX and TX I/O buffers.  We can't easily do this much
	 * earlier (with GFP_KERNEL) because the requests are coupled to
	 * endpoints, as are the packet sizes we'll be using.  Different
	 * configurations may use different endpoints with a given port;
	 * and high speed vs full speed changes packet sizes too.
	 */
	status = gs_alloc_requests(ep, head, RX_QUEUE_SIZE, RX_BUF_SIZE,
			gs_read_complete, &port->read_allocated);
	if (status)
		return status;

	status = gs_alloc_requests(port->port_usb->in, &port->write_pool,
			TX_QUEUE_SIZE, TX_BUF_SIZE, gs_write_complete,
			&port->write_allocated);
	if (status) {
		gs_free_requests(ep, head, &port->read_allocated);
		return status;
	}

	/* queue read requests */
	port->n_read = 0;
	started = gs_start_rx(port);

	if (!port->port.tty) {
		printk(KERN_ERR "usb:[%s] port_usb or port_tty is NULL!! started(%d)\n",
				__func__, started);
		return -EIO;
	}

	/* unblock any pending writes into our circular buffer */
	if (started) {
		tty_wakeup(port->port.tty);
	} else {
		gs_free_requests(ep, head, &port->read_allocated);
		gs_free_requests(port->port_usb->in, &port->write_pool,
				&port->write_allocated);
		status = -EIO;
	}

	return status;
}
/**
 * gs_start_io - start USB I/O streams
 * @dev: encapsulates endpoints to use
 * Context: holding port_lock; port_tty and port_usb are non-null
 *
 * We only start I/O when something is connected to both sides of
 * this port.  If nothing is listening on the host side, we may
 * be pointlessly filling up our TX buffers and FIFO.
 */
static int gs_start_io(struct gs_port *port)
{
	struct list_head	*head = &port->read_pool;
	struct usb_ep		*ep = port->port_usb->out;
	int			status;
	unsigned		started;

	/* Allocate RX and TX I/O buffers.  We can't easily do this much
	 * earlier (with GFP_KERNEL) because the requests are coupled to
	 * endpoints, as are the packet sizes we'll be using.  Different
	 * configurations may use different endpoints with a given port;
	 * and high speed vs full speed changes packet sizes too.
	 */
	status = gs_alloc_requests(ep, head, RX_QUEUE_SIZE, RX_BUF_SIZE,
			gs_read_complete, &port->read_allocated);
	if (status)
		return status;

	status = gs_alloc_requests(port->port_usb->in, &port->write_pool,
			TX_QUEUE_SIZE, TX_BUF_SIZE, gs_write_complete,
			&port->write_allocated);
	if (status) {
		gs_free_requests(ep, head, &port->read_allocated);
		return status;
	}

	/* queue read requests */
	port->n_read = 0;
	started = gs_start_rx(port);

	if (!port->port_usb)
		return -EIO;

	/* unblock any pending writes into our circular buffer */
#ifndef CONFIG_USB_G_SERIAL_CONSOLE
	if (started) {
		tty_wakeup(port->port_tty);
	} else {
		gs_free_requests(ep, head, &port->read_allocated);
		gs_free_requests(port->port_usb->in, &port->write_pool,
				&port->write_allocated);
		status = -EIO;
	}
#else
	if (!port->port_tty)
		return status;

	if (started) {
		tty_wakeup(port->port_tty);
	} else {
		gs_free_requests(ep, head, &port->read_allocated);
		gs_free_requests(port->port_usb->in, &port->write_pool,
				&port->write_allocated);
		status = -EIO;
	}
#endif

	return status;
}
int gserial_resume(struct gserial *gser)
{
	struct gs_port	*port = gser->ioport;
	unsigned long	flags;

	if (!port)
		return -ESHUTDOWN;

	/* leave the suspended state and restart the RX stream */
	port->is_suspend = 0;

	spin_lock_irqsave(&port->port_lock, flags);
	gs_start_rx(port);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return 0;
}
/*
 * RX tasklet takes data out of the RX queue and hands it up to the TTY
 * layer until it refuses to take any more data (or is throttled back).
 * Then it issues reads for any further data.
 *
 * If the RX queue becomes full enough that no usb_request is queued,
 * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
 * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
 * can be buffered before the TTY layer's buffers (currently 64 KB).
 */
static void gs_rx_push(struct work_struct *w)
{
	struct gs_port		*port = container_of(w, struct gs_port, push);
	struct tty_struct	*tty;
	struct list_head	*queue = &port->read_queue;
	bool			disconnect = false;
	bool			do_push = false;

	/* hand any queued data to the tty */
	spin_lock_irq(&port->port_lock);
	tty = port->port_tty;
	while (!list_empty(queue)) {
		struct usb_request	*req;

		req = list_first_entry(queue, struct usb_request, list);

		/* discard data if tty was closed */
		if (!tty)
			goto recycle;

		/* leave data queued if tty was rx throttled */
		if (test_bit(TTY_THROTTLED, &tty->flags))
			break;

		switch (req->status) {
		case -ESHUTDOWN:
			disconnect = true;
			pr_vdebug(PREFIX "%d: shutdown\n", port->port_num);
			break;

		default:
			/* presumably a transient fault */
			pr_warning(PREFIX "%d: unexpected RX status %d\n",
					port->port_num, req->status);
			/* FALLTHROUGH */
		case 0:
			/* normal completion */
			break;
		}

		/* push data to (open) tty */
		if (req->actual) {
			char		*packet = req->buf;
			unsigned	size = req->actual;
			unsigned	n;
			int		count;

			/* we may have pushed part of this packet already... */
			n = port->n_read;
			if (n) {
				packet += n;
				size -= n;
			}

			count = tty_insert_flip_string(tty, packet, size);
			port->nbytes_to_tty += count;
			if (count)
				do_push = true;
			if (count != size) {
				/* stop pushing; TTY layer can't handle more */
				port->n_read += count;
				pr_vdebug(PREFIX "%d: rx block %d/%d\n",
						port->port_num,
						count, req->actual);
				break;
			}
			port->n_read = 0;
		}
recycle:
		list_move(&req->list, &port->read_pool);
		port->read_started--;
	}

	/* Push from tty to ldisc; without low_latency set this is handled by
	 * a workqueue, so we won't get callbacks and can hold port_lock
	 */
	if (tty && do_push)
		tty_flip_buffer_push(tty);

	/* We want our data queue to become empty ASAP, keeping data
	 * in the tty and ldisc (not here).  If we couldn't push any
	 * this time around, there may be trouble unless there's an
	 * implicit tty_unthrottle() call on its way...
	 *
	 * REVISIT we should probably add a timer to keep the work queue
	 * from starving ... but it's not clear that case ever happens.
	 */
	if (!list_empty(queue) && tty) {
		if (!test_bit(TTY_THROTTLED, &tty->flags)) {
			if (do_push)
				queue_work(gserial_wq, &port->push);
			else
				pr_warning(PREFIX "%d: RX not scheduled?\n",
					port->port_num);
		}
	}

	/* If we're still connected, refill the USB RX queue. */
	if (!disconnect && port->port_usb)
		gs_start_rx(port);

	spin_unlock_irq(&port->port_lock);
}
static void gs_rx_push(struct work_struct *w)
{
	struct gs_port		*port = container_of(w, struct gs_port, push);
	struct tty_struct	*tty;
	struct list_head	*queue = &port->read_queue;
	bool			disconnect = false;
	bool			do_push = false;

	/* hand any queued data to the tty */
	spin_lock_irq(&port->port_lock);
	tty = port->port_tty;
	while (!list_empty(queue)) {
		struct usb_request	*req;

		req = list_first_entry(queue, struct usb_request, list);

		/* discard data if tty was closed */
		if (!tty)
			goto recycle;

		/* leave data queued if tty was rx throttled */
		if (test_bit(TTY_THROTTLED, &tty->flags))
			break;

		switch (req->status) {
		case -ESHUTDOWN:
			disconnect = true;
			pr_vdebug(PREFIX "%d: shutdown\n", port->port_num);
			break;

		default:
			/* presumably a transient fault */
			pr_warning(PREFIX "%d: unexpected RX status %d\n",
					port->port_num, req->status);
			/* FALLTHROUGH */
		case 0:
			/* normal completion */
			break;
		}

		/* push data to (open) tty */
		if (req->actual) {
			char		*packet = req->buf;
			unsigned	size = req->actual;
			unsigned	n;
			int		count;

			/* we may have pushed part of this packet already... */
			n = port->n_read;
			if (n) {
				packet += n;
				size -= n;
			}

			count = tty_insert_flip_string(tty, packet, size);
			if (count)
				do_push = true;
			if (count != size) {
				/* stop pushing; TTY layer can't handle more */
				port->n_read += count;
				pr_vdebug(PREFIX "%d: rx block %d/%d\n",
						port->port_num,
						count, req->actual);
				break;
			}
			port->n_read = 0;
		}
recycle:
		list_move(&req->list, &port->read_pool);
	}

	/* Push from tty to ldisc; this may trigger callbacks back into this
	 * driver, so drop the spinlock and re-read the tty pointer afterwards
	 * (it may have been closed meanwhile).
	 */
	if (tty && do_push) {
		spin_unlock_irq(&port->port_lock);
		tty_flip_buffer_push(tty);
		wake_up_interruptible(&tty->read_wait);
		spin_lock_irq(&port->port_lock);
		tty = port->port_tty;
	}

	/* If data is still queued and the tty isn't throttled, reschedule
	 * ourselves so the queue drains as soon as possible.
	 */
	if (!list_empty(queue) && tty) {
		if (!test_bit(TTY_THROTTLED, &tty->flags)) {
			if (do_push)
				queue_work(gserial_wq, &port->push);
			else
				pr_warning(PREFIX "%d: RX not scheduled?\n",
					port->port_num);
		}
	}

	/* If we're still connected, refill the USB RX queue. */
	if (!disconnect && port->port_usb)
		gs_start_rx(port);

	spin_unlock_irq(&port->port_lock);
}
/*
 * RX tasklet takes data out of the RX queue and hands it up to the TTY
 * layer until it refuses to take any more data (or is throttled back).
 * Then it issues reads for any further data.
 *
 * If the RX queue becomes full enough that no usb_request is queued,
 * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
 * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
 * can be buffered before the TTY layer's buffers (currently 64 KB).
 */
static void gs_rx_push(struct work_struct *w)
{
	struct gs_port		*port = container_of(w, struct gs_port, push);
	struct tty_struct	*tty;
	struct list_head	*queue = &port->read_queue;
	bool			disconnect = false;
	bool			do_push = false;

	/* hand any queued data to the tty */
	spin_lock_irq(&port->port_lock);
	tty = port->port_tty;
	while (!list_empty(queue)) {
		struct usb_request	*req;

		req = list_first_entry(queue, struct usb_request, list);

		/* discard data if tty was closed */
		if (!tty)
			goto recycle;

		/* push data to (open) tty */
		if (req->actual) {
			char		*packet = req->buf;
			unsigned	size = req->actual;
			unsigned	n;
			int		count;

			/* we may have pushed part of this packet already... */
			n = port->n_read;
			if (n) {
				packet += n;
				size -= n;
			}

			count = tty_insert_flip_string(tty, packet, size);
			port->nbytes_to_tty += count;
			if (count)
				do_push = true;
			if (count != size) {
				/* stop pushing; TTY layer can't handle more */
				port->n_read += count;
				pr_vdebug(PREFIX "%d: rx block %d/%d\n",
						port->port_num,
						count, req->actual);
				break;
			}
			port->n_read = 0;
		}
recycle:
		list_move(&req->list, &port->read_pool);
		port->read_started--;
		port->rx_qcnt--;
	}

	/* Push from tty to ldisc; this is immediate with low_latency, and
	 * may trigger callbacks to this driver ... so drop the spinlock.
	 */
	if (tty && do_push) {
		spin_unlock_irq(&port->port_lock);
		tty_flip_buffer_push(tty);
		wake_up_interruptible(&tty->read_wait);
		spin_lock_irq(&port->port_lock);
		/* tty may have been closed */
		tty = port->port_tty;
	}

	/* We want our data queue to become empty ASAP, keeping data
	 * in the tty and ldisc (not here).  If we couldn't push any
	 * this time around, there may be trouble unless there's an
	 * implicit tty_unthrottle() call on its way...
	 *
	 * REVISIT we should probably add a timer to keep the work queue
	 * from starving ... but it's not clear that case ever happens.
	 */
	if (!list_empty(queue) && tty) {
		if (!test_bit(TTY_THROTTLED, &tty->flags)) {
			if (do_push)
				queue_work(gserial_wq, &port->push);
			else
				pr_warning(PREFIX "%d: RX not scheduled?\n",
					port->port_num);
		}
	}

	/* If we're still connected, refill the USB RX queue. */
	if (!disconnect && port->port_usb)
		gs_start_rx(port);

	spin_unlock_irq(&port->port_lock);
}
/*
 * RX tasklet takes data out of the RX queue and hands it up to the TTY
 * layer until it refuses to take any more data (or is throttled back).
 * Then it issues reads for any further data.
 *
 * If the RX queue becomes full enough that no usb_request is queued,
 * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
 * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
 * can be buffered before the TTY layer's buffers (currently 64 KB).
 */
static void gs_rx_push(unsigned long _port)
{
	struct gs_port		*port = (void *)_port;
	struct tty_struct	*tty;
	struct list_head	*queue = &port->read_queue;
	bool			disconnect = false;
	bool			do_push = false;

	/* hand any queued data to the tty */
	spin_lock_irq(&port->port_lock);
	tty = port->port_tty;
	while (!list_empty(queue)) {
		struct usb_request	*req;

		req = list_first_entry(queue, struct usb_request, list);

		/* discard data if tty was closed */
		if (!tty)
			goto recycle;

		/* leave data queued if tty was rx throttled */
		if (test_bit(TTY_THROTTLED, &tty->flags))
			break;

		switch (req->status) {
		case -ESHUTDOWN:
			disconnect = true;
			pr_vdebug(PREFIX "%d: shutdown\n", port->port_num);
			break;

		default:
			/* presumably a transient fault */
			pr_warning(PREFIX "%d: unexpected RX status %d\n",
					port->port_num, req->status);
			/* FALLTHROUGH */
		case 0:
			/* normal completion */
			break;
		}

		USB_LOGGER(GS_RX_PUSH, GS_RX_PUSH, port->port_num,
				req->actual, port->n_read);

#define OUTPUT_BTYE_NUM 5
		{
			int i, j = 0;
			char *prefix[] = {"p1", "p2", "p3", "p4", "p5"};
			char *suffix[] = {"s1", "s2", "s3", "s4", "s5"};

			for (i = 0; i < req->actual && i < OUTPUT_BTYE_NUM; i++)
				USB_LOGGER(HEX_NUM, GS_RX_PUSH, prefix[i],
						*((u8 *)req->buf + i));

			if (req->actual >= OUTPUT_BTYE_NUM * 2) {
				for (i = req->actual - 1, j = 1;
				     i >= (req->actual - OUTPUT_BTYE_NUM)
				     && i >= OUTPUT_BTYE_NUM; i--, j++) {
					USB_LOGGER(HEX_NUM, GS_RX_PUSH,
							suffix[OUTPUT_BTYE_NUM - j],
							*((u8 *)req->buf + i));
				}
			}
		}

		/* push data to (open) tty */
		if (req->actual) {
			char		*packet = req->buf;
			unsigned	size = req->actual;
			unsigned	n;
			int		count;

			/* we may have pushed part of this packet already... */
			n = port->n_read;
			if (n) {
				packet += n;
				size -= n;
			}

			count = tty_insert_flip_string(tty, packet, size);
			if (count)
				do_push = true;
			if (count != size) {
				/* stop pushing; TTY layer can't handle more */
				port->n_read += count;
				pr_vdebug(PREFIX "%d: rx block %d/%d\n",
						port->port_num,
						count, req->actual);
				break;
			}
			port->n_read = 0;
		}
recycle:
		list_move(&req->list, &port->read_pool);
		port->read_started--;
	}

	/* Push from tty to ldisc; without low_latency set this is handled by
	 * a workqueue, so we won't get callbacks and can hold port_lock
	 */
	if (tty && do_push) {
		tty_flip_buffer_push(tty);
	}

	/* We want our data queue to become empty ASAP, keeping data
	 * in the tty and ldisc (not here).  If we couldn't push any
	 * this time around, there may be trouble unless there's an
	 * implicit tty_unthrottle() call on its way...
	 *
	 * REVISIT we should probably add a timer to keep the tasklet
	 * from starving ... but it's not clear that case ever happens.
	 */
	if (!list_empty(queue) && tty) {
		if (!test_bit(TTY_THROTTLED, &tty->flags)) {
			if (do_push)
				tasklet_schedule(&port->push);
			else
				pr_warning(PREFIX "%d: RX not scheduled?\n",
					port->port_num);
		}
	}

	/* If we're still connected, refill the USB RX queue. */
	if (!disconnect && port->port_usb)
		gs_start_rx(port);

	spin_unlock_irq(&port->port_lock);
}
/*
 * RX tasklet takes data out of the RX queue and hands it up to the TTY
 * layer until it refuses to take any more data (or is throttled back).
 * Then it issues reads for any further data.
 *
 * If the RX queue becomes full enough that no usb_request is queued,
 * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
 * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
 * can be buffered before the TTY layer's buffers (currently 64 KB).
 */
static void gs_rx_push(unsigned long _port)
{
	struct gs_port		*port = (void *)_port;
	struct tty_struct	*tty;
	struct list_head	*queue = &port->read_queue;
	bool			disconnect = false;
	bool			do_push = false;
	static unsigned int	skip = 0;
	static DEFINE_RATELIMIT_STATE(ratelimit, 1 * HZ, 10);

	/* hand any queued data to the tty */
	spin_lock_irq(&port->port_lock);
	tty = port->port.tty;
	while (!list_empty(queue)) {
		struct usb_request	*req;

		req = list_first_entry(queue, struct usb_request, list);

		/* leave data queued if tty was rx throttled */
		if (tty && test_bit(TTY_THROTTLED, &tty->flags))
			break;

		switch (req->status) {
		case -ESHUTDOWN:
			disconnect = true;
			pr_vdebug(PREFIX "%d: shutdown\n", port->port_num);
			break;

		default:
			/* presumably a transient fault */
			pr_warning(PREFIX "%d: unexpected RX status %d\n",
					port->port_num, req->status);
			/* FALLTHROUGH */
		case 0:
			/* normal completion */
			break;
		}

		if (__ratelimit(&ratelimit)) {
			printk(ACM_LOG
				"%s: ttyGS%d: actual=%d, n_read=%d 0x%02x 0x%02x 0x%02x ...\n",
				__func__, port->port_num, req->actual, port->n_read,
				*((u8 *)req->buf), *((u8 *)req->buf+1), *((u8 *)req->buf+2));
			if (skip > 0) {
				printk(ACM_LOG "%s Too many data, skipped %d bytes",
					__func__, skip);
				skip = 0;
			}
		} else
			skip += req->actual;

		/* push data to (open) tty */
		if (req->actual) {
			char		*packet = req->buf;
			unsigned	size = req->actual;
			unsigned	n;
			int		count;

			/* we may have pushed part of this packet already... */
			n = port->n_read;
			if (n) {
				packet += n;
				size -= n;
			}

			count = tty_insert_flip_string(&port->port, packet, size);
			if (count)
				do_push = true;
			if (count != size) {
				/* stop pushing; TTY layer can't handle more */
				port->n_read += count;
				pr_vdebug(PREFIX "%d: rx block %d/%d\n",
						port->port_num,
						count, req->actual);
				break;
			}
			port->n_read = 0;
		}

		list_move(&req->list, &port->read_pool);
		port->read_started--;
	}

	/* Push from tty to ldisc; without low_latency set this is handled by
	 * a workqueue, so we won't get callbacks and can hold port_lock
	 */
	if (do_push)
		tty_flip_buffer_push(&port->port);

	/* We want our data queue to become empty ASAP, keeping data
	 * in the tty and ldisc (not here).  If we couldn't push any
	 * this time around, there may be trouble unless there's an
	 * implicit tty_unthrottle() call on its way...
	 *
	 * REVISIT we should probably add a timer to keep the tasklet
	 * from starving ... but it's not clear that case ever happens.
	 */
	if (!list_empty(queue) && tty) {
		if (!test_bit(TTY_THROTTLED, &tty->flags)) {
			if (do_push)
				tasklet_schedule(&port->push);
			else
				pr_warning(PREFIX "%d: RX not scheduled?\n",
					port->port_num);
		}
	}

	/* If we're still connected, refill the USB RX queue. */
	if (!disconnect && port->port_usb)
		gs_start_rx(port);

	spin_unlock_irq(&port->port_lock);
}
/*
 * gs_start_tx
 *
 * This function finds available write requests, calls
 * gs_send_packet to fill these packets with data, and
 * continues until either there are no more write requests
 * available or no more data to send.  This function is
 * run whenever data arrives or write requests are available.
 *
 * Context: caller owns port_lock; port_usb is non-null.
 */
static int gs_start_tx(struct gs_port *port)
/*
__releases(&port->port_lock)
__acquires(&port->port_lock)
*/
{
	struct list_head	*pool = &port->write_pool;
	struct usb_ep		*in = port->port_usb->in;
	int			status = 0;
	static long		prev_len;
	bool			do_tty_wake = false;

	while (!list_empty(pool)) {
		struct usb_request	*req;
		int			len;

		req = list_entry(pool->next, struct usb_request, list);
		len = gs_send_packet(port, req->buf, TX_BUF_SIZE);
		if (len == 0) {
			/* Queue zero length packet */
#if 1 /* QCT_SBA */
			if (prev_len && (prev_len % in->maxpacket == 0)) {
#else
			if (prev_len & (prev_len % in->maxpacket == 0)) {
#endif
				req->length = 0;
				list_del(&req->list);
				spin_unlock(&port->port_lock);
				status = usb_ep_queue(in, req, GFP_ATOMIC);
				spin_lock(&port->port_lock);
				if (!port->port_usb) {
					gs_free_req(in, req);
					break;
				}
				if (status) {
					printk(KERN_ERR "%s: %s err %d\n",
							__func__, "queue", status);
					list_add(&req->list, pool);
				}
				prev_len = 0;
			}
			wake_up_interruptible(&port->drain_wait);
			break;
		}
		do_tty_wake = true;

		req->length = len;
		list_del(&req->list);

		pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
				port->port_num, len, *((u8 *)req->buf),
				*((u8 *)req->buf+1), *((u8 *)req->buf+2));

		/* Drop lock while we call out of driver; completions
		 * could be issued while we do so.  Disconnection may
		 * happen too; maybe immediately before we queue this!
		 *
		 * NOTE that we may keep sending data for a while after
		 * the TTY closed (dev->ioport->port_tty is NULL).
		 */
		spin_unlock(&port->port_lock);
		status = usb_ep_queue(in, req, GFP_ATOMIC);
		spin_lock(&port->port_lock);

		/*
		 * If port_usb is NULL, gserial disconnect is called
		 * while the spinlock is dropped and all requests are
		 * freed.  Free the current request here.
		 */
		if (!port->port_usb) {
			do_tty_wake = false;
			gs_free_req(in, req);
			break;
		}
		if (status) {
			pr_debug("%s: %s %s err %d\n",
					__func__, "queue", in->name, status);
			list_add(&req->list, pool);
			break;
		}
		prev_len = req->length;
	}

	if (do_tty_wake && port->port_tty)
		tty_wakeup(port->port_tty);
	return status;
}

/*
 * Context: caller owns port_lock, and port_usb is set
 */
static unsigned gs_start_rx(struct gs_port *port)
/*
__releases(&port->port_lock)
__acquires(&port->port_lock)
*/
{
	struct list_head	*pool = &port->read_pool;
	struct usb_ep		*out = port->port_usb->out;
	unsigned		started = 0;

	while (!list_empty(pool)) {
		struct usb_request	*req;
		int			status;
		struct tty_struct	*tty;

		/* no more rx if closed */
		tty = port->port_tty;
		if (!tty)
			break;

		req = list_entry(pool->next, struct usb_request, list);
		list_del(&req->list);
		req->length = RX_BUF_SIZE;

		/* drop lock while we call out; the controller driver
		 * may need to call us back (e.g. for disconnect)
		 */
		spin_unlock(&port->port_lock);
		status = usb_ep_queue(out, req, GFP_ATOMIC);
		spin_lock(&port->port_lock);

		/*
		 * If port_usb is NULL, gserial disconnect is called
		 * while the spinlock is dropped and all requests are
		 * freed.  Free the current request here.
		 */
		if (!port->port_usb) {
			started = 0;
			gs_free_req(out, req);
			break;
		}
		if (status) {
			pr_debug("%s: %s %s err %d\n",
					__func__, "queue", out->name, status);
			list_add(&req->list, pool);
			break;
		}
		started++;
	}
	return started;
}

/*
 * RX work queue takes data out of the RX queue and hands it up to the TTY
 * layer until it refuses to take any more data (or is throttled back).
 * Then it issues reads for any further data.
 *
 * If the RX queue becomes full enough that no usb_request is queued,
 * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
 * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
 * can be buffered before the TTY layer's buffers (currently 64 KB).
 */
static void gs_rx_push(struct work_struct *w)
{
	struct gs_port		*port = container_of(w, struct gs_port, push);
	struct tty_struct	*tty;
	struct list_head	*queue = &port->read_queue;
	bool			disconnect = false;
	bool			do_push = false;

	/* hand any queued data to the tty */
	spin_lock_irq(&port->port_lock);
	tty = port->port_tty;
	while (!list_empty(queue)) {
		struct usb_request	*req;

		req = list_first_entry(queue, struct usb_request, list);

		/* discard data if tty was closed */
		if (!tty)
			goto recycle;

		/* leave data queued if tty was rx throttled */
		if (test_bit(TTY_THROTTLED, &tty->flags))
			break;

		switch (req->status) {
		case -ESHUTDOWN:
			disconnect = true;
			pr_vdebug(PREFIX "%d: shutdown\n", port->port_num);
			break;

		default:
			/* presumably a transient fault */
			pr_warning(PREFIX "%d: unexpected RX status %d\n",
					port->port_num, req->status);
			/* FALLTHROUGH */
		case 0:
			/* normal completion */
			break;
		}

		/* push data to (open) tty */
		if (req->actual) {
			char		*packet = req->buf;
			unsigned	size = req->actual;
			unsigned	n;
			int		count;

			/* we may have pushed part of this packet already... */
			n = port->n_read;
			if (n) {
				packet += n;
				size -= n;
			}

			count = tty_insert_flip_string(tty, packet, size);
			if (count)
				do_push = true;
			if (count != size) {
				/* stop pushing; TTY layer can't handle more */
				port->n_read += count;
				pr_vdebug(PREFIX "%d: rx block %d/%d\n",
						port->port_num,
						count, req->actual);
				break;
			}
			port->n_read = 0;
		}
recycle:
		list_move(&req->list, &port->read_pool);
	}

	/* Push from tty to ldisc; this is immediate with low_latency, and
	 * may trigger callbacks to this driver ... so drop the spinlock.
	 */
	if (tty && do_push) {
		spin_unlock_irq(&port->port_lock);
		tty_flip_buffer_push(tty);
		wake_up_interruptible(&tty->read_wait);
		spin_lock_irq(&port->port_lock);
		/* tty may have been closed */
		tty = port->port_tty;
	}

	/* We want our data queue to become empty ASAP, keeping data
	 * in the tty and ldisc (not here).  If we couldn't push any
	 * this time around, there may be trouble unless there's an
	 * implicit tty_unthrottle() call on its way...
	 *
	 * REVISIT we should probably add a timer to keep the work queue
	 * from starving ... but it's not clear that case ever happens.
	 */
	if (!list_empty(queue) && tty) {
		if (!test_bit(TTY_THROTTLED, &tty->flags)) {
			if (do_push)
				queue_work(gserial_wq, &port->push);
			else
				pr_warning(PREFIX "%d: RX not scheduled?\n",
					port->port_num);
		}
	}

	/* If we're still connected, refill the USB RX queue. */
	if (!disconnect && port->port_usb)
		gs_start_rx(port);

	spin_unlock_irq(&port->port_lock);
}

static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gs_port	*port = ep->driver_data;
	unsigned long	flags;

	/* Queue all received data until the tty layer is ready for it. */
	spin_lock_irqsave(&port->port_lock, flags);
	list_add_tail(&req->list, &port->read_queue);
	queue_work(gserial_wq, &port->push);
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gs_port	*port = ep->driver_data;
	unsigned long	flags;

	spin_lock_irqsave(&port->port_lock, flags);
	list_add(&req->list, &port->write_pool);

	switch (req->status) {
	default:
		/* presumably a transient fault */
		pr_warning("%s: unexpected %s status %d\n",
				__func__, ep->name, req->status);
		/* FALL THROUGH */
	case 0:
		/* normal completion */
		if (port->port_usb)
			gs_start_tx(port);
		break;

	case -ESHUTDOWN:
		/* disconnect */
		pr_vdebug("%s: %s shutdown\n", __func__, ep->name);
		break;
	}

	spin_unlock_irqrestore(&port->port_lock, flags);
}

static void gs_free_requests(struct usb_ep *ep, struct list_head *head)
{
	struct usb_request	*req;

	while (!list_empty(head)) {
		req = list_entry(head->next, struct usb_request, list);
		list_del(&req->list);
		gs_free_req(ep, req);
	}
}

static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
		int num, int size,
		void (*fn)(struct usb_ep *, struct usb_request *))
{
	int			i;
	struct usb_request	*req;

	/* Pre-allocate up to QUEUE_SIZE transfers, but if we can't
	 * do quite that many this time, don't fail ... we just won't
	 * be as speedy as we might otherwise be.
	 */
	for (i = 0; i < num; i++) {
		req = gs_alloc_req(ep, size, GFP_ATOMIC);
		if (!req)
			return list_empty(head) ? -ENOMEM : 0;
		req->complete = fn;
		list_add_tail(&req->list, head);
	}
	return 0;
}