/** Read to request from FIFO (max read == bytes in fifo)
 * Return: 0 = still running, 1 = completed, negative = errno
 * NOTE: INDEX register must be set for EP
 */
static int read_fifo(struct lh7a40x_ep *ep, struct lh7a40x_request *req)
{
	u32 csr;
	u8 *buf;
	unsigned bufferspace, count, is_short;
	/* byte-wide pops of the OUT FIFO; volatile so reads aren't merged */
	volatile u32 *fifo = (volatile u32 *)ep->fifo;

	/* make sure there's a packet in the FIFO. */
	csr = usb_read(ep->csr1);
	if (!(csr & USB_OUT_CSR1_OUT_PKT_RDY)) {
		DEBUG("%s: Packet NOT ready!\n", __func__);
		return -EINVAL;
	}

	/* destination is the unfilled tail of the request buffer */
	buf = req->req.buf + req->req.actual;
	prefetchw(buf);
	bufferspace = req->req.length - req->req.actual;

	/* read all bytes from this packet */
	count = usb_read(USB_OUT_FIFO_WC1);
	/* credit only what fits; any excess is discarded below */
	req->req.actual += min(count, bufferspace);
	/* a packet shorter than maxpacket terminates the transfer */
	is_short = (count < ep->ep.maxpacket);

	DEBUG("read %s %02x, %d bytes%s req %p %d/%d\n",
	      ep->ep.name, csr, count,
	      is_short ? "/S" : "", req, req->req.actual, req->req.length);

	/* drain the FIFO one byte per 32-bit read */
	while (likely(count-- != 0)) {
		u8 byte = (u8) (*fifo & 0xff);

		if (unlikely(bufferspace == 0)) {
			/* this happens when the driver's buffer
			 * is smaller than what the host sent.
			 * discard the extra data.
			 */
			if (req->req.status != -EOVERFLOW)
				printk(KERN_WARNING "%s overflow %d\n",
				       ep->ep.name, count);
			req->req.status = -EOVERFLOW;
		} else {
			*buf++ = byte;
			bufferspace--;
		}
	}

	/* ack the packet only after the FIFO has been fully drained */
	usb_clear(USB_OUT_CSR1_OUT_PKT_RDY, ep->csr1);

	/* completion */
	if (is_short || req->req.actual == req->req.length) {
		done(ep, req, 0);
		usb_set(USB_OUT_CSR1_FIFO_FLUSH, ep->csr1);

		/* no more queued requests -> stop taking PIO interrupts */
		if (list_empty(&ep->queue))
			pio_irq_disable(ep_index(ep));
		return 1;
	}

	/* finished that packet. the next one may be waiting... */
	return 0;
}
/** Write request to FIFO (max write == maxp size) * Return: 0 = still running, 1 = completed, negative = errno * NOTE: INDEX register must be set for EP */ static int write_fifo(struct lh7a40x_ep *ep, struct lh7a40x_request *req) { u32 max; u32 csr; max = le16_to_cpu(ep->desc->wMaxPacketSize); csr = usb_read(ep->csr1); DEBUG("CSR: %x %d\n", csr, csr & USB_IN_CSR1_FIFO_NOT_EMPTY); if (!(csr & USB_IN_CSR1_FIFO_NOT_EMPTY)) { unsigned count; int is_last, is_short; count = write_packet(ep, req, max); usb_set(USB_IN_CSR1_IN_PKT_RDY, ep->csr1); /* last packet is usually short (or a zlp) */ if (unlikely(count != max)) is_last = is_short = 1; else { if (likely(req->req.length != req->req.actual) || req->req.zero) is_last = 0; else is_last = 1; /* interrupt/iso maxpacket may not fill the fifo */ is_short = unlikely(max < ep_maxpacket(ep)); } DEBUG("%s: wrote %s %d bytes%s%s %d left %p\n", __FUNCTION__, ep->ep.name, count, is_last ? "/L" : "", is_short ? "/S" : "", req->req.length - req->req.actual, req); /* requests complete when all IN data is in the FIFO */ if (is_last) { done(ep, req, 0); if (list_empty(&ep->queue)) { pio_irq_disable(ep_index(ep)); } return 1; } } else { DEBUG("Hmm.. %d ep FIFO is not empty!\n", ep_index(ep)); } return 0; }
/*
 * nuke - dequeue ALL requests
 *
 * Flushes the endpoint FIFO, completes every queued request with
 * @status, and disables the endpoint's PIO interrupt if the endpoint
 * is enabled (has a descriptor).  Caller must hold off interrupts.
 */
void nuke(struct lh7a40x_ep *ep, int status)
{
	struct lh7a40x_request *req;

	/* __func__ (C99) matches read_fifo; __FUNCTION__ is a
	 * deprecated GCC spelling flagged by checkpatch */
	DEBUG("%s, %p\n", __func__, ep);

	/* Flush FIFO */
	flush(ep);

	/* called with irqs blocked */
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct lh7a40x_request,
				 queue);
		done(ep, req, status);
	}

	/* Disable IRQ if EP is enabled (has descriptor) */
	if (ep->desc)
		pio_irq_disable(ep_index(ep));
}