irqreturn_t cppi_interrupt(int irq, void *dev_id)
{
	struct musb		*musb = dev_id;
	struct cppi		*cppi;
	void __iomem		*tibase;
	struct musb_hw_ep	*hw_ep = NULL;
	u32			rx, tx;
	int			i, index;
	unsigned long		uninitialized_var(flags);

	cppi = container_of(musb->dma_controller, struct cppi, controller);
	if (cppi->irq)
		spin_lock_irqsave(&musb->lock, flags);

	tibase = musb->ctrl_base;

	tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG);
	rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG);

	if (!tx && !rx) {
		if (cppi->irq)
			spin_unlock_irqrestore(&musb->lock, flags);
		return IRQ_NONE;
	}

	musb_dbg(musb, "CPPI IRQ Tx%x Rx%x", tx, rx);

	/* process TX channels */
	for (index = 0; tx; tx = tx >> 1, index++) {
		struct cppi_channel		*tx_ch;
		struct cppi_tx_stateram __iomem	*tx_ram;
		bool				completed = false;
		struct cppi_descriptor		*bd;

		if (!(tx & 1))
			continue;

		tx_ch = cppi->tx + index;
		tx_ram = tx_ch->state_ram;

		/* FIXME need a cppi_tx_scan() routine, which
		 * can also be called from abort code
		 */

		cppi_dump_tx(5, tx_ch, "/E");

		bd = tx_ch->head;

		/*
		 * If head is null, an abort interrupt may need to be
		 * acknowledged.
		 */
		if (NULL == bd) {
			musb_dbg(musb, "null BD");
			musb_writel(&tx_ram->tx_complete, 0, 0);
			continue;
		}

		/* run through all completed BDs */
		for (i = 0; !completed && bd && i < NUM_TXCHAN_BD;
				i++, bd = bd->next) {
			u16	len;

			/* catch latest BD writes from CPPI */
			rmb();
			if (bd->hw_options & CPPI_OWN_SET)
				break;

			musb_dbg(musb, "C/TXBD %p n %x b %x off %x opt %x",
					bd, bd->hw_next, bd->hw_bufp,
					bd->hw_off_len, bd->hw_options);

			len = bd->hw_off_len & CPPI_BUFFER_LEN_MASK;
			tx_ch->channel.actual_len += len;

			tx_ch->last_processed = bd;

			/* write completion register to acknowledge
			 * processing of completed BDs, and possibly
			 * release the IRQ; EOQ might not be set ...
			 *
			 * REVISIT use the same ack strategy as rx
			 *
			 * REVISIT have observed bit 18 set; huh??
			 */
			/* if ((bd->hw_options & CPPI_EOQ_MASK)) */
			musb_writel(&tx_ram->tx_complete, 0, bd->dma);

			/* stop scanning on end-of-segment */
			if (bd->hw_next == 0)
				completed = true;
		}

		/* on end of segment, maybe go to next one */
		if (completed) {
			/* cppi_dump_tx(4, tx_ch, "/complete"); */

			/* transfer more, or report completion */
			if (tx_ch->offset >= tx_ch->buf_len) {
				tx_ch->head = NULL;
				tx_ch->tail = NULL;
				tx_ch->channel.status = MUSB_DMA_STATUS_FREE;

				hw_ep = tx_ch->hw_ep;

				musb_dma_completion(musb, index + 1, 1);
			} else {
				/* Bigger transfer than we could fit in
				 * that first batch of descriptors...
				 */
				cppi_next_tx_segment(musb, tx_ch);
			}
		} else
			tx_ch->head = bd;
	}

	/* Start processing the RX block */
	for (index = 0; rx; rx = rx >> 1, index++) {

		if (rx & 1) {
			struct cppi_channel		*rx_ch;

			rx_ch = cppi->rx + index;

			/* let incomplete dma segments finish */
			if (!cppi_rx_scan(cppi, index))
				continue;

			/* start another dma segment if needed */
			if (rx_ch->channel.actual_len != rx_ch->buf_len
					&& rx_ch->channel.actual_len
						== rx_ch->offset) {
				cppi_next_rx_segment(musb, rx_ch, 1);
				continue;
			}

			/* all segments completed! */
			rx_ch->channel.status = MUSB_DMA_STATUS_FREE;

			hw_ep = rx_ch->hw_ep;

			core_rxirq_disable(tibase, index + 1);
			musb_dma_completion(musb, index + 1, 0);
		}
	}

	/* write to CPPI EOI register to re-enable interrupts */
	musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0);

	if (cppi->irq)
		spin_unlock_irqrestore(&musb->lock, flags);

	return IRQ_HANDLED;
}
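
/*
 * Shut down one channel: for TX, run the CPPI teardown handshake and
 * flush the endpoint FIFO; for RX, stop further packet requests, let any
 * in-flight DMA drain, reset the channel state ram, and free the queued
 * buffer descriptors.  Either way the channel ends up marked
 * MUSB_DMA_STATUS_FREE with its buffer bookkeeping cleared.
 */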
/*
 * Context: controller irqlocked, endpoint selected
 */
static int cppi_channel_abort(struct dma_channel *channel)
{
	struct cppi_channel	*cppi_ch;
	struct cppi		*controller;
	void __iomem		*mbase;
	void __iomem		*tibase;
	void __iomem		*regs;
	u32			value;
	struct cppi_descriptor	*queue;

	cppi_ch = container_of(channel, struct cppi_channel, channel);

	controller = cppi_ch->controller;

	switch (channel->status) {
	case MUSB_DMA_STATUS_BUS_ABORT:
	case MUSB_DMA_STATUS_CORE_ABORT:
		/* from RX or TX fault irq handler */
	case MUSB_DMA_STATUS_BUSY:
		/* the hardware needs shutting down */
		regs = cppi_ch->hw_ep->regs;
		break;
	case MUSB_DMA_STATUS_UNKNOWN:
	case MUSB_DMA_STATUS_FREE:
		return 0;
	default:
		return -EINVAL;
	}

	if (!cppi_ch->transmit && cppi_ch->head)
		cppi_dump_rxq(3, "/abort", cppi_ch);

	mbase = controller->mregs;
	tibase = controller->tibase;

	queue = cppi_ch->head;
	cppi_ch->head = NULL;
	cppi_ch->tail = NULL;

	/* REVISIT should rely on caller having done this,
	 * and caller should rely on us not changing it.
	 * peripheral code is safe ... check host too.
	 */
	musb_ep_select(mbase, cppi_ch->index + 1);

	if (cppi_ch->transmit) {
		struct cppi_tx_stateram __iomem *tx_ram;
		/* REVISIT put timeouts on these controller handshakes */

		cppi_dump_tx(6, cppi_ch, " (teardown)");

		/* teardown DMA engine then usb core */
		do {
			value = musb_readl(tibase, DAVINCI_TXCPPI_TEAR_REG);
		} while (!(value & CPPI_TEAR_READY));
		musb_writel(tibase, DAVINCI_TXCPPI_TEAR_REG, cppi_ch->index);

		tx_ram = cppi_ch->state_ram;
		do {
			value = musb_readl(&tx_ram->tx_complete, 0);
		} while (0xFFFFFFFC != value);

		/* FIXME clean up the transfer state ... here?
		 * the completion routine should get called with
		 * an appropriate status code.
		 */

		value = musb_readw(regs, MUSB_TXCSR);
		value &= ~MUSB_TXCSR_DMAENAB;
		value |= MUSB_TXCSR_FLUSHFIFO;
		musb_writew(regs, MUSB_TXCSR, value);
		musb_writew(regs, MUSB_TXCSR, value);

		/*
		 * 1. Write to completion Ptr value 0x1 (bit 0 set)
		 *    (write back mode)
		 * 2. Wait for abort interrupt and then put the channel in
		 *    compare mode by writing 1 to the tx_complete register.
		 */
		cppi_reset_tx(tx_ram, 1);
		cppi_ch->head = NULL;
		musb_writel(&tx_ram->tx_complete, 0, 1);
		cppi_dump_tx(5, cppi_ch, " (done teardown)");

		/* REVISIT tx side _should_ clean up the same way
		 * as the RX side ... this does no cleanup at all!
		 */

	} else /* RX */ {
		u16	csr;

		/* NOTE: docs don't guarantee any of this works ... we
		 * expect that if the usb core stops telling the cppi core
		 * to pull more data from it, then it'll be safe to flush
		 * current RX DMA state iff any pending fifo transfer is done.
		 */

		core_rxirq_disable(tibase, cppi_ch->index + 1);

		/* for host, ensure ReqPkt is never set again */
		if (is_host_active(cppi_ch->controller->controller.musb)) {
			value = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
			value &= ~((0x3) << (cppi_ch->index * 2));
			musb_writel(tibase, DAVINCI_AUTOREQ_REG, value);
		}

		csr = musb_readw(regs, MUSB_RXCSR);

		/* for host, clear (just) ReqPkt at end of current packet(s) */
		if (is_host_active(cppi_ch->controller->controller.musb)) {
			csr |= MUSB_RXCSR_H_WZC_BITS;
			csr &= ~MUSB_RXCSR_H_REQPKT;
		} else
			csr |= MUSB_RXCSR_P_WZC_BITS;

		/* clear dma enable */
		csr &= ~(MUSB_RXCSR_DMAENAB);
		musb_writew(regs, MUSB_RXCSR, csr);
		csr = musb_readw(regs, MUSB_RXCSR);

		/* Quiesce: wait for current dma to finish (if not cleanup).
		 * We can't use bit zero of stateram->rx_sop, since that
		 * refers to an entire "DMA packet" not just emptying the
		 * current fifo.  Most segments need multiple usb packets.
		 */
		if (channel->status == MUSB_DMA_STATUS_BUSY)
			udelay(50);

		/* scan the current list, reporting any data that was
		 * transferred and acking any IRQ
		 */
		cppi_rx_scan(controller, cppi_ch->index);

		/* clobber the existing state once it's idle
		 *
		 * NOTE: arguably, we should also wait for all the other
		 * RX channels to quiesce (how??) and then temporarily
		 * disable RXCPPI_CTRL_REG ... but it seems that we can
		 * rely on the controller restarting from state ram, with
		 * only RXCPPI_BUFCNT state being bogus.  BUFCNT will
		 * correct itself after the next DMA transfer though.
		 *
		 * REVISIT does using rndis mode change that?
		 */
		cppi_reset_rx(cppi_ch->state_ram);

		/* next DMA request _should_ load cppi head ptr */

		/* ... we don't "free" that list, only mutate it in place. */
		cppi_dump_rx(5, cppi_ch, " (done abort)");

		/* clean up previously pending bds */
		cppi_bd_free(cppi_ch, cppi_ch->last_processed);
		cppi_ch->last_processed = NULL;

		while (queue) {
			struct cppi_descriptor	*tmp = queue->next;

			cppi_bd_free(cppi_ch, queue);
			queue = tmp;
		}
	}

	channel->status = MUSB_DMA_STATUS_FREE;
	cppi_ch->buf_dma = 0;
	cppi_ch->offset = 0;
	cppi_ch->buf_len = 0;
	cppi_ch->maxpacket = 0;
	return 0;
}
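
/*
 * cppi_completion() performs the same TX/RX completion scan as
 * cppi_interrupt() above, but takes the masked status bits from the
 * caller and does no locking of its own.  Its TX path additionally
 * holds off reporting completion in host role until the FIFO has had
 * a chance to empty.
 */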
void cppi_completion(struct musb *musb, u32 rx, u32 tx)
{
	void __iomem		*tibase;
	int			i, index;
	struct cppi		*cppi;
	struct musb_hw_ep	*hw_ep = NULL;

	cppi = container_of(musb->dma_controller, struct cppi, controller);

	tibase = musb->ctrl_base;

	/* process TX channels */
	for (index = 0; tx; tx = tx >> 1, index++) {
		struct cppi_channel		*tx_ch;
		struct cppi_tx_stateram __iomem	*tx_ram;
		bool				completed = false;
		struct cppi_descriptor		*bd;

		if (!(tx & 1))
			continue;

		tx_ch = cppi->tx + index;
		tx_ram = tx_ch->state_ram;

		/* FIXME need a cppi_tx_scan() routine, which
		 * can also be called from abort code
		 */

		cppi_dump_tx(5, tx_ch, "/E");

		bd = tx_ch->head;

		if (NULL == bd) {
			DBG(1, "null BD\n");
			continue;
		}

		/* run through all completed BDs */
		for (i = 0; !completed && bd && i < NUM_TXCHAN_BD;
				i++, bd = bd->next) {
			u16	len;

			/* catch latest BD writes from CPPI */
			rmb();
			if (bd->hw_options & CPPI_OWN_SET)
				break;

			DBG(5, "C/TXBD %p n %x b %x off %x opt %x\n",
					bd, bd->hw_next, bd->hw_bufp,
					bd->hw_off_len, bd->hw_options);

			len = bd->hw_off_len & CPPI_BUFFER_LEN_MASK;
			tx_ch->channel.actual_len += len;

			tx_ch->last_processed = bd;

			/* write completion register to acknowledge
			 * processing of completed BDs, and possibly
			 * release the IRQ; EOQ might not be set ...
			 *
			 * REVISIT use the same ack strategy as rx
			 *
			 * REVISIT have observed bit 18 set; huh??
			 */
			/* if ((bd->hw_options & CPPI_EOQ_MASK)) */
			musb_writel(&tx_ram->tx_complete, 0, bd->dma);

			/* stop scanning on end-of-segment */
			if (bd->hw_next == 0)
				completed = true;
		}

		/* on end of segment, maybe go to next one */
		if (completed) {
			/* cppi_dump_tx(4, tx_ch, "/complete"); */

			/* transfer more, or report completion */
			if (tx_ch->offset >= tx_ch->buf_len) {
				tx_ch->head = NULL;
				tx_ch->tail = NULL;
				tx_ch->channel.status = MUSB_DMA_STATUS_FREE;

				hw_ep = tx_ch->hw_ep;

				/* Peripheral role never repurposes the
				 * endpoint, so immediate completion is
				 * safe.  Host role waits for the fifo
				 * to empty (TXPKTRDY irq) before going
				 * to the next queued bulk transfer.
				 */
				if (is_host_active(cppi->musb)) {
#if 0
					/* WORKAROUND because we may
					 * not always get TXPKTRDY ...
					 */
					int	csr;

					csr = musb_readw(hw_ep->regs,
							MUSB_TXCSR);
					if (csr & MUSB_TXCSR_TXPKTRDY)
#endif
						completed = false;
				}
				if (completed)
					musb_dma_completion(musb, index + 1, 1);

			} else {
				/* Bigger transfer than we could fit in
				 * that first batch of descriptors...
				 */
				cppi_next_tx_segment(musb, tx_ch);
			}
		} else
			tx_ch->head = bd;
	}

	/* Start processing the RX block */
	for (index = 0; rx; rx = rx >> 1, index++) {

		if (rx & 1) {
			struct cppi_channel		*rx_ch;

			rx_ch = cppi->rx + index;

			/* let incomplete dma segments finish */
			if (!cppi_rx_scan(cppi, index))
				continue;

			/* start another dma segment if needed */
			if (rx_ch->channel.actual_len != rx_ch->buf_len
					&& rx_ch->channel.actual_len
						== rx_ch->offset) {
				cppi_next_rx_segment(musb, rx_ch, 1);
				continue;
			}

			/* all segments completed! */
			rx_ch->channel.status = MUSB_DMA_STATUS_FREE;

			hw_ep = rx_ch->hw_ep;

			core_rxirq_disable(tibase, index + 1);
			musb_dma_completion(musb, index + 1, 0);
		}
	}

	/* write to CPPI EOI register to re-enable interrupts */
	musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0);
}