Example #1
static void musb_sysdma_completion(int lch, u16 ch_status, void *data)
{
	u32 addr;
	unsigned long flags;

	struct dma_channel *channel;

	struct musb_dma_channel *musb_channel =
					(struct musb_dma_channel *) data;
	struct musb_dma_controller *controller = musb_channel->controller;
	struct musb *musb = controller->private_data;
	channel = &musb_channel->channel;

	DBG(2, "lch = 0x%d, ch_status = 0x%x\n", lch, ch_status);
	spin_lock_irqsave(&musb->lock, flags);

	addr = (u32) omap_get_dma_dst_pos(musb_channel->sysdma_channel);
	if (musb_channel->len == 0)
		channel->actual_len = 0;
	else
		channel->actual_len = addr - musb_channel->start_addr;

	DBG(2, "ch %p, 0x%x -> 0x%x (%d / %d) %s\n",
		channel, musb_channel->start_addr, addr,
		channel->actual_len, musb_channel->len,
		(channel->actual_len < musb_channel->len) ?
		"=> reconfig 0 " : " => complete");

	channel->status = MUSB_DMA_STATUS_FREE;
	musb_dma_completion(musb, musb_channel->epnum, musb_channel->transmit);

	spin_unlock_irqrestore(&musb->lock, flags);
}
static void musb_sysdma_completion(int lch, u16 ch_status, void *data)
{
	u32 addr;
	unsigned long flags;

	struct dma_channel *channel;

	struct musb_dma_channel *musb_channel =
					(struct musb_dma_channel *) data;
	struct musb_dma_controller *controller = musb_channel->controller;
	struct musb *musb = controller->private_data;
	void __iomem *mbase = controller->base;

	channel = &musb_channel->channel;

	DBG(2, "lch = 0x%d, ch_status = 0x%x\n", lch, ch_status);
	spin_lock_irqsave(&musb->lock, flags);

	if (musb_channel->transmit)
		addr = (u32) omap_get_dma_src_pos(musb_channel->sysdma_channel);
	else
		addr = (u32) omap_get_dma_dst_pos(musb_channel->sysdma_channel);

	if (musb_channel->len == 0)
		channel->actual_len = 0;
	else
		channel->actual_len = addr - musb_channel->start_addr;

	DBG(2, "ch %p, 0x%x -> 0x%x (%d / %d) %s\n",
		channel, musb_channel->start_addr, addr,
		channel->actual_len, musb_channel->len,
		(channel->actual_len < musb_channel->len) ?
		"=> reconfig 0 " : " => complete");

	channel->status = MUSB_DMA_STATUS_FREE;

	/* completed */
	if ((musb_channel->transmit) && (channel->desired_mode == 0)
		&& (channel->actual_len == musb_channel->max_packet_sz)) {

		u8  epnum  = musb_channel->epnum;
		int offset = MUSB_EP_OFFSET(musb, epnum, MUSB_TXCSR);
		u16 txcsr;

		/*
		 * In mode 0 the controller does not set TXPKTRDY by
		 * itself, so set it here to send out the packet.
		 */
		musb_ep_select(musb, mbase, epnum);
		txcsr = musb_readw(mbase, offset);
		txcsr |=  MUSB_TXCSR_TXPKTRDY;
		musb_writew(mbase, offset, txcsr);
	}

	musb_dma_completion(musb, musb_channel->epnum, musb_channel->transmit);

	spin_unlock_irqrestore(&musb->lock, flags);
}
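
Both revisions compute the byte count the same way: the current DMA position minus the transfer's start address, with a guard for zero-length transfers. The second revision additionally reads the source-side position for TX (the system DMA consumes memory on that side) and the destination-side position for RX. A standalone, hedged sketch of the progress computation, with made-up addresses:

#include <assert.h>
#include <stdint.h>

/* Hedged, standalone version of the progress computation used in both
 * revisions above: bytes moved = current DMA position - start address,
 * guarded so a zero-length transfer reports zero.
 */
static uint32_t dma_actual_len(uint32_t pos, uint32_t start, uint32_t len)
{
	return len ? pos - start : 0;
}

int main(void)
{
	/* a 512-byte transfer that has moved 200 bytes so far */
	assert(dma_actual_len(0x80001000u + 200, 0x80001000u, 512) == 200);
	/* zero-length transfer reports no progress */
	assert(dma_actual_len(0x80001000u, 0x80001000u, 0) == 0);
	return 0;
}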
Example #3
/* Work function invoked from DMA callback to handle rx transfers. */
static void ux500_dma_callback(void *private_data)
{
	struct dma_channel *channel = private_data;
	struct ux500_dma_channel *ux500_channel = channel->private_data;
	struct musb_hw_ep *hw_ep = ux500_channel->hw_ep;
	struct musb *musb = hw_ep->musb;
	unsigned long flags;

	dev_dbg(musb->controller, "DMA rx transfer done on hw_ep=%d\n",
		hw_ep->epnum);

	spin_lock_irqsave(&musb->lock, flags);
	ux500_channel->channel.actual_len = ux500_channel->cur_len;
	ux500_channel->channel.status = MUSB_DMA_STATUS_FREE;
	musb_dma_completion(musb, hw_ep->epnum, ux500_channel->is_tx);
	spin_unlock_irqrestore(&musb->lock, flags);
}
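
Stripped of controller specifics, every completion callback in these examples has the same shape as this ux500 one: take musb->lock, record the byte count, mark the channel free, and hand off to musb_dma_completion(). A minimal, hedged sketch of that skeleton; my_dma_callback and struct my_channel are hypothetical names, while the rest are the MUSB types and helpers used throughout these examples:

/* Hedged sketch of the common completion-callback skeleton.  The
 * 'struct my_channel' fields (hw_ep, cur_len, is_tx) are hypothetical
 * stand-ins for whatever per-channel state a real driver keeps.
 */
static void my_dma_callback(void *private_data)
{
	struct dma_channel *channel = private_data;
	struct my_channel *ch = channel->private_data;
	struct musb *musb = ch->hw_ep->musb;
	unsigned long flags;

	spin_lock_irqsave(&musb->lock, flags);
	channel->actual_len = ch->cur_len;		/* bytes moved */
	channel->status = MUSB_DMA_STATUS_FREE;		/* channel reusable */
	musb_dma_completion(musb, ch->hw_ep->epnum, ch->is_tx);
	spin_unlock_irqrestore(&musb->lock, flags);
}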
static irqreturn_t dma_controller_irq(int irq, void *private_data)
{
	struct musb_dma_controller *controller = private_data;
	struct musb *musb = controller->private_data;
	struct musb_dma_channel *musb_channel;
	struct dma_channel *channel;

	void __iomem *mbase = controller->base;

	irqreturn_t retval = IRQ_NONE;

	unsigned long flags;

	u8 bchannel;
	u8 int_hsdma;

	u32 addr;
	u16 csr, count;

	spin_lock_irqsave(&musb->lock, flags);

	int_hsdma = musb_readb(mbase, MUSB_HSDMA_INTR);
	if (!int_hsdma) {
		DBG(2, "spurious DMA irq\n");

		for (bchannel = 0; bchannel < MUSB_HSDMA_CHANNELS; bchannel++) {
			musb_channel = (struct musb_dma_channel *)
					&(controller->channel[bchannel]);
			channel = &musb_channel->channel;
			if (channel->status == MUSB_DMA_STATUS_BUSY) {
				count = musb_readw(mbase,
					MUSB_HSDMA_CHANNEL_OFFSET(bchannel,
							MUSB_HSDMA_COUNT));
				if (count == 0)
					int_hsdma |= (1 << bchannel);
			}
		}

		DBG(2, "int_hsdma = 0x%x\n", int_hsdma);

		if (!int_hsdma)
			goto done;
	}

	for (bchannel = 0; bchannel < MUSB_HSDMA_CHANNELS; bchannel++) {
		if (int_hsdma & (1 << bchannel)) {
			musb_channel = (struct musb_dma_channel *)
					&(controller->channel[bchannel]);
			channel = &musb_channel->channel;

			csr = musb_readw(mbase,
					MUSB_HSDMA_CHANNEL_OFFSET(bchannel,
							MUSB_HSDMA_CONTROL));

			if (csr & (1 << MUSB_HSDMA_BUSERROR_SHIFT)) {
				musb_channel->channel.status =
					MUSB_DMA_STATUS_BUS_ABORT;
			} else {
				u8 devctl;

				addr = musb_read_hsdma_addr(mbase,
						bchannel);
				channel->actual_len = addr
					- musb_channel->start_addr;

				DBG(2, "ch %p, 0x%x -> 0x%x (%d / %d) %s\n",
					channel, musb_channel->start_addr,
					addr, channel->actual_len,
					musb_channel->len,
					(channel->actual_len
						< musb_channel->len) ?
					"=> reconfig 0" : "=> complete");

				devctl = musb_readb(mbase, MUSB_DEVCTL);

				channel->status = MUSB_DMA_STATUS_FREE;

				if (musb_channel->transmit)
					controller->tx_active &=
							~(1 << bchannel);
				else
					controller->rx_active &=
							~(1 << bchannel);

				/* completed */
				if ((devctl & MUSB_DEVCTL_HM)
					&& (musb_channel->transmit)
					&& ((channel->desired_mode == 0)
					    || (channel->actual_len &
					    (musb_channel->max_packet_sz - 1)))
				    ) {
					u8  epnum  = musb_channel->epnum;
					int offset = MUSB_EP_OFFSET(epnum,
								    MUSB_TXCSR);
					u16 txcsr;

					/*
					 * The programming guide says that we
					 * must clear DMAENAB before DMAMODE.
					 */
					musb_ep_select(mbase, epnum);
					txcsr = musb_readw(mbase, offset);
					txcsr &= ~(MUSB_TXCSR_DMAENAB
							| MUSB_TXCSR_AUTOSET);
					musb_writew(mbase, offset, txcsr);
					/* Send out the packet */
					txcsr &= ~MUSB_TXCSR_DMAMODE;
					txcsr |=  MUSB_TXCSR_TXPKTRDY;
					musb_writew(mbase, offset, txcsr);
				}
				musb_dma_completion(musb, musb_channel->epnum,
						    musb_channel->transmit);
			}
		}
	}

#ifdef CONFIG_BLACKFIN
	/* Clear DMA interrupt flags */
	musb_writeb(mbase, MUSB_HSDMA_INTR, int_hsdma);
#endif

	retval = IRQ_HANDLED;
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return retval;
}
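
The channel loop above treats int_hsdma as a bitmask with one bit per DMA channel. A standalone, compilable reduction of that walk (MUSB_HSDMA_CHANNELS is 8 in the driver; the printf stands in for the real per-channel service code):

#include <stdint.h>
#include <stdio.h>

#define MUSB_HSDMA_CHANNELS	8

/* Hedged, standalone version of the interrupt-status walk above:
 * each set bit in the HSDMA interrupt register selects one channel.
 */
static void service_channels(uint8_t int_hsdma)
{
	for (uint8_t ch = 0; ch < MUSB_HSDMA_CHANNELS; ch++)
		if (int_hsdma & (1 << ch))
			printf("service channel %u\n", ch);
}

int main(void)
{
	service_channels(0x05);	/* channels 0 and 2 pending */
	return 0;
}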
Example #5
irqreturn_t cppi_interrupt(int irq, void *dev_id)
{
	struct musb		*musb = dev_id;
	struct cppi		*cppi;
	void __iomem		*tibase;
	struct musb_hw_ep	*hw_ep = NULL;
	u32			rx, tx;
	int			i, index;
	unsigned long		uninitialized_var(flags);

	cppi = container_of(musb->dma_controller, struct cppi, controller);
	if (cppi->irq)
		spin_lock_irqsave(&musb->lock, flags);

	tibase = musb->ctrl_base;

	tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG);
	rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG);

	if (!tx && !rx) {
		if (cppi->irq)
			spin_unlock_irqrestore(&musb->lock, flags);
		return IRQ_NONE;
	}

	musb_dbg(musb, "CPPI IRQ Tx%x Rx%x", tx, rx);

	/* process TX channels */
	for (index = 0; tx; tx = tx >> 1, index++) {
		struct cppi_channel		*tx_ch;
		struct cppi_tx_stateram __iomem	*tx_ram;
		bool				completed = false;
		struct cppi_descriptor		*bd;

		if (!(tx & 1))
			continue;

		tx_ch = cppi->tx + index;
		tx_ram = tx_ch->state_ram;

		/* FIXME  need a cppi_tx_scan() routine, which
		 * can also be called from abort code
		 */

		cppi_dump_tx(5, tx_ch, "/E");

		bd = tx_ch->head;

		/*
		 * If the head is NULL, this could mean that an abort
		 * interrupt needs to be acknowledged.
		 */
		if (NULL == bd) {
			musb_dbg(musb, "null BD");
			musb_writel(&tx_ram->tx_complete, 0, 0);
			continue;
		}

		/* run through all completed BDs */
		for (i = 0; !completed && bd && i < NUM_TXCHAN_BD;
				i++, bd = bd->next) {
			u16	len;

			/* catch latest BD writes from CPPI */
			rmb();
			if (bd->hw_options & CPPI_OWN_SET)
				break;

			musb_dbg(musb, "C/TXBD %p n %x b %x off %x opt %x",
					bd, bd->hw_next, bd->hw_bufp,
					bd->hw_off_len, bd->hw_options);

			len = bd->hw_off_len & CPPI_BUFFER_LEN_MASK;
			tx_ch->channel.actual_len += len;

			tx_ch->last_processed = bd;

			/* write completion register to acknowledge
			 * processing of completed BDs, and possibly
			 * release the IRQ; EOQ might not be set ...
			 *
			 * REVISIT use the same ack strategy as rx
			 *
			 * REVISIT have observed bit 18 set; huh??
			 */
			/* if ((bd->hw_options & CPPI_EOQ_MASK)) */
				musb_writel(&tx_ram->tx_complete, 0, bd->dma);

			/* stop scanning on end-of-segment */
			if (bd->hw_next == 0)
				completed = true;
		}

		/* on end of segment, maybe go to next one */
		if (completed) {
			/* cppi_dump_tx(4, tx_ch, "/complete"); */

			/* transfer more, or report completion */
			if (tx_ch->offset >= tx_ch->buf_len) {
				tx_ch->head = NULL;
				tx_ch->tail = NULL;
				tx_ch->channel.status = MUSB_DMA_STATUS_FREE;

				hw_ep = tx_ch->hw_ep;

				musb_dma_completion(musb, index + 1, 1);

			} else {
				/* Bigger transfer than we could fit in
				 * that first batch of descriptors...
				 */
				cppi_next_tx_segment(musb, tx_ch);
			}
		} else
			tx_ch->head = bd;
	}

	/* Start processing the RX block */
	for (index = 0; rx; rx = rx >> 1, index++) {

		if (rx & 1) {
			struct cppi_channel		*rx_ch;

			rx_ch = cppi->rx + index;

			/* let incomplete dma segments finish */
			if (!cppi_rx_scan(cppi, index))
				continue;

			/* start another dma segment if needed */
			if (rx_ch->channel.actual_len != rx_ch->buf_len
					&& rx_ch->channel.actual_len
						== rx_ch->offset) {
				cppi_next_rx_segment(musb, rx_ch, 1);
				continue;
			}

			/* all segments completed! */
			rx_ch->channel.status = MUSB_DMA_STATUS_FREE;

			hw_ep = rx_ch->hw_ep;

			core_rxirq_disable(tibase, index + 1);
			musb_dma_completion(musb, index + 1, 0);
		}
	}

	/* write to CPPI EOI register to re-enable interrupts */
	musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0);

	if (cppi->irq)
		spin_unlock_irqrestore(&musb->lock, flags);

	return IRQ_HANDLED;
}
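
The inner TX loop is a classic descriptor-ring consume: stop at the first buffer descriptor the DMA engine still owns, total the bytes of the completed ones, and flag end-of-segment when hw_next is 0. A standalone model of just that scan; the struct and bit positions are simplified stand-ins for the driver's struct cppi_descriptor, not the real hardware layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define OWN_SET		(1u << 31)	/* illustrative: DMA still owns BD */
#define LEN_MASK	0xffffu		/* illustrative: completed bytes */

struct bd {
	uint32_t	hw_next;	/* nonzero = another BD follows */
	uint32_t	hw_off_len;
	uint32_t	hw_options;
	struct bd	*next;
};

/* Walk the list, totalling bytes of BDs the hardware has released. */
static uint32_t scan_tx(struct bd *bd, bool *completed)
{
	uint32_t total = 0;

	*completed = false;
	for (; bd; bd = bd->next) {
		if (bd->hw_options & OWN_SET)
			break;			/* hardware not done yet */
		total += bd->hw_off_len & LEN_MASK;
		if (bd->hw_next == 0) {		/* end of this segment */
			*completed = true;
			break;
		}
	}
	return total;
}

int main(void)
{
	struct bd b1 = { .hw_next = 0, .hw_off_len = 64 };
	struct bd b0 = { .hw_next = 1, .hw_off_len = 512, .next = &b1 };
	bool done;

	printf("%u bytes, done=%d\n", scan_tx(&b0, &done), done);
	return 0;
}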
Example #6
static irqreturn_t dma_controller_irq(int irq, void *private_data)
{
	struct musb_dma_controller *controller = private_data;
	struct musb *musb = controller->private_data;
	struct musb_dma_channel *musb_channel;
	struct dma_channel *channel;

	void __iomem *mbase = controller->base;

	irqreturn_t retval = IRQ_NONE;

	unsigned long flags;

	u8 bchannel;
	u8 int_hsdma;

	u32 addr;
	u16 csr;

	spin_lock_irqsave(&musb->lock, flags);

	int_hsdma = musb_readb(mbase, MUSB_HSDMA_INTR);
	if (!int_hsdma)
		goto done;

	for (bchannel = 0; bchannel < MUSB_HSDMA_CHANNELS; bchannel++) {
		if (int_hsdma & (1 << bchannel)) {
			musb_channel = (struct musb_dma_channel *)
					&(controller->channel[bchannel]);
			channel = &musb_channel->channel;

			csr = musb_readw(mbase,
					MUSB_HSDMA_CHANNEL_OFFSET(bchannel,
							MUSB_HSDMA_CONTROL));

			if (csr & (1 << MUSB_HSDMA_BUSERROR_SHIFT)) {
				musb_channel->channel.status =
					MUSB_DMA_STATUS_BUS_ABORT;
			} else {
				u8 devctl;

				addr = musb_read_hsdma_addr(mbase,
						bchannel);
				channel->actual_len = addr
					- musb_channel->start_addr;

				DBG(2, "ch %p, 0x%x -> 0x%x (%d / %d) %s\n",
					channel, musb_channel->start_addr,
					addr, channel->actual_len,
					musb_channel->len,
					(channel->actual_len
						< musb_channel->len) ?
					"=> reconfig 0" : "=> complete");

				devctl = musb_readb(mbase, MUSB_DEVCTL);

				channel->status = MUSB_DMA_STATUS_FREE;

				/* completed */
				if ((devctl & MUSB_DEVCTL_HM)
					&& (musb_channel->transmit)
					&& ((channel->desired_mode == 0)
					    || (channel->actual_len &
					    (musb_channel->max_packet_sz - 1)))
					 ) {
					/* Send out the packet */
					musb_ep_select(mbase,
						musb_channel->epnum);
					musb_writew(mbase, MUSB_EP_OFFSET(
							musb_channel->epnum,
							MUSB_TXCSR),
						MUSB_TXCSR_TXPKTRDY);
				} else {
					musb_dma_completion(
						musb,
						musb_channel->epnum,
						musb_channel->transmit);
				}
			}
		}
	}

#ifdef CONFIG_BLACKFIN
	/* Clear DMA interrupt flags */
	musb_writeb(mbase, MUSB_HSDMA_INTR, int_hsdma);
#endif

	retval = IRQ_HANDLED;
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return retval;
}
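
The host-mode TX branch above fires when the transfer either used mode 0 or ended on a short packet. The short-packet test relies on max_packet_sz being a power of two; a standalone sketch:

#include <assert.h>
#include <stdint.h>

/* Hedged sketch of the short-packet test in the branch above: with a
 * power-of-two max_packet_sz, (len & (max_packet_sz - 1)) is nonzero
 * exactly when the transfer does not end on a packet boundary, i.e.
 * the last packet is short and TXPKTRDY must be set by hand.
 */
static int ends_with_short_packet(uint32_t len, uint32_t max_packet_sz)
{
	return (len & (max_packet_sz - 1)) != 0;
}

int main(void)
{
	assert(!ends_with_short_packet(1024, 512));	/* exact multiple */
	assert(ends_with_short_packet(1000, 512));	/* 488-byte tail */
	return 0;
}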
Example #7
/*
 * See also musb_dma_completion in plat_uds.c and musb_g_[tx|rx]() in
 * musb_gadget.c.
 */
static void tusb_omap_dma_cb(int lch, u16 ch_status, void *data)
{
	struct dma_channel	*channel = (struct dma_channel *)data;
	struct tusb_omap_dma_ch	*chdat = to_chdat(channel);
	struct tusb_omap_dma	*tusb_dma = chdat->tusb_dma;
	struct musb		*musb = chdat->musb;
	struct device		*dev = musb->controller;
	struct musb_hw_ep	*hw_ep = chdat->hw_ep;
	void __iomem		*ep_conf = hw_ep->conf;
	void __iomem		*mbase = musb->mregs;
	unsigned long		remaining, flags, pio;
	int			ch;

	spin_lock_irqsave(&musb->lock, flags);

	if (tusb_dma->multichannel)
		ch = chdat->ch;
	else
		ch = tusb_dma->ch;

	if (ch_status != OMAP_DMA_BLOCK_IRQ)
		printk(KERN_ERR "TUSB DMA error status: %i\n", ch_status);

	dev_dbg(musb->controller, "ep%i %s dma callback ch: %i status: %x\n",
		chdat->epnum, chdat->tx ? "tx" : "rx",
		ch, ch_status);

	if (chdat->tx)
		remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET);
	else
		remaining = musb_readl(ep_conf, TUSB_EP_RX_OFFSET);

	remaining = TUSB_EP_CONFIG_XFR_SIZE(remaining);

	/* HW issue #10: XFR_SIZE may get corrupt on DMA (both async & sync) */
	if (unlikely(remaining > chdat->transfer_len)) {
		dev_dbg(musb->controller, "Corrupt %s dma ch%i XFR_SIZE: 0x%08lx\n",
			chdat->tx ? "tx" : "rx", chdat->ch,
			remaining);
		remaining = 0;
	}

	channel->actual_len = chdat->transfer_len - remaining;
	pio = chdat->len - channel->actual_len;

	dev_dbg(musb->controller, "DMA remaining %lu/%u\n", remaining, chdat->transfer_len);

	/* Transfer remaining 1 - 31 bytes */
	if (pio > 0 && pio < 32) {
		u8	*buf;

		dev_dbg(musb->controller, "Using PIO for remaining %lu bytes\n", pio);
		buf = phys_to_virt((u32)chdat->dma_addr) + chdat->transfer_len;
		if (chdat->tx) {
			dma_unmap_single(dev, chdat->dma_addr,
						chdat->transfer_len,
						DMA_TO_DEVICE);
			musb_write_fifo(hw_ep, pio, buf);
		} else {
			dma_unmap_single(dev, chdat->dma_addr,
						chdat->transfer_len,
						DMA_FROM_DEVICE);
			musb_read_fifo(hw_ep, pio, buf);
		}
		channel->actual_len += pio;
	}

	if (!tusb_dma->multichannel)
		tusb_omap_free_shared_dmareq(chdat);

	channel->status = MUSB_DMA_STATUS_FREE;

	/* Handle only RX callbacks here. TX callbacks must be handled based
	 * on the TUSB DMA status interrupt.
	 * REVISIT: Use both TUSB DMA status interrupt and OMAP DMA callback
	 * interrupt for RX and TX.
	 */
	if (!chdat->tx)
		musb_dma_completion(musb, chdat->epnum, chdat->tx);

	/* We must terminate short tx transfers manually by setting TXPKTRDY.
	 * REVISIT: This same problem may occur with other MUSB dma as well.
	 * Easy to test with g_ether by pinging the MUSB board with ping -s54.
	 */
	if ((chdat->transfer_len < chdat->packet_sz)
			|| (chdat->transfer_len % chdat->packet_sz != 0)) {
		u16	csr;

		if (chdat->tx) {
			dev_dbg(musb->controller, "terminating short tx packet\n");
			musb_ep_select(mbase, chdat->epnum);
			csr = musb_readw(hw_ep->regs, MUSB_TXCSR);
			csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY
				| MUSB_TXCSR_P_WZC_BITS;
			musb_writew(hw_ep->regs, MUSB_TXCSR, csr);
		}
	}

	spin_unlock_irqrestore(&musb->lock, flags);
}
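
The PIO tail above exists because this DMA path appears to move data in 32-byte units, leaving up to 31 bytes for musb_write_fifo()/musb_read_fifo() to finish. A standalone sketch of that split; the 32-byte granularity is inferred from the "remaining 1 - 31 bytes" comment, since the masking itself is not shown in this excerpt:

#include <stdint.h>
#include <stdio.h>

/* Hedged sketch: splitting a transfer into a DMA part and a 1..31 byte
 * PIO tail, as the callback above finishes through the FIFO.  The
 * 32-byte DMA granularity is an assumption; values are illustrative.
 */
int main(void)
{
	uint32_t len = 117;			/* requested transfer */
	uint32_t dma_len = len & ~0x1fu;	/* 96 bytes via DMA */
	uint32_t pio = len - dma_len;		/* 21 bytes via FIFO PIO */

	printf("dma=%u pio=%u\n", dma_len, pio);
	return 0;
}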
static void cppi41_dma_callback(void *private_data)
{
	struct dma_channel *channel = private_data;
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct musb *musb = hw_ep->musb;
	unsigned long flags;
	struct dma_tx_state txstate;
	u32 transferred;

	spin_lock_irqsave(&musb->lock, flags);

	dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie,
			&txstate);
	transferred = cppi41_channel->prog_len - txstate.residue;
	cppi41_channel->transferred += transferred;

	dev_dbg(musb->controller, "DMA transfer done on hw_ep=%d bytes=%d/%d\n",
		hw_ep->epnum, cppi41_channel->transferred,
		cppi41_channel->total_len);

	update_rx_toggle(cppi41_channel);

	if (cppi41_channel->transferred == cppi41_channel->total_len ||
			transferred < cppi41_channel->packet_sz) {

		/* done, complete */
		cppi41_channel->channel.actual_len =
			cppi41_channel->transferred;
		cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
		musb_dma_completion(musb, hw_ep->epnum, cppi41_channel->is_tx);
	} else {
		/* next iteration, reload */
		struct dma_chan *dc = cppi41_channel->dc;
		struct dma_async_tx_descriptor *dma_desc;
		enum dma_transfer_direction direction;
		u16 csr;
		u32 remain_bytes;
		void __iomem *epio = cppi41_channel->hw_ep->regs;

		cppi41_channel->buf_addr += cppi41_channel->packet_sz;

		remain_bytes = cppi41_channel->total_len;
		remain_bytes -= cppi41_channel->transferred;
		remain_bytes = min(remain_bytes, cppi41_channel->packet_sz);
		cppi41_channel->prog_len = remain_bytes;

		direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV
			: DMA_DEV_TO_MEM;
		dma_desc = dmaengine_prep_slave_single(dc,
				cppi41_channel->buf_addr,
				remain_bytes,
				direction,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (WARN_ON(!dma_desc)) {
			spin_unlock_irqrestore(&musb->lock, flags);
			return;
		}

		dma_desc->callback = cppi41_dma_callback;
		dma_desc->callback_param = channel;
		cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
		dma_async_issue_pending(dc);

		if (!cppi41_channel->is_tx) {
			csr = musb_readw(epio, MUSB_RXCSR);
			csr |= MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}
	spin_unlock_irqrestore(&musb->lock, flags);
}
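
The reload branch follows the usual dmaengine re-arm sequence: prepare a descriptor, attach the callback, submit, and issue. A condensed, hedged sketch of just that sequence; dc, buf, len, dir and channel are assumed to be prepared by the caller, and dmaengine_submit() is the standard wrapper around the direct desc->tx_submit() call used above:

/* Hedged sketch: re-arming a dmaengine slave transfer for the next
 * chunk, as the reload branch of cppi41_dma_callback does.  Error
 * handling is trimmed; a real driver would also keep the cookie
 * returned by dmaengine_submit() for dmaengine_tx_status().
 */
struct dma_async_tx_descriptor *desc;

desc = dmaengine_prep_slave_single(dc, buf, len, dir,
				   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!WARN_ON(!desc)) {
	desc->callback = cppi41_dma_callback;	/* re-enter on completion */
	desc->callback_param = channel;
	dmaengine_submit(desc);			/* queue the descriptor */
	dma_async_issue_pending(dc);		/* kick the engine if idle */
}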
void cppi_completion(struct musb *musb, u32 rx, u32 tx)
{
	void __iomem		*tibase;
	int			i, index;
	struct cppi		*cppi;
	struct musb_hw_ep	*hw_ep = NULL;

	cppi = container_of(musb->dma_controller, struct cppi, controller);

	tibase = musb->ctrl_base;

	/* process TX channels */
	for (index = 0; tx; tx = tx >> 1, index++) {
		struct cppi_channel		*tx_ch;
		struct cppi_tx_stateram __iomem	*tx_ram;
		bool				completed = false;
		struct cppi_descriptor		*bd;

		if (!(tx & 1))
			continue;

		tx_ch = cppi->tx + index;
		tx_ram = tx_ch->state_ram;

		/* FIXME  need a cppi_tx_scan() routine, which
		 * can also be called from abort code
		 */

		cppi_dump_tx(5, tx_ch, "/E");

		bd = tx_ch->head;

		if (NULL == bd) {
			DBG(1, "null BD\n");
			continue;
		}

		/* run through all completed BDs */
		for (i = 0; !completed && bd && i < NUM_TXCHAN_BD;
				i++, bd = bd->next) {
			u16	len;

			/* catch latest BD writes from CPPI */
			rmb();
			if (bd->hw_options & CPPI_OWN_SET)
				break;

			DBG(5, "C/TXBD %p n %x b %x off %x opt %x\n",
					bd, bd->hw_next, bd->hw_bufp,
					bd->hw_off_len, bd->hw_options);

			len = bd->hw_off_len & CPPI_BUFFER_LEN_MASK;
			tx_ch->channel.actual_len += len;

			tx_ch->last_processed = bd;

			/* write completion register to acknowledge
			 * processing of completed BDs, and possibly
			 * release the IRQ; EOQ might not be set ...
			 *
			 * REVISIT use the same ack strategy as rx
			 *
			 * REVISIT have observed bit 18 set; huh??
			 */
			/* if ((bd->hw_options & CPPI_EOQ_MASK)) */
				musb_writel(&tx_ram->tx_complete, 0, bd->dma);

			/* stop scanning on end-of-segment */
			if (bd->hw_next == 0)
				completed = true;
		}

		/* on end of segment, maybe go to next one */
		if (completed) {
			/* cppi_dump_tx(4, tx_ch, "/complete"); */

			/* transfer more, or report completion */
			if (tx_ch->offset >= tx_ch->buf_len) {
				tx_ch->head = NULL;
				tx_ch->tail = NULL;
				tx_ch->channel.status = MUSB_DMA_STATUS_FREE;

				hw_ep = tx_ch->hw_ep;

				/* Peripheral role never repurposes the
				 * endpoint, so immediate completion is
				 * safe.  Host role waits for the fifo
				 * to empty (TXPKTRDY irq) before going
				 * to the next queued bulk transfer.
				 */
				if (is_host_active(cppi->musb)) {
#if 0
					/* WORKAROUND because we may
					 * not always get TXPKTRDY ...
					 */
					int	csr;

					csr = musb_readw(hw_ep->regs,
						MUSB_TXCSR);
					if (csr & MUSB_TXCSR_TXPKTRDY)
#endif
						completed = false;
				}
				if (completed)
					musb_dma_completion(musb, index + 1, 1);

			} else {
				/* Bigger transfer than we could fit in
				 * that first batch of descriptors...
				 */
				cppi_next_tx_segment(musb, tx_ch);
			}
		} else
			tx_ch->head = bd;
	}

	/* Start processing the RX block */
	for (index = 0; rx; rx = rx >> 1, index++) {

		if (rx & 1) {
			struct cppi_channel		*rx_ch;

			rx_ch = cppi->rx + index;

			/* let incomplete dma segments finish */
			if (!cppi_rx_scan(cppi, index))
				continue;

			/* start another dma segment if needed */
			if (rx_ch->channel.actual_len != rx_ch->buf_len
					&& rx_ch->channel.actual_len
						== rx_ch->offset) {
				cppi_next_rx_segment(musb, rx_ch, 1);
				continue;
			}

			/* all segments completed! */
			rx_ch->channel.status = MUSB_DMA_STATUS_FREE;

			hw_ep = rx_ch->hw_ep;

			core_rxirq_disable(tibase, index + 1);
			musb_dma_completion(musb, index + 1, 0);
		}
	}

	/* write to CPPI EOI register to re-enable interrupts */
	musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0);
}