Example #1
/**
 * cppi_channel_program - program channel for data transfer
 * @ch: the channel
 * @maxpacket: max packet size
 * @mode: For RX, 1 unless the usb protocol driver promised to treat
 *	all short reads as errors and kick in high level fault recovery.
 *	For TX, ignored because of RNDIS mode races/glitches.
 * @dma_addr: dma address of buffer
 * @len: length of buffer
 * Context: controller irqlocked
 */
static int cppi_channel_program(struct dma_channel *ch,
		u16 maxpacket, u8 mode,
		dma_addr_t dma_addr, u32 len)
{
	struct cppi_channel	*cppi_ch;
	struct cppi		*controller;
	struct musb		*musb;

	cppi_ch = container_of(ch, struct cppi_channel, channel);
	controller = cppi_ch->controller;
	musb = controller->controller.musb;

	switch (ch->status) {
	case MUSB_DMA_STATUS_BUS_ABORT:
	case MUSB_DMA_STATUS_CORE_ABORT:
		/* fault irq handler should have handled cleanup */
		WARNING("%cX DMA%d not cleaned up after abort!\n",
				cppi_ch->transmit ? 'T' : 'R',
				cppi_ch->index);
		/* WARN_ON(1); */
		break;
	case MUSB_DMA_STATUS_BUSY:
		WARNING("program active channel?  %cX DMA%d\n",
				cppi_ch->transmit ? 'T' : 'R',
				cppi_ch->index);
		/* WARN_ON(1); */
		break;
	case MUSB_DMA_STATUS_UNKNOWN:
		musb_dbg(musb, "%cX DMA%d not allocated!",
				cppi_ch->transmit ? 'T' : 'R',
				cppi_ch->index);
		/* FALLTHROUGH */
	case MUSB_DMA_STATUS_FREE:
		break;
	}

	ch->status = MUSB_DMA_STATUS_BUSY;

	/* set transfer parameters, then queue up its first segment */
	cppi_ch->buf_dma = dma_addr;
	cppi_ch->offset = 0;
	cppi_ch->maxpacket = maxpacket;
	cppi_ch->buf_len = len;
	cppi_ch->channel.actual_len = 0;

	/* TX channel? or RX? */
	if (cppi_ch->transmit)
		cppi_next_tx_segment(musb, cppi_ch);
	else
		cppi_next_rx_segment(musb, cppi_ch, mode);

	return true;
}
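
For context, the MUSB core does not call this routine directly: it is installed as the channel_program hook of the generic struct dma_controller (see musb_dma.h), and callers reach it through musb->dma_controller. Below is a minimal caller sketch under that assumption; the start_dma name and its parameters are illustrative placeholders, not driver API:

static bool start_dma(struct musb *musb, struct dma_channel *channel,
		u16 packet_sz, dma_addr_t buf_dma, u32 buf_len)
{
	struct dma_controller *dc = musb->dma_controller;

	/* mode 0 here assumes the protocol driver treats short RX
	 * reads as errors; pass 1 otherwise (see kerneldoc above).
	 * mode is ignored for TX channels.
	 */
	return dc->channel_program(channel, packet_sz, 0, buf_dma, buf_len);
}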
Example #2
irqreturn_t cppi_interrupt(int irq, void *dev_id)
{
	struct musb		*musb = dev_id;
	struct cppi		*cppi;
	void __iomem		*tibase;
	struct musb_hw_ep	*hw_ep = NULL;
	u32			rx, tx;
	int			i, index;
	unsigned long		uninitialized_var(flags);

	cppi = container_of(musb->dma_controller, struct cppi, controller);
	if (cppi->irq)
		spin_lock_irqsave(&musb->lock, flags);

	tibase = musb->ctrl_base;

	tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG);
	rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG);

	if (!tx && !rx) {
		if (cppi->irq)
			spin_unlock_irqrestore(&musb->lock, flags);
		return IRQ_NONE;
	}

	musb_dbg(musb, "CPPI IRQ Tx%x Rx%x", tx, rx);

	/* process TX channels */
	for (index = 0; tx; tx = tx >> 1, index++) {
		struct cppi_channel		*tx_ch;
		struct cppi_tx_stateram __iomem	*tx_ram;
		bool				completed = false;
		struct cppi_descriptor		*bd;

		if (!(tx & 1))
			continue;

		tx_ch = cppi->tx + index;
		tx_ram = tx_ch->state_ram;

		/* FIXME  need a cppi_tx_scan() routine, which
		 * can also be called from abort code
		 */

		cppi_dump_tx(5, tx_ch, "/E");

		bd = tx_ch->head;

		/*
		 * If head is NULL, this could mean that an abort interrupt
		 * needs to be acknowledged.
		 */
		if (NULL == bd) {
			musb_dbg(musb, "null BD");
			musb_writel(&tx_ram->tx_complete, 0, 0);
			continue;
		}

		/* run through all completed BDs */
		for (i = 0; !completed && bd && i < NUM_TXCHAN_BD;
				i++, bd = bd->next) {
			u16	len;

			/* catch latest BD writes from CPPI */
			rmb();
			if (bd->hw_options & CPPI_OWN_SET)
				break;

			musb_dbg(musb, "C/TXBD %p n %x b %x off %x opt %x",
					bd, bd->hw_next, bd->hw_bufp,
					bd->hw_off_len, bd->hw_options);

			len = bd->hw_off_len & CPPI_BUFFER_LEN_MASK;
			tx_ch->channel.actual_len += len;

			tx_ch->last_processed = bd;

			/* write completion register to acknowledge
			 * processing of completed BDs, and possibly
			 * release the IRQ; EOQ might not be set ...
			 *
			 * REVISIT use the same ack strategy as rx
			 *
			 * REVISIT have observed bit 18 set; huh??
			 */
			/* if ((bd->hw_options & CPPI_EOQ_MASK)) */
				musb_writel(&tx_ram->tx_complete, 0, bd->dma);

			/* stop scanning on end-of-segment */
			if (bd->hw_next == 0)
				completed = true;
		}

		/* on end of segment, maybe go to next one */
		if (completed) {
			/* cppi_dump_tx(4, tx_ch, "/complete"); */

			/* transfer more, or report completion */
			if (tx_ch->offset >= tx_ch->buf_len) {
				tx_ch->head = NULL;
				tx_ch->tail = NULL;
				tx_ch->channel.status = MUSB_DMA_STATUS_FREE;

				hw_ep = tx_ch->hw_ep;

				musb_dma_completion(musb, index + 1, 1);

			} else {
				/* Bigger transfer than we could fit in
				 * that first batch of descriptors...
				 */
				cppi_next_tx_segment(musb, tx_ch);
			}
		} else
			tx_ch->head = bd;
	}

	/* Start processing the RX block */
	for (index = 0; rx; rx = rx >> 1, index++) {

		if (rx & 1) {
			struct cppi_channel		*rx_ch;

			rx_ch = cppi->rx + index;

			/* let incomplete dma segments finish */
			if (!cppi_rx_scan(cppi, index))
				continue;

			/* start another dma segment if needed */
			if (rx_ch->channel.actual_len != rx_ch->buf_len
					&& rx_ch->channel.actual_len
						== rx_ch->offset) {
				cppi_next_rx_segment(musb, rx_ch, 1);
				continue;
			}

			/* all segments completed! */
			rx_ch->channel.status = MUSB_DMA_STATUS_FREE;

			hw_ep = rx_ch->hw_ep;

			core_rxirq_disable(tibase, index + 1);
			musb_dma_completion(musb, index + 1, 0);
		}
	}

	/* write to CPPI EOI register to re-enable interrupts */
	musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0);

	if (cppi->irq)
		spin_unlock_irqrestore(&musb->lock, flags);

	return IRQ_HANDLED;
}
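
Note that cppi_interrupt only takes musb->lock when cppi->irq is nonzero, i.e. when CPPI owns a dedicated interrupt line; otherwise it expects to run from the main MUSB handler with the lock already held. A hedged sketch of the dedicated-line registration follows; the cppi_setup_irq name, the "cppi" string, and the zero flags are illustrative assumptions, not copied from the driver:

static int cppi_setup_irq(struct musb *musb, struct cppi *controller, int irq)
{
	int ret;

	/* dev_id is the struct musb, matching the handler's cast above */
	ret = request_irq(irq, cppi_interrupt, 0, "cppi", musb);
	if (ret)
		return ret;

	/* a nonzero ->irq makes the handler do its own locking */
	controller->irq = irq;
	return 0;
}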
Example #3
void cppi_completion(struct musb *musb, u32 rx, u32 tx)
{
	void __iomem		*tibase;
	int			i, index;
	struct cppi		*cppi;
	struct musb_hw_ep	*hw_ep = NULL;

	cppi = container_of(musb->dma_controller, struct cppi, controller);

	tibase = musb->ctrl_base;

	/* process TX channels */
	for (index = 0; tx; tx = tx >> 1, index++) {
		struct cppi_channel		*tx_ch;
		struct cppi_tx_stateram __iomem	*tx_ram;
		bool				completed = false;
		struct cppi_descriptor		*bd;

		if (!(tx & 1))
			continue;

		tx_ch = cppi->tx + index;
		tx_ram = tx_ch->state_ram;

		/* FIXME  need a cppi_tx_scan() routine, which
		 * can also be called from abort code
		 */

		cppi_dump_tx(5, tx_ch, "/E");

		bd = tx_ch->head;

		if (NULL == bd) {
			DBG(1, "null BD\n");
			continue;
		}

		/* run through all completed BDs */
		for (i = 0; !completed && bd && i < NUM_TXCHAN_BD;
				i++, bd = bd->next) {
			u16	len;

			/* catch latest BD writes from CPPI */
			rmb();
			if (bd->hw_options & CPPI_OWN_SET)
				break;

			DBG(5, "C/TXBD %p n %x b %x off %x opt %x\n",
					bd, bd->hw_next, bd->hw_bufp,
					bd->hw_off_len, bd->hw_options);

			len = bd->hw_off_len & CPPI_BUFFER_LEN_MASK;
			tx_ch->channel.actual_len += len;

			tx_ch->last_processed = bd;

			/* write completion register to acknowledge
			 * processing of completed BDs, and possibly
			 * release the IRQ; EOQ might not be set ...
			 *
			 * REVISIT use the same ack strategy as rx
			 *
			 * REVISIT have observed bit 18 set; huh??
			 */
			/* if ((bd->hw_options & CPPI_EOQ_MASK)) */
				musb_writel(&tx_ram->tx_complete, 0, bd->dma);

			/* stop scanning on end-of-segment */
			if (bd->hw_next == 0)
				completed = true;
		}

		/* on end of segment, maybe go to next one */
		if (completed) {
			/* cppi_dump_tx(4, tx_ch, "/complete"); */

			/* transfer more, or report completion */
			if (tx_ch->offset >= tx_ch->buf_len) {
				tx_ch->head = NULL;
				tx_ch->tail = NULL;
				tx_ch->channel.status = MUSB_DMA_STATUS_FREE;

				hw_ep = tx_ch->hw_ep;

				/* Peripheral role never repurposes the
				 * endpoint, so immediate completion is
				 * safe.  Host role waits for the fifo
				 * to empty (TXPKTRDY irq) before going
				 * to the next queued bulk transfer.
				 */
				if (is_host_active(cppi->musb)) {
#if 0
					/* WORKAROUND because we may
					 * not always get TXKPTRDY ...
					 */
					int	csr;

					csr = musb_readw(hw_ep->regs,
						MUSB_TXCSR);
					if (csr & MUSB_TXCSR_TXPKTRDY)
#endif
						completed = false;
				}
				if (completed)
					musb_dma_completion(musb, index + 1, 1);

			} else {
				/* Bigger transfer than we could fit in
				 * that first batch of descriptors...
				 */
				cppi_next_tx_segment(musb, tx_ch);
			}
		} else
			tx_ch->head = bd;
	}

	/* Start processing the RX block */
	for (index = 0; rx; rx = rx >> 1, index++) {

		if (rx & 1) {
			struct cppi_channel		*rx_ch;

			rx_ch = cppi->rx + index;

			/* let incomplete dma segments finish */
			if (!cppi_rx_scan(cppi, index))
				continue;

			/* start another dma segment if needed */
			if (rx_ch->channel.actual_len != rx_ch->buf_len
					&& rx_ch->channel.actual_len
						== rx_ch->offset) {
				cppi_next_rx_segment(musb, rx_ch, 1);
				continue;
			}

			/* all segments completed! */
			rx_ch->channel.status = MUSB_DMA_STATUS_FREE;

			hw_ep = rx_ch->hw_ep;

			core_rxirq_disable(tibase, index + 1);
			musb_dma_completion(musb, index + 1, 0);
		}
	}

	/* write to CPPI EOI register to re-enable interrupts */
	musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0);
}
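
Unlike Example #2, this older variant neither reads the interrupt-status registers nor takes musb->lock; both were left to the platform glue's interrupt handler. A sketch of that division of labor, reusing the DAVINCI_*CPPI_MASKED_REG reads from Example #2 (the glue_cppi_poll name is illustrative, and the caller is assumed to already hold musb->lock):

static void glue_cppi_poll(struct musb *musb)
{
	void __iomem	*tibase = musb->ctrl_base;
	u32		rx, tx;

	tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG);
	rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG);

	/* walk completed BDs only when some channel signaled */
	if (tx || rx)
		cppi_completion(musb, rx, tx);
}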