Example #1
/*
 * Allocate a CPPI Channel for DMA.  With CPPI, channels are bound to
 * each transfer direction of a non-control endpoint, so allocating
 * (and deallocating) is mostly a way to notice bad housekeeping on
 * the software side.  We assume the irqs are always active.
 */
static struct dma_channel *
cppi_channel_allocate(struct dma_controller *c,
		struct musb_hw_ep *ep, u8 transmit)
{
	struct cppi		*controller;
	u8			index;
	struct cppi_channel	*cppi_ch;
	void __iomem		*tibase;
	struct musb		*musb;

	controller = container_of(c, struct cppi, controller);
	tibase = controller->tibase;
	musb = c->musb;

	/* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */
	index = ep->epnum - 1;

	/* return the corresponding CPPI Channel Handle, and
	 * probably disable the non-CPPI irq until we need it.
	 */
	if (transmit) {
		if (index >= ARRAY_SIZE(controller->tx)) {
			musb_dbg(musb, "no %cX%d CPPI channel", 'T', index);
			return NULL;
		}
		cppi_ch = controller->tx + index;
	} else {
		if (index >= ARRAY_SIZE(controller->rx)) {
			musb_dbg(musb, "no %cX%d CPPI channel", 'R', index);
			return NULL;
		}
		cppi_ch = controller->rx + index;
		core_rxirq_disable(tibase, ep->epnum);
	}

	/* REVISIT make this an error later once the same driver code works
	 * with the other DMA engine too
	 */
	if (cppi_ch->hw_ep)
		musb_dbg(musb, "re-allocating DMA%d %cX channel %p",
				index, transmit ? 'T' : 'R', cppi_ch);
	cppi_ch->hw_ep = ep;
	cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;
	cppi_ch->channel.max_len = 0x7fffffff;

	musb_dbg(musb, "Allocate CPPI%d %cX", index, transmit ? 'T' : 'R');
	return &cppi_ch->channel;
}
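
For orientation, a minimal sketch (not from the driver) of how host-side code typically obtains one of these channels through the generic struct dma_controller ops; get_channel and its WARN_ON policy are hypothetical, and error handling is reduced to the NULL check:

/* Illustrative only: c->channel_alloc routes to cppi_channel_allocate
 * above.  A NULL return means the endpoint has no CPPI channel
 * (e.g. endpoint index out of range).
 */
static struct dma_channel *get_channel(struct dma_controller *c,
		struct musb_hw_ep *hw_ep, u8 is_tx)
{
	struct dma_channel *dc = c->channel_alloc(c, hw_ep, is_tx);

	/* a freshly allocated channel starts out FREE and unprogrammed */
	if (dc)
		WARN_ON(dc->status != MUSB_DMA_STATUS_FREE);
	return dc;
}
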
Example #2
/* Context: controller irqlocked */
static void
cppi_dump_tx(int level, struct cppi_channel *c, const char *tag)
{
	void __iomem			*base = c->controller->mregs;
	struct cppi_tx_stateram __iomem	*tx = c->state_ram;

	musb_ep_select(base, c->index + 1);

	musb_dbg(c->controller->controller.musb,
		"TX DMA%d%s: csr %04x, "
		"H%08x S%08x C%08x %08x, "
		"F%08x L%08x .. %08x",
		c->index, tag,
		musb_readw(c->hw_ep->regs, MUSB_TXCSR),

		musb_readl(&tx->tx_head, 0),
		musb_readl(&tx->tx_buf, 0),
		musb_readl(&tx->tx_current, 0),
		musb_readl(&tx->tx_buf_current, 0),

		musb_readl(&tx->tx_info, 0),
		musb_readl(&tx->tx_rem_len, 0),
		/* dummy/unused word 6 */
		musb_readl(&tx->tx_complete, 0)
		);
}
Example #3
/* Context: controller irqlocked */
static void
cppi_dump_rx(int level, struct cppi_channel *c, const char *tag)
{
	void __iomem			*base = c->controller->mregs;
	struct cppi_rx_stateram __iomem	*rx = c->state_ram;

	musb_ep_select(base, c->index + 1);

	musb_dbg(c->controller->controller.musb,
		"RX DMA%d%s: %d left, csr %04x, "
		"%08x H%08x S%08x C%08x, "
		"B%08x L%08x %08x .. %08x",
		c->index, tag,
		musb_readl(c->controller->tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index),
		musb_readw(c->hw_ep->regs, MUSB_RXCSR),

		musb_readl(&rx->rx_skipbytes, 0),
		musb_readl(&rx->rx_head, 0),
		musb_readl(&rx->rx_sop, 0),
		musb_readl(&rx->rx_current, 0),

		musb_readl(&rx->rx_buf_current, 0),
		musb_readl(&rx->rx_len_len, 0),
		musb_readl(&rx->rx_cnt_cnt, 0),
		musb_readl(&rx->rx_complete, 0)
		);
}
Example #4
static void cppi_controller_stop(struct cppi *controller)
{
	void __iomem		*tibase;
	int			i;
	struct musb		*musb;

	musb = controller->controller.musb;

	tibase = controller->tibase;
	/* DISABLE INDIVIDUAL CHANNEL Interrupts */
	musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_INTCLR_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);

	musb_dbg(musb, "Tearing down RX and TX Channels");
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
		/* FIXME restructure of txdma to use bds like rxdma */
		controller->tx[i].last_processed = NULL;
		cppi_pool_free(controller->tx + i);
	}
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
		cppi_pool_free(controller->rx + i);

	/* In the TX case proper teardown is supported, but TX CPPI cannot
	 * be disabled until TX teardown is complete; so we resort to
	 * disabling TX/RX CPPI only after the TX channels are cleaned up.
	 */
	/* disable tx/rx cppi */
	musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
}
Example #5
void musb_host_finish_resume(struct work_struct *work)
{
	struct musb *musb;
	unsigned long flags;
	u8 power;

	musb = container_of(work, struct musb, finish_resume_work.work);

	spin_lock_irqsave(&musb->lock, flags);

	power = musb_readb(musb->mregs, MUSB_POWER);
	power &= ~MUSB_POWER_RESUME;
	musb_dbg(musb, "root port resume stopped, power %02x", power);
	musb_writeb(musb->mregs, MUSB_POWER, power);

	/*
	 * ISSUE:  DaVinci (RTL 1.300) disconnects after
	 * resume of high speed peripherals (but not full
	 * speed ones).
	 */
	musb->is_active = 1;
	musb->port1_status &= ~(USB_PORT_STAT_SUSPEND | MUSB_PORT_STAT_RESUME);
	musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
	usb_hcd_poll_rh_status(musb->hcd);
	/* NOTE: it might really be A_WAIT_BCON ... */
	musb->xceiv->otg->state = OTG_STATE_A_HOST;

	spin_unlock_irqrestore(&musb->lock, flags);
}
Example #6
void musb_root_disconnect(struct musb *musb)
{
	struct usb_otg	*otg = musb->xceiv->otg;

	musb->port1_status = USB_PORT_STAT_POWER
			| (USB_PORT_STAT_C_CONNECTION << 16);

	usb_hcd_poll_rh_status(musb->hcd);
	musb->is_active = 0;

	switch (musb->xceiv->otg->state) {
	case OTG_STATE_A_SUSPEND:
		if (otg->host->b_hnp_enable) {
			musb->xceiv->otg->state = OTG_STATE_A_PERIPHERAL;
			musb->g.is_a_peripheral = 1;
			break;
		}
		/* FALLTHROUGH */
	case OTG_STATE_A_HOST:
		musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
		musb->is_active = 0;
		break;
	case OTG_STATE_A_WAIT_VFALL:
		musb->xceiv->otg->state = OTG_STATE_B_IDLE;
		break;
	default:
		musb_dbg(musb, "host disconnect (%s)",
			usb_otg_state_string(musb->xceiv->otg->state));
	}
}
Example #7
/**
 * cppi_channel_program - program channel for data transfer
 * @ch: the channel
 * @maxpacket: max packet size
 * @mode: For RX, 1 unless the usb protocol driver promised to treat
 *	all short reads as errors and kick in high level fault recovery.
 *	For TX, ignored because of RNDIS mode races/glitches.
 * @dma_addr: dma address of buffer
 * @len: length of buffer
 * Context: controller irqlocked
 */
static int cppi_channel_program(struct dma_channel *ch,
		u16 maxpacket, u8 mode,
		dma_addr_t dma_addr, u32 len)
{
	struct cppi_channel	*cppi_ch;
	struct cppi		*controller;
	struct musb		*musb;

	cppi_ch = container_of(ch, struct cppi_channel, channel);
	controller = cppi_ch->controller;
	musb = controller->controller.musb;

	switch (ch->status) {
	case MUSB_DMA_STATUS_BUS_ABORT:
	case MUSB_DMA_STATUS_CORE_ABORT:
		/* fault irq handler should have handled cleanup */
		WARNING("%cX DMA%d not cleaned up after abort!\n",
				cppi_ch->transmit ? 'T' : 'R',
				cppi_ch->index);
		/* WARN_ON(1); */
		break;
	case MUSB_DMA_STATUS_BUSY:
		WARNING("program active channel?  %cX DMA%d\n",
				cppi_ch->transmit ? 'T' : 'R',
				cppi_ch->index);
		/* WARN_ON(1); */
		break;
	case MUSB_DMA_STATUS_UNKNOWN:
		musb_dbg(musb, "%cX DMA%d not allocated!",
				cppi_ch->transmit ? 'T' : 'R',
				cppi_ch->index);
		/* FALLTHROUGH */
	case MUSB_DMA_STATUS_FREE:
		break;
	}

	ch->status = MUSB_DMA_STATUS_BUSY;

	/* set transfer parameters, then queue up its first segment */
	cppi_ch->buf_dma = dma_addr;
	cppi_ch->offset = 0;
	cppi_ch->maxpacket = maxpacket;
	cppi_ch->buf_len = len;
	cppi_ch->channel.actual_len = 0;

	/* TX channel? or RX? */
	if (cppi_ch->transmit)
		cppi_next_tx_segment(musb, cppi_ch);
	else
		cppi_next_rx_segment(musb, cppi_ch, mode);

	return true;
}
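
A companion sketch (again hypothetical, using only the generic ops) for driving channel_program(); per the kernel-doc above, RX mode 0 is safe only when the protocol driver treats short reads as errors and recovers above usbcore:

/* Illustrative only: start a transfer on an already-allocated channel. */
static int start_rx(struct dma_controller *c, struct dma_channel *dc,
		u16 maxpacket, bool recovers_from_short_reads,
		dma_addr_t buf, u32 len)
{
	/* mode 1 => irq per packet (safe default); mode 0 => multi-BD */
	u8 mode = recovers_from_short_reads ? 0 : 1;

	return c->channel_program(dc, maxpacket, mode, buf, len);
}
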
Example #8
/* Release a CPPI Channel.  */
static void cppi_channel_release(struct dma_channel *channel)
{
	struct cppi_channel	*c;
	void __iomem		*tibase;

	/* REVISIT:  for paranoia, check state and abort if needed... */

	c = container_of(channel, struct cppi_channel, channel);
	tibase = c->controller->tibase;
	if (!c->hw_ep)
		musb_dbg(c->controller->controller.musb,
			"releasing idle DMA channel %p", c);
	else if (!c->transmit)
		core_rxirq_enable(tibase, c->index + 1);

	/* for now, leave its cppi IRQ enabled (we won't trigger it) */
	c->hw_ep = NULL;
	channel->status = MUSB_DMA_STATUS_UNKNOWN;
}
Example #9
/**
 * cppi_next_rx_segment - dma read for the next chunk of a buffer
 * @musb: the controller
 * @rx: dma channel
 * @onepacket: true unless caller treats short reads as errors, and
 *	performs fault recovery above usbcore.
 * Context: controller irqlocked
 *
 * See above notes about why we can't use multi-BD RX queues except in
 * rare cases (mass storage class), and can never use the hardware "rndis"
 * mode (since it's not a "true" RNDIS mode) with complete safety.
 *
 * It's ESSENTIAL that callers specify "onepacket" mode unless they kick in
 * code to recover from corrupted datastreams after each short transfer.
 */
static void
cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
{
	unsigned		maxpacket = rx->maxpacket;
	dma_addr_t		addr = rx->buf_dma + rx->offset;
	size_t			length = rx->buf_len - rx->offset;
	struct cppi_descriptor	*bd, *tail;
	unsigned		n_bds;
	unsigned		i;
	void __iomem		*tibase = musb->ctrl_base;
	int			is_rndis = 0;
	struct cppi_rx_stateram	__iomem *rx_ram = rx->state_ram;
	struct cppi_descriptor	*d;

	if (onepacket) {
		/* almost every USB driver, host or peripheral side */
		n_bds = 1;

		/* maybe apply the heuristic above */
		if (cppi_rx_rndis
				&& is_peripheral_active(musb)
				&& length > maxpacket
				&& (length & ~0xffff) == 0
				&& (length & 0x0fff) != 0
				&& (length & (maxpacket - 1)) == 0) {
			maxpacket = length;
			is_rndis = 1;
		}
	} else {
		/* virtually nothing except mass storage class */
		if (length > 0xffff) {
			n_bds = 0xffff / maxpacket;
			length = n_bds * maxpacket;
		} else {
			n_bds = DIV_ROUND_UP(length, maxpacket);
		}
		if (n_bds == 1)
			onepacket = 1;
		else
			n_bds = min(n_bds, (unsigned) NUM_RXCHAN_BD);
	}

	/* In host mode, autorequest logic can generate some IN tokens; it's
	 * tricky since we can't leave REQPKT set in RXCSR after the transfer
	 * finishes. So:  multipacket transfers involve two or more segments.
	 * And always at least two IRQs ... RNDIS mode is not an option.
	 */
	if (is_host_active(musb))
		n_bds = cppi_autoreq_update(rx, tibase, onepacket, n_bds);

	cppi_rndis_update(rx, 1, musb->ctrl_base, is_rndis);

	length = min(n_bds * maxpacket, length);

	musb_dbg(musb, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) "
			"dma 0x%llx len %u %u/%u",
			rx->index, maxpacket,
			onepacket
				? (is_rndis ? "rndis" : "onepacket")
				: "multipacket",
			n_bds,
			musb_readl(tibase,
				DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
					& 0xffff,
			(unsigned long long)addr, length,
			rx->channel.actual_len, rx->buf_len);

	/* only queue one segment at a time, since the hardware prevents
	 * correct queue shutdown after unexpected short packets
	 */
	bd = cppi_bd_alloc(rx);
	rx->head = bd;

	/* Build BDs for all packets in this segment */
	for (i = 0, tail = NULL; bd && i < n_bds; i++, tail = bd) {
		u32	bd_len;

		if (i) {
			bd = cppi_bd_alloc(rx);
			if (!bd)
				break;
			tail->next = bd;
			tail->hw_next = bd->dma;
		}
		bd->hw_next = 0;

		/* all but the last packet will be maxpacket size */
		if (maxpacket < length)
			bd_len = maxpacket;
		else
			bd_len = length;

		bd->hw_bufp = addr;
		addr += bd_len;
		rx->offset += bd_len;

		bd->hw_off_len = (0 /*offset*/ << 16) + bd_len;
		bd->buflen = bd_len;

		bd->hw_options = CPPI_OWN_SET | (i == 0 ? length : 0);
		length -= bd_len;
	}

	/* we always expect at least one reusable BD! */
	if (!tail) {
		WARNING("rx dma%d -- no BDs? need %d\n", rx->index, n_bds);
		return;
	} else if (i < n_bds)
		WARNING("rx dma%d -- only %d of %d BDs\n", rx->index, i, n_bds);

	tail->next = NULL;
	tail->hw_next = 0;

	bd = rx->head;
	rx->tail = tail;

	/* short reads and other faults should terminate this entire
	 * dma segment.  we want one "dma packet" per dma segment, not
	 * one per USB packet, terminating the whole queue at once...
	 * NOTE that current hardware seems to ignore SOP and EOP.
	 */
	bd->hw_options |= CPPI_SOP_SET;
	tail->hw_options |= CPPI_EOP_SET;

	for (d = rx->head; d; d = d->next)
		cppi_dump_rxbd("S", d);

	/* in case the preceding transfer left some state... */
	tail = rx->last_processed;
	if (tail) {
		tail->next = bd;
		tail->hw_next = bd->dma;
	}

	core_rxirq_enable(tibase, rx->index + 1);

	/* BDs live in DMA-coherent memory, but writes might be pending */
	cpu_drain_writebuffer();

	/* REVISIT specs say to write this AFTER the BUFCNT register
	 * below ... but that loses badly.
	 */
	musb_writel(&rx_ram->rx_head, 0, bd->dma);

	/* bufferCount must be at least 3, and zeroes on completion
	 * unless it underflows below zero, or stops at two, or keeps
	 * growing ... grr.
	 */
	i = musb_readl(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
			& 0xffff;

	if (!i)
		musb_writel(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
			n_bds + 2);
	else if (n_bds > (i - 3))
		musb_writel(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
			n_bds - (i - 3));

	i = musb_readl(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
			& 0xffff;
	if (i < (2 + n_bds)) {
		musb_dbg(musb, "bufcnt%d underrun - %d (for %d)",
					rx->index, i, n_bds);
		musb_writel(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
			n_bds + 2);
	}

	cppi_dump_rx(4, rx, "/S");
}
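
The "maybe apply the heuristic above" test packs four constraints into one expression. Restated as a standalone predicate (a hypothetical helper, not part of the driver; it assumes maxpacket is a power of two, as bulk maxpacket sizes are, and omits the cppi_rx_rndis and is_peripheral_active() gates):

static bool rx_would_use_rndis(size_t length, unsigned int maxpacket)
{
	return length > maxpacket		    /* more than one packet */
		&& (length & ~0xffff) == 0	    /* fits a 16-bit length field */
		&& (length & 0x0fff) != 0	    /* not a multiple of 4 KiB */
		&& (length & (maxpacket - 1)) == 0; /* whole packets only */
}
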
Example #10
/*
 * CPPI TX:
 * ========
 * TX is a lot more reasonable than RX; it doesn't need to run in
 * irq-per-packet mode very often.  RNDIS mode seems to behave well too
 * (except for how it handles the exactly-N-packets case).  Building a
 * txdma queue with multiple requests (urb or usb_request) looks
 * like it would work ... but fault handling would need much testing.
 *
 * The main issue with TX mode RNDIS relates to transfer lengths that
 * are an exact multiple of the packet length.  It appears that there's
 * a hiccup in that case (maybe the DMA completes before the ZLP gets
 * written?) boiling down to not being able to rely on CPPI writing any
 * terminating zero length packet before the next transfer is written.
 * So that's punted to PIO; better yet, gadget drivers can avoid it.
 *
 * Plus, there's allegedly an undocumented constraint that rndis transfer
 * length be a multiple of 64 bytes ... but the chip doesn't act that
 * way, and we really don't _want_ that behavior anyway.
 *
 * On TX, "transparent" mode works ... although experiments have shown
 * problems trying to use the SOP/EOP bits in different USB packets.
 *
 * REVISIT try to handle terminating zero length packets using CPPI
 * instead of doing it by PIO after an IRQ.  (Meanwhile, make Ethernet
 * links avoid that issue by forcing them to avoid zlps.)
 */
static void
cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
{
	unsigned		maxpacket = tx->maxpacket;
	dma_addr_t		addr = tx->buf_dma + tx->offset;
	size_t			length = tx->buf_len - tx->offset;
	struct cppi_descriptor	*bd;
	unsigned		n_bds;
	unsigned		i;
	struct cppi_tx_stateram	__iomem *tx_ram = tx->state_ram;
	int			rndis;

	/* TX can use the CPPI "rndis" mode, where we can probably fit this
	 * transfer in one BD and one IRQ.  The only time we would NOT want
	 * to use it is when hardware constraints prevent it, or if we'd
	 * trigger the "send a ZLP?" confusion.
	 */
	rndis = (maxpacket & 0x3f) == 0
		&& length > maxpacket
		&& length < 0xffff
		&& (length % maxpacket) != 0;

	if (rndis) {
		maxpacket = length;
		n_bds = 1;
	} else {
		if (length)
			n_bds = DIV_ROUND_UP(length, maxpacket);
		else
			n_bds = 1;
		n_bds = min(n_bds, (unsigned) NUM_TXCHAN_BD);
		length = min(n_bds * maxpacket, length);
	}

	musb_dbg(musb, "TX DMA%d, pktSz %d %s bds %d dma 0x%llx len %u",
			tx->index,
			maxpacket,
			rndis ? "rndis" : "transparent",
			n_bds,
			(unsigned long long)addr, length);

	cppi_rndis_update(tx, 0, musb->ctrl_base, rndis);

	/* assuming here that channel_program is called during
	 * transfer initiation ... current code maintains state
	 * for one outstanding request only (no queues, not even
	 * the implicit ones of an iso urb).
	 */

	bd = tx->freelist;
	tx->head = bd;
	tx->last_processed = NULL;

	/* FIXME use BD pool like RX side does, and just queue
	 * the minimum number for this request.
	 */

	/* Prepare queue of BDs first, then hand it to hardware.
	 * All BDs except maybe the last should be of full packet
	 * size; for RNDIS there _is_ only that last packet.
	 */
	for (i = 0; i < n_bds; ) {
		if (++i < n_bds && bd->next)
			bd->hw_next = bd->next->dma;
		else
			bd->hw_next = 0;

		bd->hw_bufp = tx->buf_dma + tx->offset;

		/* FIXME set EOP only on the last packet,
		 * SOP only on the first ... avoid IRQs
		 */
		if ((tx->offset + maxpacket) <= tx->buf_len) {
			tx->offset += maxpacket;
			bd->hw_off_len = maxpacket;
			bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
				| CPPI_OWN_SET | maxpacket;
		} else {
			/* only this one may be a partial USB Packet */
			u32		partial_len;

			partial_len = tx->buf_len - tx->offset;
			tx->offset = tx->buf_len;
			bd->hw_off_len = partial_len;

			bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
				| CPPI_OWN_SET | partial_len;
			if (partial_len == 0)
				bd->hw_options |= CPPI_ZERO_SET;
		}

		musb_dbg(musb, "TXBD %p: nxt %08x buf %08x len %04x opt %08x",
				bd, bd->hw_next, bd->hw_bufp,
				bd->hw_off_len, bd->hw_options);

		/* update the last BD enqueued to the list */
		tx->tail = bd;
		bd = bd->next;
	}

	/* BDs live in DMA-coherent memory, but writes might be pending */
	cpu_drain_writebuffer();

	/* Write to the HeadPtr in state RAM to trigger */
	musb_writel(&tx_ram->tx_head, 0, (u32)tx->freelist->dma);

	cppi_dump_tx(5, tx, "/S");
}
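
The TX "rndis" decision is the same idea with different guards; a hypothetical standalone restatement, annotated per the commentary above:

static bool tx_would_use_rndis(size_t length, unsigned int maxpacket)
{
	return (maxpacket & 0x3f) == 0	      /* maxpacket is a multiple of 64 */
		&& length > maxpacket	      /* more than one packet */
		&& length < 0xffff	      /* fits a 16-bit length field */
		&& (length % maxpacket) != 0; /* short final packet, so no ZLP */
}
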
Example #11
irqreturn_t cppi_interrupt(int irq, void *dev_id)
{
	struct musb		*musb = dev_id;
	struct cppi		*cppi;
	void __iomem		*tibase;
	struct musb_hw_ep	*hw_ep = NULL;
	u32			rx, tx;
	int			i, index;
	unsigned long		uninitialized_var(flags);

	cppi = container_of(musb->dma_controller, struct cppi, controller);
	if (cppi->irq)
		spin_lock_irqsave(&musb->lock, flags);

	tibase = musb->ctrl_base;

	tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG);
	rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG);

	if (!tx && !rx) {
		if (cppi->irq)
			spin_unlock_irqrestore(&musb->lock, flags);
		return IRQ_NONE;
	}

	musb_dbg(musb, "CPPI IRQ Tx%x Rx%x", tx, rx);

	/* process TX channels */
	for (index = 0; tx; tx = tx >> 1, index++) {
		struct cppi_channel		*tx_ch;
		struct cppi_tx_stateram __iomem	*tx_ram;
		bool				completed = false;
		struct cppi_descriptor		*bd;

		if (!(tx & 1))
			continue;

		tx_ch = cppi->tx + index;
		tx_ram = tx_ch->state_ram;

		/* FIXME  need a cppi_tx_scan() routine, which
		 * can also be called from abort code
		 */

		cppi_dump_tx(5, tx_ch, "/E");

		bd = tx_ch->head;

		/*
		 * If the head is NULL, it could mean there's an abort
		 * interrupt that needs to be acknowledged.
		 */
		if (NULL == bd) {
			musb_dbg(musb, "null BD");
			musb_writel(&tx_ram->tx_complete, 0, 0);
			continue;
		}

		/* run through all completed BDs */
		for (i = 0; !completed && bd && i < NUM_TXCHAN_BD;
				i++, bd = bd->next) {
			u16	len;

			/* catch latest BD writes from CPPI */
			rmb();
			if (bd->hw_options & CPPI_OWN_SET)
				break;

			musb_dbg(musb, "C/TXBD %p n %x b %x off %x opt %x",
					bd, bd->hw_next, bd->hw_bufp,
					bd->hw_off_len, bd->hw_options);

			len = bd->hw_off_len & CPPI_BUFFER_LEN_MASK;
			tx_ch->channel.actual_len += len;

			tx_ch->last_processed = bd;

			/* write completion register to acknowledge
			 * processing of completed BDs, and possibly
			 * release the IRQ; EOQ might not be set ...
			 *
			 * REVISIT use the same ack strategy as rx
			 *
			 * REVISIT have observed bit 18 set; huh??
			 */
			/* if ((bd->hw_options & CPPI_EOQ_MASK)) */
				musb_writel(&tx_ram->tx_complete, 0, bd->dma);

			/* stop scanning on end-of-segment */
			if (bd->hw_next == 0)
				completed = true;
		}

		/* on end of segment, maybe go to next one */
		if (completed) {
			/* cppi_dump_tx(4, tx_ch, "/complete"); */

			/* transfer more, or report completion */
			if (tx_ch->offset >= tx_ch->buf_len) {
				tx_ch->head = NULL;
				tx_ch->tail = NULL;
				tx_ch->channel.status = MUSB_DMA_STATUS_FREE;

				hw_ep = tx_ch->hw_ep;

				musb_dma_completion(musb, index + 1, 1);

			} else {
				/* Bigger transfer than we could fit in
				 * that first batch of descriptors...
				 */
				cppi_next_tx_segment(musb, tx_ch);
			}
		} else
			tx_ch->head = bd;
	}

	/* Start processing the RX block */
	for (index = 0; rx; rx = rx >> 1, index++) {

		if (rx & 1) {
			struct cppi_channel		*rx_ch;

			rx_ch = cppi->rx + index;

			/* let incomplete dma segments finish */
			if (!cppi_rx_scan(cppi, index))
				continue;

			/* start another dma segment if needed */
			if (rx_ch->channel.actual_len != rx_ch->buf_len
					&& rx_ch->channel.actual_len
						== rx_ch->offset) {
				cppi_next_rx_segment(musb, rx_ch, 1);
				continue;
			}

			/* all segments completed! */
			rx_ch->channel.status = MUSB_DMA_STATUS_FREE;

			hw_ep = rx_ch->hw_ep;

			core_rxirq_disable(tibase, index + 1);
			musb_dma_completion(musb, index + 1, 0);
		}
	}

	/* write to CPPI EOI register to re-enable interrupts */
	musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0);

	if (cppi->irq)
		spin_unlock_irqrestore(&musb->lock, flags);

	return IRQ_HANDLED;
}
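
Both scan loops above walk their IRQ mask one bit at a time via "mask >> 1". For reference, an equivalent (hypothetical, not the driver's) formulation of the TX scan using the kernel's for_each_set_bit() helper, with handle_tx_channel() standing in for the per-channel body:

#include <linux/bitops.h>

static void scan_tx_mask(struct cppi *cppi, u32 tx)
{
	unsigned long mask = tx;	/* snapshot of DAVINCI_TXCPPI_MASKED_REG */
	unsigned int index;

	for_each_set_bit(index, &mask, 32)
		handle_tx_channel(cppi->tx + index);	/* hypothetical helper */
}
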
Example #12
static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
{
	struct cppi_channel		*rx = &cppi->rx[ch];
	struct cppi_rx_stateram __iomem	*state = rx->state_ram;
	struct cppi_descriptor		*bd;
	struct cppi_descriptor		*last = rx->last_processed;
	bool				completed = false;
	bool				acked = false;
	int				i;
	dma_addr_t			safe2ack;
	void __iomem			*regs = rx->hw_ep->regs;
	struct musb			*musb = cppi->controller.musb;

	cppi_dump_rx(6, rx, "/K");

	bd = last ? last->next : rx->head;
	if (!bd)
		return false;

	/* run through all completed BDs */
	for (i = 0, safe2ack = musb_readl(&state->rx_complete, 0);
			(safe2ack || completed) && bd && i < NUM_RXCHAN_BD;
			i++, bd = bd->next) {
		u16	len;

		/* catch latest BD writes from CPPI */
		rmb();
		if (!completed && (bd->hw_options & CPPI_OWN_SET))
			break;

		musb_dbg(musb, "C/RXBD %llx: nxt %08x buf %08x "
			"off.len %08x opt.len %08x (%d)",
			(unsigned long long)bd->dma, bd->hw_next, bd->hw_bufp,
			bd->hw_off_len, bd->hw_options,
			rx->channel.actual_len);

		/* actual packet received length */
		if ((bd->hw_options & CPPI_SOP_SET) && !completed)
			len = bd->hw_off_len & CPPI_RECV_PKTLEN_MASK;
		else
			len = 0;

		if (bd->hw_options & CPPI_EOQ_MASK)
			completed = true;

		if (!completed && len < bd->buflen) {
			/* NOTE:  when we get a short packet, RXCSR_H_REQPKT
			 * must have been cleared, and no more DMA packets may
			 * be active in the queue... TI docs didn't say, but
			 * CPPI ignores those BDs even though OWN is still set.
			 */
			completed = true;
			musb_dbg(musb, "rx short %d/%d (%d)",
					len, bd->buflen,
					rx->channel.actual_len);
		}

		/* If we got here, we expect to ack at least one BD; meanwhile
		 * CPPI may be completing other BDs while we scan this list...
		 *
		 * RACE: we can notice OWN cleared before CPPI raises the
		 * matching irq by writing that BD as the completion pointer.
		 * In such cases, stop scanning and wait for the irq, avoiding
		 * lost acks and states where BD ownership is unclear.
		 */
		if (bd->dma == safe2ack) {
			musb_writel(&state->rx_complete, 0, safe2ack);
			safe2ack = musb_readl(&state->rx_complete, 0);
			acked = true;
			if (bd->dma == safe2ack)
				safe2ack = 0;
		}

		rx->channel.actual_len += len;

		cppi_bd_free(rx, last);
		last = bd;

		/* stop scanning on end-of-segment */
		if (bd->hw_next == 0)
			completed = true;
	}
	rx->last_processed = last;

	/* dma abort, lost ack, or ... */
	if (!acked && last) {
		int	csr;

		if (safe2ack == 0 || safe2ack == rx->last_processed->dma)
			musb_writel(&state->rx_complete, 0, safe2ack);
		if (safe2ack == 0) {
			cppi_bd_free(rx, last);
			rx->last_processed = NULL;

			/* if we land here on the host side, H_REQPKT will
			 * be clear and we need to restart the queue...
			 */
			WARN_ON(rx->head);
		}
		musb_ep_select(cppi->mregs, rx->index + 1);
		csr = musb_readw(regs, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_DMAENAB) {
			musb_dbg(musb, "list%d %p/%p, last %llx%s, csr %04x",
				rx->index,
				rx->head, rx->tail,
				rx->last_processed
					? (unsigned long long)
						rx->last_processed->dma
					: 0,
				completed ? ", completed" : "",
				csr);
			cppi_dump_rxq(4, "/what?", rx);
		}
	}
	if (!completed) {
		int	csr;

		rx->head = bd;

		/* REVISIT seems like "autoreq all but EOP" doesn't...
		 * setting it here "should" be racy, but seems to work
		 */
		csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
		if (is_host_active(cppi->controller.musb)
				&& bd
				&& !(csr & MUSB_RXCSR_H_REQPKT)) {
			csr |= MUSB_RXCSR_H_REQPKT;
			musb_writew(regs, MUSB_RXCSR,
					MUSB_RXCSR_H_WZC_BITS | csr);
			csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
		}
	} else {
		rx->head = NULL;
		rx->tail = NULL;
	}

	cppi_dump_rx(6, rx, completed ? "/completed" : "/cleaned");
	return completed;
}
Example #13
void musb_port_suspend(struct musb *musb, bool do_suspend)
{
	struct usb_otg	*otg = musb->xceiv->otg;
	u8		power;
	void __iomem	*mbase = musb->mregs;

	if (!is_host_active(musb))
		return;

	/* NOTE:  this doesn't necessarily put PHY into low power mode,
	 * turning off its clock; that's a function of PHY integration and
	 * MUSB_POWER_ENSUSPEND.  PHY may need a clock (sigh) to detect
	 * SE0 changing to connect (J) or wakeup (K) states.
	 */
	power = musb_readb(mbase, MUSB_POWER);
	if (do_suspend) {
		int retries = 10000;

		power &= ~MUSB_POWER_RESUME;
		power |= MUSB_POWER_SUSPENDM;
		musb_writeb(mbase, MUSB_POWER, power);

		/* Needed for OPT A tests */
		power = musb_readb(mbase, MUSB_POWER);
		while (power & MUSB_POWER_SUSPENDM) {
			power = musb_readb(mbase, MUSB_POWER);
			if (retries-- < 1)
				break;
		}

		musb_dbg(musb, "Root port suspended, power %02x", power);

		musb->port1_status |= USB_PORT_STAT_SUSPEND;
		switch (musb->xceiv->otg->state) {
		case OTG_STATE_A_HOST:
			musb->xceiv->otg->state = OTG_STATE_A_SUSPEND;
			musb->is_active = otg->host->b_hnp_enable;
			if (musb->is_active)
				mod_timer(&musb->otg_timer, jiffies
					+ msecs_to_jiffies(
						OTG_TIME_A_AIDL_BDIS));
			musb_platform_try_idle(musb, 0);
			break;
		case OTG_STATE_B_HOST:
			musb->xceiv->otg->state = OTG_STATE_B_WAIT_ACON;
			musb->is_active = otg->host->b_hnp_enable;
			musb_platform_try_idle(musb, 0);
			break;
		default:
			musb_dbg(musb, "bogus rh suspend? %s",
				usb_otg_state_string(musb->xceiv->otg->state));
		}
	} else if (power & MUSB_POWER_SUSPENDM) {
		power &= ~MUSB_POWER_SUSPENDM;
		power |= MUSB_POWER_RESUME;
		musb_writeb(mbase, MUSB_POWER, power);

		musb_dbg(musb, "Root port resuming, power %02x", power);

		/* later, GetPortStatus will stop RESUME signaling */
		musb->port1_status |= MUSB_PORT_STAT_RESUME;
		schedule_delayed_work(&musb->finish_resume_work,
				      msecs_to_jiffies(USB_RESUME_TIMEOUT));
	}
}
Example #14
int musb_hub_control(
	struct usb_hcd	*hcd,
	u16		typeReq,
	u16		wValue,
	u16		wIndex,
	char		*buf,
	u16		wLength)
{
	struct musb	*musb = hcd_to_musb(hcd);
	u32		temp;
	int		retval = 0;
	unsigned long	flags;

	spin_lock_irqsave(&musb->lock, flags);

	if (unlikely(!HCD_HW_ACCESSIBLE(hcd))) {
		spin_unlock_irqrestore(&musb->lock, flags);
		return -ESHUTDOWN;
	}

	/* hub features:  always zero, setting is a NOP
	 * port features: reported, sometimes updated when host is active
	 * no indicators
	 */
	switch (typeReq) {
	case ClearHubFeature:
	case SetHubFeature:
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
		case C_HUB_LOCAL_POWER:
			break;
		default:
			goto error;
		}
		break;
	case ClearPortFeature:
		if ((wIndex & 0xff) != 1)
			goto error;

		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			break;
		case USB_PORT_FEAT_SUSPEND:
			musb_port_suspend(musb, false);
			break;
		case USB_PORT_FEAT_POWER:
			if (!hcd->self.is_b_host)
				musb_platform_set_vbus(musb, 0);
			break;
		case USB_PORT_FEAT_C_CONNECTION:
		case USB_PORT_FEAT_C_ENABLE:
		case USB_PORT_FEAT_C_OVER_CURRENT:
		case USB_PORT_FEAT_C_RESET:
		case USB_PORT_FEAT_C_SUSPEND:
			break;
		default:
			goto error;
		}
		musb_dbg(musb, "clear feature %d", wValue);
		musb->port1_status &= ~(1 << wValue);
		break;
	case GetHubDescriptor:
		{
		struct usb_hub_descriptor *desc = (void *)buf;

		desc->bDescLength = 9;
		desc->bDescriptorType = USB_DT_HUB;
		desc->bNbrPorts = 1;
		desc->wHubCharacteristics = cpu_to_le16(
			HUB_CHAR_INDV_PORT_LPSM /* per-port power switching */
			| HUB_CHAR_NO_OCPM	/* no overcurrent reporting */
			);
		desc->bPwrOn2PwrGood = 5;	/* msec/2 */
		desc->bHubContrCurrent = 0;

		/* workaround bogus struct definition */
		desc->u.hs.DeviceRemovable[0] = 0x02;	/* port 1 */
		desc->u.hs.DeviceRemovable[1] = 0xff;
		}
		break;
	case GetHubStatus:
		temp = 0;
		*(__le32 *) buf = cpu_to_le32(temp);
		break;
	case GetPortStatus:
		if (wIndex != 1)
			goto error;

		put_unaligned(cpu_to_le32(musb->port1_status
					& ~MUSB_PORT_STAT_RESUME),
				(__le32 *) buf);

		/* port change status is more interesting */
		musb_dbg(musb, "port status %08x", musb->port1_status);
		break;
	case SetPortFeature:
		if ((wIndex & 0xff) != 1)
			goto error;

		switch (wValue) {
		case USB_PORT_FEAT_POWER:
			/* NOTE: this controller has a strange state machine
			 * that involves "requesting sessions" according to
			 * magic side effects from incompletely-described
			 * rules about startup...
			 *
			 * This call is what really starts the host mode; be
			 * very careful about side effects if you reorder any
			 * initialization logic, e.g. for OTG, or change any
			 * logic relating to VBUS power-up.
			 */
			if (!hcd->self.is_b_host && musb_has_gadget(musb))
				musb_start(musb);
			break;
		case USB_PORT_FEAT_RESET:
			musb_port_reset(musb, true);
			break;
		case USB_PORT_FEAT_SUSPEND:
			musb_port_suspend(musb, true);
			break;
		case USB_PORT_FEAT_TEST:
			if (unlikely(is_host_active(musb)))
				goto error;

			wIndex >>= 8;
			switch (wIndex) {
			case 1:
				pr_debug("TEST_J\n");
				temp = MUSB_TEST_J;
				break;
			case 2:
				pr_debug("TEST_K\n");
				temp = MUSB_TEST_K;
				break;
			case 3:
				pr_debug("TEST_SE0_NAK\n");
				temp = MUSB_TEST_SE0_NAK;
				break;
			case 4:
				pr_debug("TEST_PACKET\n");
				temp = MUSB_TEST_PACKET;
				musb_load_testpacket(musb);
				break;
			case 5:
				pr_debug("TEST_FORCE_ENABLE\n");
				temp = MUSB_TEST_FORCE_HOST
					| MUSB_TEST_FORCE_HS;

				musb_writeb(musb->mregs, MUSB_DEVCTL,
						MUSB_DEVCTL_SESSION);
				break;
			case 6:
				pr_debug("TEST_FIFO_ACCESS\n");
				temp = MUSB_TEST_FIFO_ACCESS;
				break;
			default:
				goto error;
			}
			musb_writeb(musb->mregs, MUSB_TESTMODE, temp);
			break;
		default:
			goto error;
		}
		musb_dbg(musb, "set feature %d", wValue);
		musb->port1_status |= 1 << wValue;
		break;

	default:
error:
		/* "protocol stall" on error */
		retval = -EPIPE;
	}
	spin_unlock_irqrestore(&musb->lock, flags);
	return retval;
}
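
The "1 << wValue" updates above lean on the USB 2.0 hub convention that port feature selectors double as bit positions: selectors 0..15 index wPortStatus, and the change selectors (16 and up) index wPortChange, which port1_status keeps shifted left by 16. A small standalone worked example, with selector values as defined by the spec:

#include <stdio.h>

#define USB_PORT_FEAT_SUSPEND		2	/* wPortStatus bit 2 */
#define USB_PORT_FEAT_C_CONNECTION	16	/* wPortChange bit 0 */

int main(void)
{
	unsigned int port1_status = 0;

	port1_status |= 1 << USB_PORT_FEAT_SUSPEND;	 /* 0x00000004 */
	port1_status |= 1 << USB_PORT_FEAT_C_CONNECTION; /* 0x00010000 */

	printf("port1_status = %08x\n", port1_status);	/* 00010004 */
	return 0;
}
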
Example #15
void musb_port_reset(struct musb *musb, bool do_reset)
{
	u8		power;
	void __iomem	*mbase = musb->mregs;

	if (musb->xceiv->otg->state == OTG_STATE_B_IDLE) {
		musb_dbg(musb, "HNP: Returning from HNP; no hub reset from b_idle");
		musb->port1_status &= ~USB_PORT_STAT_RESET;
		return;
	}

	if (!is_host_active(musb))
		return;

	/* NOTE:  caller guarantees it will turn off the reset when
	 * the appropriate amount of time has passed
	 */
	power = musb_readb(mbase, MUSB_POWER);
	if (do_reset) {
		/*
		 * If RESUME is set, we must make sure it stays on at least 20 ms.
		 * Then we must clear RESUME and wait a bit to let musb start
		 * generating SOFs. If we don't do this, OPT HS A 6.8 tests
		 * fail with "Error! Did not receive an SOF before suspend
		 * detected".
		 */
		if (power & MUSB_POWER_RESUME) {
			long remain = (unsigned long) musb->rh_timer - jiffies;

			if (musb->rh_timer > 0 && remain > 0) {
				/* take into account the minimum delay after resume */
				schedule_delayed_work(
					&musb->deassert_reset_work, remain);
				return;
			}

			musb_writeb(mbase, MUSB_POWER,
				    power & ~MUSB_POWER_RESUME);

			/* Give the core 1 ms to clear MUSB_POWER_RESUME */
			schedule_delayed_work(&musb->deassert_reset_work,
					      msecs_to_jiffies(1));
			return;
		}

		power &= 0xf0;
		musb_writeb(mbase, MUSB_POWER,
				power | MUSB_POWER_RESET);

		musb->port1_status |= USB_PORT_STAT_RESET;
		musb->port1_status &= ~USB_PORT_STAT_ENABLE;
		schedule_delayed_work(&musb->deassert_reset_work,
				      msecs_to_jiffies(50));
	} else {
		musb_dbg(musb, "root port reset stopped");
		musb_platform_pre_root_reset_end(musb);
		musb_writeb(mbase, MUSB_POWER,
				power & ~MUSB_POWER_RESET);
		musb_platform_post_root_reset_end(musb);

		power = musb_readb(mbase, MUSB_POWER);
		if (power & MUSB_POWER_HSMODE) {
			musb_dbg(musb, "high-speed device connected");
			musb->port1_status |= USB_PORT_STAT_HIGH_SPEED;
		}

		musb->port1_status &= ~USB_PORT_STAT_RESET;
		musb->port1_status |= USB_PORT_STAT_ENABLE
					| (USB_PORT_STAT_C_RESET << 16)
					| (USB_PORT_STAT_C_ENABLE << 16);
		usb_hcd_poll_rh_status(musb->hcd);

		musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
	}
}