static void qh_destroy(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	/* clean qtds first, and know this is not linked */
	if (!list_empty (&qh->qtd_list) || qh->qh_next.ptr) {
		ehci_dbg (ehci, "unused qh not empty!\n");
		BUG ();
	}
	if (qh->dummy)
		ehci_qtd_free (ehci, qh->dummy);
	dma_pool_free(ehci->qh_pool, qh->hw, qh->qh_dma);
	kfree(qh);
}
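/*
 * For context, a minimal sketch of the allocation side whose invariants
 * qh_destroy() checks above (loosely based on ehci_qh_alloc() in
 * drivers/usb/host/ehci-mem.c; error-handling details vary by kernel
 * version, and the name qh_alloc_sketch is illustrative only):
 */
static struct ehci_qh *qh_alloc_sketch(struct ehci_hcd *ehci, gfp_t flags)
{
	struct ehci_qh *qh;

	qh = kzalloc(sizeof(*qh), flags);
	if (!qh)
		return NULL;

	/* hardware-visible part lives in the coherent DMA pool */
	qh->hw = dma_pool_zalloc(ehci->qh_pool, flags, &qh->qh_dma);
	if (!qh->hw)
		goto fail;

	/* dummy qtd keeps the QH overlay from pointing at freed memory */
	qh->dummy = ehci_qtd_alloc(ehci, flags);
	if (!qh->dummy)
		goto fail_qtd;

	INIT_LIST_HEAD(&qh->qtd_list);
	return qh;

fail_qtd:
	dma_pool_free(ehci->qh_pool, qh->hw, qh->qh_dma);
fail:
	kfree(qh);
	return NULL;
}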
Example #2
static void qh_put (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	if (!atomic_dec_and_test (&qh->refcount))
		return;
	/* clean qtds first, and know this is not linked */
	if (!list_empty (&qh->qtd_list) || qh->qh_next.ptr) {
		ehci_dbg (ehci, "unused qh not empty!\n");
		BUG ();
	}
	if (qh->dummy)
		ehci_qtd_free (ehci, qh->dummy);
	pci_pool_free (ehci->qh_pool, qh, qh->qh_dma);
}
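/*
 * For symmetry, a minimal sketch of the acquire side that the
 * atomic_dec_and_test() above pairs with (trees of this vintage may spell
 * this as a macro rather than an inline; qh_get here is illustrative):
 */
static inline void qh_get (struct ehci_qh *qh)
{
	atomic_inc (&qh->refcount);
}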
Example #3
static void qh_destroy(struct ehci_qh *qh)
{
	struct ehci_hcd *ehci = qh->ehci;

	/* clean qtds first, and know this is not linked */
	if (!list_empty (&qh->qtd_list) || qh->qh_next.ptr) {
		ehci_dbg (ehci, "unused qh not empty!\n");
		BUG ();
	}
	if (qh->dummy)
		ehci_qtd_free (ehci, qh->dummy);
	dma_pool_free(ehci->qh_pool, qh->hw, qh->qh_dma);
	kfree(qh);
}
Example #4
static void qh_destroy (struct kref *kref)
{
	struct ehci_qh *qh = container_of(kref, struct ehci_qh, kref);
	struct ehci_hcd *ehci = qh->ehci;

	/* clean qtds first, and know this is not linked */
	if (!list_empty (&qh->qtd_list) || qh->qh_next.ptr) {
		ehci_dbg (ehci, "unused qh not empty!\n");
		BUG ();
	}
	if (qh->dummy)
		ehci_qtd_free (ehci, qh->dummy);
	dma_pool_free (ehci->qh_pool, qh, qh->qh_dma);
}
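/*
 * With the kref variant above, the reference helpers reduce to thin
 * wrappers around the generic kref API; qh_destroy() then runs only when
 * the last reference is dropped (a sketch of how such helpers are
 * typically written; exact placement varies by kernel version):
 */
static inline void qh_get (struct ehci_qh *qh)
{
	kref_get (&qh->kref);
}

static inline void qh_put (struct ehci_qh *qh)
{
	kref_put (&qh->kref, qh_destroy);
}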
Example #5
static void qh_destroy(struct ehci_qh *qh)
{
	struct ehci_hcd *ehci = qh->ehci;

	/* clean qtds first, and know this is not linked */
	if (!list_empty (&qh->qtd_list) || qh->qh_next.ptr) {
		ehci_dbg (ehci, "unused qh not empty!\n");
		BUG ();
	}
	if (qh->dummy)
		ehci_qtd_free (ehci, qh->dummy);
/* tony.yu map between PHY addr & BUS addr */
#if defined(CONFIG_ARM) && (MP_USB_MSTAR==1)
	qh->qh_dma = PA2BUS(qh->qh_dma);
#endif
	dma_pool_free(ehci->qh_pool, qh->hw, qh->qh_dma);
	kfree(qh);
}
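/*
 * A heavily hedged sketch of the matching fixup on the allocation side,
 * assuming the vendor tree provides BUS2PA() as the inverse of the
 * PA2BUS() used above (BUS2PA and the helper name qh_store_dma_sketch are
 * assumptions, not confirmed by this source):
 */
static void qh_store_dma_sketch(struct ehci_qh *qh, dma_addr_t bus_addr)
{
#if defined(CONFIG_ARM) && (MP_USB_MSTAR==1)
	/* the DMA pool hands back a BUS address; store it as a PHY address */
	qh->qh_dma = BUS2PA(bus_addr);
#else
	qh->qh_dma = bus_addr;
#endif
}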
Example #6
static void qh_destroy(struct ehci_qh *qh)
{
	struct ehci_hcd *ehci = qh->ehci;
	int i;

	/* clean qtds first, and know this is not linked */
	if (!list_empty(&qh->qtd_list) || qh->qh_next.ptr) {
		ehci_dbg(ehci, "unused qh not empty!\n");
		BUG();
	}
	if (qh->dummy)
		ehci_qtd_free(ehci, qh->dummy);
	/* release any iRAM TD slot bound to this qh's device address */
	for (i = 0; i < IRAM_NTD; i++) {
		if (ehci->usb_address[i] == (qh->hw_info1 & 0x7F))
			ehci->usb_address[i] = 0;
	}

	if ((qh->qh_dma & (USB_IRAM_BASE_ADDR & 0xFFF00000)) ==
	    (USB_IRAM_BASE_ADDR & 0xFFF00000))
		usb_free(qh->qh_dma);
	else
		dma_pool_free(ehci->qh_pool, qh, qh->qh_dma);
	--g_debug_qH_allocated;
}
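/*
 * The free path above selects the allocator by address range; a small
 * helper makes that test explicit (a sketch derived from the comparison
 * above, assuming the iRAM region is identified by the top 12 bits of
 * USB_IRAM_BASE_ADDR; the name qh_dma_in_iram is illustrative):
 */
static inline int qh_dma_in_iram(dma_addr_t dma)
{
	return (dma & (USB_IRAM_BASE_ADDR & 0xFFF00000)) ==
	       (USB_IRAM_BASE_ADDR & 0xFFF00000);
}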
Example #7
/*
 * Process completed qtds for a qh, issuing completions if needed.
 * When freeing:  frees qtds, unmaps buf, returns URB to driver.
 * When not freeing (queued periodic qh):  retain qtds, mapping, and urb.
 * Races up to qh->hw_current; returns number of urb completions.
 */
static int
qh_completions (
	struct ehci_hcd		*ehci,
	struct ehci_qh		*qh,
	int			freeing
) {
	struct ehci_qtd		*qtd, *last;
	struct list_head	*next, *qtd_list = &qh->qtd_list;
	int			unlink = 0, halted = 0;
	unsigned long		flags;
	int			retval = 0;

	spin_lock_irqsave (&ehci->lock, flags);
	if (unlikely (list_empty (qtd_list))) {
		spin_unlock_irqrestore (&ehci->lock, flags);
		return retval;
	}

	/* scan QTDs till end of list, or we reach an active one */
	for (qtd = list_entry (qtd_list->next, struct ehci_qtd, qtd_list),
			    	last = 0, next = 0;
			next != qtd_list;
			last = qtd, qtd = list_entry (next,
						struct ehci_qtd, qtd_list)) {
		struct urb	*urb = qtd->urb;
		u32		token = 0;

		/* clean up any state from previous QTD ...*/
		if (last) {
			if (likely (last->urb != urb)) {
				/* complete() can reenter this HCD */
				spin_unlock_irqrestore (&ehci->lock, flags);
				if (likely (freeing != 0))
					ehci_urb_done (ehci, last->buf_dma,
						last->urb);
				else
					ehci_urb_complete (ehci, last->buf_dma,
						last->urb);
				spin_lock_irqsave (&ehci->lock, flags);
				retval++;
			}

			/* qh overlays can have HC's old cached copies of
			 * next qtd ptrs, if an URB was queued afterwards.
			 */
			if (cpu_to_le32 (last->qtd_dma) == qh->hw_current
					&& last->hw_next != qh->hw_qtd_next) {
				qh->hw_alt_next = last->hw_alt_next;
				qh->hw_qtd_next = last->hw_next;
			}

			if (likely (freeing != 0))
				ehci_qtd_free (ehci, last);
			last = 0;
		}
		next = qtd->qtd_list.next;

		/* QTDs at tail may be active if QH+HC are running,
		 * or when unlinking some urbs queued to this QH
		 */
		token = le32_to_cpu (qtd->hw_token);
		halted = halted
			|| (__constant_cpu_to_le32 (QTD_STS_HALT)
				& qh->hw_token) != 0
			|| (ehci->hcd.state == USB_STATE_HALT)
			|| (qh->qh_state == QH_STATE_IDLE);

		/* fault: unlink the rest, since this qtd saw an error? */
		if (unlikely ((token & QTD_STS_HALT) != 0)) {
			freeing = unlink = 1;
			/* status copied below */

		/* QH halts only because of fault (above) or unlink (here). */
		} else if (unlikely (halted != 0)) {

			/* unlinking everything because of HC shutdown? */
			if (ehci->hcd.state == USB_STATE_HALT) {
				freeing = unlink = 1;

			/* explicit unlink, maybe starting here? */
			} else if (qh->qh_state == QH_STATE_IDLE
					&& (urb->status == -ECONNRESET
						|| urb->status == -ENOENT)) {
				freeing = unlink = 1;

			/* QH halted to unlink urbs _after_ this?  */
			} else if (!unlink && (token & QTD_STS_ACTIVE) != 0) {
				qtd = 0;
				continue;
			}

			/* unlink the rest?  once we start unlinking, after
			 * a fault or explicit unlink, we unlink all later
			 * urbs.  usb spec requires that.
			 */
			if (unlink && urb->status == -EINPROGRESS)
				urb->status = -ECONNRESET;

		/* Else QH is active, so we must not modify QTDs
		 * that HC may be working on.  No more qtds to check.
		 */
		} else if (unlikely ((token & QTD_STS_ACTIVE) != 0)) {
			next = qtd_list;
			qtd = 0;
			continue;
		}

		spin_lock (&urb->lock);
		qtd_copy_status (urb, qtd->length, token);
		spin_unlock (&urb->lock);

		/*
		 * NOTE:  this won't work right with interrupt urbs that
		 * need multiple qtds ... only the first scan of qh->qtd_list
		 * starts at the right qtd, yet multiple scans could happen
		 * for transfers that are scheduled across multiple uframes. 
		 * (Such schedules are not currently allowed!)
		 */
		if (likely (freeing != 0))
			list_del (&qtd->qtd_list);
		else {
			/* restore everything the HC could change
			 * from an interrupt QTD
			 */
			qtd->hw_token = (qtd->hw_token
					& __constant_cpu_to_le32 (0x8300))
				| cpu_to_le32 (qtd->length << 16)
				| __constant_cpu_to_le32 (QTD_STS_ACTIVE
					| (EHCI_TUNE_CERR << 10));
			qtd->hw_buf [0] &= ~__constant_cpu_to_le32 (0x0fff);

			/* this offset, and the length above,
			 * are likely wrong on QTDs #2..N
			 */
			qtd->hw_buf [0] |= cpu_to_le32 (0x0fff & qtd->buf_dma);
		}

#if 0
		if (urb->status == -EINPROGRESS)
			vdbg ("  qtd %p ok, urb %p, token %8x, len %d",
				qtd, urb, token, urb->actual_length);
		else
			vdbg ("urb %p status %d, qtd %p, token %8x, len %d",
				urb, urb->status, qtd, token,
				urb->actual_length);
#endif

		/* SETUP for control urb? */
		if (unlikely (QTD_PID (token) == 2))
			pci_unmap_single (ehci->hcd.pdev,
				qtd->buf_dma, sizeof (devrequest),
				PCI_DMA_TODEVICE);
	}

	/* patch up list head? */
	if (unlikely (halted && !list_empty (qtd_list))) {
		qh_update (qh, list_entry (qtd_list->next,
				struct ehci_qtd, qtd_list));
	}