Example #1
0
int usb_ept_queue_xfer(struct msm_endpoint *ept, struct usb_request *_req)
{
	unsigned long flags;
	struct msm_request *req = to_msm_request(_req);
	struct msm_request *last;
	struct usb_info *ui = ept->ui;
	struct ept_queue_item *item = req->item;
	unsigned length = req->req.length;

	if (length > 0x4000)
		return -EMSGSIZE;

	spin_lock_irqsave(&ui->lock, flags);

	if (req->busy) {
		req->req.status = -EBUSY;
		spin_unlock_irqrestore(&ui->lock, flags);
		INFO("usb_ept_queue_xfer() tried to queue busy request\n");
		return -EBUSY;
	}

	if (!ui->online && (ept->num != 0)) {
		req->req.status = -ESHUTDOWN;
		spin_unlock_irqrestore(&ui->lock, flags);
		INFO("usb_ept_queue_xfer() called while offline\n");
		return -ESHUTDOWN;
	}

	req->busy = 1;
	req->live = 0;
	req->next = 0;
	req->req.status = -EBUSY;

	req->dma = dma_map_single(NULL, req->req.buf, length,
				  (ept->flags & EPT_FLAG_IN) ?
				  DMA_TO_DEVICE : DMA_FROM_DEVICE);

	/* prepare the transaction descriptor item for the hardware */
	item->next = TERMINATE;
	item->info = INFO_BYTES(length) | INFO_IOC | INFO_ACTIVE;
	item->page0 = req->dma;
	item->page1 = (req->dma + 0x1000) & 0xfffff000;
	item->page2 = (req->dma + 0x2000) & 0xfffff000;
	item->page3 = (req->dma + 0x3000) & 0xfffff000;

	/* Add the new request to the end of the queue */
	last = ept->last;
	if (last) {
		/* There are already requests in the queue. Add us
		 * to the end, but let the completion interrupt
		 * actually start things going, to avoid hw issues.
		 */
		last->next = req;

		/* only modify the hw transaction next pointer if
		 * that request is not live
		 */
		if (!last->live)
			last->item->next = req->item_dma;
	} else {
		/* queue was empty -- kick the hardware */
		ept->req = req;
		usb_ept_start(ept);
	}
	ept->last = req;

	spin_unlock_irqrestore(&ui->lock, flags);
	return 0;
}
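Both functions fill the transaction descriptor's page pointers the same way: page0 gets the buffer's (physical) start address, and each following field gets the next 4 KiB-aligned page, so one descriptor can cover a buffer that crosses several page boundaries. A minimal sketch of just that arithmetic, assuming 4 KiB pages; fill_dtd_pages() and its five-entry array are hypothetical names, not part of either driver:

#include <stdint.h>

/* Hypothetical helper: reproduces the pageN arithmetic used above.
 * page[0] keeps the buffer's offset within its first 4 KiB page;
 * page[1..4] point at the start of each following 4 KiB page.
 */
static void fill_dtd_pages(uint32_t buf_phys, uint32_t page[5])
{
	page[0] = buf_phys;
	for (int i = 1; i < 5; i++)
		page[i] = (buf_phys & 0xfffff000u) + (uint32_t)i * 0x1000u;
}

Note that (dma + 0x1000) & 0xfffff000 in the first function and (phys & 0xfffff000) + 0x1000 in the second yield the same value, so both drivers describe the buffer pages identically (the first function fills only page0 through page3).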
/*
 * Assumes that previously allocated TDs have not been freed,
 * but it can also handle the case where TDs are freed.
 */
int udc_request_queue(struct udc_endpoint *ept, struct udc_request *_req)
{
	unsigned xfer = 0;
	struct ept_queue_item *item, *curr_item;
	struct usb_request *req = (struct usb_request *)_req;
	unsigned phys = (unsigned)req->req.buf;
	unsigned len = req->req.length;
	unsigned int count = 0;

	curr_item = NULL;
	xfer = (len > MAX_TD_XFER_SIZE) ? MAX_TD_XFER_SIZE : len;
	/*
	 * First TD allocated during request allocation
	 */
	item = req->item;
	item->info = INFO_BYTES(xfer) | INFO_ACTIVE;
	item->page0 = phys;
	item->page1 = (phys & 0xfffff000) + 0x1000;
	item->page2 = (phys & 0xfffff000) + 0x2000;
	item->page3 = (phys & 0xfffff000) + 0x3000;
	item->page4 = (phys & 0xfffff000) + 0x4000;
	phys += xfer;
	curr_item = item;
	len -= xfer;

	/*
	 * If the transfer length is more than
	 * one TD can accommodate,
	 * add more transfer descriptors.
	 */
	while (len > 0) {
		xfer = (len > MAX_TD_XFER_SIZE) ? MAX_TD_XFER_SIZE : len;
		if (curr_item->next == TERMINATE) {
			/*
			 * Allocate a new TD only if the chain
			 * does not already exist
			 */
			item = memalign(CACHE_LINE,
					ROUNDUP(sizeof(struct ept_queue_item), CACHE_LINE));
			if (!item) {
				dprintf(ALWAYS, "allocate USB item fail ept%d"
							"%s queue\n"
							"td count = %d\n",
							ept->num,
							ept->in ? "in" : "out",
							count);
				return -1;
			} else {
				count++;
				curr_item->next = PA(item);
				item->next = TERMINATE;
			}
		} else
			/* The next TD in the chain already exists; reuse it */
			item = VA(curr_item->next);

		/* Update TD with transfer information */
		item->info = INFO_BYTES(xfer) | INFO_ACTIVE;
		item->page0 = phys;
		item->page1 = (phys & 0xfffff000) + 0x1000;
		item->page2 = (phys & 0xfffff000) + 0x2000;
		item->page3 = (phys & 0xfffff000) + 0x3000;
		item->page4 = (phys & 0xfffff000) + 0x4000;

		curr_item = item;
		len -= xfer;
		phys += xfer;
	}

	/* Terminate and set interrupt for last TD */
	curr_item->next = TERMINATE;
	curr_item->info |= INFO_IOC;
	enter_critical_section();
	ept->head->next = PA(req->item);
	ept->head->info = 0;
	ept->req = req;
	arch_clean_invalidate_cache_range((addr_t) ept,
					  sizeof(struct udc_endpoint));
	arch_clean_invalidate_cache_range((addr_t) ept->head,
					  sizeof(struct ept_queue_head));
	arch_clean_invalidate_cache_range((addr_t) ept->req,
					  sizeof(struct usb_request));
	arch_clean_invalidate_cache_range((addr_t) VA(req->req.buf),
					  req->req.length);

	item = req->item;
	/* Write all TDs back to memory from the cache */
	while (item != NULL) {
		curr_item = item;
		if (curr_item->next == TERMINATE)
			item = NULL;
		else
			item = VA(curr_item->next);
		arch_clean_invalidate_cache_range((addr_t) curr_item,
					  sizeof(struct ept_queue_item));
	}

	DBG("ept%d %s queue req=%p\n", ept->num, ept->in ? "in" : "out", req);
	writel(ept->bit, USB_ENDPTPRIME);
	exit_critical_section();
	return 0;
}
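Each TD carries at most MAX_TD_XFER_SIZE bytes, so udc_request_queue() chains one descriptor per MAX_TD_XFER_SIZE-sized chunk of the request, reusing TDs left over from a previous request and allocating new ones only when the existing chain runs out. A minimal sketch of the resulting descriptor count; td_count_for_len() is a hypothetical helper, not part of the driver:

/* Hypothetical helper: how many TDs udc_request_queue() ends up
 * chaining for a request of len bytes. A zero-length request still
 * uses the single TD allocated together with the request.
 */
static unsigned td_count_for_len(unsigned len, unsigned max_td_xfer)
{
	if (len == 0)
		return 1;
	return (len + max_td_xfer - 1) / max_td_xfer;
}

For example, if MAX_TD_XFER_SIZE were 16 KiB, a 40 KiB request would split into three TDs of 16 KiB, 16 KiB, and 8 KiB; only the last TD gets INFO_IOC, so the controller raises a single completion interrupt for the whole request.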