Example #1
static void complete_rx(struct s3c_udc *dev, u8 ep_num)
{
	struct s3c_ep *ep = &dev->ep[ep_num];
	struct s3c_request *req = NULL;
	u32 ep_tsr = 0, xfer_size = 0, xfer_length, is_short = 0;

	if (list_empty(&ep->queue)) {
		DEBUG_OUT_EP("%s: RX DMA done : NULL REQ on OUT EP-%d\n",
					__func__, ep_num);
		return;
	}

	req = list_entry(ep->queue.next, struct s3c_request, queue);

	ep_tsr = readl(S3C_UDC_OTG_DOEPTSIZ(ep_num));

	if (ep_num == EP0_CON)
		xfer_size = (ep_tsr & 0x7f);
	else
		xfer_size = (ep_tsr & 0x7fff);

	/* make the DMA'd data visible to the CPU before reading it */
	dma_cache_maint(req->req.buf, req->req.length, DMA_FROM_DEVICE);
	xfer_length = req->req.length - xfer_size;
	req->req.actual += min(xfer_length, req->req.length - req->req.actual);
	is_short = (xfer_length < ep->ep.maxpacket);

	DEBUG_OUT_EP("%s: RX DMA done : ep = %d, rx bytes = %d/%d, "
		     "is_short = %d, DOEPTSIZ = 0x%x, remained bytes = %d\n",
			__func__, ep_num, req->req.actual, req->req.length,
			is_short, ep_tsr, xfer_size);

	if (is_short || req->req.actual == xfer_length) {
		if (ep_num == EP0_CON && dev->ep0state == DATA_STATE_RECV) {
			DEBUG_OUT_EP("	=> Send ZLP\n");
			dev->ep0state = WAIT_FOR_SETUP;
			s3c_udc_ep0_zlp();
		} else {
			done(ep, req, 0);

			if (!list_empty(&ep->queue)) {
				req = list_entry(ep->queue.next,
					struct s3c_request, queue);
				DEBUG_OUT_EP("%s: Next Rx request start...\n", __func__);
				if (ep_num == EP0_CON && dev->ep0state == WAIT_FOR_SETUP)
					setdma_rx(ep, req, 1);
				else
					setdma_rx(ep, req, 0);
			}
		}
	}
}
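
The dma_cache_maint() call above is the old ARM-specific cache helper; it was removed from mainline Linux long ago in favour of the generic DMA streaming API. Below is a minimal sketch, not code from this driver, of how the same "make the received data visible to the CPU" step looks with that API; udc_dev and req_dma are hypothetical names for the controller's struct device and the handle obtained earlier from dma_map_single().

/*
 * Sketch only (assumed names: udc_dev, req_dma): sync the RX buffer for
 * CPU access with the generic DMA API instead of dma_cache_maint().
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>

static void rx_buf_to_cpu(struct device *udc_dev, dma_addr_t req_dma,
			  size_t len)
{
	/* invalidate the CPU cache so it sees what the controller wrote */
	dma_sync_single_for_cpu(udc_dev, req_dma, len, DMA_FROM_DEVICE);
}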
Example #2
static void complete_rx(struct s3c_udc *dev, u8 ep_num)
{
	struct s3c_ep *ep = &dev->ep[ep_num];
	struct s3c_request *req = NULL;
	u32 ep_tsr = 0, xfer_size = 0, is_short = 0;
	u32 *p = the_controller->dma_buf[ep_index(ep)+1];

	if (list_empty(&ep->queue)) {
		debug_cond(DEBUG_OUT_EP != 0,
			   "%s: RX DMA done : NULL REQ on OUT EP-%d\n",
			   __func__, ep_num);
		return;
	}

	req = list_entry(ep->queue.next, struct s3c_request, queue);
	ep_tsr = readl(&reg->out_endp[ep_num].doeptsiz);

	if (ep_num == EP0_CON)
		xfer_size = (ep_tsr & DOEPT_SIZ_XFER_SIZE_MAX_EP0);
	else
		xfer_size = (ep_tsr & DOEPT_SIZ_XFER_SIZE_MAX_EP);

	xfer_size = ep->len - xfer_size;

	/* invalidate the DMA bounce buffer before the CPU reads it,
	 * then copy the received bytes out of it */
	invalidate_dcache_range((unsigned long) p,
				(unsigned long) p + DMA_BUFFER_SIZE);

	memcpy(ep->dma_buf, p, ep->len);

	req->req.actual += min(xfer_size, req->req.length - req->req.actual);
	is_short = (xfer_size < ep->ep.maxpacket);

	debug_cond(DEBUG_OUT_EP != 0,
		   "%s: RX DMA done : ep = %d, rx bytes = %d/%d, "
		   "is_short = %d, DOEPTSIZ = 0x%x, remained bytes = %d\n",
		   __func__, ep_num, req->req.actual, req->req.length,
		   is_short, ep_tsr, xfer_size);

	if (is_short || req->req.actual == req->req.length) {
		if (ep_num == EP0_CON && dev->ep0state == DATA_STATE_RECV) {
			debug_cond(DEBUG_OUT_EP != 0, "	=> Send ZLP\n");
			s3c_udc_ep0_zlp(dev);
			/* packet will be completed in complete_tx() */
			dev->ep0state = WAIT_FOR_IN_COMPLETE;
		} else {
			done(ep, req, 0);

			if (!list_empty(&ep->queue)) {
				req = list_entry(ep->queue.next,
					struct s3c_request, queue);
				debug_cond(DEBUG_OUT_EP != 0,
					   "%s: Next Rx request start...\n",
					   __func__);
				setdma_rx(ep, req);
			}
		}
	} else {
		/* transfer not finished yet: re-arm RX DMA for this request */
		setdma_rx(ep, req);
	}
}
Example #3
static void complete_rx(struct dwc2_udc *dev, u8 ep_num)
{
	struct dwc2_ep *ep = &dev->ep[ep_num];
	struct dwc2_request *req = NULL;
	u32 ep_tsr = 0, xfer_size = 0, is_short = 0;

	if (list_empty(&ep->queue)) {
		debug_cond(DEBUG_OUT_EP != 0,
			   "%s: RX DMA done : NULL REQ on OUT EP-%d\n",
			   __func__, ep_num);
		return;
	}

	req = list_entry(ep->queue.next, struct dwc2_request, queue);
	ep_tsr = readl(&reg->out_endp[ep_num].doeptsiz);

	if (ep_num == EP0_CON)
		xfer_size = (ep_tsr & DOEPT_SIZ_XFER_SIZE_MAX_EP0);
	else
		xfer_size = (ep_tsr & DOEPT_SIZ_XFER_SIZE_MAX_EP);

	xfer_size = ep->len - xfer_size;

	/*
	 * NOTE:
	 *
	 * Please be careful to allocate the USB request buffer properly:
	 * it needs to be aligned to CONFIG_SYS_CACHELINE_SIZE, not only in
	 * its starting address, but its size must also be a multiple of
	 * the cache line.
	 *
	 * This prevents corruption of data allocated immediately before or
	 * after the buffer.
	 *
	 * For armv7, cache_v7.c contains code that emits an "ERROR" message
	 * to warn users.
	 */
	invalidate_dcache_range((unsigned long) ep->dma_buf,
				(unsigned long) ep->dma_buf +
				ROUND(xfer_size, CONFIG_SYS_CACHELINE_SIZE));

	req->req.actual += min(xfer_size, req->req.length - req->req.actual);
	is_short = !!(xfer_size % ep->ep.maxpacket);

	debug_cond(DEBUG_OUT_EP != 0,
		   "%s: RX DMA done : ep = %d, rx bytes = %d/%d, "
		   "is_short = %d, DOEPTSIZ = 0x%x, remained bytes = %d\n",
		   __func__, ep_num, req->req.actual, req->req.length,
		   is_short, ep_tsr, req->req.length - req->req.actual);

	if (is_short || req->req.actual == req->req.length) {
		if (ep_num == EP0_CON && dev->ep0state == DATA_STATE_RECV) {
			debug_cond(DEBUG_OUT_EP != 0, "	=> Send ZLP\n");
			dwc2_udc_ep0_zlp(dev);
			/* packet will be completed in complete_tx() */
			dev->ep0state = WAIT_FOR_IN_COMPLETE;
		} else {
			done(ep, req, 0);

			if (!list_empty(&ep->queue)) {
				req = list_entry(ep->queue.next,
					struct dwc2_request, queue);
				debug_cond(DEBUG_OUT_EP != 0,
					   "%s: Next Rx request start...\n",
					   __func__);
				setdma_rx(ep, req);
			}
		}
	} else {
		/* transfer not finished yet: re-arm RX DMA for this request */
		setdma_rx(ep, req);
	}
}
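
The NOTE in Example #3 only states the buffer-allocation rule; the sketch below shows one way a caller could satisfy it. It is an illustration under assumptions, not code from the driver: it relies on U-Boot's memalign() and the ROUND() macro already used above, and alloc_rx_buffer() / rx_len are made-up names.

#include <common.h>
#include <malloc.h>

/*
 * Illustrative helper (assumed name): return a buffer whose start address
 * is cache-line aligned and whose size is rounded up to a cache-line
 * multiple, so invalidate_dcache_range() on it cannot clobber data that
 * sits immediately before or after it.
 */
static void *alloc_rx_buffer(size_t rx_len)
{
	size_t padded = ROUND(rx_len, CONFIG_SYS_CACHELINE_SIZE);

	return memalign(CONFIG_SYS_CACHELINE_SIZE, padded);
}

For on-stack buffers, U-Boot's ALLOC_CACHE_ALIGN_BUFFER() macro serves the same purpose.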