static void mxc_udc_queue_update(u8 epnum, u8 *data, u32 len, u32 tx)
{
	mxc_ep_t *ep;
	struct ep_queue_item *tqi, *head, *last;
	int send = 0;
	int in;

	head = last = NULL;
	in = ep_is_in(epnum, tx);
	ep = mxc_udc.mxc_ep + (epnum * 2 + in);
	DBG("epnum = %d, in = %d\n", epnum, in);
	do {
		tqi = ep->ep_dtd[ep->index];
		DBG("%s, index = %d, tqi = %p\n", __func__, ep->index, tqi);
		/* Wait for the hardware to release this dTD before reusing it */
		while (mxc_tqi_is_busy(tqi))
			;
		mxc_tqi_init_page(tqi);
		DBG("%s, line = %d, len = %d\n", __func__, __LINE__, len);
		inc_index(ep->index);
		/* Each dTD carries at most one 4 KiB page of payload */
		send = MIN(len, 0x1000);
		if (data) {
			memcpy((void *)tqi->page_vir, (void *)data, send);
			_dump_buf(tqi->page_vir, send);
		}
		/* Chain the dTDs into a singly linked list */
		if (!head) {
			last = head = tqi;
		} else {
			last->next_item_ptr = tqi->item_dma;
			last->next_item_vir = tqi;
			last = tqi;
		}
		if (!tx)
			tqi->reserved[0] = send;
		/* we set IOC for every dtd */
		tqi->info = ((send << 16) | (1 << 15) | (1 << 7));
		data += send;
		len -= send;
	} while (len);
	last->next_item_ptr = 0x1;	/* end */
	if (ep->tail) {
		/* Append to the pending chain; if the endpoint is already
		 * running, the controller picks the new dTDs up from it. */
		ep->tail->next_item_ptr = head->item_dma;
		ep->tail->next_item_vir = head;
		if (mxc_ep_xfer_is_working(ep, in)) {
			DBG("ep is working\n");
			goto out;
		}
	}
	mxc_update_qh(ep, head, in);
out:
	ep->tail = last;
}
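/*
 * The dTD "info" word programmed above packs three fields of the
 * ChipIdea-style transfer descriptor token: total bytes in bits 30:16,
 * the interrupt-on-complete flag in bit 15, and the active status bit
 * in bit 7. A minimal sketch with named macros (the macro names are
 * illustrative, not taken from this driver):
 */
#define DTD_TOTAL_BYTES(n)	(((n) & 0x7fff) << 16)
#define DTD_IOC			(1 << 15)
#define DTD_STATUS_ACTIVE	(1 << 7)

/* Equivalent to the literal expression used in mxc_udc_queue_update():
 * tqi->info = DTD_TOTAL_BYTES(send) | DTD_IOC | DTD_STATUS_ACTIVE;
 */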
static int setdma_rx(struct s3c_ep *ep, struct s3c_request *req)
{
	u32 *buf, ctrl;
	u32 length, pktcnt;
	u32 ep_num = ep_index(ep);
	struct s3c_udc *udc = ep->dev;
	struct device *dev = &udc->dev->dev;

	aligned_map_buf(req, ep_is_in(ep));
	buf = req->req.buf + req->req.actual;
	prefetchw(buf);

	length = req->req.length - req->req.actual;
	req->req.dma = dma_map_single(dev, buf, length, DMA_FROM_DEVICE);
	req->mapped = 1;

	/* A zero-length transfer still needs one (empty) packet */
	if (length == 0)
		pktcnt = 1;
	else
		pktcnt = (length - 1) / (ep->ep.maxpacket) + 1;

	ctrl = __raw_readl(udc->regs + S3C_UDC_OTG_DOEPCTL(ep_num));

	__raw_writel(virt_to_phys(buf),
		udc->regs + S3C_UDC_OTG_DOEPDMA(ep_num));
	__raw_writel((pktcnt << 19) | (length << 0),
		udc->regs + S3C_UDC_OTG_DOEPTSIZ(ep_num));
	/* Enable the endpoint and clear NAK to start receiving */
	__raw_writel(DEPCTL_EPENA | DEPCTL_CNAK | ctrl,
		udc->regs + S3C_UDC_OTG_DOEPCTL(ep_num));

	DEBUG_OUT_EP("%s: EP%d RX DMA start : DOEPDMA = 0x%x, "
		"DOEPTSIZ = 0x%x, DOEPCTL = 0x%x\n"
		"\tbuf = 0x%p, pktcnt = %d, xfersize = %d\n",
		__func__, ep_num,
		__raw_readl(udc->regs + S3C_UDC_OTG_DOEPDMA(ep_num)),
		__raw_readl(udc->regs + S3C_UDC_OTG_DOEPTSIZ(ep_num)),
		__raw_readl(udc->regs + S3C_UDC_OTG_DOEPCTL(ep_num)),
		buf, pktcnt, length);

	return 0;
}
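/*
 * The DOEPTSIZ write above packs the packet count into bits 28:19 and
 * the transfer size into bits 18:0, which is what the
 * (pktcnt << 19) | (length << 0) expression encodes. The packet-count
 * computation can equivalently be written with the kernel's
 * DIV_ROUND_UP helper; a sketch (the driver keeps the explicit form):
 */
static inline u32 xfer_pktcnt(u32 length, u32 maxpacket)
{
	/* A zero-length transfer still occupies one (empty) packet */
	return length ? DIV_ROUND_UP(length, maxpacket) : 1;
}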
static void mxc_udc_queue_update(u8 epnum, u8 *data, u32 len, u32 tx)
{
	struct mxc_ep_t *ep;
	struct ep_queue_item *tqi, *head, *last;
	int send = 0;
	int in;

	head = last = NULL;
	in = ep_is_in(epnum, tx);
	ep = mxc_udc.mxc_ep + (epnum * 2 + in);
	DBG("epnum = %d, in = %d\n", epnum, in);
	do {
		tqi = ep->ep_dtd[ep->index];
		DBG("%s, index = %d, tqi = %p\n", __func__, ep->index, tqi);
		/* Wait for the hardware to release this dTD before reusing it */
		while (mxc_tqi_is_busy(tqi))
			;
		mxc_tqi_init_page(tqi);
		DBG("%s, line = %d, len = %d\n", __func__, __LINE__, len);
		inc_index(ep->index);
		send = MIN(len, ep->max_pkt_size);
		if (data) {
			memcpy((void *)tqi->page_vir, (void *)data, send);
			_dump_buf((u8 *)(tqi->page_vir), send);
			/* Push the payload out of the D-cache before DMA */
			flush_dcache_range((unsigned long)(tqi->page_vir),
				CACHE_ALIGNED_END(tqi->page_vir, send));
		}
		if (!head) {
			last = head = tqi;
		} else {
			last->next_item_ptr = virt_to_phys(tqi);
			last->next_item_vir = tqi;
			last = tqi;
		}
		if (!tx)
			tqi->reserved[0] = send;
		/* we set IOC for every dtd */
		tqi->info = ((send << 16) | (1 << 15) | (1 << 7));
		data += send;
		len -= send;
		/* The controller fetches the dTD from memory, so flush it too */
		flush_dcache_range((unsigned long)tqi,
			CACHE_ALIGNED_END(tqi, sizeof(struct ep_queue_item)));
	} while (len);
	last->next_item_ptr = 0x1;	/* end */
	flush_dcache_range((unsigned long)last,
		CACHE_ALIGNED_END(last, sizeof(struct ep_queue_item)));
	if (ep->tail) {
		ep->tail->next_item_ptr = virt_to_phys(head);
		ep->tail->next_item_vir = head;
		flush_dcache_range((unsigned long)(ep->tail),
			CACHE_ALIGNED_END(ep->tail, sizeof(struct ep_queue_item)));
		if (mxc_ep_xfer_is_working(ep, in)) {
			DBG("ep is working\n");
			goto out;
		}
	}
	mxc_update_qh(ep, head, in);
out:
	ep->tail = last;
}
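/*
 * CACHE_ALIGNED_END is assumed here to round the end address of each
 * flushed region up to a cache-line boundary, since flush_dcache_range()
 * implementations typically require line-aligned start/stop addresses.
 * A plausible definition, assuming U-Boot's ALIGN() helper and an
 * ARCH_DMA_MINALIGN line size (a sketch, not the driver's actual macro):
 */
#define CACHE_ALIGNED_END(start, length) \
	ALIGN((unsigned long)(start) + (length), ARCH_DMA_MINALIGN)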
static int setdma_tx(struct s3c_ep *ep, struct s3c_request *req)
{
	u32 *buf, ctrl = 0;
	u32 length, pktcnt;
	u32 ep_num = ep_index(ep);
	struct s3c_udc *udc = ep->dev;
	struct device *dev = &udc->dev->dev;

	aligned_map_buf(req, ep_is_in(ep));
	buf = req->req.buf + req->req.actual;
	prefetch(buf);

	length = req->req.length - req->req.actual;
	/* EP0 transfers are limited to one max-packet at a time */
	if (ep_num == EP0_CON)
		length = min_t(u32, length, (u32)ep_maxpacket(ep));

	req->req.actual += length;
	req->req.dma = dma_map_single(dev, buf, length, DMA_TO_DEVICE);
	req->mapped = 1;

	if (length == 0)
		pktcnt = 1;
	else
		pktcnt = (length - 1) / (ep->ep.maxpacket) + 1;

#ifdef DED_TX_FIFO
	/* Write the FIFO number to be used for this endpoint */
	ctrl = __raw_readl(udc->regs + S3C_UDC_OTG_DIEPCTL(ep_num));
	ctrl &= ~DEPCTL_TXFNUM_MASK;
	ctrl |= (ep_num << DEPCTL_TXFNUM_BIT);
	__raw_writel(ctrl, udc->regs + S3C_UDC_OTG_DIEPCTL(ep_num));
#endif

	__raw_writel(virt_to_phys(buf),
		udc->regs + S3C_UDC_OTG_DIEPDMA(ep_num));
	__raw_writel((pktcnt << 19) | (length << 0),
		udc->regs + S3C_UDC_OTG_DIEPTSIZ(ep_num));

	ctrl = __raw_readl(udc->regs + S3C_UDC_OTG_DIEPCTL(ep_num));
	/* ISO IN: pick the starting DATA PID from the (micro)frame parity */
	if ((ctrl & DEPCTL_TYPE_MASK) == DEPCTL_ISO_TYPE) {
		if (ctrl & DEPCTL_EO_FRNUM)
			ctrl |= DEPCTL_SETD0PID;
		else
			ctrl |= DEPCTL_SETD1PID;
	}
	__raw_writel(DEPCTL_EPENA | DEPCTL_CNAK | ctrl,
		udc->regs + S3C_UDC_OTG_DIEPCTL(ep_num));

#ifndef DED_TX_FIFO
	/* Shared FIFO: point EP0's "next endpoint" field at this EP */
	ctrl = __raw_readl(udc->regs + S3C_UDC_OTG_DIEPCTL(EP0_CON));
	ctrl = (ctrl & ~(EP_MASK << DEPCTL_NEXT_EP_BIT)) |
		(ep_num << DEPCTL_NEXT_EP_BIT);
	__raw_writel(ctrl, udc->regs + S3C_UDC_OTG_DIEPCTL(EP0_CON));
#endif

	DEBUG_IN_EP("%s: EP%d TX DMA start : DIEPDMA0 = 0x%x, "
		"DIEPTSIZ0 = 0x%x, DIEPCTL0 = 0x%x\n"
		"\tbuf = 0x%p, pktcnt = %d, xfersize = %d\n",
		__func__, ep_num,
		__raw_readl(udc->regs + S3C_UDC_OTG_DIEPDMA(ep_num)),
		__raw_readl(udc->regs + S3C_UDC_OTG_DIEPTSIZ(ep_num)),
		__raw_readl(udc->regs + S3C_UDC_OTG_DIEPCTL(ep_num)),
		buf, pktcnt, length);

	req->written_bytes = length;
	return length;
}
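/*
 * For isochronous IN endpoints, setdma_tx() selects the starting DATA
 * PID from the even/odd (micro)frame flag so the PID sequence stays in
 * step with the frame the packet will be sent in. The same decision as
 * a standalone helper (a sketch reusing the DEPCTL_* macros above):
 */
static inline u32 iso_start_pid(u32 ctrl)
{
	/* Odd frame next: start with DATA0; even frame: DATA1 */
	return (ctrl & DEPCTL_EO_FRNUM) ? DEPCTL_SETD0PID : DEPCTL_SETD1PID;
}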