/*
 * udc_setup_ep() - configure one hardware endpoint from a usbd endpoint
 * instance.
 *
 * @device: owning usbd device instance (unused here; kept for the usbd
 *          gadget-framework callback signature)
 * @index:  endpoint index from the framework (unused; the endpoint number
 *          is taken from epi->endpoint_address instead)
 * @epi:    endpoint description; NULL is tolerated and ignored
 *
 * Derives direction, transfer type and max packet size from @epi, programs
 * the controller's queue head and endpoint control register, and — for
 * non-control endpoints — allocates one data buffer per transfer-queue item
 * (dtd). No return value; on allocation failure it logs and bails out,
 * leaving the endpoint partially initialized.
 */
void udc_setup_ep(struct usb_device_instance *device, u32 index,
		  struct usb_endpoint_instance *epi)
{
	u8 dir, epnum, zlt, mult;
	u8 ep_type;
	u32 max_pkt_size;
	int ep_addr;
	struct mxc_ep_t *ep;

	if (epi) {
		zlt = 1;	/* zero-length-termination enabled */
		mult = 0;
		ep_addr = epi->endpoint_address;
		epnum = ep_addr & USB_ENDPOINT_NUMBER_MASK;
		DBG("setup ep %d\n", epnum);
		/* pick the tx or rcv attribute set based on direction bit */
		if ((ep_addr & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN) {
			dir = USB_SEND;
			ep_type = epi->tx_attributes;
			max_pkt_size = epi->tx_packetSize;
		} else {
			dir = USB_RECV;
			ep_type = epi->rcv_attributes;
			max_pkt_size = epi->rcv_packetSize;
		}
		if (ep_type == USB_ENDPOINT_XFER_ISOC) {
			/*
			 * High-bandwidth isochronous: wMaxPacketSize bits
			 * 12:11 encode (transactions per microframe - 1);
			 * bits 10:0 are the actual packet size.
			 */
			mult = (u32)(1 + ((max_pkt_size >> 11) & 0x03));
			max_pkt_size = max_pkt_size & 0x7ff;
			DBG("mult = %d\n", mult);
		}
		/* per-endpoint state is laid out as [ep0-out, ep0-in, ...] */
		ep = mxc_udc.mxc_ep + (epnum * 2 + dir);
		ep->epi = epi;
		if (epnum) {
			struct ep_queue_item *tqi;
			int i;

			/* program the queue head before enabling the ep */
			mxc_ep_qh_setup(epnum, dir, ep_type,
					max_pkt_size, zlt, mult);
			mxc_ep_setup(epnum, dir, ep_type);
			mxc_init_ep_dtd(epnum * 2 + dir);

			/* malloc endpoint's dtd's data buffer*/
			ep->max_pkt_size = max_pkt_size;
			for (i = 0; i < EP_TQ_ITEM_SIZE; i++) {
				tqi = ep->ep_dtd[i];
				tqi->page_vir = (u32)malloc_aligned_buffer(
						&tqi->page_unaligned,
						max_pkt_size,
						USB_MEM_ALIGN_BYTE);
				if ((void *)tqi->page_vir == NULL) {
					printf("malloc dtd bufer failure\n");
					return;
				}
				mxc_tqi_init_page(tqi);
				/*
				 * Push the freshly initialized dtd out of
				 * the data cache so the controller's DMA
				 * sees a coherent descriptor.
				 */
				flush_dcache_range((unsigned long)tqi,
						CACHE_ALIGNED_END(tqi,
						sizeof(struct ep_queue_item)));
			}
		}
	}
}
/*
 * mxc_udc_queue_update() - build a dtd chain for one transfer and hand it
 * to the controller.
 *
 * @epnum: endpoint number
 * @data:  payload to send (may be NULL for a zero-copy/status stage)
 * @len:   total payload length; 0 still queues one zero-length dtd
 * @tx:    nonzero for an IN (device-to-host) transfer
 *
 * Splits @len across transfer-queue items, links them, terminates the
 * chain, and either appends it to a still-active chain or primes the
 * queue head. Busy-waits for each dtd to be released by the controller
 * before reusing it.
 */
static void mxc_udc_queue_update(u8 epnum, u8 *data, u32 len, u32 tx)
{
	mxc_ep_t *ep;
	struct ep_queue_item *tqi, *head, *last;
	int send = 0;
	int in;

	head = last = NULL;
	in = ep_is_in(epnum, tx);
	ep = mxc_udc.mxc_ep + (epnum * 2 + in);
	DBG("epnum = %d, in = %d\n", epnum, in);
	do {
		tqi = ep->ep_dtd[ep->index];
		DBG("%s, index = %d, tqi = %p\n", __func__, ep->index, tqi);
		/* spin until the controller releases this dtd */
		while (mxc_tqi_is_busy(tqi))
			;
		mxc_tqi_init_page(tqi);
		DBG("%s, line = %d, len = %d\n", __func__, __LINE__, len);
		inc_index(ep->index);
		/*
		 * Clamp each chunk to the dtd's data buffer size: page_vir
		 * was allocated with only max_pkt_size bytes (see
		 * udc_setup_ep/mxc_malloc_ep0_ptr), so the previous fixed
		 * 0x1000 clamp could overflow the buffer whenever
		 * max_pkt_size < 0x1000.
		 */
		send = MIN(len, ep->max_pkt_size);
		if (data) {
			memcpy((void *)tqi->page_vir, (void *)data, send);
			_dump_buf(tqi->page_vir, send);
		}
		if (!head)
			last = head = tqi;
		else {
			/* link the previous dtd to this one (phys + virt) */
			last->next_item_ptr = tqi->item_dma;
			last->next_item_vir = tqi;
			last = tqi;
		}
		if (!tx)
			/* remember expected RX length for completion */
			tqi->reserved[0] = send;
		/* we set IOC for every dtd: total bytes | IOC | Active */
		tqi->info = ((send << 16) | (1 << 15) | (1 << 7));
		data += send;
		len -= send;
	} while (len);

	last->next_item_ptr = 0x1; /* end: terminate bit */
	if (ep->tail) {
		/* append the new chain behind the previous one */
		ep->tail->next_item_ptr = head->item_dma;
		ep->tail->next_item_vir = head;
		if (mxc_ep_xfer_is_working(ep, in)) {
			/* controller will follow the link itself */
			DBG("ep is working\n");
			goto out;
		}
	}
	/* endpoint idle: prime the queue head with the new chain */
	mxc_update_qh(ep, head, in);
out:
	ep->tail = last;
}
/*
 * Allocate a DMA data buffer for each of endpoint 0's transfer-queue
 * items and record the control max packet size on the endpoint.
 *
 * Returns 0 on success, -1 as soon as any allocation fails (earlier
 * buffers are not freed).
 */
static int mxc_malloc_ep0_ptr(mxc_ep_t *ep)
{
	const int pkt_len = USB_MAX_CTRL_PAYLOAD;
	int idx;

	ep->max_pkt_size = pkt_len;
	for (idx = 0; idx < EP_TQ_ITEM_SIZE; idx++) {
		struct ep_queue_item *item = ep->ep_dtd[idx];

		item->page_vir = (u32)malloc_dma_buffer(&item->page_dma,
							pkt_len,
							USB_MEM_ALIGN_BYTE);
		if ((void *)item->page_vir == NULL) {
			printf("malloc ep's dtd bufer failure, i=%d\n", idx);
			return -1;
		}
		mxc_tqi_init_page(item);
	}

	return 0;
}
static int mxc_malloc_ep0_ptr(struct mxc_ep_t *ep) { int i; struct ep_queue_item *tqi; int max_pkt_size = USB_MAX_CTRL_PAYLOAD; ep->max_pkt_size = max_pkt_size; for (i = 0; i < EP_TQ_ITEM_SIZE; i++) { tqi = ep->ep_dtd[i]; tqi->page_vir = (u32)malloc_aligned_buffer(&tqi->page_unaligned, max_pkt_size, USB_MEM_ALIGN_BYTE); if ((void *)tqi->page_vir == NULL) { printf("malloc ep's dtd bufer failure, i=%d\n", i); return -1; } mxc_tqi_init_page(tqi); /*flush dtd's config field to physical memory*/ flush_dcache_range((unsigned long)tqi, CACHE_ALIGNED_END(tqi, sizeof(struct ep_queue_item))); } return 0; }
/*
 * mxc_udc_queue_update() - build and queue a dtd chain (cache-coherent
 * variant).
 *
 * @epnum: endpoint number
 * @data:  payload to copy into the dtd buffers (may be NULL)
 * @len:   total payload length; 0 still queues one zero-length dtd
 * @tx:    nonzero for an IN (device-to-host) transfer
 *
 * Same flow as the non-cached variant, but every descriptor and data
 * buffer is explicitly flushed from the data cache after being written,
 * in the order the controller will consume them: payload first, then the
 * dtd, then the terminated tail, then the relinked previous tail.
 */
static void mxc_udc_queue_update(u8 epnum, u8 *data, u32 len, u32 tx)
{
	struct mxc_ep_t *ep;
	struct ep_queue_item *tqi, *head, *last;
	int send = 0;
	int in;

	head = last = NULL;
	in = ep_is_in(epnum, tx);
	ep = mxc_udc.mxc_ep + (epnum * 2 + in);
	DBG("epnum = %d, in = %d\n", epnum, in);
	do {
		tqi = ep->ep_dtd[ep->index];
		DBG("%s, index = %d, tqi = %p\n", __func__, ep->index, tqi);
		/* spin until the controller releases this dtd */
		while (mxc_tqi_is_busy(tqi))
			;
		mxc_tqi_init_page(tqi);
		DBG("%s, line = %d, len = %d\n", __func__, __LINE__, len);
		inc_index(ep->index);
		/* one dtd carries at most one buffer's worth of data */
		send = MIN(len, ep->max_pkt_size);
		if (data) {
			memcpy((void *)tqi->page_vir, (void *)data, send);
			_dump_buf((u8 *)(tqi->page_vir), send);
			/* make the payload visible to the DMA engine */
			flush_dcache_range((unsigned long)(tqi->page_vir),
				CACHE_ALIGNED_END(tqi->page_vir, send));
		}
		if (!head)
			last = head = tqi;
		else {
			/* link the previous dtd to this one (phys + virt) */
			last->next_item_ptr = virt_to_phys(tqi);
			last->next_item_vir = tqi;
			last = tqi;
		}
		if (!tx)
			/* remember expected RX length for completion */
			tqi->reserved[0] = send;
		/* we set IOC for every dtd: total bytes | IOC | Active */
		tqi->info = ((send << 16) | (1 << 15) | (1 << 7));
		data += send;
		len -= send;
		/* flush the fully filled-in descriptor */
		flush_dcache_range((unsigned long)tqi,
			CACHE_ALIGNED_END(tqi, sizeof(struct ep_queue_item)));
	} while (len);

	last->next_item_ptr = 0x1; /* end */
	/* re-flush the tail now that its terminate bit is set */
	flush_dcache_range((unsigned long)last,
		CACHE_ALIGNED_END(last, sizeof(struct ep_queue_item)));
	if (ep->tail) {
		/* append the new chain behind the previous one */
		ep->tail->next_item_ptr = virt_to_phys(head);
		ep->tail->next_item_vir = head;
		/* flush the relinked old tail before checking activity */
		flush_dcache_range((unsigned long)(ep->tail),
			CACHE_ALIGNED_END(ep->tail,
				sizeof(struct ep_queue_item)));
		if (mxc_ep_xfer_is_working(ep, in)) {
			/* controller will follow the link itself */
			DBG("ep is working\n");
			goto out;
		}
	}
	/* endpoint idle: prime the queue head with the new chain */
	mxc_update_qh(ep, head, in);
out:
	ep->tail = last;
}