static void handle_out_ep(int ep) { struct usb_ctrlrequest *req = (void*)AS3525_UNCACHED_ADDR(&setup_desc->data1); int ep_sts = USB_OEP_STS(ep) & ~USB_OEP_STS_MASK(ep); if (ep > 3) panicf("out_ep > 3!?"); USB_OEP_STS(ep) = ep_sts; /* ACK */ if (ep_sts & USB_EP_STAT_BNA) { /* Buffer was not set up */ int ctrl = USB_OEP_CTRL(ep); logf("ep%d OUT, status %x ctrl %x (BNA)\n", ep, ep_sts, ctrl); panicf("ep%d OUT 0x%x 0x%x (BNA)", ep, ep_sts, ctrl); ep_sts &= ~USB_EP_STAT_BNA; } if (ep_sts & USB_EP_STAT_OUT_RCVD) { struct usb_dev_dma_desc *uc_desc = endpoints[ep][1].uc_desc; int dma_sts = uc_desc->status; int dma_len = dma_sts & 0xffff; if (!(dma_sts & USB_DMA_DESC_ZERO_LEN)) { logf("EP%d OUT token, st:%08x len:%d frm:%x data=%s epstate=%d\n", ep, dma_sts & 0xf8000000, dma_len, (dma_sts >> 16) & 0x7ff, make_hex(uc_desc->data_ptr, dma_len), endpoints[ep][1].state); /* * If parts of the just dmaed range are in cache, dump them now. */ discard_dcache_range(uc_desc->data_ptr, dma_len); } else{
/* Kick off a DMA transfer on endpoint `ep`.
 *
 * ep  : endpoint number (used to index endpoints[] and the DEP* registers)
 * ptr : data buffer (virtual address; converted to physical for the DMA)
 * len : transfer length in bytes (0 => a single zero-length packet)
 * out : true for an OUT (host->device) transfer, false for IN (device->host)
 *
 * Marks the endpoint busy and programs the controller's DMA address,
 * transfer-size and control registers; completion is reported elsewhere
 * (endpoint->status stays -1 until then). Register write order matters:
 * DMA address and size must be set before DEPCTL arms the endpoint. */
static void ep_transfer(int ep, void *ptr, int len, bool out)
{
    /* disable interrupts to avoid any race */
    int oldlevel = disable_irq_save();

    /* bookkeeping for the completion/interrupt path */
    struct ep_type *endpoint = &endpoints[ep][out ? DIR_OUT : DIR_IN];
    endpoint->busy = true;
    endpoint->size = len;
    endpoint->status = -1;   /* -1 = transfer still in flight */

    /* a previously stalled OUT endpoint must be un-stalled before re-arming */
    if (out)
        DEPCTL(ep, out) &= ~DEPCTL_stall;

    /* packet count = ceil(len / max-packet-size); hardware needs at least 1
     * packet even for a zero-length transfer */
    int mps = usb_drv_port_speed() ? 512 : 64;
    int nb_packets = (len + mps - 1) / mps;
    if (nb_packets == 0)
        nb_packets = 1;

    DEPDMA(ep, out) = len ? (void*)PHYSICAL_ADDR(ptr) : NULL;
    DEPTSIZ(ep, out) = (nb_packets << DEPTSIZ_pkcnt_bitp) | len;
    /* OUT: DMA will write the buffer behind the cache's back -> drop stale
     * cache lines.  IN: make sure the data the DMA reads is in memory. */
    if(out)
        discard_dcache_range(ptr, len);
    else
        commit_dcache_range(ptr, len);

    logf("pkt=%d dma=%lx", nb_packets, DEPDMA(ep, out));

//    if (!out) while (((GNPTXSTS & 0xffff) << 2) < MIN(mps, length));

    /* arm the endpoint and clear NAK status last, after DMA is set up */
    DEPCTL(ep, out) |= DEPCTL_epena | DEPCTL_cnak;

    restore_irq(oldlevel);
}
static enum imx233_i2c_error_t imx233_i2c_finalize(void) { discard_dcache_range(i2c_buffer, I2C_BUFFER_SIZE); for(int i = 0; i < i2c_nr_stages; i++) { struct i2c_dma_command_t *c = &i2c_stage[i]; if(BF_RDX(c->dma.cmd, APB_CHx_CMD, COMMAND) == BV_APB_CHx_CMD_COMMAND__WRITE) memcpy(c->dst, c->src, BF_RDX(c->dma.cmd, APB_CHx_CMD, XFER_COUNT)); } return I2C_SUCCESS; }
int usb_drv_recv(int ep, void *ptr, int len) { struct usb_dev_dma_desc *uc_desc = endpoints[ep][1].uc_desc; ep &= 0x7f; logf("usb_drv_recv(%d,%x,%d)\n", ep, (int)ptr, len); if (len > USB_DMA_DESC_RXTX_BYTES) panicf("usb_recv: len=%d > %d", len, USB_DMA_DESC_RXTX_BYTES); if ((int)ptr & 31) { logf("addr %08x not aligned!\n", (int)ptr); } endpoints[ep][1].state |= EP_STATE_BUSY; endpoints[ep][1].len = len; endpoints[ep][1].rc = -1; /* remove data buffer from cache */ discard_dcache_range(ptr, len); /* DMA setup */ uc_desc->status = USB_DMA_DESC_BS_HST_RDY | USB_DMA_DESC_LAST | len; if (len == 0) { uc_desc->status |= USB_DMA_DESC_ZERO_LEN; uc_desc->data_ptr = 0; } else { uc_desc->data_ptr = AS3525_PHYSICAL_ADDR(ptr); } USB_OEP_DESC_PTR(ep) = AS3525_PHYSICAL_ADDR((int)&dmadescs[ep][1]); USB_OEP_STS(ep) = USB_EP_STAT_OUT_RCVD; /* clear status */ /* Make sure receive DMA is on */ if (!(USB_DEV_CTRL & USB_DEV_CTRL_RDE)){ USB_DEV_CTRL |= USB_DEV_CTRL_RDE; if (!(USB_DEV_CTRL & USB_DEV_CTRL_RDE)) logf("failed to enable RDE!\n"); } USB_OEP_CTRL(ep) |= USB_EP_CTRL_CNAK; /* Go! */ if (USB_OEP_CTRL(ep) & USB_EP_CTRL_NAK) { int i = 0; while (USB_OEP_CTRL(ep) & USB_EP_CTRL_NAK) { USB_OEP_CTRL(ep) |= USB_EP_CTRL_CNAK; /* Go! */ i++; } logf("ep%d CNAK needed %d retries CTRL=%x\n", ep, i, (int)USB_OEP_CTRL(ep)); } return 0; }
/* Commit and/or discard all DMA descriptors and buffers pointed by them,
 * handle circular lists. At the same time, convert virtual pointers to
 * real ones */
static void dma_commit_and_discard(unsigned chan, struct apb_dma_command_t *cmd)
{
    /* We handle circular descriptors by using unused bits:
     * bits 8-11 are not used by the hardware so we first go through the whole
     * list and mark them all a special value at the same time we commit buffers
     * and then we go through the list another time to clear the mark and
     * commit the descriptors */

    /* Pass 1: mark each descriptor (so a circular list terminates), do
     * cache maintenance on its data buffer and convert the buffer pointer
     * to a physical address.  Descriptors themselves are NOT committed yet
     * because pass 2 still needs to modify them. */
    struct apb_dma_command_t *cur = cmd;
    while((cur->cmd & HW_APB_CHx_CMD__UNUSED_BM) != HW_APB_CHx_CMD__UNUSED_MAGIC)
    {
        cur->cmd = (cur->cmd & ~HW_APB_CHx_CMD__UNUSED_BM) | HW_APB_CHx_CMD__UNUSED_MAGIC;
        int op = cur->cmd & HW_APB_CHx_CMD__COMMAND_BM;
        int sz = __XTRACT_EX(cur->cmd, HW_APB_CHx_CMD__XFER_COUNT);
        /* device > host: discard */
        if(op == HW_APB_CHx_CMD__COMMAND__WRITE)
            discard_dcache_range(cur->buffer, sz);
        /* host > device: commit and discard */
        else if(op == HW_APB_CHx_CMD__COMMAND__READ)
            commit_discard_dcache_range(cur->buffer, sz);
        /* count buffers that don't start on a cache line (diagnostic:
         * cache ops on such buffers can clobber neighbouring data) */
        if((uint32_t)cur->buffer % CACHEALIGN_SIZE)
            apb_nr_unaligned[chan]++;
        /* Virtual to physical buffer pointer conversion */
        cur->buffer = PHYSICAL_ADDR(cur->buffer);
        /* chain ? */
        if(cur->cmd & HW_APB_CHx_CMD__CHAIN)
            cur = cur->next;
        else
            break;
    }

    /* Pass 2: walk the list again (the cleared mark is the termination
     * condition now), convert each `next` pointer to physical and commit
     * the finished descriptor to memory for the DMA engine.  The virtual
     * `next` must be saved before conversion so we can keep walking. */
    cur = cmd;
    while((cur->cmd & HW_APB_CHx_CMD__UNUSED_BM) != 0)
    {
        cur->cmd = cur->cmd & ~HW_APB_CHx_CMD__UNUSED_BM;
        /* descriptor size includes the trailing PIO command words */
        int sz = __XTRACT_EX(cur->cmd, HW_APB_CHx_CMD__CMDWORDS) * sizeof(uint32_t);
        /* commit descriptor and discard descriptor */
        /* chain ? */
        if(cur->cmd & HW_APB_CHx_CMD__CHAIN)
        {
            struct apb_dma_command_t *next = cur->next;
            cur->next = PHYSICAL_ADDR(cur->next);
            commit_dcache_range(cur, sizeof(struct apb_dma_command_t) + sz);
            cur = next;
        }
        else
        {
            commit_dcache_range(cur, sizeof(struct apb_dma_command_t) + sz);
            break;
        }
    }
}
/* Perform a memcpy (or constant fill) of `len` bytes on DCP channel `ch`.
 *
 * ch   : DCP channel to use
 * fill : when true, `src` is the 32-bit fill pattern itself, not an address
 * src  : source buffer (virtual) or fill value, depending on `fill`
 * dst  : destination buffer (virtual)
 * len  : number of bytes to copy/fill
 *
 * Builds the channel's packet descriptor, performs the required cache
 * maintenance and hands the job to the DCP engine. */
enum imx233_dcp_error_t imx233_dcp_memcpy_ex(int ch, bool fill, const void *src, void *dst, size_t len)
{
    /* assemble the control word first: memcopy with interrupt + semaphore,
     * plus constant-fill mode when requested */
    uint32_t ctrl0 = HW_DCP_CTRL0__INTERRUPT_ENABLE
                   | HW_DCP_CTRL0__ENABLE_MEMCOPY
                   | HW_DCP_CTRL0__DECR_SEMAPHORE;
    if(fill)
        ctrl0 |= HW_DCP_CTRL0__CONSTANT_FILL;

    /* fill in the packet descriptor for this channel */
    channel_packet[ch].next = 0;
    channel_packet[ch].ctrl0 = ctrl0;
    channel_packet[ch].ctrl1 = 0;
    /* in fill mode the "source" field carries the pattern verbatim;
     * otherwise the DCP needs a physical address */
    channel_packet[ch].src = fill ? (uint32_t)src
                                  : (uint32_t)PHYSICAL_ADDR(src);
    channel_packet[ch].dst = (uint32_t)PHYSICAL_ADDR(dst);
    channel_packet[ch].size = len;
    channel_packet[ch].payload = 0;
    channel_packet[ch].status = 0;

    /* write-back src if not filling, discard dst */
    if(!fill)
        commit_discard_dcache_range(src, len);
    discard_dcache_range(dst, len);

    /* do the job */
    return imx233_dcp_job(ch);
}