static enum imx233_dcp_error_t imx233_dcp_job(int ch)
{
    /* if IRQs are not enabled, don't enable the channel interrupt and poll
     * for completion instead */
    bool irqs_on = irq_enabled();

    /* enable channel, clear interrupt, enable interrupt */
    imx233_icoll_enable_interrupt(INT_SRC_DCP, true);
    if(irqs_on)
        __REG_SET(HW_DCP_CTRL) = HW_DCP_CTRL__CHANNEL_INTERRUPT_ENABLE(ch);
    __REG_CLR(HW_DCP_STAT) = HW_DCP_STAT__IRQ(ch);
    __REG_SET(HW_DCP_CHANNELCTRL) = HW_DCP_CHANNELCTRL__ENABLE_CHANNEL(ch);

    /* write back the packet so the DCP sees it in memory */
    commit_discard_dcache_range(&channel_packet[ch],
                                sizeof(struct imx233_dcp_packet_t));

    /* point the channel at the packet, then write 1 to the semaphore to run
     * the job */
    HW_DCP_CHxCMDPTR(ch) = (uint32_t)PHYSICAL_ADDR(&channel_packet[ch]);
    HW_DCP_CHxSEMA(ch) = 1;

    /* wait for completion */
    if(irqs_on)
        semaphore_wait(&channel_sema[ch], TIMEOUT_BLOCK);
    else
        while(__XTRACT_EX(HW_DCP_CHxSEMA(ch), HW_DCP_CHxSEMA__VALUE))
            udelay(10);

    /* disable channel and interrupt */
    __REG_CLR(HW_DCP_CTRL) = HW_DCP_CTRL__CHANNEL_INTERRUPT_ENABLE(ch);
    __REG_CLR(HW_DCP_CHANNELCTRL) = HW_DCP_CHANNELCTRL__ENABLE_CHANNEL(ch);

    /* read status */
    return get_error_status(ch);
}
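/* A minimal sketch of the interrupt-handler side of the handshake above: it
 * acknowledges the per-channel IRQ and releases the semaphore that
 * imx233_dcp_job() blocks on. The handler name INT_DCP and the channel count
 * DCP_NUM_CHANNELS are assumptions; only the register accesses and
 * semaphore_release() mirror what the job function expects. */
#define DCP_NUM_CHANNELS 4 /* assumed channel count */

void INT_DCP(void)
{
    for(int ch = 0; ch < DCP_NUM_CHANNELS; ch++)
    {
        if(HW_DCP_STAT & HW_DCP_STAT__IRQ(ch))
        {
            /* acknowledge the channel interrupt */
            __REG_CLR(HW_DCP_STAT) = HW_DCP_STAT__IRQ(ch);
            /* wake the thread blocked in imx233_dcp_job() */
            semaphore_release(&channel_sema[ch]);
        }
    }
}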
/* Commit and/or discard all DMA descriptors and the buffers they point to,
 * handling circular lists. At the same time, convert virtual pointers to
 * physical ones. */
static void dma_commit_and_discard(unsigned chan, struct apb_dma_command_t *cmd)
{
    /* We handle circular descriptor lists using otherwise unused bits:
     * bits 8-11 of the command word are ignored by the hardware, so we first
     * walk the whole list and mark each descriptor with a magic value while
     * committing its buffer, then walk it a second time to clear the mark
     * and commit the descriptors themselves. Each pass terminates because it
     * stops as soon as it reaches a descriptor it has already processed. */
    struct apb_dma_command_t *cur = cmd;
    while((cur->cmd & HW_APB_CHx_CMD__UNUSED_BM) != HW_APB_CHx_CMD__UNUSED_MAGIC)
    {
        cur->cmd = (cur->cmd & ~HW_APB_CHx_CMD__UNUSED_BM) | HW_APB_CHx_CMD__UNUSED_MAGIC;
        int op = cur->cmd & HW_APB_CHx_CMD__COMMAND_BM;
        int sz = __XTRACT_EX(cur->cmd, HW_APB_CHx_CMD__XFER_COUNT);
        /* device > host: discard */
        if(op == HW_APB_CHx_CMD__COMMAND__WRITE)
            discard_dcache_range(cur->buffer, sz);
        /* host > device: commit and discard */
        else if(op == HW_APB_CHx_CMD__COMMAND__READ)
            commit_discard_dcache_range(cur->buffer, sz);
        if((uint32_t)cur->buffer % CACHEALIGN_SIZE)
            apb_nr_unaligned[chan]++;
        /* virtual to physical buffer pointer conversion */
        cur->buffer = PHYSICAL_ADDR(cur->buffer);
        /* chain ? */
        if(cur->cmd & HW_APB_CHx_CMD__CHAIN)
            cur = cur->next;
        else
            break;
    }

    cur = cmd;
    while((cur->cmd & HW_APB_CHx_CMD__UNUSED_BM) != 0)
    {
        cur->cmd = cur->cmd & ~HW_APB_CHx_CMD__UNUSED_BM;
        int sz = __XTRACT_EX(cur->cmd, HW_APB_CHx_CMD__CMDWORDS) * sizeof(uint32_t);
        /* commit and discard the descriptor itself; for chained descriptors,
         * save the virtual next pointer before overwriting it with the
         * physical one */
        if(cur->cmd & HW_APB_CHx_CMD__CHAIN)
        {
            struct apb_dma_command_t *next = cur->next;
            cur->next = PHYSICAL_ADDR(cur->next);
            commit_dcache_range(cur, sizeof(struct apb_dma_command_t) + sz);
            cur = next;
        }
        else
        {
            commit_dcache_range(cur, sizeof(struct apb_dma_command_t) + sz);
            break;
        }
    }
}
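/* An illustrative sketch (not part of the driver) of why the marking pass
 * matters: a single descriptor chained to itself is the smallest circular
 * list, typical for looping audio DMA. Without the UNUSED_MAGIC mark, either
 * walk above would never terminate. CACHEALIGN_ATTR and the
 * HW_APB_CHx_CMD__XFER_COUNT_BP shift are assumptions about the headers. */
static struct apb_dma_command_t ring_desc;
static uint8_t ring_buf[512] CACHEALIGN_ATTR;

static void commit_ring_example(void)
{
    ring_desc.next = &ring_desc;  /* circular: chains back to itself */
    ring_desc.buffer = ring_buf;
    ring_desc.cmd = HW_APB_CHx_CMD__COMMAND__READ /* host > device */
                  | HW_APB_CHx_CMD__CHAIN
                  | 512 << HW_APB_CHx_CMD__XFER_COUNT_BP; /* assumed macro */
    /* pass one stops when it meets its own mark again; pass two stops when
     * the mark has already been cleared */
    dma_commit_and_discard(0, &ring_desc);
}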
static void hdma_i2s_transfer(const void *addr, size_t size)
{
    SCU_CLKCFG &= ~(1<<3);           /* enable HDMA clock (clear the gate bit) */

    commit_discard_dcache_range(addr, size);

    HDMA_ISRC0 = (uint32_t)addr;     /* source address */
    HDMA_IDST0 = (uint32_t)&I2S_TXR; /* i2s tx fifo */
    HDMA_ICNT0 = (uint16_t)((size>>2) - 1); /* number of dma transactions
                                             * of transfer size bytes
                                             * (zero based) */

    HDMA_ISR = ((1<<13) | /* mask ch1 accumulation overflow irq */
                (1<<12) | /* mask ch0 accumulation overflow irq */
                (1<<11) | /* mask ch1 page count down irq */
                (0<<10) | /* UNMASK ch0 page count down irq */
                (1<<9)  | /* mask ch0 transfer irq */
                (1<<8)  | /* mask ch1 transfer irq */
                (0<<5)  | /* clear ch1 accumulation overflow flag */
                (0<<4)  | /* clear ch0 accumulation overflow flag */
                (0<<3)  | /* clear ch1 count down to zero flag */
                (0<<2)  | /* clear ch0 count down to zero flag */
                (0<<1)  | /* clear ch1 active flag */
                (0<<0));  /* clear ch0 active flag */

    HDMA_ISCNT0 = 0x07;   /* slice size in transfer size units (zero based) */
    HDMA_IPNCNTD0 = 0x01; /* page count */

    HDMA_CON0 = ((0<<23) | /* page mode */
                 (1<<22) | /* slice mode */
                 (1<<21) | /* DMA enable */
                 (1<<18) | /* generate interrupt */
                 (0<<16) | /* on-the-fly is not supported by rk27xx */
                 (5<<13) | /* transfer mode inc8 */
                 (6<<9)  | /* external hdreq from i2s tx */
                 (0<<7)  | /* increment source address */
                 (1<<5)  | /* fixed destination address */
                 (2<<3)  | /* transfer size = 32-bit word */
                 (0<<1)  | /* command of software DMA (not relevant) */
                 (1<<0));  /* hardware trigger DMA mode */
}
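/* A hedged usage sketch: feeding one buffer of PCM samples to the I2S TX
 * FIFO. The buffer name and size are hypothetical; the real constraints
 * follow from the code above: the address must be word aligned and the size
 * a multiple of 4, since HDMA_ICNT0 counts 32-bit transfers. */
static int16_t pcm_buf[2048] __attribute__((aligned(4)));

static void play_buffer(void)
{
    /* fill pcm_buf with samples first, then hand it to the DMA */
    hdma_i2s_transfer(pcm_buf, sizeof(pcm_buf));
}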
void dma_enable_channel(int channel, struct dma_request *request)
{
    struct dma_channel_regs *regs = dma_regs [channel];

    /* TODO - transfer sizes (assumes word) */
    if (DMA_GET_SRC(request->source_map, channel) == DMA_INVALID)
        panicf ("DMA: invalid channel");

    /* setup a transfer on specified channel */
    dma_disable_channel (channel);

    if((unsigned long)request->source_addr < UNCACHED_BASE_ADDR)
        regs->disrc = (unsigned long)request->source_addr + UNCACHED_BASE_ADDR;
    else
        regs->disrc = (unsigned long)request->source_addr;
    regs->disrcc = request->source_control;

    if((unsigned long)request->dest_addr < UNCACHED_BASE_ADDR)
        regs->didst = (unsigned long)request->dest_addr + UNCACHED_BASE_ADDR;
    else
        regs->didst = (unsigned long)request->dest_addr;
    regs->didstc = request->dest_control;

    regs->dcon = request->control | request->count |
                 DMA_GET_SRC(request->source_map, channel) * DCON_HWSRCSEL;
    dma_state [channel].callback = request->callback;

    /* Activate the channel */
    commit_discard_dcache_range((void *)request->dest_addr, request->count * 4);
    dma_state [channel].status |= STATUS_CHANNEL_ACTIVE;
    regs->dmasktrig = DMASKTRIG_ON;

    if ((request->control & DCON_HW_SEL) == 0)
    {
        /* Start DMA */
        regs->dmasktrig |= DMASKTRIG_SW_TRIG;
    }
}
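/* A hedged usage sketch of dma_enable_channel(): a software-triggered
 * request. All field values below are illustrative; the source_map,
 * source_control and dest_control encodings are board specific and simply
 * zeroed here, and the channel number is arbitrary. */
static void start_copy_example(void *dst, const void *src, size_t words)
{
    struct dma_request req;

    req.source_addr = (void *)src;
    req.source_control = 0;      /* board-specific DISRCC bits elided */
    req.dest_addr = dst;
    req.dest_control = 0;        /* board-specific DIDSTC bits elided */
    req.source_map = 0;          /* must map channel 0 to a valid source */
    req.count = words;           /* driver assumes word-sized transfers */
    req.control = 0;             /* DCON_HW_SEL clear: software trigger */
    req.callback = NULL;         /* or a completion callback */

    dma_enable_channel(0, &req); /* starts at once via DMASKTRIG_SW_TRIG */
}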
enum imx233_dcp_error_t imx233_dcp_blit_ex(int ch, bool fill, const void *src,
                                           size_t w, size_t h, void *dst, size_t out_w)
{
    /* prepare packet */
    channel_packet[ch].next = 0;
    channel_packet[ch].ctrl0 = HW_DCP_CTRL0__INTERRUPT_ENABLE |
        HW_DCP_CTRL0__ENABLE_MEMCOPY | HW_DCP_CTRL0__DECR_SEMAPHORE |
        HW_DCP_CTRL0__ENABLE_BLIT |
        (fill ? HW_DCP_CTRL0__CONSTANT_FILL : 0);
    channel_packet[ch].ctrl1 = out_w;
    channel_packet[ch].src = (uint32_t)(fill ? src : PHYSICAL_ADDR(src));
    channel_packet[ch].dst = (uint32_t)PHYSICAL_ADDR(dst);
    channel_packet[ch].size = w | h << HW_DCP_SIZE__NUMBER_LINES_BP;
    channel_packet[ch].payload = 0;
    channel_packet[ch].status = 0;

    /* we cannot discard the output buffer here because it is not contiguous
     * (its lines are out_w apart), so only commit the source */
    if(!fill)
        commit_discard_dcache_range(src, w * h);

    /* do the job */
    return imx233_dcp_job(ch);
}
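/* A hedged usage sketch: blitting a w x h icon into a larger 8-bit
 * framebuffer whose line stride is fb_stride. All names are hypothetical.
 * Per the comment above, the driver does no cache maintenance on the
 * destination, so the caller must ensure no dirty cache lines cover the
 * blit rectangle. */
static enum imx233_dcp_error_t blit_icon(uint8_t *fb, size_t fb_stride,
                                         const uint8_t *icon,
                                         size_t icon_w, size_t icon_h,
                                         size_t x, size_t y)
{
    return imx233_dcp_blit_ex(0, false, icon, icon_w, icon_h,
                              fb + y * fb_stride + x, fb_stride);
}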
enum imx233_dcp_error_t imx233_dcp_memcpy_ex(int ch, bool fill, const void *src,
                                             void *dst, size_t len)
{
    /* prepare packet */
    channel_packet[ch].next = 0;
    channel_packet[ch].ctrl0 = HW_DCP_CTRL0__INTERRUPT_ENABLE |
        HW_DCP_CTRL0__ENABLE_MEMCOPY | HW_DCP_CTRL0__DECR_SEMAPHORE |
        (fill ? HW_DCP_CTRL0__CONSTANT_FILL : 0);
    channel_packet[ch].ctrl1 = 0;
    channel_packet[ch].src = (uint32_t)(fill ? src : PHYSICAL_ADDR(src));
    channel_packet[ch].dst = (uint32_t)PHYSICAL_ADDR(dst);
    channel_packet[ch].size = len;
    channel_packet[ch].payload = 0;
    channel_packet[ch].status = 0;

    /* write back src if not filling, discard dst */
    if(!fill)
        commit_discard_dcache_range(src, len);
    discard_dcache_range(dst, len);

    /* do the job */
    return imx233_dcp_job(ch);
}
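/* A hedged usage sketch of the two modes. Note that when fill is true, src
 * is interpreted as the fill pattern itself rather than dereferenced, which
 * is why the packet setup above skips the PHYSICAL_ADDR translation in that
 * case. Buffer names are hypothetical and the channel number is arbitrary. */
static void memcpy_examples(void *dst, const void *src, size_t len)
{
    /* plain DMA copy: src is committed and dst discarded by the driver */
    imx233_dcp_memcpy_ex(0, false, src, dst, len);
    /* constant fill: zero the destination buffer */
    imx233_dcp_memcpy_ex(0, true, (const void *)0, dst, len);
}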