void CPU::hdma_update(unsigned i) { dma_add_clocks(4); regs.mdr = dma_read((channel[i].source_bank << 16) | channel[i].hdma_addr); dma_add_clocks(4); dma_write(false); if((channel[i].line_counter & 0x7f) == 0) { channel[i].line_counter = regs.mdr; channel[i].hdma_addr++; channel[i].hdma_completed = (channel[i].line_counter == 0); channel[i].hdma_do_transfer = !channel[i].hdma_completed; if(channel[i].indirect) { dma_add_clocks(4); regs.mdr = dma_read(hdma_addr(i)); channel[i].indirect_addr = regs.mdr << 8; dma_add_clocks(4); dma_write(false); if(!channel[i].hdma_completed || hdma_active_after(i)) { dma_add_clocks(4); regs.mdr = dma_read(hdma_addr(i)); channel[i].indirect_addr >>= 8; channel[i].indirect_addr |= regs.mdr << 8; dma_add_clocks(4); dma_write(false); }
/*
 * Stop a running DMA transfer on logical channel 'lch'.
 *
 * Masks the channel's interrupts (OMAP1 path only here), clears the
 * CCR enable bit, and — when this channel heads a link chain — walks
 * the next_lch list disabling the link on every member.  A visited map
 * guarantees termination even if the chain is circular.
 */
void omap_stop_dma(int lch)
{
	u32 l;

	/* Disable all interrupts on the channel */
	if (cpu_class_is_omap1())
		dma_write(0, CICR(lch));

	/* Clear the hardware enable bit so the transfer halts */
	l = dma_read(CCR(lch));
	l &= ~OMAP_DMA_CCR_EN;
	dma_write(l, CCR(lch));

	if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
		int next_lch, cur_lch = lch;
		char dma_chan_link_map[OMAP_DMA4_LOGICAL_DMA_CH_COUNT];

		memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
		do {
			/* The loop case: we've been here already */
			if (dma_chan_link_map[cur_lch])
				break;
			/* Mark the current channel */
			dma_chan_link_map[cur_lch] = 1;

			disable_lnk(cur_lch);

			next_lch = dma_chan[cur_lch].next_lch;
			cur_lch = next_lch;
		} while (next_lch != -1);
	}

	dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
}
/*
 * Insert lch_queue into the circular software chain headed by lch_head,
 * then mirror the ordering into both channels' CLNK_CTRL registers
 * (the low 5 bits of CLNK_CTRL hold the next channel number).
 */
static void create_dma_lch_chain(int lch_head, int lch_queue)
{
	u32 l;

	/* Check if this is the first link in chain */
	if (dma_chan[lch_head].next_linked_ch == -1) {
		/* Build a two-element circular list: head <-> queue */
		dma_chan[lch_head].next_linked_ch = lch_queue;
		dma_chan[lch_head].prev_linked_ch = lch_queue;
		dma_chan[lch_queue].next_linked_ch = lch_head;
		dma_chan[lch_queue].prev_linked_ch = lch_head;
	}

	/* a link exists, link the new channel in circular chain */
	else {
		/* Splice lch_queue in immediately after lch_head */
		dma_chan[lch_queue].next_linked_ch =
					dma_chan[lch_head].next_linked_ch;
		dma_chan[lch_queue].prev_linked_ch = lch_head;
		dma_chan[lch_head].next_linked_ch = lch_queue;
		dma_chan[dma_chan[lch_queue].next_linked_ch].prev_linked_ch =
					lch_queue;
	}

	/* Program the hardware link registers to match the software chain */
	l = dma_read(CLNK_CTRL(lch_head));
	l &= ~(0x1f);
	l |= lch_queue;
	dma_write(l, CLNK_CTRL(lch_head));

	l = dma_read(CLNK_CTRL(lch_queue));
	l &= ~(0x1f);
	l |= (dma_chan[lch_queue].next_linked_ch);
	dma_write(l, CLNK_CTRL(lch_queue));
}
void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode, u32 color) { BUG_ON(enable_1510_mode); if (d->dma_dev_attr & IS_WORD_16) { u16 w; w = dma_read(CCR2(lch)); w &= ~0x03; switch (mode) { case OMAP_DMA_CONSTANT_FILL: w |= 0x01; break; case OMAP_DMA_TRANSPARENT_COPY: w |= 0x02; break; case OMAP_DMA_COLOR_DIS: break; default: BUG(); } dma_write(w, CCR2(lch)); w = dma_read(LCH_CTRL(lch)); w &= ~0x0f; /* Default is channel type 2D */ if (mode) { dma_write((u16)color, COLOR_L(lch)); dma_write((u16)(color >> 16), COLOR_U(lch)); w |= 1; /* Channel type G */ } dma_write(w, LCH_CTRL(lch)); } else {
/*
 * Configure constant-fill / transparent-copy color mode for channel
 * 'lch'.  Not available in 1510 mode (BUG_ON).  OMAP1 keeps the mode
 * in CCR2 bits 1:0 with the color split over COLOR_L/COLOR_U; OMAP2
 * uses CCR bits 17:16 and a single COLOR register (24-bit value).
 */
void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode, u32 color)
{
	BUG_ON(omap_dma_in_1510_mode());

	if (cpu_class_is_omap1()) {
		u16 w;

		w = dma_read(CCR2(lch));
		w &= ~0x03;

		switch (mode) {
		case OMAP_DMA_CONSTANT_FILL:
			w |= 0x01;
			break;
		case OMAP_DMA_TRANSPARENT_COPY:
			w |= 0x02;
			break;
		case OMAP_DMA_COLOR_DIS:
			break;
		default:
			BUG();
		}
		dma_write(w, CCR2(lch));

		w = dma_read(LCH_CTRL(lch));
		w &= ~0x0f;
		/* Default is channel type 2D */
		if (mode) {
			/* 32-bit color split over two 16-bit registers */
			dma_write((u16)color, COLOR_L(lch));
			dma_write((u16)(color >> 16), COLOR_U(lch));
			w |= 1;		/* Channel type G */
		}
		dma_write(w, LCH_CTRL(lch));
	}

	if (cpu_class_is_omap2()) {
		u32 val;

		val = dma_read(CCR(lch));
		val &= ~((1 << 17) | (1 << 16));

		switch (mode) {
		case OMAP_DMA_CONSTANT_FILL:
			val |= 1 << 16;
			break;
		case OMAP_DMA_TRANSPARENT_COPY:
			val |= 1 << 17;
			break;
		case OMAP_DMA_COLOR_DIS:
			break;
		default:
			BUG();
		}
		dma_write(val, CCR(lch));

		/* COLOR register holds 24 bits of color data */
		color &= 0xffffff;
		dma_write(color, COLOR(lch));
	}
}
/*
 * Program the destination element (CDEI) and frame (CDFI) index
 * registers for channel 'lch'.  No-op on OMAP2-class devices.
 */
void omap_set_dma_dest_index(int lch, int eidx, int fidx)
{
	if (cpu_class_is_omap2())
		return;

	dma_write(eidx, CDEI(lch));
	dma_write(fidx, CDFI(lch));
}
/*
 * Acknowledge any stale channel events, then unmask the channel's
 * configured interrupt sources.
 */
static inline void omap_enable_channel_irq(int lch)
{
	u32 csr;

	/* Clear CSR first so no old events fire on enable */
	if (cpu_class_is_omap1())
		csr = dma_read(CSR(lch));	/* read-to-clear on OMAP1 */
	else if (cpu_class_is_omap2())
		dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(lch));

	/* Enable the interrupts selected for this channel */
	dma_write(dma_chan[lch].enabled_irqs, CICR(lch));
}
//Perform one byte of a general-purpose DMA transfer between the A-bus
//(CPU address space) and the B-bus (registers at $2100 | bbus).
//direction 0 = A-bus -> B-bus, 1 = B-bus -> A-bus.  Each byte costs
//4 clocks for the read plus 4 for the write.  regs.mdr latches the
//transferred byte; dma_transfer_valid()/dma_addr_valid() gate writes
//to invalid address combinations (an invalid B-bus read yields 0x00).
void CPU::dma_transfer(bool direction, uint8 bbus, uint32 abus) {
  if(direction == 0) {
    //A-bus read, then B-bus write
    dma_add_clocks(4);
    regs.mdr = dma_read(abus);
    dma_add_clocks(4);
    dma_write(dma_transfer_valid(bbus, abus), 0x2100 | bbus, regs.mdr);
  } else {
    //B-bus read (0x00 when the pairing is invalid), then A-bus write
    dma_add_clocks(4);
    regs.mdr = dma_transfer_valid(bbus, abus) ? bus.read(0x2100 | bbus) : 0x00;
    dma_add_clocks(4);
    dma_write(dma_addr_valid(abus), abus, regs.mdr);
  }
}
/*
 * Start a DMA transfer on logical channel 'lch'.
 *
 * When the channel heads a link chain (next_lch != -1), every linked
 * channel's link bit and interrupts are enabled first, walking the
 * chain with a visited map so a circular chain terminates.  Finally
 * the channel's CCR enable bit is set and the channel marked active.
 */
void omap_start_dma(int lch)
{
	u32 l;

	if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
		int next_lch, cur_lch;
		char dma_chan_link_map[OMAP_DMA4_LOGICAL_DMA_CH_COUNT];

		/*
		 * BUG FIX: clear the visited map BEFORE marking the head
		 * channel.  The original code marked lch and only then
		 * memset() the map, wiping the mark — a circular chain
		 * could revisit (and double-enable) the head channel.
		 */
		memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
		dma_chan_link_map[lch] = 1;

		/* Set the link register of the first channel */
		enable_lnk(lch);

		cur_lch = dma_chan[lch].next_lch;
		do {
			next_lch = dma_chan[cur_lch].next_lch;

			/* The loop case: we've been here already */
			if (dma_chan_link_map[cur_lch])
				break;
			/* Mark the current channel */
			dma_chan_link_map[cur_lch] = 1;

			enable_lnk(cur_lch);
			omap_enable_channel_irq(cur_lch);

			cur_lch = next_lch;
		} while (next_lch != -1);
	} else if (cpu_is_omap242x() ||
		   (cpu_is_omap243x() && omap_type() <= OMAP2430_REV_ES1_0)) {
		/* Errata: Need to write lch even if not using chaining */
		dma_write(lch, CLNK_CTRL(lch));
	}

	omap_enable_channel_irq(lch);

	l = dma_read(CCR(lch));

	/*
	 * Errata: On ES2.0 BUFFERING disable must be set.
	 * This will always fail on ES1.0
	 */
	if (cpu_is_omap24xx())
		l |= OMAP_DMA_CCR_EN;
	l |= OMAP_DMA_CCR_EN;

	dma_write(l, CCR(lch));

	dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
}
/** * @brief omap_stop_dma_chain_transfers - Stop the dma transfer of a chain. * * @param chain_id * * @return - Success : 0 * Failure : EINVAL */ int omap_stop_dma_chain_transfers(int chain_id) { int *channels; u32 l, i; u32 sys_cf; /* Check for input params */ if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) { IOLog("Invalid chain id\n"); return -EINVAL; } /* Check if the chain exists */ if (dma_linked_lch[chain_id].linked_dmach_q == NULL) { IOLog("Chain doesn't exists\n"); return -EINVAL; } channels = dma_linked_lch[chain_id].linked_dmach_q; /* * DMA Errata: * Special programming model needed to disable DMA before end of block */ sys_cf = dma_read(OCP_SYSCONFIG); l = sys_cf; /* Middle mode reg set no Standby */ l &= ~((1 << 12)|(1 << 13)); dma_write(l, OCP_SYSCONFIG); for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) { /* Stop the Channel transmission */ l = dma_read(CCR(channels[i])); l &= ~(1 << 7); dma_write(l, CCR(channels[i])); /* Disable the link in all the channels */ disable_lnk(channels[i]); dma_chan[channels[i]].state = DMA_CH_NOTSTARTED; } dma_linked_lch[chain_id].chain_state = DMA_CHAIN_NOTSTARTED; /* Reset the Queue pointers */ OMAP_DMA_CHAIN_QINIT(chain_id); /* Errata - put in the old value */ dma_write(sys_cf, OCP_SYSCONFIG); return 0; }
/*
 * Clears any DMA state so the DMA engine is ready to restart with new buffers
 * through omap_start_dma(). Any buffers in flight are discarded.
 */
void omap_clear_dma(int lch)
{
	unsigned long flags;

	/* Block interrupts while the channel registers are manipulated */
	flags = splhigh();

	if (cpu_class_is_omap1()) {
		u32 l;

		/* Drop the enable bit to halt the channel */
		l = dma_read(CCR(lch));
		l &= ~OMAP_DMA_CCR_EN;
		dma_write(l, CCR(lch));

		/* Clear pending interrupts (CSR is read-to-clear here) */
		l = dma_read(CSR(lch));
	}

	if (cpu_class_is_omap2()) {
		int i;
		void __iomem *lch_base = omap_dma_base + OMAP_DMA4_CH_BASE(lch);

		/* Zero the channel's entire 0x44-byte register window */
		for (i = 0; i < 0x44; i += 4)
			__raw_writel(0, lch_base + i);
	}

	splx(flags);
}
/*
 * Zero all common channel registers of logical channel 'lch'.
 * NOTE(review): the range start is hard-coded to CSDP here, while the
 * sibling implementation of this function iterates from
 * dma_common_ch_start — confirm dma_common_ch_start == CSDP for every
 * configuration this variant is built for.
 */
static void omap2_clear_dma(int lch)
{
	int i;

	for (i = CSDP; i <= dma_common_ch_end; i += 1)
		dma_write(0, i, lch);
}
/*
 * Once the DMA queue is stopped, we can destroy it.
 *
 * Unlink lch_queue from lch_head.  In 1510 mode only the degenerate
 * self-link (auto-repeat via CCR bits 9:8) is supported; real channel
 * linking is rejected.  Both channels must be stopped before unlinking.
 */
void omap_dma_unlink_lch(int lch_head, int lch_queue)
{
	if (omap_dma_in_1510_mode()) {
		if (lch_head == lch_queue) {
			/* Clear the repeat/auto-init bits set by the link */
			dma_write(dma_read(CCR(lch_head)) & ~(3 << 8),
				  CCR(lch_head));
			return;
		}
		printk("DMA linking is not supported in 1510 mode");
		BUG();
		return;
	}

	if (dma_chan[lch_head].next_lch != lch_queue ||
	    dma_chan[lch_head].next_lch == -1) {
		printk("omap_dma: trying to unlink "
		       "non linked channels");
		BUG();
	}

	/*
	 * BUG FIX: the second operand used to re-test lch_head, so an
	 * active lch_queue went undetected — check both ends of the link.
	 */
	if ((dma_chan[lch_head].flags & OMAP_DMA_ACTIVE) ||
	    (dma_chan[lch_queue].flags & OMAP_DMA_ACTIVE)) {
		printk("omap_dma: You need to stop the DMA channels "
		       "before unlinking");
		BUG();
	}

	dma_chan[lch_head].next_lch = -1;
}
/* Wipe every common channel register of 'lch' back to zero. */
static void omap2_clear_dma(int lch)
{
	int reg;

	for (reg = dma_common_ch_start; reg <= dma_common_ch_end; reg++)
		dma_write(0, reg, lch);
}
/* Drop channel 'lch' from the line-0 interrupt enable mask. */
static inline void omap2_disable_irq_lch(int lch)
{
	u32 irq_mask;

	irq_mask = dma_read(IRQENABLE_L0, lch);
	irq_mask &= ~(1 << lch);
	dma_write(irq_mask, IRQENABLE_L0, lch);
}
//H-blank handler: while an H-blank DMA (dma_mode == 1) is pending and the
//display is in the visible region (ly < 144), copy one 16-byte block per
//scanline, charging 8 clocks (doubled in double-speed mode).
void CPU::hblank() {
  if(status.dma_mode != 1) return;
  if(!status.dma_length) return;
  if(ppu.status.ly >= 144) return;

  unsigned remaining = 16;
  while(remaining--) {
    dma_write(status.dma_target++, dma_read(status.dma_source++));
  }

  add_clocks(8 << status.speed_double);
  status.dma_length -= 16;
}
/* Enable or disable destination-side data packing (CSDP bit 13). */
void omap_set_dma_dest_data_pack(int lch, int enable)
{
	u32 csdp;

	csdp = dma_read(CSDP(lch));
	if (enable)
		csdp |= (1 << 13);
	else
		csdp &= ~(1 << 13);
	dma_write(csdp, CSDP(lch));
}
/*
 * Timer0A ISR, fires every 200 us: acknowledges the timeout interrupt
 * and kicks off a uDMA transfer of the send buffer.
 * NOTE(review): dma_write(send_data, 16, 0) uses a project-local helper
 * — presumably "transfer 16 items from send_data"; verify the argument
 * meanings against the helper's definition.
 */
void Timer0AIntHandler(void)
{
	/* Clear the timeout flag so the timer can interrupt again */
	TimerIntClear(TIMER0_BASE, TIMER_TIMA_TIMEOUT);

	/* start udma transfer */
	dma_write(send_data, 16, 0);
}
/* Enable or disable source-side data packing (CSDP bit 6). */
void omap_set_dma_src_data_pack(int lch, int enable)
{
	u32 csdp;

	csdp = dma_read(CSDP(lch));
	if (enable)
		csdp |= (1 << 6);
	else
		csdp &= ~(1 << 6);
	dma_write(csdp, CSDP(lch));
}
/* Select the write mode in CSDP bits 17:16; OMAP2-class devices only. */
void omap_set_dma_write_mode(int lch, enum omap_dma_write_mode mode)
{
	u32 csdp;

	if (!cpu_class_is_omap2())
		return;

	csdp = dma_read(CSDP(lch));
	csdp = (csdp & ~(0x3 << 16)) | (mode << 16);
	dma_write(csdp, CSDP(lch));
}
/*
 * Select the logical channel mode via LCH_CTRL's low three bits.
 * Applies only to OMAP1-class parts other than the 15xx family.
 */
void omap_set_dma_channel_mode(int lch, enum omap_dma_channel_mode mode)
{
	u32 ctrl;

	if (!cpu_class_is_omap1() || cpu_is_omap15xx())
		return;

	ctrl = dma_read(LCH_CTRL(lch));
	ctrl = (ctrl & ~0x7) | mode;
	dma_write(ctrl, LCH_CTRL(lch));
}
/* Note that src_port is only for omap1 */
/*
 * Program the source-side parameters of channel 'lch': port (OMAP1
 * only), addressing mode, start address, and element/frame indexes.
 */
void omap_set_dma_src_params(int lch, int src_port, int src_amode,
			     unsigned long src_start,
			     int src_ei, int src_fi)
{
	u32 l;

	if (cpu_class_is_omap1()) {
		u16 w;

		/* CSDP bits 6:2 select the source port */
		w = dma_read(CSDP(lch));
		w &= ~(0x1f << 2);
		w |= src_port << 2;
		dma_write(w, CSDP(lch));
	}

	/* CCR bits 13:12 hold the source addressing mode */
	l = dma_read(CCR(lch));
	l &= ~(0x03 << 12);
	l |= src_amode << 12;
	dma_write(l, CCR(lch));

	if (cpu_class_is_omap1()) {
		/* OMAP1 splits the 32-bit start address over two 16-bit regs */
		dma_write(src_start >> 16, CSSA_U(lch));
		dma_write((u16)src_start, CSSA_L(lch));
	}

	if (cpu_class_is_omap2())
		dma_write(src_start, CSSA(lch));

	/* Element and frame index registers */
	dma_write(src_ei, CSEI(lch));
	dma_write(src_fi, CSFI(lch));
}
/* Note that dest_port is only for OMAP1 */
/*
 * Program the destination-side parameters of channel 'lch': port
 * (OMAP1 only), addressing mode, start address, and element/frame
 * indexes.  Mirrors omap_set_dma_src_params() for the other end.
 */
void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
			      unsigned long dest_start,
			      int dst_ei, int dst_fi)
{
	u32 l;

	if (cpu_class_is_omap1()) {
		/* CSDP bits 13:9 select the destination port */
		l = dma_read(CSDP(lch));
		l &= ~(0x1f << 9);
		l |= dest_port << 9;
		dma_write(l, CSDP(lch));
	}

	/* CCR bits 15:14 hold the destination addressing mode */
	l = dma_read(CCR(lch));
	l &= ~(0x03 << 14);
	l |= dest_amode << 14;
	dma_write(l, CCR(lch));

	if (cpu_class_is_omap1()) {
		/* OMAP1 splits the 32-bit start address over two 16-bit regs */
		dma_write(dest_start >> 16, CDSA_U(lch));
		dma_write(dest_start, CDSA_L(lch));
	}

	if (cpu_class_is_omap2())
		dma_write(dest_start, CDSA(lch));

	/* Element and frame index registers */
	dma_write(dst_ei, CDEI(lch));
	dma_write(dst_fi, CDFI(lch));
}
/*
 * Write 'length' bytes from 'buffer' to the GPIB bus through a NEC
 * 7210-compatible chip.  When send_eoi is set, the final byte is held
 * back and sent separately after asserting EOI (AUX_SEOI command).
 * The ISA-DMA path is compiled out (#if 0) because DMA writes cannot
 * recover from bus errors, so all data goes through pio_write().
 * Returns the last pio_write() result (negative on failure);
 * *bytes_written reports how many bytes actually went out.
 */
int nec7210_write(gpib_board_t *board, nec7210_private_t *priv,
		  uint8_t *buffer, size_t length, int send_eoi,
		  size_t *bytes_written)
{
	int retval = 0;

	*bytes_written = 0;

	clear_bit(DEV_CLEAR_BN, &priv->state); //XXX

	if (send_eoi) {
		length--; /* save the last byte for sending EOI */
	}

	if (length > 0) {
		if (0 /*priv->dma_channel*/) {
			// isa dma transfer
			/*
			 * dma writes are unreliable since they can't recover
			 * from bus errors (which happen when ATN is asserted
			 * in the middle of a write)
			 */
#if 0
			retval = dma_write(board, priv, buffer, length);
			if (retval < 0)
				return retval;
			else
				count += retval;
#endif
		} else {
			// PIO transfer
			size_t num_bytes;

			retval = pio_write(board, priv, buffer, length,
					   &num_bytes);
			*bytes_written += num_bytes;
			if (retval < 0) {
				return retval;
			}
		}
	}

	if (send_eoi) {
		size_t num_bytes;

		/* send EOI along with the saved last byte */
		write_byte(priv, AUX_SEOI, AUXMR);
		retval = pio_write(board, priv, &buffer[*bytes_written], 1,
				   &num_bytes);
		*bytes_written += num_bytes;
		if (retval < 0) {
			return retval;
		}
	}

	return retval;
}
/*
 * Break the hardware link of channel 'lch' and mask its interrupts,
 * then clear its ACTIVE flag.  OMAP1 uses a STOP_LNK bit (14); OMAP2
 * clears ENABLE_LNK (bit 15) instead.
 */
static inline void disable_lnk(int lch)
{
	u32 link_ctrl;

	link_ctrl = dma_read(CLNK_CTRL(lch));

	/* Disable interrupts */
	if (cpu_class_is_omap1()) {
		dma_write(0, CICR(lch));
		/* Set the STOP_LNK bit */
		link_ctrl |= 1 << 14;
	}

	if (cpu_class_is_omap2()) {
		omap_disable_channel_irq(lch);
		/* Clear the ENABLE_LNK bit */
		link_ctrl &= ~(1 << 15);
	}

	dma_write(link_ctrl, CLNK_CTRL(lch));
	dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
}
/*
 * Program the per-transfer parameters of channel 'lch': element data
 * type (CSDP bits 1:0), element/frame counts (CEN/CFN), sync mode and
 * DMA request trigger.  OMAP1 keeps frame sync in CCR bit 5 and block
 * sync in CCR2 bit 2; OMAP2 packs the trigger number and sync bits
 * into CCR.
 */
void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
				  int frame_count, int sync_mode,
				  int dma_trigger, int src_or_dst_synch)
{
	u32 l;

	l = dma_read(CSDP(lch));
	l &= ~0x03;
	l |= data_type;
	dma_write(l, CSDP(lch));

	if (cpu_class_is_omap1()) {
		u16 ccr;

		/* CCR bit 5: frame sync */
		ccr = dma_read(CCR(lch));
		ccr &= ~(1 << 5);
		if (sync_mode == OMAP_DMA_SYNC_FRAME)
			ccr |= 1 << 5;
		dma_write(ccr, CCR(lch));

		/* CCR2 bit 2: block sync */
		ccr = dma_read(CCR2(lch));
		ccr &= ~(1 << 2);
		if (sync_mode == OMAP_DMA_SYNC_BLOCK)
			ccr |= 1 << 2;
		dma_write(ccr, CCR2(lch));
	}

	if (cpu_class_is_omap2() && dma_trigger) {
		u32 val;

		val = dma_read(CCR(lch));

		/* DMA_SYNCHRO_CONTROL_UPPER depends on the channel number */
		val &= ~((3 << 19) | 0x1f);
		val |= (dma_trigger & ~0x1f) << 14;	/* upper trigger bits */
		val |= dma_trigger & 0x1f;		/* lower trigger bits */

		if (sync_mode & OMAP_DMA_SYNC_FRAME)
			val |= 1 << 5;
		else
			val &= ~(1 << 5);

		if (sync_mode & OMAP_DMA_SYNC_BLOCK)
			val |= 1 << 18;
		else
			val &= ~(1 << 18);

		if (src_or_dst_synch)
			val |= 1 << 24;		/* source synch */
		else
			val &= ~(1 << 24);	/* dest synch */

		dma_write(val, CCR(lch));
	}

	dma_write(elem_count, CEN(lch));
	dma_write(frame_count, CFN(lch));
}
//Execute all enabled general-purpose DMA channels in priority order
//(channel 0 first).  An 8-clock overhead is paid once up front and once
//per enabled channel; dma_edge() runs between byte transfers and may
//clear dma_enabled mid-transfer (presumably HDMA preemption — verify),
//which aborts that channel's loop early.
void CPU::dma_run() {
  //fixed DMA startup overhead
  dma_add_clocks(8);
  dma_write(false);
  dma_edge();

  for(unsigned i = 0; i < 8; i++) {
    if(channel[i].dma_enabled == false) continue;

    unsigned index = 0;
    do {
      dma_transfer(channel[i].direction, dma_bbus(i, index++), dma_addr(i));
      dma_edge();
    } while(channel[i].dma_enabled && --channel[i].transfer_size);

    //per-channel overhead
    dma_add_clocks(8);
    dma_write(false);
    dma_edge();

    channel[i].dma_enabled = false;
  }

  //suppress IRQ/NMI testing for the cycle after DMA completes
  status.irq_lock = true;
}
/*
 * Add channel 'lch' to the line-0 interrupt enable mask.
 * IRQENABLE_L0 is shared across channels, so the read-modify-write is
 * serialized under dma_chan_lock.  OMAP2-class devices only.
 */
static inline void omap2_enable_irq_lch(int lch)
{
	unsigned long flags;
	u32 irq_mask;

	if (!cpu_class_is_omap2())
		return;

	spin_lock_irqsave(&dma_chan_lock, flags);
	irq_mask = dma_read(IRQENABLE_L0);
	irq_mask |= (1 << lch);
	dma_write(irq_mask, IRQENABLE_L0);
	spin_unlock_irqrestore(&dma_chan_lock, flags);
}
/*
 * Release a DMA channel: mask and stop it, (on OMAP2) clear its
 * interrupt-enable/status bits and wipe its registers, then return the
 * slot to the free pool.  A slot with dev_id == -1 is unallocated.
 */
void omap_free_dma(int lch)
{
	unsigned long flags;

	if (dma_chan[lch].dev_id == -1) {
		IOLog("omap_dma: trying to free unallocated DMA channel %d\n",
		      lch);
		return;
	}

	if (cpu_class_is_omap1()) {
		/* Disable all DMA interrupts for the channel. */
		dma_write(0, CICR(lch));
		/* Make sure the DMA transfer is stopped. */
		dma_write(0, CCR(lch));
	}

	if (cpu_class_is_omap2()) {
		u32 val;

		/* IRQENABLE_L0 is shared; serialize the read-modify-write */
		spin_lock_irqsave(&dma_chan_lock, flags);

		/* Disable interrupts */
		val = dma_read(IRQENABLE_L0);
		val &= ~(1 << lch);
		dma_write(val, IRQENABLE_L0);

		spin_unlock_irqrestore(&dma_chan_lock, flags);

		/* Clear the CSR register and IRQ status register */
		dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(lch));
		dma_write(1 << lch, IRQSTATUS_L0);

		/* Disable all DMA interrupts for the channel. */
		dma_write(0, CICR(lch));

		/* Make sure the DMA transfer is stopped. */
		dma_write(0, CCR(lch));
		omap_clear_dma(lch);
	}

	/* Mark the slot free under the lock */
	spin_lock_irqsave(&dma_chan_lock, flags);
	dma_chan[lch].dev_id = -1;
	dma_chan[lch].next_lch = -1;
	dma_chan[lch].callback = NULL;
	spin_unlock_irqrestore(&dma_chan_lock, flags);
}
/**
 * @brief omap_start_dma_chain_transfers - Start the chain
 *
 * @param chain_id
 *
 * @return - Success : 0
 * 	     Failure : -EINVAL/-EBUSY
 */
int omap_start_dma_chain_transfers(int chain_id)
{
	int *channels;
	u32 l, i;

	if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
		IOLog("Invalid chain id\n");
		return -EINVAL;
	}

	channels = dma_linked_lch[chain_id].linked_dmach_q;

	if (dma_linked_lch[channels[0]].chain_state == DMA_CHAIN_STARTED) {
		IOLog("Chain is already started\n");
		return -EBUSY;
	}

	/*
	 * Static chains pre-link and unmask every member up front;
	 * dynamic chains only unmask the first channel here.
	 */
	if (dma_linked_lch[chain_id].chain_mode == OMAP_DMA_STATIC_CHAIN) {
		for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked;
									i++) {
			enable_lnk(channels[i]);
			omap_enable_channel_irq(channels[i]);
		}
	} else {
		omap_enable_channel_irq(channels[0]);
	}

	/* Set the channel-enable bit (CCR bit 7) on the first channel */
	l = dma_read(CCR(channels[0]));
	l |= (1 << 7);
	dma_linked_lch[chain_id].chain_state = DMA_CHAIN_STARTED;
	dma_chan[channels[0]].state = DMA_CH_STARTED;

	/*
	 * Mirror CCR bit 24 into bit 25 — presumably pairing the
	 * src/dst-synch setting with its companion control bit; verify
	 * against the OMAP sDMA register description.
	 */
	if ((0 == (l & (1 << 24))))
		l &= ~(1 << 25);
	else
		l |= (1 << 25);
	dma_write(l, CCR(channels[0]));

	dma_chan[channels[0]].flags |= OMAP_DMA_ACTIVE;

	return 0;
}