/*
 * Low-level block device routine: read `len` 512-byte blocks starting at
 * block `blk` into `buf`.  The device is treated as read-only; write
 * requests are rejected with -1.  Retries a failing block up to 3 times
 * (with a recalibrate between attempts) before giving up.
 *
 * Returns 0 on success, -1 on write attempt, disk change, or I/O error.
 */
static int floppy_rw_blk(struct blkdev *bdev, int write, block_t blk,
                         char *buf, size_t len)
{
	int head, track, sector;
	int tries = 3;
	long flags;

	if (write) {
		printk("floppy0: read-only device\n");
		return -1;
	}

	/* Robustness: nothing to transfer; avoids `--len` underflowing below. */
	if (len == 0)
		return 0;

try_again:
	/* A media change invalidates the request -- abort immediately. */
	if (inb(dprts->dir) & DIR_CHAN) {
		printk("floppy: disk change on read\n");
		return -1;
	}

	/* BUGFIX: was garbled mojibake "§or"; must pass &sector. */
	floppy_block(blk, &head, &track, &sector);
	floppy_seek(dprts, track);

	/* select data rate (is this redundant?) */
	outb(dprts->ccr, 0);

	/* Do the read: program the DMA channel, then issue the FDC command
	 * byte sequence and sleep until the completion interrupt wakes us. */
	lock_irq(flags);
	dma_read(2, buf, 512);
	floppy_send(dprts, CMD_READ);
	floppy_send(dprts, head << 2);
	floppy_send(dprts, track);
	floppy_send(dprts, head);
	floppy_send(dprts, sector);
	floppy_send(dprts, 2);          /* sector size code: 2 -> 512 bytes */
	floppy_send(dprts, geom->spt);
	floppy_send(dprts, geom->g3_rw);
	floppy_send(dprts, 0xff);
	sleep_on(&floppyq);
	unlock_irq(flags);

	/* Success: ST0 top bits clear means normal termination. */
	if ((status[0] & 0xc0) == 0) {
		if (--len) {
			blk++;
			buf += 512;
			goto try_again;
		}
		return 0;
	}

	if (--tries) {
		printk("floppy_rw_block: I/O err, try again\n");
		floppy_recal(dprts);
		goto try_again;
	}
	return -1;
}
/* Enable or disable source-side data packing for channel @lch (CSDP bit 6). */
void omap_set_dma_src_data_pack(int lch, int enable)
{
	u32 csdp = dma_read(CSDP(lch));

	if (enable)
		csdp |= (1 << 6);
	else
		csdp &= ~(1 << 6);

	dma_write(csdp, CSDP(lch));
}
/* Enable or disable destination-side data packing for channel @lch (CSDP bit 13). */
void omap_set_dma_dest_data_pack(int lch, int enable)
{
	u32 csdp = dma_read(CSDP(lch));

	if (enable)
		csdp |= (1 << 13);
	else
		csdp &= ~(1 << 13);

	dma_write(csdp, CSDP(lch));
}
/* Program the write mode field (CSDP bits [17:16]) for channel @lch.
 * No-op on OMAP1-class devices, which lack this field. */
void omap_set_dma_write_mode(int lch, enum omap_dma_write_mode mode)
{
	u32 val;

	if (!cpu_class_is_omap2())
		return;

	val = dma_read(CSDP(lch));
	val = (val & ~(0x3 << 16)) | (mode << 16);
	dma_write(val, CSDP(lch));
}
/* Set the channel mode field (LCH_CTRL bits [2:0]) for channel @lch.
 * Only meaningful on OMAP1 parts other than 15xx; no-op elsewhere. */
void omap_set_dma_channel_mode(int lch, enum omap_dma_channel_mode mode)
{
	u32 ctrl;

	if (!cpu_class_is_omap1() || cpu_is_omap15xx())
		return;

	ctrl = dma_read(LCH_CTRL(lch));
	ctrl = (ctrl & ~0x7) | mode;
	dma_write(ctrl, LCH_CTRL(lch));
}
/*
 * Returns current physical destination address for the given DMA channel.
 * If the channel is running the caller must disable interrupts prior calling
 * this function and process the returned value before re-enabling interrupt to
 * prevent races with the interrupt handler. Note that in continuous mode there
 * is a chance for CDSA_L register overflow inbetween the two reads resulting
 * in incorrect return value.
 */
dma_addr_t omap_get_dma_dst_pos(int lch)
{
	dma_addr_t pos;

	/* 15xx keeps the progress counter in CPC; everything else in CDAC. */
	pos = cpu_is_omap15xx() ? dma_read(CPC(lch)) : dma_read(CDAC(lch));

	/*
	 * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
	 * read before the DMA controller finished disabling the channel,
	 * so retry the read once.
	 */
	if (pos == 0 && !cpu_is_omap15xx())
		pos = dma_read(CDAC(lch));

	/* OMAP1 splits the address across two 16-bit registers. */
	if (cpu_class_is_omap1())
		pos |= (dma_read(CDSA_U(lch)) << 16);

	return pos;
}
/* Clear any stale channel status, then unmask the channel's enabled IRQs. */
static inline void omap_enable_channel_irq(int lch)
{
	if (cpu_class_is_omap1()) {
		/* CSR is cleared by reading it on OMAP1. */
		u32 status = dma_read(CSR(lch));
		(void)status;
	} else if (cpu_class_is_omap2()) {
		/* OMAP2 clears CSR by writing the clear mask. */
		dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(lch));
	}

	/* Enable some nice interrupts. */
	dma_write(dma_chan[lch].enabled_irqs, CICR(lch));
}
void CPU::dma_transfer(bool direction, uint8 bbus, uint32 abus) { if(direction == 0) { add_clocks(4); regs.mdr = dma_read(abus); add_clocks(4); if (dma_transfer_valid(bbus, abus)) bus.write(0x2100 | bbus, regs.mdr); } else { add_clocks(4); regs.mdr = dma_transfer_valid(bbus, abus) ? bus.read(0x2100 | bbus) : 0x00; add_clocks(4); if (dma_addr_valid(abus)) bus.write(abus, regs.mdr); } }
void omap_start_dma(int lch) { u32 l; if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) { int next_lch, cur_lch; char dma_chan_link_map[OMAP_DMA4_LOGICAL_DMA_CH_COUNT]; dma_chan_link_map[lch] = 1; /* Set the link register of the first channel */ enable_lnk(lch); memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map)); cur_lch = dma_chan[lch].next_lch; do { next_lch = dma_chan[cur_lch].next_lch; /* The loop case: we've been here already */ if (dma_chan_link_map[cur_lch]) break; /* Mark the current channel */ dma_chan_link_map[cur_lch] = 1; enable_lnk(cur_lch); omap_enable_channel_irq(cur_lch); cur_lch = next_lch; } while (next_lch != -1); } else if (cpu_is_omap242x() || (cpu_is_omap243x() && omap_type() <= OMAP2430_REV_ES1_0)) { /* Errata: Need to write lch even if not using chaining */ dma_write(lch, CLNK_CTRL(lch)); } omap_enable_channel_irq(lch); l = dma_read(CCR(lch)); /* * Errata: On ES2.0 BUFFERING disable must be set. * This will always fail on ES1.0 */ if (cpu_is_omap24xx()) l |= OMAP_DMA_CCR_EN; l |= OMAP_DMA_CCR_EN; dma_write(l, CCR(lch)); dma_chan[lch].flags |= OMAP_DMA_ACTIVE; }
/* Unmask channel @lch in the OMAP2 line-0 IRQ enable register.
 * Takes dma_chan_lock because IRQENABLE_L0 is shared read-modify-write
 * state.  No-op on non-OMAP2 class devices. */
static inline void omap2_enable_irq_lch(int lch)
{
	unsigned long flags;
	u32 mask;

	if (!cpu_class_is_omap2())
		return;

	spin_lock_irqsave(&dma_chan_lock, flags);
	mask = dma_read(IRQENABLE_L0) | (1 << lch);
	dma_write(mask, IRQENABLE_L0);
	spin_unlock_irqrestore(&dma_chan_lock, flags);
}
/*
 * One-time init for the OMAP2 system DMA hwmod:
 *   1. builds the platform device from dma_plat_info plus per-SoC attrs
 *      and errata flags;
 *   2. records the device's resources and ioremaps the register window
 *      into the file-scope dma_base;
 *   3. tweaks dev_caps (HS channels reserved on non-GP 34xx; engine
 *      handles IRQ when no "0" IRQ resource exists);
 *   4. probes CAPS_0 for descriptor-load support to pick the last
 *      common channel register (CCDN vs CCFN).
 * Returns 0 on success, negative errno on failure.
 * NOTE(review): dma_base is not unmapped on later failure paths here
 * because none exist after ioremap; revisit if steps are added.
 * NOTE(review): dma_read(CAPS_0, 0) takes two args while other call
 * sites in this file use one -- confirm which dma_read variant this
 * chunk is compiled against.
 */
/* One time initializations */ static int __init omap2_system_dma_init_dev(struct omap_hwmod *oh, void *unused) { struct platform_device *pdev; struct omap_system_dma_plat_info p; struct omap_dma_dev_attr *d; struct resource *mem; char *name = "omap_dma_system"; p = dma_plat_info; p.dma_attr = (struct omap_dma_dev_attr *)oh->dev_attr; p.errata = configure_dma_errata(); pdev = omap_device_build(name, 0, oh, &p, sizeof(p)); if (IS_ERR(pdev)) { pr_err("%s: Can't build omap_device for %s:%s.\n", __func__, name, oh->name); return PTR_ERR(pdev); } omap_dma_dev_info.res = pdev->resource; omap_dma_dev_info.num_res = pdev->num_resources; mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mem) { dev_err(&pdev->dev, "%s: no mem resource\n", __func__); return -EINVAL; } dma_base = ioremap(mem->start, resource_size(mem)); if (!dma_base) { dev_err(&pdev->dev, "%s: ioremap fail\n", __func__); return -ENOMEM; } d = oh->dev_attr; if (cpu_is_omap34xx() && (omap_type() != OMAP2_DEVICE_TYPE_GP)) d->dev_caps |= HS_CHANNELS_RESERVED; if (platform_get_irq_byname(pdev, "0") < 0) d->dev_caps |= DMA_ENGINE_HANDLE_IRQ; /* Check the capabilities register for descriptor loading feature */ if (dma_read(CAPS_0, 0) & DMA_HAS_DESCRIPTOR_CAPS) dma_common_ch_end = CCDN; else dma_common_ch_end = CCFN; return 0; }
int omap_dma_running(void) { int lch; #if 0 if (cpu_class_is_omap1()) if (omap_lcd_dma_running()) return 1; #endif for (lch = 0; lch < dma_chan_count; lch++) if (dma_read(CCR(lch)) & OMAP_DMA_CCR_EN) return 1; return 0; }
void omap_free_dma(int lch) { unsigned long flags; if (dma_chan[lch].dev_id == -1) { IOLog("omap_dma: trying to free unallocated DMA channel %d\n", lch); return; } if (cpu_class_is_omap1()) { /* Disable all DMA interrupts for the channel. */ dma_write(0, CICR(lch)); /* Make sure the DMA transfer is stopped. */ dma_write(0, CCR(lch)); } if (cpu_class_is_omap2()) { u32 val; spin_lock_irqsave(&dma_chan_lock, flags); /* Disable interrupts */ val = dma_read(IRQENABLE_L0); val &= ~(1 << lch); dma_write(val, IRQENABLE_L0); spin_unlock_irqrestore(&dma_chan_lock, flags); /* Clear the CSR register and IRQ status register */ dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(lch)); dma_write(1 << lch, IRQSTATUS_L0); /* Disable all DMA interrupts for the channel. */ dma_write(0, CICR(lch)); /* Make sure the DMA transfer is stopped. */ dma_write(0, CCR(lch)); omap_clear_dma(lch); } spin_lock_irqsave(&dma_chan_lock, flags); dma_chan[lch].dev_id = -1; dma_chan[lch].next_lch = -1; dma_chan[lch].callback = NULL; spin_unlock_irqrestore(&dma_chan_lock, flags); }
/** * @brief omap_start_dma_chain_transfers - Start the chain * * @param chain_id * * @return - Success : 0 * Failure : -EINVAL/-EBUSY */ int omap_start_dma_chain_transfers(int chain_id) { int *channels; u32 l, i; if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) { IOLog("Invalid chain id\n"); return -EINVAL; } channels = dma_linked_lch[chain_id].linked_dmach_q; if (dma_linked_lch[channels[0]].chain_state == DMA_CHAIN_STARTED) { IOLog("Chain is already started\n"); return -EBUSY; } if (dma_linked_lch[chain_id].chain_mode == OMAP_DMA_STATIC_CHAIN) { for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) { enable_lnk(channels[i]); omap_enable_channel_irq(channels[i]); } } else { omap_enable_channel_irq(channels[0]); } l = dma_read(CCR(channels[0])); l |= (1 << 7); dma_linked_lch[chain_id].chain_state = DMA_CHAIN_STARTED; dma_chan[channels[0]].state = DMA_CH_STARTED; if ((0 == (l & (1 << 24)))) l &= ~(1 << 25); else l |= (1 << 25); dma_write(l, CCR(channels[0])); dma_chan[channels[0]].flags |= OMAP_DMA_ACTIVE; return 0; }
void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode) { unsigned int burst = 0; u32 l; l = dma_read(CSDP(lch)); l &= ~(0x03 << 7); switch (burst_mode) { case OMAP_DMA_DATA_BURST_DIS: break; case OMAP_DMA_DATA_BURST_4: if (cpu_class_is_omap2()) burst = 0x1; else burst = 0x2; break; case OMAP_DMA_DATA_BURST_8: if (cpu_class_is_omap2()) { burst = 0x2; break; } /* not supported by current hardware on OMAP1 * w |= (0x03 << 7); * fall through */ case OMAP_DMA_DATA_BURST_16: if (cpu_class_is_omap2()) { burst = 0x3; break; } /* OMAP1 don't support burst 16 * fall through */ default: BUG(); } l |= (burst << 7); dma_write(l, CSDP(lch)); }
/* Link channel @lch to its successor: write the next channel number plus
 * ENABLE_LNK (bit 15) into CLNK_CTRL.  On OMAP1 the STOP_LNK bit (14) is
 * cleared first. */
static inline void enable_lnk(int lch)
{
	u32 ctrl = dma_read(CLNK_CTRL(lch));

	if (cpu_class_is_omap1())
		ctrl &= ~(1 << 14);

	/* Set the ENABLE_LNK bits */
	if (dma_chan[lch].next_lch != -1)
		ctrl = dma_chan[lch].next_lch | (1 << 15);

#ifndef CONFIG_ARCH_OMAP1
	if (cpu_class_is_omap2() && dma_chan[lch].next_linked_ch != -1)
		ctrl = dma_chan[lch].next_linked_ch | (1 << 15);
#endif

	dma_write(ctrl, CLNK_CTRL(lch));
}
/**
 * @brief omap_dma_set_prio_lch : Set channel wise priority settings
 *
 * @param lch
 * @param read_prio - Read priority
 * @param write_prio - Write priority
 * Both of the above can be set with one of the following values :
 *	DMA_CH_PRIO_HIGH/DMA_CH_PRIO_LOW
 */
int
omap_dma_set_prio_lch(int lch, unsigned char read_prio,
		      unsigned char write_prio)
{
	u32 ccr;

	if (unlikely(lch < 0 || lch >= dma_lch_count)) {
		IOLog("Invalid channel id\n");
		return -EINVAL;
	}

	ccr = dma_read(CCR(lch));
	ccr &= ~((1 << 6) | (1 << 26));

	ccr |= (read_prio & 0x1) << 6;
	/* The write-priority bit (26) exists only on 2430/34xx/44xx. */
	if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx())
		ccr |= (write_prio & 0x1) << 26;

	dma_write(ccr, CCR(lch));
	return 0;
}
void omap_set_dma_priority(int lch, int dst_port, int priority) { unsigned long reg; u32 l; if (cpu_class_is_omap1()) { switch (dst_port) { case OMAP_DMA_PORT_OCP_T1: /* FFFECC00 */ reg = OMAP_TC_OCPT1_PRIOR; break; case OMAP_DMA_PORT_OCP_T2: /* FFFECCD0 */ reg = OMAP_TC_OCPT2_PRIOR; break; case OMAP_DMA_PORT_EMIFF: /* FFFECC08 */ reg = OMAP_TC_EMIFF_PRIOR; break; case OMAP_DMA_PORT_EMIFS: /* FFFECC04 */ reg = OMAP_TC_EMIFS_PRIOR; break; default: BUG(); return; } l = omap_readl(reg); l &= ~(0xf << 8); l |= (priority & 0xf) << 8; omap_writel(l, reg); } if (cpu_class_is_omap2()) { u32 ccr; ccr = dma_read(CCR(lch)); if (priority) ccr |= (1 << 6); else ccr &= ~(1 << 6); dma_write(ccr, CCR(lch)); } }
/* Unlink channel @lch from its chain and mark it inactive.
 * OMAP1: mask channel IRQs and raise STOP_LNK (bit 14).
 * OMAP2: mask the channel IRQ and clear ENABLE_LNK (bit 15). */
static inline void disable_lnk(int lch)
{
	u32 ctrl = dma_read(CLNK_CTRL(lch));

	if (cpu_class_is_omap1()) {
		dma_write(0, CICR(lch));
		ctrl |= 1 << 14;
	}

	if (cpu_class_is_omap2()) {
		omap_disable_channel_irq(lch);
		ctrl &= ~(1 << 15);
	}

	dma_write(ctrl, CLNK_CTRL(lch));
	dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
}
/* * lch_queue DMA will start right after lch_head one is finished. * For this DMA link to start, you still need to start (see omap_start_dma) * the first one. That will fire up the entire queue. */ void omap_dma_link_lch(int lch_head, int lch_queue) { if (omap_dma_in_1510_mode()) { if (lch_head == lch_queue) { dma_write(dma_read(CCR(lch_head)) | (3 << 8), CCR(lch_head)); return; } IOLog("DMA linking is not supported in 1510 mode\n"); BUG(); return; } if ((dma_chan[lch_head].dev_id == -1) || (dma_chan[lch_queue].dev_id == -1)) { IOLog("omap_dma: trying to link " "non requested channels\n"); BUG(); } dma_chan[lch_head].next_lch = lch_queue; }
void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode) { unsigned int burst = 0; u32 l; l = dma_read(CSDP(lch)); l &= ~(0x03 << 14); switch (burst_mode) { case OMAP_DMA_DATA_BURST_DIS: break; case OMAP_DMA_DATA_BURST_4: if (cpu_class_is_omap2()) burst = 0x1; else burst = 0x2; break; case OMAP_DMA_DATA_BURST_8: if (cpu_class_is_omap2()) burst = 0x2; else burst = 0x3; break; case OMAP_DMA_DATA_BURST_16: if (cpu_class_is_omap2()) { burst = 0x3; break; } /* OMAP1 don't support burst 16 * fall through */ default: IOLog("Invalid DMA burst mode"); BUG(); return; } l |= (burst << 14); dma_write(l, CSDP(lch)); }
/*
 * Read @len bytes at flash offset @addr into @buf.
 * Strategy: PIO a short head until the destination pointer is DMA-aligned,
 * then DMA the aligned bulk (bounded by the stage-appropriate bounce
 * buffer), then PIO the unaligned tail.
 * Returns 0 on success, -1 on any PIO/DMA failure.
 */
static int nor_read(const struct spi_flash *flash, u32 addr, size_t len,
		    void *buf)
{
	u32 next;
	size_t done = 0;
	uintptr_t dma_buf;
	size_t dma_buf_len;
	/* Fix: arithmetic on void* is a GNU extension, not standard C. */
	uint8_t *out = buf;

	/* Head: PIO up to the first DMA-aligned destination address. */
	if (!IS_ALIGNED((uintptr_t)out, SFLASH_DMA_ALIGN)) {
		next = MIN(ALIGN_UP((uintptr_t)out, SFLASH_DMA_ALIGN) -
			   (uintptr_t)out, len);
		if (pio_read(addr, out, next))
			return -1;
		done += next;
	}

	/* Early stages use the coherent buffer; later stages use DRAM. */
	if (ENV_BOOTBLOCK || ENV_VERSTAGE) {
		dma_buf = (uintptr_t)_dma_coherent;
		dma_buf_len = _dma_coherent_size;
	} else {
		dma_buf = (uintptr_t)_dram_dma;
		dma_buf_len = _dram_dma_size;
	}

	/* Body: aligned bulk chunks via DMA, each capped by the bounce
	 * buffer size. */
	while (len - done >= SFLASH_DMA_ALIGN) {
		next = MIN(dma_buf_len,
			   ALIGN_DOWN(len - done, SFLASH_DMA_ALIGN));
		if (dma_read(addr + done, out + done, next, dma_buf,
			     dma_buf_len))
			return -1;
		done += next;
	}

	/* Tail: remaining sub-alignment bytes via PIO. */
	next = len - done;
	if (next > 0 && pio_read(addr + done, out + done, next))
		return -1;

	return 0;
}
/**
 * @brief omap_get_dma_chain_src_pos - Get the source position
 * of the ongoing DMA in chain
 * @param chain_id
 *
 * @return - Success : Source position (fix: doc previously said
 *	     "Destination position" for this CSAC-based getter)
 *	     Failure : -EINVAL
 */
int omap_get_dma_chain_src_pos(int chain_id)
{
	int lch;
	int *channels;

	/* Check for input params */
	if (unlikely(chain_id < 0 || chain_id >= dma_lch_count)) {
		IOLog("Invalid chain id\n");
		return -EINVAL;
	}

	/* Check if the chain exists */
	if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
		IOLog("Chain doesn't exist\n");	/* fixed grammar: was "exists" */
		return -EINVAL;
	}

	channels = dma_linked_lch[chain_id].linked_dmach_q;

	/* Get the current (head) channel and read its source address counter. */
	lch = channels[dma_linked_lch[chain_id].q_head];

	return dma_read(CSAC(lch));
}
/* Read control register @reg of @chan, relative to the channel's control
 * register base offset. */
static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
{
	u32 offset = chan->ctrl_offset + reg;

	return dma_read(chan, offset);
}
// amiga_fdc::live_run -- bit-level live floppy engine, run until `limit`.
// RUNNING state:
//   read path (dskbyt bit 13 clear): pull one PLL bit, shift it in, and
//   break out to RUNNING_SYNCPOINT on a full byte, a dsksync match/mismatch
//   (direction depends on dskbyt bit 12), or an MFM-clocking violation when
//   adkcon bit 9 is set (counter is backed off to avoid livelock);
//   write path (bit 13 set): emit one bit of dma_value through the PLL,
//   high byte first when dma_state == DMA_RUNNING_BYTE_0.
// RUNNING_SYNCPOINT state:
//   on a dsksync match with wordsync enabled (adkcon bit 10), starts or
//   resyncs the DMA word stream (read or write per dsklen bit 14) and
//   raises INTENA_DSKSYN; on every completed byte, latches it into dskbyt
//   and advances the two-byte DMA state machine (dma_read/dma_write per
//   direction), then returns to RUNNING.
// NOTE(review): statement order here is cycle-critical (PLL stepping vs.
// counter updates); kept byte-identical, comments only.
void amiga_fdc::live_run(const attotime &limit) { amiga_state *state = machine().driver_data<amiga_state>(); if(cur_live.state == IDLE || cur_live.next_state != -1) return; for(;;) { switch(cur_live.state) { case RUNNING: { if(!(dskbyt & 0x2000)) { int bit = cur_live.pll.get_next_bit(cur_live.tm, floppy, limit); if(bit < 0) return; cur_live.shift_reg = (cur_live.shift_reg << 1) | bit; cur_live.bit_counter++; if((adkcon & 0x0200) && !(cur_live.shift_reg & 0x80)) { cur_live.bit_counter--; // Avoid any risk of livelock live_delay(RUNNING_SYNCPOINT); return; } if(cur_live.bit_counter > 8) fatalerror("amiga_fdc::live_run - cur_live.bit_counter > 8\n"); if(cur_live.bit_counter == 8) { live_delay(RUNNING_SYNCPOINT); return; } if(dskbyt & 0x1000) { if(cur_live.shift_reg != dsksync) { live_delay(RUNNING_SYNCPOINT); return; } } else { if(cur_live.shift_reg == dsksync) { live_delay(RUNNING_SYNCPOINT); return; } } } else { int bit = (dma_state == DMA_RUNNING_BYTE_0 ? 15 : 7) - cur_live.bit_counter; if(cur_live.pll.write_next_bit((dma_value >> bit) & 1, cur_live.tm, floppy, limit)) return; cur_live.bit_counter++; if(cur_live.bit_counter > 8) fatalerror("amiga_fdc::live_run - cur_live.bit_counter > 8\n"); if(cur_live.bit_counter == 8) { live_delay(RUNNING_SYNCPOINT); return; } } break; } case RUNNING_SYNCPOINT: { if(!(dskbyt & 0x2000)) { if(cur_live.shift_reg == dsksync) { if(adkcon & 0x0400) { if(dma_state == DMA_WAIT_START) { cur_live.bit_counter = 0; if(!(dsklen & 0x3fff)) dma_done(); else if(dsklen & 0x4000) { dskbyt |= 0x2000; cur_live.bit_counter = 0; dma_value = dma_read(); } else dma_write(dsksync); } else if(dma_state != DMA_IDLE) { dma_write(dsksync); cur_live.bit_counter = 0; } else if(cur_live.bit_counter != 8) cur_live.bit_counter = 0; } dskbyt |= 0x1000; state->custom_chip_w(REG_INTREQ, INTENA_SETCLR | INTENA_DSKSYN); } else dskbyt &= ~0x1000; if(cur_live.bit_counter == 8) { dskbyt = (dskbyt & 0xff00) | 0x8000 | (cur_live.shift_reg & 0xff); cur_live.bit_counter = 
0; switch(dma_state) { case DMA_IDLE: case DMA_WAIT_START: break; case DMA_RUNNING_BYTE_0: dma_value = (cur_live.shift_reg & 0xff) << 8; dma_state = DMA_RUNNING_BYTE_1; break; case DMA_RUNNING_BYTE_1: { dma_value |= cur_live.shift_reg & 0xff; dma_write(dma_value); break; } } } } else { if(cur_live.bit_counter != 8) fatalerror("amiga_fdc::live_run - cur_live.bit_counter != 8\n"); cur_live.bit_counter = 0; switch(dma_state) { case DMA_IDLE: case DMA_WAIT_START: break; case DMA_RUNNING_BYTE_0: dma_state = DMA_RUNNING_BYTE_1; break; case DMA_RUNNING_BYTE_1: { dma_value = dma_read(); break; } } } cur_live.state = RUNNING; checkpoint(); break; } } } }
int omap_get_dma_active_status(int lch) { return (dma_read(CCR(lch)) & OMAP_DMA_CCR_EN) != 0; }
// i8257 DMA controller state machine, stepped once per icount tick.
//   SI : idle -- probe channels; suspend until a trigger if none pending.
//   S0 : raise HRQ; wait (suspended) for HLDA (m_hack) before proceeding.
//   S1 : clear TC, S2 : assert DACK, S3 : perform the read half (and the
//        write too, in extended-write mode); insert SW wait states until
//        m_ready.
//   S4 : perform the write half (normal mode), advance the address/count,
//        then either chain to the next channel (S1) or drop HRQ and
//        return to SI.
// NOTE(review): per-state ordering is timing-sensitive for emulation
// accuracy; code kept byte-identical, comments only.
void i8257_device::execute_run() { do { switch (m_state) { case STATE_SI: set_tc(0); if(next_channel()) m_state = STATE_S0; else { suspend_until_trigger(1, true); m_icount = 0; } break; case STATE_S0: set_hreq(1); if (m_hack) { m_state = STATE_S1; } else { suspend_until_trigger(1, true); m_icount = 0; } break; case STATE_S1: set_tc(0); m_state = STATE_S2; break; case STATE_S2: set_dack(); m_state = STATE_S3; break; case STATE_S3: dma_read(); if (MODE_EXTENDED_WRITE) { dma_write(); } m_state = m_ready ? STATE_S4 : STATE_SW; break; case STATE_SW: m_state = m_ready ? STATE_S4 : STATE_SW; break; case STATE_S4: if (!MODE_EXTENDED_WRITE) { dma_write(); } advance(); if(next_channel()) m_state = STATE_S1; else { set_hreq(0); m_current_channel = -1; m_state = STATE_SI; set_dack(); } break; } m_icount--; } while (m_icount > 0); }
/*
 * Chain-transfer queuing: takes the free channel at the chain's q_tail,
 * programs its source/destination addresses (skipping zero values) and
 * element/frame counts, stores the callback cookie, then -- for dynamic
 * chains only -- either queues the channel (chain not started: link it
 * behind a QUEUED predecessor) or ensures the running chain stays
 * active, restarting it via CCR bit 7 if the predecessor was found
 * stopped.  Bit 25 of CCR is kept in sync with bit 24 before any start.
 * Returns 0 on success, -EINVAL on bad args / missing chain, -EBUSY
 * when every channel in the chain is in use.
 * NOTE(review): the QUEUED/STARTED predecessor checks and the
 * enable/disable_lnk ordering are intricately state-dependent; code
 * kept byte-identical, comments only.
 */
/** * @brief omap_dma_chain_a_transfer - Get a free channel from a chain, * set the params and start the transfer. * * @param chain_id * @param src_start - buffer start address * @param dest_start - Dest address * @param elem_count * @param frame_count * @param callbk_data - channel callback parameter data. * * @return - Success : 0 * Failure: -EINVAL/-EBUSY */ int omap_dma_chain_a_transfer(int chain_id, int src_start, int dest_start, int elem_count, int frame_count, void *callbk_data) { int *channels; u32 l, lch; int start_dma = 0; /* * if buffer size is less than 1 then there is * no use of starting the chain */ if (elem_count < 1) { IOLog("Invalid buffer size\n"); return -EINVAL; } /* Check for input params */ if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) { IOLog("Invalid chain id\n"); return -EINVAL; } /* Check if the chain exists */ if (dma_linked_lch[chain_id].linked_dmach_q == NULL) { IOLog("Chain doesn't exist\n"); return -EINVAL; } /* Check if all the channels in chain are in use */ if (OMAP_DMA_CHAIN_QFULL(chain_id)) return -EBUSY; /* Frame count may be negative in case of indexed transfers */ channels = dma_linked_lch[chain_id].linked_dmach_q; /* Get a free channel */ lch = channels[dma_linked_lch[chain_id].q_tail]; /* Store the callback data */ dma_chan[lch].data = callbk_data; /* Increment the q_tail */ OMAP_DMA_CHAIN_INCQTAIL(chain_id); /* Set the params to the free channel */ if (src_start != 0) dma_write(src_start, CSSA(lch)); if (dest_start != 0) dma_write(dest_start, CDSA(lch)); /* Write the buffer size */ dma_write(elem_count, CEN(lch)); dma_write(frame_count, CFN(lch)); /* * If the chain is dynamically linked, * then we may have to start the chain if its not active */ if (dma_linked_lch[chain_id].chain_mode == OMAP_DMA_DYNAMIC_CHAIN) { /* * In Dynamic chain, if the chain is not started, * queue the channel */ if (dma_linked_lch[chain_id].chain_state == DMA_CHAIN_NOTSTARTED) { /* Enable the link in previous channel */ if 
(dma_chan[dma_chan[lch].prev_linked_ch].state == DMA_CH_QUEUED) enable_lnk(dma_chan[lch].prev_linked_ch); dma_chan[lch].state = DMA_CH_QUEUED; } /* * Chain is already started, make sure its active, * if not then start the chain */ else { start_dma = 1; if (dma_chan[dma_chan[lch].prev_linked_ch].state == DMA_CH_STARTED) { enable_lnk(dma_chan[lch].prev_linked_ch); dma_chan[lch].state = DMA_CH_QUEUED; start_dma = 0; if (0 == ((1 << 7) & dma_read( CCR(dma_chan[lch].prev_linked_ch)))) { disable_lnk(dma_chan[lch]. prev_linked_ch); IOLog("\n prev ch is stopped\n"); start_dma = 1; } } else if (dma_chan[dma_chan[lch].prev_linked_ch].state == DMA_CH_QUEUED) { enable_lnk(dma_chan[lch].prev_linked_ch); dma_chan[lch].state = DMA_CH_QUEUED; start_dma = 0; } omap_enable_channel_irq(lch); l = dma_read(CCR(lch)); if ((0 == (l & (1 << 24)))) l &= ~(1 << 25); else l |= (1 << 25); if (start_dma == 1) { if (0 == (l & (1 << 7))) { l |= (1 << 7); dma_chan[lch].state = DMA_CH_STARTED; IOLog("starting %d\n", lch); dma_write(l, CCR(lch)); } else start_dma = 0; } else { if (0 == (l & (1 << 7))) dma_write(l, CCR(lch)); } dma_chan[lch].flags |= OMAP_DMA_ACTIVE; } } return 0; }
// AM9517A DMA controller state machine, stepped once per icount tick.
//   SI      : idle -- scan the four channels (fixed or rotating priority)
//             for an active DREQ/software request, or a mem-to-mem request
//             on a single-mode channel; suspend if none.
//   S0      : raise HRQ; wait for HLDA (m_hack); cascade channels go to SC.
//   SC      : cascade mode -- pass-through until the request drops.
//   S1..S4  : normal transfer -- DACK in S2; read in S3 (plus write in
//             extended-write mode); SW wait states until m_ready; write +
//             advance in S4 (compressed timing does read+write in S4).
//   S11..S24: memory-to-memory -- channel 0 read (S14), channel 1 write
//             (S24), then channel 0 address/count update.
// NOTE(review): per-state ordering is timing-sensitive for emulation
// accuracy; code kept byte-identical, comments only.
void am9517a_device::execute_run() { do { switch (m_state) { case STATE_SI: set_eop(CLEAR_LINE); if (!COMMAND_DISABLE) { int priority[] = { 0, 1, 2, 3 }; if (COMMAND_ROTATING_PRIORITY) { int last_channel = m_last_channel; for (int channel = 3; channel >= 0; channel--) { priority[channel] = last_channel; last_channel--; if (last_channel < 0) last_channel = 3; } } for (int channel = 0; channel < 4; channel++) { if (is_request_active(priority[channel]) || is_software_request_active(priority[channel])) { m_current_channel = m_last_channel = priority[channel]; m_state = STATE_S0; break; } else if (COMMAND_MEM_TO_MEM && BIT(m_request, channel) && ((m_channel[channel].m_mode & 0xc0) == MODE_SINGLE)) { m_current_channel = m_last_channel = priority[channel]; m_state = STATE_S0; break; } } } if(m_state == STATE_SI) { suspend_until_trigger(1, true); m_icount = 0; } break; case STATE_S0: set_hreq(1); if (m_hack) { m_state = (MODE_MASK == MODE_CASCADE) ? STATE_SC : get_state1(true); } else { suspend_until_trigger(1, true); m_icount = 0; } break; case STATE_SC: if (!is_request_active(m_current_channel)) { set_hreq(0); m_current_channel = -1; m_state = STATE_SI; } else { suspend_until_trigger(1, true); m_icount = 0; } set_dack(); break; case STATE_S1: m_state = STATE_S2; break; case STATE_S2: set_dack(); m_state = COMMAND_COMPRESSED_TIMING ? STATE_S4 : STATE_S3; break; case STATE_S3: dma_read(); if (COMMAND_EXTENDED_WRITE) { dma_write(); } m_state = m_ready ? STATE_S4 : STATE_SW; break; case STATE_SW: m_state = m_ready ? 
STATE_S4 : STATE_SW; break; case STATE_S4: if (COMMAND_COMPRESSED_TIMING) { dma_read(); dma_write(); } else if (!COMMAND_EXTENDED_WRITE) { dma_write(); } dma_advance(); break; case STATE_S11: m_current_channel = 0; m_state = STATE_S12; break; case STATE_S12: m_state = STATE_S13; break; case STATE_S13: m_state = STATE_S14; break; case STATE_S14: dma_read(); m_state = STATE_S21; break; case STATE_S21: m_current_channel = 1; m_state = STATE_S22; break; case STATE_S22: m_state = STATE_S23; break; case STATE_S23: m_state = STATE_S24; break; case STATE_S24: dma_write(); dma_advance(); m_current_channel = 0; m_channel[m_current_channel].m_count--; if (MODE_ADDRESS_DECREMENT) { m_channel[m_current_channel].m_address--; } else { m_channel[m_current_channel].m_address++; } break; } m_icount--; } while (m_icount > 0); }
/*
 * Demo entry point: build the scatter/gather buffer set, then issue a
 * single DMA read between the first two buffers' bus addresses into the
 * first buffer's mapping.  Command-line arguments are unused.
 */
int main(int argc, char *argv[])
{
	(void)argc;	/* unused */
	(void)argv;	/* unused */

	buildBuf();

	/* NOTE(review): dma_read()'s return value was ignored in the
	 * original; consider checking it for failure. */
	dma_read(sgBufs.bufs[0].bus, sgBufs.bufs[1].bus, sgBufs.bufs[0].mem);

	return 0;	/* was missing; explicit success status */
}