static void setup_DMA(void) { unsigned long addr,count; unsigned char dma_code; dma_code = DMA_WRITE; if (command == FD_READ) dma_code = DMA_READ; if (command == FD_FORMAT) { addr = (long) tmp_floppy_area; count = floppy->sect*4; } else { addr = (long) CURRENT->buffer; count = 1024; } if (read_track) { /* mark buffer-track bad, in case all this fails.. */ buffer_drive = buffer_track = -1; count = floppy->sect*floppy->head*512; addr = (long) floppy_track_buffer; } else if (addr >= LAST_DMA_ADDR) { addr = (long) tmp_floppy_area; if (command == FD_WRITE) copy_buffer(CURRENT->buffer, tmp_floppy_area); } cli(); disable_dma(FLOPPY_DMA); //clear_dma_ff(FLOPPY_DMA); set_dma_mode(FLOPPY_DMA, (command == FD_READ)? DMA_MODE_READ : DMA_MODE_WRITE); set_dma_addr(FLOPPY_DMA, addr); set_dma_count(FLOPPY_DMA, count); enable_dma(FLOPPY_DMA); sti(); }
static void handlewrite(struct net_device *dev) { /* called *only* from idle, non-reentrant */ /* on entry, 0xfb and ltdmabuf holds data */ int dma = dev->dma; int base = dev->base_addr; unsigned long flags; flags=claim_dma_lock(); disable_dma(dma); clear_dma_ff(dma); set_dma_mode(dma,DMA_MODE_WRITE); set_dma_addr(dma,virt_to_bus(ltdmabuf)); set_dma_count(dma,800); enable_dma(dma); release_dma_lock(flags); inb_p(base+3); inb_p(base+2); if ( wait_timeout(dev,0xfb) ) { flags=claim_dma_lock(); printk("timed out in handlewrite, dma res %d\n", get_dma_residue(dev->dma) ); release_dma_lock(flags); } }
static int bfin_lq035_fb_open(struct fb_info *info, int user) { unsigned long flags; spin_lock_irqsave(&bfin_lq035_lock, flags); lq035_open_cnt++; spin_unlock_irqrestore(&bfin_lq035_lock, flags); if (lq035_open_cnt <= 1) { bfin_write_PPI_CONTROL(0); SSYNC(); set_vcomm(); config_dma(); config_ppi(); /* start dma */ enable_dma(CH_PPI); SSYNC(); bfin_write_PPI_CONTROL(bfin_read_PPI_CONTROL() | PORT_EN); SSYNC(); if (!t_conf_done) { config_timers(); start_timers(); } /* gpio_set_value(MOD,1); */ } return 0; }
/* called in irq context */
/*
 * DMAbuf_inputintr - input (recording) DMA interrupt service.
 *
 * Derives the fragment the ISA DMA controller is currently filling
 * from the channel residue and calls do_inputintr() once per fragment
 * completed since the last interrupt, so the software queue tail
 * catches up with the hardware position.
 */
void DMAbuf_inputintr(int dev)
{
	struct audio_operations *adev = audio_devs[dev];
	struct dma_buffparms *dmap = adev->dmap_in;
	unsigned long flags;

	spin_lock_irqsave(&dmap->lock, flags);
	if (!(dmap->flags & DMA_NODMA)) {
		int chan = dmap->dma, pos, n;
		unsigned long f;

		f = claim_dma_lock();
		/* skip masking the channel on bridges flagged buggy
		 * (isa_dma_bridge_buggy) */
		if (!isa_dma_bridge_buggy)
			disable_dma(dmap->dma);
		clear_dma_ff(chan);
		/* residue counts down: bytes done = total - residue */
		pos = dmap->bytes_in_use - get_dma_residue(chan);
		if (!isa_dma_bridge_buggy)
			enable_dma(dmap->dma);
		release_dma_lock(f);

		pos = pos / dmap->fragment_size;	/* Actual qhead */
		if (pos < 0 || pos >= dmap->nbufs)
			pos = 0;

		/* advance one fragment per call, bounded by nbufs
		 * iterations so a bogus position cannot loop forever */
		n = 0;
		while (dmap->qtail != pos && ++n < dmap->nbufs)
			do_inputintr(dev);
	} else
		do_inputintr(dev);
	spin_unlock_irqrestore(&dmap->lock, flags);
}
/*
 * Clamp the requested transfer so it fits both the channel's maximum
 * count and the distance to the next DMA page boundary, then program
 * the ISA DMA controller.  Returns the byte count actually programmed.
 * Panics on an odd address or length, which this controller cannot do.
 */
static __inline__ int NCR53c406a_dma_setup(unsigned char *ptr, unsigned int count, unsigned char mode)
{
	unsigned room;
	unsigned long flags = 0;

	VDEB(printk("dma: before count=%d ", count));

	if (dma_chan <= 3) {
		/* channels 0-3: 64kB maximum, 64kB page */
		if (count > 65536)
			count = 65536;
		room = 65536 - (((unsigned) ptr) & 0xFFFF);
	} else {
		/* channels 4-7: 128kB maximum, 128kB page */
		if (count > (65536 << 1))
			count = (65536 << 1);
		room = (65536 << 1) - (((unsigned) ptr) & 0x1FFFF);
	}
	if (count > room)
		count = room;

	VDEB(printk("after count=%d\n", count));

	if ((count & 1) || (((unsigned) ptr) & 1))
		panic("NCR53c406a: attempted unaligned DMA transfer\n");

	flags = claim_dma_lock();
	disable_dma(dma_chan);
	clear_dma_ff(dma_chan);
	set_dma_addr(dma_chan, (long) ptr);
	set_dma_count(dma_chan, count);
	set_dma_mode(dma_chan, mode);
	enable_dma(dma_chan);
	release_dma_lock(flags);

	return count;
}
/*
 * bfin_sir_dma_tx_chars - push the SIR transmit buffer out via DMA.
 *
 * When tx_buff is empty the frame is finished: apply any pending speed
 * change, switch back to receive, and wake the net queue.  Otherwise
 * flush the data cache over the buffer and program a linear 8-bit DMA
 * transfer of tx_buff.len bytes on the TX channel.
 */
static void bfin_sir_dma_tx_chars(struct net_device *dev)
{
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;

	/* a previous transfer is still in flight - don't reprogram */
	if (!port->tx_done)
		return;
	port->tx_done = 0;

	if (self->tx_buff.len == 0) {
		self->stats.tx_packets++;
		if (self->newspeed) {
			bfin_sir_set_speed(port, self->newspeed);
			self->speed = self->newspeed;
			self->newspeed = 0;
		}
		bfin_sir_enable_rx(port);
		port->tx_done = 1;
		netif_wake_queue(dev);
		return;
	}

	/* make the buffer contents visible to the DMA engine */
	blackfin_dcache_flush_range((unsigned long)(self->tx_buff.data),
		(unsigned long)(self->tx_buff.data + self->tx_buff.len));
	set_dma_config(port->tx_dma_channel,
		set_bfin_dma_config(DIR_READ, DMA_FLOW_STOP, INTR_ON_BUF,
			DIMENSION_LINEAR, DATA_SIZE_8, DMA_SYNC_RESTART));
	set_dma_start_addr(port->tx_dma_channel,
		(unsigned long)(self->tx_buff.data));
	set_dma_x_count(port->tx_dma_channel, self->tx_buff.len);
	set_dma_x_modify(port->tx_dma_channel, 1);
	enable_dma(port->tx_dma_channel);
}
/**
 * snd_dma_pointer - return the current pointer to DMA transfer buffer in bytes
 * @dma: the dma number
 * @size: the dma transfer size
 *
 * Returns the current pointer in DMA transfer buffer in bytes
 */
unsigned int snd_dma_pointer(unsigned long dma, unsigned int size)
{
	unsigned long flags;
	unsigned int result, result1;

	flags = claim_dma_lock();
	clear_dma_ff(dma);
	/* skip masking the channel on bridges flagged buggy */
	if (!isa_dma_bridge_buggy)
		disable_dma(dma);
	result = get_dma_residue(dma);
	/*
	 * HACK - read the counter again and choose higher value in order to
	 * avoid reading during counter lower byte roll over if the
	 * isa_dma_bridge_buggy is set.
	 */
	result1 = get_dma_residue(dma);
	if (!isa_dma_bridge_buggy)
		enable_dma(dma);
	release_dma_lock(flags);
	/* the larger residue is the pre-rollover reading */
	if (unlikely(result < result1))
		result = result1;
#ifdef CONFIG_SND_DEBUG
	if (result > size)
		snd_printk(KERN_ERR "pointer (0x%x) for DMA #%ld is greater than transfer size (0x%x)\n", result, dma, size);
#endif
	/* a full or zero residue both map to the start of the buffer */
	if (result >= size || result == 0)
		return 0;
	else
		return size - result;
}
static int ppi_start(struct ppi_if *ppi) { const struct ppi_info *info = ppi->info; /* enable DMA */ enable_dma(info->dma_ch); /* enable PPI */ ppi->ppi_control |= PORT_EN; switch (info->type) { case PPI_TYPE_PPI: { struct bfin_ppi_regs *reg = info->base; bfin_write16(®->control, ppi->ppi_control); break; } case PPI_TYPE_EPPI: { struct bfin_eppi_regs *reg = info->base; bfin_write32(®->control, ppi->ppi_control); break; } case PPI_TYPE_EPPI3: { struct bfin_eppi3_regs *reg = info->base; bfin_write32(®->ctl, ppi->ppi_control); break; } default: return -EINVAL; } SSYNC(); return 0; }
/* Prototype: fasdmatype_t eesoxscsi_dma_setup(host, SCpnt, direction, min_type)
 * Purpose : initialises DMA/PIO
 * Params : host - host
 *          SCpnt - command
 *          direction - DMA on to/off of card
 *          min_type - minimum DMA support that we must have for this transfer
 * Returns : type of transfer to be performed
 */
static fasdmatype_t
eesoxscsi_dma_setup(struct Scsi_Host *host, Scsi_Pointer *SCp,
		    fasdmadir_t direction, fasdmatype_t min_type)
{
	struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata;
	int dmach = host->dma_channel;

	/* transfers under 512 bytes fall back to PIO unless the caller
	 * demands real DMA */
	if (dmach != NO_DMA &&
	    (min_type == fasdma_real_all || SCp->this_residual >= 512)) {
		int bufs, map_dir, dma_dir;

		bufs = copy_SCp_to_sg(&info->sg[0], SCp, NR_SG);

		/* DMA_OUT: memory -> card; otherwise card -> memory */
		if (direction == DMA_OUT)
			map_dir = PCI_DMA_TODEVICE,
			dma_dir = DMA_MODE_WRITE;
		else
			map_dir = PCI_DMA_FROMDEVICE,
			dma_dir = DMA_MODE_READ;

		pci_map_sg(NULL, info->sg, bufs, map_dir);

		disable_dma(dmach);
		set_dma_sg(dmach, info->sg, bufs);
		set_dma_mode(dmach, dma_dir);
		enable_dma(dmach);
		return fasdma_real_all;
	}
	/*
	 * We don't do DMA, we only do slow PIO
	 *
	 * Some day, we will do Pseudo DMA
	 */
	return fasdma_pseudo;
}
/* Prototype: fasdmatype_t powertecscsi_dma_setup(host, SCpnt, direction, min_type)
 * Purpose : initialises DMA/PIO
 * Params : host - host
 *          SCpnt - command
 *          direction - DMA on to/off of card
 *          min_type - minimum DMA support that we must have for this transfer
 * Returns : type of transfer to be performed
 */
static fasdmatype_t
powertecscsi_dma_setup(struct Scsi_Host *host, Scsi_Pointer *SCp,
		       fasdmadir_t direction, fasdmatype_t min_type)
{
	struct powertec_info *info = (struct powertec_info *)host->hostdata;
	int dmach = host->dma_channel;

	/* real DMA only when the card advertises it and the caller
	 * insists on it; everything else is PIO */
	if (info->info.ifcfg.capabilities & FASCAP_DMA &&
	    min_type == fasdma_real_all) {
		int bufs, map_dir, dma_dir;

		bufs = copy_SCp_to_sg(&info->sg[0], SCp, NR_SG);

		/* DMA_OUT: memory -> card; otherwise card -> memory */
		if (direction == DMA_OUT)
			map_dir = PCI_DMA_TODEVICE,
			dma_dir = DMA_MODE_WRITE;
		else
			map_dir = PCI_DMA_FROMDEVICE,
			dma_dir = DMA_MODE_READ;

		pci_map_sg(NULL, info->sg, bufs, map_dir);

		disable_dma(dmach);
		set_dma_sg(dmach, info->sg, bufs);
		set_dma_mode(dmach, dma_dir);
		enable_dma(dmach);
		return fasdma_real_all;
	}
	/*
	 * If we're not doing DMA,
	 * we'll do slow PIO
	 */
	return fasdma_pio;
}
/*
 * DMAbuf_outputintr - output (playback) DMA interrupt service.
 *
 * Counterpart of DMAbuf_inputintr(): derives the fragment the DMA
 * controller is currently playing from the channel residue and calls
 * do_outputintr() for each fragment completed since the last call.
 * Uses the older save_flags()/cli() scheme rather than a spinlock.
 */
void DMAbuf_outputintr(int dev, int notify_only)
{
	struct audio_operations *adev = audio_devs[dev];
	unsigned long flags;
	struct dma_buffparms *dmap = adev->dmap_out;

	save_flags(flags);
	cli();
	if (!(dmap->flags & DMA_NODMA)) {
		int chan = dmap->dma, pos, n;
		unsigned long f;

		f = claim_dma_lock();
		/* skip masking the channel on bridges flagged buggy */
		if (!isa_dma_bridge_buggy)
			disable_dma(dmap->dma);
		clear_dma_ff(chan);
		/* residue counts down: bytes done = total - residue */
		pos = dmap->bytes_in_use - get_dma_residue(chan);
		if (!isa_dma_bridge_buggy)
			enable_dma(dmap->dma);
		release_dma_lock(f);

		pos = pos / dmap->fragment_size;	/* Actual qhead */
		if (pos < 0 || pos >= dmap->nbufs)
			pos = 0;

		/* advance the queue head, at most nbufs iterations */
		n = 0;
		while (dmap->qhead != pos && n++ < dmap->nbufs)
			do_outputintr(dev, notify_only);
	} else
		do_outputintr(dev, notify_only);
	restore_flags(flags);
}
/*
 * receive_packet - start a DMA transfer of `len` bytes from the
 * adapter into a freshly allocated skb.
 *
 * If the skb data would end above MAX_DMA_ADDRESS, the transfer is
 * bounced through adapter->dma_buffer and current_dma.target records
 * where the data must be copied once the DMA completes; otherwise
 * current_dma.target stays NULL and the DMA lands in the skb directly.
 */
static void receive_packet(struct net_device *dev, int len)
{
	int rlen;
	elp_device *adapter = dev->priv;
	void *target;
	struct sk_buff *skb;
	unsigned long flags;

	/* round the length up to an even number of bytes */
	rlen = (len + 1) & ~1;
	skb = dev_alloc_skb(rlen + 2);

	if (!skb) {
		printk(KERN_WARNING "%s: memory squeeze, dropping packet\n", dev->name);
		/* NOTE(review): this store is dead - we return right away
		 * without starting a DMA; presumably leftover from an older
		 * drop-into-bounce-buffer path - confirm */
		target = adapter->dma_buffer;
		adapter->current_dma.target = NULL;
		/* FIXME: stats */
		return;
	}

	skb_reserve(skb, 2);
	target = skb_put(skb, rlen);
	/* bounce if the skb data is out of ISA DMA reach */
	if ((unsigned long)(target + rlen) >= MAX_DMA_ADDRESS) {
		adapter->current_dma.target = target;
		target = adapter->dma_buffer;
	} else {
		adapter->current_dma.target = NULL;
	}

	/* if this happens, we die */
	if (test_and_set_bit(0, (void *) &adapter->dmaing))
		printk(KERN_ERR "%s: rx blocked, DMA in progress, dir %d\n",
		       dev->name, adapter->current_dma.direction);

	skb->dev = dev;
	adapter->current_dma.direction = 0;
	adapter->current_dma.length = rlen;
	adapter->current_dma.skb = skb;
	adapter->current_dma.start_time = jiffies;

	outb_control(adapter->hcr_val | DIR | TCEN | DMAE, dev);

	flags = claim_dma_lock();
	disable_dma(dev->dma);
	clear_dma_ff(dev->dma);
	set_dma_mode(dev->dma, 0x04);	/* dma read */
	set_dma_addr(dev->dma, isa_virt_to_bus(target));
	set_dma_count(dev->dma, rlen);
	enable_dma(dev->dma);
	release_dma_lock(flags);

	if (elp_debug >= 3) {
		printk(KERN_DEBUG "%s: rx DMA transfer started\n", dev->name);
	}

	if (adapter->rx_active)
		adapter->rx_active--;
	if (!adapter->busy)
		printk(KERN_WARNING "%s: receive_packet called, busy not set.\n", dev->name);
}
/*
 * send_packet - hand one sk_buff to the adapter and start the TX DMA.
 *
 * Returns true once the DMA transfer has been started, false when the
 * adapter is busy or the TRANSMIT_PACKET PCB could not be sent.
 *
 * NOTE(review): declared netdev_tx_t but returns bool; false == 0 ==
 * NETDEV_TX_OK, so a caller applying netdev_tx_t semantics would treat
 * a refused packet as sent - confirm how the caller interprets this.
 */
static netdev_tx_t send_packet(struct net_device *dev, struct sk_buff *skb)
{
	elp_device *adapter = netdev_priv(dev);
	unsigned long target;
	unsigned long flags;

	/* pad short frames to the 60-byte minimum, rounded up to even */
	unsigned int nlen = (((skb->len < 60) ? 60 : skb->len) + 1) & (~1);

	if (test_and_set_bit(0, (void *) &adapter->busy)) {
		if (elp_debug >= 2)
			pr_debug("%s: transmit blocked\n", dev->name);
		return false;
	}

	dev->stats.tx_bytes += nlen;

	/* tell the card a transmit packet of nlen bytes is coming */
	adapter->tx_pcb.command = CMD_TRANSMIT_PACKET;
	adapter->tx_pcb.length = sizeof(struct Xmit_pkt);
	adapter->tx_pcb.data.xmit_pkt.buf_ofs = adapter->tx_pcb.data.xmit_pkt.buf_seg = 0;
	adapter->tx_pcb.data.xmit_pkt.pkt_len = nlen;

	if (!send_pcb(dev, &adapter->tx_pcb)) {
		adapter->busy = 0;
		return false;
	}

	if (test_and_set_bit(0, (void *) &adapter->dmaing))
		pr_debug("%s: tx: DMA %d in progress\n", dev->name, adapter->current_dma.direction);

	adapter->current_dma.direction = 1;
	adapter->current_dma.start_time = jiffies;

	/* bounce through dma_buffer when the skb is out of ISA DMA
	 * reach or padding bytes must be appended */
	if ((unsigned long)(skb->data + nlen) >= MAX_DMA_ADDRESS || nlen != skb->len) {
		skb_copy_from_linear_data(skb, adapter->dma_buffer, nlen);
		memset(adapter->dma_buffer + skb->len, 0, nlen - skb->len);
		target = isa_virt_to_bus(adapter->dma_buffer);
	} else {
		target = isa_virt_to_bus(skb->data);
	}
	adapter->current_dma.skb = skb;

	flags = claim_dma_lock();
	disable_dma(dev->dma);
	clear_dma_ff(dev->dma);
	set_dma_mode(dev->dma, 0x48);	/* dma memory -> io */
	set_dma_addr(dev->dma, target);
	set_dma_count(dev->dma, nlen);
	outb_control(adapter->hcr_val | DMAE | TCEN, dev);
	enable_dma(dev->dma);
	release_dma_lock(flags);

	if (elp_debug >= 3)
		pr_debug("%s: DMA transfer started\n", dev->name);

	return true;
}
static int bfin_sir_startup(struct bfin_sir_port *port, struct net_device *dev) { #ifdef CONFIG_SIR_BFIN_DMA dma_addr_t dma_handle; #endif /* CONFIG_SIR_BFIN_DMA */ if (request_dma(port->rx_dma_channel, "BFIN_UART_RX") < 0) { dev_warn(&dev->dev, "Unable to attach SIR RX DMA channel\n"); return -EBUSY; } if (request_dma(port->tx_dma_channel, "BFIN_UART_TX") < 0) { dev_warn(&dev->dev, "Unable to attach SIR TX DMA channel\n"); free_dma(port->rx_dma_channel); return -EBUSY; } #ifdef CONFIG_SIR_BFIN_DMA set_dma_callback(port->rx_dma_channel, bfin_sir_dma_rx_int, dev); set_dma_callback(port->tx_dma_channel, bfin_sir_dma_tx_int, dev); port->rx_dma_buf.buf = dma_alloc_coherent(NULL, PAGE_SIZE, &dma_handle, GFP_DMA); port->rx_dma_buf.head = 0; port->rx_dma_buf.tail = 0; port->rx_dma_nrows = 0; set_dma_config(port->rx_dma_channel, set_bfin_dma_config(DIR_WRITE, DMA_FLOW_AUTO, INTR_ON_ROW, DIMENSION_2D, DATA_SIZE_8, DMA_SYNC_RESTART)); set_dma_x_count(port->rx_dma_channel, DMA_SIR_RX_XCNT); set_dma_x_modify(port->rx_dma_channel, 1); set_dma_y_count(port->rx_dma_channel, DMA_SIR_RX_YCNT); set_dma_y_modify(port->rx_dma_channel, 1); set_dma_start_addr(port->rx_dma_channel, (unsigned long)port->rx_dma_buf.buf); enable_dma(port->rx_dma_channel); port->rx_dma_timer.data = (unsigned long)(dev); port->rx_dma_timer.function = (void *)bfin_sir_rx_dma_timeout; #else if (request_irq(port->irq, bfin_sir_rx_int, 0, "BFIN_SIR_RX", dev)) { dev_warn(&dev->dev, "Unable to attach SIR RX interrupt\n"); return -EBUSY; } if (request_irq(port->irq+1, bfin_sir_tx_int, 0, "BFIN_SIR_TX", dev)) { dev_warn(&dev->dev, "Unable to attach SIR TX interrupt\n"); free_irq(port->irq, dev); return -EBUSY; } #endif return 0; }
/*
 * sport_rx_start - arm the SPORT RX DMA descriptor chain and enable
 * reception on the primary channel.
 */
void sport_rx_start(struct sport_device *sport)
{
	/* point the channel at the prepared descriptor list */
	set_dma_next_desc_addr(sport->rx_dma_chan, sport->rx_desc);
	/* descriptor-list flow, interrupt enabled, memory-write (WNR),
	 * word size from the configured sample width */
	set_dma_config(sport->rx_dma_chan,
		       DMAFLOW_LIST | DI_EN | WNR |
		       compute_wdsize(sport->wdsize) | NDSIZE_6);
	enable_dma(sport->rx_dma_chan);
	sport->rx_regs->spctl |= SPORT_CTL_SPENPRI;
	SSYNC();
}
/*
 * __dma_write - write `length` bytes at bus address `address` out over
 * GPIB using the board's ISA DMA channel.
 *
 * Programs the DMA controller, enables the nec7210's DMA-out request,
 * then sleeps until the interrupt side reports completion, a bus
 * error, a device clear, or a timeout.  Returns `length` on success or
 * a negative errno.
 *
 * NOTE(review): the error checks below each overwrite `retval`, so a
 * later, more generic error (-EPIPE for a short transfer) masks an
 * earlier specific one (-ETIMEDOUT etc.) - confirm this is intended.
 */
static ssize_t __dma_write(gpib_board_t *board, nec7210_private_t *priv, dma_addr_t address, size_t length)
{
	unsigned long flags, dma_irq_flags;
	int residue = 0;
	int retval = 0;

	spin_lock_irqsave(&board->spinlock, flags);

	/* program dma controller */
	dma_irq_flags = claim_dma_lock();
	disable_dma(priv->dma_channel);
	clear_dma_ff(priv->dma_channel);
	set_dma_count(priv->dma_channel, length);
	set_dma_addr(priv->dma_channel, address);
	set_dma_mode(priv->dma_channel, DMA_MODE_WRITE );
	enable_dma(priv->dma_channel);
	release_dma_lock(dma_irq_flags);

	// enable board's dma for output
	nec7210_set_reg_bits( priv, IMR2, HR_DMAO, HR_DMAO );

	clear_bit(WRITE_READY_BN, &priv->state);
	set_bit(DMA_WRITE_IN_PROGRESS_BN, &priv->state);

	spin_unlock_irqrestore(&board->spinlock, flags);

	// suspend until message is sent
	if(wait_event_interruptible(board->wait,
		test_bit(DMA_WRITE_IN_PROGRESS_BN, &priv->state) == 0 ||
		test_bit( BUS_ERROR_BN, &priv->state ) ||
		test_bit( DEV_CLEAR_BN, &priv->state ) ||
		test_bit(TIMO_NUM, &board->status)))
	{
		GPIB_DPRINTK( "gpib write interrupted!\n" );
		retval = -ERESTARTSYS;
	}
	if(test_bit(TIMO_NUM, &board->status))
		retval = -ETIMEDOUT;
	if( test_and_clear_bit( DEV_CLEAR_BN, &priv->state ) )
		retval = -EINTR;
	if( test_and_clear_bit( BUS_ERROR_BN, &priv->state ) )
		retval = -EIO;

	// disable board's dma
	nec7210_set_reg_bits( priv, IMR2, HR_DMAO, 0 );

	dma_irq_flags = claim_dma_lock();
	clear_dma_ff(priv->dma_channel);
	disable_dma(priv->dma_channel);
	/* anything left in the counter means the transfer was cut short */
	residue = get_dma_residue(priv->dma_channel);
	release_dma_lock( dma_irq_flags );
	if(residue)
		retval = -EPIPE;

	return retval ? retval : length;
}
/*
 * DMAbuf_get_buffer_pointer - best-effort byte position of the DMA
 * pointer within the audio buffer.
 *
 * Returns 0 when the channel is idle or the reading is implausible,
 * otherwise bytes completed (= bytes_in_use - residue).
 */
int DMAbuf_get_buffer_pointer(int dev, struct dma_buffparms *dmap, int direction)
{
	/*
	 * Try to approximate the active byte position of the DMA pointer within the
	 * buffer area as well as possible.
	 */
	int pos;
	unsigned long flags;
	unsigned long f;

	save_flags(flags);
	cli();
	if (!(dmap->flags & DMA_ACTIVE))
		pos = 0;
	else {
		int chan = dmap->dma;

		f = claim_dma_lock();
		clear_dma_ff(chan);
		/* skip masking the channel on bridges flagged buggy */
		if (!isa_dma_bridge_buggy)
			disable_dma(dmap->dma);
		pos = get_dma_residue(chan);
		/* residue counts down: position = total - residue */
		pos = dmap->bytes_in_use - pos;

		if (!(dmap->mapping_flags & DMA_MAP_MAPPED)) {
			/* on the very first fragment, a position past one
			 * fragment presumably means a stale counter - clamp
			 * to zero */
			if (direction == DMODE_OUTPUT) {
				if (dmap->qhead == 0)
					if (pos > dmap->fragment_size)
						pos = 0;
			} else {
				if (dmap->qtail == 0)
					if (pos > dmap->fragment_size)
						pos = 0;
			}
		}
		if (pos < 0)
			pos = 0;
		if (pos >= dmap->bytes_in_use)
			pos = 0;

		if (!isa_dma_bridge_buggy)
			enable_dma(dmap->dma);
		release_dma_lock(f);
	}
	restore_flags(flags);
	/* printk( "%04x ", pos); */
	return pos;
}
/*
 * Service an ISA DMA terminal-count event: drain the data the DMA
 * engine has delivered so far, re-arm the channel, and acknowledge the
 * event in the board's DMATC register.
 */
static void handle_isa_dma(struct comedi_device *dev)
{
	struct labpc_private *devpriv = dev->private;

	labpc_drain_dma(dev);

	enable_dma(devpriv->dma_chan);

	/* clear dma tc interrupt */
	devpriv->write_byte(dev, 0x1, DMATC_CLEAR_REG);
}
/* data format transform */
/*
 * hi3620_data_transform_work - workqueue handler: widen one PCM period
 * from 16-bit to 32-bit samples, then start the output DMA.
 */
static void hi3620_data_transform_work(struct work_struct *work)
{
	struct dc_dma_para *dma_para = container_of(work, struct dc_dma_para, irq_work);
	/* source: the period at index period_next within the source area */
	unsigned char *data_buff_src = dma_para->sour_vir_addr + dma_para->period_next * dma_para->dma_size;
	/* every 16-bit sample becomes 32 bits, so the output is doubled */
	int dst_dma_size = dma_para->dma_size*2;

	/* transform data_buff_src's data to the format that low 16bits is
	   effective and high 16bits is zero, and then save into data_buff_dst */
	pcm_16bits_to_32bits(data_buff_src, dma_para->dst_vir_addr, dst_dma_size);
	enable_dma(dma_para->dma, dma_para->dst_dma_addr, 0, dst_dma_size);
}
/*
 * mmpfb_ioctl - framebuffer ioctl dispatcher.
 *
 * Rejects all commands with -EINVAL while the display path is not
 * usable, then routes each ioctl to its helper.  Commands whose
 * helpers return a status pass that status through; the rest fall out
 * of the switch and return 0.  Unknown commands are logged.
 */
int mmpfb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
{
	struct mmpfb_info *fbi = info->par;

	dev_dbg(info->dev, "cmd 0x%x, arg 0x%lx\n", cmd, arg);

	if (!mmp_path_ctrl_safe(fbi->path))
		return -EINVAL;

	switch (cmd) {
	case FB_IOCTL_QUERY_GLOBAL_INFO:
		get_global_info(info, arg);
		break;
	case FB_IOCTL_FLIP_COMMIT:
		enable_commit(info, arg);
		break;
	case FB_IOCTL_WAIT_VSYNC:
		mmp_wait_vsync(&fbi->path->vsync);
		break;
	case FB_IOCTL_FLIP_USR_BUF:
#ifdef CONFIG_MMP_FENCE
		return flip_buffer(info, arg);
#else
		return flip_buffer_vsync(info, arg);
#endif
	case FB_IOCTL_FLIP_VSYNC:
		return flip_buffer_vsync(info, arg);
	case FB_IOCTL_GAMMA_SET:
		return set_gamma(info, arg);
	case FB_IOCTL_SET_COLORKEYnALPHA:
		return set_colorkey_alpha(info, arg);
	case FB_IOCTL_GET_COLORKEYnALPHA:
		return get_colorkey_alpha(info, arg);
	case FB_IOCTL_ENABLE_DMA:
		return enable_dma(info, arg);
	case FB_IOCTL_VSMOOTH_EN:
		return vsmooth_en(info, arg);
	/* FB_IOCTL_ENABLE_COMMIT_DMA is only for overlay commit temporarily */
	case FB_IOCTL_ENABLE_COMMIT_DMA:
		return enable_commit_dma(info, arg);
	case FB_IOCTL_SET_PATHALPHA:
		return set_path_alpha(info, arg);
	case FB_IOCTL_SET_DFC_RATE:
		return set_dfc_rate(info, arg);
	case FB_IOCTL_GET_DFC_RATE:
		return get_dfc_rate(info, arg);
	default:
		dev_info(info->dev, "unknown ioctl 0x%x\n", cmd);
		return -EINVAL;
	}
	return 0;
}
/*
 * Character-device open: rewind the capture bookkeeping, then turn on
 * the PPI DMA channel and the PPI port itself.  Always succeeds.
 */
static int ppi_chr_open(struct inode *i, struct file *filp)
{
	/* start from a clean slate for this session */
	current_buffer_index = 0;
	current_buffer_pointer = 0;

	/* enable DMA */
	enable_dma(CH_PPI);

	/* enable PPI */
	bfin_write_PPI_CONTROL(bfin_read_PPI_CONTROL() | PORT_EN);

	return 0;
}
/*
 * coreb_read - copy `count` bytes of Core B memory into `buf` using
 * the MEM_STREAM2 memory-to-memory DMA pair, PAGE_SIZE at a time.
 *
 * Returns the number of bytes read, or -EFAULT when the request would
 * run past the end of Core B memory.
 *
 * Fix: *ppos was never advanced, so consecutive read() calls kept
 * returning the same data; the file position is now updated on return.
 *
 * NOTE(review): the destination DMA writes straight into `buf` with no
 * copy_to_user(), so this assumes callers pass DMA-reachable,
 * kernel-mapped memory - confirm.
 */
static ssize_t coreb_read(struct file *file, char *buf, size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read = 0;

	/* refuse reads running past the end of Core B memory */
	if ((p + count) > coreb_size)
		return -EFAULT;

	while (count > 0) {
		int len = count;

		if (len > PAGE_SIZE)
			len = PAGE_SIZE;

		/* Source Channel */
		set_dma_start_addr(CH_MEM_STREAM2_SRC, coreb_base + p);
		set_dma_x_count(CH_MEM_STREAM2_SRC, len);
		set_dma_x_modify(CH_MEM_STREAM2_SRC, sizeof(char));
		set_dma_config(CH_MEM_STREAM2_SRC, RESTART);
		/* Destination Channel */
		set_dma_start_addr(CH_MEM_STREAM2_DEST, (unsigned long)buf);
		set_dma_x_count(CH_MEM_STREAM2_DEST, len);
		set_dma_x_modify(CH_MEM_STREAM2_DEST, sizeof(char));
		set_dma_config(CH_MEM_STREAM2_DEST, WNR | RESTART | DI_EN);

		enable_dma(CH_MEM_STREAM2_SRC);
		enable_dma(CH_MEM_STREAM2_DEST);
		/* the completion interrupt (DI_EN) wakes us up */
		interruptible_sleep_on(&coreb_dma_wait);
		disable_dma(CH_MEM_STREAM2_SRC);
		disable_dma(CH_MEM_STREAM2_DEST);

		count -= len;
		read += len;
		buf += len;
		p += len;
	}

	/* advance the file position so successive reads make progress */
	*ppos = p;

	return read;
}
/*
 * Program an ISA DMA channel with the given mode, physical address and
 * byte count, then start it.  Always returns 0.
 */
static int sscape_start_dma(int chan, unsigned long physaddr, int count, int dma_mode)
{
	unsigned long lock_flags = claim_dma_lock();

	disable_dma(chan);
	clear_dma_ff(chan);
	set_dma_mode(chan, dma_mode);
	set_dma_addr(chan, physaddr);
	set_dma_count(chan, count);
	enable_dma(chan);

	release_dma_lock(lock_flags);
	return 0;
}
/*
 * dac0800_startdma - program and fire a DMA write of `size` bytes from
 * `buf` to the DAC0800 output port.
 */
void dac0800_startdma(unsigned char *buf, unsigned int size)
{
	/* stop the channel before reprogramming it.
	 * NOTE(review): the original comment read "Clear DMA interrupt",
	 * but this is a plain disable_dma(); confirm whether disable_dma()
	 * also acks the IRQ on this platform */
	disable_dma(DAC0800_DMA_CHAN);

	/* Do DMA write to i/o operation */
	set_dma_mode(DAC0800_DMA_CHAN, DMA_MODE_WRITE);
	set_dma_device_addr(DAC0800_DMA_CHAN, DAC0800_DMA_DESTADDR);
	set_dma_addr(DAC0800_DMA_CHAN, (unsigned int) buf);
	set_dma_count(DAC0800_DMA_CHAN, size);

	/* Fire it off! */
	enable_dma(DAC0800_DMA_CHAN);
}
/* * Configure and start DMA engine. */ void __inline__ m5249audio_dmarun(void) { #if DEBUG printk("m5249audio_dmarun(): dma=%x count=%d\n", m5249audio_dmastart, m5249audio_dmacount); #endif set_dma_mode(M5249AUDIO_DMA, DMA_MODE_WRITE|DMA_MODE_LONG_BIT); set_dma_device_addr(M5249AUDIO_DMA, (MCF_MBAR2+MCFA_PDOR3)); set_dma_addr(M5249AUDIO_DMA, (int)&m5249audio_buf[m5249audio_dmastart]); set_dma_count(M5249AUDIO_DMA, m5249audio_dmacount); m5249audio_dmaing = 1; m5249audio_txbusy = 1; enable_dma(M5249AUDIO_DMA); }
/*
 * jz_mmc_start_dma - program and start an MMC data transfer on a
 * jz47xx DMA channel.
 */
static inline void jz_mmc_start_dma(int chan, unsigned long phyaddr, int count, int mode)
{
	unsigned long flags;

	flags = claim_dma_lock();
	disable_dma(chan);
	clear_dma_ff(chan);
	/* transfers move in 32-byte blocks */
	jz_set_dma_block_size(chan, 32);
	set_dma_mode(chan, mode);
	set_dma_addr(chan, phyaddr);
	/* NOTE(review): count padded by 31 - presumably to round the byte
	 * count up to a whole 32-byte block; confirm against this
	 * platform's set_dma_count() semantics */
	set_dma_count(chan, count + 31);
	enable_dma(chan);
	release_dma_lock(flags);
}
/**
 * snd_dma_program - program an ISA DMA transfer
 * @dma: the dma number
 * @addr: the physical address of the buffer
 * @size: the DMA transfer size
 * @mode: the DMA transfer mode, DMA_MODE_XXX
 *
 * Programs an ISA DMA transfer for the given buffer.
 */
void snd_dma_program(unsigned long dma, unsigned long addr,
		     unsigned int size, unsigned short mode)
{
	unsigned long lock_flags = claim_dma_lock();

	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma, mode);
	set_dma_addr(dma, addr);
	set_dma_count(dma, size);
	/* DMA_MODE_NO_ENABLE lets the caller start the channel itself */
	if (!(mode & DMA_MODE_NO_ENABLE))
		enable_dma(dma);

	release_dma_lock(lock_flags);
}
/*
 * setup_rx_dma - program the ISA DMA channel for receive into the
 * current receive buffer and tell the SCC to raise DMA requests.
 */
static void setup_rx_dma(struct pt_local *lp)
{
	unsigned long flags;
	int cmd;
	unsigned long dma_abs;
	unsigned char dmachan;

	save_flags(flags);
	cli();

	dma_abs = (unsigned long) (lp->rcvbuf->data);
	dmachan = lp->dmachan;
	cmd = lp->base + CTL;

	/* the buffer must not straddle an ISA DMA page boundary */
	if (!valid_dma_page(dma_abs, DMA_BUFF_SIZE + sizeof(struct mbuf)))
		panic("PI: RX buffer violates DMA boundary!");

	/* Get ready for RX DMA */
	wrtscc(lp->cardbase, cmd, R1, WT_FN_RDYFN | WT_RDY_RT | INT_ERR_Rx | EXT_INT_ENAB);

	disable_dma(dmachan);
	clear_dma_ff(dmachan);

	/*
	 * Set DMA mode register to single transfers, incrementing address,
	 * auto init, writes
	 */
	/* (0x10 is the auto-init bit) */
	set_dma_mode(dmachan, DMA_MODE_READ | 0x10);
	set_dma_addr(dmachan, dma_abs);
	set_dma_count(dmachan, lp->bufsiz);
	enable_dma(dmachan);

	/*
	 * If a packet is already coming in, this line is supposed to
	 * avoid receiving a partial packet.
	 */
	wrtscc(lp->cardbase, cmd, R0, RES_Rx_CRC);

	/* Enable RX dma */
	wrtscc(lp->cardbase, cmd, R1,
	       WT_RDY_ENAB | WT_FN_RDYFN | WT_RDY_RT | INT_ERR_Rx | EXT_INT_ENAB);

	restore_flags(flags);
}
/*
 * Program the buffer's ISA DMA channel with the given mode, physical
 * address and byte count, then start the transfer.  Always returns 0.
 */
static int sound_start_dma(struct dma_buffparms *dmap, unsigned long physaddr, int count, int dma_mode)
{
	int channel = dmap->dma;
	unsigned long lock_flags = claim_dma_lock();

	disable_dma(channel);
	clear_dma_ff(channel);
	set_dma_mode(channel, dma_mode);
	set_dma_addr(channel, physaddr);
	set_dma_count(channel, count);
	enable_dma(channel);

	release_dma_lock(lock_flags);
	return 0;
}
/* Prototype: fasdmatype_t cumanascsi_2_dma_setup(host, SCpnt, direction, min_type)
 * Purpose : initialises DMA/PIO
 * Params : host - host
 *          SCpnt - command
 *          direction - DMA on to/off of card
 *          min_type - minimum DMA support that we must have for this transfer
 * Returns : type of transfer to be performed
 */
static fasdmatype_t
cumanascsi_2_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp,
		       fasdmadir_t direction, fasdmatype_t min_type)
{
	struct cumanascsi2_info *info = (struct cumanascsi2_info *)host->hostdata;
	struct device *dev = scsi_get_device(host);
	int dmach = info->info.scsi.dma;

	/* card-side DMA is disabled first, whichever path is taken */
	writeb(ALATCH_DIS_DMA, info->base + CUMANASCSI2_ALATCH);

	/* transfers under 512 bytes fall back to PIO unless the caller
	 * demands real DMA */
	if (dmach != NO_DMA &&
	    (min_type == fasdma_real_all || SCp->this_residual >= 512)) {
		int bufs, map_dir, dma_dir, alatch_dir;

		bufs = copy_SCp_to_sg(&info->sg[0], SCp, NR_SG);

		/* DMA_OUT: memory -> card; otherwise card -> memory */
		if (direction == DMA_OUT)
			map_dir = DMA_TO_DEVICE,
			dma_dir = DMA_MODE_WRITE,
			alatch_dir = ALATCH_DMA_OUT;
		else
			map_dir = DMA_FROM_DEVICE,
			dma_dir = DMA_MODE_READ,
			alatch_dir = ALATCH_DMA_IN;

		/* NOTE(review): bufs + 1 here vs plain bufs in the sibling
		 * eesox/powertec setups - presumably copy_SCp_to_sg()'s
		 * return excludes one entry here; confirm */
		dma_map_sg(dev, info->sg, bufs + 1, map_dir);

		disable_dma(dmach);
		set_dma_sg(dmach, info->sg, bufs + 1);
		writeb(alatch_dir, info->base + CUMANASCSI2_ALATCH);
		set_dma_mode(dmach, dma_dir);
		enable_dma(dmach);
		writeb(ALATCH_ENA_DMA, info->base + CUMANASCSI2_ALATCH);
		writeb(ALATCH_DIS_BIT32, info->base + CUMANASCSI2_ALATCH);
		return fasdma_real_all;
	}
	/*
	 * If we're not doing DMA,
	 * we'll do pseudo DMA
	 */
	return fasdma_pio;
}