static void bfin_sir_shutdown(struct bfin_sir_port *port, struct net_device *dev)
{
        unsigned short val;

        bfin_sir_stop_rx(port);

        val = UART_GET_GCTL(port);
        val &= ~(UCEN | UMOD_MASK | RPOLC);
        UART_PUT_GCTL(port, val);

#ifdef CONFIG_SIR_BFIN_DMA
        disable_dma(port->tx_dma_channel);
        disable_dma(port->rx_dma_channel);
        del_timer(&(port->rx_dma_timer));
        dma_free_coherent(NULL, PAGE_SIZE, port->rx_dma_buf.buf, 0);
        /* The DMA channels are only requested in the DMA configuration,
         * so release them here, inside the #ifdef; the IRQ build never
         * called request_dma() and must not call free_dma(). */
        free_dma(port->tx_dma_channel);
        free_dma(port->rx_dma_channel);
#else
        free_irq(port->irq+1, dev);
        free_irq(port->irq, dev);
#endif
}
/* MUST be called with dmap->lock held */
int DMAbuf_get_buffer_pointer(int dev, struct dma_buffparms *dmap, int direction)
{
        /*
         * Try to approximate the active byte position of the DMA pointer
         * within the buffer area as closely as possible.
         */
        int pos;
        unsigned long f;

        if (!(dmap->flags & DMA_ACTIVE))
                pos = 0;
        else {
                int chan = dmap->dma;

                f = claim_dma_lock();
                clear_dma_ff(chan);

                if (!isa_dma_bridge_buggy)
                        disable_dma(dmap->dma);

                pos = get_dma_residue(chan);
                pos = dmap->bytes_in_use - pos;

                if (!(dmap->mapping_flags & DMA_MAP_MAPPED)) {
                        if (direction == DMODE_OUTPUT) {
                                if (dmap->qhead == 0)
                                        if (pos > dmap->fragment_size)
                                                pos = 0;
                        } else {
                                if (dmap->qtail == 0)
                                        if (pos > dmap->fragment_size)
                                                pos = 0;
                        }
                }

                if (pos < 0)
                        pos = 0;
                if (pos >= dmap->bytes_in_use)
                        pos = 0;

                if (!isa_dma_bridge_buggy)
                        enable_dma(dmap->dma);

                release_dma_lock(f);
        }
        /* printk("%04x ", pos); */
        return pos;
}
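/*
 * Worked example for the residue arithmetic above (illustrative numbers,
 * not taken from any driver): with dmap->bytes_in_use = 65536 and
 * get_dma_residue() returning 16384, the controller still has 16384 bytes
 * to transfer, so the active byte position is
 * pos = 65536 - 16384 = 49152.
 */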
static int sscape_start_dma(int chan, unsigned long physaddr, int count, int dma_mode)
{
        unsigned long flags;

        flags = claim_dma_lock();
        disable_dma(chan);
        clear_dma_ff(chan);
        set_dma_mode(chan, dma_mode);
        set_dma_addr(chan, physaddr);
        set_dma_count(chan, count);
        enable_dma(chan);
        release_dma_lock(flags);
        return 0;
}
void dac0800_startdma(unsigned char *buf, unsigned int size)
{
        /* Clear DMA interrupt */
        disable_dma(DAC0800_DMA_CHAN);

        /* Do DMA write to i/o operation */
        set_dma_mode(DAC0800_DMA_CHAN, DMA_MODE_WRITE);
        set_dma_device_addr(DAC0800_DMA_CHAN, DAC0800_DMA_DESTADDR);
        set_dma_addr(DAC0800_DMA_CHAN, (unsigned int) buf);
        set_dma_count(DAC0800_DMA_CHAN, size);

        /* Fire it off! */
        enable_dma(DAC0800_DMA_CHAN);
}
int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c)
{
        unsigned long dflags, cflags;
        u8 chk;

        spin_lock_irqsave(c->lock, cflags);

        c->irqs = &z8530_nop;
        c->max = 0;
        c->sync = 0;

        /*
         * Disable the PC DMA channels
         */
        dflags = claim_dma_lock();

        disable_dma(c->txdma);
        clear_dma_ff(c->txdma);

        c->txdma_on = 0;
        c->tx_dma_used = 0;

        release_dma_lock(dflags);

        /*
         * Disable DMA control mode
         */
        c->regs[R1] &= ~WT_RDY_ENAB;
        write_zsreg(c, R1, c->regs[R1]);
        c->regs[R1] &= ~(WT_RDY_RT | WT_FN_RDYFN | INT_ERR_Rx);
        c->regs[R1] |= INT_ALL_Rx;
        write_zsreg(c, R1, c->regs[R1]);
        c->regs[R14] &= ~DTRREQ;
        write_zsreg(c, R14, c->regs[R14]);

        if (c->tx_dma_buf[0]) {
                free_page((unsigned long)c->tx_dma_buf[0]);
                c->tx_dma_buf[0] = NULL;
        }

        chk = read_zsreg(c, R0);
        write_zsreg(c, R3, c->regs[R3]);
        z8530_rtsdtr(c, 0);

        spin_unlock_irqrestore(c->lock, cflags);
        return 0;
}
void labpc_drain_dma(struct comedi_device *dev)
{
        struct labpc_private *devpriv = dev->private;
        struct comedi_subdevice *s = dev->read_subdev;
        struct comedi_async *async = s->async;
        struct comedi_cmd *cmd = &async->cmd;
        int status;
        unsigned long flags;
        unsigned int max_points, num_points, residue, leftover;

        status = devpriv->stat1;

        flags = claim_dma_lock();
        disable_dma(devpriv->dma_chan);
        /* clear flip-flop to make sure 2-byte registers for
         * count and address get set correctly */
        clear_dma_ff(devpriv->dma_chan);

        /* figure out how many points to read */
        max_points = devpriv->dma_transfer_size / sample_size;

        /* residue is the number of points left to be done on the dma
         * transfer.  It should always be zero at this point unless
         * the stop_src is set to external triggering. */
        residue = get_dma_residue(devpriv->dma_chan) / sample_size;
        num_points = max_points - residue;
        if (cmd->stop_src == TRIG_COUNT && devpriv->count < num_points)
                num_points = devpriv->count;

        /* figure out how many points will be stored next time */
        leftover = 0;
        if (cmd->stop_src != TRIG_COUNT) {
                leftover = devpriv->dma_transfer_size / sample_size;
        } else if (devpriv->count > num_points) {
                leftover = devpriv->count - num_points;
                if (leftover > max_points)
                        leftover = max_points;
        }

        comedi_buf_write_samples(s, devpriv->dma_buffer, num_points);

        if (cmd->stop_src == TRIG_COUNT)
                devpriv->count -= num_points;

        /* set address and count for next transfer */
        set_dma_addr(devpriv->dma_chan, devpriv->dma_addr);
        set_dma_count(devpriv->dma_chan, leftover * sample_size);
        release_dma_lock(flags);
}
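/*
 * Worked example for the drain logic above (illustrative numbers only):
 * with dma_transfer_size = 1024 bytes and sample_size = 2,
 * max_points = 512.  If get_dma_residue() reports 100 bytes, then
 * residue = 50 points and num_points = 512 - 50 = 462 samples are copied
 * into the comedi buffer before the channel is re-armed.
 */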
static irqreturn_t jz_mmc_dma_tx_callback(int irq, void *devid)
{
        int chan = txdmachan;

        disable_dma(chan);
        if (__dmac_channel_address_error_detected(chan)) {
                printk(KERN_DEBUG "%s: DMAC address error.\n", __FUNCTION__);
                __dmac_channel_clear_address_error(chan);
        }
        if (__dmac_channel_transmit_end_detected(chan))
                __dmac_channel_clear_transmit_end(chan);

        return IRQ_HANDLED;
}
static inline void jz_mmc_start_dma(int chan, unsigned long phyaddr, int count, int mode)
{
        unsigned long flags;

        flags = claim_dma_lock();
        disable_dma(chan);
        clear_dma_ff(chan);
        jz_set_dma_block_size(chan, 32);
        set_dma_mode(chan, mode);
        set_dma_addr(chan, phyaddr);
        set_dma_count(chan, count + 31);
        enable_dma(chan);
        release_dma_lock(flags);
}
void dac0800_isr(int irq, void *dev_id, struct pt_regs *regs)
{
        /* Clear DMA interrupt */
        disable_dma(DAC0800_DMA_CHAN);

        /* Mark buffer no longer in use */
        dac0800_bufbusy &= ~dac0800_curbuf;
        dac0800_curbuf = 0;

        /* Start any other waiting buffers! */
        dac0800_startbuf();

        /* Wake up writers */
        wake_up_interruptible(&dac0800_waitchan);
}
void free_au1000_dma(unsigned int dmanr)
{
        struct dma_chan *chan = get_dma_chan(dmanr);

        if (!chan) {
                printk("Trying to free DMA%d\n", dmanr);
                return;
        }

        disable_dma(dmanr);
        if (chan->irq)
                free_irq(chan->irq, chan->irq_dev);

        chan->irq = 0;
        chan->irq_dev = NULL;
        chan->dev_id = -1;
}
/**
 * snd_dma_program - program an ISA DMA transfer
 * @dma: the dma number
 * @addr: the physical address of the buffer
 * @size: the DMA transfer size
 * @mode: the DMA transfer mode, DMA_MODE_XXX
 *
 * Programs an ISA DMA transfer for the given buffer.
 */
void snd_dma_program(unsigned long dma,
                     unsigned long addr, unsigned int size,
                     unsigned short mode)
{
        unsigned long flags;

        flags = claim_dma_lock();
        disable_dma(dma);
        clear_dma_ff(dma);
        set_dma_mode(dma, mode);
        set_dma_addr(dma, addr);
        set_dma_count(dma, size);
        if (!(mode & DMA_MODE_NO_ENABLE))
                enable_dma(dma);
        release_dma_lock(flags);
}
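/*
 * Usage sketch for snd_dma_program() above -- a minimal, hypothetical
 * example, not taken from a real driver.  The struct and field names
 * below are invented for illustration; DMA_MODE_WRITE and DMA_AUTOINIT
 * are the standard <asm/dma.h> mode bits, and snd_dma_disable() is the
 * matching ALSA helper for stopping a channel.
 */
#include <asm/dma.h>

struct example_chip {
        unsigned long dma;        /* ISA DMA channel number */
        unsigned long buf_phys;   /* physical address of the audio buffer */
        unsigned int buf_size;    /* buffer size in bytes */
};

static void example_playback_start(struct example_chip *chip)
{
        /* Program a cyclic (auto-initializing) playback transfer. */
        snd_dma_program(chip->dma, chip->buf_phys, chip->buf_size,
                        DMA_MODE_WRITE | DMA_AUTOINIT);
}

static void example_playback_stop(struct example_chip *chip)
{
        /* snd_dma_disable() takes the same DMA lock as snd_dma_program(). */
        snd_dma_disable(chip->dma);
}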
static void close_dmap(struct audio_operations *adev, struct dma_buffparms *dmap)
{
        unsigned long flags;

        sound_close_dma(dmap->dma);

        if (dmap->flags & DMA_BUSY)
                dmap->dma_mode = DMODE_NONE;
        dmap->flags &= ~DMA_BUSY;

        flags = claim_dma_lock();
        disable_dma(dmap->dma);
        release_dma_lock(flags);

        if (sound_dmap_flag == DMAP_FREE_ON_CLOSE)
                sound_free_dmap(dmap);
}
static void z8530_dma_status(struct z8530_channel *chan)
{
        u8 status, altered;

        status = read_zsreg(chan, R0);
        altered = chan->status ^ status;

        chan->status = status;

        if (chan->dma_tx) {
                if (status & TxEOM) {
                        unsigned long flags;

                        flags = claim_dma_lock();
                        disable_dma(chan->txdma);
                        clear_dma_ff(chan->txdma);
                        chan->txdma_on = 0;
                        release_dma_lock(flags);
                        z8530_tx_done(chan);
                }
        }

        if (altered & chan->dcdcheck) {
                if (status & chan->dcdcheck) {
                        printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);
                        write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
                        if (chan->netdevice &&
                            ((chan->netdevice->type == ARPHRD_HDLC) ||
                             (chan->netdevice->type == ARPHRD_PPP)))
                                sppp_reopen(chan->netdevice);
                } else {
                        printk(KERN_INFO "%s: DCD lost\n", chan->dev->name);
                        write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
                        z8530_flush_fifo(chan);
                }
        }

        write_zsctrl(chan, RES_EXT_INT);
        write_zsctrl(chan, RES_H_IUS);
}
static void setup_rx_dma(struct pt_local *lp)
{
        unsigned long flags;
        int cmd;
        unsigned long dma_abs;
        unsigned char dmachan;

        save_flags(flags);
        cli();

        dma_abs = (unsigned long) (lp->rcvbuf->data);
        dmachan = lp->dmachan;
        cmd = lp->base + CTL;

        if (!valid_dma_page(dma_abs, DMA_BUFF_SIZE + sizeof(struct mbuf)))
                panic("PI: RX buffer violates DMA boundary!");

        /* Get ready for RX DMA */
        wrtscc(lp->cardbase, cmd, R1,
               WT_FN_RDYFN | WT_RDY_RT | INT_ERR_Rx | EXT_INT_ENAB);

        disable_dma(dmachan);
        clear_dma_ff(dmachan);

        /*
         * Set DMA mode register to single transfers, incrementing address,
         * auto init, writes
         */
        set_dma_mode(dmachan, DMA_MODE_READ | 0x10);
        set_dma_addr(dmachan, dma_abs);
        set_dma_count(dmachan, lp->bufsiz);
        enable_dma(dmachan);

        /*
         * If a packet is already coming in, this line is supposed to
         * avoid receiving a partial packet.
         */
        wrtscc(lp->cardbase, cmd, R0, RES_Rx_CRC);

        /* Enable RX dma */
        wrtscc(lp->cardbase, cmd, R1,
               WT_RDY_ENAB | WT_FN_RDYFN | WT_RDY_RT | INT_ERR_Rx | EXT_INT_ENAB);

        restore_flags(flags);
}
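/*
 * Illustrative helper (an assumption, not part of the driver above) showing
 * the constraint that valid_dma_page() is guarding against: an 8237-style
 * ISA DMA transfer must not cross a 64 KiB physical boundary, because the
 * controller's address register wraps within a 64 KiB page.
 */
static inline int isa_dma_fits_64k(unsigned long phys, unsigned int len)
{
        /* True if [phys, phys + len) lies within a single 64 KiB page. */
        return ((phys & 0xffffUL) + len) <= 0x10000UL;
}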
static int sound_start_dma(struct dma_buffparms *dmap, unsigned long physaddr, int count, int dma_mode)
{
        unsigned long flags;
        int chan = dmap->dma;

        /* printk("Start DMA%d %d, %d\n", chan,
                  (int)(physaddr - dmap->raw_buf_phys), count); */

        flags = claim_dma_lock();
        disable_dma(chan);
        clear_dma_ff(chan);
        set_dma_mode(chan, dma_mode);
        set_dma_addr(chan, physaddr);
        set_dma_count(chan, count);
        enable_dma(chan);
        release_dma_lock(flags);

        return 0;
}
void free_dma(unsigned int channel) { pr_debug("freedma() : BEGIN\n"); BUG_ON(channel >= MAX_DMA_CHANNELS || !atomic_read(&dma_ch[channel].chan_status)); disable_dma(channel); clear_dma_buffer(channel); if (dma_ch[channel].irq) free_irq(dma_ch[channel].irq, dma_ch[channel].data); atomic_set(&dma_ch[channel].chan_status, 0); pr_debug("freedma() : END\n"); }
static void dma_reset_output(int dev)
{
        struct audio_operations *adev = audio_devs[dev];
        unsigned long flags, f;
        struct dma_buffparms *dmap = adev->dmap_out;

        if (!(dmap->flags & DMA_STARTED))       /* DMA is not active */
                return;

        /*
         * First wait until the current fragment has been played completely
         */
        spin_lock_irqsave(&dmap->lock, flags);
        adev->dmap_out->flags |= DMA_SYNCING;

        adev->dmap_out->underrun_count = 0;
        if (!signal_pending(current) && adev->dmap_out->qlen &&
            adev->dmap_out->underrun_count == 0) {
                spin_unlock_irqrestore(&dmap->lock, flags);
                interruptible_sleep_on_timeout(&adev->out_sleeper,
                                               dmabuf_timeout(dmap));
                spin_lock_irqsave(&dmap->lock, flags);
        }
        adev->dmap_out->flags &= ~(DMA_SYNCING | DMA_ACTIVE);

        /*
         * Finally shut the device off
         */
        if (!(adev->flags & DMA_DUPLEX) || !adev->d->halt_output)
                adev->d->halt_io(dev);
        else
                adev->d->halt_output(dev);
        adev->dmap_out->flags &= ~DMA_STARTED;

        f = claim_dma_lock();
        clear_dma_ff(dmap->dma);
        disable_dma(dmap->dma);
        release_dma_lock(f);

        dmap->byte_counter = 0;
        reorganize_buffers(dev, adev->dmap_out, 0);
        dmap->qlen = dmap->qhead = dmap->qtail = dmap->user_counter = 0;
        spin_unlock_irqrestore(&dmap->lock, flags);
}
/* Prototype: fasdmatype_t cumanascsi_2_dma_setup(host, SCpnt, direction, min_type)
 * Purpose  : initialises DMA/PIO
 * Params   : host      - host
 *            SCpnt     - command
 *            direction - DMA on to/off of card
 *            min_type  - minimum DMA support that we must have for this transfer
 * Returns  : type of transfer to be performed
 */
static fasdmatype_t
cumanascsi_2_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp,
                       fasdmadir_t direction, fasdmatype_t min_type)
{
        struct cumanascsi2_info *info = (struct cumanascsi2_info *)host->hostdata;
        struct device *dev = scsi_get_device(host);
        int dmach = info->info.scsi.dma;

        writeb(ALATCH_DIS_DMA, info->base + CUMANASCSI2_ALATCH);

        if (dmach != NO_DMA &&
            (min_type == fasdma_real_all || SCp->this_residual >= 512)) {
                int bufs, map_dir, dma_dir, alatch_dir;

                bufs = copy_SCp_to_sg(&info->sg[0], SCp, NR_SG);

                if (direction == DMA_OUT) {
                        map_dir = DMA_TO_DEVICE;
                        dma_dir = DMA_MODE_WRITE;
                        alatch_dir = ALATCH_DMA_OUT;
                } else {
                        map_dir = DMA_FROM_DEVICE;
                        dma_dir = DMA_MODE_READ;
                        alatch_dir = ALATCH_DMA_IN;
                }

                dma_map_sg(dev, info->sg, bufs + 1, map_dir);

                disable_dma(dmach);
                set_dma_sg(dmach, info->sg, bufs + 1);
                writeb(alatch_dir, info->base + CUMANASCSI2_ALATCH);
                set_dma_mode(dmach, dma_dir);
                enable_dma(dmach);
                writeb(ALATCH_ENA_DMA, info->base + CUMANASCSI2_ALATCH);
                writeb(ALATCH_DIS_BIT32, info->base + CUMANASCSI2_ALATCH);
                return fasdma_real_all;
        }

        /*
         * If we're not doing DMA,
         * we'll do pseudo DMA
         */
        return fasdma_pio;
}
static void handlecommand(struct net_device *dev)
{
        /* on entry, 0xfa is pending and ltdmacbuf holds the command */
        int dma = dev->dma;
        int base = dev->base_addr;
        unsigned long flags;

        flags = claim_dma_lock();
        disable_dma(dma);
        clear_dma_ff(dma);
        set_dma_mode(dma, DMA_MODE_WRITE);
        set_dma_addr(dma, virt_to_bus(ltdmacbuf));
        set_dma_count(dma, 50);
        enable_dma(dma);
        release_dma_lock(flags);

        inb_p(base + 3);
        inb_p(base + 2);

        if (wait_timeout(dev, 0xfa))
                printk("timed out in handlecommand\n");
}
int tms380tr_close(struct net_device *dev)
{
        struct net_local *tp = netdev_priv(dev);

        netif_stop_queue(dev);

        del_timer(&tp->timer);
        tp->HaltInProgress = 1;
        tms380tr_exec_cmd(dev, OC_CLOSE);
        tp->timer.expires = jiffies + 1*HZ;
        tp->timer.function = tms380tr_timer_end_wait;
        tp->timer.data = (unsigned long)dev;
        add_timer(&tp->timer);

        tms380tr_enable_interrupts(dev);

        tp->Sleeping = 1;
        interruptible_sleep_on(&tp->wait_for_tok_int);
        tp->TransmitCommandActive = 0;

        del_timer(&tp->timer);
        tms380tr_disable_interrupts(dev);

#ifdef CONFIG_ISA
        if (dev->dma > 0) {
                unsigned long flags = claim_dma_lock();
                disable_dma(dev->dma);
                release_dma_lock(flags);
        }
#endif

        SIFWRITEW(0xFF00, SIFCMD);
#if 0
        if (dev->dma > 0)
                SIFWRITEB(0xff, POSREG);
#endif

        tms380tr_cancel_tx_queue(tp);

        return 0;
}
static int bfin_lq035_fb_release(struct fb_info *info, int user)
{
        unsigned long flags;

        spin_lock_irqsave(&bfin_lq035_lock, flags);
        lq035_open_cnt--;
        spin_unlock_irqrestore(&bfin_lq035_lock, flags);

        if (lq035_open_cnt <= 0) {
                bfin_write_PPI_CONTROL(0);
                SSYNC();
                disable_dma(CH_PPI);
        }

        return 0;
}
/*
 * Interrupt from DMA engine.
 */
void m5249audio_dmaisr(int irq, void *dev_id, struct pt_regs *regs)
{
#if DEBUG
        printk("m5249audio_dmaisr(irq=%d)", irq);
#endif

        /* Clear DMA interrupt */
        disable_dma(M5249AUDIO_DMA);
        m5249audio_dmaing = 0;

        /* Update data pointers and counts */
        m5249audio_dmastart += m5249audio_dmacount;
        if (m5249audio_dmastart >= BUFSIZE)
                m5249audio_dmastart = 0;
        m5249audio_dmacount = 0;

        /* Start new DMA buffer if we can */
        m5249audio_dmabuf();
}
/* Prototype: fasdmatype_t eesoxscsi_dma_setup(host, SCpnt, direction, min_type)
 * Purpose  : initialises DMA/PIO
 * Params   : host      - host
 *            SCpnt     - command
 *            direction - DMA on to/off of card
 *            min_type  - minimum DMA support that we must have for this transfer
 * Returns  : type of transfer to be performed
 */
static fasdmatype_t
eesoxscsi_dma_setup(struct Scsi_Host *host, Scsi_Pointer *SCp,
                    fasdmadir_t direction, fasdmatype_t min_type)
{
        EESOXScsi_Info *info = (EESOXScsi_Info *)host->hostdata;
        int dmach = host->dma_channel;

        if (dmach != NO_DMA &&
            (min_type == fasdma_real_all || SCp->this_residual >= 512)) {
                int buf;

                for (buf = 1; buf <= SCp->buffers_residual && buf < NR_SG; buf++) {
                        info->dmasg[buf].address = __virt_to_bus(
                                (unsigned long)SCp->buffer[buf].address);
                        info->dmasg[buf].length = SCp->buffer[buf].length;

                        eesoxscsi_invalidate(SCp->buffer[buf].address,
                                             SCp->buffer[buf].length,
                                             direction);
                }

                info->dmasg[0].address = __virt_to_phys((unsigned long)SCp->ptr);
                info->dmasg[0].length = SCp->this_residual;
                eesoxscsi_invalidate(SCp->ptr, SCp->this_residual, direction);

                disable_dma(dmach);
                set_dma_sg(dmach, info->dmasg, buf);
                set_dma_mode(dmach,
                             direction == DMA_OUT ? DMA_MODE_WRITE : DMA_MODE_READ);
                enable_dma(dmach);
                return fasdma_real_all;
        }

        /*
         * We don't do DMA, we only do slow PIO
         *
         * Some day, we will do Pseudo DMA
         */
        return fasdma_pseudo;
}
/* Prototype: fasdmatype_t powertecscsi_dma_setup(host, SCpnt, direction, min_type)
 * Purpose  : initialises DMA/PIO
 * Params   : host      - host
 *            SCpnt     - command
 *            direction - DMA on to/off of card
 *            min_type  - minimum DMA support that we must have for this transfer
 * Returns  : type of transfer to be performed
 */
static fasdmatype_t
powertecscsi_dma_setup(struct Scsi_Host *host, Scsi_Pointer *SCp,
                       fasdmadir_t direction, fasdmatype_t min_type)
{
        PowerTecScsi_Info *info = (PowerTecScsi_Info *)host->hostdata;
        int dmach = host->dma_channel;

        if (dmach != NO_DMA &&
            (min_type == fasdma_real_all || SCp->this_residual >= 512)) {
                int buf;

                for (buf = 1; buf <= SCp->buffers_residual && buf < NR_SG; buf++) {
                        info->dmasg[buf].address = __virt_to_bus(
                                (unsigned long)SCp->buffer[buf].address);
                        info->dmasg[buf].length = SCp->buffer[buf].length;

                        powertecscsi_invalidate(SCp->buffer[buf].address,
                                                SCp->buffer[buf].length,
                                                direction);
                }

                info->dmasg[0].address = __virt_to_phys((unsigned long)SCp->ptr);
                info->dmasg[0].length = SCp->this_residual;
                powertecscsi_invalidate(SCp->ptr, SCp->this_residual, direction);

                disable_dma(dmach);
                set_dma_sg(dmach, info->dmasg, buf);
                set_dma_mode(dmach,
                             direction == DMA_OUT ? DMA_MODE_WRITE : DMA_MODE_READ);
                enable_dma(dmach);
                return fasdma_real_all;
        }

        /*
         * If we're not doing DMA,
         * we'll do slow PIO
         */
        return fasdma_pio;
}
/* Prototype: fasdmatype_t cumanascsi_2_dma_setup(host, SCpnt, direction, min_type)
 * Purpose  : initialises DMA/PIO
 * Params   : host      - host
 *            SCpnt     - command
 *            direction - DMA on to/off of card
 *            min_type  - minimum DMA support that we must have for this transfer
 * Returns  : type of transfer to be performed
 */
static fasdmatype_t
cumanascsi_2_dma_setup(struct Scsi_Host *host, Scsi_Pointer *SCp,
                       fasdmadir_t direction, fasdmatype_t min_type)
{
        struct cumanascsi2_info *info = (struct cumanascsi2_info *)host->hostdata;
        int dmach = host->dma_channel;

        outb(ALATCH_DIS_DMA, info->alatch);

        if (dmach != NO_DMA &&
            (min_type == fasdma_real_all || SCp->this_residual >= 512)) {
                int bufs, map_dir, dma_dir, alatch_dir;

                bufs = copy_SCp_to_sg(&info->sg[0], SCp, NR_SG);

                if (direction == DMA_OUT) {
                        map_dir = PCI_DMA_TODEVICE;
                        dma_dir = DMA_MODE_WRITE;
                        alatch_dir = ALATCH_DMA_OUT;
                } else {
                        map_dir = PCI_DMA_FROMDEVICE;
                        dma_dir = DMA_MODE_READ;
                        alatch_dir = ALATCH_DMA_IN;
                }

                pci_map_sg(NULL, info->sg, bufs, map_dir);

                disable_dma(dmach);
                set_dma_sg(dmach, info->sg, bufs);
                outb(alatch_dir, info->alatch);
                set_dma_mode(dmach, dma_dir);
                enable_dma(dmach);
                outb(ALATCH_ENA_DMA, info->alatch);
                outb(ALATCH_DIS_BIT32, info->alatch);
                return fasdma_real_all;
        }

        /*
         * If we're not doing DMA,
         * we'll do pseudo DMA
         */
        return fasdma_pio;
}
/* Check to make sure that a DMA transfer hasn't timed out.  This should
 * never happen in theory, but seems to occur occasionally if the card gets
 * prodded at the wrong time.
 */
static inline void check_3c505_dma(struct net_device *dev)
{
        elp_device *adapter = dev->priv;

        if (adapter->dmaing &&
            time_after(jiffies, adapter->current_dma.start_time + 10)) {
                unsigned long flags, f;

                printk(KERN_ERR "%s: DMA %s timed out, %d bytes left\n",
                       dev->name,
                       adapter->current_dma.direction ? "download" : "upload",
                       get_dma_residue(dev->dma));
                spin_lock_irqsave(&adapter->lock, flags);
                adapter->dmaing = 0;
                adapter->busy = 0;

                f = claim_dma_lock();
                disable_dma(dev->dma);
                release_dma_lock(f);

                if (adapter->rx_active)
                        adapter->rx_active--;
                outb_control(adapter->hcr_val & ~(DMAE | TCEN | DIR), dev);
                spin_unlock_irqrestore(&adapter->lock, flags);
        }
}
void free_dma(unsigned int channel) { pr_debug("freedma() : BEGIN \n"); BUG_ON(channel >= MAX_DMA_CHANNELS || dma_ch[channel].chan_status == DMA_CHANNEL_FREE); /* Halt the DMA */ disable_dma(channel); clear_dma_buffer(channel); if (dma_ch[channel].irq) free_irq(dma_ch[channel].irq, dma_ch[channel].data); /* Clear the DMA Variable in the Channel */ mutex_lock(&(dma_ch[channel].dmalock)); dma_ch[channel].chan_status = DMA_CHANNEL_FREE; mutex_unlock(&(dma_ch[channel].dmalock)); pr_debug("freedma() : END \n"); }
/* Prototype: fasdmatype_t eesoxscsi_dma_setup(host, SCpnt, direction, min_type)
 * Purpose  : initialises DMA/PIO
 * Params   : host      - host
 *            SCpnt     - command
 *            direction - DMA on to/off of card
 *            min_type  - minimum DMA support that we must have for this transfer
 * Returns  : type of transfer to be performed
 */
static fasdmatype_t
eesoxscsi_dma_setup(struct Scsi_Host *host, Scsi_Pointer *SCp,
                    fasdmadir_t direction, fasdmatype_t min_type)
{
        EESOXScsi_Info *info = (EESOXScsi_Info *)host->hostdata;
        int dmach = host->dma_channel;

        if (dmach != NO_DMA &&
            (min_type == fasdma_real_all || SCp->this_residual >= 512)) {
                int bufs = SCp->buffers_residual;
                int pci_dir, dma_dir;

                if (bufs)
                        memcpy(info->sg + 1, SCp->buffer + 1,
                               sizeof(struct scatterlist) * bufs);
                info->sg[0].address = SCp->ptr;
                info->sg[0].page    = NULL;
                info->sg[0].length  = SCp->this_residual;

                if (direction == DMA_OUT) {
                        pci_dir = PCI_DMA_TODEVICE;
                        dma_dir = DMA_MODE_WRITE;
                } else {
                        pci_dir = PCI_DMA_FROMDEVICE;
                        dma_dir = DMA_MODE_READ;
                }

                pci_map_sg(NULL, info->sg, bufs + 1, pci_dir);

                disable_dma(dmach);
                set_dma_sg(dmach, info->sg, bufs + 1);
                set_dma_mode(dmach, dma_dir);
                enable_dma(dmach);
                return fasdma_real_all;
        }

        /*
         * We don't do DMA, we only do slow PIO
         *
         * Some day, we will do Pseudo DMA
         */
        return fasdma_pseudo;
}
/* read data from the card */
static void handlefd(struct net_device *dev)
{
        int dma = dev->dma;
        int base = dev->base_addr;
        unsigned long flags;

        flags = claim_dma_lock();
        disable_dma(dma);
        clear_dma_ff(dma);
        set_dma_mode(dma, DMA_MODE_READ);
        set_dma_addr(dma, virt_to_bus(ltdmabuf));
        set_dma_count(dma, 800);
        enable_dma(dma);
        release_dma_lock(flags);

        inb_p(base + 3);
        inb_p(base + 2);

        if (wait_timeout(dev, 0xfd))
                printk("timed out in handlefd\n");

        sendup_buffer(dev);
}
/* read a command from the card */
static void handlefc(struct net_device *dev)
{
        /* called *only* from idle, non-reentrant */
        int dma = dev->dma;
        int base = dev->base_addr;
        unsigned long flags;

        flags = claim_dma_lock();
        disable_dma(dma);
        clear_dma_ff(dma);
        set_dma_mode(dma, DMA_MODE_READ);
        set_dma_addr(dma, virt_to_bus(ltdmacbuf));
        set_dma_count(dma, 50);
        enable_dma(dma);
        release_dma_lock(flags);

        inb_p(base + 3);
        inb_p(base + 2);

        if (wait_timeout(dev, 0xfc))
                printk("timed out in handlefc\n");
}