/*
 * Program the ISA DMA controller for a transmit from lp->txdmabuf.
 * Runs with interrupts disabled via the legacy save_flags()/cli() scheme.
 * NOTE(review): no enable_dma() here — presumably the caller starts the
 * channel once the SCC is ready; confirm against the transmit path.
 */
static void setup_tx_dma(struct pi_local *lp, int length)
{
    unsigned long dma_abs;
    unsigned long flags;
    unsigned long dmachan;

    save_flags(flags);
    cli();

    dmachan = lp->dmachan;
    dma_abs = (unsigned long) (lp->txdmabuf);

    /* The 8237 cannot cross a 64K physical boundary within one transfer. */
    if(!valid_dma_page(dma_abs, DMA_BUFF_SIZE + sizeof(struct mbuf)))
        panic("PI: TX buffer violates DMA boundary!");

    disable_dma(dmachan);
    /* Set DMA mode register to single transfers, incrementing address,
     * no auto init, reads
     */
    set_dma_mode(dmachan, DMA_MODE_WRITE);
    /* Reset the byte flip-flop before the 2-byte address/count writes. */
    clear_dma_ff(dmachan);
    set_dma_addr(dmachan, dma_abs);
    /* output byte count */
    set_dma_count(dmachan, length);

    restore_flags(flags);
}
/*
 * Push one 800-byte data block from ltdmabuf to the LocalTalk card by DMA.
 * Called *only* from idle, non-reentrant; on entry the card is in state
 * 0xfb and ltdmabuf holds the data to send.
 */
static void handlewrite(struct net_device *dev)
{
	int dma = dev->dma;
	int base = dev->base_addr;
	unsigned long flags;

	/* Program the 8237: memory -> I/O, one-shot transfer. */
	flags=claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);	/* reset byte flip-flop before 2-byte writes */
	set_dma_mode(dma,DMA_MODE_WRITE);
	set_dma_addr(dma,virt_to_bus(ltdmabuf));
	set_dma_count(dma,800);
	enable_dma(dma);
	release_dma_lock(flags);

	/* Touching these ports tells the card to start the transfer. */
	inb_p(base+3);
	inb_p(base+2);

	if ( wait_timeout(dev,0xfb) ) {
		/* Report how much the controller still had left to move. */
		flags=claim_dma_lock();
		printk("timed out in handlewrite, dma res %d\n",
		       get_dma_residue(dev->dma) );
		release_dma_lock(flags);
	}
}
/*
 * Program an ISA DMA transfer for the NCR53c406a and return the byte count
 * actually programmed (clipped so the transfer stays inside the channel's
 * addressable window and does not wrap a 64K/128K page).
 * Channels 0-3 are 8-bit (64K limit); channels 5-7 are 16-bit (128K limit).
 * NOTE(review): casting ptr through (unsigned)/(long) assumes a 32-bit
 * bus-addressable buffer — confirm for the target platform.
 */
static __inline__ int NCR53c406a_dma_setup (unsigned char *ptr, unsigned int count, unsigned char mode)
{
	unsigned limit;
	unsigned long flags = 0;

	VDEB(printk("dma: before count=%d ", count));
	if (dma_chan <=3) {
		/* 8-bit channel: at most 64K, and not across a 64K page. */
		if (count > 65536)
			count = 65536;
		limit = 65536 - (((unsigned) ptr) & 0xFFFF);
	} else {
		/* 16-bit channel: at most 128K, and not across a 128K page. */
		if (count > (65536<<1))
			count = (65536<<1);
		limit = (65536<<1) - (((unsigned) ptr) & 0x1FFFF);
	}
	if (count > limit)
		count = limit;

	VDEB(printk("after count=%d\n", count));
	/* 16-bit channels transfer words: both address and count must be even. */
	if ((count & 1) || (((unsigned) ptr) & 1))
		panic ("NCR53c406a: attempted unaligned DMA transfer\n");

	flags=claim_dma_lock();
	disable_dma(dma_chan);
	clear_dma_ff(dma_chan);	/* reset flip-flop before 2-byte writes */
	set_dma_addr(dma_chan, (long) ptr);
	set_dma_count(dma_chan, count);
	set_dma_mode(dma_chan, mode);
	enable_dma(dma_chan);
	release_dma_lock(flags);

	return count;
}
static void setup_DMA(void) { unsigned long addr,count; unsigned char dma_code; dma_code = DMA_WRITE; if (command == FD_READ) dma_code = DMA_READ; if (command == FD_FORMAT) { addr = (long) tmp_floppy_area; count = floppy->sect*4; } else { addr = (long) CURRENT->buffer; count = 1024; } if (read_track) { /* mark buffer-track bad, in case all this fails.. */ buffer_drive = buffer_track = -1; count = floppy->sect*floppy->head*512; addr = (long) floppy_track_buffer; } else if (addr >= LAST_DMA_ADDR) { addr = (long) tmp_floppy_area; if (command == FD_WRITE) copy_buffer(CURRENT->buffer, tmp_floppy_area); } cli(); disable_dma(FLOPPY_DMA); //clear_dma_ff(FLOPPY_DMA); set_dma_mode(FLOPPY_DMA, (command == FD_READ)? DMA_MODE_READ : DMA_MODE_WRITE); set_dma_addr(FLOPPY_DMA, addr); set_dma_count(FLOPPY_DMA, count); enable_dma(FLOPPY_DMA); sti(); }
/*
 * Start a DMA transfer of a received packet (len bytes) from the 3c505
 * into a freshly allocated sk_buff.  If the skb's data would lie above
 * MAX_DMA_ADDRESS the transfer is bounced through adapter->dma_buffer
 * and copied into the skb on completion (current_dma.target records the
 * final destination).  Completion is handled by the DMA-done interrupt.
 */
static void receive_packet(struct net_device *dev, int len)
{
	int rlen;
	elp_device *adapter = dev->priv;
	void *target;
	struct sk_buff *skb;
	unsigned long flags;

	/* Round the length up to an even number of bytes. */
	rlen = (len + 1) & ~1;
	skb = dev_alloc_skb(rlen + 2);

	if (!skb) {
		/* No memory: DMA into the bounce buffer and drop the data. */
		printk(KERN_WARNING "%s: memory squeeze, dropping packet\n", dev->name);
		target = adapter->dma_buffer;
		adapter->current_dma.target = NULL;
		/* FIXME: stats */
		return;
	}

	skb_reserve(skb, 2);	/* align IP header */
	target = skb_put(skb, rlen);

	/* Buffer above the ISA DMA limit? Bounce via dma_buffer. */
	if ((unsigned long)(target + rlen) >= MAX_DMA_ADDRESS) {
		adapter->current_dma.target = target;
		target = adapter->dma_buffer;
	} else {
		adapter->current_dma.target = NULL;
	}

	/* if this happens, we die */
	if (test_and_set_bit(0, (void *) &adapter->dmaing))
		printk(KERN_ERR "%s: rx blocked, DMA in progress, dir %d\n", dev->name, adapter->current_dma.direction);

	skb->dev = dev;
	adapter->current_dma.direction = 0;	/* 0 = receive */
	adapter->current_dma.length = rlen;
	adapter->current_dma.skb = skb;
	adapter->current_dma.start_time = jiffies;

	/* Point the adapter at the host and enable its DMA engine. */
	outb_control(adapter->hcr_val | DIR | TCEN | DMAE, dev);

	flags=claim_dma_lock();
	disable_dma(dev->dma);
	clear_dma_ff(dev->dma);
	set_dma_mode(dev->dma, 0x04);	/* dma read */
	set_dma_addr(dev->dma, isa_virt_to_bus(target));
	set_dma_count(dev->dma, rlen);
	enable_dma(dev->dma);
	release_dma_lock(flags);

	if (elp_debug >= 3) {
		printk(KERN_DEBUG "%s: rx DMA transfer started\n", dev->name);
	}

	if (adapter->rx_active)
		adapter->rx_active--;

	if (!adapter->busy)
		printk(KERN_WARNING "%s: receive_packet called, busy not set.\n", dev->name);
}
/*
 * Queue one sk_buff for transmission on the 3c505 via ISA DMA.
 * Pads short frames to the 60-byte minimum and rounds the length up to
 * an even count; frames that are padded or lie above MAX_DMA_ADDRESS are
 * bounced through adapter->dma_buffer.
 * NOTE(review): returns bool-style true/false although the declared type
 * is netdev_tx_t — confirm the caller maps these onto NETDEV_TX_OK/BUSY.
 */
static netdev_tx_t send_packet(struct net_device *dev, struct sk_buff *skb)
{
	elp_device *adapter = netdev_priv(dev);
	unsigned long target;
	unsigned long flags;

	/* Minimum 60 bytes, rounded up to an even length. */
	unsigned int nlen = (((skb->len < 60) ? 60 : skb->len) + 1) & (~1);

	if (test_and_set_bit(0, (void *) &adapter->busy)) {
		if (elp_debug >= 2)
			pr_debug("%s: transmit blocked\n", dev->name);
		return false;
	}

	dev->stats.tx_bytes += nlen;

	/* Tell the adapter a packet of nlen bytes follows by DMA. */
	adapter->tx_pcb.command = CMD_TRANSMIT_PACKET;
	adapter->tx_pcb.length = sizeof(struct Xmit_pkt);
	adapter->tx_pcb.data.xmit_pkt.buf_ofs = adapter->tx_pcb.data.xmit_pkt.buf_seg = 0;
	adapter->tx_pcb.data.xmit_pkt.pkt_len = nlen;

	if (!send_pcb(dev, &adapter->tx_pcb)) {
		adapter->busy = 0;
		return false;
	}

	/* if this happens, we die */
	if (test_and_set_bit(0, (void *) &adapter->dmaing))
		pr_debug("%s: tx: DMA %d in progress\n", dev->name, adapter->current_dma.direction);

	adapter->current_dma.direction = 1;	/* 1 = transmit */
	adapter->current_dma.start_time = jiffies;

	/* Bounce if the data is above the ISA DMA limit or was padded. */
	if ((unsigned long)(skb->data + nlen) >= MAX_DMA_ADDRESS || nlen != skb->len) {
		skb_copy_from_linear_data(skb, adapter->dma_buffer, nlen);
		memset(adapter->dma_buffer+skb->len, 0, nlen-skb->len);
		target = isa_virt_to_bus(adapter->dma_buffer);
	}
	else {
		target = isa_virt_to_bus(skb->data);
	}
	adapter->current_dma.skb = skb;

	flags=claim_dma_lock();
	disable_dma(dev->dma);
	clear_dma_ff(dev->dma);
	set_dma_mode(dev->dma, 0x48);	/* memory -> I/O, single mode */
	set_dma_addr(dev->dma, target);
	set_dma_count(dev->dma, nlen);
	outb_control(adapter->hcr_val | DMAE | TCEN, dev);
	enable_dma(dev->dma);
	release_dma_lock(flags);

	if (elp_debug >= 3)
		pr_debug("%s: DMA transfer started\n", dev->name);

	return true;
}
/*
 * Write `length` bytes to the GPIB bus via ISA DMA from the bus address
 * `address`, then sleep until the transfer completes, is interrupted, or
 * times out.  Returns the byte count on success or a negative errno:
 * -ERESTARTSYS (signal), -ETIMEDOUT, -EINTR (device clear), -EIO (bus
 * error), -EPIPE (controller reports bytes left over).
 */
static ssize_t __dma_write(gpib_board_t *board, nec7210_private_t *priv, dma_addr_t address, size_t length)
{
	unsigned long flags, dma_irq_flags;
	int residue = 0;
	int retval = 0;

	spin_lock_irqsave(&board->spinlock, flags);

	/* program dma controller */
	dma_irq_flags = claim_dma_lock();
	disable_dma(priv->dma_channel);
	clear_dma_ff(priv->dma_channel);
	set_dma_count(priv->dma_channel, length);
	set_dma_addr(priv->dma_channel, address);
	set_dma_mode(priv->dma_channel, DMA_MODE_WRITE );
	enable_dma(priv->dma_channel);
	release_dma_lock(dma_irq_flags);

	// enable board's dma for output
	nec7210_set_reg_bits( priv, IMR2, HR_DMAO, HR_DMAO );

	clear_bit(WRITE_READY_BN, &priv->state);
	set_bit(DMA_WRITE_IN_PROGRESS_BN, &priv->state);

	spin_unlock_irqrestore(&board->spinlock, flags);

	// suspend until message is sent
	if(wait_event_interruptible(board->wait,
		test_bit(DMA_WRITE_IN_PROGRESS_BN, &priv->state) == 0 ||
		test_bit( BUS_ERROR_BN, &priv->state ) ||
		test_bit( DEV_CLEAR_BN, &priv->state ) ||
		test_bit(TIMO_NUM, &board->status)))
	{
		GPIB_DPRINTK( "gpib write interrupted!\n" );
		retval = -ERESTARTSYS;
	}
	/* Later checks override earlier ones: most specific error wins. */
	if(test_bit(TIMO_NUM, &board->status))
		retval = -ETIMEDOUT;
	if( test_and_clear_bit( DEV_CLEAR_BN, &priv->state ) )
		retval = -EINTR;
	if( test_and_clear_bit( BUS_ERROR_BN, &priv->state ) )
		retval = -EIO;

	// disable board's dma
	nec7210_set_reg_bits( priv, IMR2, HR_DMAO, 0 );

	/* Stop the channel and read back how much was NOT transferred. */
	dma_irq_flags = claim_dma_lock();
	clear_dma_ff(priv->dma_channel);
	disable_dma(priv->dma_channel);
	residue = get_dma_residue(priv->dma_channel);
	release_dma_lock( dma_irq_flags );

	if(residue)
		retval = -EPIPE;

	return retval ? retval : length;
}
/*
 * Drain the samples accumulated by the current DMA transfer into the
 * comedi buffer and re-arm the channel for the next transfer.
 * Called from the interrupt path with the board's DMA paused.
 */
void labpc_drain_dma(struct comedi_device *dev)
{
	struct labpc_private *devpriv = dev->private;
	struct comedi_subdevice *s = dev->read_subdev;
	struct comedi_async *async = s->async;
	struct comedi_cmd *cmd = &async->cmd;
	int status;
	unsigned long flags;
	unsigned int max_points, num_points, residue, leftover;
	int i;

	/* NOTE(review): status is captured but not used below — confirm
	 * whether it is needed or left over from an earlier version. */
	status = devpriv->stat1;

	flags = claim_dma_lock();
	disable_dma(devpriv->dma_chan);
	/* clear flip-flop to make sure 2-byte registers for
	 * count and address get set correctly */
	clear_dma_ff(devpriv->dma_chan);

	/* figure out how many points to read */
	max_points = devpriv->dma_transfer_size / sample_size;
	/* residue is the number of points left to be done on the dma
	 * transfer.  It should always be zero at this point unless
	 * the stop_src is set to external triggering.
	 */
	residue = get_dma_residue(devpriv->dma_chan) / sample_size;
	num_points = max_points - residue;
	if (cmd->stop_src == TRIG_COUNT && devpriv->count < num_points)
		num_points = devpriv->count;

	/* figure out how many points will be stored next time */
	leftover = 0;
	if (cmd->stop_src != TRIG_COUNT) {
		leftover = devpriv->dma_transfer_size / sample_size;
	} else if (devpriv->count > num_points) {
		leftover = devpriv->count - num_points;
		if (leftover > max_points)
			leftover = max_points;
	}

	/* write data to comedi buffer */
	for (i = 0; i < num_points; i++)
		cfc_write_to_buffer(s, devpriv->dma_buffer[i]);

	if (cmd->stop_src == TRIG_COUNT)
		devpriv->count -= num_points;

	/* set address and count for next transfer */
	set_dma_addr(devpriv->dma_chan, devpriv->dma_addr);
	set_dma_count(devpriv->dma_chan, leftover * sample_size);
	release_dma_lock(flags);

	async->events |= COMEDI_CB_BLOCK;
}
/*
 * Kick off printing on the Atari SLM laser printer: grab the ST-DMA
 * interlock, push the 6-byte ACSI print command to the controller, then
 * program the ST-DMA for the first 255*512-byte slice and start a timer
 * that fires when the slice should be complete.
 * NOTE(review): on the command-phase timeout path the function returns
 * with SLMError set but without ENABLE_IRQ()/stdma release — confirm the
 * error handler recovers that state.
 */
static void start_print( int device )
{
	struct slm *sip = &slm_info[device];
	unsigned char *cmd;
	unsigned long paddr;
	int i;

	stdma_lock( slm_interrupt, NULL );

	CMDSET_TARG_LUN( slmprint_cmd, sip->target, sip->lun );
	cmd = slmprint_cmd;
	paddr = virt_to_phys( SLMBuffer );
	/* Flush/clean the cache over the whole print buffer before DMA. */
	dma_cache_maintenance( paddr, virt_to_phys(BufferP)-paddr, 1 );
	DISABLE_IRQ();

	/* Low on A1 */
	dma_wd.dma_mode_status = 0x88;
	MFPDELAY();

	/* send the command bytes except the last */
	for( i = 0; i < 5; ++i ) {
		DMA_LONG_WRITE( *cmd++, 0x8a );
		udelay(20);
		if (!acsi_wait_for_IRQ( HZ/2 )) {
			SLMError = 1;
			return; /* timeout */
		}
	}
	/* last command byte */
	DMA_LONG_WRITE( *cmd++, 0x82 );
	MFPDELAY();
	/* set DMA address */
	set_dma_addr( paddr );
	/* program DMA for write and select sector counter reg */
	dma_wd.dma_mode_status = 0x192;
	MFPDELAY();
	/* program for 255*512 bytes and start DMA */
	DMA_LONG_WRITE( SLM_DMA_AMOUNT, 0x112 );

#ifndef SLM_CONT_CNT_REPROG
	SLMCurAddr = paddr;
	SLMEndAddr = paddr + SLMSliceSize + SLM_DMA_INT_OFFSET;
#endif
	START_TIMER( DMA_STARTUP_TIME + DMA_TIME_FOR( SLMSliceSize ));
#if !defined(SLM_CONT_CNT_REPROG) && defined(DEBUG)
	printk( "SLM: CurAddr=%#lx EndAddr=%#lx timer=%ld\n",
			SLMCurAddr, SLMEndAddr, DMA_TIME_FOR( SLMSliceSize ) );
#endif

	ENABLE_IRQ();
}
/*
 * Program and start one ISA DMA transfer on the given channel.
 * Always succeeds; the return value exists only for caller symmetry.
 */
static int sscape_start_dma(int chan, unsigned long physaddr, int count, int dma_mode)
{
	unsigned long lock_state = claim_dma_lock();

	/* Halt the channel, then reprogram mode, address and count. */
	disable_dma(chan);
	clear_dma_ff(chan);
	set_dma_mode(chan, dma_mode);
	set_dma_addr(chan, physaddr);
	set_dma_count(chan, count);

	/* Let it run. */
	enable_dma(chan);
	release_dma_lock(lock_state);

	return 0;
}
/*
 * Start a memory-to-DAC DMA transfer of `size` bytes from `buf`.
 */
void dac0800_startdma(unsigned char *buf, unsigned int size)
{
	const int chan = DAC0800_DMA_CHAN;

	/* Stop the channel (this also clears any pending DMA interrupt). */
	disable_dma(chan);

	/* Memory -> device write aimed at the DAC data register. */
	set_dma_mode(chan, DMA_MODE_WRITE);
	set_dma_device_addr(chan, DAC0800_DMA_DESTADDR);
	set_dma_addr(chan, (unsigned int) buf);
	set_dma_count(chan, size);

	/* Fire it off! */
	enable_dma(chan);
}
/*
 * Program and start an MMC DMA transfer on `chan`.
 * The controller moves data in 32-byte blocks, so the byte count is
 * padded by 31 to cover a final partial block.
 */
static inline void jz_mmc_start_dma(int chan, unsigned long phyaddr, int count, int mode)
{
	unsigned long irq_state = claim_dma_lock();

	disable_dma(chan);
	clear_dma_ff(chan);

	/* 32-byte burst unit; round the count up to whole blocks. */
	jz_set_dma_block_size(chan, 32);
	set_dma_mode(chan, mode);
	set_dma_addr(chan, phyaddr);
	set_dma_count(chan, count + 31);

	enable_dma(chan);
	release_dma_lock(irq_state);
}
/*
 * Configure and start DMA engine.
 * Programs a long-word write transfer from the module's audio ring
 * buffer (at offset m5249audio_dmastart, length m5249audio_dmacount)
 * to the ColdFire PDOR3 audio output register, marks the engine busy,
 * then enables the channel.
 */
void __inline__ m5249audio_dmarun(void)
{
#if DEBUG
	printk("m5249audio_dmarun(): dma=%x count=%d\n",
		m5249audio_dmastart, m5249audio_dmacount);
#endif

	set_dma_mode(M5249AUDIO_DMA, DMA_MODE_WRITE|DMA_MODE_LONG_BIT);
	set_dma_device_addr(M5249AUDIO_DMA, (MCF_MBAR2+MCFA_PDOR3));
	set_dma_addr(M5249AUDIO_DMA, (int)&m5249audio_buf[m5249audio_dmastart]);
	set_dma_count(M5249AUDIO_DMA, m5249audio_dmacount);

	/* Flag the transfer in flight before the hardware starts. */
	m5249audio_dmaing = 1;
	m5249audio_txbusy = 1;
	enable_dma(M5249AUDIO_DMA);
}
/**
 * snd_dma_program - program an ISA DMA transfer
 * @dma: the dma number
 * @addr: the physical address of the buffer
 * @size: the DMA transfer size
 * @mode: the DMA transfer mode, DMA_MODE_XXX
 *
 * Programs an ISA DMA transfer for the given buffer.  The channel is
 * left enabled unless the caller passed DMA_MODE_NO_ENABLE in @mode.
 */
void snd_dma_program(unsigned long dma,
		     unsigned long addr, unsigned int size,
		     unsigned short mode)
{
	unsigned long irq_state = claim_dma_lock();

	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma, mode);
	set_dma_addr(dma, addr);
	set_dma_count(dma, size);
	if ((mode & DMA_MODE_NO_ENABLE) == 0)
		enable_dma(dma);
	release_dma_lock(irq_state);
}
/*
 * Program the ISA DMA controller to receive into lp->rcvbuf->data and
 * arm the SCC's wait/request logic so incoming bytes drive the channel.
 * Uses auto-init mode (0x10) so the controller keeps refilling the same
 * buffer.  Runs with interrupts disabled (legacy save_flags()/cli()).
 */
static void setup_rx_dma(struct pt_local *lp)
{
    unsigned long flags;
    int cmd;
    unsigned long dma_abs;
    unsigned char dmachan;

    save_flags(flags);
    cli();

    dma_abs = (unsigned long) (lp->rcvbuf->data);
    dmachan = lp->dmachan;
    cmd = lp->base + CTL;

    /* The 8237 cannot cross a 64K physical boundary in one transfer. */
    if(!valid_dma_page(dma_abs, DMA_BUFF_SIZE + sizeof(struct mbuf)))
        panic("PI: RX buffer violates DMA boundary!");

    /* Get ready for RX DMA */
    wrtscc(lp->cardbase, cmd, R1, WT_FN_RDYFN | WT_RDY_RT | INT_ERR_Rx | EXT_INT_ENAB);

    disable_dma(dmachan);
    clear_dma_ff(dmachan);

    /*
     * Set DMA mode register to single transfers, incrementing address,
     * auto init, writes
     */
    set_dma_mode(dmachan, DMA_MODE_READ | 0x10);
    set_dma_addr(dmachan, dma_abs);
    set_dma_count(dmachan, lp->bufsiz);
    enable_dma(dmachan);

    /*
     * If a packet is already coming in, this line is supposed to
     * avoid receiving a partial packet.
     */
    wrtscc(lp->cardbase, cmd, R0, RES_Rx_CRC);

    /* Enable RX dma */
    wrtscc(lp->cardbase, cmd, R1,
        WT_RDY_ENAB | WT_FN_RDYFN | WT_RDY_RT | INT_ERR_Rx | EXT_INT_ENAB);

    restore_flags(flags);
}
/*
 * Program and start an ISA DMA transfer for the sound buffer described
 * by @dmap.  Always returns 0; the return value is for caller symmetry.
 */
static int sound_start_dma(struct dma_buffparms *dmap, unsigned long physaddr, int count, int dma_mode)
{
	int ch = dmap->dma;
	unsigned long lock_state;

	/*
	 * printk( "Start DMA%d %d, %d\n",
	 *         ch, (int)(physaddr-dmap->raw_buf_phys), count);
	 */

	lock_state = claim_dma_lock();

	/* Halt, reprogram, restart. */
	disable_dma(ch);
	clear_dma_ff(ch);
	set_dma_mode(ch, dma_mode);
	set_dma_addr(ch, physaddr);
	set_dma_count(ch, count);
	enable_dma(ch);

	release_dma_lock(lock_state);

	return 0;
}
static void handlecommand(struct net_device *dev) { /* on entry, 0xfa and ltdmacbuf holds command */ int dma = dev->dma; int base = dev->base_addr; unsigned long flags; flags=claim_dma_lock(); disable_dma(dma); clear_dma_ff(dma); set_dma_mode(dma,DMA_MODE_WRITE); set_dma_addr(dma,virt_to_bus(ltdmacbuf)); set_dma_count(dma,50); enable_dma(dma); release_dma_lock(flags); inb_p(base+3); inb_p(base+2); if ( wait_timeout(dev,0xfa) ) printk("timed out in handlecommand\n"); }
/*
 * Restore the two i8237 DMA controllers after a suspend/resume cycle:
 * master-reset both chips, zero every channel's address and program a
 * count of 1, then re-enable channel 4 (the cascade channel that links
 * controller 1 into controller 2 — it must be unmasked for channels
 * 0-3 to work).
 */
static void i8237A_resume(void)
{
	unsigned long flags;
	int i;

	flags = claim_dma_lock();

	dma_outb(0, DMA1_RESET_REG);
	dma_outb(0, DMA2_RESET_REG);

	for (i = 0; i < 8; i++) {
		set_dma_addr(i, 0x000000);
		/* DMA count is a bit weird so this is not 0 */
		set_dma_count(i, 1);
	}
	enable_dma(4);	/* cascade channel */

	release_dma_lock(flags);
}
/* read data from the card */
static void handlefd(struct net_device *dev)
{
	int chan = dev->dma;
	int ioaddr = dev->base_addr;
	unsigned long lock_flags;

	/* Program the 8237 for a one-shot I/O -> memory transfer. */
	lock_flags = claim_dma_lock();
	disable_dma(chan);
	clear_dma_ff(chan);
	set_dma_mode(chan, DMA_MODE_READ);
	set_dma_addr(chan, virt_to_bus(ltdmabuf));
	set_dma_count(chan, 800);
	enable_dma(chan);
	release_dma_lock(lock_flags);

	/* Touching these ports tells the card to begin. */
	inb_p(ioaddr + 3);
	inb_p(ioaddr + 2);

	if (wait_timeout(dev, 0xfd))
		printk("timed out in handlefd\n");

	/* Hand the received data up the stack. */
	sendup_buffer(dev);
}
/* read a command from the card */
/*
 * DMA a 50-byte command block from the card into ltdmacbuf.
 * Called *only* from idle, non-reentrant.
 */
static void handlefc(struct net_device *dev)
{
	int dma = dev->dma;
	int base = dev->base_addr;
	unsigned long flags;

	/* Program the 8237 for a one-shot I/O -> memory transfer. */
	flags=claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);	/* reset byte flip-flop before 2-byte writes */
	set_dma_mode(dma,DMA_MODE_READ);
	set_dma_addr(dma,virt_to_bus(ltdmacbuf));
	set_dma_count(dma,50);
	enable_dma(dma);
	release_dma_lock(flags);

	/* Touching these ports tells the card to begin. */
	inb_p(base+3);
	inb_p(base+2);

	if ( wait_timeout(dev,0xfc) )
		printk("timed out in handlefc\n");
}
/*
 * Arm the ISA DMA channel for the acquisition described by the
 * subdevice's current command, sizing the transfer so a TRIG_COUNT
 * acquisition does not overshoot, and flag CMD3 so the caller can
 * enable the board's DMA and its terminal-count interrupt.
 */
void labpc_setup_dma(struct comedi_device *dev, struct comedi_subdevice *s)
{
	struct labpc_private *devpriv = dev->private;
	struct comedi_cmd *cmd = &s->async->cmd;
	unsigned long irq_flags;

	irq_flags = claim_dma_lock();
	disable_dma(devpriv->dma_chan);
	/* clear flip-flop to make sure 2-byte registers for
	 * count and address get set correctly */
	clear_dma_ff(devpriv->dma_chan);
	set_dma_addr(devpriv->dma_chan, devpriv->dma_addr);
	/* set appropriate size of transfer */
	devpriv->dma_transfer_size = labpc_suggest_transfer_size(cmd);
	/* Don't program more bytes than the finite acquisition needs. */
	if (cmd->stop_src == TRIG_COUNT &&
	    devpriv->count * sample_size < devpriv->dma_transfer_size)
		devpriv->dma_transfer_size = devpriv->count * sample_size;
	set_dma_count(devpriv->dma_chan, devpriv->dma_transfer_size);
	enable_dma(devpriv->dma_chan);
	release_dma_lock(irq_flags);
	/* set CMD3 bits for caller to enable DMA and interrupt */
	devpriv->cmd3 |= (CMD3_DMAEN | CMD3_DMATCINTEN);
}
/*
 * Queue one sk_buff for transmission on the 3c505 via ISA DMA.
 * Returns TRUE if the DMA was started, FALSE if the adapter was busy
 * or refused the transmit PCB.  Frames shorter than 60 bytes are
 * padded; padded frames and buffers above MAX_DMA_ADDRESS are bounced
 * through adapter->dma_buffer.
 */
static int send_packet(struct net_device *dev, struct sk_buff *skb)
{
	elp_device *adapter = dev->priv;
	unsigned long target;
	unsigned long flags;

	/*
	 * make sure the length is even and no shorter than 60 bytes
	 */
	unsigned int nlen = (((skb->len < 60) ? 60 : skb->len) + 1) & (~1);

	if (test_and_set_bit(0, (void *) &adapter->busy)) {
		if (elp_debug >= 2)
			printk(KERN_DEBUG "%s: transmit blocked\n", dev->name);
		return FALSE;
	}

	adapter->stats.tx_bytes += nlen;

	/*
	 * send the adapter a transmit packet command. Ignore segment and offset
	 * and make sure the length is even
	 */
	adapter->tx_pcb.command = CMD_TRANSMIT_PACKET;
	adapter->tx_pcb.length = sizeof(struct Xmit_pkt);
	adapter->tx_pcb.data.xmit_pkt.buf_ofs
	    = adapter->tx_pcb.data.xmit_pkt.buf_seg = 0;	/* Unused */
	adapter->tx_pcb.data.xmit_pkt.pkt_len = nlen;

	if (!send_pcb(dev, &adapter->tx_pcb)) {
		adapter->busy = 0;
		return FALSE;
	}

	/* if this happens, we die */
	if (test_and_set_bit(0, (void *) &adapter->dmaing))
		printk(KERN_DEBUG "%s: tx: DMA %d in progress\n", dev->name, adapter->current_dma.direction);

	adapter->current_dma.direction = 1;	/* 1 = transmit */
	adapter->current_dma.start_time = jiffies;

	/* Bounce if the data is above the ISA DMA limit or was padded. */
	if ((unsigned long)(skb->data + nlen) >= MAX_DMA_ADDRESS || nlen != skb->len) {
		skb_copy_from_linear_data(skb, adapter->dma_buffer, nlen);
		memset(adapter->dma_buffer+skb->len, 0, nlen-skb->len);
		target = isa_virt_to_bus(adapter->dma_buffer);
	}
	else {
		target = isa_virt_to_bus(skb->data);
	}
	adapter->current_dma.skb = skb;

	flags=claim_dma_lock();
	disable_dma(dev->dma);
	clear_dma_ff(dev->dma);
	set_dma_mode(dev->dma, 0x48);	/* dma memory -> io */
	set_dma_addr(dev->dma, target);
	set_dma_count(dev->dma, nlen);
	outb_control(adapter->hcr_val | DMAE | TCEN, dev);
	enable_dma(dev->dma);
	release_dma_lock(flags);

	if (elp_debug >= 3)
		printk(KERN_DEBUG "%s: DMA transfer started\n", dev->name);

	return TRUE;
}
/*
 * Timer callback fired when the current SLM DMA slice should be done.
 * In SLM_CONT_CNT_REPROG mode it simply reloads the sector counter.
 * Otherwise it checks the DMA address: if the slice is not finished it
 * either restarts the timer (long remainder) or busy-waits (short
 * remainder), then reprograms the ST-DMA for the next slice and arms
 * the timer again — unless this was the last slice of the buffer.
 */
static void slm_test_ready( unsigned long dummy )
{
#ifdef SLM_CONT_CNT_REPROG
	/* program for 255*512 bytes again */
	dma_wd.fdc_acces_seccount = SLM_DMA_AMOUNT;
	START_TIMER( DMA_TIME_FOR(0) );
#ifdef DEBUG
	printk( "SLM: reprogramming timer for %d jiffies, addr=%#lx\n",
			DMA_TIME_FOR(0), get_dma_addr() );
#endif
#else /* !SLM_CONT_CNT_REPROG */
	unsigned long flags, addr;
	int d, ti;
#ifdef DEBUG
	struct timeval start_tm, end_tm;
	int did_wait = 0;
#endif

	save_flags(flags);
	cli();

	addr = get_dma_addr();
	if ((d = SLMEndAddr - addr) > 0) {
		restore_flags(flags);

		/* slice not yet finished, decide whether to start another timer
		 * or to busy-wait */
		ti = DMA_TIME_FOR( d );
		if (ti > 0) {
#ifdef DEBUG
			printk( "SLM: reprogramming timer for %d jiffies, rest %d bytes\n",
					ti, d );
#endif
			START_TIMER( ti );
			return;
		}
		/* wait for desired end address to be reached */
#ifdef DEBUG
		do_gettimeofday( &start_tm );
		did_wait = 1;
#endif
		cli();
		while( get_dma_addr() < SLMEndAddr )
			barrier();
	}

	/* slice finished, start next one */
	SLMCurAddr += SLMSliceSize;

#ifdef SLM_CONTINUOUS_DMA
	/* program for 255*512 bytes again */
	dma_wd.fdc_acces_seccount = SLM_DMA_AMOUNT;
#else
	/* set DMA address;
	 * add 2 bytes for the ones in the SLM controller FIFO! */
	set_dma_addr( SLMCurAddr + 2 );
	/* toggle DMA to write and select sector counter reg */
	dma_wd.dma_mode_status = 0x92;
	MFPDELAY();
	dma_wd.dma_mode_status = 0x192;
	MFPDELAY();
	/* program for 255*512 bytes and start DMA */
	DMA_LONG_WRITE( SLM_DMA_AMOUNT, 0x112 );
#endif

	restore_flags(flags);

#ifdef DEBUG
	if (did_wait) {
		int ms;
		do_gettimeofday( &end_tm );
		/* NOTE(review): ms is int but printed with %ld — format
		 * mismatch in this debug-only path; confirm before relying
		 * on the printed values. */
		ms = (end_tm.tv_sec*1000000+end_tm.tv_usec) -
			 (start_tm.tv_sec*1000000+start_tm.tv_usec);
		printk( "SLM: did %ld.%ld ms busy waiting for %d bytes\n",
				ms/1000, ms%1000, d );
	}
	else
		printk( "SLM: didn't wait (!)\n" );
#endif

	if ((unsigned char *)PTOV( SLMCurAddr + SLMSliceSize ) >= BufferP) {
		/* will be last slice, no timer necessary */
#ifdef DEBUG
		printk( "SLM: CurAddr=%#lx EndAddr=%#lx last slice -> no timer\n",
				SLMCurAddr, SLMEndAddr );
#endif
	}
	else {
		/* not last slice */
		SLMEndAddr = SLMCurAddr + SLMSliceSize + SLM_DMA_INT_OFFSET;
		START_TIMER( DMA_TIME_FOR( SLMSliceSize ));
#ifdef DEBUG
		printk( "SLM: CurAddr=%#lx EndAddr=%#lx timer=%ld\n",
				SLMCurAddr, SLMEndAddr, DMA_TIME_FOR( SLMSliceSize ) );
#endif
	}
#endif /* SLM_CONT_CNT_REPROG */
}
/*
 * Probe for a LocalTalk-PC card: find the I/O port (0x220 or 0x240),
 * auto-detect the IRQ if none was given, allocate the DMA buffers,
 * reset the card, discover the DMA channel, wire up the net_device ops
 * and register it.  Returns the registered net_device on success or an
 * ERR_PTR() on failure.
 *
 * Fixes: the success path returned NULL instead of the device pointer,
 * which broke callers that store the result (e.g. for unregistering on
 * module unload); also restored several identifiers that had been
 * mangled by HTML-entity decoding ("&lt…" became "<…"):
 * &ltdmabuf[800], &ltpc_ioctl, &ltpc_interrupt, &ltpc_timer.
 */
struct net_device * __init ltpc_probe(void)
{
	struct net_device *dev;
	int err = -ENOMEM;
	int x = 0, y = 0;
	int autoirq;
	unsigned long f;
	unsigned long timeout;

	dev = alloc_ltalkdev(sizeof(struct ltpc_private));
	if (!dev)
		goto out;

	SET_MODULE_OWNER(dev);

	/* probe for the I/O port address */
	if (io != 0x240 && request_region(0x220,8,"ltpc")) {
		x = inb_p(0x220+6);
		if ( (x!=0xff) && (x>=0xf0) ) {
			io = 0x220;
			goto got_port;
		}
		release_region(0x220,8);
	}
	if (io != 0x220 && request_region(0x240,8,"ltpc")) {
		y = inb_p(0x240+6);
		if ( (y!=0xff) && (y>=0xf0) ){
			io = 0x240;
			goto got_port;
		}
		release_region(0x240,8);
	}

	/* give up in despair */
	printk(KERN_ERR "LocalTalk card not found; 220 = %02x, 240 = %02x.\n", x,y);
	err = -ENODEV;
	goto out1;

got_port:
	/* probe for the IRQ line */
	if (irq < 2) {
		unsigned long irq_mask;

		irq_mask = probe_irq_on();
		/* reset the interrupt line */
		inb_p(io+7);
		inb_p(io+7);
		/* trigger an interrupt (I hope) */
		inb_p(io+6);
		mdelay(2);
		autoirq = probe_irq_off(irq_mask);

		if (autoirq == 0) {
			printk(KERN_ERR "ltpc: probe at %#x failed to detect IRQ line.\n", io);
		} else {
			irq = autoirq;
		}
	}

	/* allocate a DMA buffer */
	ltdmabuf = (unsigned char *) dma_mem_alloc(1000);
	if (!ltdmabuf) {
		printk(KERN_ERR "ltpc: mem alloc failed\n");
		err = -ENOMEM;
		goto out2;
	}
	/* command buffer lives in the upper part of the same allocation */
	ltdmacbuf = &ltdmabuf[800];

	if (debug & DEBUG_VERBOSE) {
		printk("ltdmabuf pointer %08lx\n",(unsigned long) ltdmabuf);
	}

	/* reset the card */
	inb_p(io+1);
	inb_p(io+3);
	msleep(20);
	inb_p(io+0);
	inb_p(io+2);
	inb_p(io+7); /* clear reset */
	inb_p(io+4);
	inb_p(io+5);
	inb_p(io+5); /* enable dma */
	inb_p(io+6); /* tri-state interrupt line */
	ssleep(1);

	/* now, figure out which dma channel we're using, unless it's
	 * already been specified */
	/* well, 0 is a legal DMA channel, but the LTPC card doesn't use it... */
	dma = ltpc_probe_dma(io, dma);
	if (!dma) {  /* no dma channel */
		printk(KERN_ERR "No DMA channel found on ltpc card.\n");
		err = -ENODEV;
		goto out3;
	}

	/* print out friendly message */
	if (irq)
		printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, IR%d, DMA%d.\n",io,irq,dma);
	else
		printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, DMA%d. Using polled mode.\n",io,dma);

	/* Fill in the fields of the device structure with localtalk values. */
	dev->hard_start_xmit = ltpc_xmit;
	dev->hard_header = ltpc_hard_header;
	dev->get_stats = ltpc_get_stats;

	/* add the ltpc-specific things */
	dev->do_ioctl = &ltpc_ioctl;
	dev->set_multicast_list = &set_multicast_list;
	dev->mc_list = NULL;
	dev->base_addr = io;
	dev->irq = irq;
	dev->dma = dma;

	/* the card will want to send a result at this point */
	/* (I think... leaving out this part makes the kernel crash,
	 * so I put it back in...) */
	f = claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma, DMA_MODE_READ);
	set_dma_addr(dma, virt_to_bus(ltdmabuf));
	set_dma_count(dma, 0x100);
	enable_dma(dma);
	release_dma_lock(f);
	(void) inb_p(io+3);
	(void) inb_p(io+2);

	/* wait up to one second for the card to report ready (0xf9) */
	timeout = jiffies+100*HZ/100;
	while (time_before(jiffies, timeout)) {
		if ( 0xf9 == inb_p(io+6))
			break;
		schedule();
	}

	if (debug & DEBUG_VERBOSE) {
		printk("setting up timer and irq\n");
	}

	/* grab it and don't let go :-) */
	if (irq && request_irq( irq, &ltpc_interrupt, 0, "ltpc", dev) >= 0) {
		(void) inb_p(io+7);  /* enable interrupts from board */
		(void) inb_p(io+7);  /* and reset irq line */
	} else {
		if ( irq )
			printk(KERN_ERR "ltpc: IRQ already in use, using polled mode.\n");
		dev->irq = 0;
		/* polled mode -- 20 times per second */
		/* this is really, really slow... should it poll more often? */
		init_timer(&ltpc_timer);
		ltpc_timer.function = ltpc_poll;
		ltpc_timer.data = (unsigned long) dev;
		ltpc_timer.expires = jiffies + HZ/20;
		add_timer(&ltpc_timer);
	}

	err = register_netdev(dev);
	if (err)
		goto out4;

	return dev;	/* was erroneously "return NULL" */

out4:
	del_timer_sync(&ltpc_timer);
	if (dev->irq)
		free_irq(dev->irq, dev);
out3:
	free_pages((unsigned long)ltdmabuf, get_order(1000));
out2:
	release_region(io, 8);
out1:
	free_netdev(dev);
out:
	return ERR_PTR(err);
}
/*
 * Start the asynchronous acquisition described by the subdevice's
 * comedi_cmd: program the mux/gain, the pacer counters, burst mode (on
 * das1600-class boards), the ISA DMA channel, and finally the control
 * register that kicks off pacing.  Requires a DMA channel plus either
 * an IRQ or "timer mode".  Returns 0 on success, -1 on usage errors.
 */
static int das16_cmd_exec(struct comedi_device *dev, struct comedi_subdevice *s)
{
	const struct das16_board *board = comedi_board(dev);
	struct comedi_async *async = s->async;
	struct comedi_cmd *cmd = &async->cmd;
	unsigned int byte;
	unsigned long flags;
	int range;

	if (devpriv->dma_chan == 0 || (dev->irq == 0 && devpriv->timer_mode == 0)) {
		comedi_error(dev, "irq (or use of 'timer mode') dma required to " "execute comedi_cmd");
		return -1;
	}
	if (cmd->flags & TRIG_RT) {
		comedi_error(dev, "isa dma transfers cannot be performed with " "TRIG_RT, aborting");
		return -1;
	}

	devpriv->adc_byte_count = cmd->stop_arg * cmd->chanlist_len * sizeof(uint16_t);

	/* disable conversions for das1600 mode */
	if (board->size > 0x400)
		outb(DAS1600_CONV_DISABLE, dev->iobase + DAS1600_CONV);

	/* set scan limits */
	byte = CR_CHAN(cmd->chanlist[0]);
	byte |= CR_CHAN(cmd->chanlist[cmd->chanlist_len - 1]) << 4;
	outb(byte, dev->iobase + DAS16_MUX);

	/* set gain (this is also burst rate register but according to
	 * computer boards manual, burst rate does nothing, even on
	 * keithley cards) */
	if (board->ai_pg != das16_pg_none) {
		range = CR_RANGE(cmd->chanlist[0]);
		outb((das16_gainlists[board->ai_pg])[range], dev->iobase + DAS16_GAIN);
	}

	/* set counter mode and counts */
	cmd->convert_arg = das16_set_pacer(dev, cmd->convert_arg, cmd->flags & TRIG_ROUND_MASK);
	DEBUG_PRINT("pacer period: %d ns\n", cmd->convert_arg);

	/* enable counters */
	byte = 0;
	/* Enable burst mode if appropriate. */
	if (board->size > 0x400) {
		if (cmd->convert_src == TRIG_NOW) {
			outb(DAS1600_BURST_VAL, dev->iobase + DAS1600_BURST);
			/* set burst length */
			byte |= BURST_LEN_BITS(cmd->chanlist_len - 1);
		} else {
			outb(0, dev->iobase + DAS1600_BURST);
		}
	}
	outb(byte, dev->iobase + DAS16_PACER);

	/* set up dma transfer */
	flags = claim_dma_lock();
	disable_dma(devpriv->dma_chan);
	/* clear flip-flop to make sure 2-byte registers for
	 * count and address get set correctly */
	clear_dma_ff(devpriv->dma_chan);
	devpriv->current_buffer = 0;
	set_dma_addr(devpriv->dma_chan, devpriv->dma_buffer_addr[devpriv->current_buffer]);
	/* set appropriate size of transfer */
	devpriv->dma_transfer_size = das16_suggest_transfer_size(dev, *cmd);
	set_dma_count(devpriv->dma_chan, devpriv->dma_transfer_size);
	enable_dma(devpriv->dma_chan);
	release_dma_lock(flags);

	/* set up interrupt */
	if (devpriv->timer_mode) {
		/* No IRQ: poll completion from a kernel timer instead. */
		devpriv->timer_running = 1;
		devpriv->timer.expires = jiffies + timer_period();
		add_timer(&devpriv->timer);
		devpriv->control_state &= ~DAS16_INTE;
	} else {
		/* clear interrupt bit */
		outb(0x00, dev->iobase + DAS16_STATUS);
		/* enable interrupts */
		devpriv->control_state |= DAS16_INTE;
	}
	devpriv->control_state |= DMA_ENABLE;
	devpriv->control_state &= ~PACING_MASK;
	if (cmd->convert_src == TRIG_EXT)
		devpriv->control_state |= EXT_PACER;
	else
		devpriv->control_state |= INT_PACER;
	outb(devpriv->control_state, dev->iobase + DAS16_CONTROL);

	/* Enable conversions if using das1600 mode */
	if (board->size > 0x400)
		outb(0, dev->iobase + DAS1600_CONV);

	return 0;
}
/*
 * Write up to fifo_depth bytes of user data into the hardware FIFO via
 * a PL330 DMA transfer.  Allocates a coherent bounce buffer, copies the
 * user data in, programs the channel and sleeps until the done/fault
 * callback clears dev->busy.  Returns the number of bytes written or a
 * negative errno.
 *
 * Fixes: the copy_from_user() return value was ignored — a faulting
 * user buffer would have started a DMA of uninitialized data and then
 * reported success.  Now fails with -EFAULT and releases the channel
 * and the coherent buffer before returning.
 */
ssize_t xfifo_dma_write(struct file *filp, const char __user *buf,
	size_t count, loff_t *f_pos)
{
	struct xfifo_dma_dev *dev = filp->private_data;
	size_t transfer_size;
	int retval = 0;

	if (mutex_lock_interruptible(&dev->mutex)) {
		return -EINTR;
	}

	dev->writes++;

	/* Clamp the transfer to what the FIFO can hold. */
	transfer_size = count;
	if (count > dev->fifo_depth) {
		transfer_size = dev->fifo_depth;
	}

	/* Allocate a DMA buffer for the transfer */
	dev->buffer_v_addr = dma_alloc_coherent(&dev->pdev->dev, transfer_size,
		&dev->buffer_d_addr, GFP_KERNEL);
	if (!dev->buffer_v_addr) {
		dev_err(&dev->pdev->dev,
			"coherent DMA buffer allocation failed\n");
		retval = -ENOMEM;
		goto fail_buffer;
	}

	PDEBUG("dma buffer alloc - d @0x%0x v @0x%0x\n",
		(u32)dev->buffer_d_addr, (u32)dev->buffer_v_addr);

	if (request_dma(dev->dma_channel, MODULE_NAME)) {
		dev_err(&dev->pdev->dev, "unable to alloc DMA channel %d\n",
			dev->dma_channel);
		retval = -EBUSY;
		goto fail_client_data;
	}

	dev->busy = 1;
	dev->count = transfer_size;

	/* Program the PL330: memory -> device, fixed device address. */
	set_dma_mode(dev->dma_channel, DMA_MODE_WRITE);
	set_dma_addr(dev->dma_channel, dev->buffer_d_addr);
	set_dma_count(dev->dma_channel, transfer_size);
	set_pl330_client_data(dev->dma_channel, dev->client_data);
	set_pl330_done_callback(dev->dma_channel,
		xfifo_dma_done_callback, dev);
	set_pl330_fault_callback(dev->dma_channel,
		xfifo_dma_fault_callback, dev);
	set_pl330_incr_dev_addr(dev->dma_channel, 0);

	/* Load our DMA buffer with the user data (checked: fail before
	 * the DMA is started if the user pages cannot be read). */
	if (copy_from_user(dev->buffer_v_addr, buf, transfer_size)) {
		retval = -EFAULT;
		goto fail_copy;
	}

	xfifo_dma_reset_fifo();
	/* Kick off the DMA */
	enable_dma(dev->dma_channel);

	mutex_unlock(&dev->mutex);

	wait_event_interruptible(xfifo_dma_wait, dev->busy == 0);

	/* Deallocate the DMA buffer and free the channel */
	free_dma(dev->dma_channel);

	dma_free_coherent(&dev->pdev->dev, dev->count, dev->buffer_v_addr,
		dev->buffer_d_addr);
	PDEBUG("dma write %d bytes\n", transfer_size);

	return transfer_size;

fail_copy:
	free_dma(dev->dma_channel);
fail_client_data:
	dma_free_coherent(&dev->pdev->dev, transfer_size, dev->buffer_v_addr,
		dev->buffer_d_addr);
fail_buffer:
	mutex_unlock(&dev->mutex);
	return retval;
}
/*
 * Open a Z8530 channel in full DMA mode: allocate split-page flip
 * buffers for RX and TX, route the SCC's DTR/REQ and W/REQ signals to
 * the DMA controllers, program the RX channel to receive into the first
 * flip buffer, and leave the TX channel programmed but idle.
 * Returns 0 on success, -EMSGSIZE if the MTU exceeds half a page, or
 * -ENOBUFS on allocation failure.
 */
int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
{
	unsigned long cflags, dflags;

	c->sync = 1;
	c->mtu = dev->mtu+64;
	c->count = 0;
	c->skb = NULL;
	c->skb2 = NULL;
	/*
	 *	Load the DMA interfaces up
	 */
	c->rxdma_on = 0;
	c->txdma_on = 0;

	/*
	 *	Allocate the DMA flip buffers. Limit by page size.
	 *	Everyone runs 1500 mtu or less on wan links so this
	 *	should be fine.
	 */

	if(c->mtu > PAGE_SIZE/2)
		return -EMSGSIZE;

	c->rx_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if(c->rx_buf[0]==NULL)
		return -ENOBUFS;
	/* Second RX buffer is the upper half of the same page. */
	c->rx_buf[1]=c->rx_buf[0]+PAGE_SIZE/2;

	c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if(c->tx_dma_buf[0]==NULL)
	{
		free_page((unsigned long)c->rx_buf[0]);
		c->rx_buf[0]=NULL;
		return -ENOBUFS;
	}
	c->tx_dma_buf[1]=c->tx_dma_buf[0]+PAGE_SIZE/2;

	c->tx_dma_used=0;
	c->dma_tx = 1;
	c->dma_num=0;
	c->dma_ready=1;

	/*
	 *	Enable DMA control mode
	 */

	spin_lock_irqsave(c->lock, cflags);

	/*
	 *	TX DMA via DIR/REQ
	 */

	c->regs[R14]|= DTRREQ;
	write_zsreg(c, R14, c->regs[R14]);

	c->regs[R1]&= ~TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);

	/*
	 *	RX DMA via W/Req
	 */

	c->regs[R1]|= WT_FN_RDYFN;
	c->regs[R1]|= WT_RDY_RT;
	c->regs[R1]|= INT_ERR_Rx;
	c->regs[R1]&= ~TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);
	c->regs[R1]|= WT_RDY_ENAB;
	write_zsreg(c, R1, c->regs[R1]);

	/*
	 *	DMA interrupts
	 */

	/*
	 *	Set up the DMA configuration
	 */

	dflags=claim_dma_lock();

	/* RX: auto-init read into the first flip buffer. */
	disable_dma(c->rxdma);
	clear_dma_ff(c->rxdma);
	set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
	set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[0]));
	set_dma_count(c->rxdma, c->mtu);
	enable_dma(c->rxdma);

	/* TX: programmed for write but left disabled until there is data. */
	disable_dma(c->txdma);
	clear_dma_ff(c->txdma);
	set_dma_mode(c->txdma, DMA_MODE_WRITE);
	disable_dma(c->txdma);

	release_dma_lock(dflags);

	/*
	 *	Select the DMA interrupt handlers
	 */

	c->rxdma_on = 1;
	c->txdma_on = 1;
	c->tx_dma_used = 1;

	c->irqs = &z8530_dma_sync;
	z8530_rtsdtr(c,1);
	write_zsreg(c, R3, c->regs[R3]|RxENABLE);

	spin_unlock_irqrestore(c->lock, cflags);

	return 0;
}
/*
 *	z8530_rx_done - receive completion for a Z8530 channel
 *	@c: channel that finished receiving a frame
 *
 *	Called when a complete frame has arrived.  In DMA mode the finished
 *	flip buffer is copied into a fresh sk_buff while the other half-page
 *	buffer is immediately armed for the next frame.  In PIO mode the
 *	pre-allocated skb is swapped for its standby and handed upstream.
 *	Either way the frame is delivered through c->rx_function.
 */
static void z8530_rx_done(struct z8530_channel *c)
{
	struct sk_buff *skb;
	int ct;

	/*
	 *	Is our receive engine in DMA mode
	 */
	if(c->rxdma_on)
	{
		/*
		 *	Save the ready state and the buffer currently
		 *	being used as the DMA target
		 */
		int ready=c->dma_ready;
		unsigned char *rxb=c->rx_buf[c->dma_num];
		unsigned long flags;

		/*
		 *	Complete this DMA. Necessary to find the length
		 */
		flags=claim_dma_lock();

		disable_dma(c->rxdma);
		clear_dma_ff(c->rxdma);
		c->rxdma_on=0;
		/* frame length = programmed count minus what the DMAC
		   never transferred */
		ct=c->mtu-get_dma_residue(c->rxdma);
		if(ct<0)
			ct=2;	/* Shit happens.. */
		c->dma_ready=0;

		/*
		 *	Normal case: the other slot is free, start the next DMA
		 *	into it immediately.
		 */
		if(ready)
		{
			c->dma_num^=1;	/* flip to the other half-page */
			set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
			set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num]));
			set_dma_count(c->rxdma, c->mtu);
			c->rxdma_on = 1;
			enable_dma(c->rxdma);
			/* Stop any frames that we missed the head of
			   from passing */
			write_zsreg(c, R0, RES_Rx_CRC);
		}
		else
			/* Can't occur as we dont reenable the DMA irq
			   until after the flip is done */
			printk(KERN_WARNING "%s: DMA flip overrun!\n",
			       c->netdevice->name);

		release_dma_lock(flags);

		/*
		 *	Shove the old buffer into an sk_buff. We can't DMA
		 *	directly into one on a PC - it might be above the 16Mb
		 *	boundary. Optimisation - we could check to see if we
		 *	can avoid the copy. Optimisation 2 - make the memcpy
		 *	a copychecksum.
		 */
		skb = dev_alloc_skb(ct);
		if (skb == NULL) {
			c->netdevice->stats.rx_dropped++;
			printk(KERN_WARNING "%s: Memory squeeze.\n",
			       c->netdevice->name);
		} else {
			skb_put(skb, ct);
			skb_copy_to_linear_data(skb, rxb, ct);
			c->netdevice->stats.rx_packets++;
			c->netdevice->stats.rx_bytes += ct;
		}
		c->dma_ready = 1;	/* freed slot is available again */
	} else {
		RT_LOCK;
		skb = c->skb;

		/*
		 *	The game we play for non DMA is similar. We want to
		 *	get the controller set up for the next packet as fast
		 *	as possible. We potentially only have one byte + the
		 *	fifo length for this. Thus we want to flip to the new
		 *	buffer and then mess around copying and allocating
		 *	things. For the current case it doesn't matter but
		 *	if you build a system where the sync irq isnt blocked
		 *	by the kernel IRQ disable then you need only block the
		 *	sync IRQ for the RT_LOCK area.
		 *
		 */
		ct=c->count;

		/* promote the standby skb to active and reset the counters */
		c->skb = c->skb2;
		c->count = 0;
		c->max = c->mtu;
		if (c->skb) {
			c->dptr = c->skb->data;
			c->max = c->mtu;
		} else {
			c->count = 0;
			c->max = 0;
		}
		RT_UNLOCK;

		/* replenish the standby buffer outside the critical region */
		c->skb2 = dev_alloc_skb(c->mtu);
		if (c->skb2 == NULL)
			printk(KERN_WARNING "%s: memory squeeze.\n",
			       c->netdevice->name);
		else
			skb_put(c->skb2, c->mtu);
		c->netdevice->stats.rx_packets++;
		c->netdevice->stats.rx_bytes += ct;
	}
	/*
	 *	If we received a frame we must now process it.
	 */
	if (skb) {
		skb_trim(skb, ct);	/* shrink to the actual frame length */
		c->rx_function(c, skb);
	} else {
		c->netdevice->stats.rx_dropped++;
		printk(KERN_ERR "%s: Lost a frame\n", c->netdevice->name);
	}
}
static void z8530_tx_begin(struct z8530_channel *c) { unsigned long flags; if(c->tx_skb) return; c->tx_skb=c->tx_next_skb; c->tx_next_skb=NULL; c->tx_ptr=c->tx_next_ptr; if(c->tx_skb==NULL) { /* Idle on */ if(c->dma_tx) { flags=claim_dma_lock(); disable_dma(c->txdma); /* * Check if we crapped out. */ if (get_dma_residue(c->txdma)) { c->netdevice->stats.tx_dropped++; c->netdevice->stats.tx_fifo_errors++; } release_dma_lock(flags); } c->txcount=0; } else { c->txcount=c->tx_skb->len; if(c->dma_tx) { /* * FIXME. DMA is broken for the original 8530, * on the older parts we need to set a flag and * wait for a further TX interrupt to fire this * stage off */ flags=claim_dma_lock(); disable_dma(c->txdma); /* * These two are needed by the 8530/85C30 * and must be issued when idling. */ if(c->dev->type!=Z85230) { write_zsctrl(c, RES_Tx_CRC); write_zsctrl(c, RES_EOM_L); } write_zsreg(c, R10, c->regs[10]&~ABUNDER); clear_dma_ff(c->txdma); set_dma_addr(c->txdma, virt_to_bus(c->tx_ptr)); set_dma_count(c->txdma, c->txcount); enable_dma(c->txdma); release_dma_lock(flags); write_zsctrl(c, RES_EOM_L); write_zsreg(c, R5, c->regs[R5]|TxENAB); } else { /* ABUNDER off */ write_zsreg(c, R10, c->regs[10]); write_zsctrl(c, RES_Tx_CRC); while(c->txcount && (read_zsreg(c,R0)&Tx_BUF_EMP)) { write_zsreg(c, R8, *c->tx_ptr++); c->txcount--; } } } /* * Since we emptied tx_skb we can ask for more */ netif_wake_queue(c->netdevice); }
static int __init ltpc_probe_dma(int base, int dma) { int want = (dma == 3) ? 2 : (dma == 1) ? 1 : 3; unsigned long timeout; unsigned long f; if (want & 1) { if (request_dma(1,"ltpc")) { want &= ~1; } else { f=claim_dma_lock(); disable_dma(1); clear_dma_ff(1); set_dma_mode(1,DMA_MODE_WRITE); set_dma_addr(1,virt_to_bus(ltdmabuf)); set_dma_count(1,sizeof(struct lt_mem)); enable_dma(1); release_dma_lock(f); } } if (want & 2) { if (request_dma(3,"ltpc")) { want &= ~2; } else { f=claim_dma_lock(); disable_dma(3); clear_dma_ff(3); set_dma_mode(3,DMA_MODE_WRITE); set_dma_addr(3,virt_to_bus(ltdmabuf)); set_dma_count(3,sizeof(struct lt_mem)); enable_dma(3); release_dma_lock(f); } } /* set up request */ /* FIXME -- do timings better! */ ltdmabuf[0] = LT_READMEM; ltdmabuf[1] = 1; /* mailbox */ ltdmabuf[2] = 0; ltdmabuf[3] = 0; /* address */ ltdmabuf[4] = 0; ltdmabuf[5] = 1; /* read 0x0100 bytes */ ltdmabuf[6] = 0; /* dunno if this is necessary */ inb_p(io+1); inb_p(io+0); timeout = jiffies+100*HZ/100; while(time_before(jiffies, timeout)) { if ( 0xfa == inb_p(io+6) ) break; } inb_p(io+3); inb_p(io+2); while(time_before(jiffies, timeout)) { if ( 0xfb == inb_p(io+6) ) break; } /* release the other dma channel (if we opened both of them) */ if ((want & 2) && (get_dma_residue(3)==sizeof(struct lt_mem))) { want &= ~2; free_dma(3); } if ((want & 1) && (get_dma_residue(1)==sizeof(struct lt_mem))) { want &= ~1; free_dma(1); } if (!want) return 0; return (want & 2) ? 3 : 1; }