Example 1
static void setup_tx_dma(struct pi_local *lp, int length)
{
    unsigned long dma_abs;
    unsigned long flags;
    unsigned long dmachan;

    save_flags(flags);
    cli();

    dmachan = lp->dmachan;
    dma_abs = (unsigned long) (lp->txdmabuf);

    if(!valid_dma_page(dma_abs, DMA_BUFF_SIZE + sizeof(struct mbuf)))
	panic("PI: TX buffer violates DMA boundary!");

    disable_dma(dmachan);
    /* Set DMA mode register to single transfers, incrementing address,
     *  no auto-init, reads from memory (memory-to-device)
     */
    set_dma_mode(dmachan, DMA_MODE_WRITE);
    clear_dma_ff(dmachan);
    set_dma_addr(dmachan, dma_abs);
    /* output byte count */
    set_dma_count(dmachan, length);

    restore_flags(flags);
}
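
The save_flags()/cli() pair above is the pre-2.5 way of serializing access to the ISA DMA controller; later kernels replaced it with claim_dma_lock()/release_dma_lock(), as most of the examples below show. A minimal sketch of the same TX setup in the newer idiom (illustrative only, assuming the same pi_local fields; setup_tx_dma_sketch is not the driver's actual code):

static void setup_tx_dma_sketch(struct pi_local *lp, int length)
{
	/* Serializes all access to the ISA DMA controller. */
	unsigned long flags = claim_dma_lock();

	disable_dma(lp->dmachan);
	/* Reset the 8237's address/count byte flip-flop. */
	clear_dma_ff(lp->dmachan);
	/* Single transfers, incrementing address, memory-to-device. */
	set_dma_mode(lp->dmachan, DMA_MODE_WRITE);
	set_dma_addr(lp->dmachan, (unsigned long)lp->txdmabuf);
	set_dma_count(lp->dmachan, length);

	release_dma_lock(flags);
}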
Example 2
static void setup_DMA(void) {

	unsigned long addr,count;
	unsigned char dma_code;
	dma_code = DMA_WRITE;
	if (command == FD_READ)
		dma_code = DMA_READ;
	if (command == FD_FORMAT) {
		addr = (long) tmp_floppy_area;
		count = floppy->sect*4;
	} else {
		addr = (long) CURRENT->buffer;
		count = 1024;
	}
	if (read_track) {
/* mark buffer-track bad, in case all this fails.. */
		buffer_drive = buffer_track = -1;
		count = floppy->sect*floppy->head*512;
		addr = (long) floppy_track_buffer;
	} else if (addr >= LAST_DMA_ADDR) {
		addr = (long) tmp_floppy_area;
		if (command == FD_WRITE)
			copy_buffer(CURRENT->buffer, tmp_floppy_area);
	}
	cli();
	disable_dma(FLOPPY_DMA);

	/* clear_dma_ff(FLOPPY_DMA); */
	set_dma_mode(FLOPPY_DMA, (command == FD_READ)? DMA_MODE_READ : DMA_MODE_WRITE);
	set_dma_addr(FLOPPY_DMA, addr);
	set_dma_count(FLOPPY_DMA, count);
	enable_dma(FLOPPY_DMA);
	sti();
}
Example 3
/* Prototype: fasdmatype_t powertecscsi_dma_setup(host, SCpnt, direction, min_type)
 * Purpose  : initialises DMA/PIO
 * Params   : host      - host
 *	      SCpnt     - command
 *	      direction - DMA on to/off of card
 *	      min_type  - minimum DMA support that we must have for this transfer
 * Returns  : type of transfer to be performed
 */
static fasdmatype_t
powertecscsi_dma_setup(struct Scsi_Host *host, Scsi_Pointer *SCp,
		       fasdmadir_t direction, fasdmatype_t min_type)
{
	struct powertec_info *info = (struct powertec_info *)host->hostdata;
	int dmach = host->dma_channel;

	if (info->info.ifcfg.capabilities & FASCAP_DMA &&
	    min_type == fasdma_real_all) {
		int bufs, map_dir, dma_dir;

		bufs = copy_SCp_to_sg(&info->sg[0], SCp, NR_SG);

		if (direction == DMA_OUT)
			map_dir = PCI_DMA_TODEVICE,
			dma_dir = DMA_MODE_WRITE;
		else
			map_dir = PCI_DMA_FROMDEVICE,
			dma_dir = DMA_MODE_READ;

		pci_map_sg(NULL, info->sg, bufs, map_dir);

		disable_dma(dmach);
		set_dma_sg(dmach, info->sg, bufs);
		set_dma_mode(dmach, dma_dir);
		enable_dma(dmach);
		return fasdma_real_all;
	}

	/*
	 * If we're not doing DMA,
	 *  we'll do slow PIO
	 */
	return fasdma_pio;
}
Example 4
static void handlewrite(struct net_device *dev)
{
	/* called *only* from idle, non-reentrant */
	/* on entry, 0xfb and ltdmabuf holds data */
	int dma = dev->dma;
	int base = dev->base_addr;
	unsigned long flags;
	
	flags=claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma,DMA_MODE_WRITE);
	set_dma_addr(dma,virt_to_bus(ltdmabuf));
	set_dma_count(dma,800);
	enable_dma(dma);
	release_dma_lock(flags);
	
	inb_p(base+3);
	inb_p(base+2);

	if ( wait_timeout(dev,0xfb) ) {
		flags=claim_dma_lock();
		printk("timed out in handlewrite, dma res %d\n",
			get_dma_residue(dev->dma) );
		release_dma_lock(flags);
	}
}
Example 5
/* Prototype: fasdmatype_t eesoxscsi_dma_setup(host, SCpnt, direction, min_type)
 * Purpose  : initialises DMA/PIO
 * Params   : host      - host
 *	      SCpnt     - command
 *	      direction - DMA on to/off of card
 *	      min_type  - minimum DMA support that we must have for this transfer
 * Returns  : type of transfer to be performed
 */
static fasdmatype_t
eesoxscsi_dma_setup(struct Scsi_Host *host, Scsi_Pointer *SCp,
		       fasdmadir_t direction, fasdmatype_t min_type)
{
	struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata;
	int dmach = host->dma_channel;

	if (dmach != NO_DMA &&
	    (min_type == fasdma_real_all || SCp->this_residual >= 512)) {
		int bufs, map_dir, dma_dir;

		bufs = copy_SCp_to_sg(&info->sg[0], SCp, NR_SG);

		if (direction == DMA_OUT)
			map_dir = PCI_DMA_TODEVICE,
			dma_dir = DMA_MODE_WRITE;
		else
			map_dir = PCI_DMA_FROMDEVICE,
			dma_dir = DMA_MODE_READ;

		pci_map_sg(NULL, info->sg, bufs, map_dir);

		disable_dma(dmach);
		set_dma_sg(dmach, info->sg, bufs);
		set_dma_mode(dmach, dma_dir);
		enable_dma(dmach);
		return fasdma_real_all;
	}
	/*
	 * We don't do DMA, we only do slow PIO
	 *
	 * Some day, we will do Pseudo DMA
	 */
	return fasdma_pseudo;
}
Example 6
static __inline__ int
NCR53c406a_dma_setup (unsigned char *ptr, 
		      unsigned int count, 
		      unsigned char mode) {
    unsigned limit;
    unsigned long flags = 0;
    
    VDEB(printk("dma: before count=%d   ", count));
    if (dma_chan <=3) {
        if (count > 65536)
            count = 65536;
        limit = 65536 - (((unsigned) ptr) & 0xFFFF);
    } else {
        if (count > (65536<<1)) 
            count = (65536<<1);
        limit = (65536<<1) - (((unsigned) ptr) & 0x1FFFF);
    }
    
    if (count > limit) count = limit;
    
    VDEB(printk("after count=%d\n", count));
    if ((count & 1) || (((unsigned) ptr) & 1))
        panic ("NCR53c406a: attempted unaligned DMA transfer\n"); 
    
    flags=claim_dma_lock();
    disable_dma(dma_chan);
    clear_dma_ff(dma_chan);
    set_dma_addr(dma_chan, (long) ptr);
    set_dma_count(dma_chan, count);
    set_dma_mode(dma_chan, mode);
    enable_dma(dma_chan);
    release_dma_lock(flags);    
    
    return count;
}
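
The clamping above keeps a transfer inside one ISA DMA page: byte channels 0-3 address 64 KiB pages, word channels 5-7 address 128 KiB pages, and the 8237 cannot cross such a boundary. The same computation as a standalone helper (a hypothetical sketch, not part of the driver):

/* Largest count that keeps a transfer starting at addr inside its
 * ISA DMA page; chan selects byte (64 KiB) or word (128 KiB) pages. */
static unsigned int isa_dma_clamp_count(unsigned long addr,
					unsigned int count, int chan)
{
	unsigned long page  = (chan <= 3) ? 0x10000UL : 0x20000UL;
	unsigned long limit = page - (addr & (page - 1));	/* bytes left in page */

	return (count > limit) ? limit : count;
}

For example, a 4096-byte request starting 1024 bytes below a 64 KiB boundary on channel 1 is clamped to 1024 bytes.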
Example 7
int labpc_init_dma_chan(struct comedi_device *dev, unsigned int dma_chan)
{
	struct labpc_private *devpriv = dev->private;
	void *dma_buffer;
	unsigned long dma_flags;
	int ret;

	if (dma_chan != 1 && dma_chan != 3)
		return -EINVAL;

	dma_buffer = kmalloc(dma_buffer_size, GFP_KERNEL | GFP_DMA);
	if (!dma_buffer)
		return -ENOMEM;

	ret = request_dma(dma_chan, dev->board_name);
	if (ret) {
		kfree(dma_buffer);
		return ret;
	}

	devpriv->dma_buffer = dma_buffer;
	devpriv->dma_chan = dma_chan;
	devpriv->dma_addr = virt_to_bus(devpriv->dma_buffer);

	dma_flags = claim_dma_lock();
	disable_dma(devpriv->dma_chan);
	set_dma_mode(devpriv->dma_chan, DMA_MODE_READ);
	release_dma_lock(dma_flags);

	return 0;
}
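
An init routine like this implies a matching teardown; a hedged sketch of what it could look like (the helper name and ordering are assumptions, not the driver's actual cleanup code):

/* Hypothetical counterpart to labpc_init_dma_chan(): quiesce the
 * channel, give it back, and free the bounce buffer. */
void labpc_free_dma_chan_sketch(struct comedi_device *dev)
{
	struct labpc_private *devpriv = dev->private;
	unsigned long dma_flags;

	if (devpriv->dma_chan) {
		dma_flags = claim_dma_lock();
		disable_dma(devpriv->dma_chan);
		release_dma_lock(dma_flags);
		free_dma(devpriv->dma_chan);
		devpriv->dma_chan = 0;
	}
	kfree(devpriv->dma_buffer);
	devpriv->dma_buffer = NULL;
}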
Example 8
static void receive_packet(struct net_device *dev, int len)
{
	int rlen;
	elp_device *adapter = dev->priv;
	void *target;
	struct sk_buff *skb;
	unsigned long flags;

	rlen = (len + 1) & ~1;
	skb = dev_alloc_skb(rlen + 2);

	if (!skb) {
		printk(KERN_WARNING "%s: memory squeeze, dropping packet\n", dev->name);
		target = adapter->dma_buffer;
		adapter->current_dma.target = NULL;
		/* FIXME: stats */
		return;
	}

	skb_reserve(skb, 2);
	target = skb_put(skb, rlen);
	if ((unsigned long)(target + rlen) >= MAX_DMA_ADDRESS) {
		adapter->current_dma.target = target;
		target = adapter->dma_buffer;
	} else {
		adapter->current_dma.target = NULL;
	}

	/* if this happens, we die */
	if (test_and_set_bit(0, (void *) &adapter->dmaing))
		printk(KERN_ERR "%s: rx blocked, DMA in progress, dir %d\n", dev->name, adapter->current_dma.direction);

	skb->dev = dev;
	adapter->current_dma.direction = 0;
	adapter->current_dma.length = rlen;
	adapter->current_dma.skb = skb;
	adapter->current_dma.start_time = jiffies;

	outb_control(adapter->hcr_val | DIR | TCEN | DMAE, dev);

	flags=claim_dma_lock();
	disable_dma(dev->dma);
	clear_dma_ff(dev->dma);
	set_dma_mode(dev->dma, 0x04);	/* dma read */
	set_dma_addr(dev->dma, isa_virt_to_bus(target));
	set_dma_count(dev->dma, rlen);
	enable_dma(dev->dma);
	release_dma_lock(flags);

	if (elp_debug >= 3) {
		printk(KERN_DEBUG "%s: rx DMA transfer started\n", dev->name);
	}

	if (adapter->rx_active)
		adapter->rx_active--;

	if (!adapter->busy)
		printk(KERN_WARNING "%s: receive_packet called, busy not set.\n", dev->name);
}
Example 9
static bool send_packet(struct net_device *dev, struct sk_buff *skb)
{
	elp_device *adapter = netdev_priv(dev);
	unsigned long target;
	unsigned long flags;

	unsigned int nlen = (((skb->len < 60) ? 60 : skb->len) + 1) & (~1);

	if (test_and_set_bit(0, (void *) &adapter->busy)) {
		if (elp_debug >= 2)
			pr_debug("%s: transmit blocked\n", dev->name);
		return false;
	}

	dev->stats.tx_bytes += nlen;

	adapter->tx_pcb.command = CMD_TRANSMIT_PACKET;
	adapter->tx_pcb.length = sizeof(struct Xmit_pkt);
	adapter->tx_pcb.data.xmit_pkt.buf_ofs
	    = adapter->tx_pcb.data.xmit_pkt.buf_seg = 0;	
	adapter->tx_pcb.data.xmit_pkt.pkt_len = nlen;

	if (!send_pcb(dev, &adapter->tx_pcb)) {
		adapter->busy = 0;
		return false;
	}
	
	if (test_and_set_bit(0, (void *) &adapter->dmaing))
		pr_debug("%s: tx: DMA %d in progress\n", dev->name, adapter->current_dma.direction);

	adapter->current_dma.direction = 1;
	adapter->current_dma.start_time = jiffies;

	if ((unsigned long)(skb->data + nlen) >= MAX_DMA_ADDRESS || nlen != skb->len) {
		skb_copy_from_linear_data(skb, adapter->dma_buffer, nlen);
		memset(adapter->dma_buffer+skb->len, 0, nlen-skb->len);
		target = isa_virt_to_bus(adapter->dma_buffer);
	}
	else {
		target = isa_virt_to_bus(skb->data);
	}
	adapter->current_dma.skb = skb;

	flags=claim_dma_lock();
	disable_dma(dev->dma);
	clear_dma_ff(dev->dma);
	set_dma_mode(dev->dma, 0x48);	/* dma memory -> io */
	set_dma_addr(dev->dma, target);
	set_dma_count(dev->dma, nlen);
	outb_control(adapter->hcr_val | DMAE | TCEN, dev);
	enable_dma(dev->dma);
	release_dma_lock(flags);

	if (elp_debug >= 3)
		pr_debug("%s: DMA transfer started\n", dev->name);

	return true;
}
Example 10
static ssize_t __dma_write(gpib_board_t *board, nec7210_private_t *priv, dma_addr_t address, size_t length)
{
	unsigned long flags, dma_irq_flags;
	int residue = 0;
	int retval = 0;

	spin_lock_irqsave(&board->spinlock, flags);

	/* program dma controller */
	dma_irq_flags = claim_dma_lock();
	disable_dma(priv->dma_channel);
	clear_dma_ff(priv->dma_channel);
	set_dma_count(priv->dma_channel, length);
	set_dma_addr(priv->dma_channel, address);
	set_dma_mode(priv->dma_channel, DMA_MODE_WRITE );
	enable_dma(priv->dma_channel);
	release_dma_lock(dma_irq_flags);

	// enable board's dma for output
	nec7210_set_reg_bits( priv, IMR2, HR_DMAO, HR_DMAO );

	clear_bit(WRITE_READY_BN, &priv->state);
	set_bit(DMA_WRITE_IN_PROGRESS_BN, &priv->state);

	spin_unlock_irqrestore(&board->spinlock, flags);

	// suspend until message is sent
	if(wait_event_interruptible(board->wait, test_bit(DMA_WRITE_IN_PROGRESS_BN, &priv->state) == 0 ||
		test_bit( BUS_ERROR_BN, &priv->state ) || test_bit( DEV_CLEAR_BN, &priv->state ) ||
		test_bit(TIMO_NUM, &board->status)))
	{
		GPIB_DPRINTK( "gpib write interrupted!\n" );
		retval = -ERESTARTSYS;
	}
	if(test_bit(TIMO_NUM, &board->status))
		retval = -ETIMEDOUT;
	if( test_and_clear_bit( DEV_CLEAR_BN, &priv->state ) )
		retval = -EINTR;
	if( test_and_clear_bit( BUS_ERROR_BN, &priv->state ) )
		retval = -EIO;

	// disable board's dma
	nec7210_set_reg_bits( priv, IMR2, HR_DMAO, 0 );

	dma_irq_flags = claim_dma_lock();
	clear_dma_ff(priv->dma_channel);
	disable_dma(priv->dma_channel);
	residue = get_dma_residue(priv->dma_channel);
	release_dma_lock( dma_irq_flags );

	if(residue)
		retval = -EPIPE;

	return retval ? retval : length;
}
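
Note that the tail above turns any nonzero residue into -EPIPE, discarding the partial count. If a caller preferred a short write instead, the residue read back under the DMA lock already yields it (a sketch over the same locals):

	/* Alternative tail: report bytes actually moved on a short write. */
	if (retval == 0 && residue)
		return length - residue;
	return retval ? retval : length;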
Example 11
static int sscape_start_dma(int chan, unsigned long physaddr, int count, int dma_mode)
{
	unsigned long flags;

	flags = claim_dma_lock();
	disable_dma(chan);
	clear_dma_ff(chan);
	set_dma_mode(chan, dma_mode);
	set_dma_addr(chan, physaddr);
	set_dma_count(chan, count);
	enable_dma(chan);
	release_dma_lock(flags);
	return 0;
}
Example 12
void dac0800_startdma(unsigned char *buf, unsigned int size)
{
	/* Clear DMA interrupt */
	disable_dma(DAC0800_DMA_CHAN);

	/* Do DMA write to i/o operation */
	set_dma_mode(DAC0800_DMA_CHAN, DMA_MODE_WRITE);
	set_dma_device_addr(DAC0800_DMA_CHAN, DAC0800_DMA_DESTADDR);
	set_dma_addr(DAC0800_DMA_CHAN, (unsigned int) buf);
	set_dma_count(DAC0800_DMA_CHAN, size);

	/* Fire it off! */
	enable_dma(DAC0800_DMA_CHAN);
}
Example 13
static inline void
jz_mmc_start_dma(int chan, unsigned long phyaddr, int count, int mode)
{
	unsigned long flags;

	flags = claim_dma_lock();
	disable_dma(chan);
	clear_dma_ff(chan);
	jz_set_dma_block_size(chan, 32);
	set_dma_mode(chan, mode);
	set_dma_addr(chan, phyaddr);
	set_dma_count(chan, count + 31);
	enable_dma(chan);
	release_dma_lock(flags);
}
Example 14
/*
 *	Configure and start DMA engine.
 */
void __inline__ m5249audio_dmarun(void)
{
#if DEBUG
	printk("m5249audio_dmarun(): dma=%x count=%d\n",
		m5249audio_dmastart, m5249audio_dmacount);
#endif

	set_dma_mode(M5249AUDIO_DMA, DMA_MODE_WRITE|DMA_MODE_LONG_BIT);
	set_dma_device_addr(M5249AUDIO_DMA, (MCF_MBAR2+MCFA_PDOR3));
	set_dma_addr(M5249AUDIO_DMA, (int)&m5249audio_buf[m5249audio_dmastart]);
	set_dma_count(M5249AUDIO_DMA, m5249audio_dmacount);
	m5249audio_dmaing = 1;
	m5249audio_txbusy = 1;
	enable_dma(M5249AUDIO_DMA);
}
Example 15
/**
 * snd_dma_program - program an ISA DMA transfer
 * @dma: the dma number
 * @addr: the physical address of the buffer
 * @size: the DMA transfer size
 * @mode: the DMA transfer mode, DMA_MODE_XXX
 *
 * Programs an ISA DMA transfer for the given buffer.
 */
void snd_dma_program(unsigned long dma,
		     unsigned long addr, unsigned int size,
                     unsigned short mode)
{
	unsigned long flags;

	flags = claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma, mode);
	set_dma_addr(dma, addr);
	set_dma_count(dma, size);
	if (!(mode & DMA_MODE_NO_ENABLE))
		enable_dma(dma);
	release_dma_lock(flags);
}
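
Callers pair this helper with the others exported from sound/core/isadma.c; a usage sketch (chip->dma, buf_phys and pos are assumed driver-side names):

	/* Start an auto-initializing 4 KiB capture transfer. */
	snd_dma_program(chip->dma, buf_phys, 4096, DMA_MODE_READ | DMA_AUTOINIT);

	/* ...in the pointer callback: bytes transferred so far. */
	pos = snd_dma_pointer(chip->dma, 4096);

	/* ...on stop: */
	snd_dma_disable(chip->dma);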
Example 16
static void setup_rx_dma(struct pt_local *lp)
{
	unsigned long flags;
	int cmd;
	unsigned long dma_abs;
	unsigned char dmachan;

	save_flags(flags);
	cli();

	dma_abs = (unsigned long) (lp->rcvbuf->data);
	dmachan = lp->dmachan;
	cmd = lp->base + CTL;

	if(!valid_dma_page(dma_abs, DMA_BUFF_SIZE + sizeof(struct mbuf)))
		panic("PI: RX buffer violates DMA boundary!");

	/* Get ready for RX DMA */
	wrtscc(lp->cardbase, cmd, R1, WT_FN_RDYFN | WT_RDY_RT | INT_ERR_Rx | EXT_INT_ENAB);

	disable_dma(dmachan);
	clear_dma_ff(dmachan);

	/*
	 *	Set DMA mode register to single transfers, incrementing address,
	 *	auto init, writes
	 */

	set_dma_mode(dmachan, DMA_MODE_READ | 0x10);
	set_dma_addr(dmachan, dma_abs);
	set_dma_count(dmachan, lp->bufsiz);
	enable_dma(dmachan);

	/*
	 *	If a packet is already coming in, this line is supposed to
	 *	avoid receiving a partial packet.
	 */

	wrtscc(lp->cardbase, cmd, R0, RES_Rx_CRC);

	/* Enable RX dma */
	wrtscc(lp->cardbase, cmd, R1,
		WT_RDY_ENAB | WT_FN_RDYFN | WT_RDY_RT | INT_ERR_Rx | EXT_INT_ENAB);

	restore_flags(flags);
}
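
The 0x10 OR-ed into the mode above is the 8237 auto-initialize bit, which the x86 headers name DMA_AUTOINIT; spelled out, the call is equivalent to:

	set_dma_mode(dmachan, DMA_MODE_READ | DMA_AUTOINIT);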
Example 17
static int sound_start_dma(struct dma_buffparms *dmap, unsigned long physaddr, int count, int dma_mode)
{
	unsigned long flags;
	int chan = dmap->dma;

	/* printk( "Start DMA%d %d, %d\n",  chan,  (int)(physaddr-dmap->raw_buf_phys),  count); */

	flags = claim_dma_lock();
	disable_dma(chan);
	clear_dma_ff(chan);
	set_dma_mode(chan, dma_mode);
	set_dma_addr(chan, physaddr);
	set_dma_count(chan, count);
	enable_dma(chan);
	release_dma_lock(flags);

	return 0;
}
Example 18
static void handlecommand(struct net_device *dev)
{
	/* on entry, 0xfa and ltdmacbuf holds command */
	int dma = dev->dma;
	int base = dev->base_addr;
	unsigned long flags;

	flags=claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma,DMA_MODE_WRITE);
	set_dma_addr(dma,virt_to_bus(ltdmacbuf));
	set_dma_count(dma,50);
	enable_dma(dma);
	release_dma_lock(flags);
	inb_p(base+3);
	inb_p(base+2);
	if ( wait_timeout(dev,0xfa) ) printk("timed out in handlecommand\n");
} 
Example 19
/* Prototype: fasdmatype_t cumanascsi_2_dma_setup(host, SCpnt, direction, min_type)
 * Purpose  : initialises DMA/PIO
 * Params   : host      - host
 *	      SCpnt     - command
 *	      direction - DMA on to/off of card
 *	      min_type  - minimum DMA support that we must have for this transfer
 * Returns  : type of transfer to be performed
 */
static fasdmatype_t
cumanascsi_2_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp,
                       fasdmadir_t direction, fasdmatype_t min_type)
{
    struct cumanascsi2_info *info = (struct cumanascsi2_info *)host->hostdata;
    struct device *dev = scsi_get_device(host);
    int dmach = info->info.scsi.dma;

    writeb(ALATCH_DIS_DMA, info->base + CUMANASCSI2_ALATCH);

    if (dmach != NO_DMA &&
            (min_type == fasdma_real_all || SCp->this_residual >= 512)) {
        int bufs, map_dir, dma_dir, alatch_dir;

        bufs = copy_SCp_to_sg(&info->sg[0], SCp, NR_SG);

        if (direction == DMA_OUT)
            map_dir = DMA_TO_DEVICE,
            dma_dir = DMA_MODE_WRITE,
            alatch_dir = ALATCH_DMA_OUT;
        else
            map_dir = DMA_FROM_DEVICE,
            dma_dir = DMA_MODE_READ,
            alatch_dir = ALATCH_DMA_IN;

        dma_map_sg(dev, info->sg, bufs + 1, map_dir);

        disable_dma(dmach);
        set_dma_sg(dmach, info->sg, bufs + 1);
        writeb(alatch_dir, info->base + CUMANASCSI2_ALATCH);
        set_dma_mode(dmach, dma_dir);
        enable_dma(dmach);
        writeb(ALATCH_ENA_DMA, info->base + CUMANASCSI2_ALATCH);
        writeb(ALATCH_DIS_BIT32, info->base + CUMANASCSI2_ALATCH);
        return fasdma_real_all;
    }

    /*
     * If we're not doing DMA,
     *  we'll do pseudo DMA
     */
    return fasdma_pio;
}
Example 20
/* Prototype: fasdmatype_t cumanascsi_2_dma_setup(host, SCpnt, direction, min_type)
 * Purpose  : initialises DMA/PIO
 * Params   : host      - host
 *	      SCpnt     - command
 *	      direction - DMA on to/off of card
 *	      min_type  - minimum DMA support that we must have for this transfer
 * Returns  : type of transfer to be performed
 */
static fasdmatype_t
cumanascsi_2_dma_setup(struct Scsi_Host *host, Scsi_Pointer *SCp,
		       fasdmadir_t direction, fasdmatype_t min_type)
{
	struct cumanascsi2_info *info = (struct cumanascsi2_info *)host->hostdata;
	int dmach = host->dma_channel;

	outb(ALATCH_DIS_DMA, info->alatch);

	if (dmach != NO_DMA &&
	    (min_type == fasdma_real_all || SCp->this_residual >= 512)) {
		int bufs, map_dir, dma_dir, alatch_dir;

		bufs = copy_SCp_to_sg(&info->sg[0], SCp, NR_SG);

		if (direction == DMA_OUT)
			map_dir = PCI_DMA_TODEVICE,
			dma_dir = DMA_MODE_WRITE,
			alatch_dir = ALATCH_DMA_OUT;
		else
			map_dir = PCI_DMA_FROMDEVICE,
			dma_dir = DMA_MODE_READ,
			alatch_dir = ALATCH_DMA_IN;

		pci_map_sg(NULL, info->sg, bufs, map_dir);

		disable_dma(dmach);
		set_dma_sg(dmach, info->sg, bufs);
		outb(alatch_dir, info->alatch);
		set_dma_mode(dmach, dma_dir);
		enable_dma(dmach);
		outb(ALATCH_ENA_DMA, info->alatch);
		outb(ALATCH_DIS_BIT32, info->alatch);
		return fasdma_real_all;
	}

	/*
	 * If we're not doing DMA,
	 *  we'll do pseudo DMA
	 */
	return fasdma_pio;
}
Example 21
/* Prototype: fasdmatype_t powertecscsi_dma_setup(host, SCpnt, direction, min_type)
 * Purpose  : initialises DMA/PIO
 * Params   : host      - host
 *	      SCpnt     - command
 *	      direction - DMA on to/off of card
 *	      min_type  - minimum DMA support that we must have for this transfer
 * Returns  : type of transfer to be performed
 */
static fasdmatype_t
powertecscsi_dma_setup(struct Scsi_Host *host, Scsi_Pointer *SCp,
		       fasdmadir_t direction, fasdmatype_t min_type)
{
	PowerTecScsi_Info *info = (PowerTecScsi_Info *)host->hostdata;
	int dmach = host->dma_channel;

	if (dmach != NO_DMA &&
	    (min_type == fasdma_real_all || SCp->this_residual >= 512)) {
		int buf;

		for (buf = 1; buf <= SCp->buffers_residual &&
			      buf < NR_SG; buf++) {
			info->dmasg[buf].address = __virt_to_bus(
				(unsigned long)SCp->buffer[buf].address);
			info->dmasg[buf].length = SCp->buffer[buf].length;

			powertecscsi_invalidate(SCp->buffer[buf].address,
						SCp->buffer[buf].length,
						direction);
		}

		info->dmasg[0].address = __virt_to_phys((unsigned long)SCp->ptr);
		info->dmasg[0].length = SCp->this_residual;
		powertecscsi_invalidate(SCp->ptr,
					SCp->this_residual, direction);

		disable_dma(dmach);
		set_dma_sg(dmach, info->dmasg, buf);
		set_dma_mode(dmach,
			     direction == DMA_OUT ? DMA_MODE_WRITE :
						    DMA_MODE_READ);
		enable_dma(dmach);
		return fasdma_real_all;
	}

	/*
	 * If we're not doing DMA,
	 *  we'll do slow PIO
	 */
	return fasdma_pio;
}
Example 22
/* Prototype: fasdmatype_t eesoxscsi_dma_setup(host, SCpnt, direction, min_type)
 * Purpose  : initialises DMA/PIO
 * Params   : host      - host
 *	      SCpnt     - command
 *	      direction - DMA on to/off of card
 *	      min_type  - minimum DMA support that we must have for this transfer
 * Returns  : type of transfer to be performed
 */
static fasdmatype_t
eesoxscsi_dma_setup(struct Scsi_Host *host, Scsi_Pointer *SCp,
		       fasdmadir_t direction, fasdmatype_t min_type)
{
	EESOXScsi_Info *info = (EESOXScsi_Info *)host->hostdata;
	int dmach = host->dma_channel;

	if (dmach != NO_DMA &&
	    (min_type == fasdma_real_all || SCp->this_residual >= 512)) {
		int buf;

		for(buf = 1; buf <= SCp->buffers_residual &&
			     buf < NR_SG; buf++) {
			info->dmasg[buf].address = __virt_to_bus(
				(unsigned long)SCp->buffer[buf].address);
			info->dmasg[buf].length = SCp->buffer[buf].length;

			eesoxscsi_invalidate(SCp->buffer[buf].address,
						SCp->buffer[buf].length,
						direction);
		}

		info->dmasg[0].address = __virt_to_phys((unsigned long)SCp->ptr);
		info->dmasg[0].length = SCp->this_residual;
		eesoxscsi_invalidate(SCp->ptr,
					SCp->this_residual, direction);

		disable_dma(dmach);
		set_dma_sg(dmach, info->dmasg, buf);
		set_dma_mode(dmach,
			     direction == DMA_OUT ? DMA_MODE_WRITE :
						    DMA_MODE_READ);
		enable_dma(dmach);
		return fasdma_real_all;
	}
	/*
	 * We don't do DMA, we only do slow PIO
	 *
	 * Some day, we will do Pseudo DMA
	 */
	return fasdma_pseudo;
}
Example 23
/* Prototype: fasdmatype_t eesoxscsi_dma_setup(host, SCpnt, direction, min_type)
 * Purpose  : initialises DMA/PIO
 * Params   : host      - host
 *	      SCpnt     - command
 *	      direction - DMA on to/off of card
 *	      min_type  - minimum DMA support that we must have for this transfer
 * Returns  : type of transfer to be performed
 */
static fasdmatype_t
eesoxscsi_dma_setup(struct Scsi_Host *host, Scsi_Pointer *SCp,
		       fasdmadir_t direction, fasdmatype_t min_type)
{
	EESOXScsi_Info *info = (EESOXScsi_Info *)host->hostdata;
	int dmach = host->dma_channel;

	if (dmach != NO_DMA &&
	    (min_type == fasdma_real_all || SCp->this_residual >= 512)) {
		int bufs = SCp->buffers_residual;
		int pci_dir, dma_dir;

		if (bufs)
			memcpy(info->sg + 1, SCp->buffer + 1,
				sizeof(struct scatterlist) * bufs);
		info->sg[0].address = SCp->ptr;
		info->sg[0].page    = NULL;
		info->sg[0].length  = SCp->this_residual;

		if (direction == DMA_OUT)
			pci_dir = PCI_DMA_TODEVICE,
			dma_dir = DMA_MODE_WRITE;
		else
			pci_dir = PCI_DMA_FROMDEVICE,
			dma_dir = DMA_MODE_READ;

		pci_map_sg(NULL, info->sg, bufs + 1, pci_dir);

		disable_dma(dmach);
		set_dma_sg(dmach, info->sg, bufs + 1);
		set_dma_mode(dmach, dma_dir);
		enable_dma(dmach);
		return fasdma_real_all;
	}
	/*
	 * We don't do DMA, we only do slow PIO
	 *
	 * Some day, we will do Pseudo DMA
	 */
	return fasdma_pseudo;
}
Example 24
/* read data from the card */
static void handlefd(struct net_device *dev)
{
	int dma = dev->dma;
	int base = dev->base_addr;
	unsigned long flags;

	flags=claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma,DMA_MODE_READ);
	set_dma_addr(dma,virt_to_bus(ltdmabuf));
	set_dma_count(dma,800);
	enable_dma(dma);
	release_dma_lock(flags);

	inb_p(base+3);
	inb_p(base+2);

	if ( wait_timeout(dev,0xfd) ) printk("timed out in handlefd\n");
	sendup_buffer(dev);
} 
Example 25
/* read a command from the card */
static void handlefc(struct net_device *dev)
{
	/* called *only* from idle, non-reentrant */
	int dma = dev->dma;
	int base = dev->base_addr;
	unsigned long flags;


	flags=claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma,DMA_MODE_READ);
	set_dma_addr(dma,virt_to_bus(ltdmacbuf));
	set_dma_count(dma,50);
	enable_dma(dma);
	release_dma_lock(flags);

	inb_p(base+3);
	inb_p(base+2);

	if ( wait_timeout(dev,0xfc) ) printk("timed out in handlefc\n");
}
Example 26
static int send_packet(struct net_device *dev, struct sk_buff *skb)
{
	elp_device *adapter = dev->priv;
	unsigned long target;
	unsigned long flags;

	/*
	 * make sure the length is even and no shorter than 60 bytes
	 */
	unsigned int nlen = (((skb->len < 60) ? 60 : skb->len) + 1) & (~1);

	if (test_and_set_bit(0, (void *) &adapter->busy)) {
		if (elp_debug >= 2)
			printk(KERN_DEBUG "%s: transmit blocked\n", dev->name);
		return FALSE;
	}

	adapter->stats.tx_bytes += nlen;

	/*
	 * send the adapter a transmit packet command. Ignore segment and offset
	 * and make sure the length is even
	 */
	adapter->tx_pcb.command = CMD_TRANSMIT_PACKET;
	adapter->tx_pcb.length = sizeof(struct Xmit_pkt);
	adapter->tx_pcb.data.xmit_pkt.buf_ofs
	    = adapter->tx_pcb.data.xmit_pkt.buf_seg = 0;	/* Unused */
	adapter->tx_pcb.data.xmit_pkt.pkt_len = nlen;

	if (!send_pcb(dev, &adapter->tx_pcb)) {
		adapter->busy = 0;
		return FALSE;
	}
	/* if this happens, we die */
	if (test_and_set_bit(0, (void *) &adapter->dmaing))
		printk(KERN_DEBUG "%s: tx: DMA %d in progress\n", dev->name, adapter->current_dma.direction);

	adapter->current_dma.direction = 1;
	adapter->current_dma.start_time = jiffies;

	if ((unsigned long)(skb->data + nlen) >= MAX_DMA_ADDRESS || nlen != skb->len) {
		skb_copy_from_linear_data(skb, adapter->dma_buffer, nlen);
		memset(adapter->dma_buffer+skb->len, 0, nlen-skb->len);
		target = isa_virt_to_bus(adapter->dma_buffer);
	}
	else {
		target = isa_virt_to_bus(skb->data);
	}
	adapter->current_dma.skb = skb;

	flags=claim_dma_lock();
	disable_dma(dev->dma);
	clear_dma_ff(dev->dma);
	set_dma_mode(dev->dma, 0x48);	/* dma memory -> io */
	set_dma_addr(dev->dma, target);
	set_dma_count(dev->dma, nlen);
	outb_control(adapter->hcr_val | DMAE | TCEN, dev);
	enable_dma(dev->dma);
	release_dma_lock(flags);

	if (elp_debug >= 3)
		printk(KERN_DEBUG "%s: DMA transfer started\n", dev->name);

	return TRUE;
}
Example 27
ssize_t xfifo_dma_write(struct file *filp, const char __user *buf, size_t count,
        loff_t *f_pos)
{
        struct xfifo_dma_dev *dev = filp->private_data;
        size_t transfer_size;
 
        int retval = 0;
 
        if (mutex_lock_interruptible(&dev->mutex)) {
                return -EINTR;
        }
 
        dev->writes++;
 
        transfer_size = count;
        if (count > dev->fifo_depth) {
                transfer_size = dev->fifo_depth;
        }
 
        /* Allocate a DMA buffer for the transfer */
        dev->buffer_v_addr = dma_alloc_coherent(&dev->pdev->dev, transfer_size,
                &dev->buffer_d_addr, GFP_KERNEL);
        if (!dev->buffer_v_addr) {
                dev_err(&dev->pdev->dev,
                        "coherent DMA buffer allocation failed\n");
                retval = -ENOMEM;
                goto fail_buffer;
        }
 
        PDEBUG("dma buffer alloc - d @0x%0x v @0x%0x\n",
                (u32)dev->buffer_d_addr, (u32)dev->buffer_v_addr);
 
        if (request_dma(dev->dma_channel, MODULE_NAME)) {
                dev_err(&dev->pdev->dev,
                        "unable to alloc DMA channel %d\n",
                        dev->dma_channel);
                retval = -EBUSY;
                goto fail_client_data;
        }
 
        dev->busy = 1;
        dev->count = transfer_size;
 
        set_dma_mode(dev->dma_channel, DMA_MODE_WRITE);
        set_dma_addr(dev->dma_channel, dev->buffer_d_addr);
        set_dma_count(dev->dma_channel, transfer_size);
        set_pl330_client_data(dev->dma_channel, dev->client_data);
        set_pl330_done_callback(dev->dma_channel,
                xfifo_dma_done_callback, dev);
        set_pl330_fault_callback(dev->dma_channel,
                xfifo_dma_fault_callback, dev);
        set_pl330_incr_dev_addr(dev->dma_channel, 0);
 
        /* Load our DMA buffer with the user data */
        if (copy_from_user(dev->buffer_v_addr, buf, transfer_size)) {
                retval = -EFAULT;
                goto fail_copy;
        }
 
        xfifo_dma_reset_fifo();
        /* Kick off the DMA */
        enable_dma(dev->dma_channel);
 
        mutex_unlock(&dev->mutex);
 
        wait_event_interruptible(xfifo_dma_wait, dev->busy == 0);
 
        /* Deallocate the DMA buffer and free the channel */
        free_dma(dev->dma_channel);
 
        dma_free_coherent(&dev->pdev->dev, dev->count, dev->buffer_v_addr,
                dev->buffer_d_addr);
 
        PDEBUG("dma write %zu bytes\n", transfer_size);
 
        return transfer_size;
 
fail_copy:
        free_dma(dev->dma_channel);
fail_client_data:
        dma_free_coherent(&dev->pdev->dev, transfer_size, dev->buffer_v_addr,
                dev->buffer_d_addr);
fail_buffer:
        mutex_unlock(&dev->mutex);
        return retval;
}
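
Allocating the coherent buffer and claiming the channel on every write is heavyweight; a common alternative is to acquire both once, for instance at open(), and keep them in the device structure. A minimal sketch under that assumption (hypothetical helper, not part of this driver):

static int xfifo_dma_open_sketch(struct xfifo_dma_dev *dev)
{
        /* One-time DMA resources, released again at close(). */
        dev->buffer_v_addr = dma_alloc_coherent(&dev->pdev->dev,
                dev->fifo_depth, &dev->buffer_d_addr, GFP_KERNEL);
        if (!dev->buffer_v_addr)
                return -ENOMEM;

        if (request_dma(dev->dma_channel, MODULE_NAME)) {
                dma_free_coherent(&dev->pdev->dev, dev->fifo_depth,
                        dev->buffer_v_addr, dev->buffer_d_addr);
                return -EBUSY;
        }
        return 0;
}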
Example 28
int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
{
	unsigned long cflags, dflags;

	c->sync = 1;
	c->mtu = dev->mtu+64;
	c->count = 0;
	c->skb = NULL;
	c->skb2 = NULL;
	/*
	 *	Load the DMA interfaces up
	 */
	c->rxdma_on = 0;
	c->txdma_on = 0;

	/*
	 *	Allocate the DMA flip buffers. Limit by page size.
	 *	Everyone runs 1500 mtu or less on wan links so this
	 *	should be fine.
	 */

	if(c->mtu  > PAGE_SIZE/2)
		return -EMSGSIZE;

	c->rx_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if(c->rx_buf[0]==NULL)
		return -ENOBUFS;
	c->rx_buf[1]=c->rx_buf[0]+PAGE_SIZE/2;

	c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if(c->tx_dma_buf[0]==NULL)
	{
		free_page((unsigned long)c->rx_buf[0]);
		c->rx_buf[0]=NULL;
		return -ENOBUFS;
	}
	c->tx_dma_buf[1]=c->tx_dma_buf[0]+PAGE_SIZE/2;

	c->tx_dma_used=0;
	c->dma_tx = 1;
	c->dma_num=0;
	c->dma_ready=1;

	/*
	 *	Enable DMA control mode
	 */

	spin_lock_irqsave(c->lock, cflags);

	/*
	 *	TX DMA via DIR/REQ
	 */

	c->regs[R14]|= DTRREQ;
	write_zsreg(c, R14, c->regs[R14]);

	c->regs[R1]&= ~TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);

	/*
	 *	RX DMA via W/Req
	 */

	c->regs[R1]|= WT_FN_RDYFN;
	c->regs[R1]|= WT_RDY_RT;
	c->regs[R1]|= INT_ERR_Rx;
	c->regs[R1]&= ~TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);
	c->regs[R1]|= WT_RDY_ENAB;
	write_zsreg(c, R1, c->regs[R1]);

	/*
	 *	DMA interrupts
	 */

	/*
	 *	Set up the DMA configuration
	 */

	dflags=claim_dma_lock();

	disable_dma(c->rxdma);
	clear_dma_ff(c->rxdma);
	set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
	set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[0]));
	set_dma_count(c->rxdma, c->mtu);
	enable_dma(c->rxdma);

	disable_dma(c->txdma);
	clear_dma_ff(c->txdma);
	set_dma_mode(c->txdma, DMA_MODE_WRITE);
	disable_dma(c->txdma);

	release_dma_lock(dflags);

	/*
	 *	Select the DMA interrupt handlers
	 */

	c->rxdma_on = 1;
	c->txdma_on = 1;
	c->tx_dma_used = 1;

	c->irqs = &z8530_dma_sync;
	z8530_rtsdtr(c,1);
	write_zsreg(c, R3, c->regs[R3]|RxENABLE);

	spin_unlock_irqrestore(c->lock, cflags);

	return 0;
}
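
Everything this open path sets up has to be undone on close; the shutdown side looks roughly like the following (a sketch of the ordering only; the real z8530_sync_dma_close does more bookkeeping):

	flags = claim_dma_lock();
	disable_dma(c->rxdma);
	clear_dma_ff(c->rxdma);
	c->rxdma_on = 0;
	disable_dma(c->txdma);
	clear_dma_ff(c->txdma);
	c->txdma_on = 0;
	release_dma_lock(flags);

	/* Return the flip buffers allocated in the open path. */
	free_page((unsigned long)c->rx_buf[0]);
	free_page((unsigned long)c->tx_dma_buf[0]);
	c->rx_buf[0] = c->rx_buf[1] = NULL;
	c->tx_dma_buf[0] = c->tx_dma_buf[1] = NULL;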
Example 29
static void z8530_rx_done(struct z8530_channel *c)
{
	struct sk_buff *skb;
	int ct;

	/*
	 *	Is our receive engine in DMA mode
	 */

	if(c->rxdma_on)
	{
		/*
		 *	Save the ready state and the buffer currently
		 *	being used as the DMA target
		 */

		int ready=c->dma_ready;
		unsigned char *rxb=c->rx_buf[c->dma_num];
		unsigned long flags;

		/*
		 *	Complete this DMA. Necessary to find the length
		 */

		flags=claim_dma_lock();

		disable_dma(c->rxdma);
		clear_dma_ff(c->rxdma);
		c->rxdma_on=0;
		ct=c->mtu-get_dma_residue(c->rxdma);
		if(ct<0)
			ct=2;	/* Shit happens.. */
		c->dma_ready=0;

		/*
		 *	Normal case: the other slot is free, start the next DMA
		 *	into it immediately.
		 */

		if(ready)
		{
			c->dma_num^=1;
			set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
			set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num]));
			set_dma_count(c->rxdma, c->mtu);
			c->rxdma_on = 1;
			enable_dma(c->rxdma);
			/* Stop any frames that we missed the head of
			   from passing */
			write_zsreg(c, R0, RES_Rx_CRC);
		}
		else
			/* Can't occur as we don't reenable the DMA irq until
			   after the flip is done */
			printk(KERN_WARNING "%s: DMA flip overrun!\n",
			       c->netdevice->name);

		release_dma_lock(flags);

		/*
		 *	Shove the old buffer into an sk_buff. We can't DMA
		 *	directly into one on a PC - it might be above the 16Mb
		 *	boundary. Optimisation - we could check to see if we
		 *	can avoid the copy. Optimisation 2 - make the memcpy
		 *	a copychecksum.
		 */

		skb = dev_alloc_skb(ct);
		if (skb == NULL) {
			c->netdevice->stats.rx_dropped++;
			printk(KERN_WARNING "%s: Memory squeeze.\n",
			       c->netdevice->name);
		} else {
			skb_put(skb, ct);
			skb_copy_to_linear_data(skb, rxb, ct);
			c->netdevice->stats.rx_packets++;
			c->netdevice->stats.rx_bytes += ct;
		}
		c->dma_ready = 1;
	} else {
		RT_LOCK;
		skb = c->skb;

		/*
		 *	The game we play for non DMA is similar. We want to
		 *	get the controller set up for the next packet as fast
		 *	as possible. We potentially only have one byte + the
		 *	fifo length for this. Thus we want to flip to the new
		 *	buffer and then mess around copying and allocating
		 *	things. For the current case it doesn't matter but
		 *	if you build a system where the sync irq isn't blocked
		 *	by the kernel IRQ disable then you need only block the
		 *	sync IRQ for the RT_LOCK area.
		 *
		 */
		ct=c->count;

		c->skb = c->skb2;
		c->count = 0;
		c->max = c->mtu;
		if (c->skb) {
			c->dptr = c->skb->data;
			c->max = c->mtu;
		} else {
			c->count = 0;
			c->max = 0;
		}
		RT_UNLOCK;

		c->skb2 = dev_alloc_skb(c->mtu);
		if (c->skb2 == NULL)
			printk(KERN_WARNING "%s: memory squeeze.\n",
			       c->netdevice->name);
		else
			skb_put(c->skb2, c->mtu);
		c->netdevice->stats.rx_packets++;
		c->netdevice->stats.rx_bytes += ct;
	}
	/*
	 *	If we received a frame we must now process it.
	 */
	if (skb) {
		skb_trim(skb, ct);
		c->rx_function(c, skb);
	} else {
		c->netdevice->stats.rx_dropped++;
		printk(KERN_ERR "%s: Lost a frame\n", c->netdevice->name);
	}
}
Example 30
int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c)
{
	unsigned long cflags, dflags;

	printk("Opening sync interface for TX-DMA\n");
	c->sync = 1;
	c->mtu = dev->mtu+64;
	c->count = 0;
	c->skb = NULL;
	c->skb2 = NULL;

	/*
	 *	Allocate the DMA flip buffers. Limit by page size.
	 *	Everyone runs 1500 mtu or less on wan links so this
	 *	should be fine.
	 */

	if(c->mtu  > PAGE_SIZE/2)
		return -EMSGSIZE;

	c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if(c->tx_dma_buf[0]==NULL)
		return -ENOBUFS;

	c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE/2;


	spin_lock_irqsave(c->lock, cflags);

	/*
	 *	Load the PIO receive ring
	 */

	z8530_rx_done(c);
	z8530_rx_done(c);

 	/*
	 *	Load the DMA interfaces up
	 */

	c->rxdma_on = 0;
	c->txdma_on = 0;

	c->tx_dma_used=0;
	c->dma_num=0;
	c->dma_ready=1;
	c->dma_tx = 1;

 	/*
	 *	Enable DMA control mode
	 */

 	/*
	 *	TX DMA via DIR/REQ
 	 */
	c->regs[R14]|= DTRREQ;
	write_zsreg(c, R14, c->regs[R14]);

	c->regs[R1]&= ~TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);

	/*
	 *	Set up the DMA configuration
	 */

	dflags = claim_dma_lock();

	disable_dma(c->txdma);
	clear_dma_ff(c->txdma);
	set_dma_mode(c->txdma, DMA_MODE_WRITE);
	disable_dma(c->txdma);

	release_dma_lock(dflags);

	/*
	 *	Select the DMA interrupt handlers
	 */

	c->rxdma_on = 0;
	c->txdma_on = 1;
	c->tx_dma_used = 1;

	c->irqs = &z8530_txdma_sync;
	z8530_rtsdtr(c,1);
	write_zsreg(c, R3, c->regs[R3]|RxENABLE);
	spin_unlock_irqrestore(c->lock, cflags);

	return 0;
}