Example #1
/* Request needed resources */
static int znet_request_resources (struct net_device *dev)
{
	struct znet_private *znet = dev->priv;
	unsigned long flags;
		
	if (request_irq (dev->irq, &znet_interrupt, 0, "ZNet", dev))
		goto failed;
	if (request_dma (znet->rx_dma, "ZNet rx"))
		goto free_irq;
	if (request_dma (znet->tx_dma, "ZNet tx"))
		goto free_rx_dma;
	if (!request_region (znet->sia_base, znet->sia_size, "ZNet SIA"))
		goto free_tx_dma;
	if (!request_region (dev->base_addr, znet->io_size, "ZNet I/O"))
		goto free_sia;

	return 0;				/* Happy ! */

 free_sia:
	release_region (znet->sia_base, znet->sia_size);
 free_tx_dma:
	flags = claim_dma_lock();
	free_dma (znet->tx_dma);
	release_dma_lock (flags);
 free_rx_dma:
	flags = claim_dma_lock();
	free_dma (znet->rx_dma);
	release_dma_lock (flags);
 free_irq:
	free_irq (dev->irq, dev);
 failed:
	return -1;
}
Example #2
static void handlewrite(struct net_device *dev)
{
	/* called *only* from idle, non-reentrant */
	/* on entry, 0xfb and ltdmabuf holds data */
	int dma = dev->dma;
	int base = dev->base_addr;
	unsigned long flags;
	
	flags=claim_dma_lock();
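	/* program the channel for a memory-to-card copy of the 800-byte block in ltdmabuf */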
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma,DMA_MODE_WRITE);
	set_dma_addr(dma,virt_to_bus(ltdmabuf));
	set_dma_count(dma,800);
	enable_dma(dma);
	release_dma_lock(flags);
	
	inb_p(base+3);
	inb_p(base+2);

	if ( wait_timeout(dev,0xfb) ) {
		flags=claim_dma_lock();
		printk("timed out in handlewrite, dma res %d\n",
			get_dma_residue(dev->dma) );
		release_dma_lock(flags);
	}
}
Example #3
static ssize_t __dma_write(gpib_board_t *board, nec7210_private_t *priv, dma_addr_t address, size_t length)
{
	unsigned long flags, dma_irq_flags;
	int residue = 0;
	int retval = 0;

	spin_lock_irqsave(&board->spinlock, flags);

	/* program dma controller */
	dma_irq_flags = claim_dma_lock();
	disable_dma(priv->dma_channel);
	clear_dma_ff(priv->dma_channel);
	set_dma_count(priv->dma_channel, length);
	set_dma_addr(priv->dma_channel, address);
	set_dma_mode(priv->dma_channel, DMA_MODE_WRITE );
	enable_dma(priv->dma_channel);
	release_dma_lock(dma_irq_flags);

	// enable board's dma for output
	nec7210_set_reg_bits( priv, IMR2, HR_DMAO, HR_DMAO );

	clear_bit(WRITE_READY_BN, &priv->state);
	set_bit(DMA_WRITE_IN_PROGRESS_BN, &priv->state);

	spin_unlock_irqrestore(&board->spinlock, flags);

	// suspend until message is sent
	if(wait_event_interruptible(board->wait, test_bit(DMA_WRITE_IN_PROGRESS_BN, &priv->state) == 0 ||
		test_bit( BUS_ERROR_BN, &priv->state ) || test_bit( DEV_CLEAR_BN, &priv->state ) ||
		test_bit(TIMO_NUM, &board->status)))
	{
		GPIB_DPRINTK( "gpib write interrupted!\n" );
		retval = -ERESTARTSYS;
	}
	if(test_bit(TIMO_NUM, &board->status))
		retval = -ETIMEDOUT;
	if( test_and_clear_bit( DEV_CLEAR_BN, &priv->state ) )
		retval = -EINTR;
	if( test_and_clear_bit( BUS_ERROR_BN, &priv->state ) )
		retval = -EIO;

	// disable board's dma
	nec7210_set_reg_bits( priv, IMR2, HR_DMAO, 0 );

	dma_irq_flags = claim_dma_lock();
	clear_dma_ff(priv->dma_channel);
	disable_dma(priv->dma_channel);
	residue = get_dma_residue(priv->dma_channel);
	release_dma_lock( dma_irq_flags );

	if(residue)
		retval = -EPIPE;

	return retval ? retval : length;
}
Example #4
/* called in irq context */
void DMAbuf_inputintr(int dev)
{
	struct audio_operations *adev = audio_devs[dev];
	struct dma_buffparms *dmap = adev->dmap_in;
	unsigned long flags;

	spin_lock_irqsave(&dmap->lock,flags);

	if (!(dmap->flags & DMA_NODMA)) {
		int chan = dmap->dma, pos, n;
		unsigned long f;
		
		f=claim_dma_lock();
		if(!isa_dma_bridge_buggy)
			disable_dma(dmap->dma);
		clear_dma_ff(chan);
		pos = dmap->bytes_in_use - get_dma_residue(chan);
		if(!isa_dma_bridge_buggy)
			enable_dma(dmap->dma);
		release_dma_lock(f);

		pos = pos / dmap->fragment_size;	/* Actual qhead */
		if (pos < 0 || pos >= dmap->nbufs)
			pos = 0;

		n = 0;
		while (dmap->qtail != pos && ++n < dmap->nbufs)
			do_inputintr(dev);
	} else
		do_inputintr(dev);
	spin_unlock_irqrestore(&dmap->lock,flags);
}
Example #5
static __inline__ int 
NCR53c406a_dma_setup (unsigned char *ptr, 
		      unsigned int count, 
		      unsigned char mode) {
    unsigned limit;
    unsigned long flags = 0;
    
    VDEB(printk("dma: before count=%d   ", count));
    if (dma_chan <=3) {
        if (count > 65536)
            count = 65536;
        limit = 65536 - (((unsigned) ptr) & 0xFFFF);
    } else {
        if (count > (65536<<1)) 
            count = (65536<<1);
        limit = (65536<<1) - (((unsigned) ptr) & 0x1FFFF);
    }
    
    if (count > limit) count = limit;
    
    VDEB(printk("after count=%d\n", count));
    if ((count & 1) || (((unsigned) ptr) & 1))
        panic ("NCR53c406a: attempted unaligned DMA transfer\n"); 
    
    flags=claim_dma_lock();
    disable_dma(dma_chan);
    clear_dma_ff(dma_chan);
    set_dma_addr(dma_chan, (long) ptr);
    set_dma_count(dma_chan, count);
    set_dma_mode(dma_chan, mode);
    enable_dma(dma_chan);
    release_dma_lock(flags);    
    
    return count;
}
Example #6
int labpc_init_dma_chan(struct comedi_device *dev, unsigned int dma_chan)
{
	struct labpc_private *devpriv = dev->private;
	void *dma_buffer;
	unsigned long dma_flags;
	int ret;

	if (dma_chan != 1 && dma_chan != 3)
		return -EINVAL;

	dma_buffer = kmalloc(dma_buffer_size, GFP_KERNEL | GFP_DMA);
	if (!dma_buffer)
		return -ENOMEM;

	ret = request_dma(dma_chan, dev->board_name);
	if (ret) {
		kfree(dma_buffer);
		return ret;
	}

	devpriv->dma_buffer = dma_buffer;
	devpriv->dma_chan = dma_chan;
	devpriv->dma_addr = virt_to_bus(devpriv->dma_buffer);

	dma_flags = claim_dma_lock();
	disable_dma(devpriv->dma_chan);
	set_dma_mode(devpriv->dma_chan, DMA_MODE_READ);
	release_dma_lock(dma_flags);

	return 0;
}
Example #7
static void wbsd_request_end(struct wbsd_host *host, struct mmc_request *mrq)
{
	unsigned long dmaflags;

	if (host->dma >= 0) {
		/*
		 * Release ISA DMA controller.
		 */
		dmaflags = claim_dma_lock();
		disable_dma(host->dma);
		clear_dma_ff(host->dma);
		release_dma_lock(dmaflags);

		/*
		 * Disable DMA on host.
		 */
		wbsd_write_index(host, WBSD_IDX_DMA, 0);
	}

	host->mrq = NULL;

	/*
	 * MMC layer might call back into the driver so first unlock.
	 */
	spin_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	spin_lock(&host->lock);
}
Example #8
/**
 * snd_dma_pointer - return the current pointer to DMA transfer buffer in bytes
 * @dma: the dma number
 * @size: the dma transfer size
 *
 * Returns the current pointer in DMA transfer buffer in bytes
 */
unsigned int snd_dma_pointer(unsigned long dma, unsigned int size)
{
	unsigned long flags;
	unsigned int result, result1;

	flags = claim_dma_lock();
	clear_dma_ff(dma);
	if (!isa_dma_bridge_buggy)
		disable_dma(dma);
	result = get_dma_residue(dma);
	/*
	 * HACK - read the counter again and choose higher value in order to
	 * avoid reading during counter lower byte roll over if the
	 * isa_dma_bridge_buggy is set.
	 */
	result1 = get_dma_residue(dma);
	if (!isa_dma_bridge_buggy)
		enable_dma(dma);
	release_dma_lock(flags);
	if (unlikely(result < result1))
		result = result1;
#ifdef CONFIG_SND_DEBUG
	if (result > size)
		snd_printk(KERN_ERR "pointer (0x%x) for DMA #%ld is greater than transfer size (0x%x)\n", result, dma, size);
#endif
	if (result >= size || result == 0)
		return 0;
	else
		return size - result;
}
Example #9
void DMAbuf_outputintr(int dev, int notify_only)
{
	struct audio_operations *adev = audio_devs[dev];
	unsigned long flags;
	struct dma_buffparms *dmap = adev->dmap_out;

	save_flags(flags);
	cli();
	if (!(dmap->flags & DMA_NODMA)) {
		int chan = dmap->dma, pos, n;
		unsigned long f;
		
		f=claim_dma_lock();
		
		if(!isa_dma_bridge_buggy)
			disable_dma(dmap->dma);
		clear_dma_ff(chan);
		pos = dmap->bytes_in_use - get_dma_residue(chan);
		if(!isa_dma_bridge_buggy)
			enable_dma(dmap->dma);
		release_dma_lock(f);
		
		pos = pos / dmap->fragment_size;	/* Actual qhead */
		if (pos < 0 || pos >= dmap->nbufs)
			pos = 0;
		n = 0;
		while (dmap->qhead != pos && n++ < dmap->nbufs)
			do_outputintr(dev, notify_only);
	}
	else
		do_outputintr(dev, notify_only);
	restore_flags(flags);
}
Example #10
static void receive_packet(struct net_device *dev, int len)
{
	int rlen;
	elp_device *adapter = dev->priv;
	void *target;
	struct sk_buff *skb;
	unsigned long flags;

	rlen = (len + 1) & ~1;
	skb = dev_alloc_skb(rlen + 2);

	if (!skb) {
		printk(KERN_WARNING "%s: memory squeeze, dropping packet\n", dev->name);
		target = adapter->dma_buffer;
		adapter->current_dma.target = NULL;
		/* FIXME: stats */
		return;
	}

	skb_reserve(skb, 2);
	target = skb_put(skb, rlen);
	if ((unsigned long)(target + rlen) >= MAX_DMA_ADDRESS) {
		adapter->current_dma.target = target;
		target = adapter->dma_buffer;
	} else {
		adapter->current_dma.target = NULL;
	}

	/* if this happens, we die */
	if (test_and_set_bit(0, (void *) &adapter->dmaing))
		printk(KERN_ERR "%s: rx blocked, DMA in progress, dir %d\n", dev->name, adapter->current_dma.direction);

	skb->dev = dev;
	adapter->current_dma.direction = 0;
	adapter->current_dma.length = rlen;
	adapter->current_dma.skb = skb;
	adapter->current_dma.start_time = jiffies;

	outb_control(adapter->hcr_val | DIR | TCEN | DMAE, dev);

	flags=claim_dma_lock();
	disable_dma(dev->dma);
	clear_dma_ff(dev->dma);
	set_dma_mode(dev->dma, 0x04);	/* dma read */
	set_dma_addr(dev->dma, isa_virt_to_bus(target));
	set_dma_count(dev->dma, rlen);
	enable_dma(dev->dma);
	release_dma_lock(flags);

	if (elp_debug >= 3) {
		printk(KERN_DEBUG "%s: rx DMA transfer started\n", dev->name);
	}

	if (adapter->rx_active)
		adapter->rx_active--;

	if (!adapter->busy)
		printk(KERN_WARNING "%s: receive_packet called, busy not set.\n", dev->name);
}
Example #11
static void wbsd_finish_data(struct wbsd_host* host, struct mmc_data* data)
{
	unsigned long dmaflags;
	int count;
	
	WARN_ON(host->mrq == NULL);

	/*
	 * Send a stop command if needed.
	 */
	if (data->stop)
		wbsd_send_command(host, data->stop);
	
	/*
	 * DMA transfer?
	 */
	if (host->dma >= 0)
	{
		/*
		 * Disable DMA on the host.
		 */
		wbsd_write_index(host, WBSD_IDX_DMA, 0);
		
		/*
		 * Turn off the ISA DMA controller.
		 */
		dmaflags = claim_dma_lock();
		disable_dma(host->dma);
		clear_dma_ff(host->dma);
		count = get_dma_residue(host->dma);
		release_dma_lock(dmaflags);
		
		/*
		 * Any leftover data?
		 */
		if (count)
		{
			printk(KERN_ERR DRIVER_NAME ": Incomplete DMA "
				"transfer. %d bytes left.\n", count);
			
			data->error = MMC_ERR_FAILED;
		}
		else
		{
			/*
			 * Transfer data from DMA buffer to
			 * SG list.
			 */
			if (data->flags & MMC_DATA_READ)
				wbsd_dma_to_sg(host, data);
			
			data->bytes_xfered = host->size;
		}
	}
	
	DBGF("Ending data transfer (%d bytes)\n", data->bytes_xfered);
	
	wbsd_request_end(host, host->mrq);
}
Example #12
static bool send_packet(struct net_device *dev, struct sk_buff *skb)
{
	elp_device *adapter = netdev_priv(dev);
	unsigned long target;
	unsigned long flags;

	unsigned int nlen = (((skb->len < 60) ? 60 : skb->len) + 1) & (~1);

	if (test_and_set_bit(0, (void *) &adapter->busy)) {
		if (elp_debug >= 2)
			pr_debug("%s: transmit blocked\n", dev->name);
		return false;
	}

	dev->stats.tx_bytes += nlen;

	adapter->tx_pcb.command = CMD_TRANSMIT_PACKET;
	adapter->tx_pcb.length = sizeof(struct Xmit_pkt);
	adapter->tx_pcb.data.xmit_pkt.buf_ofs
	    = adapter->tx_pcb.data.xmit_pkt.buf_seg = 0;	
	adapter->tx_pcb.data.xmit_pkt.pkt_len = nlen;

	if (!send_pcb(dev, &adapter->tx_pcb)) {
		adapter->busy = 0;
		return false;
	}
	
	if (test_and_set_bit(0, (void *) &adapter->dmaing))
		pr_debug("%s: tx: DMA %d in progress\n", dev->name, adapter->current_dma.direction);

	adapter->current_dma.direction = 1;
	adapter->current_dma.start_time = jiffies;

	if ((unsigned long)(skb->data + nlen) >= MAX_DMA_ADDRESS || nlen != skb->len) {
		skb_copy_from_linear_data(skb, adapter->dma_buffer, nlen);
		memset(adapter->dma_buffer+skb->len, 0, nlen-skb->len);
		target = isa_virt_to_bus(adapter->dma_buffer);
	}
	else {
		target = isa_virt_to_bus(skb->data);
	}
	adapter->current_dma.skb = skb;

	flags=claim_dma_lock();
	disable_dma(dev->dma);
	clear_dma_ff(dev->dma);
	set_dma_mode(dev->dma, 0x48);	/* dma memory -> io */
	set_dma_addr(dev->dma, target);
	set_dma_count(dev->dma, nlen);
	outb_control(adapter->hcr_val | DMAE | TCEN, dev);
	enable_dma(dev->dma);
	release_dma_lock(flags);

	if (elp_debug >= 3)
		pr_debug("%s: DMA transfer started\n", dev->name);

	return true;
}
Example #13
int z8530_sync_dma_close(struct net_device *dev, struct z8530_channel *c)
{
	u8 chk;
	unsigned long flags;

	c->irqs = &z8530_nop;
	c->max = 0;
	c->sync = 0;

	/*
	 *	Disable the PC DMA channels
	 */

	flags=claim_dma_lock();
	disable_dma(c->rxdma);
	clear_dma_ff(c->rxdma);

	c->rxdma_on = 0;

	disable_dma(c->txdma);
	clear_dma_ff(c->txdma);
	release_dma_lock(flags);

	c->txdma_on = 0;
	c->tx_dma_used = 0;

	spin_lock_irqsave(c->lock, flags);

	/*
	 *	Disable DMA control mode
	 */

	c->regs[R1]&= ~WT_RDY_ENAB;
	write_zsreg(c, R1, c->regs[R1]);
	c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
	c->regs[R1]|= INT_ALL_Rx;
	write_zsreg(c, R1, c->regs[R1]);
	c->regs[R14]&= ~DTRREQ;
	write_zsreg(c, R14, c->regs[R14]);

	if(c->rx_buf[0])
	{
		free_page((unsigned long)c->rx_buf[0]);
		c->rx_buf[0]=NULL;
	}
	if(c->tx_dma_buf[0])
	{
		free_page((unsigned  long)c->tx_dma_buf[0]);
		c->tx_dma_buf[0]=NULL;
	}
	chk=read_zsreg(c,R0);
	write_zsreg(c, R3, c->regs[R3]);
	z8530_rtsdtr(c,0);

	spin_unlock_irqrestore(c->lock, flags);

	return 0;
}
Example #14
/**
 * snd_dma_disable - stop the ISA DMA transfer
 * @dma: the dma number
 *
 * Stops the ISA DMA transfer.
 */
void snd_dma_disable(unsigned long dma)
{
	unsigned long flags;

	flags = claim_dma_lock();
	clear_dma_ff(dma);
	disable_dma(dma);
	release_dma_lock(flags);
}
Example #15
void labpc_drain_dma(struct comedi_device *dev)
{
	struct labpc_private *devpriv = dev->private;
	struct comedi_subdevice *s = dev->read_subdev;
	struct comedi_async *async = s->async;
	struct comedi_cmd *cmd = &async->cmd;
	int status;
	unsigned long flags;
	unsigned int max_points, num_points, residue, leftover;
	int i;

	status = devpriv->stat1;

	flags = claim_dma_lock();
	disable_dma(devpriv->dma_chan);
	/* clear flip-flop to make sure 2-byte registers for
	 * count and address get set correctly */
	clear_dma_ff(devpriv->dma_chan);

	/* figure out how many points to read */
	max_points = devpriv->dma_transfer_size / sample_size;
	/* residue is the number of points left to be done on the dma
	 * transfer.  It should always be zero at this point unless
	 * the stop_src is set to external triggering.
	 */
	residue = get_dma_residue(devpriv->dma_chan) / sample_size;
	num_points = max_points - residue;
	if (cmd->stop_src == TRIG_COUNT && devpriv->count < num_points)
		num_points = devpriv->count;

	/* figure out how many points will be stored next time */
	leftover = 0;
	if (cmd->stop_src != TRIG_COUNT) {
		leftover = devpriv->dma_transfer_size / sample_size;
	} else if (devpriv->count > num_points) {
		leftover = devpriv->count - num_points;
		if (leftover > max_points)
			leftover = max_points;
	}

	/* write data to comedi buffer */
	for (i = 0; i < num_points; i++)
		cfc_write_to_buffer(s, devpriv->dma_buffer[i]);

	if (cmd->stop_src == TRIG_COUNT)
		devpriv->count -= num_points;

	/* set address and count for next transfer */
	set_dma_addr(devpriv->dma_chan, devpriv->dma_addr);
	set_dma_count(devpriv->dma_chan, leftover * sample_size);
	release_dma_lock(flags);

	async->events |= COMEDI_CB_BLOCK;
}
Example #16
int DMAbuf_get_buffer_pointer(int dev, struct dma_buffparms *dmap, int direction)
{
	/*
	 *	Try to approximate the active byte position of the DMA pointer within the
	 *	buffer area as well as possible.
	 */

	int pos;
	unsigned long flags;
	unsigned long f;

	save_flags(flags);
	cli();
	if (!(dmap->flags & DMA_ACTIVE))
		pos = 0;
	else {
		int chan = dmap->dma;
		
		f=claim_dma_lock();
		clear_dma_ff(chan);
		
		if(!isa_dma_bridge_buggy)
			disable_dma(dmap->dma);
		
		pos = get_dma_residue(chan);
		
		pos = dmap->bytes_in_use - pos;

		if (!(dmap->mapping_flags & DMA_MAP_MAPPED)) {
			if (direction == DMODE_OUTPUT) {
				if (dmap->qhead == 0)
					if (pos > dmap->fragment_size)
						pos = 0;
			} else {
				if (dmap->qtail == 0)
					if (pos > dmap->fragment_size)
						pos = 0;
			}
		}
		if (pos < 0)
			pos = 0;
		if (pos >= dmap->bytes_in_use)
			pos = 0;
		
		if(!isa_dma_bridge_buggy)
			enable_dma(dmap->dma);
			
		release_dma_lock(f);
	}
	restore_flags(flags);
	/* printk( "%04x ",  pos); */

	return pos;
}
Example #17
static __inline__ int 
NCR53c406a_dma_residual (void) {
    register int tmp;
    unsigned long flags;

    flags=claim_dma_lock();
    clear_dma_ff(dma_chan);
    tmp = get_dma_residue(dma_chan);
    release_dma_lock(flags);
    
    return tmp;
}
Example #18
static void znet_release_resources (struct net_device *dev)
{
	struct znet_private *znet = netdev_priv(dev);
	unsigned long flags;

	release_region (znet->sia_base, znet->sia_size);
	release_region (dev->base_addr, znet->io_size);
	flags = claim_dma_lock();
	free_dma (znet->tx_dma);
	free_dma (znet->rx_dma);
	release_dma_lock (flags);
	free_irq (dev->irq, dev);
}
Example #19
static int sscape_start_dma(int chan, unsigned long physaddr, int count, int dma_mode)
{
	unsigned long flags;

	flags = claim_dma_lock();
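	/* classic 8237 sequence: halt the channel and clear the flip-flop,
	 * then load mode, address and count before re-enabling it */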
	disable_dma(chan);
	clear_dma_ff(chan);
	set_dma_mode(chan, dma_mode);
	set_dma_addr(chan, physaddr);
	set_dma_count(chan, count);
	enable_dma(chan);
	release_dma_lock(flags);
	return 0;
}
Example #20
static inline void
jz_mmc_start_dma(int chan, unsigned long phyaddr, int count, int mode)
{
	unsigned long flags;

	flags = claim_dma_lock();
	disable_dma(chan);
	clear_dma_ff(chan);
	jz_set_dma_block_size(chan, 32);
	set_dma_mode(chan, mode);
	set_dma_addr(chan, phyaddr);
	set_dma_count(chan, count + 31);
	enable_dma(chan);
	release_dma_lock(flags);
}
Example #21
static void close_dmap(struct audio_operations *adev, struct dma_buffparms *dmap)
{
	unsigned long flags;
	
	sound_close_dma(dmap->dma);
	if (dmap->flags & DMA_BUSY)
		dmap->dma_mode = DMODE_NONE;
	dmap->flags &= ~DMA_BUSY;
	
	flags=claim_dma_lock();
	disable_dma(dmap->dma);
	release_dma_lock(flags);
	
	if (sound_dmap_flag == DMAP_FREE_ON_CLOSE)
		sound_free_dmap(dmap);
}
Example #22
/**
 * snd_dma_program - program an ISA DMA transfer
 * @dma: the dma number
 * @addr: the physical address of the buffer
 * @size: the DMA transfer size
 * @mode: the DMA transfer mode, DMA_MODE_XXX
 *
 * Programs an ISA DMA transfer for the given buffer.
 */
void snd_dma_program(unsigned long dma,
		     unsigned long addr, unsigned int size,
                     unsigned short mode)
{
	unsigned long flags;

	flags = claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma, mode);
	set_dma_addr(dma, addr);
	set_dma_count(dma, size);
	if (!(mode & DMA_MODE_NO_ENABLE))
		enable_dma(dma);
	release_dma_lock(flags);
}
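The three ALSA ISA-DMA helpers shown in examples 8, 14 and 22 are meant to be used together. The sketch below is illustrative only and not taken from any of the projects listed here; the function names and the buf_phys/buf_bytes parameters are hypothetical stand-ins for a driver's own buffer bookkeeping.

static int example_start_playback(unsigned long dma, unsigned long buf_phys,
				  unsigned int buf_bytes)
{
	if (request_dma(dma, "example-card"))	/* claim the ISA channel */
		return -EBUSY;

	/* single-cycle memory-to-device transfer covering the whole buffer */
	snd_dma_program(dma, buf_phys, buf_bytes, DMA_MODE_WRITE);
	return 0;
}

static void example_stop_playback(unsigned long dma, unsigned int buf_bytes)
{
	/* how far the controller got, in bytes from the start of the buffer */
	unsigned int pos = snd_dma_pointer(dma, buf_bytes);

	snd_dma_disable(dma);			/* halt the channel */
	free_dma(dma);
	pr_info("example-card: playback stopped at byte %u\n", pos);
}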
Example #23
File: z85230.c Project: 274914765/C
static void z8530_dma_status(struct z8530_channel *chan)
{
    u8 status, altered;

    status=read_zsreg(chan, R0);
    altered=chan->status^status;
    
    chan->status=status;


    if(chan->dma_tx)
    {
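        /* if the transmitter hit end-of-message, stop the TX DMA channel
         * and complete the outgoing frame */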
        if(status&TxEOM)
        {
            unsigned long flags;
    
            flags=claim_dma_lock();
            disable_dma(chan->txdma);
            clear_dma_ff(chan->txdma);    
            chan->txdma_on=0;
            release_dma_lock(flags);
            z8530_tx_done(chan);
        }
    }

    if(altered&chan->dcdcheck)
    {
        if(status&chan->dcdcheck)
        {
            printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);
            write_zsreg(chan, R3, chan->regs[3]|RxENABLE);
            if(chan->netdevice &&
                ((chan->netdevice->type == ARPHRD_HDLC) ||
                (chan->netdevice->type == ARPHRD_PPP)))
                sppp_reopen(chan->netdevice);
        }
        else
        {
            printk(KERN_INFO "%s:DCD lost\n", chan->dev->name);
            write_zsreg(chan, R3, chan->regs[3]&~RxENABLE);
            z8530_flush_fifo(chan);
        }
    }    

    write_zsctrl(chan, RES_EXT_INT);
    write_zsctrl(chan, RES_H_IUS);
}
Example #24
static int sound_start_dma(struct dma_buffparms *dmap, unsigned long physaddr, int count, int dma_mode)
{
	unsigned long flags;
	int chan = dmap->dma;

	/* printk( "Start DMA%d %d, %d\n",  chan,  (int)(physaddr-dmap->raw_buf_phys),  count); */

	flags = claim_dma_lock();
	disable_dma(chan);
	clear_dma_ff(chan);
	set_dma_mode(chan, dma_mode);
	set_dma_addr(chan, physaddr);
	set_dma_count(chan, count);
	enable_dma(chan);
	release_dma_lock(flags);

	return 0;
}
Example #25
static void dma_reset_output(int dev)
{
	struct audio_operations *adev = audio_devs[dev];
	unsigned long flags,f ;
	struct dma_buffparms *dmap = adev->dmap_out;

	if (!(dmap->flags & DMA_STARTED))	/* DMA is not active */
		return;

	/*
	 *	First wait until the current fragment has been played completely
	 */
	spin_lock_irqsave(&dmap->lock,flags);
	adev->dmap_out->flags |= DMA_SYNCING;

	adev->dmap_out->underrun_count = 0;
	if (!signal_pending(current) && adev->dmap_out->qlen && 
	    adev->dmap_out->underrun_count == 0){
		spin_unlock_irqrestore(&dmap->lock,flags);
		interruptible_sleep_on_timeout(&adev->out_sleeper,
					       dmabuf_timeout(dmap));
		spin_lock_irqsave(&dmap->lock,flags);
	}
	adev->dmap_out->flags &= ~(DMA_SYNCING | DMA_ACTIVE);

	/*
	 *	Finally shut the device off
	 */
	if (!(adev->flags & DMA_DUPLEX) || !adev->d->halt_output)
		adev->d->halt_io(dev);
	else
		adev->d->halt_output(dev);
	adev->dmap_out->flags &= ~DMA_STARTED;
	
	f=claim_dma_lock();
	clear_dma_ff(dmap->dma);
	disable_dma(dmap->dma);
	release_dma_lock(f);
	
	dmap->byte_counter = 0;
	reorganize_buffers(dev, adev->dmap_out, 0);
	dmap->qlen = dmap->qhead = dmap->qtail = dmap->user_counter = 0;
	spin_unlock_irqrestore(&dmap->lock,flags);
}
Example #26
int tms380tr_close(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);
	netif_stop_queue(dev);
	
	del_timer(&tp->timer);

	

	tp->HaltInProgress 	= 1;
	tms380tr_exec_cmd(dev, OC_CLOSE);
	tp->timer.expires	= jiffies + 1*HZ;
	tp->timer.function 	= tms380tr_timer_end_wait;
	tp->timer.data 		= (unsigned long)dev;
	add_timer(&tp->timer);

	tms380tr_enable_interrupts(dev);

	tp->Sleeping = 1;
	interruptible_sleep_on(&tp->wait_for_tok_int);
	tp->TransmitCommandActive = 0;
    
	del_timer(&tp->timer);
	tms380tr_disable_interrupts(dev);
   
#ifdef CONFIG_ISA
	if(dev->dma > 0) 
	{
		unsigned long flags=claim_dma_lock();
		disable_dma(dev->dma);
		release_dma_lock(flags);
	}
#endif
	
	SIFWRITEW(0xFF00, SIFCMD);
#if 0
	if(dev->dma > 0) 
		SIFWRITEB(0xff, POSREG);
#endif
	tms380tr_cancel_tx_queue(tp);

	return (0);
}
Example #27
static void handlecommand(struct net_device *dev)
{
	/* on entry, 0xfa and ltdmacbuf holds command */
	int dma = dev->dma;
	int base = dev->base_addr;
	unsigned long flags;

	flags=claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma,DMA_MODE_WRITE);
	set_dma_addr(dma,virt_to_bus(ltdmacbuf));
	set_dma_count(dma,50);
	enable_dma(dma);
	release_dma_lock(flags);
	inb_p(base+3);
	inb_p(base+2);
	if ( wait_timeout(dev,0xfa) ) printk("timed out in handlecommand\n");
} 
Example #28
/* Check to make sure that a DMA transfer hasn't timed out.  This should
 * never happen in theory, but seems to occur occasionally if the card gets
 * prodded at the wrong time.
 */
static inline void check_3c505_dma(struct net_device *dev)
{
	elp_device *adapter = dev->priv;
	if (adapter->dmaing && time_after(jiffies, adapter->current_dma.start_time + 10)) {
		unsigned long flags, f;
		printk(KERN_ERR "%s: DMA %s timed out, %d bytes left\n", dev->name, adapter->current_dma.direction ? "download" : "upload", get_dma_residue(dev->dma));
		spin_lock_irqsave(&adapter->lock, flags);
		adapter->dmaing = 0;
		adapter->busy = 0;

		f=claim_dma_lock();
		disable_dma(dev->dma);
		release_dma_lock(f);

		if (adapter->rx_active)
			adapter->rx_active--;
		outb_control(adapter->hcr_val & ~(DMAE | TCEN | DIR), dev);
		spin_unlock_irqrestore(&adapter->lock, flags);
	}
}
Example #29
static void i8237A_resume(void)
{
	unsigned long flags;
	int i;

	flags = claim_dma_lock();

	dma_outb(0, DMA1_RESET_REG);
	dma_outb(0, DMA2_RESET_REG);

	for (i = 0; i < 8; i++) {
		set_dma_addr(i, 0x000000);
		
		/* DMA count is a bit weird so this is not 0 */
		set_dma_count(i, 1);
	}

	
	/* Enable cascade DMA or channels 0-3 won't work */
	enable_dma(4);

	release_dma_lock(flags);
}
Example #30
/* read data from the card */
static void handlefd(struct net_device *dev)
{
	int dma = dev->dma;
	int base = dev->base_addr;
	unsigned long flags;

	flags=claim_dma_lock();
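	/* program the channel for a card-to-memory copy of 800 bytes into ltdmabuf */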
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma,DMA_MODE_READ);
	set_dma_addr(dma,virt_to_bus(ltdmabuf));
	set_dma_count(dma,800);
	enable_dma(dma);
	release_dma_lock(flags);

	inb_p(base+3);
	inb_p(base+2);

	if ( wait_timeout(dev,0xfd) ) printk("timed out in handlefd\n");
	sendup_buffer(dev);
}