Example 1
int z8530_sync_dma_close(struct net_device *dev, struct z8530_channel *c)
{
	u8 chk;
	unsigned long flags;

	c->irqs = &z8530_nop;
	c->max = 0;
	c->sync = 0;

	/*
	 *	Disable the PC DMA channels
	 */

	flags=claim_dma_lock();
	disable_dma(c->rxdma);
	clear_dma_ff(c->rxdma);

	c->rxdma_on = 0;

	disable_dma(c->txdma);
	clear_dma_ff(c->txdma);
	release_dma_lock(flags);

	c->txdma_on = 0;
	c->tx_dma_used = 0;

	spin_lock_irqsave(c->lock, flags);

	/*
	 *	Disable DMA control mode
	 */

	c->regs[R1]&= ~WT_RDY_ENAB;
	write_zsreg(c, R1, c->regs[R1]);
	c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
	c->regs[R1]|= INT_ALL_Rx;
	write_zsreg(c, R1, c->regs[R1]);
	c->regs[R14]&= ~DTRREQ;
	write_zsreg(c, R14, c->regs[R14]);

	if(c->rx_buf[0])
	{
		free_page((unsigned long)c->rx_buf[0]);
		c->rx_buf[0]=NULL;
	}
	if(c->tx_dma_buf[0])
	{
		free_page((unsigned long)c->tx_dma_buf[0]);
		c->tx_dma_buf[0]=NULL;
	}
	chk = read_zsreg(c, R0);	/* dummy status read; the value is not used */
	write_zsreg(c, R3, c->regs[R3]);
	z8530_rtsdtr(c,0);

	spin_unlock_irqrestore(c->lock, flags);

	return 0;
}
Example 2
static ssize_t __dma_write(gpib_board_t *board, nec7210_private_t *priv, dma_addr_t address, size_t length)
{
	unsigned long flags, dma_irq_flags;
	int residue = 0;
	int retval = 0;

	spin_lock_irqsave(&board->spinlock, flags);

	/* program dma controller */
	dma_irq_flags = claim_dma_lock();
	disable_dma(priv->dma_channel);
	clear_dma_ff(priv->dma_channel);
	set_dma_count(priv->dma_channel, length);
	set_dma_addr(priv->dma_channel, address);
	set_dma_mode(priv->dma_channel, DMA_MODE_WRITE );
	enable_dma(priv->dma_channel);
	release_dma_lock(dma_irq_flags);

	// enable board's dma for output
	nec7210_set_reg_bits( priv, IMR2, HR_DMAO, HR_DMAO );

	clear_bit(WRITE_READY_BN, &priv->state);
	set_bit(DMA_WRITE_IN_PROGRESS_BN, &priv->state);

	spin_unlock_irqrestore(&board->spinlock, flags);

	// suspend until message is sent
	if(wait_event_interruptible(board->wait, test_bit(DMA_WRITE_IN_PROGRESS_BN, &priv->state) == 0 ||
		test_bit( BUS_ERROR_BN, &priv->state ) || test_bit( DEV_CLEAR_BN, &priv->state ) ||
		test_bit(TIMO_NUM, &board->status)))
	{
		GPIB_DPRINTK( "gpib write interrupted!\n" );
		retval = -ERESTARTSYS;
	}
	if(test_bit(TIMO_NUM, &board->status))
		retval = -ETIMEDOUT;
	if( test_and_clear_bit( DEV_CLEAR_BN, &priv->state ) )
		retval = -EINTR;
	if( test_and_clear_bit( BUS_ERROR_BN, &priv->state ) )
		retval = -EIO;

	// disable board's dma
	nec7210_set_reg_bits( priv, IMR2, HR_DMAO, 0 );

	dma_irq_flags = claim_dma_lock();
	clear_dma_ff(priv->dma_channel);
	disable_dma(priv->dma_channel);
	residue = get_dma_residue(priv->dma_channel);
	release_dma_lock( dma_irq_flags );

	if(residue)
		retval = -EPIPE;

	return retval ? retval : length;
}
Example 3
static __inline__ int
NCR53c406a_dma_setup (unsigned char *ptr, 
		      unsigned int count, 
		      unsigned char mode) {
    unsigned limit;
    unsigned long flags = 0;
    
    VDEB(printk("dma: before count=%d   ", count));
    if (dma_chan <=3) {
        if (count > 65536)
            count = 65536;
        limit = 65536 - (((unsigned) ptr) & 0xFFFF);
    } else {
        if (count > (65536<<1)) 
            count = (65536<<1);
        limit = (65536<<1) - (((unsigned) ptr) & 0x1FFFF);
    }
    
    if (count > limit) count = limit;
    
    VDEB(printk("after count=%d\n", count));
    if ((count & 1) || (((unsigned) ptr) & 1))
        panic ("NCR53c406a: attempted unaligned DMA transfer\n"); 
    
    flags=claim_dma_lock();
    disable_dma(dma_chan);
    clear_dma_ff(dma_chan);
    set_dma_addr(dma_chan, (long) ptr);
    set_dma_count(dma_chan, count);
    set_dma_mode(dma_chan, mode);
    enable_dma(dma_chan);
    release_dma_lock(flags);    
    
    return count;
}
Example 4
void DMAbuf_outputintr(int dev, int notify_only)
{
	struct audio_operations *adev = audio_devs[dev];
	unsigned long flags;
	struct dma_buffparms *dmap = adev->dmap_out;

	save_flags(flags);
	cli();
	if (!(dmap->flags & DMA_NODMA)) {
		int chan = dmap->dma, pos, n;
		unsigned long f;
		
		f=claim_dma_lock();
		
		if(!isa_dma_bridge_buggy)
			disable_dma(dmap->dma);
		clear_dma_ff(chan);
		pos = dmap->bytes_in_use - get_dma_residue(chan);
		if(!isa_dma_bridge_buggy)
			enable_dma(dmap->dma);
		release_dma_lock(f);
		
		pos = pos / dmap->fragment_size;	/* Actual qhead */
		if (pos < 0 || pos >= dmap->nbufs)
			pos = 0;
		n = 0;
		while (dmap->qhead != pos && n++ < dmap->nbufs)
			do_outputintr(dev, notify_only);
	}
	else
		do_outputintr(dev, notify_only);
	restore_flags(flags);
}
Example 5
/* called in irq context */
void DMAbuf_inputintr(int dev)
{
	struct audio_operations *adev = audio_devs[dev];
	struct dma_buffparms *dmap = adev->dmap_in;
	unsigned long flags;

	spin_lock_irqsave(&dmap->lock,flags);

	if (!(dmap->flags & DMA_NODMA)) {
		int chan = dmap->dma, pos, n;
		unsigned long f;
		
		f=claim_dma_lock();
		if(!isa_dma_bridge_buggy)
			disable_dma(dmap->dma);
		clear_dma_ff(chan);
		pos = dmap->bytes_in_use - get_dma_residue(chan);
		if(!isa_dma_bridge_buggy)
			enable_dma(dmap->dma);
		release_dma_lock(f);

		pos = pos / dmap->fragment_size;	/* Actual qhead */
		if (pos < 0 || pos >= dmap->nbufs)
			pos = 0;

		n = 0;
		while (dmap->qtail != pos && ++n < dmap->nbufs)
			do_inputintr(dev);
	} else
		do_inputintr(dev);
	spin_unlock_irqrestore(&dmap->lock,flags);
}
Example 6
static void wbsd_request_end(struct wbsd_host *host, struct mmc_request *mrq)
{
	unsigned long dmaflags;

	if (host->dma >= 0) {
		/*
		 * Release ISA DMA controller.
		 */
		dmaflags = claim_dma_lock();
		disable_dma(host->dma);
		clear_dma_ff(host->dma);
		release_dma_lock(dmaflags);

		/*
		 * Disable DMA on host.
		 */
		wbsd_write_index(host, WBSD_IDX_DMA, 0);
	}

	host->mrq = NULL;

	/*
	 * MMC layer might call back into the driver so first unlock.
	 */
	spin_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	spin_lock(&host->lock);
}
Example 7
static void handlewrite(struct net_device *dev)
{
	/* called *only* from idle, non-reentrant */
	/* on entry, 0xfb and ltdmabuf holds data */
	int dma = dev->dma;
	int base = dev->base_addr;
	unsigned long flags;
	
	flags=claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma,DMA_MODE_WRITE);
	set_dma_addr(dma,virt_to_bus(ltdmabuf));
	set_dma_count(dma,800);
	enable_dma(dma);
	release_dma_lock(flags);
	
	inb_p(base+3);
	inb_p(base+2);

	if ( wait_timeout(dev,0xfb) ) {
		flags=claim_dma_lock();
		printk("timed out in handlewrite, dma res %d\n",
			get_dma_residue(dev->dma) );
		release_dma_lock(flags);
	}
}
Example 8
static void setup_tx_dma(struct pi_local *lp, int length)
{
    unsigned long dma_abs;
    unsigned long flags;
    unsigned long dmachan;

    save_flags(flags);
    cli();

    dmachan = lp->dmachan;
    dma_abs = (unsigned long) (lp->txdmabuf);

    if(!valid_dma_page(dma_abs, DMA_BUFF_SIZE + sizeof(struct mbuf)))
	panic("PI: TX buffer violates DMA boundary!");

    disable_dma(dmachan);
    /* Set DMA mode register to single transfers, incrementing address,
     *  no auto init, reads
     */
    set_dma_mode(dmachan, DMA_MODE_WRITE);
    clear_dma_ff(dmachan);
    set_dma_addr(dmachan, dma_abs);
    /* output byte count */
    set_dma_count(dmachan, length);

    restore_flags(flags);
}
Example 9
/**
 * snd_dma_pointer - return the current pointer to DMA transfer buffer in bytes
 * @dma: the dma number
 * @size: the dma transfer size
 *
 * Returns the current pointer in the DMA transfer buffer, in bytes
 */
unsigned int snd_dma_pointer(unsigned long dma, unsigned int size)
{
	unsigned long flags;
	unsigned int result, result1;

	flags = claim_dma_lock();
	clear_dma_ff(dma);
	if (!isa_dma_bridge_buggy)
		disable_dma(dma);
	result = get_dma_residue(dma);
	/*
	 * HACK - read the counter again and choose higher value in order to
	 * avoid reading during counter lower byte roll over if the
	 * isa_dma_bridge_buggy is set.
	 */
	result1 = get_dma_residue(dma);
	if (!isa_dma_bridge_buggy)
		enable_dma(dma);
	release_dma_lock(flags);
	if (unlikely(result < result1))
		result = result1;
#ifdef CONFIG_SND_DEBUG
	if (result > size)
		snd_printk(KERN_ERR "pointer (0x%x) for DMA #%ld is greater than transfer size (0x%x)\n", result, dma, size);
#endif
	if (result >= size || result == 0)
		return 0;
	else
		return size - result;
}
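For context, a minimal usage sketch (not taken from the examples above): a hypothetical ALSA PCM pointer callback that converts the byte offset reported by snd_dma_pointer() into frames. The my_chip structure and its dma/dma_size fields are illustrative assumptions; only the snd_dma_pointer() call itself is the helper shown here.

/* Hypothetical pointer callback for an ISA sound chip.
 * chip->dma and chip->dma_size are assumed fields, not part of the API above. */
static snd_pcm_uframes_t my_pcm_pointer(struct snd_pcm_substream *substream)
{
	struct my_chip *chip = snd_pcm_substream_chip(substream);
	size_t ptr = snd_dma_pointer(chip->dma, chip->dma_size);

	return bytes_to_frames(substream->runtime, ptr);
}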
Example 10
static void setup_DMA(void)
{
    unsigned long addr,count;
    unsigned char dma_code;

    dma_code = DMA_WRITE;
    if (command == FD_READ)
        dma_code = DMA_READ;
    if (command == FD_FORMAT) {
        addr = (long) tmp_floppy_area;
        count = floppy->sect*4;
    } else {
        addr = (long) CURRENT->buffer;
        count = 1024;
    }
    if (read_track) {
        /* mark buffer-track bad, in case all this fails.. */
        buffer_drive = buffer_track = -1;
        count = floppy->sect*floppy->head*512;
        addr = (long) floppy_track_buffer;
    } else if (addr >= LAST_DMA_ADDR) {
        addr = (long) tmp_floppy_area;
        if (command == FD_WRITE)
            copy_buffer(CURRENT->buffer,tmp_floppy_area);
    }
    cli();
    disable_dma(FLOPPY_DMA);
    clear_dma_ff(FLOPPY_DMA);
    set_dma_mode(FLOPPY_DMA, (command == FD_READ)? DMA_MODE_READ : DMA_MODE_WRITE);
    set_dma_addr(FLOPPY_DMA, addr);
    set_dma_count(FLOPPY_DMA, count);
    enable_dma(FLOPPY_DMA);
    sti();
}
Example 11
static void receive_packet(struct net_device *dev, int len)
{
	int rlen;
	elp_device *adapter = dev->priv;
	void *target;
	struct sk_buff *skb;
	unsigned long flags;

	rlen = (len + 1) & ~1;
	skb = dev_alloc_skb(rlen + 2);

	if (!skb) {
		printk(KERN_WARNING "%s: memory squeeze, dropping packet\n", dev->name);
		target = adapter->dma_buffer;
		adapter->current_dma.target = NULL;
		/* FIXME: stats */
		return;
	}

	skb_reserve(skb, 2);
	target = skb_put(skb, rlen);
	if ((unsigned long)(target + rlen) >= MAX_DMA_ADDRESS) {
		adapter->current_dma.target = target;
		target = adapter->dma_buffer;
	} else {
		adapter->current_dma.target = NULL;
	}

	/* if this happens, we die */
	if (test_and_set_bit(0, (void *) &adapter->dmaing))
		printk(KERN_ERR "%s: rx blocked, DMA in progress, dir %d\n", dev->name, adapter->current_dma.direction);

	skb->dev = dev;
	adapter->current_dma.direction = 0;
	adapter->current_dma.length = rlen;
	adapter->current_dma.skb = skb;
	adapter->current_dma.start_time = jiffies;

	outb_control(adapter->hcr_val | DIR | TCEN | DMAE, dev);

	flags=claim_dma_lock();
	disable_dma(dev->dma);
	clear_dma_ff(dev->dma);
	set_dma_mode(dev->dma, 0x04);	/* dma read */
	set_dma_addr(dev->dma, isa_virt_to_bus(target));
	set_dma_count(dev->dma, rlen);
	enable_dma(dev->dma);
	release_dma_lock(flags);

	if (elp_debug >= 3) {
		printk(KERN_DEBUG "%s: rx DMA transfer started\n", dev->name);
	}

	if (adapter->rx_active)
		adapter->rx_active--;

	if (!adapter->busy)
		printk(KERN_WARNING "%s: receive_packet called, busy not set.\n", dev->name);
}
Example 12
static void wbsd_finish_data(struct wbsd_host* host, struct mmc_data* data)
{
	unsigned long dmaflags;
	int count;
	
	WARN_ON(host->mrq == NULL);

	/*
	 * Send a stop command if needed.
	 */
	if (data->stop)
		wbsd_send_command(host, data->stop);
	
	/*
	 * DMA transfer?
	 */
	if (host->dma >= 0)
	{
		/*
		 * Disable DMA on the host.
		 */
		wbsd_write_index(host, WBSD_IDX_DMA, 0);
		
		/*
		 * Turn off the ISA DMA controller.
		 */
		dmaflags = claim_dma_lock();
		disable_dma(host->dma);
		clear_dma_ff(host->dma);
		count = get_dma_residue(host->dma);
		release_dma_lock(dmaflags);
		
		/*
		 * Any leftover data?
		 */
		if (count)
		{
			printk(KERN_ERR DRIVER_NAME ": Incomplete DMA "
				"transfer. %d bytes left.\n", count);
			
			data->error = MMC_ERR_FAILED;
		}
		else
		{
			/*
			 * Transfer data from DMA buffer to
			 * SG list.
			 */
			if (data->flags & MMC_DATA_READ)
				wbsd_dma_to_sg(host, data);
			
			data->bytes_xfered = host->size;
		}
	}
	
	DBGF("Ending data transfer (%d bytes)\n", data->bytes_xfered);
	
	wbsd_request_end(host, host->mrq);
}
Example 13
static bool send_packet(struct net_device *dev, struct sk_buff *skb)
{
	elp_device *adapter = netdev_priv(dev);
	unsigned long target;
	unsigned long flags;

	unsigned int nlen = (((skb->len < 60) ? 60 : skb->len) + 1) & (~1);

	if (test_and_set_bit(0, (void *) &adapter->busy)) {
		if (elp_debug >= 2)
			pr_debug("%s: transmit blocked\n", dev->name);
		return false;
	}

	dev->stats.tx_bytes += nlen;

	adapter->tx_pcb.command = CMD_TRANSMIT_PACKET;
	adapter->tx_pcb.length = sizeof(struct Xmit_pkt);
	adapter->tx_pcb.data.xmit_pkt.buf_ofs
	    = adapter->tx_pcb.data.xmit_pkt.buf_seg = 0;	
	adapter->tx_pcb.data.xmit_pkt.pkt_len = nlen;

	if (!send_pcb(dev, &adapter->tx_pcb)) {
		adapter->busy = 0;
		return false;
	}
	
	if (test_and_set_bit(0, (void *) &adapter->dmaing))
		pr_debug("%s: tx: DMA %d in progress\n", dev->name, adapter->current_dma.direction);

	adapter->current_dma.direction = 1;
	adapter->current_dma.start_time = jiffies;

	if ((unsigned long)(skb->data + nlen) >= MAX_DMA_ADDRESS || nlen != skb->len) {
		skb_copy_from_linear_data(skb, adapter->dma_buffer, nlen);
		memset(adapter->dma_buffer+skb->len, 0, nlen-skb->len);
		target = isa_virt_to_bus(adapter->dma_buffer);
	}
	else {
		target = isa_virt_to_bus(skb->data);
	}
	adapter->current_dma.skb = skb;

	flags=claim_dma_lock();
	disable_dma(dev->dma);
	clear_dma_ff(dev->dma);
	set_dma_mode(dev->dma, 0x48);	/* 0x48 == DMA_MODE_WRITE: single mode, memory to card */
	set_dma_addr(dev->dma, target);
	set_dma_count(dev->dma, nlen);
	outb_control(adapter->hcr_val | DMAE | TCEN, dev);
	enable_dma(dev->dma);
	release_dma_lock(flags);

	if (elp_debug >= 3)
		pr_debug("%s: DMA transfer started\n", dev->name);

	return true;
}
Example 14
/**
 * snd_dma_disable - stop the ISA DMA transfer
 * @dma: the dma number
 *
 * Stops the ISA DMA transfer.
 */
void snd_dma_disable(unsigned long dma)
{
	unsigned long flags;

	flags = claim_dma_lock();
	clear_dma_ff(dma);
	disable_dma(dma);
	release_dma_lock(flags);
}
Example 15
void labpc_drain_dma(struct comedi_device *dev)
{
	struct labpc_private *devpriv = dev->private;
	struct comedi_subdevice *s = dev->read_subdev;
	struct comedi_async *async = s->async;
	struct comedi_cmd *cmd = &async->cmd;
	int status;
	unsigned long flags;
	unsigned int max_points, num_points, residue, leftover;
	int i;

	status = devpriv->stat1;

	flags = claim_dma_lock();
	disable_dma(devpriv->dma_chan);
	/* clear flip-flop to make sure 2-byte registers for
	 * count and address get set correctly */
	clear_dma_ff(devpriv->dma_chan);

	/* figure out how many points to read */
	max_points = devpriv->dma_transfer_size / sample_size;
	/* residue is the number of points left to be done on the dma
	 * transfer.  It should always be zero at this point unless
	 * the stop_src is set to external triggering.
	 */
	residue = get_dma_residue(devpriv->dma_chan) / sample_size;
	num_points = max_points - residue;
	if (cmd->stop_src == TRIG_COUNT && devpriv->count < num_points)
		num_points = devpriv->count;

	/* figure out how many points will be stored next time */
	leftover = 0;
	if (cmd->stop_src != TRIG_COUNT) {
		leftover = devpriv->dma_transfer_size / sample_size;
	} else if (devpriv->count > num_points) {
		leftover = devpriv->count - num_points;
		if (leftover > max_points)
			leftover = max_points;
	}

	/* write data to comedi buffer */
	for (i = 0; i < num_points; i++)
		cfc_write_to_buffer(s, devpriv->dma_buffer[i]);

	if (cmd->stop_src == TRIG_COUNT)
		devpriv->count -= num_points;

	/* set address and count for next transfer */
	set_dma_addr(devpriv->dma_chan, devpriv->dma_addr);
	set_dma_count(devpriv->dma_chan, leftover * sample_size);
	release_dma_lock(flags);

	async->events |= COMEDI_CB_BLOCK;
}
Example 16
int DMAbuf_get_buffer_pointer(int dev, struct dma_buffparms *dmap, int direction)
{
	/*
	 *	Try to approximate the active byte position of the DMA pointer within the
	 *	buffer area as well as possible.
	 */

	int pos;
	unsigned long flags;
	unsigned long f;

	save_flags(flags);
	cli();
	if (!(dmap->flags & DMA_ACTIVE))
		pos = 0;
	else {
		int chan = dmap->dma;
		
		f=claim_dma_lock();
		clear_dma_ff(chan);
		
		if(!isa_dma_bridge_buggy)
			disable_dma(dmap->dma);
		
		pos = get_dma_residue(chan);
		
		pos = dmap->bytes_in_use - pos;

		if (!(dmap->mapping_flags & DMA_MAP_MAPPED)) {
			if (direction == DMODE_OUTPUT) {
				if (dmap->qhead == 0)
					if (pos > dmap->fragment_size)
						pos = 0;
			} else {
				if (dmap->qtail == 0)
					if (pos > dmap->fragment_size)
						pos = 0;
			}
		}
		if (pos < 0)
			pos = 0;
		if (pos >= dmap->bytes_in_use)
			pos = 0;
		
		if(!isa_dma_bridge_buggy)
			enable_dma(dmap->dma);
			
		release_dma_lock(f);
	}
	restore_flags(flags);
	/* printk( "%04x ",  pos); */

	return pos;
}
Example 17
static __inline__ int 
NCR53c406a_dma_residual (void) {
    register int tmp;
    unsigned long flags = 0;
    save_flags(flags);
    cli();
    clear_dma_ff(dma_chan);
    tmp = get_dma_residue(dma_chan);
    restore_flags(flags);
    
    return tmp;
}
Example 18
static __inline__ int 
NCR53c406a_dma_residual (void) {
    register int tmp;
    unsigned long flags;

    flags=claim_dma_lock();
    clear_dma_ff(dma_chan);
    tmp = get_dma_residue(dma_chan);
    release_dma_lock(flags);
    
    return tmp;
}
Example 19
static int sscape_start_dma(int chan, unsigned long physaddr, int count, int dma_mode)
{
	unsigned long flags;

	flags = claim_dma_lock();
	disable_dma(chan);
	clear_dma_ff(chan);
	set_dma_mode(chan, dma_mode);
	set_dma_addr(chan, physaddr);
	set_dma_count(chan, count);
	enable_dma(chan);
	release_dma_lock(flags);
	return 0;
}
Example 20
static inline void
jz_mmc_start_dma(int chan, unsigned long phyaddr, int count, int mode)
{
	unsigned long flags;

	flags = claim_dma_lock();
	disable_dma(chan);
	clear_dma_ff(chan);
	jz_set_dma_block_size(chan, 32);
	set_dma_mode(chan, mode);
	set_dma_addr(chan, phyaddr);
	set_dma_count(chan, count + 31);
	enable_dma(chan);
	release_dma_lock(flags);
}
Example 21
/**
 * snd_dma_program - program an ISA DMA transfer
 * @dma: the dma number
 * @addr: the physical address of the buffer
 * @size: the DMA transfer size
 * @mode: the DMA transfer mode, DMA_MODE_XXX
 *
 * Programs an ISA DMA transfer for the given buffer.
 */
void snd_dma_program(unsigned long dma,
		     unsigned long addr, unsigned int size,
                     unsigned short mode)
{
	unsigned long flags;

	flags = claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma, mode);
	set_dma_addr(dma, addr);
	set_dma_count(dma, size);
	if (!(mode & DMA_MODE_NO_ENABLE))
		enable_dma(dma);
	release_dma_lock(flags);
}
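A minimal sketch, under assumed names, of how a driver's prepare path might program an auto-initialising playback transfer with snd_dma_program(); my_chip, chip->dma, chip->dma_size and chip->substream are hypothetical, while DMA_MODE_WRITE and DMA_AUTOINIT are the standard <asm/dma.h> mode flags.

/* Hypothetical prepare step: program a cyclic (auto-init) playback transfer.
 * The chip fields are assumed; snd_dma_program() is the helper shown above. */
static void my_pcm_prepare_dma(struct my_chip *chip)
{
	struct snd_pcm_runtime *runtime = chip->substream->runtime;

	chip->dma_size = snd_pcm_lib_buffer_bytes(chip->substream);
	snd_dma_program(chip->dma, runtime->dma_addr, chip->dma_size,
			DMA_MODE_WRITE | DMA_AUTOINIT);
}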
Example 22
File: z85230.c Project: 274914765/C
static void z8530_dma_status(struct z8530_channel *chan)
{
    u8 status, altered;

    status=read_zsreg(chan, R0);
    altered=chan->status^status;
    
    chan->status=status;


    if(chan->dma_tx)
    {
        if(status&TxEOM)
        {
            unsigned long flags;
    
            flags=claim_dma_lock();
            disable_dma(chan->txdma);
            clear_dma_ff(chan->txdma);    
            chan->txdma_on=0;
            release_dma_lock(flags);
            z8530_tx_done(chan);
        }
    }

    if(altered&chan->dcdcheck)
    {
        if(status&chan->dcdcheck)
        {
            printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);
            write_zsreg(chan, R3, chan->regs[3]|RxENABLE);
            if(chan->netdevice &&
                ((chan->netdevice->type == ARPHRD_HDLC) ||
                (chan->netdevice->type == ARPHRD_PPP)))
                sppp_reopen(chan->netdevice);
        }
        else
        {
            printk(KERN_INFO "%s:DCD lost\n", chan->dev->name);
            write_zsreg(chan, R3, chan->regs[3]&~RxENABLE);
            z8530_flush_fifo(chan);
        }
    }    

    write_zsctrl(chan, RES_EXT_INT);
    write_zsctrl(chan, RES_H_IUS);
}
Example 23
static void setup_rx_dma(struct pt_local *lp)
{
	unsigned long flags;
	int cmd;
	unsigned long dma_abs;
	unsigned char dmachan;

	save_flags(flags);
	cli();

	dma_abs = (unsigned long) (lp->rcvbuf->data);
	dmachan = lp->dmachan;
	cmd = lp->base + CTL;

	if(!valid_dma_page(dma_abs, DMA_BUFF_SIZE + sizeof(struct mbuf)))
		panic("PI: RX buffer violates DMA boundary!");

	/* Get ready for RX DMA */
	wrtscc(lp->cardbase, cmd, R1, WT_FN_RDYFN | WT_RDY_RT | INT_ERR_Rx | EXT_INT_ENAB);

	disable_dma(dmachan);
	clear_dma_ff(dmachan);

	/*
	 *	Set DMA mode register to single transfers, incrementing address,
	 *	auto init, writes
	 */

	set_dma_mode(dmachan, DMA_MODE_READ | 0x10);
	set_dma_addr(dmachan, dma_abs);
	set_dma_count(dmachan, lp->bufsiz);
	enable_dma(dmachan);

	/*
	 *	If a packet is already coming in, this line is supposed to
	 *	avoid receiving a partial packet.
	 */

	wrtscc(lp->cardbase, cmd, R0, RES_Rx_CRC);

	/* Enable RX dma */
	wrtscc(lp->cardbase, cmd, R1,
		WT_RDY_ENAB | WT_FN_RDYFN | WT_RDY_RT | INT_ERR_Rx | EXT_INT_ENAB);

	restore_flags(flags);
}
Example 24
static int sound_start_dma(struct dma_buffparms *dmap, unsigned long physaddr, int count, int dma_mode)
{
	unsigned long flags;
	int chan = dmap->dma;

	/* printk( "Start DMA%d %d, %d\n",  chan,  (int)(physaddr-dmap->raw_buf_phys),  count); */

	flags = claim_dma_lock();
	disable_dma(chan);
	clear_dma_ff(chan);
	set_dma_mode(chan, dma_mode);
	set_dma_addr(chan, physaddr);
	set_dma_count(chan, count);
	enable_dma(chan);
	release_dma_lock(flags);

	return 0;
}
Example 25
static void dma_reset_output(int dev)
{
	struct audio_operations *adev = audio_devs[dev];
	unsigned long flags,f ;
	struct dma_buffparms *dmap = adev->dmap_out;

	if (!(dmap->flags & DMA_STARTED))	/* DMA is not active */
		return;

	/*
	 *	First wait until the current fragment has been played completely
	 */
	spin_lock_irqsave(&dmap->lock,flags);
	adev->dmap_out->flags |= DMA_SYNCING;

	adev->dmap_out->underrun_count = 0;
	if (!signal_pending(current) && adev->dmap_out->qlen && 
	    adev->dmap_out->underrun_count == 0){
		spin_unlock_irqrestore(&dmap->lock,flags);
		interruptible_sleep_on_timeout(&adev->out_sleeper,
					       dmabuf_timeout(dmap));
		spin_lock_irqsave(&dmap->lock,flags);
	}
	adev->dmap_out->flags &= ~(DMA_SYNCING | DMA_ACTIVE);

	/*
	 *	Finally shut the device off
	 */
	if (!(adev->flags & DMA_DUPLEX) || !adev->d->halt_output)
		adev->d->halt_io(dev);
	else
		adev->d->halt_output(dev);
	adev->dmap_out->flags &= ~DMA_STARTED;
	
	f=claim_dma_lock();
	clear_dma_ff(dmap->dma);
	disable_dma(dmap->dma);
	release_dma_lock(f);
	
	dmap->byte_counter = 0;
	reorganize_buffers(dev, adev->dmap_out, 0);
	dmap->qlen = dmap->qhead = dmap->qtail = dmap->user_counter = 0;
	spin_unlock_irqrestore(&dmap->lock,flags);
}
Example 26
static void handlecommand(struct net_device *dev)
{
	/* on entry, 0xfa and ltdmacbuf holds command */
	int dma = dev->dma;
	int base = dev->base_addr;
	unsigned long flags;

	flags=claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma,DMA_MODE_WRITE);
	set_dma_addr(dma,virt_to_bus(ltdmacbuf));
	set_dma_count(dma,50);
	enable_dma(dma);
	release_dma_lock(flags);
	inb_p(base+3);
	inb_p(base+2);
	if ( wait_timeout(dev,0xfa) ) printk("timed out in handlecommand\n");
} 
Example 27
/* read data from the card */
static void handlefd(struct net_device *dev)
{
	int dma = dev->dma;
	int base = dev->base_addr;
	unsigned long flags;

	flags=claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma,DMA_MODE_READ);
	set_dma_addr(dma,virt_to_bus(ltdmabuf));
	set_dma_count(dma,800);
	enable_dma(dma);
	release_dma_lock(flags);

	inb_p(base+3);
	inb_p(base+2);

	if ( wait_timeout(dev,0xfd) ) printk("timed out in handlefd\n");
	sendup_buffer(dev);
} 
Example 28
/* read a command from the card */
static void handlefc(struct net_device *dev)
{
	/* called *only* from idle, non-reentrant */
	int dma = dev->dma;
	int base = dev->base_addr;
	unsigned long flags;


	flags=claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma,DMA_MODE_READ);
	set_dma_addr(dma,virt_to_bus(ltdmacbuf));
	set_dma_count(dma,50);
	enable_dma(dma);
	release_dma_lock(flags);

	inb_p(base+3);
	inb_p(base+2);

	if ( wait_timeout(dev,0xfc) ) printk("timed out in handlefc\n");
}
Example 29
/**
 * snd_dma_pointer - return the current pointer to DMA transfer buffer in bytes
 * @dma: the dma number
 * @size: the dma transfer size
 *
 * Returns the current pointer in the DMA transfer buffer, in bytes
 */
unsigned int snd_dma_pointer(unsigned long dma, unsigned int size)
{
	unsigned long flags;
	unsigned int result;

	flags = claim_dma_lock();
	clear_dma_ff(dma);
	if (!isa_dma_bridge_buggy)
		disable_dma(dma);
	result = get_dma_residue(dma);
	if (!isa_dma_bridge_buggy)
		enable_dma(dma);
	release_dma_lock(flags);
#ifdef CONFIG_SND_DEBUG
	if (result > size)
		snd_printk(KERN_ERR "pointer (0x%x) for DMA #%ld is greater than transfer size (0x%x)\n", result, dma, size);
#endif
	if (result >= size || result == 0)
		return 0;
	else
		return size - result;
}
Example 30
void labpc_setup_dma(struct comedi_device *dev, struct comedi_subdevice *s)
{
	struct labpc_private *devpriv = dev->private;
	struct comedi_cmd *cmd = &s->async->cmd;
	unsigned long irq_flags;

	irq_flags = claim_dma_lock();
	disable_dma(devpriv->dma_chan);
	/* clear flip-flop to make sure 2-byte registers for
	 * count and address get set correctly */
	clear_dma_ff(devpriv->dma_chan);
	set_dma_addr(devpriv->dma_chan, devpriv->dma_addr);
	/* set appropriate size of transfer */
	devpriv->dma_transfer_size = labpc_suggest_transfer_size(cmd);
	if (cmd->stop_src == TRIG_COUNT &&
	    devpriv->count * sample_size < devpriv->dma_transfer_size)
		devpriv->dma_transfer_size = devpriv->count * sample_size;
	set_dma_count(devpriv->dma_chan, devpriv->dma_transfer_size);
	enable_dma(devpriv->dma_chan);
	release_dma_lock(irq_flags);
	/* set CMD3 bits for caller to enable DMA and interrupt */
	devpriv->cmd3 |= (CMD3_DMAEN | CMD3_DMATCINTEN);
}