Example #1
static void z8530_rx_done(struct z8530_channel *c)
{
	struct sk_buff *skb;
	int ct;
	
	/*
	 *	Is our receive engine in DMA mode
	 */
	 
	if(c->rxdma_on)
	{
		/*
		 *	Save the ready state and the buffer currently
		 *	being used as the DMA target
		 */
		 
		int ready=c->dma_ready;
		unsigned char *rxb=c->rx_buf[c->dma_num];
		unsigned long flags;
		
		/*
	 *	Complete this DMA. Necessary to find the length
		 */		
		 
		flags=claim_dma_lock();
		
		disable_dma(c->rxdma);
		clear_dma_ff(c->rxdma);
		c->rxdma_on=0;
		ct=c->mtu-get_dma_residue(c->rxdma);
		if(ct<0)
			ct=2;	/* Shit happens.. */
		c->dma_ready=0;
		
		/*
		 *	Normal case: the other slot is free, start the next DMA
		 *	into it immediately.
		 */
		 
		if(ready)
		{
			c->dma_num^=1;
			set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
			set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num]));
			set_dma_count(c->rxdma, c->mtu);
			c->rxdma_on = 1;
			enable_dma(c->rxdma);
			/* Stop any frames that we missed the head of 
			   from passing */
			write_zsreg(c, R0, RES_Rx_CRC);
		}
		else
			/* Can't occur as we don't re-enable the DMA IRQ
			   until after the flip is done */
			printk(KERN_WARNING "%s: DMA flip overrun!\n",
			       c->netdevice->name);

		release_dma_lock(flags);

		/*
		 *	Shove the old buffer into an sk_buff. We can't DMA
		 *	directly into one on a PC - it might be above the 16Mb
		 *	boundary. Optimisation - we could check to see if we
		 *	can avoid the copy. Optimisation 2 - make the memcpy
		 *	a copychecksum.
		 */

		skb = dev_alloc_skb(ct);
		if (skb == NULL) {
			c->netdevice->stats.rx_dropped++;
			printk(KERN_WARNING "%s: Memory squeeze.\n",
			       c->netdevice->name);
		} else {
			skb_put(skb, ct);
			skb_copy_to_linear_data(skb, rxb, ct);
			c->netdevice->stats.rx_packets++;
			c->netdevice->stats.rx_bytes += ct;
		}
		c->dma_ready = 1;
	} else {
		RT_LOCK;
		skb = c->skb;

		/*
		 *	The game we play for non DMA is similar. We want to
		 *	get the controller set up for the next packet as fast
		 *	as possible. We potentially only have one byte + the
		 *	fifo length for this. Thus we want to flip to the new
		 *	buffer and then mess around copying and allocating
		 *	things. For the current case it doesn't matter, but
		 *	if you build a system where the sync IRQ isn't blocked
		 *	by the kernel IRQ disable, then you need only block the
		 *	sync IRQ for the RT_LOCK area.
		 *
		 */
		ct=c->count;

		c->skb = c->skb2;
		c->count = 0;
		c->max = c->mtu;
		if (c->skb) {
			c->dptr = c->skb->data;
			c->max = c->mtu;
		} else {
			c->count = 0;
			c->max = 0;
		}
		RT_UNLOCK;

		c->skb2 = dev_alloc_skb(c->mtu);
		if (c->skb2 == NULL)
			printk(KERN_WARNING "%s: memory squeeze.\n",
			       c->netdevice->name);
		else
			skb_put(c->skb2, c->mtu);
		c->netdevice->stats.rx_packets++;
		c->netdevice->stats.rx_bytes += ct;
	}
	/*
	 *	If we received a frame we must now process it.
	 */
	if (skb) {
		skb_trim(skb, ct);
		c->rx_function(c, skb);
	} else {
		c->netdevice->stats.rx_dropped++;
		printk(KERN_ERR "%s: Lost a frame\n", c->netdevice->name);
	}
}
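The length arithmetic in the DMA branch is worth calling out: the 8237 counts down from the programmed count, so get_dma_residue() returns the bytes still outstanding, and the received length is the programmed count minus that residue. (The 0x10 OR-ed into the mode register here is the controller's auto-init bit, DMA_AUTOINIT in <asm/dma.h>.) A minimal sketch of the pattern; the helper name isa_dma_received is hypothetical:

#include <asm/dma.h>	/* claim_dma_lock(), get_dma_residue(), ... */

/*
 * Hypothetical helper: how many bytes did channel 'chan' transfer out
 * of the 'programmed' count before we stopped it? The channel must be
 * disabled and its flip-flop cleared before the residue is read,
 * exactly as z8530_rx_done() does above.
 */
static int isa_dma_received(unsigned int chan, int programmed)
{
	unsigned long flags = claim_dma_lock();
	int residue;

	disable_dma(chan);
	clear_dma_ff(chan);
	residue = get_dma_residue(chan);
	release_dma_lock(flags);

	return programmed - residue;
}

With that helper, the length computation above reduces to ct = isa_dma_received(c->rxdma, c->mtu).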
Example #2
int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
{
	unsigned long cflags, dflags;
	
	c->sync = 1;
	c->mtu = dev->mtu+64;
	c->count = 0;
	c->skb = NULL;
	c->skb2 = NULL;
	/*
	 *	Load the DMA interfaces up
	 */
	c->rxdma_on = 0;
	c->txdma_on = 0;
	
	/*
	 *	Allocate the DMA flip buffers. Limit by page size.
	 *	Everyone runs an MTU of 1500 or less on WAN links,
	 *	so this should be fine.
	 */
	 
	if(c->mtu  > PAGE_SIZE/2)
		return -EMSGSIZE;
	 
	c->rx_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if(c->rx_buf[0]==NULL)
		return -ENOBUFS;
	c->rx_buf[1]=c->rx_buf[0]+PAGE_SIZE/2;
	
	c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if(c->tx_dma_buf[0]==NULL)
	{
		free_page((unsigned long)c->rx_buf[0]);
		c->rx_buf[0]=NULL;
		return -ENOBUFS;
	}
	c->tx_dma_buf[1]=c->tx_dma_buf[0]+PAGE_SIZE/2;

	c->tx_dma_used=0;
	c->dma_tx = 1;
	c->dma_num=0;
	c->dma_ready=1;
	
	/*
	 *	Enable DMA control mode
	 */

	spin_lock_irqsave(c->lock, cflags);
	 
	/*
	 *	TX DMA via DIR/REQ
	 */
	 
	c->regs[R14]|= DTRREQ;
	write_zsreg(c, R14, c->regs[R14]);     

	c->regs[R1]&= ~TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);
	
	/*
	 *	RX DMA via W/Req
	 */	 

	c->regs[R1]|= WT_FN_RDYFN;
	c->regs[R1]|= WT_RDY_RT;
	c->regs[R1]|= INT_ERR_Rx;
	c->regs[R1]&= ~TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);
	c->regs[R1]|= WT_RDY_ENAB;
	write_zsreg(c, R1, c->regs[R1]);            
	
	/*
	 *	DMA interrupts
	 */
	 
	/*
	 *	Set up the DMA configuration
	 */	
	 
	dflags=claim_dma_lock();
	 
	disable_dma(c->rxdma);
	clear_dma_ff(c->rxdma);
	set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
	set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[0]));
	set_dma_count(c->rxdma, c->mtu);
	enable_dma(c->rxdma);

	disable_dma(c->txdma);
	clear_dma_ff(c->txdma);
	set_dma_mode(c->txdma, DMA_MODE_WRITE);
	disable_dma(c->txdma);
	
	release_dma_lock(dflags);
	
	/*
	 *	Select the DMA interrupt handlers
	 */

	c->rxdma_on = 1;
	c->txdma_on = 1;
	c->tx_dma_used = 1;
	 
	c->irqs = &z8530_dma_sync;
	z8530_rtsdtr(c,1);
	write_zsreg(c, R3, c->regs[R3]|RxENABLE);

	spin_unlock_irqrestore(c->lock, cflags);
	
	return 0;
}
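The buffer setup here is the standard way to get ISA-DMA-safe flip buffers: a single GFP_DMA page sits below the 16MB ISA limit, and because pages are aligned, neither half of it can cross a 64KB DMA boundary, so the two halves serve as independent buffers. A sketch of just that allocation; the flip_buf names are hypothetical:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/types.h>

/* Hypothetical flip-buffer pair carved out of one DMA-capable page. */
static u8 *flip_buf[2];

static int alloc_flip_buffers(void)
{
	/*
	 * GFP_DMA keeps the page under the 16MB ISA DMA limit; page
	 * alignment guarantees neither half crosses a 64KB boundary.
	 */
	flip_buf[0] = (u8 *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!flip_buf[0])
		return -ENOBUFS;
	flip_buf[1] = flip_buf[0] + PAGE_SIZE / 2;
	return 0;
}

static void free_flip_buffers(void)
{
	free_page((unsigned long)flip_buf[0]);
	flip_buf[0] = flip_buf[1] = NULL;
}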
Example #3
static void z8530_tx_begin(struct z8530_channel *c)
{
	unsigned long flags;
	if(c->tx_skb)
		return;
		
	c->tx_skb=c->tx_next_skb;
	c->tx_next_skb=NULL;
	c->tx_ptr=c->tx_next_ptr;
	
	if(c->tx_skb==NULL)
	{
		/* Idle on */
		if(c->dma_tx)
		{
			flags=claim_dma_lock();
			disable_dma(c->txdma);
			/*
			 *	Check whether the last transmit completed:
			 *	a nonzero residue means it was cut short.
			 */
			if (get_dma_residue(c->txdma))
			{
				c->netdevice->stats.tx_dropped++;
				c->netdevice->stats.tx_fifo_errors++;
			}
			release_dma_lock(flags);
		}
		c->txcount=0;
	}
	else
	{
		c->txcount=c->tx_skb->len;
		
		
		if(c->dma_tx)
		{
			 
			flags=claim_dma_lock();
			disable_dma(c->txdma);

			/*
			 *	These two are needed by the 8530/85C30
			 *	and must be issued when idling.
			 */
			 
			if(c->dev->type!=Z85230)
			{
				write_zsctrl(c, RES_Tx_CRC);
				write_zsctrl(c, RES_EOM_L);
			}	
			write_zsreg(c, R10, c->regs[10]&~ABUNDER);
			clear_dma_ff(c->txdma);
			set_dma_addr(c->txdma, virt_to_bus(c->tx_ptr));
			set_dma_count(c->txdma, c->txcount);
			enable_dma(c->txdma);
			release_dma_lock(flags);
			write_zsctrl(c, RES_EOM_L);
			write_zsreg(c, R5, c->regs[R5]|TxENAB);
		}
		else
		{

			/* ABUNDER off */
			write_zsreg(c, R10, c->regs[10]);
			write_zsctrl(c, RES_Tx_CRC);
	
			while(c->txcount && (read_zsreg(c,R0)&Tx_BUF_EMP))
			{		
				write_zsreg(c, R8, *c->tx_ptr++);
				c->txcount--;
			}

		}
	}
	/*
	 *	Since we emptied tx_skb we can ask for more
	 */
	netif_wake_queue(c->netdevice);
}
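The non-DMA branch above is the transmit strategy in miniature: write to the data register for as long as the chip flags the transmit buffer empty, then let the Tx interrupt continue the frame. Factored out against the driver's own accessors it is just this (a sketch, not a function from the driver):

/*
 * Sketch: push as much of the pending frame as the transmitter will
 * take right now; the Tx interrupt path repeats this for the rest.
 */
static void z8530_tx_fill(struct z8530_channel *c)
{
	while (c->txcount && (read_zsreg(c, R0) & Tx_BUF_EMP)) {
		write_zsreg(c, R8, *c->tx_ptr++);
		c->txcount--;
	}
}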
Example #4
static int send_packet(struct net_device *dev, struct sk_buff *skb)
{
	elp_device *adapter = dev->priv;
	unsigned long target;
	unsigned long flags;

	/*
	 * make sure the length is even and no shorter than 60 bytes
	 */
	unsigned int nlen = (((skb->len < 60) ? 60 : skb->len) + 1) & (~1);

	if (test_and_set_bit(0, (void *) &adapter->busy)) {
		if (elp_debug >= 2)
			printk("%s: transmit blocked\n", dev->name);
		return FALSE;
	}

	adapter->stats.tx_bytes += nlen;
	
	/*
	 * send the adapter a transmit packet command. Ignore segment and offset
	 * and make sure the length is even
	 */
	adapter->tx_pcb.command = CMD_TRANSMIT_PACKET;
	adapter->tx_pcb.length = sizeof(struct Xmit_pkt);
	adapter->tx_pcb.data.xmit_pkt.buf_ofs
	    = adapter->tx_pcb.data.xmit_pkt.buf_seg = 0;	/* Unused */
	adapter->tx_pcb.data.xmit_pkt.pkt_len = nlen;

	if (!send_pcb(dev, &adapter->tx_pcb)) {
		adapter->busy = 0;
		return FALSE;
	}
	/* if this happens, we die */
	if (test_and_set_bit(0, (void *) &adapter->dmaing))
		printk("%s: tx: DMA %d in progress\n", dev->name, adapter->current_dma.direction);

	adapter->current_dma.direction = 1;
	adapter->current_dma.start_time = jiffies;

	if ((unsigned long)(skb->data + nlen) >= MAX_DMA_ADDRESS || nlen != skb->len) {
		memcpy(adapter->dma_buffer, skb->data, skb->len);
		memset(adapter->dma_buffer+skb->len, 0, nlen-skb->len);
		target = virt_to_bus(adapter->dma_buffer);
	}
	else {
		target = virt_to_bus(skb->data);
	}
	adapter->current_dma.skb = skb;

	flags=claim_dma_lock();
	disable_dma(dev->dma);
	clear_dma_ff(dev->dma);
	set_dma_mode(dev->dma, 0x48);	/* dma memory -> io */
	set_dma_addr(dev->dma, target);
	set_dma_count(dev->dma, nlen);
	outb_control(adapter->hcr_val | DMAE | TCEN, dev);
	enable_dma(dev->dma);
	release_dma_lock(flags);
	
	if (elp_debug >= 3)
		printk("%s: DMA transfer started\n", dev->name);

	return TRUE;
}
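The address check before the DMA setup is the classic ISA bounce-buffer test: if the frame ends at or beyond MAX_DMA_ADDRESS, or needs padding to the even minimum length, it is first copied into a buffer known to be DMA-reachable and that copy's bus address is programmed instead. The same decision as a stand-alone sketch; dma_target is a hypothetical name and bounce a hypothetical pre-allocated DMA-capable buffer of at least 'padded' bytes:

#include <linux/string.h>
#include <asm/dma.h>	/* MAX_DMA_ADDRESS */
#include <asm/io.h>	/* virt_to_bus() */

/*
 * Hypothetical helper: return a bus address the 8237 can reach,
 * falling back to copying (and zero-padding) into 'bounce' when the
 * original buffer is unsuitable.
 */
static unsigned long dma_target(unsigned char *data, unsigned int len,
				unsigned int padded, unsigned char *bounce)
{
	if ((unsigned long)(data + padded) >= MAX_DMA_ADDRESS ||
	    padded != len) {
		memcpy(bounce, data, len);
		memset(bounce + len, 0, padded - len);
		return virt_to_bus(bounce);
	}
	return virt_to_bus(data);
}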
Example #5
static int __init ltpc_probe_dma(int base, int dma)
{
	int want = (dma == 3) ? 2 : (dma == 1) ? 1 : 3;
  	unsigned long timeout;
  	unsigned long f;
  
  	if (want & 1) {
		if (request_dma(1,"ltpc")) {
			want &= ~1;
		} else {
			f=claim_dma_lock();
			disable_dma(1);
			clear_dma_ff(1);
			set_dma_mode(1,DMA_MODE_WRITE);
			set_dma_addr(1,virt_to_bus(ltdmabuf));
			set_dma_count(1,sizeof(struct lt_mem));
			enable_dma(1);
			release_dma_lock(f);
		}
	}
	if (want & 2) {
		if (request_dma(3,"ltpc")) {
			want &= ~2;
		} else {
			f=claim_dma_lock();
			disable_dma(3);
			clear_dma_ff(3);
			set_dma_mode(3,DMA_MODE_WRITE);
			set_dma_addr(3,virt_to_bus(ltdmabuf));
			set_dma_count(3,sizeof(struct lt_mem));
			enable_dma(3);
			release_dma_lock(f);
		}
	}
	/* set up request */

	/* FIXME -- do timings better! */

	ltdmabuf[0] = LT_READMEM;
	ltdmabuf[1] = 1;  /* mailbox */
	ltdmabuf[2] = 0; ltdmabuf[3] = 0;  /* address */
	ltdmabuf[4] = 0; ltdmabuf[5] = 1;  /* read 0x0100 bytes */
	ltdmabuf[6] = 0; /* dunno if this is necessary */

	inb_p(io+1);
	inb_p(io+0);
	timeout = jiffies+100*HZ/100;
	while(time_before(jiffies, timeout)) {
		if ( 0xfa == inb_p(io+6) ) break;
	}

	inb_p(io+3);
	inb_p(io+2);
	while(time_before(jiffies, timeout)) {
		if ( 0xfb == inb_p(io+6) ) break;
	}

	/* release the other dma channel (if we opened both of them) */

	if ((want & 2) && (get_dma_residue(3)==sizeof(struct lt_mem))) {
		want &= ~2;
		free_dma(3);
	}

	if ((want & 1) && (get_dma_residue(1)==sizeof(struct lt_mem))) {
		want &= ~1;
		free_dma(1);
	}

	if (!want)
		return 0;

	return (want & 2) ? 3 : 1;
}
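The two get_dma_residue() checks at the end do the actual detection: both candidate channels were programmed with identical sizeof(struct lt_mem) transfers, so a channel whose residue still equals the full count never saw a DRQ from the card and gets released. The same test as a helper sketch; the name channel_was_used is hypothetical:

#include <asm/dma.h>

/*
 * Hypothetical probe helper: a channel whose residue still equals the
 * programmed count never transferred a byte, so the card is not wired
 * to it. Reading the residue goes through the flip-flop, hence the lock.
 */
static int channel_was_used(unsigned int chan, unsigned int programmed)
{
	unsigned long f = claim_dma_lock();
	int residue;

	clear_dma_ff(chan);
	residue = get_dma_residue(chan);
	release_dma_lock(f);

	return residue != (int)programmed;
}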
Example #6
struct net_device * __init ltpc_probe(void)
{
	struct net_device *dev;
	int err = -ENOMEM;
	int x=0,y=0;
	int autoirq;
	unsigned long f;
	unsigned long timeout;

	dev = alloc_ltalkdev(sizeof(struct ltpc_private));
	if (!dev)
		goto out;

	/* probe for the I/O port address */
	
	if (io != 0x240 && request_region(0x220,8,"ltpc")) {
		x = inb_p(0x220+6);
		if ( (x!=0xff) && (x>=0xf0) ) {
			io = 0x220;
			goto got_port;
		}
		release_region(0x220,8);
	}
	if (io != 0x220 && request_region(0x240,8,"ltpc")) {
		y = inb_p(0x240+6);
		if ( (y!=0xff) && (y>=0xf0) ){ 
			io = 0x240;
			goto got_port;
		}
		release_region(0x240,8);
	} 

	/* give up in despair */
	printk(KERN_ERR "LocalTalk card not found; 220 = %02x, 240 = %02x.\n", x,y);
	err = -ENODEV;
	goto out1;

 got_port:
	/* probe for the IRQ line */
	if (irq < 2) {
		unsigned long irq_mask;

		irq_mask = probe_irq_on();
		/* reset the interrupt line */
		inb_p(io+7);
		inb_p(io+7);
		/* trigger an interrupt (I hope) */
		inb_p(io+6);
		mdelay(2);
		autoirq = probe_irq_off(irq_mask);

		if (autoirq == 0) {
			printk(KERN_ERR "ltpc: probe at %#x failed to detect IRQ line.\n", io);
		} else {
			irq = autoirq;
		}
	}

	/* allocate a DMA buffer */
	ltdmabuf = (unsigned char *) dma_mem_alloc(1000);
	if (!ltdmabuf) {
		printk(KERN_ERR "ltpc: mem alloc failed\n");
		err = -ENOMEM;
		goto out2;
	}

	ltdmacbuf = &ltdmabuf[800];

	if(debug & DEBUG_VERBOSE) {
		printk("ltdmabuf pointer %08lx\n",(unsigned long) ltdmabuf);
	}

	/* reset the card */

	inb_p(io+1);
	inb_p(io+3);

	msleep(20);

	inb_p(io+0);
	inb_p(io+2);
	inb_p(io+7); /* clear reset */
	inb_p(io+4); 
	inb_p(io+5);
	inb_p(io+5); /* enable dma */
	inb_p(io+6); /* tri-state interrupt line */

	ssleep(1);
	
	/* now, figure out which dma channel we're using, unless it's
	   already been specified */
	/* well, 0 is a legal DMA channel, but the LTPC card doesn't
	   use it... */
	dma = ltpc_probe_dma(io, dma);
	if (!dma) {  /* no dma channel */
		printk(KERN_ERR "No DMA channel found on ltpc card.\n");
		err = -ENODEV;
		goto out3;
	}

	/* print out friendly message */
	if(irq)
		printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, IR%d, DMA%d.\n",io,irq,dma);
	else
		printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, DMA%d.  Using polled mode.\n",io,dma);

	dev->netdev_ops = &ltpc_netdev;
	dev->base_addr = io;
	dev->irq = irq;
	dev->dma = dma;

	/* the card will want to send a result at this point */
	/* (I think... leaving out this part makes the kernel crash,
           so I put it back in...) */

	f=claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma,DMA_MODE_READ);
	set_dma_addr(dma,virt_to_bus(ltdmabuf));
	set_dma_count(dma,0x100);
	enable_dma(dma);
	release_dma_lock(f);

	(void) inb_p(io+3);
	(void) inb_p(io+2);
	timeout = jiffies+100*HZ/100;

	while(time_before(jiffies, timeout)) {
		if( 0xf9 == inb_p(io+6))
			break;
		schedule();
	}

	if(debug & DEBUG_VERBOSE) {
		printk("setting up timer and irq\n");
	}

	/* grab it and don't let go :-) */
	if (irq && request_irq( irq, ltpc_interrupt, 0, "ltpc", dev) >= 0)
	{
		(void) inb_p(io+7);  /* enable interrupts from board */
		(void) inb_p(io+7);  /* and reset irq line */
	} else {
		if( irq )
			printk(KERN_ERR "ltpc: IRQ already in use, using polled mode.\n");
		dev->irq = 0;
		/* polled mode -- 20 times per second */
		/* this is really, really slow... should it poll more often? */
		init_timer(&ltpc_timer);
		ltpc_timer.function=ltpc_poll;
		ltpc_timer.data = (unsigned long) dev;

		ltpc_timer.expires = jiffies + HZ/20;
		add_timer(&ltpc_timer);
	}
	err = register_netdev(dev);
	if (err)
		goto out4;

	return dev;
out4:
	del_timer_sync(&ltpc_timer);
	if (dev->irq)
		free_irq(dev->irq, dev);
out3:
	free_pages((unsigned long)ltdmabuf, get_order(1000));
out2:
	release_region(io, 8);
out1:
	free_netdev(dev);
out:
	return ERR_PTR(err);
}
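Note the ERR_PTR convention: ltpc_probe() hands back either a usable net_device or an encoded errno, so a caller must test with IS_ERR() rather than against NULL. A sketch of a module-init wrapper following that convention (written from the convention, not copied from the driver):

#include <linux/err.h>
#include <linux/init.h>
#include <linux/netdevice.h>

static struct net_device *dev_ltpc;

static int __init ltpc_module_init(void)
{
	dev_ltpc = ltpc_probe();
	if (IS_ERR(dev_ltpc))
		return PTR_ERR(dev_ltpc);
	return 0;
}
module_init(ltpc_module_init);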
Example #7
/**
 * pcm3718_dma_isr - interrupt service routine for DMA
 *                   data acquisition
 *
 * @ptr: pointer to the private data of the device object
 */
static void pcm3718_dma_isr(private_data *ptr)
{
	unsigned long ret, flags, i;
	private_data *privdata = ptr;
	adv_user_page *page = NULL;
	INT16U tmp;
	/* receive data */
	i = 0;
	privdata->item = 0;
	privdata->page_index = 0;

	advOutp(privdata, 0x08, 0);	/* clear interrupt request */
	advOutp(privdata, 0x19, 0);	/* clear interrupt request */

	do {
		page = privdata->user_pages + privdata->page_index;
		if (privdata->item >= page->length) {
			privdata->page_index++;
			privdata->item = 0;
		}
	
		privdata->page_index %= privdata->page_num;
		privdata->cur_index %= privdata->conv_num;
		i++;
	
		page = privdata->user_pages + privdata->page_index;

		
		/* read one sample and copy it into the current user page */
		tmp = privdata->dmabuf[privdata->cur_index];
		memcpy((INT16U *) (page->page_addr + page->offset + privdata->item),
		       &tmp, sizeof(INT16U));
		privdata->item += 2;
		privdata->cur_index++;
	} while (privdata->cur_index < privdata->conv_num);

	if (!privdata->buf_stat) {
		privdata->cur_index = privdata->conv_num / 2;
		privdata->half_ready = 1;

		adv_process_info_set_event_all(&privdata->ptr_process_info, 0, 1);
	} else {
		privdata->cur_index = privdata->conv_num;
		privdata->half_ready = 2;
		privdata->trans += privdata->conv_num;
		adv_process_info_set_event_all(&privdata->ptr_process_info, 1, 1);

		if (!privdata->cyclic) { /* terminate */
			adv_process_info_set_event_all(&privdata->ptr_process_info, 2, 1);
			advOutp(privdata, 9, 0);	/* disable interrupt */
			advOutp(privdata, 6, 0);	/* disable interrupt */
			advOutp(privdata, 8, 0);
		} else {	/* buffer change */
			if (privdata->overrun_flag == 1) { /* overrun */
				adv_process_info_set_event_all(&privdata->ptr_process_info,
							       3, 1);
			}
			privdata->overrun_flag = 1;
		}
	}


	if (privdata->cyclic) {
		/*
		 * Reprogram the channel for the next buffer-sized block.
		 * DMA_MODE_READ | DMA_AUTOINIT would let the 8237 reload
		 * address and count by itself instead.
		 */
		flags = claim_dma_lock();
		disable_dma(privdata->ioDMAbase);
		clear_dma_ff(privdata->ioDMAbase);
		set_dma_mode(privdata->ioDMAbase, DMA_MODE_READ);
		set_dma_addr(privdata->ioDMAbase, privdata->hwdmaptr);
		set_dma_count(privdata->ioDMAbase, privdata->hwdmasize[0]);
		ret = get_dma_residue(privdata->ioDMAbase);
		/* restart reception only while cyclic acquisition is running */
		enable_dma(privdata->ioDMAbase);
		release_dma_lock(flags);
	}
	wake_up_interruptible(&privdata->event_wait);
	
}
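This handler reprograms the channel by hand on every interrupt; the auto-init alternative noted in the code would have the 8237 reload the base address and count itself at terminal count, so the ISR only acknowledges the device. A sketch of arming the channel that way, assuming the same private_data fields:

#include <asm/dma.h>

/*
 * Sketch: arm the channel once with auto-init. At terminal count the
 * 8237 reloads address and count on its own, so the interrupt handler
 * no longer has to reprogram the channel between buffers.
 */
static void pcm3718_dma_arm_autoinit(private_data *privdata)
{
	unsigned long flags = claim_dma_lock();

	disable_dma(privdata->ioDMAbase);
	clear_dma_ff(privdata->ioDMAbase);
	set_dma_mode(privdata->ioDMAbase, DMA_MODE_READ | DMA_AUTOINIT);
	set_dma_addr(privdata->ioDMAbase, privdata->hwdmaptr);
	set_dma_count(privdata->ioDMAbase, privdata->hwdmasize[0]);
	enable_dma(privdata->ioDMAbase);
	release_dma_lock(flags);
}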