Example #1
/*
 * We have a good packet(s), get it/them out of the buffers.
 *
 * cgg - this driver works by creating (once) a circular list of receiver
 *       DMA descriptors that will be used serially by the Banyan.
 *       Because the descriptors are never unlinked from the list _they
 *       are always live_.  We are counting on Linux (and the chosen number
 *       of buffers) to keep ahead of the hardware, otherwise the same
 *       descriptor might be used for more than one reception.
 */
static void
acacia_rx(struct net_device *dev)
{
    struct acacia_local* lp = (struct acacia_local *)dev->priv;
    volatile DMAD_t  rd = &lp->rd_ring[lp->rx_next_out];
    struct sk_buff *skb;
    u8* pkt_buf;
    u32 devcs;
    u32 count, pkt_len;

    /* cgg - keep going while we have received into more descriptors */

    while (IS_DMA_USED(rd->control)) {

        devcs = rd->devcs;

        pkt_len = RCVPKT_LENGTH(devcs);

        pkt_buf = &lp->rba[lp->rx_next_out * ACACIA_RBSIZE];

        /*
         * cgg - RESET the address pointer later - if we get a second
         * reception it will occur in the remains of the current
         * area of memory - protected by the diminished DMA count.
         */

        /*
         * Due to a bug in the Banyan processor, the packet length
         * given by the devcs field and the DMA count field sometimes
         * differ.  If that is the case, report an error.
         */
        count = ACACIA_RBSIZE - (u32)DMA_COUNT(rd->control);
        if (count != pkt_len) {
            lp->stats.rx_errors++;
        } else if (count < 64) {
            lp->stats.rx_errors++;
        } else if ((devcs & (/*ETHERDMA_IN_FD |*/ ETHRX_ld_m)) !=
                   (/*ETHERDMA_IN_FD |*/ ETHRX_ld_m)) {
            /* cgg - check that this is a whole packet */
            /* WARNING: DMA_FD bit incorrectly set in Acacia
               (errata ref #077) */
            lp->stats.rx_errors++;
            lp->stats.rx_over_errors++;
        } else if (devcs & ETHRX_rok_m) {
            /* must be the (first and) last descriptor then */

            /* Malloc up new buffer. */
            skb = dev_alloc_skb(pkt_len+2);
            if (skb == NULL) {
                err("no memory, dropping rx packet.\n");
                lp->stats.rx_dropped++;
            } else {
                /* else added by cgg - used to fall through! */
                /* invalidate the cache before copying
                   the buffer */
                dma_cache_inv((unsigned long)pkt_buf, pkt_len);

                skb->dev = dev;
                skb_reserve(skb, 2);	/* 16 bit align */
                skb_put(skb, pkt_len);	/* Make room */
                eth_copy_and_sum(skb, pkt_buf, pkt_len, 0);
                skb->protocol = eth_type_trans(skb, dev);
                /* pass the packet to upper layers */
                netif_rx(skb);
                dev->last_rx = jiffies;
                lp->stats.rx_packets++;
                lp->stats.rx_bytes += pkt_len;

                if (IS_RCV_MP(devcs))
                    lp->stats.multicast++;
            }

        } else {
            /* This should only happen if we enable
               accepting broken packets */
            lp->stats.rx_errors++;

            /* cgg - (re-)added statistics counters */
            if (IS_RCV_CRC_ERR(devcs)) {
                dbg(2, "RX CRC error\n");
                lp->stats.rx_crc_errors++;
            } else {
                if (IS_RCV_LOR_ERR(devcs)) {
                    dbg(2, "RX LOR error\n");
                    lp->stats.rx_length_errors++;
                }

                if (IS_RCV_LE_ERR(devcs)) {
                    dbg(2, "RX LE error\n");
                    lp->stats.rx_length_errors++;
                }
            }

            if (IS_RCV_OVR_ERR(devcs)) {
                /*
                 * The overflow errors are handled through
                 * an interrupt handler.
                 */
                lp->stats.rx_over_errors++;
            }
            /* code violation */
            if (IS_RCV_CV_ERR(devcs)) {
                dbg(2, "RX CV error\n");
                lp->stats.rx_frame_errors++;
            }

            if (IS_RCV_CES_ERR(devcs)) {
                dbg(2, "RX Preamble error\n");
            }
        }


        /* reset descriptor's curr_addr */
        rd->ca = virt_to_phys(pkt_buf);

        /*
         * cgg - clear the bits that let us see whether this
         * descriptor has been used or not & reset reception
         * length.
         */
        rd->control = DMAD_iod_m | DMA_COUNT(ACACIA_RBSIZE);
        rd->devcs = 0;
        lp->rx_next_out = (lp->rx_next_out + 1) & ACACIA_RDS_MASK;
        rd = &lp->rd_ring[lp->rx_next_out];

        /*
         * we'll deal with all possible interrupts up to the last
         * used descriptor - so cancel any interrupts that may have
         * arisen while we've been processing.
         */
        writel(0, &lp->rx_dma_regs->dmas);
    }

    /*
     * Any worthwhile packets have been handed to netif_rx(), which
     * queues them and raises the receive softirq; they will be
     * processed when the bottom-half routine runs.
     */
}
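A note on the ring structure: the header comment says the RX descriptors form a circular list that is never unlinked, so every descriptor stays live and the driver merely re-arms each one after use. Below is a minimal standalone sketch of that layout, assuming a hypothetical four-word IDT-style descriptor; the real DMAD_t, its bit encodings and the ring size live in the driver's headers and may differ.

/*
 * Hypothetical sketch of an always-live circular RX descriptor ring.
 * Field names mirror the excerpt (control, ca, devcs, link) but the
 * exact layout is an assumption, not the real DMAD_t definition.
 */
#include <stdint.h>

#define NUM_RDS 64                     /* ring size, a power of two */

struct dma_desc {
	uint32_t control;              /* DMA byte count + iod/used bits */
	uint32_t ca;                   /* physical address of the buffer */
	uint32_t devcs;                /* device command/status word     */
	uint32_t link;                 /* physical address of next desc  */
};

/*
 * Link every descriptor to its successor and the last one back to the
 * first, so the engine can run around the ring forever; software only
 * has to keep rx_next_out far enough ahead of the hardware.
 */
static void init_rx_ring(struct dma_desc *ring, uint32_t ring_phys)
{
	int i;

	for (i = 0; i < NUM_RDS; i++)
		ring[i].link = ring_phys +
			((i + 1) % NUM_RDS) * sizeof(struct dma_desc);
}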
Example #2
#ifdef CONFIG_IDT_USE_NAPI
static int rc32434_poll(struct net_device *rx_data_dev, int *budget)
#else
static void rc32434_rx_tasklet(unsigned long rx_data_dev)
#endif
{
	struct net_device *dev = (struct net_device *)rx_data_dev;	
	struct rc32434_local* lp = netdev_priv(dev);
	volatile DMAD_t  rd = &lp->rd_ring[lp->rx_next_done];
	struct sk_buff *skb, *skb_new;
	u8* pkt_buf;
	u32 devcs, count, pkt_len, pktuncrc_len;
	volatile u32 dmas;
#ifdef CONFIG_IDT_USE_NAPI
	u32 received = 0;
	int rx_work_limit = min(*budget, dev->quota);
#else
	unsigned long 	flags;
	spin_lock_irqsave(&lp->lock, flags);
#endif

	dma_cache_inv((u32)rd, sizeof(*rd));
	while ((count = RC32434_RBSIZE - (u32)DMA_COUNT(rd->control)) != 0) {
#ifdef CONFIG_IDT_USE_NAPI
		if (--rx_work_limit < 0)
			break;
#endif
		/* initialize the variables used later in this iteration */
		skb_new = NULL;
		devcs = rd->devcs;
		pkt_len = RCVPKT_LENGTH(devcs);
		skb = lp->rx_skb[lp->rx_next_done];
      
		if (count < 64) {
			lp->stats.rx_errors++;
			lp->stats.rx_dropped++;			
		}
		else if ((devcs & ETHRX_ld_m) != ETHRX_ld_m) {
			/* check that this is a whole packet */
			/* WARNING: DMA_FD bit incorrectly set in Rc32434 (errata ref #077) */
			lp->stats.rx_errors++;
			lp->stats.rx_dropped++;
		}
		else if (devcs & ETHRX_rok_m) {
			/* must be the (first and) last descriptor then */
			pkt_buf = (u8*)lp->rx_skb[lp->rx_next_done]->data;

			/* strip the 4-byte CRC from the reported length */
			pktuncrc_len = pkt_len - 4;
			/* invalidate the cache */
			dma_cache_inv((unsigned long)pkt_buf, pktuncrc_len);

			/* Malloc up new buffer. */
			skb_new = dev_alloc_skb(RC32434_RBSIZE + 2);

			if (skb_new != NULL) {
				/* Make room */
				skb_put(skb, pktuncrc_len);

				skb->protocol = eth_type_trans(skb, dev);

				/* pass the packet to upper layers */
#ifdef CONFIG_IDT_USE_NAPI
				netif_receive_skb(skb);
				received++;	/* charged against the NAPI budget below */
#else
				netif_rx(skb);
#endif

				dev->last_rx = jiffies;
				lp->stats.rx_packets++;
				lp->stats.rx_bytes += pktuncrc_len;

				if (IS_RCV_MP(devcs))
					lp->stats.multicast++;

				/* 16 bit align */
				skb_reserve(skb_new, 2);

				skb_new->dev = dev;
				lp->rx_skb[lp->rx_next_done] = skb_new;
			}
			else {
				ERR("no memory, dropping rx packet.\n");
				lp->stats.rx_errors++;
				lp->stats.rx_dropped++;
			}
		}
		else {
			/* This should only happen if we enable accepting broken packets */
			lp->stats.rx_errors++;
			lp->stats.rx_dropped++;
			
			/* add statistics counters */
			if (IS_RCV_CRC_ERR(devcs)) {
				DBG(2, "RX CRC error\n");
				lp->stats.rx_crc_errors++;
			} 
			else if (IS_RCV_LOR_ERR(devcs)) {
				DBG(2, "RX LOR error\n");
				lp->stats.rx_length_errors++;
			}				
			else if (IS_RCV_LE_ERR(devcs)) {
				DBG(2, "RX LE error\n");
				lp->stats.rx_length_errors++;
			}
			else if (IS_RCV_OVR_ERR(devcs)) {
				lp->stats.rx_over_errors++;
			}
			else if (IS_RCV_CV_ERR(devcs)) {
				/* code violation */
				DBG(2, "RX CV error\n");
				lp->stats.rx_frame_errors++;
			}
			else if (IS_RCV_CES_ERR(devcs)) {
				DBG(2, "RX Preamble error\n");
			}
		}
		
		rd->devcs = 0;
		
		/* restore descriptor's curr_addr */
		if (skb_new)
			rd->ca = CPHYSADDR(skb_new->data); 
		else
			rd->ca = CPHYSADDR(skb->data);
		
		rd->control = DMA_COUNT(RC32434_RBSIZE) | DMAD_cod_m | DMAD_iod_m;
		lp->rd_ring[(lp->rx_next_done - 1) & RC32434_RDS_MASK].control &= ~DMAD_cod_m;
		
		lp->rx_next_done = (lp->rx_next_done + 1) & RC32434_RDS_MASK;
		dma_cache_wback((u32)rd, sizeof(*rd));
		rd = &lp->rd_ring[lp->rx_next_done];
		/* ack (clear) the descriptor-done bit in the DMA status register */
		__raw_writel(~DMAS_d_m, &lp->rx_dma_regs->dmas);
		/* pull the next descriptor out of the cache before the loop test */
		dma_cache_inv((u32)rd, sizeof(*rd));
	}	
#ifdef CONFIG_IDT_USE_NAPI
	dev->quota -= received;
	*budget -= received;
	if (rx_work_limit < 0)
		goto not_done;
#endif
	
	dmas = __raw_readl(&lp->rx_dma_regs->dmas);
	
	if (dmas & DMAS_h_m) {
		/* ack the halt and error status bits */
		__raw_writel(~(DMAS_h_m | DMAS_e_m), &lp->rx_dma_regs->dmas);
#ifdef RC32434_PROC_DEBUG
		lp->dma_halt_cnt++;
#endif
		rd->devcs = 0;
		skb = lp->rx_skb[lp->rx_next_done];
		rd->ca = CPHYSADDR(skb->data);
		dma_cache_wback((u32)rd, sizeof(*rd));
		rc32434_chain_rx(lp, rd);
	}
	
#ifdef CONFIG_IDT_USE_NAPI
	netif_rx_complete(dev);
#endif
	/* Unmask (enable) the D, H and E interrupt bits in the Rx DMA mask */
	__raw_writel(__raw_readl(&lp->rx_dma_regs->dmasm) & ~(DMASM_d_m | DMASM_h_m | DMASM_e_m), &lp->rx_dma_regs->dmasm);
#ifdef CONFIG_IDT_USE_NAPI
	return 0;
 not_done:
	return 1;
#else
	spin_unlock_irqrestore(&lp->lock, flags);
	return;
#endif

	
}	
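The CONFIG_IDT_USE_NAPI paths above implement the legacy 2.6-era dev->poll contract, which is easy to lose in the #ifdef noise. Here is a compressed sketch of that contract with hypothetical names (my_poll and deliver_one_rx_packet are illustrations, not driver functions); the budget/quota bookkeeping and the 0/1 return values follow the old interface that the not_done label above implements.

#include <linux/netdevice.h>

static int deliver_one_rx_packet(struct net_device *dev); /* hypothetical */

static int my_poll(struct net_device *dev, int *budget)
{
	/* never deliver more packets than either limit allows */
	int work_limit = min(*budget, dev->quota);
	int received = 0;

	while (received < work_limit && deliver_one_rx_packet(dev))
		received++;

	dev->quota -= received;		/* charge the work we did */
	*budget -= received;

	if (received >= work_limit)
		return 1;		/* not done: stay on the poll list */

	netif_rx_complete(dev);		/* done: re-enable RX interrupts */
	return 0;
}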
Example #3
static int rc32434_rx(struct net_device *dev, int limit)
{
	struct rc32434_local *lp = netdev_priv(dev);
	volatile DMAD_t rd = &lp->rd_ring[lp->rx_next_done];
	struct sk_buff *skb, *skb_new;
	u8 *pkt_buf;
	u32 devcs, pkt_len, dmas;
	u32 pktuncrc_len;
	int count;

	dma_cache_inv((u32)rd, sizeof(*rd));
	for (count = 0; count < limit; count++) {
		/* stop once we reach a descriptor the DMA has not yet filled */
		if ((RC32434_RBSIZE - (u32)DMA_COUNT(rd->control)) == 0)
			break;

		/* initialize the variables used later in this iteration */
		skb_new = NULL;
		devcs = rd->devcs;
		pkt_len = RCVPKT_LENGTH(devcs);
		skb = lp->rx_skb[lp->rx_next_done];
      
		if ((devcs & ETHRX_ld_m) != ETHRX_ld_m) {
			/* check that this is a whole packet */
			/* WARNING: DMA_FD bit incorrectly set in Rc32434 (errata ref #077) */
			lp->stats.rx_errors++;
			lp->stats.rx_dropped++;
		}
		else if (devcs & ETHRX_rok_m) {
			/* must be the (first and) last descriptor then */
			pkt_buf = (u8*)lp->rx_skb[lp->rx_next_done]->data;

			/* strip the 4-byte CRC from the reported length */
			pktuncrc_len = pkt_len - 4;
			/* invalidate the cache */
			dma_cache_inv((unsigned long)pkt_buf, pktuncrc_len);

			/* Malloc up new buffer. */
			skb_new = netdev_alloc_skb(dev, RC32434_RBSIZE + 2);

			if (skb_new != NULL) {
				/* Make room */
				skb_put(skb, pktuncrc_len);

				skb->protocol = eth_type_trans(skb, dev);

				/* pass the packet to upper layers */
				netif_receive_skb(skb);

				dev->last_rx = jiffies;
				lp->stats.rx_packets++;
				lp->stats.rx_bytes += pktuncrc_len;

				if (IS_RCV_MP(devcs))
					lp->stats.multicast++;

				/* 16 bit align */
				skb_reserve(skb_new, 2);

				skb_new->dev = dev;
				lp->rx_skb[lp->rx_next_done] = skb_new;
			}
			else {
				ERR("no memory, dropping rx packet.\n");
				lp->stats.rx_errors++;
				lp->stats.rx_dropped++;
			}
		}
		else {
			/* This should only happen if we enable accepting broken packets */
			lp->stats.rx_errors++;
			lp->stats.rx_dropped++;
			
			/* add statistics counters */
			if (IS_RCV_CRC_ERR(devcs)) {
				DBG(2, "RX CRC error\n");
				lp->stats.rx_crc_errors++;
			} 
			else if (IS_RCV_LOR_ERR(devcs)) {
				DBG(2, "RX LOR error\n");
				lp->stats.rx_length_errors++;
			}				
			else if (IS_RCV_LE_ERR(devcs)) {
				DBG(2, "RX LE error\n");
				lp->stats.rx_length_errors++;
			}
			else if (IS_RCV_OVR_ERR(devcs)) {
				lp->stats.rx_over_errors++;
			}
			else if (IS_RCV_CV_ERR(devcs)) {
				/* code violation */
				DBG(2, "RX CV error\n");
				lp->stats.rx_frame_errors++;
			}
			else if (IS_RCV_CES_ERR(devcs)) {
				DBG(2, "RX Preamble error\n");
			}
		}
		rd->devcs = 0;
		
		/* restore descriptor's curr_addr */
		if (skb_new)
			rd->ca = CPHYSADDR(skb_new->data);
		else
			rd->ca = CPHYSADDR(skb->data);
		
		rd->control = DMA_COUNT(RC32434_RBSIZE) | DMAD_cod_m | DMAD_iod_m;
		lp->rd_ring[(lp->rx_next_done - 1) & RC32434_RDS_MASK].control &= ~DMAD_cod_m;
		
		lp->rx_next_done = (lp->rx_next_done + 1) & RC32434_RDS_MASK;
		dma_cache_wback((u32)rd, sizeof(*rd));
		rd = &lp->rd_ring[lp->rx_next_done];
		/* ack (clear) the descriptor-done bit in the DMA status register */
		__raw_writel(~DMAS_d_m, &lp->rx_dma_regs->dmas);
		/* pull the next descriptor out of the cache before the loop test */
		dma_cache_inv((u32)rd, sizeof(*rd));
	}	
	
	dmas = __raw_readl(&lp->rx_dma_regs->dmas);
	
	if (dmas & DMAS_h_m) {
		/* ack the halt and error status bits */
		__raw_writel(~(DMAS_h_m | DMAS_e_m), &lp->rx_dma_regs->dmas);
#ifdef RC32434_PROC_DEBUG
		lp->dma_halt_cnt++;
#endif
		rd->devcs = 0;
		skb = lp->rx_skb[lp->rx_next_done];
		rd->ca = CPHYSADDR(skb->data);
		dma_cache_wback((u32)rd, sizeof(*rd));
		rc32434_chain_rx(lp, rd);
	}
	
	return count;
}
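rc32434_rx()'s process-up-to-limit, return-the-count shape is exactly what the later napi_struct interface expects from a poll callback. A hedged sketch of the glue follows, assuming a hypothetical napi member embedded in rc32434_local and a dev back-pointer; neither appears in the excerpt, so both are illustrative only.

#include <linux/netdevice.h>

static int rc32434_napi_poll(struct napi_struct *napi, int budget)
{
	/* assumed: struct rc32434_local { ... struct napi_struct napi;
	 *                                     struct net_device *dev; ... }; */
	struct rc32434_local *lp =
		container_of(napi, struct rc32434_local, napi);
	int work_done = rc32434_rx(lp->dev, budget);

	if (work_done < budget) {
		/* ring drained: stop polling, then re-enable RX DMA
		 * interrupts (cf. the dmasm write in the tasklet variant) */
		napi_complete(napi);
	}
	return work_done;
}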