static int lan_saa9730_rx(struct net_device *dev)
{
	struct lan_saa9730_private *lp =
	    (struct lan_saa9730_private *) dev->priv;
	int len = 0;
	struct sk_buff *skb = 0;
	unsigned int rx_status;
	int BufferIndex;
	int PacketIndex;
	unsigned int *pPacket;
	unsigned char *pData;

	if (lan_saa9730_debug > 5)
		printk("lan_saa9730_rx interrupt\n");

	/* Clear receive interrupts. */
	OUTL(DMA_STATUS_MAC_RX_INT | DMA_STATUS_RX_INT |
	     DMA_STATUS_RX_TO_INT, &lp->lan_saa9730_regs->DmaStatus);

	/* Address next packet */
	if (lp->NextRcvToUseIsA)
		BufferIndex = 0;
	else
		BufferIndex = 1;
	PacketIndex = lp->NextRcvPacketIndex;
	pPacket = (unsigned int *) lp->RcvBuffer[BufferIndex][PacketIndex];
	rx_status = le32_to_cpu(*pPacket);

	/* Process each packet. */
	while ((rx_status & RX_STAT_CTL_OWNER_MSK) ==
	       (RXSF_HWDONE << RX_STAT_CTL_OWNER_SHF)) {
		/* Check the rx status. */
		if (rx_status & (RX_STATUS_GOOD << RX_STAT_CTL_STATUS_SHF)) {
			/* Received packet is good. */
			len = (rx_status & RX_STAT_CTL_LENGTH_MSK) >>
			    RX_STAT_CTL_LENGTH_SHF;

			pData = (unsigned char *) pPacket;
			pData += 4;
			skb = dev_alloc_skb(len + 2);
			if (skb == 0) {
				printk
				    ("%s: Memory squeeze, deferring packet.\n",
				     dev->name);
				lp->stats.rx_dropped++;
			} else {
				lp->stats.rx_bytes += len;
				lp->stats.rx_packets++;
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align */
				skb_put(skb, len);	/* make room */
				eth_copy_and_sum(skb,
						 (unsigned char *) pData,
						 len, 0);
				skb->protocol = eth_type_trans(skb, dev);
				netif_rx(skb);
			}
		} else {
			/* We got an error packet. */
			if (lan_saa9730_debug > 2)
				printk("lan_saa9730_rx: error packet, rx_status = %x\n",
				       rx_status);
Example #2
/******************************************************************************
 *
 *      SkHandleDataPacket() - Send a data packet to upper layer
 *
 * Description:
 *      Send a data packet to the communication protocol stack
 *
 * Returns:
 *      Nothing
 */
void SkHandleDataPacket(
SK_AC   *pAC,				/* Adapter context */
SK_U32  *pMsgBuff,			/* Message buffer */
SK_U32  FrameLength)		/* Message size */
{
	struct  sk_buff *pMsg = NULL;

	pMsg = alloc_skb(FrameLength+2, GFP_ATOMIC);
	if (pMsg == NULL) {
		printk("SkHandleDataPacket: No RX buffer!!\n");
		return;
	}
#ifdef VERBOSE_ASF_FIFO_DBUG_RPINTS
	printk("SkHandleDataPacket: Send packet!!\n");
#endif
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22)
	skb_copy_to_linear_data(pMsg, pMsgBuff,
		FrameLength);
#else
	eth_copy_and_sum(pMsg, (char *) pMsgBuff,
		FrameLength, 0);
#endif
	skb_put(pMsg, FrameLength);
	pMsg->ip_summed = CHECKSUM_NONE;
	pMsg->dev = pAC->dev[0];
	pMsg->protocol = eth_type_trans(pMsg, pAC->dev[0]);

#ifdef CONFIG_SK98LIN_NAPI
	netif_receive_skb(pMsg);
#else
	netif_rx(pMsg);
#endif
	pAC->dev[0]->last_rx = jiffies;
	return;
}
Example #3
static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp,
			      struct hpc3_ethregs *hregs,
			      struct sgiseeq_regs *sregs)
{
	struct sgiseeq_rx_desc *rd;
	struct sk_buff *skb = 0;
	unsigned char pkt_status;
	unsigned char *pkt_pointer = 0;
	int len = 0;
	unsigned int orig_end = PREV_RX(sp->rx_new);

	/* Service every received packet. */
	for_each_rx(rd, sp) {
		len = PKT_BUF_SZ - (rd->rdma.cntinfo & HPCDMA_BCNT) - 3;
		pkt_pointer = (unsigned char *)(long)rd->buf_vaddr;
		pkt_status = pkt_pointer[len + 2];

		if (pkt_status & SEEQ_RSTAT_FIG) {
			/* Packet is OK. */
			skb = dev_alloc_skb(len + 2);

			if (skb) {
				skb->dev = dev;
				skb_reserve(skb, 2);
				skb_put(skb, len);

				/* Copy out of kseg1 to avoid silly cache flush. */
				eth_copy_and_sum(skb, pkt_pointer + 2, len, 0);
				skb->protocol = eth_type_trans(skb, dev);

				/* We don't want to receive our own packets */
				if (memcmp(eth_hdr(skb)->h_source, dev->dev_addr, ETH_ALEN)) {
					netif_rx(skb);
					dev->last_rx = jiffies;
					sp->stats.rx_packets++;
					sp->stats.rx_bytes += len;
				} else {
					/* Silently drop my own packets */
					dev_kfree_skb_irq(skb);
				}
			} else {
				printk (KERN_NOTICE "%s: Memory squeeze, deferring packet.\n",
					dev->name);
				sp->stats.rx_dropped++;
			}
		} else {
			record_rx_errors(sp, pkt_status);
		}

		/* Return the entry to the ring pool. */
		rd->rdma.cntinfo = RCNTINFO_INIT;
		sp->rx_new = NEXT_RX(sp->rx_new);
	}
Example #4
static void hpna_read_irq( purb_t urb )
{
//	struct net_device *net_dev = urb->context;
	struct device *net_dev = urb->context;
	usb_hpna_t *hpna = net_dev->priv;
	int	count = urb->actual_length, res;
#if 1
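	/* The last four bytes of the transfer hold the little-endian Rx status word. */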
	__u8    *p = (__u8 *)(hpna->rx_buff + count - 4);
	__u32	rx_status = le32_to_cpu 
			( p[0] | p[1]<<8 | p[2]<<16 | p[3]<<24 );
#else
	int	rx_status = *(int *)(hpna->rx_buff + count - 4);
#endif


	if ( urb->status ) {
		info( "%s: RX status %d\n", net_dev->name, urb->status );
		goto goon;
	}

	if ( !count )
		goto goon;
/*	if ( rx_status & 0x00010000 )
		goto goon;
*/	
	if ( rx_status & 0x000e0000 ) {
		dbg("%s: error receiving packet %x",
			net_dev->name, rx_status & 0xe0000);
		hpna->stats.rx_errors++;
		if(rx_status & 0x060000) hpna->stats.rx_length_errors++;
		if(rx_status & 0x080000) hpna->stats.rx_crc_errors++;
		if(rx_status & 0x100000) hpna->stats.rx_frame_errors++;
	} else {
		struct sk_buff	*skb;
		__u16 		pkt_len = (rx_status & 0xfff) - 8;


		if((skb = dev_alloc_skb(pkt_len+2)) != NULL ) {
			skb->dev = net_dev;
			skb_reserve(skb, 2);
			eth_copy_and_sum(skb, hpna->rx_buff, pkt_len, 0);
			skb_put(skb, pkt_len);
		} else
			goto	goon;
		skb->protocol = eth_type_trans(skb, net_dev);
		netif_rx(skb);
		hpna->stats.rx_packets++;
		hpna->stats.rx_bytes += pkt_len;
	}
goon:
	if ( (res = usb_submit_urb( &hpna->rx_urb )) )
		warn("failed rx_urb %d", res);
}
Example #5
static int mambonet_poll(struct net_device *dev, int *budget)
{
	struct netdev_private *np = dev->priv;
	int devno = np->devno;
	char buffer[1600];
	int ns;
	struct sk_buff *skb;
	int frames = 0;
	int max_frames = min(*budget, dev->quota);
	int ret = 0;

	while ((ns = mambonet_recv(devno, buffer, 1600)) > 0) {
		if ((skb = dev_alloc_skb(ns + 2)) != NULL) {
			skb->dev = dev;
			skb_reserve(skb, 2);	/* 16 byte align the IP
						 * header */
#ifdef HAS_IP_COPYSUM
			eth_copy_and_sum(skb, buffer, ns, 0);
			skb_put(skb, ns);
#else
			memcpy(skb_put(skb, ns), buffer, ns);
#endif
			skb->protocol = eth_type_trans(skb, dev);

			if (dev->irq)
				netif_receive_skb(skb);
			else
				netif_rx(skb);

			dev->last_rx = jiffies;
			np->stats.rx_packets++;
			np->stats.rx_bytes += ns;
		} else {
			printk("Failed to allocate skbuff, "
			       "dropping packet\n");
			np->stats.rx_dropped++;
			/* wait for another cycle */
			return 1;
		}
		++frames;
		if (frames > max_frames) {
			ret = 1;
			break;
		}
	}
	*budget -= frames;
	dev->quota -= frames;

	if ((!ret) && (dev->irq))
		netif_rx_complete(dev);

	return ret;
}
Example #6
static void read_bulk_callback(struct urb *urb)
{
	rtl8150_t *dev;
	int pkt_len, res;
	struct sk_buff *skb;
	struct net_device *netdev;
	u16 rx_stat;

	dev = urb->context;
	if (!dev) {
		warn("!dev");
		return;
	}
	netdev = dev->netdev;
	if (!netif_device_present(netdev)) {
		warn("netdev is not present");
		return;
	}
	switch (urb->status) {
	case 0:
		break;
	case -ENOENT:
		return;
	case -ETIMEDOUT:
		warn("need a device reset?..");
		goto goon;
	default:
		warn("Rx status %d", urb->status);
		goto goon;
	}

	pkt_len = urb->actual_length - 4;
	rx_stat = le16_to_cpu(*(u16 *) (dev->rx_buff + pkt_len));

	if (!(skb = dev_alloc_skb(pkt_len + 2)))
		goto goon;
	skb->dev = netdev;
	skb_reserve(skb, 2);
	eth_copy_and_sum(skb, dev->rx_buff, pkt_len, 0);
	skb_put(skb, pkt_len);
	skb->protocol = eth_type_trans(skb, netdev);
	netif_rx(skb);
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += pkt_len;
goon:
	FILL_BULK_URB(dev->rx_urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1),
		      dev->rx_buff, RTL8150_MAX_MTU, read_bulk_callback, dev);
	if ((res = usb_submit_urb(dev->rx_urb)))
		warn("%s: Rx urb submission failed %d", netdev->name, res);
}
Example #7
static void elmc_rcv_int(struct net_device *dev)
{
	int status;
	unsigned short totlen;
	struct sk_buff *skb;
	struct rbd_struct *rbd;
	struct priv *p = (struct priv *) dev->priv;

	for (; (status = p->rfd_top->status) & STAT_COMPL;) {
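		/* rbd_offset points at the receive buffer descriptor holding this frame's data. */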
		rbd = (struct rbd_struct *) make32(p->rfd_top->rbd_offset);

		if (status & STAT_OK) {		/* frame received without error? */
			if ((totlen = rbd->status) & RBD_LAST) {	/* the first and the last buffer? */
				totlen &= RBD_MASK;	/* length of this frame */
				rbd->status = 0;
				skb = (struct sk_buff *) dev_alloc_skb(totlen + 2);
				if (skb != NULL) {
					skb->dev = dev;
					skb_reserve(skb, 2);	/* 16 byte alignment */
					skb_put(skb,totlen);
					eth_copy_and_sum(skb, (char *) p->base+(unsigned long) rbd->buffer,totlen,0);
					skb->protocol = eth_type_trans(skb, dev);
					netif_rx(skb);
					dev->last_rx = jiffies;
					p->stats.rx_packets++;
					p->stats.rx_bytes += totlen;
				} else {
					p->stats.rx_dropped++;
				}
			} else {
				printk(KERN_WARNING "%s: received oversized frame.\n", dev->name);
				p->stats.rx_dropped++;
			}
		} else {	/* frame !(ok), only with 'save-bad-frames' */
			printk(KERN_WARNING "%s: oops! rfd-error-status: %04x\n", dev->name, status);
			p->stats.rx_errors++;
		}
		p->rfd_top->status = 0;
		p->rfd_top->last = RFD_SUSP;
		p->rfd_last->last = 0;	/* delete RU_SUSP  */
		p->rfd_last = p->rfd_top;
		p->rfd_top = (struct rfd_struct *) make32(p->rfd_top->next);	/* step to next RFD */
	}
}
Example #8
static inline int rtl8169_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
                                      struct RxDesc *desc,
                                      struct net_device *dev)
{
    int ret = -1;

    if (pkt_size < rx_copybreak) {
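        /* Small frame: copy it into a fresh skb so the Rx buffer can be handed back to the chip. */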
        struct sk_buff *skb;

        skb = dev_alloc_skb(pkt_size + 2);
        if (skb) {
            skb->dev = dev;
            skb_reserve(skb, 2);
            eth_copy_and_sum(skb, sk_buff[0]->tail, pkt_size, 0);
            *sk_buff = skb;
            rtl8169_return_to_asic(desc);
            ret = 0;
        }
    }
    return ret;
}
Example #9
/*
 * We have a good packet(s), get it/them out of the buffers.
 */
static void
sonic_rx(struct net_device *dev)
{
    unsigned int base_addr = dev->base_addr;
    struct sonic_local *lp = (struct sonic_local *)dev->priv;
    sonic_rd_t *rd = &lp->rda[lp->cur_rx & SONIC_RDS_MASK];
    int status;

    while (rd->in_use == 0) {
	struct sk_buff *skb;
	int pkt_len;
	unsigned char *pkt_ptr;
	
	status = rd->rx_status;
	if (sonic_debug > 3)
	  printk ("status %x, cur_rx %d, cur_rra %x\n",status,lp->cur_rx,lp->cur_rra);
	if (status & SONIC_RCR_PRX) {	    
	    pkt_len = rd->rx_pktlen;
	    pkt_ptr = (char *)sonic_chiptomem((rd->rx_pktptr_h << 16) +
						      rd->rx_pktptr_l);
	    
	    if (sonic_debug > 3)
	      printk ("pktptr %p (rba %p) h:%x l:%x, bsize h:%x l:%x\n", pkt_ptr,lp->rba,
		      rd->rx_pktptr_h,rd->rx_pktptr_l,
		      SONIC_READ(SONIC_RBWC1),SONIC_READ(SONIC_RBWC0));
	
	    /* Malloc up new buffer. */
	    skb = dev_alloc_skb(pkt_len+2);
	    if (skb == NULL) {
		printk("%s: Memory squeeze, dropping packet.\n", dev->name);
		lp->stats.rx_dropped++;
		break;
	    }
	    skb->dev = dev;
	    skb_reserve(skb,2);	/* 16 byte align */
	    skb_put(skb,pkt_len);	/* Make room */
	    eth_copy_and_sum(skb, pkt_ptr, pkt_len, 0);
	    skb->protocol=eth_type_trans(skb,dev);
	    netif_rx(skb);			/* pass the packet to upper layers */
	    lp->stats.rx_packets++;
	    lp->stats.rx_bytes += pkt_len;
	    
	} else {
	    /* This should only happen, if we enable accepting broken packets. */
	    lp->stats.rx_errors++;
	    if (status & SONIC_RCR_FAER) lp->stats.rx_frame_errors++;
	    if (status & SONIC_RCR_CRCR) lp->stats.rx_crc_errors++;
	}
	
	rd->in_use = 1;
	rd = &lp->rda[(++lp->cur_rx) & SONIC_RDS_MASK];
	/* now give back the buffer to the receive buffer area */
	if (status & SONIC_RCR_LPKT) {
	    /*
	     * this was the last packet out of the current receive buffer
	     * give the buffer back to the SONIC
	     */
	    lp->cur_rra += sizeof(sonic_rr_t);
	    if (lp->cur_rra > (lp->rra_laddr + (SONIC_NUM_RRS-1) * sizeof(sonic_rr_t)))
		lp->cur_rra = lp->rra_laddr;
	    SONIC_WRITE(SONIC_RWP, lp->cur_rra & 0xffff);
	} else
	    printk ("%s: rx desc without RCR_LPKT. Shouldn't happen !?\n",dev->name);
    }
    /*
     * If any worth-while packets have been received, dev_rint()
     * has done a mark_bh(NET_BH) for us and will work on them
     * when we get to the bottom-half routine.
     */
    return;
}
Example #10
static int tulip_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
	int received = 0;

	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
			   tp->rx_ring[entry].status);
	/* If we own the next entry, it is a new packet. Send it up. */
	while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
		s32 status = le32_to_cpu(tp->rx_ring[entry].status);

		if (tulip_debug > 5)
			printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
				   dev->name, entry, status);
		if (--rx_work_limit < 0)
			break;
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					if (tulip_debug > 1)
						printk(KERN_WARNING "%s: Oversized Ethernet frame "
							   "spanned multiple buffers, status %8.8x!\n",
							   dev->name, status);
					tp->stats.rx_length_errors++;
				}
			} else if (status & RxDescFatalErr) {
				/* There was a fatal error. */
				if (tulip_debug > 2)
					printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
						   dev->name, status);
				tp->stats.rx_errors++; /* end of a packet.*/
				if (status & 0x0890) tp->stats.rx_length_errors++;
				if (status & 0x0004) tp->stats.rx_frame_errors++;
				if (status & 0x0002) tp->stats.rx_crc_errors++;
				if (status & 0x0001) tp->stats.rx_fifo_errors++;
			}
		} else {
			/* Omit the four octet CRC from the length. */
			short pkt_len = ((status >> 16) & 0x7ff) - 4;
			struct sk_buff *skb;

#ifndef final_version
			if (pkt_len > 1518) {
				printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
					   dev->name, pkt_len, pkt_len);
				pkt_len = 1518;
				tp->stats.rx_length_errors++;
			}
#endif

			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < tulip_rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(tp->pdev,
							    tp->rx_buffers[entry].mapping,
							    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
				eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->data,
						 pkt_len, 0);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
				       tp->rx_buffers[entry].skb->data,
				       pkt_len);
#endif
				pci_dma_sync_single_for_device(tp->pdev,
							       tp->rx_buffers[entry].mapping,
							       pkt_len, PCI_DMA_FROMDEVICE);
			} else { 	/* Pass up the skb already on the Rx ring. */
				char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
						     pkt_len);

#ifndef final_version
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
					       "do not match in tulip_rx: %08x vs. %Lx %p / %p.\n",
					       dev->name,
					       le32_to_cpu(tp->rx_ring[entry].buffer1),
					       (long long)tp->rx_buffers[entry].mapping,
					       skb->head, temp);
				}
#endif

				pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
						 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

				tp->rx_buffers[entry].skb = NULL;
				tp->rx_buffers[entry].mapping = 0;
			}
			skb->protocol = eth_type_trans(skb, dev);

			netif_rx(skb);

			dev->last_rx = jiffies;
			tp->stats.rx_packets++;
			tp->stats.rx_bytes += pkt_len;
		}
		received++;
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}
	return received;
}
Example #11
/*
 * Au1000 receive routine.
 */
static int au1000_rx(struct net_device *dev)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	struct sk_buff *skb;
	volatile rx_dma_t *prxd;
	u32 buff_stat, status;
	db_dest_t *pDB;

	if (au1000_debug > 4)
		printk("%s: au1000_rx head %d\n", dev->name, aup->rx_head);

	prxd = aup->rx_dma_ring[aup->rx_head];
	buff_stat = prxd->buff_stat;
	while (buff_stat & RX_T_DONE)  {
		status = prxd->status;
		pDB = aup->rx_db_inuse[aup->rx_head];
		update_rx_stats(dev, status);
		if (!(status & RX_ERROR))  {

			/* good frame */
			skb = dev_alloc_skb((status & RX_FRAME_LEN_MASK) + 2);
			if (skb == NULL) {
				printk(KERN_ERR
				       "%s: Memory squeeze, dropping packet.\n",
				       dev->name);
				aup->stats.rx_dropped++;
				continue;
			}
			skb->dev = dev;
			skb_reserve(skb, 2);	/* 16 byte IP header align */
			eth_copy_and_sum(skb, (unsigned char *)pDB->vaddr, 
					status & RX_FRAME_LEN_MASK, 0);
			skb_put(skb, status & RX_FRAME_LEN_MASK);
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);	/* pass the packet to upper layers */
		}
		else {
			if (au1000_debug > 4) {
				if (status & RX_MISSED_FRAME) 
					printk("rx miss\n");
				if (status & RX_WDOG_TIMER) 
					printk("rx wdog\n");
				if (status & RX_RUNT) 
					printk("rx runt\n");
				if (status & RX_OVERLEN) 
					printk("rx overlen\n");
				if (status & RX_COLL)
					printk("rx coll\n");
				if (status & RX_MII_ERROR)
					printk("rx mii error\n");
				if (status & RX_CRC_ERROR)
					printk("rx crc error\n");
				if (status & RX_LEN_ERROR)
					printk("rx len error\n");
				if (status & RX_U_CNTRL_FRAME)
					printk("rx u control frame\n");
				if (status & RX_MISSED_FRAME)
					printk("rx miss\n");
			}
		}
		prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
		aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);
		au_sync();

		/* next descriptor */
		prxd = aup->rx_dma_ring[aup->rx_head];
		buff_stat = prxd->buff_stat;
		dev->last_rx = jiffies;
	}
	return 0;
}
Example #12
static void sun3_82586_rcv_int(struct net_device *dev)
{
	int status,cnt=0;
	unsigned short totlen;
	struct sk_buff *skb;
	struct rbd_struct *rbd;
	struct priv *p = (struct priv *) dev->priv;

	if(debuglevel > 0)
		printk("R");

	for(;(status = p->rfd_top->stat_high) & RFD_COMPL;)
	{
			rbd = (struct rbd_struct *) make32(p->rfd_top->rbd_offset);

			if(status & RFD_OK) /* frame received without error? */
			{
				if( (totlen = swab16(rbd->status)) & RBD_LAST) /* the first and the last buffer? */
				{
					totlen &= RBD_MASK; /* length of this frame */
					rbd->status = 0;
					skb = (struct sk_buff *) dev_alloc_skb(totlen+2);
					if(skb != NULL)
					{
						skb->dev = dev;
						skb_reserve(skb,2);
						skb_put(skb,totlen);
						eth_copy_and_sum(skb,(char *) p->base+swab32((unsigned long) rbd->buffer),totlen,0);
						skb->protocol=eth_type_trans(skb,dev);
						netif_rx(skb);
						p->stats.rx_packets++;
					}
					else
						p->stats.rx_dropped++;
				}
				else
				{
					int rstat;
						 /* free all RBD's until RBD_LAST is set */
					totlen = 0;
					while(!((rstat=swab16(rbd->status)) & RBD_LAST))
					{
						totlen += rstat & RBD_MASK;
						if(!rstat)
						{
							printk("%s: Whoops .. no end mark in RBD list\n",dev->name);
							break;
						}
						rbd->status = 0;
						rbd = (struct rbd_struct *) make32(rbd->next);
					}
					totlen += rstat & RBD_MASK;
					rbd->status = 0;
					printk("%s: received oversized frame! length: %d\n",dev->name,totlen);
					p->stats.rx_dropped++;
			 }
		}
		else /* frame !(ok), only with 'save-bad-frames' */
		{
			printk("%s: oops! rfd-error-status: %04x\n",dev->name,status);
			p->stats.rx_errors++;
		}
		p->rfd_top->stat_high = 0;
		p->rfd_top->last = RFD_SUSP; /* maybe exchange by RFD_LAST */
		p->rfd_top->rbd_offset = 0xffff;
		p->rfd_last->last = 0;				/* delete RFD_SUSP	*/
		p->rfd_last = p->rfd_top;
		p->rfd_top = (struct rfd_struct *) make32(p->rfd_top->next); /* step to next RFD */
		p->scb->rfa_offset = make16(p->rfd_top);

		if(debuglevel > 0)
			printk("%d",cnt++);
	}

	if(automatic_resume)
	{
		WAIT_4_SCB_CMD();
		p->scb->cmd_ruc = RUC_RESUME;
		sun3_attn586();
		WAIT_4_SCB_CMD_RUC();
	}

#ifdef WAIT_4_BUSY
	{
		int i;
		for(i=0;i<1024;i++)
		{
			if(p->rfd_top->status)
				break;
			DELAY_16();
			if(i == 1023)
				printk("%s: RU hasn't fetched next RFD (not busy/complete)\n",dev->name);
		}
	}
#endif

#if 0
	if(!at_least_one)
	{
		int i;
		volatile struct rfd_struct *rfds=p->rfd_top;
		volatile struct rbd_struct *rbds;
		printk("%s: received a FC intr. without having a frame: %04x %d\n",dev->name,status,old_at_least);
		for(i=0;i< (p->num_recv_buffs+4);i++)
		{
			rbds = (struct rbd_struct *) make32(rfds->rbd_offset);
			printk("%04x:%04x ",rfds->status,rbds->status);
			rfds = (struct rfd_struct *) make32(rfds->next);
		}
		printk("\nerrs: %04x %04x stat: %04x\n",(int)p->scb->rsc_errs,(int)p->scb->ovrn_errs,(int)p->scb->status);
		printk("\nerrs: %04x %04x rus: %02x, cus: %02x\n",(int)p->scb->rsc_errs,(int)p->scb->ovrn_errs,(int)p->scb->rus,(int)p->scb->cus);
	}
	old_at_least = at_least_one;
#endif

	if(debuglevel > 0)
		printk("r");
}
Example #13
static int tulip_rx(struct net_device *dev)
{
	struct tulip_private *tp = (struct tulip_private *)dev->priv;
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
	int received = 0;

#ifdef CONFIG_NET_HW_FLOWCONTROL
        int drop = 0, mit_sel = 0;

/* that one buffer is needed for mit activation; or might be a
   bug in the ring buffer code; check later -- JHS*/

        if (rx_work_limit >=RX_RING_SIZE) rx_work_limit--;
#endif

	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
			   tp->rx_ring[entry].status);
	/* If we own the next entry, it is a new packet. Send it up. */
	while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
		s32 status = le32_to_cpu(tp->rx_ring[entry].status);

		if (tulip_debug > 5)
			printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
				   dev->name, entry, status);
		if (--rx_work_limit < 0)
			break;
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					if (tulip_debug > 1)
						printk(KERN_WARNING "%s: Oversized Ethernet frame "
							   "spanned multiple buffers, status %8.8x!\n",
							   dev->name, status);
					tp->stats.rx_length_errors++;
				}
			} else if (status & RxDescFatalErr) {
				/* There was a fatal error. */
				if (tulip_debug > 2)
					printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
						   dev->name, status);
				tp->stats.rx_errors++; /* end of a packet.*/
				if (status & 0x0890) tp->stats.rx_length_errors++;
				if (status & 0x0004) tp->stats.rx_frame_errors++;
				if (status & 0x0002) tp->stats.rx_crc_errors++;
				if (status & 0x0001) tp->stats.rx_fifo_errors++;
			}
		} else {
			/* Omit the four octet CRC from the length. */
			short pkt_len = ((status >> 16) & 0x7ff) - 4;
			struct sk_buff *skb;

#ifndef final_version
			if (pkt_len > 1518) {
				printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
					   dev->name, pkt_len, pkt_len);
				pkt_len = 1518;
				tp->stats.rx_length_errors++;
			}
#endif

#ifdef CONFIG_NET_HW_FLOWCONTROL
                        drop = atomic_read(&netdev_dropping);
                        if (drop)
                                goto throttle;
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < tulip_rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single(tp->pdev,
						    tp->rx_buffers[entry].mapping,
						    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
				eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail,
						 pkt_len, 0);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
				       tp->rx_buffers[entry].skb->tail,
				       pkt_len);
#endif
			} else { 	/* Pass up the skb already on the Rx ring. */
				char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
						     pkt_len);

#ifndef final_version
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
					       "do not match in tulip_rx: %08x vs. %Lx %p / %p.\n",
					       dev->name,
					       le32_to_cpu(tp->rx_ring[entry].buffer1),
					       (long long)tp->rx_buffers[entry].mapping,
					       skb->head, temp);
				}
#endif

				pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
						 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

				tp->rx_buffers[entry].skb = NULL;
				tp->rx_buffers[entry].mapping = 0;
			}
			skb->protocol = eth_type_trans(skb, dev);
#ifdef CONFIG_NET_HW_FLOWCONTROL
                        mit_sel =
#endif
			netif_rx(skb);

#ifdef CONFIG_NET_HW_FLOWCONTROL
                        switch (mit_sel) {
                        case NET_RX_SUCCESS:
                        case NET_RX_CN_LOW:
                        case NET_RX_CN_MOD:
                                break;

                        case NET_RX_CN_HIGH:
                                rx_work_limit -= NET_RX_CN_HIGH; /* additional*/
                                break;
                        case NET_RX_DROP:
                                rx_work_limit = -1;
                                break;
                        default:
                                printk("unknown feedback return code %d\n", mit_sel);
                                break;
                        }

                        drop = atomic_read(&netdev_dropping);
                        if (drop) {
throttle:
                                rx_work_limit = -1;
                                mit_sel = NET_RX_DROP;

                                if (tp->fc_bit) {
                                        long ioaddr = dev->base_addr;

                                        /* disable Rx & RxNoBuf ints. */
                                        outl(tulip_tbl[tp->chip_id].valid_intrs&RX_A_NBF_STOP, ioaddr + CSR7);
                                        set_bit(tp->fc_bit, &netdev_fc_xoff);
                                }
                        }
#endif
			dev->last_rx = jiffies;
			tp->stats.rx_packets++;
			tp->stats.rx_bytes += pkt_len;
		}
		received++;
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}
#ifdef CONFIG_NET_HW_FLOWCONTROL

        /* We use this simplistic scheme for IM. It's proven by
           real life installations. We can have IM enabled
           continuously but this would cause unnecessary latency.
           Unfortunately we can't use all the NET_RX_* feedback here.
           This would turn on IM for devices that are not contributing
           to backlog congestion with unnecessary latency.

           We monitor the device RX-ring and have:

           HW Interrupt Mitigation either ON or OFF.

           ON:  More than 1 pkt received (per intr.) OR we are dropping
           OFF: Only 1 pkt received

           Note. We only use min and max (0, 15) settings from mit_table */


        if( tp->flags &  HAS_INTR_MITIGATION) {
                if((received > 1 || mit_sel == NET_RX_DROP)
                   && tp->mit_sel != 15 ) {
                        tp->mit_sel = 15;
                        tp->mit_change = 1; /* Force IM change */
                }
                if((received <= 1 && mit_sel != NET_RX_DROP) && tp->mit_sel != 0 ) {
                        tp->mit_sel = 0;
                        tp->mit_change = 1; /* Force IM change */
                }
        }

        return RX_RING_SIZE+1; /* maxrx+1 */
#else
	return received;
#endif
}
Example #14
static void
fec_enet_rx(struct net_device *dev)
{
	struct	fec_enet_private *fep;
	volatile fec_t	*fecp;
	volatile cbd_t *bdp;
	struct	sk_buff	*skb;
	ushort	pkt_len;
	__u8 *data;

	fep = dev->priv;
	fecp = (volatile fec_t*)dev->base_addr;

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) {

#ifndef final_version
	/* Since we have allocated space to hold a complete frame,
	 * the last indicator should be set.
	 */
	if ((bdp->cbd_sc & BD_ENET_RX_LAST) == 0)
		printk("FEC ENET: rcv is not +last\n");
#endif

	/* Check for errors. */
	if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
			   BD_ENET_RX_CR | BD_ENET_RX_OV)) {
		fep->stats.rx_errors++;
		if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
		/* Frame too long or too short. */
			fep->stats.rx_length_errors++;
		}
		if (bdp->cbd_sc & BD_ENET_RX_NO)	/* Frame alignment */
			fep->stats.rx_frame_errors++;
		if (bdp->cbd_sc & BD_ENET_RX_CR)	/* CRC Error */
			fep->stats.rx_crc_errors++;
		if (bdp->cbd_sc & BD_ENET_RX_OV)	/* FIFO overrun */
			fep->stats.rx_crc_errors++;
	}

	/* Report late collisions as a frame error.
	 * On this error, the BD is closed, but we don't know what we
	 * have in the buffer.  So, just drop this frame on the floor.
	 */
	if (bdp->cbd_sc & BD_ENET_RX_CL) {
		fep->stats.rx_errors++;
		fep->stats.rx_frame_errors++;
		goto rx_processing_done;
	}

	/* Process the incoming frame.
	 */
	fep->stats.rx_packets++;
	pkt_len = bdp->cbd_datlen;
	fep->stats.rx_bytes += pkt_len;
	data = fep->rx_vaddr[bdp - fep->rx_bd_base];

#ifdef CONFIG_FEC_PACKETHOOK
	/* Packet hook ... */
	if (fep->ph_rxhandler) {
		if (((struct ethhdr *)data)->h_proto == fep->ph_proto) {
			switch (fep->ph_rxhandler(data, pkt_len, regval,
						  fep->ph_priv)) {
			case 1:
				goto rx_processing_done;
				break;
			case 0:
				break;
			default:
				fep->stats.rx_errors++;
				goto rx_processing_done;
			}
		}
	}

	/* If it wasn't filtered - copy it to an sk buffer. */
#endif

	/* This does 16 byte alignment, exactly what we need.
	 * The packet length includes FCS, but we don't want to
	 * include that when passing upstream as it messes up
	 * bridging applications.
	 */
	skb = dev_alloc_skb(pkt_len-4);

	if (skb == NULL) {
		printk("%s: Memory squeeze, dropping packet.\n", dev->name);
		fep->stats.rx_dropped++;
	} else {
		skb->dev = dev;
		skb_put(skb,pkt_len-4);	/* Make room */
		eth_copy_and_sum(skb, data, pkt_len-4, 0);
		skb->protocol=eth_type_trans(skb,dev);
		netif_rx(skb);
	}
  rx_processing_done:

	/* Clear the status flags for this buffer.
	*/
	bdp->cbd_sc &= ~BD_ENET_RX_STATS;

	/* Mark the buffer empty.
	*/
	bdp->cbd_sc |= BD_ENET_RX_EMPTY;

	/* Update BD pointer to next entry.
	*/
	if (bdp->cbd_sc & BD_ENET_RX_WRAP)
		bdp = fep->rx_bd_base;
	else
		bdp++;

#if 1
	/* Doing this here will keep the FEC running while we process
	 * incoming frames.  On a heavily loaded network, we should be
	 * able to keep up at the expense of system resources.
	 */
	fecp->fec_r_des_active = 0x01000000;
#endif
#ifdef CONFIG_FEC_PACKETHOOK
	/* Re-read register. Not exactly guaranteed to be correct,
	   but... */
	if (fep->ph_regaddr) regval = *fep->ph_regaddr;
#endif
   } /* while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) */
	fep->cur_rx = (cbd_t *)bdp;

#if 0
	/* Doing this here will allow us to process all frames in the
	 * ring before the FEC is allowed to put more there.  On a heavily
	 * loaded network, some frames may be lost.  Unfortunately, this
	 * increases the interrupt overhead since we can potentially work
	 * our way back to the interrupt return only to come right back
	 * here.
	 */
	fecp->fec_r_des_active = 0x01000000;
#endif
}
Example #15
static void read_bulk_callback( struct urb *urb )
{
	ether_dev_t *ether_dev = urb->context;
	struct net_device *net;
	int count = urb->actual_length, res;
	struct sk_buff	*skb;

	// Sanity check 
	if ( !ether_dev || !(ether_dev->flags & CDC_ETHER_RUNNING) ) {
		dbg("BULK IN callback but driver is not active!");
		return;
	}

	net = ether_dev->net;
	if ( !netif_device_present(net) ) {
		// Somebody killed our network interface...
		return;
	}

	if ( ether_dev->flags & CDC_ETHER_RX_BUSY ) {
		// Are we already trying to receive a frame???
		ether_dev->stats.rx_errors++;
		dbg("ether_dev Rx busy");
		return;
	}

	// We are busy, leave us alone!
	ether_dev->flags |= CDC_ETHER_RX_BUSY;

	switch ( urb->status ) {
		case USB_ST_NOERROR:
			break;
		case USB_ST_NORESPONSE:
			dbg( "no response in BULK IN" );
			ether_dev->flags &= ~CDC_ETHER_RX_BUSY;
			break;
		default:
			dbg( "%s: RX status %d", net->name, urb->status );
			goto goon;
	}

	// Check to make sure we got some data...
	if ( !count ) {
		// We got no data!!!
		goto goon;
	}

	// Tell the kernel we want some memory
	if ( !(skb = dev_alloc_skb(count)) ) {
		// We got no receive buffer.
		goto goon;
	}

	// Here's where it came from
	skb->dev = net;
	
	// Now we copy it over
	eth_copy_and_sum(skb, ether_dev->rx_buff, count, 0);
	
	// Account for the bytes we just copied in
	skb_put(skb, count);
	// Determine the protocol and strip the Ethernet header
	skb->protocol = eth_type_trans(skb, net);
	
	// Ship it off to the kernel
	netif_rx(skb);
	
	// Update our statistics
	ether_dev->stats.rx_packets++;
	ether_dev->stats.rx_bytes += count;

goon:
	// Prep the USB to wait for another frame
	FILL_BULK_URB( &ether_dev->rx_urb, ether_dev->usb,
			usb_rcvbulkpipe(ether_dev->usb, ether_dev->data_ep_in),
			ether_dev->rx_buff, ether_dev->wMaxSegmentSize, 
			read_bulk_callback, ether_dev );
			
	// Give this to the USB subsystem so it can tell us 
	// when more data arrives.
	if ( (res = usb_submit_urb(&ether_dev->rx_urb)) ) {
		warn( __FUNCTION__ " failed to submit rx_urb %d", res);
	}
	
	// We are no longer busy, show us the frames!!!
	ether_dev->flags &= ~CDC_ETHER_RX_BUSY;
}
Example #16
/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static int
scc_enet_rx(struct net_device *dev)
{
	struct	scc_enet_private *cep;
	volatile cbd_t	*bdp;
	struct	sk_buff *skb;
	ushort	pkt_len;

	cep = (struct scc_enet_private *)dev->priv;

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = cep->cur_rx;

for (;;) {
	if (bdp->cbd_sc & BD_ENET_RX_EMPTY)
		break;

#ifndef final_version
	/* Since we have allocated space to hold a complete frame, both
	 * the first and last indicators should be set.
	 */
	if ((bdp->cbd_sc & (BD_ENET_RX_FIRST | BD_ENET_RX_LAST)) !=
		(BD_ENET_RX_FIRST | BD_ENET_RX_LAST))
			printk("CPM ENET: rcv is not first+last\n");
#endif

	/* Frame too long or too short.
	*/
	if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
		cep->stats.rx_length_errors++;
	if (bdp->cbd_sc & BD_ENET_RX_NO)	/* Frame alignment */
		cep->stats.rx_frame_errors++;
	if (bdp->cbd_sc & BD_ENET_RX_CR)	/* CRC Error */
		cep->stats.rx_crc_errors++;
	if (bdp->cbd_sc & BD_ENET_RX_OV)	/* FIFO overrun */
		cep->stats.rx_crc_errors++;

	/* Report late collisions as a frame error.
	 * On this error, the BD is closed, but we don't know what we
	 * have in the buffer.  So, just drop this frame on the floor.
	 */
	if (bdp->cbd_sc & BD_ENET_RX_CL) {
		cep->stats.rx_frame_errors++;
	}
	else {

		/* Process the incoming frame.
		*/
		cep->stats.rx_packets++;
		pkt_len = bdp->cbd_datlen;
		cep->stats.rx_bytes += pkt_len;

		/* This does 16 byte alignment, much more than we need.
		 * The packet length includes FCS, but we don't want to
		 * include that when passing upstream as it messes up
		 * bridging applications.
		 */
		skb = dev_alloc_skb(pkt_len-4);

		if (skb == NULL) {
			printk("%s: Memory squeeze, dropping packet.\n", dev->name);
			cep->stats.rx_dropped++;
		}
		else {
			skb->dev = dev;
			skb_put(skb,pkt_len-4);	/* Make room */
			eth_copy_and_sum(skb,
				cep->rx_vaddr[bdp - cep->rx_bd_base],
				pkt_len-4, 0);
			skb->protocol=eth_type_trans(skb,dev);
			netif_rx(skb);
		}
	}

	/* Clear the status flags for this buffer.
	*/
	bdp->cbd_sc &= ~BD_ENET_RX_STATS;

	/* Mark the buffer empty.
	*/
	bdp->cbd_sc |= BD_ENET_RX_EMPTY;

	/* Update BD pointer to next entry.
	*/
	if (bdp->cbd_sc & BD_ENET_RX_WRAP)
		bdp = cep->rx_bd_base;
	else
		bdp++;

   }
	cep->cur_rx = (cbd_t *)bdp;

	return 0;
}
Example #17
/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	int entry = np->cur_rx % RX_RING_SIZE;
	int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;

	if (debug > 4) {
		printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
			   entry, np->rx_ring[entry].status);
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (1) {
		struct netdev_desc *desc = &(np->rx_ring[entry]);
		u32 frame_status;
		int pkt_len;

		if (!(desc->status & DescOwn))
			break;
		frame_status = le32_to_cpu(desc->status);
		pkt_len = frame_status & 0x1fff;	/* Chip omits the CRC. */
		if (debug > 4)
			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
				   frame_status);
		if (--boguscnt < 0)
			break;
		pci_dma_sync_single(np->pci_dev, desc->frag[0].addr,
			np->rx_buf_sz, PCI_DMA_FROMDEVICE);
		
		if (frame_status & 0x001f4000) {
			/* There was an error. */
			if (debug > 2)
				printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
					   frame_status);
			np->stats.rx_errors++;
			if (frame_status & 0x00100000) np->stats.rx_length_errors++;
			if (frame_status & 0x00010000) np->stats.rx_fifo_errors++;
			if (frame_status & 0x00060000) np->stats.rx_frame_errors++;
			if (frame_status & 0x00080000) np->stats.rx_crc_errors++;
			if (frame_status & 0x00100000) {
				printk(KERN_WARNING "%s: Oversized Ethernet frame,"
					   " status %8.8x.\n",
					   dev->name, frame_status);
			}
		} else {
			struct sk_buff *skb;

#ifndef final_version
			if (debug > 4)
				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
					   ", bogus_cnt %d.\n",
					   pkt_len, boguscnt);
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
				skb_put(skb, pkt_len);
			} else {
				pci_unmap_single(np->pci_dev, 
					desc->frag[0].addr,
					np->rx_buf_sz, 
					PCI_DMA_FROMDEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			/* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
			netif_rx(skb);
			dev->last_rx = jiffies;
		}
		entry = (++np->cur_rx) % RX_RING_SIZE;
	}

	/* Refill the Rx ring buffers. */
	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		struct sk_buff *skb;
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(np->rx_buf_sz);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;		/* Better luck next round. */
			skb->dev = dev;		/* Mark as being used by this device. */
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			np->rx_ring[entry].frag[0].addr = cpu_to_le32(
				pci_map_single(np->pci_dev, skb->tail, 
					np->rx_buf_sz, PCI_DMA_FROMDEVICE));
		}
		/* Perhaps we need not reset this field. */
		np->rx_ring[entry].frag[0].length =
			cpu_to_le32(np->rx_buf_sz | LastFrag);
		np->rx_ring[entry].status = 0;
	}

	/* No need to restart Rx engine, it will poll. */
	return 0;
}
Example #18
/* This routine is logically part of the interrupt handler, but isolated
   for clarity and better register allocation. */
static int netdev_rx(struct device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	int entry = np->cur_rx % RX_RING_SIZE;
	int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;

	if (debug > 4) {
		printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
			   entry, np->rx_head_desc->rx_length);
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while ( ! (np->rx_head_desc->rx_length & DescOwn)) {
		struct rx_desc *desc = np->rx_head_desc;
		int data_size = desc->rx_length;
		u16 desc_status = desc->rx_status;

		if (debug > 4)
			printk(KERN_DEBUG "  netdev_rx() status is %4.4x.\n",
				   desc_status);
		if (--boguscnt < 0)
			break;
		if ( (desc_status & (RxWholePkt | RxErr)) !=  RxWholePkt) {
			if ((desc_status & RxWholePkt) !=  RxWholePkt) {
				printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
					   "multiple buffers, entry %#x length %d status %4.4x!\n",
					   dev->name, np->cur_rx, data_size, desc_status);
				printk(KERN_WARNING "%s: Oversized Ethernet frame %p vs %p.\n",
					   dev->name, np->rx_head_desc,
					   &np->rx_ring[np->cur_rx % RX_RING_SIZE]);
				np->stats.rx_length_errors++;
			} else if (desc_status & RxErr) {
				/* There was an error. */
				if (debug > 2)
					printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
						   desc_status);
				np->stats.rx_errors++;
				if (desc_status & 0x0030) np->stats.rx_length_errors++;
				if (desc_status & 0x0048) np->stats.rx_fifo_errors++;
				if (desc_status & 0x0004) np->stats.rx_frame_errors++;
				if (desc_status & 0x0002) np->stats.rx_crc_errors++;
			}
		} else {
			struct sk_buff *skb;
			/* Length should omit the CRC */
			u16 pkt_len = data_size - 4;

			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
#if ! defined(__alpha__) || USE_IP_COPYSUM		/* Avoid misaligned on Alpha */
				eth_copy_and_sum(skb, bus_to_virt(desc->addr),
								 pkt_len, 0);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb,pkt_len), bus_to_virt(desc->addr), pkt_len);
#endif
			} else {
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			np->stats.rx_bytes+=skb->len;
			netif_rx(skb);
			dev->last_rx = jiffies;
			np->stats.rx_packets++;
		}
		entry = (++np->cur_rx) % RX_RING_SIZE;
		np->rx_head_desc = &np->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		struct sk_buff *skb;
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(np->rx_buf_sz);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;			/* Better luck next round. */
			skb->dev = dev;			/* Mark as being used by this device. */
			np->rx_ring[entry].addr = virt_to_bus(skb->tail);
		}
		np->rx_ring[entry].rx_status = 0;
		np->rx_ring[entry].rx_length = DescOwn;
	}

	/* Pre-emptively restart Rx engine. */
	writew(CmdRxDemand | np->chip_cmd, dev->base_addr + ChipCmd);
	return 0;
}
Example #19
// Reception.
// probe() fills in an rx_urb. When the net device is brought up
// the urb is activated, and this callback gets run for incoming
// data.
static void
ecos_usbeth_rx_callback(struct urb* urb)
{
    ecos_usbeth*        usbeth  = (ecos_usbeth*) urb->context;
    struct net_device*  net     = usbeth->net_dev;
    struct sk_buff*     skb;
    int                 len;
    int                 res;

    if (0 != urb->status) {
        // This happens numerous times during a disconnect. Do not
        // issue a warning, but do clear the status field or things
        // get confused when resubmitting.
        //
        // Some host hardware does not distinguish between CRC errors
        // (very rare) and timeouts (perfectly normal). Do not
        // increment the error count if it might have been a timeout.
        if (USB_ST_CRC != urb->status) {
            usbeth->stats.rx_errors++;
        }
        urb->status = 0;
    } else if (2 > urb->actual_length) {
        // With some hardware the target may have to send a bogus
        // first packet. Just ignore those.

    } else {
        len = usbeth->rx_buffer[0] + (usbeth->rx_buffer[1] << 8);
        if (len > (urb->actual_length - 2)) {
            usbeth->stats.rx_errors++;
            usbeth->stats.rx_length_errors++;
            printk("ecos_usbeth: warning, packet size mismatch, got %d bytes, expected %d\n",
                   urb->actual_length, len);
        } else {
            skb = dev_alloc_skb(len + 2);
            if ((struct sk_buff*)0 == skb) {
                printk("ecos_usbeth: failed to alloc skb, dropping packet\n");
                usbeth->stats.rx_dropped++;
            } else {
#if 0
                {
                    int i;
                    printk("--------------------------------------------------------------\n");
                    printk("ecos_usbeth RX: total size %d\n", len);
                    for (i = 0; (i < len) && (i < 128); i+= 8) {
                        printk("rx  %x %x %x %x %x %x %x %x\n",
                               usbeth->rx_buffer[i+0], usbeth->rx_buffer[i+1], usbeth->rx_buffer[i+2], usbeth->rx_buffer[i+3],
                               usbeth->rx_buffer[i+4], usbeth->rx_buffer[i+5], usbeth->rx_buffer[i+6], usbeth->rx_buffer[i+7]);
                    }
                    printk("--------------------------------------------------------------\n");
                }
#endif
                skb->dev        = net;
                eth_copy_and_sum(skb, &(usbeth->rx_buffer[2]), len, 0);
                skb_put(skb, len);
                skb->protocol   = eth_type_trans(skb, net);
                netif_rx(skb);
                usbeth->stats.rx_packets++;
                usbeth->stats.rx_bytes += len;
            }
        }
    }

    if (0 != netif_running(net)) {
        res = usb_submit_urb(&(usbeth->rx_urb));
        if (0 != res) {
            printk("ecos_usbeth: failed to restart USB receives after packet, %d\n", res);
        }
    }
}
Example #20
static void read_bulk_callback(struct urb *urb)
{
	rtl8150_t *dev;
	int pkt_len, res;
	struct sk_buff *skb;
	struct net_device *netdev;
	u16 rx_stat;

	dev = urb->context;
	if (!dev) {
		warn("!dev");
		return;
	}
	netdev = dev->netdev;
	if (!netif_device_present(netdev)) {
		warn("netdev is not present");
		return;
	}
	switch (urb->status) {
	case 0:
		break;
	case -ENOENT:
		return;
	case -ETIMEDOUT:
		warn("need a device reset?..");
		goto goon;
	default:
		warn("Rx status %d", urb->status);
		goto goon;
	}

	pkt_len = urb->actual_length - 4;
	rx_stat = le16_to_cpu(*(u16 *) (dev->rx_buff + pkt_len));

	if (!(skb = dev_alloc_skb(pkt_len + 2)))
		goto goon;
	skb->dev = netdev;
	skb_reserve(skb, 2);
	eth_copy_and_sum(skb, dev->rx_buff, pkt_len, 0);
	skb_put(skb, pkt_len);
	skb->protocol = eth_type_trans(skb, netdev);

#ifdef CONFIG_RTL865XB_3G
{
			int retval;
			if((unsigned int)skb->data-(unsigned int)skb->head>=14)
			{
				skb->data-=14;
				skb->len+=14;
			}
			retval=rtl8651_fwdEngineExtPortRecv(skb, skb->data, skb->len,8, 1<<CONFIG_RTL865XB_3G_PORT, myLinkID2);
			if(retval==0){
				//8651 fwd engine consumed the packet.
			}else if (retval==-1){
				//exception. Drop it.
				dev_kfree_skb_irq(skb);
			}else{
				netif_rx(skb);
			}
		}
#else
			netif_rx(skb);
#endif
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += pkt_len;
goon:
	FILL_BULK_URB(dev->rx_urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1),
		      dev->rx_buff, RTL8150_MAX_MTU, read_bulk_callback, dev);
	if ((res = usb_submit_urb(dev->rx_urb)))
		warn("%s: Rx urb submission failed %d", netdev->name, res);
}
Example #21
int tulip_poll(struct net_device *dev, int *budget)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = *budget;
	int received = 0;

	if (!netif_running(dev))
		goto done;

	if (rx_work_limit > dev->quota)
		rx_work_limit = dev->quota;

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

/* that one buffer is needed for mit activation; or might be a
   bug in the ring buffer code; check later -- JHS*/

        if (rx_work_limit >=RX_RING_SIZE) rx_work_limit--;
#endif

	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
			   tp->rx_ring[entry].status);

       do {
		if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
			printk(KERN_DEBUG " In tulip_poll(), hardware disappeared.\n");
			break;
		}
               /* Acknowledge current RX interrupt sources. */
               iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);
 
 
               /* If we own the next entry, it is a new packet. Send it up. */
               while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
                       s32 status = le32_to_cpu(tp->rx_ring[entry].status);
 
 
                       if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
                               break;
 
                       if (tulip_debug > 5)
                               printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
                                      dev->name, entry, status);
                       if (--rx_work_limit < 0)
                               goto not_done;
 
                       if ((status & 0x38008300) != 0x0300) {
                               if ((status & 0x38000300) != 0x0300) {
                                /* Ignore earlier buffers. */
                                       if ((status & 0xffff) != 0x7fff) {
                                               if (tulip_debug > 1)
                                                       printk(KERN_WARNING "%s: Oversized Ethernet frame "
                                                              "spanned multiple buffers, status %8.8x!\n",
                                                              dev->name, status);
                                               tp->stats.rx_length_errors++;
                                       }
                               } else if (status & RxDescFatalErr) {
                                /* There was a fatal error. */
                                       if (tulip_debug > 2)
                                               printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
                                                      dev->name, status);
                                       tp->stats.rx_errors++; /* end of a packet.*/
                                       if (status & 0x0890) tp->stats.rx_length_errors++;
                                       if (status & 0x0004) tp->stats.rx_frame_errors++;
                                       if (status & 0x0002) tp->stats.rx_crc_errors++;
                                       if (status & 0x0001) tp->stats.rx_fifo_errors++;
                               }
                       } else {
                               /* Omit the four octet CRC from the length. */
                               short pkt_len = ((status >> 16) & 0x7ff) - 4;
                               struct sk_buff *skb;
  
#ifndef final_version
                               if (pkt_len > 1518) {
                                       printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
                                              dev->name, pkt_len, pkt_len);
                                       pkt_len = 1518;
                                       tp->stats.rx_length_errors++;
                               }
#endif
                               /* Check if the packet is long enough to accept without copying
                                  to a minimally-sized skbuff. */
                               if (pkt_len < tulip_rx_copybreak
                                   && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                                       skb->dev = dev;
                                       skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                       pci_dma_sync_single_for_cpu(tp->pdev,
								   tp->rx_buffers[entry].mapping,
								   pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
                                       eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->data,
                                                        pkt_len, 0);
                                       skb_put(skb, pkt_len);
#else
                                       memcpy(skb_put(skb, pkt_len),
                                              tp->rx_buffers[entry].skb->data,
                                              pkt_len);
#endif
                                       pci_dma_sync_single_for_device(tp->pdev,
								      tp->rx_buffers[entry].mapping,
								      pkt_len, PCI_DMA_FROMDEVICE);
                               } else {        /* Pass up the skb already on the Rx ring. */
                                       char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
                                                            pkt_len);
  
#ifndef final_version
                                       if (tp->rx_buffers[entry].mapping !=
                                           le32_to_cpu(tp->rx_ring[entry].buffer1)) {
                                               printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
                                                      "do not match in tulip_rx: %08x vs. %08llx %p / %p.\n",
                                                      dev->name,
                                                      le32_to_cpu(tp->rx_ring[entry].buffer1),
                                                      (unsigned long long)tp->rx_buffers[entry].mapping,
                                                      skb->head, temp);
                                       }
#endif
  
                                       pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
                                                        PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
  
                                       tp->rx_buffers[entry].skb = NULL;
                                       tp->rx_buffers[entry].mapping = 0;
                               }
                               skb->protocol = eth_type_trans(skb, dev);
  
                               netif_receive_skb(skb);
 
                               dev->last_rx = jiffies;
                               tp->stats.rx_packets++;
                               tp->stats.rx_bytes += pkt_len;
                       }
                       received++;

                       entry = (++tp->cur_rx) % RX_RING_SIZE;
                       if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
                               tulip_refill_rx(dev);
 
                }
 
               /* New ack strategy... irq does not ack Rx any longer
                  hopefully this helps */
 
               /* Really bad things can happen here... If new packet arrives
                * and an irq arrives (tx or just due to occasionally unset
                * mask), it will be acked by irq handler, but new thread
                * is not scheduled. It is major hole in design.
                * No idea how to fix this if "playing with fire" will fail
                * tomorrow (night 011029). If it will not fail, we won
                * finally: amount of IO did not increase at all. */
       } while ((ioread32(tp->base_addr + CSR5) & RxIntr));
 
done:
 
 #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
  
          /* We use this simplistic scheme for IM. It's proven by
             real life installations. We can have IM enabled
            continuously but this would cause unnecessary latency.
            Unfortunately we can't use all the NET_RX_* feedback here.
            This would turn on IM for devices that are not contributing
            to backlog congestion with unnecessary latency. 
  
             We monitor the device RX-ring and have:
  
             HW Interrupt Mitigation either ON or OFF.
  
            ON:  More than 1 pkt received (per intr.) OR we are dropping
             OFF: Only 1 pkt received
            
             Note. We only use min and max (0, 15) settings from mit_table */
  
  
          if( tp->flags &  HAS_INTR_MITIGATION) {
                 if( received > 1 ) {
                         if( ! tp->mit_on ) {
                                 tp->mit_on = 1;
                                 iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
                         }
                  }
                 else {
                         if( tp->mit_on ) {
                                 tp->mit_on = 0;
                                 iowrite32(0, tp->base_addr + CSR11);
                         }
                  }
          }

#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */
 
         dev->quota -= received;
         *budget -= received;
 
         tulip_refill_rx(dev);
         
         /* If the RX ring could not be fully refilled, we are out of memory. */
         if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom;
 
         /* Remove us from polling list and enable RX intr. */
 
         netif_rx_complete(dev);
         iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);
 
         /* The last op happens after poll completion, which means the following:
          * 1. it can race with disabling irqs in the irq handler
          * 2. it can race with disabling/enabling irqs in other poll threads
          * 3. if an irq was raised after the loop began, it will be immediately
          *    triggered here.
          *
          * Summarizing: the logic results in some redundant irqs, both
          * due to races in masking and due to too-late acking of already
          * processed irqs. But it must not result in losing events.
          */
 
         return 0;
 
 not_done:
         if (!received) {

                 received = dev->quota; /* Should not happen */
         }
         dev->quota -= received;
         *budget -= received;
 
         if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
             tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
                 tulip_refill_rx(dev);
 
         if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom;
 
         return 1;
 
 
 oom:    /* Executed with RX ints disabled */
 
         
         /* Start timer, stop polling, but do not enable rx interrupts. */
         mod_timer(&tp->oom_timer, jiffies+1);
       
         /* Think: checking timer_pending() here would be an explicit sign of a bug.
          * The timer could be pending now, yet have fired and completed
          * before we did netif_rx_complete(). See? We would lose it. */
 
         /* remove ourselves from the polling list */
         netif_rx_complete(dev);
 
         return 0;
}
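
The tail of the poll routine above follows the pre-2.6.24 NAPI contract: a single call may consume at most dev->quota and *budget packets, decrements both by the number received, returns 0 and re-enables RX interrupts once the ring is drained, and returns 1 when it must be polled again. What follows is a minimal user-space sketch of just that accounting; fake_dev, fake_poll and pending_packets are hypothetical stand-ins for illustration, not part of the driver.

#include <stdio.h>

struct fake_dev {
	int quota;			/* per-device allowance for this poll pass */
};

static int pending_packets = 300;	/* pretend the hardware has this many queued */

/* Returns 0 when the "ring" is drained, 1 when polling must continue. */
static int fake_poll(struct fake_dev *dev, int *budget)
{
	int rx_work_limit = (*budget < dev->quota) ? *budget : dev->quota;
	int received = 0;

	while (received < rx_work_limit && pending_packets > 0) {
		pending_packets--;	/* "deliver" one packet */
		received++;
	}

	dev->quota -= received;
	*budget -= received;

	return pending_packets ? 1 : 0;
}

int main(void)
{
	struct fake_dev dev = { .quota = 64 };
	int budget = 256;

	while (budget > 0) {
		dev.quota = 64;
		if (!fake_poll(&dev, &budget))
			break;		/* drained: the real driver re-enables RX irqs here */
	}
	printf("budget left: %d, packets still pending: %d\n",
	       budget, pending_packets);
	return 0;
}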
Exemple #22
0
/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	int entry = np->cur_rx % RX_RING_SIZE;
	int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;

	if (debug > 4) {
		printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
			   entry, np->rx_ring[entry].status);
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (--work_limit >= 0) {
		struct w840_rx_desc *desc = np->rx_head_desc;
		s32 status = le32_to_cpu(desc->status);

		if (debug > 4)
			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
				   status);
		if (status < 0)
			break;
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
						   "multiple buffers, entry %#x status %4.4x!\n",
						   dev->name, np->cur_rx, status);
					np->stats.rx_length_errors++;
				}
			} else if (status & 0x8000) {
				/* There was a fatal error. */
				if (debug > 2)
					printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
						   dev->name, status);
				np->stats.rx_errors++; /* end of a packet.*/
				if (status & 0x0890) np->stats.rx_length_errors++;
				if (status & 0x004C) np->stats.rx_frame_errors++;
				if (status & 0x0002) np->stats.rx_crc_errors++;
			}
		} else {
			struct sk_buff *skb;
			/* Omit the four octet CRC from the length. */
			int pkt_len = ((status >> 16) & 0x7ff) - 4;

#ifndef final_version
			if (debug > 4)
				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
					   " status %x.\n", pkt_len, status);
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single(np->pdev,np->rx_addr[entry],
							np->rx_skbuff[entry]->len,
							PCI_DMA_FROMDEVICE);
				/* Call copy + cksum if available. */
#if HAS_IP_COPYSUM
				eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
					   pkt_len);
#endif
			} else {
				pci_unmap_single(np->pdev,np->rx_addr[entry],
							np->rx_skbuff[entry]->len,
							PCI_DMA_FROMDEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
#ifndef final_version				/* Remove after testing. */
			/* You will want this info for the initial debug. */
			if (debug > 5)
				printk(KERN_DEBUG "  Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
					   "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x "
					   "%d.%d.%d.%d.\n",
					   skb->data[0], skb->data[1], skb->data[2], skb->data[3],
					   skb->data[4], skb->data[5], skb->data[6], skb->data[7],
					   skb->data[8], skb->data[9], skb->data[10],
					   skb->data[11], skb->data[12], skb->data[13],
					   skb->data[14], skb->data[15], skb->data[16],
					   skb->data[17]);
#endif
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->last_rx = jiffies;
			np->stats.rx_packets++;
			np->stats.rx_bytes += pkt_len;
		}
		entry = (++np->cur_rx) % RX_RING_SIZE;
		np->rx_head_desc = &np->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		struct sk_buff *skb;
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(np->rx_buf_sz);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;			/* Better luck next round. */
			skb->dev = dev;			/* Mark as being used by this device. */
			np->rx_addr[entry] = pci_map_single(np->pdev,
							skb->tail,
							skb->len, PCI_DMA_FROMDEVICE);
			np->rx_ring[entry].buffer1 = cpu_to_le32(np->rx_addr[entry]);
		}
		wmb();
		np->rx_ring[entry].status = cpu_to_le32(DescOwn);
	}

	return 0;
}
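
The rx_copybreak test in netdev_rx() above trades a memcpy against a remap: frames shorter than the threshold are copied into a freshly allocated, minimally sized buffer so the large DMA buffer stays in the ring, while longer frames hand the ring buffer itself upstream and leave the slot to be refilled later. Below is a hedged user-space sketch of that decision only; the RX_COPYBREAK value, struct frame and rx_one() are illustrative stand-ins, not the driver's actual types or defaults.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define RX_COPYBREAK 256	/* illustrative threshold only */
#define RING_BUF_SZ  1536

struct frame {
	unsigned char *data;
	size_t len;
	int owns_ring_buf;	/* 1: the ring slot's buffer was handed upstream */
};

/* Copy small packets; hand the ring buffer itself upstream for large ones. */
static struct frame *rx_one(unsigned char *ring_buf, size_t pkt_len)
{
	struct frame *f = malloc(sizeof(*f));

	if (!f)
		return NULL;

	if (pkt_len < RX_COPYBREAK) {
		f->data = malloc(pkt_len);
		if (!f->data) {
			free(f);
			return NULL;
		}
		memcpy(f->data, ring_buf, pkt_len);	/* ring buffer stays in place */
		f->owns_ring_buf = 0;
	} else {
		f->data = ring_buf;			/* caller must refill this slot */
		f->owns_ring_buf = 1;
	}
	f->len = pkt_len;
	return f;
}

int main(void)
{
	static unsigned char ring_buf[RING_BUF_SZ];
	struct frame *small = rx_one(ring_buf, 60);
	struct frame *large = rx_one(ring_buf, 1400);

	/* cleanup omitted for brevity in this sketch */
	printf("small copied: %d, large handed off: %d\n",
	       small && !small->owns_ring_buf, large && large->owns_ring_buf);
	return 0;
}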
Exemple #23
0
/*
 * We have a good packet(s), get it/them out of the buffers.
 *
 * cgg - this driver works by creating (once) a circular list of receiver
 *       DMA descriptors that will be used serially by the Banyan.
 *       Because the descriptors are never unlinked from the list _they
 *       are always live_.  We are counting on Linux (and the chosen number
 *       of buffers) to keep ahead of the hardware; otherwise the same
 *       descriptor might be used for more than one reception.
 */
static void
acacia_rx(struct net_device *dev)
{
    struct acacia_local* lp = (struct acacia_local *)dev->priv;
    volatile DMAD_t  rd = &lp->rd_ring[lp->rx_next_out];
    struct sk_buff *skb;
    u8* pkt_buf;
    u32 devcs;
    u32 count, pkt_len;

    /* cgg - keep going while we have received into more descriptors */

    while (IS_DMA_USED(rd->control)) {

        devcs = rd->devcs;

        pkt_len = RCVPKT_LENGTH(devcs);

        pkt_buf = &lp->rba[lp->rx_next_out * ACACIA_RBSIZE];

        /*
         * cgg - RESET the address pointer later - if we get a second
         * reception it will occur in the remains of the current
         * area of memory - protected by the diminished DMA count.
         */

        /*
         * Due to a bug in the Banyan processor, the packet length
         * given by the devcs field and the count field sometimes differ.
         * If that is the case, report an error.
         */
        count = ACACIA_RBSIZE - (u32)DMA_COUNT(rd->control);
        if( count != pkt_len) {
            lp->stats.rx_errors++;
        } else if (count < 64) {
            lp->stats.rx_errors++;
        } else if ((devcs & (/*ETHERDMA_IN_FD |*/ ETHRX_ld_m)) !=
                   (/*ETHERDMA_IN_FD |*/ ETHRX_ld_m)) {
            /* cgg - check that this is a whole packet */
            /* WARNING: DMA_FD bit incorrectly set in Acacia
               (errata ref #077) */
            lp->stats.rx_errors++;
            lp->stats.rx_over_errors++;
        } else if (devcs & ETHRX_rok_m) {
            /* must be the (first and) last descriptor then */

            /* Malloc up new buffer. */
            skb = dev_alloc_skb(pkt_len+2);
            if (skb == NULL) {
                err("no memory, dropping rx packet.\n");
                lp->stats.rx_dropped++;
            } else {
                /* else added by cgg - used to fall through! */
                /* invalidate the cache before copying
                   the buffer */
                dma_cache_inv((unsigned long)pkt_buf, pkt_len);

                skb->dev = dev;
                skb_reserve(skb, 2);	/* 16 byte align the IP header */
                skb_put(skb, pkt_len);	/* Make room */
                eth_copy_and_sum(skb, pkt_buf, pkt_len, 0);
                skb->protocol = eth_type_trans(skb, dev);
                /* pass the packet to upper layers */
                netif_rx(skb);
                dev->last_rx = jiffies;
                lp->stats.rx_packets++;
                lp->stats.rx_bytes += pkt_len;

                if (IS_RCV_MP(devcs))
                    lp->stats.multicast++;
            }

        } else {
            /* This should only happen if we enable
               accepting broken packets */
            lp->stats.rx_errors++;

            /* cgg - (re-)added statistics counters */
            if (IS_RCV_CRC_ERR(devcs)) {
                dbg(2, "RX CRC error\n");
                lp->stats.rx_crc_errors++;
            } else {
                if (IS_RCV_LOR_ERR(devcs)) {
                    dbg(2, "RX LOR error\n");
                    lp->stats.rx_length_errors++;
                }

                if (IS_RCV_LE_ERR(devcs)) {
                    dbg(2, "RX LE error\n");
                    lp->stats.rx_length_errors++;
                }
            }

            if (IS_RCV_OVR_ERR(devcs)) {
                /*
                 * The overflow errors are handled through
                 * an interrupt handler.
                 */
                lp->stats.rx_over_errors++;
            }
            /* code violation */
            if (IS_RCV_CV_ERR(devcs)) {
                dbg(2, "RX CV error\n");
                lp->stats.rx_frame_errors++;
            }

            if (IS_RCV_CES_ERR(devcs)) {
                dbg(2, "RX Preamble error\n");
            }
        }


        /* reset descriptor's curr_addr */
        rd->ca = virt_to_phys(pkt_buf);

        /*
         * cgg - clear the bits that let us see whether this
         * descriptor has been used or not & reset reception
         * length.
         */
        rd->control = DMAD_iod_m | DMA_COUNT(ACACIA_RBSIZE);
        rd->devcs = 0;
        lp->rx_next_out = (lp->rx_next_out + 1) & ACACIA_RDS_MASK;
        rd = &lp->rd_ring[lp->rx_next_out];

        /*
         * We'll deal with all possible interrupts up to the last
         * used descriptor - so cancel any interrupts that may have
         * arisen while we've been processing.
         */
        writel(0, &lp->rx_dma_regs->dmas);
    }

    /*
     * If any worthwhile packets have been received, netif_rx() has
     * queued them and the network stack will process them from the
     * receive softirq (bottom half).
     */
}
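
The comment above acacia_rx() describes a receive ring whose descriptors are never unlinked: the driver simply walks the list, checks an ownership/used flag, copies the payload out and re-arms the slot. The sketch below models that consume-and-re-arm loop in plain user-space C; struct desc, DESC_USED, NUM_DESC and deliver() are hypothetical stand-ins chosen for illustration only.

#include <stdio.h>
#include <string.h>

#define NUM_DESC   8
#define BUF_SIZE   1536
#define DESC_USED  0x80000000u	/* "hardware" sets this when the slot holds a frame */

struct desc {
	unsigned int control;	/* ownership flag, in the spirit of IS_DMA_USED() */
	unsigned int len;	/* bytes received into buf */
	unsigned char buf[BUF_SIZE];
};

static struct desc ring[NUM_DESC];
static unsigned int next_out;	/* next slot to consume, like lp->rx_next_out */

static void deliver(const unsigned char *data, unsigned int len)
{
	printf("got %u byte frame starting with 0x%02x\n", len, data[0]);
}

/* Drain every slot the hardware has finished with, then re-arm it. */
static void ring_rx(void)
{
	while (ring[next_out].control & DESC_USED) {
		struct desc *rd = &ring[next_out];

		deliver(rd->buf, rd->len);

		/* Re-arm: clear the used flag so the slot can be filled again. */
		rd->len = 0;
		rd->control = 0;

		next_out = (next_out + 1) % NUM_DESC;
	}
}

int main(void)
{
	/* Pretend the hardware completed two receptions. */
	memset(ring[0].buf, 0xab, 64);
	ring[0].len = 64;
	ring[0].control = DESC_USED;
	memset(ring[1].buf, 0xcd, 128);
	ring[1].len = 128;
	ring[1].control = DESC_USED;

	ring_rx();
	return 0;
}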