Example #1
0
static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
	struct sk_buff *skb = NULL;
	unsigned int len, received = 0;

again:
	while (received < budget &&
	       (skb = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
		__skb_unlink(skb, &vi->recv);
		receive_skb(vi->dev, skb, len);
		vi->num--;
		received++;
	}

	/* FIXME: If we oom and completely run out of inbufs, we need
	 * to start a timer trying to fill more. */
	if (vi->num < vi->max / 2)
		try_fill_recv(vi);

	/* Out of packets? */
	if (received < budget) {
		netif_rx_complete(vi->dev, napi);
		if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq))
		    && napi_schedule_prep(napi)) {
			vi->rvq->vq_ops->disable_cb(vi->rvq);
			__netif_rx_schedule(vi->dev, napi);
			goto again;
		}
	}

	return received;
}
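The poll function above uses the napi_struct-based interface introduced around 2.6.24, where the budget is passed directly to the callback. As a point of reference, here is a minimal sketch (not taken from the virtio driver) of how such a poll function is typically registered and scheduled under that era of the API; MY_NAPI_WEIGHT, struct my_priv and the helper names are illustrative placeholders.

#include <linux/netdevice.h>
#include <linux/interrupt.h>

#define MY_NAPI_WEIGHT 64			/* illustrative weight (max budget) */

struct my_priv {				/* hypothetical private data */
	struct napi_struct napi;
};

static int my_poll(struct napi_struct *napi, int budget);

/* Register the poll callback and its weight, e.g. from probe/open. */
static void my_napi_setup(struct net_device *dev, struct my_priv *priv)
{
	netif_napi_add(dev, &priv->napi, my_poll, MY_NAPI_WEIGHT);
}

/* Interrupt handler: mask RX interrupts and hand the work to the poller. */
static irqreturn_t my_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct my_priv *priv = netdev_priv(dev);

	if (netif_rx_schedule_prep(dev, &priv->napi)) {
		/* ... mask device RX interrupts here ... */
		__netif_rx_schedule(dev, &priv->napi);
	}
	return IRQ_HANDLED;
}

The poll callback then re-enables the device interrupt only after netif_rx_complete(), exactly as virtnet_poll() does above.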
Example #2
0
/**
 * gelic_net_poll - NAPI poll function called by the stack to return packets
 * @netdev: interface device structure
 * @budget: number of packets we can pass to the stack at most
 *
 * Returns 0 if no more packets are available to the driver/stack. Returns 1
 * if the quota is exceeded but the driver still has packets pending.
 *
 */
static int gelic_net_poll(struct net_device *netdev, int *budget)
{
	struct gelic_net_card *card = netdev_priv(netdev);
	int packets_to_do, packets_done = 0;
	int no_more_packets = 0;

	packets_to_do = min(*budget, netdev->quota);

	while (packets_to_do) {
		if (gelic_net_decode_one_descr(card)) {
			packets_done++;
			packets_to_do--;
		} else {
			/* no more packets for the stack */
			no_more_packets = 1;
			break;
		}
	}
	netdev->quota -= packets_done;
	*budget -= packets_done;
	if (no_more_packets) {
		netif_rx_complete(netdev);
		gelic_net_rx_irq_on(card);
		return 0;
	} else
		return 1;
}
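gelic_net_poll() above follows the older poll contract described in its comment block: the callback hangs off the net_device, consumes *budget and dev->quota, and returns 0 when done or 1 when work remains. A minimal sketch of how a driver typically wires up that pre-2.6.24 interface follows; the weight value and helper names are illustrative, not taken from the gelic driver.

#include <linux/netdevice.h>
#include <linux/interrupt.h>

static int my_poll(struct net_device *dev, int *budget);

/* Old-style (pre-napi_struct) hookup: poll callback and weight live
 * directly on the net_device. */
static void my_napi_setup(struct net_device *dev)
{
	dev->poll = my_poll;
	dev->weight = 64;			/* illustrative weight */
}

/* Interrupt handler: mask RX interrupts, then schedule polling. */
static irqreturn_t my_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;

	if (netif_rx_schedule_prep(dev)) {
		/* ... mask device RX interrupts here ... */
		__netif_rx_schedule(dev);
	}
	return IRQ_HANDLED;
}

The interrupt handler only schedules polling; the poll function is then responsible for calling netif_rx_complete() and unmasking RX interrupts once it runs out of packets, as Example #2 does.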
Example #3
0
static int vb_net_poll(struct napi_struct *napi, int budget)
{
	struct voicebus *vb = container_of(napi, struct voicebus, napi);
	int count;

	count = vb_net_receive(vb, budget);

	if (!skb_queue_len(&vb->captured_packets)) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
		netif_rx_complete(vb->netdev, &vb->napi);
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
		netif_rx_complete(&vb->napi);
#else
		napi_complete(&vb->napi);
#endif
	}
	return count;
}
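The version checks above have to be repeated at every completion point in a driver that builds against several kernel generations. A common way to keep that noise out of the fast path is to fold the same three cases into one compatibility macro; the sketch below mirrors exactly the version boundaries used above, and the wrapper name vb_napi_complete() is made up for illustration.

#include <linux/version.h>
#include <linux/netdevice.h>

/* Hypothetical compatibility wrapper mirroring the checks in the
 * example above; vb_napi_complete() is not part of the driver. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
#define vb_napi_complete(vb) netif_rx_complete((vb)->netdev, &(vb)->napi)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
#define vb_napi_complete(vb) netif_rx_complete(&(vb)->napi)
#else
#define vb_napi_complete(vb) napi_complete(&(vb)->napi)
#endif

With such a wrapper, the branch in vb_net_poll() collapses to a single vb_napi_complete(vb) call.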
Example #4
0
static int mambonet_poll(struct net_device *dev, int *budget)
{
	struct netdev_private *np = dev->priv;
	int devno = np->devno;
	char buffer[1600];
	int ns;
	struct sk_buff *skb;
	int frames = 0;
	int max_frames = min(*budget, dev->quota);
	int ret = 0;

	while ((ns = mambonet_recv(devno, buffer, 1600)) > 0) {
		if ((skb = dev_alloc_skb(ns + 2)) != NULL) {
			skb->dev = dev;
			skb_reserve(skb, 2);	/* 16 byte align the IP
						 * header */
#ifdef HAS_IP_COPYSUM
			eth_copy_and_sum(skb, buffer, ns, 0);
			skb_put(skb, ns);
#else
			memcpy(skb_put(skb, ns), buffer, ns);
#endif
			skb->protocol = eth_type_trans(skb, dev);

			if (dev->irq)
				netif_receive_skb(skb);
			else
				netif_rx(skb);

			dev->last_rx = jiffies;
			np->stats.rx_packets++;
			np->stats.rx_bytes += ns;
		} else {
			printk("Failed to allocated skbuff, "
			       "dropping packet\n");
			np->stats.rx_dropped++;
			/* wait for another cycle */
			return 1;
		}
		++frames;
		if (frames > max_frames) {
			ret = 1;
			break;
		}
	}
	*budget -= frames;
	dev->quota -= frames;

	if ((!ret) && (dev->irq))
		netif_rx_complete(dev);

	return ret;
}
Example #5
0
static int ag71xx_poll(struct napi_struct *napi, int limit)
{
	struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
#ifdef AG71XX_NAPI_TX
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
#endif
	struct net_device *dev = ag->dev;
	unsigned long flags;
	u32 status;
	int done;

#ifdef AG71XX_NAPI_TX
	ar71xx_ddr_flush(pdata->flush_reg);
	ag71xx_tx_packets(ag);
#endif

	DBG("%s: processing RX ring\n", dev->name);
	done = ag71xx_rx_packets(ag, limit);

	/* TODO: add OOM handler */

	status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS);
	status &= AG71XX_INT_POLL;

	if ((done < limit) && (!status)) {
		DBG("%s: disable polling mode, done=%d, status=%x\n",
			dev->name, done, status);

		netif_rx_complete(dev, napi);

		/* enable interrupts */
		spin_lock_irqsave(&ag->lock, flags);
		ag71xx_int_enable(ag, AG71XX_INT_POLL);
		spin_unlock_irqrestore(&ag->lock, flags);
		return 0;
	}

	if (status & AG71XX_INT_RX_OF) {
		if (netif_msg_rx_err(ag))
			printk(KERN_ALERT "%s: rx owerflow, restarting dma\n",
				dev->name);

		/* ack interrupt */
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF);
		/* restart RX */
		ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
	}

	DBG("%s: stay in polling mode, done=%d, status=%x\n",
			dev->name, done, status);
	return 1;
}
Example #6
0
static int enic_poll(struct napi_struct *napi, int budget)
{
	struct enic *enic = container_of(napi, struct enic, napi);
	struct net_device *netdev = enic->netdev;
	unsigned int rq_work_to_do = budget;
	unsigned int wq_work_to_do = -1; /* no limit */
	unsigned int  work_done, rq_work_done, wq_work_done;

	/* Service RQ (first) and WQ
	 */

	rq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
		rq_work_to_do, enic_rq_service, NULL);

	wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
		wq_work_to_do, enic_wq_service, NULL);

	/* Accumulate intr event credits for this polling
	 * cycle.  An intr event is the completion of a
	 * WQ or RQ packet.
	 */

	work_done = rq_work_done + wq_work_done;

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[ENIC_INTX_WQ_RQ],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	if (rq_work_done > 0) {

		/* Replenish RQ
		 */

		vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);

	} else {

		/* If no work done, flush all LROs and exit polling
		 */

		if (netdev->features & NETIF_F_LRO)
			lro_flush_all(&enic->lro_mgr);

		netif_rx_complete(napi);
		vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]);
	}

	return rq_work_done;
}
Example #7
0
static int b44_poll(struct net_device *netdev, int *budget)
{
	struct b44 *bp = netdev_priv(netdev);
	int done;

	spin_lock_irq(&bp->lock);

	if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
		/* spin_lock(&bp->tx_lock); */
		b44_tx(bp);
		/* spin_unlock(&bp->tx_lock); */
	}
	spin_unlock_irq(&bp->lock);

	done = 1;
	if (bp->istat & ISTAT_RX) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > netdev->quota)
			orig_budget = netdev->quota;

		work_done = b44_rx(bp, orig_budget);

		*budget -= work_done;
		netdev->quota -= work_done;

		if (work_done >= orig_budget)
			done = 0;
	}

	if (bp->istat & ISTAT_ERRORS) {
		spin_lock_irq(&bp->lock);
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp);
		netif_wake_queue(bp->dev);
		spin_unlock_irq(&bp->lock);
		done = 1;
	}

	if (done) {
		netif_rx_complete(netdev);
		b44_enable_ints(bp);
	}

	return (done ? 0 : 1);
}
Example #8
0
/* Linux 2.6 Kernel Ethernet Poll function 
 * Call only RX processing in the poll function - TX is taken care of in interrupt context 
 */
int cpmac_poll(struct net_device *p_dev, int *budget)
{
	CpmacNetDevice *hDDA = netdev_priv(p_dev);
	unsigned int work = min(p_dev->quota, *budget);
	unsigned int pkts_pending = 0;
	/* This is used to pass the rx packets to be processed and return the number of rx packets processed */
	RxTxParams *napi_params = &hDDA->napiRxTx;

	if (!hDDA->setToClose) {
		napi_params->rxPkts = work;
		napi_params->txPkts = CPMAC_DDA_DEFAULT_TX_MAX_SERVICE;

		/* Process packets - Call the DDC packet processing function */
		hDDA->ddcIf->pktProcess(hDDA->hDDC, &pkts_pending, napi_params);	/* work returns the number of rx packets processed */

		/* If more packets reschedule the tasklet or call pktProcessEnd */
		if (!pkts_pending) {
			netif_rx_complete(p_dev);
			hDDA->ddcIf->pktProcessEnd(hDDA->hDDC, NULL);
			return 0;
		} else if (!test_bit(0, &hDDA->setToClose)) {
			*budget -= napi_params->retRxPkts;
			p_dev->quota -= napi_params->retRxPkts;
			return 1;
		}
	}

	return 0;		/* We are closing down, so don't process anything */
}
Example #9
0
static int korina_poll(struct napi_struct *napi, int budget)
{
	struct korina_private *lp =
		container_of(napi, struct korina_private, napi);
	struct net_device *dev = lp->dev;
	int work_done;

	work_done = korina_rx(dev, budget);
	if (work_done < budget) {
		netif_rx_complete(napi);

		writel(readl(&lp->rx_dma_regs->dmasm) &
			~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR),
			&lp->rx_dma_regs->dmasm);
	}
	return work_done;
}
Example #10
0
static int rc32434_poll(struct napi_struct *napi, int budget)
{
        struct rc32434_local *lp =
                container_of(napi, struct rc32434_local, napi);
        struct net_device *dev = lp->dev;
        int work_done;

        work_done = rc32434_rx(dev, budget);
        if (work_done < budget) {
                netif_rx_complete(dev, napi);

		/* Mask off interrupts */
                writel(readl(&lp->rx_dma_regs->dmasm) &
			(DMASM_d_m | DMASM_h_m |DMASM_e_m),
                        &lp->rx_dma_regs->dmasm);
        }
        return work_done;
}
Example #11
0
static int vb_net_poll(struct net_device *netdev, int *budget)
{
	struct voicebus *vb = voicebus_from_netdev(netdev);
	int count = 0;
	int quota = min(netdev->quota, *budget);

	count = vb_net_receive(vb, quota);

	*budget -= count;
	netdev->quota -= count;

	if (!skb_queue_len(&vb->captured_packets)) {
		netif_rx_complete(netdev);
		return 0;
	} else {
		return -1;
	}
}
Example #12
0
/* dev always points to nds[0].  */
static int ixpdev_poll(struct napi_struct *napi, int budget)
{
	struct ixpdev_priv *ip = container_of(napi, struct ixpdev_priv, napi);
	struct net_device *dev = ip->dev;
	int rx;

	rx = 0;
	do {
		ixp2000_reg_write(IXP2000_IRQ_THD_RAW_STATUS_A_0, 0x00ff);

		rx = ixpdev_rx(dev, rx, budget);
		if (rx >= budget)
			break;
	} while (ixp2000_reg_read(IXP2000_IRQ_THD_RAW_STATUS_A_0) & 0x00ff);

	netif_rx_complete(napi);
	ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_SET_A_0, 0x00ff);

	return rx;
}
Example #13
0
/* Rx CQ polling - called by NAPI */
int mlx4_en_poll_rx_cq(struct net_device *poll_dev, int *budget)
{
	struct mlx4_en_cq *cq = poll_dev->priv;
	struct net_device *dev = cq->dev;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int done;
	int work = min(*budget, poll_dev->quota);

	done = mlx4_en_process_rx_cq(dev, cq, work);
	dev->quota -= done;
	*budget -= done;

	/* If we used up all the quota - we're probably not done yet... */
	if (done == work) {
		INC_PERF_COUNTER(priv->pstats.napi_quota);
		return 1;
	}

        /* Done for now */
	netif_rx_complete(poll_dev);
	mlx4_en_arm_cq(priv, cq);
	return 0;
}
Example #14
0
/*
 * rx poll func, called by network core
 */
static int enet_poll(struct net_device *dev, int *budget)
{
	struct tangox_enet_priv *priv;
	volatile struct enet_desc *rx, *rx1;
	int limit, received;
	unsigned int rx_eoc;

	priv = netdev_priv(dev);
	rx_eoc = priv->rx_eoc;

	/* calculate how many rx packet we are allowed to fetch */
	limit = *budget;
	if (*budget > dev->quota)
		limit = dev->quota;
	received = 0;

	/* process no more than "limit" done rx */
	while (limit > 0) {
		struct sk_buff *skb;
		volatile u32 *r_addr;
		u32 report_cache;
		unsigned int len = 0;
		int pkt_dropped = 0;

		rx = &priv->rx_descs[priv->last_rx_desc];

		/* we need multiple read on this volatile, avoid
		 * memory access at each time */
		r_addr = (volatile u32 *)KSEG1ADDR((u32)&(priv->rx_report[priv->last_rx_desc]));
		report_cache = __raw_readl(r_addr);

#ifdef ETH_DEBUG
		if (rx->config & DESC_EOC) {
			/* should not happen */
			printk("%s i=0x%x rx=0x%x report=0x%x config=0x%x limit=0x%x\n", 
				__FUNCTION__, priv->last_rx_desc, rx, report_cache, rx->config, limit);
		}
#endif
		if (report_cache == 0){ 
			uint32_t *next_r_addr;
			uint32_t next_report_cache;
			next_r_addr = (uint32_t *)KSEG1ADDR((u32)&(priv->rx_report[(priv->last_rx_desc+1)%RX_DESC_COUNT]));
			next_report_cache = __raw_readl(next_r_addr);

			/*check see if next one on error*/
			if(!enet_rx_error(next_report_cache))
				break;
		}

		--limit;

		if (likely((skb = priv->rx_skbs[priv->last_rx_desc]) != NULL)) {

			len = RX_BYTES_TRANSFERRED(report_cache);
			if ((report_cache == 0) || enet_rx_error(report_cache)) {

#ifndef ENABLE_MULTICAST
				if (report_cache & RX_MULTICAST_PKT){ 
					DBG("%s RX_MULTICAST_PKT report=0x%x\n", __FUNCTION__, report_cache);				
					priv->stats.rx_length_errors++;
				}
#endif
				if (report_cache & RX_FCS_ERR) {
					DBG("%s RX_FCS_ERR report=0x%x\n", __FUNCTION__, report_cache);				
					priv->stats.rx_crc_errors++;
				}

				if (report_cache & RX_LATE_COLLISION){ 
					DBG("%s RX_LATE_COLLSION report=0x%x\n", __FUNCTION__, report_cache);				
				}

				if (report_cache &  RX_FIFO_OVERRUN ){ 
					DBG("%s RX_FIFO_OVERRUN report=0x%x\n", __FUNCTION__, report_cache);				
				}

				if (report_cache & RX_RUNT_PKT) {
					DBG("%s RX_RUNT_PKT report=0x%x\n", __FUNCTION__, report_cache);				
				}

				if (report_cache & (RX_FRAME_LEN_ERROR | RX_LENGTH_ERR) ||
				     len > RX_BUF_SIZE) {
					priv->stats.rx_length_errors++;
				}

				priv->stats.rx_errors++;
				pkt_dropped = 1;
				goto done_checking;

			} else {

				/* ok, seems  valid, adjust skb  proto and len
				 * and give it to kernel */
				skb->dev = dev;
				skb_put(skb, len);
				skb->protocol = eth_type_trans(skb, dev);
				netif_receive_skb(skb);
#ifdef ETH_DEBUG
				if(len > 0){
					int i;
					DBG("-----received data------\n");
					for (i=0; i< len; i++){
						if(i%16==0 && i>0)
							DBG("\n");
						DBG("%02x ", skb->data[i]);					
					}
					DBG("\n--------------------------\n");
				}
#endif
			}
done_checking:
			rx_eoc = priv->last_rx_desc;

			if (pkt_dropped)
				goto rearm;

			priv->stats.rx_packets++;
			priv->stats.rx_bytes += len;
			dev->last_rx = jiffies;
			priv->rx_skbs[priv->last_rx_desc] = NULL;
			/* we will re-alloc an skb for this slot */
		}

		if (unlikely((skb = dev_alloc_skb(RX_BUF_SIZE + SKB_RESERVE_SIZE)) == NULL)) {
			printk("%s: failed to allocation sk_buff.\n", priv->name);
			rx->config = DESC_BTS(2) | DESC_EOF/* | DESC_ID*/;
			mb();
			break;
		}

		rx->config = RX_BUF_SIZE | DESC_BTS(2) | DESC_EOF/* | DESC_ID*/;

		skb_reserve(skb, SKB_RESERVE_SIZE);
		rx->s_addr = PHYSADDR((void*)(skb->data));
		dma_cache_inv((unsigned long)skb->data, RX_BUF_SIZE);
		priv->rx_skbs[priv->last_rx_desc] = skb;

rearm:
		/* rearm descriptor */
		__raw_writel(0, r_addr);
		priv->last_rx_desc++;
		priv->last_rx_desc %= RX_DESC_COUNT;
		received++;

	}

	if (received != 0) {
		rx = &priv->rx_descs[rx_eoc];
		rx->config |= DESC_EOC;
		mb();
		rx1 = &priv->rx_descs[priv->rx_eoc];
		rx1->config &= ~DESC_EOC; 
		mb();
		priv->rx_eoc = rx_eoc;
	
		dev->quota -= received;
		*budget -= received;
	}

	enet_start_rx(priv);

	if (limit <= 0) {
		/* stopped early, but there is still work to do */
		return 1;
	}

	netif_rx_complete(dev);
	return 0;
}
Example #15
0
int tulip_poll(struct net_device *dev, int *budget)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = *budget;
	int received = 0;

	if (!netif_running(dev))
		goto done;

	if (rx_work_limit > dev->quota)
		rx_work_limit = dev->quota;

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

/* that one buffer is needed for mit activation; or might be a
   bug in the ring buffer code; check later -- JHS*/

        if (rx_work_limit >=RX_RING_SIZE) rx_work_limit--;
#endif

	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
			   tp->rx_ring[entry].status);

       do {
		if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
			printk(KERN_DEBUG " In tulip_poll(), hardware disappeared.\n");
			break;
		}
               /* Acknowledge current RX interrupt sources. */
               iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);
 
 
               /* If we own the next entry, it is a new packet. Send it up. */
               while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
                       s32 status = le32_to_cpu(tp->rx_ring[entry].status);
 
 
                       if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
                               break;
 
                       if (tulip_debug > 5)
                               printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
                                      dev->name, entry, status);
                       if (--rx_work_limit < 0)
                               goto not_done;
 
                       if ((status & 0x38008300) != 0x0300) {
                               if ((status & 0x38000300) != 0x0300) {
                                /* Ignore earlier buffers. */
                                       if ((status & 0xffff) != 0x7fff) {
                                               if (tulip_debug > 1)
                                                       printk(KERN_WARNING "%s: Oversized Ethernet frame "
                                                              "spanned multiple buffers, status %8.8x!\n",
                                                              dev->name, status);
                                               tp->stats.rx_length_errors++;
                                       }
                               } else if (status & RxDescFatalErr) {
                                /* There was a fatal error. */
                                       if (tulip_debug > 2)
                                               printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
                                                      dev->name, status);
                                       tp->stats.rx_errors++; /* end of a packet.*/
                                       if (status & 0x0890) tp->stats.rx_length_errors++;
                                       if (status & 0x0004) tp->stats.rx_frame_errors++;
                                       if (status & 0x0002) tp->stats.rx_crc_errors++;
                                       if (status & 0x0001) tp->stats.rx_fifo_errors++;
                               }
                       } else {
                               /* Omit the four octet CRC from the length. */
                               short pkt_len = ((status >> 16) & 0x7ff) - 4;
                               struct sk_buff *skb;
  
#ifndef final_version
                               if (pkt_len > 1518) {
                                       printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
                                              dev->name, pkt_len, pkt_len);
                                       pkt_len = 1518;
                                       tp->stats.rx_length_errors++;
                               }
#endif
                               /* Check if the packet is long enough to accept without copying
                                  to a minimally-sized skbuff. */
                               if (pkt_len < tulip_rx_copybreak
                                   && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                                       skb->dev = dev;
                                       skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                       pci_dma_sync_single_for_cpu(tp->pdev,
								   tp->rx_buffers[entry].mapping,
								   pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
                                       eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->data,
                                                        pkt_len, 0);
                                       skb_put(skb, pkt_len);
#else
                                       memcpy(skb_put(skb, pkt_len),
                                              tp->rx_buffers[entry].skb->data,
                                              pkt_len);
#endif
                                       pci_dma_sync_single_for_device(tp->pdev,
								      tp->rx_buffers[entry].mapping,
								      pkt_len, PCI_DMA_FROMDEVICE);
                               } else {        /* Pass up the skb already on the Rx ring. */
                                       char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
                                                            pkt_len);
  
#ifndef final_version
                                       if (tp->rx_buffers[entry].mapping !=
                                           le32_to_cpu(tp->rx_ring[entry].buffer1)) {
                                               printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
                                                      "do not match in tulip_rx: %08x vs. %08llx %p / %p.\n",
                                                      dev->name,
                                                      le32_to_cpu(tp->rx_ring[entry].buffer1),
                                                      (unsigned long long)tp->rx_buffers[entry].mapping,
                                                      skb->head, temp);
                                       }
#endif
  
                                       pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
                                                        PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
  
                                       tp->rx_buffers[entry].skb = NULL;
                                       tp->rx_buffers[entry].mapping = 0;
                               }
                               skb->protocol = eth_type_trans(skb, dev);
  
                               netif_receive_skb(skb);
 
                               dev->last_rx = jiffies;
                               tp->stats.rx_packets++;
                               tp->stats.rx_bytes += pkt_len;
                       }
                       received++;

                       entry = (++tp->cur_rx) % RX_RING_SIZE;
                       if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
                               tulip_refill_rx(dev);
 
                }
 
               /* New ack strategy... irq does not ack Rx any longer
                  hopefully this helps */
 
               /* Really bad things can happen here... If new packet arrives
                * and an irq arrives (tx or just due to occasionally unset
                * mask), it will be acked by irq handler, but new thread
                * is not scheduled. It is major hole in design.
                * No idea how to fix this if "playing with fire" will fail
                * tomorrow (night 011029). If it will not fail, we won
                * finally: amount of IO did not increase at all. */
       } while ((ioread32(tp->base_addr + CSR5) & RxIntr));
 
done:
 
 #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
  
          /* We use this simplistic scheme for IM. It's proven by
             real life installations. We can have IM enabled
             continuously but this would cause unnecessary latency.
             Unfortunately we can't use all the NET_RX_* feedback here.
             This would turn on IM for devices that are not contributing
             to backlog congestion with unnecessary latency.

             We monitor the device RX-ring and have:

             HW Interrupt Mitigation either ON or OFF.

             ON:  More than 1 pkt received (per intr.) OR we are dropping
             OFF: Only 1 pkt received

             Note. We only use min and max (0, 15) settings from mit_table */
  
  
          if( tp->flags &  HAS_INTR_MITIGATION) {
                 if( received > 1 ) {
                         if( ! tp->mit_on ) {
                                 tp->mit_on = 1;
                                 iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
                         }
                  }
                 else {
                         if( tp->mit_on ) {
                                 tp->mit_on = 0;
                                 iowrite32(0, tp->base_addr + CSR11);
                         }
                  }
          }

#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */
 
         dev->quota -= received;
         *budget -= received;
 
         tulip_refill_rx(dev);
         
         /* If RX ring is not full we are out of memory. */
         if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom;
 
         /* Remove us from polling list and enable RX intr. */
 
         netif_rx_complete(dev);
         iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);
 
         /* The last op happens after poll completion. Which means the following:
          * 1. it can race with disabling irqs in irq handler
          * 2. it can race with disabling/enabling irqs in other poll threads
          * 3. if an irq raised after beginning loop, it will be immediately
          *    triggered here.
          *
          * Summarizing: the logic results in some redundant irqs both
          * due to races in masking and due to too late acking of already
          * processed irqs. But it must not result in losing events.
          */
 
         return 0;
 
 not_done:
         if (!received) {

                 received = dev->quota; /* Should not happen */
         }
         dev->quota -= received;
         *budget -= received;
 
         if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
             tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
                 tulip_refill_rx(dev);
 
         if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom;
 
         return 1;
 
 
 oom:    /* Executed with RX ints disabled */
 
         
         /* Start timer, stop polling, but do not enable rx interrupts. */
         mod_timer(&tp->oom_timer, jiffies+1);
       
         /* Think: timer_pending() was an explicit signature of bug.
          * Timer can be pending now but fired and completed
          * before we did netif_rx_complete(). See? We would lose it. */
 
         /* remove ourselves from the polling list */
         netif_rx_complete(dev);
 
         return 0;
}
Example #16
0
#ifdef CONFIG_IDT_USE_NAPI
static int rc32434_poll(struct net_device *rx_data_dev, int *budget)
#else
static void rc32434_rx_tasklet(unsigned long rx_data_dev)
#endif
{
	struct net_device *dev = (struct net_device *)rx_data_dev;	
	struct rc32434_local* lp = netdev_priv(dev);
	volatile DMAD_t  rd = &lp->rd_ring[lp->rx_next_done];
	struct sk_buff *skb, *skb_new;
	u8* pkt_buf;
	u32 devcs, count, pkt_len, pktuncrc_len;
	volatile u32 dmas;
#ifdef CONFIG_IDT_USE_NAPI
	u32 received = 0;
	int rx_work_limit = min(*budget,dev->quota);
#else
	unsigned long 	flags;
	spin_lock_irqsave(&lp->lock, flags);
#endif

	dma_cache_inv((u32)rd, sizeof(*rd));
	while ( (count = RC32434_RBSIZE - (u32)DMA_COUNT(rd->control)) != 0) {
#ifdef CONFIG_IDT_USE_NAPI
		if(--rx_work_limit <0)
                {
                        break;
                }
#endif
		/* init the var. used for the later operations within the while loop */
		skb_new = NULL;
		devcs = rd->devcs;
		pkt_len = RCVPKT_LENGTH(devcs);
		skb = lp->rx_skb[lp->rx_next_done];
      
		if (count < 64) {
			lp->stats.rx_errors++;
			lp->stats.rx_dropped++;			
		}
		else if ((devcs & ( ETHRX_ld_m)) !=	ETHRX_ld_m) {
			/* check that this is a whole packet */
			/* WARNING: DMA_FD bit incorrectly set in Rc32434 (errata ref #077) */
			lp->stats.rx_errors++;
			lp->stats.rx_dropped++;
		}
		else if ( (devcs & ETHRX_rok_m)  ) {
			
			{
				/* must be the (first and) last descriptor then */
				pkt_buf = (u8*)lp->rx_skb[lp->rx_next_done]->data;
				
				pktuncrc_len = pkt_len - 4;
				/* invalidate the cache */
				dma_cache_inv((unsigned long)pkt_buf, pktuncrc_len);
				
				/* Malloc up new buffer. */					  
				skb_new = dev_alloc_skb(RC32434_RBSIZE + 2);					             	
				
				if (skb_new != NULL){
					/* Make room */
					skb_put(skb, pktuncrc_len);		    
					
					skb->protocol = eth_type_trans(skb, dev);
					
					/* pass the packet to upper layers */
#ifdef CONFIG_IDT_USE_NAPI
					netif_receive_skb(skb);
#else
					netif_rx(skb);
#endif
					
					dev->last_rx = jiffies;
					lp->stats.rx_packets++;
					lp->stats.rx_bytes += pktuncrc_len;
					
					if (IS_RCV_MP(devcs))
						lp->stats.multicast++;
					
					/* 16 bit align */						  
					skb_reserve(skb_new, 2);	
					
					skb_new->dev = dev;
					lp->rx_skb[lp->rx_next_done] = skb_new;
				}
				else {
					ERR("no memory, dropping rx packet.\n");
					lp->stats.rx_errors++;		
					lp->stats.rx_dropped++;					
				}
			}
			
		}			
		else {
			/* This should only happen if we enable accepting broken packets */
			lp->stats.rx_errors++;
			lp->stats.rx_dropped++;
			
			/* add statistics counters */
			if (IS_RCV_CRC_ERR(devcs)) {
				DBG(2, "RX CRC error\n");
				lp->stats.rx_crc_errors++;
			} 
			else if (IS_RCV_LOR_ERR(devcs)) {
				DBG(2, "RX LOR error\n");
				lp->stats.rx_length_errors++;
			}				
			else if (IS_RCV_LE_ERR(devcs)) {
				DBG(2, "RX LE error\n");
				lp->stats.rx_length_errors++;
			}
			else if (IS_RCV_OVR_ERR(devcs)) {
				lp->stats.rx_over_errors++;
			}
			else if (IS_RCV_CV_ERR(devcs)) {
				/* code violation */
				DBG(2, "RX CV error\n");
				lp->stats.rx_frame_errors++;
			}
			else if (IS_RCV_CES_ERR(devcs)) {
				DBG(2, "RX Preamble error\n");
			}
		}
		
		rd->devcs = 0;
		
		/* restore descriptor's curr_addr */
		if(skb_new)
			rd->ca = CPHYSADDR(skb_new->data); 
		else
			rd->ca = CPHYSADDR(skb->data);
		
		rd->control = DMA_COUNT(RC32434_RBSIZE) |DMAD_cod_m |DMAD_iod_m;
		lp->rd_ring[(lp->rx_next_done-1)& RC32434_RDS_MASK].control &=  ~(DMAD_cod_m); 	
		
		lp->rx_next_done = (lp->rx_next_done + 1) & RC32434_RDS_MASK;
		dma_cache_wback((u32)rd, sizeof(*rd));
		rd = &lp->rd_ring[lp->rx_next_done];
		__raw_writel( ~DMAS_d_m, &lp->rx_dma_regs->dmas);
	}	
#ifdef CONFIG_IDT_USE_NAPI
        dev->quota -= received;
        *budget -= received;
        if(rx_work_limit < 0)
                goto not_done;
#endif
	
	dmas = __raw_readl(&lp->rx_dma_regs->dmas);
	
	if(dmas & DMAS_h_m) {
		__raw_writel( ~(DMAS_h_m | DMAS_e_m), &lp->rx_dma_regs->dmas);
#ifdef RC32434_PROC_DEBUG
		lp->dma_halt_cnt++;
#endif
		rd->devcs = 0;
		skb = lp->rx_skb[lp->rx_next_done];
		rd->ca = CPHYSADDR(skb->data);
		dma_cache_wback((u32)rd, sizeof(*rd));
		rc32434_chain_rx(lp,rd);
	}
	
#ifdef CONFIG_IDT_USE_NAPI
	netif_rx_complete(dev);
#endif
	/* Enable D H E bit in Rx DMA */
	__raw_writel(__raw_readl(&lp->rx_dma_regs->dmasm) & ~(DMASM_d_m | DMASM_h_m |DMASM_e_m), &lp->rx_dma_regs->dmasm); 
#ifdef CONFIG_IDT_USE_NAPI
	return 0;
 not_done:
	return 1;
#else
	spin_unlock_irqrestore(&lp->lock, flags);
	return;
#endif

	
}	
Example #17
0
static int ag71xx_poll(struct napi_struct *napi, int limit)
{
	struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	struct net_device *dev = ag->dev;
	struct ag71xx_ring *rx_ring;
	unsigned long flags;
	u32 status;
	int done;

	pdata->ddr_flush();
	ag71xx_tx_packets(ag);

	DBG("%s: processing RX ring\n", dev->name);
	done = ag71xx_rx_packets(ag, limit);

	rx_ring = &ag->rx_ring;
	if (rx_ring->buf[rx_ring->dirty % AG71XX_RX_RING_SIZE].skb == NULL)
		goto oom;

	status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (unlikely(status & RX_STATUS_OF)) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF);
		dev->stats.rx_fifo_errors++;

		/* restart RX */
		ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
	}

	if (done < limit) {
		if (status & RX_STATUS_PR)
			goto more;

		status = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
		if (status & TX_STATUS_PS)
			goto more;

		DBG("%s: disable polling mode, done=%d, limit=%d\n",
			dev->name, done, limit);

		netif_rx_complete(dev, napi);

		/* enable interrupts */
		spin_lock_irqsave(&ag->lock, flags);
		ag71xx_int_enable(ag, AG71XX_INT_POLL);
		spin_unlock_irqrestore(&ag->lock, flags);
		return done;
	}

 more:
	DBG("%s: stay in polling mode, done=%d, limit=%d\n",
			dev->name, done, limit);
	return done;

 oom:
	if (netif_msg_rx_err(ag))
		printk(KERN_DEBUG "%s: out of memory\n", dev->name);

	mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL);
	netif_rx_complete(dev, napi);
	return 0;
}
Example #18
0
static int cpmac_poll(struct napi_struct *napi, int budget)
{
	struct sk_buff *skb;
	struct cpmac_desc *desc, *restart;
	struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi);
	int received = 0, processed = 0;

	spin_lock(&priv->rx_lock);
	if (unlikely(!priv->rx_head)) {
		if (netif_msg_rx_err(priv) && net_ratelimit())
			printk(KERN_WARNING "%s: rx: polling, but no queue\n",
			       priv->dev->name);
		spin_unlock(&priv->rx_lock);
		netif_rx_complete(priv->dev, napi);
		return 0;
	}

	desc = priv->rx_head;
	restart = NULL;
	while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) {
		processed++;

		if ((desc->dataflags & CPMAC_EOQ) != 0) {
			/* The last update to eoq->hw_next didn't happen
			 * soon enough, and the receiver stopped here.
			 * Remember this descriptor so we can restart
			 * the receiver after freeing some space.
			 */
			if (unlikely(restart)) {
				if (netif_msg_rx_err(priv))
					printk(KERN_ERR "%s: poll found a"
						" duplicate EOQ: %p and %p\n",
						priv->dev->name, restart, desc);
				goto fatal_error;
			}

			restart = desc->next;
		}

		skb = cpmac_rx_one(priv, desc);
		if (likely(skb)) {
			netif_receive_skb(skb);
			received++;
		}
		desc = desc->next;
	}

	if (desc != priv->rx_head) {
		/* We freed some buffers, but not the whole ring,
		 * add what we did free to the rx list */
		desc->prev->hw_next = (u32)0;
		priv->rx_head->prev->hw_next = priv->rx_head->mapping;
	}

	/* Optimization: If we did not actually process an EOQ (perhaps because
	 * of quota limits), check to see if the tail of the queue has EOQ set.
	 * We should immediately restart in that case so that the receiver can
	 * restart and run in parallel with more packet processing.
	 * This lets us handle slightly larger bursts before running
	 * out of ring space (assuming dev->weight < ring_size) */

	if (!restart &&
	     (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ))
		    == CPMAC_EOQ &&
	     (priv->rx_head->dataflags & CPMAC_OWN) != 0) {
		/* reset EOQ so the poll loop (above) doesn't try to
		 * restart this when it eventually gets to this descriptor.
		 */
		priv->rx_head->prev->dataflags &= ~CPMAC_EOQ;
		restart = priv->rx_head;
	}

	if (restart) {
		priv->dev->stats.rx_errors++;
		priv->dev->stats.rx_fifo_errors++;
		if (netif_msg_rx_err(priv) && net_ratelimit())
			printk(KERN_WARNING "%s: rx dma ring overrun\n",
			       priv->dev->name);

		if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) {
			if (netif_msg_drv(priv))
				printk(KERN_ERR "%s: cpmac_poll is trying to "
					"restart rx from a descriptor that's "
					"not free: %p\n",
					priv->dev->name, restart);
			goto fatal_error;
		}

		cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping);
	}

	priv->rx_head = desc;
	spin_unlock(&priv->rx_lock);
	if (unlikely(netif_msg_rx_status(priv)))
		printk(KERN_DEBUG "%s: poll processed %d packets\n",
		       priv->dev->name, received);
	if (processed == 0) {
		/* we ran out of packets to read,
		 * revert to interrupt-driven mode */
		netif_rx_complete(priv->dev, napi);
		cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
		return 0;
	}

	return 1;

fatal_error:
	/* Something went horribly wrong.
	 * Reset hardware to try to recover rather than wedging. */

	if (netif_msg_drv(priv)) {
		printk(KERN_ERR "%s: cpmac_poll is confused. "
				"Resetting hardware\n", priv->dev->name);
		cpmac_dump_all_desc(priv->dev);
		printk(KERN_DEBUG "%s: RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n",
			priv->dev->name,
			cpmac_read(priv->regs, CPMAC_RX_PTR(0)),
			cpmac_read(priv->regs, CPMAC_RX_ACK(0)));
	}

	spin_unlock(&priv->rx_lock);
	netif_rx_complete(priv->dev, napi);
	netif_tx_stop_all_queues(priv->dev);
	napi_disable(&priv->napi);

	atomic_inc(&priv->reset_pending);
	cpmac_hw_stop(priv->dev);
	if (!schedule_work(&priv->reset_work))
		atomic_dec(&priv->reset_pending);
	return 0;

}
Example #19
0
/* NAPI receive function */
static int fs_enet_rx_napi(struct net_device *dev, int *budget)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;
	cbd_t *bdp;
	struct sk_buff *skb, *skbn, *skbt;
	int received = 0;
	u16 pkt_len, sc;
	int curidx;
	int rx_work_limit = 0;	/* pacify gcc */

	rx_work_limit = min(dev->quota, *budget);

	if (!netif_running(dev))
		return 0;

	/*
	 * First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	/* clear RX status bits for napi*/
	(*fep->ops->napi_clear_rx_event)(dev);

	while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {

		curidx = bdp - fep->rx_bd_base;

		/*
		 * Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((sc & BD_ENET_RX_LAST) == 0)
			printk(KERN_WARNING DRV_MODULE_NAME
			       ": %s rcv is not +last\n",
			       dev->name);

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
			  BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			fep->stats.rx_errors++;
			/* Frame too long or too short. */
			if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
				fep->stats.rx_length_errors++;
			/* Frame alignment */
			if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				fep->stats.rx_frame_errors++;
			/* CRC Error */
			if (sc & BD_ENET_RX_CR)
				fep->stats.rx_crc_errors++;
			/* FIFO overrun */
			if (sc & BD_ENET_RX_OV)
				fep->stats.rx_crc_errors++;

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			skbn = skb;

		} else {

			/* napi, got packet but no quota */
			if (--rx_work_limit < 0)
				break;

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			/*
			 * Process the incoming frame.
			 */
			fep->stats.rx_packets++;
			pkt_len = CBDR_DATLEN(bdp) - 4;	/* remove CRC */
			fep->stats.rx_bytes += pkt_len + 4;

			if (pkt_len <= fpi->rx_copybreak) {
				/* +2 to make IP header L1 cache aligned */
				skbn = dev_alloc_skb(pkt_len + 2);
				if (skbn != NULL) {
					skb_reserve(skbn, 2);	/* align IP header */
					memcpy(skbn->data, skb->data, pkt_len);
					/* swap */
					skbt = skb;
					skb = skbn;
					skbn = skbt;
				}
			} else
				skbn = dev_alloc_skb(ENET_RX_FRSIZE);

			if (skbn != NULL) {
				skb->dev = dev;
				skb_put(skb, pkt_len);	/* Make room */
				skb->protocol = eth_type_trans(skb, dev);
				received++;
				netif_receive_skb(skb);
			} else {
				printk(KERN_WARNING DRV_MODULE_NAME
				       ": %s Memory squeeze, dropping packet.\n",
				       dev->name);
				fep->stats.rx_dropped++;
				skbn = skb;
			}
		}

		fep->rx_skbuff[curidx] = skbn;
		CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
			     L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
			     DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

		/*
		 * Update BD pointer to next entry.
		 */
		if ((sc & BD_ENET_RX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->rx_bd_base;

		(*fep->ops->rx_bd_done)(dev);
	}

	fep->cur_rx = bdp;

	dev->quota -= received;
	*budget -= received;

	if (rx_work_limit < 0)
		return 1;	/* not done */

	/* done */
	netif_rx_complete(dev);

	(*fep->ops->napi_enable_rx)(dev);

	return 0;
}
Example #20
0
static int tsi108_poll(struct net_device *dev, int *budget)
{
	struct tsi108_prv_data *data = netdev_priv(dev);
	u32 estat = TSI108_ETH_READ_REG(TSI108_EC_RXESTAT);
	u32 intstat = TSI108_ETH_READ_REG(TSI108_EC_INTSTAT);
	int total_budget = min(*budget, dev->quota);
	int num_received = 0, num_filled = 0, budget_used;

	intstat &= TSI108_INT_RXQUEUE0 | TSI108_INT_RXTHRESH |
	    TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR | TSI108_INT_RXWAIT;

	TSI108_ETH_WRITE_REG(TSI108_EC_RXESTAT, estat);
	TSI108_ETH_WRITE_REG(TSI108_EC_INTSTAT, intstat);

	if (data->rxpending || (estat & TSI108_EC_RXESTAT_Q0_DESCINT))
		num_received = tsi108_check_for_completed_rx(dev, total_budget);

	/* This should normally fill no more slots than the number of
	 * packets received in tsi108_check_for_completed_rx().  The exception
	 * is when we previously ran out of memory for RX SKBs.  In that
	 * case, it's helpful to obey the budget, not only so that the
	 * CPU isn't hogged, but so that memory (which may still be low)
	 * is not hogged by one device.
	 *
	 * A work unit is considered to be two SKBs to allow us to catch
	 * up when the ring has shrunk due to out-of-memory but we're
	 * still removing the full budget's worth of packets each time.
	 */

	if (data->rxfree < TSI108_RXRING_LEN)
		num_filled = tsi108_refill_rx(dev, total_budget * 2);

	if (intstat & TSI108_INT_RXERROR) {
		u32 err = TSI108_ETH_READ_REG(TSI108_EC_RXERR);
		TSI108_ETH_WRITE_REG(TSI108_EC_RXERR, err);

		if (err) {
			if (net_ratelimit())
				printk(KERN_DEBUG "%s: RX error %x\n",
				       dev->name, err);

			if (!(TSI108_ETH_READ_REG(TSI108_EC_RXSTAT) &
			      TSI108_EC_RXSTAT_QUEUE0))
				tsi108_restart_rx(data, dev);
		}
	}

	if (intstat & TSI108_INT_RXOVERRUN) {
		unsigned long flags;

		spin_lock_irqsave(&data->misclock, flags);
		data->stats.rx_fifo_errors++;
		spin_unlock_irqrestore(&data->misclock, flags);
	}

	budget_used = max(num_received, num_filled / 2);

	*budget -= budget_used;
	dev->quota -= budget_used;

	if (budget_used != total_budget) {
		data->rxpending = 0;
		netif_rx_complete(dev);

		TSI108_ETH_WRITE_REG(TSI108_EC_INTMASK,
				     TSI108_ETH_READ_REG(TSI108_EC_INTMASK)
				     & ~(TSI108_INT_RXQUEUE0
					 | TSI108_INT_RXTHRESH |
					 TSI108_INT_RXOVERRUN |
					 TSI108_INT_RXERROR |
					 TSI108_INT_RXWAIT));

		mb();

		/* IRQs are level-triggered, so no need to re-check */
		return 0;
	} else {
		data->rxpending = 1;
	}

	return 1;
}
Example #21
0
static int hss_hdlc_poll(struct napi_struct *napi, int budget)
{
	struct port *port = container_of(napi, struct port, napi);
	struct net_device *dev = port->netdev;
	unsigned int rxq = queue_ids[port->id].rx;
	unsigned int rxfreeq = queue_ids[port->id].rxfree;
	int received = 0;

#if DEBUG_RX
	printk(KERN_DEBUG "%s: hss_hdlc_poll\n", dev->name);
#endif

	while (received < budget) {
		struct sk_buff *skb;
		struct desc *desc;
		int n;
#ifdef __ARMEB__
		struct sk_buff *temp;
		u32 phys;
#endif

		if ((n = queue_get_desc(rxq, port, 0)) < 0) {
#if DEBUG_RX
			printk(KERN_DEBUG "%s: hss_hdlc_poll"
			       " netif_rx_complete\n", dev->name);
#endif
			netif_rx_complete(napi);
			qmgr_enable_irq(rxq);
			if (!qmgr_stat_empty(rxq) &&
			    netif_rx_reschedule(napi)) {
#if DEBUG_RX
				printk(KERN_DEBUG "%s: hss_hdlc_poll"
				       " netif_rx_reschedule succeeded\n",
				       dev->name);
#endif
				qmgr_disable_irq(rxq);
				continue;
			}
#if DEBUG_RX
			printk(KERN_DEBUG "%s: hss_hdlc_poll all done\n",
			       dev->name);
#endif
			return received; /* all work done */
		}

		desc = rx_desc_ptr(port, n);
#if 0 /* FIXME - error_count counts modulo 256, perhaps we should use it */
		if (desc->error_count)
			printk(KERN_DEBUG "%s: hss_hdlc_poll status 0x%02X"
			       " errors %u\n", dev->name, desc->status,
			       desc->error_count);
#endif
		skb = NULL;
		switch (desc->status) {
		case 0:
#ifdef __ARMEB__
			if ((skb = netdev_alloc_skb(dev, RX_SIZE)) != NULL) {
				phys = dma_map_single(&dev->dev, skb->data,
						      RX_SIZE,
						      DMA_FROM_DEVICE);
				if (dma_mapping_error(&dev->dev, phys)) {
					dev_kfree_skb(skb);
					skb = NULL;
				}
			}
#else
			skb = netdev_alloc_skb(dev, desc->pkt_len);
#endif
			if (!skb)
				dev->stats.rx_dropped++;
			break;
		case ERR_HDLC_ALIGN:
		case ERR_HDLC_ABORT:
			dev->stats.rx_frame_errors++;
			dev->stats.rx_errors++;
			break;
		case ERR_HDLC_FCS:
			dev->stats.rx_crc_errors++;
			dev->stats.rx_errors++;
			break;
		case ERR_HDLC_TOO_LONG:
			dev->stats.rx_length_errors++;
			dev->stats.rx_errors++;
			break;
		default:	/* FIXME - remove printk */
			printk(KERN_ERR "%s: hss_hdlc_poll: status 0x%02X"
			       " errors %u\n", dev->name, desc->status,
			       desc->error_count);
			dev->stats.rx_errors++;
		}

		if (!skb) {
			/* put the desc back on RX-ready queue */
			desc->buf_len = RX_SIZE;
			desc->pkt_len = desc->status = 0;
			queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
			continue;
		}

		/* process received frame */
#ifdef __ARMEB__
		temp = skb;
		skb = port->rx_buff_tab[n];
		dma_unmap_single(&dev->dev, desc->data,
				 RX_SIZE, DMA_FROM_DEVICE);
#else
		dma_sync_single(&dev->dev, desc->data,
				RX_SIZE, DMA_FROM_DEVICE);
		memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
			      ALIGN(desc->pkt_len, 4) / 4);
#endif
		skb_put(skb, desc->pkt_len);

		debug_pkt(dev, "hss_hdlc_poll", skb->data, skb->len);

		skb->protocol = hdlc_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);

		/* put the new buffer on RX-free queue */
#ifdef __ARMEB__
		port->rx_buff_tab[n] = temp;
		desc->data = phys;
#endif
		desc->buf_len = RX_SIZE;
		desc->pkt_len = 0;
		queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
		received++;
	}
#if DEBUG_RX
	printk(KERN_DEBUG "hss_hdlc_poll: end, not all work done\n");
#endif
	return received;	/* not all work done */
}
Example #22
0
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
	struct ibmveth_adapter *adapter = container_of(napi, struct ibmveth_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int frames_processed = 0;
	unsigned long lpar_rc;

 restart_poll:
	do {
		struct sk_buff *skb;

		if (!ibmveth_rxq_pending_buffer(adapter))
			break;

		rmb();
		if (!ibmveth_rxq_buffer_valid(adapter)) {
			wmb(); /* suggested by larson1 */
			adapter->rx_invalid_buffer++;
			ibmveth_debug_printk("recycling invalid buffer\n");
			ibmveth_rxq_recycle_buffer(adapter);
		} else {
			int length = ibmveth_rxq_frame_length(adapter);
			int offset = ibmveth_rxq_frame_offset(adapter);
			int csum_good = ibmveth_rxq_csum_good(adapter);

			skb = ibmveth_rxq_get_buffer(adapter);
			if (csum_good)
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			ibmveth_rxq_harvest_buffer(adapter);

			skb_reserve(skb, offset);
			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);

			netif_receive_skb(skb);	/* send it up */

			netdev->stats.rx_packets++;
			netdev->stats.rx_bytes += length;
			frames_processed++;
			netdev->last_rx = jiffies;
		}
	} while (frames_processed < budget);

	ibmveth_replenish_task(adapter);

	if (frames_processed < budget) {
		/* We think we are done - reenable interrupts,
		 * then check once more to make sure we are done.
		 */
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_ENABLE);

		ibmveth_assert(lpar_rc == H_SUCCESS);

		netif_rx_complete(netdev, napi);

		if (ibmveth_rxq_pending_buffer(adapter) &&
		    netif_rx_reschedule(netdev, napi)) {
			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
					       VIO_IRQ_DISABLE);
			goto restart_poll;
		}
	}

	return frames_processed;
}