Example #1
File: sn_netdev.c Project: NetSys/bess
/* The return value says how many packets are actually received */
static int sn_poll(struct napi_struct *napi, int budget)
{
	struct sn_queue *rx_queue;

	int ret;

	rx_queue = container_of(napi, struct sn_queue, rx.napi);

	if (!spin_trylock(&rx_queue->rx.lock))
		return 0;

	rx_queue->rx.stats.polls++;

	ret = sn_poll_action(rx_queue, budget);

	if (ret < budget) {
		napi_complete(napi);
		sn_enable_interrupt(rx_queue);

		/* last check for race condition.
		 * see sn_enable_interrupt() */
		if (rx_queue->dev->ops->pending_rx(rx_queue)) {
			napi_reschedule(napi);
			sn_disable_interrupt(rx_queue);
		}
	}

	spin_unlock(&rx_queue->rx.lock);

	return ret;
}
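
The handlers collected here all follow the same NAPI completion pattern used by sn_poll() above: process at most budget packets, and only when the budget was not exhausted call napi_complete(), re-enable the device's RX interrupt, then re-check for pending work and napi_reschedule() to close the race window noted in the comment above. A minimal sketch of that pattern, assuming a hypothetical struct my_dev_priv and my_dev_*() helpers in place of the driver-specific calls:

/* Minimal sketch of the shared pattern; struct my_dev_priv and the
 * my_dev_*() helpers are hypothetical stand-ins for driver specifics. */
static int my_dev_poll(struct napi_struct *napi, int budget)
{
	struct my_dev_priv *priv = container_of(napi, struct my_dev_priv, napi);
	int work_done;

	/* Process at most 'budget' received packets. */
	work_done = my_dev_rx(priv, budget);

	if (work_done < budget) {
		/* Budget not exhausted: leave the poll list and re-arm the IRQ. */
		napi_complete(napi);
		my_dev_enable_rx_irq(priv);

		/* Packets may have arrived while the IRQ was still masked;
		 * re-check and get back on the poll list if so. */
		if (my_dev_rx_pending(priv) && napi_reschedule(napi))
			my_dev_disable_rx_irq(priv);
	}

	return work_done;
}

Examples #6 and #7 below implement the same re-check with qmgr_stat_empty() and qmgr_disable_irq() instead of the hypothetical helpers.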
Example #2
static int greth_poll(struct napi_struct *napi, int budget)
{
	struct greth_private *greth;
	int work_done = 0;
	greth = container_of(napi, struct greth_private, napi);

	if (greth->gbit_mac) {
		greth_clean_tx_gbit(greth->netdev);
	} else {
		greth_clean_tx(greth->netdev);
	}

restart_poll:
	if (greth->gbit_mac) {
		work_done += greth_rx_gbit(greth->netdev, budget - work_done);
	} else {
		work_done += greth_rx(greth->netdev, budget - work_done);
	}

	if (work_done < budget) {

		napi_complete(napi);

		if (greth_pending_packets(greth)) {
			napi_reschedule(napi);
			goto restart_poll;
		}
	}

	greth_enable_irqs(greth);
	return work_done;
}
Example #3
File: ubi32-eth.c Project: 7LK/McWRT
static int ubi32_eth_napi_poll(struct napi_struct *napi, int budget)
{
	struct ubi32_eth_private *priv = container_of(napi, struct ubi32_eth_private, napi);
	struct net_device *dev = priv->dev;
	u32_t count;

	if (priv->tx_tail != priv->regs->tx_out) {
		ubi32_eth_tx_done(dev);
	}

	count = ubi32_eth_receive(dev, budget);

	if (count < budget) {
		napi_complete(napi);
		priv->regs->int_mask |= (UBI32_ETH_VP_INT_RX | UBI32_ETH_VP_INT_TX);
		if ((priv->rx_tail != priv->regs->rx_out) || (priv->tx_tail != priv->regs->tx_out)) {
			if (napi_reschedule(napi)) {
				priv->regs->int_mask = 0;
			}
		}
	}
	return count;
}
Example #4
/**
 * nps_enet_poll - NAPI poll handler.
 * @napi:       Pointer to napi_struct structure.
 * @budget:     How many frames to process on one call.
 *
 * returns:     Number of processed frames
 */
static int nps_enet_poll(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct nps_enet_priv *priv = netdev_priv(ndev);
	u32 work_done;

	nps_enet_tx_handler(ndev);
	work_done = nps_enet_rx_handler(ndev);
	if (work_done < budget) {
		u32 buf_int_enable_value = 0;
		u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
		u32 tx_ctrl_ct =
			(tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT;

		napi_complete(napi);

		/* set tx_done and rx_rdy bits */
		buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT;
		buf_int_enable_value |= NPS_ENET_ENABLE << TX_DONE_SHIFT;

		nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE,
				 buf_int_enable_value);

		/* in case we will get a tx interrupt while interrupts
		 * are masked, we will lose it since the tx is edge interrupt.
		 * specifically, while executing the code section above,
		 * between nps_enet_tx_handler and the interrupts enable, all
		 * tx requests will be stuck until we will get an rx interrupt.
		 * the two code lines below will solve this situation by
		 * re-adding ourselves to the poll list.
		 */

		if (priv->tx_skb && !tx_ctrl_ct) {
			nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
			napi_reschedule(napi);
		}
	}

	return work_done;
}
Example #5
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
	struct ibmveth_adapter *adapter = container_of(napi, struct ibmveth_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int frames_processed = 0;
	unsigned long lpar_rc;

 restart_poll:
	do {
		struct sk_buff *skb;

		if (!ibmveth_rxq_pending_buffer(adapter))
			break;

		rmb();
		if (!ibmveth_rxq_buffer_valid(adapter)) {
			wmb(); /* suggested by larson1 */
			adapter->rx_invalid_buffer++;
			ibmveth_debug_printk("recycling invalid buffer\n");
			ibmveth_rxq_recycle_buffer(adapter);
		} else {
			int length = ibmveth_rxq_frame_length(adapter);
			int offset = ibmveth_rxq_frame_offset(adapter);
			int csum_good = ibmveth_rxq_csum_good(adapter);

			skb = ibmveth_rxq_get_buffer(adapter);
			if (csum_good)
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			ibmveth_rxq_harvest_buffer(adapter);

			skb_reserve(skb, offset);
			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);

			netif_receive_skb(skb);	/* send it up */

			netdev->stats.rx_packets++;
			netdev->stats.rx_bytes += length;
			frames_processed++;
		}
	} while (frames_processed < budget);

	ibmveth_replenish_task(adapter);

	if (frames_processed < budget) {
		/* We think we are done - reenable interrupts,
		 * then check once more to make sure we are done.
		 */
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_ENABLE);

		ibmveth_assert(lpar_rc == H_SUCCESS);

		napi_complete(napi);

		if (ibmveth_rxq_pending_buffer(adapter) &&
		    napi_reschedule(napi)) {
			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
					       VIO_IRQ_DISABLE);
			goto restart_poll;
		}
	}

	return frames_processed;
}
Example #6
static int hss_hdlc_poll(struct napi_struct *napi, int budget)
{
	struct port *port = container_of(napi, struct port, napi);
	struct net_device *dev = port->netdev;
	unsigned int rxq = queue_ids[port->id].rx;
	unsigned int rxfreeq = queue_ids[port->id].rxfree;
	int received = 0;

#if DEBUG_RX
	printk(KERN_DEBUG "%s: hss_hdlc_poll\n", dev->name);
#endif

	while (received < budget) {
		struct sk_buff *skb;
		struct desc *desc;
		int n;
#ifdef __ARMEB__
		struct sk_buff *temp;
		u32 phys;
#endif

		if ((n = queue_get_desc(rxq, port, 0)) < 0) {
#if DEBUG_RX
			printk(KERN_DEBUG "%s: hss_hdlc_poll"
			       " napi_complete\n", dev->name);
#endif
			napi_complete(napi);
			qmgr_enable_irq(rxq);
			if (!qmgr_stat_empty(rxq) &&
			    napi_reschedule(napi)) {
#if DEBUG_RX
				printk(KERN_DEBUG "%s: hss_hdlc_poll"
				       " napi_reschedule succeeded\n",
				       dev->name);
#endif
				qmgr_disable_irq(rxq);
				continue;
			}
#if DEBUG_RX
			printk(KERN_DEBUG "%s: hss_hdlc_poll all done\n",
			       dev->name);
#endif
			return received; /* all work done */
		}

		desc = rx_desc_ptr(port, n);
#if 0 
		if (desc->error_count)
			printk(KERN_DEBUG "%s: hss_hdlc_poll status 0x%02X"
			       " errors %u\n", dev->name, desc->status,
			       desc->error_count);
#endif
		skb = NULL;
		switch (desc->status) {
		case 0:
#ifdef __ARMEB__
			if ((skb = netdev_alloc_skb(dev, RX_SIZE)) != NULL) {
				phys = dma_map_single(&dev->dev, skb->data,
						      RX_SIZE,
						      DMA_FROM_DEVICE);
				if (dma_mapping_error(&dev->dev, phys)) {
					dev_kfree_skb(skb);
					skb = NULL;
				}
			}
#else
			skb = netdev_alloc_skb(dev, desc->pkt_len);
#endif
			if (!skb)
				dev->stats.rx_dropped++;
			break;
		case ERR_HDLC_ALIGN:
		case ERR_HDLC_ABORT:
			dev->stats.rx_frame_errors++;
			dev->stats.rx_errors++;
			break;
		case ERR_HDLC_FCS:
			dev->stats.rx_crc_errors++;
			dev->stats.rx_errors++;
			break;
		case ERR_HDLC_TOO_LONG:
			dev->stats.rx_length_errors++;
			dev->stats.rx_errors++;
			break;
		default:	
			netdev_err(dev, "hss_hdlc_poll: status 0x%02X errors %u\n",
				   desc->status, desc->error_count);
			dev->stats.rx_errors++;
		}

		if (!skb) {
			/* put the desc back on RX-ready queue */
			desc->buf_len = RX_SIZE;
			desc->pkt_len = desc->status = 0;
			queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
			continue;
		}

		/* process received frame */
#ifdef __ARMEB__
		temp = skb;
		skb = port->rx_buff_tab[n];
		dma_unmap_single(&dev->dev, desc->data,
				 RX_SIZE, DMA_FROM_DEVICE);
#else
		dma_sync_single_for_cpu(&dev->dev, desc->data,
					RX_SIZE, DMA_FROM_DEVICE);
		memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
			      ALIGN(desc->pkt_len, 4) / 4);
#endif
		skb_put(skb, desc->pkt_len);

		debug_pkt(dev, "hss_hdlc_poll", skb->data, skb->len);

		skb->protocol = hdlc_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);

		/* put the new buffer on RX-free queue */
#ifdef __ARMEB__
		port->rx_buff_tab[n] = temp;
		desc->data = phys;
#endif
		desc->buf_len = RX_SIZE;
		desc->pkt_len = 0;
		queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
		received++;
	}
#if DEBUG_RX
	printk(KERN_DEBUG "hss_hdlc_poll: end, not all work done\n");
#endif
	return received;	/* not all work done */
}
Example #7
static int eth_poll(struct napi_struct *napi, int budget)
{
	struct port *port = container_of(napi, struct port, napi);
	struct net_device *dev = port->netdev;
	unsigned int rxq = port->plat->rxq, rxfreeq = RXFREE_QUEUE(port->id);
	int received = 0;

#if DEBUG_RX
	printk(KERN_DEBUG "%s: eth_poll\n", dev->name);
#endif

	while (received < budget) {
		struct sk_buff *skb;
		struct desc *desc;
		int n;
#ifdef __ARMEB__
		struct sk_buff *temp;
		u32 phys;
#endif

		if ((n = queue_get_desc(rxq, port, 0)) < 0) {
#if DEBUG_RX
			printk(KERN_DEBUG "%s: eth_poll napi_complete\n",
			       dev->name);
#endif
			napi_complete(napi);
			qmgr_enable_irq(rxq);
			if (!qmgr_stat_empty(rxq) &&
			    napi_reschedule(napi)) {
#if DEBUG_RX
				printk(KERN_DEBUG "%s: eth_poll"
				       " napi_reschedule successed\n",
				       dev->name);
#endif
				qmgr_disable_irq(rxq);
				continue;
			}
#if DEBUG_RX
			printk(KERN_DEBUG "%s: eth_poll all done\n",
			       dev->name);
#endif
			return received; /* all work done */
		}

		desc = rx_desc_ptr(port, n);

#ifdef __ARMEB__
		if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) {
			phys = dma_map_single(&dev->dev, skb->data,
					      RX_BUFF_SIZE, DMA_FROM_DEVICE);
			if (dma_mapping_error(&dev->dev, phys)) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
#else
		skb = netdev_alloc_skb(dev,
				       ALIGN(NET_IP_ALIGN + desc->pkt_len, 4));
#endif

		if (!skb) {
			dev->stats.rx_dropped++;
			/* put the desc back on RX-ready queue */
			desc->buf_len = MAX_MRU;
			desc->pkt_len = 0;
			queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
			continue;
		}

		/* process received frame */
#ifdef __ARMEB__
		temp = skb;
		skb = port->rx_buff_tab[n];
		dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN,
				 RX_BUFF_SIZE, DMA_FROM_DEVICE);
#else
		dma_sync_single(&dev->dev, desc->data - NET_IP_ALIGN,
				RX_BUFF_SIZE, DMA_FROM_DEVICE);
		memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
			      ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4);
#endif
		skb_reserve(skb, NET_IP_ALIGN);
		skb_put(skb, desc->pkt_len);

		debug_pkt(dev, "eth_poll", skb->data, skb->len);

		skb->protocol = eth_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);

		/* put the new buffer on RX-free queue */
#ifdef __ARMEB__
		port->rx_buff_tab[n] = temp;
		desc->data = phys + NET_IP_ALIGN;
#endif
		desc->buf_len = MAX_MRU;
		desc->pkt_len = 0;
		queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
		received++;
	}

#if DEBUG_RX
	printk(KERN_DEBUG "eth_poll(): end, not all work done\n");
#endif
	return received;		/* not all work done */
}
Example #8
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
	struct ibmveth_adapter *adapter =
			container_of(napi, struct ibmveth_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int frames_processed = 0;
	unsigned long lpar_rc;

restart_poll:
	do {
		if (!ibmveth_rxq_pending_buffer(adapter))
			break;

		smp_rmb();
		if (!ibmveth_rxq_buffer_valid(adapter)) {
			wmb(); /* suggested by larson1 */
			adapter->rx_invalid_buffer++;
			netdev_dbg(netdev, "recycling invalid buffer\n");
			ibmveth_rxq_recycle_buffer(adapter);
		} else {
			struct sk_buff *skb, *new_skb;
			int length = ibmveth_rxq_frame_length(adapter);
			int offset = ibmveth_rxq_frame_offset(adapter);
			int csum_good = ibmveth_rxq_csum_good(adapter);

			skb = ibmveth_rxq_get_buffer(adapter);

			new_skb = NULL;
			if (length < rx_copybreak)
				new_skb = netdev_alloc_skb(netdev, length);

			if (new_skb) {
				skb_copy_to_linear_data(new_skb,
							skb->data + offset,
							length);
				if (rx_flush)
					ibmveth_flush_buffer(skb->data,
						length + offset);
				if (!ibmveth_rxq_recycle_buffer(adapter))
					kfree_skb(skb);
				skb = new_skb;
			} else {
				ibmveth_rxq_harvest_buffer(adapter);
				skb_reserve(skb, offset);
			}

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);

			if (csum_good)
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			netif_receive_skb(skb);	/* send it up */

			netdev->stats.rx_packets++;
			netdev->stats.rx_bytes += length;
			frames_processed++;
		}
	} while (frames_processed < budget);

	ibmveth_replenish_task(adapter);

	if (frames_processed < budget) {
		/* We think we are done - reenable interrupts,
		 * then check once more to make sure we are done.
		 */
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_ENABLE);

		BUG_ON(lpar_rc != H_SUCCESS);

		napi_complete(napi);

		if (ibmveth_rxq_pending_buffer(adapter) &&
		    napi_reschedule(napi)) {
			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
					       VIO_IRQ_DISABLE);
			goto restart_poll;
		}
	}

	return frames_processed;
}
Example #9
static int fjes_poll(struct napi_struct *napi, int budget)
{
	struct fjes_adapter *adapter =
			container_of(napi, struct fjes_adapter, napi);
	struct net_device *netdev = napi->dev;
	struct fjes_hw *hw = &adapter->hw;
	struct sk_buff *skb;
	int work_done = 0;
	int cur_epid = 0;
	int epidx;
	size_t frame_len;
	void *frame;

	spin_lock(&hw->rx_status_lock);
	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		if (fjes_hw_get_partner_ep_status(hw, epidx) ==
		    EP_PARTNER_SHARED)
			adapter->hw.ep_shm_info[epidx]
				   .tx.info->v1i.rx_status |= FJES_RX_POLL_WORK;
	}
	spin_unlock(&hw->rx_status_lock);

	while (work_done < budget) {
		prefetch(&adapter->hw);
		frame = fjes_rxframe_get(adapter, &frame_len, &cur_epid);

		if (frame) {
			skb = napi_alloc_skb(napi, frame_len);
			if (!skb) {
				adapter->stats64.rx_dropped += 1;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_dropped += 1;
				adapter->stats64.rx_errors += 1;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_errors += 1;
			} else {
				memcpy(skb_put(skb, frame_len),
				       frame, frame_len);
				skb->protocol = eth_type_trans(skb, netdev);
				skb->ip_summed = CHECKSUM_UNNECESSARY;

				netif_receive_skb(skb);

				work_done++;

				adapter->stats64.rx_packets += 1;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_packets += 1;
				adapter->stats64.rx_bytes += frame_len;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_bytes += frame_len;

				if (is_multicast_ether_addr(
					((struct ethhdr *)frame)->h_dest)) {
					adapter->stats64.multicast += 1;
					hw->ep_shm_info[cur_epid].net_stats
								 .multicast += 1;
				}
			}

			fjes_rxframe_release(adapter, cur_epid);
			adapter->unset_rx_last = true;
		} else {
			break;
		}
	}

	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		if (adapter->unset_rx_last) {
			adapter->rx_last_jiffies = jiffies;
			adapter->unset_rx_last = false;
		}

		if (((long)jiffies - (long)adapter->rx_last_jiffies) < 3) {
			napi_reschedule(napi);
		} else {
			spin_lock(&hw->rx_status_lock);
			for (epidx = 0; epidx < hw->max_epid; epidx++) {
				if (epidx == hw->my_epid)
					continue;
				if (fjes_hw_get_partner_ep_status(hw, epidx) ==
				    EP_PARTNER_SHARED)
					adapter->hw.ep_shm_info[epidx].tx
						   .info->v1i.rx_status &=
						~FJES_RX_POLL_WORK;
			}
			spin_unlock(&hw->rx_status_lock);

			fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, false);
		}
	}

	return work_done;
}
Example #10
static int rmnet_mhi_poll(struct napi_struct *napi, int budget)
{
	int received_packets = 0;
	struct net_device *dev = napi->dev;
	struct rmnet_mhi_private *rmnet_mhi_ptr =
			*(struct rmnet_mhi_private **)netdev_priv(dev);
	enum MHI_STATUS res = MHI_STATUS_reserved;
	bool should_reschedule = true;
	struct sk_buff *skb;
	struct mhi_skb_priv *skb_priv;
	int r, cur_mru;

	rmnet_log(MSG_VERBOSE, "Entered\n");
	rmnet_mhi_ptr->mru = mru;
	while (received_packets < budget) {
		struct mhi_result *result =
		      mhi_poll(rmnet_mhi_ptr->rx_client_handle);
		if (result->transaction_status == MHI_STATUS_DEVICE_NOT_READY) {
			rmnet_log(MSG_INFO,
				  "Transaction status not ready, continuing\n");
			break;
		} else if (result->transaction_status != MHI_STATUS_SUCCESS &&
			   result->transaction_status != MHI_STATUS_OVERFLOW) {
			rmnet_log(MSG_CRITICAL,
				  "mhi_poll failed, error %d\n",
				  result->transaction_status);
			break;
		}

		/* Nothing more to read, or out of buffers in MHI layer */
		if (unlikely(!result->buf_addr || !result->bytes_xferd)) {
			rmnet_log(MSG_CRITICAL,
				  "Not valid buff not rescheduling\n");
			should_reschedule = false;
			break;
		}

		skb = skb_dequeue(&(rmnet_mhi_ptr->rx_buffers));
		if (unlikely(!skb)) {
			rmnet_log(MSG_CRITICAL,
				  "No RX buffers to match");
			break;
		}

		skb_priv = (struct mhi_skb_priv *)(skb->cb);

		/* Setup the tail to the end of data */
		skb_put(skb, result->bytes_xferd);

		skb->dev = dev;
		skb->protocol = rmnet_mhi_ip_type_trans(skb);

		if (result->transaction_status == MHI_STATUS_OVERFLOW)
			r = rmnet_mhi_process_fragment(rmnet_mhi_ptr, skb, 1);
		else
			r = rmnet_mhi_process_fragment(rmnet_mhi_ptr, skb, 0);
		if (r) {
			rmnet_log(MSG_CRITICAL,
				  "Failed to process fragmented packet ret %d",
				   r);
			BUG();
		}

		/* Statistics */
		received_packets++;
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += result->bytes_xferd;

		/* Need to allocate a new buffer instead of this one */
		cur_mru = rmnet_mhi_ptr->mru;
		skb = alloc_skb(cur_mru, GFP_ATOMIC);
		if (unlikely(!skb)) {
			rmnet_log(MSG_CRITICAL,
				  "Can't allocate a new RX buffer for MHI");
			break;
		}
		skb_priv = (struct mhi_skb_priv *)(skb->cb);
		skb_priv->dma_size = cur_mru;

		rmnet_log(MSG_VERBOSE,
		  "Allocated SKB of MRU 0x%x, SKB_DATA 0%p SKB_LEN 0x%x\n",
				rmnet_mhi_ptr->mru, skb->data, skb->len);
		/* Reserve headroom, tail == data */
		skb_reserve(skb, MHI_RX_HEADROOM);
		skb_priv->dma_size -= MHI_RX_HEADROOM;
		skb_priv->dma_addr = 0;

		rmnet_log(MSG_VERBOSE,
			 "Mapped SKB %p to DMA Addr 0x%lx, DMA_SIZE: 0x%lx\n",
			  skb->data,
			  (uintptr_t)skb->data,
			  (uintptr_t)skb_priv->dma_size);


		res = mhi_queue_xfer(
			rmnet_mhi_ptr->rx_client_handle,
			skb->data, skb_priv->dma_size, MHI_EOT);

		if (unlikely(MHI_STATUS_SUCCESS != res)) {
			rmnet_log(MSG_CRITICAL,
				"mhi_queue_xfer failed, error %d", res);
			dev_kfree_skb_irq(skb);
			break;
		}
		skb_queue_tail(&rmnet_mhi_ptr->rx_buffers, skb);
	} /* while (received_packets < budget) or any other error */

	napi_complete(napi);

	/* We got a NULL descriptor back */
	if (should_reschedule == false) {
		if (atomic_read(&rmnet_mhi_ptr->irq_masked_cntr)) {
			atomic_dec(&rmnet_mhi_ptr->irq_masked_cntr);
			mhi_unmask_irq(rmnet_mhi_ptr->rx_client_handle);
		}
	} else {
		if (received_packets == budget)
			rx_napi_budget_overflow[rmnet_mhi_ptr->dev_index]++;
		napi_reschedule(napi);
	}

	rx_napi_skb_burst_min[rmnet_mhi_ptr->dev_index] =
	min((unsigned long)received_packets,
	    rx_napi_skb_burst_min[rmnet_mhi_ptr->dev_index]);

	rx_napi_skb_burst_max[rmnet_mhi_ptr->dev_index] =
	max((unsigned long)received_packets,
	    rx_napi_skb_burst_max[rmnet_mhi_ptr->dev_index]);

	rmnet_log(MSG_VERBOSE, "Exited, polled %d pkts\n", received_packets);
	return received_packets;
}
Example #11
static int rmnet_mhi_poll(struct napi_struct *napi, int budget)
{
	int received_packets = 0;
	struct net_device *dev = napi->dev;
	struct rmnet_mhi_private *rmnet_mhi_ptr =
			*(struct rmnet_mhi_private **)netdev_priv(dev);
	enum MHI_STATUS res = MHI_STATUS_reserved;
	bool should_reschedule = true;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	uintptr_t *cb_ptr;

	rmnet_log(MSG_VERBOSE, "Entered\n");
	while (received_packets < budget) {
		struct mhi_result *result =
		      mhi_poll(rmnet_mhi_ptr->rx_client_handle);
		if (result->transaction_status == MHI_STATUS_DEVICE_NOT_READY) {
			continue;
		} else if (result->transaction_status != MHI_STATUS_SUCCESS) {
			rmnet_log(MSG_CRITICAL,
				  "mhi_poll failed, error is %d\n",
				  result->transaction_status);
			break;
		}

		/* Nothing more to read, or out of buffers in MHI layer */
		if (unlikely(!result->payload_buf ||
						!result->bytes_xferd)) {
			should_reschedule = false;
			break;
		}

		skb = skb_dequeue(&(rmnet_mhi_ptr->rx_buffers));
		if (unlikely(!skb)) {
			rmnet_log(MSG_CRITICAL,
				  "No RX buffers to match");
			break;
		}

		cb_ptr = (uintptr_t *)skb->cb;
		dma_addr = (dma_addr_t)(uintptr_t)(*cb_ptr);

		/* Sanity check, ensuring that this is actually the buffer */
		if (unlikely(dma_addr != result->payload_buf)) {
			rmnet_log(MSG_CRITICAL,
				  "Buf mismatch, expected 0x%lx, got 0x%lx",
					(uintptr_t)dma_addr,
					(uintptr_t)result->payload_buf);
			break;
		}

		dma_unmap_single(&(dev->dev), dma_addr,
				(rmnet_mhi_ptr->mru - MHI_RX_HEADROOM),
				 DMA_FROM_DEVICE);
		skb_put(skb, result->bytes_xferd);

		skb->dev = dev;
		skb->protocol = rmnet_mhi_ip_type_trans(skb);

		netif_receive_skb(skb);

		/* Statistics */
		received_packets++;
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += result->bytes_xferd;

		/* Need to allocate a new buffer instead of this one */
		skb = alloc_skb(rmnet_mhi_ptr->mru, GFP_ATOMIC);

		if (unlikely(!skb)) {
			rmnet_log(MSG_CRITICAL,
				  "Can't allocate a new RX buffer for MHI");
			break;
		}

		skb_reserve(skb, MHI_RX_HEADROOM);

		cb_ptr = (uintptr_t *)skb->cb;
		dma_addr = dma_map_single(&(dev->dev), skb->data,
					(rmnet_mhi_ptr->mru - MHI_RX_HEADROOM),
					DMA_FROM_DEVICE);
		*cb_ptr = (uintptr_t)dma_addr;

		if (unlikely(dma_mapping_error(&(dev->dev), dma_addr))) {
			rmnet_log(MSG_CRITICAL,
				  "DMA mapping error in polling function");
			dev_kfree_skb_irq(skb);
			break;
		}

		res = mhi_queue_xfer(
			rmnet_mhi_ptr->rx_client_handle,
			(uintptr_t)dma_addr, rmnet_mhi_ptr->mru, MHI_EOT);

		if (unlikely(MHI_STATUS_SUCCESS != res)) {
			rmnet_log(MSG_CRITICAL,
				"mhi_queue_xfer failed, error %d", res);
			dma_unmap_single(&(dev->dev), dma_addr,
					(rmnet_mhi_ptr->mru - MHI_RX_HEADROOM),
					DMA_FROM_DEVICE);

			dev_kfree_skb_irq(skb);
			break;
		}

		skb_queue_tail(&(rmnet_mhi_ptr->rx_buffers), skb);

	} /* while (received_packets < budget) or any other error */

	napi_complete(napi);

	/* We got a NULL descriptor back */
	if (should_reschedule == false) {
		if (rmnet_mhi_ptr->irq_masked_cntr) {
			mhi_unmask_irq(rmnet_mhi_ptr->rx_client_handle);
			--rmnet_mhi_ptr->irq_masked_cntr;
		}
	} else {
		if (received_packets == budget)
			rx_napi_budget_overflow[rmnet_mhi_ptr->dev_index]++;
		napi_reschedule(napi);
	}

	rx_napi_skb_burst_min[rmnet_mhi_ptr->dev_index] =
	min((unsigned long)received_packets,
	    rx_napi_skb_burst_min[rmnet_mhi_ptr->dev_index]);

	rx_napi_skb_burst_max[rmnet_mhi_ptr->dev_index] =
	max((unsigned long)received_packets,
	    rx_napi_skb_burst_max[rmnet_mhi_ptr->dev_index]);

	rmnet_log(MSG_VERBOSE, "Exited, polled %d pkts\n", received_packets);
	return received_packets;
}
Example #12
/* TODO: No error handling yet */
static int rmnet_mhi_poll(struct napi_struct *napi, int budget)
{
	int received_packets = 0;
	struct net_device *dev = napi->dev;
	struct rmnet_mhi_private *rmnet_mhi_ptr = netdev_priv(dev);
	MHI_STATUS res = MHI_STATUS_reserved;
	bool should_reschedule = true;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	uintptr_t *cb_ptr;

	/* Reset the watchdog? */

	while (received_packets < budget) {
		mhi_result *result =
		      mhi_poll(rmnet_mhi_ptr->rx_client_handle);

		if(result->transaction_status == MHI_STATUS_DEVICE_NOT_READY) {
			continue;
		} else if (result->transaction_status != MHI_STATUS_SUCCESS) {
			/* TODO: Handle error */
			pr_err("%s: mhi_poll failed, error is %d",
			       __func__, result->transaction_status);
			break;
		}

		/* Nothing more to read, or out of buffers in MHI layer */
		if (unlikely(0 == result->payload_buf || 0 == result->bytes_xferd)) {
			should_reschedule = false;
			break;
		}

		/* Assumption
		   ----------
		   The buffer returned back is guaranteed to be the first buffer
		   that was allocated for the RX, so we just dequeue the head.
		*/

		/* Take the first one */
		skb = skb_dequeue(&(rmnet_mhi_ptr->rx_buffers));
		if (unlikely(0 == skb)) {
			/* TODO: This shouldn't happen, we had a guard above */
			pr_err("%s: No RX buffers to match", __func__);
			break;
		}

		cb_ptr = (uintptr_t *)skb->cb;
		dma_addr = (dma_addr_t)(uintptr_t)(*cb_ptr);

		/* Sanity check, ensuring that this is actually the buffer */
		if (unlikely((uintptr_t)dma_addr != (uintptr_t)result->payload_buf)) {
			/* TODO: Handle error */
			pr_err("%s: Unexpected physical address mismatch, expected 0x%lx, got 0x%lx",
			       __func__, (uintptr_t)dma_addr, (uintptr_t)result->payload_buf);
			break;
		}

		dma_unmap_single(&(dev->dev), dma_addr,
				(rmnet_mhi_ptr->mru - MHI_RX_HEADROOM),
				 DMA_FROM_DEVICE);
		skb_put(skb, result->bytes_xferd);

		skb->dev = dev;
		skb->protocol = rmnet_mhi_ip_type_trans(skb);

		netif_receive_skb(skb);

		/* Statistics */
		received_packets++;
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += result->bytes_xferd;

		/* Need to allocate a new buffer instead of this one
		   (TODO: Maybe we can do it @ the end?)
		 */
		skb = alloc_skb(rmnet_mhi_ptr->mru, GFP_ATOMIC);

		if (unlikely(0 == skb)) {
			/* TODO: Handle error */
			pr_err("%s: Can't allocate a new RX buffer for MHI",
			       __func__);
			break;
		}

		skb_reserve(skb, MHI_RX_HEADROOM);

		cb_ptr = (uintptr_t *)skb->cb;
		dma_addr = dma_map_single(&(dev->dev), skb->data,
					 rmnet_mhi_ptr->mru - MHI_RX_HEADROOM,
					  DMA_FROM_DEVICE);
		*cb_ptr = (uintptr_t)dma_addr;

		if (unlikely(dma_mapping_error(&(dev->dev), dma_addr))) {
			pr_err("%s: DMA mapping error in polling function",
			       __func__);
			/* TODO: Handle error */
			dev_kfree_skb_irq(skb);
			break;
		}

		/* TODO: What do we do in such a scenario in
			which we can't allocate a RX buffer? */
		if (unlikely(DMA_RANGE_CHECK(dma_addr,
				    rmnet_mhi_ptr->mru,
				    MHI_DMA_MASK))) {
			pr_err("%s: RX buffer is out of MHI DMA address range",
			       __func__);
			dma_unmap_single(&(dev->dev), dma_addr,
					(rmnet_mhi_ptr->mru - MHI_RX_HEADROOM),
							 DMA_FROM_DEVICE);
			dev_kfree_skb_irq(skb);
			break;
		}

		res = mhi_queue_xfer(
			rmnet_mhi_ptr->rx_client_handle,
			(uintptr_t)dma_addr, rmnet_mhi_ptr->mru, 0, 0);

		if (unlikely(MHI_STATUS_SUCCESS != res)) {
			/* TODO: Handle error */
			pr_err("%s: mhi_queue_xfer failed, error %d",
			       __func__, res);
			dma_unmap_single(&(dev->dev), dma_addr,
					(rmnet_mhi_ptr->mru - MHI_RX_HEADROOM),
							 DMA_FROM_DEVICE);

			dev_kfree_skb_irq(skb);
			break;
		}

		skb_queue_tail(&(rmnet_mhi_ptr->rx_buffers), skb);

	} /* while (received_packets < budget) or any other error */

	napi_complete(napi);

	/* We got a NULL descriptor back */
	if (false == should_reschedule) {
		if (atomic_read(&rmnet_mhi_ptr->irq_masked)) {
			atomic_dec(&rmnet_mhi_ptr->irq_masked);
			mhi_unmask_irq(rmnet_mhi_ptr->rx_client_handle);
		}
	} else {
		if (received_packets == budget)
			rx_napi_budget_overflow[rmnet_mhi_ptr->dev_index]++;
		napi_reschedule(napi);
	}

	/* Start a watchdog? */

	rx_napi_skb_burst_min[rmnet_mhi_ptr->dev_index] =
	min((unsigned long)received_packets,
	    rx_napi_skb_burst_min[rmnet_mhi_ptr->dev_index]);

	rx_napi_skb_burst_max[rmnet_mhi_ptr->dev_index] =
	max((unsigned long)received_packets,
	    rx_napi_skb_burst_max[rmnet_mhi_ptr->dev_index]);

	return received_packets;
}
Example #13
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
	struct ibmveth_adapter *adapter =
			container_of(napi, struct ibmveth_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int frames_processed = 0;
	unsigned long lpar_rc;
	struct iphdr *iph;
	u16 mss = 0;

restart_poll:
	while (frames_processed < budget) {
		if (!ibmveth_rxq_pending_buffer(adapter))
			break;

		smp_rmb();
		if (!ibmveth_rxq_buffer_valid(adapter)) {
			wmb(); /* suggested by larson1 */
			adapter->rx_invalid_buffer++;
			netdev_dbg(netdev, "recycling invalid buffer\n");
			ibmveth_rxq_recycle_buffer(adapter);
		} else {
			struct sk_buff *skb, *new_skb;
			int length = ibmveth_rxq_frame_length(adapter);
			int offset = ibmveth_rxq_frame_offset(adapter);
			int csum_good = ibmveth_rxq_csum_good(adapter);
			int lrg_pkt = ibmveth_rxq_large_packet(adapter);

			skb = ibmveth_rxq_get_buffer(adapter);

			/* if the large packet bit is set in the rx queue
			 * descriptor, the mss will be written by PHYP eight
			 * bytes from the start of the rx buffer, which is
			 * skb->data at this stage
			 */
			if (lrg_pkt) {
				__be64 *rxmss = (__be64 *)(skb->data + 8);

				mss = (u16)be64_to_cpu(*rxmss);
			}

			new_skb = NULL;
			if (length < rx_copybreak)
				new_skb = netdev_alloc_skb(netdev, length);

			if (new_skb) {
				skb_copy_to_linear_data(new_skb,
							skb->data + offset,
							length);
				if (rx_flush)
					ibmveth_flush_buffer(skb->data,
						length + offset);
				if (!ibmveth_rxq_recycle_buffer(adapter))
					kfree_skb(skb);
				skb = new_skb;
			} else {
				ibmveth_rxq_harvest_buffer(adapter);
				skb_reserve(skb, offset);
			}

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);

			if (csum_good) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				if (be16_to_cpu(skb->protocol) == ETH_P_IP) {
					iph = (struct iphdr *)skb->data;

					/* If the IP checksum is not offloaded and if the packet
					 *  is large send, the checksum must be rebuilt.
					 */
					if (iph->check == 0xffff) {
						iph->check = 0;
						iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
					}
				}
			}

			if (length > netdev->mtu + ETH_HLEN) {
				ibmveth_rx_mss_helper(skb, mss, lrg_pkt);
				adapter->rx_large_packets++;
			}

			napi_gro_receive(napi, skb);	/* send it up */

			netdev->stats.rx_packets++;
			netdev->stats.rx_bytes += length;
			frames_processed++;
		}
	}

	ibmveth_replenish_task(adapter);

	if (frames_processed < budget) {
		napi_complete(napi);

		/* We think we are done - reenable interrupts,
		 * then check once more to make sure we are done.
		 */
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_ENABLE);

		BUG_ON(lpar_rc != H_SUCCESS);

		if (ibmveth_rxq_pending_buffer(adapter) &&
		    napi_reschedule(napi)) {
			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
					       VIO_IRQ_DISABLE);
			goto restart_poll;
		}
	}

	return frames_processed;
}