static int ibmveth_poll(struct napi_struct *napi, int budget)
{
	struct ibmveth_adapter *adapter =
			container_of(napi, struct ibmveth_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int frames_processed = 0;
	unsigned long lpar_rc;

restart_poll:
	do {
		if (!ibmveth_rxq_pending_buffer(adapter))
			break;

		smp_rmb();
		if (!ibmveth_rxq_buffer_valid(adapter)) {
			wmb(); /* suggested by larson1 */
			adapter->rx_invalid_buffer++;
			netdev_dbg(netdev, "recycling invalid buffer\n");
			ibmveth_rxq_recycle_buffer(adapter);
		} else {
			struct sk_buff *skb, *new_skb;
			int length = ibmveth_rxq_frame_length(adapter);
			int offset = ibmveth_rxq_frame_offset(adapter);
			int csum_good = ibmveth_rxq_csum_good(adapter);

			skb = ibmveth_rxq_get_buffer(adapter);

			new_skb = NULL;
			if (length < rx_copybreak)
				new_skb = netdev_alloc_skb(netdev, length);

			if (new_skb) {
				skb_copy_to_linear_data(new_skb,
							skb->data + offset,
							length);
				if (rx_flush)
					ibmveth_flush_buffer(skb->data,
						length + offset);
				if (!ibmveth_rxq_recycle_buffer(adapter))
					kfree_skb(skb);
				skb = new_skb;
			} else {
				ibmveth_rxq_harvest_buffer(adapter);
				skb_reserve(skb, offset);
			}

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);

			if (csum_good)
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			netif_receive_skb(skb);	/* send it up */

			netdev->stats.rx_packets++;
			netdev->stats.rx_bytes += length;
			frames_processed++;
		}
	} while (frames_processed < budget);

	ibmveth_replenish_task(adapter);

	if (frames_processed < budget) {
		/* We think we are done - reenable interrupts,
		 * then check once more to make sure we are done.
		 */
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_ENABLE);

		BUG_ON(lpar_rc != H_SUCCESS);

		napi_complete(napi);

		if (ibmveth_rxq_pending_buffer(adapter) &&
		    napi_reschedule(napi)) {
			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
					       VIO_IRQ_DISABLE);
			goto restart_poll;
		}
	}

	return frames_processed;
}
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
	struct ibmveth_adapter *adapter =
			container_of(napi, struct ibmveth_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int frames_processed = 0;
	unsigned long lpar_rc;

restart_poll:
	do {
		struct sk_buff *skb;

		if (!ibmveth_rxq_pending_buffer(adapter))
			break;

		rmb();
		if (!ibmveth_rxq_buffer_valid(adapter)) {
			wmb();
			adapter->rx_invalid_buffer++;
			ibmveth_debug_printk("recycling invalid buffer\n");
			ibmveth_rxq_recycle_buffer(adapter);
		} else {
			int length = ibmveth_rxq_frame_length(adapter);
			int offset = ibmveth_rxq_frame_offset(adapter);
			int csum_good = ibmveth_rxq_csum_good(adapter);

			skb = ibmveth_rxq_get_buffer(adapter);
			if (csum_good)
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			ibmveth_rxq_harvest_buffer(adapter);
			skb_reserve(skb, offset);
			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);

			netif_receive_skb(skb);

			netdev->stats.rx_packets++;
			netdev->stats.rx_bytes += length;
			frames_processed++;
		}
	} while (frames_processed < budget);

	ibmveth_replenish_task(adapter);

	if (frames_processed < budget) {
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_ENABLE);

		ibmveth_assert(lpar_rc == H_SUCCESS);

		napi_complete(napi);

		if (ibmveth_rxq_pending_buffer(adapter) &&
		    napi_reschedule(napi)) {
			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
					       VIO_IRQ_DISABLE);
			goto restart_poll;
		}
	}

	return frames_processed;
}
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
	struct ibmveth_adapter *adapter =
			container_of(napi, struct ibmveth_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int frames_processed = 0;
	unsigned long lpar_rc;
	struct iphdr *iph;
	u16 mss = 0;

restart_poll:
	while (frames_processed < budget) {
		if (!ibmveth_rxq_pending_buffer(adapter))
			break;

		smp_rmb();
		if (!ibmveth_rxq_buffer_valid(adapter)) {
			wmb(); /* suggested by larson1 */
			adapter->rx_invalid_buffer++;
			netdev_dbg(netdev, "recycling invalid buffer\n");
			ibmveth_rxq_recycle_buffer(adapter);
		} else {
			struct sk_buff *skb, *new_skb;
			int length = ibmveth_rxq_frame_length(adapter);
			int offset = ibmveth_rxq_frame_offset(adapter);
			int csum_good = ibmveth_rxq_csum_good(adapter);
			int lrg_pkt = ibmveth_rxq_large_packet(adapter);

			skb = ibmveth_rxq_get_buffer(adapter);

			/* if the large packet bit is set in the rx queue
			 * descriptor, the mss will be written by PHYP eight
			 * bytes from the start of the rx buffer, which is
			 * skb->data at this stage
			 */
			if (lrg_pkt) {
				__be64 *rxmss = (__be64 *)(skb->data + 8);

				mss = (u16)be64_to_cpu(*rxmss);
			}

			new_skb = NULL;
			if (length < rx_copybreak)
				new_skb = netdev_alloc_skb(netdev, length);

			if (new_skb) {
				skb_copy_to_linear_data(new_skb,
							skb->data + offset,
							length);
				if (rx_flush)
					ibmveth_flush_buffer(skb->data,
						length + offset);
				if (!ibmveth_rxq_recycle_buffer(adapter))
					kfree_skb(skb);
				skb = new_skb;
			} else {
				ibmveth_rxq_harvest_buffer(adapter);
				skb_reserve(skb, offset);
			}

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);

			if (csum_good) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				if (be16_to_cpu(skb->protocol) == ETH_P_IP) {
					iph = (struct iphdr *)skb->data;

					/* If the IP checksum is not offloaded and if the packet
					 * is large send, the checksum must be rebuilt.
					 */
					if (iph->check == 0xffff) {
						iph->check = 0;
						iph->check = ip_fast_csum((unsigned char *)iph,
									  iph->ihl);
					}
				}
			}

			if (length > netdev->mtu + ETH_HLEN) {
				ibmveth_rx_mss_helper(skb, mss, lrg_pkt);
				adapter->rx_large_packets++;
			}

			napi_gro_receive(napi, skb);	/* send it up */

			netdev->stats.rx_packets++;
			netdev->stats.rx_bytes += length;
			frames_processed++;
		}
	}

	ibmveth_replenish_task(adapter);

	if (frames_processed < budget) {
		napi_complete(napi);

		/* We think we are done - reenable interrupts,
		 * then check once more to make sure we are done.
		 */
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_ENABLE);

		BUG_ON(lpar_rc != H_SUCCESS);

		if (ibmveth_rxq_pending_buffer(adapter) &&
		    napi_reschedule(napi)) {
			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
					       VIO_IRQ_DISABLE);
			goto restart_poll;
		}
	}

	return frames_processed;
}