Example #1
/* hfi1_vnic_handle_rx - handle skb receive */
static void hfi1_vnic_handle_rx(struct hfi1_vnic_rx_queue *rxq,
				int *work_done, int work_to_do)
{
	struct hfi1_vnic_vport_info *vinfo = rxq->vinfo;
	struct sk_buff *skb;
	int rc;

	while (1) {
		if (*work_done >= work_to_do)
			break;

		skb = hfi1_vnic_get_skb(rxq);
		if (unlikely(!skb))
			break;

		rc = hfi1_vnic_decap_skb(rxq, skb);
		/* update rx counters */
		hfi1_vnic_update_rx_counters(vinfo, rxq->idx, skb, rc);
		if (unlikely(rc)) {
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_checksum_none_assert(skb);
		skb->protocol = eth_type_trans(skb, rxq->netdev);

		napi_gro_receive(&rxq->napi, skb);
		(*work_done)++;
	}
}
Example #2
static int hisi_femac_rx(struct net_device *dev, int limit)
{
	struct hisi_femac_priv *priv = netdev_priv(dev);
	struct hisi_femac_queue *rxq = &priv->rxq;
	struct sk_buff *skb;
	dma_addr_t addr;
	u32 rx_pkt_info, pos, len, rx_pkts_num = 0;

	pos = rxq->tail;
	while (readl(priv->glb_base + GLB_IRQ_RAW) & IRQ_INT_RX_RDY) {
		rx_pkt_info = readl(priv->port_base + IQFRM_DES);
		len = rx_pkt_info & RX_FRAME_LEN_MASK;
		len -= ETH_FCS_LEN;

		/* tell hardware we will deal with this packet */
		writel(IRQ_INT_RX_RDY, priv->glb_base + GLB_IRQ_RAW);

		rx_pkts_num++;

		skb = rxq->skb[pos];
		if (unlikely(!skb)) {
			netdev_err(dev, "rx skb NULL. pos=%d\n", pos);
			break;
		}
		rxq->skb[pos] = NULL;

		addr = rxq->dma_phys[pos];
		dma_unmap_single(priv->dev, addr, MAX_FRAME_SIZE,
				 DMA_FROM_DEVICE);
		skb_put(skb, len);
		if (unlikely(skb->len > MAX_FRAME_SIZE)) {
			netdev_err(dev, "rcv len err, len = %d\n", skb->len);
			dev->stats.rx_errors++;
			dev->stats.rx_length_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		skb->protocol = eth_type_trans(skb, dev);
		napi_gro_receive(&priv->napi, skb);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
next:
		pos = (pos + 1) % rxq->num;
		if (rx_pkts_num >= limit)
			break;
	}
	rxq->tail = pos;

	hisi_femac_rx_refill(priv);

	return rx_pkts_num;
}
Example #3
/* Pass a received packet up through the generic LRO stack
 *
 * Handles driverlink veto, and passes the fragment up via
 * the appropriate LRO method
 */
static void efx_rx_packet_lro(struct efx_channel *channel,
			      struct efx_rx_buffer *rx_buf,
			      bool checksummed)
{
	struct napi_struct *napi = &channel->napi_str;
	gro_result_t gro_result;

	/* Pass the skb/page into the LRO engine */
	if (rx_buf->page) {
		struct page *page = rx_buf->page;
		struct sk_buff *skb;

		EFX_BUG_ON_PARANOID(rx_buf->skb);
		rx_buf->page = NULL;

		skb = napi_get_frags(napi);
		if (!skb) {
			put_page(page);
			return;
		}

		skb_shinfo(skb)->frags[0].page = page;
		skb_shinfo(skb)->frags[0].page_offset =
			efx_rx_buf_offset(rx_buf);
		skb_shinfo(skb)->frags[0].size = rx_buf->len;
		skb_shinfo(skb)->nr_frags = 1;

		skb->len = rx_buf->len;
		skb->data_len = rx_buf->len;
		skb->truesize += rx_buf->len;
		skb->ip_summed =
			checksummed ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;

		skb_record_rx_queue(skb, channel->channel);

		gro_result = napi_gro_frags(napi);
	} else {
		struct sk_buff *skb = rx_buf->skb;

		EFX_BUG_ON_PARANOID(!skb);
		EFX_BUG_ON_PARANOID(!checksummed);
		rx_buf->skb = NULL;

		gro_result = napi_gro_receive(napi, skb);
	}

	if (gro_result == GRO_NORMAL) {
		channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
	} else if (gro_result != GRO_DROP) {
		channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
		channel->irq_mod_score += 2;
	}
}
Example #4
/* Pass a received packet up through the generic LRO stack
 *
 * Handles driverlink veto, and passes the fragment up via
 * the appropriate LRO method
 */
static void efx_rx_packet_lro(struct efx_channel *channel,
			      struct efx_rx_buffer *rx_buf)
{
	struct napi_struct *napi = &channel->napi_str;

	/* Pass the skb/page into the LRO engine */
	if (rx_buf->page) {
		struct sk_buff *skb = napi_get_frags(napi);

		if (!skb) {
			put_page(rx_buf->page);
			goto out;
		}

		skb_shinfo(skb)->frags[0].page = rx_buf->page;
		skb_shinfo(skb)->frags[0].page_offset =
			efx_rx_buf_offset(rx_buf);
		skb_shinfo(skb)->frags[0].size = rx_buf->len;
		skb_shinfo(skb)->nr_frags = 1;

		skb->len = rx_buf->len;
		skb->data_len = rx_buf->len;
		skb->truesize += rx_buf->len;
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		napi_gro_frags(napi);

out:
		EFX_BUG_ON_PARANOID(rx_buf->skb);
		rx_buf->page = NULL;
	} else {
		EFX_BUG_ON_PARANOID(!rx_buf->skb);

		napi_gro_receive(napi, rx_buf->skb);
		rx_buf->skb = NULL;
	}
}
Example #5
static int xge_rx_poll(struct net_device *ndev, unsigned int budget)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *rx_ring;
	struct xge_raw_desc *raw_desc;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	int processed = 0;
	u8 head, rx_error;
	int i, ret;
	u32 data;
	u16 len;

	rx_ring = pdata->rx_ring;
	head = rx_ring->head;

	data = xge_rd_csr(pdata, DMARXSTATUS);
	if (!GET_BITS(RXPKTCOUNT, data))
		return 0;

	for (i = 0; i < budget; i++) {
		raw_desc = &rx_ring->raw_desc[head];

		if (GET_BITS(E, le64_to_cpu(raw_desc->m0)))
			break;

		dma_rmb();

		skb = rx_ring->pkt_info[head].skb;
		rx_ring->pkt_info[head].skb = NULL;
		dma_addr = rx_ring->pkt_info[head].dma_addr;
		len = GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0));
		dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
				 DMA_FROM_DEVICE);

		rx_error = GET_BITS(D, le64_to_cpu(raw_desc->m2));
		if (unlikely(rx_error)) {
			pdata->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto out;
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, ndev);

		pdata->stats.rx_packets++;
		pdata->stats.rx_bytes += len;
		napi_gro_receive(&pdata->napi, skb);
out:
		ret = xge_refill_buffers(ndev, 1);
		xge_wr_csr(pdata, DMARXSTATUS, 1);
		xge_wr_csr(pdata, DMARXCTRL, 1);

		if (ret)
			break;

		head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
		processed++;
	}

	rx_ring->head = head;

	return processed;
}
Example #6
static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
			     int weight)
{
	u32 end_slot;
	int handled = 0;

	end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot -= ring->index_base;
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot /= sizeof(struct bgmac_dma_desc);

	while (ring->start != end_slot) {
		struct device *dma_dev = bgmac->core->dma_dev;
		struct bgmac_slot_info *slot = &ring->slots[ring->start];
		struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;
		struct sk_buff *skb;
		void *buf = slot->buf;
		dma_addr_t dma_addr = slot->dma_addr;
		u16 len, flags;

		do {
			/* Prepare new skb as replacement */
			if (bgmac_dma_rx_skb_for_slot(bgmac, slot)) {
				bgmac_dma_rx_poison_buf(dma_dev, slot);
				break;
			}

			/* Unmap buffer to make it accessible to the CPU */
			dma_unmap_single(dma_dev, dma_addr,
					 BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);

			/* Get info from the header */
			len = le16_to_cpu(rx->len);
			flags = le16_to_cpu(rx->flags);

			/* Check for poison and drop or pass the packet */
			if (len == 0xdead && flags == 0xbeef) {
				bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
					  ring->start);
				put_page(virt_to_head_page(buf));
				break;
			}

			if (len > BGMAC_RX_ALLOC_SIZE) {
				bgmac_err(bgmac, "Found oversized packet at slot %d, DMA issue!\n",
					  ring->start);
				put_page(virt_to_head_page(buf));
				break;
			}

			/* Omit CRC. */
			len -= ETH_FCS_LEN;

			skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE);
			if (unlikely(!skb)) {
				bgmac_err(bgmac, "build_skb failed\n");
				put_page(virt_to_head_page(buf));
				break;
			}
			skb_put(skb, BGMAC_RX_FRAME_OFFSET +
				BGMAC_RX_BUF_OFFSET + len);
			skb_pull(skb, BGMAC_RX_FRAME_OFFSET +
				 BGMAC_RX_BUF_OFFSET);

			skb_checksum_none_assert(skb);
			skb->protocol = eth_type_trans(skb, bgmac->net_dev);
			napi_gro_receive(&bgmac->napi, skb);
			handled++;
		} while (0);

		bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);

		if (++ring->start >= BGMAC_RX_RING_SLOTS)
			ring->start = 0;

		if (handled >= weight) /* Should never be greater */
			break;
	}

	bgmac_dma_rx_update_index(bgmac, ring);

	return handled;
}
Example #7
/** Routine to push packets arriving on the Octeon interface up to the network layer.
 * @param oct_id   - octeon device id.
 * @param skbuff   - skbuff struct to be passed to network layer.
 * @param len      - size of total data received.
 * @param rh       - Control header associated with the packet
 * @param param    - additional control data with the packet
 * @param arg      - farg registered in droq_ops
 */
static void
liquidio_push_packet(u32 octeon_id __attribute__((unused)),
		     void *skbuff,
		     u32 len,
		     union octeon_rh *rh,
		     void *param,
		     void *arg)
{
	struct net_device *netdev = (struct net_device *)arg;
	struct octeon_droq *droq =
	    container_of(param, struct octeon_droq, napi);
	struct sk_buff *skb = (struct sk_buff *)skbuff;
	struct skb_shared_hwtstamps *shhwtstamps;
	struct napi_struct *napi = param;
	u16 vtag = 0;
	u32 r_dh_off;
	u64 ns;

	if (netdev) {
		struct lio *lio = GET_LIO(netdev);
		struct octeon_device *oct = lio->oct_dev;

		/* Do not proceed if the interface is not in RUNNING state. */
		if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
			recv_buffer_free(skb);
			droq->stats.rx_dropped++;
			return;
		}

		skb->dev = netdev;

		skb_record_rx_queue(skb, droq->q_no);
		if (likely(len > MIN_SKB_SIZE)) {
			struct octeon_skb_page_info *pg_info;
			unsigned char *va;

			pg_info = ((struct octeon_skb_page_info *)(skb->cb));
			if (pg_info->page) {
				/* For Paged allocation use the frags */
				va = page_address(pg_info->page) +
					pg_info->page_offset;
				memcpy(skb->data, va, MIN_SKB_SIZE);
				skb_put(skb, MIN_SKB_SIZE);
				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
						pg_info->page,
						pg_info->page_offset +
						MIN_SKB_SIZE,
						len - MIN_SKB_SIZE,
						LIO_RXBUFFER_SZ);
			}
		} else {
			struct octeon_skb_page_info *pg_info =
				((struct octeon_skb_page_info *)(skb->cb));
			skb_copy_to_linear_data(skb, page_address(pg_info->page)
						+ pg_info->page_offset, len);
			skb_put(skb, len);
			put_page(pg_info->page);
		}

		r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT;

		if (oct->ptp_enable) {
			if (rh->r_dh.has_hwtstamp) {
				/* the hardware places the timestamp at
				 * the beginning of the packet.
				 */
				if (ifstate_check
					(lio,
					 LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
					/* Nanoseconds are in the first 64-bits
					 * of the packet.
					 */
					memcpy(&ns, (skb->data + r_dh_off),
					       sizeof(ns));
					r_dh_off -= BYTES_PER_DHLEN_UNIT;
					shhwtstamps = skb_hwtstamps(skb);
					shhwtstamps->hwtstamp =
						ns_to_ktime(ns +
							    lio->ptp_adjust);
				}
			}
		}

		if (rh->r_dh.has_hash) {
			__be32 *hash_be = (__be32 *)(skb->data + r_dh_off);
			u32 hash = be32_to_cpu(*hash_be);

			skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
			r_dh_off -= BYTES_PER_DHLEN_UNIT;
		}

		skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
		skb->protocol = eth_type_trans(skb, skb->dev);

		if ((netdev->features & NETIF_F_RXCSUM) &&
		    (((rh->r_dh.encap_on) &&
		      (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
		     (!(rh->r_dh.encap_on) &&
		      (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED))))
			/* checksum has already been verified */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		/* Setting Encapsulation field on basis of status received
		 * from the firmware
		 */
		if (rh->r_dh.encap_on) {
			skb->encapsulation = 1;
			skb->csum_level = 1;
			droq->stats.rx_vxlan++;
		}

		/* inbound VLAN tag */
		if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    rh->r_dh.vlan) {
			u16 priority = rh->r_dh.priority;
			u16 vid = rh->r_dh.vlan;

			vtag = (priority << VLAN_PRIO_SHIFT) | vid;
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
		}

		napi_gro_receive(napi, skb);

		droq->stats.rx_bytes_received += len -
			rh->r_dh.len * BYTES_PER_DHLEN_UNIT;
		droq->stats.rx_pkts_received++;
	} else {
		recv_buffer_free(skb);
	}
}
Example #8
static int xlgmac_rx_poll(struct xlgmac_channel *channel, int budget)
{
	struct xlgmac_pdata *pdata = channel->pdata;
	struct xlgmac_ring *ring = channel->rx_ring;
	struct net_device *netdev = pdata->netdev;
	unsigned int len, dma_desc_len, max_len;
	unsigned int context_next, context;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_pkt_info *pkt_info;
	unsigned int incomplete, error;
	struct xlgmac_hw_ops *hw_ops;
	unsigned int received = 0;
	struct napi_struct *napi;
	struct sk_buff *skb;
	int packet_count = 0;

	hw_ops = &pdata->hw_ops;

	/* Nothing to do if there isn't a Rx ring for this channel */
	if (!ring)
		return 0;

	incomplete = 0;
	context_next = 0;

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
	pkt_info = &ring->pkt_info;
	while (packet_count < budget) {
		/* First time in loop see if we need to restore state */
		if (!received && desc_data->state_saved) {
			skb = desc_data->state.skb;
			error = desc_data->state.error;
			len = desc_data->state.len;
		} else {
			memset(pkt_info, 0, sizeof(*pkt_info));
			skb = NULL;
			error = 0;
			len = 0;
		}

read_again:
		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);

		if (xlgmac_rx_dirty_desc(ring) > XLGMAC_RX_DESC_MAX_DIRTY)
			xlgmac_rx_refresh(channel);

		if (hw_ops->dev_read(channel))
			break;

		received++;
		ring->cur++;

		incomplete = XLGMAC_GET_REG_BITS(
					pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_INCOMPLETE_POS,
					RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN);
		context_next = XLGMAC_GET_REG_BITS(
					pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS,
					RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN);
		context = XLGMAC_GET_REG_BITS(
					pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_CONTEXT_POS,
					RX_PACKET_ATTRIBUTES_CONTEXT_LEN);

		/* Earlier error, just drain the remaining data */
		if ((incomplete || context_next) && error)
			goto read_again;

		if (error || pkt_info->errors) {
			if (pkt_info->errors)
				netif_err(pdata, rx_err, netdev,
					  "error in received packet\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (!context) {
			/* Length is cumulative, get this descriptor's length */
			dma_desc_len = desc_data->rx.len - len;
			len += dma_desc_len;

			if (dma_desc_len && !skb) {
				skb = xlgmac_create_skb(pdata, napi, desc_data,
							dma_desc_len);
				if (!skb)
					error = 1;
			} else if (dma_desc_len) {
				dma_sync_single_range_for_cpu(
						pdata->dev,
						desc_data->rx.buf.dma_base,
						desc_data->rx.buf.dma_off,
						desc_data->rx.buf.dma_len,
						DMA_FROM_DEVICE);

				skb_add_rx_frag(
					skb, skb_shinfo(skb)->nr_frags,
					desc_data->rx.buf.pa.pages,
					desc_data->rx.buf.pa.pages_offset,
					dma_desc_len,
					desc_data->rx.buf.dma_len);
				desc_data->rx.buf.pa.pages = NULL;
			}
		}

		if (incomplete || context_next)
			goto read_again;

		if (!skb)
			goto next_packet;

		/* Be sure we don't exceed the configured MTU */
		max_len = netdev->mtu + ETH_HLEN;
		if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    (skb->protocol == htons(ETH_P_8021Q)))
			max_len += VLAN_HLEN;

		if (skb->len > max_len) {
			netif_err(pdata, rx_err, netdev,
				  "packet length exceeds configured MTU\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (netif_msg_pktdata(pdata))
			xlgmac_print_pkt(netdev, skb, false);

		skb_checksum_none_assert(skb);
		if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_CSUM_DONE_POS,
				    RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
				    RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN)) {
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       pkt_info->vlan_ctag);
			pdata->stats.rx_vlan_packets++;
		}

		if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_RSS_HASH_POS,
				    RX_PACKET_ATTRIBUTES_RSS_HASH_LEN))
			skb_set_hash(skb, pkt_info->rss_hash,
				     pkt_info->rss_hash_type);

		skb->dev = netdev;
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, channel->queue_index);

		napi_gro_receive(napi, skb);

next_packet:
		packet_count++;
	}

	/* Check if we need to save state before leaving */
	if (received && (incomplete || context_next)) {
		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
		desc_data->state_saved = 1;
		desc_data->state.skb = skb;
		desc_data->state.len = len;
		desc_data->state.error = error;
	}

	XLGMAC_PR("packet_count = %d\n", packet_count);

	return packet_count;
}
Example #9
/*
 * netvsc_recv_callback -  Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct net_device *net,
			 struct vmbus_channel *channel,
			 void  *data, u32 len,
			 const struct ndis_tcp_ip_checksum_info *csum_info,
			 const struct ndis_pkt_8021q_info *vlan)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *net_device = net_device_ctx->nvdev;
	u16 q_idx = channel->offermsg.offer.sub_channel_index;
	struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
	struct sk_buff *skb;
	struct sk_buff *vf_skb;
	struct netvsc_stats *rx_stats;
	int ret = 0;

	if (!net || net->reg_state != NETREG_REGISTERED)
		return NVSP_STAT_FAIL;

	if (READ_ONCE(net_device_ctx->vf_inject)) {
		atomic_inc(&net_device_ctx->vf_use_cnt);
		if (!READ_ONCE(net_device_ctx->vf_inject)) {
			/*
			 * We raced; just move on.
			 */
			atomic_dec(&net_device_ctx->vf_use_cnt);
			goto vf_injection_done;
		}

		/*
		 * Inject this packet into the VF interface.
		 * On Hyper-V, multicast and broadcast packets
		 * are only delivered on the synthetic interface
		 * (after subjecting these to policy filters on
		 * the host). Deliver these via the VF interface
		 * in the guest.
		 */
		vf_skb = netvsc_alloc_recv_skb(net_device_ctx->vf_netdev,
					       csum_info, vlan, data, len);
		if (vf_skb != NULL) {
			++net_device_ctx->vf_netdev->stats.rx_packets;
			net_device_ctx->vf_netdev->stats.rx_bytes +=
				len;
			netif_receive_skb(vf_skb);
		} else {
			++net->stats.rx_dropped;
			ret = NVSP_STAT_FAIL;
		}
		atomic_dec(&net_device_ctx->vf_use_cnt);
		return ret;
	}

vf_injection_done:
	rx_stats = &nvchan->rx_stats;

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netvsc_alloc_recv_skb(net, csum_info, vlan, data, len);
	if (unlikely(!skb)) {
		++net->stats.rx_dropped;
		return NVSP_STAT_FAIL;
	}
	skb_record_rx_queue(skb, q_idx);

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->packets++;
	rx_stats->bytes += len;

	if (skb->pkt_type == PACKET_BROADCAST)
		++rx_stats->broadcast;
	else if (skb->pkt_type == PACKET_MULTICAST)
		++rx_stats->multicast;
	u64_stats_update_end(&rx_stats->syncp);

	net->stats.rx_packets++;
	net->stats.rx_bytes += len;

	napi_gro_receive(&nvchan->napi, skb);

	return 0;
}
Example #10
static inline int
pdma_recv(struct net_device* dev, END_DEVICE* ei_local, int work_todo)
{
	struct PDMA_rxdesc *rxd_ring;
	struct sk_buff *new_skb, *rx_skb;
	int gmac_no = PSE_PORT_GMAC1;
	int work_done = 0;
	u32 rxd_dma_owner_idx;
	u32 rxd_info2, rxd_info4;
#if defined (CONFIG_RAETH_HW_VLAN_RX)
	u32 rxd_info3;
#endif
#if defined (CONFIG_RAETH_SPECIAL_TAG)
	struct vlan_ethhdr *veth;
#endif

	rxd_dma_owner_idx = le32_to_cpu(sysRegRead(RX_CALC_IDX0));

	while (work_done < work_todo) {
		rxd_dma_owner_idx = (rxd_dma_owner_idx + 1) % NUM_RX_DESC;
		rxd_ring = &ei_local->rxd_ring[rxd_dma_owner_idx];
		
		if (!(rxd_ring->rxd_info2 & RX2_DMA_DONE))
			break;
		
		/* load completed skb pointer */
		rx_skb = ei_local->rxd_buff[rxd_dma_owner_idx];
		
		/* copy RX desc to CPU */
		rxd_info2 = rxd_ring->rxd_info2;
#if defined (CONFIG_RAETH_HW_VLAN_RX)
		rxd_info3 = rxd_ring->rxd_info3;
#endif
		rxd_info4 = rxd_ring->rxd_info4;
		
#if defined (CONFIG_PSEUDO_SUPPORT)
		gmac_no = RX4_DMA_SP(rxd_info4);
#endif
		/* We have to check that the free memory size is big enough
		 * before passing the packet to the CPU */
		new_skb = __dev_alloc_skb(MAX_RX_LENGTH + NET_IP_ALIGN, GFP_ATOMIC);
		if (unlikely(new_skb == NULL)) {
#if defined (RAETH_PDMA_V2)
			rxd_ring->rxd_info2 = RX2_DMA_SDL0_SET(MAX_RX_LENGTH);
#else
			rxd_ring->rxd_info2 = RX2_DMA_LS0;
#endif
			/* move CPU pointer to next RXD */
			sysRegWrite(RX_CALC_IDX0, cpu_to_le32(rxd_dma_owner_idx));
			
			inc_rx_drop(ei_local, gmac_no);
#if !defined (CONFIG_RAETH_NAPI)
			/* means we need to reschedule */
			work_done = work_todo;
#endif
#if defined (CONFIG_RAETH_DEBUG)
			if (net_ratelimit())
				printk(KERN_ERR "%s: Failed to alloc new RX skb! (GMAC: %d)\n", RAETH_DEV_NAME, gmac_no);
#endif
			break;
		}
#if !defined (RAETH_PDMA_V2)
		skb_reserve(new_skb, NET_IP_ALIGN);
#endif
		/* store new empty skb pointer */
		ei_local->rxd_buff[rxd_dma_owner_idx] = new_skb;
		
		/* map new skb to ring (unmap is not required on generic mips mm) */
		rxd_ring->rxd_info1 = (u32)dma_map_single(NULL, new_skb->data, MAX_RX_LENGTH, DMA_FROM_DEVICE);
#if defined (RAETH_PDMA_V2)
		rxd_ring->rxd_info2 = RX2_DMA_SDL0_SET(MAX_RX_LENGTH);
#else
		rxd_ring->rxd_info2 = RX2_DMA_LS0;
#endif
		wmb();
		
		/* move CPU pointer to next RXD */
		sysRegWrite(RX_CALC_IDX0, cpu_to_le32(rxd_dma_owner_idx));
		
		/* skb processing */
		rx_skb->len = RX2_DMA_SDL0_GET(rxd_info2);
#if defined (RAETH_PDMA_V2)
		rx_skb->data += NET_IP_ALIGN;
#endif
		rx_skb->tail = rx_skb->data + rx_skb->len;

#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
		FOE_MAGIC_TAG(rx_skb) = FOE_MAGIC_GE;
		DO_FILL_FOE_DESC(rx_skb, (rxd_info4 & ~(RX4_DMA_ALG_SET)));
#endif

#if defined (CONFIG_RAETH_CHECKSUM_OFFLOAD)
		if (rxd_info4 & RX4_DMA_L4FVLD)
			rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
#endif

#if defined (CONFIG_RAETH_HW_VLAN_RX)
		if ((rxd_info2 & RX2_DMA_TAG) && rxd_info3) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
			__vlan_hwaccel_put_tag(rx_skb, __constant_htons(ETH_P_8021Q), RX3_DMA_VID(rxd_info3));
#else
			__vlan_hwaccel_put_tag(rx_skb, RX3_DMA_VID(rxd_info3));
#endif
		}
#endif

#if defined (CONFIG_PSEUDO_SUPPORT)
		if (gmac_no == PSE_PORT_GMAC2)
			rx_skb->protocol = eth_type_trans(rx_skb, ei_local->PseudoDev);
		else
#endif
			rx_skb->protocol = eth_type_trans(rx_skb, dev);

#if defined (CONFIG_RAETH_SPECIAL_TAG)
#if defined (CONFIG_MT7530_GSW)
#define ESW_TAG_ID	0x00
#else
#define ESW_TAG_ID	0x81
#endif
		// port0: 0x8100 => 0x8100 0001
		// port1: 0x8101 => 0x8100 0002
		// port2: 0x8102 => 0x8100 0003
		// port3: 0x8103 => 0x8100 0004
		// port4: 0x8104 => 0x8100 0005
		// port5: 0x8105 => 0x8100 0006
		veth = vlan_eth_hdr(rx_skb);
		if ((veth->h_vlan_proto & 0xFF) == ESW_TAG_ID) {
			veth->h_vlan_TCI = htons((((veth->h_vlan_proto >> 8) & 0xF) + 1));
			veth->h_vlan_proto = __constant_htons(ETH_P_8021Q);
			rx_skb->protocol = veth->h_vlan_proto;
		}
#endif

/* ra_sw_nat_hook_rx return 1 --> continue
 * ra_sw_nat_hook_rx return 0 --> FWD & without netif_rx
 */
#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
		if((ra_sw_nat_hook_rx == NULL) ||
		   (ra_sw_nat_hook_rx != NULL && ra_sw_nat_hook_rx(rx_skb)))
#endif
		{
#if defined (CONFIG_RAETH_NAPI)
#if defined (CONFIG_RAETH_NAPI_GRO)
			if (rx_skb->ip_summed == CHECKSUM_UNNECESSARY)
				napi_gro_receive(&ei_local->napi, rx_skb);
			else
#endif
			netif_receive_skb(rx_skb);
#else
			netif_rx(rx_skb);
#endif
		}
		
		work_done++;
	}

	return work_done;
}
Example #11
/*
 * netvsc_recv_callback -  Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct net_device *net,
			 struct vmbus_channel *channel,
			 void  *data, u32 len,
			 const struct ndis_tcp_ip_checksum_info *csum_info,
			 const struct ndis_pkt_8021q_info *vlan)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *net_device;
	u16 q_idx = channel->offermsg.offer.sub_channel_index;
	struct netvsc_channel *nvchan;
	struct net_device *vf_netdev;
	struct sk_buff *skb;
	struct netvsc_stats *rx_stats;

	if (net->reg_state != NETREG_REGISTERED)
		return NVSP_STAT_FAIL;

	/*
	 * If necessary, inject this packet into the VF interface.
	 * On Hyper-V, multicast and broadcast packets are only delivered
	 * to the synthetic interface (after subjecting these to
	 * policy filters on the host). Deliver these via the VF
	 * interface in the guest.
	 */
	rcu_read_lock();
	net_device = rcu_dereference(net_device_ctx->nvdev);
	if (unlikely(!net_device))
		goto drop;

	nvchan = &net_device->chan_table[q_idx];
	vf_netdev = rcu_dereference(net_device_ctx->vf_netdev);
	if (vf_netdev && (vf_netdev->flags & IFF_UP))
		net = vf_netdev;

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netvsc_alloc_recv_skb(net, &nvchan->napi,
				    csum_info, vlan, data, len);
	if (unlikely(!skb)) {
drop:
		++net->stats.rx_dropped;
		rcu_read_unlock();
		return NVSP_STAT_FAIL;
	}

	if (net != vf_netdev)
		skb_record_rx_queue(skb, q_idx);

	/*
	 * Even if injecting the packet, record the statistics
	 * on the synthetic device because modifying the VF device
	 * statistics will not work correctly.
	 */
	rx_stats = &nvchan->rx_stats;
	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->packets++;
	rx_stats->bytes += len;

	if (skb->pkt_type == PACKET_BROADCAST)
		++rx_stats->broadcast;
	else if (skb->pkt_type == PACKET_MULTICAST)
		++rx_stats->multicast;
	u64_stats_update_end(&rx_stats->syncp);

	napi_gro_receive(&nvchan->napi, skb);
	rcu_read_unlock();

	return 0;
}
Example #12
static int sprdwl_rx_handler(struct napi_struct *napi, int budget)
{
	struct sprdwl_priv *priv = container_of(napi, struct sprdwl_priv, napi);
	struct sblock blk;
	struct sk_buff *skb;
	int ret, work_done;
	u16 decryp_data_len = 0;
	struct wlan_sblock_recv_data *data;
	uint32_t length = 0;
#ifdef CONFIG_SPRDWL_FW_ZEROCOPY
	u8 offset = 0;
#endif
	for (work_done = 0; work_done < budget; work_done++) {
		ret = sblock_receive(WLAN_CP_ID, WLAN_SBLOCK_CH, &blk, 0);
		if (ret) {
			dev_dbg(&priv->ndev->dev, "no more sblock to read\n");
			break;
		}
#ifdef CONFIG_SPRDWL_FW_ZEROCOPY
		offset = *(u8 *)blk.addr;
		length = blk.length - 2 - offset;
#else
		length = blk.length;
#endif
		/*16 bytes align */
		skb = dev_alloc_skb(length + NET_IP_ALIGN);
		if (!skb) {
			dev_err(&priv->ndev->dev,
				"Failed to allocate skbuff!\n");
			priv->ndev->stats.rx_dropped++;
			goto rx_failed;
		}
#ifdef CONFIG_SPRDWL_FW_ZEROCOPY
		data = (struct wlan_sblock_recv_data *)(blk.addr + 2 + offset);
#else
		data = (struct wlan_sblock_recv_data *)blk.addr;
#endif

		if (data->is_encrypted == 1) {
			if (priv->connect_status == SPRDWL_CONNECTED &&
			    priv->cipher_type == SPRDWL_CIPHER_WAPI &&
			    priv->key_len[GROUP][priv->key_index[GROUP]] != 0 &&
			    priv->key_len[PAIRWISE][priv->key_index[PAIRWISE]] != 0) {
				u8 snap_header[6] = { 0xaa, 0xaa, 0x03,
					0x00, 0x00, 0x00
				};
				skb_reserve(skb, NET_IP_ALIGN);
				decryp_data_len = wlan_rx_wapi_decryption(priv,
						   (u8 *)&data->u2.encrypt,
						   data->u1.encrypt.header_len,
						   (length -
						   sizeof(data->is_encrypted) -
						   sizeof(data->u1) -
						   data->u1.encrypt.header_len),
						   (skb->data + 12));
				if (decryp_data_len == 0) {
					dev_err(&priv->ndev->dev,
						"Failed to decrypt WAPI data!\n");
					priv->ndev->stats.rx_dropped++;
					dev_kfree_skb(skb);
					goto rx_failed;
				}
				if (memcmp((skb->data + 12), snap_header,
					   sizeof(snap_header)) == 0) {
					skb_reserve(skb, 6);
					/* copy the eth address from eth header,
					 * but do not copy the eth type
					 */
					memcpy(skb->data,
					       data->u2.encrypt.
					       mac_header.addr1, 6);
					memcpy(skb->data + 6,
					       data->u2.encrypt.
					       mac_header.addr2, 6);
					skb_put(skb, (decryp_data_len + 6));
				} else {
					/* copy eth header */
					memcpy(skb->data,
					       data->u2.encrypt.
					       mac_header.addr3, 6);
					memcpy(skb->data + 6,
					       data->u2.encrypt.
					       mac_header.addr2, 6);
					skb_put(skb, (decryp_data_len + 12));
				}
			} else {
				dev_err(&priv->ndev->dev,
					"wrong encryption data!\n");
				priv->ndev->stats.rx_dropped++;
				dev_kfree_skb(skb);
				goto rx_failed;
			}
		} else if (data->is_encrypted == 0) {
			skb_reserve(skb, NET_IP_ALIGN);
			/* dec the first encrypt byte */
			memcpy(skb->data, (u8 *)&data->u2,
			       (length - sizeof(data->is_encrypted) -
				sizeof(data->u1)));
			skb_put(skb,
				(length - sizeof(data->is_encrypted) -
				 sizeof(data->u1)));
		} else {
			dev_err(&priv->ndev->dev,
				"wrong data fromat recieved!\n");
			priv->ndev->stats.rx_dropped++;
			dev_kfree_skb(skb);
			goto rx_failed;
		}

#ifdef DUMP_RECEIVE_PACKET
		print_hex_dump(KERN_DEBUG, "receive packet: ",
			       DUMP_PREFIX_OFFSET, 16, 1, skb->data, skb->len,
			       0);
#endif
		skb->dev = priv->ndev;
		skb->protocol = eth_type_trans(skb, priv->ndev);
		/* CHECKSUM_UNNECESSARY not supported by our hardware */
		/* skb->ip_summed = CHECKSUM_UNNECESSARY; */

		priv->ndev->stats.rx_packets++;
		priv->ndev->stats.rx_bytes += skb->len;

		napi_gro_receive(napi, skb);

rx_failed:
		ret = sblock_release(WLAN_CP_ID, WLAN_SBLOCK_CH, &blk);
		if (ret)
			dev_err(&priv->ndev->dev,
				"Failed to release sblock (%d)!\n", ret);
	}
	if (work_done < budget)
		napi_complete(napi);

	return work_done;
}
Example #13
static int hip04_rx_poll(struct napi_struct *napi, int budget)
{
    struct hip04_priv *priv = container_of(napi, struct hip04_priv, napi);
    struct net_device *ndev = priv->ndev;
    struct net_device_stats *stats = &ndev->stats;
    unsigned int cnt = hip04_recv_cnt(priv);
    struct rx_desc *desc;
    struct sk_buff *skb;
    unsigned char *buf;
    bool last = false;
    dma_addr_t phys;
    int rx = 0;
    int tx_remaining;
    u16 len;
    u32 err;

    while (cnt && !last) {
        buf = priv->rx_buf[priv->rx_head];
        skb = build_skb(buf, priv->rx_buf_size);
        if (unlikely(!skb)) {
            net_dbg_ratelimited("build_skb failed\n");
            goto refill;
        }

        dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head],
                         RX_BUF_SIZE, DMA_FROM_DEVICE);
        priv->rx_phys[priv->rx_head] = 0;

        desc = (struct rx_desc *)skb->data;
        len = be16_to_cpu(desc->pkt_len);
        err = be32_to_cpu(desc->pkt_err);

        if (0 == len) {
            dev_kfree_skb_any(skb);
            last = true;
        } else if ((err & RX_PKT_ERR) || (len >= GMAC_MAX_PKT_LEN)) {
            dev_kfree_skb_any(skb);
            stats->rx_dropped++;
            stats->rx_errors++;
        } else {
            skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
            skb_put(skb, len);
            skb->protocol = eth_type_trans(skb, ndev);
            napi_gro_receive(&priv->napi, skb);
            stats->rx_packets++;
            stats->rx_bytes += len;
            rx++;
        }

refill:
        buf = netdev_alloc_frag(priv->rx_buf_size);
        if (!buf)
            goto done;
        phys = dma_map_single(&ndev->dev, buf,
                              RX_BUF_SIZE, DMA_FROM_DEVICE);
        if (dma_mapping_error(&ndev->dev, phys))
            goto done;
        priv->rx_buf[priv->rx_head] = buf;
        priv->rx_phys[priv->rx_head] = phys;
        hip04_set_recv_desc(priv, phys);

        priv->rx_head = RX_NEXT(priv->rx_head);
        if (rx >= budget)
            goto done;

        if (--cnt == 0)
            cnt = hip04_recv_cnt(priv);
    }

    if (!(priv->reg_inten & RCV_INT)) {
        /* enable rx interrupt */
        priv->reg_inten |= RCV_INT;
        writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
    }
    napi_complete(napi);
done:
    /* clean up tx descriptors and start a new timer if necessary */
    tx_remaining = hip04_tx_reclaim(ndev, false);
    if (rx < budget && tx_remaining)
        hip04_start_tx_timer(priv);

    return rx;
}
Example #14
static int stmmac_rx(struct stmmac_priv *priv, int limit)
{
	unsigned int rxsize = priv->dma_rx_size;
	unsigned int entry = priv->cur_rx % rxsize;
	unsigned int next_entry;
	unsigned int count = 0;
	struct dma_desc *p = priv->dma_rx + entry;
	struct dma_desc *p_next;

#ifdef STMMAC_RX_DEBUG
	if (netif_msg_hw(priv)) {
		pr_debug(">>> stmmac_rx: descriptor ring:\n");
		display_ring(priv->dma_rx, rxsize);
	}
#endif
	count = 0;
	while (!priv->hw->desc->get_rx_owner(p)) {
		int status;

		if (count >= limit)
			break;

		count++;

		next_entry = (++priv->cur_rx) % rxsize;
		p_next = priv->dma_rx + next_entry;
		prefetch(p_next);

		/* read the status of the incoming frame */
		status = (priv->hw->desc->rx_status(&priv->dev->stats,
						    &priv->xstats, p));
		if (unlikely(status == discard_frame))
			priv->dev->stats.rx_errors++;
		else {
			struct sk_buff *skb;
			int frame_len;

			frame_len = priv->hw->desc->get_rx_frame_len(p);
			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
			 * Type frames (LLC/LLC-SNAP) */
			if (unlikely(status != llc_snap))
				frame_len -= ETH_FCS_LEN;
#ifdef STMMAC_RX_DEBUG
			if (frame_len > ETH_FRAME_LEN)
				pr_debug("\tRX frame size %d, COE status: %d\n",
					frame_len, status);

			if (netif_msg_hw(priv))
				pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
					p, entry, p->des2);
#endif
			skb = priv->rx_skbuff[entry];
			if (unlikely(!skb)) {
				pr_err("%s: Inconsistent Rx descriptor chain\n",
					priv->dev->name);
				priv->dev->stats.rx_dropped++;
				break;
			}
			prefetch(skb->data - NET_IP_ALIGN);
			priv->rx_skbuff[entry] = NULL;

			skb_put(skb, frame_len);
			dma_unmap_single(priv->device,
					 priv->rx_skbuff_dma[entry],
					 priv->dma_buf_sz, DMA_FROM_DEVICE);
#ifdef STMMAC_RX_DEBUG
			if (netif_msg_pktdata(priv)) {
				pr_info(" frame received (%dbytes)", frame_len);
				print_pkt(skb->data, frame_len);
			}
#endif
			skb->protocol = eth_type_trans(skb, priv->dev);

			if (unlikely(status == csum_none)) {
				/* always for the old mac 10/100 */
				skb_checksum_none_assert(skb);
				netif_receive_skb(skb);
			} else {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				napi_gro_receive(&priv->napi, skb);
			}

			priv->dev->stats.rx_packets++;
			priv->dev->stats.rx_bytes += frame_len;
		}
		entry = next_entry;
		p = p_next;	/* use prefetched values */
	}

	stmmac_rx_refill(priv);

	priv->xstats.rx_pkt_n += count;

	return count;
}
Example #15
/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static int
fec_enet_rx(struct net_device *ndev, int budget)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	struct bufdesc *bdp;
	unsigned short status;
	struct	sk_buff	*skb;
	ushort	pkt_len;
	__u8 *data;
	int	pkt_received = 0;

#ifdef CONFIG_M532x
	flush_cache_all();
#endif

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {

		if (pkt_received >= budget)
			break;
		pkt_received++;

		/* Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((status & BD_ENET_RX_LAST) == 0)
			printk("FEC ENET: rcv is not +last\n");

		if (!fep->opened)
			goto rx_processing_done;

		/* Check for errors. */
		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
			   BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			ndev->stats.rx_errors++;
			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
				/* Frame too long or too short. */
				ndev->stats.rx_length_errors++;
			}
			if (status & BD_ENET_RX_NO)	/* Frame alignment */
				ndev->stats.rx_frame_errors++;
			if (status & BD_ENET_RX_CR)	/* CRC Error */
				ndev->stats.rx_crc_errors++;
			if (status & BD_ENET_RX_OV)	/* FIFO overrun */
				ndev->stats.rx_fifo_errors++;
		}

		/* Report late collisions as a frame error.
		 * On this error, the BD is closed, but we don't know what we
		 * have in the buffer.  So, just drop this frame on the floor.
		 */
		if (status & BD_ENET_RX_CL) {
			ndev->stats.rx_errors++;
			ndev->stats.rx_frame_errors++;
			goto rx_processing_done;
		}

		/* Process the incoming frame. */
		ndev->stats.rx_packets++;
		pkt_len = bdp->cbd_datlen;
		ndev->stats.rx_bytes += pkt_len;
		data = (__u8*)__va(bdp->cbd_bufaddr);

		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
				FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);

		if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(data, pkt_len);

		/* This does 16 byte alignment, exactly what we need.
		 * The packet length includes FCS, but we don't want to
		 * include that when passing upstream as it messes up
		 * bridging applications.
		 */
		skb = netdev_alloc_skb(ndev, pkt_len - 4 + NET_IP_ALIGN);

		if (unlikely(!skb)) {
			printk("%s: Memory squeeze, dropping packet.\n",
					ndev->name);
			ndev->stats.rx_dropped++;
		} else {
			skb_reserve(skb, NET_IP_ALIGN);
			skb_put(skb, pkt_len - 4);	/* Make room */
			skb_copy_to_linear_data(skb, data, pkt_len - 4);
			skb->protocol = eth_type_trans(skb, ndev);

			/* Get receive timestamp from the skb */
			if (fep->hwts_rx_en && fep->bufdesc_ex) {
				struct skb_shared_hwtstamps *shhwtstamps =
							    skb_hwtstamps(skb);
				unsigned long flags;
				struct bufdesc_ex *ebdp =
					(struct bufdesc_ex *)bdp;

				memset(shhwtstamps, 0, sizeof(*shhwtstamps));

				spin_lock_irqsave(&fep->tmreg_lock, flags);
				shhwtstamps->hwtstamp = ns_to_ktime(
				    timecounter_cyc2time(&fep->tc, ebdp->ts));
				spin_unlock_irqrestore(&fep->tmreg_lock, flags);
			}

			if (!skb_defer_rx_timestamp(skb))
				napi_gro_receive(&fep->napi, skb);
		}

		bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
				FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);
rx_processing_done:
		/* Clear the status flags for this buffer */
		status &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty */
		status |= BD_ENET_RX_EMPTY;
		bdp->cbd_sc = status;

		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

			ebdp->cbd_esc = BD_ENET_RX_INT;
			ebdp->cbd_prot = 0;
			ebdp->cbd_bdu = 0;
		}

		/* Update BD pointer to next entry */
		if (status & BD_ENET_RX_WRAP)
			bdp = fep->rx_bd_base;
		else
			bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
		/* Doing this here will keep the FEC running while we process
		 * incoming frames.  On a heavily loaded network, we should be
		 * able to keep up at the expense of system resources.
		 */
		writel(0, fep->hwp + FEC_R_DES_ACTIVE);
	}
	fep->cur_rx = bdp;

	return pkt_received;
}
Example #16
/**
 * nfp_net_rx() - receive up to @budget packets on @rx_ring
 * @rx_ring:   RX ring to receive from
 * @budget:    NAPI budget
 *
 * Note, this function is separated out from the napi poll function to
 * more cleanly separate packet receive code from other bookkeeping
 * functions performed in the napi poll function.
 *
 * There are differences between the NFP-3200 firmware and the
 * NFP-6000 firmware.  The NFP-3200 firmware uses a dedicated RX queue
 * to indicate that new packets have arrived.  The NFP-6000 does not
 * have this queue and uses the DD bit in the RX descriptor. This
 * method cannot be used on the NFP-3200 as it causes a race
 * condition: The RX ring write pointer on the NFP-3200 is updated
 * after packets (and descriptors) have been DMAed.  If the DD bit is
 * used and the read pointer is subsequently updated, this may cause
 * the RX queue to underflow (if the firmware has not yet updated the
 * write pointer).  Therefore we use slightly ugly conditional code
 * below to handle the differences.  We may, in the future, update the
 * NFP-3200 firmware to behave the same as the firmware on the
 * NFP-6000.
 *
 * Return: Number of packets received.
 */
static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
{
	struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
	struct nfp_net *nn = r_vec->nfp_net;
	unsigned int data_len, meta_len;
	int avail = 0, pkts_polled = 0;
	struct sk_buff *skb, *new_skb;
	struct nfp_net_rx_desc *rxd;
	dma_addr_t new_dma_addr;
	u32 qcp_wr_p;
	int idx;

	if (nn->is_nfp3200) {
		/* Work out how many packets arrived */
		qcp_wr_p = nfp_qcp_wr_ptr_read(rx_ring->qcp_rx);
		idx = rx_ring->rd_p % rx_ring->cnt;

		if (qcp_wr_p == idx)
			/* No new packets */
			return 0;

		if (qcp_wr_p > idx)
			avail = qcp_wr_p - idx;
		else
			avail = qcp_wr_p + rx_ring->cnt - idx;
	} else {
		avail = budget + 1;
	}

	while (avail > 0 && pkts_polled < budget) {
		idx = rx_ring->rd_p % rx_ring->cnt;

		rxd = &rx_ring->rxds[idx];
		if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD)) {
			if (nn->is_nfp3200)
				nn_dbg(nn, "RX descriptor not valid (DD)%d:%u rxd[0]=%#x rxd[1]=%#x\n",
				       rx_ring->idx, idx,
				       rxd->vals[0], rxd->vals[1]);
			break;
		}
		/* Memory barrier to ensure that we won't do other reads
		 * before the DD bit.
		 */
		dma_rmb();

		rx_ring->rd_p++;
		pkts_polled++;
		avail--;

		skb = rx_ring->rxbufs[idx].skb;

		new_skb = nfp_net_rx_alloc_one(rx_ring, &new_dma_addr,
					       nn->fl_bufsz);
		if (!new_skb) {
			nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[idx].skb,
					    rx_ring->rxbufs[idx].dma_addr);
			u64_stats_update_begin(&r_vec->rx_sync);
			r_vec->rx_drops++;
			u64_stats_update_end(&r_vec->rx_sync);
			continue;
		}

		dma_unmap_single(&nn->pdev->dev,
				 rx_ring->rxbufs[idx].dma_addr,
				 nn->fl_bufsz, DMA_FROM_DEVICE);

		nfp_net_rx_give_one(rx_ring, new_skb, new_dma_addr);

		/*         < meta_len >
		 *  <-- [rx_offset] -->
		 *  ---------------------------------------------------------
		 * | [XX] |  metadata  |             packet           | XXXX |
		 *  ---------------------------------------------------------
		 *         <---------------- data_len --------------->
		 *
		 * The rx_offset is fixed for all packets, the meta_len can vary
		 * on a packet by packet basis. If rx_offset is set to zero
		 * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the
		 * buffer and is immediately followed by the packet (no [XX]).
		 */
		meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
		data_len = le16_to_cpu(rxd->rxd.data_len);

		if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
			skb_reserve(skb, meta_len);
		else
			skb_reserve(skb, nn->rx_offset);
		skb_put(skb, data_len - meta_len);

		nfp_net_set_hash(nn->netdev, skb, rxd);

		/* Pad small frames to minimum */
		if (skb_put_padto(skb, 60))
			break;

		/* Stats update */
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->rx_pkts++;
		r_vec->rx_bytes += skb->len;
		u64_stats_update_end(&r_vec->rx_sync);

		skb_record_rx_queue(skb, rx_ring->idx);
		skb->protocol = eth_type_trans(skb, nn->netdev);

		nfp_net_rx_csum(nn, r_vec, rxd, skb);

		if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       le16_to_cpu(rxd->rxd.vlan));

		napi_gro_receive(&rx_ring->r_vec->napi, skb);
	}

	if (nn->is_nfp3200)
		nfp_qcp_rd_ptr_add(rx_ring->qcp_rx, pkts_polled);

	return pkts_polled;
}
Example #17
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
	struct ibmveth_adapter *adapter =
			container_of(napi, struct ibmveth_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int frames_processed = 0;
	unsigned long lpar_rc;
	struct iphdr *iph;

restart_poll:
	while (frames_processed < budget) {
		if (!ibmveth_rxq_pending_buffer(adapter))
			break;

		smp_rmb();
		if (!ibmveth_rxq_buffer_valid(adapter)) {
			wmb(); /* suggested by larson1 */
			adapter->rx_invalid_buffer++;
			netdev_dbg(netdev, "recycling invalid buffer\n");
			ibmveth_rxq_recycle_buffer(adapter);
		} else {
			struct sk_buff *skb, *new_skb;
			int length = ibmveth_rxq_frame_length(adapter);
			int offset = ibmveth_rxq_frame_offset(adapter);
			int csum_good = ibmveth_rxq_csum_good(adapter);

			skb = ibmveth_rxq_get_buffer(adapter);

			new_skb = NULL;
			if (length < rx_copybreak)
				new_skb = netdev_alloc_skb(netdev, length);

			if (new_skb) {
				skb_copy_to_linear_data(new_skb,
							skb->data + offset,
							length);
				if (rx_flush)
					ibmveth_flush_buffer(skb->data,
						length + offset);
				if (!ibmveth_rxq_recycle_buffer(adapter))
					kfree_skb(skb);
				skb = new_skb;
			} else {
				ibmveth_rxq_harvest_buffer(adapter);
				skb_reserve(skb, offset);
			}

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);

			if (csum_good) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				if (be16_to_cpu(skb->protocol) == ETH_P_IP) {
					iph = (struct iphdr *)skb->data;

					/* If the IP checksum is not offloaded and the packet
					 * is a large-send packet, the checksum must be rebuilt.
					 */
					if (iph->check == 0xffff) {
						iph->check = 0;
						iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
						adapter->rx_large_packets++;
					}
				}
			}

			napi_gro_receive(napi, skb);	/* send it up */

			netdev->stats.rx_packets++;
			netdev->stats.rx_bytes += length;
			frames_processed++;
		}
	}

	ibmveth_replenish_task(adapter);

	if (frames_processed < budget) {
		napi_complete(napi);

		/* We think we are done - reenable interrupts,
		 * then check once more to make sure we are done.
		 */
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_ENABLE);

		BUG_ON(lpar_rc != H_SUCCESS);

		if (ibmveth_rxq_pending_buffer(adapter) &&
		    napi_reschedule(napi)) {
			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
					       VIO_IRQ_DISABLE);
			goto restart_poll;
		}
	}

	return frames_processed;
}
Example #18
static int qtnf_pcie_pearl_rx_poll(struct napi_struct *napi, int budget)
{
	struct qtnf_bus *bus = container_of(napi, struct qtnf_bus, mux_napi);
	struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
	struct qtnf_pcie_bus_priv *priv = &ps->base;
	struct net_device *ndev = NULL;
	struct sk_buff *skb = NULL;
	int processed = 0;
	struct qtnf_pearl_rx_bd *rxbd;
	dma_addr_t skb_paddr;
	int consume;
	u32 descw;
	u32 psize;
	u16 r_idx;
	u16 w_idx;
	int ret;

	while (processed < budget) {
		if (!qtnf_rx_data_ready(ps))
			goto rx_out;

		r_idx = priv->rx_bd_r_index;
		rxbd = &ps->rx_bd_vbase[r_idx];
		descw = le32_to_cpu(rxbd->info);

		skb = priv->rx_skb[r_idx];
		psize = QTN_GET_LEN(descw);
		consume = 1;

		if (!(descw & QTN_TXDONE_MASK)) {
			pr_warn("skip invalid rxbd[%d]\n", r_idx);
			consume = 0;
		}

		if (!skb) {
			pr_warn("skip missing rx_skb[%d]\n", r_idx);
			consume = 0;
		}

		if (skb && (skb_tailroom(skb) <  psize)) {
			pr_err("skip packet with invalid length: %u > %u\n",
			       psize, skb_tailroom(skb));
			consume = 0;
		}

		if (skb) {
			skb_paddr = QTN_HOST_ADDR(le32_to_cpu(rxbd->addr_h),
						  le32_to_cpu(rxbd->addr));
			pci_unmap_single(priv->pdev, skb_paddr, SKB_BUF_SIZE,
					 PCI_DMA_FROMDEVICE);
		}

		if (consume) {
			skb_put(skb, psize);
			ndev = qtnf_classify_skb(bus, skb);
			if (likely(ndev)) {
				qtnf_update_rx_stats(ndev, skb);
				skb->protocol = eth_type_trans(skb, ndev);
				napi_gro_receive(napi, skb);
			} else {
				pr_debug("drop untagged skb\n");
				bus->mux_dev.stats.rx_dropped++;
				dev_kfree_skb_any(skb);
			}
		} else {
			if (skb) {
				bus->mux_dev.stats.rx_dropped++;
				dev_kfree_skb_any(skb);
			}
		}

		priv->rx_skb[r_idx] = NULL;
		if (++r_idx >= priv->rx_bd_num)
			r_idx = 0;

		priv->rx_bd_r_index = r_idx;

		/* replace the processed buffer with a new one */
		w_idx = priv->rx_bd_w_index;
		while (CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
				  priv->rx_bd_num) > 0) {
			if (++w_idx >= priv->rx_bd_num)
				w_idx = 0;

			ret = pearl_skb2rbd_attach(ps, w_idx);
			if (ret) {
				pr_err("failed to allocate new rx_skb[%d]\n",
				       w_idx);
				break;
			}
		}

		processed++;
	}

rx_out:
	if (processed < budget) {
		napi_complete(napi);
		qtnf_en_rxdone_irq(ps);
	}

	return processed;
}
gro_result_t vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
			      unsigned int vlan_tci, struct sk_buff *skb)
{
	__vlan_hwaccel_put_tag(skb, vlan_tci);
	return napi_gro_receive(napi, skb);
}
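
All of the examples above follow the same NAPI receive pattern: pull completed buffers off the rx ring, unmap them, attach an skb, set skb->protocol with eth_type_trans(), hand the skb to GRO with napi_gro_receive(), and stop once the budget is exhausted (calling napi_complete() and re-enabling the rx interrupt when the ring runs dry). The sketch below distills that pattern; it is not taken from any of the drivers above, and my_priv, my_rx_done, my_rx_get_skb, my_rx_refill and my_enable_rx_irq are hypothetical driver helpers.

/* A minimal sketch of the rx-poll pattern shared by the examples above.
 * my_priv and the my_* helpers are hypothetical, standing in for the
 * driver-specific ring handling each example implements differently.
 */
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_priv {
	struct napi_struct napi;
	struct net_device *ndev;
};

static bool my_rx_done(struct my_priv *priv);		    /* completed descriptor? */
static struct sk_buff *my_rx_get_skb(struct my_priv *priv); /* unmap + skb_put() */
static void my_rx_refill(struct my_priv *priv);		    /* repost rx buffers */
static void my_enable_rx_irq(struct my_priv *priv);	    /* unmask rx interrupt */

static int my_rx_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int work_done = 0;

	while (work_done < budget && my_rx_done(priv)) {
		struct sk_buff *skb = my_rx_get_skb(priv);
		unsigned int len;

		if (unlikely(!skb))
			break;

		len = skb->len;
		skb_checksum_none_assert(skb);	/* no hw checksum assumed */
		skb->protocol = eth_type_trans(skb, priv->ndev);

		napi_gro_receive(napi, skb);	/* skb now belongs to GRO */

		priv->ndev->stats.rx_packets++;
		priv->ndev->stats.rx_bytes += len;
		work_done++;
	}

	my_rx_refill(priv);

	if (work_done < budget) {
		napi_complete(napi);
		my_enable_rx_irq(priv);
	}

	return work_done;
}

The closing vlan_gro_receive() helper above shows how older kernels that still used struct vlan_group attached a hardware-extracted VLAN tag with __vlan_hwaccel_put_tag() and then fed the skb into the same napi_gro_receive() path.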