Example #1
static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			__kfree_skb(skb);
			continue;
		}

		local_irq_save(flags);
		netif_tx_lock(dev);
		if (netif_queue_stopped(dev) ||
		    dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			netif_tx_unlock(dev);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		netif_tx_unlock(dev);
		local_irq_restore(flags);
	}
}
Example #2
static void dev_watchdog(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;

	netif_tx_lock(dev);
	if (dev->qdisc != &noop_qdisc) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
		    netif_carrier_ok(dev)) {
			if (netif_queue_stopped(dev) &&
			    time_after(jiffies, dev->trans_start + dev->watchdog_timeo)) {
				printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n",
				       dev->name);
				dev->tx_timeout(dev);
			}
			if (!mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + dev->watchdog_timeo)))
				dev_hold(dev);
		}
	}
	netif_tx_unlock(dev);

	dev_put(dev);
}
Example #3
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned fill_level;
	struct efx_nic *efx = tx_queue->efx;

	EFX_BUG_ON_PARANOID(index > efx->type->txd_ring_mask);

	efx_dequeue_buffers(tx_queue, index);

	/* See if we need to restart the netif queue.  This barrier
	 * separates the update of read_count from the test of
	 * stopped. */
	smp_mb();
	if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) {
		fill_level = tx_queue->insert_count - tx_queue->read_count;
		if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) {
			EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));

			/* Do this under netif_tx_lock(), to avoid racing
			 * with efx_xmit(). */
			netif_tx_lock(efx->net_dev);
			if (tx_queue->stopped) {
				tx_queue->stopped = 0;
				efx_wake_queue(efx);
			}
			netif_tx_unlock(efx->net_dev);
		}
	}
}
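The barrier comment above only shows the completion half of the handshake. Below is a minimal sketch of the matching xmit-side logic, assuming the same sfc structures and the EFX_NETDEV_TX_THRESHOLD() macro, and using the generic netif_stop_queue()/netif_wake_queue() helpers where the driver has its own wrappers; this is not the real efx_xmit().
/* Sketch (assumptions noted above): stop the queue when the ring looks full,
 * then re-read the fill level so a completion that ran in between is noticed.
 */
static void example_efx_stop_if_full(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned fill_level;

	fill_level = tx_queue->insert_count - tx_queue->read_count;
	if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue))
		return;

	/* Stop first, then re-check: this store/load order pairs with the
	 * smp_mb() in efx_xmit_done() above. */
	netif_stop_queue(efx->net_dev);
	tx_queue->stopped = 1;
	smp_mb();

	fill_level = tx_queue->insert_count - tx_queue->read_count;
	if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) {
		/* A completion freed space while we were stopping; undo. */
		tx_queue->stopped = 0;
		netif_wake_queue(efx->net_dev);
	}
}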
Example #4
int bgmac_enet_resume(struct bgmac *bgmac)
{
	int rc;

	if (!netif_running(bgmac->net_dev))
		return 0;

	rc = bgmac_dma_init(bgmac);
	if (rc)
		return rc;

	bgmac_chip_init(bgmac);

	napi_enable(&bgmac->napi);

	netif_tx_lock(bgmac->net_dev);
	netif_device_attach(bgmac->net_dev);
	netif_tx_unlock(bgmac->net_dev);

	netif_start_queue(bgmac->net_dev);

	phy_start(bgmac->net_dev->phydev);

	return 0;
}
Example #5
/**
 * rmnet_map_send_ack() - Send N/ACK message for MAP commands
 * @skb: Socket buffer containing the MAP command message
 * @type: N/ACK message selector
 * @config: Physical end-point configuration of ingress device
 *
 * skb is modified to contain the message type selector. The message is then
 * transmitted on skb->dev. Note that this function grabs global Tx lock on
 * skb->dev for latency reasons.
 *
 * Return:
 *      - void
 */
static void rmnet_map_send_ack(struct sk_buff *skb,
			       unsigned char type,
			       struct rmnet_phys_ep_conf_s *config)
{
	struct rmnet_map_control_command_s *cmd;
	int xmit_status;

	if (unlikely(!skb))
		BUG();

	skb->protocol = htons(ETH_P_MAP);

	if ((config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV3) ||
	    (config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4)) {
		if (unlikely(skb->len < (sizeof(struct rmnet_map_header_s) +
		    RMNET_MAP_GET_LENGTH(skb)
		    + sizeof(struct rmnet_map_dl_checksum_trailer_s)))) {
			rmnet_stats_dl_checksum(
			  RMNET_MAP_CHECKSUM_ERR_BAD_BUFFER);
			return;
		}

		skb_trim(skb, skb->len -
			 sizeof(struct rmnet_map_dl_checksum_trailer_s));
	}

	cmd = RMNET_MAP_GET_CMD_START(skb);
	cmd->cmd_type = type & 0x03;

	netif_tx_lock(skb->dev);
	xmit_status = skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev);
	netif_tx_unlock(skb->dev);

	LOGD("MAP command ACK=%hhu sent with rc: %d", type & 0x03, xmit_status);
}
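The kernel-doc above notes that the ACK is pushed straight into the driver under the global Tx lock instead of going through the qdisc. Here is a hedged, generic sketch of that pattern (hypothetical helper, not part of rmnet), including the queue-stopped check and failure handling a general-purpose caller would want:
/* Send one control skb directly on a device, bypassing the qdisc, under the
 * global Tx lock. Drops the skb rather than leaking it if the driver refuses.
 */
static int example_xmit_ctrl_skb(struct sk_buff *skb, struct net_device *dev)
{
	int rc = NETDEV_TX_BUSY;

	netif_tx_lock(dev);
	if (!netif_queue_stopped(dev))
		rc = dev->netdev_ops->ndo_start_xmit(skb, dev);
	netif_tx_unlock(dev);

	if (rc != NETDEV_TX_OK)
		dev_kfree_skb_any(skb);	/* fire-and-forget: drop on failure */

	return rc;
}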
Example #6
/*
 * NOTE: Called under dev->queue_lock with locally disabled BH.
 *
 * __LINK_STATE_QDISC_RUNNING guarantees only one CPU can process this
 * device at a time. dev->queue_lock serializes queue accesses for
 * this device AND dev->qdisc pointer itself.
 *
 *  netif_tx_lock serializes accesses to device driver.
 *
 *  dev->queue_lock and netif_tx_lock are mutually exclusive,
 *  if one is grabbed, another must be free.
 *
 * Note, that this procedure can be called by a watchdog timer
 *
 * Returns to the caller:
 *				0  - queue is empty or throttled.
 *				>0 - queue is not empty.
 *
 */
static inline int qdisc_restart(struct net_device *dev)
{
	struct Qdisc *q = dev->qdisc;
	struct sk_buff *skb;
	unsigned lockless;
	int ret;

	/* Dequeue packet */
	if (unlikely((skb = dev_dequeue_skb(dev, q)) == NULL))
		return 0;

	/*
	 * When the driver has LLTX set, it does its own locking in
	 * start_xmit. These checks are worth it because even uncongested
	 * locks can be quite expensive. The driver can do a trylock, as
	 * is being done here; in case of lock contention it should return
	 * NETDEV_TX_LOCKED and the packet will be requeued.
	 */
	lockless = (dev->features & NETIF_F_LLTX);

	if (!lockless && !netif_tx_trylock(dev)) {
		/* Another CPU grabbed the driver tx lock */
		return handle_dev_cpu_collision(skb, dev, q);
	}

	/* And release queue */
	spin_unlock(&dev->queue_lock);

	ret = dev_hard_start_xmit(skb, dev);

	if (!lockless)
		netif_tx_unlock(dev);

	spin_lock(&dev->queue_lock);
	q = dev->qdisc;

	switch (ret) {
	case NETDEV_TX_OK:
		/* Driver sent out skb successfully */
		ret = qdisc_qlen(q);
		break;

	case NETDEV_TX_LOCKED:
		/* Driver try lock failed */
		ret = handle_dev_cpu_collision(skb, dev, q);
		break;

	default:
		/* Driver returned NETDEV_TX_BUSY - requeue skb */
		if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit()))
			printk(KERN_WARNING "BUG %s code %d qlen %d\n",
			       dev->name, ret, q->q.qlen);

		ret = dev_requeue_skb(skb, dev, q);
		break;
	}

	return ret;
}
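For context, this is roughly how __qdisc_run() of the same kernel generation drove qdisc_restart(), honouring the __LINK_STATE_QDISC_RUNNING guarantee described in the comment above (a sketch; details varied between releases):
/* Keep transmitting while the qdisc has packets and the device queue is not
 * stopped, then drop the RUNNING bit that guarantees single-CPU entry. */
void __qdisc_run(struct net_device *dev)
{
	do {
		if (!qdisc_restart(dev))
			break;
	} while (!netif_queue_stopped(dev));

	clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
}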
Example #7
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev)
{
	int status;
	struct netpoll_info *npinfo;

	if (!np || !dev || !netif_running(dev)) {
		__kfree_skb(skb);
		return;
	}

	npinfo = dev->npinfo;

	/* avoid recursion */
	if (npinfo->poll_owner == smp_processor_id() ||
	    np->dev->xmit_lock_owner == smp_processor_id()) {
		if (np->drop)
			np->drop(skb);
		else
			__kfree_skb(skb);
		return;
	}

	do {
		npinfo->tries--;
		netif_tx_lock(dev);

		/*
		 * network drivers do not expect to be called if the queue is
		 * stopped.
		 */
		status = NETDEV_TX_BUSY;
		if (!netif_queue_stopped(dev)) {
			dev->priv_flags |= IFF_IN_NETPOLL;
			status = dev->hard_start_xmit(skb, dev);
			dev->priv_flags &= ~IFF_IN_NETPOLL;
		}

		netif_tx_unlock(dev);

		/* success */
		if(!status) {
			npinfo->tries = MAX_RETRIES; /* reset */
			return;
		}

		/* transmit busy */
		netpoll_poll_dev(dev);
		udelay(50);
	} while (npinfo->tries > 0);
}
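The "drivers do not expect to be called if the queue is stopped" rule comes from the driver-side convention sketched below (hypothetical driver, ring management elided): ndo_start_xmit() stops its own queue when the ring is nearly full and only the completion path wakes it again, so anything that bypasses the qdisc, as netpoll does, must test netif_queue_stopped() first. Older kernels used a plain int return instead of netdev_tx_t.
struct example_priv {
	unsigned int tx_free;	/* free descriptors left in the TX ring */
};

static netdev_tx_t example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	/* ... map the skb and post it to the hardware ring ... */
	priv->tx_free--;

	if (priv->tx_free <= MAX_SKB_FRAGS + 1)
		netif_stop_queue(dev);	/* TX-completion path calls netif_wake_queue() */

	return NETDEV_TX_OK;
}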
Example #8
static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	struct net_device *dev = np->dev;
	struct netpoll_info *npinfo = np->dev->npinfo;

	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		__kfree_skb(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 &&
		    npinfo->poll_owner != smp_processor_id()) {
		unsigned long flags;

		local_irq_save(flags);
		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (netif_tx_trylock(dev)) {
				if (!netif_queue_stopped(dev))
					status = dev->hard_start_xmit(skb, dev);
				netif_tx_unlock(dev);

				if (status == NETDEV_TX_OK)
					break;

			}

			/* tickle device maybe there is some cleanup */
			netpoll_poll(np);

			udelay(USEC_PER_POLL);
		}
		local_irq_restore(flags);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work,0);
	}
}
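When the busy-wait gives up, the skb lands on npinfo->txq and tx_work is scheduled; that work item is the queue_process() function shown in Example #1. A sketch of how the two are assumed to be wired together at netpoll setup time (mirroring netpoll_setup() of the same era):
static void example_netpoll_txq_init(struct netpoll_info *npinfo)
{
	/* txq feeds the queue_process() delayed work from Example #1 */
	skb_queue_head_init(&npinfo->txq);
	INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
}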
Example #9
static void dev_watchdog(struct timer_list *t)
{
	struct net_device *dev = from_timer(dev, t, watchdog_timer);

	netif_tx_lock(dev);
	if (!qdisc_tx_is_noop(dev)) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
		    netif_carrier_ok(dev)) {
			int some_queue_timedout = 0;
			unsigned int i;
			unsigned long trans_start;

			for (i = 0; i < dev->num_tx_queues; i++) {
				struct netdev_queue *txq;

				txq = netdev_get_tx_queue(dev, i);
				trans_start = txq->trans_start;
				if (netif_xmit_stopped(txq) &&
				    time_after(jiffies, (trans_start +
							 dev->watchdog_timeo))) {
					some_queue_timedout = 1;
					txq->trans_timeout++;
					break;
				}
			}

			if (some_queue_timedout) {
				WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
				       dev->name, netdev_drivername(dev), i);
				dev->netdev_ops->ndo_tx_timeout(dev);
			}
			if (!mod_timer(&dev->watchdog_timer,
				       round_jiffies(jiffies +
						     dev->watchdog_timeo)))
				dev_hold(dev);
		}
	}
	netif_tx_unlock(dev);

	dev_put(dev);
}
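The watchdog above only does anything for drivers that provide ndo_tx_timeout() and a watchdog_timeo. A sketch of that driver-side hookup follows; the "foo" names are hypothetical, and since kernel 5.6 ndo_tx_timeout() also receives the index of the queue that timed out.
static void foo_tx_timeout(struct net_device *dev)
{
	netdev_err(dev, "TX watchdog fired, resetting TX path\n");
	/* typically: schedule a reset work item here */
}

static const struct net_device_ops foo_netdev_ops = {
	/* .ndo_open, .ndo_start_xmit, ... omitted */
	.ndo_tx_timeout	= foo_tx_timeout,
};

static void foo_setup(struct net_device *dev)
{
	dev->netdev_ops = &foo_netdev_ops;
	/* how long a stopped queue may sit before ndo_tx_timeout() is called */
	dev->watchdog_timeo = 5 * HZ;
}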
Example #10
static void rmnet_map_send_ack(struct sk_buff *skb,
			       unsigned char type,
			       struct rmnet_port *port)
{
	struct rmnet_map_control_command *cmd;
	struct net_device *dev = skb->dev;

	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
		skb_trim(skb,
			 skb->len - sizeof(struct rmnet_map_dl_csum_trailer));

	skb->protocol = htons(ETH_P_MAP);

	cmd = RMNET_MAP_GET_CMD_START(skb);
	cmd->cmd_type = type & 0x03;

	netif_tx_lock(dev);
	dev->netdev_ops->ndo_start_xmit(skb, dev);
	netif_tx_unlock(dev);
}
Example #11
int bgmac_enet_suspend(struct bgmac *bgmac)
{
	if (!netif_running(bgmac->net_dev))
		return 0;

	phy_stop(bgmac->net_dev->phydev);

	netif_stop_queue(bgmac->net_dev);

	napi_disable(&bgmac->napi);

	netif_tx_lock(bgmac->net_dev);
	netif_device_detach(bgmac->net_dev);
	netif_tx_unlock(bgmac->net_dev);

	bgmac_chip_intrs_off(bgmac);
	bgmac_chip_reset(bgmac);
	bgmac_dma_cleanup(bgmac);

	return 0;
}
Example #12
static void dev_watchdog(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;

	netif_tx_lock(dev);
	if (!qdisc_tx_is_noop(dev)) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
		    netif_carrier_ok(dev)) {
			int some_queue_stopped = 0;
			unsigned int i;

			for (i = 0; i < dev->num_tx_queues; i++) {
				struct netdev_queue *txq;

				txq = netdev_get_tx_queue(dev, i);
				if (netif_tx_queue_stopped(txq)) {
					some_queue_stopped = 1;
					break;
				}
			}

			if (some_queue_stopped &&
			    time_after(jiffies, (dev->trans_start +
						 dev->watchdog_timeo))) {
				char drivername[64];
				WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n",
				       dev->name, netdev_drivername(dev, drivername, 64));
				dev->tx_timeout(dev);
			}
			if (!mod_timer(&dev->watchdog_timer,
				       round_jiffies(jiffies +
						     dev->watchdog_timeo)))
				dev_hold(dev);
		}
	}
	netif_tx_unlock(dev);

	dev_put(dev);
}
Example #13
static void hisi_femac_xmit_reclaim(struct net_device *dev)
{
	struct sk_buff *skb;
	struct hisi_femac_priv *priv = netdev_priv(dev);
	struct hisi_femac_queue *txq = &priv->txq;
	unsigned int bytes_compl = 0, pkts_compl = 0;
	u32 val;

	netif_tx_lock(dev);

	val = readl(priv->port_base + ADDRQ_STAT) & TX_CNT_INUSE_MASK;
	while (val < priv->tx_fifo_used_cnt) {
		skb = txq->skb[txq->tail];
		if (unlikely(!skb)) {
			netdev_err(dev, "xmitq_cnt_inuse=%d, tx_fifo_used=%d\n",
				   val, priv->tx_fifo_used_cnt);
			break;
		}
		hisi_femac_tx_dma_unmap(priv, skb, txq->tail);
		pkts_compl++;
		bytes_compl += skb->len;
		dev_kfree_skb_any(skb);

		priv->tx_fifo_used_cnt--;

		val = readl(priv->port_base + ADDRQ_STAT) & TX_CNT_INUSE_MASK;
		txq->skb[txq->tail] = NULL;
		txq->tail = (txq->tail + 1) % txq->num;
	}

	netdev_completed_queue(dev, pkts_compl, bytes_compl);

	if (unlikely(netif_queue_stopped(dev)) && pkts_compl)
		netif_wake_queue(dev);

	netif_tx_unlock(dev);
}
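netdev_completed_queue() above is the completion half of byte queue limits; it assumes the xmit path reported the same bytes with netdev_sent_queue(). A minimal sketch of that pairing (hypothetical driver, hardware handoff elided):
static netdev_tx_t example_bql_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int len = skb->len;	/* read before the skb can be completed/freed */

	/* ... map the skb and hand it to the hardware ... */

	netdev_sent_queue(dev, len);	/* paired with netdev_completed_queue() above */
	return NETDEV_TX_OK;
}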
Example #14
/* Kick device.

   Returns:  0  - queue is empty or throttled.
	    >0  - queue is not empty.

   NOTE: Called under dev->queue_lock with locally disabled BH.
*/
static inline int qdisc_restart(struct net_device *dev)
{
	struct Qdisc *q = dev->qdisc;
	struct sk_buff *skb;

	/* Dequeue packet */
	if (((skb = dev->gso_skb)) || ((skb = q->dequeue(q)))) {
		unsigned nolock = (dev->features & NETIF_F_LLTX);

		dev->gso_skb = NULL;

		/*
		 * When the driver has LLTX set it does its own locking
		 * in start_xmit. No need to add additional overhead by
		 * locking again. These checks are worth it because
		 * even uncongested locks can be quite expensive.
		 * The driver can do trylock like here too, in case
		 * of lock congestion it should return -1 and the packet
		 * will be requeued.
		 */
		if (!nolock) {
			if (!netif_tx_trylock(dev)) {
			collision:
				/* So, someone grabbed the driver. */

				/* It may be transient configuration error,
				   when hard_start_xmit() recurses. We detect
				   it by checking xmit owner and drop the
				   packet when deadloop is detected.
				*/
				if (dev->xmit_lock_owner == smp_processor_id()) {
					kfree_skb(skb);
					if (net_ratelimit())
						printk(KERN_DEBUG "Dead loop on netdevice %s, fix it urgently!\n", dev->name);
					goto out;
				}
				__get_cpu_var(netdev_rx_stat).cpu_collision++;
				goto requeue;
			}
		}

		{
			/* And release queue */
			spin_unlock(&dev->queue_lock);

			if (!netif_queue_stopped(dev)) {
				int ret;

				ret = dev_hard_start_xmit(skb, dev);
				if (ret == NETDEV_TX_OK) {
					if (!nolock) {
						netif_tx_unlock(dev);
					}
					spin_lock(&dev->queue_lock);
					q = dev->qdisc;
					goto out;
				}
				if (ret == NETDEV_TX_LOCKED && nolock) {
					spin_lock(&dev->queue_lock);
					q = dev->qdisc;
					goto collision;
				}
			}

			/* NETDEV_TX_BUSY - we need to requeue */
			/* Release the driver */
			if (!nolock) {
				netif_tx_unlock(dev);
			}
			spin_lock(&dev->queue_lock);
			q = dev->qdisc;
		}

		/* Device kicked us out :(
		   This is possible in three cases:

		   0. driver is locked
		   1. fastroute is enabled
		   2. device cannot determine busy state
		      before start of transmission (f.e. dialout)
		   3. device is buggy (ppp)
		 */

requeue:
		if (unlikely(q == &noop_qdisc))
			kfree_skb(skb);
		else if (skb->next)
			dev->gso_skb = skb;
		else
			q->ops->requeue(skb, q);
		netif_schedule(dev);
	}
	return 0;

out:
	BUG_ON((int) q->q.qlen < 0);
	return q->q.qlen;
}
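The dev->xmit_lock_owner test above works because, in pre-multiqueue kernels, netif_tx_lock() recorded which CPU holds the driver lock, roughly as sketched below (current kernels track an owner per TX queue instead):
static inline void netif_tx_lock(struct net_device *dev)
{
	spin_lock(&dev->_xmit_lock);
	dev->xmit_lock_owner = smp_processor_id();
}

static inline void netif_tx_unlock(struct net_device *dev)
{
	dev->xmit_lock_owner = -1;
	spin_unlock(&dev->_xmit_lock);
}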
Example #15
File: ifb.c Project: cilynx/dd-wrt
static void ri_tasklet(unsigned long dev)
{

	struct net_device *_dev = (struct net_device *)dev;
	struct ifb_private *dp = netdev_priv(_dev);
	struct net_device_stats *stats = &dp->stats;
	struct sk_buff *skb;

	dp->st_task_enter++;
	if ((skb = skb_peek(&dp->tq)) == NULL) {
		dp->st_txq_refl_try++;
		if (netif_tx_trylock(_dev)) {
			dp->st_rxq_enter++;
			while ((skb = skb_dequeue(&dp->rq)) != NULL) {
				skb_queue_tail(&dp->tq, skb);
				dp->st_rx2tx_tran++;
			}
			netif_tx_unlock(_dev);
		} else {
			/* reschedule */
			dp->st_rxq_notenter++;
			goto resched;
		}
	}

	while ((skb = skb_dequeue(&dp->tq)) != NULL) {
		u32 from = G_TC_FROM(skb->tc_verd);

		skb->tc_verd = 0;
		skb->tc_verd = SET_TC_NCLS(skb->tc_verd);
		stats->tx_packets++;
		stats->tx_bytes +=skb->len;

		skb->dev = __dev_get_by_index(skb->iif);
		if (!skb->dev) {
			dev_kfree_skb(skb);
			stats->tx_dropped++;
			break;
		}
		skb->iif = _dev->ifindex;

		if (from & AT_EGRESS) {
			dp->st_rx_frm_egr++;
			dev_queue_xmit(skb);
		} else if (from & AT_INGRESS) {
			dp->st_rx_frm_ing++;
			skb_pull(skb, skb->dev->hard_header_len);
			netif_rx(skb);
		} else
			BUG();
	}

	if (netif_tx_trylock(_dev)) {
		dp->st_rxq_check++;
		if ((skb = skb_peek(&dp->rq)) == NULL) {
			dp->tasklet_pending = 0;
			if (netif_queue_stopped(_dev))
				netif_wake_queue(_dev);
		} else {
			dp->st_rxq_rsch++;
			netif_tx_unlock(_dev);
			goto resched;
		}
		netif_tx_unlock(_dev);
	} else {
resched:
		dp->tasklet_pending = 1;
		tasklet_schedule(&dp->ifb_tasklet);
	}

}
Example #16
/**
 * stmmac_tx:
 * @priv: private driver structure
 * Description: it reclaims resources after transmission completes.
 */
static void stmmac_tx(struct stmmac_priv *priv)
{
	unsigned int txsize = priv->dma_tx_size;

	while (priv->dirty_tx != priv->cur_tx) {
		int last;
		unsigned int entry = priv->dirty_tx % txsize;
		struct sk_buff *skb = priv->tx_skbuff[entry];
		struct dma_desc *p = priv->dma_tx + entry;

		/* Check if the descriptor is owned by the DMA. */
		if (priv->hw->desc->get_tx_owner(p))
			break;

		/* Verify tx error by looking at the last segment */
		last = priv->hw->desc->get_tx_ls(p);
		if (likely(last)) {
			int tx_error =
				priv->hw->desc->tx_status(&priv->dev->stats,
							  &priv->xstats, p,
							  priv->ioaddr);
			if (likely(tx_error == 0)) {
				priv->dev->stats.tx_packets++;
				priv->xstats.tx_pkt_n++;
			} else
				priv->dev->stats.tx_errors++;
		}
		TX_DBG("%s: curr %d, dirty %d\n", __func__,
			priv->cur_tx, priv->dirty_tx);

		if (likely(p->des2))
			dma_unmap_single(priv->device, p->des2,
					 priv->hw->desc->get_tx_len(p),
					 DMA_TO_DEVICE);
		if (unlikely(p->des3))
			p->des3 = 0;

		if (likely(skb != NULL)) {
			/*
			 * If there's room in the queue (limit it to size)
			 * we add this skb back into the pool,
			 * if it's the right size.
			 */
			if ((skb_queue_len(&priv->rx_recycle) <
				priv->dma_rx_size) &&
				skb_recycle_check(skb, priv->dma_buf_sz))
				__skb_queue_head(&priv->rx_recycle, skb);
			else
				dev_kfree_skb(skb);

			priv->tx_skbuff[entry] = NULL;
		}

		priv->hw->desc->release_tx_desc(p);

		entry = (++priv->dirty_tx) % txsize;
	}
	if (unlikely(netif_queue_stopped(priv->dev) &&
		     stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) {
		netif_tx_lock(priv->dev);
		if (netif_queue_stopped(priv->dev) &&
		     stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv)) {
			TX_DBG("%s: restart transmit\n", __func__);
			netif_wake_queue(priv->dev);
		}
		netif_tx_unlock(priv->dev);
	}
}
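The rx_recycle queue that freed TX skbs are parked on above is assumed to be drained by the RX refill path, roughly as sketched below (hypothetical helper following the stmmac structures of the same era; bfsize is the RX buffer size):
static struct sk_buff *example_stmmac_rx_alloc(struct stmmac_priv *priv,
					       int bfsize)
{
	struct sk_buff *skb;

	/* reuse a recycled TX skb if one is available, else allocate */
	skb = __skb_dequeue(&priv->rx_recycle);
	if (!skb)
		skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);

	return skb;
}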
Example #17
int vnic_rx(struct vnic_login *login, struct sk_buff *skb, struct ib_wc *wc)
{
	ASSERT(skb);
	vnic_dbg_skb("RX", skb, (unsigned long)-1, (unsigned long)0);

	if (no_bxm) {
	if (no_bxm) {
		/* In no_bxm mode, we update the neigh table based on ARP replies.
		 * QPN & LID are retrieved from the IB completion
		 * ATTENTION: on RSS mode, make sure that ARPs are
		 * sent on base QPN
		 */
		struct vnic_neigh *neighe;
		struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
		struct arphdr *arp_hdr = (struct arphdr *)(skb->data + ETH_HLEN);
		u16 eth_proto = ntohs(eth_hdr->h_proto);
		u16 arp_proto = ntohs(arp_hdr->ar_op);

		if (eth_proto != ETH_P_ARP)
			goto out;
		if (arp_proto == ARPOP_REQUEST)
			vnic_dbg_data(login->name, "ARP REQUEST\n");
		else
			vnic_dbg_data(login->name, "ARP REPLY\n");

		/* don't stop TX queue, only try, this way we avoid blocking 
		 * IRQs in TX flow (performance wise).
		 * other vnic_neighe_* functions are not called in parallel 
		 * to this flow (in no_bxm mode)
		 */
		if (!vnic_netif_tx_trylock(login->dev))
			goto out;

		neighe = vnic_neighe_search(login, eth_hdr->h_source);
		if (!IS_ERR(neighe)) {
			/* if IB address didn't change, do nothing */
			if (neighe->qpn == wc->src_qp &&
			    neighe->lid == wc->slid)
				goto unlock;
			/* else, del old neigh entry, and add a new one */
			vnic_neighe_del(login, neighe);
			vnic_neighe_dealloc(neighe);
		}

		/* RSS: assume that your neighbours are like you */
		neighe = vnic_neighe_alloc(login, eth_hdr->h_source,
					   wc->slid, wc->src_qp,
					   login->rx_rings_num > 1 ? 1 : 0);
		if (IS_ERR(neighe))
			goto unlock;
		if (vnic_neighe_add(login, neighe))
			vnic_neighe_dealloc(neighe);
unlock:
		netif_tx_unlock(login->dev);
	}
out:

	/* shared_vnic may receive PACKET_OTHERHOST
	 * we 'fix' the pkt_type here so the kernel
	 * won't drop it
	 */
	if (skb->pkt_type == PACKET_OTHERHOST && login->shared_vnic)
		skb->pkt_type = PACKET_HOST;

	netif_receive_skb(skb);

	return 0;

}