Example #1
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct hv_device *hdev = ndevctx->device_ctx;
	struct netvsc_device *nvdev = hv_get_drvdata(hdev);
	struct netvsc_device_info device_info;
	int limit = ETH_DATA_LEN;

	if (nvdev == NULL || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
		limit = NETVSC_MTU;

	if (mtu < 68 || mtu > limit)
		return -EINVAL;

	nvdev->start_remove = true;
	cancel_work_sync(&ndevctx->work);
	netif_tx_disable(ndev);
	rndis_filter_device_remove(hdev);

	ndev->mtu = mtu;

	ndevctx->device_ctx = hdev;
	hv_set_drvdata(hdev, ndev);
	memset(&device_info, 0, sizeof(device_info));
	device_info.ring_size = ring_size;
	rndis_filter_device_add(hdev, &device_info);
	netif_tx_wake_all_queues(ndev);

	return 0;
}
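Most examples in this section follow the same skeleton: stop TX, reconfigure the hardware, update the software state, then reopen every queue with netif_tx_wake_all_queues(). A minimal sketch of that skeleton, with hypothetical foo_* names standing in for the driver-specific parts (the limits of 68/9000 are assumed, not taken from any example above):

#include <linux/netdevice.h>

/* foo_hw_set_mtu() is a hypothetical reconfigure hook; it stands in for
 * driver-specific teardown/bring-up such as rndis_filter_device_remove()
 * and rndis_filter_device_add() in the example above. */
int foo_hw_set_mtu(struct net_device *ndev, int new_mtu);

static int foo_change_mtu(struct net_device *ndev, int new_mtu)
{
	int err;

	if (new_mtu < 68 || new_mtu > 9000)	/* assumed device limits */
		return -EINVAL;

	/* Stop all TX queues and wait for in-flight transmits to finish. */
	netif_tx_disable(ndev);

	err = foo_hw_set_mtu(ndev, new_mtu);
	if (err) {
		netif_tx_wake_all_queues(ndev);	/* reopen on failure too */
		return err;
	}

	ndev->mtu = new_mtu;
	netif_tx_wake_all_queues(ndev);		/* reopen every TX queue */
	return 0;
}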
Example #2
static int netvsc_open(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *device_obj = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev = net_device_ctx->nvdev;
	struct rndis_device *rdev;
	int ret = 0;

	netif_carrier_off(net);

	/* Open up the device */
	ret = rndis_filter_open(device_obj);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	netif_tx_wake_all_queues(net);

	rdev = nvdev->extension;
	if (!rdev->link_state)
		netif_carrier_on(net);

	return ret;
}
Example #3
/* Change the current ring parameters, stopping the controller if
 * necessary so that we don't mess things up while we're in
 * motion.  We wait for the ring to be clean before reallocating
 * the rings. */
static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err = 0, i = 0;

	if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
		return -EINVAL;

	if (!is_power_of_2(rvals->rx_pending)) {
		printk("%s: Ring sizes must be a power of 2\n",
				dev->name);
		return -EINVAL;
	}

	if (rvals->tx_pending > GFAR_TX_MAX_RING_SIZE)
		return -EINVAL;

	if (!is_power_of_2(rvals->tx_pending)) {
		printk("%s: Ring sizes must be a power of 2\n",
				dev->name);
		return -EINVAL;
	}

	if (dev->flags & IFF_UP) {
		unsigned long flags;

		/* Halt TX and RX, and process the frames which
		 * have already been received */
		local_irq_save(flags);
		lock_tx_qs(priv);
		lock_rx_qs(priv);

		gfar_halt(dev);

		unlock_rx_qs(priv);
		unlock_tx_qs(priv);
		local_irq_restore(flags);

		for (i = 0; i < priv->num_rx_queues; i++)
			gfar_clean_rx_ring(priv->rx_queue[i],
					priv->rx_queue[i]->rx_ring_size);

		/* Now we take down the rings to rebuild them */
		stop_gfar(dev);
	}

	/* Change the size */
	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;
		priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
		priv->tx_queue[i]->num_txbdfree = priv->tx_queue[i]->tx_ring_size;
	}

	/* Rebuild the rings with the new size */
	if (dev->flags & IFF_UP) {
		err = startup_gfar(dev);
		netif_tx_wake_all_queues(dev);
	}
	return err;
}
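The validation at the top of gfar_sringparam() is worth isolating: the requested sizes must sit within the hardware maximum and be powers of two (a common reason for such a requirement is cheap index wrapping with a mask). A condensed sketch of just that check, with placeholder MY_* limits:

#include <linux/errno.h>
#include <linux/log2.h>

#define MY_RX_MAX_RING_SIZE 256	/* placeholder hardware limits */
#define MY_TX_MAX_RING_SIZE 256

static int my_check_ring_sizes(unsigned int rx_pending,
			       unsigned int tx_pending)
{
	if (rx_pending > MY_RX_MAX_RING_SIZE ||
	    tx_pending > MY_TX_MAX_RING_SIZE)
		return -EINVAL;

	/* gianfar rejects non-power-of-two sizes, as the checks above do. */
	if (!is_power_of_2(rx_pending) || !is_power_of_2(tx_pending))
		return -EINVAL;

	return 0;
}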
Example #4
/***********************************************************
 * mv_eth_start --                                         *
 *   start a network device. connect and enable interrupts *
 *   set hw defaults. fill rx buffers. restart phy link    *
 *   auto neg. set device link flags. report status.       *
 ***********************************************************/
static int mv_eth_start(struct net_device *dev)
{
	struct eth_port *priv = MV_ETH_PRIV(dev);
	int group;

	/* by default the link is down */
	netif_carrier_off(dev);

	/* Stop the TX queue - it will be enabled upon PHY status change after link-up interrupt/timer */
	netif_tx_stop_all_queues(dev);

	/* fill rx buffers, start rx/tx activity, set coalescing */
	if (mv_eth_start_internals(priv, dev->mtu) != 0) {
		printk(KERN_ERR "%s: start internals failed\n", dev->name);
		goto error;
	}

	/* enable polling on the port, must be used after netif_poll_disable */
	if (priv->flags & MV_ETH_F_CONNECT_LINUX)
		for (group = 0; group < CONFIG_MV_ETH_NAPI_GROUPS; group++)
			napi_enable(priv->napiGroup[group]);

	if ((priv->flags & MV_ETH_F_LINK_UP) && !(priv->flags & MV_ETH_F_EXT_SWITCH)) {

		if (mv_eth_ctrl_is_tx_enabled(priv)) {
			netif_carrier_on(dev);
			netif_tx_wake_all_queues(dev);
		}
		printk(KERN_NOTICE "%s: link up\n", dev->name);
	}
#ifdef CONFIG_MV_ETH_SWITCH_LINK
	if (priv->flags & MV_ETH_F_EXT_SWITCH) {
		struct eth_netdev *dev_priv = MV_DEV_PRIV(dev);

		dev_priv->link_map = 0;
		mv_switch_link_update_event(dev_priv->port_map, 1);
	}
#endif /* CONFIG_MV_ETH_SWITCH_LINK */

	if (priv->flags & MV_ETH_F_CONNECT_LINUX) {
		/* connect to port interrupt line */
		if (request_irq(dev->irq, mv_eth_isr, (IRQF_DISABLED|IRQF_SAMPLE_RANDOM), "mv_eth", priv)) {
			printk(KERN_ERR "cannot request irq %d for %s port %d\n", dev->irq, dev->name, priv->port);
			if (priv->flags & MV_ETH_F_CONNECT_LINUX)
				napi_disable(priv->napiGroup[CPU_GROUP_DEF]);
			goto error;
		}

		/* unmask interrupts */
		mv_eth_interrupts_unmask(priv);
		smp_call_function_many(cpu_online_mask, (smp_call_func_t)mv_eth_interrupts_unmask, (void *)priv, 1);

		printk(KERN_NOTICE "%s: started\n", dev->name);
	}
	return 0;
error:
	printk(KERN_ERR "%s: start failed\n", dev->name);
	return -1;
}
Example #5
/*
 * This function wakes up all queues in net_device
 */
void mwifiex_wake_up_net_dev_queue(struct net_device *netdev,
					struct mwifiex_adapter *adapter)
{
	unsigned long dev_queue_flags;

	spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags);
	netif_tx_wake_all_queues(netdev);
	spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags);
}
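In the mwifiex driver this helper is paired with a stop variant taken under the same lock. A sketch of that symmetric counterpart, assuming the adapter layout shown above (the my_ prefix marks it as illustrative, not the driver's own name):

/* Stop all TX queues under the same adapter->queue_lock, so stop and
 * wake cannot interleave. */
void my_stop_net_dev_queue(struct net_device *netdev,
			   struct mwifiex_adapter *adapter)
{
	unsigned long dev_queue_flags;

	spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags);
	netif_tx_stop_all_queues(netdev);
	spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags);
}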
Example #6
void wil_link_on(struct wil6210_priv *wil)
{
	struct net_device *ndev = wil_to_ndev(wil);

	wil_dbg_misc(wil, "%s()\n", __func__);

	netif_carrier_on(ndev);
	netif_tx_wake_all_queues(ndev);
}
Example #7
/* This is invoked whenever the remote processor completed processing
 * a TX msg we just sent, and the buffer is put back to the used ring.
 */
static void cfv_release_used_buf(struct virtqueue *vq_tx)
{
	struct cfv_info *cfv = vq_tx->vdev->priv;
	unsigned long flags;

	BUG_ON(vq_tx != cfv->vq_tx);

	for (;;) {
		unsigned int len;
		struct buf_info *buf_info;

		/* Get used buffer from used ring to recycle used descriptors */
		spin_lock_irqsave(&cfv->tx_lock, flags);
		buf_info = virtqueue_get_buf(vq_tx, &len);
		spin_unlock_irqrestore(&cfv->tx_lock, flags);

		/* Stop looping if there are no more buffers to free */
		if (!buf_info)
			break;

		free_buf_info(cfv, buf_info);

		/* watermark_tx indicates if we previously stopped the tx
		 * queues. If we have enough free slots in the virtio ring,
		 * re-establish the memory reserve and open up the tx queues.
		 */
		if (cfv->vq_tx->num_free <= cfv->watermark_tx)
			continue;

		/* Re-establish memory reserve */
		if (cfv->reserved_mem == 0 && cfv->genpool)
			cfv->reserved_mem =
				gen_pool_alloc(cfv->genpool,
					       cfv->reserved_size);

		/* Open up the tx queues */
		if (cfv->reserved_mem) {
			cfv->watermark_tx =
				virtqueue_get_vring_size(cfv->vq_tx);
			netif_tx_wake_all_queues(cfv->ndev);
			/* Buffers are recycled in cfv_netdev_tx, so
			 * disable notifications when queues are opened.
			 */
			virtqueue_disable_cb(cfv->vq_tx);
			++cfv->stats.tx_flow_on;
		} else {
			/* if no memory reserve, wait for more free slots */
			WARN_ON(cfv->watermark_tx >
			       virtqueue_get_vring_size(cfv->vq_tx));
			cfv->watermark_tx +=
				virtqueue_get_vring_size(cfv->vq_tx) / 4;
		}
	}
}
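The watermark logic above is one half of a flow-control pair: the transmit path stops the queues when the vring runs low, and this completion path wakes them once enough descriptors have been recycled. A sketch of the transmit-side check, assuming the cfv-like fields used above:

/* Sketch only: fields are assumed to match struct cfv_info above. */
static void my_maybe_flow_off(struct cfv_info *cfv)
{
	if (cfv->vq_tx->num_free <= cfv->watermark_tx) {
		netif_tx_stop_all_queues(cfv->ndev);	/* flow off */
		/* Re-enable completion notifications so the recycle path
		 * above runs and can wake the queues again. */
		virtqueue_enable_cb(cfv->vq_tx);
	}
}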
Example #8
File: main.c Project: krzk/linux
static int
nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr)
{
	int err;

	err = nfp_flower_cmsg_portmod(repr, true, repr->netdev->mtu, false);
	if (err)
		return err;

	netif_tx_wake_all_queues(repr->netdev);

	return 0;
}
Example #9
static void ccmni_tx_timeout(struct net_device *dev)
{
	ccmni_instance_t *ccmni = (ccmni_instance_t *)netdev_priv(dev);
	ccmni_ctl_block_t *ccmni_ctl = ccmni_ctl_blk[ccmni->md_id];

	CCMNI_INF_MSG(ccmni->md_id, "ccmni%d_tx_timeout: usage_cnt=%d, timeout=%ds\n",
		ccmni->index, atomic_read(&ccmni->usage), (ccmni->dev->watchdog_timeo/HZ));

	dev->stats.tx_errors++;
	if (atomic_read(&ccmni->usage) > 0) {
		if (ccmni_ctl->ccci_ops->md_ability & MODEM_CAP_CCMNI_MQ)
			netif_tx_wake_all_queues(dev);
		else
			netif_wake_queue(dev);
	}
}
Example #10
static void cpmac_hw_error(struct work_struct *work)
{
	struct cpmac_priv *priv =
		container_of(work, struct cpmac_priv, reset_work);

	spin_lock(&priv->rx_lock);
	cpmac_clear_rx(priv->dev);
	spin_unlock(&priv->rx_lock);
	cpmac_clear_tx(priv->dev);
	cpmac_hw_start(priv->dev);
	barrier();
	atomic_dec(&priv->reset_pending);

	netif_tx_wake_all_queues(priv->dev);
	cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);
}
Example #11
static int gfar_set_rx_csum(struct net_device *dev, uint32_t data)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	int err = 0, i = 0;

	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM))
		return -EOPNOTSUPP;

	if (dev->flags & IFF_UP) {
		/* Halt TX and RX, and process the frames which
		 * have already been received */
		local_irq_save(flags);
		lock_tx_qs(priv);
		lock_rx_qs(priv);

		gfar_halt(dev);

		unlock_tx_qs(priv);
		unlock_rx_qs(priv);
		local_irq_restore(flags);

		for (i = 0; i < priv->num_rx_queues; i++)
			gfar_clean_rx_ring(priv->rx_queue[i],
					priv->rx_queue[i]->rx_ring_size);

		/* Now we take down the rings to rebuild them */
		stop_gfar(dev);
	}

	spin_lock_irqsave(&priv->bflock, flags);
	priv->rx_csum_enable = data;
	spin_unlock_irqrestore(&priv->bflock, flags);

	if (dev->flags & IFF_UP) {
		err = startup_gfar(dev);
		netif_tx_wake_all_queues(dev);
	}
	return err;
}
Example #12
int gfar_set_features(struct net_device *dev, netdev_features_t features)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	int err = 0, i = 0;
	netdev_features_t changed = dev->features ^ features;

	if (changed & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX))
		gfar_vlan_mode(dev, features);

	if (!(changed & NETIF_F_RXCSUM))
		return 0;

	if (dev->flags & IFF_UP) {
		/* Halt TX and RX, and process the frames which
		 * have already been received
		 */
		local_irq_save(flags);
		lock_tx_qs(priv);
		lock_rx_qs(priv);

		gfar_halt(dev);

		unlock_tx_qs(priv);
		unlock_rx_qs(priv);
		local_irq_restore(flags);

		for (i = 0; i < priv->num_rx_queues; i++)
			gfar_clean_rx_ring(priv->rx_queue[i],
					   priv->rx_queue[i]->rx_ring_size);

		/* Now we take down the rings to rebuild them */
		stop_gfar(dev);

		dev->features = features;

		err = startup_gfar(dev);
		netif_tx_wake_all_queues(dev);
	}
	return err;
}
Example #13
static void sreset_start_adapter(struct rtw_adapter *padapter)
{
	struct mlme_priv *pmlmepriv;
	struct xmit_priv *pxmitpriv;

	if (padapter == NULL)
		return;

	pmlmepriv = &padapter->mlmepriv;
	pxmitpriv = &padapter->xmitpriv;

	DBG_8723A("%s(%s)\n", __func__, padapter->pnetdev->name);

	if (check_fwstate(pmlmepriv, _FW_LINKED))
		sreset_restore_network_status(padapter);

	/* TODO: OS and HCI independent */
	tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);

	mod_timer(&padapter->mlmepriv.dynamic_chk_timer,
		  jiffies + msecs_to_jiffies(2000));

	if (rtw_netif_queue_stopped(padapter->pnetdev))
		netif_tx_wake_all_queues(padapter->pnetdev);
}
Example #14
/*----------------------------------------------------------------------------

   @brief TX frame block timeout handler
          Resume TX, and reset TX frame count

   @param hdd_context_t pHddCtx
        Global hdd context

   @return NONE

----------------------------------------------------------------------------*/
void hddDevTmTxBlockTimeoutHandler(void *usrData)
{
   hdd_context_t        *pHddCtx = (hdd_context_t *)usrData;
   hdd_adapter_t        *staAdapter;
   /* Sanity, This should not happen */
   if(NULL == pHddCtx)
   {
      VOS_TRACE(VOS_MODULE_ID_HDD,VOS_TRACE_LEVEL_ERROR,
                "%s: NULL Context", __func__);
      VOS_ASSERT(0);
      return;
   }

   staAdapter = hdd_get_adapter(pHddCtx, WLAN_HDD_INFRA_STATION);

   if(NULL == staAdapter)
   {
      VOS_TRACE(VOS_MODULE_ID_HDD,VOS_TRACE_LEVEL_ERROR,
                "%s: NULL Adapter", __func__);
      VOS_ASSERT(0);
      return;
   }

   if(mutex_lock_interruptible(&pHddCtx->tmInfo.tmOperationLock))
   {
      VOS_TRACE(VOS_MODULE_ID_HDD,VOS_TRACE_LEVEL_ERROR,
                "%s: Acquire lock fail", __func__);
      return;
   }
   pHddCtx->tmInfo.txFrameCount = 0;

   /* Resume TX flow */
   netif_tx_wake_all_queues(staAdapter->dev);
   pHddCtx->tmInfo.qBlocked = VOS_FALSE;
   mutex_unlock(&pHddCtx->tmInfo.tmOperationLock);

   return;
}
Example #15
int rtw_hw_resume23a(struct rtw_adapter *padapter)
{
	struct pwrctrl_priv *pwrpriv;
	struct net_device *pnetdev;

	if (padapter) { /* system resume */
		pwrpriv = &padapter->pwrctrlpriv;
		pnetdev = padapter->pnetdev;
		DBG_8723A("==> rtw_hw_resume23a\n");
		down(&pwrpriv->lock);
		pwrpriv->bips_processing = true;
		rtw_reset_drv_sw23a(padapter);

		if (pm_netdev_open23a(pnetdev, false)) {
			up(&pwrpriv->lock);
			goto error_exit;
		}

		netif_device_attach(pnetdev);
		netif_carrier_on(pnetdev);

		if (!rtw_netif_queue_stopped(pnetdev))
			netif_tx_start_all_queues(pnetdev);
		else
			netif_tx_wake_all_queues(pnetdev);

		pwrpriv->bkeepfwalive = false;

		pwrpriv->rf_pwrstate = rf_on;
		pwrpriv->bips_processing = false;

		up(&pwrpriv->lock);
	} else {
		goto error_exit;
	}
	return 0;
error_exit:
	DBG_8723A("%s, Open net dev failed\n", __func__);
	return -1;
}
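Examples #15 through #17 all choose between netif_tx_start_all_queues() and netif_tx_wake_all_queues(). The distinction: start only clears the driver-stopped flag (sufficient on a freshly opened device with nothing queued), while wake additionally reschedules the qdisc so frames that queued up while the device was stopped get flushed. A sketch of that decision in isolation (my_resume_tx is an illustrative name):

#include <linux/netdevice.h>

static void my_resume_tx(struct net_device *ndev, bool was_stopped)
{
	if (was_stopped)
		netif_tx_wake_all_queues(ndev);	/* also kicks the qdisc */
	else
		netif_tx_start_all_queues(ndev);/* just clears the XOFF bit */
}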
Example #16
int netdev_open23a(struct net_device *pnetdev)
{
	struct rtw_adapter *padapter = netdev_priv(pnetdev);
	struct pwrctrl_priv *pwrctrlpriv;
	int ret = 0;
	int status;

	RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+871x_drv - dev_open\n"));
	DBG_8723A("+871x_drv - drv_open, bup =%d\n", padapter->bup);

	mutex_lock(&adapter_to_dvobj(padapter)->hw_init_mutex);

	pwrctrlpriv = &padapter->pwrctrlpriv;
	if (pwrctrlpriv->ps_flag) {
		padapter->net_closed = false;
		goto netdev_open23a_normal_process;
	}

	if (!padapter->bup) {
		padapter->bDriverStopped = false;
		padapter->bSurpriseRemoved = false;
		padapter->bCardDisableWOHSM = false;

		status = rtw_hal_init23a(padapter);
		if (status == _FAIL) {
			RT_TRACE(_module_os_intfs_c_, _drv_err_,
				 ("rtl871x_hal_init(): Can't init h/w!\n"));
			goto netdev_open23a_error;
		}

		DBG_8723A("MAC Address = "MAC_FMT"\n",
			  MAC_ARG(pnetdev->dev_addr));

		if (init_hw_mlme_ext23a(padapter) == _FAIL) {
			DBG_8723A("can't init mlme_ext_priv\n");
			goto netdev_open23a_error;
		}

		rtl8723au_inirp_init(padapter);

		rtw_cfg80211_init_wiphy(padapter);

		rtw_led_control(padapter, LED_CTL_NO_LINK);

		padapter->bup = true;
	}
	padapter->net_closed = false;

	mod_timer(&padapter->mlmepriv.dynamic_chk_timer,
		  jiffies + msecs_to_jiffies(2000));

	padapter->pwrctrlpriv.bips_processing = false;
	rtw_set_pwr_state_check_timer(&padapter->pwrctrlpriv);

	/* netif_carrier_on(pnetdev); call this once
	 * rtw23a_joinbss_event_cb reports success */
	if (!rtw_netif_queue_stopped(pnetdev))
		netif_tx_start_all_queues(pnetdev);
	else
		netif_tx_wake_all_queues(pnetdev);

netdev_open23a_normal_process:
	RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-871x_drv - dev_open\n"));
	DBG_8723A("-871x_drv - drv_open, bup =%d\n", padapter->bup);
exit:
	mutex_unlock(&adapter_to_dvobj(padapter)->hw_init_mutex);
	return ret;

netdev_open23a_error:
	padapter->bup = false;

	netif_carrier_off(pnetdev);
	netif_tx_stop_all_queues(pnetdev);

	RT_TRACE(_module_os_intfs_c_, _drv_err_,
		 ("-871x_drv - dev_open, fail!\n"));
	DBG_8723A("-871x_drv - drv_open fail, bup =%d\n", padapter->bup);

	ret = -1;
	goto exit;
}
Example #17
int _netdev_open(struct net_device *pnetdev)
{
	uint status;
	struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
	struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;

	RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+88eu_drv - dev_open\n"));
	DBG_88E("+88eu_drv - drv_open, bup =%d\n", padapter->bup);

	if (pwrctrlpriv->ps_flag) {
		padapter->net_closed = false;
		goto netdev_open_normal_process;
	}

	if (!padapter->bup) {
		padapter->bDriverStopped = false;
		padapter->bSurpriseRemoved = false;

		status = rtw_hal_init(padapter);
		if (status == _FAIL) {
			RT_TRACE(_module_os_intfs_c_, _drv_err_, ("rtl88eu_hal_init(): Can't init h/w!\n"));
			goto netdev_open_error;
		}

		pr_info("MAC Address = %pM\n", pnetdev->dev_addr);

		status = rtw_start_drv_threads(padapter);
		if (status == _FAIL) {
			pr_info("Initialize driver software resource Failed!\n");
			goto netdev_open_error;
		}

		if (init_hw_mlme_ext(padapter) == _FAIL) {
			pr_info("can't init mlme_ext_priv\n");
			goto netdev_open_error;
		}
		if (padapter->intf_start)
			padapter->intf_start(padapter);
		rtw_proc_init_one(pnetdev);

		rtw_led_control(padapter, LED_CTL_NO_LINK);

		padapter->bup = true;
	}
	padapter->net_closed = false;

	_set_timer(&padapter->mlmepriv.dynamic_chk_timer, 2000);

	padapter->pwrctrlpriv.bips_processing = false;
	rtw_set_pwr_state_check_timer(&padapter->pwrctrlpriv);

	if (!rtw_netif_queue_stopped(pnetdev))
		netif_tx_start_all_queues(pnetdev);
	else
		netif_tx_wake_all_queues(pnetdev);

netdev_open_normal_process:
	RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-88eu_drv - dev_open\n"));
	DBG_88E("-88eu_drv - drv_open, bup =%d\n", padapter->bup);
	return 0;

netdev_open_error:
	padapter->bup = false;
	netif_carrier_off(pnetdev);
	netif_tx_stop_all_queues(pnetdev);
	RT_TRACE(_module_os_intfs_c_, _drv_err_, ("-88eu_drv - dev_open, fail!\n"));
	DBG_88E("-88eu_drv - drv_open fail, bup =%d\n", padapter->bup);
	return -1;
}
Example #18
static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	bool running = netif_running(netdev);
	struct fjes_hw *hw = &adapter->hw;
	unsigned long flags;
	int ret = -EINVAL;
	int idx, epidx;

	for (idx = 0; fjes_support_mtu[idx] != 0; idx++) {
		if (new_mtu <= fjes_support_mtu[idx]) {
			new_mtu = fjes_support_mtu[idx];
			if (new_mtu == netdev->mtu)
				return 0;

			ret = 0;
			break;
		}
	}

	if (ret)
		return ret;

	if (running) {
		spin_lock_irqsave(&hw->rx_status_lock, flags);
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;
			hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
				~FJES_RX_MTU_CHANGING_DONE;
		}
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);

		netif_tx_stop_all_queues(netdev);
		netif_carrier_off(netdev);
		cancel_work_sync(&adapter->tx_stall_task);
		napi_disable(&adapter->napi);

		msleep(1000);

		netif_tx_stop_all_queues(netdev);
	}

	netdev->mtu = new_mtu;

	if (running) {
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;

			spin_lock_irqsave(&hw->rx_status_lock, flags);
			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
					    netdev->dev_addr,
					    netdev->mtu);

			hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
				FJES_RX_MTU_CHANGING_DONE;
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
		}

		netif_tx_wake_all_queues(netdev);
		netif_carrier_on(netdev);
		napi_enable(&adapter->napi);
		napi_schedule(&adapter->napi);
	}

	return ret;
}
Example #19
static void connect(struct backend_info *be)
{
	int err;
	struct xenbus_device *dev = be->dev;
	unsigned long credit_bytes, credit_usec;
	unsigned int queue_index;
	unsigned int requested_num_queues;
	struct xenvif_queue *queue;

	/* Check whether the frontend requested multiple queues
	 * and read the number requested.
	 */
	err = xenbus_scanf(XBT_NIL, dev->otherend,
			   "multi-queue-num-queues",
			   "%u", &requested_num_queues);
	if (err < 0) {
		requested_num_queues = 1; /* Fall back to single queue */
	} else if (requested_num_queues > xenvif_max_queues) {
		/* buggy or malicious guest */
		xenbus_dev_fatal(dev, err,
				 "guest requested %u queues, exceeding the maximum of %u.",
				 requested_num_queues, xenvif_max_queues);
		return;
	}

	err = xen_net_read_mac(dev, be->vif->fe_dev_addr);
	if (err) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
		return;
	}

	xen_net_read_rate(dev, &credit_bytes, &credit_usec);
	xen_unregister_watchers(be->vif);
	xen_register_watchers(dev, be->vif);
	read_xenbus_vif_flags(be);

	err = connect_ctrl_ring(be);
	if (err) {
		xenbus_dev_fatal(dev, err, "connecting control ring");
		return;
	}

	/* Use the number of queues requested by the frontend */
	be->vif->queues = vzalloc(requested_num_queues *
				  sizeof(struct xenvif_queue));
	if (!be->vif->queues) {
		xenbus_dev_fatal(dev, -ENOMEM,
				 "allocating queues");
		return;
	}

	be->vif->num_queues = requested_num_queues;
	be->vif->stalled_queues = requested_num_queues;

	for (queue_index = 0; queue_index < requested_num_queues; ++queue_index) {
		queue = &be->vif->queues[queue_index];
		queue->vif = be->vif;
		queue->id = queue_index;
		snprintf(queue->name, sizeof(queue->name), "%s-q%u",
				be->vif->dev->name, queue->id);

		err = xenvif_init_queue(queue);
		if (err) {
			/* xenvif_init_queue() cleans up after itself on
			 * failure, but we need to clean up any previously
			 * initialised queues. Set num_queues to queue_index
			 * so that earlier queues can be destroyed using the
			 * regular disconnect logic.
			 */
			be->vif->num_queues = queue_index;
			goto err;
		}

		queue->credit_bytes = credit_bytes;
		queue->remaining_credit = credit_bytes;
		queue->credit_usec = credit_usec;

		err = connect_data_rings(be, queue);
		if (err) {
			/* connect_data_rings() cleans up after itself on
			 * failure, but we need to clean up after
			 * xenvif_init_queue() here, and also clean up any
			 * previously initialised queues.
			 */
			xenvif_deinit_queue(queue);
			be->vif->num_queues = queue_index;
			goto err;
		}
	}

#ifdef CONFIG_DEBUG_FS
	xenvif_debugfs_addif(be->vif);
#endif /* CONFIG_DEBUG_FS */

	/* Initialisation completed, tell core driver the number of
	 * active queues.
	 */
	rtnl_lock();
	netif_set_real_num_tx_queues(be->vif->dev, requested_num_queues);
	netif_set_real_num_rx_queues(be->vif->dev, requested_num_queues);
	rtnl_unlock();

	xenvif_carrier_on(be->vif);

	unregister_hotplug_status_watch(be);
	err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
				   hotplug_status_changed,
				   "%s/%s", dev->nodename, "hotplug-status");
	if (!err)
		be->have_hotplug_status_watch = 1;

	netif_tx_wake_all_queues(be->vif->dev);

	return;

err:
	if (be->vif->num_queues > 0)
		xenvif_disconnect_data(be->vif); /* Clean up existing queues */
	vfree(be->vif->queues);
	be->vif->queues = NULL;
	be->vif->num_queues = 0;
	xenvif_disconnect_ctrl(be->vif);
	return;
}
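Before waking the queues, connect() publishes the number of active queues under the rtnl lock. That closing sequence, reduced to its essentials (a sketch mirroring the example above; error handling omitted and the function name is illustrative):

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static void my_activate_queues(struct net_device *dev, unsigned int n)
{
	/* Tell the core how many TX/RX queues are live; both calls
	 * return 0 on success (ignored here, as in connect() above). */
	rtnl_lock();
	netif_set_real_num_tx_queues(dev, n);
	netif_set_real_num_rx_queues(dev, n);
	rtnl_unlock();

	netif_tx_wake_all_queues(dev);	/* open TX on all n queues */
}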