static int netvsc_set_channels(struct net_device *net, struct ethtool_channels *channels) { struct net_device_context *net_device_ctx = netdev_priv(net); struct hv_device *dev = net_device_ctx->device_ctx; struct netvsc_device *nvdev = net_device_ctx->nvdev; unsigned int orig, count = channels->combined_count; struct netvsc_device_info device_info; bool was_opened; int ret = 0; /* We do not support separate count for rx, tx, or other */ if (count == 0 || channels->rx_count || channels->tx_count || channels->other_count) return -EINVAL; if (count > net->num_tx_queues || count > VRSS_CHANNEL_MAX) return -EINVAL; if (!nvdev || nvdev->destroy) return -ENODEV; if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5) return -EINVAL; if (count > nvdev->max_chn) return -EINVAL; orig = nvdev->num_chn; was_opened = rndis_filter_opened(nvdev); if (was_opened) rndis_filter_close(nvdev); rndis_filter_device_remove(dev, nvdev); memset(&device_info, 0, sizeof(device_info)); device_info.num_chn = count; device_info.ring_size = ring_size; nvdev = rndis_filter_device_add(dev, &device_info); if (!IS_ERR(nvdev)) { netif_set_real_num_tx_queues(net, nvdev->num_chn); netif_set_real_num_rx_queues(net, nvdev->num_chn); ret = PTR_ERR(nvdev); } else { device_info.num_chn = orig; rndis_filter_device_add(dev, &device_info); } if (was_opened) rndis_filter_open(nvdev); /* We may have missed link change notifications */ net_device_ctx->last_reconfig = 0; schedule_delayed_work(&net_device_ctx->dwork, 0); return ret; }
static int netvsc_close(struct net_device *net) { struct net_device_context *net_device_ctx = netdev_priv(net); struct hv_device *device_obj = net_device_ctx->device_ctx; struct netvsc_device *nvdev = net_device_ctx->nvdev; int ret; u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20; struct vmbus_channel *chn; netif_tx_disable(net); /* Make sure netvsc_set_multicast_list doesn't re-enable filter! */ cancel_work_sync(&net_device_ctx->work); ret = rndis_filter_close(device_obj); if (ret != 0) { netdev_err(net, "unable to close device (ret %d).\n", ret); return ret; } /* Ensure pending bytes in ring are read */ while (true) { aread = 0; for (i = 0; i < nvdev->num_chn; i++) { chn = nvdev->chn_table[i]; if (!chn) continue; hv_get_ringbuffer_availbytes(&chn->inbound, &aread, &awrite); if (aread) break; hv_get_ringbuffer_availbytes(&chn->outbound, &aread, &awrite); if (aread) break; } retry++; if (retry > retry_max || aread == 0) break; msleep(msec); if (msec < 1000) msec *= 2; } if (aread) { netdev_err(net, "Ring buffer not empty after closing rndis\n"); ret = -ETIMEDOUT; } return ret; }
static int netvsc_close(struct net_device *net) { struct net_device_context *net_device_ctx = netdev_priv(net); struct netvsc_device *nvdev = net_device_ctx->nvdev; int ret = 0; u32 aread, i, msec = 10, retry = 0, retry_max = 20; struct vmbus_channel *chn; netif_tx_disable(net); /* No need to close rndis filter if it is removed already */ if (!nvdev) goto out; ret = rndis_filter_close(nvdev); if (ret != 0) { netdev_err(net, "unable to close device (ret %d).\n", ret); return ret; } /* Ensure pending bytes in ring are read */ while (true) { aread = 0; for (i = 0; i < nvdev->num_chn; i++) { chn = nvdev->chan_table[i].channel; if (!chn) continue; aread = hv_get_bytes_to_read(&chn->inbound); if (aread) break; aread = hv_get_bytes_to_read(&chn->outbound); if (aread) break; } retry++; if (retry > retry_max || aread == 0) break; msleep(msec); if (msec < 1000) msec *= 2; } if (aread) { netdev_err(net, "Ring buffer not empty after closing rndis\n"); ret = -ETIMEDOUT; } out: return ret; }
static int netvsc_change_mtu(struct net_device *ndev, int mtu) { struct net_device_context *ndevctx = netdev_priv(ndev); struct netvsc_device *nvdev = ndevctx->nvdev; struct hv_device *hdev = ndevctx->device_ctx; int orig_mtu = ndev->mtu; struct netvsc_device_info device_info; int limit = ETH_DATA_LEN; bool was_opened; int ret = 0; if (!nvdev || nvdev->destroy) return -ENODEV; if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2) limit = NETVSC_MTU - ETH_HLEN; if (mtu < NETVSC_MTU_MIN || mtu > limit) return -EINVAL; netif_device_detach(ndev); was_opened = rndis_filter_opened(nvdev); if (was_opened) rndis_filter_close(nvdev); memset(&device_info, 0, sizeof(device_info)); device_info.ring_size = ring_size; device_info.num_chn = nvdev->num_chn; rndis_filter_device_remove(hdev, nvdev); ndev->mtu = mtu; nvdev = rndis_filter_device_add(hdev, &device_info); if (IS_ERR(nvdev)) { ret = PTR_ERR(nvdev); /* Attempt rollback to original MTU */ ndev->mtu = orig_mtu; rndis_filter_device_add(hdev, &device_info); } if (was_opened) rndis_filter_open(nvdev); netif_device_attach(ndev); /* We may have missed link change notifications */ schedule_delayed_work(&ndevctx->dwork, 0); return ret; }
/*
 * netvsc_close() - stop the transmit queue and close the RNDIS filter.
 *
 * A close failure is logged but the error is still returned to the
 * caller.
 */
static int netvsc_close(struct net_device *net)
{
	struct net_device_context *ctx = netdev_priv(net);
	struct hv_device *dev = &ctx->device_ctx->device_obj;
	int status;

	netif_stop_queue(net);

	status = rndis_filter_close(dev);
	if (status)
		DPRINT_ERR(NETVSC_DRV, "unable to close device (ret %d).",
			   status);

	return status;
}
/*
 * netvsc_close() - disable the transmit path and close the RNDIS
 * filter.  Pending multicast-list work is flushed first so it cannot
 * re-enable the receive filter after the close.
 */
static int netvsc_close(struct net_device *net)
{
	struct net_device_context *ctx = netdev_priv(net);
	struct hv_device *dev = ctx->device_ctx;
	int status;

	netif_tx_disable(net);

	/* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
	cancel_work_sync(&ctx->work);

	status = rndis_filter_close(dev);
	if (status)
		netdev_err(net, "unable to close device (ret %d).\n", status);

	return status;
}