Example #1
static int netvsc_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct net_device *net = NULL;
	struct net_device_context *net_device_ctx;
	struct netvsc_device_info device_info;
	struct netvsc_device *nvdev;
	int ret;

	net = alloc_etherdev_mq(sizeof(struct net_device_context),
				num_online_cpus());
	if (!net)
		return -ENOMEM;

	netif_carrier_off(net);

	net_device_ctx = netdev_priv(net);
	net_device_ctx->device_ctx = dev;
	hv_set_drvdata(dev, net);
	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
	INIT_WORK(&net_device_ctx->work, do_set_multicast);

	net->netdev_ops = &device_ops;

	net->hw_features = NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_IP_CSUM |
				NETIF_F_TSO;
	net->features = NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_SG | NETIF_F_RXCSUM |
			NETIF_F_IP_CSUM | NETIF_F_TSO;

	net->ethtool_ops = &ethtool_ops;
	SET_NETDEV_DEV(net, &dev->device);

	/* Notify the netvsc driver of the new device */
	device_info.ring_size = ring_size;
	ret = rndis_filter_device_add(dev, &device_info);
	if (ret != 0) {
		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
		free_netdev(net);
		hv_set_drvdata(dev, NULL);
		return ret;
	}
	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);

	nvdev = hv_get_drvdata(dev);
	netif_set_real_num_tx_queues(net, nvdev->num_chn);
	netif_set_real_num_rx_queues(net, nvdev->num_chn);

	ret = register_netdev(net);
	if (ret != 0) {
		pr_err("Unable to register netdev.\n");
		rndis_filter_device_remove(dev);
		free_netdev(net);
	} else {
		schedule_delayed_work(&net_device_ctx->dwork, 0);
	}

	return ret;
}
Example #2
static struct netvsc_device *alloc_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device;
	struct net_device *ndev = hv_get_drvdata(device);

	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
	if (!net_device)
		return NULL;

	net_device->cb_buffer = kzalloc(NETVSC_PACKET_SIZE, GFP_KERNEL);
	if (!net_device->cb_buffer) {
		kfree(net_device);
		return NULL;
	}

	init_waitqueue_head(&net_device->wait_drain);
	net_device->start_remove = false;
	net_device->destroy = false;
	atomic_set(&net_device->open_cnt, 0);
	atomic_set(&net_device->vf_use_cnt, 0);
	net_device->dev = device;
	net_device->ndev = ndev;
	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;

	net_device->vf_netdev = NULL;
	net_device->vf_inject = false;

	hv_set_drvdata(device, net_device);
	return net_device;
}
Example #3
/*
 * netvsc_device_remove - Callback when the root bus device is removed
 */
int netvsc_device_remove(struct hv_device *device)
{
	struct netvsc_device *net_device;
	unsigned long flags;

	net_device = hv_get_drvdata(device);

	netvsc_disconnect_vsp(net_device);

	/*
	 * Since we have already drained, we don't need to busy wait
	 * as was done in final_release_stor_device()
	 * Note that we cannot set the ext pointer to NULL until
	 * we have drained - to drain the outgoing packets, we need to
	 * allow incoming packets.
	 */

	spin_lock_irqsave(&device->channel->inbound_lock, flags);
	hv_set_drvdata(device, NULL);
	spin_unlock_irqrestore(&device->channel->inbound_lock, flags);

	/*
	 * At this point, no one should be accessing net_device
	 * except in here
	 */
	dev_notice(&device->device, "net device safe to remove\n");

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

	/* Release all resources */
	vfree(net_device->sub_cb_buf);
	free_netvsc_device(net_device);
	return 0;
}
Example #4
static struct netvsc_device *alloc_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device;
	struct net_device *ndev = hv_get_drvdata(device);
	int i;

	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
	if (!net_device)
		return NULL;

	net_device->cb_buffer = kzalloc(NETVSC_PACKET_SIZE, GFP_KERNEL);
	if (!net_device->cb_buffer) {
		kfree(net_device);
		return NULL;
	}

	init_waitqueue_head(&net_device->wait_drain);
	net_device->start_remove = false;
	net_device->destroy = false;
	net_device->dev = device;
	net_device->ndev = ndev;
	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;

	for (i = 0; i < num_online_cpus(); i++)
		spin_lock_init(&net_device->msd[i].lock);

	hv_set_drvdata(device, net_device);
	return net_device;
}
Example #5
static void mousevsc_free_device(struct mousevsc_dev *device)
{
	kfree(device->hid_desc);
	kfree(device->report_desc);
	hv_set_drvdata(device->device, NULL);
	kfree(device);
}
Example #6
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct hv_device *hdev = ndevctx->device_ctx;
	struct netvsc_device *nvdev = hv_get_drvdata(hdev);
	struct netvsc_device_info device_info;
	int limit = ETH_DATA_LEN;

	if (nvdev == NULL || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
		limit = NETVSC_MTU - ETH_HLEN;

	if (mtu < 68 || mtu > limit)
		return -EINVAL;

	nvdev->start_remove = true;
	cancel_work_sync(&ndevctx->work);
	netif_tx_disable(ndev);
	rndis_filter_device_remove(hdev);

	ndev->mtu = mtu;

	ndevctx->device_ctx = hdev;
	hv_set_drvdata(hdev, ndev);
	device_info.ring_size = ring_size;
	rndis_filter_device_add(hdev, &device_info);
	netif_wake_queue(ndev);

	return 0;
}
Example #7
static int hv_kbd_probe(struct hv_device *hv_dev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct hv_kbd_dev *kbd_dev;
	struct serio *hv_serio;
	int error;

	kbd_dev = kzalloc(sizeof(struct hv_kbd_dev), GFP_KERNEL);
	hv_serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
	if (!kbd_dev || !hv_serio) {
		error = -ENOMEM;
		goto err_free_mem;
	}

	kbd_dev->hv_dev = hv_dev;
	kbd_dev->hv_serio = hv_serio;
	spin_lock_init(&kbd_dev->lock);
	init_completion(&kbd_dev->wait_event);
	hv_set_drvdata(hv_dev, kbd_dev);

	hv_serio->dev.parent  = &hv_dev->device;
	hv_serio->id.type = SERIO_8042_XL;
	hv_serio->port_data = kbd_dev;
	strlcpy(hv_serio->name, dev_name(&hv_dev->device),
		sizeof(hv_serio->name));
	strlcpy(hv_serio->phys, dev_name(&hv_dev->device),
		sizeof(hv_serio->phys));

	hv_serio->start = hv_kbd_start;
	hv_serio->stop = hv_kbd_stop;

	error = vmbus_open(hv_dev->channel,
			   KBD_VSC_SEND_RING_BUFFER_SIZE,
			   KBD_VSC_RECV_RING_BUFFER_SIZE,
			   NULL, 0,
			   hv_kbd_on_channel_callback,
			   hv_dev);
	if (error)
		goto err_free_mem;

	error = hv_kbd_connect_to_vsp(hv_dev);
	if (error)
		goto err_close_vmbus;

	serio_register_port(kbd_dev->hv_serio);

	device_init_wakeup(&hv_dev->device, true);

	return 0;

err_close_vmbus:
	vmbus_close(hv_dev->channel);
err_free_mem:
	kfree(hv_serio);
	kfree(kbd_dev);
	return error;
}
Example #8
static int hv_kbd_remove(struct hv_device *hv_dev)
{
	struct hv_kbd_dev *kbd_dev = hv_get_drvdata(hv_dev);

	serio_unregister_port(kbd_dev->hv_serio);
	vmbus_close(hv_dev->channel);
	kfree(kbd_dev);

	hv_set_drvdata(hv_dev, NULL);

	return 0;
}
Example #9
static int
hv_uio_remove(struct hv_device *dev)
{
	struct hv_uio_private_data *pdata = hv_get_drvdata(dev);

	if (!pdata)
		return 0;

	uio_unregister_device(&pdata->info);
	hv_uio_cleanup(dev, pdata);
	hv_set_drvdata(dev, NULL);
	vmbus_close(dev->channel);
	kfree(pdata);
	return 0;
}
Example #10
static struct mousevsc_dev *mousevsc_alloc_device(struct hv_device *device)
{
	struct mousevsc_dev *input_dev;

	input_dev = kzalloc(sizeof(struct mousevsc_dev), GFP_KERNEL);

	if (!input_dev)
		return NULL;

	input_dev->device = device;
	hv_set_drvdata(device, input_dev);
	init_completion(&input_dev->wait_event);
	input_dev->init_complete = false;

	return input_dev;
}
Example #11
static struct netvsc_device *alloc_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device;
	struct net_device *ndev = hv_get_drvdata(device);

	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
	if (!net_device)
		return NULL;

	init_waitqueue_head(&net_device->wait_drain);
	net_device->start_remove = false;
	net_device->destroy = false;
	net_device->dev = device;
	net_device->ndev = ndev;

	hv_set_drvdata(device, net_device);
	return net_device;
}
Example #12
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct hv_device *hdev = ndevctx->device_ctx;
	struct netvsc_device *nvdev = hv_get_drvdata(hdev);
	struct netvsc_device_info device_info;
	int limit = ETH_DATA_LEN;
	int ret = 0;

	if (nvdev == NULL || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
		limit = NETVSC_MTU - ETH_HLEN;

	if (mtu < NETVSC_MTU_MIN || mtu > limit)
		return -EINVAL;

	ret = netvsc_close(ndev);
	if (ret)
		goto out;

	nvdev->start_remove = true;
	rndis_filter_device_remove(hdev);

	ndev->mtu = mtu;

	ndevctx->device_ctx = hdev;
	hv_set_drvdata(hdev, ndev);

	memset(&device_info, 0, sizeof(device_info));
	device_info.ring_size = ring_size;
	device_info.num_chn = nvdev->num_chn;
	device_info.max_num_vrss_chns = max_num_vrss_chns;
	rndis_filter_device_add(hdev, &device_info);

out:
	netvsc_open(ndev);

	return ret;
}
Example #13
static int netvsc_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct net_device *net = NULL;
	struct net_device_context *net_device_ctx;
	struct netvsc_device_info device_info;
	struct netvsc_device *nvdev;
	int ret;

	net = alloc_etherdev(sizeof(struct net_device_context));

	if (!net)
		return -ENOMEM;

	netif_carrier_off(net);

	net_device_ctx = netdev_priv(net);
	net_device_ctx->device_ctx = dev;
	net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
	if (netif_msg_probe(net_device_ctx))
		netdev_dbg(net, "netvsc msg_enable: %d\n",
			net_device_ctx->msg_enable);

	hv_set_drvdata(dev, net);
	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
	INIT_WORK(&net_device_ctx->work, do_set_multicast,
		  (void *)&net_device_ctx->work);

#if defined(RHEL_RELEASE_VERSION) && (RHEL_RELEASE_CODE > 1291)
	net->netdev_ops = &device_ops;
#else
	net->open               = netvsc_open;
	net->hard_start_xmit    = netvsc_start_xmit;
	net->stop               = netvsc_close;
	net->get_stats          = netvsc_get_stats;
	net->set_multicast_list = netvsc_set_multicast_list;
	net->change_mtu         = netvsc_change_mtu;
#endif

#if defined(RHEL_RELEASE_VERSION) && (RHEL_RELEASE_CODE > 1291)
	net->hw_features = NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_IP_CSUM |
				NETIF_F_TSO;
#endif
	net->features = NETIF_F_HW_VLAN_TX | NETIF_F_SG | NETIF_F_RXCSUM |
			NETIF_F_IP_CSUM | NETIF_F_TSO;

	net->ethtool_ops = &ethtool_ops;
	SET_NETDEV_DEV(net, &dev->device);

	/* Notify the netvsc driver of the new device */
	device_info.ring_size = ring_size;
	ret = rndis_filter_device_add(dev, &device_info);
	if (ret != 0) {
		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
		free_netdev(net);
		hv_set_drvdata(dev, NULL);
		return ret;
	}
	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);

	nvdev = hv_get_drvdata(dev);
#ifdef NOTYET
	netif_set_real_num_tx_queues(net, nvdev->num_chn);
	netif_set_real_num_rx_queues(net, nvdev->num_chn);
	dev_info(&dev->device, "real num tx,rx queues:%u, %u\n",
		 net->real_num_tx_queues, nvdev->num_chn);
#endif

	ret = register_netdev(net);
	if (ret != 0) {
		pr_err("Unable to register netdev.\n");
		rndis_filter_device_remove(dev);
		free_netdev(net);
	} else {
		schedule_delayed_work(&net_device_ctx->dwork.work, 0);
	}

	return ret;
}
Example #14
static int
hv_uio_probe(struct hv_device *dev,
	     const struct hv_vmbus_device_id *dev_id)
{
	struct vmbus_channel *channel = dev->channel;
	struct hv_uio_private_data *pdata;
	void *ring_buffer;
	int ret;

	/* Communicating with host has to be via shared memory not hypercall */
	if (!channel->offermsg.monitor_allocated) {
		dev_err(&dev->device, "vmbus channel requires hypercall\n");
		return -ENOTSUPP;
	}

	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	ret = vmbus_alloc_ring(channel, HV_RING_SIZE * PAGE_SIZE,
			       HV_RING_SIZE * PAGE_SIZE);
	if (ret)
		goto fail;

	set_channel_read_mode(channel, HV_CALL_ISR);

	/* Fill general uio info */
	pdata->info.name = "uio_hv_generic";
	pdata->info.version = DRIVER_VERSION;
	pdata->info.irqcontrol = hv_uio_irqcontrol;
	pdata->info.open = hv_uio_open;
	pdata->info.release = hv_uio_release;
	pdata->info.irq = UIO_IRQ_CUSTOM;
	atomic_set(&pdata->refcnt, 0);

	/* mem resources */
	pdata->info.mem[TXRX_RING_MAP].name = "txrx_rings";
	ring_buffer = page_address(channel->ringbuffer_page);
	pdata->info.mem[TXRX_RING_MAP].addr
		= (uintptr_t)virt_to_phys(ring_buffer);
	pdata->info.mem[TXRX_RING_MAP].size
		= channel->ringbuffer_pagecount << PAGE_SHIFT;
	pdata->info.mem[TXRX_RING_MAP].memtype = UIO_MEM_IOVA;

	pdata->info.mem[INT_PAGE_MAP].name = "int_page";
	pdata->info.mem[INT_PAGE_MAP].addr
		= (uintptr_t)vmbus_connection.int_page;
	pdata->info.mem[INT_PAGE_MAP].size = PAGE_SIZE;
	pdata->info.mem[INT_PAGE_MAP].memtype = UIO_MEM_LOGICAL;

	pdata->info.mem[MON_PAGE_MAP].name = "monitor_page";
	pdata->info.mem[MON_PAGE_MAP].addr
		= (uintptr_t)vmbus_connection.monitor_pages[1];
	pdata->info.mem[MON_PAGE_MAP].size = PAGE_SIZE;
	pdata->info.mem[MON_PAGE_MAP].memtype = UIO_MEM_LOGICAL;

	pdata->recv_buf = vzalloc(RECV_BUFFER_SIZE);
	if (pdata->recv_buf == NULL) {
		ret = -ENOMEM;
		goto fail_close;
	}

	ret = vmbus_establish_gpadl(channel, pdata->recv_buf,
				    RECV_BUFFER_SIZE, &pdata->recv_gpadl);
	if (ret)
		goto fail_close;

	/* put Global Physical Address Label in name */
	snprintf(pdata->recv_name, sizeof(pdata->recv_name),
		 "recv:%u", pdata->recv_gpadl);
	pdata->info.mem[RECV_BUF_MAP].name = pdata->recv_name;
	pdata->info.mem[RECV_BUF_MAP].addr
		= (uintptr_t)pdata->recv_buf;
	pdata->info.mem[RECV_BUF_MAP].size = RECV_BUFFER_SIZE;
	pdata->info.mem[RECV_BUF_MAP].memtype = UIO_MEM_VIRTUAL;

	pdata->send_buf = vzalloc(SEND_BUFFER_SIZE);
	if (pdata->send_buf == NULL) {
		ret = -ENOMEM;
		goto fail_close;
	}

	ret = vmbus_establish_gpadl(channel, pdata->send_buf,
				    SEND_BUFFER_SIZE, &pdata->send_gpadl);
	if (ret)
		goto fail_close;

	snprintf(pdata->send_name, sizeof(pdata->send_name),
		 "send:%u", pdata->send_gpadl);
	pdata->info.mem[SEND_BUF_MAP].name = pdata->send_name;
	pdata->info.mem[SEND_BUF_MAP].addr
		= (uintptr_t)pdata->send_buf;
	pdata->info.mem[SEND_BUF_MAP].size = SEND_BUFFER_SIZE;
	pdata->info.mem[SEND_BUF_MAP].memtype = UIO_MEM_VIRTUAL;

	pdata->info.priv = pdata;
	pdata->device = dev;

	ret = uio_register_device(&dev->device, &pdata->info);
	if (ret) {
		dev_err(&dev->device, "hv_uio register failed\n");
		goto fail_close;
	}

	ret = sysfs_create_bin_file(&channel->kobj, &ring_buffer_bin_attr);
	if (ret)
		dev_notice(&dev->device,
			   "sysfs create ring bin file failed; %d\n", ret);

	hv_set_drvdata(dev, pdata);

	return 0;

fail_close:
	hv_uio_cleanup(dev, pdata);
fail:
	kfree(pdata);

	return ret;
}
Example #15
static int
hv_uio_probe(struct hv_device *dev,
	     const struct hv_vmbus_device_id *dev_id)
{
	struct hv_uio_private_data *pdata;
	int ret;

	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	ret = vmbus_open(dev->channel, HV_RING_SIZE * PAGE_SIZE,
			 HV_RING_SIZE * PAGE_SIZE, NULL, 0,
			 hv_uio_channel_cb, pdata);
	if (ret)
		goto fail;

	/* Communicating with host has to be via shared memory not hypercall */
	if (!dev->channel->offermsg.monitor_allocated) {
		dev_err(&dev->device, "vmbus channel requires hypercall\n");
		ret = -ENOTSUPP;
		goto fail_close;
	}

	dev->channel->inbound.ring_buffer->interrupt_mask = 1;
	set_channel_read_mode(dev->channel, HV_CALL_ISR);

	/* Fill general uio info */
	pdata->info.name = "uio_hv_generic";
	pdata->info.version = DRIVER_VERSION;
	pdata->info.irqcontrol = hv_uio_irqcontrol;
	pdata->info.irq = UIO_IRQ_CUSTOM;

	/* mem resources */
	pdata->info.mem[TXRX_RING_MAP].name = "txrx_rings";
	pdata->info.mem[TXRX_RING_MAP].addr
		= (phys_addr_t)dev->channel->ringbuffer_pages;
	pdata->info.mem[TXRX_RING_MAP].size
		= dev->channel->ringbuffer_pagecount << PAGE_SHIFT;
	pdata->info.mem[TXRX_RING_MAP].memtype = UIO_MEM_LOGICAL;

	pdata->info.mem[INT_PAGE_MAP].name = "int_page";
	pdata->info.mem[INT_PAGE_MAP].addr
		= (phys_addr_t)vmbus_connection.int_page;
	pdata->info.mem[INT_PAGE_MAP].size = PAGE_SIZE;
	pdata->info.mem[INT_PAGE_MAP].memtype = UIO_MEM_LOGICAL;

	pdata->info.mem[MON_PAGE_MAP].name = "monitor_page";
	pdata->info.mem[MON_PAGE_MAP].addr
		= (phys_addr_t)vmbus_connection.monitor_pages[1];
	pdata->info.mem[MON_PAGE_MAP].size = PAGE_SIZE;
	pdata->info.mem[MON_PAGE_MAP].memtype = UIO_MEM_LOGICAL;
	pdata->recv_buf = vzalloc(RECV_BUFFER_SIZE);
	if (pdata->recv_buf == NULL) {
		ret = -ENOMEM;
		goto fail_close;
	}

	ret = vmbus_establish_gpadl(dev->channel, pdata->recv_buf,
				    RECV_BUFFER_SIZE, &pdata->recv_gpadl);
	if (ret)
		goto fail_close;

	/* put Global Physical Address Label in name */
	snprintf(pdata->recv_name, sizeof(pdata->recv_name),
		 "recv:%u", pdata->recv_gpadl);
	pdata->info.mem[RECV_BUF_MAP].name = pdata->recv_name;
	pdata->info.mem[RECV_BUF_MAP].addr
		= (phys_addr_t)pdata->recv_buf;
	pdata->info.mem[RECV_BUF_MAP].size = RECV_BUFFER_SIZE;
	pdata->info.mem[RECV_BUF_MAP].memtype = UIO_MEM_VIRTUAL;


	pdata->send_buf = vzalloc(SEND_BUFFER_SIZE);
	if (pdata->send_buf == NULL) {
		ret = -ENOMEM;
		goto fail_close;
	}

	ret = vmbus_establish_gpadl(dev->channel, pdata->send_buf,
				    SEND_BUFFER_SIZE, &pdata->send_gpadl);
	if (ret)
		goto fail_close;

	snprintf(pdata->send_name, sizeof(pdata->send_name),
		 "send:%u", pdata->send_gpadl);
	pdata->info.mem[SEND_BUF_MAP].name = pdata->send_name;
	pdata->info.mem[SEND_BUF_MAP].addr
		= (phys_addr_t)pdata->send_buf;
	pdata->info.mem[SEND_BUF_MAP].size = SEND_BUFFER_SIZE;
	pdata->info.mem[SEND_BUF_MAP].memtype = UIO_MEM_VIRTUAL;

	pdata->info.priv = pdata;
	pdata->device = dev;

	ret = uio_register_device(&dev->device, &pdata->info);
	if (ret) {
		dev_err(&dev->device, "hv_uio register failed\n");
		goto fail_close;
	}

	vmbus_set_chn_rescind_callback(dev->channel, hv_uio_rescind);

	hv_set_drvdata(dev, pdata);

	return 0;

fail_close:
	hv_uio_cleanup(dev, pdata);
	vmbus_close(dev->channel);
fail:
	kfree(pdata);

	return ret;
}
Example #16
static int hvfb_probe(struct hv_device *hdev,
		      const struct hv_vmbus_device_id *dev_id)
{
	struct fb_info *info;
	struct hvfb_par *par;
	int ret;

	info = framebuffer_alloc(sizeof(struct hvfb_par), &hdev->device);
	if (!info) {
		pr_err("No memory for framebuffer info\n");
		return -ENOMEM;
	}

	par = info->par;
	par->info = info;
	par->fb_ready = false;
	init_completion(&par->wait);
	INIT_DELAYED_WORK(&par->dwork, hvfb_update_work);

	/* Connect to VSP */
	hv_set_drvdata(hdev, info);
	ret = synthvid_connect_vsp(hdev);
	if (ret) {
		pr_err("Unable to connect to VSP\n");
		goto error1;
	}

	ret = hvfb_getmem(info);
	if (ret) {
		pr_err("No memory for framebuffer\n");
		goto error2;
	}

	hvfb_get_option(info);
	pr_info("Screen resolution: %dx%d, Color depth: %d\n",
		screen_width, screen_height, screen_depth);


	/* Set up fb_info */
	info->flags = FBINFO_DEFAULT;

	info->var.xres_virtual = info->var.xres = screen_width;
	info->var.yres_virtual = info->var.yres = screen_height;
	info->var.bits_per_pixel = screen_depth;

	if (info->var.bits_per_pixel == 16) {
		info->var.red = (struct fb_bitfield){11, 5, 0};
		info->var.green = (struct fb_bitfield){5, 6, 0};
		info->var.blue = (struct fb_bitfield){0, 5, 0};
		info->var.transp = (struct fb_bitfield){0, 0, 0};
	} else {
		info->var.red = (struct fb_bitfield){16, 8, 0};
		info->var.green = (struct fb_bitfield){8, 8, 0};
		info->var.blue = (struct fb_bitfield){0, 8, 0};
		info->var.transp = (struct fb_bitfield){24, 8, 0};
	}

	info->var.activate = FB_ACTIVATE_NOW;
	info->var.height = -1;
	info->var.width = -1;
	info->var.vmode = FB_VMODE_NONINTERLACED;

	strcpy(info->fix.id, KBUILD_MODNAME);
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.line_length = screen_width * screen_depth / 8;
	info->fix.accel = FB_ACCEL_NONE;

	info->fbops = &hvfb_ops;
	info->pseudo_palette = par->pseudo_palette;

	/* Send config to host */
	ret = synthvid_send_config(hdev);
	if (ret)
		goto error;

	ret = register_framebuffer(info);
	if (ret) {
		pr_err("Unable to register framebuffer\n");
		goto error;
	}

	par->fb_ready = true;

	par->synchronous_fb = false;
	par->hvfb_panic_nb.notifier_call = hvfb_on_panic;
	atomic_notifier_chain_register(&panic_notifier_list,
				       &par->hvfb_panic_nb);

	return 0;

error:
	hvfb_putmem(info);
error2:
	vmbus_close(hdev->channel);
error1:
	cancel_delayed_work_sync(&par->dwork);
	hv_set_drvdata(hdev, NULL);
	framebuffer_release(info);
	return ret;
}


static int hvfb_remove(struct hv_device *hdev)
{
	struct fb_info *info = hv_get_drvdata(hdev);
	struct hvfb_par *par = info->par;

	atomic_notifier_chain_unregister(&panic_notifier_list,
					 &par->hvfb_panic_nb);

	par->update = false;
	par->fb_ready = false;

	unregister_framebuffer(info);
	cancel_delayed_work_sync(&par->dwork);

	vmbus_close(hdev->channel);
	hv_set_drvdata(hdev, NULL);

	hvfb_putmem(info);
	framebuffer_release(info);

	return 0;
}


static const struct pci_device_id pci_stub_id_table[] = {
	{
		.vendor      = PCI_VENDOR_ID_MICROSOFT,
		.device      = PCI_DEVICE_ID_HYPERV_VIDEO,
	},
	{ /* end of list */ }
};

static const struct hv_vmbus_device_id id_table[] = {
	/* Synthetic Video Device GUID */
	{HV_SYNTHVID_GUID},
	{}
};

MODULE_DEVICE_TABLE(pci, pci_stub_id_table);
MODULE_DEVICE_TABLE(vmbus, id_table);

static struct hv_driver hvfb_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = hvfb_probe,
	.remove = hvfb_remove,
};

static int hvfb_pci_stub_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	return 0;
}

static void hvfb_pci_stub_remove(struct pci_dev *pdev)
{
}

static struct pci_driver hvfb_pci_stub_driver = {
	.name =		KBUILD_MODNAME,
	.id_table =	pci_stub_id_table,
	.probe =	hvfb_pci_stub_probe,
	.remove =	hvfb_pci_stub_remove,
};

static int __init hvfb_drv_init(void)
{
	int ret;

	ret = vmbus_driver_register(&hvfb_drv);
	if (ret != 0)
		return ret;

	ret = pci_register_driver(&hvfb_pci_stub_driver);
	if (ret != 0) {
		vmbus_driver_unregister(&hvfb_drv);
		return ret;
	}

	return 0;
}

static void __exit hvfb_drv_exit(void)
{
	pci_unregister_driver(&hvfb_pci_stub_driver);
	vmbus_driver_unregister(&hvfb_drv);
}

module_init(hvfb_drv_init);
module_exit(hvfb_drv_exit);

MODULE_LICENSE("GPL");
MODULE_VERSION(HV_DRV_VERSION);
MODULE_DESCRIPTION("Microsoft Hyper-V Synthetic Video Frame Buffer Driver");
Example #17
static int netvsc_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct net_device *net = NULL;
	struct net_device_context *net_device_ctx;
	struct netvsc_device_info device_info;
	struct netvsc_device *nvdev;
	int ret;
	u32 max_needed_headroom;

	net = alloc_etherdev_mq(sizeof(struct net_device_context),
				num_online_cpus());
	if (!net)
		return -ENOMEM;

	max_needed_headroom = sizeof(struct hv_netvsc_packet) +
			      RNDIS_AND_PPI_SIZE;

	netif_carrier_off(net);

	net_device_ctx = netdev_priv(net);
	net_device_ctx->device_ctx = dev;
	net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
	if (netif_msg_probe(net_device_ctx))
		netdev_dbg(net, "netvsc msg_enable: %d\n",
			   net_device_ctx->msg_enable);

	net_device_ctx->tx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
	if (!net_device_ctx->tx_stats) {
		free_netdev(net);
		return -ENOMEM;
	}
	net_device_ctx->rx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
	if (!net_device_ctx->rx_stats) {
		free_percpu(net_device_ctx->tx_stats);
		free_netdev(net);
		return -ENOMEM;
	}

	hv_set_drvdata(dev, net);
	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
	INIT_WORK(&net_device_ctx->work, do_set_multicast);

	net->netdev_ops = &device_ops;

	net->hw_features = NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_IP_CSUM |
				NETIF_F_TSO;
	net->features = NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_SG | NETIF_F_RXCSUM |
			NETIF_F_IP_CSUM | NETIF_F_TSO;

	net->ethtool_ops = &ethtool_ops;
	SET_NETDEV_DEV(net, &dev->device);

	/*
	 * Request additional head room in the skb.
	 * We will use this space to build the rndis
	 * header and other state we need to maintain.
	 */
	net->needed_headroom = max_needed_headroom;

	/* Notify the netvsc driver of the new device */
	device_info.ring_size = ring_size;
	device_info.max_num_vrss_chns = max_num_vrss_chns;
	ret = rndis_filter_device_add(dev, &device_info);
	if (ret != 0) {
		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
		netvsc_free_netdev(net);
		hv_set_drvdata(dev, NULL);
		return ret;
	}
	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);

	nvdev = hv_get_drvdata(dev);
	netif_set_real_num_tx_queues(net, nvdev->num_chn);
	netif_set_real_num_rx_queues(net, nvdev->num_chn);

	ret = register_netdev(net);
	if (ret != 0) {
		pr_err("Unable to register netdev.\n");
		rndis_filter_device_remove(dev);
		netvsc_free_netdev(net);
	} else {
		schedule_delayed_work(&net_device_ctx->dwork, 0);
	}

	return ret;
}
Example #18
static int netvsc_set_channels(struct net_device *net,
			       struct ethtool_channels *channels)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev = hv_get_drvdata(dev);
	struct netvsc_device_info device_info;
	u32 num_chn;
	u32 max_chn;
	int ret = 0;
	bool recovering = false;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	num_chn = nvdev->num_chn;
	max_chn = min_t(u32, nvdev->max_chn, num_online_cpus());

	if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5) {
		pr_info("vRSS unsupported before NVSP Version 5\n");
		return -EINVAL;
	}

	/* We do not support rx, tx, or other */
	if (!channels ||
	    channels->rx_count ||
	    channels->tx_count ||
	    channels->other_count ||
	    (channels->combined_count < 1))
		return -EINVAL;

	if (channels->combined_count > max_chn) {
		pr_info("combined channels too high, using %d\n", max_chn);
		channels->combined_count = max_chn;
	}

	ret = netvsc_close(net);
	if (ret)
		goto out;

 do_set:
	nvdev->start_remove = true;
	rndis_filter_device_remove(dev);

	nvdev->num_chn = channels->combined_count;

	net_device_ctx->device_ctx = dev;
	hv_set_drvdata(dev, net);

	memset(&device_info, 0, sizeof(device_info));
	device_info.num_chn = nvdev->num_chn; /* passed to RNDIS */
	device_info.ring_size = ring_size;
	device_info.max_num_vrss_chns = max_num_vrss_chns;

	ret = rndis_filter_device_add(dev, &device_info);
	if (ret) {
		if (recovering) {
			netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
			return ret;
		}
		goto recover;
	}

	nvdev = hv_get_drvdata(dev);

	ret = netif_set_real_num_tx_queues(net, nvdev->num_chn);
	if (ret) {
		if (recovering) {
			netdev_err(net, "could not set tx queue count (ret %d)\n", ret);
			return ret;
		}
		goto recover;
	}

	ret = netif_set_real_num_rx_queues(net, nvdev->num_chn);
	if (ret) {
		if (recovering) {
			netdev_err(net, "could not set rx queue count (ret %d)\n", ret);
			return ret;
		}
		goto recover;
	}

 out:
	netvsc_open(net);

	return ret;

 recover:
	/* If the above failed, we attempt to recover through the same
	 * process but with the original number of channels.
	 */
	netdev_err(net, "could not set channels, recovering\n");
	recovering = true;
	channels->combined_count = num_chn;
	goto do_set;
}
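
Common to all of the examples above is the pairing of hv_set_drvdata() and hv_get_drvdata(): probe allocates per-device state and publishes it on the struct hv_device, while the remove path (or probe's own error path) retrieves that state, clears the pointer, and frees it. The sketch below distills only that pattern; it is not taken from any driver shown here, and the names my_dev, my_probe, and my_remove are hypothetical.

#include <linux/hyperv.h>
#include <linux/slab.h>

/* Minimal per-device private state for the sketch. */
struct my_dev {
	struct hv_device *hdev;
};

static int my_probe(struct hv_device *hdev,
		    const struct hv_vmbus_device_id *dev_id)
{
	struct my_dev *md;

	md = kzalloc(sizeof(*md), GFP_KERNEL);
	if (!md)
		return -ENOMEM;

	md->hdev = hdev;
	/* Publish the private state so later callbacks can reach it. */
	hv_set_drvdata(hdev, md);

	return 0;
}

static int my_remove(struct hv_device *hdev)
{
	struct my_dev *md = hv_get_drvdata(hdev);

	/* Break the link before freeing so no stale pointer remains. */
	hv_set_drvdata(hdev, NULL);
	kfree(md);

	return 0;
}

A struct hv_driver would then point its .probe and .remove fields at these callbacks, in the same way the hvfb_drv definition in Example #16 wires up hvfb_probe and hvfb_remove.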