Example #1
static int
virtio_blk_probe(int skip)
{
	int r;

	/* sub device id for virtio-blk is 0x0002 */
	blk_dev = virtio_setup_device(0x0002, name, blkf,
				      sizeof(blkf) / sizeof(blkf[0]),
				      VIRTIO_BLK_NUM_THREADS, skip);
	if (!blk_dev)
		return ENXIO;

	/* virtio-blk has one queue only */
	if ((r = virtio_alloc_queues(blk_dev, 1)) != OK) {
		virtio_free_device(blk_dev);
		return r;
	}

	/* Allocate memory for headers and status */
	if ((r = virtio_blk_alloc_requests()) != OK) {
		virtio_free_queues(blk_dev);
		virtio_free_device(blk_dev);
		return r;
	}

	virtio_blk_config();

	/* Let the host know that we are ready */
	virtio_device_ready(blk_dev);

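	/* With the device live, accept completion interrupts. */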
	virtio_irq_enable(blk_dev);

	return OK;
}
Example #2
static void
virtio_net_conf(message *m)
{
	/* TODO: Add the multicast, broadcast filtering etc. */
	int r;

	message reply;

	/* If this is the first CONF message we see, fully initialize
	 * the device now.
	 */
	if (!started) {
		started = 1;
		virtio_device_ready(net_dev);
		virtio_irq_enable(net_dev);
	}

	/* Prepare reply */
	memcpy(reply.m_netdrv_net_dl_conf.hw_addr, virtio_net_mac,
		sizeof(reply.m_netdrv_net_dl_conf.hw_addr));

	reply.m_type = DL_CONF_REPLY;
	reply.m_netdrv_net_dl_conf.stat = OK;

	if ((r = ipc_send(m->m_source, &reply)) != OK)
		panic("%s: ipc_send to %d failed (%d)", name, m->m_source, r);
}
Example #3
File: virtio_net.c  Project: wieck/minix
static void
virtio_net_conf(message *m)
{
    /* TODO: Add the multicast, broadcast filtering etc. */
    int i, r;

    message reply;

    /* If this is the first CONF message we see, fully initialize
     * the device now.
     */
    if (!started) {
        started = 1;
        virtio_device_ready(net_dev);
        virtio_irq_enable(net_dev);
    }

    /* Prepare reply */
    for (i = 0; i < sizeof(virtio_net_mac); i++)
        ((u8_t*)reply.DL_HWADDR)[i] = virtio_net_mac[i];

    reply.m_type = DL_CONF_REPLY;
    reply.DL_STAT = OK;
    reply.DL_COUNT = 0;

    if ((r = send(m->m_source, &reply)) != OK)
        panic("%s: send to %d failed (%d)", name, m->m_source, r);
}
Example #4
static int virtballoon_probe(struct virtio_device *vdev)
{
	struct virtio_balloon *vb;
	int err;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL);
	if (!vb) {
		err = -ENOMEM;
		goto out;
	}

	vb->num_pages = 0;
	mutex_init(&vb->balloon_lock);
	init_waitqueue_head(&vb->config_change);
	init_waitqueue_head(&vb->acked);
	vb->vdev = vdev;
	vb->need_stats_update = 0;

	balloon_devinfo_init(&vb->vb_dev_info);
#ifdef CONFIG_BALLOON_COMPACTION
	vb->vb_dev_info.migratepage = virtballoon_migratepage;
#endif

	err = init_vqs(vb);
	if (err)
		goto out_free_vb;

	vb->nb.notifier_call = virtballoon_oom_notify;
	vb->nb.priority = VIRTBALLOON_OOM_NOTIFY_PRIORITY;
	err = register_oom_notifier(&vb->nb);
	if (err < 0)
		goto out_oom_notify;

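	/* After DRIVER_OK the device may process queues and raise interrupts. */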
	virtio_device_ready(vdev);

	vb->thread = kthread_run(balloon, vb, "vballoon");
	if (IS_ERR(vb->thread)) {
		err = PTR_ERR(vb->thread);
		goto out_del_vqs;
	}

	return 0;

out_del_vqs:
	unregister_oom_notifier(&vb->nb);
out_oom_notify:
	vdev->config->del_vqs(vdev);
out_free_vb:
	kfree(vb);
out:
	return err;
}
Example #5
static int virtballoon_restore(struct virtio_device *vdev)
{
	struct virtio_balloon *vb = vdev->priv;
	int ret;

	ret = init_vqs(vdev->priv);
	if (ret)
		return ret;

	virtio_device_ready(vdev);

	fill_balloon(vb, towards_target(vb));
	update_balloon_size(vb);
	return 0;
}
Example #6
/*
 * Initialize the driver and the virtual hardware.
 */
static int
virtio_net_init(unsigned int instance, ether_addr_t *addr)
{
	int r;

	if ((r = virtio_net_probe(instance)) != OK)
		return r;

	virtio_net_config(addr);

	if (virtio_net_alloc_bufs() != OK)
		panic("%s: Buffer allocation failed", name);

	virtio_net_init_queues();

	/* Add packets to the receive queue. */
	virtio_net_refill_rx_queue();

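	/* All receive buffers are queued; let the device start using them. */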
	virtio_device_ready(net_dev);

	virtio_irq_enable(net_dev);

	return(OK);
}
Example #7
static int virtballoon_probe(struct virtio_device *vdev)
{
	struct virtio_balloon *vb;
	__u32 poison_val;
	int err;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	vdev->priv = vb = kzalloc(sizeof(*vb), GFP_KERNEL);
	if (!vb) {
		err = -ENOMEM;
		goto out;
	}

	INIT_WORK(&vb->update_balloon_stats_work, update_balloon_stats_func);
	INIT_WORK(&vb->update_balloon_size_work, update_balloon_size_func);
	spin_lock_init(&vb->stop_update_lock);
	mutex_init(&vb->balloon_lock);
	init_waitqueue_head(&vb->acked);
	vb->vdev = vdev;

	balloon_devinfo_init(&vb->vb_dev_info);

	err = init_vqs(vb);
	if (err)
		goto out_free_vb;

#ifdef CONFIG_BALLOON_COMPACTION
	balloon_mnt = kern_mount(&balloon_fs);
	if (IS_ERR(balloon_mnt)) {
		err = PTR_ERR(balloon_mnt);
		goto out_del_vqs;
	}

	vb->vb_dev_info.migratepage = virtballoon_migratepage;
	vb->vb_dev_info.inode = alloc_anon_inode(balloon_mnt->mnt_sb);
	if (IS_ERR(vb->vb_dev_info.inode)) {
		err = PTR_ERR(vb->vb_dev_info.inode);
		kern_unmount(balloon_mnt);
		goto out_del_vqs;
	}
	vb->vb_dev_info.inode->i_mapping->a_ops = &balloon_aops;
#endif
	if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
		/*
		 * There is always one entry reserved for cmd id, so the ring
		 * size needs to be at least two to report free page hints.
		 */
		if (virtqueue_get_vring_size(vb->free_page_vq) < 2) {
			err = -ENOSPC;
			goto out_del_vqs;
		}
		vb->balloon_wq = alloc_workqueue("balloon-wq",
					WQ_FREEZABLE | WQ_CPU_INTENSIVE, 0);
		if (!vb->balloon_wq) {
			err = -ENOMEM;
			goto out_del_vqs;
		}
		INIT_WORK(&vb->report_free_page_work, report_free_page_func);
		vb->cmd_id_received = VIRTIO_BALLOON_CMD_ID_STOP;
		vb->cmd_id_active = cpu_to_virtio32(vb->vdev,
						  VIRTIO_BALLOON_CMD_ID_STOP);
		vb->cmd_id_stop = cpu_to_virtio32(vb->vdev,
						  VIRTIO_BALLOON_CMD_ID_STOP);
		vb->num_free_page_blocks = 0;
		spin_lock_init(&vb->free_page_list_lock);
		INIT_LIST_HEAD(&vb->free_page_list);
		if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON)) {
			memset(&poison_val, PAGE_POISON, sizeof(poison_val));
			virtio_cwrite(vb->vdev, struct virtio_balloon_config,
				      poison_val, &poison_val);
		}
	}
	/*
	 * We continue to use VIRTIO_BALLOON_F_DEFLATE_ON_OOM to decide if a
	 * shrinker needs to be registered to relieve memory pressure.
	 */
	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) {
		err = virtio_balloon_register_shrinker(vb);
		if (err)
			goto out_del_balloon_wq;
	}
	virtio_device_ready(vdev);

	if (towards_target(vb))
		virtballoon_changed(vdev);
	return 0;

out_del_balloon_wq:
	if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
		destroy_workqueue(vb->balloon_wq);
out_del_vqs:
	vdev->config->del_vqs(vdev);
out_free_vb:
	kfree(vb);
out:
	return err;
}
Example #8
static int virtballoon_probe(struct virtio_device *vdev)
{
	struct virtio_balloon *vb;
	int err;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL);
	if (!vb) {
		err = -ENOMEM;
		goto out;
	}

	INIT_WORK(&vb->update_balloon_stats_work, update_balloon_stats_func);
	INIT_WORK(&vb->update_balloon_size_work, update_balloon_size_func);
	spin_lock_init(&vb->stop_update_lock);
	vb->stop_update = false;
	vb->num_pages = 0;
	mutex_init(&vb->balloon_lock);
	init_waitqueue_head(&vb->acked);
	vb->vdev = vdev;

	balloon_devinfo_init(&vb->vb_dev_info);

	err = init_vqs(vb);
	if (err)
		goto out_free_vb;

	vb->nb.notifier_call = virtballoon_oom_notify;
	vb->nb.priority = VIRTBALLOON_OOM_NOTIFY_PRIORITY;
	err = register_oom_notifier(&vb->nb);
	if (err < 0)
		goto out_del_vqs;

#ifdef CONFIG_BALLOON_COMPACTION
	balloon_mnt = kern_mount(&balloon_fs);
	if (IS_ERR(balloon_mnt)) {
		err = PTR_ERR(balloon_mnt);
		unregister_oom_notifier(&vb->nb);
		goto out_del_vqs;
	}

	vb->vb_dev_info.migratepage = virtballoon_migratepage;
	vb->vb_dev_info.inode = alloc_anon_inode(balloon_mnt->mnt_sb);
	if (IS_ERR(vb->vb_dev_info.inode)) {
		err = PTR_ERR(vb->vb_dev_info.inode);
		kern_unmount(balloon_mnt);
		unregister_oom_notifier(&vb->nb);
		vb->vb_dev_info.inode = NULL;
		goto out_del_vqs;
	}
	vb->vb_dev_info.inode->i_mapping->a_ops = &balloon_aops;
#endif

	virtio_device_ready(vdev);

	if (towards_target(vb))
		virtballoon_changed(vdev);
	return 0;

out_del_vqs:
	vdev->config->del_vqs(vdev);
out_free_vb:
	kfree(vb);
out:
	return err;
}
Example #9
File: virtio.c  Project: dj-tech/linux
static int virtio_dev_probe(struct device *_d)
{
	int err, i;
	struct virtio_device *dev = dev_to_virtio(_d);
	struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
	u64 device_features;
	u64 driver_features;
	u64 driver_features_legacy;

	/* We have a driver! */
	virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER);

	/* Figure out what features the device supports. */
	device_features = dev->config->get_features(dev);

	/* Figure out what features the driver supports. */
	driver_features = 0;
	for (i = 0; i < drv->feature_table_size; i++) {
		unsigned int f = drv->feature_table[i];
		BUG_ON(f >= 64);
		driver_features |= (1ULL << f);
	}

	/* Some drivers have a separate feature table for virtio v1.0 */
	if (drv->feature_table_legacy) {
		driver_features_legacy = 0;
		for (i = 0; i < drv->feature_table_size_legacy; i++) {
			unsigned int f = drv->feature_table_legacy[i];
			BUG_ON(f >= 64);
			driver_features_legacy |= (1ULL << f);
		}
	} else {
		driver_features_legacy = driver_features;
	}

	if (device_features & (1ULL << VIRTIO_F_VERSION_1))
		dev->features = driver_features & device_features;
	else
		dev->features = driver_features_legacy & device_features;

	/* Transport features always preserved to pass to finalize_features. */
	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++)
		if (device_features & (1ULL << i))
			__virtio_set_bit(dev, i);

	if (drv->validate) {
		err = drv->validate(dev);
		if (err)
			goto err;
	}

	err = virtio_finalize_features(dev);
	if (err)
		goto err;

	err = drv->probe(dev);
	if (err)
		goto err;

	/* If probe didn't do it, mark device DRIVER_OK ourselves. */
	if (!(dev->config->get_status(dev) & VIRTIO_CONFIG_S_DRIVER_OK))
		virtio_device_ready(dev);

	if (drv->scan)
		drv->scan(dev);

	virtio_config_enable(dev);

	return 0;
err:
	virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
	return err;
}
Example #10
static int rpmsg_probe(struct virtio_device *vdev)
{
	vq_callback_t *vq_cbs[] = { rpmsg_recv_done, rpmsg_xmit_done };
	static const char * const names[] = { "input", "output" };
	struct virtqueue *vqs[2];
	struct virtproc_info *vrp;
	void *bufs_va;
	int err = 0, i;
	size_t total_buf_space;
	bool notify;

	vrp = kzalloc(sizeof(*vrp), GFP_KERNEL);
	if (!vrp)
		return -ENOMEM;

	vrp->vdev = vdev;

	idr_init(&vrp->endpoints);
	mutex_init(&vrp->endpoints_lock);
	mutex_init(&vrp->tx_lock);
	init_waitqueue_head(&vrp->sendq);

	/* We expect two virtqueues, rx and tx (and in this order) */
	err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, names);
	if (err)
		goto free_vrp;

	vrp->rvq = vqs[0];
	vrp->svq = vqs[1];

	/* we expect symmetric tx/rx vrings */
	WARN_ON(virtqueue_get_vring_size(vrp->rvq) !=
		virtqueue_get_vring_size(vrp->svq));

	/* we need fewer buffers if the vrings are small */
	if (virtqueue_get_vring_size(vrp->rvq) < MAX_RPMSG_NUM_BUFS / 2)
		vrp->num_bufs = virtqueue_get_vring_size(vrp->rvq) * 2;
	else
		vrp->num_bufs = MAX_RPMSG_NUM_BUFS;

	total_buf_space = vrp->num_bufs * RPMSG_BUF_SIZE;

	/* allocate coherent memory for the buffers */
	bufs_va = dma_alloc_coherent(vdev->dev.parent->parent,
				     total_buf_space, &vrp->bufs_dma,
				     GFP_KERNEL);
	if (!bufs_va) {
		err = -ENOMEM;
		goto vqs_del;
	}

	dev_dbg(&vdev->dev, "buffers: va %p, dma 0x%llx\n", bufs_va,
					(unsigned long long)vrp->bufs_dma);

	/* half of the buffers are dedicated to RX */
	vrp->rbufs = bufs_va;

	/* and half are dedicated to TX */
	vrp->sbufs = bufs_va + total_buf_space / 2;

	/* set up the receive buffers */
	for (i = 0; i < vrp->num_bufs / 2; i++) {
		struct scatterlist sg;
		void *cpu_addr = vrp->rbufs + i * RPMSG_BUF_SIZE;

		sg_init_one(&sg, cpu_addr, RPMSG_BUF_SIZE);

		err = virtqueue_add_inbuf(vrp->rvq, &sg, 1, cpu_addr,
								GFP_KERNEL);
		WARN_ON(err); /* sanity check; this can't really happen */
	}

	/* suppress "tx-complete" interrupts */
	virtqueue_disable_cb(vrp->svq);

	vdev->priv = vrp;

	/* if supported by the remote processor, enable the name service */
	if (virtio_has_feature(vdev, VIRTIO_RPMSG_F_NS)) {
		/* a dedicated endpoint handles the name service msgs */
		vrp->ns_ept = __rpmsg_create_ept(vrp, NULL, rpmsg_ns_cb,
						vrp, RPMSG_NS_ADDR);
		if (!vrp->ns_ept) {
			dev_err(&vdev->dev, "failed to create the ns ept\n");
			err = -ENOMEM;
			goto free_coherent;
		}
	}

	/*
	 * Prepare to kick but don't notify yet - we can't do this before
	 * device is ready.
	 */
	notify = virtqueue_kick_prepare(vrp->rvq);

	/* From this point on, we can notify and get callbacks. */
	virtio_device_ready(vdev);

	/* tell the remote processor it can start sending messages */
	/*
	 * this might be concurrent with callbacks, but we are only
	 * doing notify, not a full kick here, so that's ok.
	 */
	if (notify)
		virtqueue_notify(vrp->rvq);

	dev_info(&vdev->dev, "rpmsg host is online\n");

	return 0;

free_coherent:
	dma_free_coherent(vdev->dev.parent->parent, total_buf_space,
			  bufs_va, vrp->bufs_dma);
vqs_del:
	vdev->config->del_vqs(vrp->vdev);
free_vrp:
	kfree(vrp);
	return err;
}
Example #11
int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
{
	static vq_callback_t *callbacks[] = {
		virtio_gpu_ctrl_ack, virtio_gpu_cursor_ack
	};
	static const char *names[] = { "control", "cursor" };

	struct virtio_gpu_device *vgdev;
	/* this will expand later */
	struct virtqueue *vqs[2];
	u32 num_scanouts;
	int ret;

	if (!virtio_has_feature(dev->virtdev, VIRTIO_F_VERSION_1))
		return -ENODEV;

	vgdev = kzalloc(sizeof(struct virtio_gpu_device), GFP_KERNEL);
	if (!vgdev)
		return -ENOMEM;

	vgdev->ddev = dev;
	dev->dev_private = vgdev;
	vgdev->vdev = dev->virtdev;
	vgdev->dev = dev->dev;

	spin_lock_init(&vgdev->display_info_lock);
	spin_lock_init(&vgdev->ctx_id_idr_lock);
	idr_init(&vgdev->ctx_id_idr);
	spin_lock_init(&vgdev->resource_idr_lock);
	idr_init(&vgdev->resource_idr);
	init_waitqueue_head(&vgdev->resp_wq);
	virtio_gpu_init_vq(&vgdev->ctrlq, virtio_gpu_dequeue_ctrl_func);
	virtio_gpu_init_vq(&vgdev->cursorq, virtio_gpu_dequeue_cursor_func);

	spin_lock_init(&vgdev->fence_drv.lock);
	INIT_LIST_HEAD(&vgdev->fence_drv.fences);
	INIT_WORK(&vgdev->config_changed_work,
		  virtio_gpu_config_changed_work_func);

	ret = vgdev->vdev->config->find_vqs(vgdev->vdev, 2, vqs,
					    callbacks, names);
	if (ret) {
		DRM_ERROR("failed to find virt queues\n");
		goto err_vqs;
	}
	vgdev->ctrlq.vq = vqs[0];
	vgdev->cursorq.vq = vqs[1];
	ret = virtio_gpu_alloc_vbufs(vgdev);
	if (ret) {
		DRM_ERROR("failed to alloc vbufs\n");
		goto err_vbufs;
	}

	ret = virtio_gpu_ttm_init(vgdev);
	if (ret) {
		DRM_ERROR("failed to init ttm %d\n", ret);
		goto err_ttm;
	}

	/* get display info */
	virtio_cread(vgdev->vdev, struct virtio_gpu_config,
		     num_scanouts, &num_scanouts);
	vgdev->num_scanouts = min_t(uint32_t, num_scanouts,
				    VIRTIO_GPU_MAX_SCANOUTS);
	if (!vgdev->num_scanouts) {
		DRM_ERROR("num_scanouts is zero\n");
		ret = -EINVAL;
		goto err_scanouts;
	}

	ret = virtio_gpu_modeset_init(vgdev);
	if (ret)
		goto err_modeset;

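	/* Set DRIVER_OK first, then mark the queues usable for our own code. */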
	virtio_device_ready(vgdev->vdev);
	vgdev->vqs_ready = true;

	virtio_gpu_cmd_get_display_info(vgdev);
	wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
			   5 * HZ);
	if (virtio_gpu_fbdev)
		virtio_gpu_fbdev_init(vgdev);

	return 0;

err_modeset:
err_scanouts:
	virtio_gpu_ttm_fini(vgdev);
err_ttm:
	virtio_gpu_free_vbufs(vgdev);
err_vbufs:
	vgdev->vdev->config->del_vqs(vgdev->vdev);
err_vqs:
	kfree(vgdev);
	return ret;
}
Example #12
File: virtio_net.c  Project: qiyeboy/yaos
int init_virtio_net(void)
{
    virtio_dev_t dev = to_virtio_dev_t(&pci_vn);

    if (virtio_setup_device(dev, VIRTIO_NET_SUBID, 1)) {
        virtio_net_read_config(&pci_vn);
        /* virtio-net has at least 2 queues */

        int queues = 2;

        if (pci_vn._ctrl_vq) {
            queues++;
        }
        int ret = virtio_alloc_queues(dev, queues);

        pci_d("virtio_alloc_queues ret:%d\n", ret);
        if (virtio_net_alloc_bufs() != OK)
            panic("%s: Buffer allocation failed", name);
        pci_d("virtio_net_alloc_bufs:%d\n", ret);

        virtio_net_init_queues();
        pci_d("virtio_net_init_queues:%d\n", ret);
        /* Add packets to the receive queue. */
        virtio_net_refill_rx_queue();
        pci_d("virtio_net_refill_rx_queue:%d\n", ret);

        virtio_device_ready(to_virtio_dev_t(&pci_vn));
        pci_d("virtio_device_ready:%d\n", ret);

//        virtio_irq_enable(to_virtio_dev_t(&pci_vn));
        /* IRQ delivery stays disabled above; declare virtio_had_irq() here
         * so the interrupt status can be polled below instead.
         */
        bool virtio_had_irq(struct virtio_device *dev);

        pci_d("had_irq:%d\n", virtio_had_irq(to_virtio_dev_t(&pci_vn)));
        set_timeout(1000, vn_timeout);
        sti();

    }
    return OK;
}
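
Taken together, the examples share one ordering contract: complete all driver-side setup (virtqueues, buffers, callbacks, error unwinding) before calling virtio_device_ready(), because once DRIVER_OK is set the device may consume buffers and raise interrupts at any time. Below is a minimal composite sketch of that ordering for a Linux-style driver; my_init_vqs() and my_alloc_bufs() are hypothetical placeholder helpers, not functions from any project above.

#include <linux/virtio.h>
#include <linux/virtio_config.h>

/* Hypothetical helpers, assumed to return 0 on success. */
static int my_init_vqs(struct virtio_device *vdev);
static int my_alloc_bufs(struct virtio_device *vdev);

static int my_virtio_probe(struct virtio_device *vdev)
{
	int err;

	/* 1. Allocate and set up the virtqueues (hypothetical helper). */
	err = my_init_vqs(vdev);
	if (err)
		return err;

	/* 2. Allocate and queue any buffers the device will consume
	 *    (hypothetical helper).
	 */
	err = my_alloc_bufs(vdev);
	if (err)
		goto out_del_vqs;

	/* 3. Only now set DRIVER_OK; the device may start work immediately. */
	virtio_device_ready(vdev);

	return 0;

out_del_vqs:
	vdev->config->del_vqs(vdev);
	return err;
}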