Example #1
static void virtblk_done(struct virtqueue *vq)
{
	struct virtio_blk *vblk = vq->vdev->priv;
	bool req_done = false;
	int qid = vq->index;
	struct virtblk_req *vbr;
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
			blk_mq_complete_request(vbr->req, vbr->req->errors);
			req_done = true;
		}
		if (unlikely(virtqueue_is_broken(vq)))
			break;
	} while (!virtqueue_enable_cb(vq));

	/* In case queue is stopped waiting for more buffers. */
	if (req_done)
		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}
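The disable/drain/re-enable loop above is the standard interrupt-mitigation idiom for virtio completion handlers: callbacks stay off while the used ring is drained, and the handler only exits once virtqueue_enable_cb() confirms nothing new slipped in during the race window. A minimal sketch of the bare idiom, assuming a hypothetical handle_one() per-buffer handler (not part of the driver above):

void handle_one(void *buf, unsigned int len);	/* hypothetical handler */

static void my_vq_done(struct virtqueue *vq)
{
	unsigned int len;
	void *buf;

	do {
		/* Suppress further callbacks while draining the used ring */
		virtqueue_disable_cb(vq);

		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
			handle_one(buf, len);

		/* virtqueue_enable_cb() returns false if a buffer arrived
		 * while callbacks were off, so loop instead of racing. */
	} while (!virtqueue_enable_cb(vq));
}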
Example #2
/**
 * virtqueue_create - Creates a new VirtIO queue
 *
 * @param virt_dev  - Pointer to VirtIO device
 * @param id        - VirtIO queue ID, must be unique
 * @param name      - Name of VirtIO queue
 * @param ring      - Pointer to vring_alloc_info control block
 * @param callback  - Pointer to callback function, invoked
 *                    when message is available on VirtIO queue
 * @param notify    - Pointer to notify function, used to notify
 *                    other side that there is job available for it
 * @param v_queue   - Output pointer to the created VirtIO queue
 *
 * @return          - Function status
 */
int virtqueue_create(struct virtio_device *virt_dev, unsigned short id,
		     char *name, struct vring_alloc_info *ring,
		     void (*callback)(struct virtqueue *vq),
		     void (*notify)(struct virtqueue *vq),
		     struct virtqueue **v_queue)
{

	struct virtqueue *vq = VQ_NULL;
	int status = VQUEUE_SUCCESS;
	uint32_t vq_size = 0;

	VQ_PARAM_CHK(ring == VQ_NULL, status, ERROR_VQUEUE_INVLD_PARAM);
	VQ_PARAM_CHK(ring->num_descs == 0, status, ERROR_VQUEUE_INVLD_PARAM);
	VQ_PARAM_CHK(ring->num_descs & (ring->num_descs - 1), status,
		     ERROR_VRING_ALIGN);

	//TODO : Error check for indirect buffer addition

	if (status == VQUEUE_SUCCESS) {

		vq_size = sizeof(struct virtqueue)
		    + (ring->num_descs) * sizeof(struct vq_desc_extra);
		vq = (struct virtqueue *)env_allocate_memory(vq_size);

		if (vq == VQ_NULL) {
			return (ERROR_NO_MEM);
		}

		env_memset(vq, 0x00, vq_size);

		vq->vq_dev = virt_dev;
		env_strncpy(vq->vq_name, name, VIRTQUEUE_MAX_NAME_SZ);
		vq->vq_queue_index = id;
		vq->vq_alignment = ring->align;
		vq->vq_nentries = ring->num_descs;
		vq->vq_free_cnt = vq->vq_nentries;
		vq->callback = callback;
		vq->notify = notify;

		//TODO : Whether we want to support indirect addition or not.
		vq->vq_ring_size = vring_size(ring->num_descs, ring->align);
		vq->vq_ring_mem = (void *)ring->phy_addr;

		/* Initialize vring control block in virtqueue. */
		vq_ring_init(vq);

		/* Disable callbacks - will be enabled by the application
		 * once initialization is completed.
		 */
		virtqueue_disable_cb(vq);

		*v_queue = vq;

		//TODO : Need to add cleanup in case of error used with the indirect buffer addition
		//TODO: do we need to save the new queue in db based on its id
	}

	return (status);
}
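A hedged usage sketch for the API above; my_rx_cb, my_notify, and my_create_queue are illustrative assumptions, and the vring_alloc_info is presumed to be pre-filled by platform code:

static void my_rx_cb(struct virtqueue *vq)
{
	/* drain vq here; the app must re-enable callbacks first */
}

static void my_notify(struct virtqueue *vq)
{
	/* ring the doorbell / trigger the IPI toward the other side */
}

int my_create_queue(struct virtio_device *vdev,
		    struct vring_alloc_info *ring, /* pre-filled elsewhere */
		    struct virtqueue **out)
{
	/* id 0 and the name are arbitrary; ring->num_descs must be a
	 * non-zero power of two or virtqueue_create() rejects it. */
	return virtqueue_create(vdev, 0, "my_vq", ring,
				my_rx_cb, my_notify, out);
}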
Example #3
NTSTATUS VioCryptInterruptDisable(IN WDFINTERRUPT Interrupt, IN WDFDEVICE wdfDevice)
{
    PDEVICE_CONTEXT context = GetDeviceContext(wdfDevice);
    UNREFERENCED_PARAMETER(Interrupt);

    Trace(TRACE_LEVEL_VERBOSE, "[%s]", __FUNCTION__);

    virtqueue_disable_cb(context->ControlQueue);

    return STATUS_SUCCESS;
}
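The matching enable path in this driver style is symmetric; a sketch modeled on the function above rather than copied from the actual virtio-win source:

NTSTATUS VioCryptInterruptEnable(IN WDFINTERRUPT Interrupt, IN WDFDEVICE wdfDevice)
{
    PDEVICE_CONTEXT context = GetDeviceContext(wdfDevice);
    UNREFERENCED_PARAMETER(Interrupt);

    Trace(TRACE_LEVEL_VERBOSE, "[%s]", __FUNCTION__);

    /* Re-arm used-ring callbacks; virtqueue_enable_cb() returns false
     * if buffers are already pending and should be processed. */
    virtqueue_enable_cb(context->ControlQueue);

    return STATUS_SUCCESS;
}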
Example #4
/* This is invoked whenever the remote processor has completed processing
 * a TX msg we just sent and has put the buffer back on the used ring.
 */
static void cfv_release_used_buf(struct virtqueue *vq_tx)
{
	struct cfv_info *cfv = vq_tx->vdev->priv;
	unsigned long flags;

	BUG_ON(vq_tx != cfv->vq_tx);

	for (;;) {
		unsigned int len;
		struct buf_info *buf_info;

		/* Get used buffer from used ring to recycle used descriptors */
		spin_lock_irqsave(&cfv->tx_lock, flags);
		buf_info = virtqueue_get_buf(vq_tx, &len);
		spin_unlock_irqrestore(&cfv->tx_lock, flags);

		/* Stop looping if there are no more buffers to free */
		if (!buf_info)
			break;

		free_buf_info(cfv, buf_info);

		/* watermark_tx indicates if we previously stopped the tx
		 * queues. If we have enough free slots in the virtio ring,
		 * re-establish the memory reserve and open up the tx queues.
		 */
		if (cfv->vq_tx->num_free <= cfv->watermark_tx)
			continue;

		/* Re-establish memory reserve */
		if (cfv->reserved_mem == 0 && cfv->genpool)
			cfv->reserved_mem =
				gen_pool_alloc(cfv->genpool,
					       cfv->reserved_size);

		/* Open up the tx queues */
		if (cfv->reserved_mem) {
			cfv->watermark_tx =
				virtqueue_get_vring_size(cfv->vq_tx);
			netif_tx_wake_all_queues(cfv->ndev);
			/* Buffers are recycled in cfv_netdev_tx, so
			 * disable notifications when queues are opened.
			 */
			virtqueue_disable_cb(cfv->vq_tx);
			++cfv->stats.tx_flow_on;
		} else {
			/* if no memory reserve, wait for more free slots */
			WARN_ON(cfv->watermark_tx >
			       virtqueue_get_vring_size(cfv->vq_tx));
			cfv->watermark_tx +=
				virtqueue_get_vring_size(cfv->vq_tx) / 4;
		}
	}
}
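The other half of this flow control lives on the transmit path: when the ring runs low, the queues are stopped and callbacks are re-enabled so the release handler above can reopen them. A hypothetical sketch of that check (not the actual cfv_netdev_tx):

static void cfv_tx_flow_off_sketch(struct cfv_info *cfv)
{
	if (cfv->vq_tx->num_free <= cfv->watermark_tx) {
		/* Stop accepting packets until descriptors are recycled */
		netif_tx_stop_all_queues(cfv->ndev);
		/* Ask for a "buffer released" callback to restart TX */
		virtqueue_enable_cb(cfv->vq_tx);
	}
}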
Example #5
/**
 * rpmsg_downref_sleepers() - disable "tx-complete" interrupts, if needed
 * @vrp: virtual remote processor state
 *
 * This function is called after a sender, that waited for a tx buffer
 * to become available, is unblocked.
 *
 * If we still have blocking senders, this function merely decreases
 * the "sleepers" reference count, and exits.
 *
 * Otherwise, if there are no more blocking senders, we also disable
 * virtio's tx callbacks, to avoid the overhead incurred with handling
 * those (now redundant) interrupts.
 */
static void rpmsg_downref_sleepers(struct virtproc_info *vrp)
{
	/* support multiple concurrent senders */
	mutex_lock(&vrp->tx_lock);

	/* are we the last sleeping context waiting for tx buffers? */
	if (atomic_dec_and_test(&vrp->sleepers))
		/* disable "tx-complete" interrupts */
		virtqueue_disable_cb(vrp->svq);

	mutex_unlock(&vrp->tx_lock);
}
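Its counterpart runs before a sender starts waiting: the first sleeper re-enables the callbacks. Roughly, mirroring the function above:

static void rpmsg_upref_sleepers(struct virtproc_info *vrp)
{
	/* support multiple concurrent senders */
	mutex_lock(&vrp->tx_lock);

	/* are we the first sleeping context waiting for tx buffers? */
	if (atomic_inc_return(&vrp->sleepers) == 1)
		/* enable "tx-complete" interrupts before dozing off */
		virtqueue_enable_cb(vrp->svq);

	mutex_unlock(&vrp->tx_lock);
}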
Example #7
NTSTATUS VirtRngEvtInterruptDisable(IN WDFINTERRUPT Interrupt,
                                    IN WDFDEVICE AssociatedDevice)
{
    PDEVICE_CONTEXT context = GetDeviceContext(
        WdfInterruptGetDevice(Interrupt));

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_INTERRUPT,
        "--> %!FUNC! Interrupt: %p Device: %p",
        Interrupt, AssociatedDevice);

    virtqueue_disable_cb(context->VirtQueue);

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_INTERRUPT, "<-- %!FUNC!");

    return STATUS_SUCCESS;
}
Example #8
static void __devexit rpmsg_remove(struct virtio_device *vdev)
{
	struct virtproc_info *vrp = vdev->priv;
	int ret;

	virtqueue_disable_cb(vrp->rvq);
	mutex_lock(&vrp->rm_lock);

	ret = device_for_each_child(&vdev->dev, NULL, rpmsg_remove_device);
	if (ret)
		dev_warn(&vdev->dev, "can't remove rpmsg device: %d\n", ret);

	idr_remove_all(&vrp->endpoints);
	idr_destroy(&vrp->endpoints);

	vdev->config->del_vqs(vrp->vdev);

	mutex_unlock(&vrp->rm_lock);

	kfree(vrp);
}
Example #9
/* Disable the CAIF interface and free the memory-pool */
static int cfv_netdev_close(struct net_device *netdev)
{
	struct cfv_info *cfv = netdev_priv(netdev);
	unsigned long flags;
	struct buf_info *buf_info;

	/* Disable interrupts, queues and NAPI polling */
	netif_carrier_off(netdev);
	virtqueue_disable_cb(cfv->vq_tx);
	vringh_notify_disable_kern(cfv->vr_rx);
	napi_disable(&cfv->napi);

	/* Release any TX buffers on both used and available rings */
	cfv_release_used_buf(cfv->vq_tx);
	spin_lock_irqsave(&cfv->tx_lock, flags);
	while ((buf_info = virtqueue_detach_unused_buf(cfv->vq_tx)))
		free_buf_info(cfv, buf_info);
	spin_unlock_irqrestore(&cfv->tx_lock, flags);

	/* Release all dma allocated memory and destroy the pool */
	cfv_destroy_genpool(cfv);
	return 0;
}
Example #10
int rpmsg_init_vdev(struct rpmsg_virtio_device *rvdev,
		    struct virtio_device *vdev,
		    rpmsg_ns_bind_cb ns_bind_cb,
		    struct metal_io_region *shm_io,
		    struct rpmsg_virtio_shm_pool *shpool)
{
	struct rpmsg_device *rdev;
	const char *vq_names[RPMSG_NUM_VRINGS];
	vq_callback *callback[RPMSG_NUM_VRINGS];
	int status;
	unsigned int i, role;

	rdev = &rvdev->rdev;
	memset(rdev, 0, sizeof(*rdev));
	metal_mutex_init(&rdev->lock);
	rvdev->vdev = vdev;
	rdev->ns_bind_cb = ns_bind_cb;
	vdev->priv = rvdev;
	rdev->ops.send_offchannel_raw = rpmsg_virtio_send_offchannel_raw;
	role = rpmsg_virtio_get_role(rvdev);

#ifndef VIRTIO_MASTER_ONLY
	if (role == RPMSG_REMOTE) {
		/* wait synchro with the master */
		rpmsg_virtio_wait_remote_ready(rvdev);
	}
#endif /*!VIRTIO_MASTER_ONLY*/
	vdev->features = rpmsg_virtio_get_features(rvdev);
	rdev->support_ns = !!(vdev->features & (1 << VIRTIO_RPMSG_F_NS));

#ifndef VIRTIO_SLAVE_ONLY
	if (role == RPMSG_MASTER) {
		/*
		 * We are the RPMSG master, so we manage the shared buffers.
		 * Create a shared memory pool to handle them.
		 */
		if (!shpool)
			return RPMSG_ERR_PARAM;
		if (!shpool->size)
			return RPMSG_ERR_NO_BUFF;
		rvdev->shpool = shpool;

		vq_names[0] = "rx_vq";
		vq_names[1] = "tx_vq";
		callback[0] = rpmsg_virtio_rx_callback;
		callback[1] = rpmsg_virtio_tx_callback;
		rvdev->rvq  = vdev->vrings_info[0].vq;
		rvdev->svq  = vdev->vrings_info[1].vq;
	}
#endif /*!VIRTIO_SLAVE_ONLY*/

#ifndef VIRTIO_MASTER_ONLY
	(void)shpool;
	if (role == RPMSG_REMOTE) {
		vq_names[0] = "tx_vq";
		vq_names[1] = "rx_vq";
		callback[0] = rpmsg_virtio_tx_callback;
		callback[1] = rpmsg_virtio_rx_callback;
		rvdev->rvq  = vdev->vrings_info[1].vq;
		rvdev->svq  = vdev->vrings_info[0].vq;
	}
#endif /*!VIRTIO_MASTER_ONLY*/
	rvdev->shbuf_io = shm_io;

	/* Create virtqueues for remote device */
	status = rpmsg_virtio_create_virtqueues(rvdev, 0, RPMSG_NUM_VRINGS,
						vq_names, callback);
	if (status != RPMSG_SUCCESS)
		return status;

	/*
	 * Suppress "tx-complete" interrupts, since the send method
	 * uses a busy loop when the buffer pool is exhausted.
	 */
	virtqueue_disable_cb(rvdev->svq);

	/* TODO: can have a virtio function to set the shared memory I/O */
	for (i = 0; i < RPMSG_NUM_VRINGS; i++) {
		struct virtqueue *vq;

		vq = vdev->vrings_info[i].vq;
		vq->shm_io = shm_io;
	}

#ifndef VIRTIO_SLAVE_ONLY
	if (role == RPMSG_MASTER) {
		struct virtqueue_buf vqbuf;
		unsigned int idx;
		void *buffer;

		vqbuf.len = RPMSG_BUFFER_SIZE;
		for (idx = 0; idx < rvdev->rvq->vq_nentries; idx++) {
			/* Initialize TX virtqueue buffers for remote device */
			buffer = rpmsg_virtio_shm_pool_get_buffer(shpool,
							RPMSG_BUFFER_SIZE);

			if (!buffer) {
				return RPMSG_ERR_NO_BUFF;
			}

			vqbuf.buf = buffer;

			metal_io_block_set(shm_io,
					   metal_io_virt_to_offset(shm_io,
								   buffer),
					   0x00, RPMSG_BUFFER_SIZE);
			status =
				virtqueue_add_buffer(rvdev->rvq, &vqbuf, 0, 1,
						     buffer);

			if (status != RPMSG_SUCCESS) {
				return status;
			}
		}
	}
#endif /*!VIRTIO_SLAVE_ONLY*/

	/* Initialize channels and endpoints list */
	metal_list_init(&rdev->endpoints);

	/*
	 * Create name service announcement endpoint if device supports name
	 * service announcement feature.
	 */
	if (rdev->support_ns) {
		rpmsg_init_ept(&rdev->ns_ept, "NS",
			       RPMSG_NS_EPT_ADDR, RPMSG_NS_EPT_ADDR,
			       rpmsg_virtio_ns_callback, NULL);
		rpmsg_register_endpoint(rdev, &rdev->ns_ept);
	}

#ifndef VIRTIO_SLAVE_ONLY
	if (role == RPMSG_MASTER)
		rpmsg_virtio_set_status(rvdev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
#endif /*!VIRTIO_SLAVE_ONLY*/

	return status;
}
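With "tx-complete" interrupts suppressed, the transmit path has to reclaim used buffers by polling the send queue itself. A hedged sketch of that reclaim step, assuming open-amp's virtqueue_get_buffer() accessor with its (vq, &len, &idx) signature; the helper name is illustrative:

/* Poll the send queue for a buffer the remote has consumed;
 * returns NULL immediately if none has been returned yet. */
static void *reclaim_tx_buffer(struct rpmsg_virtio_device *rvdev)
{
	uint32_t len;
	uint16_t idx;

	return virtqueue_get_buffer(rvdev->svq, &len, &idx);
}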
Example #11
static int rpmsg_probe(struct virtio_device *vdev)
{
	vq_callback_t *vq_cbs[] = { rpmsg_recv_done, rpmsg_xmit_done };
	const char *names[] = { "input", "output" };
	struct virtqueue *vqs[2];
	struct virtproc_info *vrp;
	void *addr;
	int err, i, num_bufs, buf_size, total_buf_size;
	struct rpmsg_channel_info *ch;

	vrp = kzalloc(sizeof(*vrp), GFP_KERNEL);
	if (!vrp)
		return -ENOMEM;

	vrp->vdev = vdev;

	idr_init(&vrp->endpoints);
	spin_lock_init(&vrp->endpoints_lock);
	mutex_init(&vrp->svq_lock);
	mutex_init(&vrp->rm_lock);
	init_waitqueue_head(&vrp->sendq);

	/* We expect two virtqueues, rx and tx (in this order) */
	err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, names);
	if (err)
		goto free_vi;

	vrp->rvq = vqs[0];
	vrp->svq = vqs[1];

	/* Platform must supply pre-allocated uncached buffers for now */
	vdev->config->get(vdev, VPROC_BUF_ADDR, &addr, sizeof(addr));
	vdev->config->get(vdev, VPROC_BUF_NUM, &num_bufs,
							sizeof(num_bufs));
	vdev->config->get(vdev, VPROC_BUF_SZ, &buf_size, sizeof(buf_size));

	total_buf_size = num_bufs * buf_size;

	dev_dbg(&vdev->dev, "%d buffers, size %d, addr 0x%x, total 0x%x\n",
		num_bufs, buf_size, (unsigned int) addr, total_buf_size);

	vrp->num_bufs = num_bufs;
	vrp->buf_size = buf_size;
	vrp->rbufs = addr;
	vrp->sbufs = addr + total_buf_size / 2;

	/* simulated addr base to make virt_to_page happy */
	vdev->config->get(vdev, VPROC_SIM_BASE, &vrp->sim_base,
							sizeof(vrp->sim_base));

	/* set up the receive buffers */
	for (i = 0; i < num_bufs / 2; i++) {
		struct scatterlist sg;
		void *tmpaddr = vrp->rbufs + i * buf_size;
		void *simaddr = vrp->sim_base + i * buf_size;

		sg_init_one(&sg, simaddr, buf_size);
		err = virtqueue_add_buf_gfp(vrp->rvq, &sg, 0, 1, tmpaddr,
								GFP_KERNEL);
		WARN_ON(err < 0); /* sanity check; this can't really happen */
	}

	/* tell the remote processor it can start sending data */
	virtqueue_kick(vrp->rvq);

	/* suppress "tx-complete" interrupts */
	virtqueue_disable_cb(vrp->svq);

	vdev->priv = vrp;

	dev_info(&vdev->dev, "rpmsg backend virtproc probed successfully\n");

	/* if supported by the remote processor, enable the name service */
	if (virtio_has_feature(vdev, VIRTIO_RPMSG_F_NS)) {
		vrp->ns_ept = __rpmsg_create_ept(vrp, NULL, rpmsg_ns_cb,
						vrp, RPMSG_NS_ADDR);
		if (!vrp->ns_ept) {
			dev_err(&vdev->dev, "failed to create the ns ept\n");
			err = -ENOMEM;
			goto vqs_del;
		}
	}

	/* look for platform-specific static channels */
	vdev->config->get(vdev, VPROC_STATIC_CHANNELS, &ch, sizeof(ch));

	for (i = 0; ch && ch[i].name[0]; i++)
		rpmsg_create_channel(vrp, &ch[i]);

	return 0;

vqs_del:
	vdev->config->del_vqs(vrp->vdev);
free_vi:
	kfree(vrp);
	return err;
}
Example #12
/* XXX: the blocking 'wait' mechanism hasn't been tested yet */
int rpmsg_send_offchannel_raw(struct rpmsg_channel *rpdev, u32 src, u32 dst,
					void *data, int len, bool wait)
{
	struct virtproc_info *vrp = rpdev->vrp;
	struct device *dev = &rpdev->dev;
	struct scatterlist sg;
	struct rpmsg_hdr *msg;
	int err;
	unsigned long offset;
	void *sim_addr;

	if (src == RPMSG_ADDR_ANY || dst == RPMSG_ADDR_ANY) {
		dev_err(dev, "invalid addr (src 0x%x, dst 0x%x)\n", src, dst);
		return -EINVAL;
	}

	/* the payload's size is currently limited */
	if (len > vrp->buf_size - sizeof(struct rpmsg_hdr)) {
		dev_err(dev, "message is too big (%d)\n", len);
		return -EMSGSIZE;
	}

	/*
	 * protect svq from simultaneous concurrent manipulations,
	 * and serialize the sending of messages
	 */
	if (mutex_lock_interruptible(&vrp->svq_lock))
		return -ERESTARTSYS;
	/* grab a buffer */
	msg = get_a_buf(vrp);
	if (!msg && !wait) {
		err = -ENOMEM;
		goto out;
	}

	/* no free buffer? wait for one (but bail out after 15 seconds) */
	if (!msg) {
		/* enable "tx-complete" interrupts before dozing off */
		virtqueue_enable_cb(vrp->svq);

		/*
		 * Sleep until a free buffer is available or 15 secs elapse.
		 * The timeout period is not configurable because frankly
		 * I don't see why drivers need to deal with that.
		 * If later this happens to be required, it'd be easy to add.
		 */
		err = wait_event_interruptible_timeout(vrp->sendq,
					(msg = get_a_buf(vrp)),
					msecs_to_jiffies(15000));

		/* on success, suppress "tx-complete" interrupts again */
		virtqueue_disable_cb(vrp->svq);

		if (err < 0) {
			err = -ERESTARTSYS;
			goto out;
		}

		if (!msg) {
			dev_err(dev, "timeout waiting for buffer\n");
			err = -ETIMEDOUT;
			goto out;
		}
	}

	msg->len = len;
	msg->flags = 0;
	msg->src = src;
	msg->dst = dst;
	msg->unused = 0;
	memcpy(msg->data, data, len);

	dev_dbg(dev, "TX From 0x%x, To 0x%x, Len %d, Flags %d, Unused %d\n",
					msg->src, msg->dst, msg->len,
					msg->flags, msg->unused);
#if 0
	print_hex_dump(KERN_DEBUG, "rpmsg_virtio TX: ", DUMP_PREFIX_NONE, 16, 1,
					msg, sizeof(*msg) + msg->len, true);
#endif

	offset = ((unsigned long) msg) - ((unsigned long) vrp->rbufs);
	sim_addr = vrp->sim_base + offset;
	sg_init_one(&sg, sim_addr, sizeof(*msg) + len);

	/* add message to the remote processor's virtqueue */
	err = virtqueue_add_buf_gfp(vrp->svq, &sg, 1, 0, msg, GFP_KERNEL);
	if (err < 0) {
		dev_err(dev, "virtqueue_add_buf_gfp failed: %d\n", err);
		goto out;
	}
	/* descriptors must be written before kicking remote processor */
	wmb();

	/* tell the remote processor it has a pending message to read */
	virtqueue_kick(vrp->svq);

	err = 0;
out:
	mutex_unlock(&vrp->svq_lock);
	return err;
}
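The slow path above is the classic "enable, re-check, sleep" ordering that avoids a lost wakeup between the failed get_a_buf() and the wait. Condensed into a hedged helper (get_a_buf and the 15 s timeout are taken from the function above; error handling elided):

static struct rpmsg_hdr *wait_for_tx_buf(struct virtproc_info *vrp)
{
	struct rpmsg_hdr *msg = NULL;

	/* Arm "tx-complete" interrupts first... */
	virtqueue_enable_cb(vrp->svq);

	/* ...then re-check and sleep: a completion arriving after the
	 * enable above fires the callback, which wakes vrp->sendq. */
	wait_event_interruptible_timeout(vrp->sendq,
					 (msg = get_a_buf(vrp)),
					 msecs_to_jiffies(15000));

	/* Back to polling mode either way */
	virtqueue_disable_cb(vrp->svq);
	return msg;
}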
Example #13
void vhost_vq_setup(struct vdev_info *dev, struct vq_info *info)
{
	struct vhost_vring_state state = { .index = info->idx };
	struct vhost_vring_file file = { .index = info->idx };
	unsigned long long features = dev->vdev.features[0];
	struct vhost_vring_addr addr = {
		.index = info->idx,
		.desc_user_addr = (uint64_t)(unsigned long)info->vring.desc,
		.avail_user_addr = (uint64_t)(unsigned long)info->vring.avail,
		.used_user_addr = (uint64_t)(unsigned long)info->vring.used,
	};
	int r;
	r = ioctl(dev->control, VHOST_SET_FEATURES, &features);
	assert(r >= 0);
	state.num = info->vring.num;
	r = ioctl(dev->control, VHOST_SET_VRING_NUM, &state);
	assert(r >= 0);
	state.num = 0;
	r = ioctl(dev->control, VHOST_SET_VRING_BASE, &state);
	assert(r >= 0);
	r = ioctl(dev->control, VHOST_SET_VRING_ADDR, &addr);
	assert(r >= 0);
	file.fd = info->kick;
	r = ioctl(dev->control, VHOST_SET_VRING_KICK, &file);
	assert(r >= 0);
	file.fd = info->call;
	r = ioctl(dev->control, VHOST_SET_VRING_CALL, &file);
	assert(r >= 0);
}

static void vq_info_add(struct vdev_info *dev, int num)
{
	struct vq_info *info = &dev->vqs[dev->nvqs];
	int r;
	info->idx = dev->nvqs;
	info->kick = eventfd(0, EFD_NONBLOCK);
	info->call = eventfd(0, EFD_NONBLOCK);
	r = posix_memalign(&info->ring, 4096, vring_size(num, 4096));
	assert(r >= 0);
	memset(info->ring, 0, vring_size(num, 4096));
	vring_init(&info->vring, num, info->ring, 4096);
	info->vq = vring_new_virtqueue(info->vring.num, 4096, &dev->vdev, info->ring,
				       vq_notify, vq_callback, "test");
	assert(info->vq);
	info->vq->priv = info;
	vhost_vq_setup(dev, info);
	dev->fds[info->idx].fd = info->call;
	dev->fds[info->idx].events = POLLIN;
	dev->nvqs++;
}

static void vdev_info_init(struct vdev_info* dev, unsigned long long features)
{
	int r;
	memset(dev, 0, sizeof *dev);
	dev->vdev.features[0] = features;
	dev->vdev.features[1] = features >> 32;
	dev->buf_size = 1024;
	dev->buf = malloc(dev->buf_size);
	assert(dev->buf);
	dev->control = open("/dev/vhost-test", O_RDWR);
	assert(dev->control >= 0);
	r = ioctl(dev->control, VHOST_SET_OWNER, NULL);
	assert(r >= 0);
	dev->mem = malloc(offsetof(struct vhost_memory, regions) +
			  sizeof dev->mem->regions[0]);
	assert(dev->mem);
	memset(dev->mem, 0, offsetof(struct vhost_memory, regions) +
			  sizeof dev->mem->regions[0]);
	dev->mem->nregions = 1;
	dev->mem->regions[0].guest_phys_addr = (long)dev->buf;
	dev->mem->regions[0].userspace_addr = (long)dev->buf;
	dev->mem->regions[0].memory_size = dev->buf_size;
	r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
	assert(r >= 0);
}

/* TODO: this is pretty bad: we get a cache line bounce
 * for the wait queue on poll and another one on read,
 * plus the read which is there just to clear the
 * current state. */
static void wait_for_interrupt(struct vdev_info *dev)
{
	int i;
	unsigned long long val;
	poll(dev->fds, dev->nvqs, -1);
	for (i = 0; i < dev->nvqs; ++i)
		if (dev->fds[i].revents & POLLIN) {
			read(dev->fds[i].fd, &val, sizeof val);
		}
}

static void run_test(struct vdev_info *dev, struct vq_info *vq, int bufs)
{
	struct scatterlist sl;
	long started = 0, completed = 0;
	long completed_before;
	int r, test = 1;
	unsigned len;
	long long spurious = 0;
	r = ioctl(dev->control, VHOST_TEST_RUN, &test);
	assert(r >= 0);
	for (;;) {
		virtqueue_disable_cb(vq->vq);
		completed_before = completed;
		do {
			if (started < bufs) {
				sg_init_one(&sl, dev->buf, dev->buf_size);
				r = virtqueue_add_buf(vq->vq, &sl, 1, 0,
						      dev->buf + started);
				if (likely(r >= 0)) {
					++started;
					virtqueue_kick(vq->vq);
				}
			} else
				r = -1;

			/* Flush out completed bufs if any */
			if (virtqueue_get_buf(vq->vq, &len)) {
				++completed;
				r = 0;
			}

		} while (r >= 0);
		if (completed == completed_before)
			++spurious;
		assert(completed <= bufs);
		assert(started <= bufs);
		if (completed == bufs)
			break;
		if (virtqueue_enable_cb(vq->vq)) {
			wait_for_interrupt(dev);
		}
	}
	test = 0;
	r = ioctl(dev->control, VHOST_TEST_RUN, &test);
	assert(r >= 0);
	fprintf(stderr, "spurious wakeups: 0x%llx\n", spurious);
}

const char optstring[] = "h";
const struct option longopts[] = {
	{
		.name = "help",
		.val = 'h',
	},
	{
		.name = "event-idx",
Example #14
static int rpmsg_probe(struct virtio_device *vdev)
{
	vq_callback_t *vq_cbs[] = { rpmsg_recv_done, rpmsg_xmit_done };
	const char *names[] = { "input", "output" };
	struct virtqueue *vqs[2];
	struct virtproc_info *vrp;
	void *bufs_va;
	int err = 0, i, vproc_id;

	vrp = kzalloc(sizeof(*vrp), GFP_KERNEL);
	if (!vrp)
		return -ENOMEM;

	vrp->vdev = vdev;

	idr_init(&vrp->endpoints);
	mutex_init(&vrp->endpoints_lock);
	mutex_init(&vrp->tx_lock);
	init_waitqueue_head(&vrp->sendq);

	if (!idr_pre_get(&vprocs, GFP_KERNEL)) {
		err = -ENOMEM;
		goto free_vrp;
	}

	mutex_lock(&vprocs_mutex);

	err = idr_get_new(&vprocs, vrp, &vproc_id);

	mutex_unlock(&vprocs_mutex);

	if (err) {
		dev_err(&vdev->dev, "idr_get_new failed: %d\n", err);
		goto free_vrp;
	}

	vrp->id = vproc_id;

	/* We expect two virtqueues, rx and tx (and in this order) */
	err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, names);
	if (err)
		goto rem_idr;

	vrp->rvq = vqs[0];
	vrp->svq = vqs[1];

	/* allocate coherent memory for the buffers */
	bufs_va = dma_alloc_coherent(vdev->dev.parent->parent,
				RPMSG_TOTAL_BUF_SPACE,
				&vrp->bufs_dma, GFP_KERNEL);
	if (!bufs_va) {
		err = -ENOMEM;
		goto vqs_del;
	}

	dev_dbg(&vdev->dev, "buffers: va %p, dma 0x%llx\n", bufs_va,
					(unsigned long long)vrp->bufs_dma);

	/* half of the buffers are dedicated to RX */
	vrp->rbufs = bufs_va;

	/* and the other half to TX */
	vrp->sbufs = bufs_va + RPMSG_TOTAL_BUF_SPACE / 2;

	/* set up the receive buffers */
	for (i = 0; i < RPMSG_NUM_BUFS / 2; i++) {
		struct scatterlist sg;
		void *cpu_addr = vrp->rbufs + i * RPMSG_BUF_SIZE;

		sg_init_one(&sg, cpu_addr, RPMSG_BUF_SIZE);

		err = virtqueue_add_buf(vrp->rvq, &sg, 0, 1, cpu_addr,
								GFP_KERNEL);
		WARN_ON(err); /* sanity check; this can't really happen */
	}

	/* suppress "tx-complete" interrupts */
	virtqueue_disable_cb(vrp->svq);

	vdev->priv = vrp;

	/* if supported by the remote processor, enable the name service */
	if (virtio_has_feature(vdev, VIRTIO_RPMSG_F_NS)) {
		/* a dedicated endpoint handles the name service msgs */
		vrp->ns_ept = __rpmsg_create_ept(vrp, NULL, rpmsg_ns_cb,
						vrp, RPMSG_NS_ADDR);
		if (!vrp->ns_ept) {
			dev_err(&vdev->dev, "failed to create the ns ept\n");
			err = -ENOMEM;
			goto free_coherent;
		}
	}

	/* tell the remote processor it can start sending messages */
	virtqueue_kick(vrp->rvq);

	dev_info(&vdev->dev, "rpmsg host is online\n");

	return 0;

free_coherent:
	dma_free_coherent(vdev->dev.parent->parent, RPMSG_TOTAL_BUF_SPACE,
					bufs_va, vrp->bufs_dma);
vqs_del:
	vdev->config->del_vqs(vrp->vdev);
rem_idr:
	mutex_lock(&vprocs_mutex);
	idr_remove(&vprocs, vproc_id);
	mutex_unlock(&vprocs_mutex);
free_vrp:
	kfree(vrp);
	return err;
}
Example #15
static int rpmsg_probe(struct virtio_device *vdev)
{
	vq_callback_t *vq_cbs[] = { rpmsg_recv_done, rpmsg_xmit_done };
	static const char * const names[] = { "input", "output" };
	struct virtqueue *vqs[2];
	struct virtproc_info *vrp;
	void *bufs_va;
	int err = 0, i;
	size_t total_buf_space;
	bool notify;

	vrp = kzalloc(sizeof(*vrp), GFP_KERNEL);
	if (!vrp)
		return -ENOMEM;

	vrp->vdev = vdev;

	idr_init(&vrp->endpoints);
	mutex_init(&vrp->endpoints_lock);
	mutex_init(&vrp->tx_lock);
	init_waitqueue_head(&vrp->sendq);

	/* We expect two virtqueues, rx and tx (and in this order) */
	err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, names);
	if (err)
		goto free_vrp;

	vrp->rvq = vqs[0];
	vrp->svq = vqs[1];

	/* we expect symmetric tx/rx vrings */
	WARN_ON(virtqueue_get_vring_size(vrp->rvq) !=
		virtqueue_get_vring_size(vrp->svq));

	/* we need fewer buffers if the vrings are small */
	if (virtqueue_get_vring_size(vrp->rvq) < MAX_RPMSG_NUM_BUFS / 2)
		vrp->num_bufs = virtqueue_get_vring_size(vrp->rvq) * 2;
	else
		vrp->num_bufs = MAX_RPMSG_NUM_BUFS;

	total_buf_space = vrp->num_bufs * RPMSG_BUF_SIZE;

	/* allocate coherent memory for the buffers */
	bufs_va = dma_alloc_coherent(vdev->dev.parent->parent,
				     total_buf_space, &vrp->bufs_dma,
				     GFP_KERNEL);
	if (!bufs_va) {
		err = -ENOMEM;
		goto vqs_del;
	}

	dev_dbg(&vdev->dev, "buffers: va %p, dma 0x%llx\n", bufs_va,
					(unsigned long long)vrp->bufs_dma);

	/* half of the buffers are dedicated to RX */
	vrp->rbufs = bufs_va;

	/* and the other half to TX */
	vrp->sbufs = bufs_va + total_buf_space / 2;

	/* set up the receive buffers */
	for (i = 0; i < vrp->num_bufs / 2; i++) {
		struct scatterlist sg;
		void *cpu_addr = vrp->rbufs + i * RPMSG_BUF_SIZE;

		rpmsg_msg_sg_init(vrp, &sg, cpu_addr, RPMSG_BUF_SIZE);

		err = rpmsg_virtqueue_add_inbuf(vrp->rvq, &sg, 1, cpu_addr,
								GFP_KERNEL);
		WARN_ON(err); /* sanity check; this can't really happen */
	}

	/* suppress "tx-complete" interrupts */
	virtqueue_disable_cb(vrp->svq);

	vdev->priv = vrp;

	/* if supported by the remote processor, enable the name service */
	if (virtio_has_feature(vdev, VIRTIO_RPMSG_F_NS)) {
		/* a dedicated endpoint handles the name service msgs */
		vrp->ns_ept = __rpmsg_create_ept(vrp, NULL, rpmsg_ns_cb,
						vrp, RPMSG_NS_ADDR);
		if (!vrp->ns_ept) {
			dev_err(&vdev->dev, "failed to create the ns ept\n");
			err = -ENOMEM;
			goto free_coherent;
		}
	}

	/*
	 * Prepare to kick but don't notify yet - we can't do this before
	 * device is ready.
	 */
	notify = virtqueue_kick_prepare(vrp->rvq);

	/* From this point on, we can notify and get callbacks. */
	virtio_device_ready(vdev);

	/* tell the remote processor it can start sending messages */
	/*
	 * this might be concurrent with callbacks, but we are only
	 * doing notify, not a full kick here, so that's ok.
	 */
	if (notify)
		virtqueue_notify(vrp->rvq);

	dev_info(&vdev->dev, "rpmsg host is online\n");

	return 0;

free_coherent:
	dma_free_coherent(vdev->dev.parent->parent, total_buf_space,
			  bufs_va, vrp->bufs_dma);
vqs_del:
	vdev->config->del_vqs(vrp->vdev);
free_vrp:
	kfree(vrp);
	return err;
}
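The kick_prepare/notify split above is virtqueue_kick() pulled apart so that virtio_device_ready() can run in between; for reference, the virtio core composes the two pieces roughly like this (paraphrased from drivers/virtio/virtio_ring.c):

bool virtqueue_kick(struct virtqueue *vq)
{
	/* Only notify the other side if event-index/avail-flag
	 * suppression says it actually wants a notification. */
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}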
Example #16
static int rpmsg_probe(struct virtio_device *vdev)
{
	vq_callback_t *vq_cbs[] = { rpmsg_recv_done, rpmsg_xmit_done };
	const char *names[] = { "input", "output" };
	struct virtqueue *vqs[2];
	struct virtproc_info *vrp;
	struct rproc *vrp_rproc;
	void *bufs_va;
	void *cpu_addr; /* buffer virtual address */
	void *cpu_addr_dma; /* buffer DMA address converted to a virtual address */
	void *rbufs_guest_addr_kva;
	int err = 0, i;
	size_t total_buf_space;

	vrp = kzalloc(sizeof(*vrp), GFP_KERNEL);
	if (!vrp)
		return -ENOMEM;

	vrp->vdev = vdev;

	idr_init(&vrp->endpoints);
	mutex_init(&vrp->endpoints_lock);
	mutex_init(&vrp->tx_lock);
	init_waitqueue_head(&vrp->sendq);

	/* We expect two virtqueues, rx and tx (and in this order) */
	err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, names);
	if (err)
		goto free_vrp;

	vrp->rvq = vqs[0];
	vrp->svq = vqs[1];

	/* we expect symmetric tx/rx vrings */
	WARN_ON(virtqueue_get_vring_size(vrp->rvq) !=
		virtqueue_get_vring_size(vrp->svq));

	/* we need fewer buffers if the vrings are small */
	if (virtqueue_get_vring_size(vrp->rvq) < MAX_RPMSG_NUM_BUFS / 2)
		vrp->num_bufs = virtqueue_get_vring_size(vrp->rvq) * 2;
	else
		vrp->num_bufs = MAX_RPMSG_NUM_BUFS;

	total_buf_space = vrp->num_bufs * RPMSG_BUF_SIZE;

	/* allocate coherent memory for the buffers */
	bufs_va = dma_alloc_coherent(vdev->dev.parent->parent,
				     total_buf_space, &vrp->bufs_dma,
				     GFP_KERNEL);
	if (!bufs_va) {
		err = -ENOMEM;
		goto vqs_del;
	}

	dev_dbg(&vdev->dev, "buffers: va %p, dma 0x%llx\n", bufs_va,
					(unsigned long long)vrp->bufs_dma);

	/* half of the buffers are dedicated to RX */
	vrp->rbufs = bufs_va;

	/* and the other half to TX */
	vrp->sbufs = bufs_va + total_buf_space / 2;

	vrp_rproc = vdev_to_rproc(vdev);
	rbufs_guest_addr_kva = vrp->rbufs;
	if (vrp_rproc->ops->kva_to_guest_addr_kva) {
		rbufs_guest_addr_kva = vrp_rproc->ops->kva_to_guest_addr_kva(vrp_rproc, vrp->rbufs, vrp->rvq);
	}
	/* set up the receive buffers */
	for (i = 0; i < vrp->num_bufs / 2; i++) {
		struct scatterlist sg;
		cpu_addr = vrp->rbufs + i * RPMSG_BUF_SIZE;
		cpu_addr_dma = rbufs_guest_addr_kva + i * RPMSG_BUF_SIZE;

		sg_init_one(&sg, cpu_addr_dma, RPMSG_BUF_SIZE);

		err = virtqueue_add_inbuf(vrp->rvq, &sg, 1, cpu_addr,
								GFP_KERNEL);
		WARN_ON(err); /* sanity check; this can't really happen */
	}

	/* suppress "tx-complete" interrupts */
	virtqueue_disable_cb(vrp->svq);

	vdev->priv = vrp;

	/* if supported by the remote processor, enable the name service */
	if (virtio_has_feature(vdev, VIRTIO_RPMSG_F_NS)) {
		/* a dedicated endpoint handles the name service msgs */
		vrp->ns_ept = __rpmsg_create_ept(vrp, NULL, rpmsg_ns_cb,
						vrp, RPMSG_NS_ADDR);
		if (!vrp->ns_ept) {
			dev_err(&vdev->dev, "failed to create the ns ept\n");
			err = -ENOMEM;
			goto free_coherent;
		}
	}

	/* tell the remote processor it can start sending messages */
	virtqueue_kick(vrp->rvq);

	dev_info(&vdev->dev, "rpmsg host is online\n");

	return 0;

free_coherent:
	dma_free_coherent(vdev->dev.parent->parent, total_buf_space,
			  bufs_va, vrp->bufs_dma);
vqs_del:
	vdev->config->del_vqs(vrp->vdev);
free_vrp:
	kfree(vrp);
	return err;
}
Example #17
/* Set up CAIF for a virtio device */
static int cfv_probe(struct virtio_device *vdev)
{
	vq_callback_t *vq_cbs = cfv_release_cb;
	vrh_callback_t *vrh_cbs = cfv_recv;
	const char *names =  "output";
	const char *cfv_netdev_name = "cfvrt";
	struct net_device *netdev;
	struct cfv_info *cfv;
	int err = -EINVAL;

	netdev = alloc_netdev(sizeof(struct cfv_info), cfv_netdev_name,
			      cfv_netdev_setup);
	if (!netdev)
		return -ENOMEM;

	cfv = netdev_priv(netdev);
	cfv->vdev = vdev;
	cfv->ndev = netdev;

	spin_lock_init(&cfv->tx_lock);

	/* Get the RX virtio ring. This is a "host side vring". */
	err = -ENODEV;
	if (!vdev->vringh_config || !vdev->vringh_config->find_vrhs)
		goto err;

	err = vdev->vringh_config->find_vrhs(vdev, 1, &cfv->vr_rx, &vrh_cbs);
	if (err)
		goto err;

	/* Get the TX virtio ring. This is a "guest side vring". */
	err = vdev->config->find_vqs(vdev, 1, &cfv->vq_tx, &vq_cbs, &names);
	if (err)
		goto err;

	/* Get the CAIF configuration from virtio config space, if available */
#define GET_VIRTIO_CONFIG_OPS(_v, _var, _f) \
	((_v)->config->get(_v, offsetof(struct virtio_caif_transf_config, _f), \
			   &_var, \
			   FIELD_SIZEOF(struct virtio_caif_transf_config, _f)))

	if (vdev->config->get) {
		GET_VIRTIO_CONFIG_OPS(vdev, cfv->tx_hr, headroom);
		GET_VIRTIO_CONFIG_OPS(vdev, cfv->rx_hr, headroom);
		GET_VIRTIO_CONFIG_OPS(vdev, cfv->tx_tr, tailroom);
		GET_VIRTIO_CONFIG_OPS(vdev, cfv->rx_tr, tailroom);
		GET_VIRTIO_CONFIG_OPS(vdev, cfv->mtu, mtu);
		GET_VIRTIO_CONFIG_OPS(vdev, cfv->mru, mtu);
	} else {
		cfv->tx_hr = CFV_DEF_HEADROOM;
		cfv->rx_hr = CFV_DEF_HEADROOM;
		cfv->tx_tr = CFV_DEF_TAILROOM;
		cfv->rx_tr = CFV_DEF_TAILROOM;
		cfv->mtu = CFV_DEF_MTU_SIZE;
		cfv->mru = CFV_DEF_MTU_SIZE;
	}

	netdev->needed_headroom = cfv->tx_hr;
	netdev->needed_tailroom = cfv->tx_tr;

	/* Disable buffer release interrupts unless we have stopped TX queues */
	virtqueue_disable_cb(cfv->vq_tx);

	netdev->mtu = cfv->mtu - cfv->tx_tr;
	vdev->priv = cfv;

	/* Initialize NAPI poll context data */
	vringh_kiov_init(&cfv->ctx.riov, NULL, 0);
	cfv->ctx.head = USHRT_MAX;
	netif_napi_add(netdev, &cfv->napi, cfv_rx_poll, CFV_DEFAULT_QUOTA);

	tasklet_init(&cfv->tx_release_tasklet,
		     cfv_tx_release_tasklet,
		     (unsigned long)cfv);

	/* Carrier is off until netdevice is opened */
	netif_carrier_off(netdev);

	/* register Netdev */
	err = register_netdev(netdev);
	if (err) {
		dev_err(&vdev->dev, "Unable to register netdev (%d)\n", err);
		goto err;
	}

	debugfs_init(cfv);

	return 0;
err:
	netdev_warn(cfv->ndev, "CAIF Virtio probe failed: %d\n", err);

	if (cfv->vr_rx)
		vdev->vringh_config->del_vrhs(cfv->vdev);
	if (cfv->vdev)
		vdev->config->del_vqs(cfv->vdev);
	free_netdev(netdev);
	return err;
}
Example #18
static int rpmsg_probe(struct virtio_device *vdev)
{
	vq_callback_t *vq_cbs[] = { rpmsg_recv_done, rpmsg_xmit_done };
	const char *names[] = { "input", "output" };
	struct virtqueue *vqs[2];
	struct virtproc_info *vrp;
	void *bufs_va;
	int err = 0, i;

	vrp = kzalloc(sizeof(*vrp), GFP_KERNEL);
	if (!vrp)
		return -ENOMEM;

	vrp->vdev = vdev;

	idr_init(&vrp->endpoints);
	mutex_init(&vrp->endpoints_lock);
	mutex_init(&vrp->tx_lock);
	init_waitqueue_head(&vrp->sendq);

	/* We expect two virtqueues, rx and tx (and in this order) */
	err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, names);
	if (err)
		goto free_vrp;

	vrp->rvq = vqs[0];
	vrp->svq = vqs[1];

	/* allocate coherent memory for the buffers */
	bufs_va = dma_alloc_coherent(vdev->dev.parent, RPMSG_TOTAL_BUF_SPACE,
				&vrp->bufs_dma, GFP_KERNEL);
	if (!bufs_va) {
		err = -ENOMEM;
		goto vqs_del;
	}

	dev_dbg(&vdev->dev, "buffers: va %p, dma 0x%llx\n", bufs_va,
					(unsigned long long)vrp->bufs_dma);

	/* half of the buffers are dedicated to RX */
	vrp->rbufs = bufs_va;

	/* and the other half to TX */
	vrp->sbufs = bufs_va + RPMSG_TOTAL_BUF_SPACE / 2;

	/* set up the receive buffers */
	for (i = 0; i < RPMSG_NUM_BUFS / 2; i++) {
		struct scatterlist sg;
		void *cpu_addr = vrp->rbufs + i * RPMSG_BUF_SIZE;

		sg_init_one(&sg, cpu_addr, RPMSG_BUF_SIZE);

		err = virtqueue_add_buf(vrp->rvq, &sg, 0, 1, cpu_addr,
								GFP_KERNEL);
		WARN_ON(err < 0); /* sanity check; this can't really happen */
	}

	/* suppress "tx-complete" interrupts */
	virtqueue_disable_cb(vrp->svq);

	vdev->priv = vrp;

	/* if supported by the remote processor, enable the name service */
	if (virtio_has_feature(vdev, VIRTIO_RPMSG_F_NS)) {
		/* a dedicated endpoint handles the name service msgs */
		vrp->ns_ept = __rpmsg_create_ept(vrp, NULL, rpmsg_ns_cb,
						vrp, RPMSG_NS_ADDR);
		if (!vrp->ns_ept) {
			dev_err(&vdev->dev, "failed to create the ns ept\n");
			err = -ENOMEM;
			goto free_coherent;
		}
	}

	/* tell the remote processor it can start sending messages */
	virtqueue_kick(vrp->rvq);

	dev_info(&vdev->dev, "rpmsg host is online\n");

	return 0;

free_coherent:
	dma_free_coherent(vdev->dev.parent, RPMSG_TOTAL_BUF_SPACE, bufs_va,
					vrp->bufs_dma);
vqs_del:
	vdev->config->del_vqs(vrp->vdev);
free_vrp:
	kfree(vrp);
	return err;
}