Example #1
static void virtblk_done(struct virtqueue *vq)
{
	struct virtio_blk *vblk = vq->vdev->priv;
	bool req_done = false;
	int qid = vq->index;
	struct virtblk_req *vbr;
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
			blk_mq_complete_request(vbr->req, vbr->req->errors);
			req_done = true;
		}
		if (unlikely(virtqueue_is_broken(vq)))
			break;
	} while (!virtqueue_enable_cb(vq));

	/* In case queue is stopped waiting for more buffers. */
	if (req_done)
		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}
Example #2
/**
 * rpmsg_upref_sleepers() - enable "tx-complete" interrupts, if needed
 * @vrp: virtual remote processor state
 *
 * This function is called before a sender is blocked, waiting for
 * a tx buffer to become available.
 *
 * If we already have blocking senders, this function merely increases
 * the "sleepers" reference count, and exits.
 *
 * Otherwise, if this is the first sender to block, we also enable
 * virtio's tx callbacks, so we'd be immediately notified when a tx
 * buffer is consumed (we rely on virtio's tx callback in order
 * to wake up sleeping senders as soon as a tx buffer is used by the
 * remote processor).
 */
static void rpmsg_upref_sleepers(struct virtproc_info *vrp)
{
	/* support multiple concurrent senders */
	mutex_lock(&vrp->tx_lock);

	/* are we the first sleeping context waiting for tx buffers ? */
	if (atomic_inc_return(&vrp->sleepers) == 1)
		/* enable "tx-complete" interrupts before dozing off */
		virtqueue_enable_cb(vrp->svq);

	mutex_unlock(&vrp->tx_lock);
}
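The kernel-doc above describes only the ramp-up half of the pattern. For completeness, here is a sketch of the matching teardown helper and of the tx callback that wakes the sleepers, modeled on the same virtio_rpmsg_bus driver (a simplified illustration, not necessarily verbatim):

/* the last sender to give up waiting disables "tx-complete" interrupts again */
static void rpmsg_downref_sleepers(struct virtproc_info *vrp)
{
	/* support multiple concurrent senders */
	mutex_lock(&vrp->tx_lock);

	/* are we the last sleeping context waiting for tx buffers ? */
	if (atomic_dec_and_test(&vrp->sleepers))
		/* disable "tx-complete" interrupts */
		virtqueue_disable_cb(vrp->svq);

	mutex_unlock(&vrp->tx_lock);
}

/* tx callback: the remote processor consumed a buffer, wake up any waiters */
static void rpmsg_xmit_done(struct virtqueue *svq)
{
	struct virtproc_info *vrp = svq->vdev->priv;

	/* wake up potential senders that are blocked waiting for a tx buffer */
	wake_up_interruptible(&vrp->sendq);
}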
Example #4
NTSTATUS VioCryptInterruptEnable(IN WDFINTERRUPT Interrupt, IN WDFDEVICE wdfDevice)
{
    PDEVICE_CONTEXT context = GetDeviceContext(wdfDevice);
    UNREFERENCED_PARAMETER(Interrupt);

    virtqueue_enable_cb(context->ControlQueue);
    virtqueue_kick(context->ControlQueue);

    Trace(TRACE_LEVEL_VERBOSE, "[%s]", __FUNCTION__);

    return STATUS_SUCCESS;
}
Example #5
NTSTATUS VirtRngEvtInterruptEnable(IN WDFINTERRUPT Interrupt,
                                   IN WDFDEVICE AssociatedDevice)
{
    PDEVICE_CONTEXT context = GetDeviceContext(
        WdfInterruptGetDevice(Interrupt));

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_INTERRUPT,
        "--> %!FUNC! Interrupt: %p Device: %p",
        Interrupt, AssociatedDevice);

    virtqueue_enable_cb(context->VirtQueue);
    virtqueue_kick(context->VirtQueue);

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_INTERRUPT, "<-- %!FUNC!");

    return STATUS_SUCCESS;
}
Example #6
/* XXX: the blocking 'wait' mechanism hasn't been tested yet */
int rpmsg_send_offchannel_raw(struct rpmsg_channel *rpdev, u32 src, u32 dst,
					void *data, int len, bool wait)
{
	struct virtproc_info *vrp = rpdev->vrp;
	struct device *dev = &rpdev->dev;
	struct scatterlist sg;
	struct rpmsg_hdr *msg;
	int err;
	unsigned long offset;
	void *sim_addr;

	if (src == RPMSG_ADDR_ANY || dst == RPMSG_ADDR_ANY) {
		dev_err(dev, "invalid addr (src 0x%x, dst 0x%x)\n", src, dst);
		return -EINVAL;
	}

	/* the payload's size is currently limited */
	if (len > vrp->buf_size - sizeof(struct rpmsg_hdr)) {
		dev_err(dev, "message is too big (%d)\n", len);
		return -EMSGSIZE;
	}

	/*
	 * protect svq from simultaneous concurrent manipulations,
	 * and serialize the sending of messages
	 */
	if (mutex_lock_interruptible(&vrp->svq_lock))
		return -ERESTARTSYS;
	/* grab a buffer */
	msg = get_a_buf(vrp);
	if (!msg && !wait) {
		err = -ENOMEM;
		goto out;
	}

	/* no free buffer ? wait for one (but bail after 15 seconds) */
	if (!msg) {
		/* enable "tx-complete" interrupts before dozing off */
		virtqueue_enable_cb(vrp->svq);

		/*
		 * sleep until a free buffer is available or 15 secs elapse.
		 * the timeout period is not configurable because frankly
		 * i don't see why drivers need to deal with that.
		 * if later this happens to be required, it'd be easy to add.
		 */
		err = wait_event_interruptible_timeout(vrp->sendq,
					(msg = get_a_buf(vrp)),
					msecs_to_jiffies(15000));

		/* on success, suppress "tx-complete" interrupts again */
		virtqueue_disable_cb(vrp->svq);

		if (err < 0) {
			err = -ERESTARTSYS;
			goto out;
		}

		if (!msg) {
			dev_err(dev, "timeout waiting for buffer\n");
			err = -ETIMEDOUT;
			goto out;
		}
	}

	msg->len = len;
	msg->flags = 0;
	msg->src = src;
	msg->dst = dst;
	msg->unused = 0;
	memcpy(msg->data, data, len);

	dev_dbg(dev, "TX From 0x%x, To 0x%x, Len %d, Flags %d, Unused %d\n",
					msg->src, msg->dst, msg->len,
					msg->flags, msg->unused);
#if 0
	print_hex_dump(KERN_DEBUG, "rpmsg_virtio TX: ", DUMP_PREFIX_NONE, 16, 1,
					msg, sizeof(*msg) + msg->len, true);
#endif

	offset = ((unsigned long) msg) - ((unsigned long) vrp->rbufs);
	sim_addr = vrp->sim_base + offset;
	sg_init_one(&sg, sim_addr, sizeof(*msg) + len);

	/* add message to the remote processor's virtqueue */
	err = virtqueue_add_buf_gfp(vrp->svq, &sg, 1, 0, msg, GFP_KERNEL);
	if (err < 0) {
		dev_err(dev, "virtqueue_add_buf_gfp failed: %d\n", err);
		goto out;
	}
	/* descriptors must be written before kicking remote processor */
	wmb();

	/* tell the remote processor it has a pending message to read */
	virtqueue_kick(vrp->svq);

	err = 0;
out:
	mutex_unlock(&vrp->svq_lock);
	return err;
}
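Callers normally do not invoke rpmsg_send_offchannel_raw() directly; in the classic rpmsg_channel-based API it is reached through thin wrappers that pick the blocking or non-blocking path. A sketch of what those wrappers look like (simplified, assuming the rpmsg_channel fields used in the function above):

/* blocking send on the channel's default source and destination addresses */
static inline int rpmsg_send(struct rpmsg_channel *rpdev, void *data, int len)
{
	u32 src = rpdev->src, dst = rpdev->dst;

	return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, true);
}

/* non-blocking variant: fails with -ENOMEM instead of waiting for a buffer */
static inline int rpmsg_trysend(struct rpmsg_channel *rpdev, void *data, int len)
{
	u32 src = rpdev->src, dst = rpdev->dst;

	return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, false);
}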
Example #7
void vhost_vq_setup(struct vdev_info *dev, struct vq_info *info)
{
	struct vhost_vring_state state = { .index = info->idx };
	struct vhost_vring_file file = { .index = info->idx };
	unsigned long long features = dev->vdev.features[0];
	struct vhost_vring_addr addr = {
		.index = info->idx,
		.desc_user_addr = (uint64_t)(unsigned long)info->vring.desc,
		.avail_user_addr = (uint64_t)(unsigned long)info->vring.avail,
		.used_user_addr = (uint64_t)(unsigned long)info->vring.used,
	};
	int r;
	r = ioctl(dev->control, VHOST_SET_FEATURES, &features);
	assert(r >= 0);
	state.num = info->vring.num;
	r = ioctl(dev->control, VHOST_SET_VRING_NUM, &state);
	assert(r >= 0);
	state.num = 0;
	r = ioctl(dev->control, VHOST_SET_VRING_BASE, &state);
	assert(r >= 0);
	r = ioctl(dev->control, VHOST_SET_VRING_ADDR, &addr);
	assert(r >= 0);
	file.fd = info->kick;
	r = ioctl(dev->control, VHOST_SET_VRING_KICK, &file);
	assert(r >= 0);
	file.fd = info->call;
	r = ioctl(dev->control, VHOST_SET_VRING_CALL, &file);
	assert(r >= 0);
}

static void vq_info_add(struct vdev_info *dev, int num)
{
	struct vq_info *info = &dev->vqs[dev->nvqs];
	int r;
	info->idx = dev->nvqs;
	info->kick = eventfd(0, EFD_NONBLOCK);
	info->call = eventfd(0, EFD_NONBLOCK);
	r = posix_memalign(&info->ring, 4096, vring_size(num, 4096));
	assert(r >= 0);
	memset(info->ring, 0, vring_size(num, 4096));
	vring_init(&info->vring, num, info->ring, 4096);
	info->vq = vring_new_virtqueue(info->vring.num, 4096, &dev->vdev, info->ring,
				       vq_notify, vq_callback, "test");
	assert(info->vq);
	info->vq->priv = info;
	vhost_vq_setup(dev, info);
	dev->fds[info->idx].fd = info->call;
	dev->fds[info->idx].events = POLLIN;
	dev->nvqs++;
}

static void vdev_info_init(struct vdev_info* dev, unsigned long long features)
{
	int r;
	memset(dev, 0, sizeof *dev);
	dev->vdev.features[0] = features;
	dev->vdev.features[1] = features >> 32;
	dev->buf_size = 1024;
	dev->buf = malloc(dev->buf_size);
	assert(dev->buf);
        dev->control = open("/dev/vhost-test", O_RDWR);
	assert(dev->control >= 0);
	r = ioctl(dev->control, VHOST_SET_OWNER, NULL);
	assert(r >= 0);
	dev->mem = malloc(offsetof(struct vhost_memory, regions) +
			  sizeof dev->mem->regions[0]);
	assert(dev->mem);
	memset(dev->mem, 0, offsetof(struct vhost_memory, regions) +
                          sizeof dev->mem->regions[0]);
	dev->mem->nregions = 1;
	dev->mem->regions[0].guest_phys_addr = (long)dev->buf;
	dev->mem->regions[0].userspace_addr = (long)dev->buf;
	dev->mem->regions[0].memory_size = dev->buf_size;
	r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
	assert(r >= 0);
}

/* TODO: this is pretty bad: we get a cache line bounce
 * for the wait queue on poll and another one on read,
 * plus the read which is there just to clear the
 * current state. */
static void wait_for_interrupt(struct vdev_info *dev)
{
	int i;
	unsigned long long val;
	poll(dev->fds, dev->nvqs, -1);
	for (i = 0; i < dev->nvqs; ++i)
		if (dev->fds[i].revents & POLLIN) {
			read(dev->fds[i].fd, &val, sizeof val);
		}
}

static void run_test(struct vdev_info *dev, struct vq_info *vq, int bufs)
{
	struct scatterlist sl;
	long started = 0, completed = 0;
	long completed_before;
	int r, test = 1;
	unsigned len;
	long long spurious = 0;
	r = ioctl(dev->control, VHOST_TEST_RUN, &test);
	assert(r >= 0);
	for (;;) {
		virtqueue_disable_cb(vq->vq);
		completed_before = completed;
		do {
			if (started < bufs) {
				sg_init_one(&sl, dev->buf, dev->buf_size);
				r = virtqueue_add_buf(vq->vq, &sl, 1, 0,
						      dev->buf + started);
				if (likely(r >= 0)) {
					++started;
					virtqueue_kick(vq->vq);
				}
			} else
				r = -1;

			/* Flush out completed bufs if any */
			if (virtqueue_get_buf(vq->vq, &len)) {
				++completed;
				r = 0;
			}

		} while (r >= 0);
		if (completed == completed_before)
			++spurious;
		assert(completed <= bufs);
		assert(started <= bufs);
		if (completed == bufs)
			break;
		if (virtqueue_enable_cb(vq->vq)) {
			wait_for_interrupt(dev);
		}
	}
	test = 0;
	r = ioctl(dev->control, VHOST_TEST_RUN, &test);
	assert(r >= 0);
	fprintf(stderr, "spurious wakeus: 0x%llx\n", spurious);
}

const char optstring[] = "h";
const struct option longopts[] = {
	{
		.name = "help",
		.val = 'h',
	},
	{
		.name = "event-idx",
Example #8
/* Put the CAIF packet on the virtio ring and kick the receiver */
static int cfv_netdev_tx(struct sk_buff *skb, struct net_device *netdev)
{
	struct cfv_info *cfv = netdev_priv(netdev);
	struct buf_info *buf_info;
	struct scatterlist sg;
	unsigned long flags;
	bool flow_off = false;
	int ret;

	/* garbage collect released buffers */
	cfv_release_used_buf(cfv->vq_tx);
	spin_lock_irqsave(&cfv->tx_lock, flags);

	/* Flow-off check takes into account number of cpus to make sure
	 * virtqueue will not be overfilled in any possible smp conditions.
	 *
	 * Flow-on is triggered when sufficient buffers are freed
	 */
	if (unlikely(cfv->vq_tx->num_free <= num_present_cpus())) {
		flow_off = true;
		cfv->stats.tx_full_ring++;
	}

	/* If we run out of memory, we release the memory reserve and retry
	 * allocation.
	 */
	buf_info = cfv_alloc_and_copy_to_shm(cfv, skb, &sg);
	if (unlikely(!buf_info)) {
		cfv->stats.tx_no_mem++;
		flow_off = true;

		if (cfv->reserved_mem && cfv->genpool) {
			gen_pool_free(cfv->genpool,  cfv->reserved_mem,
				      cfv->reserved_size);
			cfv->reserved_mem = 0;
			buf_info = cfv_alloc_and_copy_to_shm(cfv, skb, &sg);
		}
	}

	if (unlikely(flow_off)) {
		/* Turn flow on when a 1/4 of the descriptors are released */
		cfv->watermark_tx = virtqueue_get_vring_size(cfv->vq_tx) / 4;
		/* Enable notifications of recycled TX buffers */
		virtqueue_enable_cb(cfv->vq_tx);
		netif_tx_stop_all_queues(netdev);
	}

	if (unlikely(!buf_info)) {
		/* If the memory reserve does its job, this shouldn't happen */
		netdev_warn(cfv->ndev, "Out of gen_pool memory\n");
		goto err;
	}

	ret = virtqueue_add_outbuf(cfv->vq_tx, &sg, 1, buf_info, GFP_ATOMIC);
	if (unlikely(ret < 0)) {
		/* If flow control works, this shouldn't happen */
		netdev_warn(cfv->ndev, "Failed adding buffer to TX vring:%d\n",
			    ret);
		goto err;
	}

	/* update netdev statistics */
	cfv->ndev->stats.tx_packets++;
	cfv->ndev->stats.tx_bytes += skb->len;
	spin_unlock_irqrestore(&cfv->tx_lock, flags);

	/* tell the remote processor it has a pending message to read */
	virtqueue_kick(cfv->vq_tx);

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
err:
	spin_unlock_irqrestore(&cfv->tx_lock, flags);
	cfv->ndev->stats.tx_dropped++;
	free_buf_info(cfv, buf_info);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
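The flow-off comment in cfv_netdev_tx() above has a counterpart in the buffer-release path: once enough descriptors have been recycled, the driver wakes the tx queues again and suppresses the notifications it enabled on flow-off. A condensed sketch of that flow-on side (the helper name cfv_release_used_buf matches the call at the top of the function, but the body below is a simplified illustration that omits the memory-reserve handling):

/* reclaim used tx buffers and turn flow back on when enough slots are free */
static void cfv_release_used_buf(struct virtqueue *vq_tx)
{
	struct cfv_info *cfv = vq_tx->vdev->priv;
	unsigned long flags;

	for (;;) {
		struct buf_info *buf_info;
		unsigned int len;

		/* pop a recycled buffer from the used ring */
		spin_lock_irqsave(&cfv->tx_lock, flags);
		buf_info = virtqueue_get_buf(vq_tx, &len);
		spin_unlock_irqrestore(&cfv->tx_lock, flags);

		if (!buf_info)
			break;

		free_buf_info(cfv, buf_info);

		/* not enough free descriptors yet: keep waiting for completions */
		if (cfv->vq_tx->num_free <= cfv->watermark_tx)
			continue;

		/* enough descriptors released: reopen the tx queues and stop
		 * the "recycled buffer" notifications enabled on flow-off
		 */
		cfv->watermark_tx = virtqueue_get_vring_size(cfv->vq_tx);
		netif_tx_wake_all_queues(cfv->ndev);
		virtqueue_disable_cb(cfv->vq_tx);
	}
}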
Example #9
static int virtio_hwkey_probe(struct virtio_device *vdev)
{
    int ret = 0;
    vqidx = 0;

    printk(KERN_INFO "virtio hwkey driver is probed\n");

    /* init virtio */
    vdev->priv = vh = kmalloc(sizeof(*vh), GFP_KERNEL);
    if (!vh) {
        return -ENOMEM;
    }
    memset(&vh->vbuf, 0x00, sizeof(vh->vbuf));

    vh->vdev = vdev;

    vh->vq = virtio_find_single_vq(vh->vdev, vq_hwkey_callback, "virtio-hwkey-vq");
    if (IS_ERR(vh->vq)) {
        ret = PTR_ERR(vh->vq);

        kfree(vh);
        vdev->priv = NULL;
        return ret;
    }

    /* enable callback */
    virtqueue_enable_cb(vh->vq);

    sg_init_table(vh->sg, MAX_BUF_COUNT);

    /* prepare the buffers */
    for (index = 0; index < MAX_BUF_COUNT; index++) {
        sg_set_buf(&vh->sg[index], &vh->vbuf[index], sizeof(EmulHwkeyEvent));
    }

    /* expose the receive buffers to the host */
    err = virtqueue_add_buf(vh->vq, vh->sg, 0,
            MAX_BUF_COUNT, (void *)MAX_BUF_COUNT, GFP_ATOMIC);
    if (err < 0) {
        printk(KERN_ERR "failed to add buffer\n");
        ret = err;

        kfree(vh);
        vdev->priv = NULL;
        return ret;
    }

    /* register for input device */
    vh->idev = input_allocate_device();
    if (!vh->idev) {
        printk(KERN_ERR "failed to allocate a input hwkey device\n");
        ret = -1;

        kfree(vh);
        vdev->priv = NULL;
        return ret;
    }

    vh->idev->name = "Maru Virtio Hwkey";
    vh->idev->dev.parent = &(vdev->dev);

    input_set_drvdata(vh->idev, vh);
    vh->idev->open = input_hwkey_open;
    vh->idev->close = input_hwkey_close;

    vh->idev->evbit[0] = BIT_MASK(EV_KEY);
    /* to support any keycode */
    memset(vh->idev->keybit, 0xffffffff, sizeof(unsigned long) * BITS_TO_LONGS(KEY_CNT));

    ret = input_register_device(vh->idev);
    if (ret) {
        printk(KERN_ERR "input hwkey driver cannot registered\n");
        ret = -1;

        input_free_device(vh->idev);
        kfree(vh);
        vdev->priv = NULL;
        return ret;
    }

    virtqueue_kick(vh->vq);
    index = 0;

    return 0;
}