Example #1
/**
 * virtqueue_create - Creates a new VirtIO queue
 *
 * @param virt_dev  - Pointer to VirtIO device
 * @param id        - VirtIO queue ID; must be unique
 * @param name      - Name of the VirtIO queue
 * @param ring      - Pointer to vring_alloc_info control block
 * @param callback  - Pointer to callback function, invoked
 *                    when a message is available on the VirtIO queue
 * @param notify    - Pointer to notify function, used to notify
 *                    the other side that there is work available for it
 * @param v_queue   - Pointer used to return the created VirtIO queue
 *
 * @return          - Function status (VQUEUE_SUCCESS or an error code)
 */
int virtqueue_create(struct virtio_device *virt_dev, unsigned short id,
		     char *name, struct vring_alloc_info *ring,
		     void (*callback)(struct virtqueue *vq),
		     void (*notify)(struct virtqueue *vq),
		     struct virtqueue **v_queue)
{

	struct virtqueue *vq = VQ_NULL;
	int status = VQUEUE_SUCCESS;
	uint32_t vq_size = 0;

	VQ_PARAM_CHK(ring == VQ_NULL, status, ERROR_VQUEUE_INVLD_PARAM);
	VQ_PARAM_CHK(ring->num_descs == 0, status, ERROR_VQUEUE_INVLD_PARAM);
	VQ_PARAM_CHK(ring->num_descs & (ring->num_descs - 1), status,
		     ERROR_VRING_ALIGN);

	//TODO: Error check for indirect buffer addition

	if (status == VQUEUE_SUCCESS) {

		vq_size = sizeof(struct virtqueue)
		    + (ring->num_descs) * sizeof(struct vq_desc_extra);
		vq = (struct virtqueue *)env_allocate_memory(vq_size);

		if (vq == VQ_NULL) {
			return (ERROR_NO_MEM);
		}

		env_memset(vq, 0x00, vq_size);

		vq->vq_dev = virt_dev;
		env_strncpy(vq->vq_name, name, VIRTQUEUE_MAX_NAME_SZ);
		vq->vq_queue_index = id;
		vq->vq_alignment = ring->align;
		vq->vq_nentries = ring->num_descs;
		vq->vq_free_cnt = vq->vq_nentries;
		vq->callback = callback;
		vq->notify = notify;

		//TODO: Decide whether we want to support indirect buffer addition or not.
		vq->vq_ring_size = vring_size(ring->num_descs, ring->align);
		vq->vq_ring_mem = (void *)ring->phy_addr;

		/* Initialize vring control block in virtqueue. */
		vq_ring_init(vq);

		/* Disable callbacks - will be enabled by the application
		 * once initialization is completed.
		 */
		virtqueue_disable_cb(vq);

		*v_queue = vq;

		//TODO: Add cleanup in case of error when used with indirect buffer addition.
		//TODO: Do we need to save the new queue in a DB based on its ID?
	}

	return (status);
}
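
A minimal usage sketch for virtqueue_create(), assuming an OpenAMP-style environment: the handler names, queue ID, queue name and the final enable call are illustrative, and ring_info is assumed to have been filled by platform code with the shared ring's physical address, alignment and power-of-two descriptor count.

/* Hypothetical handlers; names and bodies are placeholders. */
static void example_callback(struct virtqueue *vq)
{
	/* A message is available on the queue; process or schedule work. */
	(void)vq;
}

static void example_notify(struct virtqueue *vq)
{
	/* Notify the other side, e.g. via a mailbox or doorbell register. */
	(void)vq;
}

/* ring_info is assumed to be filled by platform code with the shared ring's
 * physical address, alignment and (power-of-two) descriptor count. */
static int example_create_queue(struct virtio_device *vdev,
				struct vring_alloc_info *ring_info,
				struct virtqueue **vq)
{
	int status;

	status = virtqueue_create(vdev, 0, "example_vq", ring_info,
				  example_callback, example_notify, vq);
	if (status != VQUEUE_SUCCESS)
		return status;	/* e.g. ERROR_VQUEUE_INVLD_PARAM, ERROR_NO_MEM */

	/* Callbacks were disabled by virtqueue_create(); re-enable them once
	 * application-level initialization is done (virtqueue_enable_cb is
	 * assumed to be the counterpart of virtqueue_disable_cb above). */
	virtqueue_enable_cb(*vq);

	return VQUEUE_SUCCESS;
}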
Example #2
int
virtqueue_reinit(struct virtqueue *vq, uint16_t size)
{
	struct vq_desc_extra *dxp;
	int i;

	if (vq->vq_nentries != size) {
		device_printf(vq->vq_dev,
		    "%s: '%s' changed size; old=%hu, new=%hu\n",
		    __func__, vq->vq_name, vq->vq_nentries, size);
		return (EINVAL);
	}

	/* Warn if the virtqueue was not properly cleaned up. */
	if (vq->vq_free_cnt != vq->vq_nentries) {
		device_printf(vq->vq_dev,
		    "%s: warning, '%s' virtqueue not empty, "
		    "leaking %d entries\n", __func__, vq->vq_name,
		    vq->vq_nentries - vq->vq_free_cnt);
	}

	vq->vq_desc_head_idx = 0;
	vq->vq_used_cons_idx = 0;
	vq->vq_queued_cnt = 0;
	vq->vq_free_cnt = vq->vq_nentries;

	/* To be safe, reset all our allocated memory. */
	bzero(vq->vq_ring_mem, vq->vq_ring_size);
	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];
		dxp->cookie = NULL;
		dxp->ndescs = 0;
		if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
			virtqueue_init_indirect_list(vq, dxp->indirect);
	}

	vq_ring_init(vq);
	virtqueue_disable_intr(vq);

	return (0);
}
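
A brief usage sketch for virtqueue_reinit(), e.g. from a device reset path; the softc layout, its field names and the re-enable step are illustrative and not taken from the example above.

/* Hypothetical driver state; only the fields used below are sketched. */
struct example_softc {
	struct virtqueue *exsc_vq;
	uint16_t          exsc_vq_size;	/* size used for the original allocation */
};

static int
example_reset_queue(struct example_softc *sc)
{
	int error;

	/* The size must match the original allocation, or EINVAL is returned. */
	error = virtqueue_reinit(sc->exsc_vq, sc->exsc_vq_size);
	if (error)
		return (error);

	/* virtqueue_reinit() leaves interrupts disabled; re-enable them once
	 * the driver is ready (virtqueue_enable_intr is assumed to exist as
	 * the counterpart of the virtqueue_disable_intr call above). */
	virtqueue_enable_intr(sc->exsc_vq);

	return (0);
}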
Example #3
int
virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size, int align,
    vm_paddr_t highaddr, struct vq_alloc_info *info, struct virtqueue **vqp)
{
	struct virtqueue *vq;
	int error;

	*vqp = NULL;
	error = 0;

	if (size == 0) {
		device_printf(dev,
		    "virtqueue %d (%s) does not exist (size is zero)\n",
		    queue, info->vqai_name);
		return (ENODEV);
	} else if (!powerof2(size)) {
		device_printf(dev,
		    "virtqueue %d (%s) size is not a power of 2: %d\n",
		    queue, info->vqai_name, size);
		return (ENXIO);
	} else if (info->vqai_maxindirsz > VIRTIO_MAX_INDIRECT) {
		device_printf(dev, "virtqueue %d (%s) requested too many "
		    "indirect descriptors: %d, max %d\n",
		    queue, info->vqai_name, info->vqai_maxindirsz,
		    VIRTIO_MAX_INDIRECT);
		return (EINVAL);
	}

	vq = kmalloc(sizeof(struct virtqueue) +
	    size * sizeof(struct vq_desc_extra), M_DEVBUF, M_INTWAIT | M_ZERO);
	if (vq == NULL) {
		device_printf(dev, "cannot allocate virtqueue\n");
		return (ENOMEM);
	}

	vq->vq_dev = dev;
	strlcpy(vq->vq_name, info->vqai_name, sizeof(vq->vq_name));
	vq->vq_queue_index = queue;
	vq->vq_alignment = align;
	vq->vq_nentries = size;
	vq->vq_free_cnt = size;
	vq->vq_intrhand = info->vqai_intr;
	vq->vq_intrhand_arg = info->vqai_intr_arg;

	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_EVENT_IDX) != 0)
		vq->vq_flags |= VIRTQUEUE_FLAG_EVENT_IDX;

	if (info->vqai_maxindirsz > 1) {
		error = virtqueue_init_indirect(vq, info->vqai_maxindirsz);
		if (error)
			goto fail;
	}

	vq->vq_ring_size = round_page(vring_size(size, align));
	vq->vq_ring_mem = contigmalloc(vq->vq_ring_size, M_DEVBUF,
	    M_WAITOK | M_ZERO, 0, highaddr, PAGE_SIZE, 0);
	if (vq->vq_ring_mem == NULL) {
		device_printf(dev,
		    "cannot allocate memory for virtqueue ring\n");
		error = ENOMEM;
		goto fail;
	}

	vq_ring_init(vq);
	virtqueue_disable_intr(vq);

	*vqp = vq;

fail:
	if (error)
		virtqueue_free(vq);

	return (error);
}
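
A minimal allocation sketch against this API; the interrupt handler and its signature, the queue index, entry count, alignment, high address bound, and the use of the FreeBSD-style VQ_ALLOC_INFO_INIT helper are all assumptions made for illustration, not part of the example above.

/* Hypothetical queue interrupt handler; the void (*)(void *) signature is
 * assumed to match virtqueue_intr_t in the FreeBSD-derived virtqueue.h. */
static void
example_vq_intr(void *arg)
{
	/* Drain completed buffers for the device instance passed in arg. */
	(void)arg;
}

static int
example_alloc_queue(device_t dev, struct virtqueue **vqp)
{
	struct vq_alloc_info info;

	/* Fill the fields consumed by virtqueue_alloc(); VQ_ALLOC_INFO_INIT
	 * is assumed to be available, and 0 requests no indirect descriptors. */
	VQ_ALLOC_INFO_INIT(&info, 0, example_vq_intr, dev, vqp,
	    "%s example", device_get_nameunit(dev));

	/* Queue index 0, 256 entries (a power of 2), page-aligned ring, and
	 * no restriction on the ring's physical address (assumed values). */
	return (virtqueue_alloc(dev, 0, 256, PAGE_SIZE, BUS_SPACE_MAXADDR,
	    &info, vqp));
}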