Example 1
/**
 * virtqueue_add_buffer()   - Enqueues a new buffer in the vring for
 *                            consumption by the other side. Readable
 *                            buffers are always inserted before the
 *                            writable buffers.
 *
 * @param vq                - Pointer to the VirtIO queue control block
 * @param buffer            - Pointer to the buffer list
 * @param readable          - Number of readable buffers
 * @param writable          - Number of writable buffers
 * @param cookie            - Pointer to callback data
 *
 * @return                  - Function status
 */
int virtqueue_add_buffer(struct virtqueue *vq, struct llist *buffer,
			 int readable, int writable, void *cookie)
{

	struct vq_desc_extra *dxp = VQ_NULL;
	int status = VQUEUE_SUCCESS;
	uint16_t head_idx;
	uint16_t idx;
	int needed;

	needed = readable + writable;

	VQ_PARAM_CHK(vq == VQ_NULL, status, ERROR_VQUEUE_INVLD_PARAM);
	VQ_PARAM_CHK(needed < 1, status, ERROR_VQUEUE_INVLD_PARAM);
	VQ_PARAM_CHK(vq->vq_free_cnt < needed, status, ERROR_VRING_FULL);

	//TODO: Add parameter validation for indirect buffer addition

	VQUEUE_BUSY(vq);

	if (status == VQUEUE_SUCCESS) {

		//TODO: Indirect buffer addition support

		VQASSERT(vq, cookie != VQ_NULL, "enqueuing with no cookie");

		head_idx = vq->vq_desc_head_idx;
		VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
		dxp = &vq->vq_descx[head_idx];

		VQASSERT(vq, (dxp->cookie == VQ_NULL),
			 "cookie already exists for index");

		dxp->cookie = cookie;
		dxp->ndescs = needed;

		/* Enqueue buffer onto the ring. */
		idx = vq_ring_add_buffer(vq, vq->vq_ring.desc, head_idx, buffer,
					 readable, writable);

		vq->vq_desc_head_idx = idx;
		vq->vq_free_cnt -= needed;

		if (vq->vq_free_cnt == 0) {
			VQ_RING_ASSERT_CHAIN_TERM(vq);
		} else {
			VQ_RING_ASSERT_VALID_IDX(vq, idx);
		}

		/*
		 * Update the vring_avail control block fields so that the
		 * other side can retrieve the buffer.
		 */
		vq_ring_update_avail(vq, head_idx);
	}

	VQUEUE_IDLE(vq);

	return (status);
}
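
A minimal usage sketch (not part of the original source), assuming the OpenAMP-style struct llist whose data, attr, and next fields are used above; the request/response buffers and the send_request() wrapper are hypothetical:

static int send_request(struct virtqueue *vq, void *cookie,
			void *req, uint32_t req_len,
			void *resp, uint32_t resp_len)
{
	/* Writable response node terminates the list. */
	struct llist resp_node = {
		.data = resp, .attr = resp_len, .next = VQ_NULL,
	};
	/* Readable request node comes first, as the API requires. */
	struct llist req_node = {
		.data = req, .attr = req_len, .next = &resp_node,
	};

	/* One readable buffer followed by one writable buffer. */
	return virtqueue_add_buffer(vq, &req_node, 1, 1, cookie);
}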
Example 2
int
virtqueue_enqueue(struct virtqueue *vq, void *cookie, struct sglist *sg,
    int readable, int writable)
{
	struct vq_desc_extra *dxp;
	int needed;
	uint16_t head_idx, idx;

	needed = readable + writable;

	VQASSERT(vq, cookie != NULL, "enqueuing with no cookie");
	VQASSERT(vq, needed == sg->sg_nseg,
	    "segment count mismatch, %d, %d", needed, sg->sg_nseg);
	VQASSERT(vq,
	    needed <= vq->vq_nentries || needed <= vq->vq_max_indirect_size,
	    "too many segments to enqueue: %d, %d/%d", needed,
	    vq->vq_nentries, vq->vq_max_indirect_size);

	if (needed < 1)
		return (EINVAL);
	if (vq->vq_free_cnt == 0)
		return (ENOSPC);

	if (vq_ring_use_indirect(vq, needed)) {
		vq_ring_enqueue_indirect(vq, cookie, sg, readable, writable);
		return (0);
	} else if (vq->vq_free_cnt < needed)
		return (EMSGSIZE);

	head_idx = vq->vq_desc_head_idx;
	VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
	dxp = &vq->vq_descx[head_idx];

	VQASSERT(vq, dxp->cookie == NULL,
	    "cookie already exists for index %d", head_idx);
	dxp->cookie = cookie;
	dxp->ndescs = needed;

	idx = vq_ring_enqueue_segments(vq, vq->vq_ring.desc, head_idx,
	    sg, readable, writable);

	vq->vq_desc_head_idx = idx;
	vq->vq_free_cnt -= needed;
	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);
	else
		VQ_RING_ASSERT_VALID_IDX(vq, idx);

	vq_ring_update_avail(vq, head_idx);

	return (0);
}
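
A usage sketch (not part of the original file), assuming FreeBSD's sglist(9) API (sglist_init()/sglist_append()); the request/response buffers and the enqueue_request() wrapper are hypothetical:

static int
enqueue_request(struct virtqueue *vq, void *cookie, void *req, size_t req_len,
    void *resp, size_t resp_len)
{
	struct sglist_seg segs[2];
	struct sglist sg;
	int error;

	/* Gather both buffers into a two-segment scatter/gather list. */
	sglist_init(&sg, 2, segs);
	error = sglist_append(&sg, req, req_len);
	if (error == 0)
		error = sglist_append(&sg, resp, resp_len);
	if (error != 0)
		return (error);

	/* Readable segments must precede the writable ones. */
	return (virtqueue_enqueue(vq, cookie, &sg, 1, 1));
}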
Example 3
static void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
	struct vring_desc *dp;
	struct vq_desc_extra *dxp;

	VQ_RING_ASSERT_VALID_IDX(vq, desc_idx);
	dp = &vq->vq_ring.desc[desc_idx];
	dxp = &vq->vq_descx[desc_idx];

	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);

	vq->vq_free_cnt += dxp->ndescs;
	dxp->ndescs--;

	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
		/* Walk a direct chain to find its tail descriptor. */
		while (dp->flags & VRING_DESC_F_NEXT) {
			VQ_RING_ASSERT_VALID_IDX(vq, dp->next);
			dp = &vq->vq_ring.desc[dp->next];
			dxp->ndescs--;
		}
	}
	VQASSERT(vq, dxp->ndescs == 0, "failed to free entire desc chain");

	/*
	 * We must append the existing free chain, if any, to the end of
	 * newly freed chain. If the virtqueue was completely used, then
	 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
	 */
	dp->next = vq->vq_desc_head_idx;
	vq->vq_desc_head_idx = desc_idx;
}
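
A standalone illustration (not from the original source) of the splice performed above, modeling descriptors as a bare array of next indices; all names here are hypothetical:

#include <assert.h>
#include <stdint.h>

#define CHAIN_END UINT16_MAX

static uint16_t free_head = CHAIN_END;
static uint16_t next_idx[8];

/* Push a freed chain [head..tail] onto the front of the free list. */
static void free_chain(uint16_t head, uint16_t tail)
{
	next_idx[tail] = free_head;	/* old free list follows the tail */
	free_head = head;		/* freed head becomes the new head */
}

int main(void)
{
	next_idx[3] = 5;		/* a two-descriptor chain: 3 -> 5 */
	free_chain(3, 5);
	free_chain(0, 0);		/* a single-descriptor chain */

	/* Free list is now 0 -> 3 -> 5 -> CHAIN_END (LIFO order). */
	assert(free_head == 0);
	assert(next_idx[0] == 3 && next_idx[3] == 5);
	assert(next_idx[5] == CHAIN_END);
	return 0;
}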
Example 4
static uint16_t
vq_ring_enqueue_segments(struct virtqueue *vq, struct vring_desc *desc,
    uint16_t head_idx, struct sglist *sg, int readable, int writable)
{
	struct sglist_seg *seg;
	struct vring_desc *dp;
	int i, needed;
	uint16_t idx;

	needed = readable + writable;

	for (i = 0, idx = head_idx, seg = sg->sg_segs;
	     i < needed;
	     i++, idx = dp->next, seg++) {
		VQASSERT(vq, idx != VQ_RING_DESC_CHAIN_END,
		    "premature end of free desc chain");

		dp = &desc[idx];
		dp->addr = seg->ss_paddr;
		dp->len = seg->ss_len;
		dp->flags = 0;

		/* Chain every descriptor except the last. */
		if (i < needed - 1)
			dp->flags |= VRING_DESC_F_NEXT;
		/* Device-writable segments follow the readable ones. */
		if (i >= readable)
			dp->flags |= VRING_DESC_F_WRITE;
	}

	return (idx);
}
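
A standalone check (not from the original source) of the flag logic above: every descriptor except the last gets the NEXT flag, and positions at or past `readable` get the WRITE flag. The local F_NEXT/F_WRITE macros stand in for the vring constants:

#include <assert.h>
#include <stdint.h>

#define F_NEXT	0x1
#define F_WRITE	0x2

int main(void)
{
	const int readable = 2, writable = 1;
	const int needed = readable + writable;
	uint16_t flags[3] = { 0 };
	int i;

	for (i = 0; i < needed; i++) {
		if (i < needed - 1)
			flags[i] |= F_NEXT;	/* chain to the next desc */
		if (i >= readable)
			flags[i] |= F_WRITE;	/* device-writable segment */
	}

	assert(flags[0] == F_NEXT);		/* readable, chained */
	assert(flags[1] == F_NEXT);		/* readable, chained */
	assert(flags[2] == F_WRITE);		/* writable, chain tail */
	return 0;
}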
Example 5
void *
virtqueue_dequeue(struct virtqueue *vq, uint32_t *len)
{
	struct vring_used_elem *uep;
	void *cookie;
	uint16_t used_idx, desc_idx;

	if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
		return (NULL);

	used_idx = vq->vq_used_cons_idx++ & (vq->vq_nentries - 1);
	uep = &vq->vq_ring.used->ring[used_idx];

	/* Read the used element only after observing the updated idx. */
	cpu_mfence();
	desc_idx = (uint16_t) uep->id;
	if (len != NULL)
		*len = uep->len;

	vq_ring_free_chain(vq, desc_idx);

	cookie = vq->vq_descx[desc_idx].cookie;
	VQASSERT(vq, cookie != NULL, "no cookie for index %d", desc_idx);
	vq->vq_descx[desc_idx].cookie = NULL;

	return (cookie);
}
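
A usage sketch (not part of the original file) of the polling pattern this function supports; process_buffer() is a hypothetical completion handler:

static void process_buffer(void *cookie, uint32_t len);	/* hypothetical */

/* Drain every buffer the device has marked as used. */
static void
drain_used(struct virtqueue *vq)
{
	void *cookie;
	uint32_t len;

	while ((cookie = virtqueue_dequeue(vq, &len)) != NULL)
		process_buffer(cookie, len);
}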
Example 6
/**
 * vq_ring_add_buffer() - Fills free descriptors with the buffers from
 *                        the given list and returns the index of the
 *                        first descriptor past the new chain.
 */
static uint16_t vq_ring_add_buffer(struct virtqueue *vq,
				   struct vring_desc *desc, uint16_t head_idx,
				   struct llist *buffer, int readable,
				   int writable)
{

	struct vring_desc *dp;
	int i, needed;
	uint16_t idx;

	(void)vq;

	needed = readable + writable;

	for (i = 0, idx = head_idx; (i < needed && buffer != VQ_NULL);
	     i++, idx = dp->next, buffer = buffer->next) {

		VQASSERT(vq, idx != VQ_RING_DESC_CHAIN_END,
			 "premature end of free desc chain");

		dp = &desc[idx];
		dp->addr = env_map_vatopa(buffer->data);
		dp->len = buffer->attr;
		dp->flags = 0;

		if (i < needed - 1)
			dp->flags |= VRING_DESC_F_NEXT;

		/* Readable buffers are inserted into vring before the writable buffers. */
		if (i >= readable)
			dp->flags |= VRING_DESC_F_WRITE;
	}

	return (idx);
}
Example 7
int
virtqueue_nused(struct virtqueue *vq)
{
	uint16_t used_idx, nused;

	used_idx = vq->vq_ring.used->idx;
	nused = (uint16_t)(used_idx - vq->vq_used_cons_idx);
	VQASSERT(vq, nused <= vq->vq_nentries, "used more than available");

	return (nused);
}
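
A standalone illustration (not from the original source) of why the uint16_t subtraction above stays correct across index wraparound: both counters are free-running and compared modulo 2^16:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t used_idx = 3;		/* device index has wrapped */
	uint16_t cons_idx = 65533;	/* consumer index has not */

	/* 3 - 65533 == -65530, which is 6 modulo 2^16. */
	assert((uint16_t)(used_idx - cons_idx) == 6);
	return 0;
}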
Example 8
/**
 * virtqueue_add_single_buffer - Enqueues a single buffer in the vring
 *
 * @param vq                    - Pointer to the VirtIO queue control block
 * @param cookie                - Pointer to callback data
 * @param buffer_addr           - Address of the buffer
 * @param len                   - Length of the buffer
 * @param writable              - Whether the buffer is writable
 * @param has_next              - Whether the buffer added by the next call
 *                                is to be chained to this one
 *
 * @return                      - Function status
 */
int virtqueue_add_single_buffer(struct virtqueue *vq, void *cookie,
				void *buffer_addr, uint32_t len, int writable,
				boolean has_next)
{

	struct vq_desc_extra *dxp;
	struct vring_desc *dp;
	uint16_t head_idx;
	uint16_t idx;
	int status = VQUEUE_SUCCESS;

	VQ_PARAM_CHK(vq == VQ_NULL, status, ERROR_VQUEUE_INVLD_PARAM);
	VQ_PARAM_CHK(vq->vq_free_cnt == 0, status, ERROR_VRING_FULL);

	VQUEUE_BUSY(vq);

	if (status == VQUEUE_SUCCESS) {

		VQASSERT(vq, cookie != VQ_NULL, "enqueuing with no cookie");

		head_idx = vq->vq_desc_head_idx;
		dxp = &vq->vq_descx[head_idx];

		dxp->cookie = cookie;
		dxp->ndescs = 1;
		idx = head_idx;

		dp = &vq->vq_ring.desc[idx];
		dp->addr = env_map_vatopa(buffer_addr);
		dp->len = len;
		dp->flags = 0;
		/* Remember the next descriptor on the free chain. */
		idx = dp->next;

		if (has_next)
			dp->flags |= VRING_DESC_F_NEXT;
		if (writable)
			dp->flags |= VRING_DESC_F_WRITE;

		vq->vq_desc_head_idx = idx;
		vq->vq_free_cnt--;

		if (vq->vq_free_cnt == 0) {
			VQ_RING_ASSERT_CHAIN_TERM(vq);
		} else {
			VQ_RING_ASSERT_VALID_IDX(vq, idx);
		}

		vq_ring_update_avail(vq, head_idx);
	}

	VQUEUE_IDLE(vq);

	return (status);
}
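
A usage sketch (not part of the original file): posting a pool of device-writable receive buffers, one single-descriptor entry each, with the buffer address doubling as the cookie. RX_COUNT, RX_SIZE, and post_rx_buffers() are assumptions:

#define RX_COUNT 4
#define RX_SIZE  512

static int post_rx_buffers(struct virtqueue *vq,
			   uint8_t rx_pool[RX_COUNT][RX_SIZE])
{
	int i, status;

	for (i = 0; i < RX_COUNT; i++) {
		/* writable = 1, has_next = 0: each buffer stands alone. */
		status = virtqueue_add_single_buffer(vq, rx_pool[i],
						     rx_pool[i], RX_SIZE,
						     1, 0);
		if (status != VQUEUE_SUCCESS)
			return status;
	}
	return VQUEUE_SUCCESS;
}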
Example 9
static void
vq_ring_enqueue_indirect(struct virtqueue *vq, void *cookie,
    struct sglist *sg, int readable, int writable)
{
	struct vring_desc *dp;
	struct vq_desc_extra *dxp;
	int needed;
	uint16_t head_idx;

	needed = readable + writable;
	VQASSERT(vq, needed <= vq->vq_max_indirect_size,
	    "enqueuing too many indirect descriptors");

	head_idx = vq->vq_desc_head_idx;
	VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
	dp = &vq->vq_ring.desc[head_idx];
	dxp = &vq->vq_descx[head_idx];

	VQASSERT(vq, dxp->cookie == NULL,
	    "cookie already exists for index %d", head_idx);
	dxp->cookie = cookie;
	dxp->ndescs = 1;

	dp->addr = dxp->indirect_paddr;
	dp->len = needed * sizeof(struct vring_desc);
	dp->flags = VRING_DESC_F_INDIRECT;

	vq_ring_enqueue_segments(vq, dxp->indirect, 0,
	    sg, readable, writable);

	vq->vq_desc_head_idx = dp->next;
	vq->vq_free_cnt--;
	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);
	else
		VQ_RING_ASSERT_VALID_IDX(vq, vq->vq_desc_head_idx);

	vq_ring_update_avail(vq, head_idx);
}
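
For reference, a plausible sketch (not shown above) of the vq_ring_use_indirect() predicate called from virtqueue_enqueue() in Example 2; the VIRTQUEUE_FLAG_INDIRECT flag is an assumption about how the negotiated feature is cached:

static int
vq_ring_use_indirect(struct virtqueue *vq, int needed)
{
	if ((vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT) == 0)
		return (0);
	if (vq->vq_max_indirect_size < needed)
		return (0);
	/* A chain of one fits directly; indirection buys nothing. */
	if (needed < 2)
		return (0);
	return (1);
}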