int virtqueue_add_outbuf(struct virtqueue *_vq, char *buf, unsigned int len)
{
    struct vring_virtqueue *vq = to_vvq(_vq);
    unsigned avail;
    int head;

    assert(buf != NULL);
    assert(len != 0);

    if (!vq->vq.num_free)
        return -1;

    --vq->vq.num_free;
    head = vq->free_head;

    vq->vring.desc[head].flags = 0;
    vq->vring.desc[head].addr = virt_to_phys(buf);
    vq->vring.desc[head].len = len;

    vq->free_head = vq->vring.desc[head].next;
    vq->data[head] = buf;

    avail = (vq->vring.avail->idx & (vq->vring.num - 1));
    vq->vring.avail->ring[avail] = head;
    wmb();
    vq->vring.avail->idx++;
    vq->num_added++;

    return 0;
}
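/*
 * Usage sketch (not part of the original listing): how the minimal
 * virtqueue_add_outbuf() above pairs with virtqueue_kick(), which appears
 * later in this listing.  The 4096-byte buffer size and the caller are
 * hypothetical; completed buffers are reclaimed with virtqueue_get_buf()
 * once the host marks them used.
 */
static void example_send(struct virtqueue *vq, char *buf)
{
    /* -1 means the ring had no free descriptors */
    if (virtqueue_add_outbuf(vq, buf, 4096) < 0)
        return;

    /* expose the new avail entry to the host */
    virtqueue_kick(vq);
}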
static void vring_kick(struct virtqueue *_vq)
{
    u16 prev_avail_idx;
    struct vring_virtqueue *vq = to_vvq(_vq);

    /* Descriptors and available array need to be set before we expose the
     * new available array entries. */
    mb();

    prev_avail_idx = vq->vring.avail->idx - (u16)vq->num_added;
    DPrintf(4, ("%s>>> vq->vring.avail->idx %d\n", __FUNCTION__, vq->vring.avail->idx));
    vq->num_added = 0;

    /* Need to update avail index before checking if we should notify */
    mb();

    if (vq->use_published_indices) {
        if (vring_need_event(vring_last_avail(&vq->vring), vq->vring.avail->idx, prev_avail_idx))
            vq->notify(&vq->vq);
    } else {
        if (!(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY))
            /* Prod other side to tell it about changes. */
            vq->notify(&vq->vq);
    }
}
static void vring_disable_interrupts(struct virtqueue *_vq)
{
    struct vring_virtqueue *vq = to_vvq(_vq);

    vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
    mb();
}
static void vring_enable_interrupts(struct virtqueue *_vq)
{
    struct vring_virtqueue *vq = to_vvq(_vq);

    vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
    *vq->vring.vring_last_used_ptr = vq->last_used_idx;
    mb();
}
static void vring_delay_interrupts(struct virtqueue *_vq)
{
    struct vring_virtqueue *vq = to_vvq(_vq);
    u16 buffs_num_before_interrupt =
        ((u16)(vq->vring.avail->idx - vq->last_used_idx)) * 3 / 4;

    *vq->vring.vring_last_used_ptr = vq->vring.used->idx + buffs_num_before_interrupt;
    mb();
}
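/*
 * For reference: vring_kick() above relies on vring_need_event(), the
 * standard event-index test from the virtio specification.  It returns
 * true when event_idx falls within [old, new_idx), i.e. when advancing
 * the index from old to new_idx crossed the event index the other side
 * published, so a notification is actually required.
 */
static inline int vring_need_event(u16 event_idx, u16 new_idx, u16 old)
{
    /* Note: unsigned wrap-around arithmetic is intentional here. */
    return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old);
}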
static int vring_add_indirect(
    IN struct virtqueue *_vq,
    IN struct VirtIOBufferDescriptor sg[],
    IN unsigned int out,
    IN unsigned int in,
    IN PVOID va,
    IN ULONGLONG phys)
{
    struct vring_virtqueue *vq = to_vvq(_vq);
    struct vring_desc *desc = (struct vring_desc *)va;
    unsigned head;
    unsigned int i;

    if (!phys) {
        return -1;
    }

    /* Transfer entries from the sg list into the indirect page */
    for (i = 0; i < out; i++) {
        desc[i].flags = VRING_DESC_F_NEXT;
        desc[i].addr = sg->physAddr.QuadPart;
        desc[i].len = sg->ulSize;
        desc[i].next = i + 1;
        sg++;
    }
    for (; i < (out + in); i++) {
        desc[i].flags = VRING_DESC_F_NEXT | VRING_DESC_F_WRITE;
        desc[i].addr = sg->physAddr.QuadPart;
        desc[i].len = sg->ulSize;
        desc[i].next = i + 1;
        sg++;
    }

    /* Last one doesn't continue. */
    desc[i - 1].flags &= ~VRING_DESC_F_NEXT;
    desc[i - 1].next = 0;

    /* We're about to use a buffer */
    vq->num_free--;

    /* Use a single buffer which doesn't continue */
    head = vq->free_head;
    vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
    vq->vring.desc[head].addr = phys;
    vq->vring.desc[head].len = i * sizeof(struct vring_desc);

    /* Update free pointer */
    vq->free_head = vq->vring.desc[head].next;

    return head;
}
/* FIXME: We need to tell other side about removal, to synchronize. */
static void vring_shutdown(struct virtqueue *_vq)
{
    struct vring_virtqueue *vq = to_vvq(_vq);
    unsigned int num = vq->vring.num;
    unsigned int index = vq->vq.ulIndex;
    void *pages = vq->vring.desc;
    VirtIODevice *pVirtIODevice = vq->vq.vdev;
    bool use_published_indices = vq->use_published_indices;
    void (*notify)(struct virtqueue *) = vq->notify;

    memset(pages, 0, vring_size(num, PAGE_SIZE));
    initialize_virtqueue(vq, num, pVirtIODevice, pages, notify, index, use_published_indices);
}
static void vring_kick_always(struct virtqueue *_vq)
{
    struct vring_virtqueue *vq = to_vvq(_vq);

    /* Descriptors and available array need to be set before we expose the
     * new available array entries. */
    wmb();

    DPrintf(4, ("%s>>> vq->vring.avail->idx %d\n", __FUNCTION__, vq->vring.avail->idx));
    vq->num_added = 0;

    /* Need to update avail index before checking if we should notify */
    mb();

    vq->notify(&vq->vq);
}
void *vring_detach_unused_buf(struct virtqueue *_vq)
{
    struct vring_virtqueue *vq = to_vvq(_vq);
    unsigned int i;
    void *buf;

    for (i = 0; i < vq->vring.num; i++) {
        if (!vq->data[i])
            continue;
        buf = vq->data[i];
        detach_buf(vq, i);
        vq->vring.avail->idx--;
        return buf;
    }
    return NULL;
}
static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len)
{
    struct vring_virtqueue *vq = to_vvq(_vq);
    void *ret;
    struct vring_used_elem *u;
    unsigned int i;

    if (!more_used(vq)) {
        DPrintf(4, ("No more buffers in queue: last_used_idx %d vring.used->idx %d\n",
                    vq->last_used_idx, vq->vring.used->idx));
        return NULL;
    }

    /* Only get used array entries after they have been exposed by host. */
    rmb();

    u = &vq->vring.used->ring[vq->last_used_idx % vq->vring.num];
    i = u->id;
    *len = u->len;

    DPrintf(4, ("%s>>> id %d, len %d\n", __FUNCTION__, i, *len));

    if (i >= vq->vring.num) {
        DPrintf(0, ("id %u out of range\n", i));
        return NULL;
    }
    if (!vq->data[i]) {
        DPrintf(0, ("id %u is not a head!\n", i));
        return NULL;
    }

    /* detach_buf clears data, so grab it now. */
    ret = vq->data[i];
    detach_buf(vq, i);

    ++vq->last_used_idx;
    if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
        *vq->vring.vring_last_used_ptr = vq->last_used_idx;
        mb();
    }

    return ret;
}
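/*
 * more_used(), called throughout these listings, is never shown in them;
 * a sketch matching the classic Linux definition: there is work pending
 * whenever our private last_used_idx trails the index the device writes.
 */
static inline bool more_used(const struct vring_virtqueue *vq)
{
    return vq->last_used_idx != vq->vring.used->idx;
}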
irqreturn_t vring_interrupt(int irq, void *_vq)
{
    struct vring_virtqueue *vq = to_vvq(_vq);

    if (!more_used(vq)) {
        pr_debug("virtqueue interrupt with no work for %p\n", vq);
        return IRQ_NONE;
    }

    if (unlikely(vq->broken))
        return IRQ_HANDLED;

    pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
    if (vq->vq.callback)
        vq->vq.callback(&vq->vq);

    return IRQ_HANDLED;
}
static bool vring_enable_cb(struct virtqueue *_vq)
{
    struct vring_virtqueue *vq = to_vvq(_vq);

    START_USE(vq);

    /* We optimistically turn back on interrupts, then check if there was
     * more to do. */
    vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
    mb();
    if (unlikely(more_used(vq))) {
        END_USE(vq);
        return false;
    }

    END_USE(vq);
    return true;
}
static bool vring_restart(struct virtqueue *_vq)
{
    struct vring_virtqueue *vq = to_vvq(_vq);

    DPrintf(6, ("%s\n", __FUNCTION__));
    //BUG_ON(!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT));

    /* We optimistically turn back on interrupts, then check if there was
     * more to do. */
    vring_enable_interrupts(_vq);
    if (more_used(vq)) {
        vring_disable_interrupts(_vq);
        return false;
    }

    return true;
}
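/*
 * Usage sketch (hypothetical, not from the original sources) of the
 * drain-then-restart pattern vring_restart() is built for: disable
 * interrupts, drain the used ring, then re-enable and re-check so a
 * buffer completing during the window is not missed.  process_buffer()
 * is a placeholder consumer.
 */
extern void process_buffer(void *buf, unsigned int len);

static void example_drain(struct virtqueue *vq)
{
    unsigned int len;
    void *buf;

    do {
        vring_disable_interrupts(vq);
        while ((buf = vring_get_buf(vq, &len)) != NULL)
            process_buffer(buf, len);
    } while (!vring_restart(vq));   /* false: more work raced in, go again */
}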
static void vring_kick(struct virtqueue *_vq)
{
    struct vring_virtqueue *vq = to_vvq(_vq);

    START_USE(vq);

    /* Descriptors and available array need to be set before we expose the
     * new available array entries. */
    wmb();

    vq->vring.avail->idx += vq->num_added;
    vq->num_added = 0;

    /* Need to update avail index before checking if we should notify */
    mb();

    if (!(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY))
        /* Prod other side to tell it about changes. */
        vq->notify(&vq->vq);

    END_USE(vq);
}
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
    struct vring_virtqueue *vq = to_vvq(_vq);
    u16 last_used;
    unsigned i;
    void *ret;

    /* NB: this minimal variant does no more_used() check; the caller must
     * ensure a used entry is actually pending. */

    /* Only read used entries after the host has exposed them. */
    rmb();

    last_used = (vq->last_used_idx & (vq->vring.num - 1));
    i = vq->vring.used->ring[last_used].id;
    *len = vq->vring.used->ring[last_used].len;

    /* detach_buf clears data, so grab it now. */
    ret = vq->data[i];
    detach_buf(vq, i);

    vq->last_used_idx++;
    return ret;
}
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
    struct vring_virtqueue *vq = to_vvq(_vq);
    void *ret;
    unsigned int i;

    START_USE(vq);

    if (unlikely(vq->broken)) {
        END_USE(vq);
        return NULL;
    }

    if (!more_used(vq)) {
        pr_debug("No more buffers in queue\n");
        END_USE(vq);
        return NULL;
    }

    /* Only get used array entries after they have been exposed by host. */
    virtio_rmb();

    i = vq->vring.used->ring[vq->last_used_idx % vq->vring.num].id;
    *len = vq->vring.used->ring[vq->last_used_idx % vq->vring.num].len;

    if (unlikely(i >= vq->vring.num)) {
        BAD_RING(vq, "id %u out of range\n", i);
        return NULL;
    }
    if (unlikely(!vq->data[i])) {
        BAD_RING(vq, "id %u is not a head!\n", i);
        return NULL;
    }

    /* detach_buf clears data, so grab it now. */
    ret = vq->data[i];
    detach_buf(vq, i);

    vq->last_used_idx++;
    END_USE(vq);
    return ret;
}
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
    struct vring_virtqueue *vq = to_vvq(_vq);
    unsigned int i;
    void *buf;

    START_USE(vq);

    for (i = 0; i < vq->vring.num; i++) {
        if (!vq->data[i])
            continue;
        /* detach_buf clears data, so grab it now. */
        buf = vq->data[i];
        detach_buf(vq, i);
        END_USE(vq);
        return buf;
    }

    /* That should have freed everything. */
    BUG_ON(vq->num_free != vq->vring.num);

    END_USE(vq);
    return NULL;
}
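/*
 * detach_buf() is used by every get/detach path above but never shown; a
 * sketch following the classic Linux implementation.  It walks the
 * descriptor chain, links it back onto the free list, and clears the
 * token.  (Some variants in this listing track the free count as
 * vq->vq.num_free rather than vq->num_free.)
 */
static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
    unsigned int i;

    /* Clear data ptr so the slot no longer looks like a chain head. */
    vq->data[head] = NULL;

    /* Put the chain back on the free list: find its end. */
    i = head;
    while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
        i = vq->vring.desc[i].next;
        vq->num_free++;
    }

    vq->vring.desc[i].next = vq->free_head;
    vq->free_head = head;

    /* Plus the final descriptor. */
    vq->num_free++;
}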
static int vring_add_buf(struct virtqueue *_vq,
                         struct scatterlist sg[],
                         unsigned int out,
                         unsigned int in,
                         void *data)
{
    struct vring_virtqueue *vq = to_vvq(_vq);
    unsigned int i, avail, head, uninitialized_var(prev);

    BUG_ON(data == NULL);
    BUG_ON(out + in > vq->vring.num);
    BUG_ON(out + in == 0);

    START_USE(vq);

    if (vq->num_free < out + in) {
        pr_debug("Can't add buf len %i - avail = %i\n", out + in, vq->num_free);
        /* FIXME: for historical reasons, we force a notify here if
         * there are outgoing parts to the buffer.  Presumably the
         * host should service the ring ASAP. */
        if (out)
            vq->notify(&vq->vq);
        END_USE(vq);
        return -ENOSPC;
    }

    /* We're about to use some buffers from the free list. */
    vq->num_free -= out + in;

    head = vq->free_head;
    for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
        vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
        vq->vring.desc[i].addr = sg_phys(sg);
        vq->vring.desc[i].len = sg->length;
        prev = i;
        sg++;
    }
    for (; in; i = vq->vring.desc[i].next, in--) {
        vq->vring.desc[i].flags = VRING_DESC_F_NEXT | VRING_DESC_F_WRITE;
        vq->vring.desc[i].addr = sg_phys(sg);
        vq->vring.desc[i].len = sg->length;
        prev = i;
        sg++;
    }
    /* Last one doesn't continue. */
    vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

    /* Update free pointer */
    vq->free_head = i;

    /* Set token. */
    vq->data[head] = data;

    /* Put entry in available array (but don't update avail->idx until they
     * do sync).  FIXME: avoid modulus here? */
    avail = (vq->vring.avail->idx + vq->num_added++) % vq->vring.num;
    vq->vring.avail->ring[avail] = head;

    pr_debug("Added buffer head %i to %p\n", head, vq);
    END_USE(vq);
    return 0;
}
bool virtqueue_kick(struct virtqueue *_vq)
{
    struct vring_virtqueue *vq = to_vvq(_vq);

    /* Make the updated avail index visible before notifying the host;
     * this variant notifies unconditionally. */
    mb();
    return vq->notify(_vq);
}
static inline int virtqueue_add(struct virtqueue *_vq,
                                struct scatterlist *sgs[],
                                struct scatterlist *(*next)(struct scatterlist *, unsigned int *),
                                unsigned int total_out,
                                unsigned int total_in,
                                unsigned int out_sgs,
                                unsigned int in_sgs,
                                void *data,
                                gfp_t gfp)
{
    struct vring_virtqueue *vq = to_vvq(_vq);
    struct scatterlist *sg;
    unsigned int i, n, avail, uninitialized_var(prev), total_sg;
    int head;

    START_USE(vq);

    BUG_ON(data == NULL);

#ifdef DEBUG
    {
        ktime_t now = ktime_get();

        /* No kick or get, with .1 second between?  Warn. */
        if (vq->last_add_time_valid)
            WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time)) > 100);
        vq->last_add_time = now;
        vq->last_add_time_valid = true;
    }
#endif

    total_sg = total_in + total_out;

    /* If the host supports indirect descriptor tables, and we have multiple
     * buffers, then go indirect. FIXME: tune this threshold */
    if (vq->indirect && total_sg > 1 && vq->vq.num_free) {
        head = vring_add_indirect(vq, sgs, next, total_sg, total_out,
                                  total_in, out_sgs, in_sgs, gfp);
        if (likely(head >= 0))
            goto add_head;
    }

    BUG_ON(total_sg > vq->vring.num);
    BUG_ON(total_sg == 0);

    if (vq->vq.num_free < total_sg) {
        pr_debug("Can't add buf len %i - avail = %i\n",
                 total_sg, vq->vq.num_free);
        /* FIXME: for historical reasons, we force a notify here if
         * there are outgoing parts to the buffer.  Presumably the
         * host should service the ring ASAP. */
        if (out_sgs)
            vq->notify(&vq->vq);
        END_USE(vq);
        return -ENOSPC;
    }

    /* We're about to use some buffers from the free list. */
    vq->vq.num_free -= total_sg;

    head = i = vq->free_head;
    for (n = 0; n < out_sgs; n++) {
        for (sg = sgs[n]; sg; sg = next(sg, &total_out)) {
            vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
            vq->vring.desc[i].addr = sg_phys(sg);
            vq->vring.desc[i].len = sg->length;
            prev = i;
            i = vq->vring.desc[i].next;
        }
    }
    for (; n < (out_sgs + in_sgs); n++) {
        for (sg = sgs[n]; sg; sg = next(sg, &total_in)) {
            vq->vring.desc[i].flags = VRING_DESC_F_NEXT | VRING_DESC_F_WRITE;
            vq->vring.desc[i].addr = sg_phys(sg);
            vq->vring.desc[i].len = sg->length;
            prev = i;
            i = vq->vring.desc[i].next;
        }
    }
    /* Last one doesn't continue. */
    vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

    /* Update free pointer */
    vq->free_head = i;

add_head:
    /* Set token. */
    vq->data[head] = data;

    /* Put entry in available array (but don't update avail->idx until they
     * do sync). */
    avail = (vq->vring.avail->idx & (vq->vring.num - 1));
    vq->vring.avail->ring[avail] = head;

    /* Descriptors and available array need to be set before we expose the
     * new available array entries. */
    virtio_wmb(vq->weak_barriers);
    vq->vring.avail->idx++;
    vq->num_added++;

    /* This is very unlikely, but theoretically possible.  Kick
     * just in case. */
    if (unlikely(vq->num_added == (1 << 16) - 1))
        virtqueue_kick(_vq);

    pr_debug("Added buffer head %i to %p\n", head, vq);
    END_USE(vq);

    return 0;
}
int virtqueue_add_buf_gfp(struct virtqueue *_vq,
                          struct scatterlist sg[],
                          unsigned int out,
                          unsigned int in,
                          void *data,
                          gfp_t gfp)
{
    struct vring_virtqueue *vq = to_vvq(_vq);
    unsigned int i, avail, uninitialized_var(prev);
    int head;

    START_USE(vq);

    BUG_ON(data == NULL);

    /* If the host supports indirect descriptor tables, and we have multiple
     * buffers, then go indirect. FIXME: tune this threshold */
    if (vq->indirect && (out + in) > 1 && vq->num_free) {
        head = vring_add_indirect(vq, sg, out, in, gfp);
        if (likely(head >= 0))
            goto add_head;
    }

    BUG_ON(out + in > vq->vring.num);
    BUG_ON(out + in == 0);

    if (vq->num_free < out + in) {
        pr_debug("Can't add buf len %i - avail = %i\n", out + in, vq->num_free);
        /* FIXME: for historical reasons, we force a notify here if
         * there are outgoing parts to the buffer.  Presumably the
         * host should service the ring ASAP. */
        if (out)
            vq->notify(&vq->vq);
        END_USE(vq);
        return -ENOSPC;
    }

    /* We're about to use some buffers from the free list. */
    vq->num_free -= out + in;

    head = vq->free_head;
    for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
        vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
        vq->vring.desc[i].addr = sg_phys(sg);
        vq->vring.desc[i].len = sg->length;
        prev = i;
        sg++;
    }
    for (; in; i = vq->vring.desc[i].next, in--) {
        vq->vring.desc[i].flags = VRING_DESC_F_NEXT | VRING_DESC_F_WRITE;
        vq->vring.desc[i].addr = sg_phys(sg);
        vq->vring.desc[i].len = sg->length;
        prev = i;
        sg++;
    }
    /* Last one doesn't continue. */
    vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

    /* Update free pointer */
    vq->free_head = i;

add_head:
    /* Set token. */
    vq->data[head] = data;

    /* Put entry in available array (but don't update avail->idx until they
     * do sync).  FIXME: avoid modulus here? */
    avail = (vq->vring.avail->idx + vq->num_added++) % vq->vring.num;
    vq->vring.avail->ring[avail] = head;

    pr_debug("Added buffer head %i to %p\n", head, vq);
    END_USE(vq);

    /* If we're indirect, we can fit many (assuming not OOM). */
    if (vq->indirect)
        return vq->num_free ? vq->vring.num : 0;

    return vq->num_free;
}
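/*
 * Usage sketch for virtqueue_add_buf_gfp(): one readable (out) and one
 * writable (in) segment, roughly how a virtio-blk-style request would be
 * queued.  Assumes <linux/scatterlist.h>; `req`, `status` and the sizes
 * are hypothetical placeholders.
 */
static int example_queue_request(struct virtqueue *vq, void *req, void *status)
{
    struct scatterlist sg[2];

    sg_init_table(sg, 2);
    sg_set_buf(&sg[0], req, 16);      /* device reads this segment */
    sg_set_buf(&sg[1], status, 1);    /* device writes this segment */

    /* out = 1 readable segment, in = 1 writable segment */
    return virtqueue_add_buf_gfp(vq, sg, 1, 1, req, GFP_ATOMIC);
}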
static BOOLEAN vring_is_interrupt_enabled(struct virtqueue *_vq)
{
    struct vring_virtqueue *vq = to_vvq(_vq);

    return (vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT) ? FALSE : TRUE;
}
void virtqueue_disable_cb(struct virtqueue *_vq)
{
    struct vring_virtqueue *vq = to_vvq(_vq);

    vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
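/*
 * to_vvq(), used by every function in this listing, is the usual
 * container_of() downcast from the public struct virtqueue embedded in
 * the private struct vring_virtqueue; a sketch matching the Linux
 * definition (assumes container_of() is available):
 */
#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)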
void vring_del_virtqueue(struct virtqueue *vq)
{
    list_del(&vq->list);
    kfree(to_vvq(vq));
}
void vring_del_virtqueue(struct virtqueue *vq)
{
    kfree(to_vvq(vq));
}
static int vring_add_buf(struct virtqueue *_vq,
                         struct VirtIOBufferDescriptor sg[],
                         unsigned int out,
                         unsigned int in,
                         void *data,
                         void *va_indirect,
                         ULONGLONG phys_indirect)
{
    struct vring_virtqueue *vq = to_vvq(_vq);
    unsigned int i, avail, head, prev = 0;

    if (data == NULL) {
        DPrintf(0, ("%s: data is NULL!\n", __FUNCTION__));
        return -1;
    }

    if (va_indirect) {
        int ret = vring_add_indirect(_vq, sg, out, in, va_indirect, phys_indirect);
        if (ret >= 0) {
            head = (unsigned int)ret;
            goto add_head;
        } else {
            DPrintf(0, ("%s: no physical storage provided!\n", __FUNCTION__));
            return -1;
        }
    }

    if (out + in > vq->vring.num) {
        DPrintf(0, ("%s: out + in > vq->vring.num!\n", __FUNCTION__));
        return -1;
    }
    if (out + in == 0) {
        DPrintf(0, ("%s: out + in == 0!\n", __FUNCTION__));
        return -1;
    }

    if (vq->num_free < out + in) {
        DPrintf(0, ("Can't add buf len %i - avail = %i\n", out + in, vq->num_free));
        /* notify the host immediately if we are out of descriptors in tx ring */
        if (out)
            vq->notify(&vq->vq);
        return -1;
    }

    /* We're about to use some buffers from the free list. */
    vq->num_free -= out + in;

    head = vq->free_head;
    for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
        vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
        vq->vring.desc[i].addr = sg->physAddr.QuadPart;
        vq->vring.desc[i].len = sg->ulSize;
        prev = i;
        sg++;
    }
    for (; in; i = vq->vring.desc[i].next, in--) {
        vq->vring.desc[i].flags = VRING_DESC_F_NEXT | VRING_DESC_F_WRITE;
        vq->vring.desc[i].addr = sg->physAddr.QuadPart;
        vq->vring.desc[i].len = sg->ulSize;
        prev = i;
        sg++;
    }
    /* Last one doesn't continue. */
    vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

    /* Update free pointer */
    vq->free_head = i;

add_head:
    /* Set token. */
    vq->data[head] = data;

    /* Put entry in available array (but don't update avail->idx until they
     * do sync).  FIXME: avoid modulus here? */
    avail = vq->vring.avail->idx % vq->vring.num;
    DPrintf(6, ("%s >>> avail %d vq->vring.avail->idx = %d, vq->num_added = %d vq->vring.num = %d\n",
                __FUNCTION__, avail, vq->vring.avail->idx, vq->num_added, vq->vring.num));
    vq->vring.avail->ring[avail] = (u16)head;

    DPrintf(6, ("Added buffer head %i to %p\n", head, vq));

    /* Flush ring changes before index advancement */
    mb();
    vq->vring.avail->idx++;
    vq->num_added++;

    return vq->num_free;
}
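/*
 * Usage sketch (hypothetical) for the Windows-style vring_add_buf()
 * above: two physically contiguous output fragments and no indirect
 * page.  PHYSICAL_ADDRESS, the fragment sizes, and the context pointer
 * are placeholders chosen to match the field usage in the code above.
 */
static int example_add_tx(struct virtqueue *vq,
                          PHYSICAL_ADDRESS hdrPA,
                          PHYSICAL_ADDRESS dataPA,
                          void *ctx)
{
    struct VirtIOBufferDescriptor sg[2];

    sg[0].physAddr = hdrPA;
    sg[0].ulSize = 12;                /* e.g. a virtio-net header */
    sg[1].physAddr = dataPA;
    sg[1].ulSize = 1514;              /* e.g. a max-size Ethernet frame */

    /* out = 2 readable fragments, in = 0, no indirect area supplied */
    return vring_add_buf(vq, sg, 2, 0, ctx, NULL, 0);
}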