/*
 * Postpone the next used-ring interrupt until roughly half of the
 * currently outstanding (available but not yet consumed) descriptors
 * have been processed by the host.
 *
 * Returns 1 if enough entries were consumed in the meantime and the
 * caller should process the queue again instead of waiting; 0 otherwise.
 */
int virtqueue_postpone_intr(struct virtqueue *vq)
{
	uint16_t ndesc;

	/*
	 * Postpone until at least half of the available descriptors
	 * have been consumed.
	 *
	 * XXX Adaptive factor? (Linux uses 3/4)
	 */
	ndesc = (uint16_t)(vq->vq_ring.avail->idx - vq->vq_used_cons_idx) / 2;

	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX)
		vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx + ndesc;
	else
		vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;

	/*
	 * Make the used_event/flags write visible before re-reading the
	 * used index below; the order of these two accesses is what makes
	 * the race check safe.
	 */
	cpu_mfence();

	/*
	 * Enough items may have already been consumed to meet our
	 * threshold since we last checked. Let our caller know so
	 * it processes the new entries.
	 */
	if (virtqueue_nused(vq) > ndesc)
		return (1);

	return (0);
}
/**
 * vq_ring_enable_interrupt
 *
 * Re-enable used-ring interrupts for @vq, asking (when EVENT_IDX is
 * negotiated) that the next interrupt be delayed until @ndesc further
 * entries have been consumed; otherwise interrupts are re-enabled
 * immediately by clearing the suppression flag.
 *
 * @param vq    - Pointer to VirtIO queue control block
 * @param ndesc - Number of additional consumed entries to wait for
 *                before the next interrupt (EVENT_IDX mode only)
 *
 * @return 1 when enough entries were already consumed to meet the
 *         threshold (caller should process the queue again rather than
 *         wait for an interrupt), 0 otherwise.
 */
static int vq_ring_enable_interrupt(struct virtqueue *vq, uint16_t ndesc)
{
	/*
	 * Enable interrupts, making sure we get the latest index of
	 * what's already been consumed.
	 */
	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
		vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx + ndesc;
	} else {
		vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	}

	/* Order the event/flag write before the used-index re-check. */
	env_mb();

	/*
	 * Enough items may have already been consumed to meet our threshold
	 * since we last checked. Let our caller know so it processes the new
	 * entries.
	 */
	if (virtqueue_nused(vq) > ndesc) {
		return (1);
	}

	return (0);
}
/*
 * Enable interrupts on a given virtqueue. Returns 1 if there are
 * additional entries to process on the virtqueue after we return.
 */
int virtqueue_enable_intr(struct virtqueue *vq)
{
	/*
	 * Enable interrupts, making sure we get the latest
	 * index of what's already been consumed.
	 *
	 * Only one of the two mechanisms needs arming: with EVENT_IDX
	 * negotiated the device compares against used_event and ignores
	 * the avail-ring flag, so the previous unconditional clear of
	 * VRING_AVAIL_F_NO_INTERRUPT before this if/else was redundant
	 * and has been removed.
	 */
	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
		vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx;
	} else {
		vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	}

	/* Publish the event/flag write before re-reading used->idx. */
	cpu_mfence();

	/*
	 * Additional items may have been consumed in the time between
	 * since we last checked and enabled interrupts above. Let our
	 * caller know so it processes the new entries.
	 */
	if (vq->vq_used_cons_idx != vq->vq_ring.used->idx)
		return (1);

	return (0);
}
/*
 * Host side: raise a "call" (interrupt) toward the guest for newly used
 * entries, but only when the guest-published used_event index shows it
 * wants a notification for the range (called_used_idx, used_idx].
 * called_used_idx records the used index we last signalled for, so
 * repeated invocations without progress stay silent.
 */
void call_used(void)
{
	/* Flush in previous flags write */
	/* Barrier D (for pairing) */
	smp_mb();

	if (!vring_need_event(vring_used_event(&ring),
			      host.used_idx,
			      host.called_used_idx))
		return;

	host.called_used_idx = host.used_idx;
	call();
}
/**
 * virtqueue_disable_cb - Disables callback generation
 *
 * (The original header misnamed this function "virtqueue_enable_cb";
 * corrected to match the definition below.)
 *
 * @param vq - Pointer to VirtIO queue control block
 *
 */
void virtqueue_disable_cb(struct virtqueue *vq)
{
	VQUEUE_BUSY(vq);

	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
		/*
		 * EVENT_IDX mode: park used_event a full ring-length behind
		 * the consumer index so the device's used index will not
		 * cross it and no interrupt is generated.
		 */
		vring_used_event(&vq->vq_ring) =
		    vq->vq_used_cons_idx - vq->vq_nentries - 1;
	} else {
		vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
	}

	VQUEUE_IDLE(vq);
}
/*
 * Decide whether the device should raise an interrupt for this queue.
 * Compares the guest-published used_event index against the span of
 * used entries added since the last signal; records the new used index
 * in last_used_signalled when a signal is due.
 */
bool virtio_queue_should_signal(struct virtio_queue *vq)
{
	u16 prev, cur, wanted;

	if (!vq->addr) {
		return FALSE;
	}

	prev = vq->last_used_signalled;
	cur = vq->vring.used->idx;
	wanted = vring_used_event(&vq->vring);

	if (!vring_need_event(wanted, cur, prev)) {
		return FALSE;
	}

	vq->last_used_signalled = cur;
	return TRUE;
}
/*
 * Guest side: re-arm used-ring notifications by publishing the current
 * consumer index as used_event, then re-check for entries that raced in.
 * Returns true when the ring still appears empty at that index after the
 * barrier (safe for the caller to wait for a call); false means new
 * entries are already present and the caller should poll again.
 */
bool enable_call()
{
	unsigned short last_used_idx;

	vring_used_event(&ring) = (last_used_idx = guest.last_used_idx);
	/* Flush call index write */
	/* Barrier D (for pairing) */
	smp_mb();
#ifdef RING_POLL
	{
		unsigned short head = last_used_idx & (ring_size - 1);
		unsigned index = ring.used->ring[head].id;

		/*
		 * Polling mode encodes a wrap-phase bit (0x8000) in the id;
		 * non-zero result means the entry at head has not been
		 * produced for this lap yet, i.e. the ring is still empty.
		 */
		return (index ^ last_used_idx ^ 0x8000) & ~(ring_size - 1);
	}
#else
	return ring.used->idx == last_used_idx;
#endif
}
/*
 * Decide whether the device should notify (interrupt) the driver after
 * adding used entries to @vq.  The commented-out lines preserve the
 * original feature-bit checks that the bVirtioF_NotifyOnEmpty /
 * bUsePublishedIndices globals stand in for in this port.
 */
static bool vring_notify(PVOID unused, VirtQueue *vq)
{
	uint16_t old, _new;
	bool v;

	/* Always notify when queue is empty (when feature acknowledge) */
	//if (((vdev->guest_features & (1 << VIRTIO_F_NOTIFY_ON_EMPTY)) &&
	if ((bVirtioF_NotifyOnEmpty && !vq->inuse &&
	     vring_avail_idx(vq) == vq->last_avail_idx)) {
		return TRUE;
	}

	//if (!(vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX))) {
	if (!bUsePublishedIndices) {
		/* Legacy mode: honor only the avail-ring suppression flag. */
		return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
	}

	/*
	 * EVENT_IDX mode: always notify the first time through (the
	 * signalled_used snapshot is not yet valid); afterwards notify only
	 * when the driver's used_event index lies within (old, _new].
	 * The snapshot update must happen before the decision is returned,
	 * so keep the statement order below intact.
	 */
	v = vq->signalled_used_valid;
	vq->signalled_used_valid = TRUE;
	old = vq->signalled_used;
	_new = vq->signalled_used = vring_used_idx(vq);
	return !v || vring_need_event(vring_used_event(vq), _new, old);
}