/** * virtqueue_add_consumed_buffer - Returns consumed buffer back to VirtIO queue * * @param vq - Pointer to VirtIO queue control block * @param head_idx - Index of vring desc containing used buffer * @param len - Length of buffer * * @return - Function status */ int virtqueue_add_consumed_buffer(struct virtqueue *vq, uint16_t head_idx, uint32_t len) { struct vring_used_elem *used_desc = VQ_NULL; uint16_t used_idx; if (head_idx > vq->vq_nentries) { return (ERROR_VRING_NO_BUFF); } VQUEUE_BUSY(vq); used_idx = vq->vq_ring.used->idx & (vq->vq_nentries - 1); used_desc = &(vq->vq_ring.used->ring[used_idx]); used_desc->id = head_idx; used_desc->len = len; env_wmb(); vq->vq_ring.used->idx++; VQUEUE_IDLE(vq); return (VQUEUE_SUCCESS); }
/**
 * _notify - Kick the remote side by firing an inter-processor interrupt.
 *
 * @param cpu_id    - Target CPU identifier (unused here)
 * @param intr_info - Interrupt descriptor whose data field carries the IPI info
 */
void _notify(int cpu_id, struct proc_intr *intr_info)
{
	struct ipi_info *ipi = (struct ipi_info *)(intr_info->data);

	if (!ipi)
		return;

	/* Push cached vring updates to memory before signalling the peer. */
	platform_dcache_all_flush();
	env_wmb();

	/* Trigger IPI */
	ipi_trigger(ipi->ipi_base_addr, ipi->ipi_chn_mask);
}
/**
 *
 * vq_ring_update_avail - Expose a descriptor chain head in the avail ring.
 *
 */
static void vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
{
	uint16_t slot;

	/*
	 * Publish the chain head in the next avail slot right away rather
	 * than deferring to virtqueue_notify(): if the host is already
	 * running on another CPU, it can start processing the new
	 * descriptor immediately.
	 */
	slot = vq->vq_ring.avail->idx & (vq->vq_nentries - 1);
	vq->vq_ring.avail->ring[slot] = desc_idx;

	/* Make the slot write visible before bumping the avail index. */
	env_wmb();

	vq->vq_ring.avail->idx++;

	/* Keep pending count until virtqueue_notify(). */
	vq->vq_queued_cnt++;
}