void initialize_virtqueue(struct vring_virtqueue *vq,
                          unsigned int num,
                          VirtIODevice *pVirtIODevice,
                          void *pages,
                          void (*notify)(struct virtqueue *),
                          unsigned int index,
                          bool use_published_indices)
{
    unsigned int i;

    memset(vq, 0, sizeof(*vq) + sizeof(void *) * num);

    vring_init(&vq->vring, num, pages, PAGE_SIZE);
    vq->vq.vdev = pVirtIODevice;
    vq->vq.vq_ops = &vring_vq_ops;
    vq->notify = notify;
    vq->broken = 0;
    vq->vq.ulIndex = index;
    *vq->vring.vring_last_used_ptr = vq->last_used_idx = 0;
    vq->num_added = 0;
    vq->use_published_indices = use_published_indices;

    /* No callback? Tell other side not to bother us. */
    // TBD
    //if (!callback)
    //    vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

    /* Put everything in free lists. */
    vq->num_free = num;
    vq->free_head = 0;
    for (i = 0; i < num - 1; i++)
        vq->vring.desc[i].next = i + 1;
}
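All of the call sites in this collection assume the legacy split-ring layout: one contiguous buffer holding the descriptor table, then the available ring, then the used ring rounded up to the caller's alignment. For reference, a minimal sketch of vring_init() itself, modeled on the legacy definition in Linux's include/uapi/linux/virtio_ring.h (field types vary slightly between the projects above):

/* Sketch of the legacy vring_init(), assuming the split-ring layout from
 * include/uapi/linux/virtio_ring.h: descriptors first, the avail ring
 * immediately after, and the used ring aligned up to 'align'. */
static inline void vring_init(struct vring *vr, unsigned int num, void *p,
                              unsigned long align)
{
    vr->num = num;
    vr->desc = p;
    vr->avail = (struct vring_avail *)((char *)p
                                       + num * sizeof(struct vring_desc));
    vr->used = (void *)(((uintptr_t)&vr->avail->ring[num]
                         + sizeof(__virtio16) + align - 1) & ~(align - 1));
}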
/* implemented by ring */
void alloc_ring(void)
{
    int ret;
    int i;
    void *p;

    ret = posix_memalign(&p, 0x1000, vring_size(ring_size, 0x1000));
    if (ret) {
        perror("Unable to allocate ring buffer.\n");
        exit(3);
    }
    memset(p, 0, vring_size(ring_size, 0x1000));
    vring_init(&ring, ring_size, p, 0x1000);

    guest.avail_idx = 0;
    guest.kicked_avail_idx = -1;
    guest.last_used_idx = 0;

    /* Put everything in free lists. */
    guest.free_head = 0;
    for (i = 0; i < ring_size - 1; i++)
        ring.desc[i].next = i + 1;

    host.used_idx = 0;
    host.called_used_idx = -1;
    guest.num_free = ring_size;

    data = malloc(ring_size * sizeof *data);
    if (!data) {
        perror("Unable to allocate data buffer.\n");
        exit(3);
    }
    memset(data, 0, ring_size * sizeof *data);
}
void vring_init_virtqueue(struct vring_virtqueue *vq, unsigned index,
                          unsigned num, unsigned vring_align,
                          struct virtio_device *vdev, void *pages,
                          bool (*notify)(struct virtqueue *),
                          void (*callback)(struct virtqueue *),
                          const char *name)
{
    unsigned i;

    vring_init(&vq->vring, num, pages, vring_align);
    vq->vq.callback = callback;
    vq->vq.vdev = vdev;
    vq->vq.name = name;
    vq->vq.num_free = num;
    vq->vq.index = index;
    vq->notify = notify;
    vq->last_used_idx = 0;
    vq->num_added = 0;
    vq->free_head = 0;

    for (i = 0; i < num - 1; i++) {
        vq->vring.desc[i].next = i + 1;
        vq->data[i] = NULL;
    }
    vq->data[i] = NULL;
}
/* Map the guest's vring to host memory */
bool vring_setup(Vring *vring, VirtIODevice *vdev, int n)
{
    hwaddr vring_addr = virtio_queue_get_ring_addr(vdev, n);
    hwaddr vring_size = virtio_queue_get_ring_size(vdev, n);
    void *vring_ptr;

    vring->broken = false;

    vring_ptr = vring_map(&vring->mr, vring_addr, vring_size, true);
    if (!vring_ptr) {
        error_report("Failed to map vring "
                     "addr %#" HWADDR_PRIx " size %" HWADDR_PRIu,
                     vring_addr, vring_size);
        vring->broken = true;
        return false;
    }

    vring_init(&vring->vr, virtio_queue_get_num(vdev, n), vring_ptr, 4096);

    vring->last_avail_idx = virtio_queue_get_last_avail_idx(vdev, n);
    vring->last_used_idx = vring->vr.used->idx;
    vring->signalled_used = 0;
    vring->signalled_used_valid = false;

    trace_vring_setup(virtio_queue_get_ring_addr(vdev, n),
                      vring->vr.desc, vring->vr.avail, vring->vr.used);
    return true;
}
static void init_phys_queue(struct virtio_queue *q)
{
    memset(q->vaddr, 0, q->ring_size);
    memset(q->data, 0, sizeof(q->data[0]) * q->num);

    /* physical page in guest */
    q->page = q->paddr / PAGE_SIZE;

    /* Set pointers in q->vring according to size */
    vring_init(&q->vring, q->num, q->vaddr, PAGE_SIZE);

    /* Everything's free at this point */
    for (int i = 0; i < q->num; i++) {
        q->vring.desc[i].flags = VRING_DESC_F_NEXT;
        q->vring.desc[i].next = (i + 1) & (q->num - 1);
    }

    q->free_num = q->num;
    q->free_head = 0;
    q->free_tail = q->num - 1;
    q->last_used = 0;

    return;
}
/*
 * This initialization routine requires at least one
 * vring i.e. vr0. vr1 is optional.
 */
static void *
init_vr(struct mic_info *mic, int fd, int type,
        struct mic_vring *vr0, struct mic_vring *vr1, int num_vq)
{
    int vr_size;
    char *va;

    vr_size = PAGE_ALIGN(_vring_size(MIC_VRING_ENTRIES,
                                     MIC_VIRTIO_RING_ALIGN) +
                         sizeof(struct _mic_vring_info));
    va = mmap(NULL, MIC_DEVICE_PAGE_END + vr_size * num_vq,
              PROT_READ, MAP_SHARED, fd, 0);
    if (MAP_FAILED == va) {
        mpsslog("%s %s %d mmap failed errno %s\n",
                mic->name, __func__, __LINE__, strerror(errno));
        goto done;
    }
    set_dp(mic, type, va);
    vr0->va = (struct mic_vring *)&va[MIC_DEVICE_PAGE_END];
    vr0->info = vr0->va +
        _vring_size(MIC_VRING_ENTRIES, MIC_VIRTIO_RING_ALIGN);
    vring_init(&vr0->vr, MIC_VRING_ENTRIES, vr0->va, MIC_VIRTIO_RING_ALIGN);
    mpsslog("%s %s vr0 %p vr0->info %p vr_size 0x%x vring 0x%x ",
            __func__, mic->name, vr0->va, vr0->info, vr_size,
            _vring_size(MIC_VRING_ENTRIES, MIC_VIRTIO_RING_ALIGN));
    mpsslog("magic 0x%x expected 0x%x\n",
            le32toh(vr0->info->magic), MIC_MAGIC + type);
    assert(le32toh(vr0->info->magic) == MIC_MAGIC + type);
    if (vr1) {
        vr1->va = (struct mic_vring *)&va[MIC_DEVICE_PAGE_END + vr_size];
        vr1->info = vr1->va +
            _vring_size(MIC_VRING_ENTRIES, MIC_VIRTIO_RING_ALIGN);
        vring_init(&vr1->vr, MIC_VRING_ENTRIES, vr1->va,
                   MIC_VIRTIO_RING_ALIGN);
        mpsslog("%s %s vr1 %p vr1->info %p vr_size 0x%x vring 0x%x ",
                __func__, mic->name, vr1->va, vr1->info, vr_size,
                _vring_size(MIC_VRING_ENTRIES, MIC_VIRTIO_RING_ALIGN));
        mpsslog("magic 0x%x expected 0x%x\n",
                le32toh(vr1->info->magic), MIC_MAGIC + type + 1);
        assert(le32toh(vr1->info->magic) == MIC_MAGIC + type + 1);
    }
done:
    return va;
}
int vp_find_vq(unsigned int ioaddr, int queue_index,
               struct vring_virtqueue **p_vq)
{
    u16 num;

    ASSERT32FLAT();
    struct vring_virtqueue *vq = *p_vq = memalign_low(PAGE_SIZE, sizeof(*vq));
    if (!vq) {
        warn_noalloc();
        goto fail;
    }
    memset(vq, 0, sizeof(*vq));

    /* select the queue */
    outw(queue_index, ioaddr + VIRTIO_PCI_QUEUE_SEL);

    /* check if the queue is available */
    num = inw(ioaddr + VIRTIO_PCI_QUEUE_NUM);
    if (!num) {
        dprintf(1, "ERROR: queue size is 0\n");
        goto fail;
    }
    if (num > MAX_QUEUE_NUM) {
        dprintf(1, "ERROR: queue size %d > %d\n", num, MAX_QUEUE_NUM);
        goto fail;
    }

    /* check if the queue is already active */
    if (inl(ioaddr + VIRTIO_PCI_QUEUE_PFN)) {
        dprintf(1, "ERROR: queue already active\n");
        goto fail;
    }
    vq->queue_index = queue_index;

    /* initialize the queue */
    struct vring *vr = &vq->vring;
    vring_init(vr, num, (unsigned char *)&vq->queue);

    /* activate the queue
     *
     * NOTE: vr->desc is initialized by vring_init()
     */
    outl((unsigned long)virt_to_phys(vr->desc) >> PAGE_SHIFT,
         ioaddr + VIRTIO_PCI_QUEUE_PFN);

    return num;

fail:
    free(vq);
    *p_vq = NULL;
    return -1;
}
ethif_virtio_emul_t *ethif_virtio_emul_init(ps_io_ops_t io_ops, int queue_size,
                                            virtio_emul_vm_t *emul_vm,
                                            ethif_driver_init driver,
                                            void *config)
{
    ethif_virtio_emul_t *emul = NULL;
    ethif_virtio_emul_internal_t *internal = NULL;
    int err;

    emul = malloc(sizeof(*emul));
    internal = malloc(sizeof(*internal));
    if (!emul || !internal) {
        goto error;
    }
    memset(emul, 0, sizeof(*emul));
    memset(internal, 0, sizeof(*internal));
    emul->internal = internal;
    emul->io_in = emul_io_in;
    emul->io_out = emul_io_out;
    emul->notify = emul_notify;
    internal->queue_size[RX_QUEUE] = queue_size;
    internal->queue_size[TX_QUEUE] = queue_size;
    /* create dummy rings. we never actually dereference the rings so they can
     * be null */
    vring_init(&internal->vring[RX_QUEUE], internal->queue_size[RX_QUEUE], 0,
               VIRTIO_PCI_VRING_ALIGN);
    vring_init(&internal->vring[TX_QUEUE], internal->queue_size[TX_QUEUE], 0,
               VIRTIO_PCI_VRING_ALIGN);
    internal->driver.cb_cookie = emul;
    internal->driver.i_cb = emul_callbacks;
    internal->dma_man = io_ops.dma_manager;
    internal->emul_vm = emul_vm;
    err = driver(&internal->driver, io_ops, config);
    if (err) {
        ZF_LOGE("Failed to initialize driver");
        goto error;
    }
    int mtu;
    internal->driver.i_fn.low_level_init(&internal->driver, internal->mac,
                                         &mtu);
    return emul;
error:
    if (emul) {
        free(emul);
    }
    if (internal) {
        free(internal);
    }
    return NULL;
}
int virtio_queue_setup(struct virtio_queue *vq, struct vmm_guest *guest,
                       physical_addr_t guest_pfn,
                       physical_size_t guest_page_size,
                       u32 desc_count, u32 align)
{
    int rc = 0;
    u32 reg_flags;
    physical_addr_t gphys_addr, hphys_addr;
    physical_size_t gphys_size, avail_size;

    if ((rc = virtio_queue_cleanup(vq))) {
        return rc;
    }

    gphys_addr = guest_pfn * guest_page_size;
    gphys_size = vring_size(desc_count, align);

    if ((rc = vmm_guest_physical_map(guest, gphys_addr, gphys_size,
                                     &hphys_addr, &avail_size, &reg_flags))) {
        vmm_printf("Failed vmm_guest_physical_map\n");
        return VMM_EFAIL;
    }

    if (!(reg_flags & VMM_REGION_ISRAM)) {
        return VMM_EINVALID;
    }

    if (avail_size < gphys_size) {
        return VMM_EINVALID;
    }

    vq->addr = (void *)vmm_host_memmap(hphys_addr, gphys_size,
                                       VMM_MEMORY_FLAGS_NORMAL);
    if (!vq->addr) {
        return VMM_ENOMEM;
    }

    vring_init(&vq->vring, desc_count, vq->addr, align);

    vq->guest = guest;
    vq->desc_count = desc_count;
    vq->align = align;
    vq->guest_pfn = guest_pfn;
    vq->guest_page_size = guest_page_size;
    vq->guest_addr = gphys_addr;
    vq->host_addr = hphys_addr;
    vq->total_size = gphys_size;

    return VMM_OK;
}
/*
 * Each device descriptor is followed by the description of its virtqueues. We
 * specify how many descriptors the virtqueue is to have.
 */
static void add_virtqueue(struct device *dev, unsigned int num_descs,
                          void (*service)(struct virtqueue *))
{
    unsigned int pages;
    struct virtqueue **i, *vq = malloc(sizeof(*vq));
    void *p;

    /* First we need some memory for this virtqueue. */
    pages = (vring_size(num_descs, LGUEST_VRING_ALIGN) + getpagesize() - 1)
            / getpagesize();
    p = get_pages(pages);

    /* Initialize the virtqueue */
    vq->next = NULL;
    vq->last_avail_idx = 0;
    vq->dev = dev;

    /*
     * This is the routine the service thread will run, and its Process ID
     * once it's running.
     */
    vq->service = service;
    vq->thread = (pid_t)-1;

    /* Initialize the configuration. */
    vq->config.num = num_descs;
    vq->config.irq = devices.next_irq++;
    vq->config.pfn = to_guest_phys(p) / getpagesize();

    /* Initialize the vring. */
    vring_init(&vq->vring, num_descs, p, LGUEST_VRING_ALIGN);

    /*
     * Append virtqueue to this device's descriptor. We use
     * device_config() to get the end of the device's current virtqueues;
     * we check that we haven't added any config or feature information
     * yet, otherwise we'd be overwriting them.
     */
    assert(dev->desc->config_len == 0 && dev->desc->feature_len == 0);
    memcpy(device_config(dev), &vq->config, sizeof(vq->config));
    dev->num_vq++;
    dev->desc->num_vq++;

    verbose("Virtqueue page %#lx\n", to_guest_phys(p));

    /*
     * Add to tail of list, so dev->vq is first vq, dev->vq->next is
     * second.
     */
    for (i = &dev->vq; *i; i = &(*i)->next);
    *i = vq;
}
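The page count above comes straight from vring_size(). A sketch of the legacy formula, again following Linux's virtio_ring.h: the descriptor table and available ring are packed together and rounded up to the alignment boundary, then the used ring follows. As a worked example, 256 descriptors at 4096-byte alignment take 8192 + 2054 = 10246 bytes, i.e. three pages.

/* Sketch of legacy vring_size(), per include/uapi/linux/virtio_ring.h.
 * The (3 + num) and trailing 3 factors cover the flags, idx and event-index
 * fields alongside the ring entries themselves. */
static inline unsigned vring_size(unsigned int num, unsigned long align)
{
    return ((sizeof(struct vring_desc) * num
             + sizeof(__virtio16) * (3 + num)
             + align - 1) & ~(align - 1))
           + sizeof(__virtio16) * 3
           + sizeof(struct vring_used_elem) * num;
}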
struct virtqueue *vring_new_virtqueue(unsigned int num,
                                      unsigned int vring_align,
                                      struct virtio_device *vdev,
                                      void *pages,
                                      void (*notify)(struct virtqueue *),
                                      void (*callback)(struct virtqueue *),
                                      const char *name)
{
    struct vring_virtqueue *vq;
    unsigned int i;

    /* We assume num is a power of 2. */
    if (num & (num - 1)) {
        dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
        return NULL;
    }

    vq = kmalloc(sizeof(*vq) + sizeof(void *) * num, GFP_KERNEL);
    if (!vq)
        return NULL;

    vring_init(&vq->vring, num, pages, vring_align);
    vq->vq.callback = callback;
    vq->vq.vdev = vdev;
    vq->vq.name = name;
    vq->notify = notify;
    vq->broken = false;
    vq->last_used_idx = 0;
    vq->num_added = 0;
    list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
    vq->in_use = false;
#endif

    vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);

    /* No callback? Tell other side not to bother us. */
    if (!callback)
        vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

    /* Put everything in free lists. */
    vq->num_free = num;
    vq->free_head = 0;
    for (i = 0; i < num - 1; i++) {
        vq->vring.desc[i].next = i + 1;
        vq->data[i] = NULL;
    }
    vq->data[i] = NULL;

    return &vq->vq;
}
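For context, a minimal sketch of how a transport driver might call this constructor. This is not from any of the sources above: my_notify() and my_callback() are placeholder handlers, and the allocation assumes a kernel providing alloc_pages_exact().

/* Hypothetical caller: allocate zeroed, page-aligned ring memory and hand
 * it to vring_new_virtqueue(). my_notify()/my_callback() stand in for the
 * transport's kick and completion handlers. */
static struct virtqueue *setup_queue(struct virtio_device *vdev,
                                     unsigned int num)
{
    void *pages = alloc_pages_exact(vring_size(num, PAGE_SIZE),
                                    GFP_KERNEL | __GFP_ZERO);
    if (!pages)
        return NULL;

    return vring_new_virtqueue(num, PAGE_SIZE, vdev, pages,
                               my_notify, my_callback, "demo-vq");
}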
void pru_virtqueue_init(
    struct pru_virtqueue     *vq,
    struct fw_rsc_vdev_vring *vring,
    volatile uint32_t        *to_arm_mbx,
    volatile uint32_t        *from_arm_mbx
)
{
    vq->id = vring->notifyid;
    vq->to_arm_mbx = to_arm_mbx;
    vq->from_arm_mbx = from_arm_mbx;
    vq->last_avail_idx = 0;

    vring_init(&vq->vring, vring->num, (void *)vring->da, vring->align);
}
void pru_virtqueue_init(
    struct pru_virtqueue     *vq,
    struct fw_rsc_vdev_vring *vring,
    uint32_t                 to_arm_event,
    uint32_t                 from_arm_event
)
{
    vq->id = vring->notifyid;
    vq->to_arm_event = to_arm_event;
    vq->from_arm_event = from_arm_event;
    vq->last_avail_idx = 0;

    vring_init(&vq->vring, vring->num, (void *)vring->da, vring->align);
}
VirtioQueue::VirtioQueue(VirtioDevice* device, uint16 queueNumber,
    uint16 ringSize)
    :
    fDevice(device),
    fQueueNumber(queueNumber),
    fRingSize(ringSize),
    fRingFree(ringSize),
    fRingHeadIndex(0),
    fRingUsedIndex(0),
    fStatus(B_OK),
    fIndirectMaxSize(0)
{
    fDescriptors = new(std::nothrow) TransferDescriptor*[fRingSize];
    if (fDescriptors == NULL) {
        fStatus = B_NO_MEMORY;
        return;
    }

    uint8* virtAddr;
    phys_addr_t physAddr;
    fAreaSize = vring_size(fRingSize, device->Alignment());
    fArea = alloc_mem((void **)&virtAddr, &physAddr, fAreaSize, 0,
        "virtqueue");
    if (fArea < B_OK) {
        fStatus = fArea;
        return;
    }
    memset(virtAddr, 0, fAreaSize);
    vring_init(&fRing, fRingSize, virtAddr, device->Alignment());

    for (uint16 i = 0; i < fRingSize - 1; i++)
        fRing.desc[i].next = i + 1;
    fRing.desc[fRingSize - 1].next = UINT16_MAX;

    if ((fDevice->Features() & VIRTIO_FEATURE_RING_INDIRECT_DESC) != 0)
        fIndirectMaxSize = 128;

    for (uint16 i = 0; i < fRingSize; i++) {
        fDescriptors[i] = new TransferDescriptor(this, fIndirectMaxSize);
        if (fDescriptors[i] == NULL || fDescriptors[i]->InitCheck() != B_OK) {
            fStatus = B_NO_MEMORY;
            return;
        }
    }

    DisableInterrupt();
    device->SetupQueue(fQueueNumber, physAddr);
}
/**
 * vq_ring_init
 */
static void vq_ring_init(struct virtqueue *vq)
{
    struct vring *vr;
    unsigned char *ring_mem;
    int i, size;

    ring_mem = vq->vq_ring_mem;
    size = vq->vq_nentries;
    vr = &vq->vq_ring;

    vring_init(vr, size, ring_mem, vq->vq_alignment);

    for (i = 0; i < size - 1; i++)
        vr->desc[i].next = i + 1;
    vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
}
int vp_find_vq(unsigned int ioaddr, int queue_index,
               struct vring_virtqueue *vq)
{
    struct vring *vr = &vq->vring;
    u16 num;

    /* select the queue */
    outw(queue_index, ioaddr + VIRTIO_PCI_QUEUE_SEL);

    /* check if the queue is available */
    num = inw(ioaddr + VIRTIO_PCI_QUEUE_NUM);
    if (!num) {
        printf("ERROR: queue size is 0\n");
        return -1;
    }
    if (num > MAX_QUEUE_NUM) {
        printf("ERROR: queue size %d > %d\n", num, MAX_QUEUE_NUM);
        return -1;
    }

    /* check if the queue is already active */
    if (inl(ioaddr + VIRTIO_PCI_QUEUE_PFN)) {
        printf("ERROR: queue already active\n");
        return -1;
    }
    vq->queue_index = queue_index;

    /* initialize the queue */
    vring_init(vr, num, (unsigned char *)&vq->queue);

    /* activate the queue
     *
     * NOTE: vr->desc is initialized by vring_init()
     */
    outl((unsigned long)virt_to_phys(vr->desc) >> PAGE_SHIFT,
         ioaddr + VIRTIO_PCI_QUEUE_PFN);

    return num;
}
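A sketch of a possible caller, assuming queue 0 is probed first; the function and variable names here are illustrative, not from the source:

/* Hypothetical caller: bring up queue 0 of a legacy virtio-pci device.
 * 'vq' provides the storage that vp_find_vq() initializes in place. */
static struct vring_virtqueue vq;

int virtio_probe_queue0(unsigned int ioaddr)
{
    int num = vp_find_vq(ioaddr, 0, &vq);
    if (num < 0)
        return -1;          /* queue missing, oversized, or already active */

    /* vq.vring.desc/avail/used now point into vq.queue, and the device has
     * been told the ring's page frame number. */
    return num;
}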
/*!
 * ======== VirtQueue_create ========
 */
VirtQueue_Handle VirtQueue_create(VirtQueue_callback callback, UInt16 procId,
                                  UInt16 id, UInt32 vaddr, UInt32 paddr,
                                  UInt32 num, UInt32 align, Void *arg)
{
    VirtQueue_Object *vq = NULL;

    vq = Memory_alloc(NULL, sizeof(VirtQueue_Object), 0, NULL);
    if (!vq) {
        return (NULL);
    }

    vq->callback = callback;
    vq->id = id;
    numQueues++;
    vq->procId = procId;
    vq->intId = coreIntId[procId];
    vq->last_avail_idx = 0;
    vq->arg = arg;

    /* init the vring */
    vring_init(&(vq->vring), num, (void *)vaddr, align);

    vq->num_free = num;
    vq->last_used_idx = 0;
    vq->vaddr = vaddr;
    vq->paddr = paddr;

    vq->vring.avail->idx = 0;
    vq->vring.used->idx = 0;

    /* Initialize the flags */
    vq->vring.avail->flags = 0;
    vq->vring.used->flags = 0;

    /* Store the VirtQueue locally */
    if (queueRegistry[procId][vq->id % 2] == NULL)
        queueRegistry[procId][vq->id % 2] = vq;
    else {
        Osal_printf("VirtQueue ID %d already created", id);
        Memory_free(NULL, vq, sizeof(VirtQueue_Object));
        vq = NULL;
    }

    return (vq);
}
static int init_vq(struct kvm *kvm, void *dev, u32 vq, u32 page_size,
                   u32 align, u32 pfn)
{
    struct bln_dev *bdev = dev;
    struct virt_queue *queue;
    void *p;

    compat__remove_message(compat_id);

    queue = &bdev->vqs[vq];
    queue->pfn = pfn;
    p = guest_flat_to_host(kvm, queue->pfn * page_size);

    thread_pool__init_job(&bdev->jobs[vq], kvm, virtio_bln_do_io, queue);
    vring_init(&queue->vring, VIRTIO_BLN_QUEUE_SIZE, p, align);

    return 0;
}
static struct virtqueue *vop_new_virtqueue(unsigned int index,
                                           unsigned int num,
                                           struct virtio_device *vdev,
                                           bool context,
                                           void *pages,
                                           bool (*notify)(struct virtqueue *vq),
                                           void (*callback)(struct virtqueue *vq),
                                           const char *name,
                                           void *used)
{
    bool weak_barriers = false;
    struct vring vring;

    vring_init(&vring, num, pages, MIC_VIRTIO_RING_ALIGN);
    /* The used ring lives in separately allocated memory here, so replace
     * the pointer that vring_init() computed. */
    vring.used = used;

    return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
                                 notify, callback, name);
}
struct virtqueue *vring_new_virtqueue(unsigned int num,
                                      struct virtio_device *vdev,
                                      void *pages,
                                      void (*notify)(struct virtqueue *),
                                      void (*callback)(struct virtqueue *))
{
    struct vring_virtqueue *vq;
    unsigned int i;

    /* We assume num is a power of 2. */
    if (num & (num - 1)) {
        dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
        return NULL;
    }

    vq = kmalloc(sizeof(*vq) + sizeof(void *) * num, GFP_KERNEL);
    if (!vq)
        return NULL;

    vring_init(&vq->vring, num, pages, PAGE_SIZE);
    vq->vq.callback = callback;
    vq->vq.vdev = vdev;
    vq->vq.vq_ops = &vring_vq_ops;
    vq->notify = notify;
    vq->broken = false;
    vq->last_used_idx = 0;
    vq->num_added = 0;
#ifdef DEBUG
    vq->in_use = false;
#endif

    /* No callback? Tell other side not to bother us. */
    if (!callback)
        vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

    /* Put everything in free lists. */
    vq->num_free = num;
    vq->free_head = 0;
    for (i = 0; i < num - 1; i++)
        vq->vring.desc[i].next = i + 1;

    return &vq->vq;
}
static int emul_io_out(struct ethif_virtio_emul *emul, unsigned int offset,
                       unsigned int size, unsigned int value)
{
    switch (offset) {
    case VIRTIO_PCI_GUEST_FEATURES:
        assert(size == 4);
        assert(value == BIT(VIRTIO_NET_F_MAC));
        break;
    case VIRTIO_PCI_STATUS:
        assert(size == 1);
        emul->internal->status = value & 0xff;
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        assert(size == 2);
        emul->internal->queue = (value & 0xffff);
        assert(emul->internal->queue == 0 || emul->internal->queue == 1);
        break;
    case VIRTIO_PCI_QUEUE_PFN: {
        assert(size == 4);
        int queue = emul->internal->queue;
        emul->internal->queue_pfn[queue] = value;
        vring_init(&emul->internal->vring[queue],
                   emul->internal->queue_size[queue],
                   (void *)(uintptr_t)(value << 12), VIRTIO_PCI_VRING_ALIGN);
        break;
    }
    case VIRTIO_PCI_QUEUE_NOTIFY:
        if (value == RX_QUEUE) {
            /* Currently RX packets will just get dropped if there was no space
             * so we will never have work to do if the client suddenly adds
             * more buffers */
        } else if (value == TX_QUEUE) {
            emul_notify_tx(emul);
        }
        break;
    default:
        printf("Unhandled offset of 0x%x of size %d, writing 0x%x\n",
               offset, size, value);
        assert(!"panic");
    }
    return 0;
}
static void virtio_dev_vring_start(struct virtqueue *vq, int queue_type)
{
    struct rte_mbuf *m;
    int i, nbufs, error, size = vq->vq_nentries;
    struct vring *vr = &vq->vq_ring;
    uint8_t *ring_mem = vq->vq_ring_virt_mem;

    PMD_INIT_FUNC_TRACE();

    /*
     * Reinitialise since virtio port might have been stopped and restarted
     */
    memset(vq->vq_ring_virt_mem, 0, vq->vq_ring_size);
    vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN);
    vq->vq_used_cons_idx = 0;
    vq->vq_desc_head_idx = 0;
    vq->vq_avail_idx = 0;
    vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
    vq->vq_free_cnt = vq->vq_nentries;
    memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);

    /* Chain all the descriptors in the ring with an END */
    for (i = 0; i < size - 1; i++)
        vr->desc[i].next = (uint16_t)(i + 1);
    vr->desc[i].next = VQ_RING_DESC_CHAIN_END;

    /*
     * Disable device(host) interrupting guest
     */
    virtqueue_disable_intr(vq);

    /* Only rx virtqueue needs mbufs to be allocated at initialization */
    if (queue_type == VTNET_RQ) {
        if (vq->mpool == NULL)
            rte_exit(EXIT_FAILURE,
                     "Cannot allocate initial mbufs for rx virtqueue");

        /* Allocate blank mbufs for the each rx descriptor */
        nbufs = 0;
        error = ENOSPC;
        while (!virtqueue_full(vq)) {
            m = rte_rxmbuf_alloc(vq->mpool);
            if (m == NULL)
                break;

            /******************************************
             *        Enqueue allocated buffers       *
             ******************************************/
            error = virtqueue_enqueue_recv_refill(vq, m);
            if (error) {
                rte_pktmbuf_free(m);
                break;
            }
            nbufs++;
        }

        vq_update_avail_idx(vq);

        PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);

        VIRTIO_WRITE_REG_2(vq->hw, VIRTIO_PCI_QUEUE_SEL,
                           vq->vq_queue_index);
        VIRTIO_WRITE_REG_4(vq->hw, VIRTIO_PCI_QUEUE_PFN,
                           vq->mz->phys_addr >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);
    } else if (queue_type == VTNET_TQ) {
/*!
 * ======== VirtQueue_create ========
 */
VirtQueue_Object *VirtQueue_create(VirtQueue_callback callback,
                                   UInt16 remoteProcId, Int vqId)
{
    VirtQueue_Object *vq;
    Void *vringAddr;
    Error_Block eb;

    Error_init(&eb);

    vq = Memory_alloc(NULL, sizeof(VirtQueue_Object), 0, &eb);
    if (!vq) {
        return (NULL);
    }

    vq->callback = callback;
    vq->id = vqId;
    vq->procId = remoteProcId;
    vq->last_avail_idx = 0;

#ifndef SMP
    if (MultiProc_self() == appm3ProcId) {
        /* vqindices that belong to AppM3 should be next to SysM3.
         * Care must be taken to not collide with SysM3's virtqueues. */
        vq->id += 2;
    }
#endif

    switch (vq->id) {
        /* IPC transport vrings */
        case ID_SELF_TO_A9:
            /* IPU/DSP -> A9 */
            vringAddr = (struct vring *) IPC_MEM_VRING0;
            break;
        case ID_A9_TO_SELF:
            /* A9 -> IPU/DSP */
            vringAddr = (struct vring *) IPC_MEM_VRING1;
            break;
#ifndef SMP
        case ID_APPM3_TO_A9:
            /* APPM3 -> A9 */
            vringAddr = (struct vring *) IPC_MEM_VRING2;
            break;
        case ID_A9_TO_APPM3:
            /* A9 -> APPM3 */
            vringAddr = (struct vring *) IPC_MEM_VRING3;
            break;
#endif
    }

    Log_print3(Diags_USER1, "vring: %d 0x%x (0x%x)\n", vq->id,
               (IArg)vringAddr, RP_MSG_RING_SIZE);

    vring_init(&(vq->vring), RP_MSG_NUM_BUFS, vringAddr, RP_MSG_VRING_ALIGN);

    /*
     * Don't trigger a mailbox message every time MPU makes another buffer
     * available
     */
    if (vq->procId == hostProcId) {
        vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;
    }

    queueRegistry[vq->id] = vq;

    return (vq);
}
void vhost_vq_setup(struct vdev_info *dev, struct vq_info *info)
{
    struct vhost_vring_state state = { .index = info->idx };
    struct vhost_vring_file file = { .index = info->idx };
    unsigned long long features = dev->vdev.features[0];
    struct vhost_vring_addr addr = {
        .index = info->idx,
        .desc_user_addr = (uint64_t)(unsigned long)info->vring.desc,
        .avail_user_addr = (uint64_t)(unsigned long)info->vring.avail,
        .used_user_addr = (uint64_t)(unsigned long)info->vring.used,
    };
    int r;

    r = ioctl(dev->control, VHOST_SET_FEATURES, &features);
    assert(r >= 0);
    state.num = info->vring.num;
    r = ioctl(dev->control, VHOST_SET_VRING_NUM, &state);
    assert(r >= 0);
    state.num = 0;
    r = ioctl(dev->control, VHOST_SET_VRING_BASE, &state);
    assert(r >= 0);
    r = ioctl(dev->control, VHOST_SET_VRING_ADDR, &addr);
    assert(r >= 0);
    file.fd = info->kick;
    r = ioctl(dev->control, VHOST_SET_VRING_KICK, &file);
    assert(r >= 0);
    file.fd = info->call;
    r = ioctl(dev->control, VHOST_SET_VRING_CALL, &file);
    assert(r >= 0);
}

static void vq_info_add(struct vdev_info *dev, int num)
{
    struct vq_info *info = &dev->vqs[dev->nvqs];
    int r;

    info->idx = dev->nvqs;
    info->kick = eventfd(0, EFD_NONBLOCK);
    info->call = eventfd(0, EFD_NONBLOCK);
    r = posix_memalign(&info->ring, 4096, vring_size(num, 4096));
    assert(r >= 0);
    memset(info->ring, 0, vring_size(num, 4096));
    vring_init(&info->vring, num, info->ring, 4096);
    info->vq = vring_new_virtqueue(info->vring.num, 4096, &dev->vdev,
                                   info->ring, vq_notify, vq_callback, "test");
    assert(info->vq);
    info->vq->priv = info;
    vhost_vq_setup(dev, info);
    dev->fds[info->idx].fd = info->call;
    dev->fds[info->idx].events = POLLIN;
    dev->nvqs++;
}

static void vdev_info_init(struct vdev_info *dev, unsigned long long features)
{
    int r;

    memset(dev, 0, sizeof *dev);
    dev->vdev.features[0] = features;
    dev->vdev.features[1] = features >> 32;
    dev->buf_size = 1024;
    dev->buf = malloc(dev->buf_size);
    assert(dev->buf);
    dev->control = open("/dev/vhost-test", O_RDWR);
    assert(dev->control >= 0);
    r = ioctl(dev->control, VHOST_SET_OWNER, NULL);
    assert(r >= 0);
    dev->mem = malloc(offsetof(struct vhost_memory, regions) +
                      sizeof dev->mem->regions[0]);
    assert(dev->mem);
    memset(dev->mem, 0, offsetof(struct vhost_memory, regions) +
                        sizeof dev->mem->regions[0]);
    dev->mem->nregions = 1;
    dev->mem->regions[0].guest_phys_addr = (long)dev->buf;
    dev->mem->regions[0].userspace_addr = (long)dev->buf;
    dev->mem->regions[0].memory_size = dev->buf_size;
    r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
    assert(r >= 0);
}

/* TODO: this is pretty bad: we get a cache line bounce
 * for the wait queue on poll and another one on read,
 * plus the read which is there just to clear the
 * current state.
 */
static void wait_for_interrupt(struct vdev_info *dev)
{
    int i;
    unsigned long long val;

    poll(dev->fds, dev->nvqs, -1);
    for (i = 0; i < dev->nvqs; ++i)
        if (dev->fds[i].revents & POLLIN) {
            read(dev->fds[i].fd, &val, sizeof val);
        }
}

static void run_test(struct vdev_info *dev, struct vq_info *vq, int bufs)
{
    struct scatterlist sl;
    long started = 0, completed = 0;
    long completed_before;
    int r, test = 1;
    unsigned len;
    long long spurious = 0;

    r = ioctl(dev->control, VHOST_TEST_RUN, &test);
    assert(r >= 0);
    for (;;) {
        virtqueue_disable_cb(vq->vq);
        completed_before = completed;
        do {
            if (started < bufs) {
                sg_init_one(&sl, dev->buf, dev->buf_size);
                r = virtqueue_add_buf(vq->vq, &sl, 1, 0,
                                      dev->buf + started);
                if (likely(r >= 0)) {
                    ++started;
                    virtqueue_kick(vq->vq);
                }
            } else
                r = -1;

            /* Flush out completed bufs if any */
            if (virtqueue_get_buf(vq->vq, &len)) {
                ++completed;
                r = 0;
            }
        } while (r >= 0);
        if (completed == completed_before)
            ++spurious;
        assert(completed <= bufs);
        assert(started <= bufs);
        if (completed == bufs)
            break;
        if (virtqueue_enable_cb(vq->vq)) {
            wait_for_interrupt(dev);
        }
    }
    test = 0;
    r = ioctl(dev->control, VHOST_TEST_RUN, &test);
    assert(r >= 0);
    fprintf(stderr, "spurious wakeups: 0x%llx\n", spurious);
}

const char optstring[] = "h";
const struct option longopts[] = {
    {
        .name = "help",
        .val = 'h',
    },
    {
        .name = "event-idx",
status_t virtio_alloc_ring(struct virtio_device *dev, uint index, uint16_t len)
{
    LTRACEF("dev %p, index %u, len %u\n", dev, index, len);

    DEBUG_ASSERT(dev);
    DEBUG_ASSERT(len > 0 && ispow2(len));
    DEBUG_ASSERT(index < MAX_VIRTIO_RINGS);

    if (len == 0 || !ispow2(len))
        return ERR_INVALID_ARGS;

    struct vring *ring = &dev->ring[index];

    /* allocate a ring */
    size_t size = vring_size(len, PAGE_SIZE);
    LTRACEF("need %zu bytes\n", size);

#if WITH_KERNEL_VM
    void *vptr;
    status_t err = vmm_alloc_contiguous(vmm_get_kernel_aspace(), "virtio_ring",
                                        size, &vptr, 0, 0,
                                        ARCH_MMU_FLAG_UNCACHED_DEVICE);
    if (err < 0)
        return ERR_NO_MEMORY;

    LTRACEF("allocated virtio_ring at va %p\n", vptr);

    /* compute the physical address */
    paddr_t pa;
    err = arch_mmu_query((vaddr_t)vptr, &pa, NULL);
    if (err < 0) {
        return ERR_NO_MEMORY;
    }

    LTRACEF("virtio_ring at pa 0x%lx\n", pa);
#else
    void *vptr = memalign(PAGE_SIZE, size);
    if (!vptr)
        return ERR_NO_MEMORY;

    LTRACEF("ptr %p\n", vptr);
    memset(vptr, 0, size);

    /* compute the physical address */
    paddr_t pa = (paddr_t)vptr;
#endif

    /* initialize the ring */
    vring_init(ring, len, vptr, PAGE_SIZE);
    dev->ring[index].free_list = 0xffff;
    dev->ring[index].free_count = 0;

    /* add all the descriptors to the free list */
    for (uint i = 0; i < len; i++) {
        virtio_free_desc(dev, index, i);
    }

    /* register the ring with the device */
    DEBUG_ASSERT(dev->mmio_config);
    dev->mmio_config->guest_page_size = PAGE_SIZE;
    dev->mmio_config->queue_sel = index;
    dev->mmio_config->queue_num = len;
    dev->mmio_config->queue_align = PAGE_SIZE;
    dev->mmio_config->queue_pfn = pa / PAGE_SIZE;

    /* mark the ring active */
    dev->active_rings_bitmap |= (1 << index);

    return NO_ERROR;
}
/*!
 * ======== VirtQueue_create ========
 */
VirtQueue_Object *VirtQueue_create(VirtQueue_callback callback,
                                   UInt16 remoteProcId)
{
    VirtQueue_Object *vq;
    void *vring_phys;
    Error_Block eb;

    Error_init(&eb);

    vq = Memory_alloc(NULL, sizeof(VirtQueue_Object), 0, &eb);
    if (!vq) {
        return (NULL);
    }

    vq->callback = callback;
    vq->id = numQueues++;
    vq->procId = remoteProcId;
    vq->last_avail_idx = 0;

    if (MultiProc_self() == appm3ProcId) {
        vq->id += 2;
    }

    switch (vq->id) {
        case ID_A9_TO_SYSM3:
            /* A9 -> SYSM3 */
            vring_phys = (struct vring *)((UInt)buf_addr + RP_MSG_BUFS_SPACE);
            break;
        case ID_SYSM3_TO_A9:
            /* SYSM3 */
            vring_phys = (struct vring *)((UInt)buf_addr + RP_MSG_RING_SIZE +
                                          RP_MSG_BUFS_SPACE);
            break;
        case ID_A9_TO_APPM3:
            /* A9 -> APPM3 */
            vring_phys = (struct vring *)((UInt)buf_addr + RP_MSG_BUFS_SPACE +
                                          RPMSG_IPC_MEM);
            break;
        case ID_APPM3_TO_A9:
            /* APPM3 */
            vring_phys = (struct vring *)((UInt)buf_addr + RP_MSG_RING_SIZE +
                                          RP_MSG_BUFS_SPACE + RPMSG_IPC_MEM);
            break;
    }

    Log_print3(Diags_USER1, "vring: %d 0x%x (0x%x)\n", vq->id,
               (IArg)vring_phys, RP_MSG_RING_SIZE);

    vring_init(&(vq->vring), RP_MSG_NUM_BUFS, vring_phys, RP_MSG_VRING_ALIGN);

    /*
     * Don't trigger a mailbox message every time A8 makes another buffer
     * available
     */
    if (vq->procId == hostProcId || vq->procId == dspProcId) {
        vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;
    }

    queueRegistry[vq->id] = vq;

    return (vq);
}
/*!
 * ======== VirtQueue_create ========
 */
VirtQueue_Handle VirtQueue_create(UInt16 remoteProcId, VirtQueue_Params *params,
                                  Error_Block *eb)
{
    VirtQueue_Object *vq;
    Void *vringAddr;

    /* Perform initialization we can't do in Instance_init (being non-XDC): */
    _VirtQueue_init();

    vq = Memory_alloc(NULL, sizeof(VirtQueue_Object), 0, eb);
    if (NULL == vq) {
        return (NULL);
    }

    /* Create the thread protection gate */
    vq->gateH = GateHwi_create(NULL, eb);
    if (Error_check(eb)) {
        Log_error0("VirtQueue_create: could not create gate object");
        Memory_free(NULL, vq, sizeof(VirtQueue_Object));
        return (NULL);
    }

    vq->callback = params->callback;
    vq->id = params->vqId;
    vq->procId = remoteProcId;
    vq->last_avail_idx = 0;

#ifndef SMP
    if (MultiProc_self() == appm3ProcId) {
        /* vqindices that belong to AppM3 should be big so they don't
         * collide with SysM3's virtqueues */
        vq->id += 2;
    }
#endif

    switch (vq->id) {
        /* IPC transport vrings */
        case ID_SELF_TO_A9:
            /* IPU/DSP -> A9 */
            vringAddr = (struct vring *) IPC_MEM_VRING0;
            break;
        case ID_A9_TO_SELF:
            /* A9 -> IPU/DSP */
            vringAddr = (struct vring *) IPC_MEM_VRING1;
            break;
#ifndef SMP
        case ID_APPM3_TO_A9:
            /* APPM3 -> A9 */
            vringAddr = (struct vring *) IPC_MEM_VRING2;
            break;
        case ID_A9_TO_APPM3:
            /* A9 -> APPM3 */
            vringAddr = (struct vring *) IPC_MEM_VRING3;
            break;
#endif
        default:
            GateHwi_delete(&vq->gateH);
            Memory_free(NULL, vq, sizeof(VirtQueue_Object));
            return (NULL);
    }

    Log_print3(Diags_USER1, "vring: %d 0x%x (0x%x)\n", vq->id, (IArg)vringAddr,
               RP_MSG_RING_SIZE);

    /* See coverity related comment in vring_init() */
    /* coverity[overrun-call] */
    vring_init(&(vq->vring), RP_MSG_NUM_BUFS, vringAddr, RP_MSG_VRING_ALIGN);

    /*
     * Don't trigger a mailbox message every time MPU makes another buffer
     * available
     */
    if (vq->procId == hostProcId) {
        vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;
    }

    queueRegistry[vq->id] = vq;

    return (vq);
}