static void kvm_del_vq(struct virtqueue *vq)
{
	struct kvm_vqconfig *config = vq->priv;

	vring_del_virtqueue(vq);
	vmem_remove_mapping(config->address,
			    vring_size(config->num,
				       KVM_S390_VIRTIO_RING_ALIGN));
}
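/*
 * For context, a hedged sketch of the matching notify callback: the s390
 * transport stores its struct kvm_vqconfig in vq->priv (as kvm_del_vq()
 * above relies on), so notification is a hypercall carrying the queue's
 * address. Modeled on the old s390 kvm_virtio driver; treat the details
 * as illustrative rather than authoritative.
 */
static bool kvm_notify(struct virtqueue *vq)
{
	struct kvm_vqconfig *config = vq->priv;
	long rc;

	/* Tell the Host which queue (identified by its address) kicked. */
	rc = kvm_hypercall1(KVM_S390_VIRTIO_NOTIFY, config->address);
	return rc >= 0;
}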
static void mic_del_vq(struct virtqueue *vq, int n)
{
	struct mic_vdev *mvdev = to_micvdev(vq->vdev);
	struct vring *vr = (struct vring *)(vq + 1);

	free_pages((unsigned long)vr->used, get_order(mvdev->used_size[n]));
	vring_del_virtqueue(vq);
	mic_card_unmap(mvdev->mdev, mvdev->vr[n]);
	mvdev->vr[n] = NULL;
}
static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
				  struct virtio_pci_vq_info *info,
				  unsigned index,
				  void (*callback)(struct virtqueue *vq),
				  const char *name,
				  bool ctx,
				  u16 msix_vec)
{
	struct virtqueue *vq;
	u16 num;
	int err;

	/* Select the queue we're interested in */
	iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);

	/* Check if queue is either not available or already active. */
	num = ioread16(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NUM);
	if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN))
		return ERR_PTR(-ENOENT);

	info->msix_vector = msix_vec;

	/* create the vring */
	vq = vring_create_virtqueue(index, num,
				    VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev,
				    true, false, ctx,
				    vp_notify, callback, name);
	if (!vq)
		return ERR_PTR(-ENOMEM);

	/* activate the queue */
	iowrite32(virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
		  vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);

	vq->priv = (void __force *)vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY;

	if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
		iowrite16(msix_vec, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
		msix_vec = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
		if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
			err = -EBUSY;
			goto out_deactivate;
		}
	}

	return vq;

out_deactivate:
	iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
	vring_del_virtqueue(vq);
	return ERR_PTR(err);
}
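/*
 * The callback registered above is worth seeing next to setup_vq():
 * because setup_vq() stashes the legacy notify register in vq->priv,
 * vp_notify() reduces to a single register write. This mirrors the
 * upstream vp_notify() in virtio_pci_common.c, shown here as a sketch.
 */
bool vp_notify(struct virtqueue *vq)
{
	/*
	 * We write the queue's selector into the notification register
	 * to signal the other end.
	 */
	iowrite16(vq->index, (void __iomem *)vq->priv);
	return true;
}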
/* Cleaning up a virtqueue is easy */
static void lg_del_vq(struct virtqueue *vq)
{
	struct lguest_vq_info *lvq = vq->priv;

	/* Release the interrupt */
	free_irq(lvq->config.irq, vq);
	/* Tell virtio_ring.c to free the virtqueue. */
	vring_del_virtqueue(vq);
	/* Unmap the pages containing the ring. */
	lguest_unmap(lvq->pages);
	/* Free our own queue information. */
	kfree(lvq);
}
static void vop_del_vq(struct virtqueue *vq, int n)
{
	struct _vop_vdev *vdev = to_vopvdev(vq->vdev);
	struct vring *vr = (struct vring *)(vq + 1);
	struct vop_device *vpdev = vdev->vpdev;

	dma_unmap_single(&vpdev->dev, vdev->used[n],
			 vdev->used_size[n], DMA_BIDIRECTIONAL);
	free_pages((unsigned long)vr->used, get_order(vdev->used_size[n]));
	vring_del_virtqueue(vq);
	vpdev->hw_ops->iounmap(vpdev, vdev->vr[n]);
	vdev->vr[n] = NULL;
}
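/*
 * A hedged sketch of the ->del_vqs() loop that would drive vop_del_vq():
 * walk the device's virtqueue list, passing the running index that
 * vop_del_vq() uses to find the per-queue mappings. Modeled on the
 * upstream vop driver; details are illustrative.
 */
static void vop_del_vqs(struct virtio_device *dev)
{
	struct _vop_vdev *vdev = to_vopvdev(dev);
	struct virtqueue *vq, *n;
	int idx = 0;

	dev_dbg(_vop_dev(vdev), "%s\n", __func__);

	list_for_each_entry_safe(vq, n, &dev->vqs, list)
		vop_del_vq(vq, idx++);
}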
static void zynq_rpmsg_virtio_del_vqs(struct virtio_device *vdev)
{
	struct zynq_rpmsg_vring *local_vring;
	int i;

	for (i = 0; i < ZYNQ_RPMSG_NUM_VRINGS; i++) {
		local_vring = &zynq_rpmsg_p->vrings[i];
		vring_del_virtqueue(local_vring->vq);
		local_vring->vq = NULL;
		dma_free_coherent(&zynq_rpmsg_platform->dev, local_vring->len,
				  local_vring->va, local_vring->dma);
	}
}
static void del_vq(struct virtio_pci_vq_info *info)
{
	struct virtqueue *vq = info->vq;
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);

	iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);

	if (vp_dev->msix_enabled) {
		iowrite16(VIRTIO_MSI_NO_VECTOR,
			  vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
		/* Flush the write out to device */
		ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR);
	}

	/* Select and deactivate the queue */
	iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);

	vring_del_virtqueue(vq);
}
static void vm_del_vq(struct virtqueue *vq)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
	struct virtio_mmio_vq_info *info = vq->priv;
	unsigned long flags, size;

	spin_lock_irqsave(&vm_dev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vm_dev->lock, flags);

	vring_del_virtqueue(vq);

	/* Select and deactivate the queue */
	writel(info->queue_index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
	writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);

	size = PAGE_ALIGN(vring_size(info->num, VIRTIO_MMIO_VRING_ALIGN));
	free_pages_exact(info->queue, size);
	kfree(info);
}
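/*
 * A sketch of how vm_del_vq() is typically driven from the transport's
 * ->del_vqs() hook: tear down every queue on the device's list, then
 * release the shared interrupt. Modeled on the virtio_mmio driver of the
 * same era; treat it as illustrative.
 */
static void vm_del_vqs(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	struct virtqueue *vq, *n;

	/* vdev->vqs is maintained by the core for every live queue. */
	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
		vm_del_vq(vq);

	free_irq(platform_get_irq(vm_dev->pdev, 0), vm_dev);
}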
/*
 * This routine finds the Nth virtqueue described in the configuration of
 * this device and sets it up.
 *
 * This is kind of an ugly duckling. It'd be nicer to have a standard
 * representation of a virtqueue in the configuration space, but it seems that
 * everyone wants to do it differently. The KVM coders want the Guest to
 * allocate its own pages and tell the Host where they are, but for lguest it's
 * simpler for the Host to simply tell us where the pages are.
 */
static struct virtqueue *lg_find_vq(struct virtio_device *vdev,
				    unsigned index,
				    void (*callback)(struct virtqueue *vq),
				    const char *name)
{
	struct lguest_device *ldev = to_lgdev(vdev);
	struct lguest_vq_info *lvq;
	struct virtqueue *vq;
	int err;

	if (!name)
		return NULL;

	/* We must have this many virtqueues. */
	if (index >= ldev->desc->num_vq)
		return ERR_PTR(-ENOENT);

	lvq = kmalloc(sizeof(*lvq), GFP_KERNEL);
	if (!lvq)
		return ERR_PTR(-ENOMEM);

	/*
	 * Make a copy of the "struct lguest_vqconfig" entry, which sits after
	 * the descriptor. We need a copy because the config space might not
	 * be aligned correctly.
	 */
	memcpy(&lvq->config, lg_vq(ldev->desc) + index, sizeof(lvq->config));

	printk("Mapping virtqueue %i addr %lx\n", index,
	       (unsigned long)lvq->config.pfn << PAGE_SHIFT);

	/* Figure out how many pages the ring will take, and map that memory */
	lvq->pages = lguest_map((unsigned long)lvq->config.pfn << PAGE_SHIFT,
				DIV_ROUND_UP(vring_size(lvq->config.num,
							LGUEST_VRING_ALIGN),
					     PAGE_SIZE));
	if (!lvq->pages) {
		err = -ENOMEM;
		goto free_lvq;
	}

	/*
	 * OK, tell virtio_ring.c to set up a virtqueue now we know its size
	 * and we've got a pointer to its pages. Note that we set weak_barriers
	 * to 'true': the host is just a(nother) SMP CPU, so we only need
	 * inter-cpu barriers.
	 */
	vq = vring_new_virtqueue(index, lvq->config.num, LGUEST_VRING_ALIGN,
				 vdev, true, lvq->pages, lg_notify, callback,
				 name);
	if (!vq) {
		err = -ENOMEM;
		goto unmap;
	}

	/* Make sure the interrupt is allocated. */
	err = lguest_setup_irq(lvq->config.irq);
	if (err)
		goto destroy_vring;

	/*
	 * Tell the interrupt for this virtqueue to go to the virtio_ring
	 * interrupt handler.
	 *
	 * FIXME: We used to have a flag for the Host to tell us we could use
	 * the interrupt as a source of randomness: it'd be nice to have that
	 * back.
	 */
	err = request_irq(lvq->config.irq, vring_interrupt, IRQF_SHARED,
			  dev_name(&vdev->dev), vq);
	if (err)
		goto free_desc;

	/*
	 * Last of all we hook up our "struct lguest_vq_info" to the
	 * virtqueue's priv pointer.
	 */
	vq->priv = lvq;
	return vq;

free_desc:
	irq_free_desc(lvq->config.irq);
destroy_vring:
	vring_del_virtqueue(vq);
unmap:
	lguest_unmap(lvq->pages);
free_lvq:
	kfree(lvq);
	return ERR_PTR(err);
}
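/*
 * A hedged sketch of the find_vqs() wrapper sitting on top of lg_find_vq():
 * set the queues up one by one and unwind on the first failure. The
 * lg_del_vqs() helper (a loop over lg_del_vq() above) is assumed here.
 */
static int lg_find_vqs(struct virtio_device *vdev, unsigned nvqs,
		       struct virtqueue *vqs[],
		       vq_callback_t *callbacks[],
		       const char *names[])
{
	struct lguest_device *ldev = to_lgdev(vdev);
	int i;

	/* We must have this many virtqueues. */
	if (nvqs > ldev->desc->num_vq)
		return -ENOENT;

	for (i = 0; i < nvqs; ++i) {
		vqs[i] = lg_find_vq(vdev, i, callbacks[i], names[i]);
		if (IS_ERR(vqs[i]))
			goto error;
	}
	return 0;

error:
	lg_del_vqs(vdev);	/* assumed: tears down vqs[0..i-1] */
	return PTR_ERR(vqs[i]);
}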
/*
 * This routine will assign vring's allocated in host/io memory. Code in
 * virtio_ring.c however continues to access this io memory as if it were local
 * memory without io accessors.
 */
static struct virtqueue *mic_find_vq(struct virtio_device *vdev,
				     unsigned index,
				     void (*callback)(struct virtqueue *vq),
				     const char *name)
{
	struct mic_vdev *mvdev = to_micvdev(vdev);
	struct mic_vqconfig __iomem *vqconfig;
	struct mic_vqconfig config;
	struct virtqueue *vq;
	void __iomem *va;
	struct _mic_vring_info __iomem *info;
	void *used;
	int vr_size, _vr_size, err, magic;
	struct vring *vr;
	u8 type = ioread8(&mvdev->desc->type);

	if (index >= ioread8(&mvdev->desc->num_vq))
		return ERR_PTR(-ENOENT);

	if (!name)
		return ERR_PTR(-ENOENT);

	/* First assign the vring's allocated in host memory */
	vqconfig = mic_vq_config(mvdev->desc) + index;
	memcpy_fromio(&config, vqconfig, sizeof(config));
	_vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN);
	vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info));
	va = mic_card_map(mvdev->mdev, le64_to_cpu(config.address), vr_size);
	if (!va)
		return ERR_PTR(-ENOMEM);
	mvdev->vr[index] = va;
	memset_io(va, 0x0, _vr_size);
	vq = vring_new_virtqueue(index, le16_to_cpu(config.num),
				 MIC_VIRTIO_RING_ALIGN, vdev, false,
				 (void __force *)va, mic_notify, callback,
				 name);
	if (!vq) {
		err = -ENOMEM;
		goto unmap;
	}
	info = va + _vr_size;
	magic = ioread32(&info->magic);

	if (WARN(magic != MIC_MAGIC + type + index, "magic mismatch")) {
		err = -EIO;
		goto del_vq;
	}

	/* Allocate and reassign used ring now */
	mvdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 +
					     sizeof(struct vring_used_elem) *
					     le16_to_cpu(config.num));
	used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(mvdev->used_size[index]));
	if (!used) {
		err = -ENOMEM;
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, err);
		goto del_vq;
	}
	iowrite64(virt_to_phys(used), &vqconfig->used_address);

	/*
	 * To reassign the used ring here we are directly accessing
	 * struct vring_virtqueue which is a private data structure
	 * in virtio_ring.c. At the minimum, a BUILD_BUG_ON() in
	 * vring_new_virtqueue() would ensure that
	 * (&vq->vring == (struct vring *)(&vq->vq + 1));
	 */
	vr = (struct vring *)(vq + 1);
	vr->used = used;

	vq->priv = mvdev;
	return vq;
del_vq:
	vring_del_virtqueue(vq);
unmap:
	mic_card_unmap(mvdev->mdev, mvdev->vr[index]);
	return ERR_PTR(err);
}
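/*
 * Illustrative only: the compile-time check that the comment in
 * mic_find_vq() asks for. struct vring_virtqueue is private to
 * virtio_ring.c, so a check like this could only live there, and the
 * member names are assumptions. It pins down the layout that makes the
 * (struct vring *)(vq + 1) cast above legal.
 */
static inline void vring_layout_check(void)
{
	/* The public struct virtqueue must be first, the vring right after. */
	BUILD_BUG_ON(offsetof(struct vring_virtqueue, vring) !=
		     sizeof(struct virtqueue));
}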
/*
 * This routine finds the Nth virtqueue described in the configuration of
 * this device and sets it up.
 */
static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,
				     unsigned index,
				     void (*callback)(struct virtqueue *vq),
				     const char *name)
{
	struct kvm_device *kdev = to_kvmdev(vdev);
	struct kvm_vqinfo *vqi;
	struct kvm_vqconfig *config;
	struct virtqueue *vq;
	long irq;
	int err = -EINVAL;

	if (index >= kdev->desc->num_vq)
		return ERR_PTR(-ENOENT);

	vqi = kzalloc(sizeof(*vqi), GFP_KERNEL);
	if (!vqi)
		return ERR_PTR(-ENOMEM);

	config = kvm_vq_config(kdev->desc) + index;

	vqi->config = config;
	vqi->pages = generic_remap_prot(config->pa,
					vring_size(config->num,
						   KVM_TILE_VIRTIO_RING_ALIGN),
					0, io_prot());
	if (!vqi->pages) {
		err = -ENOMEM;
		goto out;
	}

	vq = vring_new_virtqueue(config->num, KVM_TILE_VIRTIO_RING_ALIGN,
				 vdev, 0, vqi->pages, kvm_notify, callback,
				 name);
	if (!vq) {
		err = -ENOMEM;
		goto unmap;
	}

	/*
	 * Trigger the IPI interrupt in software.
	 * TODO: We do not need to create one irq for each vq. A bit wasteful.
	 */
	irq = create_irq();
	if (irq < 0) {
		err = -ENXIO;
		goto del_virtqueue;
	}

	tile_irq_activate(irq, TILE_IRQ_SW_CLEAR);

	if (request_irq(irq, vring_interrupt, 0, dev_name(&vdev->dev), vq)) {
		err = -ENXIO;
		destroy_irq(irq);
		goto del_virtqueue;
	}

	config->irq = irq;

	vq->priv = vqi;
	return vq;

del_virtqueue:
	vring_del_virtqueue(vq);
unmap:
	vunmap(vqi->pages);
out:
	kfree(vqi);
	return ERR_PTR(err);
}
/*
 * This routine will assign vring's allocated in host/io memory. Code in
 * virtio_ring.c however continues to access this io memory as if it were local
 * memory without io accessors.
 */
static struct virtqueue *vop_find_vq(struct virtio_device *dev,
				     unsigned index,
				     void (*callback)(struct virtqueue *vq),
				     const char *name, bool ctx)
{
	struct _vop_vdev *vdev = to_vopvdev(dev);
	struct vop_device *vpdev = vdev->vpdev;
	struct mic_vqconfig __iomem *vqconfig;
	struct mic_vqconfig config;
	struct virtqueue *vq;
	void __iomem *va;
	struct _mic_vring_info __iomem *info;
	void *used;
	int vr_size, _vr_size, err, magic;
	u8 type = ioread8(&vdev->desc->type);

	if (index >= ioread8(&vdev->desc->num_vq))
		return ERR_PTR(-ENOENT);

	if (!name)
		return ERR_PTR(-ENOENT);

	/* First assign the vring's allocated in host memory */
	vqconfig = _vop_vq_config(vdev->desc) + index;
	memcpy_fromio(&config, vqconfig, sizeof(config));
	_vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN);
	vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info));
	va = vpdev->hw_ops->remap(vpdev, le64_to_cpu(config.address), vr_size);
	if (!va)
		return ERR_PTR(-ENOMEM);
	vdev->vr[index] = va;
	memset_io(va, 0x0, _vr_size);

	info = va + _vr_size;
	magic = ioread32(&info->magic);

	if (WARN(magic != MIC_MAGIC + type + index, "magic mismatch")) {
		err = -EIO;
		goto unmap;
	}

	vdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 +
					    sizeof(struct vring_used_elem) *
					    le16_to_cpu(config.num));
	used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(vdev->used_size[index]));
	vdev->used_virt[index] = used;
	if (!used) {
		err = -ENOMEM;
		dev_err(_vop_dev(vdev), "%s %d err %d\n",
			__func__, __LINE__, err);
		goto unmap;
	}

	vq = vop_new_virtqueue(index, le16_to_cpu(config.num), dev, ctx,
			       (void __force *)va, vop_notify, callback,
			       name, used);
	if (!vq) {
		err = -ENOMEM;
		goto free_used;
	}

	vdev->used[index] = dma_map_single(&vpdev->dev, used,
					   vdev->used_size[index],
					   DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&vpdev->dev, vdev->used[index])) {
		err = -ENOMEM;
		dev_err(_vop_dev(vdev), "%s %d err %d\n",
			__func__, __LINE__, err);
		goto del_vq;
	}
	writeq(vdev->used[index], &vqconfig->used_address);

	vq->priv = vdev;
	return vq;
del_vq:
	vring_del_virtqueue(vq);
free_used:
	free_pages((unsigned long)used,
		   get_order(vdev->used_size[index]));
unmap:
	vpdev->hw_ops->unmap(vpdev, vdev->vr[index]);
	return ERR_PTR(err);
}
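/*
 * Finally, a hedged sketch of the ->find_vqs() wrapper over vop_find_vq(),
 * unwinding with vop_del_vq() on failure. The exact find_vqs signature has
 * shifted across kernel versions; this follows the ctx-aware variant
 * matching vop_find_vq() above and is illustrative, not authoritative.
 */
static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs,
			struct virtqueue *vqs[], vq_callback_t *callbacks[],
			const char * const names[], const bool *ctx,
			struct irq_affinity *desc)
{
	int i, err;

	for (i = 0; i < nvqs; ++i) {
		vqs[i] = vop_find_vq(dev, i, callbacks[i], names[i],
				     ctx ? ctx[i] : false);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto err_out;
		}
	}
	return 0;

err_out:
	/* Tear down the queues that were already set up. */
	while (--i >= 0)
		vop_del_vq(vqs[i], i);
	return err;
}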