Example #1
/* implemented by ring */
void alloc_ring(void)
{
	int ret;
	int i;
	void *p;

	ret = posix_memalign(&p, 0x1000, vring_size(ring_size, 0x1000));
	if (ret) {
		perror("Unable to allocate ring buffer.\n");
		exit(3);
	}
	memset(p, 0, vring_size(ring_size, 0x1000));
	vring_init(&ring, ring_size, p, 0x1000);

	guest.avail_idx = 0;
	guest.kicked_avail_idx = -1;
	guest.last_used_idx = 0;
	/* Put everything in free lists. */
	guest.free_head = 0;
	for (i = 0; i < ring_size - 1; i++)
		ring.desc[i].next = i + 1;
	host.used_idx = 0;
	host.called_used_idx = -1;
	guest.num_free = ring_size;
	data = malloc(ring_size * sizeof *data);
	if (!data) {
		perror("Unable to allocate data buffer.\n");
		exit(3);
	}
	memset(data, 0, ring_size * sizeof *data);
}
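
All of the examples below size their ring with vring_size(num, align), which accounts for the legacy split-ring layout: the descriptor table, then the avail ring, padding up to the alignment boundary, then the used ring. For reference, here is a minimal sketch of that computation, matching the legacy definition in Linux's include/uapi/linux/virtio_ring.h (element types inlined to keep it self-contained):

#include <stdint.h>

/* Legacy split-ring element types: 16 and 8 bytes respectively. */
struct vring_desc { uint64_t addr; uint32_t len; uint16_t flags; uint16_t next; };
struct vring_used_elem { uint32_t id; uint32_t len; };

/* Descriptor table + avail ring (flags, idx, num entries, used_event),
 * rounded up to align (a power of two), plus the used ring
 * (flags, idx, num elements, avail_event). */
static inline unsigned vring_size(unsigned int num, unsigned long align)
{
	return ((sizeof(struct vring_desc) * num + sizeof(uint16_t) * (3 + num)
		 + align - 1) & ~(align - 1))
		+ sizeof(uint16_t) * 3 + sizeof(struct vring_used_elem) * num;
}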
Example #2
static void reset_device(struct device *dev)
{
	struct virtqueue *vq;

	verbose("Resetting device %s\n", dev->name);

	/* Clear any features they've acked. */
	memset(get_feature_bits(dev) + dev->feature_len, 0, dev->feature_len);

	/* We're going to be explicitly killing threads, so ignore them. */
	signal(SIGCHLD, SIG_IGN);

	/* Zero out the virtqueues, get rid of their threads */
	for (vq = dev->vq; vq; vq = vq->next) {
		if (vq->thread != (pid_t)-1) {
			kill(vq->thread, SIGTERM);
			waitpid(vq->thread, NULL, 0);
			vq->thread = (pid_t)-1;
		}
		memset(vq->vring.desc, 0,
		       vring_size(vq->config.num, LGUEST_VRING_ALIGN));
		lg_last_avail(vq) = 0;
	}
	dev->running = false;

	/* Now we care if threads die. */
	signal(SIGCHLD, (void *)kill_launcher);
}
Example #3
File: virtio.c Project: mosconi/openbsd
void
vionet_notify_rx(struct vionet_dev *dev)
{
	uint64_t q_gpa;
	uint32_t vr_sz;
	char *vr;
	struct vring_avail *avail;

	vr_sz = vring_size(VIONET_QUEUE_SIZE);
	q_gpa = dev->vq[dev->cfg.queue_notify].qa;
	q_gpa = q_gpa * VIRTIO_PAGE_SIZE;

	vr = malloc(vr_sz);
	if (vr == NULL) {
		log_warn("malloc error getting vionet ring");
		return;
	}

	if (read_mem(q_gpa, vr, vr_sz)) {
		log_warnx("error reading gpa 0x%llx", q_gpa);
		free(vr);
		return;
	}

	/* Compute offset into avail ring */
	avail = (struct vring_avail *)(vr +
	    dev->vq[dev->cfg.queue_notify].vq_availoffset);

	dev->rx_added = 1;
	dev->vq[0].notified_avail = avail->idx;

	free(vr);
}
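
The vq_availoffset used above is precomputed by vmd when the guest programs the queue address. As a sketch of how such offsets fall out of the legacy layout (the helper name is hypothetical; compare the pointer walks in Examples #5 and #18):

#include <stddef.h>
#include <stdint.h>

/* Hypothetical helper: byte offsets of the avail and used rings inside a
 * legacy split ring with num descriptors, aligned to align (a power of two).
 * A legacy descriptor is 16 bytes: u64 addr, u32 len, u16 flags, u16 next. */
static void vring_offsets(unsigned int num, size_t align,
			  size_t *avail_off, size_t *used_off)
{
	size_t avail_end;

	*avail_off = (size_t)num * 16;	/* descriptor table comes first */
	/* avail ring: flags + idx, num ring entries, one used_event word */
	avail_end = *avail_off + (2 + num + 1) * sizeof(uint16_t);
	*used_off = (avail_end + align - 1) & ~(align - 1);
}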
Example #5
/*
 * Initialize the currently-selected virtio queue (vs->vs_curq).
 * The guest just gave us a page frame number, from which we can
 * calculate the addresses of the queue.
 */
void
vi_vq_init(struct virtio_softc *vs, uint32_t pfn)
{
	struct vqueue_info *vq;
	uint64_t phys;
	size_t size;
	char *base;

	vq = &vs->vs_queues[vs->vs_curq];
	vq->vq_pfn = pfn;
	phys = (uint64_t)pfn << VRING_PFN;
	size = vring_size(vq->vq_qsize);
	base = paddr_guest2host(vs->vs_pi->pi_vmctx, phys, size);

	/* First page(s) are descriptors... */
	vq->vq_desc = (struct virtio_desc *)base;
	base += vq->vq_qsize * sizeof(struct virtio_desc);

	/* ... immediately followed by "avail" ring (entirely uint16_t's) */
	vq->vq_avail = (struct vring_avail *)base;
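	/* 2 words for flags + idx, vq_qsize ring entries, +1 used_event word */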
	base += (2 + vq->vq_qsize + 1) * sizeof(uint16_t);

	/* Then it's rounded up to the next page... */
	base = (char *)roundup2((uintptr_t)base, VRING_ALIGN);

	/* ... and the last page(s) are the used ring. */
	vq->vq_used = (struct vring_used *)base;

	/* Mark queue as allocated, and start at 0 when we use it. */
	vq->vq_flags = VQ_ALLOC;
	vq->vq_last_avail = 0;
}
Example #6
/*
 * This routine finds the first virtqueue described in the configuration of
 * this device and sets it up.
 */
static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,
				     unsigned index,
				     void (*callback)(struct virtqueue *vq),
				     const char *name)
{
	struct kvm_device *kdev = to_kvmdev(vdev);
	struct kvm_vqconfig *config;
	struct virtqueue *vq;
	int err;

	if (index >= kdev->desc->num_vq)
		return ERR_PTR(-ENOENT);

	if (!name)
		return NULL;

	config = kvm_vq_config(kdev->desc)+index;

	err = vmem_add_mapping(config->address,
			       vring_size(config->num,
					  KVM_S390_VIRTIO_RING_ALIGN));
	if (err)
		goto out;

	vq = vring_new_virtqueue(index, config->num, KVM_S390_VIRTIO_RING_ALIGN,
				 vdev, true, (void *) config->address,
				 kvm_notify, callback, name);
	if (!vq) {
		err = -ENOMEM;
		goto unmap;
	}

	/*
	 * register a callback token
	 * The host will send this via the external interrupt parameter
	 */
	config->token = (u64) vq;

	vq->priv = config;
	return vq;
unmap:
	vmem_remove_mapping(config->address,
			    vring_size(config->num,
				       KVM_S390_VIRTIO_RING_ALIGN));
out:
	return ERR_PTR(err);
}
Example #7
/**
 * virtqueue_create - Creates new VirtIO queue
 *
 * @param virt_dev  - Pointer to VirtIO device
 * @param id        - VirtIO queue ID, must be unique
 * @param name      - Name of VirtIO queue
 * @param ring      - Pointer to vring_alloc_info control block
 * @param callback  - Pointer to callback function, invoked
 *                    when message is available on VirtIO queue
 * @param notify    - Pointer to notify function, used to notify
 *                    other side that there is job available for it
 * @param v_queue   - Created VirtIO queue.
 *
 * @return          - Function status
 */
int virtqueue_create(struct virtio_device *virt_dev, unsigned short id,
		     char *name, struct vring_alloc_info *ring,
		     void (*callback) (struct virtqueue * vq),
		     void (*notify) (struct virtqueue * vq),
		     struct virtqueue **v_queue)
{

	struct virtqueue *vq = VQ_NULL;
	int status = VQUEUE_SUCCESS;
	uint32_t vq_size = 0;

	VQ_PARAM_CHK(ring == VQ_NULL, status, ERROR_VQUEUE_INVLD_PARAM);
	VQ_PARAM_CHK(ring->num_descs == 0, status, ERROR_VQUEUE_INVLD_PARAM);
	VQ_PARAM_CHK(ring->num_descs & (ring->num_descs - 1), status,
		     ERROR_VRING_ALIGN);

	//TODO : Error check for indirect buffer addition

	if (status == VQUEUE_SUCCESS) {

		vq_size = sizeof(struct virtqueue)
		    + (ring->num_descs) * sizeof(struct vq_desc_extra);
		vq = (struct virtqueue *)env_allocate_memory(vq_size);

		if (vq == VQ_NULL) {
			return (ERROR_NO_MEM);
		}

		env_memset(vq, 0x00, vq_size);

		vq->vq_dev = virt_dev;
		env_strncpy(vq->vq_name, name, VIRTQUEUE_MAX_NAME_SZ);
		vq->vq_queue_index = id;
		vq->vq_alignment = ring->align;
		vq->vq_nentries = ring->num_descs;
		vq->vq_free_cnt = vq->vq_nentries;
		vq->callback = callback;
		vq->notify = notify;

		//TODO : Whether we want to support indirect addition or not.
		vq->vq_ring_size = vring_size(ring->num_descs, ring->align);
		vq->vq_ring_mem = (void *)ring->phy_addr;

		/* Initialize vring control block in virtqueue. */
		vq_ring_init(vq);

		/* Disable callbacks - will be enabled by the application
		 * once initialization is completed.
		 */
		virtqueue_disable_cb(vq);

		*v_queue = vq;

		//TODO : Need to add cleanup in case of error used with the indirect buffer addition
		//TODO: do we need to save the new queue in db based on its id
	}

	return (status);
}
Example #8
/*
 * This initialization routine requires at least one
 * vring i.e. vr0. vr1 is optional.
 */
static void *
init_vr(struct mic_info *mic, int fd, int type,
	struct mic_vring *vr0, struct mic_vring *vr1, int num_vq)
{
	int vr_size;
	char *va;

	vr_size = PAGE_ALIGN(vring_size(MIC_VRING_ENTRIES,
		MIC_VIRTIO_RING_ALIGN) + sizeof(struct _mic_vring_info));
	va = mmap(NULL, MIC_DEVICE_PAGE_END + vr_size * num_vq,
		PROT_READ, MAP_SHARED, fd, 0);
	if (MAP_FAILED == va) {
		mpsslog("%s %s %d mmap failed errno %s\n",
			mic->name, __func__, __LINE__,
			strerror(errno));
		goto done;
	}
	set_dp(mic, type, va);
	vr0->va = (struct mic_vring *)&va[MIC_DEVICE_PAGE_END];
	vr0->info = vr0->va +
		vring_size(MIC_VRING_ENTRIES, MIC_VIRTIO_RING_ALIGN);
	vring_init(&vr0->vr,
		   MIC_VRING_ENTRIES, vr0->va, MIC_VIRTIO_RING_ALIGN);
	mpsslog("%s %s vr0 %p vr0->info %p vr_size 0x%x vring 0x%x ",
		__func__, mic->name, vr0->va, vr0->info, vr_size,
		vring_size(MIC_VRING_ENTRIES, MIC_VIRTIO_RING_ALIGN));
	mpsslog("magic 0x%x expected 0x%x\n",
		le32toh(vr0->info->magic), MIC_MAGIC + type);
	assert(le32toh(vr0->info->magic) == MIC_MAGIC + type);
	if (vr1) {
		vr1->va = (struct mic_vring *)
			&va[MIC_DEVICE_PAGE_END + vr_size];
		vr1->info = vr1->va + vring_size(MIC_VRING_ENTRIES,
			MIC_VIRTIO_RING_ALIGN);
		vring_init(&vr1->vr,
			   MIC_VRING_ENTRIES, vr1->va, MIC_VIRTIO_RING_ALIGN);
		mpsslog("%s %s vr1 %p vr1->info %p vr_size 0x%x vring 0x%x ",
			__func__, mic->name, vr1->va, vr1->info, vr_size,
			vring_size(MIC_VRING_ENTRIES, MIC_VIRTIO_RING_ALIGN));
		mpsslog("magic 0x%x expected 0x%x\n",
			le32toh(vr1->info->magic), MIC_MAGIC + type + 1);
		assert(le32toh(vr1->info->magic) == MIC_MAGIC + type + 1);
	}
done:
	return va;
}
Example #9
static void kvm_del_vq(struct virtqueue *vq)
{
	struct kvm_vqconfig *config = vq->priv;

	vring_del_virtqueue(vq);
	vmem_remove_mapping(config->address,
			    vring_size(config->num,
				       KVM_S390_VIRTIO_RING_ALIGN));
}
Example #10
int virtio_queue_setup(struct virtio_queue *vq,
			struct vmm_guest *guest,
			physical_addr_t guest_pfn,
			physical_size_t guest_page_size,
			u32 desc_count, u32 align)
{
	int rc = 0;
	u32 reg_flags;
	physical_addr_t gphys_addr, hphys_addr;
	physical_size_t gphys_size, avail_size;

	if ((rc = virtio_queue_cleanup(vq))) {
		return rc;
	}

	gphys_addr = guest_pfn * guest_page_size;
	gphys_size = vring_size(desc_count, align);

	if ((rc = vmm_guest_physical_map(guest, gphys_addr, gphys_size,
					 &hphys_addr, &avail_size,
					 &reg_flags))) {
		vmm_printf("Failed vmm_guest_physical_map\n");
		return VMM_EFAIL;
	}

	if (!(reg_flags & VMM_REGION_ISRAM)) {
		return VMM_EINVALID;
	}

	if (avail_size < gphys_size) {
		return VMM_EINVALID;
	}

	vq->addr = (void *)vmm_host_memmap(hphys_addr, gphys_size, 
					   VMM_MEMORY_FLAGS_NORMAL);
	if (!vq->addr) {
		return VMM_ENOMEM;
	}

	vring_init(&vq->vring, desc_count, vq->addr, align);

	vq->guest = guest;
	vq->desc_count = desc_count;
	vq->align = align;
	vq->guest_pfn = guest_pfn;
	vq->guest_page_size = guest_page_size;

	vq->guest_addr = gphys_addr;
	vq->host_addr = hphys_addr;
	vq->total_size = gphys_size;

	return VMM_OK;
}
Example #11
/*
 * Each device descriptor is followed by the description of its virtqueues.  We
 * specify how many descriptors the virtqueue is to have.
 */
static void add_virtqueue(struct device *dev, unsigned int num_descs,
			  void (*service)(struct virtqueue *))
{
	unsigned int pages;
	struct virtqueue **i, *vq = malloc(sizeof(*vq));
	void *p;

	/* First we need some memory for this virtqueue. */
	pages = (vring_size(num_descs, LGUEST_VRING_ALIGN) + getpagesize() - 1)
		/ getpagesize();
	p = get_pages(pages);

	/* Initialize the virtqueue */
	vq->next = NULL;
	vq->last_avail_idx = 0;
	vq->dev = dev;

	/*
	 * This is the routine the service thread will run, and its Process ID
	 * once it's running.
	 */
	vq->service = service;
	vq->thread = (pid_t)-1;

	/* Initialize the configuration. */
	vq->config.num = num_descs;
	vq->config.irq = devices.next_irq++;
	vq->config.pfn = to_guest_phys(p) / getpagesize();

	/* Initialize the vring. */
	vring_init(&vq->vring, num_descs, p, LGUEST_VRING_ALIGN);

	/*
	 * Append virtqueue to this device's descriptor.  We use
	 * device_config() to get the end of the device's current virtqueues;
	 * we check that we haven't added any config or feature information
	 * yet, otherwise we'd be overwriting them.
	 */
	assert(dev->desc->config_len == 0 && dev->desc->feature_len == 0);
	memcpy(device_config(dev), &vq->config, sizeof(vq->config));
	dev->num_vq++;
	dev->desc->num_vq++;

	verbose("Virtqueue page %#lx\n", to_guest_phys(p));

	/*
	 * Add to tail of list, so dev->vq is first vq, dev->vq->next is
	 * second.
	 */
	for (i = &dev->vq; *i; i = &(*i)->next);
	*i = vq;
}
Example #12
/* FIXME: We need to tell other side about removal, to synchronize. */
static void vring_shutdown(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int num = vq->vring.num;
	unsigned int index = vq->vq.ulIndex;
	void *pages = vq->vring.desc;
	VirtIODevice * pVirtIODevice = vq->vq.vdev;
	bool use_published_indices = vq->use_published_indices;
	void (*notify)(struct virtqueue *) = vq->notify;

	memset(pages, 0, vring_size(num,PAGE_SIZE));
	initialize_virtqueue(vq, num, pVirtIODevice, pages, notify, index, use_published_indices);
}
Example #13
VirtioQueue::VirtioQueue(VirtioDevice* device, uint16 queueNumber,
	uint16 ringSize)
	:
	fDevice(device),
	fQueueNumber(queueNumber),
	fRingSize(ringSize),
	fRingFree(ringSize),
	fRingHeadIndex(0),
	fRingUsedIndex(0),
	fStatus(B_OK),
	fIndirectMaxSize(0)
{
	fDescriptors = new(std::nothrow) TransferDescriptor*[fRingSize];
	if (fDescriptors == NULL) {
		fStatus = B_NO_MEMORY;
		return;
	}

	uint8* virtAddr;
	phys_addr_t physAddr;
	fAreaSize = vring_size(fRingSize, device->Alignment());
	fArea = alloc_mem((void **)&virtAddr, &physAddr, fAreaSize, 0,
		"virtqueue");
	if (fArea < B_OK) {
		fStatus = fArea;
		return;
	}
	memset(virtAddr, 0, fAreaSize);
	vring_init(&fRing, fRingSize, virtAddr, device->Alignment());

	for (uint16 i = 0; i < fRingSize - 1; i++)
		fRing.desc[i].next = i + 1;
	fRing.desc[fRingSize - 1].next = UINT16_MAX;

	if ((fDevice->Features() & VIRTIO_FEATURE_RING_INDIRECT_DESC) != 0)
		fIndirectMaxSize = 128;

	for (uint16 i = 0; i < fRingSize; i++) {
		fDescriptors[i] = new TransferDescriptor(this, fIndirectMaxSize);
		if (fDescriptors[i] == NULL || fDescriptors[i]->InitCheck() != B_OK) {
			fStatus = B_NO_MEMORY;
			return;
		}
	}

	DisableInterrupt();

	device->SetupQueue(fQueueNumber, physAddr);
}
Example #14
/**
 * handle_vdev_rsc
 *
 * Virtio device resource handler
 *
 * @param rproc - pointer to remote remote_proc
 * @param rsc   - pointer to virtio device resource
 *
 * @returns - execution status
 *
 */
int handle_vdev_rsc(struct remote_proc *rproc, void *rsc)
{

	struct fw_rsc_vdev *vdev_rsc = (struct fw_rsc_vdev *)rsc;
	struct fw_rsc_vdev_vring *vring;
	struct proc_vdev *vdev;
	struct proc_vring *vring_table;
	int idx;

	if (!vdev_rsc) {
		return RPROC_ERR_RSC_TAB_NP;
	}

	/* Maximum supported vrings per Virtio device */
	if (vdev_rsc->num_of_vrings > RSC_TAB_MAX_VRINGS) {
		return RPROC_ERR_RSC_TAB_VDEV_NRINGS;
	}

	/* Reserved fields - must be zero */
	if (vdev_rsc->reserved[0] || vdev_rsc->reserved[1]) {
		return RPROC_ERR_RSC_TAB_RSVD;
	}

	/* Get the Virtio device from HIL proc */
	vdev = hil_get_vdev_info(rproc->proc);

	/* Initialize HIL Virtio device resources */
	vdev->num_vrings = vdev_rsc->num_of_vrings;
	vdev->dfeatures = vdev_rsc->dfeatures;
	vdev->gfeatures = vdev_rsc->gfeatures;
	vring_table = &vdev->vring_info[0];

	for (idx = 0; idx < vdev_rsc->num_of_vrings; idx++) {
		vring = &vdev_rsc->vring[idx];

		/* Initialize HIL vring resources */
		vring_table[idx].phy_addr = (void *)vring->da;
		vring_table[idx].num_descs = vring->num;
		vring_table[idx].align = vring->align;

		/* Enable access to vring memory regions */
		env_map_memory(vring->da, vring->da,
			       vring_size(vring->num, vring->align),
			       (SHARED_MEM | UNCACHED));
	}

	return RPROC_SUCCESS;
}
Example #15
void
vionet_notify_rx(struct vionet_dev *dev)
{
	uint64_t q_gpa;
	uint32_t vr_sz, i;
	uint16_t idx, pkt_desc_idx;
	char *vr;
	struct vring_desc *desc, *pkt_desc;
	struct vring_avail *avail;
	struct vring_used *used;

	vr_sz = vring_size(VIONET_QUEUE_SIZE);
	q_gpa = dev->vq[dev->cfg.queue_notify].qa;
	q_gpa = q_gpa * VIRTIO_PAGE_SIZE;

	vr = malloc(vr_sz);
	if (vr == NULL) {
		fprintf(stderr, "malloc error getting vionet ring\n\r");
		return;
	}

	bzero(vr, vr_sz);

	for (i = 0; i < vr_sz; i += VIRTIO_PAGE_SIZE) {
		if (read_page((uint32_t)q_gpa + i, vr + i, PAGE_SIZE, 0)) {
			fprintf(stderr, "error reading gpa 0x%x\n\r",
			    (uint32_t)q_gpa + i);
			free(vr);
			return;
		}
	}

	/* Compute offsets in ring of descriptors, avail ring, and used ring */
	desc = (struct vring_desc *)(vr);
	avail = (struct vring_avail *)(vr +
	    dev->vq[dev->cfg.queue_notify].vq_availoffset);
	used = (struct vring_used *)(vr +
	    dev->vq[dev->cfg.queue_notify].vq_usedoffset);

	idx = dev->vq[dev->cfg.queue_notify].last_avail & VIONET_QUEUE_MASK;
	pkt_desc_idx = avail->ring[idx] & VIONET_QUEUE_MASK;
	pkt_desc = &desc[pkt_desc_idx];

	dev->rx_added = 1;

	free(vr);
}
Example #16
static void vm_del_vq(struct virtqueue *vq)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
	struct virtio_mmio_vq_info *info = vq->priv;
	unsigned long flags, size;

	spin_lock_irqsave(&vm_dev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vm_dev->lock, flags);

	vring_del_virtqueue(vq);

	/* Select and deactivate the queue */
	writel(info->queue_index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
	writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);

	size = PAGE_ALIGN(vring_size(info->num, VIRTIO_MMIO_VRING_ALIGN));
	free_pages_exact(info->queue, size);
	kfree(info);
}
Example #17
static int zynq_rpmsg_virtio_find_vqs(struct virtio_device *vdev,
					unsigned nvqs, struct virtqueue *vqs[],
					vq_callback_t *callbacks[],
					const char *names[])
{
	int				i;
	struct zynq_rpmsg_vring   *local_vring;
	void				*vring_va;
	int				 size;

	/* Skip through the vrings. */
	for (i = 0; i < nvqs; i++) {

		local_vring = &(zynq_rpmsg_p->vrings[i]);

		local_vring->len = zynq_rpmsg_p->num_descs;

		size = vring_size(zynq_rpmsg_p->num_descs,
					zynq_rpmsg_p->align);

		/* Allocate non-cacheable memory for the vring. */
		local_vring->va = dma_alloc_coherent
					(&(zynq_rpmsg_platform->dev),
					size, &(local_vring->dma), GFP_KERNEL);

		vring_va = local_vring->va;

		memset(vring_va, 0, size);

		local_vring->vq = vring_new_virtqueue(i,
						zynq_rpmsg_p->num_descs,
						zynq_rpmsg_p->align, vdev,
						false, vring_va,
						zynq_rpmsg_virtio_notify,
						callbacks[i], names[i]);

		vqs[i] = local_vring->vq;
	}

	return 0;
}
Example #18
File: if_vtbe.c Project: jashank/freebsd
static int
vq_init(struct vtbe_softc *sc)
{
	struct vqueue_info *vq;
	uint8_t *base;
	int size;
	int reg;
	int pfn;

	vq = &sc->vs_queues[sc->vs_curq];
	vq->vq_qsize = DESC_COUNT;

	reg = READ4(sc, VIRTIO_MMIO_QUEUE_PFN);
	pfn = be32toh(reg);
	vq->vq_pfn = pfn;

	size = vring_size(vq->vq_qsize, VRING_ALIGN);
	base = paddr_map(sc->beri_mem_offset,
		(pfn << PAGE_SHIFT), size);

	/* First pages are descriptors */
	vq->vq_desc = (struct vring_desc *)base;
	base += vq->vq_qsize * sizeof(struct vring_desc);

	/* Then avail ring */
	vq->vq_avail = (struct vring_avail *)base;
	base += (2 + vq->vq_qsize + 1) * sizeof(uint16_t);

	/* Then it's rounded up to the next page */
	base = (uint8_t *)roundup2((uintptr_t)base, VRING_ALIGN);

	/* And the last pages are the used ring */
	vq->vq_used = (struct vring_used *)base;

	/* Mark queue as allocated, and start at 0 when we use it. */
	vq->vq_flags = VQ_ALLOC;
	vq->vq_last_avail = 0;

	return (0);
}
Example #19
File: virtio.c Project: OpenAMP/open-amp
int virtio_create_virtqueues(struct virtio_device *vdev, unsigned int flags,
			     unsigned int nvqs, const char *names[],
			     vq_callback *callbacks[])
{
	struct virtio_vring_info *vring_info;
	struct vring_alloc_info *vring_alloc;
	unsigned int num_vrings, i;
	int ret;
	(void)flags;

	num_vrings = vdev->vrings_num;
	if (nvqs > num_vrings)
		return ERROR_VQUEUE_INVLD_PARAM;
	/* Initialize virtqueue for each vring */
	for (i = 0; i < nvqs; i++) {
		vring_info = &vdev->vrings_info[i];

		vring_alloc = &vring_info->info;
#ifndef VIRTIO_SLAVE_ONLY
		if (vdev->role == VIRTIO_DEV_MASTER) {
			size_t offset;
			struct metal_io_region *io = vring_info->io;

			offset = metal_io_virt_to_offset(io,
							 vring_alloc->vaddr);
			metal_io_block_set(io, offset, 0,
					   vring_size(vring_alloc->num_descs,
						      vring_alloc->align));
		}
#endif
		ret = virtqueue_create(vdev, i, names[i], vring_alloc,
				       callbacks[i], vdev->func->notify,
				       vring_info->vq);
		if (ret)
			return ret;
	}
	return 0;
}
Example #21
File: virtio.c Project: wmdwjwm/minix
static int
alloc_phys_queue(struct virtio_queue *q)
{
	assert(q != NULL);

	/* How much memory do we need? */
	q->ring_size = vring_size(q->num, PAGE_SIZE);

	q->vaddr = alloc_contig(q->ring_size, AC_ALIGN4K, &q->paddr);

	if (q->vaddr == NULL)
		return ENOMEM;

	q->data = alloc_contig(sizeof(q->data[0]) * q->num, AC_ALIGN4K, NULL);

	if (q->data == NULL) {
		free_contig(q->vaddr, q->ring_size);
		q->vaddr = NULL;
		q->paddr = 0;
		return ENOMEM;
	}

	return OK;
}
Example #22
/*
 * This routine finds the Nth virtqueue described in the configuration of
 * this device and sets it up.
 *
 * This is kind of an ugly duckling.  It'd be nicer to have a standard
 * representation of a virtqueue in the configuration space, but it seems that
 * everyone wants to do it differently.  The KVM coders want the Guest to
 * allocate its own pages and tell the Host where they are, but for lguest it's
 * simpler for the Host to simply tell us where the pages are.
 */
static struct virtqueue *lg_find_vq(struct virtio_device *vdev,
				    unsigned index,
				    void (*callback)(struct virtqueue *vq),
				    const char *name)
{
	struct lguest_device *ldev = to_lgdev(vdev);
	struct lguest_vq_info *lvq;
	struct virtqueue *vq;
	int err;

	if (!name)
		return NULL;

	/* We must have this many virtqueues. */
	if (index >= ldev->desc->num_vq)
		return ERR_PTR(-ENOENT);

	lvq = kmalloc(sizeof(*lvq), GFP_KERNEL);
	if (!lvq)
		return ERR_PTR(-ENOMEM);

	/*
	 * Make a copy of the "struct lguest_vqconfig" entry, which sits after
	 * the descriptor.  We need a copy because the config space might not
	 * be aligned correctly.
	 */
	memcpy(&lvq->config, lg_vq(ldev->desc)+index, sizeof(lvq->config));

	printk("Mapping virtqueue %i addr %lx\n", index,
	       (unsigned long)lvq->config.pfn << PAGE_SHIFT);
	/* Figure out how many pages the ring will take, and map that memory */
	lvq->pages = lguest_map((unsigned long)lvq->config.pfn << PAGE_SHIFT,
				DIV_ROUND_UP(vring_size(lvq->config.num,
							LGUEST_VRING_ALIGN),
					     PAGE_SIZE));
	if (!lvq->pages) {
		err = -ENOMEM;
		goto free_lvq;
	}

	/*
	 * OK, tell virtio_ring.c to set up a virtqueue now we know its size
	 * and we've got a pointer to its pages.  Note that we set weak_barriers
	 * to 'true': the host is just a(nother) SMP CPU, so we only need inter-cpu
	 * barriers.
	 */
	vq = vring_new_virtqueue(index, lvq->config.num, LGUEST_VRING_ALIGN, vdev,
				 true, lvq->pages, lg_notify, callback, name);
	if (!vq) {
		err = -ENOMEM;
		goto unmap;
	}

	/* Make sure the interrupt is allocated. */
	err = lguest_setup_irq(lvq->config.irq);
	if (err)
		goto destroy_vring;

	/*
	 * Tell the interrupt for this virtqueue to go to the virtio_ring
	 * interrupt handler.
	 *
	 * FIXME: We used to have a flag for the Host to tell us we could use
	 * the interrupt as a source of randomness: it'd be nice to have that
	 * back.
	 */
	err = request_irq(lvq->config.irq, vring_interrupt, IRQF_SHARED,
			  dev_name(&vdev->dev), vq);
	if (err)
		goto free_desc;

	/*
	 * Last of all we hook up our "struct lguest_vq_info" to the
	 * virtqueue's priv pointer.
	 */
	vq->priv = lvq;
	return vq;

free_desc:
	irq_free_desc(lvq->config.irq);
destroy_vring:
	vring_del_virtqueue(vq);
unmap:
	lguest_unmap(lvq->pages);
free_lvq:
	kfree(lvq);
	return ERR_PTR(err);
}
Example #23
static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
				  void (*callback)(struct virtqueue *vq),
				  const char *name)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	struct virtio_mmio_vq_info *info;
	struct virtqueue *vq;
	unsigned long flags, size;
	int err;

	/* Select the queue we're interested in */
	writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);

	/* Queue shouldn't already be set up. */
	if (readl(vm_dev->base + VIRTIO_MMIO_QUEUE_PFN)) {
		err = -ENOENT;
		goto error_available;
	}

	/* Allocate and fill out our active queue description */
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		err = -ENOMEM;
		goto error_kmalloc;
	}
	info->queue_index = index;

	/* Allocate pages for the queue - start with a queue as big as
	 * possible (limited by maximum size allowed by device), drop down
	 * to a minimal size, just big enough to fit descriptor table
	 * and two rings (which makes it "alignment_size * 2")
	 */
	info->num = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NUM_MAX);
	while (1) {
		size = PAGE_ALIGN(vring_size(info->num,
				VIRTIO_MMIO_VRING_ALIGN));
		/* Already smallest possible allocation? */
		if (size <= VIRTIO_MMIO_VRING_ALIGN * 2) {
			err = -ENOMEM;
			goto error_alloc_pages;
		}

		info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
		if (info->queue)
			break;

		info->num /= 2;
	}

	/* Activate the queue */
	writel(info->num, vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
	writel(VIRTIO_MMIO_VRING_ALIGN,
			vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN);
	writel(virt_to_phys(info->queue) >> PAGE_SHIFT,
			vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);

	/* Create the vring */
	vq = vring_new_virtqueue(info->num, VIRTIO_MMIO_VRING_ALIGN, vdev,
				 true, info->queue, vm_notify, callback, name);
	if (!vq) {
		err = -ENOMEM;
		goto error_new_virtqueue;
	}

	vq->priv = info;
	info->vq = vq;

	spin_lock_irqsave(&vm_dev->lock, flags);
	list_add(&info->node, &vm_dev->virtqueues);
	spin_unlock_irqrestore(&vm_dev->lock, flags);

	return vq;

error_new_virtqueue:
	writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
	free_pages_exact(info->queue, size);
error_alloc_pages:
	kfree(info);
error_kmalloc:
error_available:
	return ERR_PTR(err);
}
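
As a worked check of the "alignment_size * 2" floor in the allocation loop above: with align = 4096, the descriptor table plus avail ring round up to at least one aligned chunk and the used ring spills into a second, so PAGE_ALIGN(vring_size(num, 4096)) never drops below 8192. A standalone computation, reusing the legacy formula sketched after Example #1:

#include <stdio.h>
#include <stdint.h>

struct vring_desc { uint64_t addr; uint32_t len; uint16_t flags; uint16_t next; };
struct vring_used_elem { uint32_t id; uint32_t len; };

static unsigned vring_size(unsigned int num, unsigned long align)
{
	return ((sizeof(struct vring_desc) * num + sizeof(uint16_t) * (3 + num)
		 + align - 1) & ~(align - 1))
		+ sizeof(uint16_t) * 3 + sizeof(struct vring_used_elem) * num;
}

int main(void)
{
	/* Even num = 1 takes a full 4096-byte chunk for desc+avail, and the
	 * used ring lands in a second page once the total is page-aligned. */
	for (unsigned int num = 1; num <= 256; num *= 2)
		printf("num %3u -> vring_size %u\n", num, vring_size(num, 4096));
	return 0;
}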
Example #24
void vhost_vq_setup(struct vdev_info *dev, struct vq_info *info)
{
	struct vhost_vring_state state = { .index = info->idx };
	struct vhost_vring_file file = { .index = info->idx };
	unsigned long long features = dev->vdev.features[0];
	struct vhost_vring_addr addr = {
		.index = info->idx,
		.desc_user_addr = (uint64_t)(unsigned long)info->vring.desc,
		.avail_user_addr = (uint64_t)(unsigned long)info->vring.avail,
		.used_user_addr = (uint64_t)(unsigned long)info->vring.used,
	};
	int r;
	r = ioctl(dev->control, VHOST_SET_FEATURES, &features);
	assert(r >= 0);
	state.num = info->vring.num;
	r = ioctl(dev->control, VHOST_SET_VRING_NUM, &state);
	assert(r >= 0);
	state.num = 0;
	r = ioctl(dev->control, VHOST_SET_VRING_BASE, &state);
	assert(r >= 0);
	r = ioctl(dev->control, VHOST_SET_VRING_ADDR, &addr);
	assert(r >= 0);
	file.fd = info->kick;
	r = ioctl(dev->control, VHOST_SET_VRING_KICK, &file);
	assert(r >= 0);
	file.fd = info->call;
	r = ioctl(dev->control, VHOST_SET_VRING_CALL, &file);
	assert(r >= 0);
}

static void vq_info_add(struct vdev_info *dev, int num)
{
	struct vq_info *info = &dev->vqs[dev->nvqs];
	int r;
	info->idx = dev->nvqs;
	info->kick = eventfd(0, EFD_NONBLOCK);
	info->call = eventfd(0, EFD_NONBLOCK);
	r = posix_memalign(&info->ring, 4096, vring_size(num, 4096));
	assert(r >= 0);
	memset(info->ring, 0, vring_size(num, 4096));
	vring_init(&info->vring, num, info->ring, 4096);
	info->vq = vring_new_virtqueue(info->vring.num, 4096, &dev->vdev, info->ring,
				       vq_notify, vq_callback, "test");
	assert(info->vq);
	info->vq->priv = info;
	vhost_vq_setup(dev, info);
	dev->fds[info->idx].fd = info->call;
	dev->fds[info->idx].events = POLLIN;
	dev->nvqs++;
}

static void vdev_info_init(struct vdev_info* dev, unsigned long long features)
{
	int r;
	memset(dev, 0, sizeof *dev);
	dev->vdev.features[0] = features;
	dev->vdev.features[1] = features >> 32;
	dev->buf_size = 1024;
	dev->buf = malloc(dev->buf_size);
	assert(dev->buf);
	dev->control = open("/dev/vhost-test", O_RDWR);
	assert(dev->control >= 0);
	r = ioctl(dev->control, VHOST_SET_OWNER, NULL);
	assert(r >= 0);
	dev->mem = malloc(offsetof(struct vhost_memory, regions) +
			  sizeof dev->mem->regions[0]);
	assert(dev->mem);
	memset(dev->mem, 0, offsetof(struct vhost_memory, regions) +
                          sizeof dev->mem->regions[0]);
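	/* Identity-map the test buffer: guest-physical and userspace addresses
	 * are equal, so ring entries can carry plain pointers into dev->buf. */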
	dev->mem->nregions = 1;
	dev->mem->regions[0].guest_phys_addr = (long)dev->buf;
	dev->mem->regions[0].userspace_addr = (long)dev->buf;
	dev->mem->regions[0].memory_size = dev->buf_size;
	r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
	assert(r >= 0);
}

/* TODO: this is pretty bad: we get a cache line bounce
 * for the wait queue on poll and another one on read,
 * plus the read which is there just to clear the
 * current state. */
static void wait_for_interrupt(struct vdev_info *dev)
{
	int i;
	unsigned long long val;
	poll(dev->fds, dev->nvqs, -1);
	for (i = 0; i < dev->nvqs; ++i)
		if (dev->fds[i].revents & POLLIN) {
			read(dev->fds[i].fd, &val, sizeof val);
		}
}

static void run_test(struct vdev_info *dev, struct vq_info *vq, int bufs)
{
	struct scatterlist sl;
	long started = 0, completed = 0;
	long completed_before;
	int r, test = 1;
	unsigned len;
	long long spurious = 0;
	r = ioctl(dev->control, VHOST_TEST_RUN, &test);
	assert(r >= 0);
	for (;;) {
		virtqueue_disable_cb(vq->vq);
		completed_before = completed;
		do {
			if (started < bufs) {
				sg_init_one(&sl, dev->buf, dev->buf_size);
				r = virtqueue_add_buf(vq->vq, &sl, 1, 0,
						      dev->buf + started);
				if (likely(r >= 0)) {
					++started;
					virtqueue_kick(vq->vq);
				}
			} else
				r = -1;

			/* Flush out completed bufs if any */
			if (virtqueue_get_buf(vq->vq, &len)) {
				++completed;
				r = 0;
			}

		} while (r >= 0);
		if (completed == completed_before)
			++spurious;
		assert(completed <= bufs);
		assert(started <= bufs);
		if (completed == bufs)
			break;
		if (virtqueue_enable_cb(vq->vq)) {
			wait_for_interrupt(dev);
		}
	}
	test = 0;
	r = ioctl(dev->control, VHOST_TEST_RUN, &test);
	assert(r >= 0);
	fprintf(stderr, "spurious wakeus: 0x%llx\n", spurious);
}

const char optstring[] = "h";
const struct option longopts[] = {
	{
		.name = "help",
		.val = 'h',
	},
	{
		.name = "event-idx",
Example #25
/*
 * This routine finds the first virtqueue described in the configuration of
 * this device and sets it up.
 */
static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,
				     unsigned index,
				     void (*callback)(struct virtqueue *vq),
				     const char *name)
{
	struct kvm_device *kdev = to_kvmdev(vdev);
	struct kvm_vqinfo *vqi;
	struct kvm_vqconfig *config;
	struct virtqueue *vq;
	long irq;
	int err = -EINVAL;

	if (index >= kdev->desc->num_vq)
		return ERR_PTR(-ENOENT);

	vqi = kzalloc(sizeof(*vqi), GFP_KERNEL);
	if (!vqi)
		return ERR_PTR(-ENOMEM);

	config = kvm_vq_config(kdev->desc)+index;

	vqi->config = config;
	vqi->pages = generic_remap_prot(config->pa,
				vring_size(config->num,
					KVM_TILE_VIRTIO_RING_ALIGN),
					0, io_prot());
	if (!vqi->pages) {
		err = -ENOMEM;
		goto out;
	}

	vq = vring_new_virtqueue(config->num, KVM_TILE_VIRTIO_RING_ALIGN,
				 vdev, 0, vqi->pages,
				 kvm_notify, callback, name);
	if (!vq) {
		err = -ENOMEM;
		goto unmap;
	}

	/*
	 * Trigger the IPI interrupt in SW way.
	 * TODO: We do not need to create one irq for each vq. A bit wasteful.
	 */
	irq = create_irq();
	if (irq < 0) {
		err = -ENXIO;
		goto del_virtqueue;
	}

	tile_irq_activate(irq, TILE_IRQ_SW_CLEAR);

	if (request_irq(irq, vring_interrupt, 0, dev_name(&vdev->dev), vq)) {
		err = -ENXIO;
		destroy_irq(irq);
		goto del_virtqueue;
	}

	config->irq = irq;

	vq->priv = vqi;
	return vq;

del_virtqueue:
	vring_del_virtqueue(vq);
unmap:
	vunmap(vqi->pages);
out:
	return ERR_PTR(err);
}
Example #26
File: virtio.c Project: mosconi/openbsd
int
vionet_enq_rx(struct vionet_dev *dev, char *pkt, ssize_t sz, int *spc)
{
	uint64_t q_gpa;
	uint32_t vr_sz;
	uint16_t idx, pkt_desc_idx, hdr_desc_idx;
	ptrdiff_t off;
	int ret;
	char *vr;
	struct vring_desc *desc, *pkt_desc, *hdr_desc;
	struct vring_avail *avail;
	struct vring_used *used;
	struct vring_used_elem *ue;

	ret = 0;

	vr_sz = vring_size(VIONET_QUEUE_SIZE);
	q_gpa = dev->vq[0].qa;
	q_gpa = q_gpa * VIRTIO_PAGE_SIZE;

	vr = malloc(vr_sz);
	if (vr == NULL) {
		log_warn("rx enq: malloc error getting vionet ring");
		return (0);
	}

	memset(vr, 0, vr_sz);

	if (read_mem(q_gpa, vr, vr_sz)) {
		log_warnx("rx enq: error reading gpa 0x%llx", q_gpa);
		free(vr);
		return (0);
	}

	/* Compute offsets in ring of descriptors, avail ring, and used ring */
	desc = (struct vring_desc *)(vr);
	avail = (struct vring_avail *)(vr +
	    dev->vq[0].vq_availoffset);
	used = (struct vring_used *)(vr +
	    dev->vq[0].vq_usedoffset);

	idx = dev->vq[0].last_avail & VIONET_QUEUE_MASK;

	if ((dev->vq[0].notified_avail & VIONET_QUEUE_MASK) == idx) {
		log_warnx("vionet queue notify - no space, dropping packet");
		free(vr);
		return (0);
	}

	hdr_desc_idx = avail->ring[idx] & VIONET_QUEUE_MASK;
	hdr_desc = &desc[hdr_desc_idx];

	pkt_desc_idx = hdr_desc->next & VIONET_QUEUE_MASK;
	pkt_desc = &desc[pkt_desc_idx];

	/* the rx descriptor must be writable */
	if ((pkt_desc->flags & VRING_DESC_F_WRITE) == 0) {
		log_warnx("unexpected readable rx descriptor %d",
		    pkt_desc_idx);
		free(vr);
		return (0);
	}

	/* Write packet to descriptor ring */
	if (write_mem(pkt_desc->addr, pkt, sz)) {
		log_warnx("vionet: rx enq packet write_mem error @ "
		    "0x%llx", pkt_desc->addr);
		free(vr);
		return (0);
	}

	ret = 1;
	dev->cfg.isr_status = 1;
	ue = &used->ring[used->idx & VIONET_QUEUE_MASK];
	ue->id = hdr_desc_idx;
	ue->len = hdr_desc->len + sz;
	used->idx++;
	dev->vq[0].last_avail = (dev->vq[0].last_avail + 1);
	*spc = dev->vq[0].notified_avail - dev->vq[0].last_avail;

	off = (char *)ue - vr;
	if (write_mem(q_gpa + off, ue, sizeof *ue))
		log_warnx("vionet: error writing vio ring");
	else {
		off = (char *)&used->idx - vr;
		if (write_mem(q_gpa + off, &used->idx, sizeof used->idx))
			log_warnx("vionet: error writing vio ring");
	}

	free(vr);

	return (ret);
}
Example #27
File: virtio.c Project: mosconi/openbsd
/*
 * XXX can't trust ring data from VM, be extra cautious.
 * XXX advertise link status to guest
 */
int
vionet_notifyq(struct vionet_dev *dev)
{
	uint64_t q_gpa;
	uint32_t vr_sz;
	uint16_t idx, pkt_desc_idx, hdr_desc_idx, dxx;
	size_t pktsz;
	int ret, num_enq, ofs;
	char *vr, *pkt;
	struct vring_desc *desc, *pkt_desc, *hdr_desc;
	struct vring_avail *avail;
	struct vring_used *used;

	vr = pkt = NULL;
	ret = 0;

	/* Invalid queue? */
	if (dev->cfg.queue_notify != 1) {
		vionet_notify_rx(dev);
		goto out;
	}

	vr_sz = vring_size(VIONET_QUEUE_SIZE);
	q_gpa = dev->vq[dev->cfg.queue_notify].qa;
	q_gpa = q_gpa * VIRTIO_PAGE_SIZE;

	vr = malloc(vr_sz);
	if (vr == NULL) {
		log_warn("malloc error getting vionet ring");
		goto out;
	}

	memset(vr, 0, vr_sz);

	if (read_mem(q_gpa, vr, vr_sz)) {
		log_warnx("error reading gpa 0x%llx", q_gpa);
		goto out;
	}

	/* Compute offsets in ring of descriptors, avail ring, and used ring */
	desc = (struct vring_desc *)(vr);
	avail = (struct vring_avail *)(vr +
	    dev->vq[dev->cfg.queue_notify].vq_availoffset);
	used = (struct vring_used *)(vr +
	    dev->vq[dev->cfg.queue_notify].vq_usedoffset);

	num_enq = 0;

	idx = dev->vq[dev->cfg.queue_notify].last_avail & VIONET_QUEUE_MASK;

	if ((avail->idx & VIONET_QUEUE_MASK) == idx) {
		log_warnx("vionet tx queue notify - nothing to do?");
		goto out;
	}

	while ((avail->idx & VIONET_QUEUE_MASK) != idx) {
		hdr_desc_idx = avail->ring[idx] & VIONET_QUEUE_MASK;
		hdr_desc = &desc[hdr_desc_idx];
		pktsz = 0;

		dxx = hdr_desc_idx;
		do {
			pktsz += desc[dxx].len;
			dxx = desc[dxx].next;
		} while (desc[dxx].flags & VRING_DESC_F_NEXT);

		pktsz += desc[dxx].len;

		/* Remove virtio header descriptor len */
		pktsz -= hdr_desc->len;

		/*
		 * XXX check sanity pktsz
		 * XXX too long and  > PAGE_SIZE checks
		 *     (PAGE_SIZE can be relaxed to 16384 later)
		 */
		pkt = malloc(pktsz);
		if (pkt == NULL) {
			log_warn("malloc error alloc packet buf");
			goto out;
		}

		ofs = 0;
		pkt_desc_idx = hdr_desc->next & VIONET_QUEUE_MASK;
		pkt_desc = &desc[pkt_desc_idx];

		while (pkt_desc->flags & VRING_DESC_F_NEXT) {
			/* must not be writable */
			if (pkt_desc->flags & VRING_DESC_F_WRITE) {
				log_warnx("unexpected writable tx desc "
				    "%d", pkt_desc_idx);
				goto out;
			}

			/* Read packet from descriptor ring */
			if (read_mem(pkt_desc->addr, pkt + ofs,
			    pkt_desc->len)) {
				log_warnx("vionet: packet read_mem error "
				    "@ 0x%llx", pkt_desc->addr);
				goto out;
			}

			ofs += pkt_desc->len;
			pkt_desc_idx = pkt_desc->next & VIONET_QUEUE_MASK;
			pkt_desc = &desc[pkt_desc_idx];
		}

		/* Now handle the tail descriptor - it must not be writable */
		if (pkt_desc->flags & VRING_DESC_F_WRITE) {
			log_warnx("unexpected writable tx descriptor %d",
			    pkt_desc_idx);
			goto out;
		}

		/* Read packet from descriptor ring */
		if (read_mem(pkt_desc->addr, pkt + ofs,
		    pkt_desc->len)) {
			log_warnx("vionet: packet read_mem error @ "
			    "0x%llx", pkt_desc->addr);
			goto out;
		}

		/* XXX signed vs unsigned here, funky cast */
		if (write(dev->fd, pkt, pktsz) != (int)pktsz) {
			log_warnx("vionet: tx failed writing to tap: "
			    "%d", errno);
			goto out;
		}

		ret = 1;
		dev->cfg.isr_status = 1;
		used->ring[used->idx & VIONET_QUEUE_MASK].id = hdr_desc_idx;
		used->ring[used->idx & VIONET_QUEUE_MASK].len = hdr_desc->len;
		used->idx++;

		dev->vq[dev->cfg.queue_notify].last_avail =
		    (dev->vq[dev->cfg.queue_notify].last_avail + 1);
		num_enq++;

		idx = dev->vq[dev->cfg.queue_notify].last_avail &
		    VIONET_QUEUE_MASK;
	}

	if (write_mem(q_gpa, vr, vr_sz)) {
		log_warnx("vionet: tx error writing vio ring");
	}

out:
	free(vr);
	free(pkt);

	return (ret);
}
Example #28
int
virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size, int align,
    vm_paddr_t highaddr, struct vq_alloc_info *info, struct virtqueue **vqp)
{
	struct virtqueue *vq;
	int error;

	*vqp = NULL;
	error = 0;

	if (size == 0) {
		device_printf(dev,
		    "virtqueue %d (%s) does not exist (size is zero)\n",
		    queue, info->vqai_name);
		return (ENODEV);
	} else if (!powerof2(size)) {
		device_printf(dev,
		    "virtqueue %d (%s) size is not a power of 2: %d\n",
		    queue, info->vqai_name, size);
		return (ENXIO);
	} else if (info->vqai_maxindirsz > VIRTIO_MAX_INDIRECT) {
		device_printf(dev, "virtqueue %d (%s) requested too many "
		    "indirect descriptors: %d, max %d\n",
		    queue, info->vqai_name, info->vqai_maxindirsz,
		    VIRTIO_MAX_INDIRECT);
		return (EINVAL);
	}

	vq = kmalloc(sizeof(struct virtqueue) +
	    size * sizeof(struct vq_desc_extra), M_DEVBUF, M_INTWAIT | M_ZERO);
	if (vq == NULL) {
		device_printf(dev, "cannot allocate virtqueue\n");
		return (ENOMEM);
	}

	vq->vq_dev = dev;
	strlcpy(vq->vq_name, info->vqai_name, sizeof(vq->vq_name));
	vq->vq_queue_index = queue;
	vq->vq_alignment = align;
	vq->vq_nentries = size;
	vq->vq_free_cnt = size;
	vq->vq_intrhand = info->vqai_intr;
	vq->vq_intrhand_arg = info->vqai_intr_arg;

	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_EVENT_IDX) != 0)
		vq->vq_flags |= VIRTQUEUE_FLAG_EVENT_IDX;

	if (info->vqai_maxindirsz > 1) {
		error = virtqueue_init_indirect(vq, info->vqai_maxindirsz);
		if (error)
			goto fail;
	}

	vq->vq_ring_size = round_page(vring_size(size, align));
	vq->vq_ring_mem = contigmalloc(vq->vq_ring_size, M_DEVBUF,
	    M_WAITOK | M_ZERO, 0, highaddr, PAGE_SIZE, 0);
	if (vq->vq_ring_mem == NULL) {
		device_printf(dev,
		    "cannot allocate memory for virtqueue ring\n");
		error = ENOMEM;
		goto fail;
	}

	vq_ring_init(vq);
	virtqueue_disable_intr(vq);

	*vqp = vq;

fail:
	if (error)
		virtqueue_free(vq);

	return (error);
}
Example #29
File: virtio.c Project: srodrig1/lk
status_t virtio_alloc_ring(struct virtio_device *dev, uint index, uint16_t len)
{
    LTRACEF("dev %p, index %u, len %u\n", dev, index, len);

    DEBUG_ASSERT(dev);
    DEBUG_ASSERT(len > 0 && ispow2(len));
    DEBUG_ASSERT(index < MAX_VIRTIO_RINGS);

    if (len == 0 || !ispow2(len))
        return ERR_INVALID_ARGS;

    struct vring *ring = &dev->ring[index];

    /* allocate a ring */
    size_t size = vring_size(len, PAGE_SIZE);
    LTRACEF("need %zu bytes\n", size);

#if WITH_KERNEL_VM
    void *vptr;
    status_t err = vmm_alloc_contiguous(vmm_get_kernel_aspace(), "virtio_ring", size, &vptr, 0, 0, ARCH_MMU_FLAG_UNCACHED_DEVICE);
    if (err < 0)
        return ERR_NO_MEMORY;

    LTRACEF("allocated virtio_ring at va %p\n", vptr);

    /* compute the physical address */
    paddr_t pa;
    err = arch_mmu_query((vaddr_t)vptr, &pa, NULL);
    if (err < 0) {
        return ERR_NO_MEMORY;
    }

    LTRACEF("virtio_ring at pa 0x%lx\n", pa);
#else
    void *vptr = memalign(PAGE_SIZE, size);
    if (!vptr)
        return ERR_NO_MEMORY;

    LTRACEF("ptr %p\n", vptr);
    memset(vptr, 0, size);

    /* compute the physical address */
    paddr_t pa = (paddr_t)vptr;
#endif

    /* initialize the ring */
    vring_init(ring, len, vptr, PAGE_SIZE);
    dev->ring[index].free_list = 0xffff;
    dev->ring[index].free_count = 0;

    /* add all the descriptors to the free list */
    for (uint i = 0; i < len; i++) {
        virtio_free_desc(dev, index, i);
    }

    /* register the ring with the device */
    DEBUG_ASSERT(dev->mmio_config);
    dev->mmio_config->guest_page_size = PAGE_SIZE;
    dev->mmio_config->queue_sel = index;
    dev->mmio_config->queue_num = len;
    dev->mmio_config->queue_align = PAGE_SIZE;
    dev->mmio_config->queue_pfn = pa / PAGE_SIZE;

    /* mark the ring active */
    dev->active_rings_bitmap |= (1 << index);

    return NO_ERROR;
}
Example #30
File: mic_virtio.c Project: 7799/linux
/*
 * This routine will assign vring's allocated in host/io memory. Code in
 * virtio_ring.c however continues to access this io memory as if it were local
 * memory without io accessors.
 */
static struct virtqueue *mic_find_vq(struct virtio_device *vdev,
				     unsigned index,
				     void (*callback)(struct virtqueue *vq),
				     const char *name)
{
	struct mic_vdev *mvdev = to_micvdev(vdev);
	struct mic_vqconfig __iomem *vqconfig;
	struct mic_vqconfig config;
	struct virtqueue *vq;
	void __iomem *va;
	struct _mic_vring_info __iomem *info;
	void *used;
	int vr_size, _vr_size, err, magic;
	struct vring *vr;
	u8 type = ioread8(&mvdev->desc->type);

	if (index >= ioread8(&mvdev->desc->num_vq))
		return ERR_PTR(-ENOENT);

	if (!name)
		return ERR_PTR(-ENOENT);

	/* First assign the vring's allocated in host memory */
	vqconfig = mic_vq_config(mvdev->desc) + index;
	memcpy_fromio(&config, vqconfig, sizeof(config));
	_vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN);
	vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info));
	va = mic_card_map(mvdev->mdev, le64_to_cpu(config.address), vr_size);
	if (!va)
		return ERR_PTR(-ENOMEM);
	mvdev->vr[index] = va;
	memset_io(va, 0x0, _vr_size);
	vq = vring_new_virtqueue(index, le16_to_cpu(config.num),
				 MIC_VIRTIO_RING_ALIGN, vdev, false,
				 (void __force *)va, mic_notify, callback,
				 name);
	if (!vq) {
		err = -ENOMEM;
		goto unmap;
	}
	info = va + _vr_size;
	magic = ioread32(&info->magic);

	if (WARN(magic != MIC_MAGIC + type + index, "magic mismatch")) {
		err = -EIO;
		goto unmap;
	}

	/* Allocate and reassign used ring now */
	mvdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 +
					     sizeof(struct vring_used_elem) *
					     le16_to_cpu(config.num));
	used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(mvdev->used_size[index]));
	if (!used) {
		err = -ENOMEM;
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, err);
		goto del_vq;
	}
	iowrite64(virt_to_phys(used), &vqconfig->used_address);

	/*
	 * To reassign the used ring here we are directly accessing
	 * struct vring_virtqueue which is a private data structure
	 * in virtio_ring.c. At the minimum, a BUILD_BUG_ON() in
	 * vring_new_virtqueue() would ensure that
	 *  (&vq->vring == (struct vring *) (&vq->vq + 1));
	 */
	vr = (struct vring *)(vq + 1);
	vr->used = used;

	vq->priv = mvdev;
	return vq;
del_vq:
	vring_del_virtqueue(vq);
unmap:
	mic_card_unmap(mvdev->mdev, mvdev->vr[index]);
	return ERR_PTR(err);
}