Example #1
void
virtio_reinit_start(struct virtio_softc *sc)
{
	int i;

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);
	for (i = 0; i < sc->sc_nvqs; i++) {
		int n;
		struct virtqueue *vq = &sc->sc_vqs[i];
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
				  VIRTIO_CONFIG_QUEUE_SELECT,
				  vq->vq_index);
		n = bus_space_read_2(sc->sc_iot, sc->sc_ioh,
				     VIRTIO_CONFIG_QUEUE_SIZE);
		if (n == 0)	/* vq disappeared */
			continue;
		if (n != vq->vq_num) {
			panic("%s: virtqueue size changed, vq index %d\n",
			      device_xname(sc->sc_dev),
			      vq->vq_index);
		}
		virtio_init_vq(sc, vq);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
				  VIRTIO_CONFIG_QUEUE_ADDRESS,
				  (vq->vq_dmamap->dm_segs[0].ds_addr
				   / VIRTIO_PAGE_SIZE));
	}
}
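
The routine above only re-ACKs the device and re-programs the rings; the caller is still expected to renegotiate its feature bits and raise DRIVER_OK afterwards. A minimal sketch of such a resume path, assuming a hypothetical child-driver softc and a NetBSD pmf-style resume hook (foo_softc, foo_rx_fill, and virtio_reinit_end are assumptions, not part of the example above):

/*
 * Hypothetical resume handler for a virtio child device. Only
 * virtio_reinit_start() is taken from the example above; the
 * softc layout, foo_rx_fill(), and virtio_reinit_end() are
 * illustrative assumptions.
 */
static bool
foo_resume(device_t self, const pmf_qual_t *qual)
{
	struct foo_softc *sc = device_private(self);

	/* Re-ACK the device and re-program every virtqueue. */
	virtio_reinit_start(sc->sc_virtio);

	/* Restore driver state, e.g. refill receive buffers. */
	foo_rx_fill(sc);

	/* Raise DRIVER_OK so the host resumes processing. */
	virtio_reinit_end(sc->sc_virtio);

	return true;
}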
Example #2
void
virtio_reinit_start(struct virtio_softc *sc)
{
	int i;

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);
	for (i = 0; i < sc->sc_nvqs; i++) {
		int n;
		struct virtqueue *vq = &sc->sc_vqs[i];
		n = virtio_read_queue_size(sc, vq->vq_index);
		if (n == 0)	/* vq disappeared */
			continue;
		if (n != vq->vq_num) {
			panic("%s: virtqueue size changed, vq index %d\n",
			    sc->sc_dev.dv_xname, vq->vq_index);
		}
		virtio_init_vq(sc, vq, 1);
		virtio_setup_queue(sc, vq->vq_index,
		    vq->vq_dmamap->dm_segs[0].ds_addr / VIRTIO_PAGE_SIZE);
	}
}
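
Example #2 is the same routine with the raw bus_space accesses of Example #1 factored into small helpers. A minimal sketch of what those helpers could look like against the legacy register layout, reusing the sc_iot/sc_ioh handles from Example #1 (the real helpers live in the bus front-end and their signatures may differ):

/*
 * Sketch of the helper wrappers used in Example #2, assuming the
 * legacy virtio PCI registers and the sc_iot/sc_ioh handles from
 * Example #1; signatures are illustrative.
 */
uint16_t
virtio_read_queue_size(struct virtio_softc *sc, uint16_t index)
{
	/* Select the queue, then read back its ring size. */
	bus_space_write_2(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SELECT, index);
	return (bus_space_read_2(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SIZE));
}

void
virtio_setup_queue(struct virtio_softc *sc, uint16_t index, uint32_t addr)
{
	/* Select the queue and hand the device its ring's page number. */
	bus_space_write_2(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SELECT, index);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_ADDRESS, addr);
}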
Example #3
/*
 * Allocate/free a vq.
 */
struct virtqueue *
virtio_alloc_vq(struct virtio_softc *sc, unsigned int index, unsigned int size,
    unsigned int indirect_num, const char *name)
{
	int vq_size, allocsize1, allocsize2, allocsize = 0;
	int ret;
	unsigned int ncookies;
	size_t len;
	struct virtqueue *vq;

	ddi_put16(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT), index);
	vq_size = ddi_get16(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SIZE));
	if (vq_size == 0) {
		dev_err(sc->sc_dev, CE_WARN,
		    "virtqueue dest not exist, index %d for %s\n", index, name);
		goto out;
	}

	vq = kmem_zalloc(sizeof (struct virtqueue), KM_SLEEP);

	/* size 0 => use native vq size, good for receive queues. */
	if (size)
		vq_size = MIN(vq_size, size);

	/* allocsize1: descriptor table + avail ring + pad */
	allocsize1 = VIRTQUEUE_ALIGN(sizeof (struct vring_desc) * vq_size +
	    sizeof (struct vring_avail) + sizeof (uint16_t) * vq_size);
	/* allocsize2: used ring + pad */
	allocsize2 = VIRTQUEUE_ALIGN(sizeof (struct vring_used) +
	    sizeof (struct vring_used_elem) * vq_size);

	allocsize = allocsize1 + allocsize2;

	ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_dma_attr,
	    DDI_DMA_SLEEP, NULL, &vq->vq_dma_handle);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to allocate dma handle for vq %d", index);
		goto out_alloc_handle;
	}

	ret = ddi_dma_mem_alloc(vq->vq_dma_handle, allocsize,
	    &virtio_vq_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    (caddr_t *)&vq->vq_vaddr, &len, &vq->vq_dma_acch);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to allocate dma memory for vq %d", index);
		goto out_alloc;
	}

	ret = ddi_dma_addr_bind_handle(vq->vq_dma_handle, NULL,
	    (caddr_t)vq->vq_vaddr, len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL, &vq->vq_dma_cookie, &ncookies);
	if (ret != DDI_DMA_MAPPED) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to bind dma memory for vq %d", index);
		goto out_bind;
	}

	/* We asked for a single segment */
	ASSERT(ncookies == 1);
	/* and page-aligned buffers. */
	ASSERT(vq->vq_dma_cookie.dmac_laddress % VIRTIO_PAGE_SIZE == 0);

	(void) memset(vq->vq_vaddr, 0, allocsize);

	/* Make sure all zeros hit the buffer before we point the host to it */
	membar_producer();

	/* set the vq address */
	ddi_put32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_ADDRESS),
	    (vq->vq_dma_cookie.dmac_laddress / VIRTIO_PAGE_SIZE));

	/* remember addresses and offsets for later use */
	vq->vq_owner = sc;
	vq->vq_num = vq_size;
	vq->vq_index = index;
	vq->vq_descs = vq->vq_vaddr;
	vq->vq_availoffset = sizeof (struct vring_desc)*vq_size;
	vq->vq_avail = (void *)(((char *)vq->vq_descs) + vq->vq_availoffset);
	vq->vq_usedoffset = allocsize1;
	vq->vq_used = (void *)(((char *)vq->vq_descs) + vq->vq_usedoffset);

	ASSERT(indirect_num == 0 ||
	    virtio_has_feature(sc, VIRTIO_F_RING_INDIRECT_DESC));
	vq->vq_indirect_num = indirect_num;

	/* free slot management */
	vq->vq_entries = kmem_zalloc(sizeof (struct vq_entry) * vq_size,
	    KM_SLEEP);

	ret = virtio_init_vq(sc, vq);
	if (ret)
		goto out_init;

	dev_debug(sc->sc_dev, CE_NOTE,
	    "Allocated %d entries for vq %d:%s (%d indirect descs)",
	    vq_size, index, name, indirect_num * vq_size);

	return (vq);

out_init:
	kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq_size);
	(void) ddi_dma_unbind_handle(vq->vq_dma_handle);
out_bind:
	ddi_dma_mem_free(&vq->vq_dma_acch);
out_alloc:
	ddi_dma_free_handle(&vq->vq_dma_handle);
out_alloc_handle:
	kmem_free(vq, sizeof (struct virtqueue));
out:
	return (NULL);
}
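
For context, a caller of Example #3 typically allocates its queues at attach time and checks for NULL on failure. A minimal sketch, assuming a hypothetical single-queue driver softc (foo_softc, sc_virtio, and FOO_INDIRECT_NUM are illustrative names; only virtio_alloc_vq() comes from the example above):

/*
 * Hypothetical attach-time queue setup. Passing size 0 keeps the
 * device's native ring size, as noted in the example above.
 */
static int
foo_setup_vq(struct foo_softc *sc)
{
	sc->sc_vq = virtio_alloc_vq(sc->sc_virtio, 0 /* index */,
	    0 /* native ring size */, FOO_INDIRECT_NUM, "foo requests");
	if (sc->sc_vq == NULL)
		return (DDI_FAILURE);

	return (DDI_SUCCESS);
}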