Example #1
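/* Tear down the vhost side of a virtqueue: disable the host notifier,
 * read the last available index back from the kernel with
 * VHOST_GET_VRING_BASE so virtio can resume where vhost stopped, and
 * unmap the ring, used, avail and desc regions mapped at init time. */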
static void vhost_virtqueue_cleanup(struct vhost_dev *dev,
                                    struct VirtIODevice *vdev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx)
{
    struct vhost_vring_state state = {
        .index = idx,
    };
    int r;
    r = vdev->binding->set_host_notifier(vdev->binding_opaque, idx, false);
    if (r < 0) {
        fprintf(stderr, "vhost VQ %d host cleanup failed: %d\n", idx, r);
        fflush(stderr);
    }
    assert (r >= 0);
    r = ioctl(dev->control, VHOST_GET_VRING_BASE, &state);
    if (r < 0) {
        fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
        fflush(stderr);
    }
    virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    assert (r >= 0);
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, virtio_queue_get_ring_size(vdev, idx));
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              1, virtio_queue_get_used_size(vdev, idx));
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, virtio_queue_get_avail_size(vdev, idx));
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, virtio_queue_get_desc_size(vdev, idx));
}
Example #2
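/* Bring up one virtqueue under vhost: program the ring size and base index
 * into the kernel, map the vring areas into QEMU's address space, hand the
 * addresses to vhost, and connect the kick and call eventfds. Any failure
 * unwinds the earlier steps in reverse order. */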
static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct VirtIODevice *vdev,
                                struct vhost_virtqueue *vq,
                                unsigned idx)
{
    target_phys_addr_t s, l, a;
    int r;
    struct vhost_vring_file file = {
        .index = idx,
    };
    struct vhost_vring_state state = {
        .index = idx,
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    if (!vdev->binding->set_host_notifier) {
        fprintf(stderr, "binding does not support host notifiers\n");
        return -ENOSYS;
    }

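    /* Tell the vhost kernel module the ring size and the index at which to
     * resume processing. */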
    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_NUM, &state);
    if (r) {
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_BASE, &state);
    if (r) {
        return -errno;
    }

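    /* Map the descriptor table, available ring, used ring and the contiguous
     * ring region into QEMU's address space; each mapping must cover the full
     * requested length. */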
    s = l = virtio_queue_get_desc_size(vdev, idx);
    a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = cpu_physical_memory_map(a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    s = l = virtio_queue_get_avail_size(vdev, idx);
    a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = cpu_physical_memory_map(a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = cpu_physical_memory_map(a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
    vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
    vq->ring = cpu_physical_memory_map(a, &l, 1);
    if (!vq->ring || l != s) {
        r = -ENOMEM;
        goto fail_alloc_ring;
    }

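    /* Pass the mapped addresses to vhost and enable the host notifier on the
     * binding. */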
    r = vhost_virtqueue_set_addr(dev, vq, idx, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }
    r = vdev->binding->set_host_notifier(vdev->binding_opaque, idx, true);
    if (r < 0) {
        fprintf(stderr, "Error binding host notifier: %d\n", -r);
        goto fail_host_notifier;
    }

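    /* Use the host notifier's eventfd as the kick fd and the guest notifier's
     * eventfd as the call fd. */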
    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = ioctl(dev->control, VHOST_SET_VRING_KICK, &file);
    if (r) {
        r = -errno;
        goto fail_kick;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    r = ioctl(dev->control, VHOST_SET_VRING_CALL, &file);
    if (r) {
        r = -errno;
        goto fail_call;
    }

    return 0;

fail_call:
fail_kick:
    vdev->binding->set_host_notifier(vdev->binding_opaque, idx, false);
fail_host_notifier:
fail_alloc:
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, 0);
fail_alloc_ring:
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              0, 0);
fail_alloc_used:
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, 0);
fail_alloc_avail:
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, 0);
fail_alloc_desc:
    return r;
}
Example #3
/* FIXME: add a vq index parameter */
static void vhost_virtqueue_cleanup(struct vhost_dev *dev,
                                    struct VirtIODevice *vdev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx)
{
    struct vhost_vring_state state = {
        .index = idx % dev->nvqs,
    };
    int r;
    r = ioctl(dev->control, VHOST_GET_VRING_BASE, &state);
    if (r < 0) {
        fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
        fflush(stderr);
    }
    virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    assert (r >= 0);
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, virtio_queue_get_ring_size(vdev, idx));
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              1, virtio_queue_get_used_size(vdev, idx));
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, virtio_queue_get_avail_size(vdev, idx));
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, virtio_queue_get_desc_size(vdev, idx));
}

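/* The MemoryListener interface expects eventfd callbacks; vhost has nothing
 * to do for them. */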
static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, int fd)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, int fd)
{
}

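/* Initialise a vhost device: adopt the caller's control fd or open
 * /dev/vhost-net, take ownership, cache the backend feature bits and register
 * a MemoryListener to track guest memory regions. */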
int vhost_dev_init(struct vhost_dev *hdev, int devfd, bool force)
{
    uint64_t features;
    int r;
    if (devfd >= 0) {
        hdev->control = devfd;
    } else {
        hdev->control = open("/dev/vhost-net", O_RDWR);
        if (hdev->control < 0) {
            return -errno;
        }
    }
    r = ioctl(hdev->control, VHOST_SET_OWNER, NULL);
    if (r < 0) {
        goto fail;
    }

    r = ioctl(hdev->control, VHOST_GET_FEATURES, &features);
    if (r < 0) {
        goto fail;
    }
    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };
    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    memory_listener_register(&hdev->memory_listener, NULL);
    hdev->force = force;
    return 0;
fail:
    r = -errno;
    close(hdev->control);
    return r;
}

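/* Undo vhost_dev_init: unregister the memory listener, free the region tables
 * and close the control fd. */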
void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    memory_listener_unregister(&hdev->memory_listener);
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    close(hdev->control);
}
Example #4
/* Map the guest's vring to host memory */
bool vring_setup(Vring *vring, VirtIODevice *vdev, int n)
{
    struct vring *vr = &vring->vr;
    hwaddr addr;
    hwaddr size;
    void *ptr;

    vring->broken = false;
    vr->num = virtio_queue_get_num(vdev, n);

    addr = virtio_queue_get_desc_addr(vdev, n);
    size = virtio_queue_get_desc_size(vdev, n);
    /* Map the descriptor area as read only */
    ptr = vring_map(&vring->mr_desc, addr, size, NULL, false);
    if (!ptr) {
        error_report("Failed to map 0x%" HWADDR_PRIx " byte for vring desc "
                     "at 0x%" HWADDR_PRIx,
                      size, addr);
        goto out_err_desc;
    }
    vr->desc = ptr;

    addr = virtio_queue_get_avail_addr(vdev, n);
    size = virtio_queue_get_avail_size(vdev, n);
    /* Add the size of the used_event_idx */
    size += sizeof(uint16_t);
    /* Map the driver area as read only */
    ptr = vring_map(&vring->mr_avail, addr, size, NULL, false);
    if (!ptr) {
        error_report("Failed to map 0x%" HWADDR_PRIx " byte for vring avail "
                     "at 0x%" HWADDR_PRIx,
                      size, addr);
        goto out_err_avail;
    }
    vr->avail = ptr;

    addr = virtio_queue_get_used_addr(vdev, n);
    size = virtio_queue_get_used_size(vdev, n);
    /* Add the size of the avail_event_idx */
    size += sizeof(uint16_t);
    /* Map the device area as read-write */
    ptr = vring_map(&vring->mr_used, addr, size, NULL, true);
    if (!ptr) {
        error_report("Failed to map 0x%" HWADDR_PRIx " byte for vring used "
                     "at 0x%" HWADDR_PRIx,
                      size, addr);
        goto out_err_used;
    }
    vr->used = ptr;

    vring->last_avail_idx = virtio_queue_get_last_avail_idx(vdev, n);
    vring->last_used_idx = vring_get_used_idx(vdev, vring);
    vring->signalled_used = 0;
    vring->signalled_used_valid = false;

    trace_vring_setup(virtio_queue_get_ring_addr(vdev, n),
                      vring->vr.desc, vring->vr.avail, vring->vr.used);
    return true;

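/* Error unwind: drop the MemoryRegion references taken by the successful
 * mappings and mark the vring as broken. */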
out_err_used:
    memory_region_unref(vring->mr_avail);
out_err_avail:
    memory_region_unref(vring->mr_desc);
out_err_desc:
    vring->broken = true;
    return false;
}
Example #5
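/* The whole ring runs from the start of the descriptor table to the end of
 * the used ring: the distance from desc to used, plus the used ring's own
 * size. */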
target_phys_addr_t virtio_queue_get_ring_size(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used - vdev->vq[n].vring.desc +
           virtio_queue_get_used_size(vdev, n);
}