Example 1
static void vhost_net_stop_one(struct vhost_net *net,
                               VirtIODevice *dev)
{
    struct vhost_vring_file file = { .fd = -1 };

    if (net->nc->info->type == NET_CLIENT_DRIVER_TAP) {
        for (file.index = 0; file.index < net->dev.nvqs; ++file.index) {
            int r = vhost_net_set_backend(&net->dev, &file);
            assert(r >= 0);
        }
    }
    if (net->nc->info->poll) {
        net->nc->info->poll(net->nc, true);
    }
    vhost_dev_stop(&net->dev, dev);
    vhost_dev_disable_notifiers(&net->dev, dev);
}

int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
                    int total_queues)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int r, e, i;

    if (!k->set_guest_notifiers) {
        error_report("binding does not support guest notifiers");
        return -ENOSYS;
    }

    for (i = 0; i < total_queues; i++) {
        struct vhost_net *net;

        net = get_vhost_net(ncs[i].peer);
        vhost_net_set_vq_index(net, i * 2);

        /* Suppress guest notifier masking on vhost-user, because
         * vhost-user does not handle interrupt masking/unmasking
         * properly.
         */
        if (net->nc->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
            dev->use_guest_notifier_mask = false;
        }
    }

    r = k->set_guest_notifiers(qbus->parent, total_queues * 2, true);
    if (r < 0) {
        error_report("Error binding guest notifier: %d", -r);
        goto err;
    }

    for (i = 0; i < total_queues; i++) {
        r = vhost_net_start_one(get_vhost_net(ncs[i].peer), dev);

        if (r < 0) {
            goto err_start;
        }

        if (ncs[i].peer->vring_enable) {
            /* restore vring enable state */
            r = vhost_set_vring_enable(ncs[i].peer, ncs[i].peer->vring_enable);

            if (r < 0) {
                goto err_start;
            }
        }
    }

    return 0;

err_start:
    while (--i >= 0) {
        vhost_net_stop_one(get_vhost_net(ncs[i].peer), dev);
    }
    e = k->set_guest_notifiers(qbus->parent, total_queues * 2, false);
    if (e < 0) {
        fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", e);
        fflush(stderr);
    }
err:
    return r;
}
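
The cleanup path in vhost_net_start() follows the usual unwind-in-reverse idiom: only the queues that were already started are stopped, and the guest notifiers are released afterwards. A minimal standalone sketch of that pattern, with hypothetical start_item()/stop_item() helpers standing in for vhost_net_start_one()/vhost_net_stop_one():

/* Sketch only: start_item()/stop_item() are hypothetical stand-ins for
 * vhost_net_start_one()/vhost_net_stop_one() above. */
static int start_item(int i) { (void)i; return 0; }
static void stop_item(int i) { (void)i; }

static int start_all(int total)
{
    int i, r;

    for (i = 0; i < total; i++) {
        r = start_item(i);
        if (r < 0) {
            goto err_start;
        }
    }
    return 0;

err_start:
    /* Unwind in reverse: i is the index that failed, so stop i-1 .. 0. */
    while (--i >= 0) {
        stop_item(i);
    }
    return r;
}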
Example 2
static void ccw_init(MachineState *machine)
{
    ram_addr_t my_ram_size = machine->ram_size;
    MemoryRegion *sysmem = get_system_memory();
    MemoryRegion *ram = g_new(MemoryRegion, 1);
    sclpMemoryHotplugDev *mhd = init_sclp_memory_hotplug_dev();
    uint8_t *storage_keys;
    int ret;
    VirtualCssBus *css_bus;
    DeviceState *dev;
    QemuOpts *opts = qemu_opts_find(qemu_find_opts("memory"), NULL);
    ram_addr_t pad_size = 0;
    ram_addr_t maxmem = qemu_opt_get_size(opts, "maxmem", my_ram_size);
    ram_addr_t standby_mem_size = maxmem - my_ram_size;

    /* The storage increment size is a multiple of 1M and is a power of 2.
     * The number of storage increments must be MAX_STORAGE_INCREMENTS or fewer.
     * The variable 'mhd->increment_size' is an exponent of 2 that can be
     * used to calculate the size (in bytes) of an increment. */
    mhd->increment_size = 20;
    while ((my_ram_size >> mhd->increment_size) > MAX_STORAGE_INCREMENTS) {
        mhd->increment_size++;
    }
    while ((standby_mem_size >> mhd->increment_size) > MAX_STORAGE_INCREMENTS) {
        mhd->increment_size++;
    }

    /* The core and standby memory areas need to be aligned with
     * the increment size.  In effect, this can cause the
     * user-specified memory size to be rounded down to align
     * with the nearest increment boundary. */
    standby_mem_size = standby_mem_size >> mhd->increment_size
                                        << mhd->increment_size;
    my_ram_size = my_ram_size >> mhd->increment_size
                              << mhd->increment_size;

    /* let's propagate the changed ram size into the global variable. */
    ram_size = my_ram_size;

    /* get a BUS */
    css_bus = virtual_css_bus_init();
    s390_sclp_init();
    s390_init_ipl_dev(machine->kernel_filename, machine->kernel_cmdline,
                      machine->initrd_filename, "s390-ccw.img");
    s390_flic_init();

    dev = qdev_create(NULL, TYPE_S390_PCI_HOST_BRIDGE);
    object_property_add_child(qdev_get_machine(), TYPE_S390_PCI_HOST_BRIDGE,
                              OBJECT(dev), NULL);
    qdev_init_nofail(dev);

    /* register hypercalls */
    virtio_ccw_register_hcalls();

    /* allocate RAM for core */
    memory_region_init_ram(ram, NULL, "s390.ram", my_ram_size, &error_abort);
    vmstate_register_ram_global(ram);
    memory_region_add_subregion(sysmem, 0, ram);

    /* If the size of ram is not on a MEM_SECTION_SIZE boundary,
       calculate the pad size necessary to force this boundary. */
    if (standby_mem_size) {
        if (my_ram_size % MEM_SECTION_SIZE) {
            pad_size = MEM_SECTION_SIZE - my_ram_size % MEM_SECTION_SIZE;
        }
        my_ram_size += standby_mem_size + pad_size;
        mhd->pad_size = pad_size;
        mhd->standby_mem_size = standby_mem_size;
    }

    /* allocate storage keys */
    storage_keys = g_malloc0(my_ram_size / TARGET_PAGE_SIZE);

    /* init CPUs */
    s390_init_cpus(machine->cpu_model, storage_keys);

    if (kvm_enabled()) {
        kvm_s390_enable_css_support(s390_cpu_addr2state(0));
    }
    /*
     * Create virtual css and set it as default so that non mcss-e
     * enabled guests only see virtio devices.
     */
    ret = css_create_css_image(VIRTUAL_CSSID, true);
    assert(ret == 0);

    /* Create VirtIO network adapters */
    s390_create_virtio_net(BUS(css_bus), "virtio-net-ccw");
}
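
The paired shifts in ccw_init() round a size down to a multiple of 2^increment_size: the right shift discards the low-order bits and the left shift restores the magnitude. A small self-contained illustration of that arithmetic (the values are made up for the example, not taken from the code above):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t increment_size = 20;                 /* 2^20 bytes = 1 MiB */
    uint64_t ram_size = 2 * 1024 * 1024 + 12345;  /* an unaligned size */

    /* Same round-down as in ccw_init(): everything below the increment
     * boundary is dropped. */
    uint64_t aligned = ram_size >> increment_size << increment_size;

    printf("%llu bytes rounded down to %llu bytes (2 MiB)\n",
           (unsigned long long)ram_size, (unsigned long long)aligned);
    return 0;
}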
Example 3
static void vhost_net_stop_one(struct vhost_net *net,
                               VirtIODevice *dev)
{
    struct vhost_vring_file file = { .fd = -1 };

    if (net->nc->info->type == NET_CLIENT_OPTIONS_KIND_TAP) {
        for (file.index = 0; file.index < net->dev.nvqs; ++file.index) {
            const VhostOps *vhost_ops = net->dev.vhost_ops;
            int r = vhost_ops->vhost_call(&net->dev, VHOST_NET_SET_BACKEND,
                                          &file);
            assert(r >= 0);
        }
    }
    if (net->nc->info->poll) {
        net->nc->info->poll(net->nc, true);
    }
    vhost_dev_stop(&net->dev, dev);
    vhost_dev_disable_notifiers(&net->dev, dev);
}

static bool vhost_net_device_endian_ok(VirtIODevice *vdev)
{
#ifdef TARGET_IS_BIENDIAN
#ifdef HOST_WORDS_BIGENDIAN
    return virtio_is_big_endian(vdev);
#else
    return !virtio_is_big_endian(vdev);
#endif
#else
    return true;
#endif
}

int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
                    int total_queues)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int r, e, i;

    if (!vhost_net_device_endian_ok(dev)) {
        error_report("vhost-net does not support cross-endian");
        r = -ENOSYS;
        goto err;
    }

    if (!k->set_guest_notifiers) {
        error_report("binding does not support guest notifiers");
        r = -ENOSYS;
        goto err;
    }

    for (i = 0; i < total_queues; i++) {
        vhost_net_set_vq_index(get_vhost_net(ncs[i].peer), i * 2);
    }

    r = k->set_guest_notifiers(qbus->parent, total_queues * 2, true);
    if (r < 0) {
        error_report("Error binding guest notifier: %d", -r);
        goto err;
    }

    for (i = 0; i < total_queues; i++) {
        r = vhost_net_start_one(get_vhost_net(ncs[i].peer), dev);

        if (r < 0) {
            goto err_start;
        }
    }

    return 0;

err_start:
    while (--i >= 0) {
        vhost_net_stop_one(get_vhost_net(ncs[i].peer), dev);
    }
    e = k->set_guest_notifiers(qbus->parent, total_queues * 2, false);
    if (e < 0) {
        fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", e);
        fflush(stderr);
    }
err:
    return r;
}
Example 4
static void vhost_net_stop_one(struct vhost_net *net,
                               VirtIODevice *dev)
{
    struct vhost_vring_file file = { .fd = -1 };

    if (net->nc->info->type == NET_CLIENT_OPTIONS_KIND_TAP) {
        for (file.index = 0; file.index < net->dev.nvqs; ++file.index) {
            const VhostOps *vhost_ops = net->dev.vhost_ops;
            int r = vhost_ops->vhost_net_set_backend(&net->dev, &file);
            assert(r >= 0);
        }
    }
    if (net->nc->info->poll) {
        net->nc->info->poll(net->nc, true);
    }
    vhost_dev_stop(&net->dev, dev);
    vhost_dev_disable_notifiers(&net->dev, dev);
}

int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
                    int total_queues)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int r, e, i, j;

    if (!k->set_guest_notifiers) {
        error_report("binding does not support guest notifiers");
        return -ENOSYS;
    }

    for (j = 0; j < total_queues; j++) {
        r = vhost_net_set_vnet_endian(dev, ncs[j].peer, true);
        if (r < 0) {
            goto err_endian;
        }
        vhost_net_set_vq_index(get_vhost_net(ncs[j].peer), j * 2);
    }

    r = k->set_guest_notifiers(qbus->parent, total_queues * 2, true);
    if (r < 0) {
        error_report("Error binding guest notifier: %d", -r);
        goto err_endian;
    }

    for (i = 0; i < total_queues; i++) {
        r = vhost_net_start_one(get_vhost_net(ncs[i].peer), dev);

        if (r < 0) {
            goto err_start;
        }
    }

    return 0;

err_start:
    while (--i >= 0) {
        vhost_net_stop_one(get_vhost_net(ncs[i].peer), dev);
    }
    e = k->set_guest_notifiers(qbus->parent, total_queues * 2, false);
    if (e < 0) {
        fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", e);
        fflush(stderr);
    }
err_endian:
    while (--j >= 0) {
        vhost_net_set_vnet_endian(dev, ncs[j].peer, false);
    }
    return r;
}
Example 5
static void handle_notify(EventNotifier *e)
{
    VirtIOBlockDataPlane *s = container_of(e, VirtIOBlockDataPlane,
                                           host_notifier);
    VirtIOBlock *vblk = VIRTIO_BLK(s->vdev);

    event_notifier_test_and_clear(&s->host_notifier);
    bdrv_io_plug(s->blk->conf.bs);
    for (;;) {
        MultiReqBuffer mrb = {
            .num_writes = 0,
        };
        int ret;

        /* Disable guest->host notifies to avoid unnecessary vmexits */
        vring_disable_notification(s->vdev, &s->vring);

        for (;;) {
            VirtIOBlockReq *req = virtio_blk_alloc_request(vblk);

            ret = vring_pop(s->vdev, &s->vring, &req->elem);
            if (ret < 0) {
                virtio_blk_free_request(req);
                break; /* no more requests */
            }

            trace_virtio_blk_data_plane_process_request(s, req->elem.out_num,
                                                        req->elem.in_num,
                                                        req->elem.index);

            virtio_blk_handle_request(req, &mrb);
        }

        virtio_submit_multiwrite(s->blk->conf.bs, &mrb);

        if (likely(ret == -EAGAIN)) { /* vring emptied */
            /* Re-enable guest->host notifies and stop processing the vring.
             * But if the guest has snuck in more descriptors, keep processing.
             */
            if (vring_enable_notification(s->vdev, &s->vring)) {
                break;
            }
        } else { /* fatal error */
            break;
        }
    }
    bdrv_io_unplug(s->blk->conf.bs);
}
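
handle_notify() above relies on the standard virtio notification-suppression loop: guest-to-host notifications are disabled while the ring is drained, then re-enabled, and if vring_enable_notification() reports that new descriptors slipped in during that window the outer loop runs again. A compilable sketch of that control flow, with hypothetical helpers standing in for the vring_* calls:

#include <stdbool.h>

/* Hypothetical stand-ins for vring_disable_notification(), vring_pop()
 * and vring_enable_notification() used above. */
static void disable_notifications(void) { }
static int pop_request(void) { return -1; /* ring empty */ }
static bool enable_notifications(void) { return true; /* still empty */ }

static void drain_ring(void)
{
    for (;;) {
        /* Suppress guest->host notifications so draining the ring does
         * not cost one vmexit per request. */
        disable_notifications();

        while (pop_request() == 0) {
            /* handle one request */
        }

        /* Re-enable notifications; if the guest queued more descriptors
         * in the meantime the helper returns false and we drain again,
         * mirroring the vring_enable_notification() check above. */
        if (enable_notifications()) {
            break;
        }
    }
}

int main(void) { drain_ring(); return 0; }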

/* Context: QEMU global mutex held */
void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *blk,
                                  VirtIOBlockDataPlane **dataplane,
                                  Error **errp)
{
    VirtIOBlockDataPlane *s;
    Error *local_err = NULL;
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    *dataplane = NULL;

    if (!blk->data_plane && !blk->iothread) {
        return;
    }

    /* Don't try if transport does not support notifiers. */
    if (!k->set_guest_notifiers || !k->set_host_notifier) {
        error_setg(errp,
                   "device is incompatible with x-data-plane "
                   "(transport does not support notifiers)");
        return;
    }

    /* If dataplane is (re-)enabled while the guest is running there could be
     * block jobs that can conflict.
     */
    if (bdrv_op_is_blocked(blk->conf.bs, BLOCK_OP_TYPE_DATAPLANE, &local_err)) {
        error_setg(errp, "cannot start dataplane thread: %s",
                   error_get_pretty(local_err));
        error_free(local_err);
        return;
    }

    s = g_new0(VirtIOBlockDataPlane, 1);
    s->vdev = vdev;
    s->blk = blk;

    if (blk->iothread) {
        s->iothread = blk->iothread;
        object_ref(OBJECT(s->iothread));
    } else {
        /* Create per-device IOThread if none specified.  This is for
         * x-data-plane option compatibility.  If x-data-plane is removed we
         * can drop this.
         */
        object_initialize(&s->internal_iothread_obj,
                          sizeof(s->internal_iothread_obj),
                          TYPE_IOTHREAD);
        user_creatable_complete(OBJECT(&s->internal_iothread_obj), &error_abort);
        s->iothread = &s->internal_iothread_obj;
    }
    s->ctx = iothread_get_aio_context(s->iothread);
    s->bh = aio_bh_new(s->ctx, notify_guest_bh, s);

    error_setg(&s->blocker, "block device is in use by data plane");
    bdrv_op_block_all(blk->conf.bs, s->blocker);
    bdrv_op_unblock(blk->conf.bs, BLOCK_OP_TYPE_RESIZE, s->blocker);
    bdrv_op_unblock(blk->conf.bs, BLOCK_OP_TYPE_DRIVE_DEL, s->blocker);

    *dataplane = s;
}
Example 6
/* Context: QEMU global mutex held */
void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s->vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtIOBlock *vblk = VIRTIO_BLK(s->vdev);
    VirtQueue *vq;
    int r;

    if (s->started || s->disabled) {
        return;
    }

    if (s->starting) {
        return;
    }

    s->starting = true;

    vq = virtio_get_queue(s->vdev, 0);
    if (!vring_setup(&s->vring, s->vdev, 0)) {
        goto fail_vring;
    }

    /* Set up guest notifier (irq) */
    r = k->set_guest_notifiers(qbus->parent, 1, true);
    if (r != 0) {
        fprintf(stderr, "virtio-blk failed to set guest notifier (%d), "
                "ensure -enable-kvm is set\n", r);
        goto fail_guest_notifiers;
    }
    s->guest_notifier = virtio_queue_get_guest_notifier(vq);

    /* Set up virtqueue notify */
    r = k->set_host_notifier(qbus->parent, 0, true);
    if (r != 0) {
        fprintf(stderr, "virtio-blk failed to set host notifier (%d)\n", r);
        goto fail_host_notifier;
    }
    s->host_notifier = *virtio_queue_get_host_notifier(vq);

    s->saved_complete_request = vblk->complete_request;
    vblk->complete_request = complete_request_vring;

    s->starting = false;
    s->started = true;
    trace_virtio_blk_data_plane_start(s);

    bdrv_set_aio_context(s->blk->conf.bs, s->ctx);

    /* Kick right away to begin processing requests already in vring */
    event_notifier_set(virtio_queue_get_host_notifier(vq));

    /* Get this show started by hooking up our callbacks */
    aio_context_acquire(s->ctx);
    aio_set_event_notifier(s->ctx, &s->host_notifier, handle_notify);
    aio_context_release(s->ctx);
    return;

  fail_host_notifier:
    k->set_guest_notifiers(qbus->parent, 1, false);
  fail_guest_notifiers:
    vring_teardown(&s->vring, s->vdev, 0);
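    /* Record the failure so the s->disabled check at the top of this
     * function makes later start attempts bail out early. */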
    s->disabled = true;
  fail_vring:
    s->starting = false;
}
Example 7
File: pnv.c Project: pfliu/qemu
                     d->ioport_id);
    }

    return 0;
}

static void pnv_dt_isa(ISABus *bus, void *fdt, int lpc_offset)
{
    ForeachPopulateArgs args = {
        .fdt = fdt,
        .offset = lpc_offset,
    };

    /* ISA devices are not necessarily parented to the ISA bus, so we
     * cannot use object_child_foreach() */
    qbus_walk_children(BUS(bus), pnv_dt_isa_device, NULL, NULL, NULL, &args);
}

static void *pnv_dt_create(MachineState *machine)
{
    const char plat_compat[] = "qemu,powernv\0ibm,powernv";
    PnvMachineState *pnv = PNV_MACHINE(machine);
    void *fdt;
    char *buf;
    int off;
    int i;
    int lpc_offset;

    fdt = g_malloc0(FDT_MAX_SIZE);
    _FDT((fdt_create_empty_tree(fdt, FDT_MAX_SIZE)));
Example 8
/* Context: QEMU global mutex held */
void virtio_scsi_dataplane_start(VirtIOSCSI *s)
{
    int i;
    int rc;
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);

    if (s->dataplane_started ||
        s->dataplane_starting ||
        s->dataplane_fenced ||
        s->ctx != iothread_get_aio_context(vs->conf.iothread)) {
        return;
    }

    s->dataplane_starting = true;

    /* Set up guest notifier (irq) */
    rc = k->set_guest_notifiers(qbus->parent, vs->conf.num_queues + 2, true);
    if (rc != 0) {
        fprintf(stderr, "virtio-scsi: Failed to set guest notifiers (%d), "
                "ensure -enable-kvm is set\n", rc);
        goto fail_guest_notifiers;
    }

    aio_context_acquire(s->ctx);
    rc = virtio_scsi_vring_init(s, vs->ctrl_vq, 0,
                                virtio_scsi_data_plane_handle_ctrl);
    if (rc) {
        goto fail_vrings;
    }
    rc = virtio_scsi_vring_init(s, vs->event_vq, 1,
                                virtio_scsi_data_plane_handle_event);
    if (rc) {
        goto fail_vrings;
    }
    for (i = 0; i < vs->conf.num_queues; i++) {
        rc = virtio_scsi_vring_init(s, vs->cmd_vqs[i], i + 2,
                                    virtio_scsi_data_plane_handle_cmd);
        if (rc) {
            goto fail_vrings;
        }
    }

    s->dataplane_starting = false;
    s->dataplane_started = true;
    aio_context_release(s->ctx);
    return;

fail_vrings:
    virtio_scsi_clear_aio(s);
    aio_context_release(s->ctx);
    for (i = 0; i < vs->conf.num_queues + 2; i++) {
        virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
    }
    k->set_guest_notifiers(qbus->parent, vs->conf.num_queues + 2, false);
fail_guest_notifiers:
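    /* Fence the dataplane: mark it as started so the checks at the top of
     * this function stop it from being retried, and let request handling
     * fall back to the non-dataplane path. */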
    s->dataplane_fenced = true;
    s->dataplane_starting = false;
    s->dataplane_started = true;
}
Example 9
/* Context: QEMU global mutex held */
void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
                                  VirtIOBlockDataPlane **dataplane,
                                  Error **errp)
{
    VirtIOBlockDataPlane *s;
    Error *local_err = NULL;
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    *dataplane = NULL;

    if (!conf->data_plane && !conf->iothread) {
        return;
    }

    /* Don't try if transport does not support notifiers. */
    if (!k->set_guest_notifiers || !k->set_host_notifier) {
        error_setg(errp,
                   "device is incompatible with x-data-plane "
                   "(transport does not support notifiers)");
        return;
    }

    /* If dataplane is (re-)enabled while the guest is running there could be
     * block jobs that can conflict.
     */
    if (blk_op_is_blocked(conf->conf.blk, BLOCK_OP_TYPE_DATAPLANE,
                          &local_err)) {
        error_setg(errp, "cannot start dataplane thread: %s",
                   error_get_pretty(local_err));
        error_free(local_err);
        return;
    }

    s = g_new0(VirtIOBlockDataPlane, 1);
    s->vdev = vdev;
    s->conf = conf;

    if (conf->iothread) {
        s->iothread = conf->iothread;
        object_ref(OBJECT(s->iothread));
    } else {
        /* Create per-device IOThread if none specified.  This is for
         * x-data-plane option compatibility.  If x-data-plane is removed we
         * can drop this.
         */
        object_initialize(&s->internal_iothread_obj,
                          sizeof(s->internal_iothread_obj),
                          TYPE_IOTHREAD);
        user_creatable_complete(OBJECT(&s->internal_iothread_obj), &error_abort);
        s->iothread = &s->internal_iothread_obj;
    }
    s->ctx = iothread_get_aio_context(s->iothread);
    s->bh = aio_bh_new(s->ctx, notify_guest_bh, s);

    error_setg(&s->blocker, "block device is in use by data plane");
    blk_op_block_all(conf->conf.blk, s->blocker);
    blk_op_unblock(conf->conf.blk, BLOCK_OP_TYPE_RESIZE, s->blocker);
    blk_op_unblock(conf->conf.blk, BLOCK_OP_TYPE_DRIVE_DEL, s->blocker);
    blk_op_unblock(conf->conf.blk, BLOCK_OP_TYPE_BACKUP_SOURCE, s->blocker);
    blk_op_unblock(conf->conf.blk, BLOCK_OP_TYPE_CHANGE, s->blocker);
    blk_op_unblock(conf->conf.blk, BLOCK_OP_TYPE_COMMIT_SOURCE, s->blocker);
    blk_op_unblock(conf->conf.blk, BLOCK_OP_TYPE_COMMIT_TARGET, s->blocker);
    blk_op_unblock(conf->conf.blk, BLOCK_OP_TYPE_EJECT, s->blocker);
    blk_op_unblock(conf->conf.blk, BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT, s->blocker);
    blk_op_unblock(conf->conf.blk, BLOCK_OP_TYPE_INTERNAL_SNAPSHOT, s->blocker);
    blk_op_unblock(conf->conf.blk, BLOCK_OP_TYPE_INTERNAL_SNAPSHOT_DELETE,
                   s->blocker);
    blk_op_unblock(conf->conf.blk, BLOCK_OP_TYPE_MIRROR, s->blocker);
    blk_op_unblock(conf->conf.blk, BLOCK_OP_TYPE_STREAM, s->blocker);
    blk_op_unblock(conf->conf.blk, BLOCK_OP_TYPE_REPLACE, s->blocker);

    *dataplane = s;
}