Example 1
static void error_handle_fatal(Error **errp, Error *err)
{
    if (errp == &error_abort) {
        fprintf(stderr, "Unexpected error in %s() at %s:%d:\n",
                err->func, err->src, err->line);
        error_report_err(err);
        abort();
    }
    if (errp == &error_fatal) {
        error_report_err(err);
        exit(1);
    }
}
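Example 1 is the core QEMU helper behind the &error_abort and &error_fatal sentinels: when a function receives one of these special pointers as its errp argument, any error it raises turns into an abort() or a clean exit(1). A minimal caller-side sketch of the three resulting conventions (frobnicate() is a hypothetical fallible function, not QEMU API):

static void frobnicate(Error **errp);   /* hypothetical fallible function */

static void caller_sketch(void)
{
    Error *err = NULL;

    frobnicate(&error_abort);   /* on error: print location and abort() */
    frobnicate(&error_fatal);   /* on error: report it and exit(1) */

    frobnicate(&err);           /* on error: handle it locally */
    if (err) {
        error_report_err(err);  /* reports the error, then frees it */
    }
}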
Example 2
static void bcm2835_realize(DeviceState *dev, Error **errp)
{
    BCM2835State *s = BCM2835(dev);
    Error *err = NULL;

    /* common peripherals from bcm2835 */
    object_property_set_bool(OBJECT(&s->peripherals), true, "realized", &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    sysbus_mmio_map_overlap(SYS_BUS_DEVICE(&s->peripherals), 0,
                            BCM2835_PERI_BASE, 1);

    object_property_set_bool(OBJECT(&s->cpu), true, "realized", &err);
    if (err) {
        error_report_err(err);
        exit(1);
    }

    sysbus_connect_irq(SYS_BUS_DEVICE(&s->peripherals), 0,
                       qdev_get_gpio_in(DEVICE(&s->cpu), ARM_CPU_IRQ));
    sysbus_connect_irq(SYS_BUS_DEVICE(&s->peripherals), 1,
                       qdev_get_gpio_in(DEVICE(&s->cpu), ARM_CPU_FIQ));
}
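Note that Example 2 mixes two conventions: the peripherals error is propagated to the caller with error_propagate(), while the CPU error is reported and exits directly. Under QEMU's usual realize() conventions a uniform version would propagate both; a sketch of the second check in that style (same identifiers as above):

    object_property_set_bool(OBJECT(&s->cpu), true, "realized", &err);
    if (err) {
        error_propagate(errp, err);  /* defer to the caller, which may pass &error_fatal */
        return;
    }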
Example 3
static int
pcie_endpoint_cap_common_init(PCIDevice *dev, uint8_t offset, uint8_t cap_size)
{
    uint8_t type = PCI_EXP_TYPE_ENDPOINT;
    Error *local_err = NULL;
    int ret;

    /*
     * Windows guests will report Code 10, device cannot start, if
     * a regular Endpoint type is exposed on a root complex.  These
     * should instead be Root Complex Integrated Endpoints.
     */
    if (pci_bus_is_express(pci_get_bus(dev))
        && pci_bus_is_root(pci_get_bus(dev))) {
        type = PCI_EXP_TYPE_RC_END;
    }

    if (cap_size == PCI_EXP_VER1_SIZEOF) {
        return pcie_cap_v1_init(dev, offset, type, 0);
    } else {
        ret = pcie_cap_init(dev, offset, type, 0, &local_err);

        if (ret < 0) {
            error_report_err(local_err);
        }

        return ret;
    }
}
Example 4
static void chr_closed_bh(void *opaque)
{
    const char *name = opaque;
    NetClientState *ncs[MAX_QUEUE_NUM];
    VhostUserState *s;
    Error *err = NULL;
    int queues;

    queues = qemu_find_net_clients_except(name, ncs,
                                          NET_CLIENT_DRIVER_NIC,
                                          MAX_QUEUE_NUM);
    assert(queues < MAX_QUEUE_NUM);

    s = DO_UPCAST(VhostUserState, nc, ncs[0]);

    qmp_set_link(name, false, &err);
    vhost_user_stop(queues, ncs);

    qemu_chr_fe_set_handlers(&s->chr, NULL, NULL, net_vhost_user_event,
                             NULL, opaque, NULL, true);

    if (err) {
        error_report_err(err);
    }
}
Example 5
static void init_cpus(const char *cpu_model, const char *privdev,
                      hwaddr periphbase, qemu_irq *pic, bool secure)
{
    ObjectClass *cpu_oc = cpu_class_by_name(TYPE_ARM_CPU, cpu_model);
    DeviceState *dev;
    SysBusDevice *busdev;
    int n;

    if (!cpu_oc) {
        fprintf(stderr, "Unable to find CPU definition\n");
        exit(1);
    }

    /* Create the actual CPUs */
    for (n = 0; n < smp_cpus; n++) {
        Object *cpuobj = object_new(object_class_get_name(cpu_oc));
        Error *err = NULL;

        if (!secure) {
            object_property_set_bool(cpuobj, false, "has_el3", NULL);
        }

        if (object_property_find(cpuobj, "reset-cbar", NULL)) {
            object_property_set_int(cpuobj, periphbase,
                                    "reset-cbar", &error_abort);
        }
        object_property_set_bool(cpuobj, true, "realized", &err);
        if (err) {
            error_report_err(err);
            exit(1);
        }
    }

    /* Create the private peripheral devices (including the GIC);
     * this must happen after the CPUs are created because a15mpcore_priv
     * wires itself up to the CPU's generic_timer gpio out lines.
     */
    dev = qdev_create(NULL, privdev);
    qdev_prop_set_uint32(dev, "num-cpu", smp_cpus);
    qdev_init_nofail(dev);
    busdev = SYS_BUS_DEVICE(dev);
    sysbus_mmio_map(busdev, 0, periphbase);

    /* Interrupts [42:0] are from the motherboard;
     * [47:43] are reserved; [63:48] are daughterboard
     * peripherals. Note that some documentation numbers
     * external interrupts starting from 32 (because there
     * are internal interrupts 0..31).
     */
    for (n = 0; n < 64; n++) {
        pic[n] = qdev_get_gpio_in(dev, n);
    }

    /* Connect the CPUs to the GIC */
    for (n = 0; n < smp_cpus; n++) {
        DeviceState *cpudev = DEVICE(qemu_get_cpu(n));

        sysbus_connect_irq(busdev, n, qdev_get_gpio_in(cpudev, ARM_CPU_IRQ));
    }
}
Example 6
static int stream_prepare(Job *job)
{
    StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);
    BlockJob *bjob = &s->common;
    BlockDriverState *bs = blk_bs(bjob->blk);
    BlockDriverState *base = s->base;
    Error *local_err = NULL;
    int ret = 0;

    bdrv_unfreeze_backing_chain(bs, base);
    s->chain_frozen = false;

    if (bs->backing) {
        const char *base_id = NULL, *base_fmt = NULL;
        if (base) {
            base_id = s->backing_file_str;
            if (base->drv) {
                base_fmt = base->drv->format_name;
            }
        }
        ret = bdrv_change_backing_file(bs, base_id, base_fmt);
        bdrv_set_backing_hd(bs, base, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -EPERM;
        }
    }

    return ret;
}
Example 7
static int ioh3420_initfn(PCIDevice *d)
{
    PCIEPort *p = PCIE_PORT(d);
    PCIESlot *s = PCIE_SLOT(d);
    int rc;
    Error *err = NULL;

    pci_bridge_initfn(d, TYPE_PCIE_BUS);
    pcie_port_init_reg(d);

    rc = pci_bridge_ssvid_init(d, IOH_EP_SSVID_OFFSET,
                               IOH_EP_SSVID_SVID, IOH_EP_SSVID_SSID);
    if (rc < 0) {
        goto err_bridge;
    }

    rc = msi_init(d, IOH_EP_MSI_OFFSET, IOH_EP_MSI_NR_VECTOR,
                  IOH_EP_MSI_SUPPORTED_FLAGS & PCI_MSI_FLAGS_64BIT,
                  IOH_EP_MSI_SUPPORTED_FLAGS & PCI_MSI_FLAGS_MASKBIT, &err);
    if (rc < 0) {
        assert(rc == -ENOTSUP);
        error_report_err(err);
        goto err_bridge;
    }

    rc = pcie_cap_init(d, IOH_EP_EXP_OFFSET, PCI_EXP_TYPE_ROOT_PORT, p->port);
    if (rc < 0) {
        goto err_msi;
    }

    pcie_cap_arifwd_init(d);
    pcie_cap_deverr_init(d);
    pcie_cap_slot_init(d, s->slot);
    pcie_cap_root_init(d);

    pcie_chassis_create(s->chassis);
    rc = pcie_chassis_add_slot(s);
    if (rc < 0) {
        goto err_pcie_cap;
    }

    rc = pcie_aer_init(d, IOH_EP_AER_OFFSET, PCI_ERR_SIZEOF);
    if (rc < 0) {
        goto err;
    }
    pcie_aer_root_init(d);
    ioh3420_aer_vector_update(d);

    return 0;

err:
    pcie_chassis_del_slot(s);
err_pcie_cap:
    pcie_cap_exit(d);
err_msi:
    msi_uninit(d);
err_bridge:
    pci_bridge_exitfn(d);
    return rc;
}
Example 8
static void realize(DeviceState *d, Error **errp)
{
    sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(d);
    sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
    Object *root_container;
    char link_name[256];
    gchar *child_name;
    Error *err = NULL;

    DPRINTFN("drc realize: %x", drck->get_index(drc));
    /* NOTE: we do this as part of realize/unrealize because the guest
     * will communicate with the DRC via RTAS calls referencing the
     * global DRC index. By unlinking the DRC from
     * DRC_CONTAINER_PATH/<drc_index> we effectively make it
     * inaccessible to the guest, since lookups rely on this path
     * existing in the composition tree.
     */
    root_container = container_get(object_get_root(), DRC_CONTAINER_PATH);
    snprintf(link_name, sizeof(link_name), "%x", drck->get_index(drc));
    child_name = object_get_canonical_path_component(OBJECT(drc));
    DPRINTFN("drc child name: %s", child_name);
    object_property_add_alias(root_container, link_name,
                              drc->owner, child_name, &err);
    if (err) {
        error_report_err(err);
        object_unref(OBJECT(drc));
    }
    g_free(child_name);
    DPRINTFN("drc realize complete");
}
Example 9
static uint8_t
virtio_crypto_handle_close_session(VirtIOCrypto *vcrypto,
         struct virtio_crypto_destroy_session_req *close_sess_req,
         uint32_t queue_id)
{
    int ret;
    uint64_t session_id;
    uint32_t status;
    Error *local_err = NULL;

    session_id = ldq_le_p(&close_sess_req->session_id);
    DPRINTF("close session, id=%" PRIu64 "\n", session_id);

    ret = cryptodev_backend_sym_close_session(
              vcrypto->cryptodev, session_id, queue_id, &local_err);
    if (ret == 0) {
        status = VIRTIO_CRYPTO_OK;
    } else {
        if (local_err) {
            error_report_err(local_err);
        } else {
            error_report("destroy session failed");
        }
        status = VIRTIO_CRYPTO_ERR;
    }

    return status;
}
Example 10
static void allocate_system_memory_nonnuma(MemoryRegion *mr, Object *owner,
                                           const char *name,
                                           uint64_t ram_size)
{
    if (mem_path) {
#ifdef __linux__
        Error *err = NULL;
        memory_region_init_ram_from_file(mr, owner, name, ram_size, false,
                                         mem_path, &err);

        /* Legacy behavior: if allocation failed, fall back to
         * regular RAM allocation.
         */
        if (err) {
            error_report_err(err);
            memory_region_init_ram(mr, owner, name, ram_size, &error_abort);
        }
#else
        fprintf(stderr, "-mem-path not supported on this host\n");
        exit(1);
#endif
    } else {
        memory_region_init_ram(mr, owner, name, ram_size, &error_abort);
    }
    vmstate_register_ram_global(mr);
}
Example 11
void error_reportf_err(Error *err, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    error_vprepend(&err, fmt, ap);
    va_end(ap);
    error_report_err(err);
}
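Example 11 builds on error_report_err(): error_reportf_err() prepends a formatted prefix to the error's message, then reports and frees it. A typical call site might look like this (frobnicate() is again a hypothetical fallible function):

    Error *err = NULL;

    frobnicate(&err);
    if (err) {
        /* prints "Could not frobnicate: <original message>" and frees err */
        error_reportf_err(err, "Could not frobnicate: ");
    }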
Example 12
void hmp_dump_skeys(Monitor *mon, const QDict *qdict)
{
    const char *filename = qdict_get_str(qdict, "filename");
    Error *err = NULL;

    qmp_dump_skeys(filename, &err);
    if (err) {
        error_report_err(err);
    }
}
Example 13
static int unix_socket_outgoing(const char *path)
{
    Error *local_err = NULL;
    int fd = unix_connect(path, &local_err);

    if (local_err != NULL) {
        error_report_err(local_err);
    }
    return fd;
}
Example 14
static int unix_socket_incoming(const char *path)
{
    Error *local_err = NULL;
    int fd = unix_listen(path, NULL, 0, &local_err);

    if (local_err != NULL) {
        error_report_err(local_err);
    }
    return fd;
}
Example 15
static int net_vhost_user_init(NetClientState *peer, const char *device,
                               const char *name, Chardev *chr,
                               int queues)
{
    Error *err = NULL;
    NetClientState *nc, *nc0 = NULL;
    VhostUserState *s;
    int i;

    assert(name);
    assert(queues > 0);

    for (i = 0; i < queues; i++) {
        nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name);
        snprintf(nc->info_str, sizeof(nc->info_str), "vhost-user%d to %s",
                 i, chr->label);
        nc->queue_index = i;
        if (!nc0) {
            nc0 = nc;
            s = DO_UPCAST(VhostUserState, nc, nc);
            if (!qemu_chr_fe_init(&s->chr, chr, &err)) {
                error_report_err(err);
                return -1;
            }
        }
    }

    s = DO_UPCAST(VhostUserState, nc, nc0);
    do {
        if (qemu_chr_fe_wait_connected(&s->chr, &err) < 0) {
            error_report_err(err);
            return -1;
        }
        qemu_chr_fe_set_handlers(&s->chr, NULL, NULL,
                                 net_vhost_user_event, NULL, nc0->name, NULL,
                                 true);
    } while (!s->started);

    assert(s->vhost_net);

    return 0;
}
Example 16
/*
 * Called at the end of a migration in which postcopy_ram_incoming_init
 * was used.
 */
int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    trace_postcopy_ram_incoming_cleanup_entry();

    if (mis->have_fault_thread) {
        Error *local_err = NULL;

        if (postcopy_notify(POSTCOPY_NOTIFY_INBOUND_END, &local_err)) {
            error_report_err(local_err);
            return -1;
        }

        if (qemu_ram_foreach_block(cleanup_range, mis)) {
            return -1;
        }
        /* Let the fault thread quit */
        atomic_set(&mis->fault_thread_quit, 1);
        postcopy_fault_thread_notify(mis);
        trace_postcopy_ram_incoming_cleanup_join();
        qemu_thread_join(&mis->fault_thread);

        trace_postcopy_ram_incoming_cleanup_closeuf();
        close(mis->userfault_fd);
        close(mis->userfault_event_fd);
        mis->have_fault_thread = false;
    }

    qemu_balloon_inhibit(false);

    if (enable_mlock) {
        if (os_mlock() < 0) {
            error_report("mlock: %s", strerror(errno));
            /*
             * It doesn't feel right to fail at this point; we have a valid
             * VM state.
             */
        }
    }

    postcopy_state_set(POSTCOPY_INCOMING_END);

    if (mis->postcopy_tmp_page) {
        munmap(mis->postcopy_tmp_page, mis->largest_page_size);
        mis->postcopy_tmp_page = NULL;
    }
    if (mis->postcopy_tmp_zero_page) {
        munmap(mis->postcopy_tmp_zero_page, mis->largest_page_size);
        mis->postcopy_tmp_zero_page = NULL;
    }
    trace_postcopy_ram_incoming_cleanup_blocktime(
            get_postcopy_total_blocktime());

    trace_postcopy_ram_incoming_cleanup_exit();
    return 0;
}
Example 17
int whpx_init_vcpu(CPUState *cpu)
{
    HRESULT hr;
    struct whpx_state *whpx = &whpx_global;
    struct whpx_vcpu *vcpu;
    Error *local_error = NULL;

    /* Add migration blockers for all unsupported features of the
     * Windows Hypervisor Platform
     */
    if (whpx_migration_blocker == NULL) {
        error_setg(&whpx_migration_blocker,
               "State blocked due to non-migratable CPUID feature support,"
               "dirty memory tracking support, and XSAVE/XRSTOR support");

        (void)migrate_add_blocker(whpx_migration_blocker, &local_error);
        if (local_error) {
            error_report_err(local_error);
            error_free(whpx_migration_blocker);
            migrate_del_blocker(whpx_migration_blocker);
            return -EINVAL;
        }
    }

    vcpu = g_malloc0(sizeof(struct whpx_vcpu));

    if (!vcpu) {
        error_report("WHPX: Failed to allocte VCPU context.");
        return -ENOMEM;
    }

    hr = WHvEmulatorCreateEmulator(&whpx_emu_callbacks, &vcpu->emulator);
    if (FAILED(hr)) {
        error_report("WHPX: Failed to setup instruction completion support,"
                     " hr=%08lx", hr);
        g_free(vcpu);
        return -EINVAL;
    }

    hr = WHvCreateVirtualProcessor(whpx->partition, cpu->cpu_index, 0);
    if (FAILED(hr)) {
        error_report("WHPX: Failed to create a virtual processor,"
                     " hr=%08lx", hr);
        WHvEmulatorDestroyEmulator(vcpu->emulator);
        g_free(vcpu);
        return -EINVAL;
    }

    vcpu->interruptable = true;

    cpu->vcpu_dirty = true;
    cpu->hax_vcpu = (struct hax_vcpu_state *)vcpu;

    return 0;
}
Example 18
void qdev_prop_set_drive_nofail(DeviceState *dev, const char *name,
                                BlockBackend *value)
{
    Error *err = NULL;

    qdev_prop_set_drive(dev, name, value, &err);
    if (err) {
        error_report_err(err);
        exit(1);
    }
}
Example 19
void xen_block_dataplane_stop(XenBlockDataPlane *dataplane)
{
    XenDevice *xendev;

    if (!dataplane) {
        return;
    }

    aio_context_acquire(dataplane->ctx);
    blk_set_aio_context(dataplane->blk, qemu_get_aio_context());
    aio_context_release(dataplane->ctx);

    xendev = dataplane->xendev;

    if (dataplane->event_channel) {
        Error *local_err = NULL;

        xen_device_unbind_event_channel(xendev, dataplane->event_channel,
                                        &local_err);
        dataplane->event_channel = NULL;

        if (local_err) {
            error_report_err(local_err);
        }
    }

    if (dataplane->sring) {
        Error *local_err = NULL;

        xen_device_unmap_grant_refs(xendev, dataplane->sring,
                                    dataplane->nr_ring_ref, &local_err);
        dataplane->sring = NULL;

        if (local_err) {
            error_report_err(local_err);
        }
    }

    g_free(dataplane->ring_ref);
    dataplane->ring_ref = NULL;
}
Example 20
QemuOptsList *qemu_find_opts(const char *group)
{
    QemuOptsList *ret;
    Error *local_err = NULL;

    ret = find_list(vm_config_groups, group, &local_err);
    if (local_err) {
        error_report_err(local_err);
    }

    return ret;
}
Example 21
File: tap.c Project: PennPanda/qemu
static void tap_exit_notify(Notifier *notifier, void *data)
{
    TAPState *s = container_of(notifier, TAPState, exit);
    Error *err = NULL;

    if (s->down_script[0]) {
        launch_script(s->down_script, s->down_script_arg, s->fd, &err);
        if (err) {
            error_report_err(err);
        }
    }
}
Example 22
static int tcp_socket_incoming(const char *address, uint16_t port)
{
    char address_and_port[128];
    Error *local_err = NULL;

    combine_addr(address_and_port, 128, address, port);
    int fd = inet_listen(address_and_port, NULL, 0, SOCK_STREAM, 0, &local_err);

    if (local_err != NULL) {
        error_report_err(local_err);
    }
    return fd;
}
Example 23
File: helper.c Project: chao-p/qemu
S390CPU *cpu_s390x_init(const char *cpu_model)
{
    Error *err = NULL;
    S390CPU *cpu;
    /* Used to track the CPU ID, for linux-user only */
    static int64_t next_cpu_id;

    cpu = s390x_new_cpu(cpu_model, next_cpu_id++, &err);
    if (err) {
        error_report_err(err);
    }
    return cpu;
}
Example 24
static void net_vhost_user_event(void *opaque, int event)
{
    const char *name = opaque;
    NetClientState *ncs[MAX_QUEUE_NUM];
    VhostUserState *s;
    Chardev *chr;
    Error *err = NULL;
    int queues;

    queues = qemu_find_net_clients_except(name, ncs,
                                          NET_CLIENT_DRIVER_NIC,
                                          MAX_QUEUE_NUM);
    assert(queues < MAX_QUEUE_NUM);

    s = DO_UPCAST(VhostUserState, nc, ncs[0]);
    chr = qemu_chr_fe_get_driver(&s->chr);
    trace_vhost_user_event(chr->label, event);
    switch (event) {
    case CHR_EVENT_OPENED:
        if (vhost_user_start(queues, ncs, &s->chr) < 0) {
            qemu_chr_fe_disconnect(&s->chr);
            return;
        }
        s->watch = qemu_chr_fe_add_watch(&s->chr, G_IO_HUP,
                                         net_vhost_user_watch, s);
        qmp_set_link(name, true, &err);
        s->started = true;
        break;
    case CHR_EVENT_CLOSED:
        /* A close event may happen during a read/write, but the vhost
         * code assumes the vhost_dev remains set up, so delay the
         * stop & clear until idle.
         * FIXME: better handle failure in vhost code, remove bh
         */
        if (s->watch) {
            AioContext *ctx = qemu_get_current_aio_context();

            g_source_remove(s->watch);
            s->watch = 0;
            qemu_chr_fe_set_handlers(&s->chr, NULL, NULL, NULL, NULL,
                                     NULL, NULL, false);

            aio_bh_schedule_oneshot(ctx, chr_closed_bh, opaque);
        }
        break;
    }

    if (err) {
        error_report_err(err);
    }
}
Example 25
File: qdev.c Project: Pating/qemu
static int ide_dev_initfn(IDEDevice *dev, IDEDriveKind kind)
{
    IDEBus *bus = DO_UPCAST(IDEBus, qbus, dev->qdev.parent_bus);
    IDEState *s = bus->ifs + dev->unit;
    Error *err = NULL;

    if (dev->conf.discard_granularity == -1) {
        dev->conf.discard_granularity = 512;
    } else if (dev->conf.discard_granularity &&
               dev->conf.discard_granularity != 512) {
        error_report("discard_granularity must be 512 for ide");
        return -1;
    }

    blkconf_blocksizes(&dev->conf);
    if (dev->conf.logical_block_size != 512) {
        error_report("logical_block_size must be 512 for IDE");
        return -1;
    }

    blkconf_serial(&dev->conf, &dev->serial);
    if (kind != IDE_CD) {
        blkconf_geometry(&dev->conf, &dev->chs_trans, 65535, 16, 255, &err);
        if (err) {
            error_report_err(err);
            return -1;
        }
    }
    blkconf_apply_backend_options(&dev->conf);

    if (ide_init_drive(s, dev->conf.blk, kind,
                       dev->version, dev->serial, dev->model, dev->wwn,
                       dev->conf.cyls, dev->conf.heads, dev->conf.secs,
                       dev->chs_trans) < 0) {
        return -1;
    }

    if (!dev->version) {
        dev->version = g_strdup(s->version);
    }
    if (!dev->serial) {
        dev->serial = g_strdup(s->drive_serial_str);
    }

    add_boot_device_path(dev->conf.bootindex, &dev->qdev,
                         dev->unit ? "/disk@1" : "/disk@0");

    return 0;
}
Example 26
/**
 * vm_change_state_handler - VM change state callback that flushes
 * the ITS tables into guest RAM
 *
 * The tables are flushed to guest RAM whenever the VM is stopped.
 */
static void vm_change_state_handler(void *opaque, int running,
                                    RunState state)
{
    GICv3ITSState *s = (GICv3ITSState *)opaque;
    Error *err = NULL;

    if (running) {
        return;
    }

    kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
                      KVM_DEV_ARM_ITS_SAVE_TABLES, NULL, true, &err);
    if (err) {
        error_report_err(err);
    }
}
Example 27
File: colo.c Project: CTU-IIG/qemu
static void primary_vm_do_failover(void)
{
#ifdef CONFIG_REPLICATION
    MigrationState *s = migrate_get_current();
    int old_state;
    Error *local_err = NULL;

    migrate_set_state(&s->state, MIGRATION_STATUS_COLO,
                      MIGRATION_STATUS_COMPLETED);
    /*
     * Kick the COLO thread, which might be waiting at
     * qemu_sem_wait(&s->colo_checkpoint_sem).
     */
    colo_checkpoint_notify(migrate_get_current());

    /*
     * Wake up the COLO thread, which may be blocked in recv() or send().
     * s->rp_state.from_dst_file and s->to_dst_file may use the same fd,
     * but shutting the fd down twice is harmless.
     */
    if (s->to_dst_file) {
        qemu_file_shutdown(s->to_dst_file);
    }
    if (s->rp_state.from_dst_file) {
        qemu_file_shutdown(s->rp_state.from_dst_file);
    }

    old_state = failover_set_state(FAILOVER_STATUS_ACTIVE,
                                   FAILOVER_STATUS_COMPLETED);
    if (old_state != FAILOVER_STATUS_ACTIVE) {
        error_report("Incorrect state (%s) while doing failover for Primary VM",
                     FailoverStatus_str(old_state));
        return;
    }

    replication_stop_all(true, &local_err);
    if (local_err) {
        error_report_err(local_err);
        local_err = NULL;
    }

    /* Notify COLO thread that failover work is finished */
    qemu_sem_post(&s->colo_exit_sem);
#else
    abort();
#endif
}
Example 28
static coroutine_fn void nbd_read_reply_entry(void *opaque)
{
    NBDClientSession *s = opaque;
    uint64_t i;
    int ret = 0;
    Error *local_err = NULL;

    while (!s->quit) {
        assert(s->reply.handle == 0);
        ret = nbd_receive_reply(s->ioc, &s->reply, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
        }
        if (ret <= 0) {
            break;
        }

        /* There's no need for a mutex on the receive side, because the
         * handler acts as a synchronization point and ensures that only
         * one coroutine is called until the reply finishes.
         */
        i = HANDLE_TO_INDEX(s, s->reply.handle);
        if (i >= MAX_NBD_REQUESTS ||
            !s->requests[i].coroutine ||
            !s->requests[i].receiving) {
            break;
        }

        /* We're woken up again by the request itself.  Note that there
         * is no race between yielding and reentering read_reply_co.  This
         * is because:
         *
         * - if the request runs on the same AioContext, it is only
         *   entered after we yield
         *
         * - if the request runs on a different AioContext, reentering
         *   read_reply_co happens through a bottom half, which can only
         *   run after we yield.
         */
        aio_co_wake(s->requests[i].coroutine);
        qemu_coroutine_yield();
    }

    s->quit = true;
    nbd_recv_coroutines_wake_all(s);
    s->read_reply_co = NULL;
}
Example 29
static int tpm_emulator_block_migration(TPMEmulator *tpm_emu)
{
    Error *err = NULL;

    error_setg(&tpm_emu->migration_blocker,
               "Migration disabled: TPM emulator not yet migratable");
    migrate_add_blocker(tpm_emu->migration_blocker, &err);
    if (err) {
        error_report_err(err);
        error_free(tpm_emu->migration_blocker);
        tpm_emu->migration_blocker = NULL;

        return -1;
    }

    return 0;
}
Example 30
static void unrealize(DeviceState *d, Error **errp)
{
    sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(d);
    sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
    Object *root_container;
    char name[256];
    Error *err = NULL;

    DPRINTFN("drc unrealize: %x", drck->get_index(drc));
    root_container = container_get(object_get_root(), DRC_CONTAINER_PATH);
    snprintf(name, sizeof(name), "%x", drck->get_index(drc));
    object_property_del(root_container, name, &err);
    if (err) {
        error_report_err(err);
        object_unref(OBJECT(drc));
    }
}