Example #1
static void iothread_complete(UserCreatable *obj, Error **errp)
{
    Error *local_error = NULL;
    IOThread *iothread = IOTHREAD(obj);
    char *name, *thread_name;

    iothread->stopping = false;
    iothread->thread_id = -1;
    iothread->ctx = aio_context_new(&local_error);
    if (!iothread->ctx) {
        error_propagate(errp, local_error);
        return;
    }

    qemu_mutex_init(&iothread->init_done_lock);
    qemu_cond_init(&iothread->init_done_cond);

    /* This assumes we are called from a thread with useful CPU affinity for us
     * to inherit.
     */
    name = object_get_canonical_path_component(OBJECT(obj));
    thread_name = g_strdup_printf("IO %s", name);
    qemu_thread_create(&iothread->thread, thread_name, iothread_run,
                       iothread, QEMU_THREAD_JOINABLE);
    g_free(thread_name);
    g_free(name);

    /* Wait for initialization to complete */
    qemu_mutex_lock(&iothread->init_done_lock);
    while (iothread->thread_id == -1) {
        qemu_cond_wait(&iothread->init_done_cond,
                       &iothread->init_done_lock);
    }
    qemu_mutex_unlock(&iothread->init_done_lock);
}
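
The wait loop above pairs with the spawned thread's entry point, which publishes its thread ID under the same lock and signals the condition variable. A minimal sketch of that counterpart, modeled on QEMU's iothread_run (the AioContext event loop it then enters is elided):

static void *iothread_run(void *opaque)
{
    IOThread *iothread = opaque;

    /* Publish the thread ID and wake the creator blocked in
     * iothread_complete(); the real function then polls the
     * AioContext until iothread->stopping is set. */
    qemu_mutex_lock(&iothread->init_done_lock);
    iothread->thread_id = qemu_get_thread_id();
    qemu_cond_signal(&iothread->init_done_cond);
    qemu_mutex_unlock(&iothread->init_done_lock);

    /* ... event loop elided ... */
    return NULL;
}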
Example #2
static void pc_dimm_realize(DeviceState *dev, Error **errp)
{
    PCDIMMDevice *dimm = PC_DIMM(dev);
    PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);

    if (!dimm->hostmem) {
        error_setg(errp, "'" PC_DIMM_MEMDEV_PROP "' property is not set");
        return;
    } else if (host_memory_backend_is_mapped(dimm->hostmem)) {
        char *path = object_get_canonical_path_component(OBJECT(dimm->hostmem));
        error_setg(errp, "can't use already busy memdev: %s", path);
        g_free(path);
        return;
    }
    if (((nb_numa_nodes > 0) && (dimm->node >= nb_numa_nodes)) ||
        (!nb_numa_nodes && dimm->node)) {
        error_setg(errp, "'DIMM property " PC_DIMM_NODE_PROP " has value %"
                   PRIu32 "' which exceeds the number of numa nodes: %d",
                   dimm->node, nb_numa_nodes ? nb_numa_nodes : 1);
        return;
    }

    if (ddc->realize) {
        Error *local_err = NULL;

        ddc->realize(dimm, &local_err);
        if (local_err) {
            /* Don't mark the backend mapped if subclass realize failed */
            error_propagate(errp, local_err);
            return;
        }
    }

    host_memory_backend_set_mapped(dimm->hostmem, true);
}
Example #3
static void nvdimm_realize(PCDIMMDevice *dimm, Error **errp)
{
    MemoryRegion *mr = host_memory_backend_get_memory(dimm->hostmem, errp);
    NVDIMMDevice *nvdimm = NVDIMM(dimm);
    uint64_t align, pmem_size, size = memory_region_size(mr);

    align = memory_region_get_alignment(mr);

    pmem_size = size - nvdimm->label_size;
    nvdimm->label_data = memory_region_get_ram_ptr(mr) + pmem_size;
    pmem_size = QEMU_ALIGN_DOWN(pmem_size, align);

    if (size <= nvdimm->label_size || !pmem_size) {
        HostMemoryBackend *hostmem = dimm->hostmem;
        char *path = object_get_canonical_path_component(OBJECT(hostmem));

        error_setg(errp, "the size of memdev %s (0x%" PRIx64 ") is too "
                   "small to contain nvdimm label (0x%" PRIx64 ") and "
                   "aligned PMEM (0x%" PRIx64 ")",
                   path, memory_region_size(mr), nvdimm->label_size, align);
        g_free(path);
        return;
    }

    memory_region_init_alias(&nvdimm->nvdimm_mr, OBJECT(dimm),
                             "nvdimm-memory", mr, 0, pmem_size);
    nvdimm->nvdimm_mr.align = align;
}
Example #4
static void realize(DeviceState *d, Error **errp)
{
    sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(d);
    sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
    Object *root_container;
    char link_name[256];
    gchar *child_name;
    Error *err = NULL;

    DPRINTFN("drc realize: %x", drck->get_index(drc));
    /* NOTE: we do this as part of realize/unrealize due to the fact
     * that the guest will communicate with the DRC via RTAS calls
     * referencing the global DRC index. By unlinking the DRC
     * from DRC_CONTAINER_PATH/<drc_index> we effectively make it
     * inaccessible by the guest, since lookups rely on this path
     * existing in the composition tree
     */
    root_container = container_get(object_get_root(), DRC_CONTAINER_PATH);
    snprintf(link_name, sizeof(link_name), "%x", drck->get_index(drc));
    child_name = object_get_canonical_path_component(OBJECT(drc));
    DPRINTFN("drc child name: %s", child_name);
    object_property_add_alias(root_container, link_name,
                              drc->owner, child_name, &err);
    if (err) {
        error_report("%s", error_get_pretty(err));
        error_free(err);
        object_unref(OBJECT(drc));
    }
    g_free(child_name);
    DPRINTFN("drc realize complete");
}
Example #5
static void ivshmem_check_memdev_is_busy(Object *obj, const char *name,
                                         Object *val, Error **errp)
{
    if (host_memory_backend_is_mapped(MEMORY_BACKEND(val))) {
        char *path = object_get_canonical_path_component(val);
        error_setg(errp, "can't use already busy memdev: %s", path);
        g_free(path);
    } else {
        qdev_prop_allow_set_link_before_realize(obj, name, val, errp);
    }
}
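
This checker is not called directly; it is meant to be installed as the check callback of a QOM link property, so it runs on every attempt to set the link. A sketch of the registration, assuming an IVShmemState *s with a hostmem field and the link flags of the QEMU generation these examples come from:

/* Sketch (names assumed): wire the checker into the "memdev" link
 * property from the device's instance_init, so it rejects backends
 * that are already mapped before the link is ever set. */
object_property_add_link(obj, "memdev", TYPE_MEMORY_BACKEND,
                         (Object **)&s->hostmem,
                         ivshmem_check_memdev_is_busy,
                         OBJ_PROP_LINK_UNREF_ON_RELEASE,
                         &error_abort);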
Example #6
static void
ram_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
{
    char *path;

    if (!backend->size) {
        error_setg(errp, "can't create backend with size 0");
        return;
    }

    path = object_get_canonical_path_component(OBJECT(backend));
    memory_region_init_ram_shared_nomigrate(&backend->mr, OBJECT(backend), path,
                                            backend->size, backend->share, errp);
    g_free(path);
}
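
Since user-created objects live under /objects, the canonical path component here is simply the backend's id, so the resulting RAM block is named after the command line; for example, -object memory-backend-ram,id=mem0,size=1G should yield a region named "mem0".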
Example #7
static void virtio_crypto_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(dev);
    int i;

    vcrypto->cryptodev = vcrypto->conf.cryptodev;
    if (vcrypto->cryptodev == NULL) {
        error_setg(errp, "'cryptodev' parameter expects a valid object");
        return;
    } else if (cryptodev_backend_is_used(vcrypto->cryptodev)) {
        char *path = object_get_canonical_path_component(OBJECT(vcrypto->conf.cryptodev));
        error_setg(errp, "can't use already used cryptodev backend: %s", path);
        g_free(path);
        return;
    }

    vcrypto->max_queues = MAX(vcrypto->cryptodev->conf.peers.queues, 1);
    if (vcrypto->max_queues + 1 > VIRTIO_QUEUE_MAX) {
        error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
                   "must be a positive integer less than %d.",
                   vcrypto->max_queues, VIRTIO_QUEUE_MAX);
        return;
    }

    virtio_init(vdev, "virtio-crypto", VIRTIO_ID_CRYPTO, vcrypto->config_size);
    vcrypto->curr_queues = 1;
    vcrypto->vqs = g_malloc0(sizeof(VirtIOCryptoQueue) * vcrypto->max_queues);
    for (i = 0; i < vcrypto->max_queues; i++) {
        vcrypto->vqs[i].dataq =
                 virtio_add_queue(vdev, 1024, virtio_crypto_handle_dataq_bh);
        vcrypto->vqs[i].dataq_bh =
                 qemu_bh_new(virtio_crypto_dataq_bh, &vcrypto->vqs[i]);
        vcrypto->vqs[i].vcrypto = vcrypto;
    }

    vcrypto->ctrl_vq = virtio_add_queue(vdev, 64, virtio_crypto_handle_ctrl);
    if (!cryptodev_backend_is_ready(vcrypto->cryptodev)) {
        vcrypto->status &= ~VIRTIO_CRYPTO_S_HW_READY;
    } else {
        vcrypto->status |= VIRTIO_CRYPTO_S_HW_READY;
    }

    virtio_crypto_init_config(vdev);
    cryptodev_backend_set_used(vcrypto->cryptodev, true);
}
Example #8
static void pc_dimm_check_memdev_is_busy(Object *obj, const char *name,
                                         Object *val, Error **errp)
{
    MemoryRegion *mr;
    Error *local_err = NULL;

    mr = host_memory_backend_get_memory(MEMORY_BACKEND(val), &local_err);
    if (local_err) {
        goto out;
    }
    if (memory_region_is_mapped(mr)) {
        char *path = object_get_canonical_path_component(val);
        error_setg(&local_err, "can't use already busy memdev: %s", path);
        g_free(path);
    } else {
        qdev_prop_allow_set_link_before_realize(obj, name, val, &local_err);
    }

out:
    error_propagate(errp, local_err);
}
Example #9
void memory_region_allocate_system_memory(MemoryRegion *mr, Object *owner,
                                          const char *name,
                                          uint64_t ram_size)
{
    uint64_t addr = 0;
    int i;

    if (nb_numa_nodes == 0 || !have_memdevs) {
        allocate_system_memory_nonnuma(mr, owner, name, ram_size);
        return;
    }

    memory_region_init(mr, owner, name, ram_size);
    for (i = 0; i < MAX_NODES; i++) {
        Error *local_err = NULL;
        uint64_t size = numa_info[i].node_mem;
        HostMemoryBackend *backend = numa_info[i].node_memdev;
        if (!backend) {
            continue;
        }
        MemoryRegion *seg = host_memory_backend_get_memory(backend, &local_err);
        if (local_err) {
            error_report_err(local_err);
            exit(1);
        }

        if (memory_region_is_mapped(seg)) {
            char *path = object_get_canonical_path_component(OBJECT(backend));
            error_report("memory backend %s is used multiple times. Each "
                         "-numa option must use a different memdev value.",
                         path);
            exit(1);
        }

        memory_region_add_subregion(mr, addr, seg);
        vmstate_register_ram_global(seg);
        addr += size;
    }
}
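
Hitting the "used multiple times" error above is a configuration mistake: each -numa node needs its own backend, e.g.:

-object memory-backend-ram,id=m0,size=512M -numa node,nodeid=0,memdev=m0 \
-object memory-backend-ram,id=m1,size=512M -numa node,nodeid=1,memdev=m1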
Example #10
static int query_one_pr_manager(Object *object, void *opaque)
{
    PRManagerInfoList ***prev = opaque;
    PRManagerInfoList *elem;
    PRManagerInfo *info;
    PRManager *pr_mgr;

    pr_mgr = (PRManager *)object_dynamic_cast(object, TYPE_PR_MANAGER);
    if (!pr_mgr) {
        return 0;
    }

    elem = g_new0(PRManagerInfoList, 1);
    info = g_new0(PRManagerInfo, 1);
    info->id = object_get_canonical_path_component(object);
    info->connected = pr_manager_is_connected(pr_mgr);
    elem->value = info;
    elem->next = NULL;

    **prev = elem;
    *prev = &elem->next;
    return 0;
}
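
The visitor above is driven over the QOM objects container; QEMU's qmp_query_pr_managers does essentially the following:

PRManagerInfoList *qmp_query_pr_managers(Error **errp)
{
    PRManagerInfoList *head = NULL;
    PRManagerInfoList **prev = &head;
    Object *container = object_get_objects_root();

    /* Invoke query_one_pr_manager() on each child; it appends one
     * element per PRManager and advances prev. */
    object_child_foreach(container, query_one_pr_manager, &prev);
    return head;
}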
Example #11
char *iothread_get_id(IOThread *iothread)
{
    return object_get_canonical_path_component(OBJECT(iothread));
}
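
The returned string is a heap-allocated copy, so the caller owns it and must release it with g_free(). A caller-side sketch:

char *id = iothread_get_id(iothread);
do_something_with(id);   /* hypothetical consumer */
g_free(id);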