Usage examples of qemu_mutex_init() collected from QEMU and QEMU-derived projects.

Example #1
int qemu_init_main_loop(void)
{
    int ret;

    qemu_init_sigbus();

    ret = qemu_signal_init();
    if (ret) {
        return ret;
    }

    /* Note eventfd must be drained before signalfd handlers run */
    ret = qemu_event_init();
    if (ret) {
        return ret;
    }

    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_system_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_mutex_init(&qemu_fair_mutex);
    qemu_mutex_init(&qemu_global_mutex);
    qemu_mutex_lock(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);

    return 0;
}
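Note that every qemu_init_main_loop() variant in this collection ends by taking qemu_global_mutex and holding it: this is QEMU's global ("big QEMU") lock. As a minimal sketch of how other threads use it, assuming the simple non-TCG path (real releases layer fairness logic, e.g. qemu_fair_mutex, on top):

void qemu_mutex_lock_iothread(void)
{
    /* Sketch: take the global lock before touching shared device state */
    qemu_mutex_lock(&qemu_global_mutex);
}

void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}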
Example #2
int qemu_init_main_loop(void)
{
    int ret;
    sigset_t blocked_signals;

    cpu_set_debug_excp_handler(cpu_debug_handler);

    blocked_signals = block_io_signals();

    ret = qemu_signalfd_init(blocked_signals);
    if (ret)
        return ret;

    /* Note eventfd must be drained before signalfd handlers run */
    ret = qemu_event_init();
    if (ret)
        return ret;

    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_system_cond);
    qemu_mutex_init(&qemu_fair_mutex);
    qemu_mutex_init(&qemu_global_mutex);
    qemu_mutex_lock(&qemu_global_mutex);

    qemu_thread_self(&io_thread);

    return 0;
}
Example #3
static void test_acquire(void)
{
    QemuThread thread;
    AcquireTestData data;

    /* Dummy event notifier ensures aio_poll() will block */
    event_notifier_init(&data.notifier, false);
    set_event_notifier(ctx, &data.notifier, dummy_notifier_read);
    g_assert(!aio_poll(ctx, false)); /* consume aio_notify() */

    qemu_mutex_init(&data.start_lock);
    qemu_mutex_lock(&data.start_lock);
    data.thread_acquired = false;

    qemu_thread_create(&thread, "test_acquire_thread",
                       test_acquire_thread,
                       &data, QEMU_THREAD_JOINABLE);

    /* Block in aio_poll(), let other thread kick us and acquire context */
    aio_context_acquire(ctx);
    qemu_mutex_unlock(&data.start_lock); /* let the thread run */
    g_assert(aio_poll(ctx, true));
    g_assert(!data.thread_acquired);
    aio_context_release(ctx);

    qemu_thread_join(&thread);
    set_event_notifier(ctx, &data.notifier, NULL);
    event_notifier_cleanup(&data.notifier);

    g_assert(data.thread_acquired);
}
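The test above relies on a test_acquire_thread() helper that is not shown. A plausible sketch, assuming only the AcquireTestData fields used above: the thread waits on start_lock, kicks the notifier so the main thread's blocking aio_poll() returns, then acquires and releases the context and records success.

static void *test_acquire_thread(void *opaque)
{
    AcquireTestData *data = opaque;

    /* Wait for the main thread to release us */
    qemu_mutex_lock(&data->start_lock);
    qemu_mutex_unlock(&data->start_lock);

    /* Wake the main thread out of its blocking aio_poll() */
    event_notifier_set(&data->notifier);

    /* This cannot complete until the main thread releases the context */
    aio_context_acquire(ctx);
    aio_context_release(ctx);

    data->thread_acquired = true;
    return NULL;
}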
Example #4
File: char.c Project: mdroth/qemu
static void char_init(Object *obj)
{
    Chardev *chr = CHARDEV(obj);

    chr->logfd = -1;
    qemu_mutex_init(&chr->chr_write_lock);
}
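An instance_init hook like this is normally paired with an instance_finalize that releases what was created. A hypothetical counterpart (char_finalize and its registration are assumptions, not shown in the source):

static void char_finalize(Object *obj)
{
    Chardev *chr = CHARDEV(obj);

    if (chr->logfd != -1) {
        close(chr->logfd);  /* hypothetical: drop the log descriptor */
    }
    qemu_mutex_destroy(&chr->chr_write_lock);
}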
Example #5
static void iothread_complete(UserCreatable *obj, Error **errp)
{
    Error *local_error = NULL;
    IOThread *iothread = IOTHREAD(obj);
    char *name, *thread_name;

    iothread->stopping = false;
    iothread->thread_id = -1;
    iothread->ctx = aio_context_new(&local_error);
    if (!iothread->ctx) {
        error_propagate(errp, local_error);
        return;
    }

    qemu_mutex_init(&iothread->init_done_lock);
    qemu_cond_init(&iothread->init_done_cond);

    /* This assumes we are called from a thread with useful CPU affinity for us
     * to inherit.
     */
    name = object_get_canonical_path_component(OBJECT(obj));
    thread_name = g_strdup_printf("IO %s", name);
    qemu_thread_create(&iothread->thread, thread_name, iothread_run,
                       iothread, QEMU_THREAD_JOINABLE);
    g_free(thread_name);
    g_free(name);

    /* Wait for initialization to complete */
    qemu_mutex_lock(&iothread->init_done_lock);
    while (iothread->thread_id == -1) {
        qemu_cond_wait(&iothread->init_done_cond,
                       &iothread->init_done_lock);
    }
    qemu_mutex_unlock(&iothread->init_done_lock);
}
Example #6
static void iothread_complete(UserCreatable *obj, Error **errp)
{
    Error *local_error = NULL;
    IOThread *iothread = IOTHREAD(obj);

    iothread->stopping = false;
    iothread->thread_id = -1;
    iothread->ctx = aio_context_new(&local_error);
    if (!iothread->ctx) {
        error_propagate(errp, local_error);
        return;
    }

    qemu_mutex_init(&iothread->init_done_lock);
    qemu_cond_init(&iothread->init_done_cond);

    /* This assumes we are called from a thread with useful CPU affinity for us
     * to inherit.
     */
    qemu_thread_create(&iothread->thread, "iothread", iothread_run,
                       iothread, QEMU_THREAD_JOINABLE);

    /* Wait for initialization to complete */
    qemu_mutex_lock(&iothread->init_done_lock);
    while (iothread->thread_id == -1) {
        qemu_cond_wait(&iothread->init_done_cond,
                       &iothread->init_done_lock);
    }
    qemu_mutex_unlock(&iothread->init_done_lock);
}
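Both iothread_complete() variants block until iothread_run() publishes its thread id under init_done_lock. A sketch of the thread-side half of that handshake, loosely based on QEMU's iothread.c (the stopping flag and the shape of the aio_poll() loop are assumptions about the surrounding code):

static void *iothread_run(void *opaque)
{
    IOThread *iothread = opaque;

    qemu_mutex_lock(&iothread->init_done_lock);
    iothread->thread_id = qemu_get_thread_id();
    qemu_cond_signal(&iothread->init_done_cond);   /* wake the creator */
    qemu_mutex_unlock(&iothread->init_done_lock);

    while (!iothread->stopping) {
        aio_poll(iothread->ctx, true);
    }
    return NULL;
}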
Example #7
static int qcrypto_gcrypt_mutex_init(void **priv)
{
    QemuMutex *lock = g_new0(QemuMutex, 1);
    qemu_mutex_init(lock);
    *priv = lock;
    return 0;
}
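gcrypt's thread-callback table expects destroy/lock/unlock hooks alongside init. Minimal sketches of the likely companions (the names mirror the init function above; the exact set QEMU registers may differ by version):

static int qcrypto_gcrypt_mutex_destroy(void **priv)
{
    QemuMutex *lock = *priv;
    qemu_mutex_destroy(lock);
    g_free(lock);
    return 0;
}

static int qcrypto_gcrypt_mutex_lock(void **priv)
{
    QemuMutex *lock = *priv;
    qemu_mutex_lock(lock);
    return 0;
}

static int qcrypto_gcrypt_mutex_unlock(void **priv)
{
    QemuMutex *lock = *priv;
    qemu_mutex_unlock(lock);
    return 0;
}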
Example #8
int qemu_init_main_loop(void)
{
    int ret;

    ret = qemu_event_init();
    if (ret)
        return ret;

    qemu_cond_init(&qemu_pause_cond);
    qemu_mutex_init(&qemu_fair_mutex);
    qemu_mutex_init(&qemu_global_mutex);
    qemu_mutex_lock(&qemu_global_mutex);

    unblock_io_signals();
    qemu_thread_self(&io_thread);

    return 0;
}
Example #9
void qemu_spice_display_init_common(SimpleSpiceDisplay *ssd, DisplayState *ds)
{
    ssd->ds = ds;
    qemu_mutex_init(&ssd->lock);
    ssd->mouse_x = -1;
    ssd->mouse_y = -1;
    ssd->bufsize = (16 * 1024 * 1024);
    ssd->buf = g_malloc(ssd->bufsize);
}
Example #10
void rfifolock_init(RFifoLock *r, void (*cb)(void *), void *opaque)
{
    qemu_mutex_init(&r->lock);
    r->head = 0;
    r->tail = 0;
    qemu_cond_init(&r->cond);
    r->nesting = 0;
    r->cb = cb;
    r->cb_opaque = opaque;
}
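The head, tail, and nesting fields give away the design: a fair (FIFO) ticket lock with an optional contention callback. A sketch of how these fields are typically used, loosely based on QEMU's rfifolock; the recursive-acquisition path (which checks an owner-thread field) is elided for brevity:

void rfifolock_lock(RFifoLock *r)
{
    qemu_mutex_lock(&r->lock);
    unsigned int ticket = r->tail++;   /* take a ticket */

    while (ticket != r->head) {
        if (r->cb) {
            r->cb(r->cb_opaque);       /* e.g. kick the current holder */
        }
        qemu_cond_wait(&r->cond, &r->lock);
    }
    r->nesting++;
    qemu_mutex_unlock(&r->lock);
}

void rfifolock_unlock(RFifoLock *r)
{
    qemu_mutex_lock(&r->lock);
    if (--r->nesting == 0) {
        r->head++;                     /* admit the next ticket holder */
        qemu_cond_broadcast(&r->cond);
    }
    qemu_mutex_unlock(&r->lock);
}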
Example #11
static void tpm_emulator_inst_init(Object *obj)
{
    TPMEmulator *tpm_emu = TPM_EMULATOR(obj);

    trace_tpm_emulator_inst_init();

    tpm_emu->options = g_new0(TPMEmulatorOptions, 1);
    tpm_emu->cur_locty_number = ~0;
    qemu_mutex_init(&tpm_emu->mutex);
}
Example #12
void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_cond_init(&qemu_io_proceeded_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}
Example #13
void qemu_init_cpu_list(void)
{
    /* This is needed because qemu_init_cpu_list is also called by the
     * child process in a fork.  */
    pending_cpus = 0;

    qemu_mutex_init(&qemu_cpu_list_lock);
    qemu_cond_init(&exclusive_cond);
    qemu_cond_init(&exclusive_resume);
    qemu_cond_init(&qemu_work_cond);
}
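The comment explains why this function must be re-runnable: after fork(), the child may inherit mutexes held by threads that no longer exist, so it simply re-initializes them. A hedged sketch of the call site, loosely based on the fork handling in QEMU's linux-user code:

void fork_end(int child)
{
    if (child) {
        /* Just forked: only this thread survived, so rebuild the CPU
         * list locks rather than inherit possibly-held mutexes. */
        qemu_init_cpu_list();
    }
    /* ... other fork bookkeeping ... */
}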
Example #14
int qemu_init_main_loop(void)
{
    int ret;

    cpu_set_debug_excp_handler(cpu_debug_handler);

    ret = qemu_event_init();
    if (ret)
        return ret;

    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_system_cond);
    qemu_mutex_init(&qemu_fair_mutex);
    qemu_mutex_init(&qemu_global_mutex);
    qemu_mutex_lock(&qemu_global_mutex);

    unblock_io_signals();
    qemu_thread_self(&io_thread);

    return 0;
}
Example #15
static inline void res_tbl_init(const char *name, RdmaRmResTbl *tbl,
                                uint32_t tbl_sz, uint32_t res_sz)
{
    tbl->tbl = g_malloc(tbl_sz * res_sz);

    strncpy(tbl->name, name, MAX_RM_TBL_NAME);
    tbl->name[MAX_RM_TBL_NAME - 1] = 0;

    tbl->bitmap = bitmap_new(tbl_sz);
    tbl->tbl_sz = tbl_sz;
    tbl->res_sz = res_sz;
    qemu_mutex_init(&tbl->lock);
}
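A table initialized this way is typically consumed by an allocator that scans the bitmap under the lock. A sketch in the spirit of the rdma_rm code this comes from (res_tbl_alloc is the assumed consumer, not shown in the source):

static inline void *res_tbl_alloc(RdmaRmResTbl *tbl, uint32_t *handle)
{
    qemu_mutex_lock(&tbl->lock);

    *handle = find_first_zero_bit(tbl->bitmap, tbl->tbl_sz);
    if (*handle >= tbl->tbl_sz) {
        qemu_mutex_unlock(&tbl->lock);
        return NULL;                       /* table full */
    }
    set_bit(*handle, tbl->bitmap);

    qemu_mutex_unlock(&tbl->lock);

    /* Hand out a zeroed slot of res_sz bytes */
    memset(tbl->tbl + *handle * tbl->res_sz, 0, tbl->res_sz);
    return tbl->tbl + *handle * tbl->res_sz;
}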
Example #16
static int qemu_archipelago_init(BDRVArchipelagoState *s)
{
    int ret;

    ret = qemu_archipelago_xseg_init(s);
    if (ret < 0) {
        error_report("Cannot initialize XSEG. Aborting...");
        goto err_exit;
    }

    qemu_cond_init(&s->archip_cond);
    qemu_mutex_init(&s->archip_mutex);
    qemu_cond_init(&s->request_cond);
    qemu_mutex_init(&s->request_mutex);
    s->th_is_signaled = false;
    qemu_thread_create(&s->request_th, "xseg_io_th",
                       (void *) xseg_request_handler,
                       (void *) s, QEMU_THREAD_JOINABLE);

err_exit:
    return ret;
}
Example #17
void hostmem_init(HostMem *hostmem)
{
    memset(hostmem, 0, sizeof(*hostmem));

    qemu_mutex_init(&hostmem->mem_lock);

    hostmem->mem = qemu_mallocz(sizeof(*hostmem->mem));

    hostmem->client.set_memory = hostmem_client_set_memory;
    hostmem->client.sync_dirty_bitmap = hostmem_client_sync_dirty_bitmap;
    hostmem->client.migration_log = hostmem_client_migration_log;
    cpu_register_phys_memory_client(&hostmem->client);
}
Example #18
AioContext *aio_context_new(void)
{
    AioContext *ctx;
    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
    ctx->pollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD));
    ctx->thread_pool = NULL;
    qemu_mutex_init(&ctx->bh_lock);
    event_notifier_init(&ctx->notifier, false);
    aio_set_event_notifier(ctx, &ctx->notifier, 
                           (EventNotifierHandler *)
                           event_notifier_test_and_clear, NULL);

    return ctx;
}
Example #19
void qemu_init_cpu_loop(void)
{
#ifdef CONFIG_S2E
    tcg_cpu_thread = NULL;
    tcg_halt_cond = NULL;
#endif

    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_cond_init(&qemu_io_proceeded_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}
Example #20
static void virtio_balloon_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOBalloon *s = VIRTIO_BALLOON(dev);
    int ret;

    virtio_init(vdev, "virtio-balloon", VIRTIO_ID_BALLOON,
                sizeof(struct virtio_balloon_config));

    ret = qemu_add_balloon_handler(virtio_balloon_to_target,
                                   virtio_balloon_stat, s);

    if (ret < 0) {
        error_setg(errp, "Only one balloon device is supported");
        virtio_cleanup(vdev);
        return;
    }

    s->ivq = virtio_add_queue(vdev, 128, virtio_balloon_handle_output);
    s->dvq = virtio_add_queue(vdev, 128, virtio_balloon_handle_output);
    s->svq = virtio_add_queue(vdev, 128, virtio_balloon_receive_stats);

    if (virtio_has_feature(s->host_features,
                           VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
        s->free_page_vq = virtio_add_queue(vdev, VIRTQUEUE_MAX_SIZE,
                                           virtio_balloon_handle_free_page_vq);
        s->free_page_report_status = FREE_PAGE_REPORT_S_STOP;
        s->free_page_report_cmd_id =
                           VIRTIO_BALLOON_FREE_PAGE_REPORT_CMD_ID_MIN;
        s->free_page_report_notify.notify =
                                       virtio_balloon_free_page_report_notify;
        precopy_add_notifier(&s->free_page_report_notify);
        if (s->iothread) {
            object_ref(OBJECT(s->iothread));
            s->free_page_bh = aio_bh_new(iothread_get_aio_context(s->iothread),
                                       virtio_ballloon_get_free_page_hints, s);
            qemu_mutex_init(&s->free_page_lock);
            qemu_cond_init(&s->free_page_cond);
            s->block_iothread = false;
        } else {
            /* Simply disable this feature if the iothread wasn't created. */
            s->host_features &= ~(1 << VIRTIO_BALLOON_F_FREE_PAGE_HINT);
            virtio_error(vdev, "iothread is missing");
        }
    }
    reset_stats(s);
}
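free_page_lock and free_page_cond protect the free-page-hinting state machine that runs in the iothread. A sketch of a typical stop path under that lock, loosely based on virtio_balloon_free_page_stop (details vary between QEMU versions):

static void virtio_balloon_free_page_stop(VirtIOBalloon *s)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    qemu_mutex_lock(&s->free_page_lock);
    if (s->free_page_report_status == FREE_PAGE_REPORT_S_STOP) {
        /* Nothing in flight */
        qemu_mutex_unlock(&s->free_page_lock);
        return;
    }
    s->free_page_report_status = FREE_PAGE_REPORT_S_STOP;
    qemu_mutex_unlock(&s->free_page_lock);

    /* Tell the guest, via a config update, that reporting stopped */
    virtio_notify_config(vdev);
}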
Example #21
static int pci_edu_init(PCIDevice *pdev)
{
    EduState *edu = DO_UPCAST(EduState, pdev, pdev);
    uint8_t *pci_conf = pdev->config;

    timer_init_ms(&edu->dma_timer, QEMU_CLOCK_VIRTUAL, edu_dma_timer, edu);

    qemu_mutex_init(&edu->thr_mutex);
    qemu_cond_init(&edu->thr_cond);
    qemu_thread_create(&edu->thread, "edu", edu_fact_thread,
                       edu, QEMU_THREAD_JOINABLE);

    pci_config_set_interrupt_pin(pci_conf, 1);

    memory_region_init_io(&edu->mmio, OBJECT(edu), &edu_mmio_ops, edu,
                    "edu-mmio", 1 << 20);
    pci_register_bar(pdev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &edu->mmio);

    return 0;
}
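Whatever pci_edu_init() creates must be torn down on unrealize: stop the worker under the mutex, wake it, join it, then destroy the primitives. A sketch of the matching exit path, loosely based on the edu device (the stopping flag is an assumed field of EduState):

static void pci_edu_uninit(PCIDevice *pdev)
{
    EduState *edu = DO_UPCAST(EduState, pdev, pdev);

    qemu_mutex_lock(&edu->thr_mutex);
    edu->stopping = true;
    qemu_mutex_unlock(&edu->thr_mutex);
    qemu_cond_signal(&edu->thr_cond);      /* wake the worker thread */
    qemu_thread_join(&edu->thread);

    qemu_cond_destroy(&edu->thr_cond);
    qemu_mutex_destroy(&edu->thr_mutex);

    timer_del(&edu->dma_timer);
}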
Example #22
File: async.c Project: L0op/qemu
AioContext *aio_context_new(Error **errp)
{
    int ret;
    AioContext *ctx;
    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
    ret = event_notifier_init(&ctx->notifier, false);
    if (ret < 0) {
        g_source_destroy(&ctx->source);
        error_setg_errno(errp, -ret, "Failed to initialize event notifier");
        return NULL;
    }
    aio_set_event_notifier(ctx, &ctx->notifier,
                           (EventNotifierHandler *)
                           event_notifier_test_and_clear);
    ctx->pollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD));
    ctx->thread_pool = NULL;
    qemu_mutex_init(&ctx->bh_lock);
    rfifolock_init(&ctx->lock, aio_rfifolock_cb, ctx);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

    return ctx;
}
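In this era of the code, the RFifoLock installed here (compare Example #10) is what backs AioContext ownership; acquire/release are thin wrappers. A minimal sketch, assuming the same async.c layout:

void aio_context_acquire(AioContext *ctx)
{
    rfifolock_lock(&ctx->lock);
}

void aio_context_release(AioContext *ctx)
{
    rfifolock_unlock(&ctx->lock);
}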
Example #23
void hostmem_init(HostMem *hostmem)
{
    memset(hostmem, 0, sizeof(*hostmem));

    qemu_mutex_init(&hostmem->current_regions_lock);

    hostmem->listener = (MemoryListener){
        .begin = hostmem_listener_dummy,
        .commit = hostmem_listener_commit,
        .region_add = hostmem_listener_append_region,
        .region_del = hostmem_listener_section_dummy,
        .region_nop = hostmem_listener_append_region,
        .log_start = hostmem_listener_section_dummy,
        .log_stop = hostmem_listener_section_dummy,
        .log_sync = hostmem_listener_section_dummy,
        .log_global_start = hostmem_listener_dummy,
        .log_global_stop = hostmem_listener_dummy,
        .eventfd_add = hostmem_listener_eventfd_dummy,
        .eventfd_del = hostmem_listener_eventfd_dummy,
        .coalesced_mmio_add = hostmem_listener_coalesced_mmio_dummy,
        .coalesced_mmio_del = hostmem_listener_coalesced_mmio_dummy,
        .priority = 10,
    };

    memory_listener_register(&hostmem->listener, &address_space_memory);
    if (hostmem->num_new_regions > 0) {
        hostmem_listener_commit(&hostmem->listener);
    }
}

void hostmem_finalize(HostMem *hostmem)
{
    memory_listener_unregister(&hostmem->listener);
    g_free(hostmem->new_regions);
    g_free(hostmem->current_regions);
    qemu_mutex_destroy(&hostmem->current_regions_lock);
}
Example #24
int main(int argc, char *argv[])
{
    int nreaders = 1;
    int duration = 1;

    qemu_mutex_init(&counts_mutex);
    if (argc >= 2 && argv[1][0] == '-') {
        g_test_init(&argc, &argv, NULL);
        if (g_test_quick()) {
            g_test_add_func("/rcu/torture/1reader", gtest_stress_1_1);
            g_test_add_func("/rcu/torture/10readers", gtest_stress_10_1);
        } else {
            g_test_add_func("/rcu/torture/1reader", gtest_stress_1_5);
            g_test_add_func("/rcu/torture/10readers", gtest_stress_10_5);
        }
        return g_test_run();
    }

    if (argc >= 2) {
        nreaders = strtoul(argv[1], NULL, 0);
    }
    if (argc > 3) {
        duration = strtoul(argv[3], NULL, 0);
    }
    if (argc < 3 || strcmp(argv[2], "stress") == 0) {
        stresstest(nreaders, duration);
    } else if (strcmp(argv[2], "rperf") == 0) {
        rperftest(nreaders, duration);
    } else if (strcmp(argv[2], "uperf") == 0) {
        uperftest(nreaders, duration);
    } else if (strcmp(argv[2], "perf") == 0) {
        perftest(nreaders, duration);
    }
    usage(argc, argv);
    return 0;
}
Example #25
void helper_lock_init(void)
{
    qemu_mutex_init(&global_cpu_lock);
}
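A one-line init like this implies equally thin wrappers around the lock. Hypothetical companions in the same spirit (helper_lock/helper_unlock are assumed names, not shown in the source):

void helper_lock(void)
{
    qemu_mutex_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    qemu_mutex_unlock(&global_cpu_lock);
}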
Example #26
int
main(
    int argc,
    char *argv[]
) {
    char *qemu_host;
    char *qemu_port;
    VSCMsgHeader mhHeader;
    VSCMsgError *error_msg;

    int rv;
    int dwSendLength;
    int dwRecvLength;
    uint8_t pbRecvBuffer[APDUBufSize];
    uint8_t pbSendBuffer[APDUBufSize];
    VReaderStatus reader_status;
    VReader *reader = NULL;
    VCardEmulOptions *command_line_options = NULL;

    char *cert_names[MAX_CERTS];
    char *emul_args = NULL;
    int cert_count = 0;
    int c;

    while ((c = getopt(argc, argv, "c:e:pd:")) != -1) {
        switch (c) {
        case 'c':
            if (cert_count >= MAX_CERTS) {
                printf("too many certificates (max = %d)\n", MAX_CERTS);
                exit(5);
            }
            cert_names[cert_count++] = optarg;
            break;
        case 'e':
            emul_args = optarg;
            break;
        case 'p':
            print_usage();
            exit(4);
            break;
        case 'd':
            verbose = get_id_from_string(optarg, 1);
            break;
        }
    }

    if (argc - optind != 2) {
        print_usage();
        exit(4);
    }

    if (cert_count > 0) {
        char *new_args;
        int len, i;
        /* if we've given some -c options, we clearly we want do so some
         * software emulation.  add that emulation now. this is NSS Emulator
         * specific */
        if (emul_args == NULL) {
            emul_args = (char *)"db=\"/etc/pki/nssdb\"";
        }
#define SOFT_STRING ",soft=(,Virtual Reader,CAC,,"
        /* 2 == close paren & null */
        len = strlen(emul_args) + strlen(SOFT_STRING) + 2;
        for (i = 0; i < cert_count; i++) {
            len += strlen(cert_names[i])+1; /* 1 == comma */
        }
        new_args = g_malloc(len);
        strcpy(new_args, emul_args);
        strcat(new_args, SOFT_STRING);
        for (i = 0; i < cert_count; i++) {
            strcat(new_args, cert_names[i]);
            strcat(new_args, ",");
        }
        strcat(new_args, ")");
        emul_args = new_args;
    }
    if (emul_args) {
        command_line_options = vcard_emul_options(emul_args);
    }

    qemu_host = g_strdup(argv[argc - 2]);
    qemu_port = g_strdup(argv[argc - 1]);
    sock = connect_to_qemu(qemu_host, qemu_port);
    if (sock == -1) {
        fprintf(stderr, "error opening socket, exiting.\n");
        exit(5);
    }

    qemu_mutex_init(&write_lock);
    qemu_mutex_init(&pending_reader_lock);
    qemu_cond_init(&pending_reader_condition);

    vcard_emul_init(command_line_options);

    printf("> ");
    fflush(stdout);

    /* Send init message, Host responds (and then we send reader attachments) */
    VSCMsgInit init = {
        .version = htonl(VSCARD_VERSION),
        .magic = VSCARD_MAGIC,
        .capabilities = {0}
    };
    send_msg(VSC_Init, mhHeader.reader_id, &init, sizeof(init));

    do {
        fd_set fds;

        FD_ZERO(&fds);
        FD_SET(1, &fds);
        FD_SET(sock, &fds);

        /* waiting on input from the socket */
        rv = select(sock+1, &fds, NULL, NULL, NULL);
        if (rv < 0) {
            /* handle error */
            perror("select");
            return 7;
        }
        if (FD_ISSET(1, &fds)) {
            do_command();
        }
        if (!FD_ISSET(sock, &fds)) {
            continue;
        }

        rv = read(sock, &mhHeader, sizeof(mhHeader));
        if (rv < (int)sizeof(mhHeader)) { /* cast: keep rv == -1 from being promoted to unsigned */
            /* Error */
            if (rv < 0) {
                perror("header read error\n");
            } else {
                fprintf(stderr, "header short read %d\n", rv);
            }
            return 8;
        }
        mhHeader.type = ntohl(mhHeader.type);
        mhHeader.reader_id = ntohl(mhHeader.reader_id);
        mhHeader.length = ntohl(mhHeader.length);
        if (verbose) {
            printf("Header: type=%d, reader_id=%u length=%d (0x%x)\n",
                    mhHeader.type, mhHeader.reader_id, mhHeader.length,
                                               mhHeader.length);
        }
        switch (mhHeader.type) {
        case VSC_APDU:
        case VSC_Flush:
        case VSC_Error:
        case VSC_Init:
            rv = read(sock, pbSendBuffer, mhHeader.length);
            break;
        default:
            fprintf(stderr, "Unexpected message of type 0x%X\n", mhHeader.type);
            return 0;
        }
        switch (mhHeader.type) {
        case VSC_APDU:
            if (rv < 0) {
                /* Error */
                fprintf(stderr, "read error\n");
                close(sock);
                return 8;
            }
            if (verbose) {
                printf(" recv APDU: ");
                print_byte_array(pbSendBuffer, mhHeader.length);
            }
            /* Transmit received APDU */
            dwSendLength = mhHeader.length;
            dwRecvLength = sizeof(pbRecvBuffer);
            reader = vreader_get_reader_by_id(mhHeader.reader_id);
            reader_status = vreader_xfr_bytes(reader,
                pbSendBuffer, dwSendLength,
                pbRecvBuffer, &dwRecvLength);
            if (reader_status == VREADER_OK) {
                mhHeader.length = dwRecvLength;
                if (verbose) {
                    printf(" send response: ");
                    print_byte_array(pbRecvBuffer, mhHeader.length);
                }
                send_msg(VSC_APDU, mhHeader.reader_id,
                         pbRecvBuffer, dwRecvLength);
            } else {
                rv = reader_status; /* warning: not meaningful */
                send_msg(VSC_Error, mhHeader.reader_id, &rv, sizeof(uint32_t));
            }
            vreader_free(reader);
            reader = NULL; /* we've freed it, don't use it by accident
                              again */
            break;
        case VSC_Flush:
            /* TODO: actually flush */
            send_msg(VSC_FlushComplete, mhHeader.reader_id, NULL, 0);
            break;
        case VSC_Error:
            error_msg = (VSCMsgError *) pbSendBuffer;
            if (error_msg->code == VSC_SUCCESS) {
                qemu_mutex_lock(&pending_reader_lock);
                if (pending_reader) {
                    vreader_set_id(pending_reader, mhHeader.reader_id);
                    vreader_free(pending_reader);
                    pending_reader = NULL;
                    qemu_cond_signal(&pending_reader_condition);
                }
                qemu_mutex_unlock(&pending_reader_lock);
                break;
            }
            printf("warning: qemu refused to add reader\n");
            if (error_msg->code == VSC_CANNOT_ADD_MORE_READERS) {
                /* clear pending reader, qemu can't handle any more */
                qemu_mutex_lock(&pending_reader_lock);
                if (pending_reader) {
                    pending_reader = NULL;
                    /* make sure the event loop doesn't hang */
                    qemu_cond_signal(&pending_reader_condition);
                }
                qemu_mutex_unlock(&pending_reader_lock);
            }
            break;
        case VSC_Init:
            if (on_host_init(&mhHeader, (VSCMsgInit *)pbSendBuffer) < 0) {
                return -1;
            }
            break;
        default:
            printf("Default\n");
            return 0;
        }
    } while (rv >= 0);

    return 0;
}
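The write_lock initialized above matters because send_msg() is called from more than one place, and each message is a header plus payload written separately, so writers must be serialized or the stream interleaves. A hedged sketch of send_msg(), which is not shown in the source (the error handling here is illustrative):

static int send_msg(VSCMsgType type, uint32_t reader_id,
                    const void *msg, unsigned int length)
{
    VSCMsgHeader header = {
        .type = htonl(type),
        .reader_id = htonl(reader_id),
        .length = htonl(length),
    };
    int rv = 0;

    qemu_mutex_lock(&write_lock);          /* keep header+payload atomic */
    if (write(sock, &header, sizeof(header)) != sizeof(header) ||
        (length && write(sock, msg, length) != (ssize_t)length)) {
        rv = -1;
    }
    qemu_mutex_unlock(&write_lock);
    return rv;
}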
Example #27
void qemu_init_cpu_loop(struct uc_struct* uc)
{
    qemu_cond_init(&uc->qemu_cpu_cond);
    qemu_mutex_init(&uc->qemu_global_mutex);
}