Example #1
int qemu_init_main_loop(void)
{
    int ret;

    qemu_init_sigbus();

    ret = qemu_signal_init();
    if (ret) {
        return ret;
    }

    /* Note eventfd must be drained before signalfd handlers run */
    ret = qemu_event_init();
    if (ret) {
        return ret;
    }

    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_cond_init(&qemu_io_proceeded_cond);
    qemu_mutex_init(&qemu_global_mutex);
    qemu_mutex_lock(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);

    return 0;
}
Example #2
File: cpus.c Project: yclin127/phobos
int qemu_init_main_loop(void)
{
    int ret;
    sigset_t blocked_signals;

    cpu_set_debug_excp_handler(cpu_debug_handler);

    blocked_signals = block_io_signals();

    ret = qemu_signalfd_init(blocked_signals);
    if (ret)
        return ret;

    /* Note eventfd must be drained before signalfd handlers run */
    ret = qemu_event_init();
    if (ret)
        return ret;

    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_system_cond);
    qemu_mutex_init(&qemu_fair_mutex);
    qemu_mutex_init(&qemu_global_mutex);
    qemu_mutex_lock(&qemu_global_mutex);

    qemu_thread_self(&io_thread);

    return 0;
}
Example #3
File: cpus-common.c Project: 8tab/qemu
void qemu_init_cpu_list(void)
{
    /* This is needed because qemu_init_cpu_list is also called by the
     * child process in a fork.  */
    pending_cpus = 0;

    qemu_mutex_init(&qemu_cpu_list_lock);
    qemu_cond_init(&exclusive_cond);
    qemu_cond_init(&exclusive_resume);
    qemu_cond_init(&qemu_work_cond);
}
Example #4
void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_cond_init(&qemu_io_proceeded_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}
Example #5
File: iothread.c Project: 01org/qemu-lite
static void iothread_complete(UserCreatable *obj, Error **errp)
{
    Error *local_error = NULL;
    IOThread *iothread = IOTHREAD(obj);
    char *name, *thread_name;

    iothread->stopping = false;
    iothread->thread_id = -1;
    iothread->ctx = aio_context_new(&local_error);
    if (!iothread->ctx) {
        error_propagate(errp, local_error);
        return;
    }

    qemu_mutex_init(&iothread->init_done_lock);
    qemu_cond_init(&iothread->init_done_cond);

    /* This assumes we are called from a thread with useful CPU affinity for us
     * to inherit.
     */
    name = object_get_canonical_path_component(OBJECT(obj));
    thread_name = g_strdup_printf("IO %s", name);
    qemu_thread_create(&iothread->thread, thread_name, iothread_run,
                       iothread, QEMU_THREAD_JOINABLE);
    g_free(thread_name);
    g_free(name);

    /* Wait for initialization to complete */
    qemu_mutex_lock(&iothread->init_done_lock);
    while (iothread->thread_id == -1) {
        qemu_cond_wait(&iothread->init_done_cond,
                       &iothread->init_done_lock);
    }
    qemu_mutex_unlock(&iothread->init_done_lock);
}
Example #6
File: iothread.c Project: ismailhkose/qemu
static void iothread_complete(UserCreatable *obj, Error **errp)
{
    Error *local_error = NULL;
    IOThread *iothread = IOTHREAD(obj);

    iothread->stopping = false;
    iothread->thread_id = -1;
    iothread->ctx = aio_context_new(&local_error);
    if (!iothread->ctx) {
        error_propagate(errp, local_error);
        return;
    }

    qemu_mutex_init(&iothread->init_done_lock);
    qemu_cond_init(&iothread->init_done_cond);

    /* This assumes we are called from a thread with useful CPU affinity for us
     * to inherit.
     */
    qemu_thread_create(&iothread->thread, "iothread", iothread_run,
                       iothread, QEMU_THREAD_JOINABLE);

    /* Wait for initialization to complete */
    qemu_mutex_lock(&iothread->init_done_lock);
    while (iothread->thread_id == -1) {
        qemu_cond_wait(&iothread->init_done_cond,
                       &iothread->init_done_lock);
    }
    qemu_mutex_unlock(&iothread->init_done_lock);
}
Example #7
File: cpus.c Project: 0bliv10n/s2e
void qemu_init_cpu_loop(void)
{
#ifdef CONFIG_S2E
    tcg_cpu_thread = NULL;
    tcg_halt_cond = NULL;
#endif

    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_cond_init(&qemu_io_proceeded_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}
Example #8
File: cpus.c Project: 0bliv10n/s2e
static void qemu_tcg_init_vcpu(void *_env)
{
    CPUArchState *env = _env;

    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
#ifdef CONFIG_S2E
        /* Forks inherit parent's memory, therefore we do not want
           to allocate new memory regions, just overwrite them. */
        if (!s2e_is_forking()) {
            env->thread = g_malloc0(sizeof(QemuThread));
            env->halt_cond = g_malloc0(sizeof(QemuCond));
        }
#else
        env->thread = g_malloc0(sizeof(QemuThread));
        env->halt_cond = g_malloc0(sizeof(QemuCond));
#endif
        qemu_cond_init(env->halt_cond);
        tcg_halt_cond = env->halt_cond;
        qemu_thread_create(env->thread, qemu_tcg_cpu_thread_fn, env,
                           QEMU_THREAD_JOINABLE);
#ifdef _WIN32
        env->hThread = qemu_thread_get_handle(env->thread);
#endif
        while (env->created == 0) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = env->thread;
    } else {
        env->thread = tcg_cpu_thread;
        env->halt_cond = tcg_halt_cond;
    }
}
Example #9
File: cpus.c Project: yujinyu/QEMU_PACER
static void kvm_start_vcpu(CPUState *env)
{
    env->thread = qemu_mallocz(sizeof(QemuThread));
    env->halt_cond = qemu_mallocz(sizeof(QemuCond));
    qemu_cond_init(env->halt_cond);
    qemu_thread_create(env->thread, kvm_cpu_thread_fn, env);
    while (env->created == 0)
        qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
}
Example #10
void rfifolock_init(RFifoLock *r, void (*cb)(void *), void *opaque)
{
    qemu_mutex_init(&r->lock);
    r->head = 0;
    r->tail = 0;
    qemu_cond_init(&r->cond);
    r->nesting = 0;
    r->cb = cb;
    r->cb_opaque = opaque;
}
Example #11
static void qemu_kvm_start_vcpu(CPUState *env)
{
    env->thread = g_malloc0(sizeof(QemuThread));
    env->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(env->halt_cond);
    qemu_thread_create(env->thread, qemu_kvm_cpu_thread_fn, env);
    while (env->created == 0) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}
Example #12
static void qemu_dummy_start_vcpu(CPUArchState *env)
{
    env->thread = g_malloc0(sizeof(QemuThread));
    env->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(env->halt_cond);
    qemu_thread_create(env->thread, qemu_dummy_cpu_thread_fn, env,
                       QEMU_THREAD_JOINABLE);
    while (env->created == 0) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}
Example #13
File: cpus.c Project: zhouy-fnst/qemu
static void qemu_dummy_start_vcpu(CPUState *cpu)
{
    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    qemu_thread_create(cpu->thread, qemu_dummy_cpu_thread_fn, cpu,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}
Example #14
File: cpus.c Project: EgoIncarnate/qemu-rr
int qemu_init_main_loop(void)
{
    int ret;

    cpu_set_debug_excp_handler(cpu_debug_handler);

    ret = qemu_event_init();
    if (ret)
        return ret;

    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_system_cond);
    qemu_mutex_init(&qemu_fair_mutex);
    qemu_mutex_init(&qemu_global_mutex);
    qemu_mutex_lock(&qemu_global_mutex);

    unblock_io_signals();
    qemu_thread_self(&io_thread);

    return 0;
}
Example #15
static void qemu_kvm_start_vcpu(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    qemu_thread_create(cpu->thread, qemu_kvm_cpu_thread_fn, env,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}
Example #16
static int qemu_archipelago_init(BDRVArchipelagoState *s)
{
    int ret;

    ret = qemu_archipelago_xseg_init(s);
    if (ret < 0) {
        error_report("Cannot initialize XSEG. Aborting...\n");
        goto err_exit;
    }

    qemu_cond_init(&s->archip_cond);
    qemu_mutex_init(&s->archip_mutex);
    qemu_cond_init(&s->request_cond);
    qemu_mutex_init(&s->request_mutex);
    s->th_is_signaled = false;
    qemu_thread_create(&s->request_th, "xseg_io_th",
                       (void *) xseg_request_handler,
                       (void *) s, QEMU_THREAD_JOINABLE);

err_exit:
    return ret;
}
Example #17
static void virtio_balloon_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOBalloon *s = VIRTIO_BALLOON(dev);
    int ret;

    virtio_init(vdev, "virtio-balloon", VIRTIO_ID_BALLOON,
                sizeof(struct virtio_balloon_config));

    ret = qemu_add_balloon_handler(virtio_balloon_to_target,
                                   virtio_balloon_stat, s);

    if (ret < 0) {
        error_setg(errp, "Only one balloon device is supported");
        virtio_cleanup(vdev);
        return;
    }

    s->ivq = virtio_add_queue(vdev, 128, virtio_balloon_handle_output);
    s->dvq = virtio_add_queue(vdev, 128, virtio_balloon_handle_output);
    s->svq = virtio_add_queue(vdev, 128, virtio_balloon_receive_stats);

    if (virtio_has_feature(s->host_features,
                           VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
        s->free_page_vq = virtio_add_queue(vdev, VIRTQUEUE_MAX_SIZE,
                                           virtio_balloon_handle_free_page_vq);
        s->free_page_report_status = FREE_PAGE_REPORT_S_STOP;
        s->free_page_report_cmd_id =
                           VIRTIO_BALLOON_FREE_PAGE_REPORT_CMD_ID_MIN;
        s->free_page_report_notify.notify =
                                       virtio_balloon_free_page_report_notify;
        precopy_add_notifier(&s->free_page_report_notify);
        if (s->iothread) {
            object_ref(OBJECT(s->iothread));
            s->free_page_bh = aio_bh_new(iothread_get_aio_context(s->iothread),
                                       virtio_ballloon_get_free_page_hints, s);
            qemu_mutex_init(&s->free_page_lock);
            qemu_cond_init(&s->free_page_cond);
            s->block_iothread = false;
        } else {
            /* Simply disable this feature if the iothread wasn't created. */
            s->host_features &= ~(1 << VIRTIO_BALLOON_F_FREE_PAGE_HINT);
            virtio_error(vdev, "iothread is missing");
        }
    }
    reset_stats(s);
}
Example #18
static void tcg_init_vcpu(void *_env)
{
    CPUState *env = _env;
    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        env->thread = qemu_mallocz(sizeof(QemuThread));
        env->halt_cond = qemu_mallocz(sizeof(QemuCond));
        qemu_cond_init(env->halt_cond);
        qemu_thread_create(env->thread, tcg_cpu_thread_fn, env);
        while (env->created == 0)
            qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
        tcg_cpu_thread = env->thread;
        tcg_halt_cond = env->halt_cond;
    } else {
        env->thread = tcg_cpu_thread;
        env->halt_cond = tcg_halt_cond;
    }
}
Example #19
int qemu_init_main_loop(void)
{
    int ret;

    ret = qemu_event_init();
    if (ret)
        return ret;

    qemu_cond_init(&qemu_pause_cond);
    qemu_mutex_init(&qemu_fair_mutex);
    qemu_mutex_init(&qemu_global_mutex);
    qemu_mutex_lock(&qemu_global_mutex);

    unblock_io_signals();
    qemu_thread_self(&io_thread);

    return 0;
}
Example #20
File: edu.c Project: 32bitmicro/riscv-qemu
static int pci_edu_init(PCIDevice *pdev)
{
    EduState *edu = DO_UPCAST(EduState, pdev, pdev);
    uint8_t *pci_conf = pdev->config;

    timer_init_ms(&edu->dma_timer, QEMU_CLOCK_VIRTUAL, edu_dma_timer, edu);

    qemu_mutex_init(&edu->thr_mutex);
    qemu_cond_init(&edu->thr_cond);
    qemu_thread_create(&edu->thread, "edu", edu_fact_thread,
                       edu, QEMU_THREAD_JOINABLE);

    pci_config_set_interrupt_pin(pci_conf, 1);

    memory_region_init_io(&edu->mmio, OBJECT(edu), &edu_mmio_ops, edu,
                    "edu-mmio", 1 << 20);
    pci_register_bar(pdev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &edu->mmio);

    return 0;
}
Example #21
File: cpus.c Project: 0bliv10n/s2e
static void qemu_dummy_start_vcpu(CPUArchState *env)
{
#ifdef CONFIG_S2E
    /* Forks inherit parent's memory, therefore we do not want
       to allocate new memory regions, just overwrite them. */
    if (!s2e_is_forking()) {
        env->thread = g_malloc0(sizeof(QemuThread));
        env->halt_cond = g_malloc0(sizeof(QemuCond));
    }
#else
    env->thread = g_malloc0(sizeof(QemuThread));
    env->halt_cond = g_malloc0(sizeof(QemuCond));
#endif

    qemu_cond_init(env->halt_cond);
    qemu_thread_create(env->thread, qemu_dummy_cpu_thread_fn, env,
                       QEMU_THREAD_JOINABLE);
    while (env->created == 0) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}
Example #22
static void qemu_tcg_init_vcpu(CPUState *cpu)
{
    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        cpu->thread = g_malloc0(sizeof(QemuThread));
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(cpu->halt_cond);
        tcg_halt_cond = cpu->halt_cond;
        qemu_thread_create(cpu->thread, qemu_tcg_cpu_thread_fn, cpu,
                           QEMU_THREAD_JOINABLE);
#ifdef _WIN32
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
        while (!cpu->created) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = cpu->thread;
    } else {
        cpu->thread = tcg_cpu_thread;
        cpu->halt_cond = tcg_halt_cond;
    }
}
Example #23
{
    CPUArchState *env = _env;

    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        env->thread = g_malloc0(sizeof(QemuThread));
        env->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(env->halt_cond);
        tcg_halt_cond = env->halt_cond;
        qemu_thread_create(env->thread, qemu_tcg_cpu_thread_fn, env,
                           QEMU_THREAD_JOINABLE);
#ifdef _WIN32
        env->hThread = qemu_thread_get_handle(env->thread);
#endif
        while (env->created == 0) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = env->thread;
    } else {
        env->thread = tcg_cpu_thread;
        env->halt_cond = tcg_halt_cond;
    }
}
Example #24
int
main(
    int argc,
    char *argv[]
) {
    char *qemu_host;
    char *qemu_port;
    VSCMsgHeader mhHeader;
    VSCMsgError *error_msg;

    int rv;
    int dwSendLength;
    int dwRecvLength;
    uint8_t pbRecvBuffer[APDUBufSize];
    uint8_t pbSendBuffer[APDUBufSize];
    VReaderStatus reader_status;
    VReader *reader = NULL;
    VCardEmulOptions *command_line_options = NULL;

    char *cert_names[MAX_CERTS];
    char *emul_args = NULL;
    int cert_count = 0;
    int c;

    while ((c = getopt(argc, argv, "c:e:pd:")) != -1) {
        switch (c) {
        case 'c':
            if (cert_count >= MAX_CERTS) {
                printf("too many certificates (max = %d)\n", MAX_CERTS);
                exit(5);
            }
            cert_names[cert_count++] = optarg;
            break;
        case 'e':
            emul_args = optarg;
            break;
        case 'p':
            print_usage();
            exit(4);
            break;
        case 'd':
            verbose = get_id_from_string(optarg, 1);
            break;
        }
    }

    if (argc - optind != 2) {
        print_usage();
        exit(4);
    }

    if (cert_count > 0) {
        char *new_args;
        int len, i;
        /* if we've given some -c options, we clearly we want do so some
         * software emulation.  add that emulation now. this is NSS Emulator
         * specific */
        if (emul_args == NULL) {
            emul_args = (char *)"db=\"/etc/pki/nssdb\"";
        }
#define SOFT_STRING ",soft=(,Virtual Reader,CAC,,"
             /* 2 == close paren & null */
        len = strlen(emul_args) + strlen(SOFT_STRING) + 2;
        for (i = 0; i < cert_count; i++) {
            len += strlen(cert_names[i])+1; /* 1 == comma */
        }
        new_args = g_malloc(len);
        strcpy(new_args, emul_args);
        strcat(new_args, SOFT_STRING);
        for (i = 0; i < cert_count; i++) {
            strcat(new_args, cert_names[i]);
            strcat(new_args, ",");
        }
        strcat(new_args, ")");
        emul_args = new_args;
    }
    if (emul_args) {
        command_line_options = vcard_emul_options(emul_args);
    }

    qemu_host = g_strdup(argv[argc - 2]);
    qemu_port = g_strdup(argv[argc - 1]);
    sock = connect_to_qemu(qemu_host, qemu_port);
    if (sock == -1) {
        fprintf(stderr, "error opening socket, exiting.\n");
        exit(5);
    }

    qemu_mutex_init(&write_lock);
    qemu_mutex_init(&pending_reader_lock);
    qemu_cond_init(&pending_reader_condition);

    vcard_emul_init(command_line_options);

    printf("> ");
    fflush(stdout);

    /* Send init message, Host responds (and then we send reader attachments) */
    VSCMsgInit init = {
        .version = htonl(VSCARD_VERSION),
        .magic = VSCARD_MAGIC,
        .capabilities = {0}
    };
    send_msg(VSC_Init, mhHeader.reader_id, &init, sizeof(init));

    do {
        fd_set fds;

        FD_ZERO(&fds);
        FD_SET(1, &fds);
        FD_SET(sock, &fds);

        /* waiting on input from the socket */
        rv = select(sock+1, &fds, NULL, NULL, NULL);
        if (rv < 0) {
            /* handle error */
            perror("select");
            return 7;
        }
        if (FD_ISSET(1, &fds)) {
            do_command();
        }
        if (!FD_ISSET(sock, &fds)) {
            continue;
        }

        rv = read(sock, &mhHeader, sizeof(mhHeader));
        if (rv < sizeof(mhHeader)) {
            /* Error */
            if (rv < 0) {
                perror("header read error\n");
            } else {
                fprintf(stderr, "header short read %d\n", rv);
            }
            return 8;
        }
        mhHeader.type = ntohl(mhHeader.type);
        mhHeader.reader_id = ntohl(mhHeader.reader_id);
        mhHeader.length = ntohl(mhHeader.length);
        if (verbose) {
            printf("Header: type=%d, reader_id=%u length=%d (0x%x)\n",
                    mhHeader.type, mhHeader.reader_id, mhHeader.length,
                                               mhHeader.length);
        }
        switch (mhHeader.type) {
        case VSC_APDU:
        case VSC_Flush:
        case VSC_Error:
        case VSC_Init:
            rv = read(sock, pbSendBuffer, mhHeader.length);
            break;
        default:
            fprintf(stderr, "Unexpected message of type 0x%X\n", mhHeader.type);
            return 0;
        }
        switch (mhHeader.type) {
        case VSC_APDU:
            if (rv < 0) {
                /* Error */
                fprintf(stderr, "read error\n");
                close(sock);
                return 8;
            }
            if (verbose) {
                printf(" recv APDU: ");
                print_byte_array(pbSendBuffer, mhHeader.length);
            }
            /* Transmit received APDU */
            dwSendLength = mhHeader.length;
            dwRecvLength = sizeof(pbRecvBuffer);
            reader = vreader_get_reader_by_id(mhHeader.reader_id);
            reader_status = vreader_xfr_bytes(reader,
                pbSendBuffer, dwSendLength,
                pbRecvBuffer, &dwRecvLength);
            if (reader_status == VREADER_OK) {
                mhHeader.length = dwRecvLength;
                if (verbose) {
                    printf(" send response: ");
                    print_byte_array(pbRecvBuffer, mhHeader.length);
                }
                send_msg(VSC_APDU, mhHeader.reader_id,
                         pbRecvBuffer, dwRecvLength);
            } else {
                rv = reader_status; /* warning: not meaningful */
                send_msg(VSC_Error, mhHeader.reader_id, &rv, sizeof(uint32_t));
            }
            vreader_free(reader);
            reader = NULL; /* we've freed it, don't use it by accident
                              again */
            break;
        case VSC_Flush:
            /* TODO: actually flush */
            send_msg(VSC_FlushComplete, mhHeader.reader_id, NULL, 0);
            break;
        case VSC_Error:
            error_msg = (VSCMsgError *) pbSendBuffer;
            if (error_msg->code == VSC_SUCCESS) {
                qemu_mutex_lock(&pending_reader_lock);
                if (pending_reader) {
                    vreader_set_id(pending_reader, mhHeader.reader_id);
                    vreader_free(pending_reader);
                    pending_reader = NULL;
                    qemu_cond_signal(&pending_reader_condition);
                }
                qemu_mutex_unlock(&pending_reader_lock);
                break;
            }
            printf("warning: qemu refused to add reader\n");
            if (error_msg->code == VSC_CANNOT_ADD_MORE_READERS) {
                /* clear pending reader, qemu can't handle any more */
                qemu_mutex_lock(&pending_reader_lock);
                if (pending_reader) {
                    pending_reader = NULL;
                    /* make sure the event loop doesn't hang */
                    qemu_cond_signal(&pending_reader_condition);
                }
                qemu_mutex_unlock(&pending_reader_lock);
            }
            break;
        case VSC_Init:
            if (on_host_init(&mhHeader, (VSCMsgInit *)pbSendBuffer) < 0) {
                return -1;
            }
            break;
        default:
            printf("Default\n");
            return 0;
        }
    } while (rv >= 0);

    return 0;
}
Example #25
File: vl.c Project: RKX1209/unicorn
void qemu_init_cpu_loop(struct uc_struct* uc)
{
    qemu_cond_init(&uc->qemu_cpu_cond);
    qemu_mutex_init(&uc->qemu_global_mutex);
}