Example #1
static void qemu_tcg_wait_io_event(void)
{
    CPUState *env;

    while (all_cpu_threads_idle()) {
        /* Start accounting real time to the virtual clock if the CPUs
           are idle.  */
        qemu_clock_warp(vm_clock);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    qemu_mutex_unlock(&qemu_global_mutex);

    /*
     * Users of qemu_global_mutex can be starved, having no chance
     * to acquire it since this path will get to it first.
     * So use another lock to provide fairness.
     */
    qemu_mutex_lock(&qemu_fair_mutex);
    qemu_mutex_unlock(&qemu_fair_mutex);

    qemu_mutex_lock(&qemu_global_mutex);

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        qemu_wait_io_event_common(env);
    }
}
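
A note on the fairness trick above: the unlock/lock pass through qemu_fair_mutex pairs with the acquisition order in Example #5, where the I/O thread takes qemu_fair_mutex before qemu_global_mutex. Below is a minimal self-contained sketch of the same hand-off in plain pthreads (which QemuMutex wraps on POSIX hosts); all names here are illustrative, not QEMU API.

#include <pthread.h>

static pthread_mutex_t fair_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t big_lock  = PTHREAD_MUTEX_INITIALIZER;

/* Fast path: a thread that would otherwise re-take big_lock in a tight
 * loop and starve everyone else. */
static void fast_path_yield(void)
{
    pthread_mutex_unlock(&big_lock);

    /* Pass through the fairness mutex.  A slow-path caller that is
     * already queued on fair_lock gets to take big_lock first. */
    pthread_mutex_lock(&fair_lock);
    pthread_mutex_unlock(&fair_lock);

    pthread_mutex_lock(&big_lock);
}

/* Slow path: take fair_lock first, so the fast path must yield. */
static void slow_path_lock(void)
{
    pthread_mutex_lock(&fair_lock);
    pthread_mutex_lock(&big_lock);
    pthread_mutex_unlock(&fair_lock);
}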
Example #2
static void virtio_balloon_set_status(VirtIODevice *vdev, uint8_t status)
{
    VirtIOBalloon *s = VIRTIO_BALLOON(vdev);

    if (!s->stats_vq_elem && vdev->vm_running &&
        (status & VIRTIO_CONFIG_S_DRIVER_OK) && virtqueue_rewind(s->svq, 1)) {
        /* poll stats queue for the element we have discarded when the VM
         * was stopped */
        virtio_balloon_receive_stats(vdev, s->svq);
    }

    if (virtio_balloon_free_page_support(s)) {
        /*
         * The VM is woken up and the iothread was blocked, so signal it to
         * continue.
         */
        if (vdev->vm_running && s->block_iothread) {
            qemu_mutex_lock(&s->free_page_lock);
            s->block_iothread = false;
            qemu_cond_signal(&s->free_page_cond);
            qemu_mutex_unlock(&s->free_page_lock);
        }

        /* The VM is stopped, block the iothread. */
        if (!vdev->vm_running) {
            qemu_mutex_lock(&s->free_page_lock);
            s->block_iothread = true;
            qemu_mutex_unlock(&s->free_page_lock);
        }
    }
}
Example #3
/* called from spice server thread context only */
int qxl_render_cursor(PCIQXLDevice *qxl, QXLCommandExt *ext)
{
    QXLCursorCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id);
    QXLCursor *cursor;
    QEMUCursor *c;

    if (!cmd) {
        return 1;
    }

    if (!dpy_cursor_define_supported(qxl->vga.con)) {
        return 0;
    }

    if (qxl->debug > 1 && cmd->type != QXL_CURSOR_MOVE) {
        fprintf(stderr, "%s", __FUNCTION__);
        qxl_log_cmd_cursor(qxl, cmd, ext->group_id);
        fprintf(stderr, "\n");
    }
    switch (cmd->type) {
    case QXL_CURSOR_SET:
        cursor = qxl_phys2virt(qxl, cmd->u.set.shape, ext->group_id);
        if (!cursor) {
            return 1;
        }
        if (cursor->chunk.data_size != cursor->data_size) {
            fprintf(stderr, "%s: multiple chunks\n", __FUNCTION__);
            return 1;
        }
        c = qxl_cursor(qxl, cursor);
        if (c == NULL) {
            c = cursor_builtin_left_ptr();
        }
        qemu_mutex_lock(&qxl->ssd.lock);
        if (qxl->ssd.cursor) {
            cursor_put(qxl->ssd.cursor);
        }
        qxl->ssd.cursor = c;
        qxl->ssd.mouse_x = cmd->u.set.position.x;
        qxl->ssd.mouse_y = cmd->u.set.position.y;
        qemu_mutex_unlock(&qxl->ssd.lock);
        qemu_bh_schedule(qxl->ssd.cursor_bh);
        break;
    case QXL_CURSOR_MOVE:
        qemu_mutex_lock(&qxl->ssd.lock);
        qxl->ssd.mouse_x = cmd->u.position.x;
        qxl->ssd.mouse_y = cmd->u.position.y;
        qemu_mutex_unlock(&qxl->ssd.lock);
        qemu_bh_schedule(qxl->ssd.cursor_bh);
        break;
    }
    return 0;
}
Example #4
void cpu_list_remove(CPUState *cpu)
{
    qemu_mutex_lock(&qemu_cpu_list_lock);
    if (!QTAILQ_IN_USE(cpu, node)) {
        /* there is nothing to undo since cpu_exec_init() hasn't been called */
        qemu_mutex_unlock(&qemu_cpu_list_lock);
        return;
    }

    assert(!(cpu_index_auto_assigned && cpu != QTAILQ_LAST(&cpus, CPUTailQ)));

    QTAILQ_REMOVE(&cpus, cpu, node);
    cpu->cpu_index = UNASSIGNED_CPU_INDEX;
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}
Example #5
void qemu_mutex_lock_iothread(void)
{
    if (kvm_enabled()) {
        qemu_mutex_lock(&qemu_fair_mutex);
        qemu_mutex_lock(&qemu_global_mutex);
        qemu_mutex_unlock(&qemu_fair_mutex);
    } else {
        qemu_mutex_lock(&qemu_fair_mutex);
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_thread_signal(tcg_cpu_thread, SIG_IPI);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        qemu_mutex_unlock(&qemu_fair_mutex);
    }
}
Example #6
void pfifo_write(void *opaque, hwaddr addr, uint64_t val, unsigned int size)
{
    NV2AState *d = (NV2AState *)opaque;

    reg_log_write(NV_PFIFO, addr, val);

    qemu_mutex_lock(&d->pfifo.lock);

    switch (addr) {
    case NV_PFIFO_INTR_0:
        d->pfifo.pending_interrupts &= ~val;
        update_irq(d);
        break;
    case NV_PFIFO_INTR_EN_0:
        d->pfifo.enabled_interrupts = val;
        update_irq(d);
        break;
    default:
        d->pfifo.regs[addr] = val;
        break;
    }

    qemu_cond_broadcast(&d->pfifo.pusher_cond);
    qemu_cond_broadcast(&d->pfifo.puller_cond);

    qemu_mutex_unlock(&d->pfifo.lock);
}
Example #7
static void hostmem_client_set_memory(CPUPhysMemoryClient *client,
                                      target_phys_addr_t start_addr,
                                      ram_addr_t size,
                                      ram_addr_t phys_offset)
{
    HostMem *hostmem = container_of(client, HostMem, client);
    ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
    size_t s = offsetof(struct vhost_memory, regions) +
               (hostmem->mem->nregions + 1) * sizeof hostmem->mem->regions[0];

    /* TODO: this is a hack.
     * At least one vga card (cirrus) changes the gpa to hva
     * memory maps on data path, which slows us down.
     * Since we should never need to DMA into VGA memory
     * anyway, lets just skip these regions. */
    if (ranges_overlap(start_addr, size, 0xa0000, 0x10000)) {
        return;
    }

    qemu_mutex_lock(&hostmem->mem_lock);

    hostmem->mem = qemu_realloc(hostmem->mem, s);

    assert(size);

    vhost_mem_unassign_memory(hostmem->mem, start_addr, size);
    if (flags == IO_MEM_RAM) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_mem_assign_memory(hostmem->mem, start_addr, size,
                                (uintptr_t)qemu_get_ram_ptr(phys_offset));
    }

    qemu_mutex_unlock(&hostmem->mem_lock);
}
Example #8
/* Mark cpu as not executing, and release pending exclusive ops.  */
void cpu_exec_end(CPUState *cpu)
{
    atomic_set(&cpu->running, false);

    /* Write cpu->running before reading pending_cpus.  */
    smp_mb();

    /* 1. start_exclusive saw cpu->running == true.  Then it will increment
     * pending_cpus and wait for exclusive_cond.  After taking the lock
     * we'll see cpu->has_waiter == true.
     *
     * 2. start_exclusive saw cpu->running == false but here pending_cpus >= 1.
     * This includes the case when an exclusive item started after setting
     * cpu->running to false and before we read pending_cpus.  Then we'll see
     * cpu->has_waiter == false and not touch pending_cpus.  The next call to
     * cpu_exec_start will run exclusive_idle if still necessary, thus waiting
     * for the item to complete.
     *
     * 3. pending_cpus == 0.  Then start_exclusive is definitely going to
     * see cpu->running == false, and it can ignore this CPU until the
     * next cpu_exec_start.
     */
    if (unlikely(atomic_read(&pending_cpus))) {
        qemu_mutex_lock(&qemu_cpu_list_lock);
        if (cpu->has_waiter) {
            cpu->has_waiter = false;
            atomic_set(&pending_cpus, pending_cpus - 1);
            if (pending_cpus == 1) {
                qemu_cond_signal(&exclusive_cond);
            }
        }
        qemu_mutex_unlock(&qemu_cpu_list_lock);
    }
}
Example #9
/* Finish an exclusive operation.  */
void end_exclusive(void)
{
    qemu_mutex_lock(&qemu_cpu_list_lock);
    atomic_set(&pending_cpus, 0);
    qemu_cond_broadcast(&exclusive_resume);
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}
Example #10
/* Start an exclusive operation.
   Must only be called from outside cpu_exec.  */
void start_exclusive(void)
{
    CPUState *other_cpu;
    int running_cpus;

    qemu_mutex_lock(&qemu_cpu_list_lock);
    exclusive_idle();

    /* Make all other cpus stop executing.  */
    atomic_set(&pending_cpus, 1);

    /* Write pending_cpus before reading other_cpu->running.  */
    smp_mb();
    running_cpus = 0;
    CPU_FOREACH(other_cpu) {
        if (atomic_read(&other_cpu->running)) {
            other_cpu->has_waiter = true;
            running_cpus++;
            qemu_cpu_kick(other_cpu);
        }
    }

    atomic_set(&pending_cpus, running_cpus + 1);
    while (pending_cpus > 1) {
        qemu_cond_wait(&exclusive_cond, &qemu_cpu_list_lock);
    }

    /* Can release mutex, no one will enter another exclusive
     * section until end_exclusive resets pending_cpus to 0.
     */
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}
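
Examples #8, #9 and #10 together form QEMU's exclusive-section protocol: start_exclusive() forces every running vCPU out of cpu_exec and waits until pending_cpus drops back to 1, cpu_exec_end() is where a stopping vCPU reports in, and end_exclusive() releases everyone. A hedged usage sketch follows; only start_exclusive() and end_exclusive() are real, the work in between is a hypothetical placeholder.

/* Hypothetical caller of the functions above. */
static void do_exclusive_work(void)
{
    start_exclusive();          /* kick all running vCPUs, wait them out */

    rewrite_shared_state();     /* hypothetical: nothing executes in
                                 * cpu_exec until end_exclusive() */

    end_exclusive();            /* reset pending_cpus, wake blocked CPUs */
}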
Example #11
static void *rcu_read_perf_test(void *arg)
{
    int i;
    long long n_reads_local = 0;

    rcu_register_thread();

    *(struct rcu_reader_data **)arg = &rcu_reader;
    atomic_inc(&nthreadsrunning);
    while (goflag == GOFLAG_INIT) {
        g_usleep(1000);
    }
    while (goflag == GOFLAG_RUN) {
        for (i = 0; i < RCU_READ_RUN; i++) {
            rcu_read_lock();
            rcu_read_unlock();
        }
        n_reads_local += RCU_READ_RUN;
    }
    qemu_mutex_lock(&counts_mutex);
    n_reads += n_reads_local;
    qemu_mutex_unlock(&counts_mutex);

    rcu_unregister_thread();
    return NULL;
}
Example #12
/**
 * Map guest physical address to host pointer
 */
void *hostmem_lookup(HostMem *hostmem, hwaddr phys, hwaddr len, bool is_write)
{
    HostMemRegion *region;
    void *host_addr = NULL;
    hwaddr offset_within_region;

    qemu_mutex_lock(&hostmem->current_regions_lock);
    region = bsearch(&phys, hostmem->current_regions,
                     hostmem->num_current_regions,
                     sizeof(hostmem->current_regions[0]),
                     hostmem_lookup_cmp);
    if (!region) {
        goto out;
    }
    if (is_write && region->readonly) {
        goto out;
    }
    offset_within_region = phys - region->guest_addr;
    if (len <= region->size - offset_within_region) {
        host_addr = region->host_addr + offset_within_region;
    }
out:
    qemu_mutex_unlock(&hostmem->current_regions_lock);

    return host_addr;
}
Example #13
void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex)
{
    /*
     * This access is protected under the mutex.
     */
    cond->waiters++;

    /*
     * Unlock external mutex and wait for signal.
     * NOTE: we've held mutex locked long enough to increment
     * waiters count above, so there's no problem with
     * leaving mutex unlocked before we wait on semaphore.
     */
    qemu_mutex_unlock(mutex);
    WaitForSingleObject(cond->sema, INFINITE);

    /* Now waiters must rendez-vous with the signaling thread and
     * let it continue.  For cond_broadcast this has heavy contention
     * and triggers thundering herd.  So goes life.
     *
     * Decrease waiters count.  The mutex is not taken, so we have
     * to do this atomically.
     *
     * All waiters contend for the mutex at the end of this function
     * until the signaling thread relinquishes it.  To ensure
     * each waiter consumes exactly one slice of the semaphore,
     * the signaling thread stops until it is told by the last
     * waiter that it can go on.
     */
    if (InterlockedDecrement(&cond->waiters) == cond->target) {
        SetEvent(cond->continue_event);
    }

    qemu_mutex_lock(mutex);
}
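
This is the wait half of a Win32 condition-variable emulation; cond->target and the continue_event handshake only make sense next to the signaling side, which is not quoted here. The sketch below is reconstructed from the wait protocol above rather than copied from QEMU, and assumes continue_event is an auto-reset event.

/* Reconstruction, not QEMU's code: wake one waiter, then wait until it
 * has consumed its semaphore slice before returning. */
void qemu_cond_signal_sketch(QemuCond *cond)
{
    /* cond->waiters is protected by the external mutex, which the
     * caller must hold, so a plain read is safe here. */
    if (cond->waiters == 0) {
        return;
    }

    /* The woken waiter decrements waiters to this value and then
     * signals continue_event (see the wait side above). */
    cond->target = cond->waiters - 1;

    ReleaseSemaphore(cond->sema, 1, NULL);  /* wake exactly one waiter */
    WaitForSingleObject(cond->continue_event, INFINITE);

    /* A broadcast would instead set target = 0 and release
     * cond->waiters slices at once. */
}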
Example #14
static void *iothread_run(void *opaque)
{
    IOThread *iothread = opaque;

    rcu_register_thread();

    my_iothread = iothread;
    qemu_mutex_lock(&iothread->init_done_lock);
    iothread->thread_id = qemu_get_thread_id();
    qemu_cond_signal(&iothread->init_done_cond);
    qemu_mutex_unlock(&iothread->init_done_lock);

    while (iothread->running) {
        aio_poll(iothread->ctx, true);

        if (atomic_read(&iothread->worker_context)) {
            GMainLoop *loop;

            g_main_context_push_thread_default(iothread->worker_context);
            iothread->main_loop =
                g_main_loop_new(iothread->worker_context, TRUE);
            loop = iothread->main_loop;

            g_main_loop_run(iothread->main_loop);
            iothread->main_loop = NULL;
            g_main_loop_unref(loop);

            g_main_context_pop_thread_default(iothread->worker_context);
        }
    }

    rcu_unregister_thread();
    return NULL;
}
Example #15
static void iothread_complete(UserCreatable *obj, Error **errp)
{
    Error *local_error = NULL;
    IOThread *iothread = IOTHREAD(obj);

    iothread->stopping = false;
    iothread->thread_id = -1;
    iothread->ctx = aio_context_new(&local_error);
    if (!iothread->ctx) {
        error_propagate(errp, local_error);
        return;
    }

    qemu_mutex_init(&iothread->init_done_lock);
    qemu_cond_init(&iothread->init_done_cond);

    /* This assumes we are called from a thread with useful CPU affinity for us
     * to inherit.
     */
    qemu_thread_create(&iothread->thread, "iothread", iothread_run,
                       iothread, QEMU_THREAD_JOINABLE);

    /* Wait for initialization to complete */
    qemu_mutex_lock(&iothread->init_done_lock);
    while (iothread->thread_id == -1) {
        qemu_cond_wait(&iothread->init_done_cond,
                       &iothread->init_done_lock);
    }
    qemu_mutex_unlock(&iothread->init_done_lock);
}
Example #16
static void iothread_complete(UserCreatable *obj, Error **errp)
{
    Error *local_error = NULL;
    IOThread *iothread = IOTHREAD(obj);
    char *name, *thread_name;

    iothread->stopping = false;
    iothread->thread_id = -1;
    iothread->ctx = aio_context_new(&local_error);
    if (!iothread->ctx) {
        error_propagate(errp, local_error);
        return;
    }

    qemu_mutex_init(&iothread->init_done_lock);
    qemu_cond_init(&iothread->init_done_cond);

    /* This assumes we are called from a thread with useful CPU affinity for us
     * to inherit.
     */
    name = object_get_canonical_path_component(OBJECT(obj));
    thread_name = g_strdup_printf("IO %s", name);
    qemu_thread_create(&iothread->thread, thread_name, iothread_run,
                       iothread, QEMU_THREAD_JOINABLE);
    g_free(thread_name);
    g_free(name);

    /* Wait for initialization to complete */
    qemu_mutex_lock(&iothread->init_done_lock);
    while (iothread->thread_id == -1) {
        qemu_cond_wait(&iothread->init_done_cond,
                       &iothread->init_done_lock);
    }
    qemu_mutex_unlock(&iothread->init_done_lock);
}
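
Examples #14–#16 (and #17 below) rely on the same startup handshake: the creator sets thread_id to -1, spawns the thread, and sleeps on a condition variable until the thread publishes its id. A self-contained sketch of that pattern in plain pthreads (all names illustrative):

#include <pthread.h>

struct worker {
    pthread_t       thread;
    pthread_mutex_t lock;
    pthread_cond_t  started;
    int             tid;        /* -1 until the thread publishes it */
};

static void *worker_run(void *opaque)
{
    struct worker *w = opaque;

    /* Publish our id under the lock, then wake the creator. */
    pthread_mutex_lock(&w->lock);
    w->tid = 42;                /* stand-in for qemu_get_thread_id() */
    pthread_cond_signal(&w->started);
    pthread_mutex_unlock(&w->lock);

    /* ... the thread's event loop would run here ... */
    return NULL;
}

void worker_start(struct worker *w)
{
    pthread_mutex_init(&w->lock, NULL);
    pthread_cond_init(&w->started, NULL);
    w->tid = -1;
    pthread_create(&w->thread, NULL, worker_run, w);

    /* Wait on the predicate, not the wakeup: condition variables can
     * wake spuriously, hence the while loop, exactly as in the
     * examples above. */
    pthread_mutex_lock(&w->lock);
    while (w->tid == -1) {
        pthread_cond_wait(&w->started, &w->lock);
    }
    pthread_mutex_unlock(&w->lock);
}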
Example #17
static void *iothread_run(void *opaque)
{
    IOThread *iothread = opaque;
    bool blocking;

    rcu_register_thread();

    qemu_mutex_lock(&iothread->init_done_lock);
    iothread->thread_id = qemu_get_thread_id();
    qemu_cond_signal(&iothread->init_done_cond);
    qemu_mutex_unlock(&iothread->init_done_lock);

    while (!iothread->stopping) {
        aio_context_acquire(iothread->ctx);
        blocking = true;
        while (!iothread->stopping && aio_poll(iothread->ctx, blocking)) {
            /* Progress was made, keep going */
            blocking = false;
        }
        aio_context_release(iothread->ctx);
    }

    rcu_unregister_thread();
    return NULL;
}
Example #18
static void qemu_archipelago_close(BlockDriverState *bs)
{
    int r, targetlen;
    char *target;
    struct xseg_request *req;
    BDRVArchipelagoState *s = bs->opaque;

    s->stopping = true;

    qemu_mutex_lock(&s->request_mutex);
    while (!s->th_is_signaled) {
        qemu_cond_wait(&s->request_cond,
                       &s->request_mutex);
    }
    qemu_mutex_unlock(&s->request_mutex);
    qemu_thread_join(&s->request_th);
    qemu_cond_destroy(&s->request_cond);
    qemu_mutex_destroy(&s->request_mutex);

    qemu_cond_destroy(&s->archip_cond);
    qemu_mutex_destroy(&s->archip_mutex);

    targetlen = strlen(s->volname);
    req = xseg_get_request(s->xseg, s->srcport, s->vportno, X_ALLOC);
    if (!req) {
        archipelagolog("Cannot get XSEG request\n");
        goto err_exit;
    }
    r = xseg_prep_request(s->xseg, req, targetlen, 0);
    if (r < 0) {
        xseg_put_request(s->xseg, req, s->srcport);
        archipelagolog("Cannot prepare XSEG close request\n");
        goto err_exit;
    }

    target = xseg_get_target(s->xseg, req);
    memcpy(target, s->volname, targetlen);
    req->size = req->datalen;
    req->offset = 0;
    req->op = X_CLOSE;

    xport p = xseg_submit(s->xseg, req, s->srcport, X_ALLOC);
    if (p == NoPort) {
        xseg_put_request(s->xseg, req, s->srcport);
        archipelagolog("Cannot submit XSEG close request\n");
        goto err_exit;
    }

    xseg_signal(s->xseg, p);
    wait_reply(s->xseg, s->srcport, s->port, req);

    xseg_put_request(s->xseg, req, s->srcport);

err_exit:
    g_free(s->volname);
    g_free(s->segment_name);
    xseg_quit_local_signal(s->xseg, s->srcport);
    xseg_leave_dynport(s->xseg, s->port);
    xseg_leave(s->xseg);
}
Example #19
/* PFIFO - MMIO and DMA FIFO submission to PGRAPH and VPE */
uint64_t pfifo_read(void *opaque, hwaddr addr, unsigned int size)
{
    NV2AState *d = (NV2AState *)opaque;

    qemu_mutex_lock(&d->pfifo.lock);

    uint64_t r = 0;
    switch (addr) {
    case NV_PFIFO_INTR_0:
        r = d->pfifo.pending_interrupts;
        break;
    case NV_PFIFO_INTR_EN_0:
        r = d->pfifo.enabled_interrupts;
        break;
    case NV_PFIFO_RUNOUT_STATUS:
        r = NV_PFIFO_RUNOUT_STATUS_LOW_MARK; /* low mark empty */
        break;
    default:
        r = d->pfifo.regs[addr];
        break;
    }

    qemu_mutex_unlock(&d->pfifo.lock);

    reg_log_read(NV_PFIFO, addr, r);
    return r;
}
Example #20
/**
 * Map guest physical address to host pointer
 */
void *hostmem_lookup(HostMem *hostmem, uint64_t phys, uint64_t len,
                     bool is_write)
{
    struct vhost_memory_region *found = NULL;
    void *host_addr = NULL;
    uint64_t offset_within_region;
    unsigned int i;

    is_write = is_write; /* r/w information is currently not tracked */

    qemu_mutex_lock(&hostmem->mem_lock);
    for (i = 0; i < hostmem->mem->nregions; i++) {
        struct vhost_memory_region *region = &hostmem->mem->regions[i];

        if (range_covers_byte(region->guest_phys_addr,
                              region->memory_size,
                              phys)) {
            found = region;
            break;
        }
    }
    if (!found) {
        goto out;
    }
    offset_within_region = phys - found->guest_phys_addr;
    if (len <= found->memory_size - offset_within_region) {
        host_addr = (void *)(uintptr_t)(found->userspace_addr +
                                       offset_within_region);
    }
out:
    qemu_mutex_unlock(&hostmem->mem_lock);

    return host_addr;
}
Example #21
static int tpm_emulator_ctrlcmd(TPMEmulator *tpm, unsigned long cmd, void *msg,
                                size_t msg_len_in, size_t msg_len_out)
{
    CharBackend *dev = &tpm->ctrl_chr;
    uint32_t cmd_no = cpu_to_be32(cmd);
    ssize_t n = sizeof(uint32_t) + msg_len_in;
    uint8_t *buf = NULL;
    int ret = -1;

    qemu_mutex_lock(&tpm->mutex);

    buf = g_alloca(n);
    memcpy(buf, &cmd_no, sizeof(cmd_no));
    memcpy(buf + sizeof(cmd_no), msg, msg_len_in);

    n = qemu_chr_fe_write_all(dev, buf, n);
    if (n <= 0) {
        goto end;
    }

    if (msg_len_out != 0) {
        n = qemu_chr_fe_read_all(dev, msg, msg_len_out);
        if (n <= 0) {
            goto end;
        }
    }

    ret = 0;

end:
    qemu_mutex_unlock(&tpm->mutex);
    return ret;
}
Example #22
static void edu_mmio_write(void *opaque, hwaddr addr, uint64_t val,
                unsigned size)
{
    EduState *edu = opaque;

    if (addr < 0x80 && size != 4) {
        return;
    }

    if (addr >= 0x80 && size != 4 && size != 8) {
        return;
    }

    switch (addr) {
    case 0x04:
        edu->addr4 = ~val;
        break;
    case 0x08:
        if (atomic_read(&edu->status) & EDU_STATUS_COMPUTING) {
            break;
        }
        /* EDU_STATUS_COMPUTING cannot go 0->1 concurrently, because it is only
         * set in this function and it is under the iothread mutex.
         */
        qemu_mutex_lock(&edu->thr_mutex);
        edu->fact = val;
        atomic_or(&edu->status, EDU_STATUS_COMPUTING);
        qemu_cond_signal(&edu->thr_cond);
        qemu_mutex_unlock(&edu->thr_mutex);
        break;
    case 0x20:
        if (val & EDU_STATUS_IRQFACT) {
            atomic_or(&edu->status, EDU_STATUS_IRQFACT);
        } else {
            atomic_and(&edu->status, ~EDU_STATUS_IRQFACT);
        }
        break;
    case 0x60:
        edu_raise_irq(edu, val);
        break;
    case 0x64:
        edu_lower_irq(edu, val);
        break;
    case 0x80:
        dma_rw(edu, true, &val, &edu->dma.src, false);
        break;
    case 0x88:
        dma_rw(edu, true, &val, &edu->dma.dst, false);
        break;
    case 0x90:
        dma_rw(edu, true, &val, &edu->dma.cnt, false);
        break;
    case 0x98:
        if (!(val & EDU_DMA_RUN)) {
            break;
        }
        dma_rw(edu, true, &val, &edu->dma.cmd, true);
        break;
    }
}
Example #23
static void test_acquire(void)
{
    QemuThread thread;
    AcquireTestData data;

    /* Dummy event notifier ensures aio_poll() will block */
    event_notifier_init(&data.notifier, false);
    set_event_notifier(ctx, &data.notifier, dummy_notifier_read);
    g_assert(!aio_poll(ctx, false)); /* consume aio_notify() */

    qemu_mutex_init(&data.start_lock);
    qemu_mutex_lock(&data.start_lock);
    data.thread_acquired = false;

    qemu_thread_create(&thread, "test_acquire_thread",
                       test_acquire_thread,
                       &data, QEMU_THREAD_JOINABLE);

    /* Block in aio_poll(), let other thread kick us and acquire context */
    aio_context_acquire(ctx);
    qemu_mutex_unlock(&data.start_lock); /* let the thread run */
    g_assert(aio_poll(ctx, true));
    g_assert(!data.thread_acquired);
    aio_context_release(ctx);

    qemu_thread_join(&thread);
    set_event_notifier(ctx, &data.notifier, NULL);
    event_notifier_cleanup(&data.notifier);

    g_assert(data.thread_acquired);
}
Example #24
File: char.c Project: mdroth/qemu
static int qemu_chr_write_buffer(Chardev *s,
                                 const uint8_t *buf, int len,
                                 int *offset, bool write_all)
{
    ChardevClass *cc = CHARDEV_GET_CLASS(s);
    int res = 0;
    *offset = 0;

    qemu_mutex_lock(&s->chr_write_lock);
    while (*offset < len) {
    retry:
        res = cc->chr_write(s, buf + *offset, len - *offset);
        if (res < 0 && errno == EAGAIN && write_all) {
            g_usleep(100);
            goto retry;
        }

        if (res <= 0) {
            break;
        }

        *offset += res;
        if (!write_all) {
            break;
        }
    }
    if (*offset > 0) {
        qemu_chr_write_log(s, buf, *offset);
    }
    qemu_mutex_unlock(&s->chr_write_lock);

    return res;
}
Example #25
void qxl_render_update_area_bh(void *opaque)
{
    PCIQXLDevice *qxl = opaque;

    qemu_mutex_lock(&qxl->ssd.lock);
    qxl_render_update_area_unlocked(qxl);
    qemu_mutex_unlock(&qxl->ssd.lock);
}
Example #26
void qemu_mutex_lock_iothread(void)
{
    if (kvm_enabled()) {
        qemu_mutex_lock(&qemu_fair_mutex);
        qemu_mutex_lock(&qemu_global_mutex);
        qemu_mutex_unlock(&qemu_fair_mutex);
    } else {
        qemu_signal_lock(100);
    }
}
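
(The qemu_signal_lock() helper called here is shown in Example #30 at the end of this list.)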
Example #27
static void qemu_wait_io_event(CPUState *env)
{
    while (!tcg_has_work()) {
        qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);
    }

    qemu_mutex_unlock(&qemu_global_mutex);

    /*
     * Users of qemu_global_mutex can be starved, having no chance
     * to acquire it since this path will get to it first.
     * So use another lock to provide fairness.
     */
    qemu_mutex_lock(&qemu_fair_mutex);
    qemu_mutex_unlock(&qemu_fair_mutex);

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_wait_io_event_common(env);
}
Example #28
void qxl_render_update_area_done(PCIQXLDevice *qxl, QXLCookie *cookie)
{
    qemu_mutex_lock(&qxl->ssd.lock);
    trace_qxl_render_update_area_done(cookie);
    qemu_bh_schedule(qxl->update_area_bh);
    qxl->render_update_cookie_num--;
    qemu_mutex_unlock(&qxl->ssd.lock);
    g_free(cookie);
}
Example #29
/*
 * We purposely use a thread, so that users are forced to wait for the status
 * register.
 */
static void *edu_fact_thread(void *opaque)
{
    EduState *edu = opaque;

    while (1) {
        uint32_t val, ret = 1;

        qemu_mutex_lock(&edu->thr_mutex);
        while ((atomic_read(&edu->status) & EDU_STATUS_COMPUTING) == 0 &&
               !edu->stopping) {
            qemu_cond_wait(&edu->thr_cond, &edu->thr_mutex);
        }

        if (edu->stopping) {
            qemu_mutex_unlock(&edu->thr_mutex);
            break;
        }

        val = edu->fact;
        qemu_mutex_unlock(&edu->thr_mutex);

        while (val > 0) {
            ret *= val--;
        }

        /*
         * We should sleep for a random period here, so that students are
         * forced to check the status properly.
         */

        qemu_mutex_lock(&edu->thr_mutex);
        edu->fact = ret;
        qemu_mutex_unlock(&edu->thr_mutex);
        atomic_and(&edu->status, ~EDU_STATUS_COMPUTING);

        if (atomic_read(&edu->status) & EDU_STATUS_IRQFACT) {
            qemu_mutex_lock_iothread();
            edu_raise_irq(edu, FACT_IRQ);
            qemu_mutex_unlock_iothread();
        }
    }

    return NULL;
}
Example #30
static void qemu_signal_lock(unsigned int msecs)
{
    qemu_mutex_lock(&qemu_fair_mutex);

    while (qemu_mutex_trylock(&qemu_global_mutex)) {
        qemu_thread_signal(tcg_cpu_thread, SIGUSR1);
        if (!qemu_mutex_timedlock(&qemu_global_mutex, msecs)) {
            break;
        }
    }
    qemu_mutex_unlock(&qemu_fair_mutex);
}