/* MMIO write handler for the PFIFO register block.
 *
 * Interrupt-status and interrupt-enable writes update the device IRQ
 * level; any other offset is stored raw into the register file.  Both
 * worker condition variables are broadcast after every write so the
 * pusher/puller threads re-examine the new register state.
 */
void pfifo_write(void *opaque, hwaddr addr, uint64_t val, unsigned int size)
{
    NV2AState *d = (NV2AState *)opaque;

    reg_log_write(NV_PFIFO, addr, val);

    qemu_mutex_lock(&d->pfifo.lock);
    if (addr == NV_PFIFO_INTR_0) {
        /* bits set in val clear the corresponding pending bits */
        d->pfifo.pending_interrupts &= ~val;
        update_irq(d);
    } else if (addr == NV_PFIFO_INTR_EN_0) {
        d->pfifo.enabled_interrupts = val;
        update_irq(d);
    } else {
        d->pfifo.regs[addr] = val;
    }
    /* wake both FIFO threads so they observe the register change */
    qemu_cond_broadcast(&d->pfifo.pusher_cond);
    qemu_cond_broadcast(&d->pfifo.puller_cond);
    qemu_mutex_unlock(&d->pfifo.lock);
}
/* Kick a vcpu: wake its thread if it is blocked on the halt condition,
 * and under KVM additionally signal the thread with SIGUSR1 so it
 * drops back out of the kernel. */
void qemu_cpu_kick(void *_env)
{
    CPUState *cpu_state = _env;

    qemu_cond_broadcast(cpu_state->halt_cond);
    if (kvm_enabled()) {
        qemu_thread_signal(cpu_state->thread, SIGUSR1);
    }
}
/* Finish an exclusive operation.
 *
 * Under qemu_cpu_list_lock: reset the pending-CPU counter to zero and
 * broadcast exclusive_resume so every CPU blocked waiting for the
 * exclusive section to finish can continue. */
void end_exclusive(void)
{
    qemu_mutex_lock(&qemu_cpu_list_lock);
    /* Cleared before the broadcast so woken waiters see pending_cpus == 0. */
    atomic_set(&pending_cpus, 0);
    qemu_cond_broadcast(&exclusive_resume);
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}
/* Kick a vcpu thread: wake it if it sleeps on the halt condition, then
 * unconditionally deliver SIG_IPI to the thread. */
void qemu_cpu_kick(void *_env)
{
    CPUState *cpu_state = _env;

    qemu_cond_broadcast(cpu_state->halt_cond);
    qemu_thread_signal(cpu_state->thread, SIG_IPI);
}
/* Kick a vcpu: always wake its halt condition; outside TCG, also kick
 * the thread itself, but at most once until thread_kicked is reset. */
void qemu_cpu_kick(CPUState *cpu)
{
    qemu_cond_broadcast(cpu->halt_cond);
    if (tcg_enabled() || cpu->thread_kicked) {
        return;
    }
    qemu_cpu_kick_thread(cpu);
    cpu->thread_kicked = true;
}
/* Kick a vcpu: wake its halt condition; under KVM also kick the vcpu
 * thread, suppressing duplicate kicks via the thread_kicked flag. */
void qemu_cpu_kick(void *_env)
{
    CPUState *cpu = _env;

    qemu_cond_broadcast(cpu->halt_cond);
    if (!kvm_enabled() || cpu->thread_kicked) {
        return;
    }
    qemu_cpu_kick_thread(cpu);
    cpu->thread_kicked = true;
}
/* Release one level of a recursive FIFO lock.
 *
 * The caller must hold the lock (checked by the owner-thread assert).
 * Only when the outermost nesting level is released does the ticket
 * head advance and the condition broadcast wake the next waiter. */
void rfifolock_unlock(RFifoLock *r)
{
    qemu_mutex_lock(&r->lock);
    assert(r->nesting > 0);
    assert(qemu_thread_is_self(&r->owner_thread));
    r->nesting--;
    if (r->nesting == 0) {
        /* fully released: hand the lock to the next ticket holder */
        r->head++;
        qemu_cond_broadcast(&r->cond);
    }
    qemu_mutex_unlock(&r->lock);
}
void qemu_cpu_kick(void *_env) { CPUArchState *env = _env; #ifdef CONFIG_S2E_DEBUG s2e_debug_print("MAIN: qemu_cpu_kick: qemu_cond_broadcast(env->halt_cond) %p\n", env->halt_cond); #endif qemu_cond_broadcast(env->halt_cond); if (!tcg_enabled() && !env->thread_kicked) { qemu_cpu_kick_thread(env); env->thread_kicked = true; } }
/* Acquire the global iothread mutex.
 *
 * Outside TCG a plain lock suffices.  Under TCG, first try the lock;
 * if a TCG thread holds it, raise iothread_requesting_mutex and kick
 * the first CPU so the holder yields, then block on the lock.  The
 * qemu_io_proceeded_cond broadcast lets CPU threads resume. */
void qemu_mutex_lock_iothread(void)
{
    if (!tcg_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
        return;
    }
    iothread_requesting_mutex = true;
    if (qemu_mutex_trylock(&qemu_global_mutex)) {
        /* contended: kick the TCG thread so it releases the mutex */
        qemu_cpu_kick_thread(ENV_GET_CPU(first_cpu));
        qemu_mutex_lock(&qemu_global_mutex);
    }
    iothread_requesting_mutex = false;
    qemu_cond_broadcast(&qemu_io_proceeded_cond);
}
/* Drain the CPU's queued-work list: run each item's callback, mark it
 * done, then broadcast qemu_work_cond so waiters can proceed.  A no-op
 * when the queue is empty. */
static void flush_queued_work(CPUState *env)
{
    struct qemu_work_item *item = env->queued_work_first;

    if (item == NULL) {
        return;
    }
    do {
        env->queued_work_first = item->next;
        item->func(item->data);
        item->done = true;
    } while ((item = env->queued_work_first) != NULL);
    env->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}
/* Run every pending work item queued on @cpu, flagging each as done,
 * then clear the tail pointer and broadcast qemu_work_cond.  Returns
 * immediately if nothing is queued. */
static void flush_queued_work(CPUState *cpu)
{
    struct qemu_work_item *item;

    if (!cpu->queued_work_first) {
        return;
    }
    for (item = cpu->queued_work_first; item != NULL;
         item = cpu->queued_work_first) {
        cpu->queued_work_first = item->next;
        item->func(item->data);
        item->done = true;
    }
    cpu->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}
/* Run all work items queued on @cpu.
 *
 * Items are dequeued under cpu->work_mutex, but each callback runs with
 * the mutex dropped.  Items marked exclusive additionally run inside a
 * start_exclusive()/end_exclusive() section with the BQL released (see
 * the inline comment for why).  Finished items are either freed
 * (wi->free) or have wi->done set with a memory barrier; finally
 * qemu_work_cond is broadcast so threads waiting on completion wake. */
void process_queued_cpu_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    /* Unlocked fast path: nothing queued, nothing to do. */
    if (cpu->queued_work_first == NULL) {
        return;
    }
    qemu_mutex_lock(&cpu->work_mutex);
    while (cpu->queued_work_first != NULL) {
        /* Dequeue the head item under work_mutex. */
        wi = cpu->queued_work_first;
        cpu->queued_work_first = wi->next;
        if (!cpu->queued_work_first) {
            cpu->queued_work_last = NULL;
        }
        /* Drop work_mutex while the callback runs. */
        qemu_mutex_unlock(&cpu->work_mutex);
        if (wi->exclusive) {
            /* Running work items outside the BQL avoids the following deadlock:
             * 1) start_exclusive() is called with the BQL taken while another
             * CPU is running; 2) cpu_exec in the other CPU tries to take the
             * BQL, so it goes to sleep; start_exclusive() is sleeping too, so
             * neither CPU can proceed. */
            qemu_mutex_unlock_iothread();
            start_exclusive();
            wi->func(cpu, wi->data);
            end_exclusive();
            qemu_mutex_lock_iothread();
        } else {
            wi->func(cpu, wi->data);
        }
        /* Reacquire work_mutex before touching the item and the queue again. */
        qemu_mutex_lock(&cpu->work_mutex);
        if (wi->free) {
            g_free(wi);
        } else {
            /* Barrier-set done so a waiter polling wi->done sees the
             * callback's effects first. */
            atomic_mb_set(&wi->done, true);
        }
    }
    qemu_mutex_unlock(&cpu->work_mutex);
    qemu_cond_broadcast(&qemu_work_cond);
}
void qemu_mutex_lock_iothread(void) { LOGD_CPUS("%s1\n", __func__); if (!tcg_enabled()) { LOGD_CPUS("%s2\n", __func__); qemu_mutex_lock(&qemu_global_mutex); LOGD_CPUS("%s3\n", __func__); } else { LOGD_CPUS("%s4\n", __func__); iothread_requesting_mutex = true; if (qemu_mutex_trylock(&qemu_global_mutex)) { LOGD_CPUS("%s5\n", __func__); qemu_cpu_kick_thread(first_cpu); LOGD_CPUS("%s6\n", __func__); qemu_mutex_lock(&qemu_global_mutex); LOGD_CPUS("%s7\n", __func__); } LOGD_CPUS("%s8\n", __func__); iothread_requesting_mutex = false; LOGD_CPUS("%s9\n", __func__); qemu_cond_broadcast(&qemu_io_proceeded_cond); LOGD_CPUS("%s10\n", __func__); } }
/* Mark the system as ready and broadcast qemu_system_cond to wake any
 * thread blocked waiting on it.  The flag is set before the broadcast
 * so woken waiters observe qemu_system_ready == 1. */
void qemu_main_loop_start(void)
{
    qemu_system_ready = 1;
    qemu_cond_broadcast(&qemu_system_cond);
}
/* Kick a vcpu by broadcasting its halt condition variable, waking the
 * thread if it is blocked waiting on halt_cond.  No thread signalling
 * is performed in this variant. */
void qemu_cpu_kick(CPUState *cpu)
{
    qemu_cond_broadcast(cpu->halt_cond);
}