void pause_all_vcpus(void)
{
    CPUArchState *penv = first_cpu;

    qemu_clock_enable(vm_clock, false);
    while (penv) {
        penv->stop = 1;
        qemu_cpu_kick(penv);
        penv = penv->next_cpu;
    }

    if (!qemu_thread_is_self(&io_thread)) {
        cpu_stop_current();
        if (!kvm_enabled()) {
            penv = first_cpu;   /* penv is NULL after the kick loop above */
            while (penv) {
                penv->stop = 0;
                penv->stopped = 1;
                penv = penv->next_cpu;
            }
            return;
        }
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        penv = first_cpu;
        while (penv) {
            qemu_cpu_kick(penv);
            penv = penv->next_cpu;
        }
    }
}
void rfifolock_unlock(RFifoLock *r)
{
    qemu_mutex_lock(&r->lock);
    assert(r->nesting > 0);
    assert(qemu_thread_is_self(&r->owner_thread));
    if (--r->nesting == 0) {
        r->head++;
        qemu_cond_broadcast(&r->cond);
    }
    qemu_mutex_unlock(&r->lock);
}
void vm_stop(int reason)
{
    if (!qemu_thread_is_self(&io_thread)) {
        qemu_system_vmstop_request(reason);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return;
    }
    do_vm_stop(reason);
}
static void qemu_cpu_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_thread_is_self(cpu->thread)) {
        CONTEXT tcgContext;

        if (SuspendThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }

        /* On multi-core systems, we are not sure that the thread is actually
         * suspended until we can get the context.
         */
        tcgContext.ContextFlags = CONTEXT_CONTROL;
        while (GetThreadContext(cpu->hThread, &tcgContext) != 0) {
            continue;
        }

        // FIXME(danghvu): anysignal ?
        // cpu_signal(0);

        if (ResumeThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }
    }
#endif
}
void pause_all_vcpus(void)
{
    CPUArchState *penv = first_cpu;

    qemu_clock_enable(vm_clock, false);
    while (penv) {
#ifdef CONFIG_S2E_DEBUG
        s2e_debug_print("MAIN: pause_all_vcpus kicking cpus\n");
#endif
        penv->stop = 1;
        qemu_cpu_kick(penv);
        penv = penv->next_cpu;
    }

    if (!qemu_thread_is_self(&io_thread)) {
        cpu_stop_current();
        if (!kvm_enabled()) {
            penv = first_cpu;   /* penv is NULL after the kick loop above */
            while (penv) {
                penv->stop = 0;
                penv->stopped = 1;
                penv = penv->next_cpu;
            }
            return;
        }
    }

    while (!all_vcpus_paused()) {
#ifdef CONFIG_S2E_DEBUG
        s2e_debug_print("MAIN: pause_all_vcpus waiting for qemu_pause_cond\n");
#endif
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        penv = first_cpu;
        while (penv) {
            qemu_cpu_kick(penv);
            penv = penv->next_cpu;
        }
    }
}
/*
 * Theory of operation:
 *
 * In order to ensure FIFO ordering, implement a ticketlock. Threads acquiring
 * the lock enqueue themselves by incrementing the tail index. When the lock
 * is unlocked, the head is incremented and waiting threads are notified.
 *
 * Recursive locking does not take a ticket since the head is only incremented
 * when the outermost recursive caller unlocks.
 */
void rfifolock_lock(RFifoLock *r)
{
    qemu_mutex_lock(&r->lock);

    /* Take a ticket */
    unsigned int ticket = r->tail++;

    if (r->nesting > 0 && qemu_thread_is_self(&r->owner_thread)) {
        r->tail--; /* put ticket back, we're nesting */
    } else {
        while (ticket != r->head) {
            /* Invoke optional contention callback */
            if (r->cb) {
                r->cb(r->cb_opaque);
            }
            qemu_cond_wait(&r->cond, &r->lock);
        }
    }

    qemu_thread_get_self(&r->owner_thread);
    r->nesting++;
    qemu_mutex_unlock(&r->lock);
}
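For context, here is a minimal usage sketch of the RFifoLock lock/unlock pair above. It assumes the init/destroy helpers rfifolock_init() and rfifolock_destroy() and the void (*cb)(void *) contention-callback signature from QEMU's rfifolock header; the nested lock/unlock calls illustrate the recursive case that returns its ticket instead of queueing.

/* Hypothetical sketch: recursive acquisition of an RFifoLock.
 * rfifolock_init()/rfifolock_destroy() and the callback signature are
 * assumptions taken from the surrounding API, not shown in this listing. */
static RFifoLock ctx_lock;

static void on_contention(void *opaque)
{
    /* e.g. nudge the current owner so it releases the lock sooner */
}

static void rfifolock_example(void)
{
    rfifolock_init(&ctx_lock, on_contention, NULL);

    rfifolock_lock(&ctx_lock);      /* takes a ticket, becomes owner */
    rfifolock_lock(&ctx_lock);      /* same thread: nests, no new ticket */
    rfifolock_unlock(&ctx_lock);    /* nesting drops to 1, lock still held */
    rfifolock_unlock(&ctx_lock);    /* head advances, waiters are broadcast */

    rfifolock_destroy(&ctx_lock);
}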
bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}
int qemu_cpu_is_self(void *_env)
{
    CPUState *env = _env;

    return qemu_thread_is_self(env->thread);
}
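A small sketch of a typical call site for the two qemu_cpu_is_self() variants above: the caller checks thread identity before signalling, mirroring the !qemu_thread_is_self() guard in qemu_cpu_kick_thread(). The helper name do_kick_if_remote() is made up for illustration.

/* Hypothetical helper (do_kick_if_remote is not a QEMU function):
 * only kick the vCPU when the caller runs in a different thread. */
static void do_kick_if_remote(CPUState *cpu)
{
    if (qemu_cpu_is_self(cpu)) {
        return;                 /* already executing on this vCPU thread */
    }
    qemu_cpu_kick(cpu);         /* wake the remote vCPU thread */
}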