void pause_all_vcpus(void)
{
    CPUState *cpu = first_cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
    while (cpu) {
        cpu->stop = true;
        qemu_cpu_kick(cpu);
        cpu = cpu->next_cpu;
    }

    if (qemu_in_vcpu_thread()) {
        cpu_stop_current();
        if (!kvm_enabled()) {
            cpu = first_cpu;
            while (cpu) {
                cpu->stop = false;
                cpu->stopped = true;
                cpu = cpu->next_cpu;
            }
            return;
        }
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        cpu = first_cpu;
        while (cpu) {
            qemu_cpu_kick(cpu);
            cpu = cpu->next_cpu;
        }
    }
}
void pause_all_vcpus(void)
{
    CPUArchState *penv = first_cpu;

    qemu_clock_enable(vm_clock, false);
    while (penv) {
        CPUState *pcpu = ENV_GET_CPU(penv);
        pcpu->stop = true;
        qemu_cpu_kick(pcpu);
        penv = penv->next_cpu;
    }

    if (qemu_in_vcpu_thread()) {
        cpu_stop_current();
        if (!kvm_enabled()) {
            /* Restart from the head of the list: penv is NULL here after
             * the kick loop above, so the walk would otherwise do nothing. */
            penv = first_cpu;
            while (penv) {
                CPUState *pcpu = ENV_GET_CPU(penv);
                pcpu->stop = false;
                pcpu->stopped = true;
                penv = penv->next_cpu;
            }
            return;
        }
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        penv = first_cpu;
        while (penv) {
            qemu_cpu_kick(ENV_GET_CPU(penv));
            penv = penv->next_cpu;
        }
    }
}
void pause_all_vcpus(void)
{
    CPUArchState *penv = first_cpu;

    qemu_clock_enable(vm_clock, false);
    while (penv) {
        penv->stop = 1;
        qemu_cpu_kick(penv);
        penv = penv->next_cpu;
    }

    if (!qemu_thread_is_self(&io_thread)) {
        cpu_stop_current();
        if (!kvm_enabled()) {
            /* penv is NULL after the kick loop above; restart from the head
             * so every VCPU is actually marked as stopped. */
            penv = first_cpu;
            while (penv) {
                penv->stop = 0;
                penv->stopped = 1;
                penv = penv->next_cpu;
            }
            return;
        }
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        penv = first_cpu;
        while (penv) {
            qemu_cpu_kick(penv);
            penv = penv->next_cpu;
        }
    }
}
/* Start an exclusive operation.  Must only be called from outside cpu_exec.  */
void start_exclusive(void)
{
    CPUState *other_cpu;
    int running_cpus;

    qemu_mutex_lock(&qemu_cpu_list_lock);
    exclusive_idle();

    /* Make all other cpus stop executing.  */
    atomic_set(&pending_cpus, 1);

    /* Write pending_cpus before reading other_cpu->running.  */
    smp_mb();
    running_cpus = 0;
    CPU_FOREACH(other_cpu) {
        if (atomic_read(&other_cpu->running)) {
            other_cpu->has_waiter = true;
            running_cpus++;
            qemu_cpu_kick(other_cpu);
        }
    }

    atomic_set(&pending_cpus, running_cpus + 1);
    while (pending_cpus > 1) {
        qemu_cond_wait(&exclusive_cond, &qemu_cpu_list_lock);
    }

    /* Can release mutex, no one will enter another exclusive
     * section until end_exclusive resets pending_cpus to 0.
     */
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}
void run_on_cpu(CPUState *env, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_self(env)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    if (!env->queued_work_first) {
        env->queued_work_first = &wi;
    } else {
        env->queued_work_last->next = &wi;
    }
    env->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(env);
    while (!wi.done) {
        CPUState *self_env = cpu_single_env;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        cpu_single_env = self_env;
    }
}
static void openrisc_timer_cb(void *opaque)
{
    OpenRISCCPU *cpu = opaque;

    if ((cpu->env.ttmr & TTMR_IE) &&
        timer_expired(cpu->env.timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL))) {
        CPUState *cs = CPU(cpu);

        cpu->env.ttmr |= TTMR_IP;
        cs->interrupt_request |= CPU_INTERRUPT_TIMER;
    }

    switch (cpu->env.ttmr & TTMR_M) {
    case TIMER_NONE:
        break;
    case TIMER_INTR:
        or1k_timer->ttcr = 0;
        break;
    case TIMER_SHOT:
        cpu_openrisc_count_stop(cpu);
        break;
    case TIMER_CONT:
        break;
    }

    cpu_openrisc_timer_update(cpu);
    qemu_cpu_kick(CPU(cpu));
}
static void spin_kick(void *data)
{
    SpinKick *kick = data;
    CPUState *cpu = CPU(kick->cpu);
    CPUPPCState *env = &kick->cpu->env;
    SpinInfo *curspin = kick->spin;
    hwaddr map_size = 64 * 1024 * 1024;
    hwaddr map_start;

    cpu_synchronize_state(cpu);
    stl_p(&curspin->pir, env->spr[SPR_PIR]);
    env->nip = ldq_p(&curspin->addr) & (map_size - 1);
    env->gpr[3] = ldq_p(&curspin->r3);
    env->gpr[4] = 0;
    env->gpr[5] = 0;
    env->gpr[6] = 0;
    env->gpr[7] = map_size;
    env->gpr[8] = 0;
    env->gpr[9] = 0;

    map_start = ldq_p(&curspin->addr) & ~(map_size - 1);
    mmubooke_create_initial_mapping(env, 0, map_start, map_size);

    cpu->halted = 0;
    env->exception_index = -1;
    cpu->stopped = false;
    qemu_cpu_kick(cpu);
}
void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = &wi;
    } else {
        cpu->queued_work_last->next = &wi;
    }
    cpu->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(cpu);
    while (!wi.done) {
        CPUArchState *self_env = cpu_single_env;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        cpu_single_env = self_env;
    }
}
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *cpu, int mask)
{
    int old_mask;

    old_mask = cpu->interrupt_request;
    cpu->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
        return;
    }

    if (use_icount) {
        cpu->icount_decr.u16.high = 0xffff;
        if (!cpu->can_do_io && (mask & ~old_mask) != 0) {
            cpu_abort(cpu, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu->tcg_exit_req = 1;
    }
}
static void cpu_kick_irq(SPARCCPU *cpu)
{
    CPUSPARCState *env = &cpu->env;

    env->halted = 0;
    cpu_check_irqs(env);
    qemu_cpu_kick(CPU(cpu));
}
static void kvm_handle_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;

    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
    }
}
static void whpx_handle_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;

    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    }
}
static void cpu_kick_irq(SPARCCPU *cpu)
{
    CPUSPARCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    cs->halted = 0;
    cpu_check_irqs(env);
    qemu_cpu_kick(cs);
}
void pause_all_vcpus(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        penv->stop = 1;
        qemu_cpu_kick(penv);
        penv = (CPUState *)penv->next_cpu;
    }

    while (!all_vcpus_paused()) {
        qemu_cond_timedwait(&qemu_pause_cond, &qemu_global_mutex, 100);
        penv = first_cpu;
        while (penv) {
            qemu_cpu_kick(penv);
            penv = (CPUState *)penv->next_cpu;
        }
    }
}
void pause_all_vcpus(void)
{
    CPUState *penv = first_cpu;

    qemu_clock_enable(vm_clock, false);
    while (penv) {
        penv->stop = 1;
        qemu_cpu_kick(penv);
        penv = (CPUState *)penv->next_cpu;
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        penv = first_cpu;
        while (penv) {
            qemu_cpu_kick(penv);
            penv = (CPUState *)penv->next_cpu;
        }
    }
}
void resume_all_vcpus(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        penv->stop = 0;
        penv->stopped = 0;
        qemu_cpu_kick(penv);
        penv = (CPUState *)penv->next_cpu;
    }
}
static void rtas_start_cpu(PowerPCCPU *cpu_, sPAPRMachineState *spapr,
                           uint32_t token, uint32_t nargs,
                           target_ulong args,
                           uint32_t nret, target_ulong rets)
{
    target_ulong id, start, r3;
    PowerPCCPU *cpu;

    if (nargs != 3 || nret != 1) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    id = rtas_ld(args, 0);
    start = rtas_ld(args, 1);
    r3 = rtas_ld(args, 2);

    cpu = spapr_find_cpu(id);
    if (cpu != NULL) {
        CPUState *cs = CPU(cpu);
        CPUPPCState *env = &cpu->env;
        PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

        if (!cs->halted) {
            rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
            return;
        }

        /* This will make sure qemu state is up to date with kvm, and
         * mark it dirty so our changes get flushed back before the
         * new cpu enters */
        kvm_cpu_synchronize_state(cs);

        env->msr = (1ULL << MSR_SF) | (1ULL << MSR_ME);

        /* Enable Power-saving mode Exit Cause exceptions for the new CPU */
        env->spr[SPR_LPCR] |= pcc->lpcr_pm;

        env->nip = start;
        env->gpr[3] = r3;
        cs->halted = 0;
        spapr_cpu_set_endianness(cpu);
        spapr_cpu_update_tb_offset(cpu);

        qemu_cpu_kick(cs);

        rtas_st(rets, 0, RTAS_OUT_SUCCESS);
        return;
    }

    /* Didn't find a matching cpu */
    rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
}
void resume_all_vcpus(void)
{
    CPUArchState *penv = first_cpu;

    qemu_clock_enable(vm_clock, true);
    while (penv) {
        penv->stop = 0;
        penv->stopped = 0;
        qemu_cpu_kick(penv);
        penv = penv->next_cpu;
    }
}
void resume_all_vcpus(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        penv->stop = 0;
        penv->stopped = 0;
        qemu_thread_signal(penv->thread, SIGUSR1);
        qemu_cpu_kick(penv);
        penv = (CPUState *)penv->next_cpu;
    }
}
void resume_all_vcpus(void)
{
    CPUArchState *penv = first_cpu;

    qemu_clock_enable(vm_clock, true);
    while (penv) {
        CPUState *pcpu = ENV_GET_CPU(penv);
        pcpu->stop = false;
        pcpu->stopped = false;
        qemu_cpu_kick(pcpu);
        penv = penv->next_cpu;
    }
}
void pause_all_vcpus(void)
{
    CPUArchState *penv = first_cpu;

    qemu_clock_enable(vm_clock, false);
    while (penv) {
#ifdef CONFIG_S2E_DEBUG
        s2e_debug_print("MAIN: pause_all_vcpus kicking cpus\n");
#endif
        penv->stop = 1;
        qemu_cpu_kick(penv);
        penv = penv->next_cpu;
    }

    if (!qemu_thread_is_self(&io_thread)) {
        cpu_stop_current();
        if (!kvm_enabled()) {
            /* penv is NULL after the kick loop above; restart from the head
             * so every VCPU is actually marked as stopped. */
            penv = first_cpu;
            while (penv) {
                penv->stop = 0;
                penv->stopped = 1;
                penv = penv->next_cpu;
            }
            return;
        }
    }

    while (!all_vcpus_paused()) {
#ifdef CONFIG_S2E_DEBUG
        s2e_debug_print("MAIN: pause_all_vcpus waiting for qemu_pause_cond\n");
#endif
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        penv = first_cpu;
        while (penv) {
            qemu_cpu_kick(penv);
            penv = penv->next_cpu;
        }
    }
}
void pause_all_vcpus(void)
{
    CPUState *penv = first_cpu;

    qemu_clock_enable(vm_clock, false);
    while (penv) {
        penv->stop = 1;
        qemu_cpu_kick(penv);
        penv = (CPUState *)penv->next_cpu;
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        penv = first_cpu;
        while (penv) {
            qemu_cpu_kick(penv);
            penv = (CPUState *)penv->next_cpu;
        }
    }

    show_all_ifetch_counters();
    tcg_plugin_cpus_stopped();
}
void resume_all_vcpus(void)
{
    CPUArchState *penv = first_cpu;

    qemu_clock_enable(vm_clock, true);
    while (penv) {
        penv->stop = 0;
        penv->stopped = 0;
#ifdef CONFIG_S2E_DEBUG
        s2e_debug_print("MAIN: resume_all_vcpus kicking\n");
#endif
        qemu_cpu_kick(penv);
        penv = penv->next_cpu;
    }
}
static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
{
    qemu_mutex_lock(&cpu->work_mutex);
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = wi;
    } else {
        cpu->queued_work_last->next = wi;
    }
    cpu->queued_work_last = wi;
    wi->next = NULL;
    wi->done = false;
    qemu_mutex_unlock(&cpu->work_mutex);

    qemu_cpu_kick(cpu);
}
static void rtas_stop_self(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           uint32_t token, uint32_t nargs,
                           target_ulong args,
                           uint32_t nret, target_ulong rets)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    cs->halted = 1;
    qemu_cpu_kick(cs);

    /* Disable Power-saving mode Exit Cause exceptions for the CPU.
     * This could deliver an interrupt on a dying CPU and crash the
     * guest */
    env->spr[SPR_LPCR] &= ~pcc->lpcr_pm;
}
static void rtas_start_cpu(PowerPCCPU *cpu_, sPAPREnvironment *spapr,
                           uint32_t token, uint32_t nargs,
                           target_ulong args,
                           uint32_t nret, target_ulong rets)
{
    target_ulong id, start, r3;
    PowerPCCPU *cpu;

    if (nargs != 3 || nret != 1) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    id = rtas_ld(args, 0);
    start = rtas_ld(args, 1);
    r3 = rtas_ld(args, 2);

    cpu = ppc_get_vcpu_by_dt_id(id);
    if (cpu != NULL) {
        CPUState *cs = CPU(cpu);
        CPUPPCState *env = &cpu->env;

        if (!cs->halted) {
            rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
            return;
        }

        /* This will make sure qemu state is up to date with kvm, and
         * mark it dirty so our changes get flushed back before the
         * new cpu enters */
        kvm_cpu_synchronize_state(cs);

        env->msr = (1ULL << MSR_SF) | (1ULL << MSR_ME);
        env->nip = start;
        env->gpr[3] = r3;
        cs->halted = 0;

        qemu_cpu_kick(cs);

        rtas_st(rets, 0, RTAS_OUT_SUCCESS);
        return;
    }

    /* Didn't find a matching cpu */
    rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
}
void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item *wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi = g_malloc0(sizeof(struct qemu_work_item));
    wi->func = func;
    wi->data = data;
    wi->free = true;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = wi;
    } else {
        cpu->queued_work_last->next = wi;
    }
    cpu->queued_work_last = wi;
    wi->next = NULL;
    wi->done = false;

    qemu_cpu_kick(cpu);
}
static void cpu_kick_irq(CPUSPARCState *env)
{
    env->halted = 0;
    cpu_check_irqs(env);
    qemu_cpu_kick(env);
}
// cpus.c
void cpu_resume(CPUState *cpu)
{
    cpu->stop = false;
    cpu->stopped = false;
    qemu_cpu_kick(cpu);
}
static void kvm_kick_cpu(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    qemu_cpu_kick(CPU(cpu));
}