static void tcg_exec_all(void)
{
    int r;

    /* Account partial waits to the vm_clock.  */
    qemu_clock_warp(vm_clock);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
        CPUArchState *env = next_cpu;
        CPUState *cpu = ENV_GET_CPU(env);

        qemu_clock_enable(vm_clock,
                          (env->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (cpu_can_run(cpu)) {
            r = tcg_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(env);
                break;
            }
        } else if (cpu->stop || cpu->stopped) {
            break;
        }
    }
    exit_request = 0;
}
static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUArchState *env = arg;
    CPUState *cpu = ENV_GET_CPU(env);
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    cpu_single_env = env;

    r = kvm_init_vcpu(env);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(env);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    while (1) {
        if (cpu_can_run(cpu)) {
            r = kvm_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(env);
            }
        }
        qemu_kvm_wait_io_event(env);
    }

    return NULL;
}
bool tcg_cpu_exec(void)
{
    int ret = 0;

    if (next_cpu == NULL)
        next_cpu = first_cpu;
    for (; next_cpu != NULL; next_cpu = next_cpu->next_cpu) {
        CPUState *env = cur_cpu = next_cpu;

        qemu_clock_enable(vm_clock,
                          (cur_cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (qemu_alarm_pending())
            break;
        if (cpu_can_run(env))
            ret = qemu_cpu_exec(env);
        else if (env->stop)
            break;

        if (ret == EXCP_DEBUG) {
            gdb_set_stop_cpu(env);
            debug_requested = EXCP_DEBUG;
            break;
        }
    }
    return tcg_has_work();
}
static void tcg_exec_all(void)
{
    int r;

    /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */
    qemu_clock_warp(QEMU_CLOCK_VIRTUAL);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
        CPUState *cpu = next_cpu;
        CPUArchState *env = cpu->env_ptr;

        qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
                          (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (cpu_can_run(cpu)) {
            r = tcg_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
                break;
            }
        } else if (cpu->stop || cpu->stopped) {
            break;
        }
    }
    exit_request = 0;
}
static void *kvm_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_self(env->thread);
    if (kvm_enabled())
        kvm_init_vcpu(env);

    kvm_init_ipi(env);

    /* signal CPU creation */
    env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready)
        qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);

    while (1) {
        if (cpu_can_run(env))
            qemu_cpu_exec(env);
        qemu_kvm_wait_io_event(env);
    }

    return NULL;
}
bool cpu_exec_all(void)
{
    int r;

    /* Account partial waits to the vm_clock.  */
    qemu_clock_warp(vm_clock);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
        CPUState *env = next_cpu;

        qemu_clock_enable(vm_clock,
                          (env->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (cpu_can_run(env)) {
            if (kvm_enabled()) {
                r = kvm_cpu_exec(env);
                qemu_kvm_eat_signals(env);
            } else {
                r = tcg_cpu_exec(env);
            }
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(env);
                break;
            }
        } else if (env->stop || env->stopped) {
            break;
        }
    }
    exit_request = 0;
    return !all_cpu_threads_idle();
}
static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(env->thread);
    env->thread_id = qemu_get_thread_id();

    r = kvm_init_vcpu(env);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(env);

    /* signal CPU creation */
    env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    // TLC profiling
    tlc_cpu_profile_init(env);

    while (1) {
        if (cpu_can_run(env)) {
            r = kvm_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(env);
            }
        }
        qemu_kvm_wait_io_event(env);
    }

    return NULL;
}
bool cpu_exec_all(void)
{
    static int i = 0;

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
        i = 0;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu, i++) {
        CPUState *env = next_cpu;

        rr_run = env->kvm_run;
        cpu_number = i;

        qemu_clock_enable(vm_clock,
                          (env->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (qemu_alarm_pending())
            break;
        if (cpu_can_run(env)) {
            if (qemu_cpu_exec(env) == EXCP_DEBUG) {
                break;
            }
        } else if (env->stop) {
            break;
        }
    }
    exit_request = 0;
    return any_cpu_has_work();
}
static bool tcg_exec_all(struct uc_struct* uc)
{
    int r;
    bool finish = false;

    while (!uc->exit_request) {
        CPUState *cpu = uc->cpu;
        CPUArchState *env = cpu->env_ptr;

        //qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
        //                  (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);
        if (cpu_can_run(cpu)) {
            uc->quit_request = false;
            r = tcg_cpu_exec(uc, env);

            // quit current TB but continue emulating?
            if (uc->quit_request) {
                // reset stop_request
                uc->stop_request = false;
            } else if (uc->stop_request) {
                //printf(">>> got STOP request!!!\n");
                finish = true;
                break;
            }

            // save invalid memory access error & quit
            if (env->invalid_error) {
                // printf(">>> invalid memory accessed, STOP = %u!!!\n", env->invalid_error);
                uc->invalid_addr = env->invalid_addr;
                uc->invalid_error = env->invalid_error;
                finish = true;
                break;
            }

            // printf(">>> stop with r = %x, HLT=%x\n", r, EXCP_HLT);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
                break;
            }
            if (r == EXCP_HLT) {
                //printf(">>> got HLT!!!\n");
                finish = true;
                break;
            }
        } else if (cpu->stop || cpu->stopped) {
            printf(">>> got stopped!!!\n");
            break;
        }
    }

    uc->exit_request = 0;

    return finish;
}
bool cpu_exec_all(void)
{
    int r;

    /* Account partial waits to the vm_clock.  */
    qemu_clock_warp(vm_clock);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
        CPUState *env = next_cpu;

        qemu_clock_enable(vm_clock,
                          (env->singlestep_enabled & SSTEP_NOTIMER) == 0);

#ifndef CONFIG_IOTHREAD
        if (qemu_alarm_pending()) {
            break;
        }
#endif
        if (cpu_can_run(env)) {
            if (kvm_enabled()) {
                r = kvm_cpu_exec(env);
                qemu_kvm_eat_signals(env);
            } else {
                r = tcg_cpu_exec(env);
            }
            if (r == EXCP_TRIPLE) {
                cpu_dump_state(env, stderr, fprintf, 0);
                fprintf(stderr, "Triple fault. Halting for inspection via"
                        " QEMU monitor.\n");
                if (gdbserver_running())
                    r = EXCP_DEBUG;
                else {
                    vm_stop(0);
                    break;
                }
            }
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(env);
                break;
            }
        } else if (env->stop || env->stopped) {
            break;
        }
    }
    exit_request = 0;
    return !all_cpu_threads_idle();
}
bool cpu_exec_all(void)
{
    if (next_cpu == NULL)
        next_cpu = first_cpu;
    for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
        CPUState *env = next_cpu;

        qemu_clock_enable(vm_clock,
                          (env->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (qemu_alarm_pending())
            break;
        if (cpu_can_run(env)) {
            if (qemu_cpu_exec(env) == EXCP_DEBUG) {
                break;
            }
        } else if (env->stop) {
#ifdef MARSS_QEMU
            continue; /* Let other CPUS execute */
#else
            break;
#endif
        }
#ifdef MARSS_QEMU
        if (in_simulation)
            /* No need to run for other cpus */
            break;
#endif
    }
#ifdef MARSS_QEMU
    if (!any_cpu_has_work()) {
        /* All CPUs are paused, call ptl_simpoint reached
         * to check if we need to switch to simulation or not */
        ptl_simpoint_reached(0);
    }
#endif
    exit_request = 0;
    return any_cpu_has_work();
}
void tcg_cpu_exec(void)
{
    int ret = 0;

    if (next_cpu == NULL)
        next_cpu = first_cpu;
    for (; next_cpu != NULL; next_cpu = next_cpu->next_cpu) {
        CPUState *env = cur_cpu = next_cpu;

        if (!vm_running)
            break;
        if (qemu_timer_alarm_pending()) {
            break;
        }
        if (cpu_can_run(env))
            ret = qemu_cpu_exec(env);
        if (ret == EXCP_DEBUG) {
            gdb_set_stop_cpu(env);
            debug_requested = 1;
            break;
        }
    }
}