void pause_all_vcpus(void)
{
    CPUArchState *penv = first_cpu;

    qemu_clock_enable(vm_clock, false);
    while (penv) {
        CPUState *pcpu = ENV_GET_CPU(penv);
        pcpu->stop = true;
        qemu_cpu_kick(pcpu);
        penv = penv->next_cpu;
    }

    if (qemu_in_vcpu_thread()) {
        cpu_stop_current();
        if (!kvm_enabled()) {
            /* Restart from the head of the list: penv is NULL after the
             * loop above, so without this reset the loop below would be
             * dead code and the other vCPUs would never be marked
             * stopped. */
            penv = first_cpu;
            while (penv) {
                CPUState *pcpu = ENV_GET_CPU(penv);
                pcpu->stop = false;
                pcpu->stopped = true;
                penv = penv->next_cpu;
            }
            return;
        }
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        penv = first_cpu;
        while (penv) {
            qemu_cpu_kick(ENV_GET_CPU(penv));
            penv = penv->next_cpu;
        }
    }
}

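/* Usage sketch (hypothetical caller, not from the source): operations that
 * need a globally quiesced guest, e.g. saving machine state, bracket the
 * critical section with pause_all_vcpus()/resume_all_vcpus(). This assumes
 * the caller already holds the iothread mutex, since both functions wait
 * on qemu_pause_cond / touch per-CPU state under qemu_global_mutex. */
static void do_quiesced_operation(void)
{
    pause_all_vcpus();
    /* ... all vCPUs are stopped here; safe to walk guest state ... */
    resume_all_vcpus();
}
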
bool Context::check_events() const
{
    if (ENV_GET_CPU(this)->exit_request) {
        return true;
    }
    if (eflags & IF_MASK) {
        return ENV_GET_CPU(this)->interrupt_request > 0;
    }
    return false;
}

static stackid get_stackid(CPUArchState *env)
{
#ifdef USE_STACK_HEURISTIC
    target_ulong asid;

    // Track all kernel-mode stacks together
    if (in_kernelspace(env)) {
        asid = 0;
    } else {
        asid = panda_current_asid(ENV_GET_CPU(env));
    }

    // Invalidate cached stack pointer on ASID change
    if (cached_asid == 0 || cached_asid != asid) {
        cached_sp = 0;
        cached_asid = asid;
    }

    target_ulong sp = get_stack_pointer(env);

    // We can short-circuit the search in most cases.
    // (Differences are computed in a signed 64-bit type: std::abs on the
    // unsigned target_ulong difference would be ill-formed or wrap.)
    if (std::abs((int64_t)sp - (int64_t)cached_sp) < MAX_STACK_DIFF) {
        return std::make_pair(asid, cached_sp);
    }

    auto &stackset = stacks_seen[asid];
    if (stackset.empty()) {
        stackset.insert(sp);
        cached_sp = sp;
        return std::make_pair(asid, sp);
    } else {
        // Find the closest stack pointer we've seen, guarding both container
        // edges: lower_bound may return end() (sp above every element) or
        // begin() (no predecessor to step back to).
        auto lb = std::lower_bound(stackset.begin(), stackset.end(), sp);
        target_ulong stack1 = (lb != stackset.end()) ? *lb : *stackset.rbegin();
        target_ulong stack2 = (lb != stackset.begin()) ? *std::prev(lb) : stack1;
        target_ulong stack =
            (std::abs((int64_t)stack1 - (int64_t)sp) <
             std::abs((int64_t)stack2 - (int64_t)sp)) ? stack1 : stack2;
        int64_t diff = std::abs((int64_t)stack - (int64_t)sp);
        if (diff < MAX_STACK_DIFF) {
            return std::make_pair(asid, stack);
        } else {
            stackset.insert(sp);
            cached_sp = sp;
            return std::make_pair(asid, sp);
        }
    }
#else
    return panda_current_asid(ENV_GET_CPU(env));
#endif
}

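// Worked illustration of the heuristic (hypothetical numbers, assuming
// MAX_STACK_DIFF is on the order of a few pages): within one ASID, samples
// sp = 0x7fff8000 and sp = 0x7fff7e00 differ by 0x200, so both collapse to
// the same stackid, while a later sample at 0x7f000000 is farther than
// MAX_STACK_DIFF from every known stack and is inserted into stacks_seen
// as a new stack.
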
static int cpu_write_c_reg(CPUS390XState *env, uint8_t *mem_buf, int n)
{
    switch (n) {
    case S390_C0_REGNUM ... S390_C15_REGNUM:
        env->cregs[n] = ldtul_p(mem_buf);
        if (tcg_enabled()) {
            tlb_flush(ENV_GET_CPU(env), 1);
        }
        cpu_synchronize_post_init(ENV_GET_CPU(env));
        return 8;
    default:
        return 0;
    }
}

/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;

    assert_cpu_is_self(ENV_GET_CPU(env));
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
        target_ulong cmp;

        /* elt_ofs might correspond to .addr_write, so use atomic_read */
#if TCG_OVERSIZED_GUEST
        cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
#else
        cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
#endif

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];

            qemu_spin_lock(&env->tlb_c.lock);
            copy_tlb_helper_locked(&tmptlb, tlb);
            copy_tlb_helper_locked(tlb, vtlb);
            copy_tlb_helper_locked(vtlb, &tmptlb);
            qemu_spin_unlock(&env->tlb_c.lock);

            CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
            CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];
            tmpio = *io;
            *io = *vio;
            *vio = tmpio;
            return true;
        }
    }
    return false;
}

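/* For context: the callers below (get_page_addr_code, probe_write) reach
 * this helper through a wrapper macro that picks up env, mmu_idx and index
 * from the use context; in upstream QEMU it is defined essentially as: */
#define VICTIM_TLB_HIT(TY, ADDR) \
  victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                 (ADDR) & TARGET_PAGE_MASK)
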
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
{
    uintptr_t mmu_idx = cpu_mmu_index(env, true);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    void *p;

    if (unlikely(!tlb_hit(entry->addr_code, addr))) {
        if (!VICTIM_TLB_HIT(addr_code, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        assert(tlb_hit(entry->addr_code, addr));
    }

    if (unlikely(entry->addr_code & (TLB_RECHECK | TLB_MMIO))) {
        /*
         * Return -1 if we can't translate and execute from an entire
         * page of RAM here, which will cause us to execute by loading
         * and translating one insn at a time, without caching:
         *  - TLB_RECHECK: means the MMU protection covers a smaller range
         *    than a target page, so we must redo the MMU check every insn
         *  - TLB_MMIO: region is not backed by RAM
         */
        return -1;
    }

    p = (void *)((uintptr_t)addr + entry->addend);
    return qemu_ram_addr_from_host_nofail(p);
}

static int hax_vcpu_interrupt(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    struct hax_vcpu_state *vcpu = cpu->hax_vcpu;
    struct hax_tunnel *ht = vcpu->tunnel;

    /*
     * Try to inject an interrupt if the guest can accept it.
     * Unlike KVM, the HAX kernel module checks the guest's EFLAGS itself,
     * rather than leaving that check to QEMU.
     */
    if (ht->ready_for_interrupt_injection &&
        (cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
        int irq;

        irq = cpu_get_pic_interrupt(env);
        if (irq >= 0) {
            hax_inject_interrupt(env, irq);
            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
        }
    }

    /* If we have an interrupt but the guest is not ready to receive an
     * interrupt, request an interrupt window exit.  This will
     * cause a return to userspace as soon as the guest is ready to
     * receive interrupts. */
    if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
        ht->request_interrupt_window = 1;
    } else {
        ht->request_interrupt_window = 0;
    }
    return 0;
}

void helper_cmpxchg16b(CPUX86State *env, target_ulong a0)
{
    uintptr_t ra = GETPC();

    if ((a0 & 0xf) != 0) {
        raise_exception_ra(env, EXCP0D_GPF, ra);
    } else {
#ifndef CONFIG_ATOMIC128
        cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
#else
        int eflags = cpu_cc_compute_all(env, CC_OP);

        Int128 cmpv = int128_make128(env->regs[R_EAX], env->regs[R_EDX]);
        Int128 newv = int128_make128(env->regs[R_EBX], env->regs[R_ECX]);

        int mem_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
        Int128 oldv = helper_atomic_cmpxchgo_le_mmu(env, a0, cmpv, newv,
                                                    oi, ra);

        if (int128_eq(oldv, cmpv)) {
            eflags |= CC_Z;
        } else {
            env->regs[R_EAX] = int128_getlo(oldv);
            env->regs[R_EDX] = int128_gethi(oldv);
            eflags &= ~CC_Z;
        }
        CC_SRC = eflags;
#endif
    }
}

static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUArchState *env = arg;
    CPUState *cpu = ENV_GET_CPU(env);
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    cpu_single_env = env;

    r = kvm_init_vcpu(env);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(env);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    while (1) {
        if (cpu_can_run(cpu)) {
            r = kvm_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(env);
            }
        }
        qemu_kvm_wait_io_event(env);
    }

    return NULL;
}

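/* For context: qemu_init_vcpu() below calls qemu_kvm_start_vcpu(), which
 * presumably spawns this thread function in the same way that
 * qemu_dummy_start_vcpu() (also below) spawns its thread. A sketch, not
 * verbatim from this tree: */
static void qemu_kvm_start_vcpu(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    qemu_thread_create(cpu->thread, qemu_kvm_cpu_thread_fn, env,
                       QEMU_THREAD_JOINABLE);
    /* Wait until the vCPU thread reports that it has been created. */
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}
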
static void tcg_exec_all(void)
{
    int r;

    /* Account partial waits to the vm_clock.  */
    qemu_clock_warp(vm_clock);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
        CPUArchState *env = next_cpu;
        CPUState *cpu = ENV_GET_CPU(env);

        qemu_clock_enable(vm_clock,
                          (env->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (cpu_can_run(cpu)) {
            r = tcg_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(env);
                break;
            }
        } else if (cpu->stop || cpu->stopped) {
            break;
        }
    }
    exit_request = 0;
}

void get_prog_point(CPUState *cpu, prog_point *p) {
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    if (!p) return;

    // Get address space identifier
    target_ulong asid = panda_current_asid(ENV_GET_CPU(env));
    // Lump all kernel-mode CR3s together by not writing p->cr3 in
    // kernelspace, so every kernel-mode sample shares one program point
    if (!in_kernelspace(env))
        p->cr3 = asid;

    // Try to get the caller
    int n_callers = 0;
    n_callers = get_callers(&p->caller, 1, cpu);

    if (n_callers == 0) {
#ifdef TARGET_I386
        // fall back to EBP on x86
        int word_size = (env->hflags & HF_LMA_MASK) ? 8 : 4;
        panda_virtual_memory_rw(cpu, env->regs[R_EBP] + word_size,
                                (uint8_t *)&p->caller, word_size, 0);
#endif
#ifdef TARGET_ARM
        p->caller = env->regs[14]; // LR
#endif
    }

    p->pc = cpu->panda_guest_pc;
}

static void cpu_handle_guest_debug(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    gdb_set_stop_cpu(env);
    qemu_system_debug_request();
    cpu->stopped = true;
}

hax_fd hax_vcpu_get_fd(CPUArchState *env)
{
    struct hax_vcpu_state *vcpu = ENV_GET_CPU(env)->hax_vcpu;

    if (!vcpu) {
        return HAX_INVALID_FD;
    }
    return vcpu->fd;
}

void cpu_stop_current(void)
{
    if (cpu_single_env) {
        CPUState *cpu_single_cpu = ENV_GET_CPU(cpu_single_env);
        cpu_single_cpu->stop = false;
        cpu_single_cpu->stopped = true;
        cpu_exit(cpu_single_env);
        qemu_cond_signal(&qemu_pause_cond);
    }
}

static int cpu_write_ac_reg(CPUS390XState *env, uint8_t *mem_buf, int n)
{
    switch (n) {
    case S390_A0_REGNUM ... S390_A15_REGNUM:
        env->aregs[n] = ldl_p(mem_buf);
        cpu_synchronize_post_init(ENV_GET_CPU(env));
        return 4;
    default:
        return 0;
    }
}

static void qemu_kvm_wait_io_event(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    while (cpu_thread_is_idle(env)) {
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(env);
    qemu_wait_io_event_common(cpu);
}

/* Called with tlb_c.lock held */
static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                              target_ulong page)
{
    int k;

    assert_cpu_is_self(ENV_GET_CPU(env));
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        if (tlb_flush_entry_locked(&env->tlb_v_table[mmu_idx][k], page)) {
            tlb_n_used_entries_dec(env, mmu_idx);
        }
    }
}

static void qemu_dummy_start_vcpu(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    qemu_thread_create(cpu->thread, qemu_dummy_cpu_thread_fn, env,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

void resume_all_vcpus(void)
{
    CPUArchState *penv = first_cpu;

    qemu_clock_enable(vm_clock, true);
    while (penv) {
        CPUState *pcpu = ENV_GET_CPU(penv);
        pcpu->stop = false;
        pcpu->stopped = false;
        qemu_cpu_kick(pcpu);
        penv = penv->next_cpu;
    }
}

/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
                 uintptr_t retaddr)
{
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);

    if (!tlb_hit(tlb_addr_write(entry), addr)) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }
    }
}

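/* Usage sketch (hypothetical helper, not from the source): a target helper
 * that must not fault halfway through a multi-word store can probe the
 * destination first, so any write-permission exception is raised before the
 * first byte is modified. Assumes addr is 16-byte aligned, so the pair
 * cannot cross a page boundary (probe_write checks a single TLB entry). */
static void store_pair_checked(CPUArchState *env, target_ulong addr,
                               uint64_t lo, uint64_t hi, uintptr_t ra)
{
    int mmu_idx = cpu_mmu_index(env, false);

    probe_write(env, addr, 16, mmu_idx, ra);  /* faults here, or not at all */
    cpu_stq_data_ra(env, addr, lo, ra);
    cpu_stq_data_ra(env, addr + 8, hi, ra);
}
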
void qemu_mutex_lock_iothread(void)
{
    if (!tcg_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
    } else {
        iothread_requesting_mutex = true;
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_cpu_kick_thread(ENV_GET_CPU(first_cpu));
            qemu_mutex_lock(&qemu_global_mutex);
        }
        iothread_requesting_mutex = false;
        qemu_cond_broadcast(&qemu_io_proceeded_cond);
    }
}

static int all_vcpus_paused(void)
{
    CPUArchState *penv = first_cpu;

    while (penv) {
        CPUState *pcpu = ENV_GET_CPU(penv);
        if (!pcpu->stopped) {
            return 0;
        }
        penv = penv->next_cpu;
    }

    return 1;
}

void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(cpu_single_env);
    CPUState *cpu_single_cpu = ENV_GET_CPU(cpu_single_env);

    if (!cpu_single_cpu->thread_kicked) {
        qemu_cpu_kick_thread(cpu_single_cpu);
        cpu_single_cpu->thread_kicked = true;
    }
#else
    abort();
#endif
}

static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    CPUArchState *env;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(cpu->thread);

    /* signal CPU creation */
    qemu_mutex_lock(&qemu_global_mutex);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        cpu = ENV_GET_CPU(env);
        cpu->thread_id = qemu_get_thread_id();
        cpu->created = true;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (ENV_GET_CPU(first_cpu)->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);

        /* process any pending work */
        for (env = first_cpu; env != NULL; env = env->next_cpu) {
            qemu_wait_io_event_common(ENV_GET_CPU(env));
        }
    }

    while (1) {
        tcg_exec_all();
        if (use_icount && qemu_clock_deadline(vm_clock) <= 0) {
            qemu_notify_event();
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}

/* Raise IRQ to CPU if necessary.  It must be called every time the active
   IRQ may change. */
void cpu_mips_update_irq(CPUOldState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    if ((env->CP0_Status & (1 << CP0St_IE)) &&
        !(env->CP0_Status & (1 << CP0St_EXL)) &&
        !(env->CP0_Status & (1 << CP0St_ERL)) &&
        !(env->hflags & MIPS_HFLAG_DM)) {
        if ((env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
            !(cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
            cpu_interrupt(cpu, CPU_INTERRUPT_HARD);
        }
    } else {
        cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD);
    }
}

void helper_cpuid(CPUX86State *env)
{
    uint32_t eax, ebx, ecx, edx;

    cpu_svm_check_intercept_param(env, SVM_EXIT_CPUID, 0, GETPC());

    panda_callbacks_cpuid(ENV_GET_CPU(env));

    cpu_x86_cpuid(env, (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
                  &eax, &ebx, &ecx, &edx);
    env->regs[R_EAX] = eax;
    env->regs[R_EBX] = ebx;
    env->regs[R_ECX] = ecx;
    env->regs[R_EDX] = edx;
}

static bool cpu_thread_is_idle(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    if (cpu->stop || cpu->queued_work_first) {
        return false;
    }
    if (cpu->stopped || !runstate_is_running()) {
        return true;
    }
    if (!env->halted || qemu_cpu_has_work(cpu) ||
        kvm_async_interrupts_enabled()) {
        return false;
    }
    return true;
}

uint64_t helper_stq_c_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t ret = 0;

    if (p == env->lock_addr) {
        uint64_t old = ldq_phys(cs->as, p);
        if (old == env->lock_value) {
            stq_phys(cs->as, p, v);
            ret = 1;
        }
    }
    env->lock_addr = -1;
    return ret;
}

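/* For context: the store-conditional above only succeeds after a matching
 * load-locked has recorded lock_addr/lock_value. In the Alpha target that
 * helper looks roughly like this (sketch based on upstream QEMU): */
uint64_t helper_ldq_l_phys(CPUAlphaState *env, uint64_t p)
{
    CPUState *cs = ENV_GET_CPU(env);

    env->lock_addr = p;                      /* remember the locked address */
    env->lock_value = ldq_phys(cs->as, p);   /* and the value seen there */
    return env->lock_value;
}
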
void qemu_init_vcpu(void *_env)
{
    CPUArchState *env = _env;
    CPUState *cpu = ENV_GET_CPU(env);

    env->nr_cores = smp_cores;
    env->nr_threads = smp_threads;
    cpu->stopped = true;
    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(env);
    } else if (tcg_enabled()) {
        qemu_tcg_init_vcpu(cpu);
    } else {
        qemu_dummy_start_vcpu(env);
    }
}

void *HELPER(lookup_tb_ptr)(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;

    tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, curr_cflags());
    if (tb == NULL) {
        return tcg_ctx->code_gen_epilogue;
    }
    qemu_log_mask_and_addr(CPU_LOG_EXEC, pc,
                           "Chain %p [%d: " TARGET_FMT_lx "] %s\n",
                           tb->tc.ptr, cpu->cpu_index, pc,
                           lookup_symbol(pc));
    return tb->tc.ptr;
}

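/* For context: generated code reaches this helper via the TCG op emitted by
 * tcg_gen_lookup_and_goto_ptr(); a target translator typically ends a TB
 * with something like the following (sketch, mirroring upstream targets): */
static void gen_goto_ptr(void)
{
    tcg_gen_lookup_and_goto_ptr();
}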