/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *cpu, int mask)
{
    int old_mask;

    old_mask = cpu->interrupt_request;
    cpu->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
        return;
    }

    if (use_icount) {
        /* Make the 32-bit icount decrementer negative so the current TB
         * exits at its next icount check.
         */
        cpu->icount_decr.u16.high = 0xffff;
        if (!cpu->can_do_io
            && (mask & ~old_mask) != 0) {
            cpu_abort(cpu, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu->tcg_exit_req = 1;
    }
}
void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = &wi;
    } else {
        cpu->queued_work_last->next = &wi;
    }
    cpu->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(cpu);
    while (!wi.done) {
        CPUArchState *self_env = cpu_single_env;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        cpu_single_env = self_env;
    }
}
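/*
 * Hedged usage sketch, not part of the excerpt above: a caller wraps the work
 * in a void(void *) callback and blocks in run_on_cpu() until the vCPU thread
 * has set wi.done. Both do_reset_one_cpu() and reset_cpu_synchronously() are
 * hypothetical names; cpu_reset() is QEMU's per-CPU reset entry point.
 */
static void do_reset_one_cpu(void *data)
{
    cpu_reset(data);
}

static void reset_cpu_synchronously(CPUState *cpu)
{
    /* Runs inline when already on the target vCPU thread, else blocks. */
    run_on_cpu(cpu, do_reset_one_cpu, cpu);
}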
static void kvm_handle_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;

    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    }
}
static void whpx_handle_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;

    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    }
}
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_HOST_INT(idxmap));
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}
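/*
 * Caller sketch, under the assumption (suggested by the name and by the
 * OR-merge in tlb_flush_page_by_mmuidx() below) that idxmap is a bitmask of
 * MMU indexes rather than a single index. The helper name is hypothetical.
 */
static void flush_one_mmu_idx(CPUState *cpu, int mmu_idx)
{
    tlb_flush_by_mmuidx(cpu, 1 << mmu_idx);
}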
static void apic_sync_vapic(APICCommonState *s, int sync_type)
{
    VAPICState vapic_state;
    //size_t length;
    //off_t start;
    int vector;

    if (!s->vapic_paddr) {
        return;
    }

    if (sync_type & SYNC_FROM_VAPIC) {
        cpu_physical_memory_read(NULL, s->vapic_paddr, &vapic_state,
                                 sizeof(vapic_state));
        s->tpr = vapic_state.tpr;
    }
    if (sync_type & (SYNC_TO_VAPIC | SYNC_ISR_IRR_TO_VAPIC)) {
        //start = offsetof(VAPICState, isr);
        //length = offsetof(VAPICState, enabled) - offsetof(VAPICState, isr);

        if (sync_type & SYNC_TO_VAPIC) {
            assert(qemu_cpu_is_self(CPU(s->cpu)));

            vapic_state.tpr = s->tpr;
            vapic_state.enabled = 1;
            //start = 0;
            //length = sizeof(VAPICState);
        }

        vector = get_highest_priority_int(s->isr);
        if (vector < 0) {
            vector = 0;
        }
        vapic_state.isr = vector & 0xf0;
        vapic_state.zero = 0;

        vector = get_highest_priority_int(s->irr);
        if (vector < 0) {
            vector = 0;
        }
        vapic_state.irr = vector & 0xff;

        //cpu_physical_memory_write_rom(&address_space_memory,
        //                              s->vapic_paddr + start,
        //                              ((void *)&vapic_state) + start, length);
        // FIXME qq
    }
}
static void qemu_cpu_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(cpu)) {
        SuspendThread(cpu->hThread);
        cpu_signal(0);
        ResumeThread(cpu->hThread);
    }
#endif
}
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: " TARGET_FMT_lx " mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    if (!qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    } else {
        tlb_flush_page_by_mmuidx_async_work(
            cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    }
}
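/*
 * Sketch of the receiving side, inferred only from the packing shown above:
 * the page-aligned address and the mmu-index bitmap share one target_ulong,
 * so the async worker can split them back apart with TARGET_PAGE_MASK. The
 * helper name is hypothetical.
 */
static void unpack_addr_and_mmu_idx(target_ulong packed,
                                    target_ulong *addr, uint16_t *idxmap)
{
    *addr = packed & TARGET_PAGE_MASK;                 /* page address */
    *idxmap = (uint16_t)(packed & ~TARGET_PAGE_MASK);  /* mmu-index bits */
}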
static void qemu_cpu_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(cpu)) {
        CONTEXT tcgContext;

        if (SuspendThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }

        /* On multi-core systems, we are not sure that the thread is actually
         * suspended until we can get the context.
         */
        tcgContext.ContextFlags = CONTEXT_CONTROL;
        while (GetThreadContext(cpu->hThread, &tcgContext) != 0) {
            continue;
        }

        cpu_signal(0);

        if (ResumeThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }
    }
#endif
}
static void qemu_cpu_kick_thread(CPUArchState *env)
{
#ifndef _WIN32
    int err;

    LOGD_CPUS("%s1\n", __func__);
    err = pthread_kill(env->thread->thread, SIG_IPI);
    LOGD_CPUS("%s2: KILL pthread\n", __func__);
    if (err) {
        LOGD_CPUS("%s3: ERROR = %d:%s\n", __func__, err, strerror(err));
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
    LOGD_CPUS("%s4\n", __func__);
#else /* _WIN32 */
    if (!qemu_cpu_is_self(env)) {
        SuspendThread(env->hThread);
        cpu_signal(0);
        ResumeThread(env->hThread);
    }
#endif
}
void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
                   QemuMutex *mutex)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(cpu, data);
        return;
    }

    wi.func = func;
    wi.data = data;
    wi.done = false;
    wi.free = false;
    wi.exclusive = false;

    queue_work_on_cpu(cpu, &wi);
    while (!atomic_mb_read(&wi.done)) {
        CPUState *self_cpu = current_cpu;

        qemu_cond_wait(&qemu_work_cond, mutex);
        current_cpu = self_cpu;
    }
}
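/*
 * queue_work_on_cpu() is not shown in this section. This is a minimal sketch
 * of what it plausibly does, reconstructed from the open-coded list appends
 * in run_on_cpu()/async_run_on_cpu() above; the per-CPU work_mutex protecting
 * the list is an assumption, and the _sketch suffix marks it as illustrative.
 */
static void queue_work_on_cpu_sketch(CPUState *cpu, struct qemu_work_item *wi)
{
    qemu_mutex_lock(&cpu->work_mutex);
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = wi;
    } else {
        cpu->queued_work_last->next = wi;
    }
    cpu->queued_work_last = wi;
    wi->next = NULL;
    wi->done = false;
    qemu_mutex_unlock(&cpu->work_mutex);

    /* Wake the target vCPU so it notices the pending work. */
    qemu_cpu_kick(cpu);
}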
void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item *wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi = g_malloc0(sizeof(struct qemu_work_item));
    wi->func = func;
    wi->data = data;
    wi->free = true;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = wi;
    } else {
        cpu->queued_work_last->next = wi;
    }
    cpu->queued_work_last = wi;
    wi->next = NULL;
    wi->done = false;

    qemu_cpu_kick(cpu);
}
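/*
 * Sketch of the consuming side, reconstructed only from the queue fields used
 * above: the vCPU thread drains the list, runs each item, frees the
 * heap-allocated ones (wi->free, set by async_run_on_cpu() since the caller
 * does not wait), and wakes the waiters blocked in run_on_cpu(). Locking is
 * omitted and the helper name is an assumption.
 */
static void flush_queued_work_sketch(CPUState *cpu)
{
    struct qemu_work_item *wi;

    while ((wi = cpu->queued_work_first) != NULL) {
        cpu->queued_work_first = wi->next;
        if (cpu->queued_work_first == NULL) {
            cpu->queued_work_last = NULL;
        }
        wi->func(wi->data);
        wi->done = true;
        if (wi->free) {
            g_free(wi);
        }
    }
    qemu_cond_broadcast(&qemu_work_cond);
}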
static bool qemu_in_vcpu_thread(void)
{
    return cpu_single_env && qemu_cpu_is_self(ENV_GET_CPU(cpu_single_env));
}
static void whpx_get_registers(CPUState *cpu)
{
    struct whpx_state *whpx = &whpx_global;
    struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
    X86CPU *x86_cpu = X86_CPU(cpu);
    struct whpx_register_set vcxt;
    uint64_t tpr, apic_base;
    HRESULT hr;
    int idx = 0;
    int i;

    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));

    hr = WHvGetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index,
                                         whpx_register_names,
                                         RTL_NUMBER_OF(whpx_register_names),
                                         &vcxt.values[0]);
    if (FAILED(hr)) {
        error_report("WHPX: Failed to get virtual processor context, hr=%08lx",
                     hr);
    }

    /* Indexes for first 16 registers match between HV and QEMU definitions */
    for (idx = 0; idx < CPU_NB_REGS64; idx += 1) {
        env->regs[idx] = vcxt.values[idx].Reg64;
    }

    /* Same goes for RIP and RFLAGS */
    assert(whpx_register_names[idx] == WHvX64RegisterRip);
    env->eip = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterRflags);
    env->eflags = vcxt.values[idx++].Reg64;

    /* Translate 6+4 segment registers. HV and QEMU order matches */
    assert(idx == WHvX64RegisterEs);
    for (i = 0; i < 6; i += 1, idx += 1) {
        env->segs[i] = whpx_seg_h2q(&vcxt.values[idx].Segment);
    }

    assert(idx == WHvX64RegisterLdtr);
    env->ldt = whpx_seg_h2q(&vcxt.values[idx++].Segment);

    assert(idx == WHvX64RegisterTr);
    env->tr = whpx_seg_h2q(&vcxt.values[idx++].Segment);

    assert(idx == WHvX64RegisterIdtr);
    env->idt.base = vcxt.values[idx].Table.Base;
    env->idt.limit = vcxt.values[idx].Table.Limit;
    idx += 1;

    assert(idx == WHvX64RegisterGdtr);
    env->gdt.base = vcxt.values[idx].Table.Base;
    env->gdt.limit = vcxt.values[idx].Table.Limit;
    idx += 1;

    /* CR0, 2, 3, 4, 8 */
    assert(whpx_register_names[idx] == WHvX64RegisterCr0);
    env->cr[0] = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterCr2);
    env->cr[2] = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterCr3);
    env->cr[3] = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterCr4);
    env->cr[4] = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterCr8);
    tpr = vcxt.values[idx++].Reg64;
    if (tpr != vcpu->tpr) {
        vcpu->tpr = tpr;
        cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
    }

    /* 8 Debug Registers - Skipped */

    /* 16 XMM registers */
    assert(whpx_register_names[idx] == WHvX64RegisterXmm0);
    for (i = 0; i < 16; i += 1, idx += 1) {
        env->xmm_regs[i].ZMM_Q(0) = vcxt.values[idx].Reg128.Low64;
        env->xmm_regs[i].ZMM_Q(1) = vcxt.values[idx].Reg128.High64;
    }

    /* 8 FP registers */
    assert(whpx_register_names[idx] == WHvX64RegisterFpMmx0);
    for (i = 0; i < 8; i += 1, idx += 1) {
        env->fpregs[i].mmx.MMX_Q(0) = vcxt.values[idx].Fp.AsUINT128.Low64;
        /* env->fpregs[i].mmx.MMX_Q(1) =
               vcxt.values[idx].Fp.AsUINT128.High64;
        */
    }

    /* FP control status register */
    assert(whpx_register_names[idx] == WHvX64RegisterFpControlStatus);
    env->fpuc = vcxt.values[idx].FpControlStatus.FpControl;
    env->fpstt = (vcxt.values[idx].FpControlStatus.FpStatus >> 11) & 0x7;
    env->fpus = vcxt.values[idx].FpControlStatus.FpStatus & ~0x3800;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((vcxt.values[idx].FpControlStatus.FpTag >> i) & 1);
    }
    env->fpop = vcxt.values[idx].FpControlStatus.LastFpOp;
    env->fpip = vcxt.values[idx].FpControlStatus.LastFpRip;
    idx += 1;

    /* XMM control status register */
    assert(whpx_register_names[idx] == WHvX64RegisterXmmControlStatus);
    env->mxcsr = vcxt.values[idx].XmmControlStatus.XmmStatusControl;
    idx += 1;

    /* MSRs */
    assert(whpx_register_names[idx] == WHvX64RegisterTsc);
    env->tsc = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterEfer);
    env->efer = vcxt.values[idx++].Reg64;
#ifdef TARGET_X86_64
    assert(whpx_register_names[idx] == WHvX64RegisterKernelGsBase);
    env->kernelgsbase = vcxt.values[idx++].Reg64;
#endif
    assert(whpx_register_names[idx] == WHvX64RegisterApicBase);
    apic_base = vcxt.values[idx++].Reg64;
    if (apic_base != vcpu->apic_base) {
        vcpu->apic_base = apic_base;
        cpu_set_apic_base(x86_cpu->apic_state, vcpu->apic_base);
    }

    /* WHvX64RegisterPat - Skipped */

    assert(whpx_register_names[idx] == WHvX64RegisterSysenterCs);
    env->sysenter_cs = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterSysenterEip);
    env->sysenter_eip = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterSysenterEsp);
    env->sysenter_esp = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterStar);
    env->star = vcxt.values[idx++].Reg64;
#ifdef TARGET_X86_64
    assert(whpx_register_names[idx] == WHvX64RegisterLstar);
    env->lstar = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterCstar);
    env->cstar = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterSfmask);
    env->fmask = vcxt.values[idx++].Reg64;
#endif

    /* Interrupt / Event Registers - Skipped */

    assert(idx == RTL_NUMBER_OF(whpx_register_names));

    return;
}
static void whpx_set_registers(CPUState *cpu)
{
    struct whpx_state *whpx = &whpx_global;
    struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
    X86CPU *x86_cpu = X86_CPU(cpu);
    struct whpx_register_set vcxt = {0};
    HRESULT hr;
    int idx = 0;
    int i;
    int v86, r86;

    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));

    v86 = (env->eflags & VM_MASK);
    r86 = !(env->cr[0] & CR0_PE_MASK);

    vcpu->tpr = cpu_get_apic_tpr(x86_cpu->apic_state);
    vcpu->apic_base = cpu_get_apic_base(x86_cpu->apic_state);

    /* Indexes for first 16 registers match between HV and QEMU definitions */
    for (idx = 0; idx < CPU_NB_REGS64; idx += 1) {
        vcxt.values[idx].Reg64 = env->regs[idx];
    }

    /* Same goes for RIP and RFLAGS */
    assert(whpx_register_names[idx] == WHvX64RegisterRip);
    vcxt.values[idx++].Reg64 = env->eip;
    assert(whpx_register_names[idx] == WHvX64RegisterRflags);
    vcxt.values[idx++].Reg64 = env->eflags;

    /* Translate 6+4 segment registers. HV and QEMU order matches */
    assert(idx == WHvX64RegisterEs);
    for (i = 0; i < 6; i += 1, idx += 1) {
        vcxt.values[idx].Segment = whpx_seg_q2h(&env->segs[i], v86, r86);
    }

    assert(idx == WHvX64RegisterLdtr);
    vcxt.values[idx++].Segment = whpx_seg_q2h(&env->ldt, 0, 0);

    assert(idx == WHvX64RegisterTr);
    vcxt.values[idx++].Segment = whpx_seg_q2h(&env->tr, 0, 0);

    assert(idx == WHvX64RegisterIdtr);
    vcxt.values[idx].Table.Base = env->idt.base;
    vcxt.values[idx].Table.Limit = env->idt.limit;
    idx += 1;

    assert(idx == WHvX64RegisterGdtr);
    vcxt.values[idx].Table.Base = env->gdt.base;
    vcxt.values[idx].Table.Limit = env->gdt.limit;
    idx += 1;

    /* CR0, 2, 3, 4, 8 */
    assert(whpx_register_names[idx] == WHvX64RegisterCr0);
    vcxt.values[idx++].Reg64 = env->cr[0];
    assert(whpx_register_names[idx] == WHvX64RegisterCr2);
    vcxt.values[idx++].Reg64 = env->cr[2];
    assert(whpx_register_names[idx] == WHvX64RegisterCr3);
    vcxt.values[idx++].Reg64 = env->cr[3];
    assert(whpx_register_names[idx] == WHvX64RegisterCr4);
    vcxt.values[idx++].Reg64 = env->cr[4];
    assert(whpx_register_names[idx] == WHvX64RegisterCr8);
    vcxt.values[idx++].Reg64 = vcpu->tpr;

    /* 8 Debug Registers - Skipped */

    /* 16 XMM registers */
    assert(whpx_register_names[idx] == WHvX64RegisterXmm0);
    for (i = 0; i < 16; i += 1, idx += 1) {
        vcxt.values[idx].Reg128.Low64 = env->xmm_regs[i].ZMM_Q(0);
        vcxt.values[idx].Reg128.High64 = env->xmm_regs[i].ZMM_Q(1);
    }

    /* 8 FP registers */
    assert(whpx_register_names[idx] == WHvX64RegisterFpMmx0);
    for (i = 0; i < 8; i += 1, idx += 1) {
        vcxt.values[idx].Fp.AsUINT128.Low64 = env->fpregs[i].mmx.MMX_Q(0);
        /* vcxt.values[idx].Fp.AsUINT128.High64 =
               env->fpregs[i].mmx.MMX_Q(1);
        */
    }

    /* FP control status register */
    assert(whpx_register_names[idx] == WHvX64RegisterFpControlStatus);
    vcxt.values[idx].FpControlStatus.FpControl = env->fpuc;
    vcxt.values[idx].FpControlStatus.FpStatus =
        (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    vcxt.values[idx].FpControlStatus.FpTag = 0;
    for (i = 0; i < 8; ++i) {
        vcxt.values[idx].FpControlStatus.FpTag |= (!env->fptags[i]) << i;
    }
    vcxt.values[idx].FpControlStatus.Reserved = 0;
    vcxt.values[idx].FpControlStatus.LastFpOp = env->fpop;
    vcxt.values[idx].FpControlStatus.LastFpRip = env->fpip;
    idx += 1;

    /* XMM control status register */
    assert(whpx_register_names[idx] == WHvX64RegisterXmmControlStatus);
    vcxt.values[idx].XmmControlStatus.LastFpRdp = 0;
    vcxt.values[idx].XmmControlStatus.XmmStatusControl = env->mxcsr;
    vcxt.values[idx].XmmControlStatus.XmmStatusControlMask = 0x0000ffff;
    idx += 1;

    /* MSRs */
    assert(whpx_register_names[idx] == WHvX64RegisterTsc);
    vcxt.values[idx++].Reg64 = env->tsc;
    assert(whpx_register_names[idx] == WHvX64RegisterEfer);
    vcxt.values[idx++].Reg64 = env->efer;
#ifdef TARGET_X86_64
    assert(whpx_register_names[idx] == WHvX64RegisterKernelGsBase);
    vcxt.values[idx++].Reg64 = env->kernelgsbase;
#endif
    assert(whpx_register_names[idx] == WHvX64RegisterApicBase);
    vcxt.values[idx++].Reg64 = vcpu->apic_base;

    /* WHvX64RegisterPat - Skipped */

    assert(whpx_register_names[idx] == WHvX64RegisterSysenterCs);
    vcxt.values[idx++].Reg64 = env->sysenter_cs;
    assert(whpx_register_names[idx] == WHvX64RegisterSysenterEip);
    vcxt.values[idx++].Reg64 = env->sysenter_eip;
    assert(whpx_register_names[idx] == WHvX64RegisterSysenterEsp);
    vcxt.values[idx++].Reg64 = env->sysenter_esp;
    assert(whpx_register_names[idx] == WHvX64RegisterStar);
    vcxt.values[idx++].Reg64 = env->star;
#ifdef TARGET_X86_64
    assert(whpx_register_names[idx] == WHvX64RegisterLstar);
    vcxt.values[idx++].Reg64 = env->lstar;
    assert(whpx_register_names[idx] == WHvX64RegisterCstar);
    vcxt.values[idx++].Reg64 = env->cstar;
    assert(whpx_register_names[idx] == WHvX64RegisterSfmask);
    vcxt.values[idx++].Reg64 = env->fmask;
#endif

    /* Interrupt / Event Registers - Skipped */

    assert(idx == RTL_NUMBER_OF(whpx_register_names));

    hr = WHvSetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index,
                                         whpx_register_names,
                                         RTL_NUMBER_OF(whpx_register_names),
                                         &vcxt.values[0]);
    if (FAILED(hr)) {
        error_report("WHPX: Failed to set virtual processor context, hr=%08lx",
                     hr);
    }

    return;
}
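/*
 * Hedged sketch, not from this section: how the two functions above pair up
 * around a vCPU entry/exit. Both assert that the target is either stopped or
 * the current thread, so state flows QEMU -> hypervisor before the guest runs
 * and back afterwards. The run call is elided and whpx_sync_and_step() is a
 * hypothetical name.
 */
static void whpx_sync_and_step(CPUState *cpu)
{
    whpx_set_registers(cpu);    /* push QEMU's register state to WHPX */
    /* ... run the virtual processor here ... */
    whpx_get_registers(cpu);    /* pull the updated state back into QEMU */
}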
static bool qemu_in_vcpu_thread(void)
{
    return current_cpu && qemu_cpu_is_self(current_cpu);
}
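/*
 * Hypothetical caller sketch: code that stops or pauses vCPUs (vm_stop() in
 * QEMU behaves roughly this way) must check qemu_in_vcpu_thread(), because a
 * vCPU thread cannot block waiting for itself to halt.
 */
static void request_stop_sketch(void)
{
    if (qemu_in_vcpu_thread()) {
        /* Defer: request the stop and let the main loop complete it. */
    } else {
        /* Safe to synchronously wait for every vCPU to stop. */
    }
}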