static void continue_new_vcpu(struct vcpu *prev)
{
    schedule_tail(prev);

    if ( is_idle_vcpu(current) )
        reset_stack_and_jump(idle_loop);
    else if ( is_32bit_domain(current->domain) )
        /* check_wakeup_from_wait(); */
        reset_stack_and_jump(return_to_new_vcpu32);
    else
        /* check_wakeup_from_wait(); */
        reset_stack_and_jump(return_to_new_vcpu64);
}
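/*
 * Each function in this section ends in reset_stack_and_jump(), which never
 * returns: it discards the current call stack by pointing the stack pointer
 * back at the top of this pCPU's stack and jumping to the target function.
 * A simplified sketch of the x86 flavour of the macro follows; the real
 * definition lives in xen/include/asm-x86/current.h and varies between Xen
 * versions and architectures, so treat this as an illustration only.
 */
#define reset_stack_and_jump(fn)                                        \
    __asm__ __volatile__ (                                              \
        "mov %0, %%rsp; jmp %c1"                                        \
        : : "r" (guest_cpu_user_regs()), "i" (fn) : "memory" )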
void startup_cpu_idle_loop(void)
{
    struct vcpu *v = current;

    ASSERT(is_idle_vcpu(v));
    cpu_set(smp_processor_id(), v->domain->domain_dirty_cpumask);
    cpu_set(smp_processor_id(), v->vcpu_dirty_cpumask);

    /* Finally get off the boot stack. */
    reset_stack_and_jump(idle_loop);
}
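/*
 * A second variant of startup_cpu_idle_loop(): here the dirty-cpumask
 * updates are only sketched in a TODO comment (consistent with an early
 * architecture port, e.g. the ARM one), but the function still gets off
 * the boot stack through the same reset_stack_and_jump(idle_loop).
 */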
void startup_cpu_idle_loop(void)
{
    struct vcpu *v = current;

    ASSERT(is_idle_vcpu(v));
    /* TODO
       cpumask_set_cpu(v->processor, v->domain->domain_dirty_cpumask);
       cpumask_set_cpu(v->processor, v->vcpu_dirty_cpumask);
    */

    reset_stack_and_jump(idle_loop);
}
void vmx_do_resume(struct vcpu *v)
{
    bool_t debug_state;

    if ( v->arch.hvm_vmx.active_cpu == smp_processor_id() )
    {
        if ( v->arch.hvm_vmx.vmcs != this_cpu(current_vmcs) )
            vmx_load_vmcs(v);
    }
    else
    {
        /*
         * For pass-through domain, guest PCI-E device driver may leverage the
         * "Non-Snoop" I/O, and explicitly WBINVD or CLFLUSH to a RAM space.
         * Since migration may occur before WBINVD or CLFLUSH, we need to
         * maintain data consistency either by:
         * 1: flushing cache (wbinvd) when the guest is scheduled out if
         *    there is no wbinvd exit, or
         * 2: execute wbinvd on all dirty pCPUs when guest wbinvd exits.
         */
        if ( !list_empty(&(domain_hvm_iommu(v->domain)->pdev_list)) &&
             !cpu_has_wbinvd_exiting )
        {
            int cpu = v->arch.hvm_vmx.active_cpu;
            if ( cpu != -1 )
                on_selected_cpus(cpumask_of_cpu(cpu), wbinvd_ipi, NULL, 1, 1);
        }

        vmx_clear_vmcs(v);
        vmx_load_vmcs(v);
        hvm_migrate_timers(v);
        vmx_set_host_env(v);
    }

    debug_state = v->domain->debugger_attached;
    if ( unlikely(v->arch.hvm_vcpu.debug_state_latch != debug_state) )
    {
        unsigned long intercepts = __vmread(EXCEPTION_BITMAP);
        unsigned long mask = (1U << TRAP_debug) | (1U << TRAP_int3);

        v->arch.hvm_vcpu.debug_state_latch = debug_state;
        if ( debug_state )
            intercepts |= mask;
        else
            intercepts &= ~mask;
        __vmwrite(EXCEPTION_BITMAP, intercepts);
    }

    hvm_do_resume(v);
    reset_stack_and_jump(vmx_asm_do_vmentry);
}
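/*
 * Option 1 in the comment above depends on a helper that writes back and
 * invalidates the caches of the previously-dirty pCPU via IPI. A minimal
 * sketch of such a handler, assuming the usual one-liner shape it has in
 * vmcs.c:
 */
static void wbinvd_ipi(void *info)
{
    wbinvd();   /* flush and invalidate this CPU's entire cache hierarchy */
}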
static void noreturn continue_nonidle_domain(struct vcpu *v)
{
    check_wakeup_from_wait();
    reset_stack_and_jump(ret_from_intr);
}