void pause_all_vcpus(void) { CPUArchState *penv = first_cpu; qemu_clock_enable(vm_clock, false); while (penv) { CPUState *pcpu = ENV_GET_CPU(penv); pcpu->stop = true; qemu_cpu_kick(pcpu); penv = penv->next_cpu; } if (qemu_in_vcpu_thread()) { cpu_stop_current(); if (!kvm_enabled()) { while (penv) { CPUState *pcpu = ENV_GET_CPU(penv); pcpu->stop = 0; pcpu->stopped = true; penv = penv->next_cpu; } return; } } while (!all_vcpus_paused()) { qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex); penv = first_cpu; while (penv) { qemu_cpu_kick(ENV_GET_CPU(penv)); penv = penv->next_cpu; } } }
/*
 * Called whenever the memory map is about to be reloaded.
 *
 * The guest can in theory prolong the RCU critical section as long as it
 * feels like, and can reconfigure the memory map many times inside one
 * critical section — potentially accumulating an unbounded collection of
 * memory data structures awaiting reclamation.  Because the only thing
 * currently protected by RCU is the memory data structures, it suffices
 * to break the critical section here, since this callback runs on every
 * memory-map rearrangement.
 *
 * (If anything else in the system starts using RCU for its data
 * structures, some other mechanism will be needed to force TCG CPUs out
 * of the critical section, and this part may become unnecessary.)
 *
 * The unlock/lock pair below matches cpu_exec's
 * rcu_read_lock()/rcu_read_unlock(), which only protects
 * cpu->as->dispatch.  Since our caller is about to reload it, splitting
 * the critical section is safe.
 */
void cpu_reloading_memory_map(void)
{
    /* Nothing to do unless a running VCPU thread holds the read lock. */
    if (!qemu_in_vcpu_thread() || !current_cpu->running) {
        return;
    }

    rcu_read_unlock();
    rcu_read_lock();
}
void pause_all_vcpus(void) { CPUState *cpu = first_cpu; qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false); while (cpu) { cpu->stop = true; qemu_cpu_kick(cpu); cpu = cpu->next_cpu; } if (qemu_in_vcpu_thread()) { cpu_stop_current(); if (!kvm_enabled()) { cpu = first_cpu; while (cpu) { cpu->stop = false; cpu->stopped = true; cpu = cpu->next_cpu; } return; } } while (!all_vcpus_paused()) { qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex); cpu = first_cpu; while (cpu) { qemu_cpu_kick(cpu); cpu = cpu->next_cpu; } } }
/*
 * Stop the VM, transitioning it into the given run state.
 *
 * From an ordinary (non-VCPU) thread this performs the stop directly via
 * do_vm_stop().  From a VCPU thread the stop is only *requested*
 * (qemu_system_vmstop_request) and the current CPU halts itself, since a
 * VCPU thread cannot synchronously pause its peers.
 */
void vm_stop(RunState state)
{
    if (!qemu_in_vcpu_thread()) {
        do_vm_stop(state);
        return;
    }

    qemu_system_vmstop_request(state);
    /*
     * FIXME: should not return to device code in case
     * vm_stop() has been requested.
     */
    cpu_stop_current();
}