void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

#ifdef CONFIG_KVM_XICS
	/*
	 * We call kick_all_cpus_sync() to ensure that all
	 * CPUs have executed any pending IPIs before we
	 * continue and free VCPUs structures below.
	 */
	if (is_kvmppc_hv_enabled(kvm))
		kick_all_cpus_sync();
#endif

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}
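All of the call sites in this section lean on the same primitive: kick_all_cpus_sync() is, in essence, a full memory barrier followed by a synchronous dummy IPI to the other CPUs, which only returns once every CPU has taken and returned from that interrupt. The sketch below is modelled on the kernel/smp.c implementation of this era; exact comments and details may differ between kernel versions.

/* Sketch, modelled on kernel/smp.c; details may vary by kernel version. */
static void do_nothing(void *unused)
{
}

/*
 * kick_all_cpus_sync - force all CPUs through an interrupt entry/exit
 *
 * The barrier publishes the caller's prior stores; the synchronous
 * cross-call then waits until every other CPU has executed the dummy
 * handler, i.e. has left whatever code region it was running before.
 */
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}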
/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
	if (enabled_devices) {
		initialized = 0;
		kick_all_cpus_sync();
	}
}
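The kick matters here because the idle path checks initialized before dispatching into cpuidle: once the flag is cleared, a synchronous IPI to all CPUs guarantees that no CPU is still executing inside the old idle handler by the time this function returns. A simplified sketch of that consumer-side guard (not a verbatim copy of drivers/cpuidle/cpuidle.c; the real function does considerably more):

/*
 * Simplified sketch of the idle-path consumer; only the guard on
 * 'initialized' is relevant to the kick_all_cpus_sync() above.
 */
int cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);

	if (off || !initialized)
		return -ENODEV;

	if (!dev || !dev->enabled)
		return -EBUSY;

	/* ... select an idle state and enter it ... */
	return 0;
}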
void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	pmd_t pmd = pmd_mksplitting(*pmdp);

	VM_BUG_ON(address & ~PMD_MASK);
	set_pmd_at(vma->vm_mm, address, pmdp, pmd);

	/* dummy IPI to serialise against fast_gup */
	kick_all_cpus_sync();
}
/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepte to regular pmd entry.
 */
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	/*
	 * This ensures that generic code that relies on IRQ disabling
	 * to prevent a parallel THP split works as expected.
	 */
	kick_all_cpus_sync();
}
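Both pmdp_splitting_flush() and pmdp_invalidate() use the IPI for the same reason: the lockless get_user_pages_fast() walker runs with local interrupts disabled, so a synchronous IPI to all CPUs cannot complete while any CPU is still inside that walk. A much-simplified sketch of that counterpart (the real code lives in mm/gup.c and the arch-specific fast-GUP implementations; names and details vary by version):

/*
 * Simplified sketch of the lockless GUP side. Because the walk runs with
 * interrupts disabled, kick_all_cpus_sync() in the THP split/invalidate
 * paths above cannot return until every CPU has left this region, which
 * is exactly the serialisation those callers rely on.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
{
	unsigned long flags;
	int nr = 0;

	local_irq_save(flags);
	/* ... walk pgd/pud/pmd/pte without taking any locks ... */
	local_irq_restore(flags);

	return nr;
}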
int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
{
	struct aarch64_insn_patch patch = {
		.text_addrs = addrs,
		.new_insns = insns,
		.insn_cnt = cnt,
		.cpu_count = ATOMIC_INIT(0),
	};

	if (cnt <= 0)
		return -EINVAL;

	return stop_machine(aarch64_insn_patch_text_cb, &patch, cpu_online_mask);
}

int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
	int ret;
	u32 insn;

	/* Unsafe to patch multiple instructions without synchronization */
	if (cnt == 1) {
		ret = aarch64_insn_read(addrs[0], &insn);
		if (ret)
			return ret;

		if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
			/*
			 * ARMv8 architecture doesn't guarantee all CPUs see
			 * the new instruction after returning from function
			 * aarch64_insn_patch_text_nosync(). So send IPIs to
			 * all other CPUs to achieve instruction
			 * synchronization.
			 */
			ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
			kick_all_cpus_sync();
			return ret;
		}
	}

	return aarch64_insn_patch_text_sync(addrs, insns, cnt);
}
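A typical caller of this interface is the arm64 jump-label code, which swaps a single instruction for a NOP (or back). The sketch below is modelled on that usage rather than copied from it; patch_single_nop() is a hypothetical helper, and the availability of aarch64_insn_gen_nop() is an assumption here.

/*
 * Hypothetical usage sketch, modelled on arm64 jump-label patching:
 * replace one instruction with a NOP. With cnt == 1 and a hotpatch-safe
 * instruction pair, aarch64_insn_patch_text() takes the nosync +
 * kick_all_cpus_sync() path instead of falling back to stop_machine().
 */
static int patch_single_nop(void *text_addr)
{
	u32 insn = aarch64_insn_gen_nop();	/* assumed helper */

	return aarch64_insn_patch_text(&text_addr, &insn, 1);
}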