static unsigned int steal_all_contexts(void)
{
	struct mm_struct *mm;
	int cpu = smp_processor_id();
	unsigned int id;

	for (id = first_context; id <= last_context; id++) {
		/* Pick up the victim mm */
		mm = context_mm[id];

		pr_hardcont(" | steal %d from 0x%p", id, mm);

		/* Mark this mm as having no context anymore */
		mm->context.id = MMU_NO_CONTEXT;
		if (id != first_context) {
			context_mm[id] = NULL;
			__clear_bit(id, context_map);
#ifdef DEBUG_MAP_CONSISTENCY
			mm->context.active = 0;
#endif
		}
		__clear_bit(id, stale_map[cpu]);
	}

	/* Flush the TLB for all contexts (not to be used on SMP) */
	_tlbil_all();

	nr_free_contexts = last_context - first_context;

	return first_context;
}
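/*
 * Illustration only (not from the kernel tree): a tiny user-space model of
 * the "steal everything" policy above.  Every context ID except the first is
 * returned to the free pool and the first ID is handed back to the caller,
 * which mirrors how nr_free_contexts ends up as last_context - first_context.
 * The names FIRST_CTX, LAST_CTX, ctx_map and steal_all are made up for this
 * sketch.
 */
#include <stdio.h>
#include <string.h>

#define FIRST_CTX 1
#define LAST_CTX  15

static unsigned char ctx_map[LAST_CTX + 1];	/* 1 = context ID in use */

static unsigned int steal_all(void)
{
	unsigned int id;

	for (id = FIRST_CTX; id <= LAST_CTX; id++)
		if (id != FIRST_CTX)
			ctx_map[id] = 0;	/* free every ID but the first */

	/* the real code flushes the TLB here before any ID is reused */
	return FIRST_CTX;			/* the caller keeps the first ID */
}

int main(void)
{
	memset(ctx_map, 1, sizeof(ctx_map));	/* all context IDs taken */
	printf("stealer keeps context %u, %u IDs freed\n",
	       steal_all(), (unsigned int)(LAST_CTX - FIRST_CTX));
	return 0;
}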
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_debug.enabled)
		kvmppc_core_load_host_debugstate(vcpu);

	/* Don't leave guest TLB entries resident when being de-scheduled. */
	/* XXX It would be nice to differentiate between heavyweight exit and
	 * sched_out here, since we could avoid the TLB flush for heavyweight
	 * exits. */
	_tlbil_all();

	kvmppc_core_vcpu_put(vcpu);
}
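/*
 * Illustration only (not kernel code): a minimal user-space model of why the
 * flush above matters.  Guests share the one hardware TLB, so a translation
 * installed while guest A ran could otherwise be hit after guest B is
 * scheduled in.  struct tlb_entry, tlb, flush_tlb and switch_guest are
 * invented names for this sketch.
 */
#include <stdio.h>

struct tlb_entry { int guest; unsigned long va, pa; int valid; };

static struct tlb_entry tlb[4];			/* model of the shared TLB */
static int current_guest = -1;

static void flush_tlb(void)			/* stands in for _tlbil_all() */
{
	for (int i = 0; i < 4; i++)
		tlb[i].valid = 0;
}

static void switch_guest(int guest)
{
	flush_tlb();				/* drop the old guest's entries */
	current_guest = guest;
}

int main(void)
{
	switch_guest(0);
	tlb[0] = (struct tlb_entry){ .guest = 0, .va = 0x1000, .pa = 0x9000, .valid = 1 };

	switch_guest(1);	/* without the flush, guest 1 would see guest 0's mapping */
	printf("guest %d runs; old entry still valid: %d\n", current_guest, tlb[0].valid);
	return 0;
}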