static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
{
        struct hpte_cache *pte;
        struct hlist_node *node, *tmp;
        int i;

        for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
                struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i];

                hlist_for_each_entry_safe(pte, node, tmp, list, list_vpte_long)
                        invalidate_pte(vcpu, pte);
        }
}
/* Flush with mask 0xfffffffff */
static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
{
        struct hlist_head *list;
        struct hlist_node *node, *tmp;
        struct hpte_cache *pte;
        u64 vp_mask = 0xfffffffffULL;

        list = &vcpu->arch.hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];

        /* Check the list for matching entries and invalidate */
        hlist_for_each_entry_safe(pte, node, tmp, list, list_vpte)
                if ((pte->pte.vpage & vp_mask) == guest_vp)
                        invalidate_pte(vcpu, pte);
}
static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
{
        struct hlist_head *list;
        struct hlist_node *node, *tmp;
        struct hpte_cache *pte;

        /* Find the list of entries in the map */
        list = &vcpu->arch.hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];

        /* Check the list for matching entries and invalidate */
        hlist_for_each_entry_safe(pte, node, tmp, list, list_pte)
                if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
                        invalidate_pte(vcpu, pte);
}
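/*
 * Note on the _safe iterators in the pre-RCU walks above: invalidate_pte()
 * unlinks the entry it is handed, so each walk must cache the next node
 * before the callback runs.  A minimal sketch of that unlink step, assuming
 * the pre-RCU invalidate path simply hlist_del()s the entry from every hash
 * chain it sits on (the real function also tears down the shadow HPTE and
 * frees the entry; the helper name is hypothetical):
 */
static void invalidate_pte_unlink_sketch(struct hpte_cache *pte)
{
        /* Drop the entry from all three hash chains it was inserted on. */
        hlist_del(&pte->list_pte);
        hlist_del(&pte->list_vpte);
        hlist_del(&pte->list_vpte_long);
}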
static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        struct hpte_cache *pte;
        int i;

        rcu_read_lock();

        for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
                struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];

                hlist_for_each_entry_rcu(pte, list, list_vpte_long)
                        invalidate_pte(vcpu, pte);
        }

        rcu_read_unlock();
}
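/*
 * The RCU walk above is only safe if removal defers the actual free until
 * all rcu_read_lock() readers are done.  A minimal sketch of that companion
 * path, assuming entries carry an rcu_head, are unlinked with
 * hlist_del_init_rcu() under a writer-side spinlock, and come from a
 * dedicated slab (hpte_cache_slab, mmu_lock, free_pte_rcu and the sketch
 * helper name are assumptions, not taken from the code above):
 */
static struct kmem_cache *hpte_cache_slab;

static void free_pte_rcu(struct rcu_head *head)
{
        struct hpte_cache *pte = container_of(head, struct hpte_cache, rcu_head);

        kmem_cache_free(hpte_cache_slab, pte);
}

static void invalidate_pte_rcu_sketch(struct kvmppc_vcpu_book3s *vcpu3s,
                                      struct hpte_cache *pte)
{
        spin_lock(&vcpu3s->mmu_lock);

        /* Unlink from every hash chain; readers may still hold the entry. */
        hlist_del_init_rcu(&pte->list_pte);
        hlist_del_init_rcu(&pte->list_vpte);
        hlist_del_init_rcu(&pte->list_vpte_long);

        spin_unlock(&vcpu3s->mmu_lock);

        /* Free only after every outstanding RCU read-side section ends. */
        call_rcu(&pte->rcu_head, free_pte_rcu);
}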
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
        struct hlist_node *node, *tmp;
        struct hpte_cache *pte;
        int i;

        dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%lx - 0x%lx\n",
                    vcpu->arch.hpte_cache_count, pa_start, pa_end);

        for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
                struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i];

                hlist_for_each_entry_safe(pte, node, tmp, list, list_vpte_long)
                        if ((pte->pte.raddr >= pa_start) &&
                            (pte->pte.raddr < pa_end))
                                invalidate_pte(vcpu, pte);
        }
}
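/*
 * dprintk_mmu() in the old pflush above is a compile-time debug printk.
 * One common shape for such a macro, given as an assumption rather than a
 * quote from this file:
 */
#ifdef DEBUG_MMU
#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_mmu(a, ...) do { } while (0)
#endif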
/* Flush with mask 0xfffffffff */
static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        struct hlist_head *list;
        struct hpte_cache *pte;
        u64 vp_mask = 0xfffffffffULL;

        list = &vcpu3s->hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];

        rcu_read_lock();

        /* Check the list for matching entries and invalidate */
        hlist_for_each_entry_rcu(pte, list, list_vpte)
                if ((pte->pte.vpage & vp_mask) == guest_vp)
                        invalidate_pte(vcpu, pte);

        rcu_read_unlock();
}
static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        struct hlist_head *list;
        struct hpte_cache *pte;

        /* Find the list of entries in the map */
        list = &vcpu3s->hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];

        rcu_read_lock();

        /* Check the list for matching entries and invalidate */
        hlist_for_each_entry_rcu(pte, list, list_pte)
                if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
                        invalidate_pte(vcpu, pte);

        rcu_read_unlock();
}
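/*
 * Both lookups above pick their bucket with kvmppc_mmu_hash_pte() /
 * kvmppc_mmu_hash_vpte().  A plausible shape for those helpers, built on
 * the kernel's hash_64() from <linux/hash.h>; the exact shift and the
 * HPTEG_HASH_BITS_* bucket counts are assumptions:
 */
static inline u64 kvmppc_mmu_hash_pte(u64 eaddr)
{
        /* Page-offset bits carry no information; hash the page number. */
        return hash_64(eaddr >> 12, HPTEG_HASH_BITS_PTE);
}

static inline u64 kvmppc_mmu_hash_vpte(u64 vpage)
{
        /* Hash only the bits the short flush compares against. */
        return hash_64(vpage & 0xfffffffffULL, HPTEG_HASH_BITS_VPTE);
}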
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        struct hpte_cache *pte;
        int i;

        trace_kvm_book3s_mmu_flush("p", vcpu, pa_start, pa_end);

        rcu_read_lock();

        for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
                struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];

                hlist_for_each_entry_rcu(pte, list, list_vpte_long)
                        if ((pte->pte.raddr >= pa_start) &&
                            (pte->pte.raddr < pa_end))
                                invalidate_pte(vcpu, pte);
        }

        rcu_read_unlock();
}
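/*
 * Usage sketch: a caller invalidating the shadow entries for one guest
 * physical page would bound the range to that page.  PAGE_SIZE/PAGE_MASK
 * are the usual kernel constants; the helper name and gpa parameter are
 * hypothetical:
 */
static void flush_one_gpa_sketch(struct kvm_vcpu *vcpu, ulong gpa)
{
        ulong pa_start = gpa & PAGE_MASK;

        kvmppc_mmu_pte_pflush(vcpu, pa_start, pa_start + PAGE_SIZE);
}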