/*
 * Insert a shadow-PTE cache entry into every lookup hash.
 *
 * The entry is made reachable via four hashes: by effective address
 * (short and long buckets) and by virtual page (short and long buckets),
 * so later flushes can find it from either direction.  All insertions
 * happen under the per-vcpu mmu_lock; readers walk the lists under RCU.
 */
void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

	trace_kvm_book3s_mmu_map(pte);

	spin_lock(&vcpu3s->mmu_lock);

	/* ePTE hashes: keyed on the guest effective address */
	hlist_add_head_rcu(&pte->list_pte,
			   &vcpu3s->hpte_hash_pte[kvmppc_mmu_hash_pte(pte->pte.eaddr)]);
	hlist_add_head_rcu(&pte->list_pte_long,
			   &vcpu3s->hpte_hash_pte_long[kvmppc_mmu_hash_pte_long(pte->pte.eaddr)]);

	/* vPTE hashes: keyed on the guest virtual page */
	hlist_add_head_rcu(&pte->list_vpte,
			   &vcpu3s->hpte_hash_vpte[kvmppc_mmu_hash_vpte(pte->pte.vpage)]);
	hlist_add_head_rcu(&pte->list_vpte_long,
			   &vcpu3s->hpte_hash_vpte_long[kvmppc_mmu_hash_vpte_long(pte->pte.vpage)]);

	spin_unlock(&vcpu3s->mmu_lock);
}
/* Example #2 */
/*
 * Invalidate cached shadow PTEs whose virtual page matches guest_vp
 * under the short mask 0xfffffffff.
 *
 * Only the vPTE hash bucket that guest_vp maps to can contain matches,
 * so a single bucket walk suffices.  The _safe iterator is used because
 * invalidate_pte() unlinks the entry we are standing on.
 */
static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	const u64 vp_mask = 0xfffffffffULL;
	struct hlist_head *bucket;
	struct hlist_node *pos, *next;
	struct hpte_cache *entry;

	bucket = &vcpu->arch.hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];

	/* Invalidate every entry whose masked vpage matches. */
	hlist_for_each_entry_safe(entry, pos, next, bucket, list_vpte) {
		if ((entry->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, entry);
	}
}
/* Example #3 */
/*
 * Insert a shadow-PTE cache entry into the vcpu's lookup hashes.
 *
 * The entry becomes reachable by effective address and by virtual page
 * (short and long vPTE buckets), so later flushes can locate it from
 * either key.  NOTE(review): no locking visible here — presumably the
 * caller serializes access to these lists; confirm against callers.
 */
void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	/* ePTE hash: keyed on the guest effective address */
	hlist_add_head(&pte->list_pte,
		       &vcpu->arch.hpte_hash_pte[kvmppc_mmu_hash_pte(pte->pte.eaddr)]);

	/* vPTE hashes: keyed on the guest virtual page */
	hlist_add_head(&pte->list_vpte,
		       &vcpu->arch.hpte_hash_vpte[kvmppc_mmu_hash_vpte(pte->pte.vpage)]);
	hlist_add_head(&pte->list_vpte_long,
		       &vcpu->arch.hpte_hash_vpte_long[kvmppc_mmu_hash_vpte_long(pte->pte.vpage)]);
}
/*
 * Invalidate cached shadow PTEs whose virtual page matches guest_vp
 * under the short mask 0xfffffffff.
 *
 * Only the vPTE hash bucket guest_vp maps to can hold matches, so a
 * single bucket is walked.  The walk runs under rcu_read_lock(), as
 * entries are published with the RCU list primitives.
 */
static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	const u64 vp_mask = 0xfffffffffULL;
	struct hlist_head *bucket;
	struct hpte_cache *entry;

	bucket = &vcpu3s->hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];

	rcu_read_lock();

	/* Invalidate every entry whose masked vpage matches. */
	hlist_for_each_entry_rcu(entry, bucket, list_vpte) {
		if ((entry->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, entry);
	}

	rcu_read_unlock();
}