/* Verify that a mapped spte's gfn still has a memslot and a reverse-map entry. */
static void inspect_spte_has_rmap(struct vm *pvm, u64 *sptep)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
	unsigned long *rmapp;
	struct vmmr0_mmu_page *rev_sp;
	gfn_t gfn;

	rev_sp = page_header(__pa(sptep));
#ifdef HOST_LINUX_OPTIMIZED
	gfn = vmmr0_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);
#else
	gfn = vmmr0_mmu_page_get_gfn(rev_sp,
				     (u64 *)__pa(sptep) - (u64 *)__pa(rev_sp->spt));
#endif

	if (!mmu_gfn_to_memslot(vmmr0, gfn)) {
		if (!__ratelimit(&ratelimit_state))
			return;
		audit_printk(vmmr0, "no memslot for gfn %llx\n", gfn);
		audit_printk(vmmr0, "index %ld of sp (gfn=%llx)\n",
			     (long int)(sptep - rev_sp->spt), rev_sp->gfn);
		dump_stack();
		return;
	}

	rmapp = gfn_to_rmap(vmmr0, gfn, rev_sp->role.level);
	if (!*rmapp) {
		if (!__ratelimit(&ratelimit_state))
			return;
		audit_printk(vmmr0, "no rmap for writable spte %llx\n",
			     *sptep);
		dump_stack();
	}
}
/* Verify that a present leaf spte points at the host page currently backing its gfn. */
static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
	struct kvm_mmu_page *sp;
	gfn_t gfn;
	pfn_t pfn;
	hpa_t hpa;

	sp = page_header(__pa(sptep));

	if (sp->unsync) {
		if (level != PT_PAGE_TABLE_LEVEL) {
			audit_printk(vcpu->kvm, "unsync sp: %p "
				     "level = %d\n", sp, level);
			return;
		}
	}

	if (!is_shadow_present_pte(*sptep) || !is_last_spte(*sptep, level))
		return;

	gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
	pfn = gfn_to_pfn_atomic(vcpu->kvm, gfn);

	if (is_error_pfn(pfn)) {
		kvm_release_pfn_clean(pfn);
		return;
	}

	hpa = pfn << PAGE_SHIFT;
	if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)
		audit_printk(vcpu->kvm, "levels %d pfn %llx hpa %llx "
			     "ent %llx\n", vcpu->arch.mmu.root_level, pfn,
			     hpa, *sptep);
}
static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
{
	unsigned long *rmapp;
	struct kvm_mmu_page *rev_sp;
	gfn_t gfn;

	rev_sp = page_header(__pa(sptep));
	gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);

	if (!gfn_to_memslot(kvm, gfn)) {
		if (!printk_ratelimit())
			return;
		audit_printk(kvm, "no memslot for gfn %llx\n", gfn);
		audit_printk(kvm, "index %ld of sp (gfn=%llx)\n",
			     (long int)(sptep - rev_sp->spt), rev_sp->gfn);
		dump_stack();
		return;
	}

	rmapp = gfn_to_rmap(kvm, gfn, rev_sp->role.level);
	if (!*rmapp) {
		if (!printk_ratelimit())
			return;
		audit_printk(kvm, "no rmap for writable spte %llx\n",
			     *sptep);
		dump_stack();
	}
}
static void audit_spte_after_sync(struct vmmr0_vcpu *vcpu, u64 *sptep, int level)
{
	struct vmmr0_mmu_page *sp = page_header(__pa(sptep));

	if (vcpu->vmmr0->arch.audit_point == AUDIT_POST_SYNC && sp->unsync)
		audit_printk(vcpu->vmmr0, "meet unsync sp(%p) after sync "
			     "root.\n", sp);
}
/* A synced, indirect, valid shadow page must be write-protected: no writable sptes may map its gfn. */
static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	unsigned long *rmapp;
	u64 *sptep;
	struct rmap_iterator iter;

	if (sp->role.direct || sp->unsync || sp->role.invalid)
		return;

	rmapp = gfn_to_rmap(kvm, sp->gfn, PT_PAGE_TABLE_LEVEL);

	for (sptep = rmap_get_first(*rmapp, &iter); sptep;
	     sptep = rmap_get_next(&iter)) {
		if (is_writable_pte(*sptep))
			audit_printk(kvm, "shadow page has writable "
				     "mappings: gfn %llx role %x\n",
				     sp->gfn, sp->role.word);
	}
}
static void audit_write_protection(struct vm *pvm, struct vmmr0_mmu_page *sp)
{
	struct vmmr0_memory_slot *slot;
	unsigned long *rmapp;
	u64 *spte;

	if (sp->role.direct || sp->unsync || sp->role.invalid)
		return;

	slot = mmu_gfn_to_memslot(vmmr0, sp->gfn);
	rmapp = &slot->rmap[sp->gfn - slot->base_gfn];

	spte = rmap_next(rmapp, NULL);
	while (spte) {
		if (is_writable_pte(*spte))
			audit_printk(vmmr0, "shadow page has writable "
				     "mappings: gfn %llx role %x\n",
				     sp->gfn, sp->role.word);
		spte = rmap_next(rmapp, spte);
	}
}
static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	struct kvm_memory_slot *slot;
	unsigned long *rmapp;
	u64 *sptep;
	struct rmap_iterator iter;

	if (sp->role.direct || sp->unsync || sp->role.invalid)
		return;

	slot = gfn_to_memslot(kvm, sp->gfn);
	rmapp = &slot->rmap[sp->gfn - slot->base_gfn];

	for (sptep = rmap_get_first(*rmapp, &iter); sptep;
	     sptep = rmap_get_next(&iter)) {
		if (is_writable_pte(*sptep))
			audit_printk(kvm, "shadow page has writable "
				     "mappings: gfn %llx role %x\n",
				     sp->gfn, sp->role.word);
	}
}
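/*
 * Minimal sketch, not part of the functions above: one way the per-spte
 * checks could be chained into a single callback that a shadow-page-table
 * walker invokes for every spte, in the style of KVM's mmu_audit code.
 * The name audit_spte and the walker that would call it are assumptions
 * here; audit_spte_after_sync (shown above only in its vmmr0 form) would
 * be chained the same way once a kvm_vcpu counterpart exists.
 */
static void audit_spte(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
	/* every present leaf spte should have a memslot and an rmap entry */
	if (is_shadow_present_pte(*sptep) && is_last_spte(*sptep, level))
		inspect_spte_has_rmap(vcpu->kvm, sptep);

	/* a present leaf spte must still point at the page backing its gfn */
	audit_mappings(vcpu, sptep, level);
}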