/*
 * Check that the gfn mapped by this spte has a memslot and a non-empty
 * rmap; complain (ratelimited) and dump a stack trace otherwise.
 */
static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
{
	unsigned long *rmapp;
	struct kvm_mmu_page *rev_sp;
	gfn_t gfn;

	rev_sp = page_header(__pa(sptep));
	gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);

	if (!gfn_to_memslot(kvm, gfn)) {
		if (!printk_ratelimit())
			return;
		audit_printk(kvm, "no memslot for gfn %llx\n", gfn);
		audit_printk(kvm, "index %ld of sp (gfn=%llx)\n",
			     (long int)(sptep - rev_sp->spt), rev_sp->gfn);
		dump_stack();
		return;
	}

	rmapp = gfn_to_rmap(kvm, gfn, rev_sp->role.level);
	if (!*rmapp) {
		if (!printk_ratelimit())
			return;
		audit_printk(kvm, "no rmap for writable spte %llx\n",
			     *sptep);
		dump_stack();
	}
}
/*
 * vmmr0-fork variant of the same check: it keeps its own ratelimit state
 * instead of using printk_ratelimit(), and the gfn lookup differs for
 * non-Linux-optimized host builds.
 */
static void inspect_spte_has_rmap(struct vm *pvm, u64 *sptep)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
	unsigned long *rmapp;
	struct vmmr0_mmu_page *rev_sp;
	gfn_t gfn;

	rev_sp = page_header(__pa(sptep));
#ifdef HOST_LINUX_OPTIMIZED
	gfn = vmmr0_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);
#else
	gfn = vmmr0_mmu_page_get_gfn(rev_sp,
				     (u64 *)__pa(sptep) - (u64 *)__pa(rev_sp->spt));
#endif

	if (!mmu_gfn_to_memslot(pvm, gfn)) {
		if (!__ratelimit(&ratelimit_state))
			return;
		audit_printk(pvm, "no memslot for gfn %llx\n", gfn);
		audit_printk(pvm, "index %ld of sp (gfn=%llx)\n",
			     (long int)(sptep - rev_sp->spt), rev_sp->gfn);
		dump_stack();
		return;
	}

	rmapp = gfn_to_rmap(pvm, gfn, rev_sp->role.level);
	if (!*rmapp) {
		if (!__ratelimit(&ratelimit_state))
			return;
		audit_printk(pvm, "no rmap for writable spte %llx\n", *sptep);
		dump_stack();
	}
}
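/*
 * Illustrative sketch, not part of the listings above: in upstream KVM the
 * rmap check is typically driven from a per-spte walk over the shadow page
 * tables.  The callback name audit_sptes_have_rmaps and its hook-up to the
 * walker are assumptions for this sketch; the point is only that present,
 * last-level sptes are the ones expected to appear in the rmap.
 */
static void audit_sptes_have_rmaps(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
	/* Only present leaf sptes need a corresponding rmap entry. */
	if (is_shadow_present_pte(*sptep) && is_last_spte(*sptep, level))
		inspect_spte_has_rmap(vcpu->kvm, sptep);
}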
/*
 * A synced, valid, indirect shadow page's gfn must stay write-protected:
 * no last-level spte mapping that gfn may be writable.
 */
static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	unsigned long *rmapp;
	u64 *sptep;
	struct rmap_iterator iter;

	if (sp->role.direct || sp->unsync || sp->role.invalid)
		return;

	rmapp = gfn_to_rmap(kvm, sp->gfn, PT_PAGE_TABLE_LEVEL);

	for (sptep = rmap_get_first(*rmapp, &iter); sptep;
	     sptep = rmap_get_next(&iter)) {
		if (is_writable_pte(*sptep))
			audit_printk(kvm, "shadow page has writable "
				     "mappings: gfn %llx role %x\n",
				     sp->gfn, sp->role.word);
	}
}
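/*
 * Illustrative sketch, not taken from the listing: a per-shadow-page audit
 * like audit_write_protection() is usually applied to every active shadow
 * page through a small walker.  The sp_handler typedef, walk_all_active_sps()
 * and the use of kvm->arch.active_mmu_pages with the sp->link list member are
 * assumptions for this sketch, not code from the section above.
 */
typedef void (*sp_handler)(struct kvm *kvm, struct kvm_mmu_page *sp);

static void walk_all_active_sps(struct kvm *kvm, sp_handler fn)
{
	struct kvm_mmu_page *sp;

	/* Visit every shadow page currently tracked by the MMU. */
	list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link)
		fn(kvm, sp);
}

static void audit_all_active_sps(struct kvm *kvm)
{
	walk_all_active_sps(kvm, audit_write_protection);
}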