/*
 * Audit check: a shadow page that is supposed to be write-protected must
 * not have any writable sptes left in its rmap chain.  Direct, unsync and
 * invalid pages are exempt, since those are allowed writable mappings.
 */
static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	unsigned long *rmapp;
	struct rmap_iterator iter;
	u64 *sptep;

	/* Pages in these states legitimately carry writable mappings. */
	if (sp->role.direct || sp->unsync || sp->role.invalid)
		return;

	rmapp = gfn_to_rmap(kvm, sp->gfn, PT_PAGE_TABLE_LEVEL);

	/* Walk every spte mapping this gfn and report writable ones. */
	sptep = rmap_get_first(*rmapp, &iter);
	while (sptep) {
		if (is_writable_pte(*sptep))
			audit_printk(kvm, "shadow page has writable "
				     "mappings: gfn %llx role %x\n",
				     sp->gfn, sp->role.word);
		sptep = rmap_get_next(&iter);
	}
}
static void audit_write_protection(struct vm *pvm, struct vmmr0_mmu_page *sp) { struct vmmr0_memory_slot *slot; unsigned long *rmapp; u64 *spte; if (sp->role.direct || sp->unsync || sp->role.invalid) return; slot = mmu_gfn_to_memslot(vmmr0, sp->gfn); rmapp = &slot->rmap[sp->gfn - slot->base_gfn]; spte = rmap_next(rmapp, NULL); while (spte) { if (is_writable_pte(*spte)) audit_printk(vmmr0, "shadow page has writable " "mappings: gfn %llx role %x\n", sp->gfn, sp->role.word); spte = rmap_next(rmapp, spte); } }
static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp) { struct kvm_memory_slot *slot; unsigned long *rmapp; u64 *sptep; struct rmap_iterator iter; if (sp->role.direct || sp->unsync || sp->role.invalid) return; slot = gfn_to_memslot(kvm, sp->gfn); rmapp = &slot->rmap[sp->gfn - slot->base_gfn]; for (sptep = rmap_get_first(*rmapp, &iter); sptep; sptep = rmap_get_next(&iter)) { if (is_writable_pte(*sptep)) audit_printk(kvm, "shadow page has writable " "mappings: gfn %llx role %x\n", sp->gfn, sp->role.word); } }