static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
                        phys_addr_t start, u64 size)
{
        pgd_t *pgd;
        phys_addr_t addr = start, end = start + size;
        phys_addr_t next;

        pgd = pgdp + kvm_pgd_index(addr);
        do {
                next = kvm_pgd_addr_end(addr, end);
                unmap_puds(kvm, pgd, addr, next);
        } while (pgd++, addr = next, addr != end);
}
static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
                        phys_addr_t start, u64 size)
{
        pgd_t *pgd;
        phys_addr_t addr = start, end = start + size;
        phys_addr_t next;

        pgd = pgdp + kvm_pgd_index(addr);
        do {
                next = kvm_pgd_addr_end(addr, end);
                if (!pgd_none(*pgd))
                        unmap_puds(kvm, pgd, addr, next);
                /*
                 * If we are dealing with a large range in the stage-2
                 * table, release the kvm->mmu_lock to prevent
                 * starvation and lockup detector warnings.
                 */
                if (kvm && (next != end))
                        cond_resched_lock(&kvm->mmu_lock);
        } while (pgd++, addr = next, addr != end);
}
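To make the locking pattern concrete, here is a minimal userspace sketch of the same idea: a long loop that holds a lock periodically drops and reacquires it so that other threads contending for the same lock are not starved. Everything in it (the reclaimer/worker thread names, NR_ENTRIES, RESCHED_INTERVAL) is invented for illustration and is not part of the kernel code; a pthread mutex stands in for the kvm->mmu_lock spinlock, and the unlock/lock pair plays the role of cond_resched_lock(). Note that the real cond_resched_lock() also yields the CPU if a reschedule is pending; the sketch only models the lock-release side.

/*
 * Illustrative userspace analogue of the cond_resched_lock() pattern above.
 * All names and constants here are made up for this sketch.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_ENTRIES        (1 << 20)
#define RESCHED_INTERVAL  4096

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned char table[NR_ENTRIES];
static bool reclaim_done;

/* Long-running teardown, loosely analogous to unmap_range() walking a table. */
static void *reclaimer(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&table_lock);
        for (long i = 0; i < NR_ENTRIES; i++) {
                table[i] = 0;   /* "unmap" one entry */

                /*
                 * Periodically drop and retake the lock, like
                 * cond_resched_lock(&kvm->mmu_lock), so that threads
                 * waiting on table_lock get a chance to run.
                 */
                if ((i % RESCHED_INTERVAL) == 0 && i + 1 < NR_ENTRIES) {
                        pthread_mutex_unlock(&table_lock);
                        pthread_mutex_lock(&table_lock);
                }
        }
        reclaim_done = true;
        pthread_mutex_unlock(&table_lock);
        return NULL;
}

/* A contending thread that needs the same lock for short critical sections. */
static void *worker(void *arg)
{
        long progress = 0;
        (void)arg;
        for (;;) {
                pthread_mutex_lock(&table_lock);
                bool done = reclaim_done;
                pthread_mutex_unlock(&table_lock);
                progress++;
                if (done)
                        break;
        }
        printf("worker completed %ld short critical sections\n", progress);
        return NULL;
}

int main(void)
{
        pthread_t t1, t2;

        pthread_create(&t1, NULL, reclaimer, NULL);
        pthread_create(&t2, NULL, worker, NULL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        return 0;
}

Build with -pthread; without the periodic unlock/lock pair in reclaimer(), the worker thread would be blocked on table_lock for the entire teardown, which is exactly the starvation the kvm check above avoids.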