/**
 * pmap_traverse_present_mappings
 *
 * Scan the virtual range [start, end) in page-size steps and invoke
 * `callback` once for each maximal run of consecutive virtual pages
 * that map valid physical pages.  The callback receives the run's
 * [start, end) virtual bounds plus the caller's `context` pointer.
 *
 * Assumes pmap is locked, or being called from the kernel debugger.
 *
 * Returns KERN_SUCCESS, KERN_INVALID_ARGUMENT for a bad range, or the
 * first non-success value returned by the callback (which stops the
 * traversal early).
 */
int
pmap_traverse_present_mappings(pmap_t pmap, vm_map_offset_t start,
    vm_map_offset_t end, pmap_traverse_callback callback, void *context)
{
	int ret = KERN_SUCCESS;
	vm_map_offset_t vcurstart, vcur;
	boolean_t lastvavalid = FALSE;

	if (start > end) {
		return (KERN_INVALID_ARGUMENT);
	}

	/*
	 * Both bounds must be page aligned.  Previously only `start` was
	 * checked, which let a misaligned `end` produce a final run that
	 * extended past `end` to the next page boundary.
	 */
	if ((start | end) & PAGE_MASK_64) {
		return (KERN_INVALID_ARGUMENT);
	}

	for (vcur = vcurstart = start; (ret == KERN_SUCCESS) && (vcur < end); ) {
		ppnum_t ppn = pmap_find_phys(pmap, vcur);

		if (ppn != 0 && !pmap_valid_page(ppn)) {
			/* Mapped, but not a page we track: treat as absent. */
			ppn = 0;
		}

		if (ppn != 0) {
			if (!lastvavalid) {
				/* Start of a new virtual region */
				vcurstart = vcur;
				lastvavalid = TRUE;
			}
		} else {
			if (lastvavalid) {
				/* End of a virtual region: report it. */
				ret = callback(vcurstart, vcur, context);
				lastvavalid = FALSE;
			}
		}
		vcur += PAGE_SIZE;
	}

	if ((ret == KERN_SUCCESS) && lastvavalid) {
		/* The range ended inside a region; send the final run. */
		ret = callback(vcurstart, vcur, context);
	}

	return (ret);
}
/**
 * pmap_verify_free
 *
 * Look at the page and verify that it is free.
 *
 * Returns TRUE when the page should be considered free:
 *  - before the pmap layer is initialized no pv tracking exists, so
 *    every page is reported free;
 *  - pages outside the managed range are likewise reported free.
 *
 * NOTE(review): this is currently a stub — see the comment at the
 * final return.  The computed `result` is deliberately discarded.
 */
boolean_t
pmap_verify_free(ppnum_t pa)
{
	pv_entry_t pv_h;
	int pai;
	boolean_t result;

	assert(pa != vm_page_fictitious_addr);

	/* No pv tracking yet: nothing can be mapped, report free. */
	if(!pmap_initialized)
		return TRUE;

	/* Unmanaged pages are not tracked by pv lists; report free. */
	if(!pmap_valid_page(pa))
		return TRUE;

	pai = pa_index(pa);
	pv_h = pai_to_pvh(pai);

	/* A managed page is free iff no pmap currently maps it. */
	result = (pv_h->pmap == PMAP_NULL);

	/*
	 * Intentionally ignore `result` for now: until pmap_remove is
	 * implemented, pv entries are never torn down, so the computed
	 * value would spuriously report in-use pages.
	 */
	return TRUE; /* result, since pmap_remove is not done yet */
}
/**
 * pmap_zero_page
 *
 * Fill a single physical page with zero bytes, addressing it through
 * the kernel's virtual alias of the frame.
 */
void
pmap_zero_page(ppnum_t p)
{
	/* Fictitious or unmanaged frames must never reach this path. */
	assert(p != vm_page_fictitious_addr);
	assert(pmap_valid_page(p));

	bzero(phys_to_virt(p), PAGE_SIZE);
}