static void
less_pages(uint64_t base, uint64_t len)
{
	uint64_t pa, end = base + len;
	extern int kcage_on;

	for (pa = base; pa < end; pa += PAGESIZE) {
		pfn_t pfnum;
		page_t *pp;

		pfnum = (pfn_t)(pa >> PAGESHIFT);
		if ((pp = page_numtopp_nolock(pfnum)) == NULL)
			cmn_err(CE_PANIC, "missing pfnum %lx", pfnum);

		/*
		 * must break up any large pages that may have
		 * constituent pages being utilized for
		 * prom_alloc()'s. page_reclaim() can't handle
		 * large pages.
		 */
		if (pp->p_szc != 0)
			page_boot_demote(pp);

		if (!PAGE_LOCKED(pp) && pp->p_lckcnt == 0) {
			/*
			 * Ahhh yes, a prom page,
			 * suck it off the freelist,
			 * lock it, and hashin on prom_pages vp.
			 */
			if (page_trylock(pp, SE_EXCL) == 0)
				cmn_err(CE_PANIC, "prom page locked");

			(void) page_reclaim(pp, NULL);
			/*
			 * vnode offsets on the prom_ppages vnode
			 * are page numbers (gack) for >32 bit
			 * physical memory machines.
			 */
			(void) page_hashin(pp, &promvp,
			    (offset_t)pfnum, NULL);

			if (kcage_on) {
				ASSERT(pp->p_szc == 0);
				if (PP_ISNORELOC(pp) == 0) {
					PP_SETNORELOC(pp);
					PLCNT_XFER_NORELOC(pp);
				}
			}
			(void) page_pp_lock(pp, 0, 1);
		}
	}
}
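/*
 * Hedged sketch (not part of the source above): one plausible way to
 * drive less_pages() is to walk a memlist of physical ranges that the
 * boot program retained and sweep each one. The ml_address/ml_size/
 * ml_next fields follow the common <sys/memlist.h> layout; the helper
 * name and the walk itself are illustrative assumptions, not the
 * actual caller in this file.
 */
static void
reclaim_prom_ranges(struct memlist *retained)
{
	struct memlist *ml;

	for (ml = retained; ml != NULL; ml = ml->ml_next) {
		/* Each entry is a physical range to sweep for prom pages. */
		less_pages(ml->ml_address, ml->ml_size);
	}
}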
void
boot_mapin(caddr_t addr, size_t size)
{
	caddr_t eaddr;
	page_t *pp;
	pfn_t pfnum;

	if (page_resv(btop(size), KM_NOSLEEP) == 0)
		panic("boot_mapin: page_resv failed");

	for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
		pfnum = va_to_pfn(addr);
		if (pfnum == PFN_INVALID)
			continue;
		if ((pp = page_numtopp_nolock(pfnum)) == NULL)
			panic("boot_mapin(): No pp for pfnum = %lx", pfnum);

		/*
		 * Must break up any large pages that may have
		 * constituent pages being utilized for BOP_ALLOC()'s
		 * before calling page_numtopp(). The locking code
		 * (i.e. page_reclaim()) can't handle them.
		 */
		if (pp->p_szc != 0)
			page_boot_demote(pp);

		pp = page_numtopp(pfnum, SE_EXCL);
		if (pp == NULL || PP_ISFREE(pp))
			panic("boot_mapin: pp is NULL or free");

		/*
		 * If the cage is on but doesn't yet contain this page,
		 * mark it as non-relocatable.
		 */
		if (kcage_on && !PP_ISNORELOC(pp)) {
			PP_SETNORELOC(pp);
			PLCNT_XFER_NORELOC(pp);
		}

		(void) page_hashin(pp, &kvp, (u_offset_t)(uintptr_t)addr,
		    NULL);
		pp->p_lckcnt = 1;
#if defined(__x86)
		page_downgrade(pp);
#else
		page_unlock(pp);
#endif
	}
}
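/*
 * Hedged sketch of a typical caller: a boot_alloc()-style wrapper that
 * asks the boot program for pages via BOP_ALLOC() and then uses
 * boot_mapin() to lock and hash in the backing pages. BOP_ALLOC() and
 * the global bootops are the standard boot-ops interface; the wrapper
 * name and its panic text are illustrative assumptions.
 */
static caddr_t
boot_alloc_sketch(caddr_t hint, size_t size, uint_t align)
{
	caddr_t va;

	size = P2ROUNDUP(size, PAGESIZE);	/* whole pages only */
	va = BOP_ALLOC(bootops, hint, size, align);
	if (va == NULL)
		panic("boot_alloc_sketch: BOP_ALLOC failed");

	/* Take ownership of the pages: lock them and hash in on kvp. */
	boot_mapin(va, size);
	return (va);
}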
/*
 * Act like page_destroy(), but instead of freeing the page, hash it onto
 * the retired_pages vnode, and mark it retired.
 *
 * For fun, we try to scrub the page until it's squeaky clean.
 * availrmem is adjusted here.
 */
static void
page_retire_destroy(page_t *pp)
{
	u_offset_t off = (u_offset_t)((uintptr_t)pp);

	ASSERT(PAGE_EXCL(pp));
	ASSERT(!PP_ISFREE(pp));
	ASSERT(pp->p_szc == 0);
	ASSERT(!hat_page_is_mapped(pp));
	ASSERT(!pp->p_vnode);

	page_clr_all_props(pp);
	pagescrub(pp, 0, MMU_PAGESIZE);

	pp->p_next = NULL;
	pp->p_prev = NULL;
	if (page_hashin(pp, retired_pages, off, NULL) == 0) {
		cmn_err(CE_PANIC, "retired page %p hashin failed",
		    (void *)pp);
	}

	page_settoxic(pp, PR_RETIRED);
	page_clrtoxic(pp, PR_BUSY);
	page_retire_dequeue(pp);
	PR_INCR_KSTAT(pr_retired);

	if (pp->p_toxic & PR_FMA) {
		PR_INCR_KSTAT(pr_fma);
	} else if (pp->p_toxic & PR_UE) {
		PR_INCR_KSTAT(pr_ue);
	} else {
		PR_INCR_KSTAT(pr_mce);
	}

	mutex_enter(&freemem_lock);
	availrmem--;
	mutex_exit(&freemem_lock);

	page_unlock(pp);
}
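/*
 * Hedged sketch (illustrative, not from this file): a page parked by
 * page_retire_destroy() can later be recognized by its identity. The
 * PR_RETIRED test mirrors the page_settoxic() call above; the helper
 * name is hypothetical, and the unlocked peek at p_vnode/p_toxic is an
 * assumption suitable only for an advisory check.
 */
static int
page_is_retired_sketch(pfn_t pfn)
{
	page_t *pp = page_numtopp_nolock(pfn);

	if (pp == NULL)
		return (0);

	/* Retired pages are hashed onto retired_pages with PR_RETIRED set. */
	return (pp->p_vnode == retired_pages &&
	    (pp->p_toxic & PR_RETIRED) != 0);
}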