/* Queue (or immediately apply) a protection change for the range [p, p+len).
   Which strategy is used depends on the build configuration:
     - USE_BLOCK_CACHE: forward the request to the block cache, which queues
       it per block (`type` and `src_block` are only consumed on this path);
     - otherwise, on non-Windows/non-OSKit builds: batch the request into the
       MMU's page range, to be applied later in one pass (see page_range_flush);
     - otherwise: change the OS page protection right away.
   Both `p` and `len` must be OS-page aligned (asserted). */
static void mmu_queue_protect_range(MMU *mmu, void *p, size_t len, int type, int writeable, void **src_block) {
  mmu_assert_os_page_aligned(mmu, (size_t)p);
  mmu_assert_os_page_aligned(mmu, len);
#ifdef USE_BLOCK_CACHE
  block_cache_queue_protect_range(mmu->block_cache, p, len, type, writeable, src_block);
#elif !( defined(_WIN32) || defined(OSKIT) )
  /* NOTE(review): `type` and `src_block` are unused on this path — presumably
     only meaningful to the block cache; confirm against other configurations. */
  page_range_add(mmu->page_range, p, len, writeable);
#else
  os_protect_pages(p, len, writeable);
#endif
}
/* Apply every queued protection change recorded in `pr` in a single pass:
   first compact the queued ranges, then set each resulting range to the
   requested `writeable` state, and finally clear the queue. */
static void page_range_flush(Page_Range *pr, int writeable)
{
  Range *r;

  page_range_compact(pr);

  r = pr->range_start;
  while (r) {
    os_protect_pages((void *)r->start, r->len, writeable);
    GC_MP_CNT_INC(mp_pr_call_cnt); /* memory-protection call counter */
    r = r->next;
  }

  page_range_reset(pr);
}
/* Immediately make the OS pages covering [p, p+len) writeable.
   Both `p` and `len` must be OS-page aligned (asserted). */
static void mmu_write_unprotect_page(MMU *mmu, void *p, size_t len)
{
  mmu_assert_os_page_aligned(mmu, (size_t)p);
  mmu_assert_os_page_aligned(mmu, len);

  os_protect_pages(p, len, 1); /* 1 => writeable */
}