static void __init init_free_pfn_range(unsigned long start, unsigned long end)
{
        unsigned long pfn;
        struct page *page = pfn_to_page(start);

        for (pfn = start; pfn < end; ) {
                /* Optimize by freeing pages in large batches */
                int order = __ffs(pfn);
                int count, i;
                struct page *p;

                if (order >= MAX_ORDER)
                        order = MAX_ORDER - 1;
                count = 1 << order;
                while (pfn + count > end) {
                        count >>= 1;
                        --order;
                }
                for (p = page, i = 0; i < count; ++i, ++p) {
                        __ClearPageReserved(p);
                        /*
                         * Hacky direct set to avoid unnecessary
                         * lock take/release for EVERY page here.
                         */
                        p->_count.counter = 0;
                        p->_mapcount.counter = -1;
                }
                init_page_count(page);
                __free_pages(page, order);
                totalram_pages += count;

                page += count;
                pfn += count;
        }
}
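/*
 * A minimal, self-contained userspace sketch (my addition, not from the
 * kernel tree) of the batch-size computation above: for each pfn, the
 * largest power-of-two block is bounded by the pfn's alignment (the
 * __ffs() result), by MAX_ORDER, and by the pages remaining in the
 * range. Build with gcc or clang to watch the batch sequence. Note that
 * start must be nonzero, as in the kernel, since ctz(0) is undefined.
 */
#include <stdio.h>

#define MAX_ORDER 11                            /* historical kernel default */

int main(void)
{
        unsigned long start = 0x21, end = 0x80; /* arbitrary demo range */
        unsigned long pfn;

        for (pfn = start; pfn < end; ) {
                /* __builtin_ctzl() plays the role of the kernel's __ffs() */
                int order = __builtin_ctzl(pfn);
                int count;

                if (order >= MAX_ORDER)
                        order = MAX_ORDER - 1;
                count = 1 << order;
                /* Shrink the batch so it never overruns the range */
                while (pfn + count > end) {
                        count >>= 1;
                        --order;
                }
                printf("would free pfn 0x%lx, order %d (%d pages)\n",
                       pfn, order, count);
                pfn += count;
        }
        return 0;
}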
static void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
#ifdef CONFIG_HOMECACHE
        int home = initial_heap_home();
#endif
        unsigned long addr;

        if (kdata_huge && !initfree) {
                pr_warning("Warning: ignoring initfree=0: incompatible with kdata=huge\n");
                initfree = 1;
        }
        end = (end + PAGE_SIZE - 1) & PAGE_MASK;
        local_flush_tlb_pages(NULL, begin, PAGE_SIZE, end - begin);
        for (addr = begin; addr < end; addr += PAGE_SIZE) {
                /*
                 * Note we just reset the home here directly in the
                 * page table.  We know this is safe because our caller
                 * just flushed the caches on all the other cpus,
                 * and they won't be touching any of these pages.
                 */
                int pfn = kaddr_to_pfn((void *)addr);
                struct page *page = pfn_to_page(pfn);
                pte_t *ptep = virt_to_pte(NULL, addr);

                if (!initfree) {
                        /*
                         * If debugging page accesses, do not free
                         * this memory but mark it not present - any
                         * buggy init-section access will create a
                         * kernel page fault:
                         */
                        pte_clear(&init_mm, addr, ptep);
                        continue;
                }
#ifdef CONFIG_HOMECACHE
                set_page_home(page, home);
                __clear_bit(PG_homecache_nomigrate, &page->flags);
#endif
                __ClearPageReserved(page);
                init_page_count(page);
                if (pte_huge(*ptep))
                        BUG_ON(!kdata_huge);
                else
                        set_pte_at(&init_mm, addr, ptep,
                                   pfn_pte(pfn, PAGE_KERNEL));
                memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
        pr_info("Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}
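/*
 * Hedged userspace analogy (my addition; assumes a POSIX-ish system
 * with MAP_ANONYMOUS) for the initfree=0 debugging path above: instead
 * of recycling the memory, revoke access so any late reference traps
 * immediately. mprotect(PROT_NONE) stands in for pte_clear(); error
 * handling is omitted for brevity.
 */
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static void on_fault(int sig)
{
        /* The stale access below lands here, like a kernel page fault. */
        (void)sig;
        write(2, "faulted as expected\n", 20);
        _exit(0);
}

int main(void)
{
        long page = sysconf(_SC_PAGESIZE);
        volatile char *init_mem = mmap(NULL, page, PROT_READ | PROT_WRITE,
                                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        signal(SIGSEGV, on_fault);
        init_mem[0] = 1;                        /* fine while "init" is live */
        mprotect((void *)init_mem, page, PROT_NONE); /* the pte_clear() analogue */
        init_mem[0] = 2;                        /* traps instead of corrupting */
        fprintf(stderr, "unreachable\n");
        return 1;
}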
unsigned long __init prom_free_prom_memory(void)
{
        unsigned long freed = 0;
        unsigned long addr;
        int i;
#ifdef CONFIG_REALTEK_RECLAIM_BOOT_MEM
        unsigned long dest;
        struct page *page;
        int count;
#endif

        for (i = 0; i < boot_mem_map.nr_map; i++) {
                if (boot_mem_map.map[i].type != BOOT_MEM_ROM_DATA)
                        continue;

                addr = boot_mem_map.map[i].addr;
                while (addr < boot_mem_map.map[i].addr
                       + boot_mem_map.map[i].size) {
                        ClearPageReserved(virt_to_page(__va(addr)));
                        set_page_count(virt_to_page(__va(addr)), 1);
                        free_page((unsigned long)__va(addr));
                        addr += PAGE_SIZE;
                        freed += PAGE_SIZE;
                }
        }
        printk("Freeing prom memory: %lukb freed\n", freed >> 10);

#ifdef CONFIG_REALTEK_RECLAIM_BOOT_MEM
        if (!is_mars_cpu()) {
                /* venus or neptune */
                addr = F_ADDR1;
                if (debug_flag)
                        dest = T_ADDR1;
                else
                        dest = T_ADDR2;
        } else {
                /* mars */
                addr = F_ADDR2;
                if (debug_flag)
                        dest = T_ADDR1;
                else
                        dest = T_ADDR3;
        }
        printk("Reclaim bootloader memory from %lx to %lx\n", addr, dest);
        count = 0;
        while (addr < dest) {
                page = virt_to_page(addr);
                /*
                printk("mem_map: %x, page: %x, size: %d\n",
                       (int)mem_map, (int)page, sizeof(struct page));
                if (PageReserved(page) != 1)
                        BUG();
                if (page->_count.counter != -1)
                        BUG();
                */
                count++;
                __ClearPageReserved(page);
                set_page_count(page, 1);
                __free_page(page);
                addr += 0x1000;         /* 4KB */
        }
        totalram_pages += count;
#endif
        return freed;
}