static void init_zones_in_node(pg_data_t *pgdat)
{
    struct zone *zone;
    struct zone *node_zones = pgdat->node_zones;

    for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
        if (!populated_zone(zone))
            continue;

        init_pages_in_zone(pgdat, zone);
    }
}
int swsusp_shrink_memory(void)
{
    long tmp;
    struct zone *zone;
    unsigned long pages = 0;
    unsigned int i = 0;
    char *p = "-\\|/";
    struct timeval start, stop;

    printk("Shrinking memory... ");
    do_gettimeofday(&start);
    do {
        long size, highmem_size;

        highmem_size = count_highmem_pages();
        size = count_data_pages() + PAGES_FOR_IO;
        tmp = size;
        size += highmem_size;
        for_each_zone(zone)
            if (populated_zone(zone)) {
                if (is_highmem(zone)) {
                    highmem_size -=
                        zone_page_state(zone, NR_FREE_PAGES);
                } else {
                    tmp -= zone_page_state(zone, NR_FREE_PAGES);
                    tmp += zone->lowmem_reserve[ZONE_NORMAL];
                    tmp += snapshot_additional_pages(zone);
                }
            }

        if (highmem_size < 0)
            highmem_size = 0;

        tmp += highmem_size;
        if (tmp > 0) {
            tmp = __shrink_memory(tmp);
            if (!tmp)
                return -ENOMEM;
            pages += tmp;
        } else if (size > image_size / PAGE_SIZE) {
            tmp = __shrink_memory(size - (image_size / PAGE_SIZE));
            pages += tmp;
        }
        printk("\b%c", p[i++ % 4]);
    } while (tmp > 0);

    do_gettimeofday(&stop);
    printk("\bdone (%lu pages freed)\n", pages);
    swsusp_show_speed(&start, &stop, pages, "Freed");

    return 0;
}
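/*
 * A minimal userspace sketch (not kernel code) of the budget arithmetic in
 * swsusp_shrink_memory() above: the loop keeps reclaiming until (a) enough
 * free low memory exists to build the snapshot and (b) the data fits within
 * the image_size target. All counts below are hypothetical placeholders for
 * count_data_pages(), the NR_FREE_PAGES sum, and image_size.
 */
#include <stdio.h>

#define PAGE_SIZE       4096UL
#define PAGES_FOR_IO    1024L

int main(void)
{
    long data_pages = 50000;                 /* hypothetical count_data_pages() */
    long free_pages = 30000;                 /* hypothetical NR_FREE_PAGES sum */
    unsigned long image_size = 100UL << 20;  /* hypothetical 100 MB image target */
    long size = data_pages + PAGES_FOR_IO;

    /* tmp > 0 means this many more pages must be freed for the snapshot */
    long tmp = size - free_pages;

    printf("need to free %ld more pages for the snapshot\n", tmp > 0 ? tmp : 0);
    if (size > (long)(image_size / PAGE_SIZE))
        printf("need to shrink the image by %ld pages\n",
               size - (long)(image_size / PAGE_SIZE));
    return 0;
}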
static void init_zones_in_node(pg_data_t *pgdat)
{
    struct zone *zone;
    struct zone *node_zones = pgdat->node_zones;
    unsigned long flags;

    for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
        if (!populated_zone(zone))
            continue;

        spin_lock_irqsave(&zone->lock, flags);
        init_pages_in_zone(pgdat, zone);
        spin_unlock_irqrestore(&zone->lock, flags);
    }
}
//static void mlog_buddyinfo(void)
void mlog_buddyinfo(void)
{
    int i;
    struct zone *zone;
    struct zone *node_zones;
    unsigned int order;
    int zone_nr = 0;
    unsigned long normal_nr_free[MAX_ORDER];
    unsigned long high_nr_free[MAX_ORDER];

    for_each_online_node(i) {
        pg_data_t *pgdat = NODE_DATA(i);
        unsigned long flags;

        node_zones = pgdat->node_zones;
        // MAX_NR_ZONES 3
        for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
            if (!populated_zone(zone))
                continue;

            spin_lock_irqsave(&zone->lock, flags);
            zone_nr++;
            for (order = 0; order < MAX_ORDER; ++order) {
                if (zone_nr == 1)
                    normal_nr_free[order] = zone->free_area[order].nr_free;
                if (zone_nr == 2)
                    high_nr_free[order] = zone->free_area[order].nr_free;
            }
            spin_unlock_irqrestore(&zone->lock, flags);
        }
    }

    if (zone_nr == 1) {
        for (order = 0; order < MAX_ORDER; ++order)
            high_nr_free[order] = 0;
    }

#ifdef CONFIG_MTKPASR
    if (zone_nr == 2) {
        high_nr_free[MAX_ORDER - 1] +=
            (mtkpasr_show_page_reserved() >> (MAX_ORDER - 1));
    }
/* The zonelists are simply reported, validation is manual. */
void mminit_verify_zonelist(void)
{
    int nid;

    if (mminit_loglevel < MMINIT_VERIFY)
        return;

    for_each_online_node(nid) {
        pg_data_t *pgdat = NODE_DATA(nid);
        struct zone *zone;
        struct zoneref *z;
        struct zonelist *zonelist;
        int i, listid, zoneid;

        BUG_ON(MAX_ZONELISTS > 2);
        for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++) {

            /* Identify the zone and nodelist */
            zoneid = i % MAX_NR_ZONES;
            listid = i / MAX_NR_ZONES;
            zonelist = &pgdat->node_zonelists[listid];
            zone = &pgdat->node_zones[zoneid];
            if (!populated_zone(zone))
                continue;

            /* Print information about the zonelist */
            printk(KERN_DEBUG "mminit::zonelist %s %d:%s = ",
                listid > 0 ? "thisnode" : "general", nid,
                zone->name);

            /* Iterate the zonelist */
            for_each_zone_zonelist(zone, z, zonelist, zoneid) {
#ifdef CONFIG_NUMA
                printk(KERN_CONT "%d:%s ", zone->node, zone->name);
#else
                printk(KERN_CONT "0:%s ", zone->name);
#endif /* CONFIG_NUMA */
            }
            printk(KERN_CONT "\n");
        }
    }
}
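/*
 * A standalone sketch (not kernel code) of the index decomposition used in
 * mminit_verify_zonelist() above: a single counter i enumerates every
 * (zonelist, zone) pair, with i / MAX_NR_ZONES selecting the list and
 * i % MAX_NR_ZONES the zone, flattening two nested loops into one.
 */
#include <stdio.h>

#define MAX_ZONELISTS   2
#define MAX_NR_ZONES    3

int main(void)
{
    int i;

    for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++)
        printf("i=%d -> listid=%d zoneid=%d\n",
               i, i / MAX_NR_ZONES, i % MAX_NR_ZONES);
    return 0;
}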
/*
 * The normal show_free_areas() is too verbose on Tile, with dozens
 * of processors and often four NUMA zones each with high and lowmem.
 */
void show_mem(void)
{
    struct zone *zone;

    pr_err("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu"
           " free:%lu\n slab:%lu mapped:%lu pagetables:%lu bounce:%lu"
           " pagecache:%lu swap:%lu\n",
           (global_page_state(NR_ACTIVE_ANON) +
            global_page_state(NR_ACTIVE_FILE)),
           (global_page_state(NR_INACTIVE_ANON) +
            global_page_state(NR_INACTIVE_FILE)),
           global_page_state(NR_FILE_DIRTY),
           global_page_state(NR_WRITEBACK),
           global_page_state(NR_UNSTABLE_NFS),
           global_page_state(NR_FREE_PAGES),
           (global_page_state(NR_SLAB_RECLAIMABLE) +
            global_page_state(NR_SLAB_UNRECLAIMABLE)),
           global_page_state(NR_FILE_MAPPED),
           global_page_state(NR_PAGETABLE),
           global_page_state(NR_BOUNCE),
           global_page_state(NR_FILE_PAGES),
           nr_swap_pages);

    for_each_zone(zone) {
        unsigned long flags, order, total = 0, largest_order = -1;

        if (!populated_zone(zone))
            continue;

        spin_lock_irqsave(&zone->lock, flags);
        for (order = 0; order < MAX_ORDER; order++) {
            int nr = zone->free_area[order].nr_free;
            total += nr << order;
            if (nr)
                largest_order = order;
        }
        spin_unlock_irqrestore(&zone->lock, flags);
        pr_err("Node %d %7s: %lukB (largest %lukB)\n",
               zone_to_nid(zone), zone->name,
               K(total), largest_order ? K(1UL) << largest_order : 0);
    }
}
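/*
 * A standalone sketch (not kernel code) of the buddy accounting in the loop
 * above: a free block of order k holds 2^k pages, so a zone's free memory is
 * the sum of nr_free[k] << k. The per-order counts here are hypothetical.
 */
#include <stdio.h>

#define MAX_ORDER   11
#define K(x)        ((x) << 2)  /* pages to KB, assuming 4 KB pages */

int main(void)
{
    /* hypothetical per-order free-block counts for one zone */
    unsigned long nr_free[MAX_ORDER] = { 120, 60, 30, 8, 4, 2, 1, 0, 0, 0, 1 };
    unsigned long total = 0, largest_order = 0;
    unsigned int order;

    for (order = 0; order < MAX_ORDER; order++) {
        total += nr_free[order] << order;   /* 2^order pages per block */
        if (nr_free[order])
            largest_order = order;
    }
    printf("zone: %lukB free (largest block %lukB)\n",
           K(total), K(1UL << largest_order));
    return 0;
}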
void show_mem(unsigned int filter)
{
    pg_data_t *pgdat;
    unsigned long total = 0, reserved = 0, highmem = 0;

    printk("Mem-Info:\n");
    show_free_areas(filter);

    for_each_online_pgdat(pgdat) {
        unsigned long flags;
        int zoneid;

        pgdat_resize_lock(pgdat, &flags);
        for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
            struct zone *zone = &pgdat->node_zones[zoneid];
            if (!populated_zone(zone))
                continue;

            total += zone->present_pages;
            reserved += zone->present_pages - zone->managed_pages;

            if (is_highmem_idx(zoneid))
                highmem += zone->present_pages;
        }
        pgdat_resize_unlock(pgdat, &flags);
    }

    printk("%lu pages RAM\n", total);
    printk("%lu pages HighMem/MovableOnly\n", highmem);
    printk("%lu pages reserved\n", reserved);
#ifdef CONFIG_CMA
    printk("%lu pages cma reserved\n", totalcma_pages);
#endif
#ifdef CONFIG_QUICKLIST
    printk("%lu pages in pagetable cache\n",
        quicklist_total_size());
#endif
#ifdef CONFIG_MEMORY_FAILURE
    printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
#endif
}
static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
        int safe_needed)
{
    struct chain_allocator ca;
    struct zone *zone;
    struct zone_bitmap *zone_bm;
    struct bm_block *bb;
    unsigned int nr;

    chain_init(&ca, gfp_mask, safe_needed);

    /* Compute the number of zones */
    nr = 0;
    for_each_zone(zone)
        if (populated_zone(zone))
            nr++;

    /* Allocate the list of zones bitmap objects */
    zone_bm = create_zone_bm_list(nr, &ca);
    bm->zone_bm_list = zone_bm;
    if (!zone_bm) {
        chain_free(&ca, PG_UNSAFE_CLEAR);
        return -ENOMEM;
    }

    /* Initialize the zone bitmap objects */
    for_each_zone(zone) {
        unsigned long pfn;

        if (!populated_zone(zone))
            continue;

        zone_bm->start_pfn = zone->zone_start_pfn;
        zone_bm->end_pfn = zone->zone_start_pfn + zone->spanned_pages;
        /* Allocate the list of bitmap block objects */
        nr = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
        bb = create_bm_block_list(nr, &ca);
        zone_bm->bm_blocks = bb;
        zone_bm->cur_block = bb;
        if (!bb)
            goto Free;

        nr = zone->spanned_pages;
        pfn = zone->zone_start_pfn;
        /* Initialize the bitmap block objects */
        while (bb) {
            unsigned long *ptr;

            ptr = get_image_page(gfp_mask, safe_needed);
            bb->data = ptr;
            if (!ptr)
                goto Free;

            bb->start_pfn = pfn;
            if (nr >= BM_BITS_PER_BLOCK) {
                pfn += BM_BITS_PER_BLOCK;
                bb->size = BM_CHUNKS_PER_BLOCK;
                nr -= BM_BITS_PER_BLOCK;
            } else {
                /* This is executed only once in the loop */
                pfn += nr;
                bb->size = DIV_ROUND_UP(nr, BM_BITS_PER_CHUNK);
            }
            bb->end_pfn = pfn;
            bb = bb->next;
        }
        zone_bm = zone_bm->next;
    }
    bm->p_list = ca.chain;
    memory_bm_position_reset(bm);
    return 0;

 Free:
    bm->p_list = ca.chain;
    memory_bm_free(bm, PG_UNSAFE_CLEAR);
    return -ENOMEM;
}
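/*
 * A userspace sketch (not kernel code) of the block-splitting arithmetic in
 * memory_bm_create() above: a zone spanning N pages needs
 * DIV_ROUND_UP(N, BITS_PER_BLOCK) bitmap blocks, and every block but the
 * last covers exactly BITS_PER_BLOCK pages. The zone span and block size
 * below are hypothetical.
 */
#include <stdio.h>

#define BITS_PER_BLOCK      (4096UL * 8)    /* one page of bitmap bits */
#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

int main(void)
{
    unsigned long start_pfn = 0x1000, spanned = 100000;  /* hypothetical zone */
    unsigned long nr_blocks = DIV_ROUND_UP(spanned, BITS_PER_BLOCK);
    unsigned long pfn = start_pfn, left = spanned, i;

    for (i = 0; i < nr_blocks; i++) {
        unsigned long chunk = left >= BITS_PER_BLOCK ? BITS_PER_BLOCK : left;

        printf("block %lu: pfn %#lx..%#lx\n", i, pfn, pfn + chunk);
        pfn += chunk;
        left -= chunk;
    }
    return 0;
}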
int online_pages(unsigned long pfn, unsigned long nr_pages)
{
    unsigned long flags;
    unsigned long onlined_pages = 0;
    struct zone *zone;
    int need_zonelists_rebuild = 0;
    int nid;
    int ret;
    struct memory_notify arg;

    arg.start_pfn = pfn;
    arg.nr_pages = nr_pages;
    arg.status_change_nid = -1;

    nid = page_to_nid(pfn_to_page(pfn));
    if (node_present_pages(nid) == 0)
        arg.status_change_nid = nid;

    ret = memory_notify(MEM_GOING_ONLINE, &arg);
    ret = notifier_to_errno(ret);
    if (ret) {
        memory_notify(MEM_CANCEL_ONLINE, &arg);
        return ret;
    }
    /*
     * This doesn't need a lock to do pfn_to_page().
     * The section can't be removed here because of the
     * memory_block->state_sem.
     */
    zone = page_zone(pfn_to_page(pfn));
    pgdat_resize_lock(zone->zone_pgdat, &flags);
    grow_zone_span(zone, pfn, pfn + nr_pages);
    grow_pgdat_span(zone->zone_pgdat, pfn, pfn + nr_pages);
    pgdat_resize_unlock(zone->zone_pgdat, &flags);

    /*
     * If this zone is not populated, then it is not in zonelist.
     * This means the page allocator ignores this zone.
     * So, zonelist must be updated after online.
     */
    if (!populated_zone(zone))
        need_zonelists_rebuild = 1;

    walk_memory_resource(pfn, nr_pages, &onlined_pages,
        online_pages_range);
    zone->present_pages += onlined_pages;
    zone->zone_pgdat->node_present_pages += onlined_pages;

    setup_per_zone_pages_min();
    if (onlined_pages) {
        kswapd_run(zone_to_nid(zone));
        node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
    }

    if (need_zonelists_rebuild)
        build_all_zonelists();
    vm_total_pages = nr_free_pagecache_pages();
    writeback_set_ratelimit();

    if (onlined_pages)
        memory_notify(MEM_ONLINE, &arg);

    return 0;
}
void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
{
    struct page_ext *page_ext = lookup_page_ext(page);
    struct stack_trace trace = {
        .nr_entries = 0,
        .max_entries = ARRAY_SIZE(page_ext->trace_entries),
        .entries = &page_ext->trace_entries[0],
        .skip = 3,
    };

    if (unlikely(!page_ext))
        return;

    save_stack_trace(&trace);

    page_ext->order = order;
    page_ext->gfp_mask = gfp_mask;
    page_ext->nr_entries = trace.nr_entries;
    page_ext->last_migrate_reason = -1;

    __set_bit(PAGE_EXT_OWNER, &page_ext->flags);
}

void __set_page_owner_migrate_reason(struct page *page, int reason)
{
    struct page_ext *page_ext = lookup_page_ext(page);

    if (unlikely(!page_ext))
        return;

    page_ext->last_migrate_reason = reason;
}

gfp_t __get_page_owner_gfp(struct page *page)
{
    struct page_ext *page_ext = lookup_page_ext(page);

    if (unlikely(!page_ext))
        /*
         * The caller just returns 0 if no valid gfp
         * So return 0 here too.
         */
        return 0;

    return page_ext->gfp_mask;
}

void __copy_page_owner(struct page *oldpage, struct page *newpage)
{
    struct page_ext *old_ext = lookup_page_ext(oldpage);
    struct page_ext *new_ext = lookup_page_ext(newpage);
    int i;

    if (unlikely(!old_ext || !new_ext))
        return;

    new_ext->order = old_ext->order;
    new_ext->gfp_mask = old_ext->gfp_mask;
    new_ext->nr_entries = old_ext->nr_entries;

    for (i = 0; i < ARRAY_SIZE(new_ext->trace_entries); i++)
        new_ext->trace_entries[i] = old_ext->trace_entries[i];

    /*
     * We don't clear the bit on the oldpage as it's going to be freed
     * after migration. Until then, the info can be useful in case of
     * a bug, and the overall stats will be off a bit only temporarily.
     * Also, migrate_misplaced_transhuge_page() can still fail the
     * migration and then we want the oldpage to retain the info. But
     * in that case we also don't need to explicitly clear the info from
     * the new page, which will be freed.
     */
    __set_bit(PAGE_EXT_OWNER, &new_ext->flags);
}

static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
        struct page *page, struct page_ext *page_ext)
{
    int ret;
    int pageblock_mt, page_mt;
    char *kbuf;
    struct stack_trace trace = {
        .nr_entries = page_ext->nr_entries,
        .entries = &page_ext->trace_entries[0],
    };

    kbuf = kmalloc(count, GFP_KERNEL);
    if (!kbuf)
        return -ENOMEM;

    ret = snprintf(kbuf, count,
            "Page allocated via order %u, mask %#x(%pGg)\n",
            page_ext->order, page_ext->gfp_mask,
            &page_ext->gfp_mask);

    if (ret >= count)
        goto err;

    /* Print information relevant to grouping pages by mobility */
    pageblock_mt = get_pageblock_migratetype(page);
    page_mt = gfpflags_to_migratetype(page_ext->gfp_mask);
    ret += snprintf(kbuf + ret, count - ret,
            "PFN %lu type %s Block %lu type %s Flags %#lx(%pGp)\n",
            pfn,
            migratetype_names[page_mt],
            pfn >> pageblock_order,
            migratetype_names[pageblock_mt],
            page->flags, &page->flags);

    if (ret >= count)
        goto err;

    ret += snprint_stack_trace(kbuf + ret, count - ret, &trace, 0);
    if (ret >= count)
        goto err;

    if (page_ext->last_migrate_reason != -1) {
        ret += snprintf(kbuf + ret, count - ret,
            "Page has been migrated, last migrate reason: %s\n",
            migrate_reason_names[page_ext->last_migrate_reason]);
        if (ret >= count)
            goto err;
    }

    ret += snprintf(kbuf + ret, count - ret, "\n");
    if (ret >= count)
        goto err;

    if (copy_to_user(buf, kbuf, ret))
        ret = -EFAULT;

    kfree(kbuf);
    return ret;

err:
    kfree(kbuf);
    return -ENOMEM;
}

void __dump_page_owner(struct page *page)
{
    struct page_ext *page_ext = lookup_page_ext(page);
    struct stack_trace trace = { 0 };
    gfp_t gfp_mask;
    int mt;

    if (unlikely(!page_ext)) {
        pr_alert("There is no page extension available.\n");
        return;
    }

    /* Read the owner data only after the NULL check above */
    trace.nr_entries = page_ext->nr_entries;
    trace.entries = &page_ext->trace_entries[0];
    gfp_mask = page_ext->gfp_mask;
    mt = gfpflags_to_migratetype(gfp_mask);

    if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
        pr_alert("page_owner info is not active (free page?)\n");
        return;
    }

    pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
         page_ext->order, migratetype_names[mt], gfp_mask, &gfp_mask);
    print_stack_trace(&trace, 0);

    if (page_ext->last_migrate_reason != -1)
        pr_alert("page has been migrated, last migrate reason: %s\n",
            migrate_reason_names[page_ext->last_migrate_reason]);
}

static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
    unsigned long pfn;
    struct page *page;
    struct page_ext *page_ext;

    if (!static_branch_unlikely(&page_owner_inited))
        return -EINVAL;

    page = NULL;
    pfn = min_low_pfn + *ppos;

    /* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
    while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
        pfn++;

    drain_all_pages(NULL);

    /* Find an allocated page */
    for (; pfn < max_pfn; pfn++) {
        /*
         * If the new page is in a new MAX_ORDER_NR_PAGES area,
         * validate the area as existing, skip it if not
         */
        if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
            pfn += MAX_ORDER_NR_PAGES - 1;
            continue;
        }

        /* Check for holes within a MAX_ORDER area */
        if (!pfn_valid_within(pfn))
            continue;

        page = pfn_to_page(pfn);
        if (PageBuddy(page)) {
            unsigned long freepage_order = page_order_unsafe(page);

            if (freepage_order < MAX_ORDER)
                pfn += (1UL << freepage_order) - 1;
            continue;
        }

        page_ext = lookup_page_ext(page);
        if (unlikely(!page_ext))
            continue;

        /*
         * Some pages could be missed by concurrent allocation or free,
         * because we don't hold the zone lock.
         */
        if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
            continue;

        /* Record the next PFN to read in the file offset */
        *ppos = (pfn - min_low_pfn) + 1;

        return print_page_owner(buf, count, pfn, page, page_ext);
    }

    return 0;
}

static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
    struct page *page;
    struct page_ext *page_ext;
    unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
    unsigned long end_pfn = pfn + zone->spanned_pages;
    unsigned long count = 0;

    /* Scan block by block. First and last block may be incomplete */
    pfn = zone->zone_start_pfn;

    /*
     * Walk the zone in pageblock_nr_pages steps. If a page block spans
     * a zone boundary, it will be double counted between zones. This does
     * not matter as the mixed block count will still be correct
     */
    for (; pfn < end_pfn; ) {
        if (!pfn_valid(pfn)) {
            pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
            continue;
        }

        block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
        block_end_pfn = min(block_end_pfn, end_pfn);

        page = pfn_to_page(pfn);

        for (; pfn < block_end_pfn; pfn++) {
            if (!pfn_valid_within(pfn))
                continue;

            page = pfn_to_page(pfn);
            if (page_zone(page) != zone)
                continue;

            /*
             * We are safe to check buddy flag and order, because
             * this is init stage and only single thread runs.
             */
            if (PageBuddy(page)) {
                pfn += (1UL << page_order(page)) - 1;
                continue;
            }

            if (PageReserved(page))
                continue;

            page_ext = lookup_page_ext(page);
            if (unlikely(!page_ext))
                continue;

            /* Maybe overlapping zone */
            if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
                continue;

            /* Found early allocated page */
            set_page_owner(page, 0, 0);
            count++;
        }
    }

    pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
        pgdat->node_id, zone->name, count);
}

static void init_zones_in_node(pg_data_t *pgdat)
{
    struct zone *zone;
    struct zone *node_zones = pgdat->node_zones;
    unsigned long flags;

    for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
        if (!populated_zone(zone))
            continue;

        spin_lock_irqsave(&zone->lock, flags);
        init_pages_in_zone(pgdat, zone);
        spin_unlock_irqrestore(&zone->lock, flags);
    }
}

static void init_early_allocated_pages(void)
{
    pg_data_t *pgdat;

    drain_all_pages(NULL);
    for_each_online_pgdat(pgdat)
        init_zones_in_node(pgdat);
}
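/*
 * A standalone sketch (not kernel code) of the pageblock walk in
 * init_pages_in_zone() above: ALIGN rounds a PFN up to the next block
 * boundary, so the first and last blocks of a zone may be scanned only
 * partially. The zone span and pageblock size below are hypothetical.
 */
#include <stdio.h>

#define ALIGN(x, a)         (((x) + (a) - 1) & ~((unsigned long)(a) - 1))
#define PAGEBLOCK_NR_PAGES  512UL   /* hypothetical pageblock size */

int main(void)
{
    unsigned long pfn = 1000, end_pfn = 3000;   /* hypothetical zone span */

    while (pfn < end_pfn) {
        unsigned long block_end = ALIGN(pfn + 1, PAGEBLOCK_NR_PAGES);

        if (block_end > end_pfn)
            block_end = end_pfn;
        printf("scan pfns %lu..%lu\n", pfn, block_end - 1);
        pfn = block_end;
    }
    return 0;
}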
static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
        struct page *page, struct page_owner *page_owner,
        depot_stack_handle_t handle)
{
    int ret;
    int pageblock_mt, page_mt;
    char *kbuf;
    unsigned long entries[PAGE_OWNER_STACK_DEPTH];
    struct stack_trace trace = {
        .nr_entries = 0,
        .entries = entries,
        .max_entries = PAGE_OWNER_STACK_DEPTH,
        .skip = 0
    };

    kbuf = kmalloc(count, GFP_KERNEL);
    if (!kbuf)
        return -ENOMEM;

    ret = snprintf(kbuf, count,
            "Page allocated via order %u, mask %#x(%pGg)\n",
            page_owner->order, page_owner->gfp_mask,
            &page_owner->gfp_mask);

    if (ret >= count)
        goto err;

    /* Print information relevant to grouping pages by mobility */
    pageblock_mt = get_pageblock_migratetype(page);
    page_mt = gfpflags_to_migratetype(page_owner->gfp_mask);
    ret += snprintf(kbuf + ret, count - ret,
            "PFN %lu type %s Block %lu type %s Flags %#lx(%pGp)\n",
            pfn,
            migratetype_names[page_mt],
            pfn >> pageblock_order,
            migratetype_names[pageblock_mt],
            page->flags, &page->flags);

    if (ret >= count)
        goto err;

    depot_fetch_stack(handle, &trace);
    ret += snprint_stack_trace(kbuf + ret, count - ret, &trace, 0);
    if (ret >= count)
        goto err;

    if (page_owner->last_migrate_reason != -1) {
        ret += snprintf(kbuf + ret, count - ret,
            "Page has been migrated, last migrate reason: %s\n",
            migrate_reason_names[page_owner->last_migrate_reason]);
        if (ret >= count)
            goto err;
    }

    ret += snprintf(kbuf + ret, count - ret, "\n");
    if (ret >= count)
        goto err;

    if (copy_to_user(buf, kbuf, ret))
        ret = -EFAULT;

    kfree(kbuf);
    return ret;

err:
    kfree(kbuf);
    return -ENOMEM;
}

void __dump_page_owner(struct page *page)
{
    struct page_ext *page_ext = lookup_page_ext(page);
    struct page_owner *page_owner;
    unsigned long entries[PAGE_OWNER_STACK_DEPTH];
    struct stack_trace trace = {
        .nr_entries = 0,
        .entries = entries,
        .max_entries = PAGE_OWNER_STACK_DEPTH,
        .skip = 0
    };
    depot_stack_handle_t handle;
    gfp_t gfp_mask;
    int mt;

    if (unlikely(!page_ext)) {
        pr_alert("There is no page extension available.\n");
        return;
    }

    page_owner = get_page_owner(page_ext);
    gfp_mask = page_owner->gfp_mask;
    mt = gfpflags_to_migratetype(gfp_mask);

    if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
        pr_alert("page_owner info is not active (free page?)\n");
        return;
    }

    handle = READ_ONCE(page_owner->handle);
    if (!handle) {
        pr_alert("page_owner info is not active (free page?)\n");
        return;
    }

    depot_fetch_stack(handle, &trace);
    pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
         page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask);
    print_stack_trace(&trace, 0);

    if (page_owner->last_migrate_reason != -1)
        pr_alert("page has been migrated, last migrate reason: %s\n",
            migrate_reason_names[page_owner->last_migrate_reason]);
}

static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
    unsigned long pfn;
    struct page *page;
    struct page_ext *page_ext;
    struct page_owner *page_owner;
    depot_stack_handle_t handle;

    if (!static_branch_unlikely(&page_owner_inited))
        return -EINVAL;

    page = NULL;
    pfn = min_low_pfn + *ppos;

    /* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
    while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
        pfn++;

    drain_all_pages(NULL);

    /* Find an allocated page */
    for (; pfn < max_pfn; pfn++) {
        /*
         * If the new page is in a new MAX_ORDER_NR_PAGES area,
         * validate the area as existing, skip it if not
         */
        if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
            pfn += MAX_ORDER_NR_PAGES - 1;
            continue;
        }

        /* Check for holes within a MAX_ORDER area */
        if (!pfn_valid_within(pfn))
            continue;

        page = pfn_to_page(pfn);
        if (PageBuddy(page)) {
            unsigned long freepage_order = page_order_unsafe(page);

            if (freepage_order < MAX_ORDER)
                pfn += (1UL << freepage_order) - 1;
            continue;
        }

        page_ext = lookup_page_ext(page);
        if (unlikely(!page_ext))
            continue;

        /*
         * Some pages could be missed by concurrent allocation or free,
         * because we don't hold the zone lock.
         */
        if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
            continue;

        page_owner = get_page_owner(page_ext);

        /*
         * Access to page_ext->handle isn't synchronous so we should
         * be careful to access it.
         */
        handle = READ_ONCE(page_owner->handle);
        if (!handle)
            continue;

        /* Record the next PFN to read in the file offset */
        *ppos = (pfn - min_low_pfn) + 1;

        return print_page_owner(buf, count, pfn, page,
                page_owner, handle);
    }

    return 0;
}

static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
    struct page *page;
    struct page_ext *page_ext;
    unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
    unsigned long end_pfn = pfn + zone->spanned_pages;
    unsigned long count = 0;

    /* Scan block by block. First and last block may be incomplete */
    pfn = zone->zone_start_pfn;

    /*
     * Walk the zone in pageblock_nr_pages steps. If a page block spans
     * a zone boundary, it will be double counted between zones. This does
     * not matter as the mixed block count will still be correct
     */
    for (; pfn < end_pfn; ) {
        if (!pfn_valid(pfn)) {
            pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
            continue;
        }

        block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
        block_end_pfn = min(block_end_pfn, end_pfn);

        page = pfn_to_page(pfn);

        for (; pfn < block_end_pfn; pfn++) {
            if (!pfn_valid_within(pfn))
                continue;

            page = pfn_to_page(pfn);
            if (page_zone(page) != zone)
                continue;

            /*
             * We are safe to check buddy flag and order, because
             * this is init stage and only single thread runs.
             */
            if (PageBuddy(page)) {
                pfn += (1UL << page_order(page)) - 1;
                continue;
            }

            if (PageReserved(page))
                continue;

            page_ext = lookup_page_ext(page);
            if (unlikely(!page_ext))
                continue;

            /* Maybe overlapping zone */
            if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
                continue;

            /* Found early allocated page */
            set_page_owner(page, 0, 0);
            count++;
        }
    }

    pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
        pgdat->node_id, zone->name, count);
}

static void init_zones_in_node(pg_data_t *pgdat)
{
    struct zone *zone;
    struct zone *node_zones = pgdat->node_zones;
    unsigned long flags;

    for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
        if (!populated_zone(zone))
            continue;

        spin_lock_irqsave(&zone->lock, flags);
        init_pages_in_zone(pgdat, zone);
        spin_unlock_irqrestore(&zone->lock, flags);
    }
}

static void init_early_allocated_pages(void)
{
    pg_data_t *pgdat;

    drain_all_pages(NULL);
    for_each_online_pgdat(pgdat)
        init_zones_in_node(pgdat);
}