static int get_nid_for_pfn(unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid_within(pfn))
		return -1;
	page = pfn_to_page(pfn);
	if (!page_initialized(page))
		return -1;
	return pfn_to_nid(pfn);
}
static int __ref get_nid_for_pfn(unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid_within(pfn))
		return -1;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	if (system_state == SYSTEM_BOOTING)
		return early_pfn_to_nid(pfn);
#endif
	page = pfn_to_page(pfn);
	if (!page_initialized(page))
		return -1;
	return pfn_to_nid(pfn);
}
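Both variants above return -1 when a pfn cannot safely be mapped to a node (invalid pfn, or a struct page that has not been initialized yet), so callers are expected to skip such pfns. A minimal, hypothetical caller sketch; the helper name and loop are illustrative only, not the actual drivers/base/node.c caller:

/* Illustrative only: count the pages of a pfn range that belong to @nid,
 * skipping pfns for which get_nid_for_pfn() cannot (yet) report a node. */
static unsigned long count_pages_on_node(unsigned long start_pfn,
					 unsigned long end_pfn, int nid)
{
	unsigned long pfn, count = 0;

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		int page_nid = get_nid_for_pfn(pfn);

		if (page_nid < 0)	/* invalid pfn or uninitialized struct page */
			continue;
		if (page_nid == nid)
			count++;
	}
	return count;
}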
void __init page_cgroup_init(void)
{
	unsigned long pfn;
	int nid;

	if (mem_cgroup_disabled())
		return;

	for_each_node_state(nid, N_HIGH_MEMORY) {
		unsigned long start_pfn, end_pfn;

		start_pfn = node_start_pfn(nid);
		end_pfn = node_end_pfn(nid);
		/*
		 * start_pfn and end_pfn may not be aligned to SECTION and the
		 * page->flags of out-of-node pages are not initialized. So we
		 * scan [start_pfn, the biggest section's pfn < end_pfn) here.
		 */
		for (pfn = start_pfn; pfn < end_pfn;
		     pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {
			/*
			 * Even if the first pfn is invalid, that doesn't mean
			 * the entire section is unused. So check whether the
			 * section has a mem map; if it does, go ahead and
			 * create the page cgroup.
			 */
			struct mem_section *section = __pfn_to_section(pfn);

			if (!section->section_mem_map)
				continue;
			/*
			 * Nodes' pfns can be overlapping.
			 * We know some architectures can have a node layout such as
			 * -------------pfn-------------->
			 * N0 | N1 | N2 | N0 | N1 | N2 | ...
			 */
			if (pfn_to_nid(pfn) != nid)
				continue;
			if (init_section_page_cgroup(pfn, nid))
				goto oom;
		}
	}
static int __meminit kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	struct memory_notify *mem_data = data;
	unsigned long nr_shadow_pages, start_kaddr, shadow_start;
	unsigned long shadow_end, shadow_size;

	nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
	start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
	shadow_size = nr_shadow_pages << PAGE_SHIFT;
	shadow_end = shadow_start + shadow_size;

	if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) ||
		WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT)))
		return NOTIFY_BAD;

	switch (action) {
	case MEM_GOING_ONLINE: {
		void *ret;

		ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
					shadow_end, GFP_KERNEL,
					PAGE_KERNEL, VM_NO_GUARD,
					pfn_to_nid(mem_data->start_pfn),
					__builtin_return_address(0));
		if (!ret)
			return NOTIFY_BAD;

		kmemleak_ignore(ret);
		return NOTIFY_OK;
	}
	case MEM_OFFLINE:
		vfree((void *)shadow_start);
	}

	return NOTIFY_OK;
}
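The notifier above only takes effect once it is registered on the memory-hotplug notifier chain. A minimal registration sketch, assuming the generic hotplug_memory_notifier() helper from <linux/memory.h>; the initcall name and priority 0 are assumptions, not necessarily what mm/kasan uses:

/* Sketch: hook kasan_mem_notifier into memory hotplug so it receives
 * MEM_GOING_ONLINE and MEM_OFFLINE events. Names and priority are assumed. */
static int __init kasan_memhotplug_init(void)
{
	hotplug_memory_notifier(kasan_mem_notifier, 0);
	return 0;
}
core_initcall(kasan_memhotplug_init);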
void __init page_cgroup_init(void)
{
	unsigned long pfn;
	int nid;

	if (mem_cgroup_disabled())
		return;

	for_each_node_state(nid, N_HIGH_MEMORY) {
		unsigned long start_pfn, end_pfn;

		start_pfn = node_start_pfn(nid);
		end_pfn = node_end_pfn(nid);
		/*
		 * start_pfn and end_pfn may not be aligned to SECTION and the
		 * page->flags of out-of-node pages are not initialized. So we
		 * scan [start_pfn, the biggest section's pfn < end_pfn) here.
		 */
		for (pfn = start_pfn; pfn < end_pfn;
		     pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {

			if (!pfn_valid(pfn))
				continue;
			/*
			 * Nodes' pfns can be overlapping.
			 * We know some architectures can have a node layout such as
			 * -------------pfn-------------->
			 * N0 | N1 | N2 | N0 | N1 | N2 | ...
			 */
			if (pfn_to_nid(pfn) != nid)
				continue;
			if (init_section_page_cgroup(pfn, nid))
				goto oom;
		}
	}
void __init kasan_init(void)
{
	u64 kimg_shadow_start, kimg_shadow_end;
	u64 mod_shadow_start, mod_shadow_end;
	struct memblock_region *reg;
	int i;

	kimg_shadow_start = (u64)kasan_mem_to_shadow(_text);
	kimg_shadow_end = (u64)kasan_mem_to_shadow(_end);

	mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
	mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);

	/*
	 * We are going to perform proper setup of shadow memory.
	 * First we unmap the early shadow (the clear_pgds() call below).
	 * However, instrumented code can't execute without shadow memory.
	 * tmp_pg_dir is used to keep the early shadow mapped until the full
	 * shadow setup is finished.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
	dsb(ishst);
	cpu_replace_ttbr1(lm_alias(tmp_pg_dir));

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	vmemmap_populate(kimg_shadow_start, kimg_shadow_end,
			 pfn_to_nid(virt_to_pfn(_text)));

	/*
	 * vmemmap_populate() has populated the shadow region that covers the
	 * kernel image with SWAPPER_BLOCK_SIZE mappings, so we have to round
	 * the start and end addresses to SWAPPER_BLOCK_SIZE as well, to
	 * prevent kasan_populate_zero_shadow() from replacing the page table
	 * entries (PMD or PTE) at the edges of the shadow region for the
	 * kernel image.
	 */
	kimg_shadow_start = round_down(kimg_shadow_start, SWAPPER_BLOCK_SIZE);
	kimg_shadow_end = round_up(kimg_shadow_end, SWAPPER_BLOCK_SIZE);

	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
				   (void *)mod_shadow_start);
	kasan_populate_zero_shadow((void *)kimg_shadow_end,
				   kasan_mem_to_shadow((void *)PAGE_OFFSET));

	if (kimg_shadow_start > mod_shadow_end)
		kasan_populate_zero_shadow((void *)mod_shadow_end,
					   (void *)kimg_shadow_start);

	for_each_memblock(memory, reg) {
		void *start = (void *)__phys_to_virt(reg->base);
		void *end = (void *)__phys_to_virt(reg->base + reg->size);

		if (start >= end)
			break;

		/*
		 * end + 1 here is intentional. We check several shadow bytes
		 * in advance to slightly speed up the fast path. In some rare
		 * cases we could cross the boundary of mapped shadow, so we
		 * just map some more here.
		 */
		vmemmap_populate((unsigned long)kasan_mem_to_shadow(start),
				 (unsigned long)kasan_mem_to_shadow(end) + 1,
				 pfn_to_nid(virt_to_pfn(start)));
	}
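Both KASAN excerpts lean on kasan_mem_to_shadow() to translate a kernel address into its shadow address. For reference, generic KASAN implements it as a simple scale-and-offset mapping along these lines (the exact form varies by kernel version):

/* Shadow mapping used by generic KASAN: one shadow byte covers
 * 2^KASAN_SHADOW_SCALE_SHIFT (= 8) bytes of real memory. */
static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}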