/*
 * Size each online node's zones and register them with the core page
 * allocator.  Every page on a node is treated as DMA-able low memory,
 * so only ZONE_DMA is populated; per-node hole counts come from
 * mem_prof[].
 */
void __init zone_sizes_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES], zholes_size[MAX_NR_ZONES];
	unsigned long node_low_pfn, node_start_pfn;
	mem_prof_t *prof;
	int nid, zone;

	for_each_online_node(nid) {
		prof = &mem_prof[nid];

		/* Start from empty zone tables for every node. */
		for (zone = 0; zone < MAX_NR_ZONES; zone++)
			zones_size[zone] = zholes_size[zone] = 0;

		node_start_pfn = START_PFN(nid);
		node_low_pfn = MAX_LOW_PFN(nid);

		/* All of this node's memory lands in ZONE_DMA. */
		zones_size[ZONE_DMA] = node_low_pfn - node_start_pfn;
		zholes_size[ZONE_DMA] = prof->holes;

		node_set_state(nid, N_NORMAL_MEMORY);
		free_area_init_node(nid, zones_size, node_start_pfn,
				    zholes_size);
	}

	/*
	 * For test
	 * Use all area of internal RAM.
	 * see __alloc_pages()
	 */
	NODE_DATA(1)->node_zones->watermark[WMARK_MIN] = 0;
	NODE_DATA(1)->node_zones->watermark[WMARK_LOW] = 0;
	NODE_DATA(1)->node_zones->watermark[WMARK_HIGH] = 0;
}
/*
 * Size each online node's zones (everything goes into ZONE_DMA) and
 * register them with the core page allocator.
 *
 * Returns the total number of hole pages accumulated across all nodes.
 */
unsigned long __init zone_sizes_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES], zholes_size[MAX_NR_ZONES];
	unsigned long node_low_pfn, node_start_pfn;
	unsigned long total_holes = 0;
	mem_prof_t *prof;
	int nid, zone;

	for_each_online_node(nid) {
		prof = &mem_prof[nid];

		/* Reset both zone tables before describing this node. */
		for (zone = 0; zone < MAX_NR_ZONES; zone++)
			zones_size[zone] = zholes_size[zone] = 0;

		node_start_pfn = START_PFN(nid);
		node_low_pfn = MAX_LOW_PFN(nid);

		/* All of this node's memory lands in ZONE_DMA. */
		zones_size[ZONE_DMA] = node_low_pfn - node_start_pfn;
		zholes_size[ZONE_DMA] = prof->holes;
		total_holes += zholes_size[ZONE_DMA];

		free_area_init_node(nid, NODE_DATA(nid), zones_size,
				    node_start_pfn, zholes_size);
	}

	/*
	 * For test
	 * Use all area of internal RAM.
	 * see __alloc_pages()
	 */
	NODE_DATA(1)->node_zones->pages_min = 0;
	NODE_DATA(1)->node_zones->pages_low = 0;
	NODE_DATA(1)->node_zones->pages_high = 0;

	return total_holes;
}
/*
 * Per-node paging setup: size ZONE_DMA for each node from the platform
 * bootmem descriptors and hand the node to free_area_init_node().
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES];
	struct page *node_mem_map;
	int zone, nid;

	/*
	 * Zero every zone slot above ZONE_DMA once up front; ZONE_DMA
	 * itself is (re)written for each node inside the loop below.
	 */
	for (zone = 1; zone < MAX_NR_ZONES; zone++)
		zones_size[zone] = 0;

	for (nid = 0; nid < numnodes; nid++) {
		unsigned long start_pfn =
			plat_node_bdata[nid].node_boot_start >> PAGE_SHIFT;
		unsigned long end_pfn = plat_node_bdata[nid].node_low_pfn;

		zones_size[ZONE_DMA] = end_pfn - start_pfn;
		dbg("free_area_init node %d %lx %lx\n", nid,
		    zones_size[ZONE_DMA], start_pfn);

		/*
		 * Give this empty node a dummy struct page to avoid
		 * us from trying to allocate a node local mem_map
		 * in free_area_init_node (which will fail).
		 */
		node_mem_map = node_data[nid].node_spanned_pages ?
			NULL : alloc_bootmem(sizeof(struct page));

		free_area_init_node(nid, NODE_DATA(nid), node_mem_map,
				    zones_size, start_pfn, NULL);
	}
}
/*
 * First memory setup routine called from setup_arch()
 * 1. setup swapper's mm @init_mm
 * 2. Count the pages we have and setup bootmem allocator
 * 3. zone setup
 */
void __init setup_arch_memory(void)
{
	unsigned long zones_size[MAX_NR_ZONES];
	unsigned long end_mem = CONFIG_LINUX_LINK_BASE + arc_mem_sz;

	/* Record the kernel image layout in the initial mm. */
	init_mm.start_code = (unsigned long)_text;
	init_mm.end_code = (unsigned long)_etext;
	init_mm.end_data = (unsigned long)_edata;
	init_mm.brk = (unsigned long)_end;

	/*
	 * We do it here, so that memory is correctly instantiated
	 * even if "mem=xxx" cmdline over-ride is given and/or
	 * DT has memory node. Each causes an update to @arc_mem_sz
	 * and we finally add memory here.
	 */
	memblock_add(CONFIG_LINUX_LINK_BASE, arc_mem_sz);

	/*------------- externs in mm need setting up ---------------*/

	/* first page of system - kernel .vector starts here */
	min_low_pfn = ARCH_PFN_OFFSET;

	/* Last usable page of low mem (no HIGHMEM yet for ARC port) */
	max_low_pfn = max_pfn = PFN_DOWN(end_mem);

	/* Total page frames spanned; used by pfn_valid() on FLATMEM. */
	max_mapnr = max_low_pfn - min_low_pfn;

	/*------------- reserve kernel image -----------------------*/
	memblock_reserve(CONFIG_LINUX_LINK_BASE,
			 __pa(_end) - CONFIG_LINUX_LINK_BASE);

#ifdef CONFIG_BLK_DEV_INITRD
	/*------------- reserve initrd image -----------------------*/
	if (initrd_start)
		memblock_reserve(__pa(initrd_start),
				 initrd_end - initrd_start);
#endif

	memblock_dump_all();

	/*-------------- node setup --------------------------------*/
	memset(zones_size, 0, sizeof(zones_size));
	/* All memory is low mem, hence a single ZONE_NORMAL. */
	zones_size[ZONE_NORMAL] = max_mapnr;

	/*
	 * We can't use the helper free_area_init(zones[]) because it uses
	 * PAGE_OFFSET to compute the @min_low_pfn which would be wrong
	 * when our kernel doesn't start at PAGE_OFFSET, i.e.
	 * PAGE_OFFSET != CONFIG_LINUX_LINK_BASE
	 */
	free_area_init_node(0,			/* node-id */
			    zones_size,		/* num pages per zone */
			    min_low_pfn,	/* first pfn of node */
			    NULL);		/* NO holes */

	high_memory = (void *)end_mem;
}
/*
 * Register this machine's single node with the core allocator.
 * All pages are DMA-able, so the whole of low memory is placed in
 * ZONE_DMA; with CONFIG_HIGHMEM the remainder goes to ZONE_HIGHMEM.
 */
void __init zones_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = { 0 };

	zones_size[ZONE_DMA] = max_low_pfn - ARCH_PFN_OFFSET;
#ifdef CONFIG_HIGHMEM
	zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn;
#endif

	free_area_init_node(0, zones_size, ARCH_PFN_OFFSET, NULL);
}
/*
 * Allocate and minimally initialize a pgdat for a hot-added node.
 * The node's zones are registered empty (zero present pages); memory
 * is onlined separately later.  Returns NULL if node-data allocation
 * fails.
 */
static pg_data_t *hotadd_new_pgdat(int nid, u64 start)
{
	unsigned long empty_zones[MAX_NR_ZONES] = { 0 };
	unsigned long empty_holes[MAX_NR_ZONES] = { 0 };
	unsigned long start_pfn = start >> PAGE_SHIFT;
	struct pglist_data *node_pgdat;

	node_pgdat = arch_alloc_nodedata(nid);
	if (!node_pgdat)
		return NULL;

	arch_refresh_nodedata(nid, node_pgdat);

	/* NODE_DATA(nid) is usable from this point on. */

	/* Init the node's zones as empty — no present pages yet. */
	free_area_init_node(nid, node_pgdat, empty_zones, start_pfn,
			    empty_holes);

	return node_pgdat;
}
/*
 * First memory setup routine called from setup_arch()
 * 1. setup swapper's mm @init_mm
 * 2. Count the pages we have and setup bootmem allocator
 * 3. zone setup
 */
void __init setup_arch_memory(void)
{
	unsigned long zones_size[MAX_NR_ZONES];
	unsigned long zones_holes[MAX_NR_ZONES];

	/* Record the kernel image layout in the initial mm. */
	init_mm.start_code = (unsigned long)_text;
	init_mm.end_code = (unsigned long)_etext;
	init_mm.end_data = (unsigned long)_edata;
	init_mm.brk = (unsigned long)_end;

	/* first page of system - kernel .vector starts here */
	min_low_pfn = ARCH_PFN_OFFSET;

	/* Last usable page of low mem */
	max_low_pfn = max_pfn = PFN_DOWN(low_mem_start + low_mem_sz);

#ifdef CONFIG_HIGHMEM
	min_high_pfn = PFN_DOWN(high_mem_start);
	max_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
#endif

	/* Total page frames spanned, low mem through high mem. */
	max_mapnr = max_pfn - min_low_pfn;

	/*------------- bootmem allocator setup -----------------------*/

	/*
	 * seed the bootmem allocator after any DT memory node parsing or
	 * "mem=xxx" cmdline overrides have potentially updated @arc_mem_sz
	 *
	 * Only low mem is added, otherwise we have crashes when allocating
	 * mem_map[] itself. NO_BOOTMEM allocates mem_map[] at the end of
	 * avail memory, ending in highmem with a > 32-bit address. However
	 * it then tries to memset it with a truncated 32-bit handle, causing
	 * the crash
	 */
	memblock_add(low_mem_start, low_mem_sz);
	memblock_reserve(low_mem_start, __pa(_end) - low_mem_start);

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start)
		memblock_reserve(__pa(initrd_start),
				 initrd_end - initrd_start);
#endif

	memblock_dump_all();

	/*----------------- node/zones setup --------------------------*/
	memset(zones_size, 0, sizeof(zones_size));
	memset(zones_holes, 0, sizeof(zones_holes));

	zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn;
	zones_holes[ZONE_NORMAL] = 0;

#ifdef CONFIG_HIGHMEM
	zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn;

	/* This handles the peripheral address space hole */
	zones_holes[ZONE_HIGHMEM] = min_high_pfn - max_low_pfn;
#endif

	/*
	 * We can't use the helper free_area_init(zones[]) because it uses
	 * PAGE_OFFSET to compute the @min_low_pfn which would be wrong
	 * when our kernel doesn't start at PAGE_OFFSET, i.e.
	 * PAGE_OFFSET != CONFIG_LINUX_LINK_BASE
	 */
	free_area_init_node(0,			/* node-id */
			    zones_size,		/* num pages per zone */
			    min_low_pfn,	/* first pfn of node */
			    zones_holes);	/* holes */

#ifdef CONFIG_HIGHMEM
	high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
	kmap_init();
#endif
}
/*
 * First memory setup routine called from setup_arch()
 * 1. setup swapper's mm @init_mm
 * 2. Count the pages we have and setup bootmem allocator
 * 3. zone setup
 */
void __init setup_arch_memory(void)
{
	unsigned long zones_size[MAX_NR_ZONES];
	unsigned long zones_holes[MAX_NR_ZONES];

	/* Record the kernel image layout in the initial mm. */
	init_mm.start_code = (unsigned long)_text;
	init_mm.end_code = (unsigned long)_etext;
	init_mm.end_data = (unsigned long)_edata;
	init_mm.brk = (unsigned long)_end;

	/* first page of system - kernel .vector starts here */
	min_low_pfn = ARCH_PFN_OFFSET;

	/* Last usable page of low mem */
	max_low_pfn = max_pfn = PFN_DOWN(low_mem_start + low_mem_sz);

#ifdef CONFIG_FLATMEM
	/* pfn_valid() uses this */
	max_mapnr = max_low_pfn - min_low_pfn;
#endif

	/*------------- bootmem allocator setup -----------------------*/

	/*
	 * seed the bootmem allocator after any DT memory node parsing or
	 * "mem=xxx" cmdline overrides have potentially updated @arc_mem_sz
	 *
	 * Only low mem is added, otherwise we have crashes when allocating
	 * mem_map[] itself. NO_BOOTMEM allocates mem_map[] at the end of
	 * avail memory, ending in highmem with a > 32-bit address. However
	 * it then tries to memset it with a truncated 32-bit handle, causing
	 * the crash
	 */
	memblock_add_node(low_mem_start, low_mem_sz, 0);
	memblock_reserve(CONFIG_LINUX_LINK_BASE,
			 __pa(_end) - CONFIG_LINUX_LINK_BASE);

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start)
		memblock_reserve(__pa(initrd_start),
				 initrd_end - initrd_start);
#endif

	/* Keep the flattened device tree and its /reserved-memory safe. */
	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();

	memblock_dump_all();

	/*----------------- node/zones setup --------------------------*/
	memset(zones_size, 0, sizeof(zones_size));
	memset(zones_holes, 0, sizeof(zones_holes));

	zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn;
	zones_holes[ZONE_NORMAL] = 0;

	/*
	 * We can't use the helper free_area_init(zones[]) because it uses
	 * PAGE_OFFSET to compute the @min_low_pfn which would be wrong
	 * when our kernel doesn't start at PAGE_OFFSET, i.e.
	 * PAGE_OFFSET != CONFIG_LINUX_RAM_BASE
	 */
	free_area_init_node(0,			/* node-id */
			    zones_size,		/* num pages per zone */
			    min_low_pfn,	/* first pfn of node */
			    zones_holes);	/* holes */

#ifdef CONFIG_HIGHMEM
	/*
	 * Populate a new node with highmem
	 *
	 * On ARC (w/o PAE) HIGHMEM addresses are actually smaller (0 based)
	 * than addresses in normal ala low memory (0x8000_0000 based).
	 * Even with PAE, the huge peripheral space hole would waste a lot of
	 * mem with single mem_map[]. This warrants a mem_map per region design.
	 * Thus HIGHMEM on ARC is implemented with DISCONTIGMEM.
	 *
	 * DISCONTIGMEM in turn requires multiple nodes. node 0 above is
	 * populated with normal memory zone while node 1 only has highmem
	 */
	node_set_online(1);

	min_high_pfn = PFN_DOWN(high_mem_start);
	max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz);

	/* Node 1 carries only the HIGHMEM zone; NORMAL stays empty. */
	zones_size[ZONE_NORMAL] = 0;
	zones_holes[ZONE_NORMAL] = 0;

	zones_size[ZONE_HIGHMEM] = max_high_pfn - min_high_pfn;
	zones_holes[ZONE_HIGHMEM] = 0;

	free_area_init_node(1,			/* node-id */
			    zones_size,		/* num pages per zone */
			    min_high_pfn,	/* first pfn of node */
			    zones_holes);	/* holes */

	high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
	kmap_init();
#endif
}