void ccci_md_mem_reserve(void)
{
	int reserved_size = 0;
	phys_addr_t ptr = 0;
	int i;

	CCCI_UTIL_INF_MSG("ccci_md_mem_reserve===1.\n");
#if defined(CONFIG_OF)
	lk_meta_tag_info_collect();
	CCCI_UTIL_INF_MSG("ccci_md_mem_reserve===2.\n");
#if defined(FEATURE_DFO_EN)
	/* DFO enabled and using device tree */
	lk_dfo_tag_info_collect();
#endif
#endif
	CCCI_UTIL_INF_MSG("ccci_md_mem_reserve===3.\n");

	/* Get MD memory requirements */
	collect_md_settings();
	CCCI_UTIL_INF_MSG("ccci_md_mem_reserve===4.\n");

	/* For internal MD */
	for (i = 0; i < 4; i++) { /* 0~3 for internal */
		if (modem_size_list[i] == 0)
			continue;
		reserved_size = ALIGN(modem_size_list[MD_SYS1 + i], SZ_2M);
		memblock_set_current_limit(0xFFFFFFFF);
#ifdef CONFIG_ARM64
		ptr = arm64_memblock_steal(reserved_size, CCCI_MEM_ALIGN);
#else
		ptr = arm_memblock_steal(reserved_size, CCCI_MEM_ALIGN);
#endif
		memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
		if (ptr) {
			md_resv_mem_list[i] = ptr;
			CCCI_UTIL_INF_MSG("md%d mem reserve successfully, ptr=0x%pa, size=0x%x\n",
					  i + 1, &ptr, reserved_size);
		} else {
			CCCI_UTIL_INF_MSG("md%d mem reserve fail.\n", i + 1);
		}
	}

#if 0 /* def CONFIG_ARM64 */
	memblock_set_current_limit(0xFFFFFFFF);
	ptr = arm64_memblock_steal(90*1024*1024, CCCI_MEM_ALIGN);
	md_resv_mem_list[0] = ptr;
	ptr = arm64_memblock_steal(32*1024*1024, CCCI_MEM_ALIGN);
	md_resv_mem_list[1] = ptr;
	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
#endif

	/* Parse META setting */
	ccci_parse_meta_md_setting(md_info_tag_val);
	CCCI_UTIL_INF_MSG("ccci_md_mem_reserve===5.\n");

	/* Calculate memory layout */
	cal_md_mem_setting(MEM_LAY_OUT_VER);
	CCCI_UTIL_INF_MSG("ccci_md_mem_reserve===6.\n");
}
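The loop above repeats a fixed three-step pattern: lift the memblock limit, steal the block, restore the default. A minimal sketch of that pattern in isolation, assuming a 32-bit ARM kernel build context (the helper name md_steal_region is hypothetical, not part of any kernel API):

#include <linux/init.h>
#include <linux/memblock.h>
#include <asm/memblock.h>

/*
 * Hypothetical helper condensing the reserve pattern used above:
 * temporarily lift the allocation limit so the stolen block may land
 * anywhere in RAM, then restore the default. arm_memblock_steal()
 * reserves the region and removes it from memblock entirely, so the
 * block never reaches the page allocator.
 */
static phys_addr_t __init md_steal_region(phys_addr_t size, phys_addr_t align)
{
	phys_addr_t ptr;

	memblock_set_current_limit(0xFFFFFFFF);
	ptr = arm_memblock_steal(size, align);
	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
	return ptr;
}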
unsigned long __init mmu_mapin_ram(unsigned long top)
{
	unsigned long mapped;

	if (__map_without_ltlbs) {
		mapped = 0;
		mmu_mapin_immr();
#ifndef CONFIG_PIN_TLB_IMMR
		patch_instruction(&DTLBMiss_jmp, PPC_INST_NOP);
#endif
#ifndef CONFIG_PIN_TLB_TEXT
		mmu_patch_cmp_limit(&ITLBMiss_cmp, 0);
#endif
	} else {
		mapped = top & ~(LARGE_PAGE_SIZE_8M - 1);
	}

	mmu_patch_cmp_limit(&DTLBMiss_cmp, mapped);
	mmu_patch_cmp_limit(&FixupDAR_cmp, mapped);

	/* If the size of RAM is not an exact power of two, we may not
	 * have covered RAM in its entirety with 8 MiB pages.
	 * Consequently, restrict the top end of RAM currently
	 * allocable so that calls to the MEMBLOCK to allocate PTEs for "tail"
	 * coverage with normal-sized pages (or other reasons) do not
	 * attempt to allocate outside the allowed range.
	 */
	if (mapped)
		memblock_set_current_limit(mapped);

	block_mapped_ram = mapped;

	return mapped;
}
void __init bootmem_init(void)
{
	/* Reserve all memory below PHYS_OFFSET, as memory
	 * accounting doesn't work for pages below that address.
	 *
	 * If PHYS_OFFSET is zero reserve page at address 0:
	 * successful allocations should never return NULL.
	 */
	if (PHYS_OFFSET)
		memblock_reserve(0, PHYS_OFFSET);
	else
		memblock_reserve(0, 1);

	early_init_fdt_scan_reserved_mem();

	if (!memblock_phys_mem_size())
		panic("No memory found!\n");

	min_low_pfn = PFN_UP(memblock_start_of_DRAM());
	min_low_pfn = max(min_low_pfn, PFN_UP(PHYS_OFFSET));
	max_pfn = PFN_DOWN(memblock_end_of_DRAM());
	max_low_pfn = min(max_pfn, MAX_LOW_PFN);

	memblock_set_current_limit(PFN_PHYS(max_low_pfn));
	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));

	memblock_dump_all();
}
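The PFN conversions above determine where the limit lands. A standalone sketch of the rounding, assuming 4 KiB pages (the macro bodies mirror include/linux/pfn.h):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12	/* assuming 4 KiB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
#define PFN_PHYS(x)	((uint64_t)(x) << PAGE_SHIFT)

int main(void)
{
	/* Start addresses round up, end addresses round down, so a
	 * partial page at either end of DRAM is never handed out. */
	printf("PFN_UP(0x1234)   = %#lx\n", PFN_UP(0x1234UL));   /* 0x2 */
	printf("PFN_DOWN(0x2fff) = %#lx\n", PFN_DOWN(0x2fffUL)); /* 0x2 */
	printf("PFN_PHYS(0x2)    = %#llx\n",
	       (unsigned long long)PFN_PHYS(0x2UL));             /* 0x2000 */
	return 0;
}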
void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	BUG_ON(first_memblock_base != 0);

	/* 8xx can only access 8MB at the moment */
	memblock_set_current_limit(min_t(u64, first_memblock_size, 0x00800000));
}
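Both this hook and the FSL BookE variant further down perform the same clamp: early memblock allocations must stay below whatever the boot-time MMU setup has actually mapped. A condensed sketch (clamp_early_limit is a hypothetical name, not a kernel API):

#include <linux/kernel.h>
#include <linux/memblock.h>

/*
 * Hypothetical condensation of the setup_initial_memory_limit()
 * variants in this section: cap early allocations at whichever is
 * smaller, the end of the first memblock or the boot-time mapped
 * window (8 MiB on 8xx, 64 MiB on FSL BookE).
 */
static inline void clamp_early_limit(phys_addr_t memblock_top,
				     phys_addr_t mapped_top)
{
	memblock_set_current_limit(min_t(u64, memblock_top, mapped_top));
}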
static void __init map_mem(void)
{
	struct memblock_region *reg;
	phys_addr_t limit;

	/*
	 * Temporarily limit the memblock range. We need to do this as
	 * create_mapping requires puds, pmds and ptes to be allocated from
	 * memory addressable from the initial direct kernel mapping.
	 *
	 * The initial direct kernel mapping, located at swapper_pg_dir, gives
	 * us PUD_SIZE (4K pages) or PMD_SIZE (64K pages) memory starting from
	 * PHYS_OFFSET (which must be aligned to 2MB as per
	 * Documentation/arm64/booting.txt).
	 */
	if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
		limit = PHYS_OFFSET + PMD_SIZE;
	else
		limit = PHYS_OFFSET + PUD_SIZE;
	memblock_set_current_limit(limit);

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;

#ifndef CONFIG_ARM64_64K_PAGES
		/*
		 * For the first memory bank align the start address and
		 * current memblock limit to prevent create_mapping() from
		 * allocating pte page tables from unmapped memory.
		 * When 64K pages are enabled, the pte page table for the
		 * first PGDIR_SIZE is already present in swapper_pg_dir.
		 */
		if (start < limit)
			start = ALIGN(start, PMD_SIZE);
		if (end < limit) {
			limit = end & PMD_MASK;
			memblock_set_current_limit(limit);
		}
#endif

		__map_memblock(start, end);
	}

	/* Limit no longer required. */
	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
}
static void __init map_mem(void)
{
	struct memblock_region *reg;
	phys_addr_t limit;

	/*
	 * Temporarily limit the memblock range. We need to do this as
	 * create_mapping requires puds, pmds and ptes to be allocated from
	 * memory addressable from the initial direct kernel mapping.
	 *
	 * The initial direct kernel mapping, located at swapper_pg_dir, gives
	 * us PUD_SIZE (with SECTION maps) or PMD_SIZE (without SECTION maps)
	 * memory starting from PHYS_OFFSET (which must be aligned to 2MB as
	 * per Documentation/arm64/booting.txt).
	 */
	limit = PHYS_OFFSET + SWAPPER_INIT_MAP_SIZE;
	memblock_set_current_limit(limit);

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;

		if (ARM64_SWAPPER_USES_SECTION_MAPS) {
			/*
			 * For the first memory bank align the start address
			 * and current memblock limit to prevent
			 * create_mapping() from allocating pte page tables
			 * from unmapped memory. With the section maps, if the
			 * first block doesn't end on section size boundary,
			 * create_mapping() will try to allocate a pte page,
			 * which may be returned from an unmapped area.
			 * When section maps are not used, the pte page table
			 * for the current limit is already present in
			 * swapper_pg_dir.
			 */
			if (start < limit)
				start = ALIGN(start, SECTION_SIZE);
			if (end < limit) {
				limit = end & SECTION_MASK;
				memblock_set_current_limit(limit);
			}
		}
		__map_memblock(start, end);
	}

	/* Limit no longer required. */
	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
}
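The section rounding in both map_mem() variants is plain mask arithmetic; a standalone sketch with illustrative addresses, assuming 2 MiB sections:

#include <stdio.h>

#define SECTION_SIZE	(2ULL * 1024 * 1024)	/* 2 MiB sections assumed */
#define SECTION_MASK	(~(SECTION_SIZE - 1))
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long long start = 0x80100000ULL, end = 0x80500000ULL;

	/* Trim a bank [0x80100000, 0x80500000) to its fully-mapped core
	 * before page tables may be allocated from it. */
	printf("start -> %#llx\n", ALIGN(start, SECTION_SIZE)); /* 0x80200000 */
	printf("limit -> %#llx\n", end & SECTION_MASK);         /* 0x80400000 */
	return 0;
}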
void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	phys_addr_t limit = first_memblock_base + first_memblock_size;

	/* 64M mapped initially according to head_fsl_booke.S */
	memblock_set_current_limit(min_t(u64, limit, 0x04000000));
}
/* Free up now-unused memory */
static void free_sec(unsigned long start, unsigned long end, const char *name)
{
	unsigned long cnt = 0;

	while (start < end) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		cnt++;
		start += PAGE_SIZE;
	}
	if (cnt) {
		printk(" %ldk %s", cnt << (PAGE_SHIFT - 10), name);
		totalram_pages += cnt;
	}
}

void free_initmem(void)
{
#define FREESEC(TYPE) \
	free_sec((unsigned long)(&__ ## TYPE ## _begin), \
		 (unsigned long)(&__ ## TYPE ## _end), \
		 #TYPE);

	printk("Freeing unused kernel memory:");
	FREESEC(init);
	printk("\n");
	ppc_md.progress = NULL;
#undef FREESEC
}

#ifdef CONFIG_8xx /* No 8xx specific .c file to put that in ... */
void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/* We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors
	 */
	BUG_ON(first_memblock_base != 0);

	/* 8xx can only access 8MB at the moment */
	memblock_set_current_limit(min_t(u64, first_memblock_size, 0x00800000));
}
#endif /* CONFIG_8xx */
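For reference, FREESEC(init) pastes the TYPE token into the linker symbol names and stringizes it for the label, so the macro call above expands to:

free_sec((unsigned long)(&__init_begin),
	 (unsigned long)(&__init_end),
	 "init");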
unsigned long __init mmu_mapin_ram(unsigned long top)
{
	unsigned long v, s, mapped;
	phys_addr_t p;

	v = KERNELBASE;
	p = 0;
	s = total_lowmem;

	if (__map_without_ltlbs)
		return 0;

	while (s >= LARGE_PAGE_SIZE_16M) {
		pmd_t *pmdp;
		unsigned long val = p | _PMD_SIZE_16M | _PAGE_EXEC | _PAGE_HWWRITE;

		pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v);
		/* a 16M large page spans four consecutive PMD slots */
		pmd_val(*pmdp++) = val;
		pmd_val(*pmdp++) = val;
		pmd_val(*pmdp++) = val;
		pmd_val(*pmdp++) = val;

		v += LARGE_PAGE_SIZE_16M;
		p += LARGE_PAGE_SIZE_16M;
		s -= LARGE_PAGE_SIZE_16M;
	}

	while (s >= LARGE_PAGE_SIZE_4M) {
		pmd_t *pmdp;
		unsigned long val = p | _PMD_SIZE_4M | _PAGE_EXEC | _PAGE_HWWRITE;

		pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v);
		pmd_val(*pmdp) = val;

		v += LARGE_PAGE_SIZE_4M;
		p += LARGE_PAGE_SIZE_4M;
		s -= LARGE_PAGE_SIZE_4M;
	}

	mapped = total_lowmem - s;
	memblock_set_current_limit(mapped);

	return mapped;
}
void __init adjust_total_lowmem(void)
{
	unsigned long ram;
	int i;

	/* adjust lowmem size to __max_low_memory */
	ram = min((phys_addr_t)__max_low_memory, (phys_addr_t)total_lowmem);

	__max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM);

	pr_info("Memory CAM mapping: ");
	for (i = 0; i < tlbcam_index - 1; i++)
		pr_cont("%lu/", tlbcam_sz(i) >> 20);
	pr_cont("%lu Mb, residual: %dMb\n", tlbcam_sz(tlbcam_index - 1) >> 20,
		(unsigned int)((total_lowmem - __max_low_memory) >> 20));

	memblock_set_current_limit(memstart_addr + __max_low_memory);
}
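With, for example, three 64 MiB CAM entries and 64 MiB of lowmem left unmapped (illustrative numbers), the pr_cont calls continue the pr_info line, producing a single log line:

	Memory CAM mapping: 64/64/64 Mb, residual: 64Mb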
/*
 * MMU_init sets up the basic memory mappings for the kernel,
 * including both RAM and possibly some I/O regions,
 * and sets up the page tables and the MMU hardware ready to go.
 */
void __init MMU_init(void)
{
	if (ppc_md.progress)
		ppc_md.progress("MMU:enter", 0x111);

	/* parse args from command line */
	MMU_setup();

	/*
	 * Reserve gigantic pages for hugetlb. This MUST occur before
	 * lowmem_end_addr is initialized below.
	 */
	reserve_hugetlb_gpages();

	if (memblock.memory.cnt > 1) {
#ifndef CONFIG_WII
		memblock_enforce_memory_limit(memblock.memory.regions[0].size);
		printk(KERN_WARNING "Only using first contiguous memory region\n");
#else
		wii_memory_fixups();
#endif
	}

	total_lowmem = total_memory = memblock_end_of_DRAM() - memstart_addr;
	lowmem_end_addr = memstart_addr + total_lowmem;

#ifdef CONFIG_FSL_BOOKE
	/* Freescale Book-E parts expect lowmem to be mapped by fixed TLB
	 * entries, so we need to adjust lowmem to match the amount we can map
	 * in the fixed entries */
	adjust_total_lowmem();
#endif /* CONFIG_FSL_BOOKE */

	if (total_lowmem > __max_low_memory) {
		total_lowmem = __max_low_memory;
		lowmem_end_addr = memstart_addr + total_lowmem;
#ifndef CONFIG_HIGHMEM
		total_memory = total_lowmem;
		memblock_enforce_memory_limit(total_lowmem);
#endif /* CONFIG_HIGHMEM */
	}

	/* Initialize the MMU hardware */
	if (ppc_md.progress)
		ppc_md.progress("MMU:hw init", 0x300);
	MMU_init_hw();

	/* Map in all of RAM starting at KERNELBASE */
	if (ppc_md.progress)
		ppc_md.progress("MMU:mapin", 0x301);
	mapin_ram();

	/* Initialize early top-down ioremap allocator */
	ioremap_bot = IOREMAP_TOP;

	/* Map in I/O resources */
	if (ppc_md.progress)
		ppc_md.progress("MMU:setio", 0x302);

	if (ppc_md.progress)
		ppc_md.progress("MMU:exit", 0x211);

	/* From now on, btext is no longer BAT mapped if it was at all */
#ifdef CONFIG_BOOTX_TEXT
	btext_unmap();
#endif

	/* Shortly after that, the entire linear mapping will be available */
	memblock_set_current_limit(lowmem_end_addr);
}
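Taken together with the earlier hooks, MMU_init() completes a staged widening of the allocation limit; a condensed timeline (orientation only, not literal code):

/*
 * Limit lifecycle on 32-bit powerpc (condensed from the snippets above):
 *
 *   setup_initial_memory_limit(...)   boot: only the early-mapped window
 *                                     (e.g. 8M on 8xx, 64M on FSL BookE)
 *                                     is usable
 *   adjust_total_lowmem()             FSL BookE: limit = end of the
 *                                     CAM-mapped lowmem
 *   MMU_init() -> mapin_ram()         linear mapping established
 *   memblock_set_current_limit(lowmem_end_addr)
 *                                     all of lowmem is now allocable
 */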