/*
 * msm8625_reserve() - boot-time memory carve-outs for MSM8625.
 *
 * Runs the common MSM7x27A reservations first, then removes three small
 * regions from the memblock memory map so the kernel never maps or
 * allocates them.  Judging by the macro names these presumably hold the
 * secondary-CPU boot vector, warm-boot data and a non-cacheable scratch
 * area — TODO(review): confirm against the platform headers.
 */
static void __init msm8625_reserve(void)
{
	msm7x27a_reserve();
	memblock_remove(MSM8625_SECONDARY_PHYS, SZ_8);
	memblock_remove(MSM8625_WARM_BOOT_PHYS, SZ_32);
	memblock_remove(MSM8625_NON_CACHE_MEM, SZ_2K);
}
/*
 * pseries_remove_memblock() - tear down one LMB worth of memory.
 * @base: physical base address of the region.
 * @memblock_size: size of the region in bytes.
 *
 * Removes the pages, the memblock region and the bolted linear mapping
 * for the range.  Returns 0 on success or a negative error from
 * __remove_pages()/remove_section_mapping().
 */
static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
{
	unsigned long start, start_pfn;
	struct zone *zone;
	int ret;

	start_pfn = base >> PAGE_SHIFT;

	/* Memory that was never onlined only needs the memblock update. */
	if (!pfn_valid(start_pfn)) {
		memblock_remove(base, memblock_size);
		return 0;
	}

	zone = page_zone(pfn_to_page(start_pfn));

	/* Remove section mappings and sysfs entries for the pages. */
	ret = __remove_pages(zone, start_pfn, memblock_size >> PAGE_SHIFT);
	if (ret)
		return ret;

	/* Update memory regions for the remove. */
	memblock_remove(base, memblock_size);

	/* Remove htab bolted mappings for this section of memory. */
	start = (unsigned long)__va(base);
	ret = remove_section_mapping(start, start + memblock_size);

	/* Flush any vmalloc aliases that may hit this range. */
	vm_unmap_aliases();

	return ret;
}
/*
 * acclaim_reserve() - boot-time memory reservations for the Acclaim board.
 *
 * Sets up the display/ION carve-outs (ION path only when CONFIG_ION_OMAP
 * is enabled), removes the SMC and Ducati regions from the kernel memory
 * map, hands the Ducati pool (plus the secure input/output areas that the
 * IPU must also see) to the IPU static mempool, and finally runs the
 * generic OMAP reservations.
 */
static void __init acclaim_reserve(void)
{
	omap_init_ram_size();

#ifdef CONFIG_ION_OMAP
	omap_android_display_setup(&acclaim_dss_data, NULL, NULL,
				   &acclaim_fb_pdata,
				   get_omap_ion_platform_data());
	omap_ion_init();
#else
	omap_android_display_setup(&acclaim_dss_data, NULL, NULL,
				   &acclaim_fb_pdata, NULL);
#endif

	/* do the static reservations first */
	memblock_remove(PHYS_ADDR_SMC_MEM, PHYS_ADDR_SMC_SIZE);
	memblock_remove(PHYS_ADDR_DUCATI_MEM, PHYS_ADDR_DUCATI_SIZE);

	/* ipu needs to recognize secure input buffer area as well */
	omap_ipu_set_static_mempool(PHYS_ADDR_DUCATI_MEM,
				    PHYS_ADDR_DUCATI_SIZE +
				    OMAP4_ION_HEAP_SECURE_INPUT_SIZE +
				    OMAP4_ION_HEAP_SECURE_OUTPUT_WFDHDCP_SIZE);

	omap_reserve();
}
/*
 * mx6q_sabrelite_reserve() - carve out GPU, ION and framebuffer memory.
 *
 * Each region follows the alloc/free/remove pattern: memblock_alloc*()
 * finds a suitable physical range, memblock_free() drops the reservation
 * bookkeeping, and memblock_remove() then takes the range out of the
 * memory map entirely so the kernel never maps it.  The resulting
 * physical base is recorded in the corresponding platform data.
 */
static void __init mx6q_sabrelite_reserve(void)
{
	phys_addr_t phys;
	int i;

	/* GPU carve-out, allocated below 1 GiB. */
	if (imx6q_gpu_pdata.reserved_mem_size) {
		phys = memblock_alloc_base(imx6q_gpu_pdata.reserved_mem_size,
					   SZ_4K, SZ_1G);
		memblock_free(phys, imx6q_gpu_pdata.reserved_mem_size);
		memblock_remove(phys, imx6q_gpu_pdata.reserved_mem_size);
		imx6q_gpu_pdata.reserved_mem_base = phys;
	}

	/* ION heap 0 carve-out. */
	if (imx_ion_data.heaps[0].size) {
		phys = memblock_alloc(imx_ion_data.heaps[0].size, SZ_4K);
		memblock_free(phys, imx_ion_data.heaps[0].size);
		memblock_remove(phys, imx_ion_data.heaps[0].size);
		imx_ion_data.heaps[0].base = phys;
	}

	for (i = 0; i < ARRAY_SIZE(sabrelite_fb_data); i++)
		if (sabrelite_fb_data[i].res_size[0]) {
			/* reserve for background buffer */
			phys = memblock_alloc(sabrelite_fb_data[i].res_size[0],
					      SZ_4K);
			memblock_free(phys, sabrelite_fb_data[i].res_size[0]);
			memblock_remove(phys, sabrelite_fb_data[i].res_size[0]);
			sabrelite_fb_data[i].res_base[0] = phys;
		}
}
/*
 * mx6q_hdmidongle_reserve() - carve out GPU, ION and framebuffer memory.
 *
 * Allocates each region below 2 GiB (or anywhere for ION), then removes
 * it from the memblock memory map so the kernel never maps it.
 *
 * NOTE(review): unlike the sabrelite variant, no memblock_free() is done
 * before memblock_remove(), so the ranges stay on the reserved list while
 * being dropped from the memory list — presumably harmless, but confirm
 * this asymmetry is intentional.
 */
static void __init mx6q_hdmidongle_reserve(void)
{
	phys_addr_t phys;
	int i;

#if defined(CONFIG_MXC_GPU_VIV) || defined(CONFIG_MXC_GPU_VIV_MODULE)
	if (imx6q_gpu_pdata.reserved_mem_size) {
		phys = memblock_alloc_base(imx6q_gpu_pdata.reserved_mem_size,
					   SZ_4K, SZ_2G);
		memblock_remove(phys, imx6q_gpu_pdata.reserved_mem_size);
		imx6q_gpu_pdata.reserved_mem_base = phys;
	}
#endif

#if defined(CONFIG_ION)
	if (imx_ion_data.heaps[0].size) {
		phys = memblock_alloc(imx_ion_data.heaps[0].size, SZ_4K);
		memblock_remove(phys, imx_ion_data.heaps[0].size);
		imx_ion_data.heaps[0].base = phys;
	}
#endif

	for (i = 0; i < ARRAY_SIZE(hdmidongle_fb_data); i++)
		if (hdmidongle_fb_data[i].res_size[0]) {
			/* reserve for background buffer */
			phys = memblock_alloc_base(hdmidongle_fb_data[i].res_size[0],
						   SZ_4K, SZ_2G);
			memblock_remove(phys, hdmidongle_fb_data[i].res_size[0]);
			hdmidongle_fb_data[i].res_base[0] = phys;
		}
}
/*
 * gokey_reserve() - boot-time memory reservations for the Gokey board.
 *
 * Sets up ION/display carve-outs, removes the debug (ram console /
 * ramoops — only when the sec debug level is set), SMC and Ducati
 * regions from the kernel memory map, registers the Ducati pool (plus
 * the secure input/output areas) with the IPU, and finishes with the
 * generic OMAP reservations and the sec log buffer.
 */
static void __init gokey_reserve(void)
{
#ifdef CONFIG_ION_OMAP
	omap_init_ram_size();
	omap4_gokey_memory_display_init();
	omap4_gokey_init_carveout_sizes(get_omap_ion_platform_data());
	omap_ion_init();
#endif

	/* do the static reservations first */
	if (sec_debug_get_level()) {
#if defined(CONFIG_ANDROID_RAM_CONSOLE)
		memblock_remove(GOKEY_RAMCONSOLE_START, GOKEY_RAMCONSOLE_SIZE);
#endif
#if defined(CONFIG_RAMOOPS)
		memblock_remove(GOKEY_RAMOOPS_START, GOKEY_RAMOOPS_SIZE);
#endif
	}

	memblock_remove(PHYS_ADDR_SMC_MEM, PHYS_ADDR_SMC_SIZE);
	memblock_remove(PHYS_ADDR_DUCATI_MEM, PHYS_ADDR_DUCATI_SIZE);

	/* ipu needs to recognize secure input buffer area as well */
	omap_ipu_set_static_mempool(PHYS_ADDR_DUCATI_MEM,
				    PHYS_ADDR_DUCATI_SIZE +
				    OMAP4_ION_HEAP_SECURE_INPUT_SIZE +
				    OMAP4_ION_HEAP_SECURE_OUTPUT_WFDHDCP_SIZE);

	omap_reserve();
	sec_log_buf_reserve();
}
/*
 * tegra_ram_console_debug_reserve() - reserve memory for the ram console.
 * @ram_console_size: size of the console buffer in bytes.
 *
 * Places the ram console at the very top of DRAM and removes it — plus an
 * extra 1 MiB below it used to stash kexec data — from the memblock
 * memory map.  On any failure the ram console device's resources are
 * cleared so it will not probe against a bogus region.
 *
 * Fix: the original issued two overlapping memblock_remove() calls and
 * discarded the return value of the first.  The second call's range
 * [real_start, real_start + real_size) fully covers the console range,
 * so a single checked remove is sufficient.
 */
void __init tegra_ram_console_debug_reserve(unsigned long ram_console_size)
{
	struct resource *res;
	long ret;
	unsigned long real_start, real_size;

	res = platform_get_resource(&ram_console_device, IORESOURCE_MEM, 0);
	if (!res)
		goto fail;

	res->start = memblock_end_of_DRAM() - ram_console_size;
	res->end = res->start + ram_console_size - 1;

	/* Remove an extra 1M before ramconsole to store kexec stuff. */
	real_start = res->start - SZ_1M;
	real_size = ram_console_size + SZ_1M;
	ret = memblock_remove(real_start, real_size);
	if (ret)
		goto fail;

	return;

fail:
	ram_console_device.resource = NULL;
	ram_console_device.num_resources = 0;
	pr_err("Failed to reserve memory block for ram console\n");
}
/*
 * mx6q_sabresd_reserve() - boot-time carve-outs for the Sabre SD board.
 *
 * Reserves (in order): the primary framebuffer if its base was given on
 * the kernel command line, background framebuffers, the Android ram
 * console, the GPU pool and the ION heap.  Carved-out ranges are removed
 * from the memblock memory map so the kernel never maps them, and their
 * bases are recorded in the respective platform data.
 */
static void __init mx6q_sabresd_reserve(void)
{
	phys_addr_t phys;
	int i, fb0_reserved = 0, fb_array_size;

	/*
	 * Reserve primary framebuffer memory if its base address
	 * is set by kernel command line.
	 */
	fb_array_size = ARRAY_SIZE(sabresd_fb_data);
	if (fb_array_size > 0 && sabresd_fb_data[0].res_base[0] &&
	    sabresd_fb_data[0].res_size[0]) {
		/*
		 * NOTE(review): %x with res_base[0] may be a format
		 * mismatch if res_base is wider than int — verify type.
		 */
		if (sabresd_fb_data[0].res_base[0] > SZ_2G)
			printk(KERN_INFO"UI Performance downgrade with FB phys address %x!\n",
			       sabresd_fb_data[0].res_base[0]);
		memblock_reserve(sabresd_fb_data[0].res_base[0],
				 sabresd_fb_data[0].res_size[0]);
		memblock_remove(sabresd_fb_data[0].res_base[0],
				sabresd_fb_data[0].res_size[0]);
		sabresd_fb_data[0].late_init = true;
		ipu_data[ldb_data.ipu_id].bypass_reset = true;
		fb0_reserved = 1;
	}

	for (i = fb0_reserved; i < fb_array_size; i++)
		if (sabresd_fb_data[i].res_size[0]) {
			/* Reserve for other background buffer. */
			phys = memblock_alloc_base(sabresd_fb_data[i].res_size[0],
						   SZ_4K, SZ_2G);
			memblock_remove(phys, sabresd_fb_data[i].res_size[0]);
			sabresd_fb_data[i].res_base[0] = phys;
		}

#ifdef CONFIG_ANDROID_RAM_CONSOLE
	/* 1 MiB ram console, allocated below 1 GiB. */
	phys = memblock_alloc_base(SZ_1M, SZ_4K, SZ_1G);
	memblock_remove(phys, SZ_1M);
	memblock_free(phys, SZ_1M);
	ram_console_resource.start = phys;
	ram_console_resource.end = phys + SZ_1M - 1;
#endif

#if defined(CONFIG_MXC_GPU_VIV) || defined(CONFIG_MXC_GPU_VIV_MODULE)
	if (imx6q_gpu_pdata.reserved_mem_size) {
		phys = memblock_alloc_base(imx6q_gpu_pdata.reserved_mem_size,
					   SZ_4K, SZ_2G);
		memblock_remove(phys, imx6q_gpu_pdata.reserved_mem_size);
		imx6q_gpu_pdata.reserved_mem_base = phys;
	}
#endif

#if defined(CONFIG_ION)
	if (imx_ion_data.heaps[0].size) {
		phys = memblock_alloc(imx_ion_data.heaps[0].size, SZ_4K);
		memblock_remove(phys, imx_ion_data.heaps[0].size);
		imx_ion_data.heaps[0].base = phys;
	}
#endif
}
/*
 * omap4_panda_reserve() - boot-time memory reservations for PandaBoard.
 *
 * Removes the SMC and Ducati regions from the kernel memory map,
 * registers the Ducati pool (plus the secure input area) with the IPU,
 * then runs the generic OMAP reservations.
 */
static void __init omap4_panda_reserve(void)
{
	/* do the static reservations first */
	memblock_remove(PHYS_ADDR_SMC_MEM, PHYS_ADDR_SMC_SIZE);
	memblock_remove(PHYS_ADDR_DUCATI_MEM, PHYS_ADDR_DUCATI_SIZE);

	/* ipu needs to recognize secure input buffer area as well */
	omap_ipu_set_static_mempool(PHYS_ADDR_DUCATI_MEM,
				    PHYS_ADDR_DUCATI_SIZE +
				    OMAP_ION_HEAP_SECURE_INPUT_SIZE);

	omap_reserve();
}
/*
 * msm7x27a_reserve() - common boot-time reservations for MSM7x27A.
 *
 * Installs the platform reserve_info, removes the non-cacheable scratch
 * region and the bootloader/audio region from the memory map, then runs
 * the generic MSM reservations.  With CMA enabled, a contiguous region
 * for the camera ION heap is also declared at CAMERA_HEAP_BASE.
 */
static void __init msm7x27a_reserve(void)
{
	reserve_info = &msm7x27a_reserve_info;

	memblock_remove(MSM8625_NON_CACHE_MEM, SZ_2K);
	memblock_remove(BOOTLOADER_BASE_ADDR, msm_ion_audio_size);

	msm_reserve();

#ifdef CONFIG_CMA
	dma_declare_contiguous(&ion_cma_device.dev, msm_ion_camera_size,
			       CAMERA_HEAP_BASE, 0x26000000);
#endif
}
/*
 * omap_tablet_reserve() - boot-time memory reservations for the tablet.
 *
 * Removes the SMC and Ducati regions from the kernel memory map,
 * registers the Ducati pool (plus the secure input area) with the IPU,
 * initialises ION (when configured), then runs the generic OMAP
 * reservations and sets up the ram console region.
 */
static void __init omap_tablet_reserve(void)
{
	/* do the static reservations first */
	memblock_remove(PHYS_ADDR_SMC_MEM, PHYS_ADDR_SMC_SIZE);
	memblock_remove(PHYS_ADDR_DUCATI_MEM, PHYS_ADDR_DUCATI_SIZE);

	/* ipu needs to recognize secure input buffer area as well */
	omap_ipu_set_static_mempool(PHYS_ADDR_DUCATI_MEM,
				    PHYS_ADDR_DUCATI_SIZE +
				    OMAP4_ION_HEAP_SECURE_INPUT_SIZE);

#ifdef CONFIG_ION_OMAP
	omap_ion_init();
#endif

	omap_reserve();
	omap_ram_console_init(ACCLAIM_RAM_CONSOLE_START,
			      ACCLAIM_RAM_CONSOLE_SIZE);
}
/* Reserve a portion of memory for CEU 0 and CEU 1 buffers */ static void __init ecovec_mv_mem_reserve(void) { phys_addr_t phys; phys_addr_t size = CEU_BUFFER_MEMORY_SIZE; phys = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE); memblock_free(phys, size); memblock_remove(phys, size); ceu0_dma_membase = phys; phys = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE); memblock_free(phys, size); memblock_remove(phys, size); ceu1_dma_membase = phys; }
/*
 * lge_reserve() - boot-time memory reservations for LGE boards.
 *
 * With CONFIG_KEXEC_HARDBOOT, removes 1 MiB at the start of the second
 * memory bank (just after the persistent ram area) to hold the hardboot
 * page.  With CONFIG_ANDROID_PERSISTENT_RAM, registers the persistent
 * ram devices.
 *
 * Fix: the success/failure messages printed a phys_addr_t with %X,
 * which is a format mismatch when phys_addr_t is 64-bit (LPAE); the
 * kernel's %pa specifier handles the type portably.
 */
void __init lge_reserve(void)
{
#ifdef CONFIG_KEXEC_HARDBOOT
	/*
	 * Reserve space for hardboot page - just after ram_console,
	 * at the start of second memory bank.
	 */
	int ret;
	phys_addr_t start;
	struct membank *bank;

	if (meminfo.nr_banks < 2) {
		pr_err("%s: not enough membank\n", __func__);
		return;
	}

	bank = &meminfo.bank[1];
	start = bank->start + SZ_1M + LGE_PERSISTENT_RAM_SIZE;

	ret = memblock_remove(start, SZ_1M);
	if (!ret)
		pr_info("Hardboot page reserved at %pa\n", &start);
	else
		pr_err("Failed to reserve space for hardboot page at %pa!\n",
		       &start);
#endif
#if defined(CONFIG_ANDROID_PERSISTENT_RAM)
	lge_add_persist_ram_devices();
#endif
}
/*
 * early_init_dt_alloc_reserved_memory_arch() - allocate a reserved-memory
 * region described by the device tree.
 * @size: requested size in bytes.
 * @align: required alignment.
 * @start: lowest acceptable physical address (0 = no lower bound).
 * @end: highest acceptable physical address (0 = anywhere).
 * @nomap: if true, remove the region from the kernel memory map.
 * @res_base: out parameter receiving the allocated base address.
 *
 * Returns 0 on success, -ENOMEM if no suitable range could be found, or
 * the result of memblock_remove() for nomap regions.
 */
int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
	phys_addr_t *res_base)
{
	phys_addr_t base;
	phys_addr_t limit = end ? end : MEMBLOCK_ALLOC_ANYWHERE;

	/*
	 * __memblock_alloc_base() is used here rather than
	 * memblock_alloc_base(), because the latter panic()s on
	 * allocation failure.
	 */
	base = __memblock_alloc_base(size, align, limit);
	if (!base)
		return -ENOMEM;

	/* Verify the allocation landed inside the start..end window. */
	if (base < start) {
		memblock_free(base, size);
		return -ENOMEM;
	}

	*res_base = base;

	return nomap ? memblock_remove(base, size) : 0;
}
/*
 * msm_8974_reserve() - boot-time memory reservations for MSM8974.
 *
 * With CONFIG_KEXEC_HARDBOOT, removes 1 MiB at the start of the second
 * memory bank (just after the persistent ram area) for the hardboot
 * page.  Then installs the platform reserve_info, scans the flattened
 * device tree for memory reservations, and runs the generic MSM
 * reservations.
 *
 * Fix: the log messages printed a phys_addr_t with %X, a format
 * mismatch when phys_addr_t is 64-bit; use the kernel's %pa specifier.
 */
void __init msm_8974_reserve(void)
{
#ifdef CONFIG_KEXEC_HARDBOOT
	/*
	 * Reserve space for hardboot page - just after ram_console,
	 * at the start of second memory bank.
	 */
	int ret;
	phys_addr_t start;
	struct membank *bank;

	if (meminfo.nr_banks < 2) {
		pr_err("%s: not enough membank\n", __func__);
		return;
	}

	bank = &meminfo.bank[1];
	start = bank->start + SZ_1M + OPPO_PERSISTENT_RAM_SIZE;

	ret = memblock_remove(start, SZ_1M);
	if (ret)
		pr_err("Failed to reserve space for hardboot page at %pa!\n",
		       &start);
	else
		pr_info("Hardboot page reserved at %pa\n", &start);
#endif

	reserve_info = &msm8974_reserve_info;
	of_scan_flat_dt(dt_scan_for_memory_reserve, msm8974_reserve_table);
	msm_reserve();
}
/*
 * msm_8974_reserve() - boot-time memory reservations for MSM8974.
 *
 * Reserves debug memory (ramdump tags / crash last-logs) and persistent
 * ram when configured, installs the platform reserve_info and scans the
 * flattened device tree for reservations.  With CONFIG_KEXEC_HARDBOOT,
 * also removes 1 MiB near the end of the second memory bank for the
 * hardboot page before running the generic MSM reservations.
 *
 * Fix: the log messages printed a phys_addr_t with %X, a format
 * mismatch when phys_addr_t is 64-bit; use the kernel's %pa specifier.
 */
void __init msm_8974_reserve(void)
{
#ifdef CONFIG_KEXEC_HARDBOOT
	int ret;
	phys_addr_t start;
	struct membank *bank;
#endif

#if defined(CONFIG_RAMDUMP_TAGS) || defined(CONFIG_CRASH_LAST_LOGS)
	reserve_debug_memory();
#endif

#ifdef CONFIG_ANDROID_PERSISTENT_RAM
	reserve_persistent_ram();
#endif

	reserve_info = &msm8974_reserve_info;
	of_scan_flat_dt(dt_scan_for_memory_reserve, msm8974_reserve_table);

#ifdef CONFIG_KEXEC_HARDBOOT
	/*
	 * Reserve space for hardboot page - just after ram_console,
	 * at the start of second memory bank.
	 */
	if (meminfo.nr_banks < 2) {
		pr_err("%s: not enough membank\n", __func__);
		return;
	}

	bank = &meminfo.bank[1];
	start = bank->start + bank->size - SZ_1M + KEXEC_HB_OFFSET;

	ret = memblock_remove(start, SZ_1M);
	if (!ret)
		pr_info("Hardboot page reserved at %pa\n", &start);
	else
		pr_err("Failed to reserve space for hardboot page at %pa!\n",
		       &start);
#endif

	msm_reserve();
}
/**
 * omap_reserve_secure_workspace_addr() - reserves the secure workspace
 *
 * Sets the SMC secure_workspace address and size in the platform_data and
 * reserves the memory from being mapped to the Kernel memory space.
 *
 * Fix: the WARN message printed phys_addr_t values with 0x%08x, a
 * format mismatch when phys_addr_t is 64-bit (LPAE); the kernel's %pa
 * specifier handles the type portably.
 */
void __init omap_reserve_secure_workspace_addr(void)
{
	phys_addr_t start, size;

	/* Dont need to do anything for GP devices */
	if (OMAP2_DEVICE_TYPE_GP == omap_type())
		return;

	start = omap_secure_data.secure_workspace_addr;
	size = omap_secure_data.secure_workspace_size;

	/*
	 * SMC needs statically allocated memory not mapped by kernel.
	 * This is because the PPA will need to have a pre-knowledge of
	 * this address range for doing specific cleanups as needed. This
	 * may also imply(depending on SoC) additional firewalls setup
	 * from secure boot perspective.
	 */
	if (!start || !size)
		goto out;

	if (memblock_remove(start, size)) {
		WARN(1, "Unable to remove %pa for size %pa\n",
		     &start, &size);
		/* Reset the params */
		start = 0;
		size = 0;
	}

out:
	omap_secure_data.secure_workspace_addr = start;
	omap_secure_data.secure_workspace_size = size;
}
/*
 * pseries_remove_memblock() - remove one LMB worth of hotplugged memory.
 * @base: physical base address of the region.
 * @memblock_size: size of the region in bytes.
 *
 * Offlines and removes each memory section in the region under the
 * device-hotplug lock, then drops the whole region from the memblock
 * memory map.  Memory that was never onlined (invalid start pfn) only
 * needs the memblock update.  Always returns 0.
 *
 * Fix: the original advanced @base inside the section loop and then
 * passed the *advanced* value to memblock_remove(), removing a region
 * one memblock_size beyond the intended one.  Iterate on a local copy
 * so the final remove uses the original base.
 */
static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
{
	unsigned long block_sz, start_pfn;
	unsigned long addr;
	int sections_per_block;
	int i, nid;

	start_pfn = base >> PAGE_SHIFT;

	lock_device_hotplug();

	if (!pfn_valid(start_pfn))
		goto out;

	block_sz = pseries_memory_block_size();
	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
	nid = memory_add_physaddr_to_nid(base);

	/* Walk the sections on a copy so base stays valid for the remove. */
	addr = base;
	for (i = 0; i < sections_per_block; i++) {
		remove_memory(nid, addr, MIN_MEMORY_BLOCK_SIZE);
		addr += MIN_MEMORY_BLOCK_SIZE;
	}

out:
	/* Update memory regions for memory remove */
	memblock_remove(base, memblock_size);

	unlock_device_hotplug();
	return 0;
}
/*
 * dlpar_remove_lmb() - dynamically remove one logical memory block.
 * @lmb: drconf cell describing the LMB to remove.
 *
 * Verifies the LMB is removable, offlines its memory block device, then
 * removes the memory, updates the memblock map and deletes the LMB from
 * the device tree.  Returns 0 on success, -EINVAL if the LMB cannot be
 * removed, or the error from device_offline().
 */
static int dlpar_remove_lmb(struct of_drconf_cell *lmb)
{
	struct memory_block *mem_block;
	unsigned long block_sz;
	int nid, rc;

	if (!lmb_is_removable(lmb))
		return -EINVAL;

	mem_block = lmb_to_memblock(lmb);
	if (!mem_block)
		return -EINVAL;

	rc = device_offline(&mem_block->dev);
	/* Drop the reference taken by lmb_to_memblock(). */
	put_device(&mem_block->dev);
	if (rc)
		return rc;

	block_sz = pseries_memory_block_size();
	nid = memory_add_physaddr_to_nid(lmb->base_addr);

	remove_memory(nid, lmb->base_addr, block_sz);

	/* Update memory regions for memory remove */
	memblock_remove(lmb->base_addr, block_sz);

	dlpar_remove_device_tree_lmb(lmb);

	return 0;
}
/*
 * wii_memory_fixups() - coalesce the Wii's two RAM ranges into one.
 *
 * Merges the two discontiguous memory ranges into a single memblock
 * region and reserves the hole between them, since 32-bit PowerPC Linux
 * cannot handle discontiguous RAM.  Requires exactly two page-aligned
 * regions on entry and exactly one on exit (BUG otherwise).
 */
void __init wii_memory_fixups(void)
{
	struct memblock_region *p = memblock.memory.regions;

	/*
	 * This is part of a workaround to allow the use of two
	 * discontinuous RAM ranges on the Wii, even if this is
	 * currently unsupported on 32-bit PowerPC Linux.
	 *
	 * We coalesce the two memory ranges of the Wii into a
	 * single range, then create a reservation for the "hole"
	 * between both ranges.
	 */
	BUG_ON(memblock.memory.cnt != 2);
	BUG_ON(!page_aligned(p[0].base) || !page_aligned(p[1].base));

	/* trim unaligned tail */
	memblock_remove(ALIGN(p[1].base + p[1].size, PAGE_SIZE),
			(phys_addr_t)ULLONG_MAX);

	/* determine hole, add & reserve them */
	wii_hole_start = ALIGN(p[0].base + p[0].size, PAGE_SIZE);
	wii_hole_size = p[1].base - wii_hole_start;
	/* Adding the hole makes memblock merge the two regions into one. */
	memblock_add(wii_hole_start, wii_hole_size);
	memblock_reserve(wii_hole_start, wii_hole_size);

	BUG_ON(memblock.memory.cnt != 1);
	__memblock_dump_all();

	/* allow ioremapping the address space in the hole */
	__allow_ioremap_reserved = 1;
}
/*
 * bootloader_logger_reserve() - reserve memory for the bootloader logger.
 * @size: size of the logger buffer in bytes.
 *
 * Places the logger buffer at the very top of DRAM, removes it from the
 * memblock memory map and records the range in the platform device's
 * resource.  On failure the device resources are cleared so it will not
 * probe.
 *
 * NOTE(review): res->start is printed with %08x — a potential format
 * mismatch if resource_size_t is 64-bit here; verify for this platform.
 */
static void __init bootloader_logger_reserve(int size)
{
	struct resource *res;
	long ret;

	res = platform_get_resource(&bootloader_logger_device,
				    IORESOURCE_MEM, 0);
	if (!res)
		goto fail;

	res->start = memblock_end_of_DRAM() - size;
	/* Sanity check: this board expects DRAM to end at 0x7fff0000. */
	if (res->start != 0x7fff0000)
		pr_err("%s: shit! ram is not start at 0x7fff0000!\n", __func__);
	res->end = res->start + size - 1;

	ret = memblock_remove(res->start, size);
	if (ret)
		goto fail;

	pr_info("%s: base 0x%08x, size 0x%08x\n", __func__, res->start, size);
	return;

fail:
	bootloader_logger_device.resource = NULL;
	bootloader_logger_device.num_resources = 0;
	pr_err("Failed to reserve memory for bootloader logger\n");
}
/*
 * omap_ion_init() - register and initialise the OMAP ION heaps.
 *
 * Parses the memblock boot parameter, registers the ION device and then
 * removes every carveout/TILER heap's range from the kernel memory map.
 * Returns the ion_init() error if that fails, otherwise the result of
 * the last memblock_remove() performed (0 when no heap matched).
 */
static int __init omap_ion_init(void)
{
	int i;
	int ret;

	early_memblock(param_memblock);
	omap_register_ion();

	ret = ion_init();
	if (ret != 0) {
		pr_err("ion_init failed err %d\n", ret);
		return ret;
	}

	/* Carveout and TILER heaps must not be mapped by the kernel. */
	for (i = 0; i < omap_ion_data.nr; i++)
		if (omap_ion_data.heaps[i].type == ION_HEAP_TYPE_CARVEOUT ||
		    omap_ion_data.heaps[i].type == OMAP_ION_HEAP_TYPE_TILER) {
			ret = memblock_remove(omap_ion_data.heaps[i].base,
					      omap_ion_data.heaps[i].size);
			if (ret)
				pr_err("memblock remove of %x@%lx failed\n",
				       omap_ion_data.heaps[i].size,
				       omap_ion_data.heaps[i].base);
		}

	return ret;
}
/*
 * pmsav8_adjust_lowmem_bounds() - restrict memory to one contiguous bank.
 *
 * PMSAv8 setup here only supports memory that is contiguous from
 * PHYS_OFFSET.  The first region must start exactly at PHYS_OFFSET
 * (panic otherwise); every region after it is discarded in a single
 * memblock_remove() call.
 */
void __init pmsav8_adjust_lowmem_bounds(void)
{
	phys_addr_t mem_end;
	struct memblock_region *reg;
	bool first = true;

	for_each_memblock(memory, reg) {
		if (first) {
			phys_addr_t phys_offset = PHYS_OFFSET;

			/*
			 * Initially only use memory continuous from
			 * PHYS_OFFSET
			 */
			if (reg->base != phys_offset)
				panic("First memory bank must be contiguous from PHYS_OFFSET");
			mem_end = reg->base + reg->size;
			first = false;
		} else {
			/*
			 * memblock auto merges contiguous blocks, remove
			 * all blocks afterwards in one go (we can't remove
			 * blocks separately while iterating).
			 * 0 - reg->base wraps around, so the remove covers
			 * everything from reg->base to the top of the
			 * physical address space.
			 */
			pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n",
				  &mem_end, &reg->base);
			memblock_remove(reg->base, 0 - reg->base);
			break;
		}
	}
}
/*
 * mx6q_reserve() - boot-time carve-outs for i.MX6Q boards.
 *
 * Reserves the primary framebuffer when its base is supplied on the
 * kernel command line, allocates background framebuffers, and carves out
 * the GPU pool and ION heap.  Carved-out ranges are removed from the
 * memblock memory map and their bases recorded in the platform data.
 */
static void __init mx6q_reserve(void)
{
	phys_addr_t phys;
	int i, fb0_reserved = 0, fb_array_size;

	/*
	 * Reserve primary framebuffer memory if its base address
	 * is set by kernel command line.
	 */
	fb_array_size = ARRAY_SIZE(sabr_fb_data);
	if (fb_array_size > 0 && sabr_fb_data[0].res_base[0] &&
	    sabr_fb_data[0].res_size[0]) {
		memblock_reserve(sabr_fb_data[0].res_base[0],
				 sabr_fb_data[0].res_size[0]);
		memblock_remove(sabr_fb_data[0].res_base[0],
				sabr_fb_data[0].res_size[0]);
		sabr_fb_data[0].late_init = true;
		ipu_data[ldb_data.ipu_id].bypass_reset = true;
		fb0_reserved = 1;
	}

	for (i = fb0_reserved; i < fb_array_size; i++)
		if (sabr_fb_data[i].res_size[0]) {
			/* Reserve for other background buffer. */
			phys = memblock_alloc(sabr_fb_data[i].res_size[0],
					      SZ_4K);
			memblock_remove(phys, sabr_fb_data[i].res_size[0]);
			sabr_fb_data[i].res_base[0] = phys;
		}

#if defined(CONFIG_MXC_GPU_VIV) || defined(CONFIG_MXC_GPU_VIV_MODULE)
	/* GPU carve-out, allocated below 2 GiB. */
	if (imx6q_gpu_pdata.reserved_mem_size) {
		phys = memblock_alloc_base(imx6q_gpu_pdata.reserved_mem_size,
					   SZ_4K, SZ_2G);
		memblock_remove(phys, imx6q_gpu_pdata.reserved_mem_size);
		imx6q_gpu_pdata.reserved_mem_base = phys;
	}
#endif

#if defined(CONFIG_ION)
	if (imx_ion_data.heaps[0].size) {
		phys = memblock_alloc(imx_ion_data.heaps[0].size, SZ_4K);
		memblock_free(phys, imx_ion_data.heaps[0].size);
		memblock_remove(phys, imx_ion_data.heaps[0].size);
		imx_ion_data.heaps[0].base = phys;
	}
#endif
}
/*
 * pcm037_reserve() - carve out the 4 MiB mx3-camera buffer.
 *
 * Allocates a self-aligned range, releases the reservation bookkeeping
 * and removes the range from the memory map so the kernel never maps it,
 * then publishes its base in mx3_camera_base.
 */
static void __init pcm037_reserve(void)
{
	/* reserve 4 MiB for mx3-camera */
	phys_addr_t cam_base = memblock_alloc(MX3_CAMERA_BUF_SIZE,
					      MX3_CAMERA_BUF_SIZE);

	memblock_free(cam_base, MX3_CAMERA_BUF_SIZE);
	memblock_remove(cam_base, MX3_CAMERA_BUF_SIZE);

	mx3_camera_base = cam_base;
}
/*
 * mx31_3ds_reserve() - carve out the mx3-camera buffer.
 *
 * Allocates a self-aligned range of MX31_3DS_CAMERA_BUF_SIZE bytes,
 * releases the reservation bookkeeping and removes the range from the
 * memory map, then publishes its base in mx3_camera_base.
 */
static void __init mx31_3ds_reserve(void)
{
	/* reserve MX31_3DS_CAMERA_BUF_SIZE bytes for mx3-camera */
	phys_addr_t cam_base = memblock_alloc(MX31_3DS_CAMERA_BUF_SIZE,
					      MX31_3DS_CAMERA_BUF_SIZE);

	memblock_free(cam_base, MX31_3DS_CAMERA_BUF_SIZE);
	memblock_remove(cam_base, MX31_3DS_CAMERA_BUF_SIZE);

	mx3_camera_base = cam_base;
}
/*
 * pseries_remove_memblock() - tear down one LMB worth of memory.
 * @base: physical base address of the region.
 * @memblock_size: size of the region in bytes.
 *
 * Returns 0 on success or a negative error from __remove_pages()/
 * remove_section_mapping().
 */
static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
{
	unsigned long start, start_pfn;
	struct zone *zone;
	int ret;

	start_pfn = base >> PAGE_SHIFT;

	/* Memory that was never onlined only needs the memblock update. */
	if (!pfn_valid(start_pfn)) {
		memblock_remove(base, memblock_size);
		return 0;
	}

	zone = page_zone(pfn_to_page(start_pfn));

	/*
	 * Remove section mappings and sysfs entries for the
	 * section of the memory we are removing.
	 *
	 * NOTE: Ideally, this should be done in generic code like
	 * remove_memory(). But remove_memory() gets called by writing
	 * to sysfs "state" file and we can't remove sysfs entries
	 * while writing to it. So we have to defer it to here.
	 */
	ret = __remove_pages(zone, start_pfn, memblock_size >> PAGE_SHIFT);
	if (ret)
		return ret;

	/*
	 * Update memory regions for memory remove
	 */
	memblock_remove(base, memblock_size);

	/*
	 * Remove htab bolted mappings for this section of memory
	 */
	start = (unsigned long)__va(base);
	ret = remove_section_mapping(start, start + memblock_size);

	/* Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();

	return ret;
}
/*
 * omap_ion_init() - remove OMAP4 ION heap memory from the kernel map.
 *
 * Removes the ram console region and then every carveout/TILER heap's
 * range from the memblock memory map so the kernel never maps them.
 * Failures are logged but not propagated (void return).
 */
void __init omap_ion_init(void)
{
	int i;
	int ret;

	memblock_remove(OMAP4_RAMCONSOLE_START, OMAP4_RAMCONSOLE_SIZE);

	/* Carveout and TILER heaps must not be mapped by the kernel. */
	for (i = 0; i < omap4_ion_data.nr; i++)
		if (omap4_ion_data.heaps[i].type == ION_HEAP_TYPE_CARVEOUT ||
		    omap4_ion_data.heaps[i].type == OMAP_ION_HEAP_TYPE_TILER) {
			ret = memblock_remove(omap4_ion_data.heaps[i].base,
					      omap4_ion_data.heaps[i].size);
			if (ret)
				pr_err("memblock remove of %x@%lx failed\n",
				       omap4_ion_data.heaps[i].size,
				       omap4_ion_data.heaps[i].base);
		}
}
/*
 * board_mem_reserved() - remove the board's static reserved region.
 *
 * When a reserved size is configured, removes the region ending at
 * reserved_base_end from the memblock memory map and logs the total.
 *
 * NOTE(review): reserved_size is printed with %d — a potential format
 * mismatch if it is wider than int; verify its declared type.
 */
void __init board_mem_reserved(void)
{
	/* Region grows downward from reserved_base_end. */
	phys_addr_t base = reserved_base_end - reserved_size;

	if (reserved_size) {
		memblock_remove(base, reserved_size);
		pr_info("memory reserve: Total reserved %dM\n",
			reserved_size / SZ_1M);
	}
}
/*
 * early_init_dt_reserve_memory_arch() - reserve a region described by
 * the device tree.
 * @base: physical base address of the region.
 * @size: size of the region in bytes.
 * @nomap: if true, remove the region from the kernel memory map instead
 *         of merely reserving it.
 *
 * Returns -EBUSY if the region overlaps an existing reservation,
 * otherwise the result of memblock_remove()/memblock_reserve().
 */
int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
	phys_addr_t size, bool nomap)
{
	if (memblock_is_region_reserved(base, size))
		return -EBUSY;

	return nomap ? memblock_remove(base, size)
		     : memblock_reserve(base, size);
}