/*
 * msm_8974_reserve() - early memory reservations for MSM8974 (LGE variant).
 *
 * Collects per-node memory-reserve requests from the flat device tree into
 * msm8974_reserve_table, then performs the SoC-wide reservation via
 * msm_reserve() and the LGE board-specific one via lge_reserve().  Runs
 * from the machine reserve hook, before the page allocator is up.
 */
void __init msm_8974_reserve(void)
{
    reserve_info = &msm8974_reserve_info;
    of_scan_flat_dt(dt_scan_for_memory_reserve, msm8974_reserve_table);
#ifdef CONFIG_MACH_LGE
    /* Pick up LGE-specific /chosen properties while the FDT is still flat. */
    of_scan_flat_dt(lge_init_dt_scan_chosen, NULL);
#endif
    msm_reserve();
    lge_reserve();
}
/*
 * early_init_devtree() - early flat-device-tree scan (C6x).
 * @params: pointer to the FDT blob handed over by the bootloader.
 *
 * Records the blob in initial_boot_params, extracts the boot command line
 * from /chosen into c6x_command_line, reads #address-cells/#size-cells
 * from the root node, and registers memory from the /memory nodes.
 */
void __init early_init_devtree(void *params)
{
    initial_boot_params = params;

    /* Retrieve command line from the /chosen node. */
    of_scan_flat_dt(early_init_dt_scan_chosen, c6x_command_line);

    /* Initialize {size,address}-cells info from the root node. */
    of_scan_flat_dt(early_init_dt_scan_root, NULL);

    /* Register memory banks from the /memory nodes. */
    of_scan_flat_dt(early_init_dt_scan_memory, NULL);
}
/*
 * msm8610_reserve() - early memory reservations for MSM8610.
 *
 * Collects per-node memory-reserve requests from the flat device tree into
 * msm8610_reserve_table, then runs the SoC-level msm_reserve().  The LGE
 * ram-console carve-out only happens when the RAM console is configured.
 */
static void __init msm8610_reserve(void)
{
    reserve_info = &msm8610_reserve_info;
    of_scan_flat_dt(dt_scan_for_memory_reserve, msm8610_reserve_table);
#ifdef CONFIG_MACH_LGE
    /* Parse LGE-specific /chosen properties while the FDT is still flat. */
    of_scan_flat_dt(lge_init_dt_scan_chosen, NULL);
#endif
    msm_reserve();
#if defined(CONFIG_ANDROID_RAM_CONSOLE)
    /* LGE carve-out (ram console area) after the generic reservations. */
    lge_reserve();
#endif
}
/*
 * early_init_devtree() - early flat-device-tree scan (MIPS).
 * @params: pointer to the FDT blob from the bootloader.
 *
 * Saves the blob pointer, copies the /chosen bootargs into arcs_cmdline,
 * reads the root node's address/size cell counts, and registers memory
 * through the architecture hook (early_init_dt_scan_memory_arch).
 */
void __init early_init_devtree(void *params)
{
    initial_boot_params = params;
    of_scan_flat_dt(early_init_dt_scan_chosen, arcs_cmdline);
    of_scan_flat_dt(early_init_dt_scan_root, NULL);
    of_scan_flat_dt(early_init_dt_scan_memory_arch, NULL);
}
/*
 * setup_machine_fdt() - validate and begin parsing the flat device tree.
 * @dt_phys: physical address of the FDT blob passed by the bootloader.
 *
 * If no DTB was supplied, or the blob's magic does not match OF_DT_HEADER,
 * prints diagnostics via early_print and spins forever (cpu_relax loop) —
 * the system cannot boot without a valid device tree.  On success, records
 * the blob in initial_boot_params, derives the machine name from /model
 * (falling back to /compatible, then "<unknown>"), and scans /chosen, the
 * root cells, and the memory nodes.
 */
static void __init setup_machine_fdt(phys_addr_t dt_phys)
{
    struct boot_param_header *devtree;
    unsigned long dt_root;

    cpuinfo_store_cpu();

    /* Check we have a non-NULL DT pointer */
    if (!dt_phys) {
        early_print("\n"
            "Error: NULL or invalid device tree blob\n"
            "The dtb must be 8-byte aligned and passed in the first 512MB of memory\n"
            "\nPlease check your bootloader.\n");

        while (true)
            cpu_relax();
    }

    devtree = phys_to_virt(dt_phys);

    /* Check device tree validity */
    if (be32_to_cpu(devtree->magic) != OF_DT_HEADER) {
        /*
         * Fix: dt_phys is a phys_addr_t (integer), not a pointer.
         * Passing an integer type through a "%p" varargs slot is
         * undefined behaviour, so cast it to a pointer explicitly.
         */
        early_print("\n"
            "Error: invalid device tree blob at physical address 0x%p (virtual address 0x%p)\n"
            "Expected 0x%x, found 0x%x\n"
            "\nPlease check your bootloader.\n",
            (void *)(unsigned long)dt_phys, devtree,
            OF_DT_HEADER, be32_to_cpu(devtree->magic));

        while (true)
            cpu_relax();
    }

    initial_boot_params = devtree;
    dt_root = of_get_flat_dt_root();

    /* Prefer /model for the machine name, fall back to /compatible. */
    machine_name = of_get_flat_dt_prop(dt_root, "model", NULL);
    if (!machine_name)
        machine_name = of_get_flat_dt_prop(dt_root, "compatible", NULL);
    if (!machine_name)
        machine_name = "<unknown>";
    pr_info("Machine: %s\n", machine_name);

    /* Retrieve various information from the /chosen node */
    of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);

    /* Initialize {size,address}-cells info */
    of_scan_flat_dt(early_init_dt_scan_root, NULL);

    /* Setup memory, calling early_init_dt_add_memory_arch */
    of_scan_flat_dt(early_init_dt_scan_memory, NULL);
}
/*
 * early_init_devtree() - early flat-device-tree scan.
 * @params: pointer to the FDT blob from the bootloader.
 *
 * Saves the blob pointer, consults /chosen, then scans root cells and
 * memory nodes via the architecture hook.
 */
void __init early_init_devtree(void *params)
{
    /* Setup flat device-tree pointer */
    initial_boot_params = params;

    /* Retrieve various informations from the /chosen node of the
     * device-tree, including the platform type, initrd location and
     * size, and more ...
     * NOTE(review): data is NULL here, so the command line is presumably
     * handled elsewhere on this architecture — confirm against the caller.
     */
    of_scan_flat_dt(early_init_dt_scan_chosen, NULL);

    /* Scan memory nodes */
    of_scan_flat_dt(early_init_dt_scan_root, NULL);
    of_scan_flat_dt(early_init_dt_scan_memory_arch, NULL);
}
/**
 * dma_contiguous_reserve() - reserve area for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. It reserves contiguous areas for global, device independent
 * allocations and (optionally) all areas defined in device tree structures.
 */
void __init dma_contiguous_reserve(phys_addr_t limit)
{
    phys_addr_t sel_size = 0;

    pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);

    /* An explicit cma= on the command line overrides the Kconfig policy. */
    if (size_cmdline != -1) {
        sel_size = size_cmdline;
    } else {
#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
        sel_size = size_bytes;
#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
        sel_size = cma_early_percent_memory();
#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
        sel_size = min(size_bytes, cma_early_percent_memory());
#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
        sel_size = max(size_bytes, cma_early_percent_memory());
#endif
    }

    if (sel_size) {
        phys_addr_t base = 0;

        pr_debug("%s: reserving %ld MiB for global area\n", __func__,
             (unsigned long)sel_size / SZ_1M);

        if (dma_contiguous_reserve_area(sel_size, &base, limit) == 0) {
            dma_contiguous_def_base = base;
            /*
             * NOTE(review): assumes the .name field is large enough
             * for "cma_global" — confirm against the cma_areas
             * definition.
             */
            strcpy(cma_areas[cma_area_count - 1].name, "cma_global");
        }
    }

#ifdef CONFIG_OF
    /* Pick up additional CMA regions declared in the device tree. */
    of_scan_flat_dt(cma_fdt_scan, NULL);
#endif
}
/* Fix: removed stray ';' after the function's closing brace. */
/*
 * init_boot_reason() - lazily resolve the boot reason from the device tree.
 * @line: caller's source line number, used only for diagnostics.
 *
 * Drives a small state machine in g_br_state:
 * UNINIT -> INITIALIZING -> INITIALIZED.  A caller that arrives while
 * another is mid-initialization only logs a warning and bumps g_br_errcnt.
 * If the reason is already known (g_boot_reason != BR_UNKNOWN) the state
 * is finalized immediately; otherwise the flat device tree is scanned via
 * dt_get_boot_reason and the state is finalized only when the scan reports
 * success (non-zero), leaving UNINIT so a later call can retry.
 *
 * NOTE(review): the read-then-set on g_br_state is not an atomic
 * test-and-set, so two concurrent first callers could both enter the
 * INITIALIZING path — confirm callers are serialized.
 */
void init_boot_reason(unsigned int line)
{
#ifdef CONFIG_OF
    int rc;
    if (BOOT_REASON_INITIALIZING == atomic_read(&g_br_state)) {
        /* Someone else is initializing right now; count and bail. */
        pr_warn("%s (%d) state(%d)\n", __func__, line, atomic_read(&g_br_state));
        atomic_inc(&g_br_errcnt);
        return;
    }
    if (BOOT_REASON_UNINIT == atomic_read(&g_br_state))
        atomic_set(&g_br_state, BOOT_REASON_INITIALIZING);
    else
        return;
    if ((BR_UNKNOWN != g_boot_reason)) {
        /* Reason already populated (e.g. from the command line). */
        atomic_set(&g_br_state, BOOT_REASON_INITIALIZED);
        pr_alert("boot_reason = %d\n", g_boot_reason);
        return;
    }
    pr_info("%s %d %d %d\n", __func__, line, g_boot_reason, atomic_read(&g_br_state));
    rc = of_scan_flat_dt(dt_get_boot_reason, NULL);
    if (0 != rc)
        atomic_set(&g_br_state, BOOT_REASON_INITIALIZED);
    else
        /* Scan found nothing; allow a later caller to retry. */
        atomic_set(&g_br_state, BOOT_REASON_UNINIT);
    pr_info("%s %d %d %d\n", __func__, line, g_boot_reason, atomic_read(&g_br_state));
#endif
}
/*
 * msm_8974_reserve() - early memory reservations for MSM8974.
 *
 * Reserves the debug/persistent-RAM regions, collects DT-driven memory
 * reservations into msm8974_reserve_table, optionally carves out the
 * kexec-hardboot page at the end of the second memory bank, then runs
 * the SoC-level msm_reserve().
 */
void __init msm_8974_reserve(void)
{
#ifdef CONFIG_KEXEC_HARDBOOT
    int ret;
    phys_addr_t start;
    struct membank *bank;
#endif

#if defined(CONFIG_RAMDUMP_TAGS) || defined(CONFIG_CRASH_LAST_LOGS)
    reserve_debug_memory();
#endif

#ifdef CONFIG_ANDROID_PERSISTENT_RAM
    reserve_persistent_ram();
#endif

    reserve_info = &msm8974_reserve_info;
    of_scan_flat_dt(dt_scan_for_memory_reserve, msm8974_reserve_table);

#ifdef CONFIG_KEXEC_HARDBOOT
    /*
     * Reserve space for hardboot page - just after ram_console,
     * at the start of second memory bank.
     */
    if (meminfo.nr_banks < 2) {
        /*
         * NOTE(review): returning here also skips msm_reserve();
         * kept as in the original, but confirm this is intended.
         */
        pr_err("%s: not enough membank\n", __func__);
        return;
    }
    bank = &meminfo.bank[1];
    start = bank->start + bank->size - SZ_1M + KEXEC_HB_OFFSET;
    ret = memblock_remove(start, SZ_1M);
    /* Fix: %pa is the correct printk format for a phys_addr_t; the old
     * "0x%X" passed a possibly-64-bit value to a 32-bit conversion. */
    if (!ret)
        pr_info("Hardboot page reserved at %pa\n", &start);
    else
        pr_err("Failed to reserve space for hardboot page at %pa!\n",
               &start);
#endif
    msm_reserve();
}
/*
 * dt_cpu_ftrs_scan() - walk the flat device tree for CPU feature nodes.
 *
 * Does nothing unless the cpufeatures DT binding is in use; otherwise
 * hands every flat-tree node to dt_cpu_ftrs_scan_callback().
 */
void __init dt_cpu_ftrs_scan(void)
{
    if (using_dt_cpu_ftrs)
        of_scan_flat_dt(dt_cpu_ftrs_scan_callback, NULL);
}
/*
 * bitfix_is_needed() - report whether bitfix is needed on this platform.
 *
 * Walks the flat device tree; init_dt_scan_bitfix sets the flag passed
 * through the data pointer when a matching node is present.
 */
static bool bitfix_is_needed(void)
{
    bool needed = false;

    of_scan_flat_dt(init_dt_scan_bitfix, &needed);

    return needed;
}
/*
 * msm8226_reserve() - early memory reservations for MSM8226.
 *
 * Gathers DT memory-reserve requests into msm8226_reserve_table, sets up
 * the persistent-RAM area, then performs the SoC-level reservation.
 */
static void __init msm8226_reserve(void)
{
    reserve_info = &msm8226_reserve_info;
    of_scan_flat_dt(dt_scan_for_memory_reserve, msm8226_reserve_table);
    persistent_ram_early_init(&msm8226_persistent_ram);
    msm_reserve();
}
/*
 * msm_8974_reserve() - early memory reservations for MSM8974.
 *
 * Collects DT-driven memory-reserve requests into msm8974_reserve_table,
 * then runs the SoC-level msm_reserve() followed by the board-specific
 * lge_reserve() carve-outs.
 */
void __init msm_8974_reserve(void)
{
    reserve_info = &msm8974_reserve_info;
    of_scan_flat_dt(dt_scan_for_memory_reserve, msm8974_reserve_table);
    msm_reserve();
    lge_reserve();
}
/*
 * msm_8974_reserve() - early memory reservations for MSM8974 (OPPO variant).
 *
 * Optionally carves the kexec-hardboot page out of the second memory bank
 * (just past the persistent-RAM region), then collects DT-driven memory
 * reservations and runs the SoC-level msm_reserve().
 */
void __init msm_8974_reserve(void)
{
#ifdef CONFIG_KEXEC_HARDBOOT
    /*
     * Reserve space for hardboot page - just after ram_console,
     * at the start of second memory bank.
     */
    int ret;
    phys_addr_t start;
    struct membank *bank;

    if (meminfo.nr_banks < 2) {
        /*
         * NOTE(review): returning here also skips msm_reserve();
         * kept as in the original, but confirm this is intended.
         */
        pr_err("%s: not enough membank\n", __func__);
        return;
    }
    bank = &meminfo.bank[1];
    start = bank->start + SZ_1M + OPPO_PERSISTENT_RAM_SIZE;
    ret = memblock_remove(start, SZ_1M);
    /* Fix: %pa is the correct printk format for a phys_addr_t; the old
     * "0x%X" passed a possibly-64-bit value to a 32-bit conversion. */
    if (ret)
        pr_err("Failed to reserve space for hardboot page at %pa!\n",
               &start);
    else
        pr_info("Hardboot page reserved at %pa\n", &start);
#endif
    reserve_info = &msm8974_reserve_info;
    of_scan_flat_dt(dt_scan_for_memory_reserve, msm8974_reserve_table);
    msm_reserve();
}
/*
 * init_boot_common() - lazily resolve boot mode and reason from the DT.
 * @line: caller's source line number, used only for diagnostics.
 *
 * Drives a state machine in g_boot_init:
 * UNINIT -> INITIALIZING -> INITIALIZED.  Re-entrant callers during
 * initialization just log a warning.  If both g_boot_mode and
 * g_boot_reason are already known, initialization is complete; otherwise
 * the flat device tree is scanned via dt_get_boot_common, finalizing the
 * state only on a successful (non-zero) scan.
 *
 * NOTE(review): the read-then-set on g_boot_init is not an atomic
 * test-and-set — confirm callers cannot race.
 */
void init_boot_common(unsigned int line)
{
#ifdef CONFIG_OF
    int rc;
    if (BOOT_INITIALIZING == atomic_read(&g_boot_init)) {
        pr_warn("%s (%d) state(%d)\n", __func__, line, atomic_read(&g_boot_init));
        return;
    }
    if (BOOT_UNINIT == atomic_read(&g_boot_init))
        atomic_set(&g_boot_init, BOOT_INITIALIZING);
    else
        return;
    if ((UNKNOWN_BOOT != g_boot_mode) && (BR_UNKNOWN != g_boot_reason)) {
        /*
         * Fix: mark initialization complete before returning.  The
         * original returned with the state stuck at BOOT_INITIALIZING,
         * so every subsequent call warned and bailed out forever
         * (compare the analogous branch in init_boot_reason()).
         */
        atomic_set(&g_boot_init, BOOT_INITIALIZED);
        return;
    }
    pr_info("%s %d [%d %d] [%d]\n", __func__, line, g_boot_mode, g_boot_reason, atomic_read(&g_boot_init));
    rc = of_scan_flat_dt(dt_get_boot_common, NULL);
    if (0 != rc)
        atomic_set(&g_boot_init, BOOT_INITIALIZED);
    else
        /* Scan found nothing; allow a later caller to retry. */
        atomic_set(&g_boot_init, BOOT_UNINIT);
    pr_info("%s %d [%d %d] [%d]\n", __func__, line, g_boot_mode, g_boot_reason, atomic_read(&g_boot_init));
#endif
}
/**
 * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init dma_contiguous_reserve(phys_addr_t limit)
{
    phys_addr_t selected_size = 0;

    pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);

    /* An explicit cma= on the command line overrides the Kconfig policy. */
    if (size_cmdline != -1) {
        selected_size = size_cmdline;
    } else {
#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
        selected_size = size_bytes;
#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
        selected_size = cma_early_percent_memory();
#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
        selected_size = min(size_bytes, cma_early_percent_memory());
#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
        selected_size = max(size_bytes, cma_early_percent_memory());
#endif
    }

    /* Only set up the global area once. */
    if (selected_size && !dma_contiguous_default_area) {
        pr_debug("%s: reserving %ld MiB for global area\n", __func__,
             (unsigned long)selected_size / SZ_1M);

        dma_contiguous_reserve_area(selected_size, 0, limit,
                        &dma_contiguous_default_area);
    }

#if (defined(CONFIG_OF) && defined(CONFIG_HISI_CMA_RESERVE_MEMORY))
    /* Pick up additional CMA regions declared in the device tree. */
    of_scan_flat_dt(cma_reserve_mem_fdt_scan, NULL);
#endif
}
/* Fix: removed stray ';' after the function's closing brace. */
/*
 * lk_meta_tag_info_collect() - fetch modem-info bytes passed by LK via DT.
 *
 * Locates the /chosen node in the flat device tree (early_init_dt_get_chosen
 * stores the node handle in dt_chosen_node as a side effect), then reads the
 * "atag,mdinfo" property and copies four bytes from it into md_info_tag_val[].
 */
static void lk_meta_tag_info_collect(void)
{
    // Device tree method
    char *tags;
    int ret;
    ret = of_scan_flat_dt(early_init_dt_get_chosen, NULL);
    if(ret==0){
        /* of_scan_flat_dt returned 0: no /chosen node was found. */
        CCCI_UTIL_INF_MSG("device node no chosen node\n");
        return;
    }
    tags = (char*)of_get_flat_dt_prop(dt_chosen_node, "atag,mdinfo", NULL);
    if (tags) {
        /*
         * NOTE(review): the +8 presumably skips a legacy 8-byte ATAG
         * header preceding the payload; the layout is not visible here —
         * confirm against the LK/bootloader side.
         */
        tags+=8; // Fix me, Arm64 doesn't have atag defination now
        md_info_tag_val[0] = tags[0];
        md_info_tag_val[1] = tags[1];
        md_info_tag_val[2] = tags[2];
        md_info_tag_val[3] = tags[3];
        CCCI_UTIL_INF_MSG("Get MD info Tags\n");
        CCCI_UTIL_INF_MSG("md_inf[0]=%d\n", md_info_tag_val[0]);
        CCCI_UTIL_INF_MSG("md_inf[1]=%d\n", md_info_tag_val[1]);
        CCCI_UTIL_INF_MSG("md_inf[2]=%d\n", md_info_tag_val[2]);
        CCCI_UTIL_INF_MSG("md_inf[3]=%d\n", md_info_tag_val[3]);
    }else{
        CCCI_UTIL_INF_MSG("atag,mdinfo=NULL\n");
    }
}
/*
 * acpi_boot_table_init() - decide between ACPI and DT, then init ACPI tables.
 *
 * Called from setup_arch(), always.  ACPI is used instead of the device
 * tree unless it was disabled explicitly (acpi=off), or the device tree is
 * non-trivial (more than just a /chosen node) and acpi=force was not given.
 * When ACPI is chosen it is enabled, the tables are parsed and the FADT is
 * sanity-checked; on failure ACPI is disabled again unless acpi=force was
 * passed on the command line.  MADT and friends can be parsed after this
 * function returns with ACPI enabled.
 */
void __init acpi_boot_table_init(void)
{
    /* Explicit acpi=off always wins. */
    if (param_acpi_off)
        return;

    /* Without acpi=force, a populated device tree means "use DT". */
    if (!param_acpi_force && of_scan_flat_dt(dt_scan_depth1_nodes, NULL))
        return;

    /*
     * ACPI is disabled at this point. Enable it in order to parse
     * the ACPI tables and carry out sanity checks
     */
    enable_acpi();

    /*
     * Keep ACPI enabled only if the tables initialize and the FADT
     * passes its sanity checks; acpi=force keeps it enabled even when
     * initialization fails.
     */
    if (acpi_table_init() || acpi_fadt_sanity_check()) {
        pr_err("Failed to init ACPI tables\n");
        if (!param_acpi_force)
            disable_acpi();
    }
}
/*
 * vexpress_dt_smp_init_cpus() - mark the possible CPUs on a Versatile
 * Express board.
 *
 * Determines the core count either by counting cpu nodes in the flat
 * device tree (generic SCU) or by querying the Cortex-A9 SCU register,
 * clips it to nr_cpu_ids, marks those CPUs possible, and installs the GIC
 * softirq-based cross-call.  Does nothing on single-core configurations.
 */
static void __init vexpress_dt_smp_init_cpus(void)
{
    int core_count = 0;
    int cpu;

    if (vexpress_dt_scu == GENERIC_SCU) {
        core_count = of_scan_flat_dt(vexpress_dt_cpus_num, NULL);
    } else if (vexpress_dt_scu == CORTEX_A9_SCU) {
        core_count = scu_get_core_count(vexpress_dt_cortex_a9_scu_base);
    } else {
        WARN_ON(1);
    }

    /* Nothing to do unless we actually found a multi-core system. */
    if (core_count < 2)
        return;

    if (core_count > nr_cpu_ids) {
        pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
            core_count, nr_cpu_ids);
        core_count = nr_cpu_ids;
    }

    for (cpu = 0; cpu < core_count; ++cpu)
        set_cpu_possible(cpu, true);

    set_smp_cross_call(gic_raise_softirq);
}
/*
 * pSeries_probe() - platform probe for IBM pSeries (CHRP) machines.
 *
 * Returns 1 when the flat device tree identifies a genuine CHRP machine
 * (excluding Cell blades, whose firmware falsely claims "chrp"), after
 * scanning for hypertas properties to detect LPAR mode; returns 0
 * otherwise.
 */
static int __init pSeries_probe(void)
{
    unsigned long root = of_get_flat_dt_root();
    /* Fix: reuse the cached root handle instead of re-walking the flat
     * tree with a second of_get_flat_dt_root() call. */
    char *dtype = of_get_flat_dt_prop(root, "device_type", NULL);

    if (dtype == NULL)
        return 0;
    if (strcmp(dtype, "chrp"))
        return 0;

    /* Cell blades firmware claims to be chrp while it's not. Until this
     * is fixed, we need to avoid those here. */
    if (of_flat_dt_is_compatible(root, "IBM,CPBW-1.0") ||
        of_flat_dt_is_compatible(root, "IBM,CBEA"))
        return 0;

    DBG("pSeries detected, looking for LPAR capability...\n");

    /* Now try to figure out if we are running on LPAR */
    of_scan_flat_dt(pSeries_probe_hypertas, NULL);

    DBG("Machine is%s LPAR !\n",
        (powerpc_firmware_features & FW_FEATURE_LPAR) ? "" : " not");

    return 1;
}
/*
 * ram_console_early_init() - map the MTK RAM console buffer and initialize it.
 *
 * Depending on configuration the buffer lives in SRAM (located via the
 * device tree, or at a fixed address without CONFIG_OF) or in DRAM.  If
 * the kernel uses SRAM but the preloader/LK already logged into DRAM
 * (detected via REBOOT_REASON_SIG), the DRAM header is copied over and
 * invalidated.  Returns 0 when no buffer could be set up; otherwise the
 * result of ram_console_init().
 */
static int __init ram_console_early_init(void)
{
    struct ram_console_buffer *bufp = NULL;
    size_t buffer_size = 0;
#if defined(CONFIG_MTK_RAM_CONSOLE_USING_SRAM)
    /*
     * Fix: declared here instead of mid-block; the original mixed
     * declarations and statements, which C90 (the kernel dialect used
     * here) rejects.
     */
    struct ram_console_buffer *bufp_dram = NULL;
#ifdef CONFIG_OF
    mem_desc_t sram = {0};

    if (of_scan_flat_dt(dt_get_ram_console, &sram)) {
        if (sram.start == 0) {
            /* DT had no region; fall back to the configured one. */
            sram.start = CONFIG_MTK_RAM_CONSOLE_ADDR;
            sram.size = CONFIG_MTK_RAM_CONSOLE_SIZE;
        }
        bufp = ioremap(sram.start, sram.size);
        ram_console_buffer_pa = sram.start;
        if (bufp)
            buffer_size = sram.size;
        else {
            pr_err("ram_console: ioremap failed, [0x%x, 0x%x]\n",
                   sram.start, sram.size);
            return 0;
        }
        /* Check whether the preloader/LK logged into DRAM instead. */
        bufp_dram = remap_lowmem(CONFIG_MTK_RAM_CONSOLE_DRAM_ADDR,
                     CONFIG_MTK_RAM_CONSOLE_DRAM_SIZE);
        /* Fix: guard against a failed remap before dereferencing. */
        if (bufp_dram && bufp_dram->sig == REBOOT_REASON_SIG) {
            pr_err("ram_console: kernel use sram, but pl/lk use dram.\n");
            memcpy(bufp, bufp_dram, sizeof(struct ram_console_buffer));
            bufp_dram->sig = 0;
        }
    } else {
        return 0;
    }
#else
    bufp = (struct ram_console_buffer *)CONFIG_MTK_RAM_CONSOLE_ADDR;
    buffer_size = CONFIG_MTK_RAM_CONSOLE_SIZE;
    /* Check whether the preloader/LK logged into DRAM instead. */
    bufp_dram = remap_lowmem(CONFIG_MTK_RAM_CONSOLE_DRAM_ADDR,
                 CONFIG_MTK_RAM_CONSOLE_DRAM_SIZE);
    /* Fix: guard against a failed remap before dereferencing. */
    if (bufp_dram && bufp_dram->sig == REBOOT_REASON_SIG) {
        pr_err("ram_console: kernel use sram, but pl/lk use dram.\n");
        memcpy(bufp, bufp_dram, sizeof(struct ram_console_buffer));
        bufp_dram->sig = 0;
    }
#endif
#elif defined(CONFIG_MTK_RAM_CONSOLE_USING_DRAM)
    bufp = remap_lowmem(CONFIG_MTK_RAM_CONSOLE_DRAM_ADDR,
                CONFIG_MTK_RAM_CONSOLE_DRAM_SIZE);
    ram_console_buffer_pa = CONFIG_MTK_RAM_CONSOLE_DRAM_ADDR;
    if (bufp == NULL) {
        pr_err("ram_console: ioremap failed\n");
        return 0;
    }
    buffer_size = CONFIG_MTK_RAM_CONSOLE_DRAM_SIZE;
#else
    return 0;
#endif
    pr_err("ram_console: buffer start: 0x%p, size: 0x%zx\n",
           bufp, buffer_size);
    mtk_cpu_num = num_present_cpus();
    return ram_console_init(bufp, buffer_size);
}
/*
 * early_init_devtree() - early flat-device-tree scan (manual variant).
 * @params: pointer to the FDT blob.
 *
 * Command line and memory banks may already have been populated by other
 * boot mechanisms, so /chosen is consulted only when command_line is still
 * empty, and memory nodes only when sysmem has no banks yet.
 */
void __init early_init_devtree(void *params)
{
    /* Setup flat device-tree pointer */
    initial_boot_params = params;

    /* Retrieve various informations from the /chosen node of the
     * device-tree, including the platform type, initrd location and
     * size, TCE reserve, and more ...
     */
    if (!command_line[0])
        of_scan_flat_dt(early_init_dt_scan_chosen, command_line);

    /* Scan memory nodes and rebuild MEMBLOCKs */
    of_scan_flat_dt(early_init_dt_scan_root, NULL);
    if (sysmem.nr_banks == 0)
        of_scan_flat_dt(early_init_dt_scan_memory, NULL);
}
/*
 * early_init_devtree() - early FDT handling (xtensa).
 * @params: pointer to the flat device-tree blob.
 *
 * Delegates generic scanning to early_init_dt_scan(), locates the platform
 * I/O area from the tree, and adopts the DT-provided bootargs only when no
 * command line was supplied by other means.
 */
void __init early_init_devtree(void *params)
{
    early_init_dt_scan(params);
    of_scan_flat_dt(xtensa_dt_io_area, NULL);

    if (!command_line[0])
        strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
}
/*
 * msm_8974_reserve() - early memory reservations for MSM8974.
 *
 * Optionally reserves the crash/ramdump debug regions, gathers DT-driven
 * memory-reserve requests into msm8974_reserve_table, then performs the
 * SoC-level reservation.
 */
void __init msm_8974_reserve(void)
{
#if defined(CONFIG_RAMDUMP_TAGS) || defined(CONFIG_CRASH_LAST_LOGS)
    /* Debug regions must be carved out before the generic reservations. */
    reserve_debug_memory();
#endif
    reserve_info = &msm8974_reserve_info;
    of_scan_flat_dt(dt_scan_for_memory_reserve, msm8974_reserve_table);
    msm_reserve();
}
/*
 * msm8226_reserve() - early memory reservations for MSM8226.
 *
 * Gathers DT memory-reserve requests into msm8226_reserve_table, performs
 * the SoC-level reservation, then optionally carves out 2 MiB for the
 * Android RAM console.
 */
static void __init msm8226_reserve(void)
{
    reserve_info = &msm8226_reserve_info;
    of_scan_flat_dt(dt_scan_for_memory_reserve, msm8226_reserve_table);
    msm_reserve();
#ifdef CONFIG_ANDROID_RAM_CONSOLE
    /* 2 MiB for the Android RAM console. */
    ram_console_debug_reserve(SZ_1M *2);
#endif
}
/*
 * msm8226_reserve() - early memory reservations for MSM8226.
 *
 * Gathers DT memory-reserve requests into msm8226_reserve_table, performs
 * the SoC-level reservation, then optionally sets up the persistent-RAM
 * area used to retain logs across reboots.
 */
static void __init msm8226_reserve(void)
{
    reserve_info = &msm8226_reserve_info;
    of_scan_flat_dt(dt_scan_for_memory_reserve, msm8226_reserve_table);
    msm_reserve();
#ifdef CONFIG_ANDROID_PERSISTENT_RAM
    persistent_ram_early_init(&per_ram);
#endif
}
/*
 * msm_8974_reserve() - early memory reservations for MSM8974.
 *
 * Reserves the bootloader log buffer, gathers DT-driven memory-reserve
 * requests into msm8974_reserve_table, performs the SoC-level reservation,
 * then carves out the RAM console area.
 */
void __init msm_8974_reserve(void)
{
    /* 64 KiB for the log carried over from the bootloader. */
    bootloader_logger_reserve(SZ_64K);
    reserve_info = &msm8974_reserve_info;
    of_scan_flat_dt(dt_scan_for_memory_reserve, msm8974_reserve_table);
    msm_reserve();
    /* 1 MiB persistent RAM console area. */
    ram_console_reserve(SZ_1M);
}
/*
 * early_init_devtree() - early FDT handling (xtensa).
 * @params: pointer to the flat device-tree blob.
 *
 * DT memory nodes are honored only when no memory banks were registered
 * earlier; dt_memory_scan gates that inside the generic scan.  Also picks
 * up the platform I/O area and, if no command line was supplied by other
 * means, adopts the DT-provided bootargs.
 */
void __init early_init_devtree(void *params)
{
    /* Only take memory from the DT when nothing was registered already. */
    if (sysmem.nr_banks == 0)
        dt_memory_scan = true;

    early_init_dt_scan(params);
    of_scan_flat_dt(xtensa_dt_io_area, NULL);

    if (!command_line[0])
        strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
}
/*
 * exynos4_reserve() - reserve MFC (multi-format codec) memory on Exynos4.
 *
 * Looks for a "samsung,mfc-v5" node in the flat device tree; when found,
 * reserves the right/left MFC memory regions reported by the scan.
 * Compiled to a no-op without CONFIG_S5P_DEV_MFC.
 */
static void __init exynos4_reserve(void)
{
#ifdef CONFIG_S5P_DEV_MFC
    struct s5p_mfc_dt_meminfo mfc;

    /* Reserve memory for MFC only if it's available */
    mfc.compatible = "samsung,mfc-v5";
    if (of_scan_flat_dt(s5p_fdt_find_mfc_mem, &mfc) == 0)
        return;

    s5p_mfc_reserve_mem(mfc.roff, mfc.rsize, mfc.loff, mfc.lsize);
#endif
}
/*
 * msm_8974_reserve() - early memory reservations for MSM8974.
 *
 * Optionally reserves the crash/ramdump debug regions and the
 * persistent-RAM area, gathers DT-driven memory-reserve requests into
 * msm8974_reserve_table, then performs the SoC-level reservation.
 */
void __init msm_8974_reserve(void)
{
#if defined(CONFIG_RAMDUMP_TAGS) || defined(CONFIG_CRASH_LAST_LOGS)
    reserve_debug_memory();
#endif
#ifdef CONFIG_ANDROID_PERSISTENT_RAM
    reserve_persistent_ram();
#endif
    reserve_info = &msm8974_reserve_info;
    of_scan_flat_dt(dt_scan_for_memory_reserve, msm8974_reserve_table);
    msm_reserve();
}