/*
 * The UEFI specification makes it clear that the operating system is
 * free to do whatever it wants with boot services code after
 * ExitBootServices() has been called. Ignoring this recommendation a
 * significant bunch of EFI implementations continue calling into boot
 * services code (SetVirtualAddressMap). In order to work around such
 * buggy implementations we reserve boot services region during EFI
 * init and make sure it stays executable. Then, after
 * SetVirtualAddressMap(), it is discarded.
 */
void __init efi_reserve_boot_services(void)
{
	void *p;

	/* Walk the EFI memory map one descriptor at a time. */
	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
		efi_memory_desc_t *md = p;
		u64 start = md->phys_addr;
		u64 size = md->num_pages << EFI_PAGE_SHIFT;

		if (md->type != EFI_BOOT_SERVICES_CODE &&
		    md->type != EFI_BOOT_SERVICES_DATA)
			continue;
		/* Only reserve where possible:
		 * - Not within any already allocated areas
		 * - Not over any memory area (really needed, if above?)
		 * - Not within any part of the kernel
		 * - Not the bios reserved area
		 */
		if ((start + size > __pa_symbol(_text)
				&& start <= __pa_symbol(_end)) ||
			!e820_all_mapped(start, start+size, E820_RAM) ||
			memblock_is_region_reserved(start, size)) {
			/*
			 * Could not reserve, skip it.  Zeroing num_pages
			 * marks the descriptor as handled — presumably so a
			 * later free pass skips this range; TODO confirm
			 * against efi_free_boot_services().
			 */
			md->num_pages = 0;
			memblock_dbg("Could not reserve boot range "
					"[0x%010llx-0x%010llx]\n",
						start, start+size-1);
		} else
			memblock_reserve(start, size);
	}
}
/*
 * Register the kernel's view of physical memory with the iomem resource
 * tree.  Every memblock memory region becomes either a "System RAM" or a
 * "reserved" (nomap) resource, and the kernel code/data segments are
 * inserted as children of whichever RAM resource contains them.
 *
 * Fix: the original text was missing the function's closing brace,
 * leaving the translation unit with unbalanced braces.
 */
static void __init request_standard_resources(void)
{
	struct memblock_region *region;
	struct resource *res;

	/* Physical extents of the kernel image sections. */
	kernel_code.start   = __pa_symbol(_text);
	kernel_code.end     = __pa_symbol(__init_begin - 1);
	kernel_data.start   = __pa_symbol(_sdata);
	kernel_data.end     = __pa_symbol(_end - 1);

	for_each_memblock(memory, region) {
		/* alloc_bootmem_low() panics on failure, so no NULL check. */
		res = alloc_bootmem_low(sizeof(*res));
		if (memblock_is_nomap(region)) {
			res->name  = "reserved";
			res->flags = IORESOURCE_MEM;
		} else {
			res->name  = "System RAM";
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		}
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;

		request_resource(&iomem_resource, res);

		/* Nest the kernel segments under the region that holds them. */
		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}
}
/*
 * Early x86-64 boot: copy the boot_params handed over by the boot loader
 * and reserve everything the kernel still needs before the memory
 * allocators come up, then enter the generic start_kernel().
 */
void __init x86_64_start_reservations(char *real_mode_data)
{
	copy_bootdata(__va(real_mode_data));

	/* Reserve the kernel image: _text up to __bss_stop. */
	memblock_reserve(__pa_symbol(&_text),
			 __pa_symbol(&__bss_stop) - __pa_symbol(&_text));

#ifdef CONFIG_BLK_DEV_INITRD
	/* Reserve INITRD */
	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
		/* Assume only end is not page aligned */
		unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
		unsigned long ramdisk_size = boot_params.hdr.ramdisk_size;
		unsigned long ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size);
		memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
	}
#endif

	reserve_ebda_region();

	/*
	 * At this point everything still needed from the boot loader
	 * or BIOS or kernel text should be early reserved or marked not
	 * RAM in e820. All other memory is free game.
	 */
	start_kernel();
}
/*
 * Probe for Xen-provided EFI services.  Returns a pointer to the
 * Xen-backed EFI system table on success, or NULL when we are not the
 * initial domain or the hypervisor reports no EFI information.
 */
static efi_system_table_t __init *xen_efi_probe(void)
{
	struct xen_platform_op op = {
		.cmd = XENPF_firmware_info,
		.u.firmware_info = {
			.type = XEN_FW_EFI_INFO,
			.index = XEN_FW_EFI_CONFIG_TABLE
		}
	};
	union xenpf_efi_info *info = &op.u.firmware_info.u.efi_info;

	/* Only dom0 may query firmware info; a failed call means no EFI. */
	if (!xen_initial_domain() || HYPERVISOR_platform_op(&op) < 0)
		return NULL;

	/* Here we know that Xen runs on EFI platform. */
	/* Route all EFI runtime services through the Xen wrappers. */
	efi.get_time = xen_efi_get_time;
	efi.set_time = xen_efi_set_time;
	efi.get_wakeup_time = xen_efi_get_wakeup_time;
	efi.set_wakeup_time = xen_efi_set_wakeup_time;
	efi.get_variable = xen_efi_get_variable;
	efi.get_next_variable = xen_efi_get_next_variable;
	efi.set_variable = xen_efi_set_variable;
	efi.query_variable_info = xen_efi_query_variable_info;
	efi.update_capsule = xen_efi_update_capsule;
	efi.query_capsule_caps = xen_efi_query_capsule_caps;
	efi.get_next_high_mono_count = xen_efi_get_next_high_mono_count;
	efi.reset_system = xen_efi_reset_system;

	/* Config table location returned by the first hypercall above. */
	efi_systab_xen.tables = info->cfg.addr;
	efi_systab_xen.nr_tables = info->cfg.nent;

	/* Ask for the firmware vendor string and revision. */
	op.cmd = XENPF_firmware_info;
	op.u.firmware_info.type = XEN_FW_EFI_INFO;
	op.u.firmware_info.index = XEN_FW_EFI_VENDOR;
	info->vendor.bufsz = sizeof(vendor);
	set_xen_guest_handle(info->vendor.name, vendor);

	if (HYPERVISOR_platform_op(&op) == 0) {
		efi_systab_xen.fw_vendor = __pa_symbol(vendor);
		efi_systab_xen.fw_revision = info->vendor.revision;
	} else
		efi_systab_xen.fw_vendor = __pa_symbol(L"UNKNOWN");

	/* EFI system table revision. */
	op.cmd = XENPF_firmware_info;
	op.u.firmware_info.type = XEN_FW_EFI_INFO;
	op.u.firmware_info.index = XEN_FW_EFI_VERSION;

	if (HYPERVISOR_platform_op(&op) == 0)
		efi_systab_xen.hdr.revision = info->version;

	/* Runtime services revision. */
	op.cmd = XENPF_firmware_info;
	op.u.firmware_info.type = XEN_FW_EFI_INFO;
	op.u.firmware_info.index = XEN_FW_EFI_RT_VERSION;

	if (HYPERVISOR_platform_op(&op) == 0)
		efi.runtime_version = info->version;

	return &efi_systab_xen;
}
/*
 * 32-bit early C entry point: reserve everything the boot loader or the
 * kernel image still needs so the early allocator cannot hand it out,
 * then enter the generic start_kernel().
 */
void __init i386_start_kernel(void)
{
	/* Kernel image: text, data and bss. */
	reserve_early(__pa_symbol(&_text), __pa_symbol(&_end), "TEXT DATA BSS");

#ifdef CONFIG_BLK_DEV_INITRD
	/* Reserve INITRD */
	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
		u64 ramdisk_image = boot_params.hdr.ramdisk_image;
		u64 ramdisk_size = boot_params.hdr.ramdisk_size;
		u64 ramdisk_end = ramdisk_image + ramdisk_size;
		reserve_early(ramdisk_image, ramdisk_end, "RAMDISK");
	}
#endif

	/*
	 * Initial page tables — presumably built by the head assembly
	 * before we got here; TODO confirm where the bounds are set.
	 */
	reserve_early(init_pg_tables_start, init_pg_tables_end,
			"INIT_PG_TABLE");

	reserve_ebda_region();

	/*
	 * At this point everything still needed from the boot loader
	 * or BIOS or kernel text should be early reserved or marked not
	 * RAM in e820. All other memory is free game.
	 */
	start_kernel();
}
int arch_hibernation_header_save(void *addr, unsigned int max_size) { struct arch_hibernate_hdr *hdr = addr; if (max_size < sizeof(*hdr)) return -EOVERFLOW; arch_hdr_invariants(&hdr->invariants); hdr->ttbr1_el1 = __pa_symbol(swapper_pg_dir); hdr->reenter_kernel = _cpu_resume; /* We can't use __hyp_get_vectors() because kvm may still be loaded */ if (el2_reset_needed()) hdr->__hyp_stub_vectors = __pa_symbol(__hyp_stub_vectors); else hdr->__hyp_stub_vectors = 0; /* Save the mpidr of the cpu we called cpu_suspend() on... */ if (sleep_cpu < 0) { pr_err("Failing to hibernate on an unknown CPU.\n"); return -ENODEV; } hdr->sleep_cpu_mpidr = cpu_logical_map(sleep_cpu); pr_info("Hibernating on CPU %d [mpidr:0x%llx]\n", sleep_cpu, hdr->sleep_cpu_mpidr); return 0; }
/*
 * Early x86-64 boot (reserve_early era): copy the boot_params from the
 * real-mode data, reserve the trampoline, kernel image, initrd and
 * EBDA, then enter the generic start_kernel().
 */
void __init x86_64_start_reservations(char *real_mode_data)
{
	copy_bootdata(__va(real_mode_data));

	reserve_trampoline_memory();

	/* Kernel image: text, data and bss. */
	reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop),
			"TEXT DATA BSS");

#ifdef CONFIG_BLK_DEV_INITRD
	/* Reserve INITRD */
	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
		unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
		unsigned long ramdisk_size = boot_params.hdr.ramdisk_size;
		unsigned long ramdisk_end = ramdisk_image + ramdisk_size;
		reserve_early(ramdisk_image, ramdisk_end, "RAMDISK");
	}
#endif

	reserve_ebda_region();

	/*
	 * At this point everything still needed from the boot loader
	 * or BIOS or kernel text should be early reserved or marked not
	 * RAM in e820. All other memory is free game.
	 */
	start_kernel();
}
/*
 * Check for some hardcoded bad areas that early boot is not allowed to
 * touch.  On a conflict, *addrp is advanced past the offending region
 * and 1 is returned — presumably so the caller retries the allocation
 * from the new address; TODO confirm against the caller.
 */
static inline int bad_addr(unsigned long *addrp, unsigned long size)
{
	unsigned long addr = *addrp, last = addr + size;

	/* various gunk below that needed for SMP startup */
	if (addr < 0x8000) {
		*addrp = 0x8000;
		return 1;
	}

	/* direct mapping tables of the kernel */
	if (last >= table_start<<PAGE_SHIFT && addr < table_end<<PAGE_SHIFT) {
		*addrp = table_end << PAGE_SHIFT;
		return 1;
	}

	/* initrd */
#ifdef CONFIG_BLK_DEV_INITRD
	if (LOADER_TYPE && INITRD_START && last >= INITRD_START &&
	    addr < INITRD_START+INITRD_SIZE) {
		*addrp = INITRD_START + INITRD_SIZE;
		return 1;
	}
#endif
	/* kernel code + 640k memory hole (later should not be needed, but
	   be paranoid for now) */
	if (last >= 640*1024 && addr < __pa_symbol(&_end)) {
		*addrp = __pa_symbol(&_end);
		return 1;
	}

	/* XXX ramdisk image here? */
	return 0;
}
/*
 * 32-bit early C entry point: reserve the trampoline, kernel image and
 * initrd, dispatch subarch-specific early setup, then enter the generic
 * start_kernel().
 */
void __init i386_start_kernel(void)
{
	reserve_trampoline_memory();

	/* Kernel image: text, data and bss. */
	reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop),
			"TEXT DATA BSS");

#ifdef CONFIG_BLK_DEV_INITRD
	/* Reserve INITRD */
	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
		u64 ramdisk_image = boot_params.hdr.ramdisk_image;
		u64 ramdisk_size = boot_params.hdr.ramdisk_size;
		u64 ramdisk_end = ramdisk_image + ramdisk_size;
		reserve_early(ramdisk_image, ramdisk_end, "RAMDISK");
	}
#endif

	/* Call the subarch specific early setup function */
	switch (boot_params.hdr.hardware_subarch) {
	case X86_SUBARCH_MRST:
		x86_mrst_early_setup();
		break;
	default:
		i386_default_early_setup();
		break;
	}

	/*
	 * At this point everything still needed from the boot loader
	 * or BIOS or kernel text should be early reserved or marked not
	 * RAM in e820. All other memory is free game.
	 */
	start_kernel();
}
/*
 * Early x86-64 boot: copy boot_params, reserve the kernel image, initrd
 * and EBDA, then enter the generic start_kernel().
 * (Comments translated from Korean; a malformed nested comment block at
 * the end has been repaired.)
 */
void __init x86_64_start_reservations(char *real_mode_data)
{
	copy_bootdata(__va(real_mode_data));	/* copy into &boot_params */

	/* Reserve the kernel region, _text through __bss_stop, in memblock. */
	memblock_reserve(__pa_symbol(&_text),
			 __pa_symbol(&__bss_stop) - __pa_symbol(&_text));

#ifdef CONFIG_BLK_DEV_INITRD
	/* Reserve INITRD */
	/* Also reserve the INITRD (init ramdisk) region loaded with us. */
	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
		/* Assume only end is not page aligned */
		unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
		unsigned long ramdisk_size = boot_params.hdr.ramdisk_size;
		unsigned long ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size);
		memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
	}
#endif

	reserve_ebda_region();	/* reserve the EBDA memory region */

	/*
	 * At this point everything still needed from the boot loader
	 * or BIOS or kernel text should be early reserved or marked not
	 * RAM in e820. All other memory is free game.
	 *
	 * I.e. apart from the int 15h reservations and the
	 * kernel/BIOS/boot-loader code regions, no memory is reserved.
	 */
	start_kernel();
}
/* * cr3 is the current toplevel pagetable page: the principle is the same as * cr0. Keep a local copy, and tell the Host when it changes. */ static void lguest_write_cr3(unsigned long cr3) { lazy_hcall1(LHCALL_NEW_PGTABLE, cr3); current_cr3 = cr3; /* These two page tables are simple, linear, and used during boot */ if (cr3 != __pa_symbol(swapper_pg_dir) && cr3 != __pa_symbol(initial_page_table)) cr3_changed = true; }
/*
 * Register standard RAM regions (below HIGHMEM_START) with the iomem
 * resource tree, and attach the kernel code/data resources to whichever
 * region ends up containing them.
 */
static void __init resource_init(void)
{
	int i;

	/* Bail out when uncached and I/O spaces are distinct — presumably
	 * a configuration where this registration does not apply; TODO
	 * confirm the intent of this guard. */
	if (UNCAC_BASE != IO_BASE)
		return;

	/* Kernel image section boundaries. */
	code_resource.start = __pa_symbol(&_text);
	code_resource.end = __pa_symbol(&_etext) - 1;
	data_resource.start = __pa_symbol(&_etext);
	data_resource.end = __pa_symbol(&_edata) - 1;

	/*
	 * Request address space for all standard RAM.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		struct resource *res;
		unsigned long start, end;

		start = boot_mem_map.map[i].addr;
		end = boot_mem_map.map[i].addr + boot_mem_map.map[i].size - 1;

		/* Skip regions entirely above HIGHMEM_START; clamp ones
		 * that straddle it. */
		if (start >= HIGHMEM_START)
			continue;
		if (end >= HIGHMEM_START)
			end = HIGHMEM_START - 1;

		res = alloc_bootmem(sizeof(struct resource));
		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
		case BOOT_MEM_INIT_RAM:
		case BOOT_MEM_ROM_DATA:
			res->name = "System RAM";
			break;
		case BOOT_MEM_RESERVED:	/* fallthrough */
		default:
			res->name = "reserved";
		}

		res->start = start;
		res->end = end;

		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		request_resource(&iomem_resource, res);

		/*
		 *  We don't know which RAM region contains kernel data,
		 *  so we try it repeatedly and let the resource manager
		 *  test it.
		 */
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
	}
}
/*
 * 32-bit early C entry point.  Reserves the trampoline, kernel image
 * and initrd, then runs either the subarch-specific early setup or,
 * under Xen, pulls the command line from the start-of-day info and
 * enters xen_start_kernel() before the generic start_kernel().
 */
void __init i386_start_kernel(void)
{
	reserve_trampoline_memory();

	/* Kernel image: text, data and bss. */
	reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop),
			"TEXT DATA BSS");

#ifndef CONFIG_XEN
#ifdef CONFIG_BLK_DEV_INITRD
	/* Reserve INITRD */
	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
		u64 ramdisk_image = boot_params.hdr.ramdisk_image;
		u64 ramdisk_size = boot_params.hdr.ramdisk_size;
		u64 ramdisk_end = ramdisk_image + ramdisk_size;
		reserve_early(ramdisk_image, ramdisk_end, "RAMDISK");
	}
#endif

	/* Call the subarch specific early setup function */
	switch (boot_params.hdr.hardware_subarch) {
	case X86_SUBARCH_MRST:
		x86_mrst_early_setup();
		break;
	default:
		i386_default_early_setup();
		break;
	}
#else
	/* Copy (and NUL-terminate) the guest command line from Xen. */
	{
		int max_cmdline;

		if ((max_cmdline = MAX_GUEST_CMDLINE) > COMMAND_LINE_SIZE)
			max_cmdline = COMMAND_LINE_SIZE;
		memcpy(boot_command_line, xen_start_info->cmd_line, max_cmdline);
		boot_command_line[max_cmdline-1] = '\0';
	}

	i386_default_early_setup();
	xen_start_kernel();
#endif

	/*
	 * At this point everything still needed from the boot loader
	 * or BIOS or kernel text should be early reserved or marked not
	 * RAM in e820. All other memory is free game.
	 */
	start_kernel();
}
/*
 * MIPS memory setup: run the board hook, make sure the kernel's init
 * section is present in the boot memory map, record the command line,
 * then bring up bootmem/sparse/paging.
 */
static void __init arch_mem_init(char **cmdline_p)
{
	extern void plat_mem_setup(void);
	phys_t init_mem, init_end, init_size;

	/* call board setup routine */
	plat_mem_setup();

	/* Page-rounded physical extent of the __init section. */
	init_mem = PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT;
	init_end = PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT;
	init_size = init_end - init_mem;
	if (init_size) {
		/* Make sure it is in the boot_mem_map */
		int i, found;

		found = 0;
		for (i = 0; i < boot_mem_map.nr_map; i++) {
			if (init_mem >= boot_mem_map.map[i].addr &&
			    init_mem < (boot_mem_map.map[i].addr +
					boot_mem_map.map[i].size)) {
				found = 1;
				break;
			}
		}
		/* Not covered by any existing entry — add it ourselves so
		 * the init memory can be handed back later. */
		if (!found)
			add_memory_region(init_mem, init_size,
					  BOOT_MEM_INIT_RAM);
	}

	printk("Determined physical RAM map:\n");
	print_memory_map();

	/* Seed the command line from the firmware (ARCS) string. */
	strlcpy(command_line, arcs_cmdline, sizeof(command_line));
	strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);

	*cmdline_p = command_line;

	parse_early_param();

	/* An early "mem=" style override may have rewritten the map. */
	if (usermem) {
		printk("User-defined physical RAM map:\n");
		print_memory_map();
	}

	bootmem_init();
	sparse_init();
	paging_init();
}
/*
 * Very early x86-64 C entry: clear BSS, set up the boot CPU's PDA, copy
 * boot data, then scan the command line for a handful of options that
 * must take effect before start_kernel().
 */
void __init x86_64_start_kernel(char * real_mode_data)
{
	char *s;

	clear_bss();
	pda_init(0);
	copy_bootdata(real_mode_data);

#ifdef CONFIG_SMP
	/* The boot CPU is online by definition. */
	cpu_set(0, cpu_online_map);
#endif

	/* default console: */
	if (!strstr(saved_command_line, "console="))
		strcat(saved_command_line, " console=tty0");

	s = strstr(saved_command_line, "earlyprintk=");
	if (s != NULL)
		setup_early_printk(s);

#ifdef CONFIG_DISCONTIGMEM
	/* "numa=" value starts 5 chars past the match. */
	s = strstr(saved_command_line, "numa=");
	if (s != NULL)
		numa_setup(s+5);
#endif

#ifdef CONFIG_X86_IO_APIC
	if (strstr(saved_command_line, "disableapic"))
		disable_apic = 1;
#endif

	/* You need early console to see that */
	if (__pa_symbol(&_end) >= KERNEL_TEXT_SIZE)
		panic("Kernel too big for kernel mapping\n");

	setup_boot_cpu_data();
	start_kernel();
}
/*
 * Install the vsyscall page (and the HPET page, when an HPET exists) at
 * their fixmap slots.
 */
static void __init map_vsyscall(void)
{
	unsigned long vsyscall_phys = __pa_symbol(&__vsyscall_0);

	__set_fixmap(VSYSCALL_FIRST_PAGE, vsyscall_phys,
		     PAGE_KERNEL_VSYSCALL);
	if (hpet_address)
		__set_fixmap(VSYSCALL_HPET, hpet_address,
			     PAGE_KERNEL_VSYSCALL);
}
/*
 * Initialise the MCPM low-level synchronisation structures and record
 * the optional power_up_setup hook for use by early resume code.
 * Always returns 0.
 */
int __init mcpm_sync_init(
	void (*power_up_setup)(unsigned int affinity_level))
{
	unsigned int i, j, mpidr, this_cluster;

	/* The sync structure layout is shared with assembly code. */
	BUILD_BUG_ON(MCPM_SYNC_CLUSTER_SIZE * MAX_NR_CLUSTERS != sizeof mcpm_sync);
	BUG_ON((unsigned long)&mcpm_sync & (__CACHE_WRITEBACK_GRANULE - 1));

	/*
	 * Set initial CPU and cluster states.
	 * Only one cluster is assumed to be active at this point.
	 */
	for (i = 0; i < MAX_NR_CLUSTERS; i++) {
		mcpm_sync.clusters[i].cluster = CLUSTER_DOWN;
		mcpm_sync.clusters[i].inbound = INBOUND_NOT_COMING_UP;
		for (j = 0; j < MAX_CPUS_PER_CLUSTER; j++)
			mcpm_sync.clusters[i].cpus[j].cpu = CPU_DOWN;
	}
	/* Mark every currently-online CPU of this cluster as up. */
	mpidr = read_cpuid_mpidr();
	this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	for_each_online_cpu(i) {
		mcpm_cpu_use_count[this_cluster][i] = 1;
		mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP;
	}
	mcpm_sync.clusters[this_cluster].cluster = CLUSTER_UP;
	/* Push the state out — presumably so CPUs coming up with caches
	 * off observe it; TODO confirm sync_cache_w() semantics. */
	sync_cache_w(&mcpm_sync);

	if (power_up_setup) {
		mcpm_power_up_setup_phys = __pa_symbol(power_up_setup);
		sync_cache_w(&mcpm_power_up_setup_phys);
	}

	return 0;
}
/* Replace instructions with better alternatives for this CPU type.

   This runs before SMP is initialized to avoid SMP problems with
   self modifying code. This implies that assymetric systems where
   APs have less capabilities than the boot processor are not handled.
   In this case boot with "noreplacement". */
void apply_alternatives(void *start, void *end)
{
	struct alt_instr *a;
	int diff, i, k;

	for (a = start; (void *)a < end; a++) {
		u8 *instr;

		/* Only patch sites whose required CPU feature is present. */
		if (!boot_cpu_has(a->cpuid))
			continue;

		BUG_ON(a->replacementlen > a->instrlen);
		instr = a->instr;
		/* vsyscall code is not mapped yet. resolve it manually. */
		if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END)
			instr = __va(instr - (u8*)VSYSCALL_START +
					(u8*)__pa_symbol(&__vsyscall_0));

		/* Overwrite with the replacement instruction bytes. */
		__inline_memcpy(instr, a->replacement, a->replacementlen);
		diff = a->instrlen - a->replacementlen;

		/* Pad the rest with nops */
		for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
			k = diff;
			if (k > ASM_NOP_MAX)
				k = ASM_NOP_MAX;
			__inline_memcpy(instr + i, k8_nops[k], k);
		}
	}
}
int pfn_is_nosave(unsigned long pfn) { unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT; unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT; return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn); }
/*
 * Start secondary CPU @cpu at secondary_startup.  Returns -1 when the
 * eFuse state says the core is not usable.
 */
static int zynq_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	if (zynq_efuse_cpu_state(cpu))
		return zynq_cpun_start(__pa_symbol(secondary_startup), cpu);

	return -1;
}
static int hip01_boot_secondary(unsigned int cpu, struct task_struct *idle) { phys_addr_t jumpaddr; unsigned int remap_reg_value = 0; struct device_node *node; jumpaddr = __pa_symbol(secondary_startup); hip01_set_boot_addr(HIP01_BOOT_ADDRESS, jumpaddr); node = of_find_compatible_node(NULL, NULL, "hisilicon,hip01-sysctrl"); if (WARN_ON(!node)) return -1; ctrl_base = of_iomap(node, 0); /* set the secondary core boot from DDR */ remap_reg_value = readl_relaxed(ctrl_base + REG_SC_CTRL); barrier(); remap_reg_value |= SC_SCTL_REMAP_CLR; barrier(); writel_relaxed(remap_reg_value, ctrl_base + REG_SC_CTRL); hip01_set_cpu(cpu, true); return 0; }
/*
 * Program the resume/boot jump address for @cpu in the system
 * controller.  CPU 0 has no register slot, so it (and the case where
 * the controller is unmapped) is ignored.
 */
void hi3xxx_set_cpu_jump(int cpu, void *jump_addr)
{
	int hw_cpu = cpu_logical_map(cpu);

	if (hw_cpu == 0 || !ctrl_base)
		return;

	/* One 4-byte slot per CPU, starting at CPU 1. */
	writel_relaxed(__pa_symbol(jump_addr),
		       ctrl_base + ((hw_cpu - 1) << 2));
}
/*
 * Install the vsyscall and vvar pages at their fixmap slots.  The
 * vsyscall page is mapped executable only in NATIVE mode; otherwise it
 * gets the same read-only protection as the vvar page.
 */
void __init map_vsyscall(void)
{
	extern char __vsyscall_page;
	unsigned long phys_vsyscall = __pa_symbol(&__vsyscall_page);
	unsigned long phys_vvar = __pa_symbol(&__vvar_page);

	__set_fixmap(VSYSCALL_FIRST_PAGE, phys_vsyscall,
		     vsyscall_mode == NATIVE
		     ? PAGE_KERNEL_VSYSCALL
		     : PAGE_KERNEL_VVAR);
	/* The fixmap slot must land exactly at the ABI-fixed address. */
	BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
		     (unsigned long)VSYSCALL_START);

	__set_fixmap(VVAR_PAGE, phys_vvar, PAGE_KERNEL_VVAR);
	BUILD_BUG_ON((unsigned long)__fix_to_virt(VVAR_PAGE) !=
		     (unsigned long)VVAR_ADDRESS);
}
/**
 * Mark in-use memory regions as reserved.
 * This prevents the bootmem allocator from allocating them.
 */
static void __init reserve_memory(void)
{
	/* Reserve the kernel page table memory */
	reserve_bootmem(table_start << PAGE_SHIFT,
			(table_end - table_start) << PAGE_SHIFT);

	/* Reserve kernel memory */
	reserve_bootmem(__pa_symbol(&_text),
			__pa_symbol(&_end) - __pa_symbol(&_text));

	/* Reserve physical page 0... it's a often a special BIOS page */
	reserve_bootmem(0, PAGE_SIZE);

	/* Reserve the Extended BIOS Data Area memory */
	if (ebda_addr)
		reserve_bootmem(ebda_addr, ebda_size);

	/* Reserve SMP trampoline */
	reserve_bootmem(SMP_TRAMPOLINE_BASE, 2*PAGE_SIZE);

	/* Find and reserve boot-time SMP configuration */
	find_mp_config();

	/* Reserve memory used by the initrd image */
	if (LOADER_TYPE && INITRD_START) {
		/* Only usable when it fits below the end of RAM. */
		if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
			printk(KERN_DEBUG
				"reserving memory used by initrd image\n");
			printk(KERN_DEBUG
				" INITRD_START=0x%lx, INITRD_SIZE=%ld bytes\n",
				(unsigned long) INITRD_START,
				(unsigned long) INITRD_SIZE);
			reserve_bootmem(INITRD_START, INITRD_SIZE);
			initrd_start = INITRD_START;
			initrd_end = initrd_start+INITRD_SIZE;
		} else {
			/* Out of range: disable rather than corrupt memory. */
			printk(KERN_ERR "initrd extends beyond end of memory "
				"(0x%08lx > 0x%08lx)\ndisabling initrd\n",
				(unsigned long)(INITRD_START + INITRD_SIZE),
				(unsigned long)(end_pfn << PAGE_SHIFT));
			initrd_start = 0;
		}
	}
}
/*
 * Register the kernel's view of physical memory with the iomem resource
 * tree.  Resources for all memblock memory regions are carved out of a
 * single early allocation; the kernel code/data segments (and, with
 * kexec, the crash kernel window) are nested under the region that
 * contains them.
 *
 * Fix: the original text was missing the function's closing brace,
 * leaving the translation unit with unbalanced braces.
 */
static void __init request_standard_resources(void)
{
	struct memblock_region *region;
	struct resource *res;
	unsigned long i = 0;
	size_t res_size;

	/* Physical extents of the kernel image sections. */
	kernel_code.start   = __pa_symbol(_text);
	kernel_code.end     = __pa_symbol(__init_begin - 1);
	kernel_data.start   = __pa_symbol(_sdata);
	kernel_data.end     = __pa_symbol(_end - 1);

	/* One struct resource per memblock region, allocated up front. */
	num_standard_resources = memblock.memory.cnt;
	res_size = num_standard_resources * sizeof(*standard_resources);
	standard_resources = memblock_alloc_low(res_size, SMP_CACHE_BYTES);
	if (!standard_resources)
		panic("%s: Failed to allocate %zu bytes\n", __func__, res_size);

	for_each_memblock(memory, region) {
		res = &standard_resources[i++];
		if (memblock_is_nomap(region)) {
			res->name  = "reserved";
			res->flags = IORESOURCE_MEM;
		} else {
			res->name  = "System RAM";
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		}
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
#ifdef CONFIG_KEXEC_CORE
		/* Userspace will find "Crash kernel" region in /proc/iomem. */
		if (crashk_res.end && crashk_res.start >= res->start &&
		    crashk_res.end <= res->end)
			request_resource(res, &crashk_res);
#endif
	}
}
/* it returns the next free pfn after initrd */
static unsigned long __init init_initrd(void)
{
	unsigned long end;

	/*
	 * Board specific code or command line parser should have
	 * already set up initrd_start and initrd_end. In these cases
	 * perfom sanity checks and use them if all looks good.
	 */
	if (!initrd_start || initrd_end <= initrd_start) {
#ifdef CONFIG_PROBE_INITRD_HEADER
		u32 *initrd_header;

		/*
		 * See if initrd has been added to the kernel image by
		 * arch/mips/boot/addinitrd.c. In that case a header is
		 * prepended to initrd and is made up by 8 bytes. The first
		 * word is a magic number and the second one is the size of
		 * initrd.  Initrd start must be page aligned in any cases.
		 */
		initrd_header = __va(PAGE_ALIGN(__pa_symbol(&_end) + 8)) - 8;
		/* 0x494E5244 is ASCII "INRD". */
		if (initrd_header[0] != 0x494E5244)
			goto disable;
		initrd_start = (unsigned long)(initrd_header + 2);
		initrd_end = initrd_start + initrd_header[1];
#else
		goto disable;
#endif
	}

	if (initrd_start & ~PAGE_MASK) {
		pr_err("initrd start must be page aligned\n");
		goto disable;
	}
	if (initrd_start < PAGE_OFFSET) {
		pr_err("initrd start < PAGE_OFFSET\n");
		goto disable;
	}

	/*
	 * Sanitize initrd addresses. For example firmware
	 * can't guess if they need to pass them through
	 * 64-bits values if the kernel has been built in pure
	 * 32-bit. We need also to switch from KSEG0 to XKPHYS
	 * addresses now, so the code can now safely use __pa().
	 */
	end = __pa(initrd_end);
	initrd_end = (unsigned long)__va(end);
	initrd_start = (unsigned long)__va(__pa(initrd_start));

	ROOT_DEV = Root_RAM0;
	return PFN_UP(end);
disable:
	initrd_start = 0;
	initrd_end = 0;
	return 0;
}
/*
 * 32-bit early C entry point: reserve the trampoline scratch area, the
 * kernel image and the initrd, run subarch-specific early setup, then
 * enter the generic start_kernel().
 */
void __init i386_start_kernel(void)
{
#ifdef CONFIG_X86_TRAMPOLINE
	/*
	 * But first pinch a few for the stack/trampoline stuff
	 * FIXME: Don't need the extra page at 4K, but need to fix
	 * trampoline before removing it. (see the GDT stuff)
	 */
	reserve_early_overlap_ok(PAGE_SIZE, PAGE_SIZE + PAGE_SIZE,
					 "EX TRAMPOLINE");
#endif

	/* Kernel image: text, data and bss. */
	reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop),
			"TEXT DATA BSS");

#ifdef CONFIG_BLK_DEV_INITRD
	/* Reserve INITRD */
	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
		/* Assume only end is not page aligned */
		u64 ramdisk_image = boot_params.hdr.ramdisk_image;
		u64 ramdisk_size = boot_params.hdr.ramdisk_size;
		u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size);
		reserve_early(ramdisk_image, ramdisk_end, "RAMDISK");
	}
#endif

	/* Call the subarch specific early setup function */
	switch (boot_params.hdr.hardware_subarch) {
	case X86_SUBARCH_MRST:
		x86_mrst_early_setup();
		break;
	default:
		i386_default_early_setup();
		break;
	}

	/*
	 * At this point everything still needed from the boot loader
	 * or BIOS or kernel text should be early reserved or marked not
	 * RAM in e820. All other memory is free game.
	 */
	start_kernel();
}
/*
 * Populate the shared reset-handler data block consumed by the Tegra
 * CPU reset handler, then enable the handler.
 */
void __init tegra_cpu_reset_handler_init(void)
{

#ifdef CONFIG_SMP
	/*
	 * Note: the cast reads only the first 32 bits of the possible
	 * mask, i.e. at most 32 CPUs are representable here.
	 */
	__tegra_cpu_reset_handler_data[TEGRA_RESET_MASK_PRESENT] =
		*((u32 *)cpu_possible_mask);
	/* Secondary CPUs enter the kernel at secondary_startup. */
	__tegra_cpu_reset_handler_data[TEGRA_RESET_STARTUP_SECONDARY] =
		__pa_symbol((void *)secondary_startup);
#endif

#ifdef CONFIG_PM_SLEEP
	/* LP1 resumes from IRAM, LP2 through tegra_resume. */
	__tegra_cpu_reset_handler_data[TEGRA_RESET_STARTUP_LP1] =
		TEGRA_IRAM_LPx_RESUME_AREA;
	__tegra_cpu_reset_handler_data[TEGRA_RESET_STARTUP_LP2] =
		__pa_symbol((void *)tegra_resume);
#endif

	tegra_cpu_reset_handler_enable();
}
/*
 * Bring up secondary CPU @cpu: program the SCU boot address with the
 * secondary entry point, power the core on, then send it a wakeup IPI.
 * Always returns 0.
 */
static int hix5hd2_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	hix5hd2_set_scu_boot_addr(HIX5HD2_BOOT_ADDRESS,
				  __pa_symbol(secondary_startup));
	hix5hd2_set_cpu(cpu, true);
	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
	return 0;
}
/*
 * Work around the Pentium F00F erratum: expose the IDT through a
 * read-only fixmap mapping and make the CPU use that alias.
 */
static void __cpuinit trap_init_f00f_bug(void)
{
	unsigned long ro_idt;

	__set_fixmap(FIX_F00F_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO);

	/*
	 * Update the IDT descriptor and reload the IDT so that
	 * it uses the read-only mapped virtual address.
	 */
	ro_idt = fix_to_virt(FIX_F00F_IDT);
	idt_descr.address = ro_idt;
	load_idt(&idt_descr);
}