int machine_kexec_prepare(struct kimage *image) { struct kexec_segment *current_segment; __be32 header; int i, err; /* * Validate that if the current HW supports SMP, then the SW supports * and implements CPU hotplug for the current HW. If not, we won't be * able to kexec reliably, so fail the prepare operation. */ if (num_possible_cpus() > 1 && platform_can_secondary_boot() && !platform_can_cpu_hotplug()) return -EINVAL; /* * No segment at default ATAGs address. try to locate * a dtb using magic. */ for (i = 0; i < image->nr_segments; i++) { current_segment = &image->segment[i]; if (!memblock_is_region_memory(current_segment->mem, current_segment->memsz)) return -EINVAL; err = get_user(header, (__be32*)current_segment->buf); if (err) return err; if (be32_to_cpu(header) == OF_DT_HEADER) dt_mem = current_segment->mem; } return 0; }
int machine_kexec_prepare(struct kimage *image)
{
#ifdef CONFIG_KEXEC_HARDBOOT
	struct kexec_segment *current_segment;
	__be32 header;
	int i, err;

	/*
	 * No segment at default ATAGs address. try to locate
	 * a dtb using magic.
	 */
	for (i = 0; i < image->nr_segments; i++) {
		current_segment = &image->segment[i];

		/*
		 * Reject segments that are not backed by real memory.
		 * Test the predicate directly instead of stashing its
		 * boolean result in 'err', which is reserved for real
		 * error codes from get_user() below.
		 */
		if (!memblock_is_region_memory(current_segment->mem,
					       current_segment->memsz))
			return -EINVAL;

		/* Record the kernel segment length for the hardboot path. */
		if (current_segment->mem == image->start)
			mem_text_write_kernel_word(&kexec_kernel_len,
						   current_segment->memsz);

		/* Peek at the first word of the user-supplied buffer. */
		err = get_user(header, (__be32 *)current_segment->buf);
		if (err)
			return err;

		/* A segment starting with the DT magic is the device tree. */
		if (be32_to_cpu(header) == OF_DT_HEADER) {
			mem_text_write_kernel_word(&kexec_boot_atags,
						   current_segment->mem);
			mem_text_write_kernel_word(&kexec_boot_atags_len,
						   current_segment->memsz);
		}
	}
#endif
	return 0;
}
int machine_kexec_prepare(struct kimage *image) { struct kexec_segment *current_segment; __be32 header; int i, err; /* * No segment at default ATAGs address. try to locate * a dtb using magic. */ for (i = 0; i < image->nr_segments; i++) { current_segment = &image->segment[i]; if (!memblock_is_region_memory(current_segment->mem, current_segment->memsz)) return -EINVAL; err = get_user(header, (__be32*)current_segment->buf); if (err) return err; if (be32_to_cpu(header) == OF_DT_HEADER) kexec_boot_atags = current_segment->mem; } return 0; }
/*
 * reserve_crashkernel() - reserves memory for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_base, crash_size;
	int ret;

	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
				&crash_size, &crash_base);
	/* no crashkernel= or invalid value specified */
	if (ret || !crash_size)
		return;

	/* Round the requested size up to a whole number of pages. */
	crash_size = PAGE_ALIGN(crash_size);

	if (crash_base == 0) {
		/* Current arm64 boot protocol requires 2MB alignment */
		crash_base = memblock_find_in_range(0, ARCH_LOW_ADDRESS_LIMIT,
						    crash_size, SZ_2M);
		if (crash_base == 0) {
			pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
				crash_size);
			return;
		}
	} else {
		/* User specifies base address explicitly. */
		if (!memblock_is_region_memory(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region is not memory\n");
			return;
		}

		if (memblock_is_region_reserved(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region overlaps reserved memory\n");
			return;
		}

		/* The explicit base must honour the same 2MB alignment. */
		if (!IS_ALIGNED(crash_base, SZ_2M)) {
			pr_warn("cannot reserve crashkernel: base address is not 2MB aligned\n");
			return;
		}
	}
	memblock_reserve(crash_base, crash_size);

	pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
		crash_base, crash_base + crash_size, crash_size >> 20);

	/* Publish the reserved range for the crash-kernel resource. */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
}
/**
 * machine_kexec_prepare - Prepare for a kexec reboot.
 *
 * Called from the core kexec code when a kernel image is loaded.
 * Validates that every segment lies in real memory, locates the
 * device-tree segment by its magic, and (under CONFIG_KEXEC_HARDBOOT)
 * records the kernel and dtb segment sizes for the hardboot path.
 */
int machine_kexec_prepare(struct kimage *image)
{
	struct kexec_segment *current_segment;
	__be32 header;
	int i, err;

	/*
	 * No segment at default ATAGs address. try to locate
	 * a dtb using magic.
	 */
	for (i = 0; i < image->nr_segments; i++) {
		current_segment = &image->segment[i];

		/* Every segment must be backed by real memory. */
		if (!memblock_is_region_memory(current_segment->mem,
					       current_segment->memsz))
			return -EINVAL;

#ifdef CONFIG_KEXEC_HARDBOOT
		/* The segment loaded at the entry point is the kernel. */
		if(current_segment->mem == image->start)
			kexec_kernel_len = current_segment->memsz;
#endif

		/* Peek at the first word of the user-supplied buffer. */
		err = get_user(header, (__be32*)current_segment->buf);
		if (err)
			return err;

		/* A segment starting with the DT magic is the device tree. */
		if (be32_to_cpu(header) == OF_DT_HEADER) {
			kexec_boot_atags = current_segment->mem;
#ifdef CONFIG_KEXEC_HARDBOOT
			kexec_boot_atags_len = current_segment->memsz;
#endif
		}
	}

	/* Remember the entry point for the actual kexec jump. */
	arm64_kexec_kimage_start = image->start;

	kexec_image_info(image);

	return 0;
}
/*
 * reserve_crashkernel() - reserves memory for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(phys_addr_t limit)
{
	unsigned long long crash_size = 0, crash_base = 0;
	int ret;

	ret = parse_crashkernel(boot_command_line, limit,
				&crash_size, &crash_base);
	/*
	 * Bail out when no crashkernel= was given, parsing failed, or a
	 * zero size was requested - otherwise "crashkernel=0" would fall
	 * through to a zero-sized memblock allocation below.
	 */
	if (ret || !crash_size)
		return;

	if (crash_base == 0) {
		/* No base requested: let memblock pick a 1MB-aligned region. */
		crash_base = memblock_alloc(crash_size, 1 << 20);
		if (crash_base == 0) {
			pr_warn("crashkernel allocation failed (size:%llx)\n",
				crash_size);
			return;
		}
	} else {
		/* User specifies base address explicitly. Sanity check */
		if (!memblock_is_region_memory(crash_base, crash_size) ||
		    memblock_is_region_reserved(crash_base, crash_size)) {
			pr_warn("crashkernel= has wrong address or size\n");
			return;
		}

		if (memblock_reserve(crash_base, crash_size)) {
			pr_warn("crashkernel reservation failed - out of memory\n");
			return;
		}
	}

	pr_info("Reserving %lldMB of memory at %lldMB for crashkernel\n",
		crash_size >> 20, crash_base >> 20);

	/* Publish the reserved range for the crash-kernel resource. */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
}
/*
 * valid_sdram - check that [addr, addr + size) is fully contained in
 * memory known to memblock; propagates memblock_is_region_memory()'s
 * result unchanged.
 */
static int valid_sdram(unsigned long addr, unsigned long size)
{
	return memblock_is_region_memory(addr, size);
}