Example #1
/* Load additional segments when a panic kernel is being loaded.
 * One segment holds the backup region, another holds the ELF headers
 * describing the crash memory image.
 */
int load_crashdump_segments(struct kexec_info *info, char* mod_cmdline,
				unsigned long UNUSED(max_addr),
				unsigned long UNUSED(min_base))
{
	void *tmp;
	unsigned long sz, elfcorehdr;
	int nr_ranges, align = 1024;
	struct memory_range *mem_range;
	crash_create_elf_headers_func crash_create = crash_create_elf32_headers;
	struct crash_elf_info *elf_info = &elf_info32;
	unsigned long start_offset = 0x80000000UL;

#ifdef __mips64
	if (arch_options.core_header_type == CORE_TYPE_ELF64) {
		elf_info = &elf_info64;
		crash_create = crash_create_elf64_headers;
		start_offset = 0xffffffff80000000UL;
	}
#endif

	if (get_kernel_paddr(elf_info))
		return -1;

	if (get_kernel_vaddr_and_size(elf_info, start_offset))
		return -1;

	if (get_crash_memory_ranges(&mem_range, &nr_ranges) < 0)
		return -1;

	info->backup_src_start = BACKUP_SRC_START;
	info->backup_src_size = BACKUP_SRC_SIZE;
	/* Create a backup region segment to store backup data */
	sz = (BACKUP_SRC_SIZE + align - 1) & ~(align - 1);
	tmp = xmalloc(sz);
	memset(tmp, 0, sz);
	info->backup_start = add_buffer(info, tmp, sz, sz, align,
				crash_reserved_mem.start,
				crash_reserved_mem.end, -1);

	if (crash_create(info, elf_info, crash_memory_range, nr_ranges,
			 &tmp, &sz, ELF_CORE_HEADER_ALIGN) < 0)
		return -1;
	elfcorehdr = add_buffer(info, tmp, sz, sz, align,
		crash_reserved_mem.start,
		crash_reserved_mem.end, -1);

	/*
	 * The backup segment comes after elfcorehdr, so use elfcorehdr as
	 * the top of the kernel's available memory.
	 */
	cmdline_add_mem(mod_cmdline, crash_reserved_mem.start,
		elfcorehdr - crash_reserved_mem.start);
	cmdline_add_elfcorehdr(mod_cmdline, elfcorehdr);
	cmdline_add_savemaxmem(mod_cmdline, saved_max_mem);

	dbgprintf("CRASH MEMORY RANGES:\n");
	dbgprintf("%016Lx-%016Lx\n", crash_reserved_mem.start,
			crash_reserved_mem.end);
	return 0;
}
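
Every variant in this collection rounds the backup-region size up to the segment alignment with the same mask idiom, (size + align - 1) & ~(align - 1). Below is a minimal, self-contained sketch of that idiom for power-of-two alignments; the align_up helper name is illustrative and not part of kexec-tools.

#include <assert.h>

/* Round size up to the next multiple of align; align must be a power of
 * two. This mirrors the expression used for the backup-region segment in
 * the loaders above. The helper name is illustrative only. */
static unsigned long align_up(unsigned long size, unsigned long align)
{
	return (size + align - 1) & ~(align - 1);
}

int main(void)
{
	assert(align_up(1, 1024) == 1024);	/* rounds up */
	assert(align_up(1024, 1024) == 1024);	/* already aligned */
	assert(align_up(1025, 1024) == 2048);	/* next boundary */
	return 0;
}
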
Example #2
int load_crashdump_segments(struct kexec_info *info, struct mem_ehdr *ehdr,
                            unsigned long max_addr, unsigned long min_base,
			    const char **cmdline)
{
	struct memory_range *mem_range;
	int nr_ranges;
	unsigned long sz;
	size_t size;
	void *tmp;
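	/* When loading a crash kernel, record where the crashed kernel lives
	 * and generate ELF core headers describing the crash memory ranges. */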
	if (info->kexec_flags & KEXEC_ON_CRASH &&
	    get_crash_memory_ranges(&mem_range, &nr_ranges) == 0) {
		int i;

		info->kern_paddr_start = kernel_code_start;
		for (i=0; i < nr_ranges; i++) {
			unsigned long long mstart = crash_memory_range[i].start;
			unsigned long long mend = crash_memory_range[i].end;
			if (!mstart && !mend)
				continue;
			if (kernel_code_start >= mstart &&
			    kernel_code_start < mend) {
				info->kern_vaddr_start = mstart + LOAD_OFFSET;
				break;
			}
		}
		info->kern_size = kernel_code_end - kernel_code_start + 1;
		if (crash_create_elf64_headers(info, &elf_info,
					       crash_memory_range, nr_ranges,
					       &tmp, &sz, EFI_PAGE_SIZE) < 0)
			return -1;

		elfcorehdr = add_buffer(info, tmp, sz, sz, EFI_PAGE_SIZE,
					min_base, max_addr, -1);
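		/* Remember the ELF-header segment in the table handed to
		 * purgatory below, and advertise it via elfcorehdr=. */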
		loaded_segments[loaded_segments_num].start = elfcorehdr;
		loaded_segments[loaded_segments_num].end = elfcorehdr + sz;
		loaded_segments_num++;
		cmdline_add_elfcorehdr(cmdline, elfcorehdr);
	}
	add_loaded_segments_info(info, ehdr, max_addr);
	/* Sort the loaded-segment table, place it in memory, and patch its
	 * base address and entry count into the purgatory symbols. */
	size = sizeof(struct loaded_segment) * loaded_segments_num;
	qsort(loaded_segments, loaded_segments_num,
	      sizeof(struct loaded_segment), seg_comp);
	loaded_segments_base = add_buffer(info, loaded_segments,
					  size, size, 16, 0, max_addr, -1);

	elf_rel_set_symbol(&info->rhdr, "__loaded_segments",
			   &loaded_segments_base, sizeof(long));
	elf_rel_set_symbol(&info->rhdr, "__loaded_segments_num",
			   &loaded_segments_num, sizeof(long));
	return 0;
}
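
The ia64 variant above sorts its loaded-segment table with qsort() before handing it to purgatory. The sketch below shows what such a comparator could look like, ordering segments by ascending start address; the struct layout and the test values are assumptions for illustration, not the kexec-tools definitions.

#include <assert.h>
#include <stdlib.h>

/* Illustrative stand-in for the loaded-segment bookkeeping above. */
struct loaded_segment {
	unsigned long start;
	unsigned long end;
};

/* qsort() comparator: order segments by ascending start address. */
static int seg_comp(const void *a, const void *b)
{
	const struct loaded_segment *x = a;
	const struct loaded_segment *y = b;

	if (x->start < y->start)
		return -1;
	return x->start > y->start;
}

int main(void)
{
	struct loaded_segment segs[] = {
		{ 0x3000000, 0x3004000 },
		{ 0x1000000, 0x1004000 },
	};

	/* Same call shape as in the loader above. */
	qsort(segs, 2, sizeof(struct loaded_segment), seg_comp);
	assert(segs[0].start == 0x1000000);
	return 0;
}
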
Example #3
/* Load additional segments when a panic kernel is being loaded.
 * One segment holds the backup region, another holds the ELF headers
 * describing the crash memory image.
 */
int load_crashdump_segments(struct kexec_info *info, char* mod_cmdline,
				uint64_t max_addr, unsigned long min_base)
{
	void *tmp;
	unsigned long sz;
	uint64_t elfcorehdr;
	int nr_ranges, align = 1024, i;
	unsigned long long end;
	struct memory_range *mem_range;

	if (get_crash_memory_ranges(&mem_range, &nr_ranges) < 0)
		return -1;

	/* Create a backup region segment to store backup data */
	sz = (BACKUP_SRC_SIZE + align - 1) & ~(align - 1);
	tmp = xmalloc(sz);
	memset(tmp, 0, sz);
	info->backup_start = add_buffer(info, tmp, sz, sz, align,
					0, max_addr, 1);
	reserve(info->backup_start, sz);

	/* On ppc64, memory ranges in the device tree are given as start
	 * and size rather than start and end, as is the case on other
	 * architectures such as i386. Because of this, the filesz
	 * calculation [end - start + 1] in crashdump-elf.c would be off
	 * by one when the memory ranges are loaded.
	 *
	 * To stay in sync with the other architectures, adjust the end
	 * value of every crash memory range to be inclusive before
	 * calling the generic function.
	 */

	for (i = 0; i < nr_ranges; i++) {
		end = crash_memory_range[i].end - 1;
		crash_memory_range[i].end = end;
	}


	/* Create elf header segment and store crash image data. */
	if (arch_options.core_header_type == CORE_TYPE_ELF64) {
		if (crash_create_elf64_headers(info, &elf_info64,
					       crash_memory_range, nr_ranges,
					       &tmp, &sz,
					       ELF_CORE_HEADER_ALIGN) < 0)
			return -1;
	}
	else {
		if (crash_create_elf32_headers(info, &elf_info32,
					       crash_memory_range, nr_ranges,
					       &tmp, &sz,
					       ELF_CORE_HEADER_ALIGN) < 0)
			return -1;
	}

	elfcorehdr = add_buffer(info, tmp, sz, sz, align, min_base,
				max_addr, 1);
	reserve(elfcorehdr, sz);
	/* modify and store the cmdline in a global array. This is later
	 * read by flatten_device_tree and modified if required
	 */
	add_cmdline_param(mod_cmdline, elfcorehdr, " elfcorehdr=", "K");
	add_cmdline_param(mod_cmdline, saved_max_mem, " savemaxmem=", "M");
	return 0;
}
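
The ppc64 comment above notes that the device tree reports start and size, so the stored end is exclusive, while the generic crashdump-elf.c code computes filesz as end - start + 1 and therefore expects an inclusive end. The self-contained check below demonstrates the adjustment; the range values are arbitrary examples.

#include <assert.h>

int main(void)
{
	unsigned long long start = 0x2000000ULL; /* arbitrary range start */
	unsigned long long size  = 0x1000000ULL; /* range size from the device tree */
	unsigned long long end   = start + size; /* exclusive end, as stored */

	/* Without the adjustment, filesz would come out one byte too large. */
	assert(end - start + 1 == size + 1);

	/* The loop above shifts end down by one to make it inclusive ... */
	end -= 1;

	/* ... so the generic filesz calculation is correct again. */
	assert(end - start + 1 == size);
	return 0;
}
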
Example #4
/* Load additional segments when a panic kernel is being loaded.
 * One segment holds the backup region, another holds the ELF headers
 * describing the crash memory image.
 */
int load_crashdump_segments(struct kexec_info *info, char* mod_cmdline,
				unsigned long max_addr, unsigned long min_base)
{
	void *tmp;
	unsigned long sz, elfcorehdr;
	int nr_ranges, align = 1024, i;
	struct memory_range *mem_range, *memmap_p;

	struct crash_elf_info elf_info = {
		.class = ELFCLASS64,
		.data = ELFDATA2LSB,
		.machine = EM_X86_64,
		.backup_src_start = BACKUP_SRC_START,
		.backup_src_end = BACKUP_SRC_END,
		.page_offset = page_offset,
	};

	if (get_kernel_paddr(info))
		return -1;

	if (get_kernel_vaddr_and_size(info))
		return -1;

	if (get_crash_memory_ranges(&mem_range, &nr_ranges,
				    info->kexec_flags) < 0)
		return -1;

	/* Memory regions which the panic kernel can safely use to boot into */
	sz = (sizeof(struct memory_range) * (KEXEC_MAX_SEGMENTS + 1));
	memmap_p = xmalloc(sz);
	memset(memmap_p, 0, sz);
	add_memmap(memmap_p, BACKUP_SRC_START, BACKUP_SRC_SIZE);
	sz = crash_reserved_mem.end - crash_reserved_mem.start + 1;
	add_memmap(memmap_p, crash_reserved_mem.start, sz);

	/* Create a backup region segment to store backup data */
	if (!(info->kexec_flags & KEXEC_PRESERVE_CONTEXT)) {
		sz = (BACKUP_SRC_SIZE + align - 1) & ~(align - 1);
		tmp = xmalloc(sz);
		memset(tmp, 0, sz);
		info->backup_start = add_buffer(info, tmp, sz, sz, align,
						0, max_addr, 1);
		if (delete_memmap(memmap_p, info->backup_start, sz) < 0)
			return -1;
	}

	/* Create elf header segment and store crash image data. */
	if (crash_create_elf64_headers(info, &elf_info,
				       crash_memory_range, nr_ranges,
				       &tmp, &sz,
				       ELF_CORE_HEADER_ALIGN) < 0)
		return -1;

	/* Hack: With some ld versions (GNU ld version 2.14.90.0.4 20030523),
	 * vmlinux program headers show a gap of two pages between the bss
	 * segment and the data segment, but the kernel effectively treats the
	 * gap as bss and overwrites any data placed there. Hence bloat the
	 * memsz of the elf core header segment to 16K to avoid it being
	 * placed in such gaps. This is a makeshift solution until it is
	 * fixed in the kernel.
	 */
	elfcorehdr = add_buffer(info, tmp, sz, 16*1024, align, min_base,
							max_addr, -1);
	if (delete_memmap(memmap_p, elfcorehdr, sz) < 0)
		return -1;
	cmdline_add_memmap(mod_cmdline, memmap_p);
	cmdline_add_elfcorehdr(mod_cmdline, elfcorehdr);

	/* Inform second kernel about the presence of ACPI tables. */
	for (i = 0; i < nr_ranges; i++) {
		unsigned long start, end;
		if (!(mem_range[i].type == RANGE_ACPI
		      || mem_range[i].type == RANGE_ACPI_NVS))
			continue;
		start = mem_range[i].start;
		end = mem_range[i].end;
		cmdline_add_memmap_acpi(mod_cmdline, start, end);
	}
	return 0;
}
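
The x86_64 and i386 variants keep a memmap table of regions the panic kernel may use, and carve the backup and ELF-header segments back out of it with delete_memmap(). The toy model below illustrates that carve-out for a single range with an inclusive end; the carve_out helper and its semantics are assumptions for illustration, not the kexec-tools implementation.

#include <assert.h>

struct range { unsigned long long start, end; };	/* end is inclusive */

/* Remove [addr, addr + size - 1] from one range, possibly splitting it in
 * two, and return how many pieces remain. Assumes the removed region lies
 * inside the input range. Purely illustrative; the real delete_memmap()
 * operates on a whole table and handles more edge cases. */
static int carve_out(struct range in, unsigned long long addr,
		     unsigned long long size, struct range out[2])
{
	int n = 0;

	if (in.start < addr)
		out[n++] = (struct range){ in.start, addr - 1 };
	if (addr + size - 1 < in.end)
		out[n++] = (struct range){ addr + size, in.end };
	return n;
}

int main(void)
{
	struct range crash = { 0x1000000, 0x1ffffff };	/* reserved region */
	struct range left[2];
	int n = carve_out(crash, 0x1800000, 0x4000, left);

	assert(n == 2);
	assert(left[0].start == 0x1000000 && left[0].end == 0x17fffff);
	assert(left[1].start == 0x1804000 && left[1].end == 0x1ffffff);
	return 0;
}
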
Example #5
/* Load additional segments when a panic kernel is being loaded.
 * One segment holds the backup region, another holds the ELF headers
 * describing the crash memory image.
 */
int load_crashdump_segments(struct kexec_info *info, char* mod_cmdline,
                            unsigned long max_addr, unsigned long min_base)
{
    void *tmp;
    unsigned long sz, elfcorehdr;
    int nr_ranges, align = 1024;
    struct memory_range *mem_range, *memmap_p;

    if (get_crash_memory_ranges(&mem_range, &nr_ranges) < 0)
        return -1;

    /*
     * If the core type was not set on the command line, detect it here
     * automatically.
     */
    if (arch_options.core_header_type == CORE_TYPE_UNDEF) {
        arch_options.core_header_type =
            get_core_type(info, mem_range, nr_ranges);
    }

    /* Memory regions which the panic kernel can safely use to boot into */
    sz = (sizeof(struct memory_range) * (KEXEC_MAX_SEGMENTS + 1));
    memmap_p = xmalloc(sz);
    memset(memmap_p, 0, sz);
    add_memmap(memmap_p, BACKUP_SRC_START, BACKUP_SRC_SIZE);
    sz = crash_reserved_mem.end - crash_reserved_mem.start + 1;
    add_memmap(memmap_p, crash_reserved_mem.start, sz);

    /* Create a backup region segment to store backup data */
    sz = (BACKUP_SRC_SIZE + align - 1) & ~(align - 1);
    tmp = xmalloc(sz);
    memset(tmp, 0, sz);
    info->backup_start = add_buffer(info, tmp, sz, sz, align,
                                    0, max_addr, -1);
    dbgprintf("Created backup segment at 0x%lx\n", info->backup_start);
    if (delete_memmap(memmap_p, info->backup_start, sz) < 0)
        return -1;

    /* Create elf header segment and store crash image data. */
    if (arch_options.core_header_type == CORE_TYPE_ELF64) {
        if (crash_create_elf64_headers(info, &elf_info64,
                                       crash_memory_range, nr_ranges,
                                       &tmp, &sz,
                                       ELF_CORE_HEADER_ALIGN) < 0)
            return -1;
    }
    else {
        if (crash_create_elf32_headers(info, &elf_info32,
                                       crash_memory_range, nr_ranges,
                                       &tmp, &sz,
                                       ELF_CORE_HEADER_ALIGN) < 0)
            return -1;
    }

    /* Hack: With some ld versions (GNU ld version 2.14.90.0.4 20030523),
     * vmlinux program headers show a gap of two pages between the bss
     * segment and the data segment, but the kernel effectively treats the
     * gap as bss and overwrites any data placed there. Hence bloat the
     * memsz of the elf core header segment to 16K to avoid it being
     * placed in such gaps. This is a makeshift solution until it is
     * fixed in the kernel.
     */
    elfcorehdr = add_buffer(info, tmp, sz, 16*1024, align, min_base,
                            max_addr, -1);
    dbgprintf("Created elf header segment at 0x%lx\n", elfcorehdr);
    if (delete_memmap(memmap_p, elfcorehdr, sz) < 0)
        return -1;
    cmdline_add_memmap(mod_cmdline, memmap_p);
    cmdline_add_elfcorehdr(mod_cmdline, elfcorehdr);
    return 0;
}
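
The last variant calls get_core_type() to pick between ELF32 and ELF64 core headers when the user did not choose one. The sketch below shows one plausible heuristic for that decision, stated here as an assumption rather than the actual kexec-tools logic: 32-bit ELF program headers cannot describe physical addresses above 4 GiB, so fall back to ELF64 headers whenever a crash range reaches beyond that limit.

#include <stdint.h>

struct memory_range { unsigned long long start, end; };

enum coretype { CORE_TYPE_ELF32, CORE_TYPE_ELF64 };

/* Illustrative heuristic only, not the kexec-tools implementation: use
 * ELF64 headers if any crash memory range ends above the 4 GiB limit of
 * 32-bit ELF program headers. */
static enum coretype guess_core_type(const struct memory_range *ranges,
				     int nr_ranges)
{
	int i;

	for (i = 0; i < nr_ranges; i++)
		if (ranges[i].end > UINT32_MAX)
			return CORE_TYPE_ELF64;
	return CORE_TYPE_ELF32;
}

int main(void)
{
	struct memory_range r[] = { { 0x0, 0x9fbff }, { 0x100000, 0x13fffffff } };

	/* The second range ends above 4 GiB, so ELF64 is chosen. */
	return guess_core_type(r, 2) == CORE_TYPE_ELF64 ? 0 : 1;
}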