Пример #1
0
/**
 * Return a sorted list of memory ranges.
 *
 * Prefer the /sys/firmware/memmap interface when it is usable; otherwise
 * fall back to /proc/iomem. Under Xen a dedicated helper queries the
 * hypervisor's view of memory instead.
 *
 * @param[out] range pointer that will be set to an array that holds the
 *             memory ranges
 * @param[out] ranges number of ranges valid in @p range
 * @param[in]  kexec_flags the kexec_flags to determine if we load a normal
 *             or a crashdump kernel
 *
 * @return 0 on success, any other value on failure.
 */
int get_memory_ranges(struct memory_range **range, int *ranges,
		      unsigned long kexec_flags)
{
	int ret, i;
	int use_sysfs = !efi_map_added() && !xen_present() &&
			have_sys_firmware_memmap();

	if (use_sysfs)
		ret = get_memory_ranges_sysfs(range, ranges);
	else if (xen_present())
		ret = get_memory_ranges_xen(range, ranges);
	else
		ret = get_memory_ranges_proc_iomem(range, ranges);

	/* Both the sysfs and the Xen paths need their ranges fixed up. */
	if (!ret && (use_sysfs || xen_present()))
		ret = fixup_memory_ranges(range, ranges);

	/*
	 * The helpers above have already printed an error message,
	 * so fail silently here.
	 */
	if (ret)
		return ret;

	/* Don't report the interrupt table as ram */
	for (i = 0; i < *ranges; i++) {
		struct memory_range *r = *range + i;

		if (r->type != RANGE_RAM || r->start >= 0x100)
			continue;
		r->start = 0x100;
		break;
	}

	/*
	 * Redefine the memory region boundaries if kernel
	 * exports the limits and if it is panic kernel.
	 * Override user values only if kernel exported values are
	 * subset of user defined values.
	 */
	if ((kexec_flags & KEXEC_ON_CRASH) &&
	    !(kexec_flags & KEXEC_PRESERVE_CONTEXT)) {
		uint64_t start, end;

		if (get_max_crash_kernel_limit(&start, &end) != 0) {
			fprintf(stderr, "get_max_crash_kernel_limit failed.\n");
			return -1;
		}

		if (start > mem_min)
			mem_min = start;
		if (end < mem_max)
			mem_max = end;
	}

	dbgprint_mem_range("MEMORY RANGES", *range, *ranges);

	return ret;
}
Пример #2
0
/*
 * Pick the ELF machine type for the crash dump header: under Xen the
 * hypervisor's architecture is authoritative, otherwise use the machine
 * type the caller recorded in elf_info.
 */
unsigned long crash_architecture(struct crash_elf_info *elf_info)
{
	return xen_present() ? xen_architecture(elf_info)
			     : elf_info->machine;
}
Пример #3
0
/* Read the kernel's physical load address from the file returned by
 * proc_iomem() ("Kernel code" resource) and store it in elf_info. */
static int get_kernel_paddr(struct crash_elf_info *elf_info)
{
	uint64_t start;

	/* Kernel not identity mapped under Xen; nothing to record. */
	if (xen_present())
		return 0;

	if (parse_iomem_single("Kernel code\n", &start, NULL) != 0) {
		fprintf(stderr, "Cannot determine kernel physical load addr\n");
		return -1;
	}

	elf_info->kern_paddr_start = start;
	dbgprintf("kernel load physical addr start = 0x%lx\n", start);
	return 0;
}
/* Read kernel physical load addr from the file returned by proc_iomem()
 * (Kernel Code) and store in kexec_info.
 *
 * Returns 0 on success (or under Xen, where the kernel is not identity
 * mapped and there is nothing to record), -1 on failure.
 */
static int get_kernel_paddr(struct kexec_info *info)
{
	uint64_t start;

	if (xen_present()) /* Kernel not identity mapped under Xen */
		return 0;

	if (parse_iomem_single("Kernel code\n", &start, NULL) == 0) {
		info->kern_paddr_start = start;
#ifdef DEBUG
		/* %Lx is formally the long double length modifier; cast to
		 * unsigned long long and use %llx so the format specifier
		 * matches a 64-bit integer portably. */
		printf("kernel load physical addr start = 0x%016llx\n",
		       (unsigned long long)start);
#endif
		return 0;
	}

	fprintf(stderr, "Cannot determine kernel physical load addr\n");
	return -1;
}
/* Locate the kernel text mapping by parsing the ELF headers of
 * /proc/kcore and record its aligned start address (kern_vaddr_start)
 * and page-aligned size (kern_size) in kexec_info.
 *
 * Returns 0 on success (or under Xen, where the kernel is not identity
 * mapped and there is nothing to do), -1 on failure.
 */
static int get_kernel_vaddr_and_size(struct kexec_info *info)
{
	int result;
	const char kcore[] = "/proc/kcore";
	char *buf;
	struct mem_ehdr ehdr;
	struct mem_phdr *phdr, *end_phdr;
	int align;
	unsigned long size;
	uint32_t elf_flags = 0;

	if (xen_present()) /* Kernel not identity mapped under Xen */
		return 0;

	align = getpagesize();
	size = KCORE_ELF_HEADERS_SIZE;
	buf = slurp_file_len(kcore, size);
	if (!buf) {
		fprintf(stderr, "Cannot read %s: %s\n", kcore, strerror(errno));
		return -1;
	}

	/* Don't perform checks to make sure stated phdrs and shdrs are
	 * actually present in the core file. It is not practical
	 * to read the GB size file into a user space buffer, given the
	 * fact that we don't use any info from that.
	 */
	elf_flags |= ELF_SKIP_FILESZ_CHECK;
	result = build_elf_core_info(buf, size, &ehdr, elf_flags);
	if (result < 0) {
		fprintf(stderr, "ELF core (kcore) parse failed\n");
		free(buf);	/* was leaked on this error path */
		return -1;
	}

	/* Traverse through the Elf headers and find the region where
	 * kernel is mapped.
	 * NOTE(review): buf (and ehdr's parsed header tables) are still
	 * not freed on the paths below, matching the original behavior;
	 * confirm whether build_elf_core_info() keeps pointers into buf
	 * before adding frees here. */
	end_phdr = &ehdr.e_phdr[ehdr.e_phnum];
	for (phdr = ehdr.e_phdr; phdr != end_phdr; phdr++) {
		if (phdr->p_type == PT_LOAD) {
			unsigned long saddr = phdr->p_vaddr;
			unsigned long eaddr = phdr->p_vaddr + phdr->p_memsz;
			unsigned long ksize;	/* was 'size': shadowed the outer variable */

			/* Look for kernel text mapping header. */
			if ((saddr >= __START_KERNEL_map) &&
			    (eaddr <= __START_KERNEL_map + KERNEL_TEXT_SIZE)) {
				saddr = (saddr) & (~(KERN_VADDR_ALIGN - 1));
				info->kern_vaddr_start = saddr;
				ksize = eaddr - saddr;
				/* Align size to page size boundary. */
				ksize = (ksize + align - 1) & (~(align - 1));
				info->kern_size = ksize;
#ifdef DEBUG
				printf("kernel vaddr = 0x%lx size = 0x%lx\n",
				       saddr, ksize);
#endif
				return 0;
			}
		}
	}
	fprintf(stderr, "Can't find kernel text map area from kcore\n");
	return -1;
}
/* Prepares the crash memory headers and stores in supplied buffer.
 *
 * Builds an ELF core (ET_CORE) header block describing the crashed
 * kernel's memory: one PT_NOTE program header per CPU, optional
 * vmcoreinfo note headers (kernel and, under Xen, hypervisor), an
 * optional PT_LOAD header for the kernel text mapping, and one PT_LOAD
 * header per RAM memory range.
 *
 * EHDR, PHDR and FUNC appear to be macros so this body can be compiled
 * for both 32- and 64-bit ELF flavours -- NOTE(review): confirm against
 * the file that includes this template.
 *
 * @param info     kexec state; kern_size gates the extra kernel-text
 *                 PT_LOAD header, backup_start remaps the backup region
 * @param elf_info ELF class/data/machine plus address-translation hooks
 * @param range    array of memory ranges to describe
 * @param ranges   number of entries in @p range
 * @param buf      out: newly allocated (xmalloc) header buffer; the
 *                 caller owns and must free it
 * @param size     out: size of *buf, rounded up to @p align
 * @param align    required alignment; must be a multiple of
 *                 ELF_CORE_HEADER_ALIGN
 *
 * @return 0 on success, -1 on failure.
 */
int FUNC(struct kexec_info *info,
	 struct crash_elf_info *elf_info,
	 struct memory_range *range, int ranges,
	 void **buf, unsigned long *size, unsigned long align)
{
	EHDR *elf;
	PHDR *phdr;
	int i;
	unsigned long sz;
	char *bufp;
	long int nr_cpus = 0;
	uint64_t notes_addr, notes_len;
	uint64_t vmcoreinfo_addr, vmcoreinfo_len;
	int has_vmcoreinfo = 0;
	uint64_t vmcoreinfo_addr_xen, vmcoreinfo_len_xen;
	int has_vmcoreinfo_xen = 0;
	int (*get_note_info)(int cpu, uint64_t *addr, uint64_t *len);

	/* CPU count determines how many per-cpu PT_NOTE headers we need. */
	if (xen_present())
		nr_cpus = xen_get_nr_phys_cpus();
	else
		nr_cpus = sysconf(_SC_NPROCESSORS_CONF);

	if (nr_cpus < 0) {
		/* CPU enumeration failed; cannot size the header block. */
		return -1;
	}

	if (get_kernel_vmcoreinfo(&vmcoreinfo_addr, &vmcoreinfo_len) == 0) {
		has_vmcoreinfo = 1;
	}

	if (xen_present() &&
	    get_xen_vmcoreinfo(&vmcoreinfo_addr_xen, &vmcoreinfo_len_xen) == 0) {
		has_vmcoreinfo_xen = 1;
	}

	/* Worst-case buffer size: ELF header, one PT_NOTE per cpu and per
	 * vmcoreinfo source, one PT_LOAD per memory range. */
	sz = sizeof(EHDR) + (nr_cpus + has_vmcoreinfo + has_vmcoreinfo_xen) * sizeof(PHDR) +
	     ranges * sizeof(PHDR);

	/*
	 * Certain architectures such as x86_64 and ia64 require a separate
	 * PT_LOAD program header for the kernel. This is controlled through
	 * info->kern_size.
	 *
	 * The separate PT_LOAD program header is required either because the
	 * kernel is mapped at a different location than the rest of the
	 * physical memory or because we need to support relocatable kernels.
	 * Or both as on x86_64.
	 *
	 * In the relocatable kernel case this PT_LOAD segment is used to tell
	 * where the kernel was actually loaded which may be different from
	 * the load address present in the vmlinux file.
	 *
	 * The extra kernel PT_LOAD program header results in a vmcore file
	 * which is larger than the size of the physical memory. This is
	 * because the memory for the kernel is present both in the kernel
	 * PT_LOAD program header and in the physical RAM program headers.
	 */

	if (info->kern_size && !xen_present()) {
		sz += sizeof(PHDR);
	}

	/*
	 * Make sure the ELF core header is aligned to at least 1024.
	 * We do this because the secondary kernel gets the ELF core
	 * header address on the kernel command line through the memmap=
	 * option, and this option requires 1k granularity.
	 */

	if (align % ELF_CORE_HEADER_ALIGN) {
		return -1;
	}

	/* Round the total size up to the requested alignment. */
	sz += align - 1;
	sz &= ~(align - 1);

	bufp = xmalloc(sz);
	memset(bufp, 0, sz);

	/* Ownership of the buffer passes to the caller via *buf. */
	*buf = bufp;
	*size = sz;

	/* Setup ELF Header*/
	elf = (EHDR *) bufp;
	bufp += sizeof(EHDR);
	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS]  = elf_info->class;
	elf->e_ident[EI_DATA]   = elf_info->data;
	elf->e_ident[EI_VERSION]= EV_CURRENT;
	elf->e_ident[EI_OSABI] = ELFOSABI_NONE;
	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
	elf->e_type	= ET_CORE;
	elf->e_machine  = crash_architecture(elf_info);
	elf->e_version	= EV_CURRENT;
	elf->e_entry	= 0;
	elf->e_phoff	= sizeof(EHDR);
	elf->e_shoff	= 0;
	elf->e_flags	= 0;
	elf->e_ehsize   = sizeof(EHDR);
	elf->e_phentsize= sizeof(PHDR);
	/* e_phnum is incremented below as each program header is emitted. */
	elf->e_phnum    = 0;
	elf->e_shentsize= 0;
	elf->e_shnum    = 0;
	elf->e_shstrndx = 0;

	/* Default way to get crash notes is by get_crash_notes_per_cpu() */

	get_note_info = elf_info->get_note_info;
	if (!get_note_info)
		get_note_info = get_crash_notes_per_cpu;

	if (xen_present())
		get_note_info = xen_get_note;

	/* PT_NOTE program headers. One per cpu */

	for (i = 0; i < nr_cpus; i++) {
		if (get_note_info(i, &notes_addr, &notes_len) < 0) {
			/* This cpu is not present. Skip it. */
			continue;
		}

		phdr = (PHDR *) bufp;
		bufp += sizeof(PHDR);
		phdr->p_type	= PT_NOTE;
		phdr->p_flags	= 0;
		/* p_offset mirrors p_paddr: the dump reader fetches the
		 * note data straight from its physical address. */
		phdr->p_offset  = phdr->p_paddr = notes_addr;
		phdr->p_vaddr   = 0;
		phdr->p_filesz	= phdr->p_memsz	= notes_len;
		/* Do we need any alignment of segments? */
		phdr->p_align	= 0;

		/* Increment number of program headers. */
		(elf->e_phnum)++;
		dbgprintf_phdr("Elf header", phdr);
	}

	/* Kernel vmcoreinfo note; skipped for jump-back (preserve-context)
	 * loads. */
	if (has_vmcoreinfo && !(info->kexec_flags & KEXEC_PRESERVE_CONTEXT)) {
		phdr = (PHDR *) bufp;
		bufp += sizeof(PHDR);
		phdr->p_type	= PT_NOTE;
		phdr->p_flags	= 0;
		phdr->p_offset  = phdr->p_paddr = vmcoreinfo_addr;
		phdr->p_vaddr   = 0;
		phdr->p_filesz	= phdr->p_memsz	= vmcoreinfo_len;
		/* Do we need any alignment of segments? */
		phdr->p_align	= 0;

		(elf->e_phnum)++;
		dbgprintf_phdr("vmcoreinfo header", phdr);
	}

	/* Hypervisor vmcoreinfo note (Xen only). */
	if (has_vmcoreinfo_xen) {
		phdr = (PHDR *) bufp;
		bufp += sizeof(PHDR);
		phdr->p_type	= PT_NOTE;
		phdr->p_flags	= 0;
		phdr->p_offset  = phdr->p_paddr = vmcoreinfo_addr_xen;
		phdr->p_vaddr   = 0;
		phdr->p_filesz	= phdr->p_memsz	= vmcoreinfo_len_xen;
		/* Do we need any alignment of segments? */
		phdr->p_align	= 0;

		(elf->e_phnum)++;
		dbgprintf_phdr("vmcoreinfo_xen header", phdr);
	}

	/* Setup an PT_LOAD type program header for the region where
	 * Kernel is mapped if info->kern_size is non-zero.
	 */

	if (info->kern_size && !xen_present()) {
		phdr = (PHDR *) bufp;
		bufp += sizeof(PHDR);
		phdr->p_type	= PT_LOAD;
		phdr->p_flags	= PF_R|PF_W|PF_X;
		phdr->p_offset	= phdr->p_paddr = info->kern_paddr_start;
		phdr->p_vaddr	= info->kern_vaddr_start;
		phdr->p_filesz	= phdr->p_memsz	= info->kern_size;
		phdr->p_align	= 0;
		(elf->e_phnum)++;
		dbgprintf_phdr("Kernel text Elf header", phdr);
	}

	/* Setup PT_LOAD type program header for every system RAM chunk.
	 * A seprate program header for Backup Region*/
	for (i = 0; i < ranges; i++, range++) {
		unsigned long long mstart, mend;
		if (range->type != RANGE_RAM)
			continue;
		mstart = range->start;
		mend = range->end;
		if (!mstart && !mend)
			continue;
		phdr = (PHDR *) bufp;
		bufp += sizeof(PHDR);
		phdr->p_type	= PT_LOAD;
		phdr->p_flags	= PF_R|PF_W|PF_X;
		phdr->p_offset	= mstart;

		/* The backup region's original contents were copied to
		 * info->backup_start; point the file offset there. */
		if (mstart == elf_info->backup_src_start
		    && mend == elf_info->backup_src_end)
			phdr->p_offset	= info->backup_start;

		/* We already prepared the header for kernel text. Map
		 * rest of the memory segments to kernel linearly mapped
		 * memory region.
		 */
		phdr->p_paddr = mstart;
		phdr->p_vaddr = phys_to_virt(elf_info, mstart);
		phdr->p_filesz	= phdr->p_memsz	= mend - mstart + 1;
		/* Do we need any alignment of segments? */
		phdr->p_align	= 0;

		/* HIGMEM has a virtual address of -1 */

		if (elf_info->lowmem_limit
		    && (mend > (elf_info->lowmem_limit - 1)))
			phdr->p_vaddr = -1;

		/* Increment number of program headers. */
		(elf->e_phnum)++;
		dbgprintf_phdr("Elf header", phdr);
	}
	return 0;
}
Пример #7
0
/**
 * Return a sorted list of memory ranges.
 *
 * If we have the /sys/firmware/memmap interface, then use that. If not,
 * or if parsing of that fails, use /proc/iomem as fallback.
 *
 * @param[out] range pointer that will be set to an array that holds the
 *             memory ranges
 * @param[out] ranges number of ranges valid in @p range
 * @param[in]  kexec_flags the kexec_flags to determine if we load a normal
 *             or a crashdump kernel
 *
 * @return 0 on success, any other value on failure.
 */
int get_memory_ranges(struct memory_range **range, int *ranges,
                      unsigned long kexec_flags)
{
    int ret, i;

    /*
     * When using Xen, /sys/firmware/memmap (i.e., the E820 map) is
     * wrong, it just provides one large memory area and that cannot
     * be used for Kdump. Always use the /proc/iomem interface there,
     * even if we have /sys/firmware/memmap. Without that, /proc/vmcore
     * is empty in the kdump kernel.
     */
    if (!xen_present() && have_sys_firmware_memmap()) {
        ret = get_memory_ranges_sysfs(range, ranges);
        if (!ret)
            ret = fixup_memory_ranges_sysfs(range, ranges);
    } else
        ret = get_memory_ranges_proc_iomem(range, ranges);

    /*
     * both get_memory_ranges_sysfs() and get_memory_ranges_proc_iomem()
     * have already printed an error message, so fail silently here
     */
    if (ret != 0)
        return ret;

    /* Don't report the interrupt table as ram */
    for (i = 0; i < *ranges; i++) {
        if ((*range)[i].type == RANGE_RAM &&
                ((*range)[i].start < 0x100)) {
            (*range)[i].start = 0x100;
            break;
        }
    }

    /*
     * Redefine the memory region boundaries if kernel
     * exports the limits and if it is panic kernel.
     * Override user values only if kernel exported values are
     * subset of user defined values.
     */
    if ((kexec_flags & KEXEC_ON_CRASH) &&
            !(kexec_flags & KEXEC_PRESERVE_CONTEXT)) {
        uint64_t start, end;

        ret = parse_iomem_single("Crash kernel\n", &start, &end);
        if (ret != 0) {
            fprintf(stderr, "parse_iomem_single failed.\n");
            return -1;
        }

        if (start > mem_min)
            mem_min = start;
        if (end < mem_max)
            mem_max = end;
    }

    /*
     * Replaces the old "#if 0" debug block: dbgprint_mem_range() emits
     * the same listing under the runtime debug flag, matching the rest
     * of the file and avoiding the nonportable %016Lx format.
     */
    dbgprint_mem_range("MEMORY RANGES", *range, *ranges);

    return ret;
}