Example 1
static int valid_memory_segment(struct kexec_info *info,
				struct kexec_segment *segment)
{
	unsigned long sstart, send;
	sstart = (unsigned long)segment->mem;
	send   = sstart + segment->memsz - 1;

	return valid_memory_range(info, sstart, send);
}
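The examples in this section only call valid_memory_range(); its body is not shown. As a point of reference, a minimal sketch of the check those callers rely on might look like the code below. The memory_range/memory_ranges fields and the RANGE_RAM constant are assumptions made for the sketch, not taken from the examples.

/* Sketch only, not the actual kexec-tools implementation: report whether
 * the span [sstart, send] lies entirely inside one known RAM range.
 */
int valid_memory_range(struct kexec_info *info,
	unsigned long sstart, unsigned long send)
{
	int i;

	if (sstart > send)
		return 0;

	for (i = 0; i < info->memory_ranges; i++) {
		unsigned long mstart, mend;

		/* Skip anything that is not usable RAM */
		if (info->memory_range[i].type != RANGE_RAM)
			continue;
		mstart = info->memory_range[i].start;
		mend   = info->memory_range[i].end;
		/* The segment must fit completely inside a single range */
		if (mstart <= sstart && send <= mend)
			return 1;
	}
	return 0;
}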
Example 2
void add_segment_phys_virt(struct kexec_info *info,
	const void *buf, size_t bufsz,
	unsigned long base, size_t memsz, int phys)
{
	unsigned long last;
	size_t size;
	int pagesize;

	if (bufsz > memsz) {
		bufsz = memsz;
	}
	/* Forget empty segments */
	if (memsz == 0) {
		return;
	}

	/* Round memsz up to a multiple of pagesize */
	pagesize = getpagesize();
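	/* e.g. with 4 KiB pages this turns memsz = 0x1801 into 0x2000 */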
	memsz = (memsz + (pagesize - 1)) & ~(pagesize - 1);

	/* Verify base is pagesize aligned.
	 * Finding a way to cope with this problem
	 * is important, but for now error out so at least
	 * we are not surprised by the code doing the wrong
	 * thing.
	 */
	if (base & (pagesize - 1)) {
		die("Base address: %lx is not page aligned\n", base);
	}

	if (phys)
		base = virt_to_phys(base);

	last = base + memsz - 1;
	if (!valid_memory_range(info, base, last)) {
		die("Invalid memory segment %p - %p\n",
			(void *)base, (void *)last);
	}

	size = (info->nr_segments + 1) * sizeof(info->segment[0]);
	info->segment = xrealloc(info->segment, size);
	info->segment[info->nr_segments].buf   = buf;
	info->segment[info->nr_segments].bufsz = bufsz;
	info->segment[info->nr_segments].mem   = (void *)base;
	info->segment[info->nr_segments].memsz = memsz;
	info->nr_segments++;
	if (info->nr_segments > KEXEC_MAX_SEGMENTS) {
		fprintf(stderr, "Warning: kernel segment limit reached. "
			"This will likely fail\n");
	}
}
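Examples 3, 4 and 5 call add_segment() rather than add_segment_phys_virt() directly. In kexec-tools the former behaves as a convenience wrapper that always asks for the virtual-to-physical conversion; a minimal sketch under that assumption:

/* Sketch: add_segment() delegating to add_segment_phys_virt() with phys = 1,
 * so the base address is always translated with virt_to_phys().
 */
void add_segment(struct kexec_info *info, const void *buf, size_t bufsz,
	unsigned long base, size_t memsz)
{
	add_segment_phys_virt(info, buf, bufsz, base, memsz, 1);
}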
Example 3
int elf_exec_load(struct mem_ehdr *ehdr, struct kexec_info *info)
{
	unsigned long base;
	int result;
	int i;

	if (!ehdr->e_phdr) {
		fprintf(stderr, "No program header?\n");
		result = -1;
		goto out;
	}

	/* If I have a dynamic executable find its size
	 * and then find a location for it in memory.
	 */
	base = 0;
	if (ehdr->e_type == ET_DYN) {
		unsigned long first, last, align;
		first = ULONG_MAX;
		last  = 0;
		align = 0;
		for (i = 0; i < ehdr->e_phnum; i++) {
			unsigned long start, stop;
			struct mem_phdr *phdr;
			phdr = &ehdr->e_phdr[i];
			if ((phdr->p_type != PT_LOAD) ||
				(phdr->p_memsz == 0))
			{
				continue;
			}
			start = phdr->p_paddr;
			stop  = start + phdr->p_memsz;
			if (first > start) {
				first = start;
			}
			if (last < stop) {
				last = stop;
			}
			if (align < phdr->p_align) {
				align = phdr->p_align;
			}
		}
		/* If I can't use the default paddr find a new
		 * hole for the dynamic executable.
		 */
		if (!valid_memory_range(info, first, last)) {
			unsigned long hole;
			hole = locate_hole(info,
				last - first + 1, align, 
				0, elf_max_addr(ehdr), 1);
			if (hole == ULONG_MAX) {
				result = -1;
				goto out;
			}
			/* Base is the value that when added
			 * to any virtual address in the file
			 * yields its load virtual address.
			 */
			base = hole - first;
		}

	}

	/* Read in the PT_LOAD segments */
	for (i = 0; i < ehdr->e_phnum; i++) {
		struct mem_phdr *phdr;
		size_t size;
		phdr = &ehdr->e_phdr[i];
		if (phdr->p_type != PT_LOAD) {
			continue;
		}
		size = phdr->p_filesz;
		if (size > phdr->p_memsz) {
			size = phdr->p_memsz;
		}
		add_segment(info,
			phdr->p_data, size,
			phdr->p_paddr + base, phdr->p_memsz);
	}

	/* Update entry point to reflect new load address */
	ehdr->e_entry += base;

	result = 0;
 out:
	return result;
}
Example 4
int zImage_sh_load(int argc, char **argv, const char *buf, off_t len,
	struct kexec_info *info)
{
	char *command_line;
	int opt;
	unsigned long empty_zero, zero_page_base, zero_page_size, k;
	unsigned long image_base;
	char *param;

	static const struct option options[] = {
		KEXEC_ARCH_OPTIONS
		{0, 0, 0, 0},
	};

	static const char short_options[] = KEXEC_ARCH_OPT_STR "";

	command_line = 0;
	while ((opt = getopt_long(argc, argv, short_options, options, 0)) != -1) {
		switch (opt) {
		default:
			/* Ignore core options */
			if (opt < OPT_ARCH_MAX) {
				break;
			}
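			/* fall through */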
		case '?':
			usage();
			return -1;
		case OPT_APPEND:
			command_line = optarg;
			break;
		}
	}

	if (!command_line)
		command_line = get_append();

	/* Assume the zero page is the page before the vmlinux entry point.
	 * We don't know the page size, but 64k seems to be the maximum, so
	 * put several 4k zero page copies before the entry point to cover
	 * all combinations.
	 */

	empty_zero = zImage_head32(buf, HEAD32_KERNEL_START_ADDR);

	zero_page_size = 0x10000;
	zero_page_base = virt_to_phys(empty_zero - zero_page_size);

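	/* Shrink the candidate window from the bottom, 4 KiB at a time,
	 * until it lies entirely in valid memory.
	 */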
	while (!valid_memory_range(info, zero_page_base,
				   zero_page_base + zero_page_size - 1)) {
		zero_page_base += 0x1000;
		zero_page_size -= 0x1000;
		if (zero_page_size == 0)
			die("Unable to determine zero page size from %p \n",
			    (void *)empty_zero);
	}

	param = xmalloc(zero_page_size);
	for (k = 0; k < (zero_page_size / 0x1000); k++)
		kexec_sh_setup_zero_page(param + (k * 0x1000), 0x1000,
					 command_line);

	add_segment(info, param, zero_page_size,
		    0x80000000 | zero_page_base, zero_page_size);

	/* Load the image a bit above the zero page, rounded up to 64k.
	 * The zImage will relocate itself, but only relocation upwards
	 * seems to be supported.
	 */

	image_base = (empty_zero + (0x10000 - 1)) & ~(0x10000 - 1);
	add_segment(info, buf, len, image_base, len);
	info->entry = (void *)virt_to_phys(image_base);
	return 0;
}
Example 5
static int ppc_load_bare_bits(int argc, char **argv, const char *buf,
		off_t len, struct kexec_info *info, unsigned int load_addr,
		unsigned int ep)
{
	char *command_line;
	int command_line_len;
	char *dtb;
	unsigned int addr;
	unsigned long dtb_addr;
#define FIXUP_ENTRYS    (20)
	char *fixup_nodes[FIXUP_ENTRYS + 1];
	int cur_fixup = 0;
	int opt;
	int ret;

	command_line = NULL;
	dtb = NULL;

	while ((opt = getopt_long(argc, argv, short_options, options, 0)) != -1) {
		switch (opt) {
		default:
			/* Ignore core options */
			if (opt < OPT_ARCH_MAX) {
				break;
			}
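			/* fall through */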
		case '?':
			usage();
			return -1;
		case OPT_APPEND:
			command_line = optarg;
			break;

		case OPT_DTB:
			dtb = optarg;
			break;

		case OPT_NODES:
			if (cur_fixup >= FIXUP_ENTRYS) {
				fprintf(stderr, "The number of entries for the fixup is too large\n");
				exit(1);
			}
			fixup_nodes[cur_fixup] = optarg;
			cur_fixup++;
			break;
		}
	}

	command_line_len = 0;
	if (command_line)
		command_line_len = strlen(command_line) + 1;

	fixup_nodes[cur_fixup] = NULL;

	/*
	 * len contains the length of the whole kernel image except the bss
	 * section. The 3 MiB should cover it. The purgatory and the dtb are
	 * allocated from memtop down towards zero so we should never get too
	 * close to the bss :)
	 */
	ret = valid_memory_range(info, load_addr, len + 3 * 1024 * 1024);
	if (!ret) {
		printf("Can't add kernel to addr 0x%08x len %ld\n",
				load_addr, len + 3 * 1024 * 1024);
		return -1;
	}
	add_segment(info, buf, len, load_addr, len + 3 * 1024 * 1024);
	if (dtb) {
		char *blob_buf;
		off_t blob_size = 0;

		/* Grab device tree from buffer */
		blob_buf = slurp_file(dtb, &blob_size);
		if (!blob_buf || !blob_size)
			die("Device tree seems to be an empty file.\n");
		blob_buf = fixup_dtb_nodes(blob_buf, &blob_size, fixup_nodes, command_line);

		dtb_addr = add_buffer(info, blob_buf, blob_size, blob_size, 0, 0,
				KERNEL_ACCESS_TOP, -1);
	} else {
		dtb_addr = 0;
	}

	elf_rel_build_load(info, &info->rhdr, (const char *)purgatory,
			purgatory_size, 0, -1, -1, 0);

	/* set various variables for the purgatory */
	addr = ep;
	elf_rel_set_symbol(&info->rhdr, "kernel", &addr, sizeof(addr));

	addr = dtb_addr;
	elf_rel_set_symbol(&info->rhdr, "dt_offset", &addr, sizeof(addr));

	addr = rmo_top;
	elf_rel_set_symbol(&info->rhdr, "mem_size", &addr, sizeof(addr));

#define PUL_STACK_SIZE  (16 * 1024)
	addr = locate_hole(info, PUL_STACK_SIZE, 0, 0, -1, 1);
	addr += PUL_STACK_SIZE;
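	/* pul_stack points at the top of the hole; the stack grows downward */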
	elf_rel_set_symbol(&info->rhdr, "pul_stack", &addr, sizeof(addr));
	/* No allocation past here in order not to overwrite the stack */
#undef PUL_STACK_SIZE

	addr = elf_rel_get_addr(&info->rhdr, "purgatory_start");
	info->entry = (void *)addr;
	return 0;
}