Code Example #1
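/*
 * Set the write permission (w) on the kernel text, data, and BSS, and on
 * all physical memory outside the current task's kernel stack.  Does
 * nothing unless jailing is enabled, and skips the init task.
 */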
static void mprotect_kernel_mem(int w)
{
	unsigned long start, end;
	int pages;

	if(!jail || (current == &init_task)) return;

	pages = (1 << CONFIG_KERNEL_STACK_ORDER);

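	/*
	 * Change permissions everywhere except the stack pages above the
	 * current thread_info page, which are left as they are.
	 */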
	start = (unsigned long) current_thread + PAGE_SIZE;
	end = (unsigned long) current_thread + PAGE_SIZE * pages;
	protect_memory(uml_reserved, start - uml_reserved, 1, w, 1, 1);
	protect_memory(end, high_physmem - end, 1, w, 1, 1);

	start = (unsigned long) UML_ROUND_DOWN(&_stext);
	end = (unsigned long) UML_ROUND_UP(&_etext);
	protect_memory(start, end - start, 1, w, 1, 1);

	start = (unsigned long) UML_ROUND_DOWN(&_unprotected_end);
	end = (unsigned long) UML_ROUND_UP(&_edata);
	protect_memory(start, end - start, 1, w, 1, 1);

	start = (unsigned long) UML_ROUND_DOWN(&__bss_start);
	end = (unsigned long) UML_ROUND_UP(brk_start);
	protect_memory(start, end - start, 1, w, 1, 1);

	mprotect_kernel_vm(w);
}
Code Example #2
File: mem.c Project: Dronevery/JetsonTK1-kernel
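/*
 * TT-mode setup run before memory initialization: remap the kernel data
 * and BSS regions (and the text segment too when the debug flag is set).
 */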
void before_mem_tt(unsigned long brk_start)
{
	if(debug)
		remap_data(UML_ROUND_DOWN(&_stext), UML_ROUND_UP(&_etext), 1);
	remap_data(UML_ROUND_DOWN(&_sdata), UML_ROUND_UP(&_edata), 1);
	remap_data(UML_ROUND_DOWN(&__bss_start), UML_ROUND_UP(&_end), 1);
}
Code Example #3
File: um_arch.c Project: Antonio-Zhou/Linux-2.6.11
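/*
 * Early UML entry point (Linux 2.6.11): parse the UML command line,
 * choose between TT and SKAS mode, lay out physical memory, iomem, and
 * the kernel virtual memory area, then hand off to start_uml_tt() or
 * start_uml_skas().
 */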
int linux_main(int argc, char **argv)
{
	unsigned long avail, diff;
	unsigned long virtmem_size, max_physmem;
	unsigned int i, add;

	for (i = 1; i < argc; i++){
		if((i == 1) && (argv[i][0] == ' ')) continue;
		add = 1;
		uml_checksetup(argv[i], &add);
		if(add) add_arg(saved_command_line, argv[i]);
	}
	if(have_root == 0) add_arg(saved_command_line, DEFAULT_COMMAND_LINE);

	mode_tt = force_tt ? 1 : !can_do_skas();
#ifndef CONFIG_MODE_TT
	if (mode_tt) {
		/* Since CONFIG_MODE_TT is #undef'ed, force_tt cannot be 1. So,
		 * can_do_skas() returned 0, and the message is correct. */
		printf("Support for TT mode is disabled, and no SKAS support is present on the host.\n");
		exit(1);
	}
#endif
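	/*
	 * Pick the TT or SKAS task-size calculation depending on the chosen
	 * mode; the result later becomes uml_physmem.
	 */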
	uml_start = CHOOSE_MODE_PROC(set_task_sizes_tt, set_task_sizes_skas, 0,
				     &host_task_size, &task_size);

	/* Need to check this early because mmapping happens before the
	 * kernel is running.
	 */
	check_tmpexec();

	brk_start = (unsigned long) sbrk(0);
	CHOOSE_MODE_PROC(before_mem_tt, before_mem_skas, brk_start);
	/*
	 * Increase physical memory size for exec-shield users
	 * so they actually get what they asked for. This should
	 * add zero for non-exec shield users
	 */

	diff = UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end);
	if(diff > 1024 * 1024){
		printf("Adding %ld bytes to physical memory to account for "
		       "exec-shield gap\n", diff);
		physmem_size += UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end);
	}

	uml_physmem = uml_start;

	/* Reserve up to 4M after the current brk */
	uml_reserved = ROUND_4M(brk_start) + (1 << 22);

	setup_machinename(system_utsname.machine);

#ifdef CONFIG_MODE_TT
	argv1_begin = argv[1];
	argv1_end = &argv[1][strlen(argv[1])];
#endif
  
	highmem = 0;
	iomem_size = (iomem_size + PAGE_SIZE - 1) & PAGE_MASK;
	max_physmem = get_kmem_end() - uml_physmem - iomem_size - MIN_VMALLOC;

	/* Zones have to begin on a 1 << MAX_ORDER page boundary,
	 * so this makes sure that's true for highmem
	 */
	max_physmem &= ~((1 << (PAGE_SHIFT + MAX_ORDER)) - 1);
	if(physmem_size + iomem_size > max_physmem){
		highmem = physmem_size + iomem_size - max_physmem;
		physmem_size -= highmem;
#ifndef CONFIG_HIGHMEM
		highmem = 0;
		printf("CONFIG_HIGHMEM not enabled - physical memory shrunk "
		       "to %ld bytes\n", physmem_size);
#endif
	}

	high_physmem = uml_physmem + physmem_size;
	end_iomem = high_physmem + iomem_size;
	high_memory = (void *) end_iomem;

	start_vm = VMALLOC_START;

	setup_physmem(uml_physmem, uml_reserved, physmem_size, highmem);
	if(init_maps(physmem_size, iomem_size, highmem)){
		printf("Failed to allocate mem_map for %ld bytes of physical "
		       "memory and %ld bytes of highmem\n", physmem_size,
		       highmem);
		exit(1);
	}

	virtmem_size = physmem_size;
	avail = get_kmem_end() - start_vm;
	if(physmem_size > avail) virtmem_size = avail;
	end_vm = start_vm + virtmem_size;

	if(virtmem_size < physmem_size)
		printf("Kernel virtual memory size shrunk to %ld bytes\n",
		       virtmem_size);

	uml_postsetup();

	task_protections((unsigned long) &init_thread_info);
	os_flush_stdout();

	return(CHOOSE_MODE(start_uml_tt(), start_uml_skas()));
}
Code Example #4
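/*
 * SKAS-only variant of linux_main(): mode selection is gone, the memory
 * layout is computed against TASK_SIZE and the address space below the
 * initial stack, and control is handed to start_uml().
 */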
int __init linux_main(int argc, char **argv)
{
	unsigned long avail, diff;
	unsigned long virtmem_size, max_physmem;
	unsigned long stack;
	unsigned int i;
	int add;

	for (i = 1; i < argc; i++) {
		if ((i == 1) && (argv[i][0] == ' '))
			continue;
		add = 1;
		uml_checksetup(argv[i], &add);
		if (add)
			add_arg(argv[i]);
	}
	if (have_root == 0)
		add_arg(DEFAULT_COMMAND_LINE);

	host_task_size = os_get_top_address();
	/*
	 * TASK_SIZE needs to be PGDIR_SIZE aligned or else exit_mmap craps
	 * out
	 */
	task_size = host_task_size & PGDIR_MASK;

	/* OS sanity checks that need to happen before the kernel runs */
	os_early_checks();

	brk_start = (unsigned long) sbrk(0);

	/*
	 * Increase physical memory size for exec-shield users
	 * so they actually get what they asked for. This should
	 * add zero for non-exec shield users
	 */

	diff = UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end);
	if (diff > 1024 * 1024) {
		os_info("Adding %ld bytes to physical memory to account for "
			"exec-shield gap\n", diff);
		physmem_size += UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end);
	}

	uml_physmem = (unsigned long) __binary_start & PAGE_MASK;

	/* Reserve up to 4M after the current brk */
	uml_reserved = ROUND_4M(brk_start) + (1 << 22);

	setup_machinename(init_utsname()->machine);

	highmem = 0;
	iomem_size = (iomem_size + PAGE_SIZE - 1) & PAGE_MASK;
	max_physmem = TASK_SIZE - uml_physmem - iomem_size - MIN_VMALLOC;

	/*
	 * Zones have to begin on a 1 << MAX_ORDER page boundary,
	 * so this makes sure that's true for highmem
	 */
	max_physmem &= ~((1 << (PAGE_SHIFT + MAX_ORDER)) - 1);
	if (physmem_size + iomem_size > max_physmem) {
		highmem = physmem_size + iomem_size - max_physmem;
		physmem_size -= highmem;
	}

	high_physmem = uml_physmem + physmem_size;
	end_iomem = high_physmem + iomem_size;
	high_memory = (void *) end_iomem;

	start_vm = VMALLOC_START;

	virtmem_size = physmem_size;
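	/*
	 * Bound the kernel's virtual memory area by the initial stack: argv
	 * lives on that stack, so round its address down to a 1 MB boundary
	 * and allow at most the range from VMALLOC_START up to there.
	 */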
	stack = (unsigned long) argv;
	stack &= ~(1024 * 1024 - 1);
	avail = stack - start_vm;
	if (physmem_size > avail)
		virtmem_size = avail;
	end_vm = start_vm + virtmem_size;

	if (virtmem_size < physmem_size)
		os_info("Kernel virtual memory size shrunk to %lu bytes\n",
			virtmem_size);

	os_flush_stdout();

	return start_uml();
}
Code Example #5
File: um_arch.c Project: CSCLOG/beaglebone
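/*
 * SKAS-only variant that still probes the host with can_do_skas() and
 * reports whether SKAS3 or SKAS0 is in use; it also performs the
 * physmem/mem_map setup and panic-notifier registration itself before
 * calling start_uml().
 */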
int __init linux_main(int argc, char **argv)
{
	unsigned long avail, diff;
	unsigned long virtmem_size, max_physmem;
	unsigned long stack;
	unsigned int i;
	int add;
	char *mode;

	for (i = 1; i < argc; i++) {
		if ((i == 1) && (argv[i][0] == ' '))
			continue;
		add = 1;
		uml_checksetup(argv[i], &add);
		if (add)
			add_arg(argv[i]);
	}
	if (have_root == 0)
		add_arg(DEFAULT_COMMAND_LINE);

	host_task_size = os_get_top_address();
	/*
	 * TASK_SIZE needs to be PGDIR_SIZE aligned or else exit_mmap craps
	 * out
	 */
	task_size = host_task_size & PGDIR_MASK;

	/* OS sanity checks that need to happen before the kernel runs */
	os_early_checks();

	can_do_skas();

	if (proc_mm && ptrace_faultinfo)
		mode = "SKAS3";
	else
		mode = "SKAS0";

	printf("UML running in %s mode\n", mode);

	brk_start = (unsigned long) sbrk(0);

	/*
	 * Increase physical memory size for exec-shield users
	 * so they actually get what they asked for. This should
	 * add zero for non-exec shield users
	 */

	diff = UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end);
	if (diff > 1024 * 1024) {
		printf("Adding %ld bytes to physical memory to account for "
		       "exec-shield gap\n", diff);
		physmem_size += UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end);
	}

	uml_physmem = (unsigned long) &__binary_start & PAGE_MASK;

	/* Reserve up to 4M after the current brk */
	uml_reserved = ROUND_4M(brk_start) + (1 << 22);

	setup_machinename(init_utsname()->machine);

	highmem = 0;
	iomem_size = (iomem_size + PAGE_SIZE - 1) & PAGE_MASK;
	max_physmem = TASK_SIZE - uml_physmem - iomem_size - MIN_VMALLOC;

	/*
	 * Zones have to begin on a 1 << MAX_ORDER page boundary,
	 * so this makes sure that's true for highmem
	 */
	max_physmem &= ~((1 << (PAGE_SHIFT + MAX_ORDER)) - 1);
	if (physmem_size + iomem_size > max_physmem) {
		highmem = physmem_size + iomem_size - max_physmem;
		physmem_size -= highmem;
#ifndef CONFIG_HIGHMEM
		highmem = 0;
		printf("CONFIG_HIGHMEM not enabled - physical memory shrunk "
		       "to %Lu bytes\n", physmem_size);
#endif
	}

	high_physmem = uml_physmem + physmem_size;
	end_iomem = high_physmem + iomem_size;
	high_memory = (void *) end_iomem;

	start_vm = VMALLOC_START;

	setup_physmem(uml_physmem, uml_reserved, physmem_size, highmem);
	if (init_maps(physmem_size, iomem_size, highmem)) {
		printf("Failed to allocate mem_map for %Lu bytes of physical "
		       "memory and %Lu bytes of highmem\n", physmem_size,
		       highmem);
		exit(1);
	}

	virtmem_size = physmem_size;
	stack = (unsigned long) argv;
	stack &= ~(1024 * 1024 - 1);
	avail = stack - start_vm;
	if (physmem_size > avail)
		virtmem_size = avail;
	end_vm = start_vm + virtmem_size;

	if (virtmem_size < physmem_size)
		printf("Kernel virtual memory size shrunk to %lu bytes\n",
		       virtmem_size);

	atomic_notifier_chain_register(&panic_notifier_list,
				       &panic_exit_notifier);

	uml_postsetup();

	stack_protections((unsigned long) &init_thread_info);
	os_flush_stdout();

	return start_uml();
}