unsigned long set_task_sizes_tt(int arg, unsigned long *host_size_out,
				unsigned long *task_size_out)
{
	/* Round up to the nearest 4M */
	*host_size_out = ROUND_4M((unsigned long) &arg);
	*task_size_out = START;
	return(START);
}
unsigned long set_task_sizes_tt(unsigned long *task_size_out)
{
	unsigned long host_task_size;

	/* Round up to the nearest 4M */
	host_task_size = ROUND_4M((unsigned long) &host_task_size);
	*task_size_out = START;
	return host_task_size;
}
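/*
 * For reference: ROUND_4M(), used throughout these functions, rounds an
 * address up to a 4 MiB (1 << 22) boundary. A minimal sketch assuming
 * the conventional mask-based round-up; treat the exact in-tree
 * definition in UML's shared headers as an assumption:
 */
#define ROUND_4M(n) \
	((((unsigned long) (n)) + (1UL << 22) - 1) & ~((1UL << 22) - 1))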
unsigned long set_task_sizes_skas(int arg, unsigned long *host_size_out,
				  unsigned long *task_size_out)
{
	/*
	 * Round up to the nearest 4M.  &arg is an address on the current
	 * stack, which sits near the top of the process address space,
	 * so it approximates the highest usable address.
	 */
	unsigned long top = ROUND_4M((unsigned long) &arg);

#ifdef CONFIG_HOST_TASK_SIZE
	*host_size_out = CONFIG_HOST_TASK_SIZE;
	*task_size_out = CONFIG_HOST_TASK_SIZE;
#else
	*host_size_out = top;
	*task_size_out = top;
#endif

	/*
	 * Mask the address of this function down to a 16M boundary to
	 * find where the UML binary was loaded; this becomes uml_start.
	 */
	return(((unsigned long) set_task_sizes_skas) & ~0xffffff);
}
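/*
 * For context before linux_main() below: CHOOSE_MODE() and
 * CHOOSE_MODE_PROC() dispatch between the TT-mode and SKAS-mode helpers
 * at run time based on the global mode_tt flag. A minimal sketch in the
 * style of choose-mode.h from this era (exact definitions assumed, not
 * quoted):
 */
#define CHOOSE_MODE(tt, skas) (mode_tt ? (tt) : (skas))

#define CHOOSE_MODE_PROC(tt, skas, args...) \
	CHOOSE_MODE(tt(args), skas(args))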
int linux_main(int argc, char **argv)
{
	unsigned long avail, diff;
	unsigned long virtmem_size, max_physmem;
	unsigned int i, add;

	for (i = 1; i < argc; i++) {
		if ((i == 1) && (argv[i][0] == ' '))
			continue;
		add = 1;
		uml_checksetup(argv[i], &add);
		if (add)
			add_arg(saved_command_line, argv[i]);
	}
	if (have_root == 0)
		add_arg(saved_command_line, DEFAULT_COMMAND_LINE);

	mode_tt = force_tt ? 1 : !can_do_skas();
#ifndef CONFIG_MODE_TT
	if (mode_tt) {
		/*
		 * Since CONFIG_MODE_TT is #undef'ed, force_tt cannot be 1.
		 * So, can_do_skas() returned 0, and the message is correct.
		 */
		printf("Support for TT mode is disabled, and no SKAS support "
		       "is present on the host.\n");
		exit(1);
	}
#endif

	uml_start = CHOOSE_MODE_PROC(set_task_sizes_tt, set_task_sizes_skas,
				     0, &host_task_size, &task_size);

	/*
	 * Need to check this early because mmapping happens before the
	 * kernel is running.
	 */
	check_tmpexec();

	brk_start = (unsigned long) sbrk(0);
	CHOOSE_MODE_PROC(before_mem_tt, before_mem_skas, brk_start);

	/*
	 * Increase physical memory size for exec-shield users
	 * so they actually get what they asked for. This should
	 * add zero for non-exec shield users
	 */
	diff = UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end);
	if (diff > 1024 * 1024) {
		printf("Adding %ld bytes to physical memory to account for "
		       "exec-shield gap\n", diff);
		physmem_size += UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end);
	}

	uml_physmem = uml_start;

	/* Reserve up to 4M after the current brk */
	uml_reserved = ROUND_4M(brk_start) + (1 << 22);

	setup_machinename(system_utsname.machine);

#ifdef CONFIG_MODE_TT
	argv1_begin = argv[1];
	argv1_end = &argv[1][strlen(argv[1])];
#endif

	highmem = 0;
	iomem_size = (iomem_size + PAGE_SIZE - 1) & PAGE_MASK;
	max_physmem = get_kmem_end() - uml_physmem - iomem_size - MIN_VMALLOC;

	/*
	 * Zones have to begin on a 1 << MAX_ORDER page boundary,
	 * so this makes sure that's true for highmem
	 */
	max_physmem &= ~((1 << (PAGE_SHIFT + MAX_ORDER)) - 1);
	if (physmem_size + iomem_size > max_physmem) {
		highmem = physmem_size + iomem_size - max_physmem;
		physmem_size -= highmem;
#ifndef CONFIG_HIGHMEM
		highmem = 0;
		printf("CONFIG_HIGHMEM not enabled - physical memory shrunk "
		       "to %ld bytes\n", physmem_size);
#endif
	}

	high_physmem = uml_physmem + physmem_size;
	end_iomem = high_physmem + iomem_size;
	high_memory = (void *) end_iomem;

	start_vm = VMALLOC_START;

	setup_physmem(uml_physmem, uml_reserved, physmem_size, highmem);
	if (init_maps(physmem_size, iomem_size, highmem)) {
		printf("Failed to allocate mem_map for %ld bytes of physical "
		       "memory and %ld bytes of highmem\n", physmem_size,
		       highmem);
		exit(1);
	}

	virtmem_size = physmem_size;
	avail = get_kmem_end() - start_vm;
	if (physmem_size > avail)
		virtmem_size = avail;
	end_vm = start_vm + virtmem_size;

	if (virtmem_size < physmem_size)
		printf("Kernel virtual memory size shrunk to %ld bytes\n",
		       virtmem_size);

	uml_postsetup();

	task_protections((unsigned long) &init_thread_info);
	os_flush_stdout();

	return(CHOOSE_MODE(start_uml_tt(), start_uml_skas()));
}
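/*
 * For reference: the exec-shield computation above compares the
 * page-rounded current brk against the page-rounded end of the binary
 * (&_end). A minimal sketch of UML_ROUND_UP() assuming the usual
 * PAGE_SIZE round-up; treat the exact in-tree definition as an
 * assumption:
 */
#define UML_ROUND_UP(addr) \
	((((unsigned long) (addr)) + PAGE_SIZE - 1) & PAGE_MASK)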
int __init linux_main(int argc, char **argv)
{
	unsigned long avail, diff;
	unsigned long virtmem_size, max_physmem;
	unsigned long stack;
	unsigned int i;
	int add;

	for (i = 1; i < argc; i++) {
		if ((i == 1) && (argv[i][0] == ' '))
			continue;
		add = 1;
		uml_checksetup(argv[i], &add);
		if (add)
			add_arg(argv[i]);
	}
	if (have_root == 0)
		add_arg(DEFAULT_COMMAND_LINE);

	host_task_size = os_get_top_address();
	/*
	 * TASK_SIZE needs to be PGDIR_SIZE aligned or else exit_mmap craps
	 * out
	 */
	task_size = host_task_size & PGDIR_MASK;

	/* OS sanity checks that need to happen before the kernel runs */
	os_early_checks();

	brk_start = (unsigned long) sbrk(0);

	/*
	 * Increase physical memory size for exec-shield users
	 * so they actually get what they asked for. This should
	 * add zero for non-exec shield users
	 */
	diff = UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end);
	if (diff > 1024 * 1024) {
		os_info("Adding %ld bytes to physical memory to account for "
			"exec-shield gap\n", diff);
		physmem_size += UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end);
	}

	uml_physmem = (unsigned long) __binary_start & PAGE_MASK;

	/* Reserve up to 4M after the current brk */
	uml_reserved = ROUND_4M(brk_start) + (1 << 22);

	setup_machinename(init_utsname()->machine);

	highmem = 0;
	iomem_size = (iomem_size + PAGE_SIZE - 1) & PAGE_MASK;
	max_physmem = TASK_SIZE - uml_physmem - iomem_size - MIN_VMALLOC;

	/*
	 * Zones have to begin on a 1 << MAX_ORDER page boundary,
	 * so this makes sure that's true for highmem
	 */
	max_physmem &= ~((1 << (PAGE_SHIFT + MAX_ORDER)) - 1);
	if (physmem_size + iomem_size > max_physmem) {
		highmem = physmem_size + iomem_size - max_physmem;
		physmem_size -= highmem;
	}

	high_physmem = uml_physmem + physmem_size;
	end_iomem = high_physmem + iomem_size;
	high_memory = (void *) end_iomem;

	start_vm = VMALLOC_START;

	virtmem_size = physmem_size;
	/*
	 * argv points into the initial process stack, so rounding it
	 * down to a 1 MiB boundary gives a conservative ceiling for the
	 * kernel's vmalloc region.
	 */
	stack = (unsigned long) argv;
	stack &= ~(1024 * 1024 - 1);
	avail = stack - start_vm;
	if (physmem_size > avail)
		virtmem_size = avail;
	end_vm = start_vm + virtmem_size;

	if (virtmem_size < physmem_size)
		os_info("Kernel virtual memory size shrunk to %lu bytes\n",
			virtmem_size);

	os_flush_stdout();

	return start_uml();
}
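/*
 * Illustration only, with hypothetical numbers: on 2-level i386 paging,
 * PGDIR_SIZE is 4 MiB and PGDIR_MASK is ~(PGDIR_SIZE - 1), so the
 * task_size computation above rounds the host's address-space ceiling
 * down to a top-level page-table boundary:
 *
 *	host_task_size = 0xff8fe000;		    (from os_get_top_address())
 *	task_size = host_task_size & ~0x3fffffUL;   (== 0xff800000)
 *
 * exit_mmap() then never has to tear down a partially covered top-level
 * page-table entry.
 */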
int __init linux_main(int argc, char **argv)
{
	unsigned long avail, diff;
	unsigned long virtmem_size, max_physmem;
	unsigned long stack;
	unsigned int i;
	int add;
	char *mode;

	for (i = 1; i < argc; i++) {
		if ((i == 1) && (argv[i][0] == ' '))
			continue;
		add = 1;
		uml_checksetup(argv[i], &add);
		if (add)
			add_arg(argv[i]);
	}
	if (have_root == 0)
		add_arg(DEFAULT_COMMAND_LINE);

	host_task_size = os_get_top_address();
	/*
	 * TASK_SIZE needs to be PGDIR_SIZE aligned or else exit_mmap craps
	 * out
	 */
	task_size = host_task_size & PGDIR_MASK;

	/* OS sanity checks that need to happen before the kernel runs */
	os_early_checks();

	can_do_skas();

	if (proc_mm && ptrace_faultinfo)
		mode = "SKAS3";
	else
		mode = "SKAS0";
	printf("UML running in %s mode\n", mode);

	brk_start = (unsigned long) sbrk(0);

	/*
	 * Increase physical memory size for exec-shield users
	 * so they actually get what they asked for. This should
	 * add zero for non-exec shield users
	 */
	diff = UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end);
	if (diff > 1024 * 1024) {
		printf("Adding %ld bytes to physical memory to account for "
		       "exec-shield gap\n", diff);
		physmem_size += UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end);
	}

	uml_physmem = (unsigned long) &__binary_start & PAGE_MASK;

	/* Reserve up to 4M after the current brk */
	uml_reserved = ROUND_4M(brk_start) + (1 << 22);

	setup_machinename(init_utsname()->machine);

	highmem = 0;
	iomem_size = (iomem_size + PAGE_SIZE - 1) & PAGE_MASK;
	max_physmem = TASK_SIZE - uml_physmem - iomem_size - MIN_VMALLOC;

	/*
	 * Zones have to begin on a 1 << MAX_ORDER page boundary,
	 * so this makes sure that's true for highmem
	 */
	max_physmem &= ~((1 << (PAGE_SHIFT + MAX_ORDER)) - 1);
	if (physmem_size + iomem_size > max_physmem) {
		highmem = physmem_size + iomem_size - max_physmem;
		physmem_size -= highmem;
#ifndef CONFIG_HIGHMEM
		highmem = 0;
		printf("CONFIG_HIGHMEM not enabled - physical memory shrunk "
		       "to %Lu bytes\n", physmem_size);
#endif
	}

	high_physmem = uml_physmem + physmem_size;
	end_iomem = high_physmem + iomem_size;
	high_memory = (void *) end_iomem;

	start_vm = VMALLOC_START;

	setup_physmem(uml_physmem, uml_reserved, physmem_size, highmem);
	if (init_maps(physmem_size, iomem_size, highmem)) {
		printf("Failed to allocate mem_map for %Lu bytes of physical "
		       "memory and %Lu bytes of highmem\n", physmem_size,
		       highmem);
		exit(1);
	}

	virtmem_size = physmem_size;
	stack = (unsigned long) argv;
	stack &= ~(1024 * 1024 - 1);
	avail = stack - start_vm;
	if (physmem_size > avail)
		virtmem_size = avail;
	end_vm = start_vm + virtmem_size;

	if (virtmem_size < physmem_size)
		printf("Kernel virtual memory size shrunk to %lu bytes\n",
		       virtmem_size);

	atomic_notifier_chain_register(&panic_notifier_list,
				       &panic_exit_notifier);

	uml_postsetup();

	stack_protections((unsigned long) &init_thread_info);
	os_flush_stdout();

	return start_uml();
}
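/*
 * For context: panic_exit_notifier, registered on panic_notifier_list
 * above, follows the standard kernel notifier_block pattern. A minimal
 * sketch; the real handler in um_arch.c also dumps state and sets the
 * UML exit code, so treat this body as an assumption:
 */
static int panic_exit(struct notifier_block *self, unsigned long unused1,
		      void *unused2)
{
	/* ... flush UML consoles, record the exit code, dump core ... */
	return 0;
}

static struct notifier_block panic_exit_notifier = {
	.notifier_call = panic_exit,
};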