void __init os_early_checks(void)
{
	int pid;

	/* Print out the core dump limits early */
	check_coredump_limit();

	check_ptrace();

	/* Need to check this early because mmapping happens before the
	 * kernel is running.
	 */
	check_tmpexec();

	pid = start_ptraced_child();
	if (init_registers(pid))
		fatal("Failed to initialize default registers");
	stop_ptraced_child(pid, 1, 1);
}
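/*
 * linux_main() is the UML kernel's entry point proper, invoked with the
 * host process's argc/argv. It filters the command line, chooses between
 * TT and SKAS mode, lays out physical memory, iomem, and the vmalloc
 * region within the host address space, and then hands control to the
 * start_uml_*() routine for the chosen mode.
 */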
int linux_main(int argc, char **argv)
{
	unsigned long avail, diff;
	unsigned long virtmem_size, max_physmem;
	unsigned int i, add;

	for (i = 1; i < argc; i++) {
		if ((i == 1) && (argv[i][0] == ' '))
			continue;
		add = 1;
		uml_checksetup(argv[i], &add);
		if (add)
			add_arg(saved_command_line, argv[i]);
	}
	if (have_root == 0)
		add_arg(saved_command_line, DEFAULT_COMMAND_LINE);

	mode_tt = force_tt ? 1 : !can_do_skas();
#ifndef CONFIG_MODE_TT
	if (mode_tt) {
		/* Since CONFIG_MODE_TT is #undef'ed, force_tt cannot be 1.
		 * So, can_do_skas() returned 0, and the message is correct.
		 */
		printf("Support for TT mode is disabled, and no SKAS support "
		       "is present on the host.\n");
		exit(1);
	}
#endif

	uml_start = CHOOSE_MODE_PROC(set_task_sizes_tt, set_task_sizes_skas, 0,
				     &host_task_size, &task_size);

	/* Need to check this early because mmapping happens before the
	 * kernel is running.
	 */
	check_tmpexec();

	brk_start = (unsigned long) sbrk(0);
	CHOOSE_MODE_PROC(before_mem_tt, before_mem_skas, brk_start);

	/* Increase physical memory size for exec-shield users
	 * so they actually get what they asked for. This should
	 * add zero for non-exec shield users.
	 */
	diff = UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end);
	if (diff > 1024 * 1024) {
		printf("Adding %ld bytes to physical memory to account for "
		       "exec-shield gap\n", diff);
		physmem_size += UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end);
	}

	uml_physmem = uml_start;

	/* Reserve up to 4M after the current brk */
	uml_reserved = ROUND_4M(brk_start) + (1 << 22);

	setup_machinename(system_utsname.machine);

#ifdef CONFIG_MODE_TT
	argv1_begin = argv[1];
	argv1_end = &argv[1][strlen(argv[1])];
#endif

	highmem = 0;
	iomem_size = (iomem_size + PAGE_SIZE - 1) & PAGE_MASK;
	max_physmem = get_kmem_end() - uml_physmem - iomem_size - MIN_VMALLOC;

	/* Zones have to begin on a 1 << MAX_ORDER page boundary,
	 * so this makes sure that's true for highmem
	 */
	max_physmem &= ~((1 << (PAGE_SHIFT + MAX_ORDER)) - 1);
	if (physmem_size + iomem_size > max_physmem) {
		highmem = physmem_size + iomem_size - max_physmem;
		physmem_size -= highmem;
#ifndef CONFIG_HIGHMEM
		highmem = 0;
		printf("CONFIG_HIGHMEM not enabled - physical memory shrunk "
		       "to %ld bytes\n", physmem_size);
#endif
	}

	high_physmem = uml_physmem + physmem_size;
	end_iomem = high_physmem + iomem_size;
	high_memory = (void *) end_iomem;

	start_vm = VMALLOC_START;

	setup_physmem(uml_physmem, uml_reserved, physmem_size, highmem);
	if (init_maps(physmem_size, iomem_size, highmem)) {
		printf("Failed to allocate mem_map for %ld bytes of physical "
		       "memory and %ld bytes of highmem\n", physmem_size,
		       highmem);
		exit(1);
	}

	virtmem_size = physmem_size;
	avail = get_kmem_end() - start_vm;
	if (physmem_size > avail)
		virtmem_size = avail;
	end_vm = start_vm + virtmem_size;

	if (virtmem_size < physmem_size)
		printf("Kernel virtual memory size shrunk to %ld bytes\n",
		       virtmem_size);

	uml_postsetup();

	task_protections((unsigned long) &init_thread_info);
	os_flush_stdout();

	return CHOOSE_MODE(start_uml_tt(), start_uml_skas());
}
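/*
 * For reference, a minimal sketch of the rounding helpers used above.
 * These definitions are assumptions for illustration, not copied from
 * the UML headers: UML_ROUND_UP is assumed to round an address up to
 * the next page boundary, and ROUND_4M up to the next 4MB boundary.
 */
#define UML_ROUND_UP(addr) \
	((((unsigned long) (addr)) + PAGE_SIZE - 1) & PAGE_MASK)
#define ROUND_4M(n) \
	((((unsigned long) (n)) + (1 << 22) - 1) & ~((1 << 22) - 1))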