void __init setup_arch(char **cmdline_p)
{
	stack_protections((unsigned long) &init_thread_info);
	setup_physmem(uml_physmem, uml_reserved, physmem_size, highmem);
	mem_total_pages(physmem_size, iomem_size, highmem);
	read_initrd();

	paging_init();
	strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;
	setup_hostinfo(host_info, sizeof host_info);
}
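/*
 * stack_protections() is called from nearly every entry point in this
 * file but its body is not shown here.  A minimal sketch of what such a
 * helper might look like, assuming it simply marks the (page-aligned)
 * stack region readable, writable, and executable via mprotect(); the
 * name, the size parameter, and the exit-on-failure policy are
 * illustrative assumptions, not the verified implementation:
 */
#include <sys/mman.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static void example_stack_protections(unsigned long address, size_t size)
{
	/* address must be page aligned, as thread stacks are. */
	if (mprotect((void *) address, size,
		     PROT_READ | PROT_WRITE | PROT_EXEC) < 0) {
		fprintf(stderr, "protecting stack failed, errno = %d\n",
			errno);
		exit(1);
	}
}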
unsigned long alloc_stack(int order, int atomic)
{
	unsigned long page;
	gfp_t flags = GFP_KERNEL;

	if (atomic)
		flags = GFP_ATOMIC;
	page = __get_free_pages(flags, order);
	if (page == 0)
		return 0;

	stack_protections(page);
	return page;
}
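/*
 * Illustrative caller of alloc_stack() above (not from the original
 * source; the helper name is hypothetical): order 0 requests a single
 * page, and a nonzero atomic argument selects GFP_ATOMIC so the
 * allocation is safe in contexts that cannot sleep, e.g. in interrupt
 * context or with a spinlock held.
 */
static unsigned long example_get_temp_stack(int in_atomic_context)
{
	unsigned long stack = alloc_stack(0, in_atomic_context);

	if (stack == 0)
		return 0;	/* out of memory */

	/* ... use the stack, then release it with free_page(stack) ... */
	return stack;
}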
/*
 * Earlier, SKAS-specific entry point.  Note that the start_userspace()
 * return value is not checked here; the later start_uml() below adds
 * that check.
 */
int start_uml_skas(void)
{
	stack_protections((unsigned long) &cpu0_irqstack);
	set_sigstack(cpu0_irqstack, THREAD_SIZE);
	if (proc_mm)
		userspace_pid[0] = start_userspace(0);

	init_new_thread_signals();

	init_task.thread.request.u.thread.proc = start_kernel_proc;
	init_task.thread.request.u.thread.arg = NULL;
	return start_idle_thread(task_stack_page(&init_task),
				 &init_task.thread.mode.skas.switch_buf);
}
int __init start_uml(void)
{
	stack_protections((unsigned long) &cpu0_irqstack);
	set_sigstack(cpu0_irqstack, THREAD_SIZE);

	if (proc_mm) {
		userspace_pid[0] = start_userspace(0);
		if (userspace_pid[0] < 0) {
			printf("start_uml - start_userspace returned %d\n",
			       userspace_pid[0]);
			exit(1);
		}
	}

	init_new_thread_signals();

	init_task.thread.request.u.thread.proc = start_kernel_proc;
	init_task.thread.request.u.thread.arg = NULL;
	return start_idle_thread(task_stack_page(&init_task),
				 &init_task.thread.switch_buf);
}
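/*
 * start_uml() hardens start_uml_skas() by checking the pid returned
 * from start_userspace().  A sketch of the convention that check
 * appears to assume (consistent with the printf above, but an
 * assumption here, as is the helper name): a negative return carries
 * -errno, so the failure reason can be decoded with strerror():
 */
#include <stdio.h>
#include <string.h>

static void example_report_start_failure(int pid)
{
	if (pid < 0)
		fprintf(stderr, "start_userspace failed: %s\n",
			strerror(-pid));
}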
void flush_thread_tt(void)
{
	unsigned long stack;
	int new_pid;

	/* Temporary stack for the exec trampoline. */
	stack = alloc_stack(0, 0);
	if (stack == 0) {
		printk(KERN_ERR
		       "flush_thread : failed to allocate temporary stack\n");
		do_exit(SIGKILL);
	}

	new_pid = start_fork_tramp(task_stack_page(current), stack, 0,
				   exec_tramp);
	if (new_pid < 0) {
		printk(KERN_ERR
		       "flush_thread : new thread failed, errno = %d\n",
		       -new_pid);
		do_exit(SIGKILL);
	}

	if (current_thread->cpu == 0)
		forward_interrupts(new_pid);
	current->thread.request.op = OP_EXEC;
	current->thread.request.u.exec.pid = new_pid;
	unprotect_stack((unsigned long) current_thread);
	os_usr1_process(os_getpid());

	/*
	 * Enable SIGUSR1 just long enough for the signal raised by
	 * os_usr1_process() above to be delivered, then disable it again.
	 */
	change_sig(SIGUSR1, 1);
	change_sig(SIGUSR1, 0);

	enable_timer();
	free_page(stack);
	protect_memory(uml_reserved, high_physmem - uml_reserved, 1, 1, 0, 1);
	stack_protections((unsigned long) current_thread);
	force_flush_all();
	unblock_signals();
}
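/*
 * The change_sig(SIGUSR1, 1) / change_sig(SIGUSR1, 0) pair in
 * flush_thread_tt() above unblocks the signal just long enough for the
 * pending SIGUSR1 to be delivered, then blocks it again.  A plain-libc
 * sketch of the same pattern (an analogy for illustration, not the UML
 * implementation of change_sig()):
 */
#include <signal.h>

static void example_drain_pending(int sig)
{
	sigset_t set;

	sigemptyset(&set);
	sigaddset(&set, sig);

	/* Unblock: any pending instance of sig is delivered here. */
	sigprocmask(SIG_UNBLOCK, &set, NULL);
	/* Re-block it for the code that follows. */
	sigprocmask(SIG_BLOCK, &set, NULL);
}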
int __init linux_main(int argc, char **argv)
{
	unsigned long avail, diff;
	unsigned long virtmem_size, max_physmem;
	unsigned long stack;
	unsigned int i;
	int add;
	char *mode;

	for (i = 1; i < argc; i++) {
		if ((i == 1) && (argv[i][0] == ' '))
			continue;
		add = 1;
		uml_checksetup(argv[i], &add);
		if (add)
			add_arg(argv[i]);
	}
	if (have_root == 0)
		add_arg(DEFAULT_COMMAND_LINE);

	host_task_size = os_get_top_address();
	/*
	 * TASK_SIZE needs to be PGDIR_SIZE aligned or else exit_mmap craps
	 * out
	 */
	task_size = host_task_size & PGDIR_MASK;

	/* OS sanity checks that need to happen before the kernel runs */
	os_early_checks();

	can_do_skas();

	if (proc_mm && ptrace_faultinfo)
		mode = "SKAS3";
	else
		mode = "SKAS0";
	printf("UML running in %s mode\n", mode);

	brk_start = (unsigned long) sbrk(0);

	/*
	 * Increase physical memory size for exec-shield users
	 * so they actually get what they asked for. This should
	 * add zero for non-exec shield users
	 */
	diff = UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end);
	if (diff > 1024 * 1024) {
		printf("Adding %ld bytes to physical memory to account for "
		       "exec-shield gap\n", diff);
		physmem_size += UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end);
	}

	uml_physmem = (unsigned long) &__binary_start & PAGE_MASK;

	/* Reserve up to 4M after the current brk */
	uml_reserved = ROUND_4M(brk_start) + (1 << 22);

	setup_machinename(init_utsname()->machine);

	highmem = 0;
	iomem_size = (iomem_size + PAGE_SIZE - 1) & PAGE_MASK;
	max_physmem = TASK_SIZE - uml_physmem - iomem_size - MIN_VMALLOC;

	/*
	 * Zones have to begin on a 1 << MAX_ORDER page boundary,
	 * so this makes sure that's true for highmem
	 */
	max_physmem &= ~((1 << (PAGE_SHIFT + MAX_ORDER)) - 1);
	if (physmem_size + iomem_size > max_physmem) {
		highmem = physmem_size + iomem_size - max_physmem;
		physmem_size -= highmem;
#ifndef CONFIG_HIGHMEM
		highmem = 0;
		printf("CONFIG_HIGHMEM not enabled - physical memory shrunk "
		       "to %Lu bytes\n", physmem_size);
#endif
	}

	high_physmem = uml_physmem + physmem_size;
	end_iomem = high_physmem + iomem_size;
	high_memory = (void *) end_iomem;

	start_vm = VMALLOC_START;

	setup_physmem(uml_physmem, uml_reserved, physmem_size, highmem);
	if (init_maps(physmem_size, iomem_size, highmem)) {
		printf("Failed to allocate mem_map for %Lu bytes of physical "
		       "memory and %Lu bytes of highmem\n", physmem_size,
		       highmem);
		exit(1);
	}

	virtmem_size = physmem_size;
	stack = (unsigned long) argv;
	stack &= ~(1024 * 1024 - 1);
	avail = stack - start_vm;
	if (physmem_size > avail)
		virtmem_size = avail;
	end_vm = start_vm + virtmem_size;

	if (virtmem_size < physmem_size)
		printf("Kernel virtual memory size shrunk to %lu bytes\n",
		       virtmem_size);

	atomic_notifier_chain_register(&panic_notifier_list,
				       &panic_exit_notifier);

	uml_postsetup();

	stack_protections((unsigned long) &init_thread_info);
	os_flush_stdout();

	return start_uml();
}
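/*
 * Worked sketch of the zone-alignment step in linux_main() above, with
 * assumed example values PAGE_SHIFT = 12 and MAX_ORDER = 11 (common
 * defaults; the real values are configuration dependent).  Zones must
 * begin on a 1 << (PAGE_SHIFT + MAX_ORDER) byte boundary, 8 MB here,
 * so the low bits of max_physmem are masked off:
 */
#include <stdio.h>

int main(void)
{
	unsigned long page_shift = 12, max_order = 11;
	unsigned long max_physmem = 0x1f654321UL;	/* arbitrary example */

	max_physmem &= ~((1UL << (page_shift + max_order)) - 1);

	/* Prints 0x1f000000: rounded down to the nearest 8 MB boundary. */
	printf("aligned max_physmem = %#lx\n", max_physmem);
	return 0;
}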