/*
 * Common TT-mode signal entry: snapshot the current task's TT registers,
 * dispatch the in-kernel handler for `sig`, then restore the snapshot.
 * Runs with kernel memory temporarily unprotected (TT mode write-protects
 * kernel memory while in userspace).
 */
void sig_handler_common_tt(int sig, void *sc_ptr)
{
	struct sigcontext *sc = sc_ptr;
	struct tt_regs save_regs, *r;
	struct signal_info *info;
	int save_errno = errno, is_user;	/* errno must survive the handler */

	unprotect_kernel_mem();

	r = &TASK_REGS(get_current())->tt;
	/* Save by value - the handler may re-enter and overwrite *r */
	save_regs = *r;
	/* Were we interrupted in userspace?  Judged from the signal SP. */
	is_user = user_context(SC_SP(sc));
	r->sc = sc;
	/* SIGUSR2 is the syscall notification - anything else means we are
	 * not in a syscall (NOTE(review): inferred from this assignment;
	 * confirm against the SIGUSR2 handler).
	 */
	if(sig != SIGUSR2)
		r->syscall = -1;

	change_sig(SIGUSR1, 1);
	info = &sig_info[sig];
	/* IRQ-type signals stay blocked while their handler runs */
	if(!info->is_irq)
		unblock_signals();

	(*info->handler)(sig, (union uml_pt_regs *) r);

	if(is_user){
		/* Handle any pending reschedules/signals before returning */
		interrupt_end();
		block_signals();
		change_sig(SIGUSR1, 0);
		set_user_mode(NULL);
	}

	*r = save_regs;
	errno = save_errno;

	if(is_user)
		protect_kernel_mem();
}
int copy_thread_tt(int nr, unsigned long clone_flags, unsigned long sp, unsigned long stack_top, struct task_struct * p, struct pt_regs *regs) { int (*tramp)(void *); int new_pid, err; unsigned long stack; if(current->thread.forking) tramp = fork_tramp; else { tramp = new_thread_proc; p->thread.request.u.thread = current->thread.request.u.thread; } err = os_pipe(p->thread.mode.tt.switch_pipe, 1, 1); if(err < 0){ printk("copy_thread : pipe failed, err = %d\n", -err); return(err); } stack = alloc_stack(0, 0); if(stack == 0){ printk(KERN_ERR "copy_thread : failed to allocate " "temporary stack\n"); return(-ENOMEM); } clone_flags &= CLONE_VM; p->thread.temp_stack = stack; new_pid = start_fork_tramp(task_stack_page(p), stack, clone_flags, tramp); if(new_pid < 0){ printk(KERN_ERR "copy_thread : clone failed - errno = %d\n", -new_pid); return(new_pid); } if(current->thread.forking){ sc_to_sc(UPT_SC(&p->thread.regs.regs), UPT_SC(®s->regs)); SC_SET_SYSCALL_RETURN(UPT_SC(&p->thread.regs.regs), 0); if(sp != 0) SC_SP(UPT_SC(&p->thread.regs.regs)) = sp; } p->thread.mode.tt.extern_pid = new_pid; current->thread.request.op = OP_FORK; current->thread.request.u.fork.pid = new_pid; os_usr1_process(os_getpid()); /* Enable the signal and then disable it to ensure that it is handled * here, and nowhere else. */ change_sig(SIGUSR1, 1); change_sig(SIGUSR1, 0); err = 0; return(err); }
/*
 * Run a callback in the tracing thread.  If we already are the tracing
 * thread, call it directly; otherwise file an OP_CB request and signal
 * ourselves with SIGUSR1 so the tracing thread picks it up.
 */
void initial_thread_cb_tt(void (*proc)(void *), void *arg)
{
	if(os_getpid() == tracing_pid){
		/* Already in the tracing thread - no handoff needed */
		(*proc)(arg);
		return;
	}

	current->thread.request.op = OP_CB;
	current->thread.request.u.cb.proc = proc;
	current->thread.request.u.cb.arg = arg;
	os_usr1_process(os_getpid());

	/* Enable then disable SIGUSR1 so it is delivered here and only here */
	change_sig(SIGUSR1, 1);
	change_sig(SIGUSR1, 0);
}
/*
 * SKAS-mode entry point for a newly created kernel thread: wait for the
 * first schedule-in, run the requested function, and either enter
 * userspace (if it exec'd) or exit.
 *
 * Fix: "¤t" was HTML-entity corruption of "&current"
 * (&curren; -> ¤); restored in the thread_wait, run_kernel_thread and
 * userspace calls.
 */
void new_thread_handler(int sig)
{
	int (*fn)(void *), n;
	void *arg;

	fn = current->thread.request.u.thread.proc;
	arg = current->thread.request.u.thread.arg;
	change_sig(SIGUSR1, 1);
	/* Block here until this thread is first switched to */
	thread_wait(&current->thread.mode.skas.switch_buf,
		    current->thread.mode.skas.fork_buf);

	if(current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	/* The return value is 1 if the kernel thread execs a process,
	 * 0 if it just exits
	 */
	n = run_kernel_thread(fn, arg, &current->thread.exec_buf);
	if(n == 1){
		/* Handle any immediate reschedules or signals */
		interrupt_end();
		userspace(&current->thread.regs.regs);
	}
	else do_exit(0);
}
/*
 * SKAS-mode common signal entry: choose a register set (a per-cpu scratch
 * set for SIGSEGV, the task's own otherwise), record fault info for
 * hardware-fault signals, and dispatch the in-kernel handler.
 */
void sig_handler_common_skas(int sig, void *sc_ptr)
{
	struct sigcontext *sc = sc_ptr;
	struct uml_pt_regs *r;
	void (*handler)(int, struct uml_pt_regs *);
	int save_user, save_errno = errno;	/* errno must survive the handler */

	/*
	 * This is done because to allow SIGSEGV to be delivered inside a SEGV
	 * handler. This can happen in copy_user, and if SEGV is disabled,
	 * the process will die.
	 * XXX Figure out why this is better than SA_NODEFER
	 */
	if (sig == SIGSEGV) {
		change_sig(SIGSEGV, 1);
		/*
		 * For segfaults, we want the data from the
		 * sigcontext. In this case, we don't want to mangle
		 * the process registers, so use a static set of
		 * registers. For other signals, the process
		 * registers are OK.
		 */
		r = &ksig_regs[cpu()];
		copy_sc(r, sc_ptr);
	}
	else
		r = TASK_REGS(get_current());

	/* Remember and clear the user-context flag - we're in the kernel now */
	save_user = r->is_user;
	r->is_user = 0;
	/* Hardware-fault signals carry fault data in the sigcontext */
	if ((sig == SIGFPE) || (sig == SIGSEGV) || (sig == SIGBUS) ||
	    (sig == SIGILL) || (sig == SIGTRAP))
		GET_FAULTINFO_FROM_SC(r->faultinfo, sc);

	change_sig(SIGUSR1, 1);

	handler = sig_info[sig];

	/* unblock SIGVTALRM, SIGIO if sig isn't IRQ signal */
	if ((sig != SIGIO) && (sig != SIGWINCH) && (sig != SIGVTALRM))
		unblock_signals();

	handler(sig, r);

	errno = save_errno;
	r->is_user = save_user;
}
void sig_handler_common_tt(int sig, void *sc_ptr) { struct sigcontext *sc = sc_ptr; struct tt_regs save_regs, *r; struct signal_info *info; int save_errno = errno, is_user; unprotect_kernel_mem(); /* This is done because to allow SIGSEGV to be delivered inside a SEGV * handler. This can happen in copy_user, and if SEGV is disabled, * the process will die. */ if(sig == SIGSEGV) change_sig(SIGSEGV, 1); /* This is done because to allow SIGSEGV to be delivered inside a SEGV * handler. This can happen in copy_user, and if SEGV is disabled, * the process will die. */ if(sig == SIGSEGV) change_sig(SIGSEGV, 1); r = &TASK_REGS(get_current())->tt; save_regs = *r; is_user = user_context(SC_SP(sc)); r->sc = sc; if(sig != SIGUSR2) r->syscall = -1; info = &sig_info[sig]; if(!info->is_irq) unblock_signals(); (*info->handler)(sig, (union uml_pt_regs *) r); if(is_user){ interrupt_end(); block_signals(); set_user_mode(NULL); } *r = save_regs; errno = save_errno; if(is_user) protect_kernel_mem(); }
/*
 * Child-process entry: set up tracing and signal state for what becomes
 * the idle thread (see set_cmdline below), then run the supplied proc.
 * `arg` is a pointer to an int (*)(void *) function.
 */
static int signal_tramp(void *arg)
{
	int (*proc)(void *);

	/* In honeypot mode, unmap the top 256MB so the host stack region
	 * is not visible - NOTE(review): exact rationale not visible here.
	 */
	if(honeypot && munmap((void *) (host_task_size - 0x10000000),
			      0x10000000))
		panic("Unmapping stack failed");
	if(ptrace(PTRACE_TRACEME, 0, 0, 0) < 0)
		panic("ptrace PTRACE_TRACEME failed");
	/* Stop so the tracer can attach and configure us before we proceed */
	os_stop_process(os_getpid());
	change_sig(SIGWINCH, 0);
	signal(SIGUSR1, SIG_IGN);
	change_sig(SIGCHLD, 0);
	signal(SIGSEGV, (__sighandler_t) sig_handler);
	set_cmdline("(idle thread)");
	set_init_pid(os_getpid());
	proc = arg;
	return((*proc)(NULL));
}
/*
 * Host-clone trampoline for fork: with interrupts off, move onto the
 * new stack and arrange for finish_fork_handler to run when SIGUSR1
 * is delivered and enabled.
 */
int fork_tramp(void *stack)
{
	local_irq_disable();
	arch_init_thread();
	init_new_thread_stack(stack, finish_fork_handler);

	/* Queue SIGUSR1 to ourselves, then unblock it so the handler fires */
	os_usr1_process(os_getpid());
	change_sig(SIGUSR1, 1);
	return(0);
}
static void new_thread_handler(int sig) { unsigned long disable; int (*fn)(void *); void *arg; fn = current->thread.request.u.thread.proc; arg = current->thread.request.u.thread.arg; UPT_SC(¤t->thread.regs.regs) = (void *) (&sig + 1); disable = (1 << (SIGVTALRM - 1)) | (1 << (SIGALRM - 1)) | (1 << (SIGIO - 1)) | (1 << (SIGPROF - 1)); SC_SIGMASK(UPT_SC(¤t->thread.regs.regs)) &= ~disable; suspend_new_thread(current->thread.mode.tt.switch_pipe[0]); force_flush_all(); if(current->thread.prev_sched != NULL) schedule_tail(current->thread.prev_sched); current->thread.prev_sched = NULL; init_new_thread_signals(1); enable_timer(); free_page(current->thread.temp_stack); set_cmdline("(kernel thread)"); change_sig(SIGUSR1, 1); change_sig(SIGVTALRM, 1); change_sig(SIGPROF, 1); local_irq_enable(); if(!run_kernel_thread(fn, arg, ¤t->thread.exec_buf)) do_exit(0); /* XXX No set_user_mode here because a newly execed process will * immediately segfault on its non-existent IP, coming straight back * to the signal handler, which will call set_user_mode on its way * out. This should probably change since it's confusing. */ }
void finish_fork_handler(int sig) { UPT_SC(¤t->thread.regs.regs) = (void *) (&sig + 1); suspend_new_thread(current->thread.mode.tt.switch_pipe[0]); force_flush_all(); if(current->thread.prev_sched != NULL) schedule_tail(current->thread.prev_sched); current->thread.prev_sched = NULL; enable_timer(); change_sig(SIGVTALRM, 1); local_irq_enable(); if(current->mm != current->parent->mm) protect_memory(uml_reserved, high_physmem - uml_reserved, 1, 1, 0, 1); task_protections((unsigned long) current_thread); free_page(current->thread.temp_stack); local_irq_disable(); change_sig(SIGUSR1, 0); set_user_mode(current); }
/*
 * TT-mode context switch: wake the incoming host process via its switch
 * pipe, then sleep on our own pipe until we are switched back to.  Timer
 * signals are disabled across the handoff and restored afterwards.
 */
void switch_to_tt(void *prev, void *next)
{
	struct task_struct *from, *to, *prev_sched;
	unsigned long flags;
	int err, vtalrm, alrm, prof, cpu;
	char c;

	from = prev;
	to = next;

	cpu = from->thread_info->cpu;
	if(cpu == 0)
		forward_interrupts(to->thread.mode.tt.extern_pid);
#ifdef CONFIG_SMP
	forward_ipi(cpu_data[cpu].ipi_pipe[0], to->thread.mode.tt.extern_pid);
#endif
	local_irq_save(flags);

	/* Turn off timer signals, remembering their state for restoration */
	vtalrm = change_sig(SIGVTALRM, 0);
	alrm = change_sig(SIGALRM, 0);
	prof = change_sig(SIGPROF, 0);

	forward_pending_sigio(to->thread.mode.tt.extern_pid);

	c = 0;

	/* Wake the incoming process... */
	err = os_write_file(to->thread.mode.tt.switch_pipe[1], &c, sizeof(c));
	if(err != sizeof(c))
		panic("write of switch_pipe failed, err = %d", -err);

	/* A -1 read end marks an exited task - kill its host process */
	if(from->thread.mode.tt.switch_pipe[0] == -1)
		os_kill_process(os_getpid(), 0);

	/* ...and sleep until someone writes our pipe to switch us back in */
	err = os_read_file(from->thread.mode.tt.switch_pipe[0], &c, sizeof(c));
	if(err != sizeof(c))
		panic("read of switch_pipe failed, errno = %d", -err);

	/* If the process that we have just scheduled away from has exited,
	 * then it needs to be killed here.  The reason is that, even though
	 * it will kill itself when it next runs, that may be too late.  Its
	 * stack will be freed, possibly before then, and if that happens,
	 * we have a use-after-free situation.  So, it gets killed here
	 * in case it has not already killed itself.
	 */
	prev_sched = current->thread.prev_sched;
	if(prev_sched->thread.mode.tt.switch_pipe[0] == -1)
		os_kill_process(prev_sched->thread.mode.tt.extern_pid, 1);

	change_sig(SIGVTALRM, vtalrm);
	change_sig(SIGALRM, alrm);
	change_sig(SIGPROF, prof);

	arch_switch();

	flush_tlb_all();
	local_irq_restore(flags);
}
/*
 * TT-mode flush_thread (exec path): spawn a fresh host process for the
 * new image via exec_tramp, hand over to it through an OP_EXEC request,
 * and rebuild this address space's protections.
 */
void flush_thread_tt(void)
{
	unsigned long stack;
	int new_pid;

	stack = alloc_stack(0, 0);
	if(stack == 0){
		printk(KERN_ERR "flush_thread : failed to allocate temporary stack\n");
		do_exit(SIGKILL);
	}

	new_pid = start_fork_tramp(task_stack_page(current), stack, 0, exec_tramp);
	if(new_pid < 0){
		printk(KERN_ERR "flush_thread : new thread failed, errno = %d\n",
		       -new_pid);
		do_exit(SIGKILL);
	}

	if(current_thread->cpu == 0)
		forward_interrupts(new_pid);
	current->thread.request.op = OP_EXEC;
	current->thread.request.u.exec.pid = new_pid;
	unprotect_stack((unsigned long) current_thread);
	os_usr1_process(os_getpid());

	/* Enable then disable SIGUSR1 so it is handled here and nowhere else */
	change_sig(SIGUSR1, 1);
	change_sig(SIGUSR1, 0);

	enable_timer();
	free_page(stack);
	protect_memory(uml_reserved, high_physmem - uml_reserved, 1, 1, 0, 1);
	stack_protections((unsigned long) current_thread);
	force_flush_all();
	unblock_signals();
}
/*
 * SKAS-mode child-side completion of fork: wait for the first schedule-in,
 * finish scheduler bookkeeping, and enter userspace.
 *
 * Fix: "¤t" was HTML-entity corruption of "&current"
 * (&curren; -> ¤); restored in the thread_wait and userspace calls.
 */
void fork_handler(int sig)
{
	change_sig(SIGUSR1, 1);
	/* Block until this task is first switched to */
	thread_wait(&current->thread.mode.skas.switch_buf,
		    current->thread.mode.skas.fork_buf);

	force_flush_all();
	/* A forked child must have been scheduled away from its parent */
	if(current->thread.prev_sched == NULL)
		panic("blech");
	schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	userspace(&current->thread.regs.regs);
}
/*
 * SKAS-mode child-side completion of fork: wait for the first schedule-in,
 * finish scheduler bookkeeping, service pending reschedules/signals, and
 * enter userspace.
 *
 * Fix: "¤t" was HTML-entity corruption of "&current"
 * (&curren; -> ¤); restored in the thread_wait and userspace calls.
 */
void fork_handler(int sig)
{
	change_sig(SIGUSR1, 1);
	/* Block until this task is first switched to */
	thread_wait(&current->thread.mode.skas.switch_buf,
		    current->thread.mode.skas.fork_buf);

	force_flush_all();
	/* A forked child must have been scheduled away from its parent */
	if(current->thread.prev_sched == NULL)
		panic("blech");
	schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	/* Handle any immediate reschedules or signals */
	interrupt_end();

	userspace(&current->thread.regs.regs);
}
/*
 * Child side of the ptrace capability probe: put ourselves under the
 * parent's control, stop, then issue one getpid syscall that the parent
 * may intercept and rewrite.  The exit status reports what happened:
 * 1 - syscall ran unmodified, 0 - parent rewrote it as expected,
 * 2 - something else entirely (serious trouble).
 */
static void ptrace_child(void)
{
	int exit_code;
	/* Calling os_getpid because some libcs cached getpid incorrectly */
	int self = os_getpid(), parent = getppid();
	int seen;

	if ((change_sig(SIGWINCH, 0) < 0) ||
	    (ptrace(PTRACE_TRACEME, 0, 0, 0) < 0)) {
		perror("ptrace");
		kill(self, SIGKILL);
	}
	kill(self, SIGSTOP);

	/*
	 * This syscall will be intercepted by the parent. Don't call more than
	 * once, please.
	 */
	seen = os_getpid();

	if (seen == self) {
		/* Nothing modified by the parent, we are running normally. */
		exit_code = 1;
	} else if (seen == parent) {
		/*
		 * Expected in check_ptrace and check_sysemu when they succeed
		 * in modifying the stack frame
		 */
		exit_code = 0;
	} else {
		/*
		 * Serious trouble! This could be caused by a bug in host 2.6
		 * SKAS3/2.6 patch before release -V6, together with a bug in
		 * the UML code itself.
		 */
		exit_code = 2;
	}
	exit(exit_code);
}
/*
 * Host-clone trampoline for a new kernel thread: with interrupts off,
 * move onto the new stack and arrange for new_thread_handler to run
 * when SIGUSR1 is delivered and enabled.
 */
static int new_thread_proc(void *stack)
{
	/* local_irq_disable is needed to block out signals until this thread is
	 * properly scheduled.  Otherwise, the tracing thread will get mighty
	 * upset about any signals that arrive before that.
	 * This has the complication that it sets the saved signal mask in
	 * the sigcontext to block signals.  This gets restored when this
	 * thread (or a descendant, since they get a copy of this sigcontext)
	 * returns to userspace.
	 * So, this is compensated for elsewhere.
	 * XXX There is still a small window until local_irq_disable() actually
	 * finishes where signals are possible - shouldn't be a problem in
	 * practice since SIGIO hasn't been forwarded here yet, and the
	 * local_irq_disable should finish before a SIGVTALRM has time to be
	 * delivered.
	 */
	local_irq_disable();
	init_new_thread_stack(stack, new_thread_handler);
	/* Queue SIGUSR1 to ourselves, then unblock it so the handler fires */
	os_usr1_process(os_getpid());
	change_sig(SIGUSR1, 1);
	return(0);
}
/*
 * SKAS-mode common signal entry (older variant): record fault data from
 * the sigcontext into the task's SKAS registers and dispatch the
 * in-kernel handler, preserving errno and the user-context flag.
 */
void sig_handler_common_skas(int sig, void *sc_ptr)
{
	struct sigcontext *sc = sc_ptr;
	struct skas_regs *r;
	struct signal_info *info;
	int save_errno = errno;		/* errno must survive the handler */
	int save_user;

	r = &TASK_REGS(get_current())->skas;
	/* Remember and clear the user-context flag - we're in the kernel now */
	save_user = r->is_user;
	r->is_user = 0;
	/* Capture fault details from the sigcontext before dispatch */
	r->fault_addr = SC_FAULT_ADDR(sc);
	r->fault_type = SC_FAULT_TYPE(sc);
	r->trap_type = SC_TRAP_TYPE(sc);

	change_sig(SIGUSR1, 1);
	info = &sig_info[sig];
	/* IRQ-type signals stay blocked while their handler runs */
	if(!info->is_irq)
		unblock_signals();

	(*info->handler)(sig, (union uml_pt_regs *) r);

	errno = save_errno;
	r->is_user = save_user;
}
/*
 * TT-mode halt: ask the tracing thread to halt the machine by filing an
 * OP_HALT request and raising SIGUSR1 on ourselves.
 */
void halt_tt(void)
{
	current->thread.request.op = OP_HALT;
	os_usr1_process(os_getpid());
	change_sig(SIGUSR1, 1);
}
/*
 * TT-mode reboot: ask the tracing thread to reboot the machine by filing
 * an OP_REBOOT request and raising SIGUSR1 on ourselves.
 */
void reboot_tt(void)
{
	current->thread.request.op = OP_REBOOT;
	os_usr1_process(os_getpid());
	change_sig(SIGUSR1, 1);
}
/*
 * TT-mode context switch (task_thread_info variant): wake the incoming
 * host process via its switch pipe, then sleep on our own pipe until we
 * are switched back to.  Timer signals are disabled across the handoff.
 */
void switch_to_tt(void *prev, void *next)
{
	struct task_struct *from, *to, *prev_sched;
	unsigned long flags;
	int err, vtalrm, alrm, prof, cpu;
	char c;

	from = prev;
	to = next;

	cpu = task_thread_info(from)->cpu;
	if(cpu == 0)
		forward_interrupts(to->thread.mode.tt.extern_pid);
#ifdef CONFIG_SMP
	forward_ipi(cpu_data[cpu].ipi_pipe[0], to->thread.mode.tt.extern_pid);
#endif
	local_irq_save(flags);

	/* Turn off timer signals, remembering their state for restoration */
	vtalrm = change_sig(SIGVTALRM, 0);
	alrm = change_sig(SIGALRM, 0);
	prof = change_sig(SIGPROF, 0);

	forward_pending_sigio(to->thread.mode.tt.extern_pid);

	c = 0;

	/* Notice that here we "up" the semaphore on which "to" is waiting, and
	 * below (the read) we wait on this semaphore (which is implemented by
	 * switch_pipe) and go sleeping. Thus, after that, we have resumed in
	 * "to", and can't use any more the value of "from" (which is outdated),
	 * nor the value in "to" (since it was the task which stole us the CPU,
	 * which we don't care about).
	 */
	err = os_write_file(to->thread.mode.tt.switch_pipe[1], &c, sizeof(c));
	if(err != sizeof(c))
		panic("write of switch_pipe failed, err = %d", -err);

	/* A -1 read end marks an exited task - kill its host process */
	if(from->thread.mode.tt.switch_pipe[0] == -1)
		os_kill_process(os_getpid(), 0);

	err = os_read_file(from->thread.mode.tt.switch_pipe[0], &c, sizeof(c));
	if(err != sizeof(c))
		panic("read of switch_pipe failed, errno = %d", -err);

	/* If the process that we have just scheduled away from has exited,
	 * then it needs to be killed here.  The reason is that, even though
	 * it will kill itself when it next runs, that may be too late.  Its
	 * stack will be freed, possibly before then, and if that happens,
	 * we have a use-after-free situation.  So, it gets killed here
	 * in case it has not already killed itself.
	 */
	prev_sched = current->thread.prev_sched;
	if(prev_sched->thread.mode.tt.switch_pipe[0] == -1)
		os_kill_process(prev_sched->thread.mode.tt.extern_pid, 1);

	change_sig(SIGVTALRM, vtalrm);
	change_sig(SIGALRM, alrm);
	change_sig(SIGPROF, prof);

	arch_switch_to_tt(prev_sched, current);

	flush_tlb_all();
	local_irq_restore(flags);
}
/*
 * TT-mode context switch (jail-aware variant returning the previously
 * scheduled task): wake the incoming host process via its switch pipe,
 * sleep on our own pipe, and work around a stack-protection race when
 * 'jail' mode is active.
 */
void *switch_to_tt(void *prev, void *next, void *last)
{
	struct task_struct *from, *to, *prev_sched;
	unsigned long flags;
	int err, vtalrm, alrm, prof, cpu;
	char c;
	/* jailing and SMP are incompatible, so this doesn't need to be
	 * made per-cpu
	 */
	static int reading;

	from = prev;
	to = next;

	to->thread.prev_sched = from;

	cpu = from->thread_info->cpu;
	if(cpu == 0)
		forward_interrupts(to->thread.mode.tt.extern_pid);
#ifdef CONFIG_SMP
	forward_ipi(cpu_data[cpu].ipi_pipe[0], to->thread.mode.tt.extern_pid);
#endif
	local_irq_save(flags);

	/* Turn off timer signals, remembering their state for restoration */
	vtalrm = change_sig(SIGVTALRM, 0);
	alrm = change_sig(SIGALRM, 0);
	prof = change_sig(SIGPROF, 0);

	forward_pending_sigio(to->thread.mode.tt.extern_pid);

	c = 0;
	set_current(to);

	reading = 0;
	/* Wake the incoming process... */
	err = os_write_file(to->thread.mode.tt.switch_pipe[1], &c, sizeof(c));
	if(err != sizeof(c))
		panic("write of switch_pipe failed, err = %d", -err);

	reading = 1;
	/* An exited task's host process must be killed rather than resumed */
	if((from->exit_state == EXIT_ZOMBIE) ||
	   (from->exit_state == EXIT_DEAD))
		os_kill_process(os_getpid(), 0);

	/* ...and sleep until someone writes our pipe to switch us back in */
	err = os_read_file(from->thread.mode.tt.switch_pipe[0], &c, sizeof(c));
	if(err != sizeof(c))
		panic("read of switch_pipe failed, errno = %d", -err);

	/* If the process that we have just scheduled away from has exited,
	 * then it needs to be killed here.  The reason is that, even though
	 * it will kill itself when it next runs, that may be too late.  Its
	 * stack will be freed, possibly before then, and if that happens,
	 * we have a use-after-free situation.  So, it gets killed here
	 * in case it has not already killed itself.
	 */
	prev_sched = current->thread.prev_sched;
	if((prev_sched->exit_state == EXIT_ZOMBIE) ||
	   (prev_sched->exit_state == EXIT_DEAD))
		os_kill_ptraced_process(prev_sched->thread.mode.tt.extern_pid, 1);

	/* This works around a nasty race with 'jail'.  If we are switching
	 * between two threads of a threaded app and the incoming process
	 * runs before the outgoing process reaches the read, and it makes
	 * it all the way out to userspace, then it will have write-protected
	 * the outgoing process stack.  Then, when the outgoing process
	 * returns from the write, it will segfault because it can no longer
	 * write its own stack.  So, in order to avoid that, the incoming
	 * thread sits in a loop yielding until 'reading' is set.  This
	 * isn't entirely safe, since there may be a reschedule from a timer
	 * happening between setting 'reading' and sleeping in read.  But,
	 * it should get a whole quantum in which to reach the read and sleep,
	 * which should be enough.
	 */
	if(jail){
		while(!reading) sched_yield();
	}

	change_sig(SIGVTALRM, vtalrm);
	change_sig(SIGALRM, alrm);
	change_sig(SIGPROF, prof);

	arch_switch();

	flush_tlb_all();
	local_irq_restore(flags);

	return(current->thread.prev_sched);
}
/*
 * UML host-process entry point: keep a private copy of argv (for a
 * possible re-exec on reboot), install last-resort fatal signal handlers,
 * run the kernel via linux_main, and on shutdown quiesce timers, fds and
 * signals before either re-exec'ing (reboot) or returning the exit code.
 */
int __init main(int argc, char **argv, char **envp)
{
	char **argv_copy;
	int exit_with, n, err;

	set_stklim();

	setup_env_path();

	/* Duplicate argv so it survives until a possible re-exec below */
	argv_copy = malloc((argc + 1) * sizeof(char *));
	if (argv_copy == NULL) {
		perror("Mallocing argv");
		exit(1);
	}
	for (n = 0; n < argc; n++) {
		argv_copy[n] = strdup(argv[n]);
		if (argv_copy[n] == NULL) {
			perror("Mallocing an arg");
			exit(1);
		}
	}
	argv_copy[argc] = NULL;

	/*
	 * Allow these signals to bring down a UML if all other
	 * methods of control fail.
	 */
	install_fatal_handler(SIGINT);
	install_fatal_handler(SIGTERM);
	install_fatal_handler(SIGHUP);

	scan_elf_aux(envp);

	do_uml_initcalls();
	exit_with = linux_main(argc, argv);

	/*
	 * Disable SIGPROF - I have no idea why libc doesn't do this or turn
	 * off the profiling time, but UML dies with a SIGPROF just before
	 * exiting when profiling is active.
	 */
	change_sig(SIGPROF, 0);

	/*
	 * This signal stuff used to be in the reboot case.  However,
	 * sometimes a SIGVTALRM can come in when we're halting (reproducably
	 * when writing out gcov information, presumably because that takes
	 * some time) and cause a segfault.
	 */

	/* stop timers and set SIGVTALRM to be ignored */
	disable_timer();

	/* disable SIGIO for the fds and set SIGIO to be ignored */
	err = deactivate_all_fds();
	if (err)
		printf("deactivate_all_fds failed, errno = %d\n", -err);

	/*
	 * Let any pending signals fire now.  This ensures
	 * that they won't be delivered after the exec, when
	 * they are definitely not expected.
	 */
	unblock_signals();

	/* Reboot: re-exec ourselves with the saved argv */
	if (exit_with) {
		printf("\n");
		execvp(argv_copy[0], argv_copy);
		perror("Failed to exec kernel");
		exit_with = 1;
	}
	printf("\n");
	return uml_exitcode;
}