Example #1
void destroy_context(struct mm_struct *mm)
{
	struct mm_context *mmu = &mm->context;

	if (proc_mm)
		os_close_file(mmu->id.u.mm_fd);
	else {
		/*
		 * If init_new_context wasn't called, this will be
		 * zero, resulting in a kill(0), which will result in the
		 * whole UML suddenly dying.  Also, cover negative and
		 * 1 cases, since they shouldn't happen either.
		 */
		if (mmu->id.u.pid < 2) {
			printk(KERN_ERR "corrupt mm_context - pid = %d\n",
			       mmu->id.u.pid);
			return;
		}
		os_kill_ptraced_process(mmu->id.u.pid, 1);
	}

	if (skas_needs_stub)
		free_page(mmu->id.stack);

	free_ldt(mmu);
}
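Every example on this page is a call site of os_kill_ptraced_process() from the User-Mode Linux (UML) os-Linux layer. For orientation, here is a minimal sketch of what such a helper plausibly looks like, inferred from the call sites below (a pid plus a reap_child flag); the body is an assumption for illustration, not a quote of the kernel source. The idea is to force-kill a ptraced child and, when asked, reap it with waitpid() so it does not linger as a zombie.

/*
 * Hedged sketch only: assumed signature os_kill_ptraced_process(pid, reap_child).
 * The real UML helper may differ in detail.
 */
#define _GNU_SOURCE
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/wait.h>

static int os_kill_ptraced_process(int pid, int reap_child)
{
	kill(pid, SIGKILL);             /* queue a fatal signal for the tracee */
	ptrace(PTRACE_KILL, pid, 0, 0); /* also kill it if it is ptrace-stopped */
	ptrace(PTRACE_CONT, pid, 0, 0); /* let it run into the pending SIGKILL */
	if (reap_child)                 /* callers pass 0 when they cannot usefully wait */
		waitpid(pid, NULL, __WALL); /* collect the exit so no zombie remains */
	return 0;
}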
Example #2
void do_exec(int old_pid, int new_pid)
{
	unsigned long regs[FRAME_SIZE];
	int err;

	if((ptrace(PTRACE_ATTACH, new_pid, 0, 0) < 0) ||
	   (ptrace(PTRACE_CONT, new_pid, 0, 0) < 0))
		tracer_panic("do_exec failed to attach proc - errno = %d",
			     errno);

	CATCH_EINTR(err = waitpid(new_pid, 0, WUNTRACED));
	if (err < 0)
		tracer_panic("do_exec failed to attach proc in waitpid - errno = %d",
			     errno);

	if(ptrace_getregs(old_pid, regs) < 0)
		tracer_panic("do_exec failed to get registers - errno = %d",
			     errno);

	os_kill_ptraced_process(old_pid, 0);

	if (ptrace(PTRACE_OLDSETOPTIONS, new_pid, 0, (void *)PTRACE_O_TRACESYSGOOD) < 0)
		tracer_panic("do_exec: PTRACE_SETOPTIONS failed, errno = %d", errno);

	if(ptrace_setregs(new_pid, regs) < 0)
		tracer_panic("do_exec failed to start new proc - errno = %d",
			     errno);
}
Example #3
static void kill_off_processes(void)
{
	if (proc_mm)
		os_kill_ptraced_process(userspace_pid[0], 1);
	else {
		struct task_struct *p;
		int pid;

		for_each_process(p) {
			if (p->mm == NULL)
				continue;

			pid = p->mm->context.id.u.pid;
			os_kill_ptraced_process(pid, 1);
		}
	}
}
Example #4
void kill_off_processes_skas(void)
{
	if(proc_mm)
#warning need to loop over userspace_pids in kill_off_processes_skas
		os_kill_ptraced_process(userspace_pid[0], 1);
	else {
		struct task_struct *p;
		int pid, me;

		me = os_getpid();
		for_each_process(p){
			if(p->mm == NULL)
				continue;

			pid = p->mm->context.skas.id.u.pid;
			os_kill_ptraced_process(pid, 1);
		}
	}
}
Example #5
static void kill_off_processes(void)
{
    if (proc_mm)
        /*
         * FIXME: need to loop over userspace_pids
         */
        os_kill_ptraced_process(userspace_pid[0], 1);
    else {
        struct task_struct *p;
        int pid, me;

        me = os_getpid();
        for_each_process(p) {
            if (p->mm == NULL)
                continue;

            pid = p->mm->context.id.u.pid;
            os_kill_ptraced_process(pid, 1);
        }
    }
}
Example #6
static void kill_off_processes(void)
{
	if (proc_mm)
		/*
		 * FIXME: need to loop over userspace_pids
		 */
		os_kill_ptraced_process(userspace_pid[0], 1);
	else {
		struct task_struct *p;
		int pid;

		for_each_process(p) {
			if (p->mm == NULL)
				continue;

			pid = p->mm->context.id.u.pid;
			os_kill_ptraced_process(pid, 1);
		}
	}
}
Example #7
static void kill_off_processes(void)
{
	struct task_struct *p;
	int pid;

	read_lock(&tasklist_lock);
	for_each_process(p) {
		struct task_struct *t;

		t = find_lock_task_mm(p);
		if (!t)
			continue;
		pid = t->mm->context.id.u.pid;
		task_unlock(t);
		os_kill_ptraced_process(pid, 1);
	}
	read_unlock(&tasklist_lock);
}
Example #8
void destroy_context_skas(struct mm_struct *mm)
{
	struct mmu_context_skas *mmu = &mm->context.skas;

	if(proc_mm)
		os_close_file(mmu->id.u.mm_fd);
	else
		os_kill_ptraced_process(mmu->id.u.pid, 1);

	if(!proc_mm || !ptrace_faultinfo){
		free_page(mmu->id.stack);
		pte_lock_deinit(virt_to_page(mmu->last_page_table));
		pte_free_kernel((pte_t *) mmu->last_page_table);
		dec_zone_page_state(virt_to_page(mmu->last_page_table), NR_PAGETABLE);
#ifdef CONFIG_3_LEVEL_PGTABLES
		pmd_free((pmd_t *) mmu->last_pmd);
#endif
	}
}
Example #9
void os_dump_core(void)
{
	int pid;

	signal(SIGSEGV, SIG_DFL);

	/*
	 * We are about to SIGTERM this entire process group to ensure that
	 * nothing is around to run after the kernel exits.  The
	 * kernel wants to abort, not die through SIGTERM, so we
	 * ignore it here.
	 */

	signal(SIGTERM, SIG_IGN);
	kill(0, SIGTERM);
	/*
	 * Most of the other processes associated with this UML are
	 * likely stopped, so give them a SIGCONT so they see the
	 * SIGTERM.
	 */
	kill(0, SIGCONT);

	/*
	 * Now, having sent signals to everyone but us, make sure they
	 * die by ptrace.  Processes can survive what's been done to
	 * them so far - the mechanism I understand is receiving a
	 * SIGSEGV and segfaulting immediately upon return.  There is
	 * always a SIGSEGV pending, and (I'm guessing) signals are
	 * processed in numeric order so the SIGTERM (signal 15 vs
	 * SIGSEGV being signal 11) is never handled.
	 *
	 * Run a waitpid loop until we get some kind of error.
	 * Hopefully, it's ECHILD, but there's not a lot we can do if
	 * it's something else.  Tell os_kill_ptraced_process not to
	 * wait for the child to report its death because there's
	 * nothing reasonable to do if that fails.
	 */

	while ((pid = waitpid(-1, NULL, WNOHANG)) > 0)
		os_kill_ptraced_process(pid, 0);

	abort();
}
Example #10
void *switch_to_tt(void *prev, void *next, void *last)
{
	struct task_struct *from, *to, *prev_sched;
	unsigned long flags;
	int err, vtalrm, alrm, prof, cpu;
	char c;
	/* jailing and SMP are incompatible, so this doesn't need to be 
	 * made per-cpu 
	 */
	static int reading;

	from = prev;
	to = next;

	to->thread.prev_sched = from;

	cpu = from->thread_info->cpu;
	if(cpu == 0)
		forward_interrupts(to->thread.mode.tt.extern_pid);
#ifdef CONFIG_SMP
	forward_ipi(cpu_data[cpu].ipi_pipe[0], to->thread.mode.tt.extern_pid);
#endif
	local_irq_save(flags);

	vtalrm = change_sig(SIGVTALRM, 0);
	alrm = change_sig(SIGALRM, 0);
	prof = change_sig(SIGPROF, 0);

	forward_pending_sigio(to->thread.mode.tt.extern_pid);

	c = 0;
	set_current(to);

	reading = 0;
	err = os_write_file(to->thread.mode.tt.switch_pipe[1], &c, sizeof(c));
	if(err != sizeof(c))
		panic("write of switch_pipe failed, err = %d", -err);

	reading = 1;
	if((from->exit_state == EXIT_ZOMBIE) || (from->exit_state == EXIT_DEAD))
		os_kill_process(os_getpid(), 0);

	err = os_read_file(from->thread.mode.tt.switch_pipe[0], &c, sizeof(c));
	if(err != sizeof(c))
		panic("read of switch_pipe failed, errno = %d", -err);

	/* If the process that we have just scheduled away from has exited,
	 * then it needs to be killed here.  The reason is that, even though
	 * it will kill itself when it next runs, that may be too late.  Its
	 * stack will be freed, possibly before then, and if that happens,
	 * we have a use-after-free situation.  So, it gets killed here
	 * in case it has not already killed itself.
	 */
	prev_sched = current->thread.prev_sched;
	if((prev_sched->exit_state == EXIT_ZOMBIE) ||
	   (prev_sched->exit_state == EXIT_DEAD))
		os_kill_ptraced_process(prev_sched->thread.mode.tt.extern_pid, 1);

	/* This works around a nasty race with 'jail'.  If we are switching
	 * between two threads of a threaded app and the incoming process 
	 * runs before the outgoing process reaches the read, and it makes
	 * it all the way out to userspace, then it will have write-protected 
	 * the outgoing process stack.  Then, when the outgoing process 
	 * returns from the write, it will segfault because it can no longer
	 * write its own stack.  So, in order to avoid that, the incoming 
	 * thread sits in a loop yielding until 'reading' is set.  This 
	 * isn't entirely safe, since there may be a reschedule from a timer
	 * happening between setting 'reading' and sleeping in read.  But,
	 * it should get a whole quantum in which to reach the read and sleep,
	 * which should be enough.
	 */

	if(jail){
		while(!reading) sched_yield();
	}

	change_sig(SIGVTALRM, vtalrm);
	change_sig(SIGALRM, alrm);
	change_sig(SIGPROF, prof);

	arch_switch();

	flush_tlb_all();
	local_irq_restore(flags);

	return(current->thread.prev_sched);
}