Example #1
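/*
 * Runs as the first code of a newly created kernel thread: it waits to be
 * scheduled, completes the context switch with schedule_tail(), then either
 * enters userspace (if the thread exec'd a process) or exits.
 */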
void new_thread_handler(int sig)
{
	int (*fn)(void *), n;
	void *arg;

	fn = current->thread.request.u.thread.proc;
	arg = current->thread.request.u.thread.arg;
	change_sig(SIGUSR1, 1);
	thread_wait(&current->thread.mode.skas.switch_buf, 
		    current->thread.mode.skas.fork_buf);

	if(current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	/* The return value is 1 if the kernel thread execs a process,
	 * 0 if it just exits
	 */
	n = run_kernel_thread(fn, arg, &current->thread.exec_buf);
	if(n == 1){
		/* Handle any immediate reschedules or signals */
		interrupt_end();
		userspace(&current->thread.regs.regs);
	}
	else do_exit(0);
}
Example #2
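/*
 * Completes the switch to a freshly forked kernel thread: the thread function
 * and its argument were stashed in the saved registers (si/di), and the
 * thread exits with whatever value that function returns.
 */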
void kernel_thread_start(struct task_struct *p)
{
	struct pt_regs *r = &current->thread.regs;
	int (*func)(void *) = (int (*)(void *))r->si;

	schedule_tail(p);
	do_exit(func((void *)r->di));
}
Example #3
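/*
 * First code executed by a new vCPU after a context switch: finish the switch
 * with schedule_tail(), then jump to the idle loop or to the 32-/64-bit
 * return-to-guest path on a fresh stack.
 */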
static void continue_new_vcpu(struct vcpu *prev)
{
    schedule_tail(prev);

    if ( is_idle_vcpu(current) )
        reset_stack_and_jump(idle_loop);
    else if ( is_32bit_domain(current->domain) )
        /* check_wakeup_from_wait(); */
        reset_stack_and_jump(return_to_new_vcpu32);
    else
        /* check_wakeup_from_wait(); */
        reset_stack_and_jump(return_to_new_vcpu64);
}
Example #4
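/*
 * Runs in the child after a fork: wait to be scheduled, flush stale mappings,
 * finish the context switch with schedule_tail(), and drop into userspace.
 */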
void fork_handler(int sig)
{
	change_sig(SIGUSR1, 1);
	thread_wait(&current->thread.mode.skas.switch_buf,
		    current->thread.mode.skas.fork_buf);

	force_flush_all();
	if(current->thread.prev_sched == NULL)
		panic("blech");

	schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	userspace(&current->thread.regs.regs);
}
Example #5
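/*
 * Switch execution from prev to next with interrupts disabled; the low-level
 * register/stack switch is done by __context_switch(), whose return value is
 * handed to schedule_tail() to finish the switch on the new vCPU.
 */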
void context_switch(struct vcpu *prev, struct vcpu *next)
{
    ASSERT(local_irq_is_enabled());
    ASSERT(prev != next);
    ASSERT(cpumask_empty(next->vcpu_dirty_cpumask));

    if ( prev != next )
        update_runstate_area(prev);

    local_irq_disable();

    set_current(next);

    prev = __context_switch(prev, next);

    schedule_tail(prev);
}
Example #6
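/*
 * Child side of fork, as in the previous example, but with an additional
 * interrupt_end() call to handle any pending reschedule or signals before
 * returning to userspace.
 */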
void fork_handler(int sig)
{
	change_sig(SIGUSR1, 1);
	thread_wait(&current->thread.mode.skas.switch_buf,
		    current->thread.mode.skas.fork_buf);

	force_flush_all();
	if(current->thread.prev_sched == NULL)
		panic("blech");

	schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	/* Handle any immediate reschedules or signals */
	interrupt_end();
	userspace(&current->thread.regs.regs);
}
Example #7
/* Called magically, see new_thread_handler above */
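/*
 * Child side of fork in skas mode: finish the context switch, do the
 * arch-specific part of the switch, then return to userspace.
 */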
void fork_handler(void)
{
	force_flush_all();
	if(current->thread.prev_sched == NULL)
		panic("blech");

	schedule_tail(current->thread.prev_sched);

	/* XXX: if interrupt_end() calls schedule, this call to
	 * arch_switch_to_skas isn't needed. We could want to apply this to
	 * improve performance. -bb */
	arch_switch_to_skas(current->thread.prev_sched, current);

	current->thread.prev_sched = NULL;

/* Handle any immediate reschedules or signals */
	interrupt_end();

	userspace(&current->thread.regs.regs);
}
Example #8
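/*
 * Sets up a newly forked kernel thread: block the timer/IO signals in the
 * saved signal context, wait on the switch pipe until scheduled, finish the
 * context switch, re-enable signals and the timer, then run the requested
 * thread function.
 */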
static void new_thread_handler(int sig)
{
	unsigned long disable;
	int (*fn)(void *);
	void *arg;

	fn = current->thread.request.u.thread.proc;
	arg = current->thread.request.u.thread.arg;

	UPT_SC(&current->thread.regs.regs) = (void *) (&sig + 1);
	disable = (1 << (SIGVTALRM - 1)) | (1 << (SIGALRM - 1)) |
		(1 << (SIGIO - 1)) | (1 << (SIGPROF - 1));
	SC_SIGMASK(UPT_SC(&current->thread.regs.regs)) &= ~disable;

	suspend_new_thread(current->thread.mode.tt.switch_pipe[0]);

	force_flush_all();
	if(current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	init_new_thread_signals(1);
	enable_timer();
	free_page(current->thread.temp_stack);
	set_cmdline("(kernel thread)");

	change_sig(SIGUSR1, 1);
	change_sig(SIGVTALRM, 1);
	change_sig(SIGPROF, 1);
	local_irq_enable();
	if(!run_kernel_thread(fn, arg, &current->thread.exec_buf))
		do_exit(0);

	/* XXX No set_user_mode here because a newly execed process will
	 * immediately segfault on its non-existent IP, coming straight back
	 * to the signal handler, which will call set_user_mode on its way
	 * out.  This should probably change since it's confusing.
	 */
}
Example #9
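/*
 * Runs in the child after fork: wait to be scheduled, finish the context
 * switch, re-protect kernel memory if the child has its own address space,
 * free the temporary stack, and switch to user mode.
 */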
void finish_fork_handler(int sig)
{
	UPT_SC(&current->thread.regs.regs) = (void *) (&sig + 1);
	suspend_new_thread(current->thread.mode.tt.switch_pipe[0]);

	force_flush_all();
	if(current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	enable_timer();
	change_sig(SIGVTALRM, 1);
	local_irq_enable();
	if(current->mm != current->parent->mm)
		protect_memory(uml_reserved, high_physmem - uml_reserved, 1, 
			       1, 0, 1);
	task_protections((unsigned long) current_thread);

	free_page(current->thread.temp_stack);
	local_irq_disable();
	change_sig(SIGUSR1, 0);
	set_user_mode(current);
}
Example #10
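/*
 * Thread-based context switch: every task runs on its own host thread, and
 * __switch_to() hands control over by releasing the next task's scheduling
 * semaphore and then blocking on its own.
 */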
struct task_struct *__switch_to(struct task_struct *prev,
				struct task_struct *next)
{
	struct thread_info *_prev = task_thread_info(prev);
	struct thread_info *_next = task_thread_info(next);
	/*
	 * schedule() expects the return value of this function to be the task
	 * that we switched away from. Returning prev is not going to work
	 * because we would actually return the task that was scheduled before
	 * the task we are waking up, and not the task we switched away from,
	 * e.g.:
	 *
	 * swapper -> init: saved prev on swapper stack is swapper
	 * init -> ksoftirqd0: saved prev on init stack is init
	 * ksoftirqd0 -> swapper: returned prev is swapper
	 */
	static struct task_struct *abs_prev = &init_task;
	/*
	 * We need to free the thread_info structure in free_thread_info to
	 * avoid races between the dying thread and other threads. We also need
	 * to clean up sched_sem and signal to the prev thread that it needs to
	 * exit, and we use this stack variable to pass this info.
	 */
	struct thread_exit_info ei = {
		.dead = false,
		.sched_sem = _prev->sched_sem,
	};

	_current_thread_info = task_thread_info(next);
	_next->prev_sched = prev;
	abs_prev = prev;
	_prev->exit_info = &ei;

	lkl_ops->sem_up(_next->sched_sem);
	/* _next may be already gone so use ei instead */
	lkl_ops->sem_down(ei.sched_sem);

	if (ei.dead) {
		lkl_ops->sem_free(ei.sched_sem);
		threads_counter_dec();
		lkl_ops->thread_exit();
	}

	_prev->exit_info = NULL;

	return abs_prev;
}

struct thread_bootstrap_arg {
	struct thread_info *ti;
	int (*f)(void *);
	void *arg;
};

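/*
 * Entry point of every host thread created by copy_thread(): wait until the
 * scheduler switches to this task, complete the switch with schedule_tail(),
 * then run the requested thread function and exit.
 */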
static void thread_bootstrap(void *_tba)
{
	struct thread_bootstrap_arg *tba = (struct thread_bootstrap_arg *)_tba;
	struct thread_info *ti = tba->ti;
	int (*f)(void *) = tba->f;
	void *arg = tba->arg;

	lkl_ops->sem_down(ti->sched_sem);
	kfree(tba);
	if (ti->prev_sched)
		schedule_tail(ti->prev_sched);

	f(arg);
	do_exit(0);
}

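/*
 * Create the host thread backing a new kernel task; the thread function and
 * its argument are passed in via the esp and unused parameters.
 */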
int copy_thread(unsigned long clone_flags, unsigned long esp,
		unsigned long unused, struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	struct thread_bootstrap_arg *tba;
	int ret;

	tba = kmalloc(sizeof(*tba), GFP_KERNEL);
	if (!tba)
		return -ENOMEM;

	tba->f = (int (*)(void *))esp;
	tba->arg = (void *)unused;
	tba->ti = ti;

	ret = lkl_ops->thread_create(thread_bootstrap, tba);
	if (ret) {
		kfree(tba);
		return -ENOMEM;
	}

	threads_counter_inc();

	return 0;
}

void show_stack(struct task_struct *task, unsigned long *esp)
{
}

static inline void pr_early(const char *str)
{
	if (lkl_ops->print)
		lkl_ops->print(str, strlen(str));
}

/**
 * This is called before the kernel initializes, so no kernel calls (including
 * printk) can be made yet.
 */
int threads_init(void)
{
	struct thread_info *ti = &init_thread_union.thread_info;
	int ret = 0;

	ti->exit_info = NULL;
	ti->prev_sched = NULL;

	ti->sched_sem = lkl_ops->sem_alloc(0);
	if (!ti->sched_sem) {
		pr_early("lkl: failed to allocate init schedule semaphore\n");
		ret = -ENOMEM;
		goto out;
	}

	threads_counter_lock = lkl_ops->sem_alloc(1);
	if (!threads_counter_lock) {
		pr_early("lkl: failed to alllocate threads counter lock\n");
		ret = -ENOMEM;
		goto out_free_init_sched_sem;
	}

	return 0;

out_free_init_sched_sem:
	lkl_ops->sem_free(ti->sched_sem);

out:
	return ret;
}

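/*
 * Called when shutting down: ask every remaining task's host thread to exit
 * and spin until the thread counter drops to zero.
 */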
void threads_cleanup(void)
{
	struct task_struct *p;

	for_each_process(p) {
		struct thread_info *ti = task_thread_info(p);

		if (p->pid != 1)
			WARN(!(p->flags & PF_KTHREAD),
			     "non kernel thread task %p\n", p->comm);
		WARN(p->state == TASK_RUNNING,
		     "thread %s still running while halting\n", p->comm);

		kill_thread(ti->exit_info);
	}

	while (threads_counter_get())
		;

	lkl_ops->sem_free(init_thread_union.thread_info.sched_sem);
	lkl_ops->sem_free(threads_counter_lock);
}