Example #1
int kt_kernel_idle_task(void)
{
	tm_thread_raise_flag(current_thread, THREAD_KERNEL);
	kthread_create(&kthread_pager, "[kpager]", 0, __KT_pager, 0);
	strncpy((char *)current_process->command, "[kernel]", 128);
	/* wait until init has successfully executed, and then remap. */
	while(!(kernel_state_flags & KSF_HAVEEXECED)) {
		tm_schedule();
	}
	printk(1, "[kernel]: remapping lower memory with protection flags...\n");
	cpu_interrupt_set(0);
	for(addr_t addr = MEMMAP_KERNEL_START; addr < MEMMAP_KERNEL_END; addr += PAGE_SIZE)
	{
		mm_virtual_changeattr(addr, PAGE_PRESENT | PAGE_WRITE, PAGE_SIZE);
	}
	cpu_interrupt_set(1);
	/* Now enter the main idle loop, waiting to do periodic cleanup */
	printk(0, "[idle]: entering background loop\n");
	for(;;) {
		assert(!current_thread->held_locks);
		if(__current_cpu->work.count > 0)
			workqueue_dowork(&__current_cpu->work);
		else
			tm_schedule();
		/* status is only valid if waitpid actually reaped a child, so check the pid first. */
		int status;
		int pid = sys_waitpid(-1, &status, WNOHANG);
		if(pid > 0 && WIFSTOPPED(status)) {
			sys_kill(pid, SIGKILL);
		}
	}
}
Example #2
void __init_entry(void)
{
	/* the kernel doesn't have this mapping, so we have to create it here. */
	tm_thread_raise_flag(current_thread, THREAD_KERNEL);
	addr_t ret = mm_mmap(current_thread->usermode_stack_start, CONFIG_STACK_PAGES * PAGE_SIZE,
			PROT_READ | PROT_WRITE, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, 0, 0, 0);
	/* MAP_FIXED should hand back exactly the address we asked for */
	assert(ret == current_thread->usermode_stack_start);
	tm_thread_lower_flag(current_thread, THREAD_KERNEL);
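	/* back to a normal thread; jump to userspace and start init. */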
	tm_thread_user_mode_jump(user_mode_init);
}
Example #3
void tm_thread_exit(int code)
{
	current_thread->exit_code = code;
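	/* if we were being traced, release our reference to the tracer
	 * (tm_thread_lower_flag returns the old flags). */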
	if(tm_thread_lower_flag(current_thread, THREAD_PTRACED) & THREAD_PTRACED) {
		assert(current_thread->tracer);
		tm_thread_put(current_thread->tracer);
		current_thread->tracer = 0;
	}
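	/* flag the thread as exiting; the teardown itself happens later (see tm_thread_do_exit). */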
	tm_thread_raise_flag(current_thread, THREAD_EXIT);
}
Example #4
void ticker_tick(struct ticker *ticker, uint64_t microseconds)
{
	ticker->tick += microseconds;
	uint64_t key;
	void *data;
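	/* peek at the soonest timeout without removing it; if it has already
	 * expired, flag this thread to process the ticker work. */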
	if(heap_peek(&ticker->heap, &key, &data) == 0) {
		if(key < ticker->tick) {
			tm_thread_raise_flag(current_thread, THREAD_TICKER_DOWORK);
		}
	}
}
Example #5
static inline void __setup_signal_handler(struct registers *regs)
{
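	/* sanity check: a thread marked THREAD_SIGNALED must carry a signal number. */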
	if((current_thread->flags & THREAD_SIGNALED) && !current_thread->signal)
		PANIC(0, "Thread is signaled with null signal.", EINVAL);
	if(current_thread->signal && !(current_thread->flags & THREAD_SIGNALED))
		tm_thread_raise_flag(current_thread, THREAD_SCHEDULE);
	if(!current_thread->signal || !(current_thread->flags & THREAD_SIGNALED))
		return;
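	/* both the signal and THREAD_SIGNALED are set: build the userspace signal frame. */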
	struct sigaction *sa = &current_process->signal_act[current_thread->signal];
	arch_tm_userspace_signal_initializer(regs, sa);
	tm_thread_lower_flag(current_thread, THREAD_SIGNALED);
	current_thread->signal = 0;
}
Example #6
void tm_thread_exit_system(long sys, long ret)
{
	current_thread->system = 0;
	/* ptrace: if the tracer asked to stop on syscall exit, record the syscall
	 * and its return value, deliver SIGTRAP, and reschedule. */
	if((current_thread->flags & THREAD_PTRACED) && (current_thread->tracee_flags & TRACEE_STOPON_SYSCALL)) {
		current_thread->tracee_flags &= ~TRACEE_STOPON_SYSCALL;
		current_thread->orig_syscall = sys;
		current_thread->syscall_return = ret;
		tm_signal_send_thread(current_thread, SIGTRAP);
		tm_schedule();
	}

	/* a signal that arrived while we were inside the syscall was deferred until
	 * now; raise the schedule flag so it gets handled on the way out. */
	if(tm_thread_got_signal(current_thread))
		tm_thread_raise_flag(current_thread, THREAD_SCHEDULE);
}
Example #7
void tm_thread_do_exit(void)
{
	assert(current_thread->held_locks == 0);
	assert(current_thread->blocklist == 0);

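	/* queue our own destruction as deferred work; a thread cannot free itself
	 * while it is still running. */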
	struct async_call *thread_cleanup_call = async_call_create(&current_thread->cleanup_call, 0, 
							tm_thread_destroy, (unsigned long)current_thread, 0);

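	/* cancel any pending alarm; if the timeout was still queued, drop the
	 * thread reference it held. */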
	struct ticker *ticker = (void *)atomic_exchange(&current_thread->alarm_ticker, NULL);
	if(ticker) {
		if(ticker_delete(ticker, &current_thread->alarm_timeout) != -ENOENT)
			tm_thread_put(current_thread);
	}

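	/* detach from the owning process; the last thread out also tears down the process. */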
	linkedlist_remove(&current_process->threadlist, &current_thread->pnode);

	tm_thread_remove_kerfs_entries(current_thread);
	atomic_fetch_sub_explicit(&running_threads, 1, memory_order_relaxed);
	if(atomic_fetch_sub(&current_process->thread_count, 1) == 1) {
		atomic_fetch_sub_explicit(&running_processes, 1, memory_order_relaxed);
		tm_process_remove_kerfs_entries(current_process);
		tm_process_exit(current_thread->exit_code);
	}

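	/* no migration or preemption from here on: pull ourselves off this CPU's run queue. */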
	cpu_disable_preemption();

	assert(!current_thread->blocklist);
	tqueue_remove(current_thread->cpu->active_queue, &current_thread->activenode);
	atomic_fetch_sub_explicit(&current_thread->cpu->numtasks, 1, memory_order_relaxed);
	tm_thread_raise_flag(current_thread, THREAD_SCHEDULE);
	current_thread->state = THREADSTATE_DEAD;
	
	workqueue_insert(&__current_cpu->work, thread_cleanup_call);
	cpu_interrupt_set(0); /* don't schedule away until we get back
							 to the syscall handler! */
	cpu_enable_preemption();
}