Example #1
void spinlock_acquire(struct spinlock *s)
{
	/* Keep the holder from being preempted while it spins and while it
	 * holds the lock. */
	cpu_disable_preemption();
	/* memory_order_acquire (not relaxed) is needed here so that accesses
	 * inside the critical section cannot be reordered before the lock is
	 * actually held. */
	while(atomic_flag_test_and_set_explicit(&s->flag, memory_order_acquire)) {
		arch_cpu_pause(); /* CPU relax hint while spinning */
	}
}
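For context, the acquire above only makes sense paired with a release that clears the flag with memory_order_release. Below is a minimal self-contained sketch of that pair, assuming a hosted C11 environment; spinlock_release and the no-op cpu_*/arch_* stubs are illustrative assumptions, not part of the original listing.

#include <stdatomic.h>
#include <stdio.h>

struct spinlock {
	atomic_flag flag;
};

static void cpu_disable_preemption(void) { /* kernel hook; no-op in this sketch */ }
static void cpu_enable_preemption(void)  { /* kernel hook; no-op in this sketch */ }
static void arch_cpu_pause(void)         { /* e.g. the x86 PAUSE hint */ }

void spinlock_acquire(struct spinlock *s)
{
	cpu_disable_preemption();
	/* acquire ordering: critical-section accesses stay after this */
	while(atomic_flag_test_and_set_explicit(&s->flag, memory_order_acquire))
		arch_cpu_pause();
}

void spinlock_release(struct spinlock *s)
{
	/* release ordering: critical-section writes become visible before
	 * the lock is observed as free */
	atomic_flag_clear_explicit(&s->flag, memory_order_release);
	cpu_enable_preemption();
}

int main(void)
{
	struct spinlock lock = { ATOMIC_FLAG_INIT };
	spinlock_acquire(&lock);
	puts("in critical section");
	spinlock_release(&lock);
	return 0;
}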
Example #2
void tm_thread_do_exit(void)
{
	assert(current_thread->held_locks == 0);
	assert(current_thread->blocklist == 0);

	/* A thread cannot free its own stack while still running on it, so
	 * the final destroy is packaged as a deferred call to run later. */
	struct async_call *thread_cleanup_call = async_call_create(
			&current_thread->cleanup_call, 0,
			tm_thread_destroy, (unsigned long)current_thread, 0);

	/* Atomically detach any pending alarm; only drop the alarm's
	 * reference if the timeout entry was still present. */
	struct ticker *ticker = (void *)atomic_exchange(&current_thread->alarm_ticker, NULL);
	if(ticker) {
		if(ticker_delete(ticker, &current_thread->alarm_timeout) != -ENOENT)
			tm_thread_put(current_thread);
	}

	linkedlist_remove(&current_process->threadlist, &current_thread->pnode);

	tm_thread_remove_kerfs_entries(current_thread);
	atomic_fetch_sub_explicit(&running_threads, 1, memory_order_relaxed);
	/* If this was the process's last thread, tear the process down too. */
	if(atomic_fetch_sub(&current_process->thread_count, 1) == 1) {
		atomic_fetch_sub_explicit(&running_processes, 1, memory_order_relaxed);
		tm_process_remove_kerfs_entries(current_process);
		tm_process_exit(current_thread->exit_code);
	}

	cpu_disable_preemption();

	assert(!current_thread->blocklist);
	/* Pull the thread off this CPU's run queue, mark it dead, and raise
	 * THREAD_SCHEDULE to request a reschedule on the way out. */
	tqueue_remove(current_thread->cpu->active_queue, &current_thread->activenode);
	atomic_fetch_sub_explicit(&current_thread->cpu->numtasks, 1, memory_order_relaxed);
	tm_thread_raise_flag(current_thread, THREAD_SCHEDULE);
	current_thread->state = THREADSTATE_DEAD;

	/* Hand the deferred destroy to this CPU's work queue. */
	workqueue_insert(&__current_cpu->work, thread_cleanup_call);
	cpu_interrupt_set(0); /* don't schedule away until we get back
	                         to the syscall handler! */
	cpu_enable_preemption();
}
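The key move above is that the exiting thread never frees itself: it wraps tm_thread_destroy in an async call and hands it to the CPU's work queue, so the actual teardown runs later from a context that is not using the dying thread's stack. Below is a minimal self-contained sketch of that deferred-cleanup pattern; the struct layouts and the queue implementation here are illustrative assumptions, not the kernel's actual API.

#include <stdio.h>

/* A deferred call: a function plus one argument, queued for later. */
struct async_call {
	void (*func)(unsigned long);
	unsigned long arg;
	struct async_call *next;
};

struct workqueue {
	struct async_call *head, *tail;
};

static void workqueue_insert(struct workqueue *wq, struct async_call *call)
{
	call->next = NULL;
	if(wq->tail)
		wq->tail->next = call;
	else
		wq->head = call;
	wq->tail = call;
}

/* Run from a safe context (e.g. an idle loop), never from the dying
 * thread itself. */
static void workqueue_drain(struct workqueue *wq)
{
	struct async_call *call;
	while((call = wq->head)) {
		wq->head = call->next;
		if(!wq->head)
			wq->tail = NULL;
		call->func(call->arg);
	}
}

static void thread_destroy(unsigned long arg)
{
	printf("destroying thread %lu (its stack can be freed safely here)\n", arg);
}

int main(void)
{
	struct workqueue wq = { NULL, NULL };
	struct async_call cleanup = { thread_destroy, 42, NULL };

	/* The "exiting thread" defers its own teardown... */
	workqueue_insert(&wq, &cleanup);

	/* ...and some later context actually performs it. */
	workqueue_drain(&wq);
	return 0;
}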