void tm_thread_do_exit(void)
{
	assert(current_thread->held_locks == 0);
	assert(current_thread->blocklist == 0);

	/* Final destruction is deferred: the thread cannot free its own stack
	 * and structures while it is still running on them. */
	struct async_call *thread_cleanup_call = async_call_create(
			&current_thread->cleanup_call, 0,
			tm_thread_destroy, (unsigned long)current_thread, 0);

	/* Cancel any pending alarm; if it was still queued, drop the reference
	 * the alarm held on this thread. */
	struct ticker *ticker = (void *)atomic_exchange(&current_thread->alarm_ticker, NULL);
	if(ticker) {
		if(ticker_delete(ticker, &current_thread->alarm_timeout) != -ENOENT)
			tm_thread_put(current_thread);
	}

	linkedlist_remove(&current_process->threadlist, &current_thread->pnode);
	tm_thread_remove_kerfs_entries(current_thread);
	atomic_fetch_sub_explicit(&running_threads, 1, memory_order_relaxed);

	/* The last thread out also tears down the process. */
	if(atomic_fetch_sub(&current_process->thread_count, 1) == 1) {
		atomic_fetch_sub_explicit(&running_processes, 1, memory_order_relaxed);
		tm_process_remove_kerfs_entries(current_process);
		tm_process_exit(current_thread->exit_code);
	}

	cpu_disable_preemption();
	assert(!current_thread->blocklist);

	/* Pull the thread off its CPU's run queue, mark it dead, and hand the
	 * cleanup call to that CPU's work queue to run later. */
	tqueue_remove(current_thread->cpu->active_queue, &current_thread->activenode);
	atomic_fetch_sub_explicit(&current_thread->cpu->numtasks, 1, memory_order_relaxed);
	tm_thread_raise_flag(current_thread, THREAD_SCHEDULE);
	current_thread->state = THREADSTATE_DEAD;
	workqueue_insert(&__current_cpu->work, thread_cleanup_call);

	cpu_interrupt_set(0); /* don't schedule away until we get back to the syscall handler! */
	cpu_enable_preemption();
}
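The dead thread's structures are reclaimed later: tm_thread_destroy is wrapped in an async_call and pushed onto the CPU's work queue rather than called directly, so it runs from a different context once the scheduler has switched away. Below is a minimal sketch of what such a drain loop could look like; workqueue_pop() and async_call_execute() are illustrative helper names assumed for this example, not the project's actual API.

/* Illustrative only: how a per-CPU work queue might drain deferred calls.
 * workqueue_pop() and async_call_execute() are assumed helper names. */
void cpu_drain_work(struct cpu *cpu)
{
	struct async_call *call;
	while((call = workqueue_pop(&cpu->work)) != NULL) {
		/* For an exited thread this ends up invoking
		 * tm_thread_destroy(thread), safely off the dead thread's stack. */
		async_call_execute(call);
	}
}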
void spinlock_release(struct spinlock *s)
{
	/* Release ordering is needed here: it makes every write performed inside
	 * the critical section visible before the lock reads as free. A relaxed
	 * clear would allow those writes to be reordered past the unlock. */
	atomic_flag_clear_explicit(&s->flag, memory_order_release);
	cpu_enable_preemption();
}
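For reference, here is a minimal, self-contained C11 sketch of the acquire side that this release pairs with. It omits the kernel's preemption handling and back-off, and uses illustrative demo_* names rather than the actual lock API.

#include <stdatomic.h>

struct demo_spinlock {
	atomic_flag flag;
};

#define DEMO_SPINLOCK_INIT { ATOMIC_FLAG_INIT }

/* Spin until the flag was previously clear. Acquire ordering on the
 * successful test-and-set pairs with the release ordering of the clear in
 * unlock, so critical-section accesses cannot move outside the lock. */
static void demo_spin_lock(struct demo_spinlock *s)
{
	while(atomic_flag_test_and_set_explicit(&s->flag, memory_order_acquire))
		; /* busy-wait */
}

static void demo_spin_unlock(struct demo_spinlock *s)
{
	atomic_flag_clear_explicit(&s->flag, memory_order_release);
}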