Example no. 1
0
/*
 * Lua binding: open (or create) a named shared workqueue.
 *
 * Lua args:  1 = queue name (string, required)
 *            2 = queue size (number, optional; defaults to DEFAULT_WORKQUEUE_SIZE)
 * Returns:   1 value — a userdata wrapping a context_t, with the
 *            "ipc.workqueue" metatable attached.
 *
 * The global registry is protected by workqueue_mutex; the per-queue
 * mutex is created recursive so queue operations may re-enter it.
 */
int workqueue_open(lua_State *L) {
   workqueue_one_time_init();
   const char *name = luaL_checkstring(L, 1);
   size_t size = luaL_optnumber(L, 2, DEFAULT_WORKQUEUE_SIZE);
   pthread_mutex_lock(&workqueue_mutex);
   workqueue_t *workqueue = workqueue_find(name);
   if (!workqueue) {
      /* First opener: allocate and register a new queue. */
      workqueue = (workqueue_t *)calloc(1, sizeof(workqueue_t));
      if (!workqueue) {
         /* Unlock before raising: luaL_error longjmps out of this frame. */
         pthread_mutex_unlock(&workqueue_mutex);
         return luaL_error(L, "workqueue: out of memory");
      }
      workqueue->refcount = 1;
      workqueue->name = strdup(name);
      if (!workqueue->name) {
         free(workqueue);
         pthread_mutex_unlock(&workqueue_mutex);
         return luaL_error(L, "workqueue: out of memory");
      }
      workqueue_init_queue(&workqueue->questions, size);
      workqueue_init_queue(&workqueue->answers, size);
      workqueue->owner_thread = pthread_self();
      /* Recursive mutex: queue calls may nest lock acquisition. */
      pthread_mutexattr_t mutex_attr;
      pthread_mutexattr_init(&mutex_attr);
      pthread_mutexattr_settype(&mutex_attr, PTHREAD_MUTEX_RECURSIVE);
      pthread_mutex_init(&workqueue->mutex, &mutex_attr);
      /* Attr object is only needed at init time; release it (POSIX
         allows implementations to hold resources in the attr). */
      pthread_mutexattr_destroy(&mutex_attr);
      workqueue_insert(workqueue);
   }
   pthread_mutex_unlock(&workqueue_mutex);
   /* Hand the queue to Lua via a userdata context. */
   context_t *context = (context_t *)lua_newuserdata(L, sizeof(context_t));
   context->workqueue = workqueue;
   luaL_getmetatable(L, "ipc.workqueue");
   lua_setmetatable(L, -2);
   return 1;
}
Example no. 2
0
/* Tear down the current thread on exit: detach it from its process,
 * drop global/per-process counters, pull it off the CPU run queue, and
 * defer the final destruction to the CPU's work queue (the thread can't
 * free its own structures while still running on them).
 * NOTE(review): statement order here is load-bearing — counters, queue
 * removal, state change, and the work-queue insert must happen in this
 * sequence; do not reorder. */
void tm_thread_do_exit(void)
{
	/* An exiting thread must not hold locks or be on a blocklist. */
	assert(current_thread->held_locks == 0);
	assert(current_thread->blocklist == 0);

	/* Prepare the deferred destructor; it runs later from the CPU work
	 * queue, after this thread has stopped executing. */
	struct async_call *thread_cleanup_call = async_call_create(&current_thread->cleanup_call, 0, 
							tm_thread_destroy, (unsigned long)current_thread, 0);

	/* Atomically claim any pending alarm ticker so no one else fires it. */
	struct ticker *ticker = (void *)atomic_exchange(&current_thread->alarm_ticker, NULL);
	if(ticker) {
		/* -ENOENT means the timeout already fired/was removed, in which
		 * case the reference was presumably dropped elsewhere — only put
		 * the thread ref if we actually deleted the entry. */
		if(ticker_delete(ticker, &current_thread->alarm_timeout) != -ENOENT)
			tm_thread_put(current_thread);
	}

	/* Unlink from the owning process's thread list. */
	linkedlist_remove(&current_process->threadlist, &current_thread->pnode);

	tm_thread_remove_kerfs_entries(current_thread);
	atomic_fetch_sub_explicit(&running_threads, 1, memory_order_relaxed);
	/* If we were the last thread, the whole process exits too. */
	if(atomic_fetch_sub(&current_process->thread_count, 1) == 1) {
		atomic_fetch_sub_explicit(&running_processes, 1, memory_order_relaxed);
		tm_process_remove_kerfs_entries(current_process);
		tm_process_exit(current_thread->exit_code);
	}

	/* From here on we must not be preempted while in a half-removed state. */
	cpu_disable_preemption();

	assert(!current_thread->blocklist);
	/* Remove ourselves from this CPU's active queue and its task count. */
	tqueue_remove(current_thread->cpu->active_queue, &current_thread->activenode);
	atomic_fetch_sub_explicit(&current_thread->cpu->numtasks, 1, memory_order_relaxed);
	/* Ask the scheduler to pick a new thread, and mark us dead. */
	tm_thread_raise_flag(current_thread, THREAD_SCHEDULE);
	current_thread->state = THREADSTATE_DEAD;
	
	/* Queue the deferred destructor on this CPU's work queue. */
	workqueue_insert(&__current_cpu->work, thread_cleanup_call);
	cpu_interrupt_set(0); /* don't schedule away until we get back
							 to the syscall handler! */
	cpu_enable_preemption();
}
Example no. 3
0
/* Defer `call` by appending it to the current CPU's work queue.
 * The cpu_get_current/cpu_put_current pair brackets access to the
 * per-CPU structure. */
void cpu_interrupt_schedule_stage2(struct async_call *call)
{
	struct cpu *cpu = cpu_get_current();
	workqueue_insert(&cpu->work, call);
	cpu_put_current(cpu);
}