/**
 * Create a new execution context (task) that will run fct_ptr(arguments)
 * and return a pointer to it.  The task is handed to the scheduler; it is
 * NOT executed synchronously.
 *
 * fct_ptr    - function the task will execute (cast to function_t).
 * arguments  - opaque argument block passed through to the task function.
 * return_val - location the task's result is written to; initialized here
 *              to -1 as a "not finished yet" sentinel.
 *              NOTE(review): this assumes return_val is a non-NULL int* —
 *              confirm at all call sites.
 */
task_t* task_spawn(void* fct_ptr, void* arguments, void *return_val) {
    task_t* new_task;
    task_t* current_task = task_current();  /* NULL when called from main */
    ALLOCATE_TASK(new_task);  /* presumably allocates the task and its ucontext/stack — see macro */
    pthread_mutex_init(&new_task->lock, NULL);
    pthread_mutex_init(&new_task->running_lock, NULL);
    if (inside_main()) {
        debug("Spawn called from main.");
    } else {
        /* Spawned from a worker: record which task is the parent. */
        int parent_id = (current_task->parent) ? (current_task->parent->id) : 0;
        /* NOTE(review): sanity check — it is unclear from here how a
           negative parent id can arise; verify before relying on it. */
        if (parent_id < 0 ) exit(1);
        debug("Spawn called from a worker %d, parent %d", current_task->id, parent_id);
    }
    *((int*)return_val) = -1;  /* sentinel: task has not completed */
    new_task->arguments = (void*) arguments;
    new_task->function = (function_t) fct_ptr;
    new_task->result = return_val;
    new_task->status = STARTED;
    /* When the task function returns, control resumes at go_home.
       NOTE(review): uc_link is assigned BEFORE getcontext() below, and
       getcontext re-initializes the ucontext — confirm this assignment
       is not clobbered (or that ALLOCATE_TASK / getcontext ordering is
       intentional). */
    new_task->context->uc_link = &go_home;
    new_task->parent = current_task;
    if (!inside_main()) {
        task_inc_children_count(current_task);  /* parent tracks live children */
    }
    getcontext(new_task->context);
    /* Guard: only set up and enqueue once.  If execution re-enters here
       later via a context switch, status will no longer be STARTED. */
    if (new_task->status == STARTED) {
        makecontext(new_task->context, (void (*) (void)) sched_wrapper_function, 0);
        sched_add_task(new_task);
    }
    return new_task;
}
task_t* task_current() { if (inside_main()) { return 0; } else { scheduler_t* scheduler = sched_get(); return scheduler->current_task; } }
/**
 * Block until every task in execution_context[0..count-1] reaches
 * COMPLETED, then destroy all of them.  Always returns 0.
 *
 * Workers wait by yielding back to their scheduler so other tasks can
 * make progress; the main thread sleeps on the wake_up_main condition
 * variable and re-checks the statuses after each wakeup.
 */
int task_sync(task_t** execution_context, int count) {
    int i;
    int someone_not_done = 1;
    /* Poll: rescan the whole list until no task is still pending. */
    while (someone_not_done) {
        someone_not_done = 0;
        for (i = 0; i < count; i++) {
            if (execution_context[i]->status != COMPLETED) {
                //if (execution_context[i]->status != COMPLETED || execution_context[i]->result == NULL || *((int*)execution_context[i]->result) == -1) {
                someone_not_done = 1;
            }
        }
        if (someone_not_done) {
            if (!inside_main()) {
                /* Worker path: hand the CPU back to the scheduler. */
                debug("%d - Yielding...", sched_get()->id);
                sched_yield_current();
            } else {
                /* Main path: no scheduler to yield to, so sleep until a
                   worker signals.  Spurious wakeups are harmless here
                   because the outer loop re-checks every status. */
                debug("Main waiting");
                pthread_mutex_lock(&wake_up_main_lock);
                pthread_cond_wait(&wake_up_main, &wake_up_main_lock);
                pthread_mutex_unlock(&wake_up_main_lock);
                debug("Main woken up");
            }
        }
    }
    //if control is here, child tasks have finished
    //let's clean-up
    for (i = 0; i < count; i++) {
        task_t* task = execution_context[i];
        task_destroy(task);
    }
    if (inside_main()) {
        debug("Going outside of sync in main %d", (unsigned int ) pthread_self());
    } else {
        debug("Going outside of sync in task %d", task_current()->id);
    }
    return 0;
}
/* Enqueue a freshly spawned task.  The main thread may push straight onto
   the ready queue; a worker must hand the task to its scheduler context
   via an ADD_TASK action. */
void sched_add_task(task_t* new_task) {
    scheduler_t* sched = sched_get();

    if (inside_main()) {
        debug("Adding a task from main");
        queue_push(&sched->ready, new_task);
        return;
    }

    /* Worker path: stash the task and jump into the scheduler. */
    sched->new_task = new_task;
    sched->action = ADD_TASK;
    sched_invoke();
}
/**
 * Scheduler context body.  Control lands here every time a worker jumps
 * back into its scheduler (spawn, yield, task return, or startup).  It
 * dispatches on scheduler->action, selects the next task to run, and
 * setcontext()s into either that task or back into this loop.
 *
 * Must never execute on the main thread.
 */
void* sched_execute_action(void *arguments) {
    debug("Inside scheduler");
    if (inside_main()) {
        log_err("Main inside a scheduler context. Exiting...");
        exit(-1);
    }
    scheduler_t* scheduler = sched_get();
    //sched_assign_to_core(scheduler, sysconf(_SC_NPROCESSORS_ONLN));
    /* Re-entry point: every later setcontext(scheduler->context) resumes
       just after this call, so everything below runs once per
       scheduling decision. */
    getcontext(scheduler->context);
    if (has_program_ended()) {
        /* Tear down this worker: release the scheduler stack/context. */
        free(scheduler->context->uc_stack.ss_sp);
        free(scheduler->context);
        debug("%d - Program ended, attempting to exit",scheduler->id);
        pthread_exit(0);
    }
    /* Handle whatever request brought us back into the scheduler. */
    if (scheduler->action == ADD_TASK) {
        sched_handler_add_task(scheduler);
    } else if (scheduler->action == YIELD) {
        sched_handler_yield(scheduler);
    } else if (scheduler->action == RETURN_TASK) {
        sched_handler_return(scheduler);
    } else {
        /* No explicit action: take the next ready task, or try to steal
           one from another scheduler (up to 3 attempts). */
        scheduler->current_task = NULL;
        task_t* new_task = (task_t*) queue_pop(&scheduler->ready);
        if (new_task) {
            scheduler->current_task = new_task;
        } else {
            sched_handler_try_steal(scheduler, 3);
        }
    }
    /* Default destination: loop back into this scheduler context. */
    ucontext_t* next_context = scheduler->context;
    if (scheduler->current_task) {
        /* running_lock prevents two schedulers from resuming the same
           task; it stays held while the task runs.  NOTE(review):
           confirm the yield/return handlers release it. */
        pthread_mutex_lock(&scheduler->current_task->running_lock);
        debug("%d - Attempting to execute task %d", scheduler->id, scheduler->current_task->id);
        if (scheduler->current_task->status != COMPLETED) {
            next_context = scheduler->current_task->context;
        } else {
            /* Task already finished (e.g. stolen and completed elsewhere):
               drop the lock and fall back to the scheduler loop. */
            pthread_mutex_unlock(&scheduler->current_task->running_lock);
        }
    }
    setcontext(next_context);
    /* Unreachable: setcontext does not return on success. */
    return (void*) 0xbeef;
}
/*
 * Initialize the thread scheduler contexts: one scheduler_t (with its own
 * ucontext + stack) per worker thread, then start the worker threads and
 * set up the shared go_home return context.
 */
void sched_init(int workers) {
    debug("Initializing workers");
    /* Thread-specific key so each worker can find its own scheduler_t.
       NOTE(review): the destructor registered here is task_deinit —
       confirm it is meant to receive a scheduler pointer. */
    pthread_key_create(&scheduler_key, task_deinit);
    schedulers = (scheduler_t *) malloc(workers * sizeof(scheduler_t));
    int i;
    for (i = 0; i < workers; ++i) {
        debug("Creating thread %d", i);
        schedulers[i].context = (ucontext_t*) malloc(sizeof(ucontext_t));
        //allocate the stack for the ucontext
        ALLOCATE_STACK_UC(schedulers[i].context);
        //schedulers[i].ready = INIT_LIST();
        schedulers[i].action = IDLE;
        schedulers[i].id = i;
        //allocate the scheduler function
        schedulers[i].ready.top = 0;  /* empty ready queue */
        schedulers[i].new_task = NULL;
        schedulers[i].current_task = NULL;
        pthread_mutex_init(&schedulers[i].lock, NULL);
        pthread_mutex_init(&schedulers[i].ready.lock, NULL);
        /* Prime the context so entering it runs sched_execute_action. */
        getcontext(schedulers[i].context);
        makecontext(schedulers[i].context, (void (*) (void)) sched_execute_action, 0);
    }
    /* Tag the calling thread as "main" (id -1) through the TLS key.
       NOTE(review): &main_id points at a stack local of this function —
       verify nothing dereferences it after sched_init returns. */
    int main_id = -1;
    pthread_setspecific(scheduler_key,(void*) &main_id);
    for (i = 0; i < workers; ++i) {
        pthread_create(&schedulers[i].thread, NULL, thread_init, (void*) &schedulers[i]);
    }
    /* go_home: the context finished tasks return to (used as uc_link). */
    getcontext(&go_home);
    if (!inside_main()) {
        /* A worker task arriving here via go_home jumps back into its
           scheduler loop. */
        scheduler_t* scheduler = sched_get();
        if (!scheduler) {
            log_err("NO SCHEDULER");
        }
        debug("%d - Jumping to scheduler context", scheduler->id);
        setcontext(scheduler->context);
    } else {
        debug("Only main here");
    }
}
/**
 * Recurse n levels; at each level, check that the return address reported
 * by __builtin_return_address(0) lies inside the expected caller (main for
 * the first frame, recurse itself for every deeper frame).
 *
 * n        - remaining recursion depth (stops at 0).
 * is_first - non-zero only for the outermost call (made from main).
 */
__attribute__((noinline)) void recurse(int n, int is_first) {
    /* c.f. http://gcc.gnu.org/onlinedocs/gcc/Return-Address.html */
    uintptr_t ra = (uintptr_t) __builtin_return_address(0);
    /* BUG FIX: "%#x" expects unsigned int, but uintptr_t is wider on
       LP64 targets, and a mismatched printf specifier is undefined
       behavior.  Cast to unsigned long (which holds uintptr_t on all
       POSIX LP64/ILP32 ABIs) and print with "%#lx"; the fully portable
       alternative is "%" PRIxPTR from <inttypes.h>. */
    printf("ra: %#lx\n", (unsigned long) ra);
    if (is_first) {
        ASSERT(inside_main(ra), "ERROR: ra to main is off\n");
    } else {
        ASSERT(inside_recurse(ra), "ERROR: ra to recurse is off\n");
    }
    if (n != 0) {
        recurse(n - 1, 0);
        /* NOTE: this print statement also prevents this function
         * from tail recursing into itself.
         * On gcc this behavior can also be controlled using
         * -foptimize-sibling-calls */
        printf("recurse <- %d\n", n);
    }
}