/* __ompc_add_task_to_pool_default:
 * Adds a task to the task pool. The task will be added to the current
 * thread's queue.
 */
int __ompc_add_task_to_pool_default(omp_task_pool_t *pool, omp_task_t *task)
{
  int success;
  int myid = __omp_myid;
  omp_task_queue_level_t *per_thread;

  Is_True(pool != NULL,
          ("__ompc_add_task_to_pool: task pool is uninitialized"));
  Is_True(task != NULL,
          ("__ompc_add_task_to_pool: tried to add NULL task to pool"));

  /* num_pending_tasks tracks not just tasks entered into the task pool, but
   * also tasks marked as deferred that could not fit into the task pool */
  if (__ompc_atomic_inc(&pool->num_pending_tasks) == 1) {
    pthread_mutex_lock(&pool->pool_lock);
    pthread_cond_broadcast(&pool->pool_cond);
    pthread_mutex_unlock(&pool->pool_lock);
  }

  per_thread = &pool->level[PER_THREAD];

  if (__ompc_task_is_tied(task))
    /* For tied tasks, we don't use the task_queue API. We explicitly put to
     * the tail */
    success = __ompc_queue_put_tail(
        &per_thread->task_queue[TIED_IDX(myid)], task);
  else
    success = __ompc_task_queue_put(
        &per_thread->task_queue[UNTIED_IDX(myid)], task);

  return success;
}
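/* The wake-up above relies on a common pattern: only the 0 -> 1 transition of
 * num_pending_tasks broadcasts on pool_cond, so sleeping threads are signaled
 * exactly when the pool goes from empty to non-empty, and steady-state
 * submissions pay no locking cost. A minimal self-contained sketch of the
 * same pattern (hypothetical sketch_* names, GCC atomic builtin standing in
 * for __ompc_atomic_inc; kept for reference, not compiled): */
#if 0
#include <pthread.h>

typedef struct {
  pthread_mutex_t lock;
  pthread_cond_t  cond;
  volatile int    num_pending;
} sketch_pool_t;

/* producer side: wake sleepers only on the empty -> non-empty transition */
static void sketch_add(sketch_pool_t *p)
{
  if (__sync_add_and_fetch(&p->num_pending, 1) == 1) {
    pthread_mutex_lock(&p->lock);
    pthread_cond_broadcast(&p->cond);
    pthread_mutex_unlock(&p->lock);
  }
}

/* consumer side: sleep until the pool reports pending work; checking the
 * counter under the lock closes the race with the producer's broadcast */
static void sketch_wait(sketch_pool_t *p)
{
  pthread_mutex_lock(&p->lock);
  while (p->num_pending == 0)
    pthread_cond_wait(&p->cond, &p->lock);
  pthread_mutex_unlock(&p->lock);
}
#endif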
/* __ompc_remove_task_from_pool_default:
 * Takes a task from the task pool. First tries to get a task from the current
 * thread's task queue. If that doesn't work, then it will attempt to steal a
 * task from another task queue (so long as there are no other tasks, not in a
 * barrier, that are tied to the current thread).
 */
omp_task_t *__ompc_remove_task_from_pool_default(omp_task_pool_t *pool)
{
  omp_task_t *task, *current_task;
  omp_team_t *team;
  omp_v_thread_t *current_thread;
  omp_queue_t *my_queue;
  omp_queue_t *victim_queue;
  omp_task_queue_level_t *per_thread;
  int myid = __omp_myid;

  Is_True(pool != NULL,
          ("__ompc_remove_task_from_pool: task pool is uninitialized"));

  current_task = __omp_current_task;
  current_thread = __omp_current_v_thread;
  per_thread = &pool->level[PER_THREAD];

  /* We get only from the tail for tied tasks. This is necessary to guarantee
   * that tied tasks are only scheduled if they are descendants of every
   * suspended tied task not at a barrier */
  task = __ompc_queue_get_tail(&per_thread->task_queue[TIED_IDX(myid)]);

  /* for untied tasks, we can get from the head or tail, depending on what
   * O64_OMP_TASK_QUEUE is set to */
  if (task == NULL)
    task = __ompc_task_queue_get(&per_thread->task_queue[UNTIED_IDX(myid)]);

  /* check if there are any untied tasks available in the other task queues */
  if (task == NULL) {
    int first_victim, victim = 0;
    int team_size = pool->team_size;

    if (team_size < 2)
      return NULL;

    victim = (rand_r(&__omp_seed) % (team_size - 1));
    if (victim >= myid)
      victim++;

    /* cycle through to find a queue with work to steal */
    first_victim = victim;
    while (1) {
      while (__ompc_queue_lockless_is_empty(
                 &per_thread->task_queue[UNTIED_IDX(victim)])) {
        victim++;
        if (victim == myid)
          victim++;
        if (victim == team_size)
          victim = 0;
        if (victim == first_victim)
          goto CHECK_TIED_TASK_QUEUES;
      }
      task = __ompc_task_queue_steal(
          &per_thread->task_queue[UNTIED_IDX(victim)]);
      if (task != NULL)
        return task;
    }
  }

  /* if no task in local queue and no available untied tasks, we will look in
   * another queue so long as there are no suspended tasks tied to this thread
   * and the current task is either in a barrier or it's not tied */
CHECK_TIED_TASK_QUEUES:
  if (task == NULL && !current_thread->num_suspended_tied_tasks &&
      (__ompc_task_state_is_in_barrier(current_task) ||
       !__ompc_task_is_tied(current_task))) {
    int first_victim, victim = 0;
    int team_size = pool->team_size;

    victim = (rand_r(&__omp_seed) % (team_size - 1));
    if (victim >= myid)
      victim++;

    /* cycle through to find a queue with work to steal */
    first_victim = victim;
    while (1) {
      while (__ompc_queue_is_empty(
                 &per_thread->task_queue[TIED_IDX(victim)])) {
        victim++;
        if (victim == myid)
          victim++;
        if (victim == team_size)
          victim = 0;
        if (victim == first_victim)
          return NULL;
      }
      /* Always steal from the head for tied tasks. Note also that by not
       * using the task_queue API, the CFIFO implementation will not be used */
      task = __ompc_queue_steal_head(
          &per_thread->task_queue[TIED_IDX(victim)]);
      if (task != NULL)
        return task;
    }
  }

  return task;
}
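/* The victim selection used in both stealing loops above draws a uniform
 * value in [0, team_size - 2] and shifts values at or above myid up by one,
 * which yields a uniform pick over every thread id except our own. A
 * self-contained sketch demonstrating the mapping (hypothetical sketch_*
 * names; not compiled into the runtime): */
#if 0
#include <stdio.h>
#include <stdlib.h>

/* returns a uniformly random victim id in [0, team_size) excluding myid */
static int sketch_pick_victim(unsigned *seed, int myid, int team_size)
{
  int victim = rand_r(seed) % (team_size - 1); /* 0 .. team_size - 2 */
  if (victim >= myid)
    victim++;                                  /* skip over our own id */
  return victim;
}

int main(void)
{
  unsigned seed = 42;
  int counts[4] = {0};
  int i;
  for (i = 0; i < 100000; i++)
    counts[sketch_pick_victim(&seed, 2, 4)]++;
  /* counts[2] stays 0; the other three ids are hit roughly equally */
  for (i = 0; i < 4; i++)
    printf("victim %d: %d\n", i, counts[i]);
  return 0;
}
#endif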
void __ompc_task_create(omp_task_func taskfunc, void *frame_pointer,
                        void *firstprivates, int may_delay,
                        int is_tied, int blocks_parent)
{
  int myid;
  omp_team_t *team;
  omp_task_t *current_task, *new_task, *orig_task;
  omp_v_thread_t *current_thread;

  current_task = __omp_current_task;

  if (__ompc_task_cutoff()) {
    /* past the cutoff: execute the task immediately instead of deferring */
    //__omp_task_cutoffs++;
    orig_task = current_task;
    __omp_current_task = NULL;
    taskfunc(firstprivates, frame_pointer);
    __omp_current_task = orig_task;
    return;
  }

  myid = __omp_myid;
  current_thread = __omp_current_v_thread;
  team = current_thread->team;

#ifdef USE_COLLECTOR_TASK
  OMP_COLLECTOR_API_THR_STATE temp_state =
      (OMP_COLLECTOR_API_THR_STATE)current_thread->state;
  __ompc_set_state(THR_TASK_CREATE_STATE);
  __ompc_event_callback(OMP_EVENT_THR_BEGIN_CREATE_TASK);
#ifndef OMPT
  int new_id = __ompc_atomic_inc(&team->collector_task_id);
#endif
#endif

  if (may_delay) {
    new_task = __ompc_task_new();
    __ompc_task_set_function(new_task, taskfunc);
    __ompc_task_set_frame_pointer(new_task, frame_pointer);
    __ompc_task_set_firstprivates(new_task, firstprivates);
    new_task->creating_thread_id = myid;
    new_task->parent = current_task;
    new_task->depth = current_task->depth + 1;

#ifdef USE_COLLECTOR_TASK
#ifndef OMPT
    new_task->task_id = new_id;
#endif
    __omp_collector_task = new_task;
    __ompc_event_callback(OMP_EVENT_THR_END_CREATE_TASK_DEL);
    __ompc_set_state(temp_state);
#endif

    __ompc_task_set_flags(new_task, OMP_TASK_IS_DEFERRED);

    if (is_tied)
      __ompc_task_set_flags(new_task, OMP_TASK_IS_TIED);

    __ompc_atomic_inc(&current_task->num_children);

    if (blocks_parent) {
      __ompc_task_set_flags(new_task, OMP_TASK_BLOCKS_PARENT);
      __ompc_atomic_inc(&current_task->num_blocking_children);
    }

#ifdef OMPT
    __ompt_event_callback(ompt_event_task_begin);
#endif

    if (__ompc_add_task_to_pool(team->task_pool, new_task) == 0) {
      /* couldn't add to task pool, so execute it immediately */
      __ompc_task_set_state(current_task, OMP_TASK_READY);
      __ompc_task_switch(new_task);
      __ompc_task_set_state(current_task, OMP_TASK_RUNNING);
    }
  } else {
    omp_task_t new_immediate_task;
    new_task = &new_immediate_task;
    memset(new_task, 0, sizeof(omp_task_t));
    __ompc_task_set_function(new_task, taskfunc);
    __ompc_task_set_frame_pointer(new_task, frame_pointer);

    /* firstprivates will be NULL, so don't need to set it */
    Is_True(firstprivates == NULL, ("firstprivates should always be NULL"));

    new_task->creating_thread_id = myid;
    new_task->parent = current_task;
    new_task->depth = current_task->depth + 1;

#ifdef USE_COLLECTOR_TASK
#ifndef OMPT
    new_task->task_id = new_id;
#endif
    __omp_collector_task = new_task;
    __ompc_event_callback(OMP_EVENT_THR_END_CREATE_TASK_IMM);
#endif

    if (is_tied)
      __ompc_task_set_flags(new_task, OMP_TASK_IS_TIED);

#ifdef OMPT
    __ompt_event_callback(ompt_event_task_begin);
#endif

    __ompc_task_set_state(current_task, OMP_TASK_READY);
    if (__ompc_task_is_tied(current_task)) {
      /* if current task is tied, it should not go back into task pool */
      orig_task = current_task;
      ++(current_thread->num_suspended_tied_tasks);
      __omp_current_task = new_task;
      taskfunc(NULL, frame_pointer);
      __omp_current_task = orig_task;
      --(current_thread->num_suspended_tied_tasks);
    } else {
      /* if current task is untied, it could go back into the task pool, but
       * that isn't currently supported */
      orig_task = current_task;
      __omp_current_task = new_task;
      taskfunc(NULL, frame_pointer);
      __omp_current_task = orig_task;
    }
    __ompc_task_set_state(current_task, OMP_TASK_RUNNING);
  }
}
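/* For orientation, a deferred "#pragma omp task" roughly lowers to a call of
 * this entry point: the compiler outlines the task body into a function with
 * the (firstprivates, frame_pointer) signature used above and encodes the
 * clauses in the flag arguments. The sketch below is hypothetical (the real
 * outlining is compiler-generated and its firstprivate storage management is
 * not shown here); not compiled: */
#if 0
#include <stdlib.h>

/* hypothetical outlined body for: #pragma omp task firstprivate(x) */
static void sketch_task_body(void *firstprivates, void *frame_pointer)
{
  int x = *(int *)firstprivates;
  /* ... task body using x and the enclosing frame ... */
}

static void sketch_spawn(int x, void *enclosing_frame)
{
  /* firstprivates must outlive the spawning frame for a deferred task, so
   * they are copied to heap storage here (ownership/freeing elided) */
  int *fp = malloc(sizeof *fp);
  *fp = x;
  /* may_delay = 1 (deferred), is_tied = 1, blocks_parent = 0 */
  __ompc_task_create(sketch_task_body, enclosing_frame, fp, 1, 1, 0);
}
#endif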
void __ompc_task_switch(omp_task_t *new_task)
{
  omp_v_thread_t *current_thread = __omp_current_v_thread;
  omp_task_t *orig_task = __omp_current_task;

  __ompc_task_set_state(new_task, OMP_TASK_RUNNING);
  __omp_current_task = new_task;
  new_task->sdepth = orig_task->sdepth + 1;

#ifdef OMPT
  __ompt_suspended_task_id = orig_task->task_id;
  __ompt_resumed_task_id = new_task->task_id;
  __ompt_event_callback(ompt_event_task_switch);
#endif

#ifdef USE_COLLECTOR_TASK
  __omp_collector_task = __omp_current_task;
  omp_v_thread_t *p_vthread = __ompc_get_v_thread_by_num(__omp_myid);
  OMP_COLLECTOR_API_THR_STATE temp_state =
      (OMP_COLLECTOR_API_THR_STATE)p_vthread->state;
  __ompc_ompt_set_state(THR_WORK_STATE, ompt_state_work_parallel, 0);
  __ompc_event_callback(OMP_EVENT_THR_BEGIN_EXEC_TASK);
#endif

#ifdef OMPT
  new_task->frame_s.exit_runtime_frame = __builtin_frame_address(0);
  if (__ompc_task_is_implicit(new_task))
    __ompt_event_callback(ompt_event_implicit_task_begin);
#endif

  if (__ompc_task_is_tied(orig_task) &&
      !__ompc_task_state_is_in_barrier(orig_task)) {
    ++(current_thread->num_suspended_tied_tasks);
    new_task->t.func(new_task->firstprivates, new_task->frame_pointer);
    --(current_thread->num_suspended_tied_tasks);
  } else {
    new_task->t.func(new_task->firstprivates, new_task->frame_pointer);
  }

#ifdef OMPT
  new_task->frame_s.reenter_runtime_frame = __builtin_frame_address(0);
  if (__ompc_task_is_implicit(new_task))
    __ompt_event_callback(ompt_event_implicit_task_end);
#endif

#ifdef USE_COLLECTOR_TASK
  __ompc_set_state(temp_state);
#endif

  Is_True(__ompc_task_state_is_exiting(new_task),
          ("__ompc_task_switch: task returned but not in EXITING state"));

  if (new_task->num_children == 0) {
    __ompc_task_delete(new_task);
  } else {
    __ompc_task_set_state(new_task, OMP_TASK_FINISHED);
    __ompc_unlock(&new_task->lock);
  }

  __omp_current_task = orig_task;
#ifdef USE_COLLECTOR_TASK
  __omp_collector_task = __omp_current_task;
#endif
}
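/* The num_suspended_tied_tasks counter bracketing the task body above is the
 * same counter the remove-from-pool routines consult before stealing: a
 * thread holding a suspended tied task that is not at a barrier must not pick
 * up arbitrary tied work, or the OpenMP tied-task scheduling constraint could
 * be violated. A condensed sketch of that gate, assuming the helper names
 * used elsewhere in this file (hypothetical sketch_* wrapper; not compiled):
 */
#if 0
static int sketch_may_steal(omp_v_thread_t *thr, omp_task_t *cur)
{
  /* steal only if nothing tied is suspended on this thread, and the current
   * task is either sitting in a barrier or is untied */
  return thr->num_suspended_tied_tasks == 0 &&
         (__ompc_task_state_is_in_barrier(cur) || !__ompc_task_is_tied(cur));
}
#endif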
/* __ompc_remove_task_from_pool_simple_2level:
 * Takes a task from the task pool. First tries to get a task from the current
 * thread's task queue. If that doesn't work, then it will look for work in
 * the community queue. If that's also empty, then it will attempt to steal a
 * task from another task queue (so long as there are no other tasks, not in a
 * barrier, that are tied to the current thread).
 *
 * Note: The restriction on stealing is overly conservative. Even if there are
 * tasks tied to the current thread and not in a barrier ([*]), we should be
 * able to steal any untied tasks, or tied tasks that descend from all tasks
 * in [*]. But this implementation does not separate untied tasks from tied
 * tasks, and also does not track descendants in the task pool.
 */
omp_task_t *__ompc_remove_task_from_pool_simple_2level(omp_task_pool_t *pool)
{
  omp_task_t *task, *current_task;
  omp_team_t *team;
  omp_v_thread_t *current_thread;
  omp_queue_t *my_queue;
  omp_queue_t *victim_queue;
  omp_task_queue_level_t *per_thread, *community;
  int myid = __omp_myid;

  Is_True(pool != NULL,
          ("__ompc_remove_task_from_pool: task pool is uninitialized"));

  current_task = __omp_current_task;
  current_thread = __omp_current_v_thread;
  per_thread = &pool->level[PER_THREAD];
  community = &pool->level[COMMUNITY];

  task = __ompc_task_queue_get(&per_thread->task_queue[myid]);

  /* if no task in local queue, we will look in another queue so long as there
   * are no suspended tasks tied to this thread and the current task is either
   * in a barrier or it's not tied */
  if (task == NULL && !current_thread->num_suspended_tied_tasks &&
      (__ompc_task_state_is_in_barrier(current_task) ||
       !__ompc_task_is_tied(current_task))) {

    if (__omp_task_chunk_size > 1) {
      /* this will steal a chunk of tasks, instead of just 1, from the
       * community queue */
      task = __ompc_task_queue_steal_chunk(community->task_queue,
                                           &per_thread->task_queue[myid],
                                           __omp_task_chunk_size);
    } else {
      task = __ompc_task_queue_steal(community->task_queue);
    }

    if (task == NULL) {
      int first_victim, victim = 0;
      int team_size = pool->team_size;

      /* nobody to steal from in a team of one; this guard mirrors the
       * default variant and avoids a modulo by zero below */
      if (team_size < 2)
        return NULL;

      victim = (rand_r(&__omp_seed) % (team_size - 1));
      if (victim >= myid)
        victim++;

      /* cycle through to find a queue with work to steal */
      first_victim = victim;
      while (1) {
        while (__ompc_queue_is_empty(&per_thread->task_queue[victim])) {
          victim++;
          if (victim == myid)
            victim++;
          if (victim == team_size)
            victim = 0;
          if (victim == first_victim)
            return NULL;
        }
        task = __ompc_task_queue_steal(&per_thread->task_queue[victim]);
        if (task != NULL)
          return task;
      }
    }
  }

  return task;
}
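/* __ompc_task_queue_steal_chunk above amortizes steal overhead: rather than
 * taking a single task from the community queue, the thief transfers a chunk
 * into its own queue and returns one task to execute. A simplified sketch of
 * those semantics (hypothetical sketch_* helper built from single steals; the
 * real routine presumably moves the chunk more efficiently; not compiled): */
#if 0
static omp_task_t *sketch_steal_chunk(omp_queue_t *from, omp_queue_t *to,
                                      int chunk_size)
{
  int i;
  omp_task_t *first = __ompc_task_queue_steal(from);
  if (first == NULL)
    return NULL;
  /* move up to chunk_size - 1 additional tasks into the local queue */
  for (i = 1; i < chunk_size; i++) {
    omp_task_t *t = __ompc_task_queue_steal(from);
    if (t == NULL)
      break;
    __ompc_task_queue_put(to, t);
  }
  return first; /* execute one task immediately */
}
#endif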
/* __ompc_remove_task_from_pool_simple:
 * Takes a task from the task pool. First tries to get a task from the current
 * thread's task queue. If that doesn't work, then it will attempt to steal a
 * task from another task queue (so long as there are no other tasks, not in a
 * barrier, that are tied to the current thread).
 *
 * Note: The restriction on stealing is overly conservative. Even if there are
 * tasks tied to the current thread and not in a barrier ([*]), we should be
 * able to steal any untied tasks, or tied tasks that descend from all tasks
 * in [*]. But this implementation does not separate untied tasks from tied
 * tasks, and also does not track descendants in the task pool.
 */
omp_task_t *__ompc_remove_task_from_pool_simple(omp_task_pool_t *pool)
{
  omp_task_t *task, *current_task;
  omp_team_t *team;
  omp_v_thread_t *current_thread;
  omp_queue_t *my_queue;
  omp_queue_t *victim_queue;
  omp_task_queue_level_t *per_thread;
  int myid = __omp_myid;
  int team_size;

  Is_True(pool != NULL,
          ("__ompc_remove_task_from_pool: task pool is uninitialized"));

  current_task = __omp_current_task;
  current_thread = __omp_current_v_thread;
  per_thread = &pool->level[PER_THREAD];

  task = __ompc_task_queue_get(&per_thread->task_queue[myid]);

  /* if no task in local queue, we will look in another queue so long as there
   * are no suspended tasks tied to this thread and the current task is either
   * in a barrier or it's not tied */
  team_size = pool->team_size;
  if (task == NULL && (team_size > 1) &&
      !current_thread->num_suspended_tied_tasks &&
      (__ompc_task_state_is_in_barrier(current_task) ||
       !__ompc_task_is_tied(current_task))) {
    int first_victim, victim = 0, go_right;

    /* randomly choose a starting victim and a direction of traversal */
    go_right = (rand_r(&__omp_seed) % 2);
    victim = (rand_r(&__omp_seed) % (team_size - 1));
    if (victim >= myid)
      victim++;
    first_victim = victim;

    if (go_right) {
      /* cycle rightwards to find a queue with work to steal */
      while (1) {
        while (__ompc_queue_is_empty(&per_thread->task_queue[victim])) {
          victim++;
          if (victim == myid)
            victim++;
          if (victim == team_size)
            victim = 0;
          if (victim == first_victim)
            return NULL;
        }
        task = __ompc_task_queue_steal(&per_thread->task_queue[victim]);
        if (task != NULL)
          return task;
      }
    } else {
      /* cycle leftwards to find a queue with work to steal */
      while (1) {
        while (__ompc_queue_is_empty(&per_thread->task_queue[victim])) {
          victim--;
          if (victim == myid)
            victim--;
          if (victim == -1)
            victim = team_size - 1;
          if (victim == first_victim)
            return NULL;
        }
        task = __ompc_task_queue_steal(&per_thread->task_queue[victim]);
        if (task != NULL)
          return task;
      }
    }
  }

  return task;
}
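/* Randomizing both the starting victim and the traversal direction spreads
 * concurrent thieves across the queues instead of letting them all converge
 * on the same victim in the same order. A self-contained sketch that prints
 * the visiting order for one choice of start and direction (hypothetical
 * sketch_* helper; not compiled): */
#if 0
#include <stdio.h>

static void sketch_print_traversal(int myid, int team_size,
                                   int first_victim, int go_right)
{
  int victim = first_victim;
  do {
    printf("%d ", victim);
    victim += go_right ? 1 : -1;
    if (victim == myid)
      victim += go_right ? 1 : -1;  /* never visit our own queue */
    if (victim == team_size)
      victim = 0;                   /* wrap when walking right */
    if (victim == -1)
      victim = team_size - 1;       /* wrap when walking left */
  } while (victim != first_victim);
  printf("\n");
}

int main(void)
{
  sketch_print_traversal(2, 6, 4, 1);  /* prints: 4 5 0 1 3 */
  sketch_print_traversal(2, 6, 4, 0);  /* prints: 4 3 1 0 5 */
  return 0;
}
#endif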