void
gomp_end_task (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_task *task = thr->task;

  gomp_finish_task (task);
  thr->task = task->parent;
}
static void *
gomp_thread_start (void *xdata)
{
  struct gomp_thread_start_data *data = xdata;
  struct gomp_thread *thr;
  struct gomp_thread_pool *pool;
  void (*local_fn) (void *);
  void *local_data;

#if defined HAVE_TLS || defined USE_EMUTLS
  thr = &gomp_tls_data;
#else
  struct gomp_thread local_thr;
  thr = &local_thr;
  pthread_setspecific (gomp_tls_key, thr);
#endif
  gomp_sem_init (&thr->release, 0);

  /* Extract what we need from data.  */
  local_fn = data->fn;
  local_data = data->fn_data;
  thr->thread_pool = data->thread_pool;
  thr->ts = data->ts;
  thr->task = data->task;
  thr->place = data->place;

  thr->ts.team->ordered_release[thr->ts.team_id] = &thr->release;

  /* Make thread pool local.  */
  pool = thr->thread_pool;

  if (data->nested)
    {
      struct gomp_team *team = thr->ts.team;
      struct gomp_task *task = thr->task;

      gomp_barrier_wait (&team->barrier);

      local_fn (local_data);
      gomp_team_barrier_wait_final (&team->barrier);
      gomp_finish_task (task);
      gomp_barrier_wait_last (&team->barrier);
    }
  else
    {
      pool->threads[thr->ts.team_id] = thr;

      gomp_simple_barrier_wait (&pool->threads_dock);
      do
        {
          struct gomp_team *team = thr->ts.team;
          struct gomp_task *task = thr->task;

          local_fn (local_data);
          gomp_team_barrier_wait_final (&team->barrier);
          gomp_finish_task (task);

          gomp_simple_barrier_wait (&pool->threads_dock);

          local_fn = thr->fn;
          local_data = thr->data;
          thr->fn = NULL;
        }
      while (local_fn);
    }

  gomp_sem_destroy (&thr->release);
  thr->thread_pool = NULL;
  thr->task = NULL;
  return NULL;
}
void
gomp_create_target_task (struct gomp_device_descr *devicep,
                         void (*fn) (void *), size_t mapnum, void **hostaddrs,
                         size_t *sizes, unsigned short *kinds,
                         unsigned int flags, void **depend)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;

  /* If parallel or taskgroup has been cancelled, don't start new tasks.  */
  if (team
      && (gomp_team_barrier_cancelled (&team->barrier)
          || (thr->task->taskgroup && thr->task->taskgroup->cancelled)))
    return;

  struct gomp_target_task *ttask;
  struct gomp_task *task;
  struct gomp_task *parent = thr->task;
  struct gomp_taskgroup *taskgroup = parent->taskgroup;
  bool do_wake;
  size_t depend_size = 0;

  if (depend != NULL)
    depend_size = ((uintptr_t) depend[0]
                   * sizeof (struct gomp_task_depend_entry));
  task = gomp_malloc (sizeof (*task) + depend_size
                      + sizeof (*ttask)
                      + mapnum * (sizeof (void *) + sizeof (size_t)
                                  + sizeof (unsigned short)));
  gomp_init_task (task, parent, gomp_icv (false));
  task->kind = GOMP_TASK_WAITING;
  task->in_tied_task = parent->in_tied_task;
  task->taskgroup = taskgroup;
  ttask = (struct gomp_target_task *) &task->depend[(uintptr_t) depend[0]];
  ttask->devicep = devicep;
  ttask->fn = fn;
  ttask->mapnum = mapnum;
  memcpy (ttask->hostaddrs, hostaddrs, mapnum * sizeof (void *));
  ttask->sizes = (size_t *) &ttask->hostaddrs[mapnum];
  memcpy (ttask->sizes, sizes, mapnum * sizeof (size_t));
  ttask->kinds = (unsigned short *) &ttask->sizes[mapnum];
  memcpy (ttask->kinds, kinds, mapnum * sizeof (unsigned short));
  ttask->flags = flags;
  task->fn = gomp_target_task_fn;
  task->fn_data = ttask;
  task->final_task = 0;
  gomp_mutex_lock (&team->task_lock);
  /* If parallel or taskgroup has been cancelled, don't start new tasks.  */
  if (__builtin_expect (gomp_team_barrier_cancelled (&team->barrier)
                        || (taskgroup && taskgroup->cancelled), 0))
    {
      gomp_mutex_unlock (&team->task_lock);
      gomp_finish_task (task);
      free (task);
      return;
    }
  if (taskgroup)
    taskgroup->num_children++;
  if (depend_size)
    {
      gomp_task_handle_depend (task, parent, depend);
      if (task->num_dependees)
        {
          gomp_mutex_unlock (&team->task_lock);
          return;
        }
    }
  if (parent->children)
    {
      task->next_child = parent->children;
      task->prev_child = parent->children->prev_child;
      task->next_child->prev_child = task;
      task->prev_child->next_child = task;
    }
  else
    {
      task->next_child = task;
      task->prev_child = task;
    }
  parent->children = task;
  if (taskgroup)
    {
      /* If applicable, place task into its taskgroup.  */
      if (taskgroup->children)
        {
          task->next_taskgroup = taskgroup->children;
          task->prev_taskgroup = taskgroup->children->prev_taskgroup;
          task->next_taskgroup->prev_taskgroup = task;
          task->prev_taskgroup->next_taskgroup = task;
        }
      else
        {
          task->next_taskgroup = task;
          task->prev_taskgroup = task;
        }
      taskgroup->children = task;
    }
  if (team->task_queue)
    {
      task->next_queue = team->task_queue;
      task->prev_queue = team->task_queue->prev_queue;
      task->next_queue->prev_queue = task;
      task->prev_queue->next_queue = task;
    }
  else
    {
      task->next_queue = task;
      task->prev_queue = task;
      team->task_queue = task;
    }
  ++team->task_count;
  ++team->task_queued_count;
  gomp_team_barrier_set_task_pending (&team->barrier);
  do_wake = team->task_running_count + !parent->in_tied_task
            < team->nthreads;
  gomp_mutex_unlock (&team->task_lock);
  if (do_wake)
    gomp_team_barrier_wake (&team->barrier, 1);
}
void
GOMP_taskgroup_end (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_task *task = thr->task;
  struct gomp_taskgroup *taskgroup;
  struct gomp_task *child_task = NULL;
  struct gomp_task *to_free = NULL;
  int do_wake = 0;

  if (team == NULL)
    return;
  taskgroup = task->taskgroup;

  /* The acquire barrier on load of taskgroup->num_children here
     synchronizes with the write of 0 in
     gomp_task_run_post_remove_taskgroup.  It is not necessary that we
     synchronize with other non-0 writes at this point, but we must ensure
     that all writes to memory by a child thread task work function are
     seen before we exit from GOMP_taskgroup_end.  */
  if (__atomic_load_n (&taskgroup->num_children, MEMMODEL_ACQUIRE) == 0)
    goto finish;

  gomp_mutex_lock (&team->task_lock);
  while (1)
    {
      bool cancelled = false;
      if (taskgroup->children == NULL)
        {
          if (taskgroup->num_children)
            {
              if (task->children == NULL)
                goto do_wait;
              child_task = task->children;
            }
          else
            {
              gomp_mutex_unlock (&team->task_lock);
              if (to_free)
                {
                  gomp_finish_task (to_free);
                  free (to_free);
                }
              goto finish;
            }
        }
      else
        child_task = taskgroup->children;
      if (child_task->kind == GOMP_TASK_WAITING)
        {
          cancelled
            = gomp_task_run_pre (child_task, child_task->parent, team);
          if (__builtin_expect (cancelled, 0))
            {
              if (to_free)
                {
                  gomp_finish_task (to_free);
                  free (to_free);
                  to_free = NULL;
                }
              goto finish_cancelled;
            }
        }
      else
        {
          child_task = NULL;
         do_wait:
          /* All tasks we are waiting for are already running
             in other threads.  Wait for them.  */
          taskgroup->in_taskgroup_wait = true;
        }
      gomp_mutex_unlock (&team->task_lock);
      if (do_wake)
        {
          gomp_team_barrier_wake (&team->barrier, do_wake);
          do_wake = 0;
        }
      if (to_free)
        {
          gomp_finish_task (to_free);
          free (to_free);
          to_free = NULL;
        }
      if (child_task)
        {
          thr->task = child_task;
          child_task->fn (child_task->fn_data);
          thr->task = task;
        }
      else
        gomp_sem_wait (&taskgroup->taskgroup_sem);
      gomp_mutex_lock (&team->task_lock);
      if (child_task)
        {
         finish_cancelled:;
          size_t new_tasks
            = gomp_task_run_post_handle_depend (child_task, team);
          gomp_task_run_post_remove_parent (child_task);
          gomp_clear_parent (child_task->children);
          gomp_task_run_post_remove_taskgroup (child_task);
          to_free = child_task;
          child_task = NULL;
          team->task_count--;
          if (new_tasks > 1)
            {
              do_wake = team->nthreads - team->task_running_count
                        - !task->in_tied_task;
              if (do_wake > new_tasks)
                do_wake = new_tasks;
            }
        }
    }

 finish:
  task->taskgroup = taskgroup->prev;
  gomp_sem_destroy (&taskgroup->taskgroup_sem);
  free (taskgroup);
}
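/* Illustrative sketch (an assumed lowering, not a compiler dump) of how a
   taskgroup construct uses the entry point above: the region is bracketed
   by GOMP_taskgroup_start/GOMP_taskgroup_end, and the _end call either runs
   queued children itself or sleeps on taskgroup_sem until every task
   registered in taskgroup->children has completed.  GOMP_taskgroup_start is
   declared here only for the example; it normally comes from libgomp's
   internal headers.  */

extern void GOMP_taskgroup_start (void);

static void
taskgroup_example (void)
{
  GOMP_taskgroup_start ();
  /* ... one GOMP_task call per "#pragma omp task" in the region ...  */
  GOMP_taskgroup_end ();
}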
void
GOMP_task (void (*fn) (void *), void *data, void (*cpyfn) (void *, void *),
           long arg_size, long arg_align, bool if_clause, unsigned flags,
           void **depend, int priority)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;

#ifdef HAVE_BROKEN_POSIX_SEMAPHORES
  /* If pthread_mutex_* is used for omp_*lock*, then each task must be
     tied to one thread all the time.  This means UNTIED tasks must be
     tied and if CPYFN is non-NULL IF(0) must be forced, as CPYFN
     might be running on different thread than FN.  */
  if (cpyfn)
    if_clause = false;
  flags &= ~GOMP_TASK_FLAG_UNTIED;
#endif

  /* If parallel or taskgroup has been cancelled, don't start new tasks.  */
  if (team
      && (gomp_team_barrier_cancelled (&team->barrier)
          || (thr->task->taskgroup && thr->task->taskgroup->cancelled)))
    return;

  if ((flags & GOMP_TASK_FLAG_PRIORITY) == 0)
    priority = 0;
  /* FIXME, use priority.  */
  (void) priority;

  if (!if_clause || team == NULL
      || (thr->task && thr->task->final_task)
      || team->task_count > 64 * team->nthreads)
    {
      struct gomp_task task;

      /* If there are depend clauses and earlier deferred sibling tasks
         with depend clauses, check if there isn't a dependency.  If there
         is, we need to wait for them.  There is no need to handle
         depend clauses for non-deferred tasks other than this, because
         the parent task is suspended until the child task finishes and
         thus it can't start further child tasks.  */
      if ((flags & GOMP_TASK_FLAG_DEPEND)
          && thr->task && thr->task->depend_hash)
        gomp_task_maybe_wait_for_dependencies (depend);

      gomp_init_task (&task, thr->task, gomp_icv (false));
      task.kind = GOMP_TASK_UNDEFERRED;
      task.final_task = (thr->task && thr->task->final_task)
                        || (flags & GOMP_TASK_FLAG_FINAL);
      if (thr->task)
        {
          task.in_tied_task = thr->task->in_tied_task;
          task.taskgroup = thr->task->taskgroup;
        }
      thr->task = &task;
      if (__builtin_expect (cpyfn != NULL, 0))
        {
          char buf[arg_size + arg_align - 1];
          char *arg = (char *) (((uintptr_t) buf + arg_align - 1)
                                & ~(uintptr_t) (arg_align - 1));
          cpyfn (arg, data);
          fn (arg);
        }
      else
        fn (data);
      /* Access to "children" is normally done inside a task_lock
         mutex region, but the only way this particular task.children
         can be set is if this thread's task work function (fn)
         creates children.  So since the setter is *this* thread, we
         need no barriers here when testing for non-NULL.  We can have
         task.children set by the current thread then changed by a
         child thread, but seeing a stale non-NULL value is not a
         problem.  Once past the task_lock acquisition, this thread
         will see the real value of task.children.  */
      if (task.children != NULL)
        {
          gomp_mutex_lock (&team->task_lock);
          gomp_clear_parent (task.children);
          gomp_mutex_unlock (&team->task_lock);
        }
      gomp_end_task ();
    }
  else
    {
      struct gomp_task *task;
      struct gomp_task *parent = thr->task;
      struct gomp_taskgroup *taskgroup = parent->taskgroup;
      char *arg;
      bool do_wake;
      size_t depend_size = 0;

      if (flags & GOMP_TASK_FLAG_DEPEND)
        depend_size = ((uintptr_t) depend[0]
                       * sizeof (struct gomp_task_depend_entry));
      task = gomp_malloc (sizeof (*task) + depend_size
                          + arg_size + arg_align - 1);
      arg = (char *) (((uintptr_t) (task + 1) + depend_size + arg_align - 1)
                      & ~(uintptr_t) (arg_align - 1));
      gomp_init_task (task, parent, gomp_icv (false));
      task->kind = GOMP_TASK_UNDEFERRED;
      task->in_tied_task = parent->in_tied_task;
      task->taskgroup = taskgroup;
      thr->task = task;
      if (cpyfn)
        {
          cpyfn (arg, data);
          task->copy_ctors_done = true;
        }
      else
        memcpy (arg, data, arg_size);
      thr->task = parent;
      task->kind = GOMP_TASK_WAITING;
      task->fn = fn;
      task->fn_data = arg;
      task->final_task = (flags & GOMP_TASK_FLAG_FINAL) >> 1;
      gomp_mutex_lock (&team->task_lock);
      /* If parallel or taskgroup has been cancelled, don't start new
         tasks.  */
      if (__builtin_expect ((gomp_team_barrier_cancelled (&team->barrier)
                             || (taskgroup && taskgroup->cancelled))
                            && !task->copy_ctors_done, 0))
        {
          gomp_mutex_unlock (&team->task_lock);
          gomp_finish_task (task);
          free (task);
          return;
        }
      if (taskgroup)
        taskgroup->num_children++;
      if (depend_size)
        {
          gomp_task_handle_depend (task, parent, depend);
          if (task->num_dependees)
            {
              gomp_mutex_unlock (&team->task_lock);
              return;
            }
        }
      if (parent->children)
        {
          task->next_child = parent->children;
          task->prev_child = parent->children->prev_child;
          task->next_child->prev_child = task;
          task->prev_child->next_child = task;
        }
      else
        {
          task->next_child = task;
          task->prev_child = task;
        }
      parent->children = task;
      if (taskgroup)
        {
          /* If applicable, place task into its taskgroup.  */
          if (taskgroup->children)
            {
              task->next_taskgroup = taskgroup->children;
              task->prev_taskgroup = taskgroup->children->prev_taskgroup;
              task->next_taskgroup->prev_taskgroup = task;
              task->prev_taskgroup->next_taskgroup = task;
            }
          else
            {
              task->next_taskgroup = task;
              task->prev_taskgroup = task;
            }
          taskgroup->children = task;
        }
      if (team->task_queue)
        {
          task->next_queue = team->task_queue;
          task->prev_queue = team->task_queue->prev_queue;
          task->next_queue->prev_queue = task;
          task->prev_queue->next_queue = task;
        }
      else
        {
          task->next_queue = task;
          task->prev_queue = task;
          team->task_queue = task;
        }
      ++team->task_count;
      ++team->task_queued_count;
      gomp_team_barrier_set_task_pending (&team->barrier);
      do_wake = team->task_running_count + !parent->in_tied_task
                < team->nthreads;
      gomp_mutex_unlock (&team->task_lock);
      if (do_wake)
        gomp_team_barrier_wake (&team->barrier, 1);
    }
}
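/* Illustrative call into GOMP_task above: a hand-written approximation of
   what the compiler might emit for
       #pragma omp task firstprivate(x) depend(inout: a[0])
   The outlined function, the argument struct and the helper names are
   assumptions made for this sketch, not taken from a real compiler dump;
   it needs <stdint.h>/<stdbool.h> plus the libgomp declarations of
   GOMP_task and GOMP_TASK_FLAG_DEPEND (the same bit the older GOMP_task
   body below tests as "flags & 8").  The depend vector layout follows the
   decoding used throughout this file: depend[0] is the number of entries,
   depend[1] the number of out/inout entries, and the addresses start at
   depend[2].  */

struct task_arg_block { int x; };

static void
outlined_task_fn (void *p)
{
  struct task_arg_block *blk = p;
  (void) blk->x;  /* body of the task region would go here */
}

static void
spawn_example_task (int x, int *a)
{
  struct task_arg_block blk = { x };
  void *depend[3];
  depend[0] = (void *) (uintptr_t) 1;  /* one depend clause */
  depend[1] = (void *) (uintptr_t) 1;  /* it is out/inout */
  depend[2] = a;
  GOMP_task (outlined_task_fn, &blk, NULL, sizeof blk, __alignof__ (blk),
             true /* if-clause */, GOMP_TASK_FLAG_DEPEND, depend,
             0 /* priority */);
}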
void
gomp_task_maybe_wait_for_dependencies (void **depend)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_task *task = thr->task;
  struct gomp_team *team = thr->ts.team;
  struct gomp_task_depend_entry elem, *ent = NULL;
  struct gomp_taskwait taskwait;
  struct gomp_task *last_parent_depends_on = NULL;
  size_t ndepend = (uintptr_t) depend[0];
  size_t nout = (uintptr_t) depend[1];
  size_t i;
  size_t num_awaited = 0;
  struct gomp_task *child_task = NULL;
  struct gomp_task *to_free = NULL;
  int do_wake = 0;

  gomp_mutex_lock (&team->task_lock);
  for (i = 0; i < ndepend; i++)
    {
      elem.addr = depend[i + 2];
      ent = htab_find (task->depend_hash, &elem);
      for (; ent; ent = ent->next)
        if (i >= nout && ent->is_in)
          continue;
        else
          {
            struct gomp_task *tsk = ent->task;
            if (!tsk->parent_depends_on)
              {
                tsk->parent_depends_on = true;
                ++num_awaited;
                /* If a task we need to wait for is not already
                   running and is ready to be scheduled, move it to
                   front, so that we run it as soon as possible.

                   We rearrange the children queue such that all
                   parent_depends_on tasks are first, and
                   last_parent_depends_on points to the last such task
                   we rearranged.  For example, given the following
                   children where PD[123] are the parent_depends_on
                   tasks:

                        task->children
                        |
                        V
                        C1 -> C2 -> C3 -> PD1 -> PD2 -> PD3 -> C4

                   We rearrange such that:

                        task->children
                        |              +--- last_parent_depends_on
                        |              |
                        V              V
                        PD1 -> PD2 -> PD3 -> C1 -> C2 -> C3 -> C4  */
                if (tsk->num_dependees == 0
                    && tsk->kind == GOMP_TASK_WAITING)
                  {
                    if (last_parent_depends_on)
                      {
                        tsk->prev_child->next_child = tsk->next_child;
                        tsk->next_child->prev_child = tsk->prev_child;
                        tsk->prev_child = last_parent_depends_on;
                        tsk->next_child
                          = last_parent_depends_on->next_child;
                        tsk->prev_child->next_child = tsk;
                        tsk->next_child->prev_child = tsk;
                      }
                    else if (tsk != task->children)
                      {
                        tsk->prev_child->next_child = tsk->next_child;
                        tsk->next_child->prev_child = tsk->prev_child;
                        tsk->prev_child = task->children->prev_child;
                        tsk->next_child = task->children;
                        task->children = tsk;
                        tsk->prev_child->next_child = tsk;
                        tsk->next_child->prev_child = tsk;
                      }
                    last_parent_depends_on = tsk;
                  }
              }
          }
    }
  if (num_awaited == 0)
    {
      gomp_mutex_unlock (&team->task_lock);
      return;
    }

  memset (&taskwait, 0, sizeof (taskwait));
  taskwait.n_depend = num_awaited;
  taskwait.last_parent_depends_on = last_parent_depends_on;
  gomp_sem_init (&taskwait.taskwait_sem, 0);
  task->taskwait = &taskwait;

  while (1)
    {
      bool cancelled = false;
      if (taskwait.n_depend == 0)
        {
          task->taskwait = NULL;
          gomp_mutex_unlock (&team->task_lock);
          if (to_free)
            {
              gomp_finish_task (to_free);
              free (to_free);
            }
          gomp_sem_destroy (&taskwait.taskwait_sem);
          return;
        }
      if (task->children->kind == GOMP_TASK_WAITING)
        {
          child_task = task->children;
          cancelled = gomp_task_run_pre (child_task, task, team);
          if (__builtin_expect (cancelled, 0))
            {
              if (to_free)
                {
                  gomp_finish_task (to_free);
                  free (to_free);
                  to_free = NULL;
                }
              goto finish_cancelled;
            }
        }
      else
        /* All tasks we are waiting for are already running
           in other threads.  Wait for them.  */
        taskwait.in_depend_wait = true;
      gomp_mutex_unlock (&team->task_lock);
      if (do_wake)
        {
          gomp_team_barrier_wake (&team->barrier, do_wake);
          do_wake = 0;
        }
      if (to_free)
        {
          gomp_finish_task (to_free);
          free (to_free);
          to_free = NULL;
        }
      if (child_task)
        {
          thr->task = child_task;
          child_task->fn (child_task->fn_data);
          thr->task = task;
        }
      else
        gomp_sem_wait (&taskwait.taskwait_sem);
      gomp_mutex_lock (&team->task_lock);
      if (child_task)
        {
         finish_cancelled:;
          size_t new_tasks
            = gomp_task_run_post_handle_depend (child_task, team);
          if (child_task->parent_depends_on)
            --taskwait.n_depend;
          /* Remove child_task from sibling list.  */
          child_task->prev_child->next_child = child_task->next_child;
          child_task->next_child->prev_child = child_task->prev_child;
          if (task->children == child_task)
            {
              if (child_task->next_child != child_task)
                task->children = child_task->next_child;
              else
                task->children = NULL;
            }
          gomp_clear_parent (child_task->children);
          gomp_task_run_post_remove_taskgroup (child_task);
          to_free = child_task;
          child_task = NULL;
          team->task_count--;
          if (new_tasks > 1)
            {
              do_wake = team->nthreads - team->task_running_count
                        - !task->in_tied_task;
              if (do_wake > new_tasks)
                do_wake = new_tasks;
            }
        }
    }
}
void
GOMP_taskwait (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_task *task = thr->task;
  struct gomp_task *child_task = NULL;
  struct gomp_task *to_free = NULL;
  struct gomp_taskwait taskwait;
  int do_wake = 0;

  /* The acquire barrier on load of task->children here synchronizes
     with the write of a NULL in gomp_task_run_post_remove_parent.  It is
     not necessary that we synchronize with other non-NULL writes at
     this point, but we must ensure that all writes to memory by a
     child thread task work function are seen before we exit from
     GOMP_taskwait.  */
  if (task == NULL
      || __atomic_load_n (&task->children, MEMMODEL_ACQUIRE) == NULL)
    return;

  memset (&taskwait, 0, sizeof (taskwait));
  gomp_mutex_lock (&team->task_lock);
  while (1)
    {
      bool cancelled = false;
      if (task->children == NULL)
        {
          bool destroy_taskwait = task->taskwait != NULL;
          task->taskwait = NULL;
          gomp_mutex_unlock (&team->task_lock);
          if (to_free)
            {
              gomp_finish_task (to_free);
              free (to_free);
            }
          if (destroy_taskwait)
            gomp_sem_destroy (&taskwait.taskwait_sem);
          return;
        }
      if (task->children->kind == GOMP_TASK_WAITING)
        {
          child_task = task->children;
          cancelled = gomp_task_run_pre (child_task, task, team);
          if (__builtin_expect (cancelled, 0))
            {
              if (to_free)
                {
                  gomp_finish_task (to_free);
                  free (to_free);
                  to_free = NULL;
                }
              goto finish_cancelled;
            }
        }
      else
        {
          /* All tasks we are waiting for are already running
             in other threads.  Wait for them.  */
          if (task->taskwait == NULL)
            {
              taskwait.in_depend_wait = false;
              gomp_sem_init (&taskwait.taskwait_sem, 0);
              task->taskwait = &taskwait;
            }
          taskwait.in_taskwait = true;
        }
      gomp_mutex_unlock (&team->task_lock);
      if (do_wake)
        {
          gomp_team_barrier_wake (&team->barrier, do_wake);
          do_wake = 0;
        }
      if (to_free)
        {
          gomp_finish_task (to_free);
          free (to_free);
          to_free = NULL;
        }
      if (child_task)
        {
          thr->task = child_task;
          child_task->fn (child_task->fn_data);
          thr->task = task;
        }
      else
        gomp_sem_wait (&taskwait.taskwait_sem);
      gomp_mutex_lock (&team->task_lock);
      if (child_task)
        {
         finish_cancelled:;
          size_t new_tasks
            = gomp_task_run_post_handle_depend (child_task, team);

          /* Remove child_task from children list, and set up the next
             sibling to be run.  */
          child_task->prev_child->next_child = child_task->next_child;
          child_task->next_child->prev_child = child_task->prev_child;
          if (task->children == child_task)
            {
              if (child_task->next_child != child_task)
                task->children = child_task->next_child;
              else
                task->children = NULL;
            }
          /* Orphan all the children of CHILD_TASK.  */
          gomp_clear_parent (child_task->children);

          /* Remove CHILD_TASK from its taskgroup.  */
          gomp_task_run_post_remove_taskgroup (child_task);

          to_free = child_task;
          child_task = NULL;
          team->task_count--;
          if (new_tasks > 1)
            {
              do_wake = team->nthreads - team->task_running_count
                        - !task->in_tied_task;
              if (do_wake > new_tasks)
                do_wake = new_tasks;
            }
        }
    }
}
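/* Minimal sketch of the assumed lowering of a taskwait construct: spawn a
   deferred child with GOMP_task, then call GOMP_taskwait above, which
   either runs queued children itself or sleeps on taskwait.taskwait_sem
   until the running ones finish.  The outlined function and the literal
   GOMP_task arguments are illustrative assumptions, not a compiler dump.  */

static void
taskwait_child_fn (void *data)
{
  (void) data;  /* task body would go here */
}

static void
taskwait_example (void)
{
  /* #pragma omp task  -- no firstprivates, so data/cpyfn are NULL.  */
  GOMP_task (taskwait_child_fn, NULL, NULL, 0, 1, true, 0, NULL, 0);
  /* #pragma omp taskwait  */
  GOMP_taskwait ();
}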
void
gomp_barrier_handle_tasks (gomp_barrier_state_t state)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_task *task = thr->task;
  struct gomp_task *child_task = NULL;
  struct gomp_task *to_free = NULL;
  int do_wake = 0;

  gomp_mutex_lock (&team->task_lock);
  if (gomp_barrier_last_thread (state))
    {
      if (team->task_count == 0)
        {
          gomp_team_barrier_done (&team->barrier, state);
          gomp_mutex_unlock (&team->task_lock);
          gomp_team_barrier_wake (&team->barrier, 0);
          return;
        }
      gomp_team_barrier_set_waiting_for_tasks (&team->barrier);
    }

  while (1)
    {
      bool cancelled = false;
      if (team->task_queue != NULL)
        {
          child_task = team->task_queue;
          cancelled = gomp_task_run_pre (child_task, child_task->parent,
                                         team);
          if (__builtin_expect (cancelled, 0))
            {
              if (to_free)
                {
                  gomp_finish_task (to_free);
                  free (to_free);
                  to_free = NULL;
                }
              goto finish_cancelled;
            }
          team->task_running_count++;
          child_task->in_tied_task = true;
        }
      gomp_mutex_unlock (&team->task_lock);
      if (do_wake)
        {
          gomp_team_barrier_wake (&team->barrier, do_wake);
          do_wake = 0;
        }
      if (to_free)
        {
          gomp_finish_task (to_free);
          free (to_free);
          to_free = NULL;
        }
      if (child_task)
        {
          thr->task = child_task;
          child_task->fn (child_task->fn_data);
          thr->task = task;
        }
      else
        return;
      gomp_mutex_lock (&team->task_lock);
      if (child_task)
        {
         finish_cancelled:;
          size_t new_tasks
            = gomp_task_run_post_handle_depend (child_task, team);
          gomp_task_run_post_remove_parent (child_task);
          gomp_clear_parent (child_task->children);
          gomp_task_run_post_remove_taskgroup (child_task);
          to_free = child_task;
          child_task = NULL;
          if (!cancelled)
            team->task_running_count--;
          if (new_tasks > 1)
            {
              do_wake = team->nthreads - team->task_running_count;
              if (do_wake > new_tasks)
                do_wake = new_tasks;
            }
          if (--team->task_count == 0
              && gomp_team_barrier_waiting_for_tasks (&team->barrier))
            {
              gomp_team_barrier_done (&team->barrier, state);
              gomp_mutex_unlock (&team->task_lock);
              gomp_team_barrier_wake (&team->barrier, 0);
              gomp_mutex_lock (&team->task_lock);
            }
        }
    }
}
void
GOMP_task (void (*fn) (void *), void *data, void (*cpyfn) (void *, void *),
           long arg_size, long arg_align, bool if_clause, unsigned flags,
           void **depend)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;

#ifdef HAVE_BROKEN_POSIX_SEMAPHORES
  /* If pthread_mutex_* is used for omp_*lock*, then each task must be
     tied to one thread all the time.  This means UNTIED tasks must be
     tied and if CPYFN is non-NULL IF(0) must be forced, as CPYFN
     might be running on different thread than FN.  */
  if (cpyfn)
    if_clause = false;
  if (flags & 1)
    flags &= ~1;
#endif

  /* If parallel or taskgroup has been cancelled, don't start new tasks.  */
  if (team
      && (gomp_team_barrier_cancelled (&team->barrier)
          || (thr->task->taskgroup && thr->task->taskgroup->cancelled)))
    return;

  if (!if_clause || team == NULL
      || (thr->task && thr->task->final_task)
      || team->task_count > 64 * team->nthreads)
    {
      struct gomp_task task;

      /* If there are depend clauses and earlier deferred sibling tasks
         with depend clauses, check if there isn't a dependency.  If there
         is, we need to wait for them.  There is no need to handle
         depend clauses for non-deferred tasks other than this, because
         the parent task is suspended until the child task finishes and
         thus it can't start further child tasks.  */
      if ((flags & 8) && thr->task && thr->task->depend_hash)
        gomp_task_maybe_wait_for_dependencies (depend);

      gomp_init_task (&task, thr->task, gomp_icv (false));
      task.kind = GOMP_TASK_IFFALSE;
      task.final_task = (thr->task && thr->task->final_task)
                        || (flags & 2);
      if (thr->task)
        {
          task.in_tied_task = thr->task->in_tied_task;
          task.taskgroup = thr->task->taskgroup;
        }
      thr->task = &task;
      if (__builtin_expect (cpyfn != NULL, 0))
        {
          char buf[arg_size + arg_align - 1];
          char *arg = (char *) (((uintptr_t) buf + arg_align - 1)
                                & ~(uintptr_t) (arg_align - 1));
          cpyfn (arg, data);
          fn (arg);
        }
      else
        fn (data);
      /* Access to "children" is normally done inside a task_lock
         mutex region, but the only way this particular task.children
         can be set is if this thread's task work function (fn)
         creates children.  So since the setter is *this* thread, we
         need no barriers here when testing for non-NULL.  We can have
         task.children set by the current thread then changed by a
         child thread, but seeing a stale non-NULL value is not a
         problem.  Once past the task_lock acquisition, this thread
         will see the real value of task.children.  */
      if (task.children != NULL)
        {
          gomp_mutex_lock (&team->task_lock);
          gomp_clear_parent (task.children);
          gomp_mutex_unlock (&team->task_lock);
        }
      gomp_end_task ();
    }
  else
    {
      struct gomp_task *task;
      struct gomp_task *parent = thr->task;
      struct gomp_taskgroup *taskgroup = parent->taskgroup;
      char *arg;
      bool do_wake;
      size_t depend_size = 0;

      if (flags & 8)
        depend_size = ((uintptr_t) depend[0]
                       * sizeof (struct gomp_task_depend_entry));
      task = gomp_malloc (sizeof (*task) + depend_size
                          + arg_size + arg_align - 1);
      arg = (char *) (((uintptr_t) (task + 1) + depend_size + arg_align - 1)
                      & ~(uintptr_t) (arg_align - 1));
      gomp_init_task (task, parent, gomp_icv (false));
      task->kind = GOMP_TASK_IFFALSE;
      task->in_tied_task = parent->in_tied_task;
      task->taskgroup = taskgroup;
      thr->task = task;
      if (cpyfn)
        {
          cpyfn (arg, data);
          task->copy_ctors_done = true;
        }
      else
        memcpy (arg, data, arg_size);
      thr->task = parent;
      task->kind = GOMP_TASK_WAITING;
      task->fn = fn;
      task->fn_data = arg;
      task->final_task = (flags & 2) >> 1;
      gomp_mutex_lock (&team->task_lock);
      /* If parallel or taskgroup has been cancelled, don't start new
         tasks.  */
      if (__builtin_expect ((gomp_team_barrier_cancelled (&team->barrier)
                             || (taskgroup && taskgroup->cancelled))
                            && !task->copy_ctors_done, 0))
        {
          gomp_mutex_unlock (&team->task_lock);
          gomp_finish_task (task);
          free (task);
          return;
        }
      if (taskgroup)
        taskgroup->num_children++;
      if (depend_size)
        {
          size_t ndepend = (uintptr_t) depend[0];
          size_t nout = (uintptr_t) depend[1];
          size_t i;
          hash_entry_type ent;

          task->depend_count = ndepend;
          task->num_dependees = 0;
          if (parent->depend_hash == NULL)
            parent->depend_hash
              = htab_create (2 * ndepend > 12 ? 2 * ndepend : 12);
          for (i = 0; i < ndepend; i++)
            {
              task->depend[i].addr = depend[2 + i];
              task->depend[i].next = NULL;
              task->depend[i].prev = NULL;
              task->depend[i].task = task;
              task->depend[i].is_in = i >= nout;
              task->depend[i].redundant = false;
              task->depend[i].redundant_out = false;

              hash_entry_type *slot
                = htab_find_slot (&parent->depend_hash, &task->depend[i],
                                  INSERT);
              hash_entry_type out = NULL, last = NULL;
              if (*slot)
                {
                  /* If multiple depends on the same task are the
                     same, all but the first one are redundant.
                     As inout/out come first, if any of them is
                     inout/out, it will win, which is the right
                     semantics.  */
                  if ((*slot)->task == task)
                    {
                      task->depend[i].redundant = true;
                      continue;
                    }
                  for (ent = *slot; ent; ent = ent->next)
                    {
                      if (ent->redundant_out)
                        break;

                      last = ent;

                      /* depend(in:...) doesn't depend on earlier
                         depend(in:...).  */
                      if (i >= nout && ent->is_in)
                        continue;

                      if (!ent->is_in)
                        out = ent;

                      struct gomp_task *tsk = ent->task;
                      if (tsk->dependers == NULL)
                        {
                          tsk->dependers
                            = gomp_malloc (sizeof (struct gomp_dependers_vec)
                                           + 6 * sizeof (struct gomp_task *));
                          tsk->dependers->n_elem = 1;
                          tsk->dependers->allocated = 6;
                          tsk->dependers->elem[0] = task;
                          task->num_dependees++;
                          continue;
                        }
                      /* We already have some other dependency on tsk
                         from earlier depend clause.  */
                      else if (tsk->dependers->n_elem
                               && (tsk->dependers->elem[tsk->dependers->n_elem
                                                        - 1]
                                   == task))
                        continue;
                      else if (tsk->dependers->n_elem
                               == tsk->dependers->allocated)
                        {
                          tsk->dependers->allocated
                            = tsk->dependers->allocated * 2 + 2;
                          tsk->dependers
                            = gomp_realloc (tsk->dependers,
                                            sizeof (struct gomp_dependers_vec)
                                            + (tsk->dependers->allocated
                                               * sizeof (struct gomp_task *)));
                        }
                      tsk->dependers->elem[tsk->dependers->n_elem++] = task;
                      task->num_dependees++;
                    }
                  task->depend[i].next = *slot;
                  (*slot)->prev = &task->depend[i];
                }
              *slot = &task->depend[i];

              /* There is no need to store more than one depend({,in}out:)
                 task per address in the hash table chain for the purpose
                 of creation of deferred tasks, because each out
                 depends on all earlier outs, thus it is enough to record
                 just the last depend({,in}out:).  For depend(in:), we need
                 to keep all of the previous ones not terminated yet,
                 because a later depend({,in}out:) might need to depend on
                 all of them.  So, if the new task's clause is
                 depend({,in}out:), we know there is at most one other
                 depend({,in}out:) clause in the list (out).  For
                 non-deferred tasks we want to see all outs, so they are
                 moved to the end of the chain, after first redundant_out
                 entry all following entries should be redundant_out.  */
              if (!task->depend[i].is_in && out)
                {
                  if (out != last)
                    {
                      out->next->prev = out->prev;
                      out->prev->next = out->next;
                      out->next = last->next;
                      out->prev = last;
                      last->next = out;
                      if (out->next)
                        out->next->prev = out;
                    }
                  out->redundant_out = true;
                }
            }
          if (task->num_dependees)
            {
              gomp_mutex_unlock (&team->task_lock);
              return;
            }
        }
      if (parent->children)
        {
          task->next_child = parent->children;
          task->prev_child = parent->children->prev_child;
          task->next_child->prev_child = task;
          task->prev_child->next_child = task;
        }
      else
        {
          task->next_child = task;
          task->prev_child = task;
        }
      parent->children = task;
      if (taskgroup)
        {
          if (taskgroup->children)
            {
              task->next_taskgroup = taskgroup->children;
              task->prev_taskgroup = taskgroup->children->prev_taskgroup;
              task->next_taskgroup->prev_taskgroup = task;
              task->prev_taskgroup->next_taskgroup = task;
            }
          else
            {
              task->next_taskgroup = task;
              task->prev_taskgroup = task;
            }
          taskgroup->children = task;
        }
      if (team->task_queue)
        {
          task->next_queue = team->task_queue;
          task->prev_queue = team->task_queue->prev_queue;
          task->next_queue->prev_queue = task;
          task->prev_queue->next_queue = task;
        }
      else
        {
          task->next_queue = task;
          task->prev_queue = task;
          team->task_queue = task;
        }
      ++team->task_count;
      ++team->task_queued_count;
      gomp_team_barrier_set_task_pending (&team->barrier);
      do_wake = team->task_running_count + !parent->in_tied_task
                < team->nthreads;
      gomp_mutex_unlock (&team->task_lock);
      if (do_wake)
        gomp_team_barrier_wake (&team->barrier, 1);
    }
}
void
GOMP_taskloop (void (*fn) (void *), void *data,
               void (*cpyfn) (void *, void *), long arg_size, long arg_align,
               unsigned flags, unsigned long num_tasks, int priority,
               TYPE start, TYPE end, TYPE step)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;

#ifdef HAVE_BROKEN_POSIX_SEMAPHORES
  /* If pthread_mutex_* is used for omp_*lock*, then each task must be
     tied to one thread all the time.  This means UNTIED tasks must be
     tied and if CPYFN is non-NULL IF(0) must be forced, as CPYFN
     might be running on different thread than FN.  */
  if (cpyfn)
    flags &= ~GOMP_TASK_FLAG_IF;
  flags &= ~GOMP_TASK_FLAG_UNTIED;
#endif

  /* If parallel or taskgroup has been cancelled, don't start new tasks.  */
  if (team && gomp_team_barrier_cancelled (&team->barrier))
    return;

#ifdef TYPE_is_long
  TYPE s = step;
  if (step > 0)
    {
      if (start >= end)
        return;
      s--;
    }
  else
    {
      if (start <= end)
        return;
      s++;
    }
  UTYPE n = (end - start + s) / step;
#else
  UTYPE n;
  if (flags & GOMP_TASK_FLAG_UP)
    {
      if (start >= end)
        return;
      n = (end - start + step - 1) / step;
    }
  else
    {
      if (start <= end)
        return;
      n = (start - end - step - 1) / -step;
    }
#endif

  TYPE task_step = step;
  unsigned long nfirst = n;
  if (flags & GOMP_TASK_FLAG_GRAINSIZE)
    {
      unsigned long grainsize = num_tasks;
#ifdef TYPE_is_long
      num_tasks = n / grainsize;
#else
      UTYPE ndiv = n / grainsize;
      num_tasks = ndiv;
      if (num_tasks != ndiv)
        num_tasks = ~0UL;
#endif
      if (num_tasks <= 1)
        {
          num_tasks = 1;
          task_step = end - start;
        }
      else if (num_tasks >= grainsize
#ifndef TYPE_is_long
               && num_tasks != ~0UL
#endif
              )
        {
          UTYPE mul = num_tasks * grainsize;
          task_step = (TYPE) grainsize * step;
          if (mul != n)
            {
              task_step += step;
              nfirst = n - mul - 1;
            }
        }
      else
        {
          UTYPE div = n / num_tasks;
          UTYPE mod = n % num_tasks;
          task_step = (TYPE) div * step;
          if (mod)
            {
              task_step += step;
              nfirst = mod - 1;
            }
        }
    }
  else
    {
      if (num_tasks == 0)
        num_tasks = team ? team->nthreads : 1;
      if (num_tasks >= n)
        num_tasks = n;
      else
        {
          UTYPE div = n / num_tasks;
          UTYPE mod = n % num_tasks;
          task_step = (TYPE) div * step;
          if (mod)
            {
              task_step += step;
              nfirst = mod - 1;
            }
        }
    }

  if (flags & GOMP_TASK_FLAG_NOGROUP)
    {
      if (thr->task && thr->task->taskgroup
          && thr->task->taskgroup->cancelled)
        return;
    }
  else
    ialias_call (GOMP_taskgroup_start) ();

  if (priority > gomp_max_task_priority_var)
    priority = gomp_max_task_priority_var;

  if ((flags & GOMP_TASK_FLAG_IF) == 0 || team == NULL
      || (thr->task && thr->task->final_task)
      || team->task_count + num_tasks > 64 * team->nthreads)
    {
      unsigned long i;
      if (__builtin_expect (cpyfn != NULL, 0))
        {
          struct gomp_task task[num_tasks];
          struct gomp_task *parent = thr->task;
          arg_size = (arg_size + arg_align - 1) & ~(arg_align - 1);
          char buf[num_tasks * arg_size + arg_align - 1];
          char *arg = (char *) (((uintptr_t) buf + arg_align - 1)
                                & ~(uintptr_t) (arg_align - 1));
          char *orig_arg = arg;
          for (i = 0; i < num_tasks; i++)
            {
              gomp_init_task (&task[i], parent, gomp_icv (false));
              task[i].priority = priority;
              task[i].kind = GOMP_TASK_UNDEFERRED;
              task[i].final_task = (thr->task && thr->task->final_task)
                                   || (flags & GOMP_TASK_FLAG_FINAL);
              if (thr->task)
                {
                  task[i].in_tied_task = thr->task->in_tied_task;
                  task[i].taskgroup = thr->task->taskgroup;
                }
              thr->task = &task[i];
              cpyfn (arg, data);
              arg += arg_size;
            }
          arg = orig_arg;
          for (i = 0; i < num_tasks; i++)
            {
              thr->task = &task[i];
              ((TYPE *) arg)[0] = start;
              start += task_step;
              ((TYPE *) arg)[1] = start;
              if (i == nfirst)
                task_step -= step;
              fn (arg);
              arg += arg_size;
              if (!priority_queue_empty_p (&task[i].children_queue,
                                           MEMMODEL_RELAXED))
                {
                  gomp_mutex_lock (&team->task_lock);
                  gomp_clear_parent (&task[i].children_queue);
                  gomp_mutex_unlock (&team->task_lock);
                }
              gomp_end_task ();
            }
        }
      else
        for (i = 0; i < num_tasks; i++)
          {
            struct gomp_task task;

            gomp_init_task (&task, thr->task, gomp_icv (false));
            task.priority = priority;
            task.kind = GOMP_TASK_UNDEFERRED;
            task.final_task = (thr->task && thr->task->final_task)
                              || (flags & GOMP_TASK_FLAG_FINAL);
            if (thr->task)
              {
                task.in_tied_task = thr->task->in_tied_task;
                task.taskgroup = thr->task->taskgroup;
              }
            thr->task = &task;
            ((TYPE *) data)[0] = start;
            start += task_step;
            ((TYPE *) data)[1] = start;
            if (i == nfirst)
              task_step -= step;
            fn (data);
            if (!priority_queue_empty_p (&task.children_queue,
                                         MEMMODEL_RELAXED))
              {
                gomp_mutex_lock (&team->task_lock);
                gomp_clear_parent (&task.children_queue);
                gomp_mutex_unlock (&team->task_lock);
              }
            gomp_end_task ();
          }
    }
  else
    {
      struct gomp_task *tasks[num_tasks];
      struct gomp_task *parent = thr->task;
      struct gomp_taskgroup *taskgroup = parent->taskgroup;
      char *arg;
      int do_wake;
      unsigned long i;

      for (i = 0; i < num_tasks; i++)
        {
          struct gomp_task *task
            = gomp_malloc (sizeof (*task) + arg_size + arg_align - 1);
          tasks[i] = task;
          arg = (char *) (((uintptr_t) (task + 1) + arg_align - 1)
                          & ~(uintptr_t) (arg_align - 1));
          gomp_init_task (task, parent, gomp_icv (false));
          task->priority = priority;
          task->kind = GOMP_TASK_UNDEFERRED;
          task->in_tied_task = parent->in_tied_task;
          task->taskgroup = taskgroup;
          thr->task = task;
          if (cpyfn)
            {
              cpyfn (arg, data);
              task->copy_ctors_done = true;
            }
          else
            memcpy (arg, data, arg_size);
          ((TYPE *) arg)[0] = start;
          start += task_step;
          ((TYPE *) arg)[1] = start;
          if (i == nfirst)
            task_step -= step;
          thr->task = parent;
          task->kind = GOMP_TASK_WAITING;
          task->fn = fn;
          task->fn_data = arg;
          task->final_task = (flags & GOMP_TASK_FLAG_FINAL) >> 1;
        }
      gomp_mutex_lock (&team->task_lock);
      /* If parallel or taskgroup has been cancelled, don't start new
         tasks.  */
      if (__builtin_expect ((gomp_team_barrier_cancelled (&team->barrier)
                             || (taskgroup && taskgroup->cancelled))
                            && cpyfn == NULL, 0))
        {
          gomp_mutex_unlock (&team->task_lock);
          for (i = 0; i < num_tasks; i++)
            {
              gomp_finish_task (tasks[i]);
              free (tasks[i]);
            }
          if ((flags & GOMP_TASK_FLAG_NOGROUP) == 0)
            ialias_call (GOMP_taskgroup_end) ();
          return;
        }
      if (taskgroup)
        taskgroup->num_children += num_tasks;
      for (i = 0; i < num_tasks; i++)
        {
          struct gomp_task *task = tasks[i];
          priority_queue_insert (PQ_CHILDREN, &parent->children_queue,
                                 task, priority,
                                 PRIORITY_INSERT_BEGIN,
                                 /*last_parent_depends_on=*/false,
                                 task->parent_depends_on);
          if (taskgroup)
            priority_queue_insert (PQ_TASKGROUP, &taskgroup->taskgroup_queue,
                                   task, priority, PRIORITY_INSERT_BEGIN,
                                   /*last_parent_depends_on=*/false,
                                   task->parent_depends_on);
          priority_queue_insert (PQ_TEAM, &team->task_queue, task, priority,
                                 PRIORITY_INSERT_END,
                                 /*last_parent_depends_on=*/false,
                                 task->parent_depends_on);
          ++team->task_count;
          ++team->task_queued_count;
        }
      gomp_team_barrier_set_task_pending (&team->barrier);
      if (team->task_running_count + !parent->in_tied_task
          < team->nthreads)
        {
          do_wake = team->nthreads - team->task_running_count
                    - !parent->in_tied_task;
          if ((unsigned long) do_wake > num_tasks)
            do_wake = num_tasks;
        }
      else
        do_wake = 0;
      gomp_mutex_unlock (&team->task_lock);
      if (do_wake)
        gomp_team_barrier_wake (&team->barrier, do_wake);
    }
  if ((flags & GOMP_TASK_FLAG_NOGROUP) == 0)
    ialias_call (GOMP_taskgroup_end) ();
}
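/* Standalone toy re-implementation of the chunking arithmetic above (the
   non-grainsize path, positive step), printing the half-open [begin, end)
   range each generated task would receive in ((TYPE *) arg)[0] and [1].
   This is a worked example only; the function and its names are not part
   of libgomp.  Compile with a C99 compiler and <stdio.h>.  */

#include <stdio.h>

static void
show_taskloop_chunks (long start, long end, long step,
                      unsigned long num_tasks)
{
  unsigned long n = (end - start + step - 1) / step;  /* iteration count */
  long task_step = step;
  unsigned long nfirst = n;

  if (num_tasks >= n)
    num_tasks = n;
  else
    {
      unsigned long div = n / num_tasks;
      unsigned long mod = n % num_tasks;
      task_step = (long) div * step;
      if (mod)
        {
          /* The first MOD tasks get one extra iteration; NFIRST is the
             index of the last such task, after which TASK_STEP shrinks.  */
          task_step += step;
          nfirst = mod - 1;
        }
    }
  for (unsigned long i = 0; i < num_tasks; i++)
    {
      long begin = start;
      start += task_step;
      printf ("task %lu: [%ld, %ld)\n", i, begin, start);
      if (i == nfirst)
        task_step -= step;
    }
}

int
main (void)
{
  /* 10 iterations over 4 tasks split as 3 + 3 + 2 + 2.  */
  show_taskloop_chunks (0, 10, 1, 4);
  return 0;
}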