void
GOMP_parallel_end (void)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  if (__builtin_expect (icv->thread_limit_var != UINT_MAX, 0))
    {
      struct gomp_thread *thr = gomp_thread ();
      struct gomp_team *team = thr->ts.team;
      unsigned int nthreads = team ? team->nthreads : 1;
      gomp_team_end ();
      if (nthreads > 1)
	{
	  /* If not nested, there is just one thread in the contention
	     group left, no need for atomicity.  */
	  if (thr->ts.team == NULL)
	    thr->thread_pool->threads_busy = 1;
	  else
	    {
#ifdef HAVE_SYNC_BUILTINS
	      __sync_fetch_and_add (&thr->thread_pool->threads_busy,
				    1UL - nthreads);
#else
	      gomp_mutex_lock (&gomp_managed_threads_lock);
	      thr->thread_pool->threads_busy -= nthreads - 1;
	      gomp_mutex_unlock (&gomp_managed_threads_lock);
#endif
	    }
	}
    }
  else
    gomp_team_end ();
}
void
GOMP_parallel_end (void)
{
  if (__builtin_expect (gomp_thread_limit_var != ULONG_MAX, 0))
    {
      struct gomp_thread *thr = gomp_thread ();
      struct gomp_team *team = thr->ts.team;
      if (team && team->nthreads > 1)
	{
#ifdef HAVE_SYNC_BUILTINS
	  __sync_fetch_and_add (&gomp_remaining_threads_count,
				1UL - team->nthreads);
#else
	  gomp_mutex_lock (&gomp_remaining_threads_lock);
	  gomp_remaining_threads_count -= team->nthreads - 1;
	  gomp_mutex_unlock (&gomp_remaining_threads_lock);
#endif
	}
    }
  gomp_team_end ();
}
void
GOMP_parallel_end (void)
{
  int i;
  unsigned int myid, timer;
  gomp_team_t *the_team;

#ifdef STATS_ENABLE
  timers[8] = stop_timer();
#endif

  myid = prv_proc_num;
  the_team = (gomp_team_t *) CURR_TEAM(myid);

  /* Wait for every thread of the team on the master-slave barrier
     before tearing the team down.  */
  MSlaveBarrier_Wait(_ms_barrier, the_team->nthreads, the_team->proc_ids);

  gomp_team_end();

#ifdef STATS_ENABLE
  timers[13] = stop_timer();
#endif
}
void
GOMP_parallel_end (void)
{
  gomp_team_end ();
}
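Each of the variants above is the team-join entry point that compiler-generated code calls when a parallel region ends. For context, a minimal sketch of how that call site looks under the classic libgomp ABI follows; the outlined function, its data struct, and the printf body are illustrative assumptions, only GOMP_parallel_start and GOMP_parallel_end are the real entry points.

/* Sketch of compiler-generated code for "#pragma omp parallel"
   (assumption: classic GOMP_parallel_start/GOMP_parallel_end ABI).
   region_fn and struct region_data are hypothetical names.
   Build by linking against libgomp, e.g. gcc sketch.c -lgomp.  */
#include <stdio.h>

extern void GOMP_parallel_start (void (*fn) (void *), void *data,
				 unsigned num_threads);
extern void GOMP_parallel_end (void);

struct region_data { int x; };	/* data shared with the team */

static void
region_fn (void *data)		/* outlined body of the parallel region */
{
  struct region_data *d = data;
  printf ("team thread sees x = %d\n", d->x);
}

int
main (void)
{
  struct region_data d = { .x = 42 };
  GOMP_parallel_start (region_fn, &d, 0);  /* 0 = default thread count */
  region_fn (&d);			   /* the master runs the body too */
  GOMP_parallel_end ();			   /* join the team (see above) */
  return 0;
}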