static void __attribute__((constructor))
initialize_critical (void)
{
  gomp_mutex_init (&default_lock);
  gomp_mutex_init (&atomic_lock);
#ifndef HAVE_SYNC_BUILTINS
  gomp_mutex_init (&create_lock_lock);
#endif
}
void
GOMP_critical_name_start (void **pptr)
{
  gomp_mutex_t *plock;

  /* If a mutex fits within the space for a pointer, and is zero
     initialized, then use the pointer space directly.  */
  if (GOMP_MUTEX_INIT_0
      && sizeof (gomp_mutex_t) <= sizeof (void *)
      && __alignof (gomp_mutex_t) <= sizeof (void *))
    plock = (gomp_mutex_t *) pptr;

  /* Otherwise we have to be prepared to malloc storage.  */
  else
    {
      plock = *pptr;

      if (plock == NULL)
        {
#ifdef HAVE_SYNC_BUILTINS
          gomp_mutex_t *nlock = gomp_malloc (sizeof (gomp_mutex_t));
          gomp_mutex_init (nlock);

          plock = __sync_val_compare_and_swap (pptr, NULL, nlock);
          if (plock != NULL)
            {
              /* Another thread installed its lock first; discard ours.  */
              gomp_mutex_destroy (nlock);
              gomp_free (nlock);
            }
          else
            plock = nlock;
#else
          gomp_mutex_lock (&create_lock_lock);
          plock = *pptr;
          if (plock == NULL)
            {
              plock = gomp_malloc (sizeof (gomp_mutex_t));
              gomp_mutex_init (plock);
              __sync_synchronize ();
              *pptr = plock;
            }
          gomp_mutex_unlock (&create_lock_lock);
#endif
        }
    }

  gomp_mutex_lock (plock);
  /* OMP v3.1, 2.8.6 p81,l16 - "At entry to critical regions" */
  gomp_flush0 ();
}
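The HAVE_SYNC_BUILTINS branch above is a compare-and-swap variant of double-checked locking: every racing thread may allocate a candidate lock, but only the CAS winner's candidate survives. A minimal standalone sketch of the same pattern, using pthread_mutex_t and the GCC __atomic builtins as stand-ins for the libgomp internals (the function name and types here are illustrative, not libgomp's):

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

static pthread_mutex_t *
get_named_lock (void **pptr)
{
  pthread_mutex_t *plock
    = __atomic_load_n ((pthread_mutex_t **) pptr, __ATOMIC_ACQUIRE);
  if (plock == NULL)
    {
      /* Optimistically allocate and initialize a candidate lock.  */
      pthread_mutex_t *nlock = malloc (sizeof *nlock);
      pthread_mutex_init (nlock, NULL);
      /* Publish it; exactly one racer's CAS succeeds.  A loser frees
         its candidate and uses the winner's lock, which the failed
         CAS has written back into plock.  */
      if (__atomic_compare_exchange_n ((pthread_mutex_t **) pptr, &plock,
                                       nlock, false,
                                       __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE))
        plock = nlock;
      else
        {
          pthread_mutex_destroy (nlock);
          free (nlock);
        }
    }
  return plock;
}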
struct gomp_team *
gomp_new_team (unsigned nthreads)
{
  struct gomp_team *team;
  int i;

  team = get_last_team (nthreads);
  if (team == NULL)
    {
      size_t extra = sizeof (team->ordered_release[0])
                     + sizeof (team->implicit_task[0]);
      team = gomp_malloc (sizeof (*team) + nthreads * extra);

#ifndef HAVE_SYNC_BUILTINS
      gomp_mutex_init (&team->work_share_list_free_lock);
#endif
      gomp_barrier_init (&team->barrier, nthreads);
      gomp_mutex_init (&team->task_lock);
      team->nthreads = nthreads;
    }

  team->work_share_chunk = 8;
#ifdef HAVE_SYNC_BUILTINS
  team->single_count = 0;
#endif
  team->work_shares_to_free = &team->work_shares[0];
  gomp_init_work_share (&team->work_shares[0], false, nthreads);
  team->work_shares[0].next_alloc = NULL;
  team->work_share_list_free = NULL;
  team->work_share_list_alloc = &team->work_shares[1];
  for (i = 1; i < 7; i++)
    team->work_shares[i].next_free = &team->work_shares[i + 1];
  team->work_shares[i].next_free = NULL;

  gomp_sem_init (&team->master_release, 0);
  team->ordered_release = (void *) &team->implicit_task[nthreads];
  team->ordered_release[0] = &team->master_release;

  priority_queue_init (&team->task_queue);
  team->task_count = 0;
  team->task_queued_count = 0;
  team->task_running_count = 0;
  team->work_share_cancelled = 0;
  team->team_cancelled = 0;
  return team;
}
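Both gomp_new_team variants carve several per-thread arrays out of a single gomp_malloc: the implicit_task flexible array sits at the end of the struct, and ordered_release is pointed just past its last element. A minimal sketch of that single-allocation layout, with illustrative stand-in types rather than libgomp's:

#include <stdlib.h>

struct task { void *fn; int id; };   /* pointer member keeps the tail
                                        pointer array aligned */
struct sem { int count; };

struct team
{
  unsigned nthreads;
  struct sem **ordered_release;      /* points into the same block */
  struct task implicit_task[];       /* flexible array member */
};

static struct team *
team_alloc (unsigned nthreads)
{
  /* One allocation: header + nthreads tasks + nthreads pointers.  */
  size_t extra = sizeof (struct task) + sizeof (struct sem *);
  struct team *t = malloc (sizeof *t + nthreads * extra);
  t->nthreads = nthreads;
  /* The pointer array lives right behind the last implicit task.  */
  t->ordered_release = (void *) &t->implicit_task[nthreads];
  return t;
}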
attribute_hidden void
goacc_runtime_initialize (void)
{
  gomp_mutex_init (&acc_device_lock);

#if !(defined HAVE_TLS || defined USE_EMUTLS)
  pthread_key_create (&goacc_tls_key, NULL);
#endif

  pthread_key_create (&goacc_cleanup_key, goacc_destroy_thread);

  cached_base_dev = NULL;

  goacc_threads = NULL;
  gomp_mutex_init (&goacc_thread_lock);
}
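On targets without usable TLS, goacc_runtime_initialize falls back to pthread keys, and the destructor registered with goacc_cleanup_key tears a thread down when it exits. A sketch of that standard POSIX idiom with illustrative names (goacc_destroy_thread itself is not shown in the snippet):

#include <pthread.h>
#include <stdlib.h>

static pthread_key_t cleanup_key;

/* Runs automatically when a thread exits with a non-NULL value
   stored under cleanup_key.  */
static void
destroy_thread_data (void *data)
{
  free (data);
}

static void
runtime_init (void)
{
  pthread_key_create (&cleanup_key, destroy_thread_data);
}

static void
thread_init (void)
{
  /* Storing a value arms the destructor for this thread.  */
  pthread_setspecific (cleanup_key, malloc (64));
}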
void
gomp_init_lock_30 (omp_lock_t *lock)
{
#if __x86_64__ && USE_LITHE
  fprintf (stderr, "unimplemented (%s:%d)\n", __FILE__, __LINE__);
  abort ();
#else
  gomp_mutex_init (lock);
#endif /* __x86_64__ && USE_LITHE */
}
struct gomp_team *
gomp_new_team (unsigned nthreads)
{
  struct gomp_team *team;
  size_t size;
  int i;

  size = sizeof (*team) + nthreads * (sizeof (team->ordered_release[0])
                                      + sizeof (team->implicit_task[0]));
  team = gomp_malloc (size);

  team->work_share_chunk = 8;
#ifdef HAVE_SYNC_BUILTINS
  team->single_count = 0;
#else
  gomp_mutex_init (&team->work_share_list_free_lock);
#endif
  gomp_init_work_share (&team->work_shares[0], false, nthreads);
  team->work_shares[0].next_alloc = NULL;
  team->work_share_list_free = NULL;
  team->work_share_list_alloc = &team->work_shares[1];
  for (i = 1; i < 7; i++)
    team->work_shares[i].next_free = &team->work_shares[i + 1];
  team->work_shares[i].next_free = NULL;

  team->nthreads = nthreads;
  gomp_barrier_init (&team->barrier, nthreads);

  gomp_sem_init (&team->master_release, 0);
  team->ordered_release = (void *) &team->implicit_task[nthreads];
  team->ordered_release[0] = &team->master_release;

  gomp_mutex_init (&team->task_lock);
  team->task_queue = NULL;
  team->task_count = 0;
  team->task_running_count = 0;

  return team;
}
static void
allocate_thread_pool_reservoir (unsigned long count, unsigned long priority,
                                unsigned long scheduler)
{
  struct gomp_thread_pool_reservoir *res;
  struct gomp_thread_pool *pools;
  unsigned long i;
  size_t size;

  res = gomp_thread_pool_reservoirs[scheduler];
  if (res != NULL)
    gomp_fatal ("Multiple thread pool reservoir initialization");

  size = sizeof (*res) + count * (sizeof (pools) + sizeof (*pools));
  pools = gomp_malloc (size);
  memset (pools, 0, size);

  res = (struct gomp_thread_pool_reservoir *) (pools + count);
  res->index = count;
  res->priority = priority;
  gomp_sem_init (&res->available, count);
  gomp_mutex_init (&res->lock);

  for (i = 0; i < count; ++i)
    res->pools[i] = &pools[i];

  gomp_thread_pool_reservoirs[scheduler] = res;
}
static struct gomp_team *
new_team (unsigned nthreads, struct gomp_work_share *work_share)
{
  struct gomp_team *team;
  size_t size;

  size = sizeof (*team) + nthreads * sizeof (team->ordered_release[0]);
  team = gomp_malloc (size);

  gomp_mutex_init (&team->work_share_lock);

  team->work_shares = gomp_malloc (4 * sizeof (struct gomp_work_share *));
  team->generation_mask = 3;
  team->oldest_live_gen = work_share == NULL;
  team->num_live_gen = work_share != NULL;
  team->work_shares[0] = work_share;

  team->nthreads = nthreads;
  gomp_barrier_init (&team->barrier, nthreads);

  gomp_sem_init (&team->master_release, 0);
  team->ordered_release[0] = &team->master_release;

  return team;
}
/* Register this device type.  */

void
goacc_host_init (void)
{
  gomp_mutex_init (&host_dispatch.lock);
  goacc_register (&host_dispatch);
}
static void __attribute__((constructor))
initialize_atomic (void)
{
  gomp_mutex_init (&atomic_lock);
}
void
gomp_init_lock_30 (omp_lock_t *lock)
{
  gomp_mutex_init (lock);
}
void
omp_init_lock (omp_lock_t *lock)
{
  gomp_mutex_init (lock);
}
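For contrast, the caller's side of the last two entries: a standard OpenMP program whose omp_init_lock call ends up in gomp_mutex_init. Only the standard lock API is used here; compile with -fopenmp.

#include <omp.h>
#include <stdio.h>

int
main (void)
{
  omp_lock_t lock;
  int counter = 0;

  omp_init_lock (&lock);        /* reaches gomp_mutex_init above */
#pragma omp parallel num_threads(4)
  {
    omp_set_lock (&lock);       /* serialize access to counter */
    ++counter;
    omp_unset_lock (&lock);
  }
  omp_destroy_lock (&lock);

  printf ("counter = %d\n", counter);   /* prints 4 */
  return 0;
}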