static void tMPI_Init_initers(void) { int state; /* we can pre-check because it's atomic */ if (tMPI_Atomic_get(&init_inited) == 0) { /* this can be a spinlock because the chances of collision are low. */ tMPI_Spinlock_lock( &init_init ); state=tMPI_Atomic_get(&init_inited); tMPI_Atomic_memory_barrier_acq(); if (state == 0) { InitializeCriticalSection(&mutex_init); InitializeCriticalSection(&once_init); InitializeCriticalSection(&cond_init); InitializeCriticalSection(&barrier_init); /* fatal errors are handled by the routine by calling tMPI_Fatal_error() */ tMPI_Init_NUMA(); tMPI_Atomic_memory_barrier_rel(); tMPI_Atomic_set(&init_inited, 1); } tMPI_Spinlock_unlock( &init_init ); } }
/* Perform one-time initialization of the Windows threading primitives and
 * the thread-id list.
 *
 * Double-checked locking: a lock-free pre-check of the atomic init_inited
 * flag, then a spinlock-protected re-check before the actual setup.
 *
 * Returns 0 on success, or the non-zero error code propagated from
 * tMPI_Init_NUMA() / tMPI_Thread_id_list_init() on failure.  On failure
 * the spinlock is released via the err: path and init_inited is left 0.
 * NOTE(review): on a failure after the InitializeCriticalSection calls, a
 * retry will re-initialize already-initialized critical sections — verify
 * that callers treat a non-zero return as fatal rather than retrying. */
static int tMPI_Init_initers(void)
{
    int state;
    int ret = 0;

    /* we can pre-check because it's atomic */
    if (tMPI_Atomic_get(&init_inited) == 0)
    {
        /* this can be a spinlock because the chances of collision are low. */
        tMPI_Spinlock_lock( &init_init );
        state = tMPI_Atomic_get(&init_inited);
        /* acquire barrier pairs with the release barrier below */
        tMPI_Atomic_memory_barrier_acq();
        if (state == 0)
        {
            InitializeCriticalSection(&mutex_init);
            InitializeCriticalSection(&once_init);
            InitializeCriticalSection(&cond_init);
            InitializeCriticalSection(&barrier_init);
            InitializeCriticalSection(&thread_id_list_lock);

            ret = tMPI_Init_NUMA();
            if (ret != 0)
            {
                goto err;
            }

            ret = tMPI_Thread_id_list_init();
            if (ret != 0)
            {
                goto err;
            }

            /* publish: release barrier before setting the flag, so the
               setup above is visible to any thread that sees the flag */
            tMPI_Atomic_memory_barrier_rel();
            tMPI_Atomic_set(&init_inited, 1);
        }
        tMPI_Spinlock_unlock( &init_init );
    }
    return ret;
err:
    /* error path: release the spinlock before propagating the error */
    tMPI_Spinlock_unlock( &init_init );
    return ret;
}