/* Atomically AND the value d into *p and return the old value of *p. */
kmp_uint32
__kmp_test_then_and32( volatile kmp_uint32 *p, kmp_uint32 d )
{
    kmp_uint32 old_value, new_value;

    old_value = TCR_4( *p );
    new_value = old_value & d;

    while ( ! __kmp_compare_and_store32( (volatile kmp_int32 *) p, old_value, new_value ) )
    {
        KMP_CPU_PAUSE();    /* back off, then reread *p and retry */
        old_value = TCR_4( *p );
        new_value = old_value & d;
    }
    return old_value;
}
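/* Illustrative sketch (not part of the runtime build): the same
   fetch-and-AND retry loop expressed with C11 atomics instead of the
   TCR_4 / __kmp_compare_and_store32 / KMP_CPU_PAUSE macros. On CAS
   failure, atomic_compare_exchange_weak reloads the current value of
   *p into old_value, so the AND is recomputed against fresh data. */
#if 0
#include <stdatomic.h>
#include <stdint.h>

static uint32_t
fetch_then_and32( _Atomic uint32_t *p, uint32_t d )
{
    uint32_t old_value = atomic_load( p );
    /* retry until no other thread updates *p between load and CAS */
    while ( ! atomic_compare_exchange_weak( p, &old_value, old_value & d ) )
        ;   /* old_value was refreshed by the failed CAS */
    return old_value;   /* the value *p held before the AND */
}
#endif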
/* We are called from __kmp_serial_initialize() with __kmp_initz_lock held. */
void
__kmp_common_initialize( void )
{
    if( ! TCR_4(__kmp_init_common) ) {
        int q;
#ifdef KMP_DEBUG
        int gtid;
#endif

        __kmp_threadpriv_cache_list = NULL;

#ifdef KMP_DEBUG
        /* verify the uber masters were initialized */
        for( gtid = 0; gtid < __kmp_threads_capacity; gtid++ )
            if( __kmp_root[gtid] ) {
                KMP_DEBUG_ASSERT( __kmp_root[gtid]->r.r_uber_thread );
                for ( q = 0; q < KMP_HASH_TABLE_SIZE; ++q )
                    KMP_DEBUG_ASSERT( !__kmp_root[gtid]->r.r_uber_thread->th.th_pri_common->data[q] );
/*                    __kmp_root[ gtid ]-> r.r_uber_thread -> th.th_pri_common -> data[ q ] = 0; */
            }
#endif /* KMP_DEBUG */

        for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q)
            __kmp_threadprivate_d_table.data[ q ] = 0;

        TCW_4(__kmp_init_common, TRUE);
    }
}
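/* Illustrative sketch (not part of the runtime build): the guarded
   one-time-initialization pattern used above, restated with C11
   atomics. As with __kmp_common_initialize(), it assumes the caller
   serializes against concurrent initializers (there, via
   __kmp_initz_lock); the flag only makes the "already done" fast path
   safe to read without the lock. All names below are hypothetical. */
#if 0
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

#define TABLE_SIZE 512            /* stand-in for KMP_HASH_TABLE_SIZE */

static atomic_bool init_common;   /* stand-in for __kmp_init_common */
static void *d_table[TABLE_SIZE]; /* stand-in for the shared hash table */

static void
common_initialize( void )
{
    if ( ! atomic_load( &init_common ) ) {        /* cf. TCR_4(...)       */
        for ( int q = 0; q < TABLE_SIZE; ++q )
            d_table[q] = NULL;                    /* empty every bucket   */
        atomic_store( &init_common, true );       /* cf. TCW_4(..., TRUE) */
    }
}
#endif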
/* Call all destructors for threadprivate data belonging to all threads.
   Currently unused! */
void
__kmp_common_destroy( void )
{
    if( TCR_4(__kmp_init_common) ) {
        int q;

        TCW_4(__kmp_init_common, FALSE);

        for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) {
            int gtid;
            struct private_common *tn;
            struct shared_common  *d_tn;

            /* C++ destructors need to be called once per thread before exiting.
               Don't call destructors for the master thread, though, unless we
               used a copy constructor. */

            for (d_tn = __kmp_threadprivate_d_table.data[ q ]; d_tn; d_tn = d_tn->next) {
                if (d_tn->is_vec) {
                    if (d_tn->dt.dtorv != 0) {
                        for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
                            if( __kmp_threads[gtid] ) {
                                if( (__kmp_foreign_tp) ? (! KMP_INITIAL_GTID (gtid)) :
                                                         (! KMP_UBER_GTID (gtid)) ) {
                                    tn = __kmp_threadprivate_find_task_common( __kmp_threads[ gtid ]->th.th_pri_common,
                                                                               gtid, d_tn->gbl_addr );
                                    if (tn) {
                                        (*d_tn->dt.dtorv) (tn->par_addr, d_tn->vec_len);
                                    }
                                }
                            }
                        }
                        if (d_tn->obj_init != 0) {
                            (*d_tn->dt.dtorv) (d_tn->obj_init, d_tn->vec_len);
                        }
                    }
                } else {
                    if (d_tn->dt.dtor != 0) {
                        for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
                            if( __kmp_threads[gtid] ) {
                                if( (__kmp_foreign_tp) ? (! KMP_INITIAL_GTID (gtid)) :
                                                         (! KMP_UBER_GTID (gtid)) ) {
                                    tn = __kmp_threadprivate_find_task_common( __kmp_threads[ gtid ]->th.th_pri_common,
                                                                               gtid, d_tn->gbl_addr );
                                    if (tn) {
                                        (*d_tn->dt.dtor) (tn->par_addr);
                                    }
                                }
                            }
                        }
                        if (d_tn->obj_init != 0) {
                            (*d_tn->dt.dtor) (d_tn->obj_init);
                        }
                    }
                }
            }
            __kmp_threadprivate_d_table.data[ q ] = 0;
        }
    }
}
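/* Illustrative sketch (not part of the runtime build): a miniature of
   the dtor/dtorv dispatch performed above. Scalar threadprivate
   objects carry a one-argument destructor; "vector" (array) objects
   carry a destructor that also receives the element count, the two
   sharing a union just as struct shared_common's dt field does. The
   struct and helper below are hypothetical. */
#if 0
#include <stddef.h>

typedef void (*dtor_fn)  ( void * );
typedef void (*dtorv_fn) ( void *, size_t );

struct tp_descriptor {
    int    is_vec;                              /* array-valued object?   */
    size_t vec_len;                             /* element count if so    */
    union { dtor_fn dtor; dtorv_fn dtorv; } dt; /* cf. shared_common.dt   */
};

static void
destroy_instance( struct tp_descriptor *d, void *addr )
{
    if ( d->is_vec ) {
        if ( d->dt.dtorv )
            (*d->dt.dtorv)( addr, d->vec_len ); /* destroy all elements   */
    } else {
        if ( d->dt.dtor )
            (*d->dt.dtor)( addr );              /* destroy the one object */
    }
}
#endif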
/* Call all destructors for threadprivate data belonging to this thread */
void
__kmp_common_destroy_gtid( int gtid )
{
    struct private_common *tn;
    struct shared_common *d_tn;

    KC_TRACE( 10, ("__kmp_common_destroy_gtid: T#%d called\n", gtid ) );
    if( (__kmp_foreign_tp) ? (! KMP_INITIAL_GTID (gtid)) :
                             (! KMP_UBER_GTID (gtid)) ) {

        if( TCR_4(__kmp_init_common) ) {
#if KMP_THREADPRIVATE_TLS
            for( d_tn = kmpc_threadprivate_d_table_data_head_local; d_tn != NULL;
                 d_tn = d_tn->next ) {
                // call destructor
                if (d_tn->dt.dtor)
                    d_tn->dt.dtor(NULL);
            }
#else
            /* Cannot do this here since not all threads have destroyed their data */
            /* TCW_4(__kmp_init_common, FALSE); */

            for (tn = __kmp_threads[ gtid ]->th.th_pri_head; tn; tn = tn->link) {

                d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table,
                                                      gtid, tn->gbl_addr );

                KMP_DEBUG_ASSERT( d_tn );

                if (d_tn->is_vec) {
                    if (d_tn->dt.dtorv != 0) {
                        (void) (*d_tn->dt.dtorv) (tn->par_addr, d_tn->vec_len);
                    }
                    if (d_tn->obj_init != 0) {
                        (void) (*d_tn->dt.dtorv) (d_tn->obj_init, d_tn->vec_len);
                    }
                } else {
                    if (d_tn->dt.dtor != 0) {
                        (void) (*d_tn->dt.dtor) (tn->par_addr);
                    }
                    if (d_tn->obj_init != 0) {
                        (void) (*d_tn->dt.dtor) (d_tn->obj_init);
                    }
                }
            }
#endif
            KC_TRACE( 30, ("__kmp_common_destroy_gtid: T#%d threadprivate destructors complete\n",
                           gtid ) );
        }
    }
}
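/* Illustrative sketch (not part of the runtime build): the shape of
   the per-thread teardown in the non-TLS branch above. Each thread
   owns a linked list of its private copies (th_pri_head); for each
   copy we look up the shared descriptor holding the destructor and
   dispatch on it. Names are hypothetical; struct tp_descriptor and
   destroy_instance are from the sketch after __kmp_common_destroy(). */
#if 0
struct tp_instance {
    void *gbl_addr;             /* key: address of the global original */
    void *par_addr;             /* this thread's private copy          */
    struct tp_instance *link;   /* next copy owned by this thread      */
};

static void
destroy_thread_copies( struct tp_instance *head,
                       struct tp_descriptor *(*find)( void *key ) )
{
    for ( struct tp_instance *tn = head; tn; tn = tn->link ) {
        /* cf. __kmp_find_shared_task_common() in the loop above */
        struct tp_descriptor *d = find( tn->gbl_addr );
        if ( d )
            destroy_instance( d, tn->par_addr );  /* dtor/dtorv dispatch */
    }
}
#endif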