static
#ifdef KMP_INLINE_SUBR
__forceinline
#endif
struct private_common *
__kmp_threadprivate_find_task_common( struct common_table *tbl, int gtid, void *pc_addr )
{
    struct private_common *tn;

#ifdef KMP_TASK_COMMON_DEBUG
    KC_TRACE( 10, ( "__kmp_threadprivate_find_task_common: thread#%d, called with address %p\n",
                    gtid, pc_addr ) );
    dump_list();
#endif

    for (tn = tbl->data[ KMP_HASH(pc_addr) ]; tn; tn = tn->next) {
        if (tn->gbl_addr == pc_addr) {
#ifdef KMP_TASK_COMMON_DEBUG
            KC_TRACE( 10, ( "__kmp_threadprivate_find_task_common: thread#%d, found node %p on list\n",
                            gtid, pc_addr ) );
#endif
            return tn;
        }
    }
    return 0;
}
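/* For reference, a minimal sketch of the hashing scheme assumed above: KMP_HASH
   maps a global address to a bucket index, and each bucket holds a singly
   linked chain that is searched linearly, as in
   __kmp_threadprivate_find_task_common. The exact constants below are an
   assumption for illustration, not a quote of kmp.h. */
#if 0
#define KMP_HASH_TABLE_LOG2 9                          /* e.g. 512 buckets */
#define KMP_HASH_TABLE_SIZE (1 << KMP_HASH_TABLE_LOG2)
#define KMP_HASH_SHIFT 3                               /* drop low (alignment) bits */
#define KMP_HASH(x) \
    ((((kmp_uintptr_t)(x)) >> KMP_HASH_SHIFT) & (KMP_HASH_TABLE_SIZE - 1))
#endif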
void
kmp_threadprivate_insert_private_data( int gtid, void *pc_addr, void *data_addr, size_t pc_size )
{
    struct shared_common **lnk_tn, *d_tn;
    KMP_DEBUG_ASSERT( __kmp_threads[ gtid ] &&
            __kmp_threads[ gtid ] -> th.th_root -> r.r_active == 0 );

    d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table, gtid, pc_addr );

    if (d_tn == 0) {
        d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );

        d_tn->gbl_addr = pc_addr;
        d_tn->pod_init = __kmp_init_common_data( data_addr, pc_size );
/*
        d_tn->obj_init = 0;  // AC: commented out because __kmp_allocate zeroes the memory
        d_tn->ct.ctor = 0;
        d_tn->cct.cctor = 0;
        d_tn->dt.dtor = 0;
        d_tn->is_vec = FALSE;
        d_tn->vec_len = 0L;
*/
        d_tn->cmn_size = pc_size;

        __kmp_acquire_lock( &__kmp_global_lock, gtid );

        lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(pc_addr) ]);

        d_tn->next = *lnk_tn;
        *lnk_tn = d_tn;

        __kmp_release_lock( &__kmp_global_lock, gtid );
    }
}
static
#ifdef KMP_INLINE_SUBR
__forceinline
#endif
struct shared_common *
__kmp_find_shared_task_common( struct shared_table *tbl, int gtid, void *pc_addr )
{
    struct shared_common *tn;

    for (tn = tbl->data[ KMP_HASH(pc_addr) ]; tn; tn = tn->next) {
        if (tn->gbl_addr == pc_addr) {
#ifdef KMP_TASK_COMMON_DEBUG
            KC_TRACE( 10, ( "__kmp_find_shared_task_common: thread#%d, found node %p on list\n",
                            gtid, pc_addr ) );
#endif
            return tn;
        }
    }
    return 0;
}
/*!
 @ingroup THREADPRIVATE
 @param loc source location information
 @param data pointer to data being privatized
 @param ctor pointer to constructor function for data
 @param cctor pointer to copy constructor function for data
 @param dtor pointer to destructor function for data

 Register constructors and destructors for thread private data.
 This function is called when executing in parallel, when we know the thread id.
*/
void
__kmpc_threadprivate_register(ident_t *loc, void *data, kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor)
{
    struct shared_common *d_tn, **lnk_tn;

    KC_TRACE( 10, ("__kmpc_threadprivate_register: called\n" ) );

#ifdef USE_CHECKS_COMMON
    /* copy constructor must be zero for current code gen (Nov 2002 - jph) */
    KMP_ASSERT( cctor == 0);
#endif /* USE_CHECKS_COMMON */

#if KMP_THREADPRIVATE_TLS
    /* Check if unique. We use only data[0] here, as we need to maintain a
       unique ordering among all of the entries on the list. This is because
       each thread maintains a private pointer to the entries it has already
       initialized. When a worker starts a parallel region, it checks whether
       its private pointer points to the most recent entry; if not, the thread
       calls the constructors for all entries between the head of the list and
       the ones it has already processed. */
    for (d_tn = __kmp_threadprivate_d_table.data[0]; d_tn; d_tn = d_tn->next) {
        if (d_tn->gbl_addr == data) {
            // nothing to be done, already here
            return;
        }
    }

    // not found, create one
    d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );
    d_tn->gbl_addr = data;

    d_tn->ct.ctor = ctor;
    d_tn->cct.cctor = cctor;
    d_tn->dt.dtor = dtor;

    // use only one list (0th, arbitrary)
    lnk_tn = &(__kmp_threadprivate_d_table.data[0]);

    // set new element at head of list
    d_tn->next = __kmp_threadprivate_d_table.data[0];

    // Make sure that if a thread sees the new element, it must also see the
    // new next pointer value.
    KMP_MB();

    __kmp_threadprivate_d_table.data[0] = d_tn;

    return;
#endif

    /* Only the global data table exists. */
    d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table, -1, data );

    if (d_tn == 0) {
        d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );
        d_tn->gbl_addr = data;

        d_tn->ct.ctor = ctor;
        d_tn->cct.cctor = cctor;
        d_tn->dt.dtor = dtor;
/*
        d_tn->is_vec = FALSE;  // AC: commented out because __kmp_allocate zeroes the memory
        d_tn->vec_len = 0L;
        d_tn->obj_init = 0;
        d_tn->pod_init = 0;
*/
        lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(data) ]);

        d_tn->next = *lnk_tn;
        *lnk_tn = d_tn;
    }
}
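/* A minimal usage sketch (an assumption for illustration, not the output of
   any particular compiler): for a POD global declared threadprivate, the
   compiler emits one registration per translation unit with null constructor,
   copy-constructor, and destructor pointers, so kmp_threadprivate_insert later
   falls back to pod_init/memcpy initialization. The ident_t value and the
   names below are hypothetical. */
#if 0
extern int tp_counter;               /* #pragma omp threadprivate(tp_counter) */
static ident_t loc_example = { 0 };  /* hypothetical source-location record */

static void
example_register_tp_counter( void )
{
    __kmpc_threadprivate_register( &loc_example, (void *) &tp_counter,
                                   (kmpc_ctor) 0, (kmpc_cctor) 0, (kmpc_dtor) 0 );
}
#endif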
struct private_common *
kmp_threadprivate_insert( int gtid, void *pc_addr, void *data_addr, size_t pc_size )
{
    struct private_common *tn, **tt;
    struct shared_common  *d_tn;

    /* +++++++++ START OF CRITICAL SECTION +++++++++ */

    __kmp_acquire_lock( & __kmp_global_lock, gtid );

    tn = (struct private_common *) __kmp_allocate( sizeof (struct private_common) );

    tn->gbl_addr = pc_addr;

    d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table, gtid, pc_addr );
    /* Only the MASTER data table exists. */

    if (d_tn != 0) {
        /* This threadprivate variable has already been seen. */

        if ( d_tn->pod_init == 0 && d_tn->obj_init == 0 ) {
            d_tn->cmn_size = pc_size;

            if (d_tn->is_vec) {
                if (d_tn->ct.ctorv != 0) {
                    /* Construct from scratch so no prototype exists */
                    d_tn->obj_init = 0;
                }
                else if (d_tn->cct.cctorv != 0) {
                    /* Now data initialize the prototype since it was previously registered */
                    d_tn->obj_init = (void *) __kmp_allocate( d_tn->cmn_size );
                    (void) (*d_tn->cct.cctorv) (d_tn->obj_init, pc_addr, d_tn->vec_len);
                }
                else {
                    d_tn->pod_init = __kmp_init_common_data( data_addr, d_tn->cmn_size );
                }
            } else {
                if (d_tn->ct.ctor != 0) {
                    /* Construct from scratch so no prototype exists */
                    d_tn->obj_init = 0;
                }
                else if (d_tn->cct.cctor != 0) {
                    /* Now data initialize the prototype since it was previously registered */
                    d_tn->obj_init = (void *) __kmp_allocate( d_tn->cmn_size );
                    (void) (*d_tn->cct.cctor) (d_tn->obj_init, pc_addr);
                }
                else {
                    d_tn->pod_init = __kmp_init_common_data( data_addr, d_tn->cmn_size );
                }
            }
        }
    }
    else {
        struct shared_common **lnk_tn;

        d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );
        d_tn->gbl_addr = pc_addr;
        d_tn->cmn_size = pc_size;
        d_tn->pod_init = __kmp_init_common_data( data_addr, pc_size );
/*
        d_tn->obj_init = 0;  // AC: commented out because __kmp_allocate zeroes the memory
        d_tn->ct.ctor = 0;
        d_tn->cct.cctor = 0;
        d_tn->dt.dtor = 0;
        d_tn->is_vec = FALSE;
        d_tn->vec_len = 0L;
*/
        lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(pc_addr) ]);

        d_tn->next = *lnk_tn;
        *lnk_tn = d_tn;
    }

    tn->cmn_size = d_tn->cmn_size;

    if ( (__kmp_foreign_tp) ? (KMP_INITIAL_GTID (gtid)) : (KMP_UBER_GTID (gtid)) ) {
        tn->par_addr = (void *) pc_addr;
    }
    else {
        tn->par_addr = (void *) __kmp_allocate( tn->cmn_size );
    }

    __kmp_release_lock( & __kmp_global_lock, gtid );
    /* +++++++++ END OF CRITICAL SECTION +++++++++ */

#ifdef USE_CHECKS_COMMON
    if (pc_size > d_tn->cmn_size) {
        KC_TRACE( 10, ( "__kmp_threadprivate_insert: THREADPRIVATE: %p (%" KMP_UINTPTR_SPEC " ,%" KMP_UINTPTR_SPEC ")\n",
                        pc_addr, pc_size, d_tn->cmn_size ) );
        KMP_FATAL( TPCommonBlocksInconsist );
    }
#endif /* USE_CHECKS_COMMON */

    tt = &(__kmp_threads[ gtid ]->th.th_pri_common->data[ KMP_HASH(pc_addr) ]);

#ifdef KMP_TASK_COMMON_DEBUG
    if (*tt != 0) {
        KC_TRACE( 10, ( "__kmp_threadprivate_insert: WARNING! thread#%d: collision on %p\n",
                        gtid, pc_addr ) );
    }
#endif
    tn->next = *tt;
    *tt = tn;

#ifdef KMP_TASK_COMMON_DEBUG
    KC_TRACE( 10, ( "__kmp_threadprivate_insert: thread#%d, inserted node %p on list\n",
                    gtid, pc_addr ) );
    dump_list( );
#endif

    /* Link the node into a simple list */

    tn->link = __kmp_threads[ gtid ]->th.th_pri_head;
    __kmp_threads[ gtid ]->th.th_pri_head = tn;

#ifdef BUILD_TV
    __kmp_tv_threadprivate_store( __kmp_threads[ gtid ], tn->gbl_addr, tn->par_addr );
#endif

    if( (__kmp_foreign_tp) ? (KMP_INITIAL_GTID (gtid)) : (KMP_UBER_GTID (gtid)) )
        return tn;

    /*
     * if C++ object with copy constructor, use it;
     * else if C++ object with constructor, use it for the non-master copies only;
     * else use pod_init and memcpy
     *
     * C++ constructors need to be called once for each non-master thread on allocate
     * C++ copy constructors need to be called once for each thread on allocate
     */

    /*
     * C++ object with constructors/destructors;
     * don't call constructors for master thread though
     */
    if (d_tn->is_vec) {
        if ( d_tn->ct.ctorv != 0) {
            (void) (*d_tn->ct.ctorv) (tn->par_addr, d_tn->vec_len);
        } else if (d_tn->cct.cctorv != 0) {
            (void) (*d_tn->cct.cctorv) (tn->par_addr, d_tn->obj_init, d_tn->vec_len);
        } else if (tn->par_addr != tn->gbl_addr) {
            __kmp_copy_common_data( tn->par_addr, d_tn->pod_init );
        }
    } else {
        if ( d_tn->ct.ctor != 0 ) {
            (void) (*d_tn->ct.ctor) (tn->par_addr);
        } else if (d_tn->cct.cctor != 0) {
            (void) (*d_tn->cct.cctor) (tn->par_addr, d_tn->obj_init);
        } else if (tn->par_addr != tn->gbl_addr) {
            __kmp_copy_common_data( tn->par_addr, d_tn->pod_init );
        }
    }
/* !BUILD_OPENMP_C
    if (tn->par_addr != tn->gbl_addr)
        __kmp_copy_common_data( tn->par_addr, d_tn->pod_init ); */

    return tn;
}
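/* A hedged sketch of the constructor path exercised above (the Counter type
   and thunk names are hypothetical; C++ shown for illustration): for a C++
   threadprivate object, the compiler registers thunks matching the
   kmpc_ctor/kmpc_cctor/kmpc_dtor signatures, and kmp_threadprivate_insert then
   runs the constructor on each non-master thread's par_addr copy. Since the
   current code gen asserts cctor == 0 under USE_CHECKS_COMMON, only a
   constructor and destructor are registered here. */
#if 0
#include <new>                       /* placement new */

struct Counter { Counter(); ~Counter(); };
extern Counter tp_obj;               /* #pragma omp threadprivate(tp_obj) */
static ident_t loc_obj = { 0 };      /* hypothetical source-location record */

static void *tp_obj_ctor( void *p ) { return new (p) Counter(); }
static void  tp_obj_dtor( void *p ) { ((Counter *) p)->~Counter(); }

static void
example_register_tp_obj( void )
{
    __kmpc_threadprivate_register( &loc_obj, (void *) &tp_obj,
                                   tp_obj_ctor, (kmpc_cctor) 0, tp_obj_dtor );
}
#endif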