/*
 * Build a private_data descriptor recording the initial image of a
 * threadprivate POD block.  A heap copy of the bytes at pc_addr is
 * kept only when at least one byte is non-zero; an all-zero image
 * needs no prototype because __kmp_allocate() hands back zeroed
 * memory anyway (d->data stays NULL in that case).
 */
static struct private_data *
__kmp_init_common_data( void *pc_addr, size_t pc_size )
{
    struct private_data *desc;
    const char          *byte;
    size_t               remaining;

    desc = (struct private_data *) __kmp_allocate( sizeof( struct private_data ) );
    /* desc->data and desc->next are already zero: __kmp_allocate zeroes memory. */
    desc->size = pc_size;
    desc->more = 1;

    /* Scan for any non-zero byte; only then snapshot the initial image. */
    byte = (const char *) pc_addr;
    for ( remaining = pc_size; remaining > 0; --remaining ) {
        if ( *byte++ != '\0' ) {
            desc->data = __kmp_allocate( pc_size );
            KMP_MEMCPY( desc->data, pc_addr, pc_size );
            break;
        }
    }

    return desc;
}
void kmp_threadprivate_insert_private_data( int gtid, void *pc_addr, void *data_addr, size_t pc_size ) { struct shared_common **lnk_tn, *d_tn; KMP_DEBUG_ASSERT( __kmp_threads[ gtid ] && __kmp_threads[ gtid ] -> th.th_root -> r.r_active == 0 ); d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table, gtid, pc_addr ); if (d_tn == 0) { d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) ); d_tn->gbl_addr = pc_addr; d_tn->pod_init = __kmp_init_common_data( data_addr, pc_size ); /* d_tn->obj_init = 0; // AC: commented out because __kmp_allocate zeroes the memory d_tn->ct.ctor = 0; d_tn->cct.cctor = 0;; d_tn->dt.dtor = 0; d_tn->is_vec = FALSE; d_tn->vec_len = 0L; */ d_tn->cmn_size = pc_size; __kmp_acquire_lock( &__kmp_global_lock, gtid ); lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(pc_addr) ]); d_tn->next = *lnk_tn; *lnk_tn = d_tn; __kmp_release_lock( &__kmp_global_lock, gtid ); } }
/*!
@ingroup THREADPRIVATE
@param loc   source location information
@param data  pointer to data being privatized
@param ctor  pointer to constructor function for data
@param cctor pointer to copy constructor function for data
@param dtor  pointer to destructor function for data

Register constructors and destructors for thread private data.
This function is called when executing in parallel, when we know the thread id.
*/
void
__kmpc_threadprivate_register( ident_t *loc, void *data, kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor )
{
    struct shared_common  *entry;
    struct shared_common **bucket;

    KC_TRACE( 10, ("__kmpc_threadprivate_register: called\n" ) );

#ifdef USE_CHECKS_COMMON
    /* copy constructor must be zero for current code gen (Nov 2002 - jph) */
    KMP_ASSERT( cctor == 0);
#endif /* USE_CHECKS_COMMON */

    /* Only the global data table exists; -1 selects it. */
    entry = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table, -1, data );

    if ( entry == 0 ) {
        entry = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );
        entry->gbl_addr  = data;
        entry->ct.ctor   = ctor;
        entry->cct.cctor = cctor;
        entry->dt.dtor   = dtor;
        /* is_vec, vec_len, obj_init and pod_init stay zero:
           __kmp_allocate zeroes the memory. */

        bucket = &( __kmp_threadprivate_d_table.data[ KMP_HASH( data ) ] );
        entry->next = *bucket;
        *bucket = entry;
    }
}
struct cons_header * __kmp_allocate_cons_stack( int gtid ) { struct cons_header *p; /* TODO for monitor perhaps? */ if ( gtid < 0 ) { __kmp_check_null_func(); }; // if KE_TRACE( 10, ("allocate cons_stack (%d)\n", gtid ) ); p = (struct cons_header *) __kmp_allocate( sizeof( struct cons_header ) ); p->p_top = p->w_top = p->s_top = 0; p->stack_data = (struct cons_data *) __kmp_allocate( sizeof( struct cons_data ) * (MIN_STACK+1) ); p->stack_size = MIN_STACK; p->stack_top = 0; p->stack_data[ 0 ].type = ct_none; p->stack_data[ 0 ].prev = 0; p->stack_data[ 0 ].ident = NULL; return p; }
// returns a pointer to newly created stats node kmp_stats_list* kmp_stats_list::push_back(int gtid) { kmp_stats_list* newnode = (kmp_stats_list*)__kmp_allocate(sizeof(kmp_stats_list)); // placement new, only requires space and pointer and initializes (so __kmp_allocate instead of C++ new[] is used) new (newnode) kmp_stats_list(); newnode->setGtid(gtid); newnode->prev = this->prev; newnode->next = this; newnode->prev->next = newnode; newnode->next->prev = newnode; return newnode; }
static void __kmp_expand_cons_stack( int gtid, struct cons_header *p ) { int i; struct cons_data *d; /* TODO for monitor perhaps? */ if (gtid < 0) __kmp_check_null_func(); KE_TRACE( 10, ("expand cons_stack (%d %d)\n", gtid, __kmp_get_gtid() ) ); d = p->stack_data; p->stack_size = (p->stack_size * 2) + 100; /* TODO free the old data */ p->stack_data = (struct cons_data *) __kmp_allocate( sizeof( struct cons_data ) * (p->stack_size+1) ); for (i = p->stack_top; i >= 0; --i) p->stack_data[i] = d[i]; /* NOTE: we do not free the old stack_data */ }
/*! @ingroup THREADPRIVATE @param loc source location information @param global_tid global thread number @param data pointer to data to privatize @param size size of data to privatize @param cache pointer to cache @return pointer to private storage Allocate private storage for threadprivate data. */ void * __kmpc_threadprivate_cached( ident_t * loc, kmp_int32 global_tid, // gtid. void * data, // Pointer to original global variable. size_t size, // Size of original global variable. void *** cache ) { KC_TRACE( 10, ("__kmpc_threadprivate_cached: T#%d called with cache: %p, address: %p, size: %" KMP_SIZE_T_SPEC "\n", global_tid, *cache, data, size ) ); if ( TCR_PTR(*cache) == 0) { __kmp_acquire_lock( & __kmp_global_lock, global_tid ); if ( TCR_PTR(*cache) == 0) { __kmp_acquire_bootstrap_lock(&__kmp_tp_cached_lock); __kmp_tp_cached = 1; __kmp_release_bootstrap_lock(&__kmp_tp_cached_lock); void ** my_cache; KMP_ITT_IGNORE( my_cache = (void**) __kmp_allocate(sizeof( void * ) * __kmp_tp_capacity + sizeof ( kmp_cached_addr_t )); );
/*!
@ingroup THREADPRIVATE
@param loc source location information
@param data pointer to data being privatized
@param ctor pointer to constructor function for data
@param cctor pointer to copy constructor function for data
@param dtor pointer to destructor function for data
Register constructors and destructors for thread private data.
This function is called when executing in parallel, when we know the thread id.
*/
void
__kmpc_threadprivate_register(ident_t *loc, void *data, kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor)
{
    struct shared_common *d_tn, **lnk_tn;

    KC_TRACE( 10, ("__kmpc_threadprivate_register: called\n" ) );

#ifdef USE_CHECKS_COMMON
    /* copy constructor must be zero for current code gen (Nov 2002 - jph) */
    KMP_ASSERT( cctor == 0);
#endif /* USE_CHECKS_COMMON */

#if KMP_THREADPRIVATE_TLS
    /* Check if unique.  We use here only data[0], as we need to maintain a
       unique ordering between each of the entries on the list.  This is
       because each thread maintains a private variable pointing to the
       entries it has already initialized.  When a worker starts a parallel
       region, it will check if its private pointer points to the most recent
       entry.  If not, the thread will call the constructors for all entries
       between the top and the ones already processed. */
    for (d_tn = __kmp_threadprivate_d_table.data[0]; d_tn; d_tn = d_tn->next) {
        if (d_tn->gbl_addr == data) {
            // nothing to be done, already here
            return;
        }
    }

    // not found, create one
    d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );
    d_tn->gbl_addr = data;
    d_tn->ct.ctor = ctor;
    d_tn->cct.cctor = cctor;
    d_tn->dt.dtor = dtor;
    /* Remaining fields stay zero: __kmp_allocate zeroes the memory. */

    // use only one list (0th, arbitrary)
    lnk_tn = &(__kmp_threadprivate_d_table.data[0]);

    // set new element at head of list
    d_tn->next = __kmp_threadprivate_d_table.data[0];
    // Make sure that if a thread sees the new element, then it must
    // see the new next pointer value (publish after the node is complete).
    // NOTE(review): this head insertion is done without holding
    // __kmp_global_lock, relying only on the KMP_MB() below; presumably
    // concurrent registrations are serialized elsewhere or duplicates
    // are tolerated by the duplicate scan above -- confirm with callers.
    KMP_MB();
    __kmp_threadprivate_d_table.data[0] = d_tn;
    return;
#endif

    /* Only the global data table exists. */
    d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table, -1, data );

    if (d_tn == 0) {
        d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );
        d_tn->gbl_addr = data;
        d_tn->ct.ctor = ctor;
        d_tn->cct.cctor = cctor;
        d_tn->dt.dtor = dtor;
        /* is_vec, vec_len, obj_init and pod_init remain zero:
           __kmp_allocate zeroes the memory. */

        lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(data) ]);

        d_tn->next = *lnk_tn;
        *lnk_tn = d_tn;
    }
}
/*
 * Create and register this thread's private copy of the threadprivate
 * variable at pc_addr (pc_size bytes, initial image at data_addr).
 * Looks up / creates the shared descriptor (d_tn), allocates the
 * per-thread node (tn), links it into the thread's private hash table
 * and simple list, and runs the registered constructor / copy
 * constructor / POD copy on the new storage for non-master threads.
 * Returns the new private_common node.
 */
struct private_common *
kmp_threadprivate_insert( int gtid, void *pc_addr, void *data_addr, size_t pc_size )
{
    struct private_common *tn, **tt;
    struct shared_common  *d_tn;

    /* +++++++++ START OF CRITICAL SECTION +++++++++ */

    __kmp_acquire_lock( & __kmp_global_lock, gtid );

    tn = (struct private_common *) __kmp_allocate( sizeof (struct private_common) );

    tn->gbl_addr = pc_addr;

    d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table, gtid, pc_addr ); /* Only the MASTER data table exists. */

    if (d_tn != 0) {
        /* This threadprivate variable has already been seen. */

        /* First thread to arrive here fills in the prototype / size;
           later threads find pod_init or obj_init already set. */
        if ( d_tn->pod_init == 0 && d_tn->obj_init == 0 ) {
            d_tn->cmn_size = pc_size;

            if (d_tn->is_vec) {
                if (d_tn->ct.ctorv != 0) {
                    /* Construct from scratch so no prototype exists */
                    d_tn->obj_init = 0;
                }
                else if (d_tn->cct.cctorv != 0) {
                    /* Now data initialize the prototype since it was previously registered */
                    d_tn->obj_init = (void *) __kmp_allocate( d_tn->cmn_size );
                    (void) (*d_tn->cct.cctorv) (d_tn->obj_init, pc_addr, d_tn->vec_len);
                }
                else {
                    /* POD vector: record the initial byte image. */
                    d_tn->pod_init = __kmp_init_common_data( data_addr, d_tn->cmn_size );
                }
            } else {
                if (d_tn->ct.ctor != 0) {
                    /* Construct from scratch so no prototype exists */
                    d_tn->obj_init = 0;
                }
                else if (d_tn->cct.cctor != 0) {
                    /* Now data initialize the prototype since it was previously registered */
                    d_tn->obj_init = (void *) __kmp_allocate( d_tn->cmn_size );
                    (void) (*d_tn->cct.cctor) (d_tn->obj_init, pc_addr);
                }
                else {
                    /* POD scalar/struct: record the initial byte image. */
                    d_tn->pod_init = __kmp_init_common_data( data_addr, d_tn->cmn_size );
                }
            }
        }
    }
    else {
        /* First sighting of this variable anywhere: create the shared
           descriptor with a POD prototype and publish it in the hash. */
        struct shared_common **lnk_tn;

        d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );
        d_tn->gbl_addr = pc_addr;
        d_tn->cmn_size = pc_size;
        d_tn->pod_init = __kmp_init_common_data( data_addr, pc_size );
        /* obj_init, ctor/cctor/dtor, is_vec and vec_len stay zero:
           __kmp_allocate zeroes the memory. */

        lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(pc_addr) ]);

        d_tn->next = *lnk_tn;
        *lnk_tn = d_tn;
    }

    tn->cmn_size = d_tn->cmn_size;

    /* The initial/uber thread aliases the original storage; workers get
       a fresh allocation. */
    if ( (__kmp_foreign_tp) ? (KMP_INITIAL_GTID (gtid)) : (KMP_UBER_GTID (gtid)) ) {
        tn->par_addr = (void *) pc_addr;
    }
    else {
        tn->par_addr = (void *) __kmp_allocate( tn->cmn_size );
    }

    __kmp_release_lock( & __kmp_global_lock, gtid );
    /* +++++++++ END OF CRITICAL SECTION +++++++++ */

#ifdef USE_CHECKS_COMMON
    if (pc_size > d_tn->cmn_size) {
        KC_TRACE( 10, ( "__kmp_threadprivate_insert: THREADPRIVATE: %p (%" KMP_UINTPTR_SPEC " ,%" KMP_UINTPTR_SPEC ")\n", pc_addr, pc_size, d_tn->cmn_size ) );
        KMP_FATAL( TPCommonBlocksInconsist );
    }
#endif /* USE_CHECKS_COMMON */

    tt = &(__kmp_threads[ gtid ]->th.th_pri_common->data[ KMP_HASH(pc_addr) ]);

#ifdef KMP_TASK_COMMON_DEBUG
    if (*tt != 0) {
        KC_TRACE( 10, ( "__kmp_threadprivate_insert: WARNING! thread#%d: collision on %p\n", gtid, pc_addr ) );
    }
#endif
    tn->next = *tt;
    *tt = tn;

#ifdef KMP_TASK_COMMON_DEBUG
    KC_TRACE( 10, ( "__kmp_threadprivate_insert: thread#%d, inserted node %p on list\n", gtid, pc_addr ) );
    dump_list( );
#endif

    /* Link the node into a simple list */
    tn->link = __kmp_threads[ gtid ]->th.th_pri_head;
    __kmp_threads[ gtid ]->th.th_pri_head = tn;

#ifdef BUILD_TV
    __kmp_tv_threadprivate_store( __kmp_threads[ gtid ], tn->gbl_addr, tn->par_addr );
#endif

    /* Master/initial thread aliases the global storage: nothing to
       construct or copy. */
    if( (__kmp_foreign_tp) ? (KMP_INITIAL_GTID (gtid)) : (KMP_UBER_GTID (gtid)) )
        return tn;

    /*
     * if C++ object with copy constructor, use it;
     * else if C++ object with constructor, use it for the non-master copies only;
     * else use pod_init and memcpy
     *
     * C++ constructors need to be called once for each non-master thread on allocate
     * C++ copy constructors need to be called once for each thread on allocate
     */

    /*
     * C++ object with constructors/destructors;
     * don't call constructors for master thread though
     */
    if (d_tn->is_vec) {
        if ( d_tn->ct.ctorv != 0) {
            (void) (*d_tn->ct.ctorv) (tn->par_addr, d_tn->vec_len);
        } else if (d_tn->cct.cctorv != 0) {
            (void) (*d_tn->cct.cctorv) (tn->par_addr, d_tn->obj_init, d_tn->vec_len);
        } else if (tn->par_addr != tn->gbl_addr) {
            __kmp_copy_common_data( tn->par_addr, d_tn->pod_init );
        }
    } else {
        if ( d_tn->ct.ctor != 0 ) {
            (void) (*d_tn->ct.ctor) (tn->par_addr);
        } else if (d_tn->cct.cctor != 0) {
            (void) (*d_tn->cct.cctor) (tn->par_addr, d_tn->obj_init);
        } else if (tn->par_addr != tn->gbl_addr) {
            __kmp_copy_common_data( tn->par_addr, d_tn->pod_init );
        }
    }

    /* !BUILD_OPENMP_C
    if (tn->par_addr != tn->gbl_addr)
        __kmp_copy_common_data( tn->par_addr, d_tn->pod_init ); */

    return tn;
}