/* Initialize the per-thread ph_thread_t bookkeeping for the calling thread
 * and publish it so later ph_thread_self() lookups (thread-local and/or
 * pthread key) can find it.
 *
 * booting: true when called from the thread-boot trampoline for a thread
 *          phenom itself spawned; false when adopting an existing thread.
 *          Only used to decide whether a recycled epoch record needs an
 *          immediate barrier (see the comment at the bottom).
 *
 * Returns the (possibly recycled) ph_thread_t for this thread; panics on
 * allocation failure rather than returning NULL.
 */
static ph_thread_t *ph_thread_init_myself(bool booting)
{
  ph_thread_t *me;
  ck_epoch_record_t *er;

  er = ck_epoch_recycle(&misc_epoch);
  if (er) {
    // Reuse the ph_thread_t that embeds this recycled epoch record; it is
    // already registered with the epoch and already on the all-threads
    // stack, so we skip that setup below.
    me = ph_container_of(er, ph_thread_t, epoch_record);
  } else {
    me = calloc(1, sizeof(*me));
    if (!me) {
      ph_panic("fatal OOM in ph_thread_init_myself()");
    }
    // Fresh thread record: register its epoch record, make it visible on
    // the lock-free all-threads stack, and set up its counter storage.
    ck_epoch_register(&misc_epoch, &me->epoch_record);
    ck_stack_push_mpmc(&ph_thread_all_threads, &me->thread_linkage);
    ph_counter_init_thread(me);
  }

  // Publish `me` for ph_thread_self(): fast path via __thread where
  // available, pthread key as the portable fallback (and for the key's
  // destructor semantics).
#ifdef HAVE___THREAD
  __ph_thread_self = me;
#endif
  pthread_setspecific(__ph_thread_key, me);

  // Per-thread work queues start empty (re-init matters for recycled
  // records too).
  PH_STAILQ_INIT(&me->pending_nbio);
  PH_STAILQ_INIT(&me->pending_pool);

  // Atomically claim a unique, monotonically increasing thread id.
  me->tid = ck_pr_faa_32(&next_tid, 1);
  me->thr = pthread_self();
#ifdef __sun__
  me->lwpid = _lwp_self();
#endif
#if defined(__linux__) || defined(__MACH__)
  // see if we can discover our thread name from the system
  pthread_getname_np(me->thr, me->name, sizeof(me->name));
#endif

  // If we were recycled from a non-phenom thread, and are initializing
  // a non-phenom thread, it is possible that there are still deferred
  // items to reap in this record, so get them now.
  if (er && !booting) {
    ck_epoch_barrier(&misc_epoch, &me->epoch_record);
  }

  return me;
}
static void *ph_thread_boot(void *arg) { struct ph_thread_boot_data data; ph_thread_t *me; void *retval; /* copy in the boot data from the stack of our creator */ memcpy(&data, arg, sizeof(data)); me = ph_thread_init_myself(true); /* this publishes that we're ready to run to * the thread that spawned us */ ck_pr_store_ptr(data.thr, ck_pr_load_ptr(&me)); ck_pr_fence_store(); retval = data.func(data.arg); ck_epoch_barrier(&me->epoch_record); return retval; }
void ph_thread_epoch_barrier(void) { ph_thread_t *me = ph_thread_self(); ck_epoch_barrier(&me->epoch_record); }