/* ////////////////////////////////////////////////////////////////////////////////////// * implementation */ tb_sockdata_ref_t tb_sockdata() { #ifndef TB_CONFIG_MICRO_ENABLE // init local socket data if (!tb_thread_local_init(&g_sockdata_local, tb_sockdata_local_free)) return tb_null; // init socket data tb_sockdata_ref_t sockdata = (tb_sockdata_ref_t)tb_thread_local_get(&g_sockdata_local); if (!sockdata) { // make socket data sockdata = tb_malloc0_type(tb_sockdata_t); if (sockdata) { // init socket data tb_sockdata_init(sockdata); // save socket data to local thread tb_thread_local_set(&g_sockdata_local, sockdata); } } // ok? return sockdata; #else return tb_null; #endif }
/* Handler for fatal signals (SIGILL/SIGFPE/SIGBUS/SIGSEGV/SIGABRT).
 *
 * If the calling thread has a saved jump buffer on its thread-local exception
 * stack, perform a non-local jump back to that handler frame. Otherwise,
 * restore the default signal dispositions and re-raise the signal so the
 * process terminates conventionally.
 *
 * @param sig   the signal number being handled
 */
static tb_void_t tb_exception_signal_func(tb_int_t sig)
{
    // the per-thread stack of saved jump buffers
    tb_stack_ref_t stack = (tb_stack_ref_t)tb_thread_local_get(&g_exception_local);
    if (stack && tb_stack_size(stack))
    {
#if defined(tb_sigsetjmp) && defined(tb_siglongjmp)
        // prefer siglongjmp when available: it also restores the signal mask on jump
        tb_sigjmpbuf_t* jmpbuf = (tb_sigjmpbuf_t*)tb_stack_top(stack);
        if (jmpbuf) tb_siglongjmp(*jmpbuf, 1);
#else
        tb_jmpbuf_t* jmpbuf = (tb_jmpbuf_t*)tb_stack_top(stack);
        if (jmpbuf) tb_longjmp(*jmpbuf, 1);
#endif
    }
    else
    {
        // trace
        tb_trace_e("exception: no handler for signal: %d", sig);

        /* restore the default dispositions for the handled signals
         * (note: TB_SIG_DFL is the default action, not "ignore") */
        tb_signal(TB_SIGILL, TB_SIG_DFL);
        tb_signal(TB_SIGFPE, TB_SIG_DFL);
        tb_signal(TB_SIGBUS, TB_SIG_DFL);
        tb_signal(TB_SIGSEGV, TB_SIG_DFL);
        tb_signal(TB_SIGABRT, TB_SIG_DFL);

#ifdef TB_CONFIG_LIBC_HAVE_KILL
        /* re-raise the original signal; with the default disposition restored
         * this terminates the process the standard way */
        kill(getpid(), sig);
#endif
    }
}
tb_lo_scheduler_ref_t tb_lo_scheduler_self_() { // get self scheduler on the current thread #if defined(TB_CONFIG_MICRO_ENABLE) || defined(__tb_thread_local__) return (tb_lo_scheduler_ref_t)g_scheduler_self_ex; #else return (tb_lo_scheduler_ref_t)(g_scheduler_self_ex? g_scheduler_self_ex : tb_thread_local_get(&g_scheduler_self)); #endif }
tb_co_scheduler_ref_t tb_co_scheduler_self() { // get self scheduler on the current thread #ifdef __tb_thread_local__ return (tb_co_scheduler_ref_t)g_scheduler_self_ex; #else return (tb_co_scheduler_ref_t)(g_scheduler_self_ex? g_scheduler_self_ex : tb_thread_local_get(&g_scheduler_self)); #endif }
/* Demo thread entry: stress-test thread-local get/set.
 *
 * Each thread stores its own thread id in a shared tb_thread_local_t slot and
 * repeatedly reads it back, reporting any value that is not its own id, then
 * prints the elapsed time for the whole loop.
 *
 * @param priv  unused thread argument
 * @return      0 on success, -1 if the thread local cannot be initialized
 */
static tb_int_t tb_demo_thread_local_test(tb_cpointer_t priv)
{
    // self: this thread's id, used as its distinctive thread-local value
    tb_size_t self = tb_thread_self();

    // trace
    tb_trace_i("thread[%lx]: init", self);

    // init the thread local, only once (the slot is shared by all threads entering here)
    static tb_thread_local_t s_local = TB_THREAD_LOCAL_INIT;
    if (!tb_thread_local_init(&s_local, tb_demo_thread_local_free)) return -1;

    // init start time
    tb_hong_t time = tb_mclock();

    // done (volatile counter keeps the benchmark loop from being optimized away)
    __tb_volatile__ tb_size_t count = 10000000;
    while (count--)
    {
        // attempt to get local variable
        tb_size_t local;
        if (!(local = (tb_size_t)tb_thread_local_get(&s_local)))
        {
            // not set yet for this thread: init local variable
            if (tb_thread_local_set(&s_local, (tb_cpointer_t)self)) local = self;
        }

        // check: the stored value must be this thread's own id
        if (local != self)
        {
            // trace
            tb_trace_i("thread[%lx]: invalid value: %lx", self, local);
        }
    }

    // compute the elapsed interval time
    time = tb_mclock() - time;

    // trace
    tb_trace_i("thread[%lx]: exit: %lld ms", self, time);

    // ok
    return 0;
}
/* Store a value in the given thread-local slot for the calling thread.
 *
 * local->priv holds two pthread keys: keys[0] stores the value itself and
 * keys[1] stores an "exists" marker, so that a stored null value can still
 * be distinguished from "never set".
 *
 * @param local  the thread-local slot (must be initialized)
 * @param priv   the value to store
 * @return       tb_true on success, tb_false otherwise
 */
tb_bool_t tb_thread_local_set(tb_thread_local_ref_t local, tb_cpointer_t priv)
{
    // check
    tb_assert(local);

    // not initialized yet?
    tb_assert_and_check_return_val(local->inited, tb_false);

    // release any previously stored value first
    if (local->free && tb_thread_local_has(local))
        local->free(tb_thread_local_get(local));

    // store the new value
    pthread_key_t* keys = (pthread_key_t*)local->priv;
    if (pthread_setspecific(keys[0], priv) != 0) return tb_false;

    // mark the value as existing
    return pthread_setspecific(keys[1], (tb_pointer_t)tb_true) == 0;
}