/* ////////////////////////////////////////////////////////////////////////////////////// * implementation */ tb_sockdata_ref_t tb_sockdata() { #ifndef TB_CONFIG_MICRO_ENABLE // init local socket data if (!tb_thread_local_init(&g_sockdata_local, tb_sockdata_local_free)) return tb_null; // init socket data tb_sockdata_ref_t sockdata = (tb_sockdata_ref_t)tb_thread_local_get(&g_sockdata_local); if (!sockdata) { // make socket data sockdata = tb_malloc0_type(tb_sockdata_t); if (sockdata) { // init socket data tb_sockdata_init(sockdata); // save socket data to local thread tb_thread_local_set(&g_sockdata_local, sockdata); } } // ok? return sockdata; #else return tb_null; #endif }
tb_void_t tb_co_scheduler_loop(tb_co_scheduler_ref_t self, tb_bool_t exclusive)
{
    // the scheduler implementation
    tb_co_scheduler_t* impl = (tb_co_scheduler_t*)self;
    tb_assert_and_check_return(impl);

#ifdef __tb_thread_local__
    g_scheduler_self_ex = impl;
#else
    // exclusive mode binds the single global slot, otherwise use thread-local storage
    if (exclusive) g_scheduler_self_ex = impl;
    else
    {
        // init the scheduler thread-local slot (one-shot init)
        if (!tb_thread_local_init(&g_scheduler_self, tb_null)) return ;

        // update and override the current scheduler for this thread
        tb_thread_local_set(&g_scheduler_self, self);
    }
#endif

    // drain the ready queue: keep switching into the coroutine at the head
    while (tb_list_entry_size(&impl->coroutines_ready))
    {
        // the loop must be driven from the original (root) coroutine
        tb_assert(tb_coroutine_is_original(impl->running));

        // take the next ready coroutine from the head of the list
        tb_list_entry_ref_t entry = tb_list_entry_head(&impl->coroutines_ready);
        tb_assert(entry);

        // switch to it
        tb_co_scheduler_switch(impl, (tb_coroutine_t*)tb_list_entry0(entry));

        // trace
        tb_trace_d("[loop]: ready %lu", tb_list_entry_size(&impl->coroutines_ready));
    }

    // no more ready coroutines: mark the scheduler as stopped
    impl->stopped = tb_true;

#ifdef __tb_thread_local__
    g_scheduler_self_ex = tb_null;
#else
    // clear the current scheduler binding
    if (exclusive) g_scheduler_self_ex = tb_null;
    else tb_thread_local_set(&g_scheduler_self, tb_null);
#endif
}
/* ////////////////////////////////////////////////////////////////////////////////////// * implementation */ tb_bool_t tb_exception_init_env() { // init the thread local, only once if (!tb_thread_local_init(&g_exception_local, tb_exception_stack_exit)) return tb_false; // register signal handler // tb_signal(TB_SIGINT, tb_exception_signal_func); tb_signal(TB_SIGILL, tb_exception_signal_func); tb_signal(TB_SIGFPE, tb_exception_signal_func); tb_signal(TB_SIGBUS, tb_exception_signal_func); tb_signal(TB_SIGSEGV, tb_exception_signal_func); tb_signal(TB_SIGABRT, tb_exception_signal_func); // tb_signal(TB_SIGTRAP, tb_exception_signal_func); // ok return tb_true; }
static tb_int_t tb_demo_thread_local_test(tb_cpointer_t priv)
{
    // the current thread id
    tb_size_t self = tb_thread_self();

    // trace
    tb_trace_i("thread[%lx]: init", self);

    // the shared thread-local slot; tb_thread_local_init only initializes it once
    static tb_thread_local_t s_local = TB_THREAD_LOCAL_INIT;
    if (!tb_thread_local_init(&s_local, tb_demo_thread_local_free)) return -1;

    // record the start time
    tb_hong_t started = tb_mclock();

    // hammer the thread-local slot
    __tb_volatile__ tb_size_t count = 10000000;
    while (count--)
    {
        // fetch the per-thread value; store our thread id on first access
        tb_size_t local = (tb_size_t)tb_thread_local_get(&s_local);
        if (!local)
        {
            if (tb_thread_local_set(&s_local, (tb_cpointer_t)self)) local = self;
        }

        // the slot must always hold this thread's own id
        if (local != self)
        {
            // trace
            tb_trace_i("thread[%lx]: invalid value: %lx", self, local);
        }
    }

    // compute the elapsed time
    tb_hong_t elapsed = tb_mclock() - started;

    // trace
    tb_trace_i("thread[%lx]: exit: %lld ms", self, elapsed);

    // ok
    return 0;
}
tb_void_t tb_lo_scheduler_loop(tb_lo_scheduler_ref_t self, tb_bool_t exclusive) { // check tb_lo_scheduler_t* scheduler = (tb_lo_scheduler_t*)self; tb_assert_and_check_return(scheduler); #ifdef __tb_thread_local__ g_scheduler_self_ex = scheduler; #else // is exclusive mode? if (exclusive) g_scheduler_self_ex = scheduler; # ifndef TB_CONFIG_MICRO_ENABLE else { // init self scheduler local if (!tb_thread_local_init(&g_scheduler_self, tb_null)) return ; // update and overide the current scheduler tb_thread_local_set(&g_scheduler_self, self); } # else else {