/* Remove an aioo from the poll reactor.
 *
 * Drops the matching pollfd from the pfds vector, removes the sock => aioo
 * mapping, then pokes the spak socket so the polling loop rebuilds its state.
 *
 * NOTE(review): this calls tb_hash_del while the select reactor's delo uses
 * tb_hash_map_remove for the same purpose — confirm which hash API generation
 * this file is built against.
 */
static tb_bool_t tb_aiop_rtor_poll_delo(tb_aiop_rtor_impl_t* rtor, tb_aioo_impl_t const* aioo)
{
    // check
    tb_aiop_rtor_poll_impl_t* impl = (tb_aiop_rtor_poll_impl_t*)rtor;
    tb_assert_and_check_return_val(impl && impl->pfds && impl->cfds && aioo && aioo->sock, tb_false);

    // the aiop
    tb_aiop_impl_t* aiop = rtor->aiop;
    tb_assert_and_check_return_val(aiop, tb_false);

    // delo it, TODO: delo by binary search
    // the walk predicate matches on the raw fd; sock handles are fd + 1
    // (same encoding the select reactor's addo uses)
    tb_spinlock_enter(&impl->lock.pfds);
    tb_remove_first_if(impl->pfds, tb_poll_walk_delo, (tb_pointer_t)(((tb_long_t)aioo->sock) - 1));
    tb_spinlock_leave(&impl->lock.pfds);

    // del sock => aioo
    tb_spinlock_enter(&impl->lock.hash);
    if (impl->hash) tb_hash_del(impl->hash, aioo->sock);
    tb_spinlock_leave(&impl->lock.hash);

    // spak it: wake the poll loop so it notices the removal
    if (aiop->spak[0]) tb_socket_send(aiop->spak[0], (tb_byte_t const*)"p", 1);

    // ok
    return tb_true;
}
/* Remove an aioo from the select reactor: clear the fd from the interest
 * sets, drop the sock => aioo mapping, and wake the waiting select loop.
 */
static tb_bool_t tb_aiop_rtor_select_delo(tb_aiop_rtor_impl_t* rtor, tb_aioo_impl_t const* aioo)
{
    // check the reactor and the object to remove
    tb_aiop_rtor_select_impl_t* impl = (tb_aiop_rtor_select_impl_t*)rtor;
    tb_assert_and_check_return_val(impl && aioo && aioo->sock, tb_false);

    // the owning aiop
    tb_aiop_impl_t* aiop = rtor->aiop;
    tb_assert_and_check_return_val(aiop, tb_false);

    // the native fd for this socket
    tb_long_t sockfd = tb_sock2fd(aioo->sock);

    // drop the fd from the read/write interest sets
    tb_spinlock_enter(&impl->lock.pfds);
    FD_CLR(sockfd, &impl->rfdi);
    FD_CLR(sockfd, &impl->wfdi);
    tb_spinlock_leave(&impl->lock.pfds);

    // forget the sock => aioo mapping
    tb_spinlock_enter(&impl->lock.hash);
    if (impl->hash) tb_hash_map_remove(impl->hash, aioo->sock);
    tb_spinlock_leave(&impl->lock.hash);

    // wake the select loop so it rebuilds its fd sets
    if (aiop->spak[0]) tb_socket_send(aiop->spak[0], (tb_byte_t const*)"p", 1);

    // ok
    return tb_true;
}
/* Destroy the select reactor: reset the fd sets, release the hash and
 * both locks, then free the impl. A null rtor is a no-op.
 */
static tb_void_t tb_aiop_rtor_select_exit(tb_aiop_rtor_impl_t* rtor)
{
    tb_aiop_rtor_select_impl_t* impl = (tb_aiop_rtor_select_impl_t*)rtor;
    tb_check_return(impl);

    // reset the fd sets under the pfds lock
    tb_spinlock_enter(&impl->lock.pfds);
    FD_ZERO(&impl->rfdi);
    FD_ZERO(&impl->wfdi);
    FD_ZERO(&impl->rfdo);
    FD_ZERO(&impl->wfdo);
    tb_spinlock_leave(&impl->lock.pfds);

    // release the sock => aioo hash
    tb_spinlock_enter(&impl->lock.hash);
    if (impl->hash) tb_hash_map_exit(impl->hash);
    impl->hash = tb_null;
    tb_spinlock_leave(&impl->lock.hash);

    // destroy both locks
    tb_spinlock_exit(&impl->lock.pfds);
    tb_spinlock_exit(&impl->lock.hash);

    // release the reactor itself
    tb_free(impl);
}
/* Destroy the poll reactor: release both fd vectors and the hash, destroy
 * the locks, then free the impl. A null rtor is a no-op.
 */
static tb_void_t tb_aiop_rtor_poll_exit(tb_aiop_rtor_impl_t* rtor)
{
    tb_aiop_rtor_poll_impl_t* impl = (tb_aiop_rtor_poll_impl_t*)rtor;
    tb_check_return(impl);

    // release the pollfd vector under its lock
    tb_spinlock_enter(&impl->lock.pfds);
    if (impl->pfds) tb_vector_exit(impl->pfds);
    impl->pfds = tb_null;
    tb_spinlock_leave(&impl->lock.pfds);

    // release the copied fds vector (not lock-protected, as in the original)
    if (impl->cfds) tb_vector_exit(impl->cfds);
    impl->cfds = tb_null;

    // release the sock => aioo hash
    tb_spinlock_enter(&impl->lock.hash);
    if (impl->hash) tb_hash_exit(impl->hash);
    impl->hash = tb_null;
    tb_spinlock_leave(&impl->lock.hash);

    // destroy both locks
    tb_spinlock_exit(&impl->lock.pfds);
    tb_spinlock_exit(&impl->lock.hash);

    // release the reactor itself
    tb_free(impl);
}
/* Reset the select reactor to its empty state: zero the max fd and every
 * fd set, clear the hash, and wake the select loop.
 */
static tb_void_t tb_aiop_rtor_select_cler(tb_aiop_rtor_impl_t* rtor)
{
    tb_aiop_rtor_select_impl_t* impl = (tb_aiop_rtor_select_impl_t*)rtor;
    tb_check_return(impl);

    // wipe the max fd and all six fd sets under the pfds lock
    tb_spinlock_enter(&impl->lock.pfds);
    impl->sfdm = 0;
    FD_ZERO(&impl->rfdi);
    FD_ZERO(&impl->wfdi);
    FD_ZERO(&impl->efdi);
    FD_ZERO(&impl->rfdo);
    FD_ZERO(&impl->wfdo);
    FD_ZERO(&impl->efdo);
    tb_spinlock_leave(&impl->lock.pfds);

    // drop every sock => aioo mapping
    tb_spinlock_enter(&impl->lock.hash);
    if (impl->hash) tb_hash_clear(impl->hash);
    tb_spinlock_leave(&impl->lock.hash);

    // wake the select loop so it picks up the cleared state
    if (rtor->aiop && rtor->aiop->spak[0]) tb_socket_send(rtor->aiop->spak[0], (tb_byte_t const*)"p", 1);
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */
/* Register an aioo with the select reactor.
 *
 * Inserts the sock => aioo mapping (bounded by FD_SETSIZE), registers the
 * fd in the read/write/exception interest sets according to aioo->code,
 * and pokes the spak socket so the select loop re-waits with the new sets.
 */
static tb_bool_t tb_aiop_rtor_select_addo(tb_aiop_rtor_impl_t* rtor, tb_aioo_impl_t const* aioo)
{
    // check
    tb_aiop_rtor_select_impl_t* impl = (tb_aiop_rtor_select_impl_t*)rtor;
    tb_assert_and_check_return_val(impl && rtor->aiop && aioo && aioo->sock, tb_false);

    // the aiop
    tb_aiop_impl_t* aiop = rtor->aiop;
    tb_assert_and_check_return_val(aiop, tb_false);

    /* add sock => aioo
     *
     * the size check and the insertion are done inside one critical section:
     * the original checked tb_hash_size() under the lock, released it, then
     * re-entered the lock to insert, so two racing adders could both pass
     * the check and push the hash past FD_SETSIZE.
     */
    tb_bool_t ok = tb_false;
    tb_spinlock_enter(&impl->lock.hash);
    if (impl->hash && tb_hash_size(impl->hash) < FD_SETSIZE)
    {
        tb_hash_set(impl->hash, aioo->sock, aioo);
        ok = tb_true;
    }
    tb_spinlock_leave(&impl->lock.hash);
    tb_assert_and_check_return_val(ok, tb_false);

    // the fd: sock handles are fd + 1
    tb_long_t fd = ((tb_long_t)aioo->sock) - 1;

    // the waited event code
    tb_size_t code = aioo->code;

    // enter
    tb_spinlock_enter(&impl->lock.pfds);

    // update fd max
    if (fd > (tb_long_t)impl->sfdm) impl->sfdm = (tb_size_t)fd;

    // init fds: read set for recv/accept, write set for send/connect,
    // exception set always
    if (code & (TB_AIOE_CODE_RECV | TB_AIOE_CODE_ACPT)) FD_SET(fd, &impl->rfdi);
    if (code & (TB_AIOE_CODE_SEND | TB_AIOE_CODE_CONN)) FD_SET(fd, &impl->wfdi);
    FD_SET(fd, &impl->efdi);

    // leave
    tb_spinlock_leave(&impl->lock.pfds);

    // spak it
    if (aiop->spak[0] && code) tb_socket_send(aiop->spak[0], (tb_byte_t const*)"p", 1);

    // ok?
    return ok;
}
/* Destroy an aicp: kill and wait out all aicos, stop the loop and workers,
 * tear down the proactor and aico pool, then free the impl.
 *
 * Returns tb_false (leaving the aicp intact) if outstanding aicos fail to
 * exit within 5 seconds.
 */
tb_bool_t tb_aicp_exit(tb_aicp_ref_t aicp)
{
    // check
    tb_aicp_impl_t* impl = (tb_aicp_impl_t*)aicp;
    tb_assert_and_check_return_val(impl, tb_false);

    // kill all first
    tb_aicp_kill_all((tb_aicp_ref_t)impl);

    // wait all exiting
    if (tb_aicp_wait_all((tb_aicp_ref_t)impl, 5000) <= 0)
    {
        // wait failed, trace left aicos
        tb_spinlock_enter(&impl->lock);
        if (impl->pool) tb_fixed_pool_walk(impl->pool, tb_aicp_walk_wait, tb_null);
        tb_spinlock_leave(&impl->lock);
        return tb_false;
    }

    // kill loop
    tb_aicp_kill((tb_aicp_ref_t)impl);

    // wait workers exiting: poll the work counter for up to 5s
    tb_hong_t time = tb_mclock();
    while (tb_atomic_get(&impl->work) && (tb_mclock() < time + 5000)) tb_msleep(500);

    // exit proactor
    if (impl->ptor)
    {
        tb_assert(impl->ptor && impl->ptor->exit);
        impl->ptor->exit(impl->ptor);
        impl->ptor = tb_null;
    }

    // exit aico pool
    tb_spinlock_enter(&impl->lock);
    if (impl->pool) tb_fixed_pool_exit(impl->pool);
    impl->pool = tb_null;
    tb_spinlock_leave(&impl->lock);

    // exit lock
    tb_spinlock_exit(&impl->lock);

    // free impl
    tb_free(impl);

    // ok
    return tb_true;
}
/* Return the delay in ms until the earliest pending task fires, 0 if it
 * is already due, or (tb_size_t)-1 if the timer is invalid/stopped/empty.
 */
tb_size_t tb_timer_delay(tb_timer_ref_t timer)
{
    // check
    tb_timer_impl_t* impl = (tb_timer_impl_t*)timer;
    tb_assert_and_check_return_val(impl && impl->heap, -1);

    // refuse after the timer has been stopped
    tb_assert_and_check_return_val(!tb_atomic_get(&impl->stop), -1);

    // compute the delay of the earliest task under the lock
    tb_spinlock_enter(&impl->lock);
    tb_size_t delay = -1;
    if (tb_heap_size(impl->heap))
    {
        tb_timer_task_impl_t const* task = (tb_timer_task_impl_t const*)tb_heap_top(impl->heap);
        if (task)
        {
            tb_hong_t now = tb_timer_now(impl);
            delay = (task->when > now)? (tb_size_t)(task->when - now) : 0;
        }
    }
    tb_spinlock_leave(&impl->lock);

    return delay;
}
/* Return the absolute fire time of the earliest pending task, or
 * (tb_hize_t)-1 if the timer is invalid/stopped or the heap is empty.
 */
tb_hize_t tb_timer_top(tb_timer_ref_t timer)
{
    // check
    tb_timer_impl_t* impl = (tb_timer_impl_t*)timer;
    tb_assert_and_check_return_val(impl && impl->heap, -1);

    // refuse after the timer has been stopped
    tb_assert_and_check_return_val(!tb_atomic_get(&impl->stop), -1);

    // read the earliest task's time under the lock
    tb_spinlock_enter(&impl->lock);
    tb_hize_t when = -1;
    if (tb_heap_size(impl->heap))
    {
        tb_timer_task_impl_t const* task = (tb_timer_task_impl_t const*)tb_heap_top(impl->heap);
        if (task) when = task->when;
    }
    tb_spinlock_leave(&impl->lock);

    return when;
}
/* Post a list of tasks to the thread pool.
 *
 * Stops at the first task that fails to post (or immediately if the pool
 * is stopped) and returns the number of tasks actually posted. Workers
 * are notified afterwards, outside the lock.
 */
tb_size_t tb_thread_pool_task_post_list(tb_thread_pool_ref_t pool, tb_thread_pool_task_t const* list, tb_size_t size)
{
    // check
    tb_thread_pool_impl_t* impl = (tb_thread_pool_impl_t*)pool;
    tb_assert_and_check_return_val(impl && list, 0);

    // the worker count requested by the posted jobs
    tb_size_t post_size = 0;

    // post tasks one by one under the lock
    tb_size_t posted = 0;
    tb_spinlock_enter(&impl->lock);
    if (!impl->bstoped)
    {
        while (posted < size)
        {
            tb_thread_pool_job_t* job = tb_thread_pool_jobs_post_task(impl, &list[posted], &post_size);
            tb_assert_and_check_break(job);
            posted++;
        }
    }
    tb_spinlock_leave(&impl->lock);

    // notify the workers
    if (posted && post_size) tb_thread_pool_worker_post(impl, post_size);

    // the number of posted tasks
    return posted;
}
/* Dump the pool's allocation state: delegate to the allocator when one is
 * configured, otherwise dump the small pool under the lock.
 */
tb_void_t tb_pool_dump(tb_pool_ref_t pool)
{
    // check
    tb_pool_impl_t* impl = (tb_pool_impl_t*)pool;
    tb_assert_and_check_return(impl);

    // allocator-backed pool? delegate the dump
    if (impl->allocator)
    {
        tb_allocator_dump(impl->allocator);
        return ;
    }

    // both pools must exist for a native dump
    tb_assert_and_check_return(impl->large_pool && impl->small_pool);

    // dump the small pool while holding the lock
    tb_spinlock_enter(&impl->lock);
    tb_small_pool_dump(impl->small_pool);
    tb_spinlock_leave(&impl->lock);
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */
/* Allocate and initialize a new aico from the aicp's fixed pool.
 * Returns tb_null when the pool allocation fails.
 */
tb_aico_ref_t tb_aico_init(tb_aicp_ref_t aicp)
{
    // check
    tb_aicp_impl_t* aicp_impl = (tb_aicp_impl_t*)aicp;
    tb_assert_and_check_return_val(aicp_impl && aicp_impl->pool, tb_null);

    // allocate a zeroed aico and fill it in, all under the aicp lock
    tb_spinlock_enter(&aicp_impl->lock);
    tb_aico_impl_t* aico = (tb_aico_impl_t*)tb_fixed_pool_malloc0(aicp_impl->pool);
    if (aico)
    {
        // bind it to the aicp with no handle and a closed state
        aico->aicp   = aicp;
        aico->type   = TB_AICO_TYPE_NONE;
        aico->handle = tb_null;
        aico->state  = TB_STATE_CLOSED;

        // initialize every timeout slot to -1
        tb_size_t n = tb_arrayn(aico->timeout);
        tb_size_t i;
        for (i = 0; i < n; i++) aico->timeout[i] = -1;
    }
    tb_spinlock_leave(&aicp_impl->lock);

    // the new aico, or null on failure
    return (tb_aico_ref_t)aico;
}
/* Destroy an aico.
 *
 * Waits (up to 15 * 200ms) for the aico to reach TB_STATE_CLOSED, then
 * returns it to the aicp's fixed pool under the aicp lock. If it never
 * closes, the aico is deliberately leaked rather than freed while in use.
 */
tb_void_t tb_aico_exit(tb_aico_ref_t aico)
{
    // check the aico itself before touching any of its fields
    // (the original read impl->aicp before the null check, so a null aico
    // dereferenced a null pointer instead of failing the assert)
    tb_aico_impl_t* impl = (tb_aico_impl_t*)aico;
    tb_assert_and_check_return(impl);

    // check the owning aicp
    tb_aicp_impl_t* aicp_impl = (tb_aicp_impl_t*)impl->aicp;
    tb_assert_and_check_return(aicp_impl && aicp_impl->pool);

    // wait closing?
    tb_size_t tryn = 15;
    while (tb_atomic_get(&impl->state) != TB_STATE_CLOSED && tryn--)
    {
        // trace
        tb_trace_d("exit[%p]: type: %lu, handle: %p, state: %s: wait: ..", aico, tb_aico_type(aico), impl->handle, tb_state_cstr(tb_atomic_get(&impl->state)));

        // wait some time
        tb_msleep(200);
    }

    // never freed while still open: leak instead of use-after-free
    tb_assert_abort(tb_atomic_get(&impl->state) == TB_STATE_CLOSED);
    tb_check_return(tb_atomic_get(&impl->state) == TB_STATE_CLOSED);

    // enter
    tb_spinlock_enter(&aicp_impl->lock);

    // trace
    tb_trace_d("exit[%p]: type: %lu, handle: %p, state: %s: ok", aico, tb_aico_type(aico), impl->handle, tb_state_cstr(tb_atomic_get(&impl->state)));

    // free it
    tb_fixed_pool_free(aicp_impl->pool, aico);

    // leave
    tb_spinlock_leave(&aicp_impl->lock);
}
/* ////////////////////////////////////////////////////////////////////////////////////// * implementation */ tb_bool_t tb_native_memory_init() { // enter tb_spinlock_enter_without_profiler(&g_lock); // done tb_bool_t ok = tb_false; do { // have been inited? tb_check_break_state(!g_heap, ok, tb_true); // make heap g_heap = (tb_handle_t)HeapCreate(0, 0, 0); tb_check_break(g_heap); // ok ok = tb_true; } while (0); // leave tb_spinlock_leave(&g_lock); // ok? return ok; }
/* Reallocate a native-heap block.
 *
 * size == 0 frees the block and returns null; data == null behaves like
 * malloc; otherwise the block is resized via HeapReAlloc under g_lock.
 */
tb_pointer_t tb_native_memory_ralloc(tb_pointer_t data, tb_size_t size)
{
    // a zero size means free
    if (!size)
    {
        tb_native_memory_free(data);
        return tb_null;
    }

    // no existing block? plain allocation
    if (!data) return tb_native_memory_malloc(size);

    // resize the existing block under the lock
    tb_spinlock_enter_without_profiler(&g_lock);
    if (g_heap) data = (tb_pointer_t)HeapReAlloc((HANDLE)g_heap, 0, data, (SIZE_T)size);
    tb_spinlock_leave(&g_lock);

    return data;
}
/* Post an aioe event to the poll reactor.
 *
 * Saves the requested event code/priv on the aioo, updates the matching
 * pollfd entry via a full walk of the pfds vector, then pokes the spak
 * socket so the polling loop re-waits with the new interest set.
 */
static tb_bool_t tb_aiop_rtor_poll_post(tb_aiop_rtor_impl_t* rtor, tb_aioe_t const* aioe)
{
    // check
    tb_aiop_rtor_poll_impl_t* impl = (tb_aiop_rtor_poll_impl_t*)rtor;
    tb_assert_and_check_return_val(impl && impl->pfds && impl->cfds && aioe, tb_false);

    // the aiop
    tb_aiop_impl_t* aiop = rtor->aiop;
    tb_assert_and_check_return_val(aiop, tb_false);

    // the aioo
    tb_aioo_impl_t* aioo = (tb_aioo_impl_t*)aioe->aioo;
    tb_assert_and_check_return_val(aioo, tb_false);

    // save aioo
    aioo->code = aioe->code;
    aioo->priv = aioe->priv;

    // sete it, TODO: sete by binary search
    // NOTE(review): O(n) walk of the whole pfds vector for a single entry
    tb_spinlock_enter(&impl->lock.pfds);
    tb_walk_all(impl->pfds, tb_poll_walk_sete, (tb_pointer_t)aioe);
    tb_spinlock_leave(&impl->lock.pfds);

    // spak it: wake the poll loop
    if (aiop->spak[0]) tb_socket_send(aiop->spak[0], (tb_byte_t const*)"p", 1);

    // ok
    return tb_true;
}
/* Destroy a pool.
 *
 * Allocator-backed pools delegate to the allocator. Otherwise the small
 * pool is torn down under the lock, the lock is destroyed, and finally
 * the impl itself is returned to the large pool — the large_pool free
 * must come last because it releases the impl's own storage.
 */
tb_void_t tb_pool_exit(tb_pool_ref_t pool)
{
    // check
    tb_pool_impl_t* impl = (tb_pool_impl_t*)pool;
    tb_assert_and_check_return(impl);

    // uses allocator?
    if (impl->allocator)
    {
        // exit it
        tb_allocator_free(impl->allocator, impl);
        return ;
    }

    // enter
    tb_spinlock_enter(&impl->lock);

    // exit small pool
    if (impl->small_pool) tb_small_pool_exit(impl->small_pool);
    impl->small_pool = tb_null;

    // leave
    tb_spinlock_leave(&impl->lock);

    // exit lock
    tb_spinlock_exit(&impl->lock);

    // exit pool: free the impl's storage from the large pool
    if (impl->large_pool) tb_large_pool_free(impl->large_pool, impl);
}
/* Release a caller's reference to a timer task.
 *
 * While the task is still referenced elsewhere (e.g. pending in the heap)
 * it is only neutered — callback, private data and repeat flag cleared —
 * so the timer loop can drop it; otherwise it goes back to the pool now.
 */
tb_void_t tb_timer_task_exit(tb_timer_ref_t timer, tb_timer_task_ref_t task)
{
    // check
    tb_timer_impl_t*      impl      = (tb_timer_impl_t*)timer;
    tb_timer_task_impl_t* task_impl = (tb_timer_task_impl_t*)task;
    tb_assert_and_check_return(impl && impl->pool && task_impl);

    // trace
    tb_trace_d("exit: when: %lld, period: %u, refn: %u", task_impl->when, task_impl->period, task_impl->refn);

    // enter
    tb_spinlock_enter(&impl->lock);

    if (task_impl->refn > 1)
    {
        // still referenced: drop our reference and cancel the callback
        task_impl->refn--;
        task_impl->func   = tb_null;
        task_impl->priv   = tb_null;
        task_impl->repeat = 0;
    }
    else
    {
        // last reference: return the task to the pool directly
        tb_fixed_pool_free(impl->pool, task_impl);
    }

    // leave
    tb_spinlock_leave(&impl->lock);
}
/* ////////////////////////////////////////////////////////////////////////////////////// * implementation */ tb_bool_t tb_dns_cache_init() { // enter tb_spinlock_enter(&g_lock); // done tb_bool_t ok = tb_false; do { // init hash if (!g_cache.hash) g_cache.hash = tb_hash_init(tb_align8(tb_isqrti(TB_DNS_CACHE_MAXN) + 1), tb_item_func_str(tb_false), tb_item_func_mem(sizeof(tb_dns_cache_addr_t), tb_null, tb_null)); tb_assert_and_check_break(g_cache.hash); // ok ok = tb_true; } while (0); // leave tb_spinlock_leave(&g_lock); // failed? exit it if (!ok) tb_dns_cache_exit(); // ok? return ok; }
/* Destroy an aiop: tear down the reactor, close the spak socket pair,
 * release the aioo pool and the lock, then free the impl.
 */
tb_void_t tb_aiop_exit(tb_aiop_ref_t aiop)
{
    // check
    tb_aiop_impl_t* impl = (tb_aiop_impl_t*)aiop;
    tb_assert_and_check_return(impl);

    // tear down the reactor first
    if (impl->rtor && impl->rtor->exit) impl->rtor->exit(impl->rtor);

    // close both ends of the spak pair
    if (impl->spak[0]) tb_socket_exit(impl->spak[0]);
    if (impl->spak[1]) tb_socket_exit(impl->spak[1]);
    impl->spak[0] = tb_null;
    impl->spak[1] = tb_null;

    // release the aioo pool under the lock
    tb_spinlock_enter(&impl->lock);
    if (impl->pool) tb_fixed_pool_exit(impl->pool);
    impl->pool = tb_null;
    tb_spinlock_leave(&impl->lock);

    // destroy the lock and the impl itself
    tb_spinlock_exit(&impl->lock);
    tb_free(impl);
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * aioo
 */
/* Allocate an aioo from the aiop's fixed pool and bind it to the given
 * socket, event code and private data. Returns tb_null on pool failure.
 */
static tb_aioo_ref_t tb_aiop_aioo_init(tb_aiop_impl_t* impl, tb_socket_ref_t sock, tb_size_t code, tb_cpointer_t priv)
{
    // check
    tb_assert_and_check_return_val(impl && impl->pool, tb_null);

    // grab a zeroed aioo and fill it in, all under the lock
    tb_spinlock_enter(&impl->lock);
    tb_aioo_impl_t* aioo = (tb_aioo_impl_t*)tb_fixed_pool_malloc0(impl->pool);
    if (aioo)
    {
        aioo->code = code;
        aioo->priv = priv;
        aioo->sock = sock;
    }
    tb_spinlock_leave(&impl->lock);

    // the new aioo, or null on failure
    return (tb_aioo_ref_t)aioo;
}
/* Allocate size bytes through the allocator's hooks (debug-decorated
 * variant: __tb_debug_decl__ threads caller func/line/file through).
 *
 * Prefers the plain malloc hook, falls back to large_malloc. Asserts the
 * result is non-null and TB_POOL_DATA_ALIGN-aligned.
 */
tb_pointer_t tb_allocator_malloc_(tb_allocator_ref_t allocator, tb_size_t size __tb_debug_decl__)
{
    // check
    tb_assert_and_check_return_val(allocator, tb_null);

    // enter
    tb_spinlock_enter(&allocator->lock);

    // malloc it: prefer the plain malloc hook, fall back to large_malloc
    tb_pointer_t data = tb_null;
    if (allocator->malloc) data = allocator->malloc(allocator, size __tb_debug_args__);
    else if (allocator->large_malloc) data = allocator->large_malloc(allocator, size, tb_null __tb_debug_args__);

    // trace (the trailing %s/%d/%s are filled by the debug args)
    tb_trace_d("malloc(%lu): %p at %s(): %d, %s", size, data __tb_debug_args__);

    // check: still under the lock, so the traced state stays consistent
    tb_assertf(data, "malloc(%lu) failed!", size);
    tb_assertf(!(((tb_size_t)data) & (TB_POOL_DATA_ALIGN - 1)), "malloc(%lu): unaligned data: %p", size, data);

    // leave
    tb_spinlock_leave(&allocator->lock);

    // ok?
    return data;
}
/* Destroy the aiop proactor.
 *
 * Order matters: the file layer and the loop thread are shut down first,
 * then the spak queues are drained under the lock, then the aiop, list,
 * wait semaphore and both timers are released before the lock itself and
 * the impl are freed.
 */
static tb_void_t tb_aiop_ptor_exit(tb_aicp_ptor_impl_t* ptor)
{
    // check
    tb_aiop_ptor_impl_t* impl = (tb_aiop_ptor_impl_t*)ptor;
    tb_assert_and_check_return(impl);

    // trace
    tb_trace_d("exit");

    // exit file
    tb_aicp_file_exit(impl);

    // exit loop: wait up to 5s for the loop thread, then release its handle
    if (impl->loop)
    {
        tb_long_t wait = 0;
        if ((wait = tb_thread_wait(impl->loop, 5000)) <= 0)
        {
            // trace
            tb_trace_e("loop[%p]: wait failed: %ld!", impl->loop, wait);
        }
        tb_thread_exit(impl->loop);
        impl->loop = tb_null;
    }

    // exit spak: both priority queues, under the lock
    tb_spinlock_enter(&impl->lock);
    if (impl->spak[0]) tb_queue_exit(impl->spak[0]);
    if (impl->spak[1]) tb_queue_exit(impl->spak[1]);
    impl->spak[0] = tb_null;
    impl->spak[1] = tb_null;
    tb_spinlock_leave(&impl->lock);

    // exit aiop
    if (impl->aiop) tb_aiop_exit(impl->aiop);
    impl->aiop = tb_null;

    // exit list
    if (impl->list) tb_free(impl->list);
    impl->list = tb_null;

    // exit wait
    if (impl->wait) tb_semaphore_exit(impl->wait);
    impl->wait = tb_null;

    // exit timer
    if (impl->timer) tb_timer_exit(impl->timer);
    impl->timer = tb_null;

    // exit ltimer
    if (impl->ltimer) tb_ltimer_exit(impl->ltimer);
    impl->ltimer = tb_null;

    // exit lock
    tb_spinlock_exit(&impl->lock);

    // exit it
    tb_free(impl);
}
/* Destroy a timer.
 *
 * Stops the loop, waits (bounded, 10 * 500ms) for the worker to drop out,
 * wakes any waiter via the event so it can observe the stop flag, then
 * tears down heap/pool/event under the lock and frees the impl.
 */
tb_void_t tb_timer_exit(tb_timer_ref_t timer)
{
    // check
    tb_timer_impl_t* impl = (tb_timer_impl_t*)timer;
    tb_assert_and_check_return(impl);

    // stop it
    tb_atomic_set(&impl->stop, 1);

    // wait loop exit
    tb_size_t tryn = 10;
    while (tb_atomic_get(&impl->work) && tryn--) tb_msleep(500);

    // warning: proceeding anyway risks freeing state the loop still uses
    if (!tryn && tb_atomic_get(&impl->work)) tb_trace_w("[timer]: the loop has been not exited now!");

    // post event: snapshot the event under the lock, post outside it
    tb_spinlock_enter(&impl->lock);
    tb_event_ref_t event = impl->event;
    tb_spinlock_leave(&impl->lock);
    if (event) tb_event_post(event);

    // enter
    tb_spinlock_enter(&impl->lock);

    // exit heap
    if (impl->heap) tb_heap_exit(impl->heap);
    impl->heap = tb_null;

    // exit pool
    if (impl->pool) tb_fixed_pool_exit(impl->pool);
    impl->pool = tb_null;

    // exit event
    if (impl->event) tb_event_exit(impl->event);
    impl->event = tb_null;

    // leave
    tb_spinlock_leave(&impl->lock);

    // exit lock
    tb_spinlock_exit(&impl->lock);

    // exit it
    tb_free(impl);
}
/* Destroy a transfer pool.
 *
 * Kills all tasks and waits up to 5s for them to finish; on timeout
 * returns tb_false and leaves the pool alive. Otherwise tears down the
 * work/idle lists and the task pool under the lock, destroys the lock,
 * and frees the impl.
 */
tb_bool_t tb_transfer_pool_exit(tb_transfer_pool_ref_t pool)
{
    // check
    tb_transfer_pool_impl_t* impl = (tb_transfer_pool_impl_t*)pool;
    tb_assert_and_check_return_val(impl, tb_false);

    // trace
    tb_trace_d("exit: ..");

    // kill it first
    tb_transfer_pool_kill(pool);

    // wait all
    if (tb_transfer_pool_wait_all(pool, 5000) <= 0)
    {
        // trace
        tb_trace_e("exit: wait failed!");
        return tb_false;
    }

    // enter
    tb_spinlock_enter(&impl->lock);

    // check: no task may still be in flight at this point
    tb_assert(!tb_list_entry_size(&impl->work));

    // exit the work list
    tb_list_entry_exit(&impl->work);

    // exit the idle list
    tb_list_entry_exit(&impl->idle);

    // exit pool
    if (impl->pool)
    {
        // exit all task
        tb_fixed_pool_walk(impl->pool, tb_transfer_pool_walk_exit, tb_null);

        // exit it
        tb_fixed_pool_exit(impl->pool);
        impl->pool = tb_null;
    }

    // leave
    tb_spinlock_leave(&impl->lock);

    // exit lock
    tb_spinlock_exit(&impl->lock);

    // exit it
    tb_free(pool);

    // trace
    tb_trace_d("exit: ok");

    // ok
    return tb_true;
}
/* Timeout/kill callback for a pending wait task.
 *
 * Detaches the sock aioo from the aiop, then — unless the spak loop has
 * already completed this wait (wait_ok) — queues a TIMEOUT/KILLED aice on
 * the matching priority spak queue and kicks a worker.
 */
static tb_void_t tb_aiop_spak_wait_timeout(tb_bool_t killed, tb_cpointer_t priv)
{
    // the aico
    tb_aiop_aico_t* aico = (tb_aiop_aico_t*)priv;
    tb_assert_and_check_return(aico && aico->waiting);

    // the impl
    tb_aiop_ptor_impl_t* impl = aico->impl;
    tb_assert_and_check_return(impl && impl->aiop);

    // for sock
    if (aico->base.type == TB_AICO_TYPE_SOCK)
    {
        // check
        tb_assert_and_check_return(aico->aioo);

        // delo aioo: stop watching the socket
        tb_aiop_delo(impl->aiop, aico->aioo);
        aico->aioo = tb_null;
    }

    // have been waited ok for the spak loop? need not spak it repeatly
    tb_bool_t ok = tb_false;
    if (!aico->wait_ok)
    {
        // the priority queue for this aice
        tb_size_t priority = tb_aice_impl_priority(&aico->aice);
        tb_assert_and_check_return(priority < tb_arrayn(impl->spak) && impl->spak[priority]);

        // trace
        tb_trace_d("wait: timeout: code: %lu, priority: %lu, time: %lld", aico->aice.code, priority, tb_cache_time_mclock());

        // enter
        tb_spinlock_enter(&impl->lock);

        // spak aice
        if (!tb_queue_full(impl->spak[priority]))
        {
            // save state: distinguish kill from plain timeout
            aico->aice.state = killed? TB_STATE_KILLED : TB_STATE_TIMEOUT;

            // put it
            tb_queue_put(impl->spak[priority], &aico->aice);

            // ok: mark done so the spak loop won't report this wait again
            ok = tb_true;
            aico->wait_ok = 1;
        }
        // a full spak queue would silently drop the event — hard error
        else tb_assert(0);

        // leave
        tb_spinlock_leave(&impl->lock);
    }

    // work it: wake a worker to consume the queued aice
    if (ok) tb_aiop_spak_work(impl);
}
/* Append a trace fragment (no prefix handling) to the configured trace
 * sinks: console and, optionally, the trace file.
 *
 * Formats into the shared g_line buffer under g_lock, so concurrent
 * tracers are serialized.
 */
tb_void_t tb_trace_tail(tb_char_t const* format, ...)
{
    // check
    tb_check_return(format);

    // enter
    tb_spinlock_enter_without_profiler(&g_lock);

    // done
    do
    {
        // tracing disabled?
        tb_check_break(g_mode);

        // init
        tb_va_list_t l;
        tb_char_t* p = g_line;
        tb_char_t* e = g_line + sizeof(g_line);
        tb_va_start(l, format);

        // append format
        // NOTE(review): if tb_vsnprintf returns the untruncated length
        // (C99 vsnprintf semantics), p can advance past e and the file
        // write below would read beyond g_line — confirm tb_vsnprintf's
        // contract.
        if (p < e) p += tb_vsnprintf(p, e - p, format, l);

        // append end: terminate even when truncated
        if (p < e) *p = '\0'; e[-1] = '\0';

        // print it
        if (g_mode & TB_TRACE_MODE_PRINT) tb_print(g_line);

        // print it to file
#ifndef TB_CONFIG_MICRO_ENABLE
        if ((g_mode & TB_TRACE_MODE_FILE) && g_file)
        {
            // write the whole line, retrying on short writes
            tb_size_t size = p - g_line;
            tb_size_t writ = 0;
            while (writ < size)
            {
                // writ it
                tb_long_t real = tb_file_writ(g_file, (tb_byte_t const*)g_line + writ, size - writ);
                tb_check_break(real > 0);

                // save size
                writ += real;
            }
        }
#endif

        // exit
        tb_va_end(l);

    } while (0);

    // leave
    tb_spinlock_leave(&g_lock);
}
/* Bind data to the calling thread's id in the global thread store.
 * A no-op when the store has not been created.
 */
tb_void_t tb_thread_store_setp(tb_thread_store_data_t const* data)
{
    tb_spinlock_enter(&g_lock);
    if (g_store) tb_hash_map_insert(g_store, (tb_pointer_t)tb_thread_self(), data);
    tb_spinlock_leave(&g_lock);
}
/* Reset the poll reactor to its empty state: clear the pollfd vector and
 * the sock => aioo hash, then wake the polling loop.
 */
static tb_void_t tb_aiop_rtor_poll_cler(tb_aiop_rtor_impl_t* rtor)
{
    tb_aiop_rtor_poll_impl_t* impl = (tb_aiop_rtor_poll_impl_t*)rtor;
    tb_check_return(impl);

    // drop every registered pollfd
    tb_spinlock_enter(&impl->lock.pfds);
    if (impl->pfds) tb_vector_clear(impl->pfds);
    tb_spinlock_leave(&impl->lock.pfds);

    // drop every sock => aioo mapping
    tb_spinlock_enter(&impl->lock.hash);
    if (impl->hash) tb_hash_clear(impl->hash);
    tb_spinlock_leave(&impl->lock.hash);

    // wake the polling loop so it sees the cleared state
    if (rtor->aiop && rtor->aiop->spak[0]) tb_socket_send(rtor->aiop->spak[0], (tb_byte_t const*)"p", 1);
}
/* Release a mutex. On this platform the mutex is backed by a spinlock,
 * so leaving is a plain spinlock unlock. Returns tb_false only for a
 * null mutex.
 */
tb_bool_t tb_mutex_leave(tb_mutex_ref_t mutex)
{
    // a null mutex cannot be released
    tb_assert_and_check_return_val(mutex, tb_false);

    // unlock the backing spinlock
    tb_spinlock_leave((tb_spinlock_ref_t)mutex);
    return tb_true;
}