/* Kill an aico: move it from OPENED to KILLED, or from PENDING to KILLING
 * (in which case the proactor's kilo() is asked to cancel the in-flight event).
 *
 * @param aico      the aico to kill (must not be tb_null)
 */
tb_void_t tb_aico_kill(tb_aico_ref_t aico)
{
    // check the aico first: impl must be valid before impl->aicp may be read.
    // (previously impl->aicp was dereferenced before this null check)
    tb_aico_impl_t* impl = (tb_aico_impl_t*)aico;
    tb_assert_and_check_return(impl && impl->aicp);

    // check the aicp proactor and its kill-object entry
    tb_aicp_impl_t* aicp_impl = (tb_aicp_impl_t*)impl->aicp;
    tb_assert_and_check_return(aicp_impl->ptor && aicp_impl->ptor->kilo);

    // the aicp is killed and not worked? nothing left to cancel
    tb_check_return(!tb_atomic_get(&aicp_impl->kill) || tb_atomic_get(&aicp_impl->work));

    // trace
    tb_trace_d("kill: aico[%p]: type: %lu, handle: %p: state: %s: ..", aico, tb_aico_type(aico), impl->handle, tb_state_cstr(tb_atomic_get(&impl->state)));

    // opened? mark it killed directly, no event is pending
    if (TB_STATE_OPENED == tb_atomic_fetch_and_pset(&impl->state, TB_STATE_OPENED, TB_STATE_KILLED))
    {
        // trace
        tb_trace_d("kill: aico[%p]: type: %lu, handle: %p: ok", aico, tb_aico_type(aico), impl->handle);
    }
    // pending? enter the killing state and cancel the in-flight event
    else if (TB_STATE_PENDING == tb_atomic_fetch_and_pset(&impl->state, TB_STATE_PENDING, TB_STATE_KILLING))
    {
        // kill aico
        aicp_impl->ptor->kilo(aicp_impl->ptor, impl);

        // trace
        tb_trace_d("kill: aico[%p]: type: %lu, handle: %p: state: pending: ok", aico, tb_aico_type(aico), impl->handle);
    }
}
/* Exit the tbox library.
 *
 * Tears down every environment in reverse order of tb_init_() and runs at most
 * once: only the caller that flips g_state from OK to EXITING proceeds.
 */
tb_void_t tb_exit()
{
    // have been exited? only the first caller wins the OK -> EXITING transition
    if (TB_STATE_OK != tb_atomic_fetch_and_pset(&g_state, TB_STATE_OK, TB_STATE_EXITING)) return ;

    // kill singleton
    tb_singleton_kill();

    // exit object environment
#ifdef TB_CONFIG_MODULE_HAVE_OBJECT
    tb_object_exit_env();
#endif

    // exit network environment
    tb_network_exit_env();

    // exit libm environment
    tb_libm_exit_env();

    // exit math environment
    tb_math_exit_env();

    // exit libc environment
    tb_libc_exit_env();

    // exit platform environment
    tb_platform_exit_env();

    // exit singleton
    tb_singleton_exit();

    // exit memory environment (last of the env teardown — presumably because
    // the earlier steps may still free memory; verify against tb_init_() order)
    tb_memory_exit_env();

    // trace
    tb_trace_d("exit: ok");

    // exit trace
    tb_trace_exit();

    // end: mark fully exited so tb_init_() may run again
    tb_atomic_set(&g_state, TB_STATE_END);
}
/* Post an aice (async I/O completion event) to the proactor.
 *
 * The owning aico is atomically moved OPENED -> PENDING so only one event can
 * be in flight per aico; posting is also allowed from the KILLED state.
 * Posting from any other state is treated as a caller bug (debug abort).
 *
 * @param aicp      the aicp
 * @param aice      the aice to post; aice->aico must be set
 * @return          tb_true if the proactor accepted the event
 */
tb_bool_t tb_aicp_post_(tb_aicp_ref_t aicp, tb_aice_ref_t aice __tb_debug_decl__)
{
    // check
    tb_aicp_impl_t* impl = (tb_aicp_impl_t*)aicp;
    tb_assert_and_check_return_val(impl && impl->ptor && impl->ptor->post, tb_false);
    tb_assert_and_check_return_val(aice && aice->aico, tb_false);

    // the aico
    tb_aico_impl_t* aico = (tb_aico_impl_t*)aice->aico;
    tb_assert_and_check_return_val(aico, tb_false);

    // opened or killed or closed? pending it
    tb_size_t state = tb_atomic_fetch_and_pset(&aico->state, TB_STATE_OPENED, TB_STATE_PENDING);
    if (state == TB_STATE_OPENED || state == TB_STATE_KILLED)
    {
        // save debug info: the caller location from the __tb_debug_decl__ args
#ifdef __tb_debug__
        aico->func = func_;
        aico->file = file_;
        aico->line = line_;
#endif

        // post aice
        return impl->ptor->post(impl->ptor, aice);
    }

    // trace the rejected post (with caller location in debug builds)
#ifdef __tb_debug__
    tb_trace_e("post aice[%lu] failed, the aico[%p]: type: %lu, handle: %p, state: %s for func: %s, line: %lu, file: %s", aice->code, aico, tb_aico_type((tb_aico_ref_t)aico), aico->handle, tb_state_cstr(state), func_, line_, file_);
#else
    tb_trace_e("post aice[%lu] failed, the aico[%p]: type: %lu, handle: %p, state: %s", aice->code, aico, tb_aico_type((tb_aico_ref_t)aico), aico->handle, tb_state_cstr(state));
#endif

    // abort it: posting from a wrong state is a caller bug
    tb_assert(0);

    // post failed
    return tb_false;
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */
/* Init the tbox library.
 *
 * Runs at most once: only the caller that flips g_state from END to OK
 * performs the environment initialization; later callers return tb_true.
 *
 * @param priv       platform-private data, passed to tb_platform_init_env()
 * @param allocator  the allocator, passed to tb_memory_init_env()
 * @param mode       the expected compile mode, verified by tb_check_mode()
 * @param build      the caller's build version, verified by tb_version_check()
 * @return           tb_true if the library is (now or already) initialized
 */
tb_bool_t tb_init_(tb_handle_t priv, tb_allocator_ref_t allocator, tb_size_t mode, tb_hize_t build)
{
    // have been inited? the END -> OK transition succeeds only for the first caller
    if (TB_STATE_OK == tb_atomic_fetch_and_pset(&g_state, TB_STATE_END, TB_STATE_OK)) return tb_true;

    // init trace
    if (!tb_trace_init()) return tb_false;

    // trace
    tb_trace_d("init: ..");

    // check mode
    if (!tb_check_mode(mode)) return tb_false;

    // check types: compile-time size assumptions the library relies on
    tb_assert_static(sizeof(tb_byte_t) == 1);
    tb_assert_static(sizeof(tb_uint_t) == 4);
    tb_assert_static(sizeof(tb_uint8_t) == 1);
    tb_assert_static(sizeof(tb_uint16_t) == 2);
    tb_assert_static(sizeof(tb_uint32_t) == 4);
    tb_assert_static(sizeof(tb_hize_t) == 8);
    tb_assert_static(sizeof(tb_wchar_t) == sizeof(L'w'));
    tb_assert_static(TB_CPU_BITSIZE == (sizeof(tb_size_t) << 3));
    tb_assert_static(TB_CPU_BITSIZE == (sizeof(tb_long_t) << 3));
    tb_assert_static(TB_CPU_BITSIZE == (sizeof(tb_pointer_t) << 3));
    tb_assert_static(TB_CPU_BITSIZE == (sizeof(tb_handle_t) << 3));

    // check byteorder
    tb_assert_and_check_return_val(tb_check_order_word(), tb_false);
    tb_assert_and_check_return_val(tb_check_order_double(), tb_false);

    // init singleton
    if (!tb_singleton_init()) return tb_false;

    // init memory environment
    if (!tb_memory_init_env(allocator)) return tb_false;

    // init platform environment
    if (!tb_platform_init_env(priv)) return tb_false;

    // init libc environment
    if (!tb_libc_init_env()) return tb_false;

    // init math environment
    if (!tb_math_init_env()) return tb_false;

    // init libm environment
    if (!tb_libm_init_env()) return tb_false;

    // init network environment
    if (!tb_network_init_env()) return tb_false;

    // init object environment
#ifdef TB_CONFIG_MODULE_HAVE_OBJECT
    if (!tb_object_init_env()) return tb_false;
#endif

    // check version: the caller's build must match the library's
    tb_version_check(build);

    // trace
    tb_trace_d("init: ok");

    // ok
    return tb_true;
}
// the maxn return impl->maxn; } tb_bool_t tb_aicp_post_(tb_aicp_ref_t aicp, tb_aice_t const* aice __tb_debug_decl__) { // check tb_aicp_impl_t* impl = (tb_aicp_impl_t*)aicp; tb_assert_and_check_return_val(impl && impl->ptor && impl->ptor->post, tb_false); tb_assert_and_check_return_val(aice && aice->aico, tb_false); // the aico tb_aico_impl_t* aico = (tb_aico_impl_t*)aice->aico; tb_assert_and_check_return_val(aico, tb_false); // opened or killed or closed? pending it tb_size_t state = tb_atomic_fetch_and_pset(&aico->state, TB_STATE_OPENED, TB_STATE_PENDING); if (state == TB_STATE_OPENED || state == TB_STATE_KILLED) { // save debug info #ifdef __tb_debug__ aico->func = func_; aico->file = file_; aico->line = line_; #endif // post aice return impl->ptor->post(impl->ptor, aice); } // trace #ifdef __tb_debug__
/* Run one worker loop of the aicp: repeatedly spak (wait + dequeue) completed
 * events from the proactor and dispatch their callbacks, until the proactor
 * signals shutdown or the optional stop() predicate fires.
 *
 * @param aicp      the aicp
 * @param stop      optional predicate; when it returns tb_true the whole aicp
 *                  is killed (the loop then exits on the proactor's signal)
 * @param priv      user data passed to stop()
 */
tb_void_t tb_aicp_loop_util(tb_aicp_ref_t aicp, tb_bool_t (*stop)(tb_cpointer_t priv), tb_cpointer_t priv)
{
    // check
    tb_aicp_impl_t* impl = (tb_aicp_impl_t*)aicp;
    tb_assert_and_check_return(impl);

    // the ptor
    tb_aicp_ptor_impl_t* ptor = impl->ptor;
    tb_assert_and_check_return(ptor && ptor->loop_spak);

    // the loop spak
    tb_long_t (*loop_spak)(tb_aicp_ptor_impl_t* , tb_handle_t, tb_aice_ref_t , tb_long_t ) = ptor->loop_spak;

    // worker++: count this thread as an active worker
    tb_atomic_fetch_and_inc(&impl->work);

    // init per-loop proactor state, if the proactor needs any
    tb_handle_t loop = ptor->loop_init? ptor->loop_init(ptor) : tb_null;

    // trace
    tb_trace_d("loop[%p]: init", loop);

    // spak ctime: refresh the cached time before entering the loop
    tb_cache_time_spak();

    // loop
    while (1)
    {
        // spak: wait forever (-1) for the next completed event
        tb_aice_t resp = {0};
        tb_long_t ok = loop_spak(ptor, loop, &resp, -1);

        // spak ctime: refresh the cached time after (possibly long) waiting
        tb_cache_time_spak();

        // failed? the proactor wants this loop to exit
        tb_check_break(ok >= 0);

        // timeout? no event this round
        tb_check_continue(ok);

        // check aico
        tb_aico_impl_t* aico = (tb_aico_impl_t*)resp.aico;
        tb_assert_and_check_continue(aico);

        // trace
        tb_trace_d("loop[%p]: spak: code: %lu, aico: %p, state: %s: %ld", loop, resp.code, aico, aico? tb_state_cstr(tb_atomic_get(&aico->state)) : "null", ok);

        // pending? clear state if be not accept or accept failed
        // (a successful accept keeps the listening aico PENDING for more connections)
        tb_size_t state = TB_STATE_OPENED;
        state = (resp.code != TB_AICE_CODE_ACPT || resp.state != TB_STATE_OK)? tb_atomic_fetch_and_pset(&aico->state, TB_STATE_PENDING, state) : tb_atomic_get(&aico->state);

        // killed or killing?
        if (state == TB_STATE_KILLED || state == TB_STATE_KILLING)
        {
            // update the aice state so the callback sees the kill
            resp.state = TB_STATE_KILLED;

            // killing? update to the killed state
            tb_atomic_fetch_and_pset(&aico->state, TB_STATE_KILLING, TB_STATE_KILLED);
        }

        // done func, @note maybe the aico exit will be called
        if (resp.func && !resp.func(&resp))
        {
            // trace
#ifdef __tb_debug__
            tb_trace_e("loop[%p]: done aice func failed with code: %lu at line: %lu, func: %s, file: %s!", loop, resp.code, aico->line, aico->func, aico->file);
#else
            tb_trace_e("loop[%p]: done aice func failed with code: %lu!", loop, resp.code);
#endif
        }

        // killing? update to the killed state
        // (again: the callback above may have re-entered the kill path)
        tb_atomic_fetch_and_pset(&aico->state, TB_STATE_KILLING, TB_STATE_KILLED);

        // stop it?
        if (stop && stop(priv)) tb_aicp_kill(aicp);
    }

    // exit the per-loop proactor state
    if (ptor->loop_exit) ptor->loop_exit(ptor, loop);

    // worker--
    tb_atomic_fetch_and_dec(&impl->work);

    // trace
    tb_trace_d("loop[%p]: exit", loop);
}
/* Open the async transfer: open-and-seek the input stream, then notify func.
 *
 * If the transfer is already OPENED, func is invoked immediately with the
 * current stream offset/size. Otherwise the transfer must be CLOSED; it is
 * moved to OPENING and the open completes asynchronously via
 * tb_async_transfer_istream_open_func.
 *
 * @param transfer  the transfer
 * @param offset    the start offset to seek the input stream to
 * @param func      the open-done callback (must not be tb_null)
 * @param priv      user data for func
 * @return          tb_true if the open was started (or done) successfully
 */
tb_bool_t tb_async_transfer_open(tb_async_transfer_ref_t transfer, tb_hize_t offset, tb_async_transfer_open_func_t func, tb_cpointer_t priv)
{
    // check
    tb_async_transfer_impl_t* impl = (tb_async_transfer_impl_t*)transfer;
    tb_assert_and_check_return_val(impl && impl->aicp && func, tb_false);

    // done
    tb_bool_t ok = tb_false;
    do
    {
        // set opening: CLOSED -> OPENING; returns the previous state
        tb_size_t state = tb_atomic_fetch_and_pset(&impl->state, TB_STATE_CLOSED, TB_STATE_OPENING);

        // opened? done func directly
        // NOTE(review): if this assert-break fires, the !ok path below resets
        // the state to CLOSED even though it was OPENED — verify intended.
        if (state == TB_STATE_OPENED)
        {
            // check
            tb_assert_and_check_break(impl->istream && impl->ostream);

            // done func
            func(TB_STATE_OK, tb_async_stream_offset(impl->istream), tb_async_stream_size(impl->istream), priv);

            // ok
            ok = tb_true;
            break;
        }

        // must be closed
        tb_assert_and_check_break(state == TB_STATE_CLOSED);

        // clear pause state
        tb_atomic_set(&impl->state_pause, TB_STATE_OK);

        // init func: saved for the async open completion
        impl->open.func = func;
        impl->open.priv = priv;

        // check
        tb_assert_and_check_break(impl->istream);
        tb_assert_and_check_break(impl->ostream);

        // init some rate info
        impl->done.base_time = tb_aicp_time(impl->aicp);
        impl->done.base_time1s = impl->done.base_time;
        impl->done.saved_size = 0;
        impl->done.saved_size1s = 0;
        impl->done.current_rate = 0;

        // ctrl stream: let the user configure both streams before opening
        if (impl->ctrl.func && !impl->ctrl.func(impl->istream, impl->ostream, impl->ctrl.priv)) break;

        // open and seek istream; completion continues asynchronously
        if (!tb_async_stream_open_seek(impl->istream, offset, tb_async_transfer_istream_open_func, impl)) break;

        // ok
        ok = tb_true;

    } while (0);

    // failed? restore state
    if (!ok) tb_atomic_set(&impl->state, TB_STATE_CLOSED);

    // ok?
    return ok;
}
/* Output-stream write-completion callback for the async transfer.
 *
 * Updates the transfer rate accounting (per-second window based on the cached
 * aicp time), applies the rate limit by delaying the next read, and either
 * continues writing (partial write), pauses, or schedules the next read.
 *
 * @param stream    the output stream
 * @param state     the write state (TB_STATE_OK on success)
 * @param data      the written data (unused here)
 * @param real      the bytes actually written
 * @param size      the bytes requested to write
 * @param priv      the transfer impl
 * @return          tb_true to continue writing the remaining data
 */
static tb_bool_t tb_async_transfer_ostream_writ_func(tb_async_stream_ref_t stream, tb_size_t state, tb_byte_t const* data, tb_size_t real, tb_size_t size, tb_cpointer_t priv)
{
    // check
    tb_async_transfer_impl_t* impl = (tb_async_transfer_impl_t*)priv;
    tb_assert_and_check_return_val(stream && impl && impl->aicp && impl->istream, tb_false);

    // trace
    tb_trace_d("writ: real: %lu, size: %lu, state: %s", real, size, tb_state_cstr(state));

    // the time
    tb_hong_t time = tb_aicp_time(impl->aicp);

    // done
    tb_bool_t bwrit = tb_false;
    do
    {
        // ok?
        tb_check_break(state == TB_STATE_OK);

        // reset state: any break below reports this unless overwritten
        state = TB_STATE_UNKNOWN_ERROR;

        // done func at first once: report progress before any data is counted
        if (!impl->done.saved_size && !tb_async_transfer_done_func(impl, TB_STATE_OK)) break;

        // update saved size
        impl->done.saved_size += real;

        // < 1s? still inside the current one-second accounting window
        tb_size_t delay = 0;
        tb_size_t limited_rate = tb_atomic_get(&impl->limited_rate);
        if (time < impl->done.base_time1s + 1000)
        {
            // save size for 1s
            impl->done.saved_size1s += real;

            // save current rate if < 1s from base_time
            if (time < impl->done.base_time + 1000) impl->done.current_rate = impl->done.saved_size1s;

            // compute the delay for limit rate: stall until the window ends
            if (limited_rate) delay = impl->done.saved_size1s >= limited_rate? (tb_size_t)(impl->done.base_time1s + 1000 - time) : 0;
        }
        else
        {
            // save current rate
            impl->done.current_rate = impl->done.saved_size1s;

            // update base_time1s: start a new one-second window
            impl->done.base_time1s = time;

            // reset size
            impl->done.saved_size1s = 0;

            // reset delay
            delay = 0;

            // done func: per-second progress notification
            if (!tb_async_transfer_done_func(impl, TB_STATE_OK)) break;
        }

        // killed?
        if (TB_STATE_KILLING == tb_atomic_get(&impl->state))
        {
            state = TB_STATE_KILLED;
            break;
        }

        // not finished? continue to writ
        tb_size_t state_pause = TB_STATE_OK;
        if (real < size) bwrit = tb_true;
        // pausing or paused? (PAUSING is atomically promoted to PAUSED here)
        else if (    (TB_STATE_PAUSED == (state_pause = tb_atomic_fetch_and_pset(&impl->state_pause, TB_STATE_PAUSING, TB_STATE_PAUSED)))
                 ||  (state_pause == TB_STATE_PAUSING))
        {
            // done func
            if (!tb_async_transfer_done_func(impl, TB_STATE_PAUSED)) break;
        }
        // continue?
        else
        {
            // trace
            tb_trace_d("delay: %lu ms", delay);

            // continue to read it, after the rate-limit delay
            if (!tb_async_stream_read_after(impl->istream, delay, limited_rate, tb_async_transfer_istream_read_func, (tb_pointer_t)impl)) break;
        }

        // ok
        state = TB_STATE_OK;

    } while (0);

    // failed?
    if (state != TB_STATE_OK)
    {
        // compute the total rate over the whole transfer
        impl->done.current_rate = (impl->done.saved_size && (time > impl->done.base_time))? (tb_size_t)((impl->done.saved_size * 1000) / (time - impl->done.base_time)) : (tb_size_t)impl->done.saved_size;

        // done func: report the failure state
        tb_async_transfer_done_func(impl, state);

        // break the write chain
        bwrit = tb_false;
    }

    // continue to writ or break it
    return bwrit;
}
/* Thread entry of a pool worker.
 *
 * Repeatedly pulls jobs (urgent first, then waiting, then pending) under the
 * pool spinlock, sleeps on the pool semaphore when idle, runs each pulled job
 * whose state is WAITING (moving it WAITING -> WORKING -> FINISHED), and keeps
 * per-task done-count/total-time statistics. On exit it runs the worker's
 * private-data destructors in reverse order and frees its containers.
 *
 * @param priv      the tb_thread_pool_worker_t* for this thread
 * @return          always tb_null
 */
static tb_pointer_t tb_thread_pool_worker_loop(tb_cpointer_t priv)
{
    // the worker
    tb_thread_pool_worker_t* worker = (tb_thread_pool_worker_t*)priv;

    // trace
    tb_trace_d("worker[%lu]: init", worker? worker->id : -1);

    // done
    do
    {
        // check: the containers must not have been initialized yet
        tb_assert_and_check_break(worker && !worker->jobs && !worker->stats);

        // the pool
        tb_thread_pool_impl_t* impl = (tb_thread_pool_impl_t*)worker->pool;
        tb_assert_and_check_break(impl && impl->semaphore);

        // wait some time for leaving the lock: stagger worker startup by id
        tb_msleep((worker->id + 1)* 20);

        // init jobs: the worker-local batch of pulled jobs
        worker->jobs = tb_vector_init(TB_THREAD_POOL_JOBS_WORKING_GROW, tb_element_ptr(tb_null, tb_null));
        tb_assert_and_check_break(worker->jobs);

        // init stats: per-task-function done-count/total-time map
        worker->stats = tb_hash_map_init(TB_HASH_MAP_BUCKET_SIZE_MICRO, tb_element_ptr(tb_null, tb_null), tb_element_mem(sizeof(tb_thread_pool_job_stats_t), tb_null, tb_null));
        tb_assert_and_check_break(worker->stats);

        // loop
        while (1)
        {
            // pull jobs if be idle
            if (!tb_vector_size(worker->jobs))
            {
                // enter
                tb_spinlock_enter(&impl->lock);

                // init the pull time
                worker->pull = 0;

                // pull from the urgent jobs first
                if (tb_list_entry_size(&impl->jobs_urgent))
                {
                    // trace
                    tb_trace_d("worker[%lu]: try pulling from urgent: %lu", worker->id, tb_list_entry_size(&impl->jobs_urgent));

                    // pull it
                    tb_remove_if_until(tb_list_entry_itor(&impl->jobs_urgent), tb_thread_pool_worker_walk_pull, worker);
                }

                // pull from the waiting jobs
                if (tb_list_entry_size(&impl->jobs_waiting))
                {
                    // trace
                    tb_trace_d("worker[%lu]: try pulling from waiting: %lu", worker->id, tb_list_entry_size(&impl->jobs_waiting));

                    // pull it
                    tb_remove_if_until(tb_list_entry_itor(&impl->jobs_waiting), tb_thread_pool_worker_walk_pull, worker);
                }

                // pull from the pending jobs and clean some finished and killed jobs
                if (tb_list_entry_size(&impl->jobs_pending))
                {
                    // trace
                    tb_trace_d("worker[%lu]: try pulling from pending: %lu", worker->id, tb_list_entry_size(&impl->jobs_pending));

                    // no jobs? try to pull from the pending jobs
                    if (!tb_vector_size(worker->jobs)) tb_remove_if(tb_list_entry_itor(&impl->jobs_pending), tb_thread_pool_worker_walk_pull_and_clean, worker);
                    // clean some finished and killed jobs
                    else tb_remove_if(tb_list_entry_itor(&impl->jobs_pending), tb_thread_pool_worker_walk_clean, worker);
                }

                // leave
                tb_spinlock_leave(&impl->lock);

                // idle? wait it
                if (!tb_vector_size(worker->jobs))
                {
                    // killed? leave the loop
                    tb_check_break(!tb_atomic_get(&worker->bstoped));

                    // trace
                    tb_trace_d("worker[%lu]: wait: ..", worker->id);

                    // wait some time: block until new jobs are posted
                    tb_long_t wait = tb_semaphore_wait(impl->semaphore, -1);
                    tb_assert_and_check_break(wait > 0);

                    // trace
                    tb_trace_d("worker[%lu]: wait: ok", worker->id);

                    // continue it
                    continue;
                }
                else
                {
#ifdef TB_TRACE_DEBUG
                    // update the jobs urgent size
                    tb_size_t jobs_urgent_size = tb_list_entry_size(&impl->jobs_urgent);

                    // update the jobs waiting size
                    tb_size_t jobs_waiting_size = tb_list_entry_size(&impl->jobs_waiting);

                    // update the jobs pending size
                    tb_size_t jobs_pending_size = tb_list_entry_size(&impl->jobs_pending);

                    // trace
                    tb_trace_d("worker[%lu]: pull: jobs: %lu, time: %lu ms, waiting: %lu, pending: %lu, urgent: %lu", worker->id, tb_vector_size(worker->jobs), worker->pull, jobs_waiting_size, jobs_pending_size, jobs_urgent_size);
#endif
                }
            }

            // done jobs
            tb_for_all (tb_thread_pool_job_t*, job, worker->jobs)
            {
                // check
                tb_assert_and_check_continue(job && job->task.done);

                // the job state: claim it, WAITING -> WORKING
                tb_size_t state = tb_atomic_fetch_and_pset(&job->state, TB_STATE_WAITING, TB_STATE_WORKING);

                // the job is waiting? work it
                if (state == TB_STATE_WAITING)
                {
                    // trace
                    tb_trace_d("worker[%lu]: done: task[%p:%s]: ..", worker->id, job->task.done, job->task.name);

                    // init the time
                    tb_hong_t time = tb_cache_time_spak();

                    // done the job
                    job->task.done((tb_thread_pool_worker_ref_t)worker, job->task.priv);

                    // compute the elapsed time
                    time = tb_cache_time_spak() - time;

                    // exists? update time and count
                    tb_size_t itor;
                    tb_hash_map_item_ref_t item = tb_null;
                    if (    ((itor = tb_hash_map_find(worker->stats, job->task.done)) != tb_iterator_tail(worker->stats))
                        &&  (item = (tb_hash_map_item_ref_t)tb_iterator_item(worker->stats, itor)))
                    {
                        // the stats
                        tb_thread_pool_job_stats_t* stats = (tb_thread_pool_job_stats_t*)item->data;
                        tb_assert_and_check_break(stats);

                        // update the done count
                        stats->done_count++;

                        // update the total time
                        stats->total_time += time;
                    }

                    // no item? add it
                    if (!item)
                    {
                        // init stats
                        tb_thread_pool_job_stats_t stats = {0};
                        stats.done_count = 1;
                        stats.total_time = time;

                        // add stats
                        tb_hash_map_insert(worker->stats, job->task.done, &stats);
                    }

#ifdef TB_TRACE_DEBUG
                    tb_size_t done_count = 0;
                    tb_hize_t total_time = 0;
                    tb_thread_pool_job_stats_t* stats = (tb_thread_pool_job_stats_t*)tb_hash_map_get(worker->stats, job->task.done);
                    if (stats)
                    {
                        done_count = stats->done_count;
                        total_time = stats->total_time;
                    }

                    // trace
                    // NOTE(review): if the stats lookup above failed, done_count
                    // stays 0 and this division is undefined — verify stats
                    // insertion cannot fail here.
                    tb_trace_d("worker[%lu]: done: task[%p:%s]: time: %lld ms, average: %lld ms, count: %lu", worker->id, job->task.done, job->task.name, time, (total_time / (tb_hize_t)done_count), done_count);
#endif

                    // update the job state
                    tb_atomic_set(&job->state, TB_STATE_FINISHED);
                }
                // the job is killing? mark it killed
                else if (state == TB_STATE_KILLING)
                {
                    // update the job state
                    tb_atomic_set(&job->state, TB_STATE_KILLED);
                }
            }

            // clear jobs
            tb_vector_clear(worker->jobs);
        }

    } while (0);

    // exit worker
    if (worker)
    {
        // trace
        tb_trace_d("worker[%lu]: exit", worker->id);

        // stoped
        tb_atomic_set(&worker->bstoped, 1);

        // exit all private data, in reverse order of registration
        tb_size_t i = 0;
        tb_size_t n = tb_arrayn(worker->priv);
        for (i = 0; i < n; i++)
        {
            // the private data (note: shadows the thread-entry priv parameter)
            tb_thread_pool_worker_priv_t* priv = &worker->priv[n - i - 1];

            // exit it
            if (priv->exit) priv->exit((tb_thread_pool_worker_ref_t)worker, priv->priv);

            // clear it
            priv->exit = tb_null;
            priv->priv = tb_null;
        }

        // exit stats
        if (worker->stats) tb_hash_map_exit(worker->stats);
        worker->stats = tb_null;

        // exit jobs
        if (worker->jobs) tb_vector_exit(worker->jobs);
        worker->jobs = tb_null;
    }

    // exit
    tb_thread_return(tb_null);
    return tb_null;
}