static tb_void_t tb_aiop_rtor_select_exit(tb_aiop_rtor_impl_t* rtor)
{
    tb_aiop_rtor_select_impl_t* impl = (tb_aiop_rtor_select_impl_t*)rtor;
    if (impl)
    {
        // free fds
        tb_spinlock_enter(&impl->lock.pfds);
        FD_ZERO(&impl->rfdi);
        FD_ZERO(&impl->wfdi);
        FD_ZERO(&impl->rfdo);
        FD_ZERO(&impl->wfdo);
        tb_spinlock_leave(&impl->lock.pfds);

        // exit hash
        tb_spinlock_enter(&impl->lock.hash);
        if (impl->hash) tb_hash_map_exit(impl->hash);
        impl->hash = tb_null;
        tb_spinlock_leave(&impl->lock.hash);

        // exit lock
        tb_spinlock_exit(&impl->lock.pfds);
        tb_spinlock_exit(&impl->lock.hash);

        // free it
        tb_free(impl);
    }
}

static tb_bool_t tb_ifaddrs_interface_load(tb_list_ref_t interfaces, tb_long_t sock, tb_long_t request)
{
    // trace
    tb_trace_d("netlink: load: ..");

    // send request
    if (tb_ifaddrs_netlink_socket_send(sock, request) < 0) return tb_false;

    // make names
    tb_hash_map_ref_t names = tb_hash_map_init(8, tb_element_size(), tb_element_str(tb_true));
    tb_assert_and_check_return_val(names, tb_false);

    // done
    tb_long_t ok = -1;
    while (!(ok = tb_ifaddrs_interface_done(interfaces, names, sock, request))) ;

    // trace
    tb_trace_d("netlink: load: %s", ok > 0? "ok" : "no");

    // exit names
    if (names) tb_hash_map_exit(names);
    names = tb_null;

    // ok?
    return ok > 0;
}

tb_void_t tb_thread_store_exit()
{
    // enter lock
    tb_spinlock_enter(&g_lock);

    // exit store
    if (g_store) tb_hash_map_exit(g_store);
    g_store = tb_null;

    // leave lock
    tb_spinlock_leave(&g_lock);
}

static tb_void_t tb_object_dictionary_exit(tb_object_ref_t object)
{
    // check
    tb_object_dictionary_t* dictionary = tb_object_dictionary_cast(object);
    tb_assert_and_check_return(dictionary);

    // exit hash
    if (dictionary->hash) tb_hash_map_exit(dictionary->hash);
    dictionary->hash = tb_null;

    // exit it
    tb_free(dictionary);
}

tb_void_t tb_oc_reader_remove(tb_size_t format)
{
    // check
    format &= 0x00ff;
    tb_assert_and_check_return((format < tb_arrayn(g_reader)));

    // exit it
    if (g_reader[format])
    {
        // exit hooker
        if (g_reader[format]->hooker) tb_hash_map_exit(g_reader[format]->hooker);
        g_reader[format]->hooker = tb_null;

        // clear it
        g_reader[format] = tb_null;
    }
}

tb_void_t tb_hash_set_exit(tb_hash_set_ref_t self)
{
    tb_hash_map_exit((tb_hash_map_ref_t)self);
}

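/* tb_hash_set_exit() is a straight cast-through to tb_hash_map_exit(): a tbox hash set is a
 * hash map that stores only keys. A minimal caller-side sketch of that lifecycle follows; it
 * assumes the tb_hash_set_init()/tb_hash_set_insert() entry points from hash_set.h and a
 * string key element, and example_hash_set() itself is a hypothetical name for illustration.
 */
#include "tbox/tbox.h"

static tb_void_t example_hash_set()
{
    // a set of case-sensitive strings; bucket size 0 falls back to the default size
    tb_hash_set_ref_t set = tb_hash_set_init(0, tb_element_str(tb_true));
    if (set)
    {
        // inserting the same key twice leaves a single entry, since the underlying map replaces it
        tb_hash_set_insert(set, "eth0");
        tb_hash_set_insert(set, "eth0");
        tb_hash_set_insert(set, "lo");

        // releases the underlying hash map, exactly what tb_hash_set_exit() above does
        tb_hash_set_exit(set);
    }
}
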
/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */
tb_hash_map_ref_t tb_hash_map_init(tb_size_t bucket_size, tb_element_t element_name, tb_element_t element_data)
{
    // check
    tb_assert_and_check_return_val(element_name.size && element_name.hash && element_name.comp && element_name.data && element_name.dupl, tb_null);
    tb_assert_and_check_return_val(element_data.data && element_data.dupl && element_data.repl, tb_null);

    // check bucket size
    if (!bucket_size) bucket_size = TB_HASH_MAP_BUCKET_SIZE_DEFAULT;
    tb_assert_and_check_return_val(bucket_size <= TB_HASH_MAP_BUCKET_SIZE_LARGE, tb_null);

    // done
    tb_bool_t           ok = tb_false;
    tb_hash_map_impl_t* impl = tb_null;
    do
    {
        // make hash_map
        impl = tb_malloc0_type(tb_hash_map_impl_t);
        tb_assert_and_check_break(impl);

        // init hash_map func
        impl->element_name = element_name;
        impl->element_data = element_data;

        // init item itor
        impl->itor.mode         = TB_ITERATOR_MODE_FORWARD | TB_ITERATOR_MODE_MUTABLE;
        impl->itor.priv         = tb_null;
        impl->itor.step         = sizeof(tb_hash_map_item_t);
        impl->itor.size         = tb_hash_map_itor_size;
        impl->itor.head         = tb_hash_map_itor_head;
        impl->itor.tail         = tb_hash_map_itor_tail;
        impl->itor.prev         = tb_null;
        impl->itor.next         = tb_hash_map_itor_next;
        impl->itor.item         = tb_hash_map_itor_item;
        impl->itor.copy         = tb_hash_map_itor_copy;
        impl->itor.comp         = tb_hash_map_itor_comp;
        impl->itor.remove       = tb_hash_map_itor_remove;
        impl->itor.remove_range = tb_hash_map_itor_remove_range;

        // init hash_map size
        impl->hash_size = tb_align_pow2(bucket_size);
        tb_assert_and_check_break(impl->hash_size <= TB_HASH_MAP_BUCKET_MAXN);

        // init hash_map list
        impl->hash_list = (tb_hash_map_item_list_t**)tb_nalloc0(impl->hash_size, sizeof(tb_size_t));
        tb_assert_and_check_break(impl->hash_list);

        // init item grow
        impl->item_grow = tb_isqrti(bucket_size);
        if (impl->item_grow < 8) impl->item_grow = 8;
        impl->item_grow = tb_align_pow2(impl->item_grow);

        // ok
        ok = tb_true;

    } while (0);

    // failed?
    if (!ok)
    {
        // exit it
        if (impl) tb_hash_map_exit((tb_hash_map_ref_t)impl);
        impl = tb_null;
    }

    // ok?
    return (tb_hash_map_ref_t)impl;
}

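/* A minimal caller-side sketch of the tb_hash_map lifecycle built by tb_hash_map_init() above:
 * case-sensitive string keys mapped to tb_size_t values, mirroring the element pairs used
 * elsewhere in this section. It assumes the tbox runtime is already initialized; the
 * example_hash_map() name and the sample keys are hypothetical, not taken from these files.
 */
#include "tbox/tbox.h"

static tb_void_t example_hash_map()
{
    // map: string keys -> integer values stored inline by tb_element_size(); 0 selects the default bucket size
    tb_hash_map_ref_t map = tb_hash_map_init(0, tb_element_str(tb_true), tb_element_size());
    if (map)
    {
        // insert: the key element duplicates the string, the size value is stored in the data slot
        tb_hash_map_insert(map, "eth0", (tb_cpointer_t)2);
        tb_hash_map_insert(map, "lo",   (tb_cpointer_t)1);

        // lookup: returns the stored value, or tb_null if the key is missing
        tb_size_t index = (tb_size_t)tb_hash_map_get(map, "eth0");
        tb_trace_d("eth0 -> %lu", index);

        // exit: frees all duplicated keys and values, then the map itself
        tb_hash_map_exit(map);
    }
}
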
static tb_pointer_t tb_thread_pool_worker_loop(tb_cpointer_t priv)
{
    // the worker
    tb_thread_pool_worker_t* worker = (tb_thread_pool_worker_t*)priv;

    // trace
    tb_trace_d("worker[%lu]: init", worker? worker->id : -1);

    // done
    do
    {
        // check
        tb_assert_and_check_break(worker && !worker->jobs && !worker->stats);

        // the pool
        tb_thread_pool_impl_t* impl = (tb_thread_pool_impl_t*)worker->pool;
        tb_assert_and_check_break(impl && impl->semaphore);

        // wait some time for leaving the lock
        tb_msleep((worker->id + 1) * 20);

        // init jobs
        worker->jobs = tb_vector_init(TB_THREAD_POOL_JOBS_WORKING_GROW, tb_element_ptr(tb_null, tb_null));
        tb_assert_and_check_break(worker->jobs);

        // init stats
        worker->stats = tb_hash_map_init(TB_HASH_MAP_BUCKET_SIZE_MICRO, tb_element_ptr(tb_null, tb_null), tb_element_mem(sizeof(tb_thread_pool_job_stats_t), tb_null, tb_null));
        tb_assert_and_check_break(worker->stats);

        // loop
        while (1)
        {
            // pull jobs if idle
            if (!tb_vector_size(worker->jobs))
            {
                // enter
                tb_spinlock_enter(&impl->lock);

                // init the pull time
                worker->pull = 0;

                // pull from the urgent jobs
                if (tb_list_entry_size(&impl->jobs_urgent))
                {
                    // trace
                    tb_trace_d("worker[%lu]: try pulling from urgent: %lu", worker->id, tb_list_entry_size(&impl->jobs_urgent));

                    // pull it
                    tb_remove_if_until(tb_list_entry_itor(&impl->jobs_urgent), tb_thread_pool_worker_walk_pull, worker);
                }

                // pull from the waiting jobs
                if (tb_list_entry_size(&impl->jobs_waiting))
                {
                    // trace
                    tb_trace_d("worker[%lu]: try pulling from waiting: %lu", worker->id, tb_list_entry_size(&impl->jobs_waiting));

                    // pull it
                    tb_remove_if_until(tb_list_entry_itor(&impl->jobs_waiting), tb_thread_pool_worker_walk_pull, worker);
                }

                // pull from the pending jobs and clean some finished and killed jobs
                if (tb_list_entry_size(&impl->jobs_pending))
                {
                    // trace
                    tb_trace_d("worker[%lu]: try pulling from pending: %lu", worker->id, tb_list_entry_size(&impl->jobs_pending));

                    // no jobs? try to pull from the pending jobs
                    if (!tb_vector_size(worker->jobs))
                        tb_remove_if(tb_list_entry_itor(&impl->jobs_pending), tb_thread_pool_worker_walk_pull_and_clean, worker);
                    // clean some finished and killed jobs
                    else tb_remove_if(tb_list_entry_itor(&impl->jobs_pending), tb_thread_pool_worker_walk_clean, worker);
                }

                // leave
                tb_spinlock_leave(&impl->lock);

                // idle? wait it
                if (!tb_vector_size(worker->jobs))
                {
                    // killed?
                    tb_check_break(!tb_atomic_get(&worker->bstoped));

                    // trace
                    tb_trace_d("worker[%lu]: wait: ..", worker->id);

                    // wait some time
                    tb_long_t wait = tb_semaphore_wait(impl->semaphore, -1);
                    tb_assert_and_check_break(wait > 0);

                    // trace
                    tb_trace_d("worker[%lu]: wait: ok", worker->id);

                    // continue it
                    continue;
                }
                else
                {
#ifdef TB_TRACE_DEBUG
                    // update the jobs urgent size
                    tb_size_t jobs_urgent_size = tb_list_entry_size(&impl->jobs_urgent);

                    // update the jobs waiting size
                    tb_size_t jobs_waiting_size = tb_list_entry_size(&impl->jobs_waiting);

                    // update the jobs pending size
                    tb_size_t jobs_pending_size = tb_list_entry_size(&impl->jobs_pending);

                    // trace
                    tb_trace_d("worker[%lu]: pull: jobs: %lu, time: %lu ms, waiting: %lu, pending: %lu, urgent: %lu", worker->id, tb_vector_size(worker->jobs), worker->pull, jobs_waiting_size, jobs_pending_size, jobs_urgent_size);
#endif
                }
            }

            // done jobs
            tb_for_all (tb_thread_pool_job_t*, job, worker->jobs)
            {
                // check
                tb_assert_and_check_continue(job && job->task.done);

                // the job state
                tb_size_t state = tb_atomic_fetch_and_pset(&job->state, TB_STATE_WAITING, TB_STATE_WORKING);

                // the job is waiting? work it
                if (state == TB_STATE_WAITING)
                {
                    // trace
                    tb_trace_d("worker[%lu]: done: task[%p:%s]: ..", worker->id, job->task.done, job->task.name);

                    // init the time
                    tb_hong_t time = tb_cache_time_spak();

                    // done the job
                    job->task.done((tb_thread_pool_worker_ref_t)worker, job->task.priv);

                    // compute the time
                    time = tb_cache_time_spak() - time;

                    // exists? update time and count
                    tb_size_t               itor;
                    tb_hash_map_item_ref_t  item = tb_null;
                    if (    ((itor = tb_hash_map_find(worker->stats, job->task.done)) != tb_iterator_tail(worker->stats))
                        &&  (item = (tb_hash_map_item_ref_t)tb_iterator_item(worker->stats, itor)))
                    {
                        // the stats
                        tb_thread_pool_job_stats_t* stats = (tb_thread_pool_job_stats_t*)item->data;
                        tb_assert_and_check_break(stats);

                        // update the done count
                        stats->done_count++;

                        // update the total time
                        stats->total_time += time;
                    }

                    // no item? add it
                    if (!item)
                    {
                        // init stats
                        tb_thread_pool_job_stats_t stats = {0};
                        stats.done_count = 1;
                        stats.total_time = time;

                        // add stats
                        tb_hash_map_insert(worker->stats, job->task.done, &stats);
                    }

#ifdef TB_TRACE_DEBUG
                    tb_size_t done_count = 0;
                    tb_hize_t total_time = 0;
                    tb_thread_pool_job_stats_t* stats = (tb_thread_pool_job_stats_t*)tb_hash_map_get(worker->stats, job->task.done);
                    if (stats)
                    {
                        done_count = stats->done_count;
                        total_time = stats->total_time;
                    }

                    // trace
                    tb_trace_d("worker[%lu]: done: task[%p:%s]: time: %lld ms, average: %lld ms, count: %lu", worker->id, job->task.done, job->task.name, time, (total_time / (tb_hize_t)done_count), done_count);
#endif

                    // update the job state
                    tb_atomic_set(&job->state, TB_STATE_FINISHED);
                }
                // the job is killing? work it
                else if (state == TB_STATE_KILLING)
                {
                    // update the job state
                    tb_atomic_set(&job->state, TB_STATE_KILLED);
                }
            }

            // clear jobs
            tb_vector_clear(worker->jobs);
        }

    } while (0);

    // exit worker
    if (worker)
    {
        // trace
        tb_trace_d("worker[%lu]: exit", worker->id);

        // stopped
        tb_atomic_set(&worker->bstoped, 1);

        // exit all private data
        tb_size_t i = 0;
        tb_size_t n = tb_arrayn(worker->priv);
        for (i = 0; i < n; i++)
        {
            // the private data
            tb_thread_pool_worker_priv_t* priv = &worker->priv[n - i - 1];

            // exit it
            if (priv->exit) priv->exit((tb_thread_pool_worker_ref_t)worker, priv->priv);

            // clear it
            priv->exit = tb_null;
            priv->priv = tb_null;
        }

        // exit stats
        if (worker->stats) tb_hash_map_exit(worker->stats);
        worker->stats = tb_null;

        // exit jobs
        if (worker->jobs) tb_vector_exit(worker->jobs);
        worker->jobs = tb_null;
    }

    // exit
    tb_thread_return(tb_null);
    return tb_null;
}
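
/* The per-worker stats above follow a find-or-insert pattern: look the key up with
 * tb_hash_map_find(), update the stored record in place if it exists, otherwise insert a fresh
 * one that the value element (tb_element_mem) copies into the map. A condensed sketch of that
 * pattern, reusing only the tb_hash_map/tb_iterator calls already used in the worker loop; the
 * accumulate_stats() name is hypothetical and not part of the pool implementation.
 */
static tb_void_t accumulate_stats(tb_hash_map_ref_t stats_map, tb_cpointer_t key, tb_hong_t time)
{
    // look up the record for this key
    tb_size_t               itor = tb_hash_map_find(stats_map, key);
    tb_hash_map_item_ref_t  item = tb_null;
    if (    itor != tb_iterator_tail(stats_map)
        &&  (item = (tb_hash_map_item_ref_t)tb_iterator_item(stats_map, itor)))
    {
        // found: update the copied record in place through the item data pointer
        tb_thread_pool_job_stats_t* stats = (tb_thread_pool_job_stats_t*)item->data;
        if (stats)
        {
            stats->done_count++;
            stats->total_time += time;
        }
    }
    else
    {
        // not found: insert a fresh record; the value element duplicates it into the map
        tb_thread_pool_job_stats_t stats = {0};
        stats.done_count = 1;
        stats.total_time = time;
        tb_hash_map_insert(stats_map, key, &stats);
    }
}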