/* ////////////////////////////////////////////////////////////////////////////////////// * interfaces */ tb_element_t tb_element_obj() { // the ptr element tb_element_t element_ptr = tb_element_ptr(tb_null, tb_null); // the str element tb_element_t element_str = tb_element_str(tb_true); // init element tb_element_t element = {0}; element.type = TB_ELEMENT_TYPE_OBJ; element.flag = 0; element.hash = element_ptr.hash; element.comp = element_ptr.comp; element.data = element_ptr.data; element.cstr = tb_element_obj_cstr; element.free = tb_element_obj_free; element.dupl = tb_element_obj_dupl; element.repl = tb_element_obj_repl; element.copy = element_ptr.copy; element.nfree = element_str.nfree; element.ndupl = element_str.ndupl; element.nrepl = element_str.nrepl; element.ncopy = element_ptr.ncopy; element.size = sizeof(tb_object_ref_t); // ok? return element; }
/* ////////////////////////////////////////////////////////////////////////////////////// * implementation */ tb_bool_t tb_thread_store_init() { // enter lock tb_spinlock_enter(&g_lock); // no store? if (!g_store) { // init store g_store = tb_hash_map_init(8, tb_element_size(), tb_element_ptr(tb_thread_store_free, tb_null)); } // leave lock tb_spinlock_leave(&g_lock); // register lock profiler #ifdef TB_LOCK_PROFILER_ENABLE tb_lock_profiler_register(tb_lock_profiler(), (tb_pointer_t)&g_lock, TB_TRACE_MODULE_NAME); #endif // ok? return g_store? tb_true : tb_false; }
/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */
/* (re)build the tessellator's vertex event queue
 *
 * lazily creates the priority queue (ordered by the vertex event comparator),
 * clears any previous contents, then enqueues every vertex of the mesh.
 *
 * NOTE(review): the tail of this function (its return) is not visible in this
 * chunk; only the visible portion is documented here.
 */
tb_bool_t gb_tessellator_event_queue_make(gb_tessellator_impl_t* impl)
{
    // check
    tb_assert_abort(impl);

    // the mesh
    gb_mesh_ref_t mesh = impl->mesh;
    tb_assert_abort(mesh);

    // init event queue on first use
    if (!impl->event_queue)
    {
        // make event element: plain pointers, no free/dupl callbacks needed
        tb_element_t element = tb_element_ptr(tb_null, tb_null);

        // init the comparator for the vertex event
        element.comp = gb_tessellator_event_queue_comp;

#ifdef __gb_debug__
        // init the c-string function for tb_priority_queue_dump
        element.cstr = gb_tessellator_event_queue_cstr;
#endif

        // make event queue (0 => default initial capacity)
        impl->event_queue = tb_priority_queue_init(0, element);
    }
    tb_assert_abort_and_check_return_val(impl->event_queue, tb_false);

    // clear event queue first: this function may be called for a fresh pass
    tb_priority_queue_clear(impl->event_queue);

    // enqueue an event for every (non-null) vertex of the mesh
    tb_for_all_if (gb_mesh_vertex_ref_t, vertex, gb_mesh_vertex_itor(mesh), vertex)
    {
        // put vertex event to the queue
        tb_priority_queue_put(impl->event_queue, vertex);
    }
/* ////////////////////////////////////////////////////////////////////////////////////// * interfaces */ tb_object_reader_t* tb_object_json_reader() { // the reader static tb_object_reader_t s_reader = {0}; // init reader s_reader.read = tb_object_json_reader_done; s_reader.probe = tb_object_json_reader_probe; // init hooker s_reader.hooker = tb_hash_map_init(TB_HASH_MAP_BUCKET_SIZE_MICRO, tb_element_uint8(), tb_element_ptr(tb_null, tb_null)); tb_assert_and_check_return_val(s_reader.hooker, tb_null); // hook reader tb_hash_map_insert(s_reader.hooker, (tb_pointer_t)'n', tb_object_json_reader_func_null); tb_hash_map_insert(s_reader.hooker, (tb_pointer_t)'N', tb_object_json_reader_func_null); tb_hash_map_insert(s_reader.hooker, (tb_pointer_t)'[', tb_object_json_reader_func_array); tb_hash_map_insert(s_reader.hooker, (tb_pointer_t)'\'', tb_object_json_reader_func_string); tb_hash_map_insert(s_reader.hooker, (tb_pointer_t)'\"', tb_object_json_reader_func_string); tb_hash_map_insert(s_reader.hooker, (tb_pointer_t)'0', tb_object_json_reader_func_number); tb_hash_map_insert(s_reader.hooker, (tb_pointer_t)'1', tb_object_json_reader_func_number); tb_hash_map_insert(s_reader.hooker, (tb_pointer_t)'2', tb_object_json_reader_func_number); tb_hash_map_insert(s_reader.hooker, (tb_pointer_t)'3', tb_object_json_reader_func_number); tb_hash_map_insert(s_reader.hooker, (tb_pointer_t)'4', tb_object_json_reader_func_number); tb_hash_map_insert(s_reader.hooker, (tb_pointer_t)'5', tb_object_json_reader_func_number); tb_hash_map_insert(s_reader.hooker, (tb_pointer_t)'6', tb_object_json_reader_func_number); tb_hash_map_insert(s_reader.hooker, (tb_pointer_t)'7', tb_object_json_reader_func_number); tb_hash_map_insert(s_reader.hooker, (tb_pointer_t)'8', tb_object_json_reader_func_number); tb_hash_map_insert(s_reader.hooker, (tb_pointer_t)'9', tb_object_json_reader_func_number); tb_hash_map_insert(s_reader.hooker, (tb_pointer_t)'.', tb_object_json_reader_func_number); 
tb_hash_map_insert(s_reader.hooker, (tb_pointer_t)'-', tb_object_json_reader_func_number); tb_hash_map_insert(s_reader.hooker, (tb_pointer_t)'+', tb_object_json_reader_func_number); tb_hash_map_insert(s_reader.hooker, (tb_pointer_t)'e', tb_object_json_reader_func_number); tb_hash_map_insert(s_reader.hooker, (tb_pointer_t)'E', tb_object_json_reader_func_number); tb_hash_map_insert(s_reader.hooker, (tb_pointer_t)'t', tb_object_json_reader_func_boolean); tb_hash_map_insert(s_reader.hooker, (tb_pointer_t)'T', tb_object_json_reader_func_boolean); tb_hash_map_insert(s_reader.hooker, (tb_pointer_t)'f', tb_object_json_reader_func_boolean); tb_hash_map_insert(s_reader.hooker, (tb_pointer_t)'F', tb_object_json_reader_func_boolean); tb_hash_map_insert(s_reader.hooker, (tb_pointer_t)'{', tb_object_json_reader_func_dictionary); // ok return &s_reader; }
/* ////////////////////////////////////////////////////////////////////////////////////// * interfaces */ tb_object_reader_t* tb_object_xplist_reader() { // the reader static tb_object_reader_t s_reader = {0}; // init reader s_reader.read = tb_object_xplist_reader_done; s_reader.probe = tb_object_xplist_reader_probe; // init hooker s_reader.hooker = tb_hash_map_init(TB_HASH_MAP_BUCKET_SIZE_MICRO, tb_element_str(tb_false), tb_element_ptr(tb_null, tb_null)); tb_assert_and_check_return_val(s_reader.hooker, tb_null); // hook reader tb_hash_map_insert(s_reader.hooker, "date", tb_object_xplist_reader_func_date); tb_hash_map_insert(s_reader.hooker, "data", tb_object_xplist_reader_func_data); tb_hash_map_insert(s_reader.hooker, "array", tb_object_xplist_reader_func_array); tb_hash_map_insert(s_reader.hooker, "string", tb_object_xplist_reader_func_string); tb_hash_map_insert(s_reader.hooker, "integer", tb_object_xplist_reader_func_number); tb_hash_map_insert(s_reader.hooker, "real", tb_object_xplist_reader_func_number); tb_hash_map_insert(s_reader.hooker, "true", tb_object_xplist_reader_func_boolean); tb_hash_map_insert(s_reader.hooker, "false", tb_object_xplist_reader_func_boolean); tb_hash_map_insert(s_reader.hooker, "dict", tb_object_xplist_reader_func_dictionary); // ok return &s_reader; }
/* create the select()-based aiop reactor
 *
 * allocates the reactor, wires its vtable, zeroes the fd sets, and creates
 * the locks and the aioo hash keyed by socket. on any failure everything
 * allocated so far is torn down via the exit callback.
 */
static tb_aiop_rtor_impl_t* tb_aiop_rtor_select_init(tb_aiop_impl_t* aiop)
{
    // check
    tb_assert_and_check_return_val(aiop && aiop->maxn, tb_null);

    tb_aiop_rtor_select_impl_t* impl = tb_null;
    tb_bool_t                   done = tb_false;
    do
    {
        // allocate the zero-initialized reactor
        impl = tb_malloc0_type(tb_aiop_rtor_select_impl_t);
        tb_assert_and_check_break(impl);

        // wire the base interface
        impl->base.aiop = aiop;
        impl->base.code = TB_AIOE_CODE_EALL | TB_AIOE_CODE_ONESHOT;
        impl->base.exit = tb_aiop_rtor_select_exit;
        impl->base.cler = tb_aiop_rtor_select_cler;
        impl->base.addo = tb_aiop_rtor_select_addo;
        impl->base.delo = tb_aiop_rtor_select_delo;
        impl->base.post = tb_aiop_rtor_select_post;
        impl->base.wait = tb_aiop_rtor_select_wait;

        // clear the input and output fd sets
        FD_ZERO(&impl->rfdi);
        FD_ZERO(&impl->wfdi);
        FD_ZERO(&impl->rfdo);
        FD_ZERO(&impl->wfdo);

        // create the locks protecting the fd sets and the hash
        if (!tb_spinlock_init(&impl->lock.pfds)) break;
        if (!tb_spinlock_init(&impl->lock.hash)) break;

        // create the aioo hash, sized roughly to sqrt(maxn)
        impl->hash = tb_hash_map_init(tb_align8(tb_isqrti((tb_uint32_t)aiop->maxn) + 1), tb_element_ptr(tb_null, tb_null), tb_element_ptr(tb_null, tb_null));
        tb_assert_and_check_break(impl->hash);

        // everything succeeded
        done = tb_true;

    } while (0);

    // failed? tear down whatever was built
    if (!done)
    {
        if (impl) tb_aiop_rtor_select_exit((tb_aiop_rtor_impl_t*)impl);
        impl = tb_null;
    }

    // ok?
    return (tb_aiop_rtor_impl_t*)impl;
}
/* the worker thread entry: pull jobs from the pool and run them
 *
 * each iteration: if the private job list is empty, pull from the pool's
 * urgent, waiting, and pending lists under the pool spinlock (or block on the
 * pool semaphore when nothing was pulled); then run every pulled job whose
 * state transitions WAITING -> WORKING, recording per-task-function stats.
 * on exit, per-worker private data, stats, and jobs are torn down in order.
 *
 * @param priv  the tb_thread_pool_worker_t* for this worker (may be tb_null)
 * @return      always tb_null
 */
static tb_pointer_t tb_thread_pool_worker_loop(tb_cpointer_t priv)
{
    // the worker
    tb_thread_pool_worker_t* worker = (tb_thread_pool_worker_t*)priv;

    // trace
    tb_trace_d("worker[%lu]: init", worker? worker->id : -1);

    // done
    do
    {
        // check: this worker must not have been initialized yet
        tb_assert_and_check_break(worker && !worker->jobs && !worker->stats);

        // the pool
        tb_thread_pool_impl_t* impl = (tb_thread_pool_impl_t*)worker->pool;
        tb_assert_and_check_break(impl && impl->semaphore);

        // wait some time for leaving the lock; staggered per worker id
        tb_msleep((worker->id + 1)* 20);

        // init jobs: the worker-private list of pulled jobs (plain pointers)
        worker->jobs = tb_vector_init(TB_THREAD_POOL_JOBS_WORKING_GROW, tb_element_ptr(tb_null, tb_null));
        tb_assert_and_check_break(worker->jobs);

        // init stats: task-done-function => tb_thread_pool_job_stats_t (copied by value)
        worker->stats = tb_hash_map_init(TB_HASH_MAP_BUCKET_SIZE_MICRO, tb_element_ptr(tb_null, tb_null), tb_element_mem(sizeof(tb_thread_pool_job_stats_t), tb_null, tb_null));
        tb_assert_and_check_break(worker->stats);

        // loop
        while (1)
        {
            // pull jobs if be idle
            if (!tb_vector_size(worker->jobs))
            {
                // enter the pool lock: the jobs_* lists are shared state
                tb_spinlock_enter(&impl->lock);

                // init the pull time
                worker->pull = 0;

                // pull from the urgent jobs first (highest priority)
                if (tb_list_entry_size(&impl->jobs_urgent))
                {
                    // trace
                    tb_trace_d("worker[%lu]: try pulling from urgent: %lu", worker->id, tb_list_entry_size(&impl->jobs_urgent));

                    // pull it
                    tb_remove_if_until(tb_list_entry_itor(&impl->jobs_urgent), tb_thread_pool_worker_walk_pull, worker);
                }

                // pull from the waiting jobs
                if (tb_list_entry_size(&impl->jobs_waiting))
                {
                    // trace
                    tb_trace_d("worker[%lu]: try pulling from waiting: %lu", worker->id, tb_list_entry_size(&impl->jobs_waiting));

                    // pull it
                    tb_remove_if_until(tb_list_entry_itor(&impl->jobs_waiting), tb_thread_pool_worker_walk_pull, worker);
                }

                // pull from the pending jobs and clean some finished and killed jobs
                if (tb_list_entry_size(&impl->jobs_pending))
                {
                    // trace
                    tb_trace_d("worker[%lu]: try pulling from pending: %lu", worker->id, tb_list_entry_size(&impl->jobs_pending));

                    // no jobs? try to pull from the pending jobs
                    if (!tb_vector_size(worker->jobs)) tb_remove_if(tb_list_entry_itor(&impl->jobs_pending), tb_thread_pool_worker_walk_pull_and_clean, worker);
                    // clean some finished and killed jobs
                    else tb_remove_if(tb_list_entry_itor(&impl->jobs_pending), tb_thread_pool_worker_walk_clean, worker);
                }

                // leave
                tb_spinlock_leave(&impl->lock);

                // idle? wait it
                if (!tb_vector_size(worker->jobs))
                {
                    // killed? stop looping
                    tb_check_break(!tb_atomic_get(&worker->bstoped));

                    // trace
                    tb_trace_d("worker[%lu]: wait: ..", worker->id);

                    // block until the pool posts new work (-1: wait forever)
                    tb_long_t wait = tb_semaphore_wait(impl->semaphore, -1);
                    tb_assert_and_check_break(wait > 0);

                    // trace
                    tb_trace_d("worker[%lu]: wait: ok", worker->id);

                    // continue it
                    continue;
                }
                else
                {
#ifdef TB_TRACE_DEBUG
                    // update the jobs urgent size
                    tb_size_t jobs_urgent_size = tb_list_entry_size(&impl->jobs_urgent);

                    // update the jobs waiting size
                    tb_size_t jobs_waiting_size = tb_list_entry_size(&impl->jobs_waiting);

                    // update the jobs pending size
                    tb_size_t jobs_pending_size = tb_list_entry_size(&impl->jobs_pending);

                    // trace
                    // NOTE(review): these sizes are read outside the lock — only for tracing
                    tb_trace_d("worker[%lu]: pull: jobs: %lu, time: %lu ms, waiting: %lu, pending: %lu, urgent: %lu", worker->id, tb_vector_size(worker->jobs), worker->pull, jobs_waiting_size, jobs_pending_size, jobs_urgent_size);
#endif
                }
            }

            // done jobs
            tb_for_all (tb_thread_pool_job_t*, job, worker->jobs)
            {
                // check
                tb_assert_and_check_continue(job && job->task.done);

                // atomically claim the job: WAITING -> WORKING; returns the prior state
                tb_size_t state = tb_atomic_fetch_and_pset(&job->state, TB_STATE_WAITING, TB_STATE_WORKING);

                // the job is waiting? work it
                if (state == TB_STATE_WAITING)
                {
                    // trace
                    tb_trace_d("worker[%lu]: done: task[%p:%s]: ..", worker->id, job->task.done, job->task.name);

                    // init the time
                    tb_hong_t time = tb_cache_time_spak();

                    // done the job
                    job->task.done((tb_thread_pool_worker_ref_t)worker, job->task.priv);

                    // computate the time
                    time = tb_cache_time_spak() - time;

                    // exists? update time and count
                    tb_size_t itor;
                    tb_hash_map_item_ref_t item = tb_null;
                    if ( ((itor = tb_hash_map_find(worker->stats, job->task.done)) != tb_iterator_tail(worker->stats)) && (item = (tb_hash_map_item_ref_t)tb_iterator_item(worker->stats, itor)))
                    {
                        // the stats
                        tb_thread_pool_job_stats_t* stats = (tb_thread_pool_job_stats_t*)item->data;
                        tb_assert_and_check_break(stats);

                        // update the done count
                        stats->done_count++;

                        // update the total time
                        stats->total_time += time;
                    }

                    // no item? add it
                    if (!item)
                    {
                        // init stats
                        tb_thread_pool_job_stats_t stats = {0};
                        stats.done_count = 1;
                        stats.total_time = time;

                        // add stats
                        tb_hash_map_insert(worker->stats, job->task.done, &stats);
                    }

#ifdef TB_TRACE_DEBUG
                    tb_size_t done_count = 0;
                    tb_hize_t total_time = 0;
                    tb_thread_pool_job_stats_t* stats = (tb_thread_pool_job_stats_t*)tb_hash_map_get(worker->stats, job->task.done);
                    if (stats)
                    {
                        done_count = stats->done_count;
                        total_time = stats->total_time;
                    }

                    /* trace
                     *
                     * NOTE(review): if the stats lookup above ever failed (e.g.
                     * insertion failure), done_count stays 0 and the average
                     * computation below divides by zero — confirm tb_hash_map_insert
                     * cannot fail here, or guard the division.
                     */
                    tb_trace_d("worker[%lu]: done: task[%p:%s]: time: %lld ms, average: %lld ms, count: %lu", worker->id, job->task.done, job->task.name, time, (total_time / (tb_hize_t)done_count), done_count);
#endif

                    // update the job state
                    tb_atomic_set(&job->state, TB_STATE_FINISHED);
                }
                // the job is killing? work it
                else if (state == TB_STATE_KILLING)
                {
                    // update the job state
                    tb_atomic_set(&job->state, TB_STATE_KILLED);
                }
            }

            // clear jobs
            tb_vector_clear(worker->jobs);
        }

    } while (0);

    // exit worker
    if (worker)
    {
        // trace
        tb_trace_d("worker[%lu]: exit", worker->id);

        // stoped
        tb_atomic_set(&worker->bstoped, 1);

        // exit all private data, in reverse registration order
        tb_size_t i = 0;
        tb_size_t n = tb_arrayn(worker->priv);
        for (i = 0; i < n; i++)
        {
            // the private data
            tb_thread_pool_worker_priv_t* priv = &worker->priv[n - i - 1];

            // exit it
            if (priv->exit) priv->exit((tb_thread_pool_worker_ref_t)worker, priv->priv);

            // clear it
            priv->exit = tb_null;
            priv->priv = tb_null;
        }

        // exit stats
        if (worker->stats) tb_hash_map_exit(worker->stats);
        worker->stats = tb_null;

        // exit jobs
        if (worker->jobs) tb_vector_exit(worker->jobs);
        worker->jobs = tb_null;
    }

    // exit
    tb_thread_return(tb_null);
    return tb_null;
}