/* Get the number of faces in the face list.
 *
 * The list-entry count and the fixed-pool size are two views of the same
 * set of faces and must always agree.
 */
tb_size_t gb_mesh_face_list_size(gb_mesh_face_list_ref_t list)
{
    // cast and validate the concrete implementation
    gb_mesh_face_list_impl_t* face_list = (gb_mesh_face_list_impl_t*)list;
    tb_assert_and_check_return_val(face_list && face_list->pool, 0);

    // the entry count must match the pool size
    tb_size_t count = tb_list_entry_size(&face_list->head);
    tb_assert_abort(count == tb_fixed_pool_size(face_list->pool));

    // the size
    return count;
}
/* Get the item count of the list.
 *
 * The linked-entry count and the backing fixed-pool size track the same
 * items and are cross-checked here.
 */
tb_size_t tb_list_size(tb_list_ref_t self)
{
    // cast and validate
    tb_list_t* list_impl = (tb_list_t*)self;
    tb_assert_and_check_return_val(list_impl && list_impl->pool, 0);

    // both bookkeeping structures must agree on the count
    tb_size_t count = tb_list_entry_size(&list_impl->head);
    tb_assert(count == tb_fixed_pool_size(list_impl->pool));

    // the size
    return count;
}
/* Run the scheduler loop: repeatedly switch to ready coroutines until none remain.
 *
 * @param self      the coroutine scheduler
 * @param exclusive if true, this scheduler is bound exclusively to the global
 *                  slot instead of per-thread local storage (ignored when the
 *                  compiler provides thread-local storage — see #ifdef below)
 */
tb_void_t tb_co_scheduler_loop(tb_co_scheduler_ref_t self, tb_bool_t exclusive)
{
    // check
    tb_co_scheduler_t* scheduler = (tb_co_scheduler_t*)self;
    tb_assert_and_check_return(scheduler);

#ifdef __tb_thread_local__
    // compiler-level TLS available: the global slot is already per-thread
    g_scheduler_self_ex = scheduler;
#else
    // is exclusive mode?
    if (exclusive) g_scheduler_self_ex = scheduler;
    else
    {
        // init self scheduler local
        if (!tb_thread_local_init(&g_scheduler_self, tb_null)) return ;

        // update and override the current scheduler
        tb_thread_local_set(&g_scheduler_self, self);
    }
#endif

    // schedule all ready coroutines
    while (tb_list_entry_size(&scheduler->coroutines_ready))
    {
        // check: only the original (root) coroutine may drive the loop
        tb_assert(tb_coroutine_is_original(scheduler->running));

        // get the next entry from head
        tb_list_entry_ref_t entry = tb_list_entry_head(&scheduler->coroutines_ready);
        tb_assert(entry);

        // switch to the next coroutine
        tb_co_scheduler_switch(scheduler, (tb_coroutine_t*)tb_list_entry0(entry));

        // trace
        tb_trace_d("[loop]: ready %lu", tb_list_entry_size(&scheduler->coroutines_ready));
    }

    // stop it
    scheduler->stopped = tb_true;

#ifdef __tb_thread_local__
    g_scheduler_self_ex = tb_null;
#else
    // is exclusive mode?
    if (exclusive) g_scheduler_self_ex = tb_null;
    else
    {
        // clear the current scheduler
        tb_thread_local_set(&g_scheduler_self, tb_null);
    }
#endif
}
/* Exit the transfer pool: kill all tasks, wait for them, then free everything.
 *
 * @param pool  the transfer pool
 *
 * @return      tb_true on success; tb_false if pending tasks could not be
 *              drained within 5s — in that case the pool is deliberately NOT
 *              freed, since running tasks may still touch it
 */
tb_bool_t tb_transfer_pool_exit(tb_transfer_pool_ref_t pool)
{
    // check
    tb_transfer_pool_impl_t* impl = (tb_transfer_pool_impl_t*)pool;
    tb_assert_and_check_return_val(impl, tb_false);

    // trace
    tb_trace_d("exit: ..");

    // kill it first
    tb_transfer_pool_kill(pool);

    // wait for all tasks to finish (up to 5000ms)
    if (tb_transfer_pool_wait_all(pool, 5000) <= 0)
    {
        // trace
        tb_trace_e("exit: wait failed!");
        return tb_false;
    }

    // enter
    tb_spinlock_enter(&impl->lock);

    // check: the work list must be empty after a successful wait
    tb_assert(!tb_list_entry_size(&impl->work));

    // exit the work list
    tb_list_entry_exit(&impl->work);

    // exit the idle list
    tb_list_entry_exit(&impl->idle);

    // exit pool
    if (impl->pool)
    {
        // exit all task objects still held by the fixed pool
        tb_fixed_pool_walk(impl->pool, tb_transfer_pool_walk_exit, tb_null);

        // exit it
        tb_fixed_pool_exit(impl->pool);
        impl->pool = tb_null;
    }

    // leave
    tb_spinlock_leave(&impl->lock);

    // exit lock
    tb_spinlock_exit(&impl->lock);

    // exit it
    tb_free(pool);

    // trace
    tb_trace_d("exit: ok");

    // ok
    return tb_true;
}
/* Wait until all thread-pool jobs finish or the timeout expires.
 *
 * Polls the job pool size every 200ms under the pool lock.
 *
 * @param pool      the thread pool
 * @param timeout   timeout in ms; < 0 waits forever
 *
 * @return          1 if all jobs finished, 0 on timeout, -1 on invalid pool
 */
tb_long_t tb_thread_pool_task_wait_all(tb_thread_pool_ref_t pool, tb_long_t timeout)
{
    // check
    tb_thread_pool_impl_t* impl = (tb_thread_pool_impl_t*)pool;
    tb_assert_and_check_return_val(impl, -1);

    // wait it
    tb_size_t size = 0;
    tb_hong_t time = tb_cache_time_spak();
    while ((timeout < 0 || tb_cache_time_spak() < time + timeout))
    {
        // enter
        tb_spinlock_enter(&impl->lock);

        // the jobs count (assigns the outer `size` so the final return sees it)
        size = impl->jobs_pool? tb_fixed_pool_size(impl->jobs_pool) : 0;

        // trace
        tb_trace_d("wait: jobs: %lu, waiting: %lu, pending: %lu, urgent: %lu: .." , size , tb_list_entry_size(&impl->jobs_waiting) , tb_list_entry_size(&impl->jobs_pending) , tb_list_entry_size(&impl->jobs_urgent));

#if 0
        tb_for_all_if (tb_thread_pool_job_t*, job, tb_list_entry_itor(&impl->jobs_pending), job)
        {
            tb_trace_d("wait: job: %s from pending", tb_state_cstr(tb_atomic_get(&job->state)));
        }
#endif

        // leave
        tb_spinlock_leave(&impl->lock);

        // no jobs left? done
        tb_check_break(size);

        // wait some time
        tb_msleep(200);
    }

    // ok?
    return !size? 1 : 0;
}
/* Exit the coroutine scheduler and free all owned resources.
 *
 * The scheduler loop must already have stopped; the ready and suspend lists
 * are expected to be empty at this point (asserted below).
 */
tb_void_t tb_co_scheduler_exit(tb_co_scheduler_ref_t self)
{
    // check
    tb_co_scheduler_t* scheduler = (tb_co_scheduler_t*)self;
    tb_assert_and_check_return(scheduler);

    // must be stopped
    tb_assert(scheduler->stopped);

    // exit io scheduler first (it may reference coroutines freed below)
    if (scheduler->scheduler_io) tb_co_scheduler_io_exit(scheduler->scheduler_io);
    scheduler->scheduler_io = tb_null;

    // clear running
    scheduler->running = tb_null;

    // check coroutines: nothing may still be schedulable
    tb_assert(!tb_list_entry_size(&scheduler->coroutines_ready));
    tb_assert(!tb_list_entry_size(&scheduler->coroutines_suspend));

    // free all dead coroutines
    tb_co_scheduler_free(&scheduler->coroutines_dead);

    // free all ready coroutines (defensive: should be empty per the asserts above)
    tb_co_scheduler_free(&scheduler->coroutines_ready);

    // free all suspend coroutines (defensive, as above)
    tb_co_scheduler_free(&scheduler->coroutines_suspend);

    // exit dead coroutines
    tb_list_entry_exit(&scheduler->coroutines_dead);

    // exit ready coroutines
    tb_list_entry_exit(&scheduler->coroutines_ready);

    // exit suspend coroutines
    tb_list_entry_exit(&scheduler->coroutines_suspend);

    // exit the scheduler
    tb_free(scheduler);
}
/* Pick the coroutine that should run next.
 *
 * Walks the ready list round-robin: continue after the currently running
 * coroutine, or start at the head when nothing is running, skipping the
 * list's sentinel header on wrap-around.
 */
static __tb_inline__ tb_lo_coroutine_t* tb_lo_scheduler_next_ready(tb_lo_scheduler_t* scheduler)
{
    // the ready list must not be empty
    tb_assert(scheduler && tb_list_entry_size(&scheduler->coroutines_ready));

    // continue after the running coroutine, or start at the list head
    tb_list_entry_ref_t next = scheduler->running? tb_list_entry_next(&scheduler->running->entry) : tb_list_entry_head(&scheduler->coroutines_ready);
    tb_assert(next);

    // wrapped onto the sentinel header? step past it to the first real entry
    if (next == (tb_list_entry_ref_t)&scheduler->coroutines_ready)
        next = tb_list_entry_next(next);

    // map the list entry back to its owning coroutine
    return (tb_lo_coroutine_t*)tb_list_entry(&scheduler->coroutines_ready, next);
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * main
 */
tb_int_t tb_demo_container_list_entry_main(tb_int_t argc, tb_char_t** argv)
{
    // init the entries: each is {list-entry link, payload}
    tb_demo_entry_t entries[12] =
    {
        {{0}, 0}
    ,   {{0}, 1}
    ,   {{0}, 2}
    ,   {{0}, 3}
    ,   {{0}, 4}
    ,   {{0}, 5}
    ,   {{0}, 6}
    ,   {{0}, 7}
    ,   {{0}, 8}
    ,   {{0}, 9}
    ,   {{0}, 10}
    ,   {{0}, 11}
    };

    // init the list: items link through the `entry` field, copied with tb_demo_entry_copy
    tb_list_entry_head_t list;
    tb_list_entry_init(&list, tb_demo_entry_t, entry, tb_demo_entry_copy);

    // insert entries: 5..9 at the tail, then 4..0 at the head => final order 0..9
    tb_list_entry_insert_tail(&list, &entries[5].entry);
    tb_list_entry_insert_tail(&list, &entries[6].entry);
    tb_list_entry_insert_tail(&list, &entries[7].entry);
    tb_list_entry_insert_tail(&list, &entries[8].entry);
    tb_list_entry_insert_tail(&list, &entries[9].entry);
    tb_list_entry_insert_head(&list, &entries[4].entry);
    tb_list_entry_insert_head(&list, &entries[3].entry);
    tb_list_entry_insert_head(&list, &entries[2].entry);
    tb_list_entry_insert_head(&list, &entries[1].entry);
    tb_list_entry_insert_head(&list, &entries[0].entry);

    // the entry: recover the container struct from its embedded list entry
    tb_demo_entry_t* entry = (tb_demo_entry_t*)tb_list_entry(&list, &entries[5].entry);
    tb_trace_i("entry: %lu", entry->data);
    tb_trace_i("");

    // walk it
    tb_trace_i("insert: %lu", tb_list_entry_size(&list));
    tb_for_all_if(tb_demo_entry_t*, item0, tb_list_entry_itor(&list), item0)
    {
        tb_trace_i("%lu", item0->data);
    }
    // NOTE(review): this chunk is truncated here — the remainder of the demo
    // (replace/remove/clear tests and function close) is outside this view
/* Get the count of active (working) transfer tasks.
 *
 * Reads the work-list size under the pool spinlock; the returned value is a
 * snapshot and may change immediately after the lock is released.
 */
tb_size_t tb_transfer_pool_size(tb_transfer_pool_ref_t pool)
{
    // check
    tb_transfer_pool_impl_t* impl = (tb_transfer_pool_impl_t*)pool;
    tb_assert_and_check_return_val(impl, 0);

    // snapshot the work list size while holding the lock
    tb_size_t work_size = 0;
    tb_spinlock_enter(&impl->lock);
    work_size = tb_list_entry_size(&impl->work);
    tb_spinlock_leave(&impl->lock);

    // ok?
    return work_size;
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * private implementation
 */

/* Free every coroutine in the given list, draining it from the head. */
static tb_void_t tb_lo_scheduler_free(tb_list_entry_head_ref_t coroutines)
{
    // check
    tb_assert(coroutines);

    // pop coroutines from the head until the list drains
    while (tb_list_entry_size(coroutines))
    {
        // take the head entry
        tb_list_entry_ref_t head = tb_list_entry_head(coroutines);
        tb_assert(head);

        // unlink it before destruction
        tb_list_entry_remove_head(coroutines);

        // destroy the coroutine that owns this entry
        tb_lo_coroutine_exit((tb_lo_coroutine_t*)tb_list_entry(coroutines, head));
    }
}
/* Kill every working transfer task in the pool.
 *
 * Only meaningful while the pool is still in the OK state; the kill callback
 * is applied to each work-list task under the pool spinlock.
 */
tb_void_t tb_transfer_pool_kill_all(tb_transfer_pool_ref_t pool)
{
    // check
    tb_transfer_pool_impl_t* impl = (tb_transfer_pool_impl_t*)pool;
    tb_assert_and_check_return(impl);

    // nothing to do once the pool has left the OK state
    tb_check_return(TB_STATE_OK == tb_atomic_get(&impl->state));

    // kill every working task under the lock
    tb_spinlock_enter(&impl->lock);
    tb_trace_d("kill_all: %lu, ..", tb_list_entry_size(&impl->work));
    tb_walk_all(tb_list_entry_itor(&impl->work), tb_transfer_pool_work_kill, tb_null);
    tb_spinlock_leave(&impl->lock);
}
/* Wait until all transfer tasks finish or the timeout expires.
 *
 * Polls the work-list size every 200ms under the pool lock.
 *
 * @param pool      the transfer pool
 * @param timeout   timeout in ms; < 0 waits forever
 *
 * @return          1 if all tasks finished, 0 on timeout, -1 on invalid pool
 */
tb_long_t tb_transfer_pool_wait_all(tb_transfer_pool_ref_t pool, tb_long_t timeout)
{
    // check
    tb_transfer_pool_impl_t* impl = (tb_transfer_pool_impl_t*)pool;
    tb_assert_and_check_return_val(impl, -1);

    // poll the work list until it drains or the timeout expires
    tb_size_t size = 0;
    tb_hong_t time = tb_cache_time_spak();
    while ((timeout < 0 || tb_cache_time_spak() < time + timeout))
    {
        // enter
        tb_spinlock_enter(&impl->lock);

        /* the work list size
         *
         * fix: assign to the outer `size` — the previous code re-declared a
         * local `tb_size_t size` here, shadowing the outer one, so the outer
         * variable stayed 0 and the function returned 1 (success) even when
         * it timed out with tasks still pending
         */
        size = tb_list_entry_size(&impl->work);

        // trace
        tb_trace_d("wait: %lu: ..", size);

        // trace work
#ifdef __tb_debug__
        if (size) tb_walk_all(tb_list_entry_itor(&impl->work), tb_transfer_pool_work_wait, tb_null);
#endif

        // leave
        tb_spinlock_leave(&impl->lock);

        // all tasks finished?
        tb_check_break(size);

        // wait some time
        tb_msleep(200);
    }

    // 1 if no tasks remain, 0 on timeout
    return !size? 1 : 0;
}
// walk it tb_trace_i("insert: %lu", tb_list_entry_size(&list)); tb_for_all_if(tb_demo_entry_t*, item0, tb_list_entry_itor(&list), item0) { tb_trace_i("%lu", item0->data); } // trace tb_trace_i(""); // replace entries tb_list_entry_replace_head(&list, &entries[10].entry); tb_list_entry_replace_last(&list, &entries[11].entry); // walk it tb_trace_i("replace: %lu", tb_list_entry_size(&list)); tb_for_all_if(tb_demo_entry_t*, item1, tb_list_entry_itor(&list), item1) { tb_trace_i("%lu", item1->data); } // trace tb_trace_i(""); // remove entries tb_list_entry_remove_head(&list); tb_list_entry_remove_last(&list); // walk it tb_trace_i("remove: %lu", tb_list_entry_size(&list)); tb_for_all_if(tb_demo_entry_t*, item2, tb_list_entry_itor(&list), item2)
/* The worker thread loop: pull jobs from the pool's queues and run them.
 *
 * Pull priority is urgent > waiting > pending; the pending pass also cleans
 * finished/killed jobs. When idle, the worker blocks on the pool semaphore.
 * On exit it runs each registered per-worker private-data destructor (in
 * reverse order) and frees its jobs vector and stats map.
 *
 * @param priv  the tb_thread_pool_worker_t* for this worker
 *
 * @return      always tb_null
 */
static tb_pointer_t tb_thread_pool_worker_loop(tb_cpointer_t priv)
{
    // the worker
    tb_thread_pool_worker_t* worker = (tb_thread_pool_worker_t*)priv;

    // trace
    tb_trace_d("worker[%lu]: init", worker? worker->id : -1);

    // done
    do
    {
        // check: jobs/stats must not be initialized yet
        tb_assert_and_check_break(worker && !worker->jobs && !worker->stats);

        // the pool
        tb_thread_pool_impl_t* impl = (tb_thread_pool_impl_t*)worker->pool;
        tb_assert_and_check_break(impl && impl->semaphore);

        // wait some time for leaving the lock (staggered by worker id)
        tb_msleep((worker->id + 1)* 20);

        // init jobs
        worker->jobs = tb_vector_init(TB_THREAD_POOL_JOBS_WORKING_GROW, tb_element_ptr(tb_null, tb_null));
        tb_assert_and_check_break(worker->jobs);

        // init stats: maps task function pointer -> tb_thread_pool_job_stats_t
        worker->stats = tb_hash_map_init(TB_HASH_MAP_BUCKET_SIZE_MICRO, tb_element_ptr(tb_null, tb_null), tb_element_mem(sizeof(tb_thread_pool_job_stats_t), tb_null, tb_null));
        tb_assert_and_check_break(worker->stats);

        // loop
        while (1)
        {
            // pull jobs if be idle
            if (!tb_vector_size(worker->jobs))
            {
                // enter
                tb_spinlock_enter(&impl->lock);

                // init the pull time
                worker->pull = 0;

                // pull from the urgent jobs first
                if (tb_list_entry_size(&impl->jobs_urgent))
                {
                    // trace
                    tb_trace_d("worker[%lu]: try pulling from urgent: %lu", worker->id, tb_list_entry_size(&impl->jobs_urgent));

                    // pull it
                    tb_remove_if_until(tb_list_entry_itor(&impl->jobs_urgent), tb_thread_pool_worker_walk_pull, worker);
                }

                // pull from the waiting jobs
                if (tb_list_entry_size(&impl->jobs_waiting))
                {
                    // trace
                    tb_trace_d("worker[%lu]: try pulling from waiting: %lu", worker->id, tb_list_entry_size(&impl->jobs_waiting));

                    // pull it
                    tb_remove_if_until(tb_list_entry_itor(&impl->jobs_waiting), tb_thread_pool_worker_walk_pull, worker);
                }

                // pull from the pending jobs and clean some finished and killed jobs
                if (tb_list_entry_size(&impl->jobs_pending))
                {
                    // trace
                    tb_trace_d("worker[%lu]: try pulling from pending: %lu", worker->id, tb_list_entry_size(&impl->jobs_pending));

                    // no jobs? try to pull from the pending jobs
                    if (!tb_vector_size(worker->jobs)) tb_remove_if(tb_list_entry_itor(&impl->jobs_pending), tb_thread_pool_worker_walk_pull_and_clean, worker);
                    // clean some finished and killed jobs
                    else tb_remove_if(tb_list_entry_itor(&impl->jobs_pending), tb_thread_pool_worker_walk_clean, worker);
                }

                // leave
                tb_spinlock_leave(&impl->lock);

                // idle? wait it
                if (!tb_vector_size(worker->jobs))
                {
                    // killed?
                    tb_check_break(!tb_atomic_get(&worker->bstoped));

                    // trace
                    tb_trace_d("worker[%lu]: wait: ..", worker->id);

                    // block on the pool semaphore until work is posted
                    tb_long_t wait = tb_semaphore_wait(impl->semaphore, -1);
                    tb_assert_and_check_break(wait > 0);

                    // trace
                    tb_trace_d("worker[%lu]: wait: ok", worker->id);

                    // continue it
                    continue;
                }
                else
                {
#ifdef TB_TRACE_DEBUG
                    // update the jobs urgent size
                    tb_size_t jobs_urgent_size = tb_list_entry_size(&impl->jobs_urgent);

                    // update the jobs waiting size
                    tb_size_t jobs_waiting_size = tb_list_entry_size(&impl->jobs_waiting);

                    // update the jobs pending size
                    tb_size_t jobs_pending_size = tb_list_entry_size(&impl->jobs_pending);

                    // trace
                    tb_trace_d("worker[%lu]: pull: jobs: %lu, time: %lu ms, waiting: %lu, pending: %lu, urgent: %lu", worker->id, tb_vector_size(worker->jobs), worker->pull, jobs_waiting_size, jobs_pending_size, jobs_urgent_size);
#endif
                }
            }

            // done jobs
            tb_for_all (tb_thread_pool_job_t*, job, worker->jobs)
            {
                // check
                tb_assert_and_check_continue(job && job->task.done);

                // the job state: atomically claim WAITING -> WORKING
                tb_size_t state = tb_atomic_fetch_and_pset(&job->state, TB_STATE_WAITING, TB_STATE_WORKING);

                // the job is waiting? work it
                if (state == TB_STATE_WAITING)
                {
                    // trace
                    tb_trace_d("worker[%lu]: done: task[%p:%s]: ..", worker->id, job->task.done, job->task.name);

                    // init the time
                    tb_hong_t time = tb_cache_time_spak();

                    // done the job
                    job->task.done((tb_thread_pool_worker_ref_t)worker, job->task.priv);

                    // computate the time
                    time = tb_cache_time_spak() - time;

                    // exists? update time and count
                    tb_size_t itor;
                    tb_hash_map_item_ref_t item = tb_null;
                    if (    ((itor = tb_hash_map_find(worker->stats, job->task.done)) != tb_iterator_tail(worker->stats))
                        &&  (item = (tb_hash_map_item_ref_t)tb_iterator_item(worker->stats, itor)))
                    {
                        // the stats
                        tb_thread_pool_job_stats_t* stats = (tb_thread_pool_job_stats_t*)item->data;
                        tb_assert_and_check_break(stats);

                        // update the done count
                        stats->done_count++;

                        // update the total time
                        stats->total_time += time;
                    }

                    // no item? add it
                    if (!item)
                    {
                        // init stats
                        tb_thread_pool_job_stats_t stats = {0};
                        stats.done_count = 1;
                        stats.total_time = time;

                        // add stats
                        tb_hash_map_insert(worker->stats, job->task.done, &stats);
                    }

#ifdef TB_TRACE_DEBUG
                    tb_size_t done_count = 0;
                    tb_hize_t total_time = 0;
                    tb_thread_pool_job_stats_t* stats = (tb_thread_pool_job_stats_t*)tb_hash_map_get(worker->stats, job->task.done);
                    if (stats)
                    {
                        done_count = stats->done_count;
                        total_time = stats->total_time;
                    }

                    // trace
                    // NOTE(review): if the stats lookup failed, done_count stays 0 and the
                    // average below divides by zero — confirm tb_hash_map_get cannot fail here
                    tb_trace_d("worker[%lu]: done: task[%p:%s]: time: %lld ms, average: %lld ms, count: %lu", worker->id, job->task.done, job->task.name, time, (total_time / (tb_hize_t)done_count), done_count);
#endif

                    // update the job state
                    tb_atomic_set(&job->state, TB_STATE_FINISHED);
                }
                // the job is killing? work it
                else if (state == TB_STATE_KILLING)
                {
                    // update the job state
                    tb_atomic_set(&job->state, TB_STATE_KILLED);
                }
            }

            // clear jobs
            tb_vector_clear(worker->jobs);
        }

    } while (0);

    // exit worker
    if (worker)
    {
        // trace
        tb_trace_d("worker[%lu]: exit", worker->id);

        // stoped
        tb_atomic_set(&worker->bstoped, 1);

        // exit all private data (in reverse registration order)
        tb_size_t i = 0;
        tb_size_t n = tb_arrayn(worker->priv);
        for (i = 0; i < n; i++)
        {
            // the private data
            tb_thread_pool_worker_priv_t* priv = &worker->priv[n - i - 1];

            // exit it
            if (priv->exit) priv->exit((tb_thread_pool_worker_ref_t)worker, priv->priv);

            // clear it
            priv->exit = tb_null;
            priv->priv = tb_null;
        }

        // exit stats
        if (worker->stats) tb_hash_map_exit(worker->stats);
        worker->stats = tb_null;

        // exit jobs
        if (worker->jobs) tb_vector_exit(worker->jobs);
        worker->jobs = tb_null;
    }

    // exit
    tb_thread_return(tb_null);
    return tb_null;
}
/* Start a new stackless ("lo") coroutine on the given scheduler.
 *
 * Reuses a coroutine object from the dead-coroutine cache when available,
 * and trims that cache back down to TB_SCHEDULER_DEAD_CACHE_MAXN afterwards.
 *
 * @param scheduler the scheduler; tb_null means the current thread's scheduler
 * @param func      the coroutine function
 * @param priv      the user private data passed to func
 * @param free      the free callback for priv (presumably may be tb_null — confirm)
 *
 * @return          tb_true on success
 */
tb_bool_t tb_lo_scheduler_start(tb_lo_scheduler_t* scheduler, tb_lo_coroutine_func_t func, tb_cpointer_t priv, tb_lo_coroutine_free_t free)
{
    // check
    tb_assert(func);

    // done
    tb_bool_t ok = tb_false;
    tb_lo_coroutine_t* coroutine = tb_null;
    do
    {
        // trace
        tb_trace_d("start ..");

        // get the current scheduler
        if (!scheduler) scheduler = (tb_lo_scheduler_t*)tb_lo_scheduler_self_();
        tb_assert_and_check_break(scheduler);

        // have been stopped? do not continue to start new coroutines
        tb_check_break(!scheduler->stopped);

        // reuses dead coroutines in init function
        if (tb_list_entry_size(&scheduler->coroutines_dead))
        {
            // get the next entry from head
            tb_list_entry_ref_t entry = tb_list_entry_head(&scheduler->coroutines_dead);
            tb_assert_and_check_break(entry);

            // remove it from the dead coroutines
            tb_list_entry_remove_head(&scheduler->coroutines_dead);

            // get the dead coroutine
            coroutine = (tb_lo_coroutine_t*)tb_list_entry(&scheduler->coroutines_dead, entry);

            // reinit this coroutine with the new function and private data
            tb_lo_coroutine_reinit(coroutine, func, priv, free);
        }

        // no reusable coroutine? init a fresh one
        if (!coroutine) coroutine = tb_lo_coroutine_init((tb_lo_scheduler_ref_t)scheduler, func, priv, free);
        tb_assert_and_check_break(coroutine);

        // ready coroutine
        tb_lo_scheduler_make_ready(scheduler, coroutine);

        // the dead coroutines is too much? free some coroutines
        while (tb_list_entry_size(&scheduler->coroutines_dead) > TB_SCHEDULER_DEAD_CACHE_MAXN)
        {
            // get the next entry from head
            tb_list_entry_ref_t entry = tb_list_entry_head(&scheduler->coroutines_dead);
            tb_assert(entry);

            // remove it from the dead coroutines
            tb_list_entry_remove_head(&scheduler->coroutines_dead);

            // exit this coroutine
            tb_lo_coroutine_exit((tb_lo_coroutine_t*)tb_list_entry(&scheduler->coroutines_dead, entry));
        }

        // ok
        ok = tb_true;

    } while (0);

    // trace
    tb_trace_d("start %s", ok? "ok" : "no");

    // ok?
    return ok;
}
/* Submit a transfer task (iurl -> ourl) to the pool and start it.
 *
 * The task is built and queued under the pool lock; the async open/transfer
 * is started outside the lock, and the task is rolled back from the work
 * list if that start fails.
 *
 * NOTE(review): the `offset` parameter is not used anywhere in this body —
 * confirm whether it was meant to be passed to the transfer.
 *
 * @return tb_true if the task was queued and started
 */
tb_bool_t tb_transfer_pool_done(tb_transfer_pool_ref_t pool, tb_char_t const* iurl, tb_char_t const* ourl, tb_hize_t offset, tb_size_t rate, tb_async_transfer_done_func_t done, tb_async_transfer_ctrl_func_t ctrl, tb_cpointer_t priv)
{
    // check
    tb_transfer_pool_impl_t* impl = (tb_transfer_pool_impl_t*)pool;
    tb_assert_and_check_return_val(impl && impl->aicp && iurl && ourl, tb_false);

    // enter
    tb_spinlock_enter(&impl->lock);

    // done
    tb_bool_t ok = tb_false;
    tb_transfer_task_t* task = tb_null;
    do
    {
        // check: the pool must still be running
        tb_check_break(TB_STATE_OK == tb_atomic_get(&impl->state));

        // too many tasks?
        if (tb_list_entry_size(&impl->work) >= impl->maxn)
        {
            // trace
            tb_trace_e("too many tasks, done task: %s => %s failed!", iurl, ourl);
            break;
        }

        // init task (reuses an idle task or allocates a new one)
        task = tb_transfer_task_init(impl, done, ctrl, priv);
        tb_assert_and_check_break(task && task->transfer);

        // init transfer stream
        if (!tb_async_transfer_init_istream_from_url(task->transfer, iurl)) break;
        if (!tb_async_transfer_init_ostream_from_url(task->transfer, ourl)) break;

        // init transfer rate
        tb_async_transfer_limitrate(task->transfer, rate);

        // check
        tb_assert_and_check_break(impl->pool);

        // append to the work list
        tb_list_entry_insert_tail(&impl->work, &task->entry);

        // ok
        ok = tb_true;

    } while (0);

    // trace
    tb_trace_d("done: task: %p, %s => %s, work: %lu, idle: %lu, state: %s", task, iurl, ourl, tb_list_entry_size(&impl->work), tb_list_entry_size(&impl->idle), ok? "ok" : "no");

    // failed? release the partially-built task (still under the lock)
    if (!ok)
    {
        // exit it
        if (task) tb_transfer_task_exit(impl, task);
        task = tb_null;
    }

    // leave
    tb_spinlock_leave(&impl->lock);

    // ok? start the async open+transfer outside the lock
    if (ok && task && task->transfer)
    {
        // done
        if (!tb_async_transfer_open_done(task->transfer, 0, tb_transfer_task_done, task))
        {
            // enter
            tb_spinlock_enter(&impl->lock);

            // remove task from the work list (rollback)
            tb_list_entry_remove(&impl->work, &task->entry);

            // exit task
            tb_transfer_task_exit(impl, task);

            // leave
            tb_spinlock_leave(&impl->lock);

            // failed
            ok = tb_false;
        }
    }

    // ok?
    return ok;
}
/* Allocate or reuse a transfer task.
 *
 * Reuses a task from the idle list when available (keeping its transfer
 * object); otherwise allocates a zeroed task from the fixed pool and creates
 * a new transfer. Presumably called with impl->lock held (all callers in
 * this view hold it — confirm).
 *
 * @return the task, or tb_null on failure
 */
static tb_transfer_task_t* tb_transfer_task_init(tb_transfer_pool_impl_t* impl, tb_async_transfer_done_func_t done, tb_async_transfer_ctrl_func_t ctrl, tb_cpointer_t priv)
{
    // check
    tb_assert_and_check_return_val(impl, tb_null);

    // done
    tb_bool_t ok = tb_false;
    tb_transfer_task_t* task = tb_null;
    do
    {
        // init task pool lazily; grow size scales with the pool capacity
        if (!impl->pool) impl->pool = tb_fixed_pool_init(tb_null, (impl->maxn >> 4) + 16, sizeof(tb_transfer_task_t), tb_null, tb_null, tb_null);
        tb_assert_and_check_break(impl->pool);

        // init task from the idle list first
        if (tb_list_entry_size(&impl->idle))
        {
            // get the head entry
            tb_list_entry_ref_t entry = tb_list_entry_head(&impl->idle);
            tb_assert_and_check_break(entry);

            // the task
            task = (tb_transfer_task_t*)tb_list_entry(&impl->idle, entry);
            tb_assert_and_check_break(task);

            // remove it from the idle list
            tb_list_entry_remove(&impl->idle, entry);

            // check: an idle task keeps its transfer object for reuse
            tb_assert_and_check_break(task->transfer);
        }
        // init task from the task pool
        else
        {
            // make task (zero-initialized)
            task = (tb_transfer_task_t*)tb_fixed_pool_malloc0(impl->pool);
            tb_assert_and_check_break(task);

            // init transfer
            task->transfer = tb_async_transfer_init(impl->aicp, tb_true);
            tb_assert_and_check_break(task->transfer);
        }

        // init ctrl
        if (ctrl && !tb_async_transfer_ctrl(task->transfer, ctrl, priv)) break;

        // init task
        task->func = done;
        task->priv = priv;
        task->pool = impl;

        // ok
        ok = tb_true;

    } while (0);

    // failed? release the partially-built task
    if (!ok)
    {
        // exit it
        if (task) tb_transfer_task_exit(impl, task);
        task = tb_null;
    }

    // ok?
    return task;
}
/* Post one task to the thread pool's job queues.
 *
 * Inserts the job into the urgent or waiting queue, bumps *post_size (the
 * number of semaphore posts the caller should make, capped by worker count),
 * and lazily spins up additional worker threads when the queued jobs exceed
 * the current worker count. Presumably called with impl->lock held — confirm
 * against callers.
 *
 * @return the new job, or tb_null on failure
 */
static tb_thread_pool_job_t* tb_thread_pool_jobs_post_task(tb_thread_pool_impl_t* impl, tb_thread_pool_task_t const* task, tb_size_t* post_size)
{
    // check
    tb_assert_and_check_return_val(impl && task && task->done && post_size, tb_null);

    // done
    tb_bool_t ok = tb_false;
    tb_thread_pool_job_t* job = tb_null;
    do
    {
        // check: waiting + urgent queue must stay under the cap
        tb_assert_and_check_break(tb_list_entry_size(&impl->jobs_waiting) + tb_list_entry_size(&impl->jobs_urgent) + 1 < TB_THREAD_POOL_JOBS_WAITING_MAXN);

        // make job (zero-initialized)
        job = (tb_thread_pool_job_t*)tb_fixed_pool_malloc0(impl->jobs_pool);
        tb_assert_and_check_break(job);

        // init job
        job->refn = 1;
        job->state = TB_STATE_WAITING;
        job->task = *task;

        // non-urgent job?
        if (!task->urgent)
        {
            // post to the waiting jobs
            tb_list_entry_insert_tail(&impl->jobs_waiting, &job->entry);
        }
        else
        {
            // post to the urgent jobs
            tb_list_entry_insert_tail(&impl->jobs_urgent, &job->entry);
        }

        // the waiting jobs count (both queues combined)
        tb_size_t jobs_waiting_count = tb_list_entry_size(&impl->jobs_waiting) + tb_list_entry_size(&impl->jobs_urgent);
        tb_assert_and_check_break(jobs_waiting_count);

        // update the post size, cap at the current worker count
        if (*post_size < impl->worker_size) (*post_size)++;

        // trace
        tb_trace_d("task[%p:%s]: post: %lu: ..", task->done, task->name, *post_size);

        // init them if the workers have been not inited
        if (impl->worker_size < jobs_waiting_count)
        {
            tb_size_t i = impl->worker_size;
            tb_size_t n = tb_min(jobs_waiting_count, impl->worker_maxn);
            for (; i < n; i++)
            {
                // the worker
                tb_thread_pool_worker_t* worker = &impl->worker_list[i];

                // clear worker
                tb_memset(worker, 0, sizeof(tb_thread_pool_worker_t));

                // init worker and launch its thread
                worker->id = i;
                worker->pool = (tb_thread_pool_ref_t)impl;
                worker->loop = tb_thread_init(__tb_lstring__("thread_pool"), tb_thread_pool_worker_loop, worker, impl->stack);
                tb_assert_and_check_continue(worker->loop);
            }

            // update the worker size
            impl->worker_size = i;
        }

        // ok
        ok = tb_true;

    } while (0);

    // failed?
    if (!ok)
    {
        // exit it
        // NOTE(review): tb_fixed_pool_free is called even when job is tb_null
        // (allocation failed) — confirm it tolerates a null item
        tb_fixed_pool_free(impl->jobs_pool, job);
        job = tb_null;
    }

    // ok?
    return job;
}