/* compute the delay (ms) until the earliest pending task fires.
 * returns 0 if the task is already due, -1 on error or when stopped.
 */
tb_size_t tb_timer_delay(tb_timer_ref_t timer)
{
    // check
    tb_timer_impl_t* impl = (tb_timer_impl_t*)timer;
    tb_assert_and_check_return_val(impl && impl->heap, -1);

    // a stopped timer has no delay
    tb_assert_and_check_return_val(!tb_atomic_get(&impl->stop), -1);

    // enter
    tb_spinlock_enter(&impl->lock);

    // peek the earliest task and compute its remaining delay
    tb_size_t delay = -1;
    if (tb_heap_size(impl->heap))
    {
        tb_timer_task_impl_t const* top_task = (tb_timer_task_impl_t const*)tb_heap_top(impl->heap);
        if (top_task)
        {
            // the current timer time
            tb_hong_t now = tb_timer_now(impl);

            // overdue tasks fire immediately
            if (top_task->when > now) delay = (tb_size_t)(top_task->when - now);
            else delay = 0;
        }
    }

    // leave
    tb_spinlock_leave(&impl->lock);

    // ok?
    return delay;
}
/* bind the output stream of a closed transfer to an in-memory data buffer.
 * reuses an existing data stream when possible; otherwise creates one and
 * takes ownership of it.
 */
tb_bool_t tb_async_transfer_init_ostream_from_data(tb_async_transfer_ref_t transfer, tb_byte_t* data, tb_size_t size)
{
    // check
    tb_async_transfer_impl_t* impl = (tb_async_transfer_impl_t*)transfer;
    tb_assert_and_check_return_val(impl && impl->aicp && data && size, tb_false);

    // must be closed before rebinding the output stream
    tb_assert_and_check_return_val(TB_STATE_CLOSED == tb_atomic_get(&impl->state), tb_false);

    // drop a previous output stream of a different type first
    if (impl->ostream && tb_async_stream_type(impl->ostream) != TB_STREAM_TYPE_DATA)
    {
        // only exit streams we own
        if (impl->oowner) tb_async_stream_exit(impl->ostream);
        impl->ostream = tb_null;
    }

    // reuse the existing data stream: just point it at the new buffer
    if (impl->ostream)
        return tb_async_stream_ctrl(impl->ostream, TB_STREAM_CTRL_DATA_SET_DATA, data, size)? tb_true : tb_false;

    // otherwise create a fresh data stream
    impl->ostream = tb_async_stream_init_from_data(impl->aicp, data, size);
    tb_assert_and_check_return_val(impl->ostream, tb_false);

    // we own the stream we created
    impl->oowner = 1;

    // ok
    return tb_true;
}
/* the absolute time (when) of the earliest pending task, or -1 if none.
 */
tb_hize_t tb_timer_top(tb_timer_ref_t timer)
{
    // check
    tb_timer_impl_t* impl = (tb_timer_impl_t*)timer;
    tb_assert_and_check_return_val(impl && impl->heap, -1);

    // a stopped timer has no pending tasks
    tb_assert_and_check_return_val(!tb_atomic_get(&impl->stop), -1);

    // enter
    tb_spinlock_enter(&impl->lock);

    // peek the earliest task
    tb_hize_t when = -1;
    if (tb_heap_size(impl->heap))
    {
        tb_timer_task_impl_t const* top_task = (tb_timer_task_impl_t const*)tb_heap_top(impl->heap);
        if (top_task) when = top_task->when;
    }

    // leave
    tb_spinlock_leave(&impl->lock);

    // ok?
    return when;
}
/* post an aice after the given delay (ms); a zero delay posts immediately.
 * the aice is copied to the heap so it outlives this call; presumably the
 * copy is freed by tb_aicp_post_after_func — TODO confirm.
 */
tb_bool_t tb_aicp_post_after_(tb_aicp_ref_t aicp, tb_size_t delay, tb_aice_ref_t aice __tb_debug_decl__)
{
    // check
    tb_aicp_impl_t* impl = (tb_aicp_impl_t*)aicp;
    tb_assert_and_check_return_val(impl && impl->ptor && impl->ptor->post, tb_false);
    tb_assert_and_check_return_val(aice && aice->aico, tb_false);

    // killed? refuse new work
    tb_check_return_val(!tb_atomic_get(&impl->kill_all), tb_false);

    // no delay? post it directly without a timer task
    if (!delay) return tb_aicp_post_(aicp, aice __tb_debug_args__);

    // the aico
    tb_aico_impl_t* aico = (tb_aico_impl_t*)aice->aico;
    tb_assert_and_check_return_val(aico, tb_false);

    // make the posted aice (heap copy, zero-initialized)
    tb_aice_ref_t posted_aice = tb_malloc0_type(tb_aice_t);
    tb_assert_and_check_return_val(posted_aice, tb_false);

    // init the posted aice
    *posted_aice = *aice;

    // run the delay task which posts the copied aice when it fires
    return tb_aico_task_run_((tb_aico_ref_t)aico, delay, tb_aicp_post_after_func, posted_aice __tb_debug_args__);
}
/* open a task aico and register it with the proactor.
 * ltimer selects the low-precision timer; a non-null handle requests the
 * higher precision timer (see the hack note below).
 *
 * fix: the original read impl->aicp BEFORE asserting impl != null — a
 * potential null-pointer dereference. validate impl first.
 */
tb_bool_t tb_aico_open_task(tb_aico_ref_t aico, tb_bool_t ltimer)
{
    // check the aico before touching any of its fields
    tb_aico_impl_t* impl = (tb_aico_impl_t*)aico;
    tb_assert_and_check_return_val(impl && impl->aicp, tb_false);

    // check the proactor
    tb_aicp_impl_t* aicp_impl = (tb_aicp_impl_t*)impl->aicp;
    tb_assert_and_check_return_val(aicp_impl->ptor && aicp_impl->ptor->addo, tb_false);

    // done
    tb_bool_t ok = tb_false;
    do
    {
        // must be closed and not yet bound to a type
        tb_assert_and_check_break(tb_atomic_get(&impl->state) == TB_STATE_CLOSED);
        tb_assert_and_check_break(!impl->type);

        // bind type and handle
        // hack: handle != null? using higher precision timer for being compatible with sock/file task
        impl->type = TB_AICO_TYPE_TASK;
        impl->handle = (tb_handle_t)(tb_size_t)!ltimer;

        // addo aico
        ok = aicp_impl->ptor->addo(aicp_impl->ptor, impl);
        tb_assert_and_check_break(ok);

        // opened
        tb_atomic_set(&impl->state, TB_STATE_OPENED);

    } while (0);

    // ok?
    return ok;
}
/* open a socket aico: bind the socket handle and register with the proactor.
 *
 * fix: the original read impl->aicp BEFORE asserting impl != null — a
 * potential null-pointer dereference. validate impl first.
 */
tb_bool_t tb_aico_open_sock(tb_aico_ref_t aico, tb_socket_ref_t sock)
{
    // check the aico before touching any of its fields
    tb_aico_impl_t* impl = (tb_aico_impl_t*)aico;
    tb_assert_and_check_return_val(impl && impl->aicp && sock, tb_false);

    // check the proactor
    tb_aicp_impl_t* aicp_impl = (tb_aicp_impl_t*)impl->aicp;
    tb_assert_and_check_return_val(aicp_impl->ptor && aicp_impl->ptor->addo, tb_false);

    // done
    tb_bool_t ok = tb_false;
    do
    {
        // must be closed and not yet bound to a type or handle
        tb_assert_and_check_break(tb_atomic_get(&impl->state) == TB_STATE_CLOSED);
        tb_assert_and_check_break(!impl->type && !impl->handle);

        // bind type and handle
        impl->type = TB_AICO_TYPE_SOCK;
        impl->handle = (tb_handle_t)sock;

        // addo aico
        ok = aicp_impl->ptor->addo(aicp_impl->ptor, impl);
        tb_assert_and_check_break(ok);

        // opened
        tb_atomic_set(&impl->state, TB_STATE_OPENED);

    } while (0);

    // ok?
    return ok;
}
/* wait the semaphore (windows backend).
 * returns 1 if signalled, 0 on timeout, -1 on error.
 * a negative timeout waits forever (INFINITE).
 */
tb_long_t tb_semaphore_wait(tb_semaphore_ref_t semaphore, tb_long_t timeout)
{
    // check: a valid kernel semaphore handle is required
    tb_semaphore_impl_t* impl = (tb_semaphore_impl_t*)semaphore;
    tb_assert_and_check_return_val(semaphore && impl->semaphore && impl->semaphore != INVALID_HANDLE_VALUE, -1);

    // wait (negative timeout maps to INFINITE)
    tb_long_t r = WaitForSingleObject(impl->semaphore, timeout >= 0? timeout : INFINITE);
    tb_assert_and_check_return_val(r != WAIT_FAILED, -1);

    // timeout?
    tb_check_return_val(r != WAIT_TIMEOUT, 0);

    // error? anything below WAIT_OBJECT_0 is unexpected
    tb_check_return_val(r >= WAIT_OBJECT_0, -1);

    // check the mirrored counter
    // NOTE(review): the read + decrement below are not atomic as a pair;
    // presumably safe because the kernel object gates entry — confirm under contention
    tb_assert_and_check_return_val((tb_long_t)tb_atomic_get(&impl->value) > 0, -1);

    // value--: keep the user-visible counter in sync with the kernel object
    tb_atomic_fetch_and_dec(&impl->value);

    // ok
    return 1;
}
/* exit the timer: request stop, wait for the loop to drain, wake any waiter,
 * then destroy all resources under the lock and free the timer itself.
 */
tb_void_t tb_timer_exit(tb_timer_ref_t timer)
{
    // check
    tb_timer_impl_t* impl = (tb_timer_impl_t*)timer;
    tb_assert_and_check_return(impl);

    // stop it so the loop can observe the flag and exit
    tb_atomic_set(&impl->stop, 1);

    // wait loop exit: poll up to ~5s (10 x 500ms)
    tb_size_t tryn = 10;
    while (tb_atomic_get(&impl->work) && tryn--) tb_msleep(500);

    // warning
    if (!tryn && tb_atomic_get(&impl->work)) tb_trace_w("[timer]: the loop has been not exited now!");

    // post event: grab the event under the lock, post outside the lock,
    // to wake a loop that may be blocked in tb_event_wait
    tb_spinlock_enter(&impl->lock);
    tb_event_ref_t event = impl->event;
    tb_spinlock_leave(&impl->lock);
    if (event) tb_event_post(event);

    // enter
    tb_spinlock_enter(&impl->lock);

    // exit heap
    if (impl->heap) tb_heap_exit(impl->heap);
    impl->heap = tb_null;

    // exit pool
    if (impl->pool) tb_fixed_pool_exit(impl->pool);
    impl->pool = tb_null;

    // exit event
    if (impl->event) tb_event_exit(impl->event);
    impl->event = tb_null;

    // leave
    tb_spinlock_leave(&impl->lock);

    // exit lock
    tb_spinlock_exit(&impl->lock);

    // exit it
    tb_free(impl);
}
/* try closing the aico without blocking: succeeds only if it is already closed.
 */
tb_bool_t tb_aico_clos_try(tb_aico_ref_t aico)
{
    // check
    tb_aico_impl_t* impl = (tb_aico_impl_t*)aico;
    tb_assert_and_check_return_val(impl && impl->aicp, tb_false);

    // only an already-closed aico can be "closed" without blocking
    if (TB_STATE_CLOSED == tb_atomic_get(&impl->state)) return tb_true;
    return tb_false;
}
/* read the configured timeout for the given event type, -1 on bad arguments.
 */
tb_long_t tb_aico_timeout(tb_aico_ref_t aico, tb_size_t type)
{
    // check: type must index into the timeout table
    tb_aico_impl_t* impl = (tb_aico_impl_t*)aico;
    tb_assert_and_check_return_val(impl && type < tb_arrayn(impl->timeout), -1);

    // read the slot atomically
    tb_atomic_t* slot = (tb_atomic_t*)(impl->timeout + type);
    return tb_atomic_get(slot);
}
/* the current semaphore counter value, or -1 on error.
 *
 * fix: the original returned tb_false (0) on a failed check, but 0 is a
 * valid semaphore value and the function returns tb_long_t; the sibling
 * implementation returns -1 here — use the same unambiguous sentinel.
 */
tb_long_t tb_semaphore_value(tb_semaphore_ref_t self)
{
    // check
    tb_atomic_t* semaphore = (tb_atomic_t*)self;
    tb_assert_and_check_return_val(semaphore, -1);

    // get value
    return (tb_long_t)tb_atomic_get(semaphore);
}
/* the current semaphore counter value, or -1 on error.
 */
tb_long_t tb_semaphore_value(tb_semaphore_ref_t semaphore)
{
    // check
    tb_semaphore_impl_t* impl = (tb_semaphore_impl_t*)semaphore;
    tb_assert_and_check_return_val(semaphore, -1);

    // read the counter atomically
    tb_long_t value = (tb_long_t)tb_atomic_get(&impl->value);
    return value;
}
/* resume a paused transfer: clear the pause state, reset the rate counters
 * and restart reading from the input stream.
 *
 * fix: the original redeclared state_pause inside the do-block, shadowing
 * the outer variable, so the failure path's tb_atomic_pset() restore always
 * compared against TB_STATE_OK and never restored the pause state. assign
 * to the outer variable instead.
 */
tb_bool_t tb_async_transfer_resume(tb_async_transfer_ref_t transfer)
{
    // check
    tb_async_transfer_impl_t* impl = (tb_async_transfer_impl_t*)transfer;
    tb_assert_and_check_return_val(impl && impl->aicp, tb_false);

    // done
    tb_bool_t ok = tb_false;
    tb_size_t state_pause = TB_STATE_OK;
    do
    {
        // must be opened?
        tb_check_break(TB_STATE_OPENED == tb_atomic_get(&impl->state));

        // resume it (no shadowing: the restore below must see this value)
        state_pause = tb_atomic_fetch_and_set(&impl->state_pause, TB_STATE_OK);

        // pausing or ok? return ok directly
        tb_check_return_val(state_pause == TB_STATE_PAUSED, tb_true);

        // check
        tb_assert_and_check_break(impl->istream);
        tb_assert_and_check_break(impl->ostream);

        // init some rate info
        impl->done.base_time      = tb_aicp_time(impl->aicp);
        impl->done.base_time1s    = impl->done.base_time;
        impl->done.saved_size1s   = 0;
        impl->done.current_rate   = 0;

        // read it
        if (!tb_async_stream_read(impl->istream, (tb_size_t)tb_atomic_get(&impl->limited_rate), tb_async_transfer_istream_read_func, impl)) break;

        // ok
        ok = tb_true;

    } while (0);

    // failed? restore the pause state we consumed above
    if (!ok && state_pause != TB_STATE_OK) tb_atomic_pset(&impl->state_pause, TB_STATE_OK, state_pause);

    // ok?
    return ok;
}
/* start transferring an opened transfer: save the callback and kick off the
 * first read from the input stream at the configured rate limit.
 */
tb_bool_t tb_async_transfer_done(tb_async_transfer_ref_t transfer, tb_async_transfer_done_func_t func, tb_cpointer_t priv)
{
    // check
    tb_async_transfer_impl_t* impl = (tb_async_transfer_impl_t*)transfer;
    tb_assert_and_check_return_val(impl && func, tb_false);

    // the transfer must already be opened
    tb_assert_and_check_return_val(TB_STATE_OPENED == tb_atomic_get(&impl->state), tb_false);

    // both streams must be bound
    tb_assert_and_check_return_val(impl->istream && impl->ostream, tb_false);

    // remember the done callback
    impl->done.func = func;
    impl->done.priv = priv;

    // start reading with the configured rate limit
    tb_size_t limited_rate = (tb_size_t)tb_atomic_get(&impl->limited_rate);
    return tb_async_stream_read(impl->istream, limited_rate, tb_async_transfer_istream_read_func, impl);
}
/* bind the input stream of a closed transfer to the given url.
 * reuses a previous stream of the same type; otherwise creates a new one
 * (taking ownership of it).
 */
tb_bool_t tb_async_transfer_init_istream_from_url(tb_async_transfer_ref_t transfer, tb_char_t const* url)
{
    // check
    tb_async_transfer_impl_t* impl = (tb_async_transfer_impl_t*)transfer;
    tb_assert_and_check_return_val(impl && impl->aicp && url, tb_false);

    // muse be closed
    tb_assert_and_check_return_val(TB_STATE_CLOSED == tb_atomic_get(&impl->state), tb_false);

    // check stream type
    if (impl->istream)
    {
        // probe protocol
        tb_size_t protocol = tb_url_protocol_probe(url);

        // the code below relies on the protocol and stream-type enum values
        // being numerically identical — enforce this at compile time
        tb_assert_static((tb_size_t)TB_URL_PROTOCOL_FILE == (tb_size_t)TB_STREAM_TYPE_FILE);
        tb_assert_static((tb_size_t)TB_URL_PROTOCOL_HTTP == (tb_size_t)TB_STREAM_TYPE_HTTP);
        tb_assert_static((tb_size_t)TB_URL_PROTOCOL_SOCK == (tb_size_t)TB_STREAM_TYPE_SOCK);
        tb_assert_static((tb_size_t)TB_URL_PROTOCOL_DATA == (tb_size_t)TB_STREAM_TYPE_DATA);

        // protocol => type
        tb_size_t type = protocol;
        if (!type || type > TB_STREAM_TYPE_DATA)
        {
            tb_trace_e("unknown stream for url: %s", url);
            return tb_false;
        }

        // exit the previous stream first if be different stream type
        if (tb_async_stream_type(impl->istream) != type)
        {
            // only exit streams we own
            if (impl->iowner) tb_async_stream_exit(impl->istream);
            impl->istream = tb_null;
        }
    }

    // using the previous stream?
    if (impl->istream)
    {
        // ctrl stream: just retarget it to the new url
        if (!tb_async_stream_ctrl(impl->istream, TB_STREAM_CTRL_SET_URL, url)) return tb_false;
    }
    else
    {
        // init stream
        impl->istream = tb_async_stream_init_from_url(impl->aicp, url);
        tb_assert_and_check_return_val(impl->istream, tb_false);

        // init owner: we created it, so we must exit it later
        impl->iowner = 1;
    }

    // ok
    return tb_true;
}
tb_timer_task_ref_t tb_timer_task_init_at(tb_timer_ref_t timer, tb_hize_t when, tb_size_t period, tb_bool_t repeat, tb_timer_task_func_t func, tb_cpointer_t priv) { // check tb_timer_impl_t* impl = (tb_timer_impl_t*)timer; tb_assert_and_check_return_val(impl && impl->pool && impl->heap && func, tb_null); // stoped? tb_assert_and_check_return_val(!tb_atomic_get(&impl->stop), tb_null); // enter tb_spinlock_enter(&impl->lock); // make task tb_event_ref_t event = tb_null; tb_hize_t when_top = -1; tb_timer_task_impl_t* task_impl = (tb_timer_task_impl_t*)tb_fixed_pool_malloc0(impl->pool); if (task_impl) { // the top when if (tb_heap_size(impl->heap)) { tb_timer_task_impl_t* task_impl = (tb_timer_task_impl_t*)tb_heap_top(impl->heap); if (task_impl) when_top = task_impl->when; } // init task task_impl->refn = 2; task_impl->func = func; task_impl->priv = priv; task_impl->when = when; task_impl->period = period; task_impl->repeat = repeat? 1 : 0; // add task tb_heap_put(impl->heap, task_impl); // the event event = impl->event; } // leave tb_spinlock_leave(&impl->lock); // post event if the top task is changed if (event && task_impl && when < when_top) tb_event_post(event); // ok? return (tb_timer_task_ref_t)task_impl; }
/* top up the wait semaphore so that every attached worker can be woken.
 */
static tb_void_t tb_aiop_spak_work(tb_aiop_ptor_impl_t* impl)
{
    // check
    tb_assert_and_check_return(impl && impl->wait && impl->base.aicp);

    // the number of workers attached to the aicp
    tb_size_t worker_count = tb_atomic_get(&impl->base.aicp->work);

    // the current semaphore value (may be -1 on error)
    tb_long_t sem_value = tb_semaphore_value(impl->wait);

    // post just enough signals to cover every worker
    if (sem_value >= 0 && sem_value < worker_count) tb_semaphore_post(impl->wait, worker_count - sem_value);
}
/* exit the aicp: kill everything, wait for aicos and workers to drain,
 * then tear down the proactor, the aico pool and the lock, and free it.
 * returns tb_false if aicos are still pending after the wait.
 */
tb_bool_t tb_aicp_exit(tb_aicp_ref_t aicp)
{
    // check
    tb_aicp_impl_t* impl = (tb_aicp_impl_t*)aicp;
    tb_assert_and_check_return_val(impl, tb_false);

    // kill all first
    tb_aicp_kill_all((tb_aicp_ref_t)impl);

    // wait all exiting (up to 5s)
    if (tb_aicp_wait_all((tb_aicp_ref_t)impl, 5000) <= 0)
    {
        // wait failed, trace left aicos for debugging and abort the exit
        tb_spinlock_enter(&impl->lock);
        if (impl->pool) tb_fixed_pool_walk(impl->pool, tb_aicp_walk_wait, tb_null);
        tb_spinlock_leave(&impl->lock);
        return tb_false;
    }

    // kill loop
    tb_aicp_kill((tb_aicp_ref_t)impl);

    // wait workers exiting: poll for up to 5s
    tb_hong_t time = tb_mclock();
    while (tb_atomic_get(&impl->work) && (tb_mclock() < time + 5000)) tb_msleep(500);

    // exit proactor
    if (impl->ptor)
    {
        tb_assert(impl->ptor && impl->ptor->exit);
        impl->ptor->exit(impl->ptor);
        impl->ptor = tb_null;
    }

    // exit aico pool (under the lock)
    tb_spinlock_enter(&impl->lock);
    if (impl->pool) tb_fixed_pool_exit(impl->pool);
    impl->pool = tb_null;
    tb_spinlock_leave(&impl->lock);

    // exit lock
    tb_spinlock_exit(&impl->lock);

    // free impl
    tb_free(impl);

    // ok
    return tb_true;
}
/* install the ctrl callback; only allowed while the transfer is closed.
 */
tb_bool_t tb_async_transfer_ctrl(tb_async_transfer_ref_t transfer, tb_async_transfer_ctrl_func_t func, tb_cpointer_t priv)
{
    // check
    tb_async_transfer_impl_t* impl = (tb_async_transfer_impl_t*)transfer;
    tb_assert_and_check_return_val(impl && func, tb_false);

    // the callback may only be changed on a closed transfer
    tb_assert_and_check_return_val(TB_STATE_CLOSED == tb_atomic_get(&impl->state), tb_false);

    // save the callback and its private data
    impl->ctrl.priv = priv;
    impl->ctrl.func = func;

    // ok
    return tb_true;
}
/* callback invoked when the output stream finishes opening.
 * on success (and if not killed) reports the open via impl->open.func;
 * on any failure reports the error state instead.
 */
static tb_bool_t tb_async_transfer_ostream_open_func(tb_async_stream_ref_t stream, tb_size_t state, tb_cpointer_t priv)
{
    // check
    tb_async_transfer_impl_t* impl = (tb_async_transfer_impl_t*)priv;
    tb_assert_and_check_return_val(stream && impl && impl->open.func, tb_false);

    // trace
    tb_trace_d("open: ostream: %s, state: %s", tb_url_cstr(tb_async_stream_url(stream)), tb_state_cstr(state));

    // done
    tb_bool_t ok = tb_true;
    do
    {
        // ok?
        tb_check_break(state == TB_STATE_OK);

        // reset state: any break below is treated as unknown error
        state = TB_STATE_UNKNOWN_ERROR;

        // check
        tb_assert_and_check_break(impl->istream);

        // killed? report TB_STATE_KILLED instead of success
        if (TB_STATE_KILLING == tb_atomic_get(&impl->state))
        {
            state = TB_STATE_KILLED;
            break;
        }

        // done func: report the istream offset/size to the opener
        ok = tb_async_transfer_open_func(impl, TB_STATE_OK, tb_async_stream_offset(impl->istream), tb_async_stream_size(impl->istream), impl->open.func, impl->open.priv);

        // ok
        state = TB_STATE_OK;

    } while (0);

    // failed?
    if (state != TB_STATE_OK)
    {
        // done func: report the failure state
        ok = tb_async_transfer_open_func(impl, state, 0, 0, impl->open.func, impl->open.priv);
    }

    // ok
    return ok;
}
/* open the transfer if needed, then start transferring.
 * on a closed transfer the callback is stashed and the transfer is opened
 * first; tb_async_transfer_open_done_func then starts the transfer.
 */
tb_bool_t tb_async_transfer_open_done(tb_async_transfer_ref_t transfer, tb_hize_t offset, tb_async_transfer_done_func_t func, tb_cpointer_t priv)
{
    // check
    tb_async_transfer_impl_t* impl = (tb_async_transfer_impl_t*)transfer;
    tb_assert_and_check_return_val(impl && func, tb_false);

    // already opened? start the transfer directly
    if (TB_STATE_CLOSED != tb_atomic_get(&impl->state))
        return tb_async_transfer_done(transfer, func, priv);

    // closed: remember the callback and open first
    impl->done.func = func;
    impl->done.priv = priv;
    return tb_async_transfer_open(transfer, offset, tb_async_transfer_open_done_func, impl);
}
/* dump the thread pool state (workers and pending jobs) to the trace log.
 * debug/diagnostic helper; holds the pool lock while dumping.
 */
tb_void_t tb_thread_pool_dump(tb_thread_pool_ref_t pool)
{
    // check
    tb_thread_pool_impl_t* impl = (tb_thread_pool_impl_t*)pool;
    tb_assert_and_check_return(impl);

    // enter
    tb_spinlock_enter(&impl->lock);

    // dump workers
    if (impl->worker_size)
    {
        // trace
        tb_trace_i("");
        tb_trace_i("workers: size: %lu, maxn: %lu", impl->worker_size, impl->worker_maxn);

        // walk every live worker slot
        tb_size_t i = 0;
        for (i = 0; i < impl->worker_size; i++)
        {
            // the worker
            tb_thread_pool_worker_t* worker = &impl->worker_list[i];
            tb_assert_and_check_break(worker);

            // dump worker
            tb_trace_i("    worker: id: %lu, stoped: %ld", worker->id, (tb_long_t)tb_atomic_get(&worker->bstoped));
        }

        // trace
        tb_trace_i("");

        // dump all jobs
        if (impl->jobs_pool)
        {
            // trace
            tb_trace_i("jobs: size: %lu", tb_fixed_pool_size(impl->jobs_pool));

            // dump jobs
            tb_fixed_pool_walk(impl->jobs_pool, tb_thread_pool_jobs_walk_dump_all, tb_null);
        }
    }

    // leave
    tb_spinlock_leave(&impl->lock);
}
/* open a file aico from a path: open the file in asio mode, bind it and
 * register the aico with the proactor. the file is closed again on failure.
 *
 * fix: the original read impl->aicp BEFORE asserting impl != null — a
 * potential null-pointer dereference. validate impl first.
 */
tb_bool_t tb_aico_open_file_from_path(tb_aico_ref_t aico, tb_char_t const* path, tb_size_t mode)
{
    // check the aico before touching any of its fields
    tb_aico_impl_t* impl = (tb_aico_impl_t*)aico;
    tb_assert_and_check_return_val(impl && impl->aicp && path, tb_false);

    // check the proactor
    tb_aicp_impl_t* aicp_impl = (tb_aicp_impl_t*)impl->aicp;
    tb_assert_and_check_return_val(aicp_impl->ptor && aicp_impl->ptor->addo, tb_false);

    // done
    tb_bool_t       ok = tb_false;
    tb_file_ref_t   file = tb_null;
    do
    {
        // must be closed and not yet bound to a type or handle
        tb_assert_and_check_break(tb_atomic_get(&impl->state) == TB_STATE_CLOSED);
        tb_assert_and_check_break(!impl->type && !impl->handle);

        // init file: force asio mode for the proactor
        file = tb_file_init(path, mode | TB_FILE_MODE_ASIO);
        tb_assert_and_check_break(file);

        // bind type and handle
        impl->type = TB_AICO_TYPE_FILE;
        impl->handle = (tb_handle_t)file;

        // addo aico
        ok = aicp_impl->ptor->addo(aicp_impl->ptor, impl);
        tb_assert_and_check_break(ok);

        // opened
        tb_atomic_set(&impl->state, TB_STATE_OPENED);

    } while (0);

    // failed? release the file we opened
    if (!ok)
    {
        // exit it
        if (file) tb_file_exit(file);
        file = tb_null;
    }

    // ok?
    return ok;
}
/* wait until all jobs in the pool are finished or the timeout elapses.
 * timeout < 0 waits forever. returns 1 if all jobs finished, 0 on timeout,
 * -1 on bad arguments. polls the job count every 200ms.
 */
tb_long_t tb_thread_pool_task_wait_all(tb_thread_pool_ref_t pool, tb_long_t timeout)
{
    // check
    tb_thread_pool_impl_t* impl = (tb_thread_pool_impl_t*)pool;
    tb_assert_and_check_return_val(impl, -1);

    // wait it
    tb_size_t size = 0;
    tb_hong_t time = tb_cache_time_spak();
    while ((timeout < 0 || tb_cache_time_spak() < time + timeout))
    {
        // enter
        tb_spinlock_enter(&impl->lock);

        // the jobs count
        size = impl->jobs_pool? tb_fixed_pool_size(impl->jobs_pool) : 0;

        // trace
        tb_trace_d("wait: jobs: %lu, waiting: %lu, pending: %lu, urgent: %lu: .."
                , size
                , tb_list_entry_size(&impl->jobs_waiting)
                , tb_list_entry_size(&impl->jobs_pending)
                , tb_list_entry_size(&impl->jobs_urgent));

#if 0
        tb_for_all_if (tb_thread_pool_job_t*, job, tb_list_entry_itor(&impl->jobs_pending), job)
        {
            tb_trace_d("wait: job: %s from pending", tb_state_cstr(tb_atomic_get(&job->state)));
        }
#endif

        // leave
        tb_spinlock_leave(&impl->lock);

        // ok? no jobs left
        tb_check_break(size);

        // wait some time before polling again
        tb_msleep(200);
    }

    // ok?
    return !size? 1 : 0;
}
/* callback invoked when the deferred open (from tb_async_transfer_open_done)
 * completes: on success it starts the actual transfer; on any failure it
 * reports the error state via tb_async_transfer_done_func.
 */
static tb_bool_t tb_async_transfer_open_done_func(tb_size_t state, tb_hize_t offset, tb_hong_t size, tb_cpointer_t priv)
{
    // the impl
    tb_async_transfer_impl_t* impl = (tb_async_transfer_impl_t*)priv;
    tb_assert_and_check_return_val(impl && impl->done.func, tb_false);

    // trace
    tb_trace_d("open_done: offset: %llu, size: %lld, state: %s", offset, size, tb_state_cstr(state));

    // done
    tb_bool_t ok = tb_true;
    do
    {
        // ok?
        tb_check_break(state == TB_STATE_OK);

        // reset state: any break below is treated as unknown error
        state = TB_STATE_UNKNOWN_ERROR;

        // killed? report TB_STATE_KILLED instead of starting the transfer
        if (TB_STATE_KILLING == tb_atomic_get(&impl->state))
        {
            state = TB_STATE_KILLED;
            break;
        }

        // done it: start the transfer with the saved callback
        if (!tb_async_transfer_done((tb_async_transfer_ref_t)impl, impl->done.func, impl->done.priv)) break;

        // ok
        state = TB_STATE_OK;

    } while (0);

    // failed?
    if (state != TB_STATE_OK)
    {
        // done func for closing it
        ok = tb_async_transfer_done_func(impl, state);
    }

    // ok?
    return ok;
}
/* wait the semaphore (polling backend).
 * returns the observed positive counter value if signalled, 0 on timeout,
 * -1 on error. timeout < 0 waits forever; polls every 200ms.
 */
tb_long_t tb_semaphore_wait(tb_semaphore_ref_t self, tb_long_t timeout)
{
    // check
    tb_atomic_t* semaphore = (tb_atomic_t*)self;
    tb_assert_and_check_return_val(semaphore, -1);

    // poll the counter until signalled, error or timeout
    tb_long_t result = 0;
    tb_hong_t started = tb_cache_time_spak();
    while (1)
    {
        // snapshot the counter
        tb_long_t count = (tb_long_t)tb_atomic_get(semaphore);

        // negative counter: error
        if (count < 0)
        {
            result = -1;
            break;
        }

        // signalled: consume one and report the observed value
        if (count > 0)
        {
            tb_atomic_fetch_and_dec(semaphore);
            result = count;
            break;
        }

        // no signal yet: timed out?
        if (timeout >= 0 && tb_cache_time_spak() - started >= timeout) break;

        // back off before polling again
        tb_msleep(200);
    }
    return result;
}
/* walker callback: remove finished/killed jobs from a worker's pending list.
 * returns tb_true to ask the iterator to remove the item.
 * jobs are reference counted; the last reference frees the job back to the
 * pool's fixed allocator.
 */
static tb_bool_t tb_thread_pool_worker_walk_clean(tb_iterator_ref_t iterator, tb_cpointer_t item, tb_cpointer_t value)
{
    // the worker pull
    tb_thread_pool_worker_t* worker = (tb_thread_pool_worker_t*)value;
    tb_assert_abort(worker && worker->jobs);

    // the job
    tb_thread_pool_job_t* job = (tb_thread_pool_job_t*)item;
    tb_assert_abort(job);

    // the job state
    tb_size_t state = tb_atomic_get(&job->state);

    // finished or killed? remove it
    tb_bool_t ok = tb_false;
    if (state == TB_STATE_FINISHED || state == TB_STATE_KILLED)
    {
        // trace
        tb_trace_d("worker[%lu]: remove: task[%p:%s] from pending", worker->id, job->task.done, job->task.name);

        // exit the job: run the user's exit callback first
        if (job->task.exit) job->task.exit((tb_thread_pool_worker_ref_t)worker, job->task.priv);

        // remove it from the pending jobs
        ok = tb_true;

        // refn--: another holder still references the job
        if (job->refn > 1) job->refn--;
        // remove it from pool directly: last reference gone
        else
        {
            // the pool
            tb_thread_pool_impl_t* impl = (tb_thread_pool_impl_t*)worker->pool;
            tb_assert_abort(impl);

            // remove it from the jobs pool
            tb_fixed_pool_free(impl->jobs_pool, job);
        }
    }

    // remove it?
    return ok;
}
/* try closing the transfer without blocking.
 * succeeds if it is already closed, or if both streams close immediately;
 * on success the transfer state becomes closed and any pause is cleared.
 */
tb_bool_t tb_async_transfer_clos_try(tb_async_transfer_ref_t transfer)
{
    // check
    tb_async_transfer_impl_t* impl = (tb_async_transfer_impl_t*)transfer;
    tb_assert_and_check_return_val(impl, tb_false);

    // trace
    tb_trace_d("clos: try: ..");

    // done
    tb_bool_t closed = tb_false;
    do
    {
        // already closed? nothing to do
        if (TB_STATE_CLOSED == tb_atomic_get(&impl->state))
        {
            closed = tb_true;
            break;
        }

        // both streams must close without blocking
        if (impl->istream && !tb_async_stream_clos_try(impl->istream)) break;
        if (impl->ostream && !tb_async_stream_clos_try(impl->ostream)) break;

        // mark the transfer closed and clear any pending pause
        tb_atomic_set(&impl->state, TB_STATE_CLOSED);
        tb_atomic_set(&impl->state_pause, TB_STATE_OK);

        // ok
        closed = tb_true;

    } while (0);

    // trace
    tb_trace_d("clos: try: %s", closed? "ok" : "no");

    // ok?
    return closed;
}
/* the timer loop: wait until the earliest task is due (or the event is
 * posted), then spak the timer. runs until the stop flag is set.
 * the work counter tracks whether a loop is running (see tb_timer_exit).
 */
tb_void_t tb_timer_loop(tb_timer_ref_t timer)
{
    // check
    tb_timer_impl_t* impl = (tb_timer_impl_t*)timer;
    tb_assert_and_check_return(impl);

    // work++: mark the loop as running
    tb_atomic_fetch_and_inc(&impl->work);

    // init event lazily, under the lock
    tb_spinlock_enter(&impl->lock);
    if (!impl->event) impl->event = tb_event_init();
    tb_spinlock_leave(&impl->lock);

    // loop
    while (!tb_atomic_get(&impl->stop))
    {
        // the delay until the next task is due
        tb_size_t delay = tb_timer_delay(timer);
        if (delay)
        {
            // the event: grab it under the lock, wait outside the lock
            tb_spinlock_enter(&impl->lock);
            tb_event_ref_t event = impl->event;
            tb_spinlock_leave(&impl->lock);
            tb_check_break(event);

            // wait some time; the event is posted when an earlier task
            // is added or the timer exits
            if (tb_event_wait(event, delay) < 0) break;
        }

        // spak ctime
        if (impl->ctime) tb_cache_time_spak();

        // spak it: fire due tasks
        if (!tb_timer_spak(timer)) break;
    }

    // work--: the loop has exited
    tb_atomic_fetch_and_dec(&impl->work);
}
/* save-progress callback for a spider download task.
 * returns tb_true to continue the transfer while state is TB_STATE_OK;
 * on completion (closed) posts the parser task, otherwise exits the task.
 */
static tb_bool_t tb_demo_spider_task_save(tb_size_t state, tb_hize_t offset, tb_hong_t size, tb_hize_t save, tb_size_t rate, tb_cpointer_t priv)
{
    // check
    tb_demo_spider_task_t* task = (tb_demo_spider_task_t*)priv;
    tb_assert_and_check_return_val(task && task->spider, tb_false);

    // percent (debug-trace only)
#ifdef TB_TRACE_DEBUG
    tb_size_t percent = 0;
    if (size > 0) percent = (tb_size_t)((offset * 100) / size);
    else if (state == TB_STATE_OK) percent = 100;

    // trace
    tb_trace_d("save[%s]: %llu, rate: %lu bytes/s, percent: %lu%%, state: %s", task->iurl, save, rate, percent, tb_state_cstr(state));
#endif

    // ok? continue it
    tb_bool_t ok = tb_false;
    if (state == TB_STATE_OK) ok = tb_true;
    // closed? transfer finished and the spider is still running
    else if (state == TB_STATE_CLOSED && TB_STATE_OK == tb_atomic_get(&task->spider->state))
    {
        // trace
        tb_trace_i("task: done: %s: ok", task->iurl);

        // post parser task: the task is handed over to the parser
        tb_thread_pool_task_post(tb_thread_pool(), "parser_task", tb_demo_spider_parser_task_done, tb_demo_spider_parser_task_exit, task, tb_false);
    }
    // failed or killed?
    else
    {
        // trace
        tb_trace_e("task: done: %s: %s", task->iurl, tb_state_cstr(state));

        // exit task
        tb_demo_spider_task_exit(task);
    }

    // break or continue?
    return ok;
}