/* connect-completion callback: once connected, start receiving from the server
 *
 * @param aice      the completed connect event (code must be TB_AICE_CODE_CONN)
 * @return          tb_false if posting the first recv failed, tb_true otherwise
 */
static tb_bool_t tb_demo_sock_conn_func(tb_aice_t const* aice)
{
    // only a connect completion is expected here
    tb_assert_and_check_return_val(aice && aice->code == TB_AICE_CODE_CONN, tb_false);

    // recover the demo context from the private data
    tb_demo_context_t* context = (tb_demo_context_t*)aice->priv;
    tb_assert_and_check_return_val(context, tb_false);

    // timed out or failed? report and tear the context down
    if (aice->state != TB_STATE_OK)
    {
        // trace
        tb_trace_i("conn[%p]: state: %s", aice->aico, tb_state_cstr(aice->state));

        // exit context
        tb_demo_context_exit(context);
        return tb_true;
    }

    // trace
    tb_trace_i("conn[%p]: ok", aice->aico);

    // connected: kick off the first recv from the server
    return tb_aico_recv(aice->aico, context->data, TB_DEMO_SOCK_RECV_MAXN, tb_demo_sock_recv_func, context)? tb_true : tb_false;
}
/* file read-completion callback: relays the data just read to the client socket
 *
 * @param aice      the completed read event (code must be TB_AICE_CODE_READ)
 * @return          tb_false if posting the send failed, tb_true otherwise
 */
static tb_bool_t tb_demo_file_read_func(tb_aice_ref_t aice)
{
    // only a read completion is expected here
    tb_assert_and_check_return_val(aice && aice->code == TB_AICE_CODE_READ, tb_false);

    // recover the demo context
    tb_demo_context_t* context = (tb_demo_context_t*)aice->priv;
    tb_assert_and_check_return_val(context, tb_false);

    // closed or failed? release the context
    if (aice->state != TB_STATE_OK)
    {
        tb_trace_i("read[%p]: state: %s", aice->aico, tb_state_cstr(aice->state));
        tb_demo_context_exit(context);
        return tb_true;
    }

    // trace
    tb_trace_d("read[%p]: real: %lu, size: %lu, seek: %llu", aice->aico, aice->u.read.real, aice->u.read.size, aice->u.read.seek);

    // forward exactly the bytes actually read to the client aico
    return tb_aico_send(context->aico[0], aice->u.read.data, aice->u.read.real, tb_demo_sock_send_func, context)? tb_true : tb_false;
}
/* periodic task callback: logs the cached clock and re-arms itself every 1001 ms
 *
 * on failure (e.g. killed) it requests a close of the aico instead of rescheduling.
 */
static tb_bool_t tb_demo_task_func(tb_aice_ref_t aice)
{
    // only a runtask completion is expected here
    tb_assert_and_check_return_val(aice && aice->code == TB_AICE_CODE_RUNTASK, tb_false);

    // failed or killed? close the aico and stop the cycle
    if (aice->state != TB_STATE_OK)
    {
        // trace
        tb_trace_i("task[%p]: state: %s", aice->aico, tb_state_cstr(aice->state));

        // clos aico
        if (aice->aico) tb_aico_clos(aice->aico, tb_demo_aico_clos, tb_null);
        return tb_true;
    }

    // trace
    tb_trace_i("task[%p]: now: %lld", aice->aico, tb_cache_time_mclock());

    // schedule the next run, carrying the same private data forward
    return tb_aico_task_run(aice->aico, 1001, tb_demo_task_func, aice->priv)? tb_true : tb_false;
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */
/* transfer progress callback for the null-stream demo
 *
 * logs the running totals; when the transfer leaves the OK state (finished,
 * closed or failed) it posts the event passed in priv so the waiter wakes up.
 *
 * @return          always tb_true (never aborts the transfer from here)
 */
static tb_bool_t tb_demo_async_stream_null_done_func(tb_size_t state, tb_hize_t offset, tb_hong_t size, tb_hize_t save, tb_size_t rate, tb_cpointer_t priv)
{
    // trace
    tb_trace_i("save: %llu bytes, rate: %lu bytes/s, state: %s", save, rate, tb_state_cstr(state));

    // no longer running? wake the waiter
    tb_bool_t finished = (state != TB_STATE_OK);
    if (finished) tb_event_post((tb_event_ref_t)priv);

    // ok
    return tb_true;
}
/* kill a posted task
 *
 * only transitions the job from TB_STATE_WAITING to TB_STATE_KILLING via an
 * atomic compare-and-set; a job that is already running (or finished) is left
 * untouched, so this is a best-effort cancel of not-yet-started work.
 *
 * @param pool      the thread pool (checked non-null only; not otherwise used here)
 * @param task      the task handle, which is the job object itself
 */
tb_void_t tb_thread_pool_task_kill(tb_thread_pool_ref_t pool, tb_thread_pool_task_ref_t task)
{
    // the task handle is the job object
    tb_thread_pool_job_t* job = (tb_thread_pool_job_t*)task;
    tb_assert_and_check_return(pool && job);

    // trace the current job state before attempting the transition
    tb_trace_d("task[%p:%s]: kill: state: %s: ..", job->task.done, job->task.name, tb_state_cstr(tb_atomic_get(&job->state)));

    // kill it only if it is still waiting (atomic WAITING -> KILLING)
    tb_atomic_pset(&job->state, TB_STATE_WAITING, TB_STATE_KILLING);
}
/* socket recv-completion callback: appends received bytes to the file and tracks throughput
 *
 * posts an async file writ at the current file offset (context->size), then
 * advances the offset and updates a rolling bytes-per-second rate ("sped")
 * sampled roughly once per second via tb_mclock().
 *
 * @return          tb_false if posting the writ failed, tb_true otherwise
 */
static tb_bool_t tb_demo_sock_recv_func(tb_aice_t const* aice)
{
    // only a recv completion is expected here
    tb_assert_and_check_return_val(aice && aice->code == TB_AICE_CODE_RECV, tb_false);

    // the demo context
    tb_demo_context_t* context = (tb_demo_context_t*)aice->priv;
    tb_assert_and_check_return_val(context, tb_false);

    // ok?
    if (aice->state == TB_STATE_OK)
    {
        // trace
        tb_trace_d("recv[%p]: real: %lu, size: %lu", aice->aico, aice->u.recv.real, aice->u.recv.size);

        // post writ to file; note the offset argument is context->size BEFORE
        // the increment below, i.e. the write lands at the old end of file
        if (!tb_aico_writ(context->file, context->size, aice->u.recv.data, aice->u.recv.real, tb_demo_file_writ_func, context)) return tb_false;

        // advance the file offset for the next writ
        context->size += aice->u.recv.real;

        // accumulate bytes for the current measurement window
        context->peak += aice->u.recv.real;
        if (!context->time)
        {
            // first packet: start both the window clock and the base clock
            context->time = tb_mclock();
            context->base = tb_mclock();
            context->sped = context->peak;
        }
        else if (tb_mclock() > context->time + 1000)
        {
            // window elapsed (>1s): publish the window total as the speed and reset
            context->sped = context->peak;
            context->peak = 0;
            context->time = tb_mclock();

            // trace (sped/1000 is an approximation of KB/s)
            tb_trace_i("recv[%p]: size: %llu, sped: %lu KB/s", aice->aico, context->size, context->sped / 1000);
        }
    }
    // closed or failed? tear down the context
    else
    {
        // trace
        tb_trace_i("recv[%p]: state: %s", aice->aico, tb_state_cstr(aice->state));

        // exit context
        tb_demo_context_exit(context);
    }

    // ok
    return tb_true;
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */
/* close-completion callback: frees the aico once its async close has finished
 *
 * @param aice      the completed close event (code must be TB_AICE_CODE_CLOS)
 * @return          always tb_true
 */
static tb_bool_t tb_demo_aico_clos(tb_aice_ref_t aice)
{
    // a close completion with a live aico is required
    tb_assert_and_check_return_val(aice && aice->aico && aice->code == TB_AICE_CODE_CLOS, tb_false);

    // trace
    tb_trace_d("aico[%p]: clos: %s", aice->aico, tb_state_cstr(aice->state));

    // the aico is fully closed now, so release it
    tb_aico_exit(aice->aico);

    // ok
    return tb_true;
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */
/* close-completion used while the stream was still opening
 *
 * marks the stream closed and then reports the saved clos_opening state/func/priv
 * that were captured when the close was requested.
 *
 * NOTE(review): the `state` parameter of this callback is ignored; the stored
 * impl->clos_opening.state is reported instead — presumably intentional
 * (the open failure reason takes precedence over the close result), but
 * worth confirming against the callers that set clos_opening.
 */
static tb_void_t tb_async_stream_clos_opening(tb_async_stream_ref_t stream, tb_size_t state, tb_cpointer_t priv)
{
    // the stream implementation
    tb_async_stream_impl_t* impl = tb_async_stream_impl(stream);
    tb_assert_and_check_return(impl);

    // trace
    tb_trace_d("clos: opening: %s, state: %s", tb_url_cstr(&impl->url), tb_state_cstr(impl->clos_opening.state));

    // publish the closed state
    tb_atomic_set(&impl->istate, TB_STATE_CLOSED);

    // notify with the state saved at close-request time, not the close result
    if (impl->clos_opening.func) impl->clos_opening.func(stream, impl->clos_opening.state, impl->clos_opening.priv);
}
/* output-stream sync-completion: final step after the input stream closed
 *
 * computes the whole-transfer average rate and reports completion; a successful
 * sync is reported as TB_STATE_CLOSED (transfer finished), any other state is
 * passed through unchanged.
 *
 * @return          the result of the done notification
 */
static tb_bool_t tb_async_transfer_ostream_sync_func(tb_async_stream_ref_t stream, tb_size_t state, tb_bool_t bclosing, tb_cpointer_t priv)
{
    // the transfer carried in priv
    tb_async_transfer_impl_t* impl = (tb_async_transfer_impl_t*)priv;
    tb_assert_and_check_return_val(stream && impl && impl->aicp && impl->istream, tb_false);

    // trace
    tb_trace_d("sync: state: %s", tb_state_cstr(state));

    // the current aicp time
    tb_hong_t time = tb_aicp_time(impl->aicp);

    // compute the total average rate (bytes/s); falls back to the raw saved
    // size when no measurable time has elapsed
    impl->done.current_rate = (impl->done.saved_size && (time > impl->done.base_time))? (tb_size_t)((impl->done.saved_size * 1000) / (time - impl->done.base_time)) : (tb_size_t)impl->done.saved_size;

    // report: a clean sync means the transfer is closed
    return tb_async_transfer_done_func(impl, state == TB_STATE_OK? TB_STATE_CLOSED : state);
}
/* wait until all tasks in the pool have finished, or the timeout expires
 *
 * polls the job pool size under the spinlock every 200 ms.
 *
 * @param pool      the thread pool
 * @param timeout   milliseconds to wait; negative means wait forever
 * @return          1 if all tasks finished, 0 on timeout, -1 on bad arguments
 */
tb_long_t tb_thread_pool_task_wait_all(tb_thread_pool_ref_t pool, tb_long_t timeout)
{
    // the pool implementation
    tb_thread_pool_impl_t* impl = (tb_thread_pool_impl_t*)pool;
    tb_assert_and_check_return_val(impl, -1);

    // poll until no jobs remain or the deadline passes
    tb_size_t size = 0;
    tb_hong_t time = tb_cache_time_spak();
    while ((timeout < 0 || tb_cache_time_spak() < time + timeout))
    {
        // enter: the job lists/pool are only consistent under the lock
        tb_spinlock_enter(&impl->lock);

        // the number of live jobs (0 if the pool was never created)
        size = impl->jobs_pool? tb_fixed_pool_size(impl->jobs_pool) : 0;

        // trace
        tb_trace_d("wait: jobs: %lu, waiting: %lu, pending: %lu, urgent: %lu: .."
            , size
            , tb_list_entry_size(&impl->jobs_waiting)
            , tb_list_entry_size(&impl->jobs_pending)
            , tb_list_entry_size(&impl->jobs_urgent));
#if 0
        tb_for_all_if (tb_thread_pool_job_t*, job, tb_list_entry_itor(&impl->jobs_pending), job)
        {
            tb_trace_d("wait: job: %s from pending", tb_state_cstr(tb_atomic_get(&job->state)));
        }
#endif
        // leave before sleeping so workers can make progress
        tb_spinlock_leave(&impl->lock);

        // done?
        tb_check_break(size);

        // back off before the next poll
        tb_msleep(200);
    }

    // 1 only if the pool drained completely
    return !size? 1 : 0;
}
/* open-completion used by the open-then-done convenience path
 *
 * on a successful open (and if the transfer was not killed meanwhile) it starts
 * the actual data transfer with the saved done callback; on any failure it
 * reports the failure through the done callback, which also closes the transfer.
 *
 * @return          tb_true, or the result of the failure notification
 */
static tb_bool_t tb_async_transfer_open_done_func(tb_size_t state, tb_hize_t offset, tb_hong_t size, tb_cpointer_t priv)
{
    // the transfer carried in priv; a done callback must have been saved
    tb_async_transfer_impl_t* impl = (tb_async_transfer_impl_t*)priv;
    tb_assert_and_check_return_val(impl && impl->done.func, tb_false);

    // trace
    tb_trace_d("open_done: offset: %llu, size: %lld, state: %s", offset, size, tb_state_cstr(state));

    // done
    tb_bool_t ok = tb_true;
    do
    {
        // open failed? fall through to the failure path with its state
        tb_check_break(state == TB_STATE_OK);

        // pessimistic default for the steps below
        state = TB_STATE_UNKNOWN_ERROR;

        // killed while opening?
        if (TB_STATE_KILLING == tb_atomic_get(&impl->state))
        {
            state = TB_STATE_KILLED;
            break;
        }

        // start the transfer with the saved callback
        if (!tb_async_transfer_done((tb_async_transfer_ref_t)impl, impl->done.func, impl->done.priv)) break;

        // ok
        state = TB_STATE_OK;
    } while (0);

    // failed? report (this also triggers the close path)
    if (state != TB_STATE_OK)
    {
        // done func for closing it
        ok = tb_async_transfer_done_func(impl, state);
    }

    // ok?
    return ok;
}
/* socket send-completion callback for the file-to-socket relay
 *
 * resends any unsent remainder; once the buffer is fully sent it posts the next
 * file read. context->size doubles as both the running byte total and the file
 * read offset, so it is advanced before deciding whether to continue.
 *
 * @return          tb_false if posting the next send/read failed, tb_true otherwise
 */
static tb_bool_t tb_demo_sock_send_func(tb_aice_ref_t aice)
{
    // only a send completion is expected here
    tb_assert_and_check_return_val(aice && aice->code == TB_AICE_CODE_SEND, tb_false);

    // the demo context
    tb_demo_context_t* context = (tb_demo_context_t*)aice->priv;
    tb_assert_and_check_return_val(context, tb_false);

    // ok?
    if (aice->state == TB_STATE_OK)
    {
        // trace
        tb_trace_d("send[%p]: real: %lu, size: %lu", aice->aico, aice->u.send.real, aice->u.send.size);

        // advance the total sent / next file offset
        context->size += aice->u.send.real;

        // short send? push the remaining tail of the same buffer
        if (aice->u.send.real < aice->u.send.size)
        {
            // post send to client
            if (!tb_aico_send(aice->aico, aice->u.send.data + aice->u.send.real, aice->u.send.size - aice->u.send.real, tb_demo_sock_send_func, context)) return tb_false;
        }
        // buffer drained: fetch the next chunk from the file at context->size
        else
        {
            // post read from file
            if (!tb_aico_read(context->aico[1], context->size, context->data, TB_DEMO_FILE_READ_MAXN, tb_demo_file_read_func, context)) return tb_false;
        }
    }
    // closed or failed? tear down the context
    else
    {
        tb_trace_i("send[%p]: state: %s", aice->aico, tb_state_cstr(aice->state));
        tb_demo_context_exit(context);
    }

    // ok
    return tb_true;
}
/* spider download progress/completion callback
 *
 * while the transfer is running (TB_STATE_OK) it just continues; when it closes
 * cleanly and the spider is still alive it hands the downloaded page to a
 * parser task on the thread pool; any other end state frees the task.
 *
 * @return          tb_true to continue the transfer, tb_false to stop it
 *                  (the transfer always stops on CLOSED/failure regardless)
 */
static tb_bool_t tb_demo_spider_task_save(tb_size_t state, tb_hize_t offset, tb_hong_t size, tb_hize_t save, tb_size_t rate, tb_cpointer_t priv)
{
    // the spider task carried in priv
    tb_demo_spider_task_t* task = (tb_demo_spider_task_t*)priv;
    tb_assert_and_check_return_val(task && task->spider, tb_false);

    // debug-only progress report
#ifdef TB_TRACE_DEBUG
    // percent: derived from offset/size, or only meaningful once finished
    tb_size_t percent = 0;
    if (size > 0) percent = (tb_size_t)((offset * 100) / size);
    else if (state == TB_STATE_OK) percent = 100;

    // trace
    tb_trace_d("save[%s]: %llu, rate: %lu bytes/s, percent: %lu%%, state: %s", task->iurl, save, rate, percent, tb_state_cstr(state));
#endif

    // still running? keep going
    tb_bool_t ok = tb_false;
    if (state == TB_STATE_OK) ok = tb_true;
    // finished cleanly and the spider itself has not been stopped?
    else if (state == TB_STATE_CLOSED && TB_STATE_OK == tb_atomic_get(&task->spider->state))
    {
        // trace
        tb_trace_i("task: done: %s: ok", task->iurl);

        // hand the task over to the parser; the exit callback owns the task now
        tb_thread_pool_task_post(tb_thread_pool(), "parser_task", tb_demo_spider_parser_task_done, tb_demo_spider_parser_task_exit, task, tb_false);
    }
    // failed or killed? free the task here
    else
    {
        // trace
        tb_trace_e("task: done: %s: %s", task->iurl, tb_state_cstr(state));

        // exit task
        tb_demo_spider_task_exit(task);
    }

    // break or continue?
    return ok;
}
/* file writ-completion callback for the socket-to-file relay
 *
 * rewrites any unwritten remainder at the advanced seek position; once the
 * buffer is fully written it posts the next socket recv.
 *
 * @return          tb_false if posting the next writ/recv failed, tb_true otherwise
 */
static tb_bool_t tb_demo_file_writ_func(tb_aice_t const* aice)
{
    // only a writ completion is expected here
    tb_assert_and_check_return_val(aice && aice->code == TB_AICE_CODE_WRIT, tb_false);

    // the demo context
    tb_demo_context_t* context = (tb_demo_context_t*)aice->priv;
    tb_assert_and_check_return_val(context, tb_false);

    // ok?
    if (aice->state == TB_STATE_OK)
    {
        // trace
        tb_trace_d("writ[%p]: real: %lu, size: %lu", aice->aico, aice->u.writ.real, aice->u.writ.size);

        // short write? continue with the remaining tail at the advanced offset
        if (aice->u.writ.real < aice->u.writ.size)
        {
            // post writ to file
            if (!tb_aico_writ(aice->aico, aice->u.writ.seek + aice->u.writ.real, aice->u.writ.data + aice->u.writ.real, aice->u.writ.size - aice->u.writ.real, tb_demo_file_writ_func, context)) return tb_false;
        }
        // fully written: ask the socket for more data
        else
        {
            // post recv from server
            if (!tb_aico_recv(context->sock, context->data, TB_DEMO_SOCK_RECV_MAXN, tb_demo_sock_recv_func, context)) return tb_false;
        }
    }
    // closed or failed? tear down the context
    else
    {
        // trace (note: this message lacks the "state:" prefix used by the
        // sibling callbacks — cosmetic log inconsistency only)
        tb_trace_i("writ[%p]: %s", aice->aico, tb_state_cstr(aice->state));

        // exit context
        tb_demo_context_exit(context);
    }

    // ok
    return tb_true;
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * test
 */
/* run one SQL statement and dump its result set row by row
 *
 * NOTE(review): this block appears TRUNCATED in the provided source — the
 * do/while and the function body are not closed here; the remainder
 * presumably follows in the original file. Only comments were added.
 */
static tb_void_t tb_demo_database_sql_test_done(tb_database_sql_ref_t database, tb_char_t const* sql)
{
    // check
    tb_assert_and_check_return(database && sql);

    // done
    do
    {
        // execute the statement; on failure report the database's own error state
        if (!tb_database_sql_done(database, sql))
        {
            // trace
            tb_trace_e("done %s failed, error: %s", sql, tb_state_cstr(tb_database_sql_state(database)));
            break ;
        }

        // load result (tb_false: non-balanced/streaming load)
        // tb_iterator_ref_t result = tb_database_sql_result_load(database, tb_true);
        tb_iterator_ref_t result = tb_database_sql_result_load(database, tb_false);
        tb_check_break(result);

        // trace
        tb_trace_i("==============================================================================");
        tb_trace_i("row: size: %lu", tb_iterator_size(result));

        // walk every row of the result set
        tb_for_all_if (tb_iterator_ref_t, row, result, row)
        {
            // trace
            tb_tracef_i("[row: %lu, col: size: %lu]: ", row_itor, tb_iterator_size(row));

            // walk every column value in this row
            tb_for_all_if (tb_database_sql_value_t*, value, row, value)
            {
                // trace
                tb_tracet_i("[%s:%s] ", tb_database_sql_value_name(value), tb_database_sql_value_text(value));
            }

            // trace
            tb_tracet_i(__tb_newline__);
        }
/* sendfile-completion callback: streams a file to the socket via tb_aico_sendf
 *
 * context->size tracks both the total bytes sent and the next file offset; it
 * is advanced before deciding whether more remains to send.
 *
 * @return          tb_false if posting the next sendf failed, tb_true otherwise
 */
static tb_bool_t tb_demo_sock_sendf_func(tb_aice_ref_t aice)
{
    // only a sendf completion is expected here
    tb_assert_and_check_return_val(aice && aice->code == TB_AICE_CODE_SENDF, tb_false);

    // the demo context
    tb_demo_context_t* context = (tb_demo_context_t*)aice->priv;
    tb_assert_and_check_return_val(context, tb_false);

    // ok?
    if (aice->state == TB_STATE_OK)
    {
        // trace
        tb_trace_d("sendf[%p]: real: %lu, size: %llu", aice->aico, aice->u.sendf.real, aice->u.sendf.size);

        // advance the total sent / next file offset
        context->size += aice->u.sendf.real;

        // more left in this request? continue from the advanced offset
        if (aice->u.sendf.real < aice->u.sendf.size)
        {
            // post sendf from file
            if (!tb_aico_sendf(aice->aico, context->file, context->size, aice->u.sendf.size - aice->u.sendf.real, tb_demo_sock_sendf_func, context)) return tb_false;
        }
        else
        {
            // whole request sent: we are done
            tb_trace_i("sendf[%p]: finished", aice->aico);
            tb_demo_context_exit(context);
        }
    }
    // closed or failed? tear down the context
    else
    {
        tb_trace_i("sendf[%p]: state: %s", aice->aico, tb_state_cstr(aice->state));
        tb_demo_context_exit(context);
    }

    // ok
    return tb_true;
}
/* file-aico close-completion: frees the file aico, then chains the socket close
 *
 * part of a two-step shutdown: after the file aico finishes closing, this
 * requests the close of the companion socket aico and clears the reference so
 * it cannot be closed twice.
 */
static tb_bool_t tb_demo_file_aico_clos(tb_aice_t const* aice)
{
    // a close completion with a live aico is required
    tb_assert_and_check_return_val(aice && aice->aico && aice->code == TB_AICE_CODE_CLOS, tb_false);

    // the demo context
    tb_demo_context_t* context = (tb_demo_context_t*)aice->priv;
    tb_assert_and_check_return_val(context, tb_false);

    // trace
    tb_trace_d("aico[%p]: clos: %s", aice->aico, tb_state_cstr(aice->state));

    // the file aico is closed: release it
    tb_aico_exit(aice->aico);

    // chain the socket close, then drop our reference
    if (context->sock) tb_aico_clos(context->sock, tb_demo_sock_aico_clos, tb_null);
    context->sock = tb_null;

    // ok
    return tb_true;
}
/* input-stream close-completion: second step of closing a transfer
 *
 * on success it chains the close of the output stream (whose completion
 * finishes the transfer close); on any failure it reports the close result
 * immediately so the caller is never left waiting.
 */
static tb_void_t tb_async_transfer_istream_clos_func(tb_async_stream_ref_t stream, tb_size_t state, tb_cpointer_t priv)
{
    // the transfer carried in priv
    tb_async_transfer_impl_t* impl = (tb_async_transfer_impl_t*)priv;
    tb_assert_and_check_return(stream && impl);

    // trace
    tb_trace_d("clos: istream: %s, state: %s", tb_url_cstr(tb_async_stream_url(stream)), tb_state_cstr(state));

    // done
    do
    {
        // istream close failed? report below
        tb_check_break(state == TB_STATE_OK);

        // pessimistic default for the step below
        state = TB_STATE_UNKNOWN_ERROR;

        // chain the output stream close
        if (!tb_async_stream_clos(impl->ostream, tb_async_transfer_ostream_clos_func, impl)) break;

        // ok
        state = TB_STATE_OK;
    } while (0);

    // failed? report the close result now
    if (state != TB_STATE_OK)
    {
        // trace
        tb_trace_e("clos: failed: %s", tb_state_cstr(state));

        // done func
        tb_async_transfer_clos_func(impl, state);
    }
}
/* output-stream writ-completion: the core pump of the transfer loop
 *
 * accounts the bytes just written, maintains a per-second rate window
 * (base_time1s / saved_size1s), computes an optional delay to honour the
 * limited_rate setting, and then decides what happens next:
 *   - short write        -> continue writing (return tb_true)
 *   - pause requested    -> report TB_STATE_PAUSED and stop
 *   - otherwise          -> schedule the next istream read (possibly delayed)
 * on any failure it computes the final average rate and reports the state.
 *
 * @return          tb_true to let the stream continue writing, tb_false to stop
 */
static tb_bool_t tb_async_transfer_ostream_writ_func(tb_async_stream_ref_t stream, tb_size_t state, tb_byte_t const* data, tb_size_t real, tb_size_t size, tb_cpointer_t priv)
{
    // the transfer carried in priv
    tb_async_transfer_impl_t* impl = (tb_async_transfer_impl_t*)priv;
    tb_assert_and_check_return_val(stream && impl && impl->aicp && impl->istream, tb_false);

    // trace
    tb_trace_d("writ: real: %lu, size: %lu, state: %s", real, size, tb_state_cstr(state));

    // the current aicp time (used by both the window logic and the failure path)
    tb_hong_t time = tb_aicp_time(impl->aicp);

    // done
    tb_bool_t bwrit = tb_false;
    do
    {
        // writ failed? fall through with its state
        tb_check_break(state == TB_STATE_OK);

        // pessimistic default for the steps below
        state = TB_STATE_UNKNOWN_ERROR;

        // very first bytes? emit the initial progress notification before accounting
        if (!impl->done.saved_size && !tb_async_transfer_done_func(impl, TB_STATE_OK)) break;

        // account the bytes written in total
        impl->done.saved_size += real;

        // rate window bookkeeping; delay implements the rate limit
        tb_size_t delay = 0;
        tb_size_t limited_rate = tb_atomic_get(&impl->limited_rate);
        if (time < impl->done.base_time1s + 1000)
        {
            // still inside the current 1s window: accumulate
            impl->done.saved_size1s += real;

            // within the first second of the whole transfer the window total IS the rate
            if (time < impl->done.base_time + 1000) impl->done.current_rate = impl->done.saved_size1s;

            // over the cap? stall until the window ends
            if (limited_rate) delay = impl->done.saved_size1s >= limited_rate? (tb_size_t)(impl->done.base_time1s + 1000 - time) : 0;
        }
        else
        {
            // window rolled over: publish the rate and start a fresh window
            impl->done.current_rate = impl->done.saved_size1s;
            impl->done.base_time1s = time;
            impl->done.saved_size1s = 0;
            delay = 0;

            // periodic progress notification
            if (!tb_async_transfer_done_func(impl, TB_STATE_OK)) break;
        }

        // killed meanwhile?
        if (TB_STATE_KILLING == tb_atomic_get(&impl->state))
        {
            state = TB_STATE_KILLED;
            break;
        }

        // short write? let the stream keep writing the remainder
        tb_size_t state_pause = TB_STATE_OK;
        if (real < size) bwrit = tb_true;
        // pause requested? (atomically flip PAUSING -> PAUSED; either value stops us)
        else if ( (TB_STATE_PAUSED == (state_pause = tb_atomic_fetch_and_pset(&impl->state_pause, TB_STATE_PAUSING, TB_STATE_PAUSED)))
               || (state_pause == TB_STATE_PAUSING))
        {
            // report the pause
            if (!tb_async_transfer_done_func(impl, TB_STATE_PAUSED)) break;
        }
        // otherwise schedule the next read, honouring the rate-limit delay
        else
        {
            // trace
            tb_trace_d("delay: %lu ms", delay);

            // continue to read it
            if (!tb_async_stream_read_after(impl->istream, delay, limited_rate, tb_async_transfer_istream_read_func, (tb_pointer_t)impl)) break;
        }

        // ok
        state = TB_STATE_OK;
    } while (0);

    // failed?
    if (state != TB_STATE_OK)
    {
        // compute the whole-transfer average rate for the final report
        impl->done.current_rate = (impl->done.saved_size && (time > impl->done.base_time))? (tb_size_t)((impl->done.saved_size * 1000) / (time - impl->done.base_time)) : (tb_size_t)impl->done.saved_size;

        // report the failure
        tb_async_transfer_done_func(impl, state);

        // stop writing
        bwrit = tb_false;
    }

    // continue to writ or break it
    return bwrit;
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * private implementation
 */
/* walker invoked for each aico still alive when the aicp is being torn down
 *
 * purely diagnostic: logs the leaked aico (with the posting call site in debug
 * builds) and returns tb_true so the walk continues over every remaining aico.
 */
static tb_bool_t tb_aicp_walk_wait(tb_pointer_t item, tb_cpointer_t priv)
{
    // the aico being visited
    tb_aico_impl_t* aico = (tb_aico_impl_t*)item;
    tb_assert_and_check_return_val(aico, tb_false);

    // report the leak; debug builds also show who posted the pending request
#ifdef __tb_debug__
    tb_trace_e("aico[%p]: wait exited failed, type: %lu, handle: %p, state: %s for func: %s, line: %lu, file: %s", aico, tb_aico_type((tb_aico_ref_t)aico), aico->handle, tb_state_cstr(tb_atomic_get(&aico->state)), aico->func, aico->line, aico->file);
#else
    tb_trace_e("aico[%p]: wait exited failed, type: %lu, handle: %p, state: %s", aico, tb_aico_type((tb_aico_ref_t)aico), aico->handle, tb_state_cstr(tb_atomic_get(&aico->state)));
#endif

    // keep walking
    return tb_true;
}
/* input-stream open-completion for the async_stream demo
 *
 * once the source stream is open this builds the output file stream (either
 * the path given by the "more0" option, or <cwd>/<basename-of-url> as a
 * fallback), wires both into a new transfer, applies the optional rate limit,
 * and starts the transfer. Any failure posts context->event so the waiting
 * main thread wakes up.
 *
 * @return          tb_true if the transfer was started, tb_false otherwise
 */
static tb_bool_t tb_demo_istream_open_func(tb_async_stream_ref_t stream, tb_size_t state, tb_cpointer_t priv)
{
    // the demo context carried in priv
    tb_demo_context_t* context = (tb_demo_context_t*)priv;
    tb_assert_and_check_return_val(stream && context && context->option, tb_false);

    // done
    tb_bool_t ok = tb_false;
    do
    {
        // open failed?
        if (state != TB_STATE_OK)
        {
            // print verbose info
            if (context->verbose)
            {
                tb_char_t const* url = tb_null;
                tb_async_stream_ctrl(stream, TB_STREAM_CTRL_GET_URL, &url);
                tb_printf("open: %s: %s\n", url, tb_state_cstr(state));
            }
            break;
        }

        // trace
        if (context->verbose) tb_printf("open: ok\n");

        // explicit output path given via the "more0" option?
        if (tb_option_find(context->option, "more0"))
        {
            // the path
            tb_char_t const* path = tb_option_item_cstr(context->option, "more0");

            // init the output file stream (create/truncate)
            context->ostream = tb_async_stream_init_from_file(tb_async_stream_aicp((tb_async_stream_ref_t)stream), path, TB_FILE_MODE_RW | TB_FILE_MODE_CREAT | TB_FILE_MODE_BINARY | TB_FILE_MODE_TRUNC);

            // print verbose info
            if (context->verbose) tb_printf("save: %s: ..\n", path);
        }
        else
        {
            // derive the file name from the url basename ('/' or '\\'), with a fallback
            tb_char_t const* name = tb_strrchr(tb_option_item_cstr(context->option, "url"), '/');
            if (!name) name = tb_strrchr(tb_option_item_cstr(context->option, "url"), '\\');
            if (!name) name = "/async_stream.file";

            // save into the current working directory
            tb_char_t path[TB_PATH_MAXN] = {0};
            if (tb_directory_curt(path, TB_PATH_MAXN))
            {
                // append name (name still carries its leading separator)
                tb_strcat(path, name);

                // init the output file stream (create/truncate)
                context->ostream = tb_async_stream_init_from_file(tb_async_stream_aicp((tb_async_stream_ref_t)stream), path, TB_FILE_MODE_RW | TB_FILE_MODE_CREAT | TB_FILE_MODE_BINARY | TB_FILE_MODE_TRUNC);

                // print verbose info
                if (context->verbose) tb_printf("save: %s: ..\n", path);
            }
        }
        tb_assert_and_check_break(context->ostream);

        // init transfer
        context->transfer = tb_async_transfer_init(tb_null, tb_true);
        tb_assert_and_check_break(context->transfer);

        // wire both streams into the transfer
        if (!tb_async_transfer_init_istream(context->transfer, stream)) break;
        if (!tb_async_transfer_init_ostream(context->transfer, context->ostream)) break;

        // optional rate limit from the command line
        if (tb_option_find(context->option, "limitrate")) tb_async_transfer_limitrate(context->transfer, tb_option_item_uint32(context->option, "limitrate"));

        // open and start the transfer
        if (!tb_async_transfer_open_done(context->transfer, 0, tb_demo_transfer_done_func, context)) break;

        // ok
        ok = tb_true;
    } while (0);

    // open failed? wake the waiting main thread
    if (state != TB_STATE_OK && context->event) tb_event_post(context->event);

    // ok?
    return ok;
}
/* transfer progress/completion callback for the async_stream demo
 *
 * optionally prints progress; when the transfer leaves the OK state it posts
 * the context event so the waiting main thread wakes up.
 *
 * @return          tb_true to keep the transfer running, tb_false to stop it
 */
static tb_bool_t tb_demo_transfer_done_func(tb_size_t state, tb_hize_t offset, tb_hong_t size, tb_hize_t save, tb_size_t rate, tb_cpointer_t priv)
{
    // recover the demo context
    tb_demo_context_t* context = (tb_demo_context_t*)priv;
    tb_assert_and_check_return_val(context && context->option, tb_false);

    // verbose? report progress
    if (context->verbose)
    {
        // completion percentage: from offset/size, or 100% once the stream closed
        tb_size_t percent = 0;
        if (size > 0) percent = (tb_size_t)((offset * 100) / size);
        else if (state == TB_STATE_CLOSED) percent = 100;

        // trace
        tb_printf("save: %llu bytes, rate: %lu bytes/s, percent: %lu%%, state: %s\n", save, rate, percent, tb_state_cstr(state));
    }

    // no longer running? wake the waiter
    if (state != TB_STATE_OK && context->event) tb_event_post(context->event);

    // continue only while the transfer is still in progress
    return state == TB_STATE_OK? tb_true : tb_false;
}
/* http post progress callback: logs progress and always continues
 *
 * @return          always tb_true
 */
static tb_bool_t tb_demo_http_post_func(tb_size_t state, tb_hize_t offset, tb_hong_t size, tb_hize_t save, tb_size_t rate, tb_cpointer_t priv)
{
    // completion percentage: from offset/size, or 100% once closed with unknown size
    tb_size_t percent = (size > 0)? (tb_size_t)((offset * 100) / size) : ((state == TB_STATE_CLOSED)? 100 : 0);

    // trace
    tb_trace_i("post: %llu, rate: %lu bytes/s, percent: %lu%%, state: %s", save, rate, percent, tb_state_cstr(state));

    // ok
    return tb_true;
}
/* run the aicp worker loop until the proactor stops or the stop predicate fires
 *
 * each iteration spaks one completed aice from the proactor, fixes up the
 * owning aico's state (pending -> opened, killing -> killed), invokes the
 * completion callback, and finally checks the optional stop predicate.
 *
 * @param aicp      the aicp
 * @param stop      optional predicate checked after every dispatched event;
 *                  returning tb_true kills the aicp (ending all loops)
 * @param priv      passed through to the stop predicate
 */
tb_void_t tb_aicp_loop_util(tb_aicp_ref_t aicp, tb_bool_t (*stop)(tb_cpointer_t priv), tb_cpointer_t priv)
{
    // the aicp implementation
    tb_aicp_impl_t* impl = (tb_aicp_impl_t*)aicp;
    tb_assert_and_check_return(impl);

    // the proactor must provide a spak entry
    tb_aicp_ptor_impl_t* ptor = impl->ptor;
    tb_assert_and_check_return(ptor && ptor->loop_spak);

    // cache the spak function pointer for the hot loop
    tb_long_t (*loop_spak)(tb_aicp_ptor_impl_t* , tb_handle_t, tb_aice_ref_t , tb_long_t ) = ptor->loop_spak;

    // count this thread as a live worker
    tb_atomic_fetch_and_inc(&impl->work);

    // per-loop proactor state, if the proactor needs any
    tb_handle_t loop = ptor->loop_init? ptor->loop_init(ptor) : tb_null;

    // trace
    tb_trace_d("loop[%p]: init", loop);

    // refresh the cached clock before entering the loop
    tb_cache_time_spak();

    // loop
    while (1)
    {
        // wait for one completed event (-1: block indefinitely)
        tb_aice_t resp = {0};
        tb_long_t ok = loop_spak(ptor, loop, &resp, -1);

        // refresh the cached clock after (possibly long) blocking
        tb_cache_time_spak();

        // proactor stopped or failed?
        tb_check_break(ok >= 0);

        // spurious wakeup / timeout? spak again
        tb_check_continue(ok);

        // the aico owning this completion
        tb_aico_impl_t* aico = (tb_aico_impl_t*)resp.aico;
        tb_assert_and_check_continue(aico);

        // trace
        tb_trace_d("loop[%p]: spak: code: %lu, aico: %p, state: %s: %ld", loop, resp.code, aico, aico? tb_state_cstr(tb_atomic_get(&aico->state)) : "null", ok);

        // move the aico back from PENDING to OPENED so new requests can be
        // posted — except for a successful accept, whose aico stays as-is
        tb_size_t state = TB_STATE_OPENED;
        state = (resp.code != TB_AICE_CODE_ACPT || resp.state != TB_STATE_OK)? tb_atomic_fetch_and_pset(&aico->state, TB_STATE_PENDING, state) : tb_atomic_get(&aico->state);

        // aico was killed while the request was in flight?
        if (state == TB_STATE_KILLED || state == TB_STATE_KILLING)
        {
            // the callback must observe the killed state
            resp.state = TB_STATE_KILLED;

            // finish the KILLING -> KILLED transition
            tb_atomic_fetch_and_pset(&aico->state, TB_STATE_KILLING, TB_STATE_KILLED);
        }

        // dispatch; note the callback may close/exit the aico
        if (resp.func && !resp.func(&resp))
        {
            // trace
#ifdef __tb_debug__
            tb_trace_e("loop[%p]: done aice func failed with code: %lu at line: %lu, func: %s, file: %s!", loop, resp.code, aico->line, aico->func, aico->file);
#else
            tb_trace_e("loop[%p]: done aice func failed with code: %lu!", loop, resp.code);
#endif
        }

        // finish any KILLING -> KILLED transition raced in by the callback
        tb_atomic_fetch_and_pset(&aico->state, TB_STATE_KILLING, TB_STATE_KILLED);

        // caller asked us to stop? kill the whole aicp
        if (stop && stop(priv)) tb_aicp_kill(aicp);
    }

    // release the per-loop proactor state
    if (ptor->loop_exit) ptor->loop_exit(ptor, loop);

    // this worker is gone
    tb_atomic_fetch_and_dec(&impl->work);

    // trace
    tb_trace_d("loop[%p]: exit", loop);
}
/* post an aice to the proactor (debug-annotated entry point)
 *
 * atomically moves the owning aico from OPENED to PENDING; only one request
 * may be in flight per aico, so posting while not OPENED (and not KILLED,
 * which is tolerated so pending kills can be flushed) is a caller bug and
 * triggers an assert.
 *
 * @return          the proactor's post result, or tb_false on misuse
 */
tb_bool_t tb_aicp_post_(tb_aicp_ref_t aicp, tb_aice_ref_t aice __tb_debug_decl__)
{
    // the aicp implementation with a usable proactor
    tb_aicp_impl_t* impl = (tb_aicp_impl_t*)aicp;
    tb_assert_and_check_return_val(impl && impl->ptor && impl->ptor->post, tb_false);
    tb_assert_and_check_return_val(aice && aice->aico, tb_false);

    // the aico
    tb_aico_impl_t* aico = (tb_aico_impl_t*)aice->aico;
    tb_assert_and_check_return_val(aico, tb_false);

    // claim the aico: OPENED -> PENDING (returns the previous state)
    tb_size_t state = tb_atomic_fetch_and_pset(&aico->state, TB_STATE_OPENED, TB_STATE_PENDING);
    if (state == TB_STATE_OPENED || state == TB_STATE_KILLED)
    {
        // remember who posted, for leak/error diagnostics
#ifdef __tb_debug__
        aico->func = func_;
        aico->file = file_;
        aico->line = line_;
#endif

        // hand the aice to the proactor
        return impl->ptor->post(impl->ptor, aice);
    }

    // aico busy or in a bad state: report the misuse
#ifdef __tb_debug__
    tb_trace_e("post aice[%lu] failed, the aico[%p]: type: %lu, handle: %p, state: %s for func: %s, line: %lu, file: %s", aice->code, aico, tb_aico_type((tb_aico_ref_t)aico), aico->handle, tb_state_cstr(state), func_, line_, file_);
#else
    tb_trace_e("post aice[%lu] failed, the aico[%p]: type: %lu, handle: %p, state: %s", aice->code, aico, tb_aico_type((tb_aico_ref_t)aico), aico->handle, tb_state_cstr(state));
#endif

    // posting twice on one aico is a programming error: abort in debug builds
    tb_assert(0);

    // post failed
    return tb_false;
}
/* handle a close request for an aiop aico
 *
 * synchronously tears down everything the aico holds — timer task, aioo
 * registration, socket or file handle — then resets its bookkeeping fields and
 * publishes the CLOSED state.
 *
 * @return          1 (the aice completed immediately), -1 on bad arguments
 */
static tb_long_t tb_aiop_spak_clos(tb_aiop_ptor_impl_t* impl, tb_aice_ref_t aice)
{
    // check
    tb_assert_and_check_return_val(impl && impl->aiop && impl->ltimer && impl->timer && aice, -1);
    tb_assert_and_check_return_val(aice->code == TB_AICE_CODE_CLOS, -1);

    // the aico
    tb_aiop_aico_t* aico = (tb_aiop_aico_t*)aice->aico;
    tb_assert_and_check_return_val(aico, -1);

    // trace
    tb_trace_d("clos: aico: %p, code: %u: %s", aico, aice->code, tb_state_cstr(tb_atomic_get(&aico->base.state)));

    // cancel any pending timer task (bltimer selects which timer owns it)
    if (aico->task)
    {
        if (aico->bltimer) tb_ltimer_task_exit(impl->ltimer, aico->task);
        else tb_timer_task_exit(impl->timer, aico->task);
        aico->bltimer = 0;
    }
    aico->task = tb_null;

    // socket aico? deregister from the aiop and close the socket
    if (aico->base.type == TB_AICO_TYPE_SOCK)
    {
        // remove aioo
        if (aico->aioo) tb_aiop_delo(impl->aiop, aico->aioo);
        aico->aioo = tb_null;

        // close the socket handle
        if (aico->base.handle) tb_socket_exit((tb_socket_ref_t)aico->base.handle);
        aico->base.handle = tb_null;
    }
    // file aico? close the file handle
    else if (aico->base.type == TB_AICO_TYPE_FILE)
    {
        // exit the file handle
        if (aico->base.handle) tb_file_exit((tb_file_ref_t)aico->base.handle);
        aico->base.handle = tb_null;
    }

    // clear waiting state
    aico->waiting = 0;
    aico->wait_ok = 0;
    aico->aice.code = TB_AICE_CODE_NONE;

    // clear type
    aico->base.type = TB_AICO_TYPE_NONE;

    // reset all per-operation timeouts to "no timeout"
    tb_size_t i = 0;
    tb_size_t n = tb_arrayn(aico->base.timeout);
    for (i = 0; i < n; i++) aico->base.timeout[i] = -1;

    // publish the closed state (must come after the teardown above)
    tb_atomic_set(&aico->base.state, TB_STATE_CLOSED);

    // the close completed immediately
    aice->state = TB_STATE_OK;
    return 1;
}
/* output-stream close-completion: final step of closing a transfer
 *
 * both streams are closed by the time this fires, so it simply forwards the
 * close result to the transfer's close notification.
 */
static tb_void_t tb_async_transfer_ostream_clos_func(tb_async_stream_ref_t stream, tb_size_t state, tb_cpointer_t priv)
{
    // recover the transfer from the private data
    tb_async_transfer_impl_t* impl = (tb_async_transfer_impl_t*)priv;
    tb_assert_and_check_return(stream && impl);

    // trace
    tb_trace_d("clos: ostream: %s, state: %s", tb_url_cstr(tb_async_stream_url(stream)), tb_state_cstr(state));

    // notify the close completion
    tb_async_transfer_clos_func(impl, state);
}
/* input-stream open-completion: first half of opening a transfer
 *
 * on success (and if the transfer was not killed meanwhile) it chains the open
 * of the output stream; on any failure it reports the open result immediately
 * through the saved open callback.
 *
 * @return          tb_true, or the result of the failure notification
 */
static tb_bool_t tb_async_transfer_istream_open_func(tb_async_stream_ref_t stream, tb_size_t state, tb_hize_t offset, tb_cpointer_t priv)
{
    // the transfer carried in priv; an open callback must have been saved
    tb_async_transfer_impl_t* impl = (tb_async_transfer_impl_t*)priv;
    tb_assert_and_check_return_val(stream && impl && impl->open.func, tb_false);

    // trace
    tb_trace_d("open: istream: %s, offset: %llu, state: %s", tb_url_cstr(tb_async_stream_url(stream)), offset, tb_state_cstr(state));

    // done
    tb_bool_t ok = tb_true;
    do
    {
        // istream open failed? report below
        tb_check_break(state == TB_STATE_OK);

        // pessimistic default for the steps below
        state = TB_STATE_UNKNOWN_ERROR;

        // killed while opening?
        if (TB_STATE_KILLING == tb_atomic_get(&impl->state))
        {
            state = TB_STATE_KILLED;
            break;
        }

        // chain the output stream open
        if (!tb_async_stream_open(impl->ostream, tb_async_transfer_ostream_open_func, impl)) break;

        // ok
        state = TB_STATE_OK;
    } while (0);

    // failed? report the open result now
    if (state != TB_STATE_OK)
    {
        // done func
        ok = tb_async_transfer_open_func(impl, state, 0, 0, impl->open.func, impl->open.priv);
    }

    // ok?
    return ok;
}
/* input-stream read-completion: pushes the data just read to the output stream
 *
 * empty reads just continue reading; a CLOSED state triggers a final sync of
 * the output stream; any other failure computes the final average rate and
 * reports it.
 *
 * @return          tb_true to let the stream keep reading, tb_false to stop
 */
static tb_bool_t tb_async_transfer_istream_read_func(tb_async_stream_ref_t stream, tb_size_t state, tb_byte_t const* data, tb_size_t real, tb_size_t size, tb_cpointer_t priv)
{
    // the transfer carried in priv
    tb_async_transfer_impl_t* impl = (tb_async_transfer_impl_t*)priv;
    tb_assert_and_check_return_val(stream && impl && impl->aicp && impl->ostream, tb_false);

    // trace
    tb_trace_d("read: size: %lu, state: %s", real, tb_state_cstr(state));

    // done
    tb_bool_t bread = tb_false;
    do
    {
        // read failed or stream closed? handle below
        tb_check_break(state == TB_STATE_OK);

        // pessimistic default for the steps below
        state = TB_STATE_UNKNOWN_ERROR;

        // killed meanwhile?
        if (TB_STATE_KILLING == tb_atomic_get(&impl->state))
        {
            state = TB_STATE_KILLED;
            break;
        }

        // check
        tb_assert_and_check_break(data);

        // nothing read this round? just keep reading
        if (!real)
        {
            bread = tb_true;
            state = TB_STATE_OK;
            break;
        }

        // forward the bytes to the output stream; its writ callback drives the
        // next read, so we do NOT continue reading from here
        if (!tb_async_stream_writ(impl->ostream, data, real, tb_async_transfer_ostream_writ_func, impl)) break;

        // ok
        state = TB_STATE_OK;
    } while (0);

    // closed or failed?
    if (state != TB_STATE_OK)
    {
        // input closed cleanly? flush the output first; its sync callback
        // will finish the transfer, so only report here if the sync could
        // not be started
        tb_bool_t bend = tb_true;
        if (state == TB_STATE_CLOSED)
            bend = tb_async_stream_sync(impl->ostream, tb_true, tb_async_transfer_ostream_sync_func, impl)? tb_false : tb_true;

        // ending here (failure, or sync could not be posted)?
        if (bend)
        {
            // the current aicp time
            tb_hong_t time = tb_aicp_time(impl->aicp);

            // compute the whole-transfer average rate for the final report
            impl->done.current_rate = (impl->done.saved_size && (time > impl->done.base_time))? (tb_size_t)((impl->done.saved_size * 1000) / (time - impl->done.base_time)) : (tb_size_t)impl->done.saved_size;

            // report the end state
            tb_async_transfer_done_func(impl, state);
        }

        // stop reading
        bread = tb_false;
    }

    // continue to read or break it
    return bread;
}
/* central progress/completion notifier for a transfer
 *
 * three paths:
 *   - transfer already CLOSED: report with zeroed progress (open failed early)
 *   - autoclosing and the state is terminal (not OK/PAUSED, or being killed):
 *     snapshot the final offset/size/state and start the async close; the
 *     close-completion callback will deliver the saved report
 *   - otherwise: deliver the live progress report directly
 *
 * @return          the user callback's result (tb_false stops the transfer)
 */
static tb_bool_t tb_async_transfer_done_func(tb_async_transfer_impl_t* impl, tb_size_t state)
{
    // a transfer with an input stream and a done callback is required
    tb_assert_and_check_return_val(impl && impl->istream && impl->done.func, tb_false);

    // already closed (e.g. the open failed)? report with no progress data
    if (TB_STATE_CLOSED == tb_atomic_get(&impl->state))
    {
        // done func
        return impl->done.func(state, 0, 0, 0, 0, impl->done.priv);
    }

    // trace
    tb_trace_d("done: %llu bytes, rate: %lu bytes/s, state: %s", tb_async_stream_offset(impl->istream), impl->done.current_rate, tb_state_cstr(state));

    // auto closing it?
    if (impl->autoclosing)
    {
        // terminal state? close first, report from the close completion
        if ((state != TB_STATE_OK && state != TB_STATE_PAUSED) || (TB_STATE_KILLING == tb_atomic_get(&impl->state)))
        {
            // snapshot the final report before the streams are torn down;
            // a pending kill overrides the state with KILLED
            impl->done.closed_state = (TB_STATE_KILLING == tb_atomic_get(&impl->state))? TB_STATE_KILLED : state;
            impl->done.closed_size = tb_async_stream_size(impl->istream);
            impl->done.closed_offset = tb_async_stream_offset(impl->istream);
            return tb_async_transfer_clos((tb_async_transfer_ref_t)impl, tb_async_transfer_done_clos_func, impl);
        }
    }

    // live progress report
    return impl->done.func(state, tb_async_stream_offset(impl->istream), tb_async_stream_size(impl->istream), impl->done.saved_size, impl->done.current_rate, impl->done.priv);
}