/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */
// init the global dns cache: lazily creates the name -> address hash table.
// thread-safe via g_lock; safe to call more than once (the hash is created only once).
tb_bool_t tb_dns_cache_init()
{
    // enter
    tb_spinlock_enter(&g_lock);

    // done
    tb_bool_t ok = tb_false;
    do
    {
        // init hash: keyed by host-name string, valued by tb_dns_cache_addr_t records;
        // bucket count is ~sqrt(TB_DNS_CACHE_MAXN), rounded up to an 8-byte alignment
        if (!g_cache.hash) g_cache.hash = tb_hash_init(tb_align8(tb_isqrti(TB_DNS_CACHE_MAXN) + 1), tb_item_func_str(tb_false), tb_item_func_mem(sizeof(tb_dns_cache_addr_t), tb_null, tb_null));
        tb_assert_and_check_break(g_cache.hash);

        // ok
        ok = tb_true;

    } while (0);

    // leave
    tb_spinlock_leave(&g_lock);

    // failed? exit it (tears down any partially-created cache state)
    if (!ok) tb_dns_cache_exit();

    // ok?
    return ok;
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */
// create a dns task bound to the given aicp; returns tb_null on failure
tb_aicp_dns_ref_t tb_aicp_dns_init(tb_aicp_ref_t aicp)
{
    // the aicp must be valid
    tb_assert_and_check_return_val(aicp, tb_null);

    // allocate a zero-initialized dns task
    tb_aicp_dns_impl_t* impl = tb_malloc0_type(tb_aicp_dns_impl_t);
    tb_assert_and_check_return_val(impl, tb_null);

    // bind the aicp; nothing after the allocation can fail, so no cleanup path is needed
    impl->aicp = aicp;

    // ok
    return (tb_aicp_dns_ref_t)impl;
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * test
 */
// benchmark the generic sort on n random integers and verify the result is ordered
static tb_void_t tb_sort_int_test_perf(tb_size_t n)
{
    // allocate a zeroed array of n integers
    tb_long_t* data = (tb_long_t*)tb_nalloc0(n, sizeof(tb_long_t));
    tb_assert_and_check_return(data);

    // wrap the array with a long-element iterator
    tb_array_iterator_t array_iterator;
    tb_iterator_ref_t   iterator = tb_array_iterator_init_long(&array_iterator, data, n);

    // fill with random 16-bit values
    __tb_volatile__ tb_size_t idx = 0;
    for (idx = 0; idx < n; idx++) data[idx] = tb_random_range(TB_MINS16, TB_MAXS16);

    // sort and measure the elapsed wall-clock time
    tb_hong_t elapsed = tb_mclock();
    tb_sort_all(iterator, tb_null);
    elapsed = tb_mclock() - elapsed;

    // report
    tb_trace_i("tb_sort_int_all: %lld ms", elapsed);

    // verify: every adjacent pair must be non-decreasing
    for (idx = 1; idx < n; idx++) tb_assert_and_check_break(data[idx - 1] <= data[idx]);

    // free the data
    tb_free(data);
}
// walk callback: validate one allocated item of a fixed pool.
// data: the user data pointer of the item; priv: the owning fixed pool.
// returns tb_true when the item's header and trailing patch byte are intact.
static tb_bool_t tb_small_pool_item_check(tb_pointer_t data, tb_cpointer_t priv)
{
    // check
    tb_fixed_pool_ref_t fixed_pool = (tb_fixed_pool_ref_t)priv;
    tb_assert_return_val(fixed_pool && data, tb_false);

    // done
    tb_bool_t ok = tb_false;
    do
    {
        // the data head lives immediately before the user data
        tb_pool_data_head_t* data_head = &(((tb_pool_data_head_t*)data)[-1]);
        tb_assertf_break(data_head->debug.magic == TB_POOL_DATA_MAGIC, "invalid data: %p", data);

        // the fixed pool's slot size must cover the recorded data size
        tb_size_t space = tb_fixed_pool_item_size(fixed_pool);
        tb_assert_and_check_break(space >= data_head->size);

        // check underflow: when the slot is larger than the data, the first byte past
        // the user data must still hold the patch marker written at allocation time
        tb_assertf_break(space == data_head->size || ((tb_byte_t*)data)[data_head->size] == TB_POOL_DATA_PATCH, "data underflow");

        // ok
        ok = tb_true;

    } while (0);

    // continue?
    return ok;
}
tb_mutex_ref_t tb_mutex_init() { // done tb_bool_t ok = tb_false; tb_spinlock_ref_t lock = tb_null; do { // make lock lock = tb_malloc0_type(tb_spinlock_t); tb_assert_and_check_break(lock); // init lock if (!tb_spinlock_init(lock)) break; // ok ok = tb_true; } while (0); // failed? if (!ok) { // exit it tb_free(lock); lock = tb_null; } // ok? return (tb_mutex_ref_t)lock; }
/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */
// create a posix semaphore with the given initial count; returns tb_null on failure
tb_semaphore_ref_t tb_semaphore_init(tb_size_t init)
{
    // allocate storage for the posix semaphore
    sem_t* semaphore = tb_malloc0_type(sem_t);
    tb_assert_and_check_return_val(semaphore, tb_null);

    // init it (process-private, initial value = init); free the storage on failure
    if (sem_init(semaphore, 0, init) < 0)
    {
        tb_free(semaphore);
        semaphore = tb_null;
    }

    // ok?
    return (tb_semaphore_ref_t)semaphore;
}
static tb_object_dictionary_t* tb_object_dictionary_init_base() { // done tb_bool_t ok = tb_false; tb_object_dictionary_t* dictionary = tb_null; do { // make dictionary dictionary = tb_malloc0_type(tb_object_dictionary_t); tb_assert_and_check_break(dictionary); // init dictionary if (!tb_object_init((tb_object_ref_t)dictionary, TB_OBJECT_FLAG_NONE, TB_OBJECT_TYPE_DICTIONARY)) break; // init base dictionary->base.copy = tb_object_dictionary_copy; dictionary->base.cler = tb_object_dictionary_cler; dictionary->base.exit = tb_object_dictionary_exit; // ok ok = tb_true; } while (0); // failed? if (!ok) { // exit it if (dictionary) tb_object_exit((tb_object_ref_t)dictionary); dictionary = tb_null; } // ok? return dictionary; }
// post a list of tasks to the thread pool under the pool lock.
// returns the number of tasks actually queued (0 if the pool is stopped);
// wakes workers only after the lock has been released.
tb_size_t tb_thread_pool_task_post_list(tb_thread_pool_ref_t pool, tb_thread_pool_task_t const* list, tb_size_t size)
{
    // check
    tb_thread_pool_impl_t* impl = (tb_thread_pool_impl_t*)pool;
    tb_assert_and_check_return_val(impl && list, 0);

    // init the post size (filled by tb_thread_pool_jobs_post_task)
    tb_size_t post_size = 0;

    // enter
    tb_spinlock_enter(&impl->lock);

    // done: ok doubles as the count of successfully posted tasks
    tb_size_t ok = 0;
    if (!impl->bstoped)
    {
        for (ok = 0; ok < size; ok++)
        {
            // post task; stop at the first failure, keeping the tasks already queued
            tb_thread_pool_job_t* job = tb_thread_pool_jobs_post_task(impl, &list[ok], &post_size);
            tb_assert_and_check_break(job);
        }
    }

    // leave
    tb_spinlock_leave(&impl->lock);

    // post the workers (outside the lock to avoid waking them while holding it)
    if (ok && post_size) tb_thread_pool_worker_post(impl, post_size);

    // ok?
    return ok;
}
// send the whole file over the socket using tb_socket_sendf.
// returns tb_true only if every byte was sent.
static tb_bool_t tb_demo_http_session_file_send(tb_socket_ref_t sock, tb_file_ref_t file)
{
    // check
    tb_assert_and_check_return_val(sock && file, tb_false);

    // send data
    tb_hize_t send = 0;
    tb_hize_t size = tb_file_size(file);
    tb_long_t wait = 0;
    while (send < size)
    {
        // send it from the current file offset
        tb_hong_t real = tb_socket_sendf(sock, file, send, size - send);

        // has data? advance and clear the wait flag so we may wait again later
        if (real > 0)
        {
            send += real;
            wait = 0;
        }
        // no data and not waited yet? block until the socket is writable
        else if (!real && !wait)
        {
            // wait it; negative wait aborts the transfer
            wait = tb_socket_wait(sock, TB_SOCKET_EVENT_SEND, TB_DEMO_TIMEOUT);
            tb_assert_and_check_break(wait >= 0);
        }
        // send failed, or still no data right after waiting? stop
        else break;
    }

    // ok?
    return send == size;
}
// get an iterator over the interface list.
// reload: when true, clears the cached list and re-queries the kernel via netlink
// (addresses via RTM_GETADDR, then hardware addresses via RTM_GETLINK).
tb_iterator_ref_t tb_ifaddrs_itor(tb_ifaddrs_ref_t ifaddrs, tb_bool_t reload)
{
    // check
    tb_list_ref_t interfaces = (tb_list_ref_t)ifaddrs;
    tb_assert_and_check_return_val(interfaces, tb_null);

    // uses the cached interfaces?
    tb_check_return_val(reload, (tb_iterator_ref_t)interfaces);

    // clear interfaces first
    tb_list_clear(interfaces);

    // done
    tb_long_t sock = -1;
    do
    {
        // make the netlink sock
        sock = tb_ifaddrs_netlink_socket_init();
        tb_assert_and_check_break(sock >= 0);

        // load ipaddr
        if (!tb_ifaddrs_interface_load(interfaces, sock, RTM_GETADDR)) break;

        // load hwaddr
        if (!tb_ifaddrs_interface_load(interfaces, sock, RTM_GETLINK)) break;

    } while (0);

    // exit sock
    if (sock >= 0) close(sock);
    sock = -1;

    // ok? note: the list is returned even if reloading failed partway
    return (tb_iterator_ref_t)interfaces;
}
static tb_object_data_t* tb_object_data_init_base() { // done tb_bool_t ok = tb_false; tb_object_data_t* data = tb_null; do { // make data data = tb_malloc0_type(tb_object_data_t); tb_assert_and_check_break(data); // init data if (!tb_object_init((tb_object_ref_t)data, TB_OBJECT_FLAG_NONE, TB_OBJECT_TYPE_DATA)) break; // init base data->base.copy = tb_object_data_copy; data->base.cler = tb_object_data_cler; data->base.exit = tb_object_data_exit; // ok ok = tb_true; } while (0); // failed? if (!ok) { // exit it if (data) tb_object_exit((tb_object_ref_t)data); data = tb_null; } // ok? return data; }
static tb_oc_number_t* tb_oc_number_init_base() { // done tb_bool_t ok = tb_false; tb_oc_number_t* number = tb_null; do { // make number number = tb_malloc0_type(tb_oc_number_t); tb_assert_and_check_break(number); // init number if (!tb_object_init((tb_object_ref_t)number, TB_OBJECT_FLAG_NONE, TB_OBJECT_TYPE_NUMBER)) break; // init base number->base.copy = tb_oc_number_copy; number->base.exit = tb_oc_number_exit; number->base.clear = tb_oc_number_clear; // ok ok = tb_true; } while (0); // failed? if (!ok) { // exit it if (number) tb_object_exit((tb_object_ref_t)number); number = tb_null; } // ok? return number; }
// resolve the host name to an address, blocking until done or failed.
// consults the dns cache first; on a miss, drives a looker state machine to completion.
tb_bool_t tb_dns_looker_done(tb_char_t const* name, tb_ipaddr_ref_t addr)
{
    // check
    tb_assert_and_check_return_val(name && addr, tb_false);

    // try to lookup it from cache first
    if (tb_dns_cache_get(name, addr)) return tb_true;

    // init looker
    tb_dns_looker_ref_t looker = tb_dns_looker_init(name);
    tb_check_return_val(looker, tb_false);

    // spak: step the state machine; a zero return means it made no progress yet,
    // so wait for io readiness and step again
    tb_long_t r = -1;
    while (!(r = tb_dns_looker_spak(looker, addr)))
    {
        // wait; a negative result aborts the loop as failed
        r = tb_dns_looker_wait(looker, TB_DNS_LOOKER_TIMEOUT);
        tb_assert_and_check_break(r >= 0);
    }

    // exit
    tb_dns_looker_exit(looker);

    // ok iff the final spak/wait result was positive
    return r > 0? tb_true : tb_false;
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */
// init the memory environment: page layer, native memory layer and the global allocator.
// allocator: the user allocator, or tb_null to select a built-in default
// (native allocator in micro/small-release builds, default allocator otherwise).
tb_bool_t tb_memory_init_env(tb_allocator_ref_t allocator)
{
    // done
    tb_bool_t ok = tb_false;
    do
    {
        // init page
        if (!tb_page_init()) break;

        // init the native memory
        if (!tb_native_memory_init()) break;

        // init the allocator
#if defined(TB_CONFIG_MICRO_ENABLE) || \
        (defined(__tb_small__) && !defined(__tb_debug__))
        g_allocator = allocator? allocator : tb_native_allocator();
#else
        g_allocator = allocator? allocator : tb_default_allocator(tb_null, 0);
#endif
        tb_assert_and_check_break(g_allocator);

        // ok
        ok = tb_true;

    } while (0);

    // failed? roll back any partially-initialized layers
    if (!ok) tb_memory_exit_env();

    // ok?
    return ok;
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */
// init the memory stack bottom-up: page layer, native memory, allocator and pool.
// data/size optionally provide a static buffer for the large pool.
tb_bool_t tb_memory_init(tb_allocator_ref_t allocator, tb_byte_t* data, tb_size_t size)
{
    tb_bool_t ok = tb_false;
    do
    {
        // the page layer first
        if (!tb_page_init()) break;

        // then the native (system) memory layer
        if (!tb_native_memory_init()) break;

        // bind the user allocator and the optional static large-pool buffer
        g_allocator       = allocator;
        g_large_pool_data = data;
        g_large_pool_size = size;

        // creating the default pool proves the whole stack works
        tb_assert_and_check_break(tb_pool());

        // every layer is up
        ok = tb_true;

    } while (0);

    // roll back any partially-initialized state on failure
    if (!ok) tb_memory_exit();

    // ok?
    return ok;
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */
// read a json "null" token; type is the first character the caller already consumed.
// returns a null object when the accumulated token equals "null" (case-insensitive),
// or tb_null otherwise.
static tb_object_ref_t tb_object_json_reader_func_null(tb_object_json_reader_t* reader, tb_char_t type)
{
    // check
    tb_assert_and_check_return_val(reader && reader->stream, tb_null);

    // init data
    tb_static_string_t  data;
    tb_char_t           buff[256];
    if (!tb_static_string_init(&data, buff, 256)) return tb_null;

    // done
    tb_object_ref_t null = tb_null;
    do
    {
        // append the first character
        tb_static_string_chrcat(&data, type);

        // accumulate the following alphabetic characters
        tb_bool_t failed = tb_false;
        while (!failed && tb_stream_left(reader->stream))
        {
            // peek one character
            // fix: the original condition (!need && p) left `failed` unset when the
            // peek failed with p == tb_null, and the dereference below crashed;
            // treat any unsuccessful peek as a failure
            tb_byte_t* p = tb_null;
            if (!tb_stream_need(reader->stream, &p, 1) || !p)
            {
                failed = tb_true;
                break;
            }

            // the character
            tb_char_t ch = *p;

            // append only letters; any other character ends the token
            if (tb_isalpha(ch)) tb_static_string_chrcat(&data, ch);
            else break;

            // consume it
            tb_stream_skip(reader->stream, 1);
        }

        // failed?
        tb_check_break(!failed);

        // check
        tb_assert_and_check_break(tb_static_string_size(&data));

        // trace
        tb_trace_d("null: %s", tb_static_string_cstr(&data));

        // null?
        if (!tb_stricmp(tb_static_string_cstr(&data), "null")) null = tb_object_null_init();

    } while (0);

    // exit data
    tb_static_string_exit(&data);

    // ok?
    return null;
}
// serialize the object into a new data object using the given format.
// the buffer starts at 4096 bytes and doubles until the serialized form fits.
tb_object_ref_t tb_object_data(tb_object_ref_t object, tb_size_t format)
{
    // check
    tb_assert_and_check_return_val(object, tb_null);

    // done
    tb_object_ref_t odata = tb_null;
    tb_size_t       maxn  = 4096;
    tb_byte_t*      data  = tb_null;
    do
    {
        // make or grow the buffer
        // fix: the original wrote the tb_ralloc result straight back into `data`,
        // leaking the old buffer when the reallocation failed; keep the old
        // pointer alive so the cleanup below can free it
        if (data)
        {
            tb_byte_t* grown = (tb_byte_t*)tb_ralloc(data, maxn);
            tb_assert_and_check_break(grown);
            data = grown;
        }
        else data = tb_malloc_bytes(maxn);
        tb_assert_and_check_break(data);

        // writ object to data
        tb_long_t size = tb_object_writ_to_data(object, data, maxn, format);

        // ok? make the data object
        if (size >= 0) odata = tb_object_data_init_from_data(data, size);
        // failed? grow it
        else maxn <<= 1;

    } while (!odata);

    // exit data
    if (data) tb_free(data);
    data = tb_null;

    // ok?
    return odata;
}
// add a socket to the aiop with the given event code and user data.
// returns the new aioo, or tb_null on failure.
tb_aioo_ref_t tb_aiop_addo(tb_aiop_ref_t aiop, tb_socket_ref_t sock, tb_size_t code, tb_cpointer_t priv)
{
    // check
    tb_aiop_impl_t* impl = (tb_aiop_impl_t*)aiop;
    tb_assert_and_check_return_val(impl && impl->rtor && impl->rtor->addo && sock, tb_null);
    tb_assert(tb_aiop_have(aiop, code));

    // make the aioo for this socket
    tb_aioo_ref_t aioo = tb_aiop_aioo_init(impl, sock, code, priv);
    tb_assert_and_check_return_val(aioo, tb_null);

    // register it with the reactor; drop the aioo if registration fails
    if (!impl->rtor->addo(impl->rtor, (tb_aioo_impl_t*)aioo))
    {
        tb_aiop_aioo_exit(impl, aioo);
        aioo = tb_null;
    }

    // ok?
    return aioo;
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * main
 */
// demo entry: transfer argv[1] (the source url) to each destination url in argv[2..].
// with TB_DEMO_TEST_AICP the transfers run asynchronously through the transfer pool
// and the main thread blocks on g_event until the pool drains.
tb_int_t tb_demo_stream_transfer_pool_main(tb_int_t argc, tb_char_t** argv)
{
#if TB_DEMO_TEST_AICP
    // done
    do
    {
        // init event: signaled by the done callback as transfers finish
        g_event = tb_event_init();
        tb_assert_and_check_break(g_event);

        // init tasks: one pool transfer per destination argument
        tb_char_t** p = &argv[2];
        for (; p && *p; p++)
        {
            // done transfer (the destination doubles as the callback's private data)
            if (!tb_transfer_pool_done(tb_transfer_pool(), argv[1], *p, 0, 0, tb_demo_transfer_done_func, tb_null, *p)) break;
        }

    } while (0);

    // wait transfer: block until the pool has no pending transfers
    while (g_event && tb_transfer_pool_size(tb_transfer_pool()) && tb_event_wait(g_event, -1) > 0);

    // exit event
    if (g_event) tb_event_exit(g_event);
    g_event = tb_null;
#else
    // synchronous fallback: transfer each destination one at a time
    tb_char_t** p = &argv[2];
    for (; p && *p; p++) tb_transfer_done_url(argv[1], *p, 0, tb_demo_transfer_done_func, *p);
#endif
    return 0;
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */
// init the io scheduler for the given coroutine scheduler:
// creates the poller (plus timers unless micro mode) and starts the io loop coroutine.
// returns tb_null and frees all partial state on any failure.
tb_lo_scheduler_io_ref_t tb_lo_scheduler_io_init(tb_lo_scheduler_t* scheduler)
{
    // done
    tb_bool_t ok = tb_false;
    tb_lo_scheduler_io_ref_t scheduler_io = tb_null;
    do
    {
        // init io scheduler
        scheduler_io = tb_malloc0_type(tb_lo_scheduler_io_t);
        tb_assert_and_check_break(scheduler_io);

        // save scheduler
        scheduler_io->scheduler = (tb_lo_scheduler_t*)scheduler;

        // init poller
        scheduler_io->poller = tb_poller_init(tb_null);
        tb_assert_and_check_break(scheduler_io->poller);

#ifndef TB_CONFIG_MICRO_ENABLE
        // init timer and using cache time
        scheduler_io->timer = tb_timer_init(TB_SCHEDULER_IO_TIMER_GROW, tb_true);
        tb_assert_and_check_break(scheduler_io->timer);

        // init ltimer and using cache time
        scheduler_io->ltimer = tb_ltimer_init(TB_SCHEDULER_IO_LTIMER_GROW, TB_LTIMER_TICK_S, tb_true);
        tb_assert_and_check_break(scheduler_io->ltimer);
#endif

        // start the io loop coroutine
        if (!tb_lo_coroutine_start((tb_lo_scheduler_ref_t)scheduler, tb_lo_scheduler_io_loop, scheduler_io, tb_null)) break;

        // ok
        ok = tb_true;

    } while (0);

    // failed? free the partially-initialized io scheduler
    if (!ok)
    {
        // exit io scheduler
        if (scheduler_io) tb_lo_scheduler_io_exit(scheduler_io);
        scheduler_io = tb_null;
    }

    // ok?
    return scheduler_io;
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */
// register an aico with the aiop proctor, dispatching on the aico type:
// sockets only need a valid handle, files are registered with the file subsystem,
// tasks need no registration at all.
static tb_bool_t tb_aiop_ptor_addo(tb_aicp_ptor_impl_t* ptor, tb_aico_impl_t* aico)
{
    // check
    tb_aiop_ptor_impl_t* impl = (tb_aiop_ptor_impl_t*)ptor;
    tb_assert_and_check_return_val(impl && impl->aiop && aico, tb_false);

    // the aiop aico
    tb_aiop_aico_t* aiop_aico = (tb_aiop_aico_t*)aico;

    // init impl: back-pointer used by the aiop aico later
    aiop_aico->impl = impl;

    // done
    tb_bool_t ok = tb_false;
    switch (aico->type)
    {
    case TB_AICO_TYPE_SOCK:
        {
            // check: a socket aico only needs a valid handle here
            tb_assert_and_check_break(aico->handle);

            // ok
            ok = tb_true;
        }
        break;
    case TB_AICO_TYPE_FILE:
        {
            // check
            tb_assert_and_check_break(aico->handle);

            // file: addo
            ok = tb_aicp_file_addo(impl, aico);
        }
        break;
    case TB_AICO_TYPE_TASK:
        {
            // ok: task aicos carry no handle to register
            ok = tb_true;
        }
        break;
    default:
        // unknown type: leave ok = tb_false
        break;
    }

    // ok?
    return ok;
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * interfaces
 */
// create a dns looker for the given host name (which must not already be an address).
// returns tb_null on failure with all partial state released.
tb_dns_looker_ref_t tb_dns_looker_init(tb_char_t const* name)
{
    // check
    tb_assert_and_check_return_val(name, tb_null);

    // must be not address
    tb_assert(!tb_ipaddr_ip_cstr_set(tb_null, name, TB_IPADDR_FAMILY_NONE));

    // done
    tb_bool_t        ok = tb_false;
    tb_dns_looker_t* looker = tb_null;
    do
    {
        // make looker
        // fix: break (not return) here so every failure flows through the single
        // cleanup path below, consistent with the rest of this function
        looker = tb_malloc0_type(tb_dns_looker_t);
        tb_assert_and_check_break(looker);

        // get the dns server list
        looker->maxn = tb_dns_server_get(looker->list);
        tb_check_break(looker->maxn && looker->maxn <= tb_arrayn(looker->list));

        // init name: stored in the first TB_DNS_NAME_MAXN bytes of looker->data
        if (!tb_static_string_init(&looker->name, (tb_char_t*)looker->data, TB_DNS_NAME_MAXN)) break;
        tb_static_string_cstrcpy(&looker->name, name);

        // init rpkt: the response packet buffer follows the name buffer
        if (!tb_static_buffer_init(&looker->rpkt, looker->data + TB_DNS_NAME_MAXN, TB_DNS_RPKT_MAXN)) break;

        // init family
        looker->family = TB_IPADDR_FAMILY_IPV4;

        // init sock
        looker->sock = tb_socket_init(TB_SOCKET_TYPE_UDP, looker->family);
        tb_assert_and_check_break(looker->sock);

        // init itor: start with the first dns server
        looker->itor = 1;

        // ok
        ok = tb_true;

    } while (0);

    // failed?
    if (!ok)
    {
        // exit it
        if (looker) tb_dns_looker_exit((tb_dns_looker_ref_t)looker);
        looker = tb_null;
    }

    // ok?
    return (tb_dns_looker_ref_t)looker;
}
// resume a paused transfer: clears the pause state and restarts reading the istream.
// returns tb_true if the transfer was resumed (or was not paused at all).
tb_bool_t tb_async_transfer_resume(tb_async_transfer_ref_t transfer)
{
    // check
    tb_async_transfer_impl_t* impl = (tb_async_transfer_impl_t*)transfer;
    tb_assert_and_check_return_val(impl && impl->aicp, tb_false);

    // done
    tb_bool_t ok = tb_false;
    tb_size_t state_pause = TB_STATE_OK;
    do
    {
        // must be opened?
        tb_check_break(TB_STATE_OPENED == tb_atomic_get(&impl->state));

        // resume it
        // fix: assign to the outer state_pause; the original redeclared a local
        // here, shadowing the outer one and making the failure-restore below a no-op
        state_pause = tb_atomic_fetch_and_set(&impl->state_pause, TB_STATE_OK);

        // pausing or ok? return ok directly
        tb_check_return_val(state_pause == TB_STATE_PAUSED, tb_true);

        // check
        tb_assert_and_check_break(impl->istream);
        tb_assert_and_check_break(impl->ostream);

        // init some rate info
        impl->done.base_time    = tb_aicp_time(impl->aicp);
        impl->done.base_time1s  = impl->done.base_time;
        impl->done.saved_size1s = 0;
        impl->done.current_rate = 0;

        // read it
        if (!tb_async_stream_read(impl->istream, (tb_size_t)tb_atomic_get(&impl->limited_rate), tb_async_transfer_istream_read_func, impl)) break;

        // ok
        ok = tb_true;

    } while (0);

    // failed? restore the previous pause state
    if (!ok && state_pause != TB_STATE_OK) tb_atomic_pset(&impl->state_pause, TB_STATE_OK, state_pause);

    // ok?
    return ok;
}
// allocate a zero-filled array of item * size bytes from the small pool (debug-decorated
// variant); falls back to the large pool when the small pool is compile-time disabled.
// aborts (in debug) if the allocation cannot be satisfied.
tb_pointer_t tb_small_pool_nalloc0_(tb_small_pool_ref_t pool, tb_size_t item, tb_size_t size __tb_debug_decl__)
{
    // check
    tb_small_pool_impl_t* impl = (tb_small_pool_impl_t*)pool;
    tb_assert_and_check_return_val(impl && impl->large_pool && size, tb_null);
    tb_assert_and_check_return_val(item * size <= TB_SMALL_POOL_DATA_SIZE_MAXN, tb_null);

    // disable small pool for debug
#ifdef TB_SMALL_POOL_DISABLE
    return tb_large_pool_nalloc0(impl->large_pool, item, size, tb_null);
#endif

    // done
    tb_pointer_t data = tb_null;
    do
    {
        // the fixed pool serving this allocation size
        tb_fixed_pool_ref_t fixed_pool = tb_small_pool_find_fixed(impl, item * size);
        tb_assert_and_check_break(fixed_pool);

        // allocate one zeroed slot from it
        data = tb_fixed_pool_malloc0_(fixed_pool __tb_debug_args__);
        tb_assert_and_check_break(data);

        // the data head lives immediately before the user data
        tb_pool_data_head_t* data_head = &(((tb_pool_data_head_t*)data)[-1]);
        tb_assert_abort(data_head->debug.magic == TB_POOL_DATA_MAGIC);

#ifdef __tb_debug__
        // fill the patch bytes so later checks can detect writes past the requested size
        if (data_head->size > (item * size)) tb_memset_((tb_byte_t*)data + (item * size), TB_POOL_DATA_PATCH, data_head->size - (item * size));
#endif

        // update size: record the requested size, not the fixed slot size
        data_head->size = item * size;

    } while (0);

    // check
    tb_assertf_abort(data, "nalloc(%lu, %lu) failed!", item, size);

    // ok?
    return data;
}
// open a closed aico as a socket of the given type and register it with the proctor.
// returns tb_true on success; on failure the aico is left unbound and the socket closed.
tb_bool_t tb_aico_open_sock_from_type(tb_aico_ref_t aico, tb_size_t type)
{
    // check
    // fix: validate impl before reading impl->aicp (the original dereferenced
    // impl->aicp before the null check on impl)
    tb_aico_impl_t* impl = (tb_aico_impl_t*)aico;
    tb_assert_and_check_return_val(impl && impl->aicp, tb_false);

    // check the proctor
    tb_aicp_impl_t* aicp_impl = (tb_aicp_impl_t*)impl->aicp;
    tb_assert_and_check_return_val(aicp_impl->ptor && aicp_impl->ptor->addo, tb_false);

    // done
    tb_bool_t       ok = tb_false;
    tb_socket_ref_t sock = tb_null;
    do
    {
        // closed?
        tb_assert_and_check_break(tb_atomic_get(&impl->state) == TB_STATE_CLOSED);
        tb_assert_and_check_break(!impl->type && !impl->handle);

        // init sock
        sock = tb_socket_init(type);
        tb_assert_and_check_break(sock);

        // bind type and handle
        impl->type   = TB_AICO_TYPE_SOCK;
        impl->handle = (tb_handle_t)sock;

        // addo aico
        ok = aicp_impl->ptor->addo(aicp_impl->ptor, impl);
        tb_assert_and_check_break(ok);

        // opened
        tb_atomic_set(&impl->state, TB_STATE_OPENED);

    } while (0);

    // failed?
    if (!ok)
    {
        // unbind the aico and exit the socket
        // fix: the original left type/handle bound to a closed socket when addo failed
        if (sock)
        {
            impl->type   = 0;
            impl->handle = tb_null;
            tb_socket_exit(sock);
        }
        sock = tb_null;
    }

    // ok?
    return ok;
}
// open a closed aico as a file at the given path/mode and register it with the proctor.
// returns tb_true on success; on failure the aico is left unbound and the file closed.
tb_bool_t tb_aico_open_file_from_path(tb_aico_ref_t aico, tb_char_t const* path, tb_size_t mode)
{
    // check
    // fix: validate impl before reading impl->aicp (the original dereferenced
    // impl->aicp before the null check on impl)
    tb_aico_impl_t* impl = (tb_aico_impl_t*)aico;
    tb_assert_and_check_return_val(impl && impl->aicp && path, tb_false);

    // check the proctor
    tb_aicp_impl_t* aicp_impl = (tb_aicp_impl_t*)impl->aicp;
    tb_assert_and_check_return_val(aicp_impl->ptor && aicp_impl->ptor->addo, tb_false);

    // done
    tb_bool_t     ok = tb_false;
    tb_file_ref_t file = tb_null;
    do
    {
        // closed?
        tb_assert_and_check_break(tb_atomic_get(&impl->state) == TB_STATE_CLOSED);
        tb_assert_and_check_break(!impl->type && !impl->handle);

        // init file: force async io mode
        file = tb_file_init(path, mode | TB_FILE_MODE_ASIO);
        tb_assert_and_check_break(file);

        // bind type and handle
        impl->type   = TB_AICO_TYPE_FILE;
        impl->handle = (tb_handle_t)file;

        // addo aico
        ok = aicp_impl->ptor->addo(aicp_impl->ptor, impl);
        tb_assert_and_check_break(ok);

        // opened
        tb_atomic_set(&impl->state, TB_STATE_OPENED);

    } while (0);

    // failed?
    if (!ok)
    {
        // unbind the aico and exit the file
        // fix: the original left type/handle bound to a closed file when addo failed
        if (file)
        {
            impl->type   = 0;
            impl->handle = tb_null;
            tb_file_exit(file);
        }
        file = tb_null;
    }

    // ok?
    return ok;
}
// create the epoll reactor for the given aiop; returns tb_null on failure.
static tb_aiop_rtor_impl_t* tb_aiop_rtor_epoll_init(tb_aiop_impl_t* aiop)
{
    // check
    tb_assert_and_check_return_val(aiop && aiop->maxn, tb_null);

    // done
    tb_bool_t ok = tb_false;
    tb_aiop_rtor_epoll_impl_t* impl = tb_null;
    do
    {
        // make impl
        impl = tb_malloc0_type(tb_aiop_rtor_epoll_impl_t);
        tb_assert_and_check_break(impl);

        // init base
        impl->base.aiop = aiop;
        impl->base.code = TB_AIOE_CODE_EALL | TB_AIOE_CODE_CLEAR | TB_AIOE_CODE_ONESHOT;
        impl->base.exit = tb_aiop_rtor_epoll_exit;
        impl->base.cler = tb_aiop_rtor_epoll_cler;
        impl->base.addo = tb_aiop_rtor_epoll_addo;
        impl->base.delo = tb_aiop_rtor_epoll_delo;
        impl->base.post = tb_aiop_rtor_epoll_post;
        impl->base.wait = tb_aiop_rtor_epoll_wait;

        // init epoll
        // fix: epoll_create returns -1 on error; fd 0 is a valid descriptor, so
        // accept >= 0 (this also matches the sibling kqueue reactor's check)
        impl->epfd = epoll_create(aiop->maxn);
        tb_assert_and_check_break(impl->epfd >= 0);

        // ok
        ok = tb_true;

    } while (0);

    // failed?
    if (!ok)
    {
        // exit it
        if (impl) tb_aiop_rtor_epoll_exit((tb_aiop_rtor_impl_t*)impl);
        impl = tb_null;
    }

    // ok?
    return (tb_aiop_rtor_impl_t*)impl;
}
// create the kqueue reactor for the given aiop; returns tb_null on failure.
static tb_aiop_rtor_impl_t* tb_aiop_rtor_kqueue_init(tb_aiop_impl_t* aiop)
{
    // check
    tb_assert_and_check_return_val(aiop && aiop->maxn, tb_null);

    // make the reactor (zero-initialized)
    tb_aiop_rtor_kqueue_impl_t* impl = tb_malloc0(sizeof(tb_aiop_rtor_kqueue_impl_t));
    tb_assert_and_check_return_val(impl, tb_null);

    // hook the base interface
    impl->base.aiop = aiop;
    impl->base.code = TB_AIOE_CODE_EALL | TB_AIOE_CODE_CLEAR | TB_AIOE_CODE_ONESHOT;
    impl->base.exit = tb_aiop_rtor_kqueue_exit;
    impl->base.cler = tb_aiop_rtor_kqueue_cler;
    impl->base.addo = tb_aiop_rtor_kqueue_addo;
    impl->base.delo = tb_aiop_rtor_kqueue_delo;
    impl->base.post = tb_aiop_rtor_kqueue_post;
    impl->base.wait = tb_aiop_rtor_kqueue_wait;

    // open the kqueue; release the reactor on failure
    impl->kqfd = kqueue();
    tb_assert(impl->kqfd >= 0);
    if (impl->kqfd < 0)
    {
        tb_aiop_rtor_kqueue_exit((tb_aiop_rtor_impl_t*)impl);
        return tb_null;
    }

    // ok
    return (tb_aiop_rtor_impl_t*)impl;
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */
// create a poll()-based poller; a connected socket pair is registered internally
// so the poller can be woken up from another thread.
tb_poller_ref_t tb_poller_init(tb_cpointer_t priv)
{
    tb_bool_t            ok = tb_false;
    tb_poller_poll_ref_t poller = tb_null;
    do
    {
        // allocate the poller
        poller = tb_malloc0_type(tb_poller_poll_t);
        tb_assert_and_check_break(poller);

        // the working pollfd array
        poller->pfds = tb_vector_init(0, tb_element_mem(sizeof(struct pollfd), tb_null, tb_null));
        tb_assert_and_check_break(poller->pfds);

        // the copied pollfd array
        poller->cfds = tb_vector_init(0, tb_element_mem(sizeof(struct pollfd), tb_null, tb_null));
        tb_assert_and_check_break(poller->cfds);

        // attach the user private data
        poller->priv = priv;

        // make the wakeup socket pair
        if (!tb_socket_pair(TB_SOCKET_TYPE_TCP, poller->pair)) break;

        // watch the wakeup side before anything else
        if (!tb_poller_insert((tb_poller_ref_t)poller, poller->pair[1], TB_POLLER_EVENT_RECV, tb_null)) break;

        // everything is up
        ok = tb_true;

    } while (0);

    // free the partially-built poller on failure
    if (!ok)
    {
        if (poller) tb_poller_exit((tb_poller_ref_t)poller);
        poller = tb_null;
    }

    // ok?
    return (tb_poller_ref_t)poller;
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */
// create a mesh face list whose items carry the given user element.
// element must provide data, dupl and repl operations.
gb_mesh_face_list_ref_t gb_mesh_face_list_init(tb_element_t element)
{
    // check
    tb_assert_and_check_return_val(element.data && element.dupl && element.repl, tb_null);

    tb_bool_t                 ok   = tb_false;
    gb_mesh_face_list_impl_t* impl = tb_null;
    do
    {
        // allocate the list
        impl = tb_malloc0_type(gb_mesh_face_list_impl_t);
        tb_assert_and_check_break(impl);

        // save the user element
        impl->element = element;

        // each pool item holds the face header followed by the element data
        impl->pool = tb_fixed_pool_init(tb_null, GB_MESH_FACE_LIST_GROW, sizeof(gb_mesh_face_t) + element.size, tb_null, gb_mesh_face_exit, (tb_cpointer_t)impl);
        tb_assert_and_check_break(impl->pool);

        // init the entry head with the same item layout
        tb_list_entry_init_(&impl->head, 0, sizeof(gb_mesh_face_t) + element.size, tb_null);

        // new faces are appended at the tail by default
        impl->order = GB_MESH_ORDER_INSERT_TAIL;

        // everything is up
        ok = tb_true;

    } while (0);

    // destroy the partially-built list on failure
    if (!ok)
    {
        if (impl) gb_mesh_face_list_exit((gb_mesh_face_list_ref_t)impl);
        impl = tb_null;
    }

    // ok?
    return (gb_mesh_face_list_ref_t)impl;
}