/* ////////////////////////////////////////////////////////////////////////////////////// * test */ static tb_void_t tb_find_int_test() { __tb_volatile__ tb_size_t i = 0; __tb_volatile__ tb_size_t n = 1000; // init data tb_long_t* data = (tb_long_t*)tb_nalloc0(n, sizeof(tb_long_t)); tb_assert_and_check_return(data); // init iterator tb_array_iterator_t array_iterator; tb_iterator_ref_t iterator = tb_array_iterator_init_long(&array_iterator, data, n); // make for (i = 0; i < n; i++) data[i] = i; // find tb_size_t itor = tb_iterator_tail(iterator); tb_hong_t time = tb_mclock(); for (i = 0; i < n; i++) itor = tb_find_all(iterator, (tb_pointer_t)data[800]); time = tb_mclock() - time; // item tb_long_t item = itor != tb_iterator_tail(iterator)? (tb_long_t)tb_iterator_item(iterator, itor) : 0; // time tb_trace_i("tb_find_int_all[%ld ?= %ld]: %lld ms", item, data[800], time); // free tb_free(data); }
/* //////////////////////////////////////////////////////////////////////////////////////
 * test
 */
/* benchmark tb_sort_all() over n random integers and verify the result is
 * sorted in ascending order. */
static tb_void_t tb_sort_int_test_perf(tb_size_t n)
{
    // the loop index
    __tb_volatile__ tb_size_t idx = 0;

    // allocate and zero the integer array
    tb_long_t* data = (tb_long_t*)tb_nalloc0(n, sizeof(tb_long_t));
    tb_assert_and_check_return(data);

    // wrap the array with a long-element iterator
    tb_array_iterator_t array_iterator;
    tb_iterator_ref_t   iterator = tb_iterator_make_for_long(&array_iterator, data, n);

    // fill with random values in [TB_MINS16, TB_MAXS16]
    tb_random_clear(tb_null);
    for (idx = 0; idx < n; idx++) data[idx] = tb_random_range(tb_null, TB_MINS16, TB_MAXS16);

    // sort it and measure the elapsed time
    tb_hong_t elapsed = tb_mclock();
    tb_sort_all(iterator, tb_null);
    elapsed = tb_mclock() - elapsed;

    // report
    tb_trace_i("tb_sort_int_all: %lld ms", elapsed);

    // verify ascending order
    for (idx = 1; idx < n; idx++) tb_assert_and_check_break(data[idx - 1] <= data[idx]);

    // cleanup
    tb_free(data);
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * interfaces
 */
/* Create a vector with the given grow size and element functions.
 *
 * grow   the grow increment and initial capacity (must be non-zero)
 * func   the element functions; size/data/dupl/repl/ndupl/nrepl are required
 *
 * Returns the vector reference, or tb_null on failure.
 */
tb_vector_ref_t tb_vector_init(tb_size_t grow, tb_item_func_t func)
{
    // validate arguments
    tb_assert_and_check_return_val(grow, tb_null);
    tb_assert_and_check_return_val(func.size && func.data && func.dupl && func.repl && func.ndupl && func.nrepl, tb_null);

    tb_bool_t         ok     = tb_false;
    tb_vector_impl_t* vector = tb_null;
    do
    {
        // allocate the zeroed vector header
        vector = tb_malloc0_type(tb_vector_impl_t);
        tb_assert_and_check_break(vector);

        // set up size, grow step, capacity and element functions
        vector->size = 0;
        vector->grow = grow;
        vector->maxn = grow;
        vector->func = func;
        tb_assert_and_check_break(vector->maxn < TB_VECTOR_MAXN);

        // wire up the iterator interface: random-access, mutable, both directions
        vector->itor.mode = TB_ITERATOR_MODE_FORWARD | TB_ITERATOR_MODE_REVERSE | TB_ITERATOR_MODE_RACCESS | TB_ITERATOR_MODE_MUTABLE;
        vector->itor.priv = tb_null;
        vector->itor.step = func.size;
        vector->itor.size = tb_vector_itor_size;
        vector->itor.head = tb_vector_itor_head;
        vector->itor.last = tb_vector_itor_last;
        vector->itor.tail = tb_vector_itor_tail;
        vector->itor.prev = tb_vector_itor_prev;
        vector->itor.next = tb_vector_itor_next;
        vector->itor.item = tb_vector_itor_item;
        vector->itor.copy = tb_vector_itor_copy;
        vector->itor.comp = tb_vector_itor_comp;
        vector->itor.remove = tb_vector_itor_remove;
        vector->itor.remove_range = tb_vector_itor_remove_range;

        // allocate the zeroed element storage
        vector->data = (tb_byte_t*)tb_nalloc0(vector->maxn, func.size);
        tb_assert_and_check_break(vector->data);

        // done
        ok = tb_true;

    } while (0);

    // roll back everything on failure
    if (!ok)
    {
        if (vector) tb_vector_exit((tb_vector_ref_t)vector);
        vector = tb_null;
    }
    return (tb_vector_ref_t)vector;
}
static tb_void_t tb_find_str_test() { __tb_volatile__ tb_size_t i = 0; __tb_volatile__ tb_size_t n = 1000; // init data tb_char_t** data = (tb_char_t**)tb_nalloc0(n, sizeof(tb_char_t*)); tb_assert_and_check_return(data); // init iterator tb_array_iterator_t array_iterator; tb_iterator_ref_t iterator = tb_array_iterator_init_str(&array_iterator, data, n); // make tb_char_t s[256] = {0}; for (i = 0; i < n; i++) { tb_long_t r = tb_snprintf(s, 256, "%04lu", i); s[r] = '\0'; data[i] = tb_strdup(s); } // find tb_size_t itor = tb_iterator_tail(iterator); tb_hong_t time = tb_mclock(); for (i = 0; i < n; i++) itor = tb_find_all(iterator, (tb_pointer_t)data[800]); time = tb_mclock() - time; // item tb_char_t* item = itor != tb_iterator_tail(iterator)? (tb_char_t*)tb_iterator_item(iterator, itor) : 0; // time tb_trace_i("tb_find_str_all[%s ?= %s]: %lld ms", item, data[800], time); // free data for (i = 0; i < n; i++) tb_free(data[i]); tb_free(data); }
static tb_void_t tb_sort_int_test_func_bubble() { // init __tb_volatile__ tb_size_t i = 0; __tb_volatile__ tb_size_t n = 20; // init data tb_long_t* data = (tb_long_t*)tb_nalloc0(n, sizeof(tb_long_t)); tb_assert_and_check_return(data); // init iterator tb_array_iterator_t array_iterator; tb_iterator_ref_t iterator = tb_iterator_make_for_long(&array_iterator, data, n); // trace tb_trace_i(""); // put tb_random_clear(tb_null); for (i = 0; i < n; i++) { data[i] = tb_random_range(tb_null, TB_MINS16, TB_MAXS16); tb_trace_i("bubble_put: %ld", data[i]); } // sort tb_heap_sort_all(iterator, tb_null); // trace tb_trace_i(""); // pop for (i = 0; i < n; i++) tb_trace_i("bubble_pop: %ld", data[i]); // free tb_free(data); }
/* benchmark tb_heap_sort_all() over n random hexadecimal strings and verify
 * the result is in ascending strcmp order. */
static tb_void_t tb_sort_str_test_perf_heap(tb_size_t n)
{
    // the loop index
    __tb_volatile__ tb_size_t idx = 0;

    // allocate and zero the string pointer array
    tb_char_t** data = (tb_char_t**)tb_nalloc0(n, sizeof(tb_char_t*));
    tb_assert_and_check_return(data);

    // wrap the array with a string-element iterator
    tb_array_iterator_t array_iterator;
    tb_iterator_ref_t   iterator = tb_iterator_make_for_str(&array_iterator, data, n);

    // fill with random hexadecimal strings
    tb_random_clear(tb_null);
    tb_char_t text[256] = {0};
    for (idx = 0; idx < n; idx++)
    {
        tb_long_t size = tb_snprintf(text, 256, "%x", tb_random_range(tb_null, 0, TB_MAXU32));
        text[size] = '\0';
        data[idx] = tb_strdup(text);
    }

    // heap-sort it and measure the elapsed time
    tb_hong_t elapsed = tb_mclock();
    tb_heap_sort_all(iterator, tb_null);
    elapsed = tb_mclock() - elapsed;

    // report
    tb_trace_i("tb_heap_sort_str_all: %lld ms", elapsed);

    // verify ascending order
    for (idx = 1; idx < n; idx++) tb_assert_and_check_break(tb_strcmp(data[idx - 1], data[idx]) <= 0);

    // free the strings, then the array
    for (idx = 0; idx < n; idx++) tb_free(data[idx]);
    tb_free(data);
}
/* Wait for I/O events from the kqueue reactor.
 *
 * rtor     the reactor instance (must wrap a kqueue reactor impl)
 * list     output array receiving the triggered aioe events
 * maxn     capacity of list
 * timeout  wait timeout in ms; a negative value blocks indefinitely, 0 polls
 *
 * Returns the number of events written to list, 0 on timeout, or -1 on
 * failure or when the reactor has been killed.
 */
static tb_long_t tb_aiop_rtor_kqueue_wait(tb_aiop_rtor_impl_t* rtor, tb_aioe_ref_t list, tb_size_t maxn, tb_long_t timeout)
{
    // check
    tb_aiop_rtor_kqueue_impl_t* impl = (tb_aiop_rtor_kqueue_impl_t*)rtor;
    tb_assert_and_check_return_val(impl && impl->kqfd >= 0 && rtor->aiop && list && maxn, -1);

    // the aiop
    tb_aiop_impl_t* aiop = rtor->aiop;
    tb_assert_and_check_return_val(aiop, -1);

    // init time: split the millisecond timeout into seconds + nanoseconds
    struct timespec t = {0};
    if (timeout > 0)
    {
        t.tv_sec = timeout / 1000;
        t.tv_nsec = (timeout % 1000) * 1000000;
    }

    // init grow: 1/8 of the reactor capacity, rounded up to 8
    tb_size_t grow = tb_align8((rtor->aiop->maxn >> 3) + 1);

    // init events buffer lazily on first wait
    if (!impl->evts)
    {
        impl->evtn = grow;
        impl->evts = tb_nalloc0(impl->evtn, sizeof(struct kevent));
        tb_assert_and_check_return_val(impl->evts, -1);
    }

    // wait events; a negative timeout passes tb_null => block indefinitely
    tb_long_t evtn = kevent(impl->kqfd, tb_null, 0, impl->evts, impl->evtn, timeout >= 0? &t : tb_null);
    tb_assert_and_check_return_val(evtn >= 0 && evtn <= impl->evtn, -1);

    // timeout?
    tb_check_return_val(evtn, 0);

    // the buffer was completely filled: grow it for the next wait, capped at maxn
    if (evtn == impl->evtn)
    {
        // grow size
        impl->evtn += grow;
        if (impl->evtn > rtor->aiop->maxn) impl->evtn = rtor->aiop->maxn;

        // grow data
        impl->evts = tb_ralloc(impl->evts, impl->evtn * sizeof(struct kevent));
        tb_assert_and_check_return_val(impl->evts, -1);
    }
    tb_assert(evtn <= impl->evtn);

    // limit to the caller's list capacity
    evtn = tb_min(evtn, maxn);

    // sync: translate each kevent into an aioe entry
    tb_size_t i = 0;
    tb_size_t wait = 0;
    for (i = 0; i < evtn; i++)
    {
        // the kevents
        struct kevent* e = impl->evts + i;

        // the aioo, stored in the kevent user data when the fd was registered
        tb_aioo_impl_t* aioo = (tb_aioo_impl_t*)e->udata;
        tb_assert_and_check_return_val(aioo && aioo->sock, -1);

        // the sock
        tb_socket_ref_t sock = aioo->sock;

        // spak? NOTE(review): spak[1] appears to be the internal wakeup socket — confirm
        if (sock == aiop->spak[1] && e->filter == EVFILT_READ)
        {
            // read spak: consume one wakeup byte
            tb_char_t spak = '\0';
            if (1 != tb_socket_recv(aiop->spak[1], (tb_byte_t*)&spak, 1)) return -1;

            // killed? 'k' means the reactor was asked to terminate
            if (spak == 'k') return -1;

            // continue it
            continue ;
        }

        // skip spak
        tb_check_continue(sock != aiop->spak[1]);

        // init the aioe
        tb_aioe_ref_t aioe = &list[wait++];
        aioe->code = TB_AIOE_CODE_NONE;
        aioe->aioo = (tb_aioo_ref_t)aioo;
        aioe->priv = aioo->priv;
        if (e->filter == EVFILT_READ)
        {
            aioe->code |= TB_AIOE_CODE_RECV;
            // a readable listening socket means an incoming connection
            if (aioo->code & TB_AIOE_CODE_ACPT) aioe->code |= TB_AIOE_CODE_ACPT;
        }
        if (e->filter == EVFILT_WRITE)
        {
            aioe->code |= TB_AIOE_CODE_SEND;
            // a writable connecting socket means the connect completed
            if (aioo->code & TB_AIOE_CODE_CONN) aioe->code |= TB_AIOE_CODE_CONN;
        }
        // error with no recv/send bit set: report both so the caller notices the failure
        if ((e->flags & EV_ERROR) && !(aioe->code & (TB_AIOE_CODE_RECV | TB_AIOE_CODE_SEND)))
            aioe->code |= TB_AIOE_CODE_RECV | TB_AIOE_CODE_SEND;

        // oneshot? clear the registered code so the event will not fire again
        if (aioo->code & TB_AIOE_CODE_ONESHOT)
        {
            aioo->code = TB_AIOE_CODE_NONE;
            aioo->priv = tb_null;
        }
    }

    // ok
    return wait;
}
/* Wait for I/O events from the epoll reactor.
 *
 * rtor     the reactor instance (must wrap an epoll reactor impl)
 * list     output array receiving the triggered aioe events
 * maxn     capacity of list
 * timeout  wait timeout in ms, passed straight to epoll_wait (-1 blocks)
 *
 * Returns the number of events written to list, 0 on timeout or EINTR, or
 * -1 on failure or when the reactor has been killed.
 */
static tb_long_t tb_aiop_rtor_epoll_wait(tb_aiop_rtor_impl_t* rtor, tb_aioe_ref_t list, tb_size_t maxn, tb_long_t timeout)
{
    // check
    tb_aiop_rtor_epoll_impl_t* impl = (tb_aiop_rtor_epoll_impl_t*)rtor;
    tb_assert_and_check_return_val(impl && impl->epfd > 0, -1);

    // the aiop
    tb_aiop_impl_t* aiop = rtor->aiop;
    tb_assert_and_check_return_val(aiop, -1);

    // init grow: 1/8 of the reactor capacity, rounded up to 8
    tb_size_t grow = tb_align8((rtor->aiop->maxn >> 3) + 1);

    // init events buffer lazily on first wait
    if (!impl->evts)
    {
        impl->evtn = grow;
        impl->evts = tb_nalloc0(impl->evtn, sizeof(struct epoll_event));
        tb_assert_and_check_return_val(impl->evts, -1);
    }

    // wait events
    tb_long_t evtn = epoll_wait(impl->epfd, impl->evts, impl->evtn, timeout);

    // interrupted?(for gdb?) continue it
    if (evtn < 0 && errno == EINTR) return 0;

    // check error?
    tb_assert_and_check_return_val(evtn >= 0 && evtn <= impl->evtn, -1);

    // timeout?
    tb_check_return_val(evtn, 0);

    // the buffer was completely filled: grow it for the next wait, capped at maxn
    if (evtn == impl->evtn)
    {
        // grow size
        impl->evtn += grow;
        if (impl->evtn > rtor->aiop->maxn) impl->evtn = rtor->aiop->maxn;

        // grow data
        impl->evts = tb_ralloc(impl->evts, impl->evtn * sizeof(struct epoll_event));
        tb_assert_and_check_return_val(impl->evts, -1);
    }
    tb_assert(evtn <= impl->evtn);

    // limit to the caller's list capacity
    evtn = tb_min(evtn, maxn);

    // sync: translate each epoll_event into an aioe entry
    tb_size_t i = 0;
    tb_size_t wait = 0;
    for (i = 0; i < evtn; i++)
    {
        // the aioo, stored in the epoll user data when the fd was registered
        tb_aioo_impl_t* aioo = (tb_aioo_impl_t*)tb_u2p(impl->evts[i].data.u64);
        tb_assert_and_check_return_val(aioo, -1);

        // the sock
        tb_socket_ref_t sock = aioo->sock;
        tb_assert_and_check_return_val(sock, -1);

        // the events
        tb_size_t events = impl->evts[i].events;

        // spak? NOTE(review): spak[1] appears to be the internal wakeup socket — confirm
        if (sock == aiop->spak[1] && (events & EPOLLIN))
        {
            // read spak: consume one wakeup byte
            tb_char_t spak = '\0';
            if (1 != tb_socket_recv(aiop->spak[1], (tb_byte_t*)&spak, 1)) return -1;

            // killed? 'k' means the reactor was asked to terminate
            if (spak == 'k') return -1;

            // continue it
            continue ;
        }

        // skip spak
        tb_check_continue(sock != aiop->spak[1]);

        // save aioe
        tb_aioe_ref_t aioe = &list[wait++];
        aioe->code = TB_AIOE_CODE_NONE;
        aioe->priv = aioo->priv;
        aioe->aioo = (tb_aioo_ref_t)aioo;
        if (events & EPOLLIN)
        {
            aioe->code |= TB_AIOE_CODE_RECV;
            // a readable listening socket means an incoming connection
            if (aioo->code & TB_AIOE_CODE_ACPT) aioe->code |= TB_AIOE_CODE_ACPT;
        }
        if (events & EPOLLOUT)
        {
            aioe->code |= TB_AIOE_CODE_SEND;
            // a writable connecting socket means the connect completed
            if (aioo->code & TB_AIOE_CODE_CONN) aioe->code |= TB_AIOE_CODE_CONN;
        }
        // hangup/error with no recv/send bit set: report both so the caller notices
        if (events & (EPOLLHUP | EPOLLERR) && !(aioe->code & (TB_AIOE_CODE_RECV | TB_AIOE_CODE_SEND)))
            aioe->code |= TB_AIOE_CODE_RECV | TB_AIOE_CODE_SEND;

        // oneshot? clear it
        if (aioo->code & TB_AIOE_CODE_ONESHOT)
        {
            // clear code
            aioo->code = TB_AIOE_CODE_NONE;
            aioo->priv = tb_null;

            // clear events manually if no epoll oneshot
#ifndef EPOLLONESHOT
            struct epoll_event e = {0};
            if (epoll_ctl(impl->epfd, EPOLL_CTL_DEL, tb_sock2fd(aioo->sock), &e) < 0)
            {
                // trace
                tb_trace_e("clear aioo[%p] failed manually for oneshot, error: %d", aioo, errno);
            }
#endif
        }
    }

    // ok
    return wait;
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */
/* Create a circular queue with the given capacity and element functions.
 *
 * maxn     the maximum element count; 0 selects TB_CIRCLE_QUEUE_SIZE_DEFAULT
 * element  the element functions; size/dupl/data are required
 *
 * Returns the queue reference, or tb_null on failure.
 */
tb_circle_queue_ref_t tb_circle_queue_init(tb_size_t maxn, tb_element_t element)
{
    // validate the element functions
    tb_assert_and_check_return_val(element.size && element.dupl && element.data, tb_null);

    tb_bool_t          ok   = tb_false;
    tb_circle_queue_t* impl = tb_null;
    do
    {
        // allocate the zeroed queue header
        impl = tb_malloc0_type(tb_circle_queue_t);
        tb_assert_and_check_break(impl);

        // fall back to the default capacity
        if (!maxn) maxn = TB_CIRCLE_QUEUE_SIZE_DEFAULT;

        // reserve one extra slot so a full queue is distinguishable from an empty one
        impl->maxn = maxn + 1;
        impl->element = element;

        // the shared iterator operation table
        static tb_iterator_op_t op =
        {
            tb_circle_queue_itor_size
        ,   tb_circle_queue_itor_head
        ,   tb_circle_queue_itor_last
        ,   tb_circle_queue_itor_tail
        ,   tb_circle_queue_itor_prev
        ,   tb_circle_queue_itor_next
        ,   tb_circle_queue_itor_item
        ,   tb_circle_queue_itor_comp
        ,   tb_circle_queue_itor_copy
        ,   tb_null
        ,   tb_null
        };

        // wire up the iterator interface
        impl->itor.priv = tb_null;
        impl->itor.step = element.size;
        impl->itor.mode = TB_ITERATOR_MODE_FORWARD | TB_ITERATOR_MODE_REVERSE | TB_ITERATOR_MODE_MUTABLE;
        impl->itor.op   = &op;

        // allocate the zeroed element storage
        impl->data = (tb_byte_t*)tb_nalloc0(impl->maxn, element.size);
        tb_assert_and_check_break(impl->data);

        // done
        ok = tb_true;

    } while (0);

    // roll back everything on failure
    if (!ok)
    {
        if (impl) tb_circle_queue_exit((tb_circle_queue_ref_t)impl);
        impl = tb_null;
    }
    return (tb_circle_queue_ref_t)impl;
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * interfaces
 */
/* Create the aiop-based proactor for the given aicp.
 *
 * aicp  the async I/O completion port instance; aicp->maxn sizes every
 *       internal structure below
 *
 * Returns the proactor impl, or tb_null on failure (all partially-created
 * resources are released via tb_aiop_ptor_exit).
 */
static tb_aicp_ptor_impl_t* tb_aiop_ptor_init(tb_aicp_impl_t* aicp)
{
    // check
    tb_assert_and_check_return_val(aicp && aicp->maxn, tb_null);

    // done
    tb_bool_t ok = tb_false;
    tb_aiop_ptor_impl_t* impl = tb_null;
    do
    {
        // make ptor
        impl = tb_malloc0_type(tb_aiop_ptor_impl_t);
        tb_assert_and_check_break(impl);

        // init base: the proactor virtual-function table
        impl->base.aicp = aicp;
        impl->base.step = sizeof(tb_aiop_aico_t);
        impl->base.kill = tb_aiop_ptor_kill;
        impl->base.exit = tb_aiop_ptor_exit;
        impl->base.addo = tb_aiop_ptor_addo;
        impl->base.kilo = tb_aiop_ptor_kilo;
        impl->base.post = tb_aiop_ptor_post;
        impl->base.loop_spak = tb_aiop_ptor_spak;

        // init lock
        if (!tb_spinlock_init(&impl->lock)) break;

        // init wait semaphore, initially 0
        impl->wait = tb_semaphore_init(0);
        tb_assert_and_check_break(impl->wait);

        // init aiop
        impl->aiop = tb_aiop_init(aicp->maxn);
        tb_assert_and_check_break(impl->aiop);

        // check: the underlying poller must support all event codes + oneshot
        tb_assert_and_check_break(tb_aiop_have(impl->aiop, TB_AIOE_CODE_EALL | TB_AIOE_CODE_ONESHOT));

        // init spak queues; capacity is maxn/16 + 16 — presumably a pending-event
        // working set heuristic (TODO confirm)
        impl->spak[0] = tb_queue_init((aicp->maxn >> 4) + 16, tb_item_func_mem(sizeof(tb_aice_t), tb_null, tb_null));
        impl->spak[1] = tb_queue_init((aicp->maxn >> 4) + 16, tb_item_func_mem(sizeof(tb_aice_t), tb_null, tb_null));
        tb_assert_and_check_break(impl->spak[0] && impl->spak[1]);

        // init file
        if (!tb_aicp_file_init(impl)) break;

        // init list: the scratch buffer for waited events
        impl->maxn = (aicp->maxn >> 4) + 16;
        impl->list = tb_nalloc0(impl->maxn, sizeof(tb_aioe_t));
        tb_assert_and_check_break(impl->list);

        // init timer and using cache time
        impl->timer = tb_timer_init((aicp->maxn >> 4) + 16, tb_true);
        tb_assert_and_check_break(impl->timer);

        // init ltimer and using cache time
        impl->ltimer = tb_ltimer_init(aicp->maxn, TB_LTIMER_TICK_S, tb_true);
        tb_assert_and_check_break(impl->ltimer);

        // register lock profiler
#ifdef TB_LOCK_PROFILER_ENABLE
        tb_lock_profiler_register(tb_lock_profiler(), (tb_pointer_t)&impl->lock, "aicp_aiop");
#endif

        // init loop: start the spak worker thread last, once everything it uses exists
        impl->loop = tb_thread_init(tb_null, tb_aiop_spak_loop, impl, 0);
        tb_assert_and_check_break(impl->loop);

        // ok
        ok = tb_true;

    } while (0);

    // failed?
    if (!ok)
    {
        // exit it
        if (impl) tb_aiop_ptor_exit((tb_aicp_ptor_impl_t*)impl);
        return tb_null;
    }

    // ok?
    return (tb_aicp_ptor_impl_t*)impl;
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */
/* Create a hash map with the given bucket count and element functions.
 *
 * bucket_size   the requested bucket count; 0 selects the default, and the
 *               value is rounded up to a power of two
 * element_name  the key element functions; size/hash/comp/data/dupl required
 * element_data  the value element functions; data/dupl/repl required
 *
 * Returns the hash map reference, or tb_null on failure.
 *
 * Fix: the bucket array is used as tb_hash_map_item_list_t** but was
 * allocated with element size sizeof(tb_size_t); allocate with the pointer
 * size instead, so the code is correct even where the two sizes differ.
 */
tb_hash_map_ref_t tb_hash_map_init(tb_size_t bucket_size, tb_element_t element_name, tb_element_t element_data)
{
    // check
    tb_assert_and_check_return_val(element_name.size && element_name.hash && element_name.comp && element_name.data && element_name.dupl, tb_null);
    tb_assert_and_check_return_val(element_data.data && element_data.dupl && element_data.repl, tb_null);

    // check bucket size
    if (!bucket_size) bucket_size = TB_HASH_MAP_BUCKET_SIZE_DEFAULT;
    tb_assert_and_check_return_val(bucket_size <= TB_HASH_MAP_BUCKET_SIZE_LARGE, tb_null);

    // done
    tb_bool_t ok = tb_false;
    tb_hash_map_impl_t* impl = tb_null;
    do
    {
        // make hash_map
        impl = tb_malloc0_type(tb_hash_map_impl_t);
        tb_assert_and_check_break(impl);

        // init hash_map func
        impl->element_name = element_name;
        impl->element_data = element_data;

        // init item itor
        impl->itor.mode = TB_ITERATOR_MODE_FORWARD | TB_ITERATOR_MODE_MUTABLE;
        impl->itor.priv = tb_null;
        impl->itor.step = sizeof(tb_hash_map_item_t);
        impl->itor.size = tb_hash_map_itor_size;
        impl->itor.head = tb_hash_map_itor_head;
        impl->itor.tail = tb_hash_map_itor_tail;
        impl->itor.prev = tb_null;
        impl->itor.next = tb_hash_map_itor_next;
        impl->itor.item = tb_hash_map_itor_item;
        impl->itor.copy = tb_hash_map_itor_copy;
        impl->itor.comp = tb_hash_map_itor_comp;
        impl->itor.remove = tb_hash_map_itor_remove;
        impl->itor.remove_range = tb_hash_map_itor_remove_range;

        // init hash_map size: round the bucket count up to a power of two
        impl->hash_size = tb_align_pow2(bucket_size);
        tb_assert_and_check_break(impl->hash_size <= TB_HASH_MAP_BUCKET_MAXN);

        // init hash_map list: one list pointer per bucket
        // (was sizeof(tb_size_t) — only accidentally equal to the pointer size)
        impl->hash_list = (tb_hash_map_item_list_t**)tb_nalloc0(impl->hash_size, sizeof(tb_hash_map_item_list_t*));
        tb_assert_and_check_break(impl->hash_list);

        // init item grow: sqrt of the bucket count, at least 8, power of two
        impl->item_grow = tb_isqrti(bucket_size);
        if (impl->item_grow < 8) impl->item_grow = 8;
        impl->item_grow = tb_align_pow2(impl->item_grow);

        // ok
        ok = tb_true;

    } while (0);

    // failed?
    if (!ok)
    {
        // exit it
        if (impl) tb_hash_map_exit((tb_hash_map_ref_t)impl);
        impl = tb_null;
    }

    // ok?
    return (tb_hash_map_ref_t)impl;
}