/* Exit the poll-based poller: release the wakeup socket pair, the socket
 * hash, both fd vectors, and finally the poller object itself.
 */
tb_void_t tb_poller_exit(tb_poller_ref_t self)
{
    // check
    tb_poller_poll_ref_t poller = (tb_poller_poll_ref_t)self;
    tb_assert_and_check_return(poller);

    // exit the pair sockets used for waking the poller up
    tb_size_t i;
    for (i = 0; i < 2; i++)
    {
        if (poller->pair[i]) tb_socket_exit(poller->pair[i]);
        poller->pair[i] = tb_null;
    }

    // exit the socket hash buffer
    if (poller->hash) tb_free(poller->hash);
    poller->hash      = tb_null;
    poller->hash_size = 0;

    // exit the poll fds vector
    if (poller->pfds) tb_vector_exit(poller->pfds);
    poller->pfds = tb_null;

    // exit the copied fds vector
    if (poller->cfds) tb_vector_exit(poller->cfds);
    poller->cfds = tb_null;

    // free the poller itself
    tb_free(poller);
}
/* Exit the skia device: delete every owned skia object (path, paint,
 * canvas, bitmap), free the points buffer, then free the device itself.
 */
static tb_void_t gb_device_skia_exit(gb_device_impl_t* device)
{
    // check
    gb_skia_device_ref_t sdevice = (gb_skia_device_ref_t)device;
    tb_assert_and_check_return(sdevice);

    // delete the skia path object
    if (sdevice->path) delete sdevice->path;
    sdevice->path = tb_null;

    // free the temporary points buffer
    if (sdevice->points) tb_free(sdevice->points);
    sdevice->points = tb_null;

    // delete the skia paint object
    if (sdevice->paint) delete sdevice->paint;
    sdevice->paint = tb_null;

    // delete the skia canvas object
    if (sdevice->canvas) delete sdevice->canvas;
    sdevice->canvas = tb_null;

    // delete the skia bitmap object
    if (sdevice->bitmap) delete sdevice->bitmap;
    sdevice->bitmap = tb_null;

    // free the device itself
    tb_free(sdevice);
}
/* Exit the regex object: release the match buffer, the results vector,
 * the pcre2 match data and compiled code, then the object itself.
 */
tb_void_t tb_regex_exit(tb_regex_ref_t self)
{
    // check
    tb_regex_t* impl = (tb_regex_t*)self;
    tb_assert_and_check_return(impl);

    // release the match output buffer
    if (impl->buffer_data) tb_free(impl->buffer_data);
    impl->buffer_data = tb_null;
    impl->buffer_maxn = 0;

    // release the results vector
    if (impl->results) tb_vector_exit(impl->results);
    impl->results = tb_null;

    // release the pcre2 match data
    if (impl->match_data) pcre2_match_data_free(impl->match_data);
    impl->match_data = tb_null;

    // release the compiled pcre2 code
    if (impl->code) pcre2_code_free(impl->code);
    impl->code = tb_null;

    // release the regex object itself
    tb_free(impl);
}
/* Exit the aiop-based aicp proactor: stop the spak loop thread first, then
 * tear down every owned resource (spak queues, aiop, aioe list, wait
 * semaphore, timers, spinlock) and free the proactor.
 *
 * NOTE(review): if tb_thread_wait() times out after 5s the loop thread may
 * still be running while we free everything below; the code only traces the
 * error and proceeds — confirm the thread cannot touch impl afterwards.
 */
static tb_void_t tb_aiop_ptor_exit(tb_aicp_ptor_impl_t* ptor)
{
    // check
    tb_aiop_ptor_impl_t* impl = (tb_aiop_ptor_impl_t*)ptor;
    tb_assert_and_check_return(impl);

    // trace
    tb_trace_d("exit");

    // exit file
    tb_aicp_file_exit(impl);

    // exit loop: wait up to 5s for the spak thread to finish before detaching it
    if (impl->loop)
    {
        tb_long_t wait = 0;
        if ((wait = tb_thread_wait(impl->loop, 5000)) <= 0)
        {
            // trace
            tb_trace_e("loop[%p]: wait failed: %ld!", impl->loop, wait);
        }
        tb_thread_exit(impl->loop);
        impl->loop = tb_null;
    }

    // exit spak queues under the lock (they were shared with the loop thread)
    tb_spinlock_enter(&impl->lock);
    if (impl->spak[0]) tb_queue_exit(impl->spak[0]);
    if (impl->spak[1]) tb_queue_exit(impl->spak[1]);
    impl->spak[0] = tb_null;
    impl->spak[1] = tb_null;
    tb_spinlock_leave(&impl->lock);

    // exit aiop
    if (impl->aiop) tb_aiop_exit(impl->aiop);
    impl->aiop = tb_null;

    // exit the aioe list buffer
    if (impl->list) tb_free(impl->list);
    impl->list = tb_null;

    // exit the wait semaphore
    if (impl->wait) tb_semaphore_exit(impl->wait);
    impl->wait = tb_null;

    // exit timer
    if (impl->timer) tb_timer_exit(impl->timer);
    impl->timer = tb_null;

    // exit ltimer
    if (impl->ltimer) tb_ltimer_exit(impl->ltimer);
    impl->ltimer = tb_null;

    // exit lock (must come after the last locked section above)
    tb_spinlock_exit(&impl->lock);

    // free the proactor itself
    tb_free(impl);
}
/* Exit the bitmap: free the pixel data only when this bitmap owns it,
 * then free the bitmap object itself.
 */
tb_void_t gb_bitmap_exit(gb_bitmap_ref_t bitmap)
{
    // check
    gb_bitmap_impl_t* bitmap_impl = (gb_bitmap_impl_t*)bitmap;
    tb_assert_and_check_return(bitmap_impl);

    // free the pixel data only if we own it (it may wrap foreign memory)
    if (bitmap_impl->is_owner && bitmap_impl->data) tb_free(bitmap_impl->data);
    bitmap_impl->data = tb_null;

    // free the bitmap object itself
    tb_free(bitmap_impl);
}
/* Exit the circle queue: clear all items, free the item buffer, then
 * free the queue object itself.
 */
tb_void_t tb_circle_queue_exit(tb_circle_queue_ref_t self)
{
    // check
    tb_circle_queue_t* impl = (tb_circle_queue_t*)self;
    tb_assert_and_check_return(impl);

    // remove all items first (runs item destructors if any)
    tb_circle_queue_clear(self);

    // free the item buffer
    if (impl->data) tb_free(impl->data);

    // free the queue object itself
    tb_free(impl);
}
/* Exit the hash map: clear all items, free the bucket list, then free
 * the map object itself.
 */
tb_void_t tb_hash_map_exit(tb_hash_map_ref_t hash_map)
{
    // check
    tb_hash_map_impl_t* impl = (tb_hash_map_impl_t*)hash_map;
    tb_assert_and_check_return(impl);

    // remove all items first (runs item destructors if any)
    tb_hash_map_clear(hash_map);

    // free the bucket list
    if (impl->hash_list) tb_free(impl->hash_list);

    // free the hash map object itself
    tb_free(impl);
}
/* Exit the kqueue reactor: free the pending events array, close the
 * kqueue descriptor, then free the reactor itself.
 */
static tb_void_t tb_aiop_rtor_kqueue_exit(tb_aiop_rtor_impl_t* rtor)
{
    tb_aiop_rtor_kqueue_impl_t* impl = (tb_aiop_rtor_kqueue_impl_t*)rtor;
    tb_check_return(impl);

    // free the pending events array
    if (impl->evts) tb_free(impl->evts);

    // close the kqueue descriptor
    if (impl->kqfd >= 0) close(impl->kqfd);

    // free the reactor itself
    tb_free(impl);
}
/* Exit the select reactor: clear all fd sets, exit the socket hash,
 * destroy both spinlocks, then free the reactor itself.
 */
static tb_void_t tb_aiop_rtor_select_exit(tb_aiop_rtor_impl_t* rtor)
{
    tb_aiop_rtor_select_impl_t* impl = (tb_aiop_rtor_select_impl_t*)rtor;
    tb_check_return(impl);

    // clear every select fd set under the pfds lock
    tb_spinlock_enter(&impl->lock.pfds);
    FD_ZERO(&impl->rfdi);
    FD_ZERO(&impl->wfdi);
    FD_ZERO(&impl->rfdo);
    FD_ZERO(&impl->wfdo);
    tb_spinlock_leave(&impl->lock.pfds);

    // exit the socket hash under its own lock
    tb_spinlock_enter(&impl->lock.hash);
    if (impl->hash) tb_hash_map_exit(impl->hash);
    impl->hash = tb_null;
    tb_spinlock_leave(&impl->lock.hash);

    // destroy both locks (after the last locked sections above)
    tb_spinlock_exit(&impl->lock.pfds);
    tb_spinlock_exit(&impl->lock.hash);

    // free the reactor itself
    tb_free(impl);
}
tb_mutex_ref_t tb_mutex_init() { // done tb_bool_t ok = tb_false; tb_spinlock_ref_t lock = tb_null; do { // make lock lock = tb_malloc0_type(tb_spinlock_t); tb_assert_and_check_break(lock); // init lock if (!tb_spinlock_init(lock)) break; // ok ok = tb_true; } while (0); // failed? if (!ok) { // exit it tb_free(lock); lock = tb_null; } // ok? return (tb_mutex_ref_t)lock; }
/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */

/* Init a posix semaphore with the given initial count.
 *
 * Returns the semaphore reference, or tb_null on failure; the caller owns it.
 */
tb_semaphore_ref_t tb_semaphore_init(tb_size_t init)
{
    sem_t*    semaphore = tb_null;
    tb_bool_t ok        = tb_false;
    do
    {
        // allocate the posix semaphore object
        semaphore = tb_malloc0_type(sem_t);
        tb_assert_and_check_break(semaphore);

        // initialize it (pshared = 0: shared between threads of this process only)
        if (sem_init(semaphore, 0, init) < 0) break;

        // ok
        ok = tb_true;

    } while (0);

    // roll back on failure
    if (!ok)
    {
        if (semaphore) tb_free(semaphore);
        semaphore = tb_null;
    }

    return (tb_semaphore_ref_t)semaphore;
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * interfaces
 */

/* Init a data object from the contents of the given url.
 *
 * Opens a stream on the url, reads all data, wraps it in a data object.
 * Returns the object, or tb_null on any failure. The caller owns the object.
 */
tb_object_ref_t tb_object_data_init_from_url(tb_char_t const* url)
{
    // check
    tb_assert_and_check_return_val(url, tb_null);

    // init stream
    tb_stream_ref_t stream = tb_stream_init_from_url(url);
    tb_assert_and_check_return_val(stream, tb_null);

    // make object from the stream contents
    tb_object_ref_t object = tb_null;
    if (tb_stream_open(stream))
    {
        // read all data
        tb_size_t  size = 0;
        tb_byte_t* data = (tb_byte_t*)tb_stream_bread_all(stream, tb_false, &size);
        if (data)
        {
            // make object
            object = tb_object_data_init_from_data(data, size);

            // exit data
            tb_free(data);
        }
    }

    /* exit stream unconditionally
     * (previously this was inside the if-block, leaking the stream whenever
     * tb_stream_open() failed)
     */
    tb_stream_exit(stream);

    // ok?
    return object;
}
/* Exit the poll reactor: release both fd vectors and the socket hash,
 * destroy both spinlocks, then free the reactor itself.
 */
static tb_void_t tb_aiop_rtor_poll_exit(tb_aiop_rtor_impl_t* rtor)
{
    tb_aiop_rtor_poll_impl_t* impl = (tb_aiop_rtor_poll_impl_t*)rtor;
    tb_check_return(impl);

    // exit the poll fds vector under the pfds lock
    tb_spinlock_enter(&impl->lock.pfds);
    if (impl->pfds) tb_vector_exit(impl->pfds);
    impl->pfds = tb_null;
    tb_spinlock_leave(&impl->lock.pfds);

    // exit the copied fds vector
    if (impl->cfds) tb_vector_exit(impl->cfds);
    impl->cfds = tb_null;

    // exit the socket hash under its own lock
    tb_spinlock_enter(&impl->lock.hash);
    if (impl->hash) tb_hash_exit(impl->hash);
    impl->hash = tb_null;
    tb_spinlock_leave(&impl->lock.hash);

    // destroy both locks (after the last locked sections above)
    tb_spinlock_exit(&impl->lock.pfds);
    tb_spinlock_exit(&impl->lock.hash);

    // free the reactor itself
    tb_free(impl);
}
/*!the insertion sort * * <pre> * old: 5 2 6 2 8 6 1 * * (hole) * step1: ((5)) 2 6 2 8 6 1 * (next) <= * * (hole) * step2: ((2)) (5) 6 2 8 6 1 * (next) <= * * (hole) * step3: 2 5 ((6)) 2 8 6 1 * (next) <= * * (hole) * step4: 2 ((2)) (5) (6) 8 6 1 * (next) <= * * (hole) * step5: 2 2 5 6 ((8)) 6 1 * (next) <= * * (hole) * step6: 2 2 5 6 ((6)) (8) 1 * (next) <= * * (hole) * step7: ((1)) (2) (2) (5) (6) (6) (8) * (next) * </pre> */ tb_void_t tb_insert_sort(tb_iterator_ref_t iterator, tb_size_t head, tb_size_t tail, tb_iterator_comp_t comp) { // check tb_assert_and_check_return(iterator); tb_assert_and_check_return((tb_iterator_mode(iterator) & TB_ITERATOR_MODE_FORWARD)); tb_assert_and_check_return((tb_iterator_mode(iterator) & TB_ITERATOR_MODE_REVERSE)); tb_check_return(head != tail); // init tb_size_t step = tb_iterator_step(iterator); tb_pointer_t temp = step > sizeof(tb_pointer_t)? tb_malloc(step) : tb_null; tb_assert_and_check_return(step <= sizeof(tb_pointer_t) || temp); // the comparer if (!comp) comp = tb_iterator_comp; // sort tb_size_t last, next; for (next = tb_iterator_next(iterator, head); next != tail; next = tb_iterator_next(iterator, next)) { // save next if (step <= sizeof(tb_pointer_t)) temp = tb_iterator_item(iterator, next); else tb_memcpy(temp, tb_iterator_item(iterator, next), step); // look for hole and move elements[hole, next - 1] => [hole + 1, next] for (last = next; last != head && (last = tb_iterator_prev(iterator, last), comp(iterator, temp, tb_iterator_item(iterator, last)) < 0); next = last) tb_iterator_copy(iterator, next, tb_iterator_item(iterator, last)); // item => hole tb_iterator_copy(iterator, next, temp); } // free if (temp && step > sizeof(tb_pointer_t)) tb_free(temp); }
/* Exit the io scheduler: release the poller (and, in non-micro builds,
 * both timers), detach the scheduler back-reference, then free it.
 */
tb_void_t tb_lo_scheduler_io_exit(tb_lo_scheduler_io_ref_t scheduler_io)
{
    // check
    tb_assert_and_check_return(scheduler_io);

    // exit the poller
    if (scheduler_io->poller) tb_poller_exit(scheduler_io->poller);
    scheduler_io->poller = tb_null;

#ifndef TB_CONFIG_MICRO_ENABLE
    // exit the timer
    if (scheduler_io->timer) tb_timer_exit(scheduler_io->timer);
    scheduler_io->timer = tb_null;

    // exit the low-precision timer
    if (scheduler_io->ltimer) tb_ltimer_exit(scheduler_io->ltimer);
    scheduler_io->ltimer = tb_null;
#endif

    // detach the scheduler reference (not owned here)
    scheduler_io->scheduler = tb_null;

    // free the io scheduler itself
    tb_free(scheduler_io);
}
/* ////////////////////////////////////////////////////////////////////////////////////// * test */ static tb_void_t tb_find_int_test() { __tb_volatile__ tb_size_t i = 0; __tb_volatile__ tb_size_t n = 1000; // init data tb_long_t* data = (tb_long_t*)tb_nalloc0(n, sizeof(tb_long_t)); tb_assert_and_check_return(data); // init iterator tb_array_iterator_t array_iterator; tb_iterator_ref_t iterator = tb_array_iterator_init_long(&array_iterator, data, n); // make for (i = 0; i < n; i++) data[i] = i; // find tb_size_t itor = tb_iterator_tail(iterator); tb_hong_t time = tb_mclock(); for (i = 0; i < n; i++) itor = tb_find_all(iterator, (tb_pointer_t)data[800]); time = tb_mclock() - time; // item tb_long_t item = itor != tb_iterator_tail(iterator)? (tb_long_t)tb_iterator_item(iterator, itor) : 0; // time tb_trace_i("tb_find_int_all[%ld ?= %ld]: %lld ms", item, data[800], time); // free tb_free(data); }
/* Exit the aiop: shut down the reactor, release the spak socket pair and
 * the aioo pool, destroy the lock, then free the aiop itself.
 */
tb_void_t tb_aiop_exit(tb_aiop_ref_t aiop)
{
    // check
    tb_aiop_impl_t* impl = (tb_aiop_impl_t*)aiop;
    tb_assert_and_check_return(impl);

    // exit the reactor through its own exit hook
    if (impl->rtor && impl->rtor->exit) impl->rtor->exit(impl->rtor);

    // exit the spak socket pair used for waking the aiop up
    tb_size_t i;
    for (i = 0; i < 2; i++)
    {
        if (impl->spak[i]) tb_socket_exit(impl->spak[i]);
        impl->spak[i] = tb_null;
    }

    // exit the aioo pool under the lock
    tb_spinlock_enter(&impl->lock);
    if (impl->pool) tb_fixed_pool_exit(impl->pool);
    impl->pool = tb_null;
    tb_spinlock_leave(&impl->lock);

    // destroy the lock (after the last locked section above)
    tb_spinlock_exit(&impl->lock);

    // free the aiop itself
    tb_free(impl);
}
/* Exit the vector: clear all items, free the item buffer, then free the
 * vector object itself.
 */
tb_void_t tb_vector_exit(tb_vector_ref_t vector)
{
    // check
    tb_vector_impl_t* impl = (tb_vector_impl_t*)vector;
    tb_assert_and_check_return(impl);

    // remove all items first (runs item destructors if any)
    tb_vector_clear(vector);

    // free the item buffer
    if (impl->data) tb_free(impl->data);
    impl->data = tb_null;

    // free the vector object itself
    tb_free(impl);
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * test
 */

/* benchmark tb_sort_all() on n random integers, then verify the result is sorted */
static tb_void_t tb_sort_int_test_perf(tb_size_t n)
{
    __tb_volatile__ tb_size_t i = 0;

    // allocate the integer array
    tb_long_t* data = (tb_long_t*)tb_nalloc0(n, sizeof(tb_long_t));
    tb_assert_and_check_return(data);

    // wrap it in an array iterator
    tb_array_iterator_t array_iterator;
    tb_iterator_ref_t   iterator = tb_iterator_make_for_long(&array_iterator, data, n);

    // fill with random values in [TB_MINS16, TB_MAXS16]
    tb_random_clear(tb_null);
    for (i = 0; i < n; i++) data[i] = tb_random_range(tb_null, TB_MINS16, TB_MAXS16);

    // sort and time it
    tb_hong_t time = tb_mclock();
    tb_sort_all(iterator, tb_null);
    time = tb_mclock() - time;

    // trace
    tb_trace_i("tb_sort_int_all: %lld ms", time);

    // verify ascending order
    for (i = 1; i < n; i++) tb_assert_and_check_break(data[i - 1] <= data[i]);

    // free
    tb_free(data);
}
/* Exit the heap: clear all items, free the item buffer, then free the
 * heap object itself.
 */
tb_void_t tb_heap_exit(tb_heap_ref_t heap)
{
    // check
    tb_heap_impl_t* impl = (tb_heap_impl_t*)heap;
    tb_assert_and_check_return(impl);

    // remove all items first (runs item destructors if any)
    tb_heap_clear(heap);

    // free the item buffer
    if (impl->data) tb_free(impl->data);
    impl->data = tb_null;

    // free the heap object itself
    tb_free(impl);
}
/* Serialize the given object into a new data object using the given format.
 *
 * Starts with a 4KB buffer and doubles it until tb_object_writ_to_data()
 * succeeds. Returns the data object, or tb_null on failure; the caller owns
 * the returned object.
 *
 * NOTE(review): a persistent (non-size-related) writ failure also keeps the
 * loop doubling maxn — same as before this fix; the API gives no way to tell
 * "buffer too small" from "error". Confirm against tb_object_writ_to_data.
 */
tb_object_ref_t tb_object_data(tb_object_ref_t object, tb_size_t format)
{
    // check
    tb_assert_and_check_return_val(object, tb_null);

    // done
    tb_object_ref_t odata = tb_null;
    tb_size_t       maxn  = 4096;
    tb_byte_t*      data  = tb_null;
    do
    {
        /* make data
         *
         * grow through a temporary pointer so the old buffer is not leaked
         * when reallocation fails (previously: data = tb_ralloc(data, maxn)
         * dropped the only reference to the old buffer on failure)
         */
        if (data)
        {
            tb_byte_t* grown = (tb_byte_t*)tb_ralloc(data, maxn);
            tb_assert_and_check_break(grown);
            data = grown;
        }
        else data = tb_malloc_bytes(maxn);
        tb_assert_and_check_break(data);

        // writ object to data
        tb_long_t size = tb_object_writ_to_data(object, data, maxn, format);

        // ok? make the data object
        if (size >= 0) odata = tb_object_data_init_from_data(data, size);
        // failed? grow it
        else maxn <<= 1;

    } while (!odata);

    // exit data
    if (data) tb_free(data);
    data = tb_null;

    // ok?
    return odata;
}
/* Exit the image decoder: run its type-specific free hook first, then
 * free the decoder object itself. A null decoder is ignored.
 */
tb_void_t g2_image_decoder_exit(g2_image_decoder_t* decoder)
{
    // nothing to do for a null decoder
    tb_check_return(decoder);

    // run the decoder-specific cleanup hook first
    if (decoder->free) decoder->free(decoder);

    // free the decoder object itself
    tb_free(decoder);
}
/* Exit the epoll reactor: free the events array, close the epoll
 * descriptor, then free the reactor itself.
 */
static tb_void_t tb_aiop_rtor_epoll_exit(tb_aiop_rtor_impl_t* rtor)
{
    tb_aiop_rtor_epoll_impl_t* impl = (tb_aiop_rtor_epoll_impl_t*)rtor;
    tb_check_return(impl);

    // free the events array
    if (impl->evts) tb_free(impl->evts);
    impl->evts = tb_null;

    // close the epoll descriptor (0 is treated as the "not open" sentinel here)
    if (impl->epfd) close(impl->epfd);
    impl->epfd = 0;

    // free the reactor itself
    tb_free(impl);
}
/* Exit the queue buffer: free the owned data and zero the whole structure.
 * The structure itself is not freed — it is embedded in its owner.
 */
tb_void_t tb_queue_buffer_exit(tb_queue_buffer_ref_t buffer)
{
    // nothing to do for a null buffer
    tb_check_return(buffer);

    // free the owned data
    if (buffer->data) tb_free(buffer->data);

    // reset the whole structure to a pristine state
    tb_memset(buffer, 0, sizeof(tb_queue_buffer_t));
}
/* Free one piece of bound statement data (callback for bind cleanup). */
static tb_void_t tb_database_sqlite3_statement_bind_exit(tb_pointer_t priv)
{
    // trace
    tb_trace_d("bind: exit: %p", priv);

    // free the bound data copy
    if (priv) tb_free(priv);
}
/* Exit the transfer pool.
 *
 * Kills every task, waits up to 5s for all of them to stop, then tears down
 * the work/idle lists and the fixed task pool and frees the pool object.
 *
 * Returns tb_true on success; returns tb_false (leaving the pool alive and
 * still owned by the caller) when the tasks did not stop within the timeout.
 */
tb_bool_t tb_transfer_pool_exit(tb_transfer_pool_ref_t pool)
{
    // check
    tb_transfer_pool_impl_t* impl = (tb_transfer_pool_impl_t*)pool;
    tb_assert_and_check_return_val(impl, tb_false);

    // trace
    tb_trace_d("exit: ..");

    // kill it first
    tb_transfer_pool_kill(pool);

    // wait all: bail out without freeing anything if tasks are still running
    if (tb_transfer_pool_wait_all(pool, 5000) <= 0)
    {
        // trace
        tb_trace_e("exit: wait failed!");
        return tb_false;
    }

    // enter
    tb_spinlock_enter(&impl->lock);

    // check: after a successful wait no task may remain in the work list
    tb_assert(!tb_list_entry_size(&impl->work));

    // exit the work list
    tb_list_entry_exit(&impl->work);

    // exit the idle list
    tb_list_entry_exit(&impl->idle);

    // exit pool
    if (impl->pool)
    {
        // exit all task
        tb_fixed_pool_walk(impl->pool, tb_transfer_pool_walk_exit, tb_null);

        // exit it
        tb_fixed_pool_exit(impl->pool);
        impl->pool = tb_null;
    }

    // leave
    tb_spinlock_leave(&impl->lock);

    // exit lock (after the locked section above)
    tb_spinlock_exit(&impl->lock);

    // exit it
    tb_free(pool);

    // trace
    tb_trace_d("exit: ok");

    // ok
    return tb_true;
}
/* Exit the atomic-counter semaphore: just free the counter storage. */
tb_void_t tb_semaphore_exit(tb_semaphore_ref_t self)
{
    // check
    tb_atomic_t* counter = (tb_atomic_t*)self;
    tb_assert_and_check_return(counter);

    // free the counter storage
    tb_free(counter);
}
/* Free one thread-local socket-data container (thread-local destructor). */
static tb_void_t tb_sockdata_local_free(tb_cpointer_t priv)
{
    tb_sockdata_ref_t sockdata = (tb_sockdata_ref_t)priv;
    tb_check_return(sockdata);

    // release the owned table, then the container itself
    tb_sockdata_exit(sockdata);
    tb_free(sockdata);
}
/* Exit a data object: release its internal buffer, then free the object. */
static tb_void_t tb_object_data_exit(tb_object_ref_t object)
{
    // cast and validate
    tb_object_data_t* data = tb_object_data_cast(object);
    tb_check_return(data);

    // release the owned buffer, then the object itself
    tb_buffer_exit(&data->buffer);
    tb_free(data);
}
/* Exit the socket-data table: free the backing array and reset the capacity.
 * The tb_sockdata_t structure itself is owned by the caller.
 *
 * NOTE(review): tb_assert does not return on failure in release builds, so a
 * null sockdata would be dereferenced below — siblings use
 * tb_assert_and_check_return; confirm this is intentional before changing.
 */
tb_void_t tb_sockdata_exit(tb_sockdata_ref_t sockdata)
{
    // check
    tb_assert(sockdata);

    // free the backing array
    if (sockdata->data) tb_free(sockdata->data);
    sockdata->data = tb_null;
    sockdata->maxn = 0;
}