tb_timer_task_ref_t tb_timer_task_init_at(tb_timer_ref_t timer, tb_hize_t when, tb_size_t period, tb_bool_t repeat, tb_timer_task_func_t func, tb_cpointer_t priv) { // check tb_timer_impl_t* impl = (tb_timer_impl_t*)timer; tb_assert_and_check_return_val(impl && impl->pool && impl->heap && func, tb_null); // stoped? tb_assert_and_check_return_val(!tb_atomic_get(&impl->stop), tb_null); // enter tb_spinlock_enter(&impl->lock); // make task tb_event_ref_t event = tb_null; tb_hize_t when_top = -1; tb_timer_task_impl_t* task_impl = (tb_timer_task_impl_t*)tb_fixed_pool_malloc0(impl->pool); if (task_impl) { // the top when if (tb_heap_size(impl->heap)) { tb_timer_task_impl_t* task_impl = (tb_timer_task_impl_t*)tb_heap_top(impl->heap); if (task_impl) when_top = task_impl->when; } // init task task_impl->refn = 2; task_impl->func = func; task_impl->priv = priv; task_impl->when = when; task_impl->period = period; task_impl->repeat = repeat? 1 : 0; // add task tb_heap_put(impl->heap, task_impl); // the event event = impl->event; } // leave tb_spinlock_leave(&impl->lock); // post event if the top task is changed if (event && task_impl && when < when_top) tb_event_post(event); // ok? return (tb_timer_task_ref_t)task_impl; }
static tb_void_t tb_test_heap_max_perf() { // init element tb_element_t element = tb_element_uint32(); element.comp = tb_test_heap_max_comp; // init heap tb_heap_ref_t heap = tb_heap_init(4096, element); tb_assert_and_check_return(heap); // clear rand tb_random_clear(tb_null); // init time tb_hong_t time = tb_mclock(); // profile __tb_volatile__ tb_size_t i = 0; __tb_volatile__ tb_size_t n = 100000; __tb_volatile__ tb_size_t p; for (i = 0; i < n; i++) tb_heap_put(heap, (tb_pointer_t)(tb_size_t)tb_random_range(tb_null, 0, 50)); for (i = 0; tb_heap_size(heap); i++) { // get the top value tb_size_t v = (tb_size_t)tb_heap_top(heap); // check order tb_assert_abort(!i || p >= v); // save the previous value p = v; // pop it tb_heap_pop(heap); } // exit time time = tb_mclock() - time; // trace tb_trace_i("heap_max: %lld ms", time); // exit heap tb_heap_exit(heap); }
/* Kill a pending timer task: force it to fire once, immediately, with the
 * killed flag set. Only succeeds while both references (caller + timer,
 * refn == 2) are still held — i.e. the task has not expired or been removed.
 */
tb_void_t tb_timer_task_kill(tb_timer_ref_t timer, tb_timer_task_ref_t task)
{
    // check
    tb_timer_impl_t*        impl = (tb_timer_impl_t*)timer;
    tb_timer_task_impl_t*   task_impl = (tb_timer_task_impl_t*)task;
    tb_assert_and_check_return(impl && impl->pool && task_impl);

    // trace
    tb_trace_d("kill: when: %lld, period: %u, refn: %u", task_impl->when, task_impl->period, task_impl->refn);

    // enter
    tb_spinlock_enter(&impl->lock);

    // done
    do
    {
        // skip tasks that already expired or were removed
        tb_check_break(task_impl->refn == 2);

        // locate the task inside the pending heap
        tb_size_t itor = tb_find_all_if(impl->heap, tb_timer_comp_by_task, task_impl);
        tb_assert_and_check_break(itor != tb_iterator_tail(impl->heap));

        // detach it so its key can be rewritten safely
        tb_heap_del(impl->heap, itor);

        // mark it killed and disable repeating
        task_impl->killed = 1;
        task_impl->repeat = 0;

        // reschedule for "now" so it fires on the next spak
        task_impl->when = tb_timer_now(impl);

        // put it back with the updated expiry
        tb_heap_put(impl->heap, task_impl);

    } while (0);

    // leave
    tb_spinlock_leave(&impl->lock);
}
/* Load a heap from a stream written in the matching save format.
 *
 * Layout read from the stream: a 32-bit big-endian head crc32 (covering the
 * literal "heap" tag plus the element type and size), a 32-bit item count,
 * the serialized items, then a 32-bit body crc32 that additionally covers
 * each item's hash. On any failure the stream is seeked back to its
 * original offset and the heap is cleared again.
 *
 * @param heap      the heap (cleared first; element must provide hash/load/free)
 * @param stream    the source stream
 *
 * @return          tb_true on success, tb_false on failure (stream restored)
 */
tb_bool_t tb_heap_load(tb_heap_ref_t heap, tb_stream_ref_t stream)
{
    // check
    tb_heap_impl_t* impl = (tb_heap_impl_t*)heap;
    tb_assert_and_check_return_val(impl && stream, tb_false);
    tb_assert_and_check_return_val(impl->func.hash && impl->func.load && impl->func.free, tb_false);

    // clear the heap first
    tb_heap_clear(heap);

    // the offset (remembered so the stream can be restored on failure)
    tb_hize_t offset = tb_stream_offset(stream);

    // done
    tb_bool_t       ok = tb_false;
    tb_uint32_t     crc32 = 0;
    tb_pointer_t    buff = tb_null;
    do
    {
        // calc type
        crc32 = tb_crc_encode_cstr(TB_CRC_MODE_32_IEEE_LE, crc32, "heap");

        // calc item type
        crc32 = tb_crc_encode_value(TB_CRC_MODE_32_IEEE_LE, crc32, impl->func.type);

        // calc item size
        crc32 = tb_crc_encode_value(TB_CRC_MODE_32_IEEE_LE, crc32, impl->func.size);

        // load the head crc32 (must match the locally computed tag/type/size crc)
        tb_uint32_t crc32_head = tb_stream_bread_u32_be(stream);
        tb_assert_and_check_break(crc32_head == crc32);

        // make item buffer (tb_null for zero-sized elements; load must cope with that)
        buff = impl->func.size? tb_malloc(impl->func.size) : tb_null;

        // load size
        tb_uint32_t size = tb_stream_bread_u32_be(stream);

        // load heap
        tb_uint32_t load = 0;
        for (load = 0; load < size; load++)
        {
            // load item
            if (!impl->func.load(&impl->func, buff, stream)) break;

            // the item data
            tb_cpointer_t data = impl->func.data(&impl->func, buff);

            // hash item (folded into the running body crc)
            tb_size_t hash = impl->func.hash(&impl->func, data, -1, 0);

            // calc item
            crc32 = tb_crc_encode_value(TB_CRC_MODE_32_IEEE_LE, crc32, hash);

            // save item (the heap copies it per the element contract — the temp is freed below)
            tb_heap_put(heap, data);

            // free name
            impl->func.free(&impl->func, buff);
        }

        // check all declared items were actually loaded
        tb_assert_and_check_break(load == size);

        // load the body crc32 (must match the crc accumulated over the item hashes)
        tb_uint32_t crc32_body = tb_stream_bread_u32_be(stream);
        tb_assert_and_check_break(crc32_body == crc32);

        // ok
        ok = tb_true;

    } while (0);

    // failed?
    if (!ok)
    {
        // restore it
        tb_stream_seek(stream, offset);

        // clear it
        tb_heap_clear(heap);
    }

    // exit buffer
    if (buff) tb_free(buff);
    buff = tb_null;

    // ok?
    return ok;
}
/* Insert an item into the priority queue.
 *
 * The priority queue is a thin wrapper over the heap, so this simply
 * forwards the insertion to tb_heap_put.
 */
tb_void_t tb_priority_queue_put(tb_priority_queue_ref_t self, tb_cpointer_t data)
{
    tb_heap_ref_t heap = (tb_heap_ref_t)self;
    tb_heap_put(heap, data);
}
/* Drive the timer: dispatch at most one expired task.
 *
 * Pops the earliest task if its expiry time has passed. Repeating tasks are
 * rescheduled (when = now + period) and re-inserted; one-shot tasks drop the
 * timer's reference (freed directly when it was the last one). The callback
 * is invoked AFTER the spinlock is released, with the task's killed flag.
 *
 * @param timer     the timer
 *
 * @return          tb_true on success (including "nothing expired"),
 *                  tb_false when the timer has been stopped
 */
tb_bool_t tb_timer_spak(tb_timer_ref_t timer)
{
    // check
    tb_timer_impl_t* impl = (tb_timer_impl_t*)timer;
    tb_assert_and_check_return_val(impl && impl->pool && impl->heap, tb_false);

    // stoped?
    tb_check_return_val(!tb_atomic_get(&impl->stop), tb_false);

    // enter
    tb_spinlock_enter(&impl->lock);

    // done
    tb_bool_t               ok = tb_false;
    tb_timer_task_func_t    func = tb_null;
    tb_cpointer_t           priv = tb_null;
    tb_bool_t               killed = tb_false;
    do
    {
        // empty? nothing to dispatch, but still a successful spak
        if (!tb_heap_size(impl->heap))
        {
            ok = tb_true;
            break;
        }

        // the top task (earliest expiry)
        tb_timer_task_impl_t* task_impl = (tb_timer_task_impl_t*)tb_heap_top(impl->heap);
        tb_assert_and_check_break(task_impl);

        // check refn
        tb_assert(task_impl->refn);

        // the now
        tb_hong_t now = tb_timer_now(impl);

        // timeout?
        if (task_impl->when <= now)
        {
            // pop it
            tb_heap_pop(impl->heap);

            // save func and data for calling it later (outside the lock)
            func = task_impl->func;
            priv = task_impl->priv;

            // killed?
            killed = task_impl->killed? tb_true : tb_false;

            // repeat?
            if (task_impl->repeat)
            {
                // update when
                task_impl->when = now + task_impl->period;

                // continue task_impl
                tb_heap_put(impl->heap, task_impl);
            }
            else
            {
                // refn-- (the caller still holds a reference)
                if (task_impl->refn > 1) task_impl->refn--;

                // remove it from pool directly
                else tb_fixed_pool_free(impl->pool, task_impl);
            }
        }

        // ok
        ok = tb_true;

    } while (0);

    // leave
    tb_spinlock_leave(&impl->lock);

    // done func (deliberately invoked after releasing the spinlock)
    if (func) func(killed, priv);

    // ok?
    return ok;
}
static tb_void_t tb_test_heap_min_func() { // init heap tb_heap_ref_t heap = tb_heap_init(16, tb_element_uint32()); tb_assert_and_check_return(heap); // clear rand tb_random_clear(tb_null); // make heap tb_size_t i = 0; for (i = 0; i < 100; i++) { // the value tb_uint32_t val = tb_random_range(tb_null, 0, 50); // trace // tb_trace_i("heap_min: put: %u", val); // put it tb_heap_put(heap, tb_u2p(val)); } // clear rand tb_random_clear(tb_null); // remove some values for (i = 0; i < 100; i++) { // the value tb_uint32_t val = tb_random_range(tb_null, 0, 50); // remove it? if (!(i & 3)) { tb_size_t itor = tb_find_all(heap, tb_u2p(val)); if (itor != tb_iterator_tail(heap)) tb_heap_remove(heap, itor); } } // append heap for (i = 0; i < 30; i++) { // the value tb_uint32_t val = tb_random_range(tb_null, 0, 50); // put it tb_heap_put(heap, tb_u2p(val)); } // trace tb_trace_i(""); // dump heap while (tb_heap_size(heap)) { // put it tb_uint32_t val = (tb_uint32_t)(tb_size_t)tb_heap_top(heap); // trace tb_trace_i("heap_min: pop: %u", val); // pop it tb_heap_pop(heap); } // exit heap tb_heap_exit(heap); }