/* Kill an aico: mark an opened aico as killed, or ask the proactor to
 * kill a pending one. No-op when the aicp is already killed and idle.
 *
 * Fix: the original computed aicp_impl from impl->aicp BEFORE asserting
 * impl was non-null, dereferencing a possibly-null pointer.
 */
tb_void_t tb_aico_kill(tb_aico_ref_t aico)
{
    // check impl first, before dereferencing it
    tb_aico_impl_t* impl = (tb_aico_impl_t*)aico;
    tb_assert_and_check_return(impl && impl->aicp);

    // now it is safe to fetch the aicp
    tb_aicp_impl_t* aicp_impl = (tb_aicp_impl_t*)impl->aicp;
    tb_assert_and_check_return(aicp_impl->ptor && aicp_impl->ptor->kilo);

    // the aicp is killed and not worked?
    tb_check_return(!tb_atomic_get(&aicp_impl->kill) || tb_atomic_get(&aicp_impl->work));

    // trace
    tb_trace_d("kill: aico[%p]: type: %lu, handle: %p: state: %s: ..", aico, tb_aico_type(aico), impl->handle, tb_state_cstr(tb_atomic_get(&impl->state)));

    // opened? mark it killed
    if (TB_STATE_OPENED == tb_atomic_fetch_and_pset(&impl->state, TB_STATE_OPENED, TB_STATE_KILLED))
    {
        // trace
        tb_trace_d("kill: aico[%p]: type: %lu, handle: %p: ok", aico, tb_aico_type(aico), impl->handle);
    }
    // pending? let the proactor kill it
    else if (TB_STATE_PENDING == tb_atomic_fetch_and_pset(&impl->state, TB_STATE_PENDING, TB_STATE_KILLING))
    {
        // kill aico
        aicp_impl->ptor->kilo(aicp_impl->ptor, impl);

        // trace
        tb_trace_d("kill: aico[%p]: type: %lu, handle: %p: state: pending: ok", aico, tb_aico_type(aico), impl->handle);
    }
}
/* @note
 *
 * fputs(string, stdout) exists compatibility issue when vs2008 => vs2015
 *
 * error: ___iob_func undefined in vs2015
 */
tb_void_t tb_print(tb_char_t const* string)
{
    // check
    tb_check_return(string);

    // get stdout
    HANDLE handle = GetStdHandle(STD_OUTPUT_HANDLE);
    tb_assert_and_check_return(handle != INVALID_HANDLE_VALUE);

    /* the data and size
     *
     * fix: the original wrote tb_strlen(string) + 1 bytes, which emitted the
     * trailing '\0' to the console as a stray byte — only the characters of
     * the string itself must be written
     */
    tb_byte_t const*    data = (tb_byte_t const*)string;
    tb_size_t           size = tb_strlen(string);

    // write string to stdout, looping until everything has been written
    tb_size_t writ = 0;
    while (writ < size)
    {
        // write to the stdout
        DWORD real = 0;
        if (!WriteFile(handle, data + writ, (DWORD)(size - writ), &real, tb_null)) break;

        // update the written size
        writ += (tb_size_t)real;
    }
}
/* Exit an aico: wait (bounded) for it to reach the closed state, then
 * return its memory to the aicp fixed pool under the pool spinlock.
 *
 * Fix: the original computed aicp_impl from impl->aicp BEFORE asserting
 * impl was non-null, dereferencing a possibly-null pointer.
 */
tb_void_t tb_aico_exit(tb_aico_ref_t aico)
{
    // check impl first, before dereferencing it
    tb_aico_impl_t* impl = (tb_aico_impl_t*)aico;
    tb_assert_and_check_return(impl && impl->aicp);

    // now it is safe to fetch the aicp
    tb_aicp_impl_t* aicp_impl = (tb_aicp_impl_t*)impl->aicp;
    tb_assert_and_check_return(aicp_impl->pool);

    // wait for closing: poll up to 15 times, 200ms apart (~3s total)
    tb_size_t tryn = 15;
    while (tb_atomic_get(&impl->state) != TB_STATE_CLOSED && tryn--)
    {
        // trace
        tb_trace_d("exit[%p]: type: %lu, handle: %p, state: %s: wait: ..", aico, tb_aico_type(aico), impl->handle, tb_state_cstr(tb_atomic_get(&impl->state)));

        // wait some time
        tb_msleep(200);
    }

    // must be closed now, else freeing it would be unsafe
    tb_assert_abort(tb_atomic_get(&impl->state) == TB_STATE_CLOSED);
    tb_check_return(tb_atomic_get(&impl->state) == TB_STATE_CLOSED);

    // enter
    tb_spinlock_enter(&aicp_impl->lock);

    // trace
    tb_trace_d("exit[%p]: type: %lu, handle: %p, state: %s: ok", aico, tb_aico_type(aico), impl->handle, tb_state_cstr(tb_atomic_get(&impl->state)));

    // free it
    tb_fixed_pool_free(aicp_impl->pool, aico);

    // leave
    tb_spinlock_leave(&aicp_impl->lock);
}
/* Remove a slot from the pool's sorted slot list and release its memory
 * back to the large allocator.
 */
static tb_void_t tb_fixed_pool_slot_exit(tb_fixed_pool_t* pool, tb_fixed_pool_slot_t* slot)
{
    // check
    tb_assert_and_check_return(pool && pool->large_allocator && slot);
    tb_assert_and_check_return(pool->slot_list && pool->slot_count);

    // trace
    tb_trace_d("slot[%lu]: exit: size: %lu", pool->item_size, slot->size);

    // wrap the slot list as a random-access pointer iterator
    tb_array_iterator_t array_iterator;
    tb_iterator_ref_t   iterator = tb_iterator_make_for_ptr(&array_iterator, (tb_pointer_t*)pool->slot_list, pool->slot_count);
    tb_assert(iterator);

    // locate this slot inside the sorted slot list
    tb_size_t itor = tb_binary_find_all(iterator, (tb_cpointer_t)slot);
    tb_assert(itor != tb_iterator_tail(iterator) && itor < pool->slot_count && pool->slot_list[itor]);
    tb_check_return(itor != tb_iterator_tail(iterator) && itor < pool->slot_count && pool->slot_list[itor]);

    // close the gap left by the removed entry
    tb_size_t tail_count = pool->slot_count - itor - 1;
    if (tail_count) tb_memmov_(pool->slot_list + itor, pool->slot_list + itor + 1, tail_count * sizeof(tb_fixed_pool_slot_t*));

    // one slot less now
    pool->slot_count--;

    // give the slot memory back to the large allocator
    tb_allocator_large_free(pool->large_allocator, slot);
}
/*!the insertion sort
 *
 * stable, in-place; requires a bidirectional (forward + reverse) iterator
 *
 * <pre>
 * old:     5       2       6       2       8       6       1
 *
 *        (hole)
 * step1: ((5))     2       6       2       8       6       1
 *        (next) <=
 *
 *               (hole)
 * step2: ((2))    (5)      6       2       8       6       1
 *                (next) <=
 *
 *                        (hole)
 * step3:   2       5     ((6))     2       8       6       1
 *                        (next) <=
 *
 *               (hole)
 * step4:   2     ((2))    (5)     (6)      8       6       1
 *                                (next) <=
 *
 *                                        (hole)
 * step5:   2       2       5       6     ((8))     6       1
 *                                        (next) <=
 *
 *                                               (hole)
 * step6:   2       2       5       6     ((6))    (8)      1
 *                                                (next) <=
 *
 *        (hole)
 * step7: ((1))    (2)     (2)     (5)     (6)     (6)     (8)
 *                                                        (next)
 * </pre>
 */
tb_void_t tb_insert_sort(tb_iterator_ref_t iterator, tb_size_t head, tb_size_t tail, tb_iterator_comp_t comp)
{
    // check: need forward and reverse traversal for the hole-moving loop
    tb_assert_and_check_return(iterator);
    tb_assert_and_check_return((tb_iterator_mode(iterator) & TB_ITERATOR_MODE_FORWARD));
    tb_assert_and_check_return((tb_iterator_mode(iterator) & TB_ITERATOR_MODE_REVERSE));
    tb_check_return(head != tail);

    /* init: items that fit in a pointer are saved by value; larger items
     * need a temporary buffer for the element being inserted
     */
    tb_size_t       step = tb_iterator_step(iterator);
    tb_pointer_t    temp = step > sizeof(tb_pointer_t)? tb_malloc(step) : tb_null;
    tb_assert_and_check_return(step <= sizeof(tb_pointer_t) || temp);

    // the comparer: fall back to the iterator's default comparison
    if (!comp) comp = tb_iterator_comp;

    // sort
    tb_size_t last, next;
    for (next = tb_iterator_next(iterator, head); next != tail; next = tb_iterator_next(iterator, next))
    {
        // save next
        if (step <= sizeof(tb_pointer_t)) temp = tb_iterator_item(iterator, next);
        else tb_memcpy(temp, tb_iterator_item(iterator, next), step);

        /* look for hole and move elements[hole, next - 1] => [hole + 1, next]
         *
         * note: the comma expression both steps 'last' backwards and
         * compares the saved item against it; 'next' trails 'last' and
         * always names the current hole position
         */
        for (last = next; last != head && (last = tb_iterator_prev(iterator, last), comp(iterator, temp, tb_iterator_item(iterator, last)) < 0); next = last)
            tb_iterator_copy(iterator, next, tb_iterator_item(iterator, last));

        // item => hole
        tb_iterator_copy(iterator, next, temp);
    }

    // free the temporary buffer if one was allocated
    if (temp && step > sizeof(tb_pointer_t)) tb_free(temp);
}
/* Close the filter: run the backend close hook, then reset all transient
 * state (eof, limit, offset, buffered data) and mark it closed.
 */
tb_void_t tb_filter_clos(tb_filter_ref_t self)
{
    // check
    tb_filter_t* filter = (tb_filter_t*)self;
    tb_assert_and_check_return(filter);

    // nothing to do unless it was opened
    tb_check_return(filter->bopened);

    // call the backend close hook if any
    if (filter->clos) filter->clos(filter);

    // reset the eof flag, the limit and the offset
    filter->beof   = tb_false;
    filter->limit  = -1;
    filter->offset = 0;

    // drop any buffered input and output data
    tb_buffer_clear(&filter->idata);
    tb_queue_buffer_clear(&filter->odata);

    // mark it closed
    filter->bopened = tb_false;
}
/* Remove a slot from the impl's sorted slot list and release its memory
 * back to the large pool.
 */
static tb_void_t tb_fixed_pool_slot_exit(tb_fixed_pool_impl_t* impl, tb_fixed_pool_slot_t* slot)
{
    // check
    tb_assert_and_check_return(impl && impl->large_pool && slot);
    tb_assert_and_check_return(impl->slot_list && impl->slot_count);

    // trace
    tb_trace_d("slot[%lu]: exit: size: %lu", impl->item_size, slot->size);

    // view the slot list through a pointer-array iterator
    tb_iterator_t iterator = tb_iterator_init_ptr((tb_pointer_t*)impl->slot_list, impl->slot_count);

    // locate this slot inside the sorted slot list
    tb_size_t itor = tb_binary_find_all(&iterator, (tb_cpointer_t)slot);
    tb_assert_abort(itor != tb_iterator_tail(&iterator) && itor < impl->slot_count && impl->slot_list[itor]);
    tb_check_return(itor != tb_iterator_tail(&iterator) && itor < impl->slot_count && impl->slot_list[itor]);

    // close the gap left by the removed entry
    tb_size_t tail_count = impl->slot_count - itor - 1;
    if (tail_count) tb_memmov_(impl->slot_list + itor, impl->slot_list + itor + 1, tail_count * sizeof(tb_fixed_pool_slot_t*));

    // one slot less now
    impl->slot_count--;

    // give the slot memory back to the large pool
    tb_large_pool_free(impl->large_pool, slot);
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */
/* Generic sort dispatcher: picks an algorithm from the iterator's
 * capabilities and the range size.
 */
tb_void_t tb_sort(tb_iterator_ref_t iterator, tb_size_t head, tb_size_t tail, tb_iterator_comp_t comp)
{
    // check
    tb_assert_and_check_return(iterator);

    // empty range? done
    tb_check_return(head != tail);

    // a writable iterator is required
    tb_assert_and_check_return(!(tb_iterator_mode(iterator) & TB_ITERATOR_MODE_READONLY));

#ifdef TB_CONFIG_MICRO_ENABLE
    // micro mode: only the random-access case is supported
    tb_assert_and_check_return(tb_iterator_mode(iterator) & TB_ITERATOR_MODE_RACCESS);

    // sort it
    tb_quick_sort(iterator, head, tail, comp);
#else
    // random access iterator?
    if (tb_iterator_mode(iterator) & TB_ITERATOR_MODE_RACCESS)
    {
        /* large ranges use heap sort to bound the recursion depth,
         * smaller ones use quick sort
         */
        if (tb_distance(iterator, head, tail) > 100000) tb_heap_sort(iterator, head, tail, comp);
        else tb_quick_sort(iterator, head, tail, comp);
    }
    // sequential-only iterator: fall back to insertion sort
    else tb_insert_sort(iterator, head, tail, comp);
#endif
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */
/* Print a string to the standard output (no trailing newline). */
tb_void_t tb_print(tb_char_t const* string)
{
    // ignore null strings
    tb_check_return(string);

    // forward the string to the stdout
    fputs(string, stdout);
}
/* Append a trailing (prefix-less) trace message: formats into the shared
 * g_line buffer under g_lock, then emits it to the console and/or the
 * trace file depending on g_mode.
 */
tb_void_t tb_trace_tail(tb_char_t const* format, ...)
{
    // check
    tb_check_return(format);

    // enter: serialize access to the shared g_line buffer
    tb_spinlock_enter_without_profiler(&g_lock);

    // done
    do
    {
        // tracing disabled? nothing to do
        tb_check_break(g_mode);

        // init the output window [p, e) over the shared line buffer
        tb_va_list_t    l;
        tb_char_t*      p = g_line;
        tb_char_t*      e = g_line + sizeof(g_line);
        tb_va_start(l, format);

        /* append format
         *
         * NOTE(review): if tb_vsnprintf returns the needed (untruncated)
         * length, p may end up past e and the file-write size below could
         * exceed the buffer — TODO confirm tb_vsnprintf's return contract
         */
        if (p < e) p += tb_vsnprintf(p, e - p, format, l);

        // append end: terminate in place, and force-terminate the last byte
        if (p < e) *p = '\0'; e[-1] = '\0';

        // print it to the console
        if (g_mode & TB_TRACE_MODE_PRINT) tb_print(g_line);

        // print it to file
#ifndef TB_CONFIG_MICRO_ENABLE
        if ((g_mode & TB_TRACE_MODE_FILE) && g_file)
        {
            // write the formatted bytes, looping on short writes
            tb_size_t size = p - g_line;
            tb_size_t writ = 0;
            while (writ < size)
            {
                // writ it
                tb_long_t real = tb_file_writ(g_file, (tb_byte_t const*)g_line + writ, size - writ);
                tb_check_break(real > 0);

                // save size
                writ += real;
            }
        }
#endif

        // exit the variadic argument list
        tb_va_end(l);

    } while (0);

    // leave
    tb_spinlock_leave(&g_lock);
}
/* Tear down stencil drawing: restore the color mask and disable the
 * stencil test. No-op unless stencil drawing was enabled.
 */
static __tb_inline__ tb_void_t g2_gl_draw_stencil_exit(g2_gl_draw_t* draw)
{
    // only meaningful when the stencil flag is set
    tb_check_return(draw->flag & G2_GL_DRAW_FLAG_STENCIL);

    // re-enable all color channels and switch the stencil test off
    g2_glColorMask(G2_GL_TRUE, G2_GL_TRUE, G2_GL_TRUE, G2_GL_TRUE);
    g2_glDisable(G2_GL_STENCIL_TEST);
}
/* Fill the given bounds, using the shader when one is attached and a
 * plain color otherwise. Only applies to rect or stencil draws.
 */
static __tb_inline__ tb_void_t g2_gl_draw_style_fill(g2_gl_draw_t* draw, g2_gl_rect_t const* bounds)
{
    // nothing to fill unless drawing a rect or through the stencil
    tb_check_return(draw->flag & (G2_GL_DRAW_FLAG_RECT | G2_GL_DRAW_FLAG_STENCIL));

    // shader fill or plain color fill
    if (draw->shader) g2_gl_draw_style_fill_shader(draw, bounds);
    else g2_gl_draw_style_fill_color(draw, bounds);
}
/* Configure the stencil pass for drawing: render only where the stencil
 * value equals 0xff, zeroing it afterwards, with color writes enabled.
 */
static __tb_inline__ tb_void_t g2_gl_draw_stencil_draw(g2_gl_draw_t* draw)
{
    // only when stencil drawing is enabled
    tb_check_return(draw->flag & G2_GL_DRAW_FLAG_STENCIL);

    // pass pixels whose stencil value equals 0xff and clear them on write
    g2_glStencilFunc(G2_GL_EQUAL, 0xff, 0xff);
    g2_glStencilOp(G2_GL_ZERO, G2_GL_ZERO, G2_GL_ZERO);

    // write all color channels again
    g2_glColorMask(G2_GL_TRUE, G2_GL_TRUE, G2_GL_TRUE, G2_GL_TRUE);
}
/* Print a string to the standard output followed by a newline. */
tb_void_t tb_printl(tb_char_t const* string)
{
    // ignore null strings
    tb_check_return(string);

    // write the string, then the platform newline, to the stdout
    fputs(string, stdout);
    fputs(__tb_newline__, stdout);
}
/* Increase the reference count of an object. Readonly objects are not
 * reference-counted and are left untouched.
 */
tb_void_t tb_object_inc(tb_object_ref_t object)
{
    // check
    tb_assert_and_check_return(object);

    // readonly objects are not reference-counted
    tb_check_return(!(object->flag & TB_OBJECT_FLAG_READONLY));

    // take one more reference
    ++object->refn;
}
/* Clear an object's contents via its clear hook. Readonly objects
 * cannot be cleared.
 */
tb_void_t tb_object_cler(tb_object_ref_t object)
{
    // check
    tb_assert_and_check_return(object);

    // a readonly object cannot be cleared
    tb_check_return(!(object->flag & TB_OBJECT_FLAG_READONLY));

    // invoke the clear hook if the object provides one
    if (object->cler) object->cler(object);
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */
/* Print a string both to the iOS device log and to the stdout. */
tb_void_t tb_print(tb_char_t const* string)
{
    // ignore null strings
    tb_check_return(string);

    // send it to the device log, tagged with the current thread id
    asl_log(tb_null, tb_null, ASL_LEVEL_WARNING, "[%08x]: %s", (tb_uint32_t)tb_thread_self(), string);

    // mirror it on the stdout
    fputs(string, stdout);
}
/* Release an object: drops one reference, destroying the object when the
 * count reaches zero. Readonly objects are never destroyed this way.
 */
tb_void_t tb_object_exit(tb_object_ref_t object)
{
    // check
    tb_assert_and_check_return(object);

    // never destroy readonly objects
    tb_check_return(!(object->flag & TB_OBJECT_FLAG_READONLY));

    // drop one reference
    tb_object_dec(object);
}
/* Emit a trace message: collects the variadic arguments and delegates to
 * the va_list variant.
 */
tb_void_t tb_trace_done(tb_char_t const* prefix, tb_char_t const* module, tb_char_t const* format, ...)
{
    // check
    tb_check_return(format);

    // gather the variadic arguments and forward them
    tb_va_list_t list;
    tb_va_start(list, format);
    tb_trace_done_with_args(prefix, module, format, list);
    tb_va_end(list);
}
/* Remove up to 'size' items from the list between 'prev' and 'next'.
 * When 'prev' is zero, items are stripped from the head instead.
 */
static tb_void_t tb_list_itor_remove_range(tb_iterator_ref_t iterator, tb_size_t prev, tb_size_t next, tb_size_t size)
{
    // nothing requested?
    tb_check_return(size);

    // an empty list has nothing to remove
    tb_size_t count = tb_list_size((tb_list_ref_t)iterator);
    tb_check_return(count);

    // never remove more items than the list holds
    if (size > count) size = count;

    if (prev)
    {
        // walk from the item after 'prev', removing until 'next' or the quota is met
        tb_size_t itor = tb_iterator_next((tb_list_ref_t)iterator, prev);
        while (itor != next && size--) itor = tb_list_remove((tb_list_ref_t)iterator, itor);
    }
    else
    {
        // no predecessor: strip items from the head
        while (size--) tb_list_remove_head((tb_list_ref_t)iterator);
    }
}
/* Clip the stencil against a rect clipper item by converting the rect
 * into GL bounds and delegating to the bounds-based clip.
 */
static __tb_inline__ tb_void_t g2_gl_draw_stencil_clip_rect(g2_gl_draw_t* draw, g2_clipper_item_t const* item)
{
    // must be a rect clip item
    tb_assert_and_check_return(item->type == G2_CLIPPER_ITEM_RECT);

    // build the clip bounds from the rect origin and extent
    g2_gl_rect_t bounds;
    bounds.x1 = g2_float_to_tb(item->u.rect.x);
    bounds.y1 = g2_float_to_tb(item->u.rect.y);
    bounds.x2 = g2_float_to_tb(item->u.rect.x + item->u.rect.w);
    bounds.y2 = g2_float_to_tb(item->u.rect.y + item->u.rect.h);

    // degenerate rects clip nothing
    tb_check_return(bounds.x1 < bounds.x2 && bounds.y1 < bounds.y2);

    // apply the bounds to the stencil clip
    g2_gl_draw_stencil_clip_bounds(draw, &bounds);
}
/* Decrease the reference count of an object, destroying it via its exit
 * hook when the last reference is dropped.
 */
tb_void_t tb_object_dec(tb_object_ref_t object)
{
    // check
    tb_assert_and_check_return(object);

    // readonly objects are not reference-counted
    tb_check_return(!(object->flag & TB_OBJECT_FLAG_READONLY));

    // a zero count here would mean an unbalanced inc/dec
    tb_assert_and_check_return(object->refn);

    // release one reference; destroy the object on the last one
    if (!--object->refn && object->exit) object->exit(object);
}
/* Update the prediction cache for a freed block: remember the largest
 * free block seen in the bucket matching its size.
 */
static __tb_inline__ tb_void_t tb_static_large_allocator_pred_update(tb_static_large_allocator_ref_t allocator, tb_static_large_data_head_t* data_head)
{
    // check
    tb_assert(allocator && data_head && data_head->bfree);

    // the tail block is never cached as a prediction
    tb_check_return(data_head != allocator->data_tail);

    // map the block size to its prediction bucket
    tb_size_t indx = tb_static_large_allocator_pred_index(allocator, data_head->space);

    // keep the largest free block seen for this bucket
    tb_static_large_data_head_t* cached = allocator->data_pred[indx].data_head;
    if (!cached || data_head->space > cached->space) allocator->data_pred[indx].data_head = data_head;
}
/* ///////////////////////////////////////////////////////////////////////
 * interfaces
 */
/* Draw a rect (fill or stroke) by computing its GL bounds and delegating
 * to the generic bounds renderer.
 */
tb_void_t g2_gl_draw_rect(g2_gl_painter_t* painter, tb_size_t mode, g2_rect_t const* rect)
{
    // check
    tb_assert_and_check_return(painter && rect);
    tb_assert_and_check_return((mode == G2_STYLE_MODE_FILL) || (mode == G2_STYLE_MODE_STOK));

    // compute the axis-aligned bounds of the rect
    g2_gl_rect_t bounds;
    bounds.x1 = g2_float_to_tb(rect->x);
    bounds.y1 = g2_float_to_tb(rect->y);
    bounds.x2 = g2_float_to_tb(rect->x + rect->w);
    bounds.y2 = g2_float_to_tb(rect->y + rect->h);

    // skip empty or inverted rects
    tb_check_return(bounds.x1 < bounds.x2 && bounds.y1 < bounds.y2);

    // hand the bounds over to the generic bounds renderer
    g2_gl_draw_bounds(painter, mode, &bounds);
}
/* Close a database connection: run the backend close hook, mark the
 * connection closed, and reset the last error state.
 */
tb_void_t tb_database_sql_clos(tb_database_sql_ref_t database)
{
    // check
    tb_database_sql_impl_t* impl = (tb_database_sql_impl_t*)database;
    tb_assert_and_check_return(impl);

    // nothing to do if it is not open
    tb_check_return(impl->bopened);

    // invoke the backend close hook if any
    if (impl->clos) impl->clos(impl);

    // mark it closed and reset the last state
    impl->bopened = tb_false;
    impl->state   = TB_STATE_OK;
}
/* Remove the entries between 'prev' and 'next' (exclusive) from a singly
 * linked entry list by relinking the predecessor past them.
 */
static tb_void_t tb_single_list_entry_itor_remove_range(tb_iterator_ref_t iterator, tb_size_t prev, tb_size_t next, tb_size_t size)
{
    // check
    tb_single_list_entry_head_ref_t list = tb_container_of(tb_single_list_entry_head_t, itor, iterator);
    tb_assert(list && prev && next);

    // nothing to remove?
    tb_check_return(size);

    /* unlink the whole span in one step: the predecessor simply
     * points past the removed entries
     */
    ((tb_single_list_entry_ref_t)prev)->next = (tb_single_list_entry_ref_t)next;

    // account for the removed entries
    list->size -= size;
}
/* Kill an async transfer: atomically mark it killing (only the first
 * caller proceeds) and kill both of its streams.
 */
tb_void_t tb_async_transfer_kill(tb_async_transfer_ref_t transfer)
{
    // check
    tb_async_transfer_impl_t* impl = (tb_async_transfer_impl_t*)transfer;
    tb_assert_and_check_return(impl);

    // mark it killing; bail out if someone already did
    tb_size_t prev = tb_atomic_fetch_and_set(&impl->state, TB_STATE_KILLING);
    tb_check_return(prev != TB_STATE_KILLING);

    // trace
    tb_trace_d("kill: ..");

    // kill the input stream if it exists
    if (impl->istream) tb_async_stream_kill(impl->istream);

    // kill the output stream if it exists
    if (impl->ostream) tb_async_stream_kill(impl->ostream);
}
/* Run the fill cutter over a path, dispatching on the fill rule
 * (nonzero vs even-odd, the latter being the default).
 *
 * Fix: the original asserted cutter->base.func without first checking
 * that cutter itself was non-null — a null cutter crashed the assert.
 */
tb_void_t g2_cutter_fill_done(g2_cutter_fill_t* cutter, tb_handle_t path)
{
    // check cutter itself before dereferencing cutter->base.func
    tb_assert_and_check_return(cutter && cutter->base.func && path);

    // empty path? nothing to fill
    tb_check_return(!g2_path_null(path));

    // dispatch on the fill rule
    switch (cutter->rule)
    {
    case G2_STYLE_RULE_NONZERO:
        g2_cutter_fill_nonzero_done(cutter, path);
        break;
    case G2_STYLE_RULE_EVENODD:
    default:
        g2_cutter_fill_evenodd_done(cutter, path);
        break;
    }
}
/* Draw a triangle (fill or stroke) through the GL draw pipeline. */
tb_void_t g2_gl_draw_triangle(g2_gl_painter_t* painter, tb_size_t mode, g2_triangle_t const* triangle)
{
    // check
    tb_assert_and_check_return(painter && triangle);
    tb_assert_and_check_return((mode == G2_STYLE_MODE_FILL) || (mode == G2_STYLE_MODE_STOK));

    // flatten the three corner points into a vertex array
    tb_float_t coords[6];
    coords[0] = g2_float_to_tb(triangle->p0.x);
    coords[1] = g2_float_to_tb(triangle->p0.y);
    coords[2] = g2_float_to_tb(triangle->p1.x);
    coords[3] = g2_float_to_tb(triangle->p1.y);
    coords[4] = g2_float_to_tb(triangle->p2.x);
    coords[5] = g2_float_to_tb(triangle->p2.y);

    // compute the triangle bounds, padded for the stroke
    g2_gl_rect_t bounds;
    g2_gl_bounds_init(&bounds, coords[0], coords[1]);
    g2_gl_bounds_done(&bounds, coords[2], coords[3]);
    g2_gl_bounds_done(&bounds, coords[4], coords[5]);
    g2_gl_bounds_stok(&bounds, 1);
    tb_check_return(bounds.x1 < bounds.x2 && bounds.y1 < bounds.y2);

    // init the draw context; a triangle is always convex
    g2_gl_draw_t draw = {0};
    if (!g2_gl_draw_init(&draw, painter, mode, G2_GL_DRAW_FLAG_CONVEX)) return ;

    // clip against the bounds
    g2_gl_draw_clip(&draw, &bounds);

    // feed the vertices: fixed pipeline below GL 2.0, vertex attributes otherwise
    if (draw.context->version < 0x20) g2_glVertexPointer(2, G2_GL_FLOAT, 0, coords);
    else g2_glVertexAttribPointer(g2_gl_program_location(draw.program, G2_GL_PROGRAM_LOCATION_VERTICES), 2, G2_GL_FLOAT, G2_GL_FALSE, 0, coords);

    // fill as a triangle strip, stroke as a line strip
    g2_glDrawArrays((draw.mode == G2_STYLE_MODE_FILL)? G2_GL_TRIANGLE_STRIP : G2_GL_LINE_STRIP, 0, 3);

    // apply the fill style
    g2_gl_draw_fill(&draw, &bounds);

    // release the draw context
    g2_gl_draw_exit(&draw);
}
/* Kill every working transfer in the pool. Only a pool in the OK state
 * is touched; the work list is walked under the pool spinlock.
 */
tb_void_t tb_transfer_pool_kill_all(tb_transfer_pool_ref_t pool)
{
    // check
    tb_transfer_pool_impl_t* impl = (tb_transfer_pool_impl_t*)pool;
    tb_assert_and_check_return(impl);

    // only a running pool can be killed
    tb_check_return(TB_STATE_OK == tb_atomic_get(&impl->state));

    // enter
    tb_spinlock_enter(&impl->lock);

    // trace
    tb_trace_d("kill_all: %lu, ..", tb_list_entry_size(&impl->work));

    // kill every working transfer
    tb_walk_all(tb_list_entry_itor(&impl->work), tb_transfer_pool_work_kill, tb_null);

    // leave
    tb_spinlock_leave(&impl->lock);
}