/*! the insertion sort
 *
 * sorts the range [head, tail) in place; stable, O(n^2) worst case.
 *
 * <pre>
 * old:    5       2       6       2       8       6       1
 *
 *        (hole)
 * step1: ((5))    2       6       2       8       6       1
 *        (next) <=
 *
 *        (hole)
 * step2: ((2))   (5)      6       2       8       6       1
 *                (next) <=
 *
 *                        (hole)
 * step3:  2       5      ((6))    2       8       6       1
 *                        (next) <=
 *
 *                (hole)
 * step4:  2     ((2))    (5)     (6)      8       6       1
 *                                (next) <=
 *
 *                                        (hole)
 * step5:  2       2       5       6      ((8))    6       1
 *                                        (next) <=
 *
 *                                        (hole)
 * step6:  2       2       5       6     ((6))    (8)      1
 *                                                (next) <=
 *
 *        (hole)
 * step7: ((1))   (2)     (2)     (5)     (6)     (6)     (8)
 *                                                        (next)
 * </pre>
 *
 * @param iterator  the iterator (must support forward and reverse walking)
 * @param head      the range head itor
 * @param tail      the range tail itor
 * @param comp      the comparer func, uses tb_iterator_comp if null
 */
tb_void_t tb_insert_sort(tb_iterator_ref_t iterator, tb_size_t head, tb_size_t tail, tb_iterator_comp_t comp)
{
    // check
    tb_assert_and_check_return(iterator);
    tb_assert_and_check_return((tb_iterator_mode(iterator) & TB_ITERATOR_MODE_FORWARD));
    tb_assert_and_check_return((tb_iterator_mode(iterator) & TB_ITERATOR_MODE_REVERSE));
    tb_check_return(head != tail);

    /* init the temporary item buffer
     *
     * small items (<= pointer size) are carried by value in the temp pointer
     * itself; larger items need a heap buffer to hold the saved element
     */
    tb_size_t       step = tb_iterator_step(iterator);
    tb_pointer_t    temp = step > sizeof(tb_pointer_t)? tb_malloc(step) : tb_null;
    tb_assert_and_check_return(step <= sizeof(tb_pointer_t) || temp);

    // the comparer
    if (!comp) comp = tb_iterator_comp;

    // sort
    tb_size_t last, next;
    for (next = tb_iterator_next(iterator, head); next != tail; next = tb_iterator_next(iterator, next))
    {
        // save next
        if (step <= sizeof(tb_pointer_t)) temp = tb_iterator_item(iterator, next);
        else tb_memcpy(temp, tb_iterator_item(iterator, next), step);

        /* look for hole and move elements[hole, next - 1] => [hole + 1, next]
         *
         * the comma expression first steps `last` backwards, then compares the
         * saved item against it; while the saved item is smaller, each passed
         * element is shifted one slot to the right and the hole (next) follows
         */
        for (last = next; last != head && (last = tb_iterator_prev(iterator, last), comp(iterator, temp, tb_iterator_item(iterator, last)) < 0); next = last)
            tb_iterator_copy(iterator, next, tb_iterator_item(iterator, last));

        // item => hole
        tb_iterator_copy(iterator, next, temp);
    }

    // free the temporary buffer if it was heap-allocated
    if (temp && step > sizeof(tb_pointer_t)) tb_free(temp);
}
/* ////////////////////////////////////////////////////////////////////////////////////// * test */ static tb_void_t tb_find_int_test() { __tb_volatile__ tb_size_t i = 0; __tb_volatile__ tb_size_t n = 1000; // init data tb_long_t* data = (tb_long_t*)tb_nalloc0(n, sizeof(tb_long_t)); tb_assert_and_check_return(data); // init iterator tb_array_iterator_t array_iterator; tb_iterator_ref_t iterator = tb_array_iterator_init_long(&array_iterator, data, n); // make for (i = 0; i < n; i++) data[i] = i; // find tb_size_t itor = tb_iterator_tail(iterator); tb_hong_t time = tb_mclock(); for (i = 0; i < n; i++) itor = tb_find_all(iterator, (tb_pointer_t)data[800]); time = tb_mclock() - time; // item tb_long_t item = itor != tb_iterator_tail(iterator)? (tb_long_t)tb_iterator_item(iterator, itor) : 0; // time tb_trace_i("tb_find_int_all[%ld ?= %ld]: %lld ms", item, data[800], time); // free tb_free(data); }
/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */

/*! remove the first item matching the given predicate
 *
 * walks forward from the head and removes at most one item: the first for
 * which pred() returns true.
 *
 * @param iterator  the iterator (forward-iterable, not read-only)
 * @param pred      the predicate func
 * @param value     the user value passed to the predicate
 */
tb_void_t tb_remove_first_if(tb_iterator_ref_t iterator, tb_predicate_ref_t pred, tb_cpointer_t value)
{
    // check
    tb_assert_and_check_return(iterator && pred);

    // the iterator must be forward-iterable and writable
    tb_size_t mode = tb_iterator_mode(iterator);
    tb_assert_and_check_return((mode & TB_ITERATOR_MODE_FORWARD));
    tb_assert_and_check_return(!(mode & TB_ITERATOR_MODE_READONLY));

    // walk until the first matching item, then remove it and stop
    tb_size_t itor = tb_iterator_head(iterator);
    for (; itor != tb_iterator_tail(iterator); itor = tb_iterator_next(iterator, itor))
    {
        if (pred(iterator, tb_iterator_item(iterator, itor), value))
        {
            tb_iterator_remove(iterator, itor);
            break;
        }
    }
}
/*! get the environment variable value at the given index
 *
 * @param environment   the environment variable (a vector of strings)
 * @param index         the value index
 *
 * @return              the value c-string, or tb_null if out of range
 */
tb_char_t const* tb_environment_at(tb_environment_ref_t environment, tb_size_t index)
{
    // check
    tb_assert_and_check_return_val(environment, tb_null);

    // out of range? no value
    if (index >= tb_vector_size(environment)) return tb_null;

    // fetch the value at the given position
    return (tb_char_t const*)tb_iterator_item(environment, index);
}
/*! find the network interface with the given name
 *
 * @param iterator  the interface list iterator
 * @param name      the interface name to match
 *
 * @return          the interface reference, or tb_null if not found
 */
static tb_ifaddrs_interface_ref_t tb_ifaddrs_interface_find(tb_iterator_ref_t iterator, tb_char_t const* name)
{
    // check
    tb_assert_and_check_return_val(iterator && name, tb_null);

    // search for the interface matching the name predicate
    tb_size_t itor = tb_find_all_if(iterator, tb_ifaddrs_interface_pred, name);
    if (itor == tb_iterator_tail(iterator)) return tb_null;

    // found it
    return (tb_ifaddrs_interface_ref_t)tb_iterator_item(iterator, itor);
}
/*! find the charset entry for the given charset type
 *
 * @param type  the charset type
 *
 * @return      the charset reference, or tb_null if not found
 */
static tb_charset_ref_t tb_charset_find_by_type(tb_size_t type)
{
    // wrap the static charset table as a memory iterator
    tb_iterator_t iterator = tb_iterator_init_mem(g_charsets, tb_arrayn(g_charsets), sizeof(tb_charset_t));

    // locate the entry via binary search over the table
    tb_size_t itor = tb_binary_find_all_if(&iterator, tb_charset_comp_by_type, (tb_cpointer_t)TB_CHARSET_TYPE(type));

    // found?
    return itor != tb_iterator_tail(&iterator)? (tb_charset_ref_t)tb_iterator_item(&iterator, itor) : tb_null;
}
/*! find the charset entry for the given charset name
 *
 * @param name  the charset name
 *
 * @return      the charset reference, or tb_null if not found
 */
static tb_charset_ref_t tb_charset_find_by_name(tb_char_t const* name)
{
    // wrap the static charset table as a memory iterator
    tb_iterator_t iterator = tb_iterator_init_mem(g_charsets, tb_arrayn(g_charsets), sizeof(tb_charset_t));

    // locate the entry via binary search over the table
    tb_size_t itor = tb_binary_find_all_if(&iterator, tb_charset_comp_by_name, name);

    // found?
    return itor != tb_iterator_tail(&iterator)? (tb_charset_ref_t)tb_iterator_item(&iterator, itor) : tb_null;
}
/*! check the max-heap property over the range [head, tail)
 *
 * walks the implicit binary heap and verifies that no parent compares
 * less than either of its children.
 *
 * fix: the function defaulted `comp` but then called tb_iterator_comp
 * directly, silently ignoring any caller-supplied comparer; it now uses
 * `comp` for both child comparisons.
 *
 * @param iterator  the iterator
 * @param head      the range head itor
 * @param tail      the range tail itor
 * @param comp      the comparer func, uses tb_iterator_comp if null
 *
 * @return          tb_true if the range is a valid heap
 */
static __tb_inline__ tb_bool_t tb_heap_check(tb_iterator_ref_t iterator, tb_size_t head, tb_size_t tail, tb_iterator_comp_t comp)
{
    // the comparer
    if (!comp) comp = tb_iterator_comp;

    // walk
    if (head != tail)
    {
        tb_size_t root;
        for (root = head; ++head != tail; ++root)
        {
            // root < left?
            if (comp(iterator, tb_iterator_item(iterator, root), tb_iterator_item(iterator, head)) < 0) return tb_false;
            // end?
            else if (++head == tail) break;
            // root < right?
            else if (comp(iterator, tb_iterator_item(iterator, root), tb_iterator_item(iterator, head)) < 0) return tb_false;
        }
    }

    // ok
    return tb_true;
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */

/*! triangulate every inside face of the tessellator mesh
 *
 * walks the current face list once; faces created during triangulation are
 * inserted at the head of the list and are therefore never revisited.
 *
 * @param impl  the tessellator impl (with a valid mesh)
 */
tb_void_t gb_tessellator_triangulation_make(gb_tessellator_impl_t* impl)
{
    // check
    tb_assert_abort(impl && impl->mesh);

    // new faces must be prepended, so the walk below never revisits them
    tb_assert_abort(gb_mesh_face_order(impl->mesh) == GB_MESH_ORDER_INSERT_HEAD);

    // the face iterator
    tb_iterator_ref_t iterator = gb_mesh_face_itor(impl->mesh);
    tb_assert_abort(iterator);

    // walk all faces that exist right now
    tb_size_t itor = tb_iterator_head(iterator);
    tb_size_t tail = tb_iterator_tail(iterator);
    while (itor != tail)
    {
        // fetch the face
        gb_mesh_face_ref_t face = (gb_mesh_face_ref_t)tb_iterator_item(iterator, itor);
        tb_assert_abort(face);

        /* advance before triangulating
         *
         * @note we don't process the new faces at the head
         */
        itor = tb_iterator_next(iterator, itor);

        // triangulate the region of every inside face
        if (gb_tessellator_face_inside(face))
            gb_tessellator_triangulation_make_face(impl, face);
    }

#ifdef __gb_debug__
    // check mesh
    gb_mesh_check(impl->mesh);
#endif
}
/*! push the item into the heap hole and sift it up to its final position
 *
 * walks from the hole towards the heap top ((hole - 1) / 2 is the parent),
 * moving every smaller parent down into the hole, until the parent is not
 * smaller than the item or the top bound is reached; then stores the item
 * in the final hole.
 *
 * @param iterator  the iterator
 * @param head      the base itor of the heap range
 * @param hole      the initial hole position (offset from head)
 * @param top       the upper bound of the sift (offset from head)
 * @param item      the item to place
 * @param comp      the comparer func (must be non-null here)
 */
static __tb_inline__ tb_void_t tb_heap_push(tb_iterator_ref_t iterator, tb_size_t head, tb_size_t hole, tb_size_t top, tb_cpointer_t item, tb_iterator_comp_t comp)
{
    // check
    tb_assert_and_check_return(comp);

    /* (hole - 1) / 2: the parent node of the hole
     * finds the final hole
     *
     * the comma expression caches the parent item so the comparison and
     * the copy below read it only once per step
     */
    tb_size_t       parent = 0;
    tb_cpointer_t   parent_item = tb_null;
    for (parent = (hole - 1) >> 1; hole > top && (comp(iterator, (parent_item = tb_iterator_item(iterator, head + parent)), item) < 0); parent = (hole - 1) >> 1)
    {
        // move item: parent => hole
        // tb_iterator_copy(iterator, head + parent, item);
        tb_iterator_copy(iterator, head + hole, parent_item);

        // move node: hole => parent
        hole = parent;
    }

    // copy item into the final hole
    tb_iterator_copy(iterator, head + hole, item);
}
static tb_void_t tb_find_str_test() { __tb_volatile__ tb_size_t i = 0; __tb_volatile__ tb_size_t n = 1000; // init data tb_char_t** data = (tb_char_t**)tb_nalloc0(n, sizeof(tb_char_t*)); tb_assert_and_check_return(data); // init iterator tb_array_iterator_t array_iterator; tb_iterator_ref_t iterator = tb_array_iterator_init_str(&array_iterator, data, n); // make tb_char_t s[256] = {0}; for (i = 0; i < n; i++) { tb_long_t r = tb_snprintf(s, 256, "%04lu", i); s[r] = '\0'; data[i] = tb_strdup(s); } // find tb_size_t itor = tb_iterator_tail(iterator); tb_hong_t time = tb_mclock(); for (i = 0; i < n; i++) itor = tb_find_all(iterator, (tb_pointer_t)data[800]); time = tb_mclock() - time; // item tb_char_t* item = itor != tb_iterator_tail(iterator)? (tb_char_t*)tb_iterator_item(iterator, itor) : 0; // time tb_trace_i("tb_find_str_all[%s ?= %s]: %lld ms", item, data[800], time); // free data for (i = 0; i < n; i++) tb_free(data[i]); tb_free(data); }
/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */

/*! find the last item in [head, tail) matching the given predicate
 *
 * walks backwards from the tail using the reverse iterator.
 *
 * @param iterator  the iterator (reverse-iterable)
 * @param head      the range head itor
 * @param tail      the range tail itor
 * @param pred      the predicate func
 * @param value     the user value passed to the predicate
 *
 * @return          the found itor, or tb_iterator_tail() if not found
 */
tb_size_t tb_rfind_if(tb_iterator_ref_t iterator, tb_size_t head, tb_size_t tail, tb_predicate_ref_t pred, tb_cpointer_t value)
{
    // check
    tb_assert_and_check_return_val(pred && iterator && (tb_iterator_mode(iterator) & TB_ITERATOR_MODE_REVERSE), tb_iterator_tail(iterator));

    // empty range? not found
    tb_check_return_val(head != tail, tb_iterator_tail(iterator));

    // walk backwards from the tail
    tb_size_t itor  = tail;
    tb_bool_t found = tb_false;
    while (1)
    {
        // step back and test the item
        itor  = tb_iterator_prev(iterator, itor);
        found = pred(iterator, tb_iterator_item(iterator, itor), value);

        // matched or reached the head? stop
        if (found || itor == head) break;
    }

    // the matching position, or tail when nothing matched
    return found? itor : tb_iterator_tail(iterator);
}
/*! get the item at the head of the vector
 *
 * @param vector    the vector
 *
 * @return          the head item
 */
tb_pointer_t tb_vector_head(tb_vector_ref_t vector)
{
    // the head position
    tb_size_t head = tb_iterator_head(vector);

    // the item there
    return tb_iterator_item(vector, head);
}
/*! get the top item of the heap
 *
 * @param heap  the heap
 *
 * @return      the item at the heap head
 */
tb_pointer_t tb_heap_top(tb_heap_ref_t heap)
{
    // the head position
    tb_size_t head = tb_iterator_head(heap);

    // the item there
    return tb_iterator_item(heap, head);
}
tb_void_t tb_fixed_pool_clear(tb_fixed_pool_ref_t self) { // check tb_fixed_pool_t* pool = (tb_fixed_pool_t*)self; tb_assert_and_check_return(pool); // exit items if (pool->func_exit) tb_fixed_pool_walk(self, tb_fixed_pool_item_exit, (tb_pointer_t)pool); // exit the partial slots tb_iterator_ref_t partial_iterator = tb_list_entry_itor(&pool->partial_slots); if (partial_iterator) { // walk it tb_size_t itor = tb_iterator_head(partial_iterator); while (itor != tb_iterator_tail(partial_iterator)) { // the slot tb_fixed_pool_slot_t* slot = (tb_fixed_pool_slot_t*)tb_iterator_item(partial_iterator, itor); tb_assert_and_check_break(slot); // check tb_assert(slot != pool->current_slot); // save next tb_size_t next = tb_iterator_next(partial_iterator, itor); // exit slot tb_fixed_pool_slot_exit(pool, slot); // next itor = next; } } // exit the full slots tb_iterator_ref_t full_iterator = tb_list_entry_itor(&pool->full_slots); if (full_iterator) { // walk it tb_size_t itor = tb_iterator_head(full_iterator); while (itor != tb_iterator_tail(full_iterator)) { // the slot tb_fixed_pool_slot_t* slot = (tb_fixed_pool_slot_t*)tb_iterator_item(full_iterator, itor); tb_assert_and_check_break(slot); // check tb_assert(slot != pool->current_slot); // save next tb_size_t next = tb_iterator_next(full_iterator, itor); // exit slot tb_fixed_pool_slot_exit(pool, slot); // next itor = next; } } // clear current slot if (pool->current_slot && pool->current_slot->pool) tb_static_fixed_pool_clear(pool->current_slot->pool); // clear item count pool->item_count = 0; // clear partial slots tb_list_entry_clear(&pool->partial_slots); // clear full slots tb_list_entry_clear(&pool->full_slots); }
/*! adjust heap
 *
 * sifts the hole down to a leaf, moving the larger child up at each level,
 * then places the item; restores the max-heap property for the range.
 *
 * <pre>
 * init:
 *                                          16(head)
 *                               -------------------------
 *                              |                         |
 *                           (hole)                      10
 *                        --------------             -------------
 *                       |              |           |             |
 *                   8(larger)          7           9             3
 *                 ---------        ----
 *                |         |      |
 *                2         4     1(tail - 1)
 *
 * after:
 *                                          16(head)
 *                               -------------------------
 *                              |                         |
 *                              8                        10
 *                        --------------             -------------
 *                       |              |           |             |
 *                    (hole)            7           9             3
 *                 ---------        ----
 *                |         |      |
 *                2     (larger)4 1(tail - 1)
 *
 * after:
 *                                          16(head)
 *                               -------------------------
 *                              |                         |
 *                              8                        10
 *                        --------------             -------------
 *                       |              |           |             |
 *                       4              7           9             3
 *                 ---------        ----
 *                |         |      |
 *                2      (hole)   1(tail - 1)
 * </pre>
 *
 * @param iterator  the iterator
 * @param head      the base itor of the heap range
 * @param hole      the hole position to fill (offset from head)
 * @param tail      the range size (offset from head, one past the last node)
 * @param item      the item to place
 * @param comp      the comparer func, uses tb_iterator_comp if null
 */
static __tb_inline__ tb_void_t tb_heap_adjust(tb_iterator_ref_t iterator, tb_size_t head, tb_size_t hole, tb_size_t tail, tb_cpointer_t item, tb_iterator_comp_t comp)
{
    // the comparer
    if (!comp) comp = tb_iterator_comp;

#if 0
    // save top position
    tb_size_t top = hole;

    // 2 * hole + 2: the right child node of hole
    tb_size_t child = (hole << 1) + 2;
    for (; child < tail; child = (child << 1) + 2)
    {
        // the larger child node
        if (comp(iterator, tb_iterator_item(iterator, head + child), tb_iterator_item(iterator, head + child - 1)) < 0) child--;

        // the larger child node => hole
        tb_iterator_copy(iterator, head + hole, tb_iterator_item(iterator, head + child));

        // move the hole down to it's larger child node
        hole = child;
    }

    // no right child node?
    if (child == tail)
    {
        // the last child => hole
        tb_iterator_copy(iterator, head + hole, tb_iterator_item(iterator, head + tail - 1));

        // move hole down to tail
        hole = tail - 1;
    }

    // push item into the hole
    tb_heap_push(iterator, head, hole, top, item, comp);
#else
    // walk, 2 * hole + 1: the left child node of hole
    tb_size_t       child = (hole << 1) + 1;
    tb_cpointer_t   child_item = tb_null;
    tb_cpointer_t   child_item_r = tb_null;
    for (; child < tail; child = (child << 1) + 1)
    {
        // the larger child node: prefer the right child when it compares greater
        child_item = tb_iterator_item(iterator, head + child);
        if (child + 1 < tail && comp(iterator, child_item, (child_item_r = tb_iterator_item(iterator, head + child + 1))) < 0)
        {
            child++;
            child_item = child_item_r;
        }

        // end? the larger child is not greater than the item: the hole is final
        if (comp(iterator, child_item, item) < 0) break;

        // the larger child node => hole
        tb_iterator_copy(iterator, head + hole, child_item);

        // move the hole down to it's larger child node
        hole = child;
    }

    // copy item into the final hole
    tb_iterator_copy(iterator, head + hole, item);
#endif
}
/*! get the item at the head of the list
 *
 * @param self  the list
 *
 * @return      the head item
 */
tb_pointer_t tb_list_head(tb_list_ref_t self)
{
    // the head position
    tb_size_t head = tb_iterator_head(self);

    // the item there
    return tb_iterator_item(self, head);
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */

/*! remove all items for which the comparer returns zero
 *
 * walks the iterator once, coalescing consecutive matching items into ranges
 * and removing each range with a single tb_iterator_remove_range() call.
 *
 * comparer contract (as used here): 0 means "remove this item", > 0 means
 * keep it, < 0 means keep it and stop walking after flushing pending
 * removals.
 *
 * mutable iterators (TB_ITERATOR_MODE_MUTABLE) invalidate itors on removal,
 * so after each range removal the walk position is recomputed from the range
 * base; non-mutable iterators keep walking with the saved next itor.
 *
 * @param iterator  the iterator (forward-iterable, not read-only)
 * @param comp      the comparer func
 * @param priv      the user data passed to the comparer
 */
tb_void_t tb_remove_if(tb_iterator_ref_t iterator, tb_iterator_comp_t comp, tb_cpointer_t priv)
{
    // check
    tb_assert_and_check_return(iterator && comp);

    // the iterator mode
    tb_size_t mode = tb_iterator_mode(iterator);
    tb_assert_and_check_return((mode & TB_ITERATOR_MODE_FORWARD));
    tb_assert_and_check_return(!(mode & TB_ITERATOR_MODE_READONLY));

    // done
    tb_long_t ok = 1;                                   // last comparer result: 0 => remove
    tb_size_t size = 0;                                 // count of pending items to remove
    tb_bool_t stop = tb_false;                          // comparer requested early stop
    tb_bool_t need = tb_false;                          // a removal range is pending
    tb_size_t prev = tb_iterator_tail(iterator);        // itor just before the current one
    tb_size_t itor = tb_iterator_head(iterator);
    tb_size_t base = tb_iterator_tail(iterator);        // itor just before the pending range
    tb_bool_t bmutable = (mode & TB_ITERATOR_MODE_MUTABLE)? tb_true : tb_false;
    while (itor != tb_iterator_tail(iterator))
    {
        // save next
        tb_size_t next = tb_iterator_next(iterator, itor);

        // done func: < 0 stops the walk after flushing pending removals
        if ((ok = comp(iterator, tb_iterator_item(iterator, itor), priv)) < 0) stop = tb_true;

        // remove it?
        if (!ok)
        {
            // is the first removed item?
            if (!need)
            {
                // save the removed range base
                base = prev;

                // need remove items
                need = tb_true;
            }

            // update size
            size++;
        }

        // the removed range have been passed or stop or end?
        if (ok || next == tb_iterator_tail(iterator))
        {
            // need remove items?
            if (need)
            {
                // check
                tb_assert_abort(size);

                // the previous tail
                tb_size_t prev_tail = tb_iterator_tail(iterator);

                // remove items: the range end is `next` when the last walked
                // item was removed too, otherwise the kept item `itor`
                tb_iterator_remove_range(iterator, base, !ok? next : itor, size);

                // reset state
                need = tb_false;
                size = 0;

                // is the mutable iterator? the saved itors are now invalid
                if (bmutable)
                {
                    // update itor
                    prev = base;

                    // the body items are removed?
                    if (base != prev_tail)
                    {
                        // the next itor
                        itor = tb_iterator_next(iterator, base);

                        // the last item be not removed? skip the last walked item
                        if (ok)
                        {
                            prev = itor;
                            itor = tb_iterator_next(iterator, itor);
                        }
                    }
                    // the head items are removed?
                    else itor = tb_iterator_head(iterator);

                    // stop?
                    tb_check_break(!stop);

                    // continue?
                    continue ;
                }
            }

            // stop?
            tb_check_break(!stop);
        }

        // next
        prev = itor;
        itor = next;
    }
}
/*! get the item at the head of the circle queue
 *
 * @param self  the circle queue
 *
 * @return      the head item
 */
tb_pointer_t tb_circle_queue_head(tb_circle_queue_ref_t self)
{
    // the queue as an iterator
    tb_iterator_ref_t iterator = (tb_iterator_ref_t)self;

    // the head item
    return tb_iterator_item(iterator, tb_iterator_head(iterator));
}
/*! parse a register name from the input and map it to a register code
 *
 * reads an alphabetic token from *pp, looks it up in the sorted register
 * table via binary search, and on success stores the register code in *r
 * and advances *pp past the token and any trailing spaces.
 *
 * @param pp    the input pointer (in/out; advanced only on success)
 * @param e     the end of the input
 * @param r     receives the register code on success
 *
 * @return      tb_true if a known register name was parsed
 */
tb_bool_t vm86_parser_get_register(tb_char_t const** pp, tb_char_t const* e, tb_uint16_t* r)
{
    // check
    tb_assert(pp && e && r);

    // done
    tb_bool_t           ok = tb_false;
    tb_char_t const*    p = *pp;
    do
    {
        // save base
        tb_char_t const* b = p;

        /* get the register name token
         *
         * the bound p - b < sizeof(name) keeps at least one trailing NUL
         * in the zero-initialized buffer
         */
        tb_char_t name[64] = {0};
        while (p < e && tb_isalpha(*p)) p++;
        tb_check_break(p <= e && p - b < sizeof(name));
        tb_memcpy(name, b, p - b);

        // skip the space
        while (p < e && tb_isspace(*p)) p++;

        // the register entry type
        typedef struct __vm86_register_entry_t
        {
            // the register name
            tb_char_t const*    name;

            // the register index
            tb_uint8_t          index;

        }vm86_register_entry_t, *vm86_register_entry_ref_t;

        // the registers (kept sorted by name for the binary search below)
        static vm86_register_entry_t s_registers[] =
        {
            { "ah",     VM86_REGISTER_EAX | VM86_REGISTER_AH }
        ,   { "al",     VM86_REGISTER_EAX | VM86_REGISTER_AL }
        ,   { "ax",     VM86_REGISTER_EAX | VM86_REGISTER_AX }
        ,   { "bh",     VM86_REGISTER_EBX | VM86_REGISTER_BH }
        ,   { "bl",     VM86_REGISTER_EBX | VM86_REGISTER_BL }
        ,   { "bx",     VM86_REGISTER_EBX | VM86_REGISTER_BX }
        ,   { "ch",     VM86_REGISTER_ECX | VM86_REGISTER_CH }
        ,   { "cl",     VM86_REGISTER_ECX | VM86_REGISTER_CL }
        ,   { "cx",     VM86_REGISTER_ECX | VM86_REGISTER_CX }
        ,   { "dh",     VM86_REGISTER_EDX | VM86_REGISTER_DH }
        ,   { "dl",     VM86_REGISTER_EDX | VM86_REGISTER_DL }
        ,   { "dx",     VM86_REGISTER_EDX | VM86_REGISTER_DX }
        ,   { "eax",    VM86_REGISTER_EAX }
        ,   { "ebp",    VM86_REGISTER_EBP }
        ,   { "ebx",    VM86_REGISTER_EBX }
        ,   { "ecx",    VM86_REGISTER_ECX }
        ,   { "edi",    VM86_REGISTER_EDI }
        ,   { "edx",    VM86_REGISTER_EDX }
        ,   { "esi",    VM86_REGISTER_ESI }
        ,   { "esp",    VM86_REGISTER_ESP }
        };

        // init iterator
        tb_array_iterator_t array_iterator;
        tb_iterator_ref_t   iterator = tb_array_iterator_init_mem(&array_iterator, s_registers, tb_arrayn(s_registers), sizeof(vm86_register_entry_t));

        // find register by the binary search
        tb_size_t itor = tb_binary_find_all_if(iterator, vm86_parser_comp_register, name);
        tb_check_break(itor != tb_iterator_tail(iterator));

        // get the register
        vm86_register_entry_ref_t entry = (vm86_register_entry_ref_t)tb_iterator_item(iterator, itor);
        tb_assert_and_check_break(entry && (entry->index & VM86_REGISTER_MASK) < VM86_REGISTER_MAXN);

        // save register
        *r = entry->index;

        // trace
        tb_trace_d("register: %s: %x", name, entry->index);

        // ok
        ok = tb_true;

    } while (0);

    // update the code pointer if ok
    if (ok) *pp = p;

    // ok?
    return ok;
}
/*! clear the fixed pool
 *
 * exits all allocated items (when an exit func is configured), destroys the
 * current slot and every partial/full slot, and resets the slot lists and
 * the item count.
 *
 * @param pool  the fixed pool
 */
tb_void_t tb_fixed_pool_clear(tb_fixed_pool_ref_t pool)
{
    // check
    tb_fixed_pool_impl_t* impl = (tb_fixed_pool_impl_t*)pool;
    tb_assert_and_check_return(impl);

    // exit items
    if (impl->func_exit) tb_fixed_pool_walk(pool, tb_fixed_pool_item_exit, (tb_pointer_t)impl);

    // exit the current slot first
    if (impl->current_slot) tb_fixed_pool_slot_exit(impl, impl->current_slot);
    impl->current_slot = tb_null;

    // exit the partial slots
    tb_iterator_ref_t partial_iterator = tb_list_entry_itor(&impl->partial_slots);
    if (partial_iterator)
    {
        // walk it
        tb_size_t itor = tb_iterator_head(partial_iterator);
        while (itor != tb_iterator_tail(partial_iterator))
        {
            // the slot
            tb_fixed_pool_slot_t* slot = (tb_fixed_pool_slot_t*)tb_iterator_item(partial_iterator, itor);
            tb_assert_and_check_break(slot);

            // save next before the slot is destroyed
            tb_size_t next = tb_iterator_next(partial_iterator, itor);

            // exit data
            tb_fixed_pool_slot_exit(impl, slot);

            // next
            itor = next;
        }
    }

    // exit the full slots
    tb_iterator_ref_t full_iterator = tb_list_entry_itor(&impl->full_slots);
    if (full_iterator)
    {
        // walk it
        tb_size_t itor = tb_iterator_head(full_iterator);
        while (itor != tb_iterator_tail(full_iterator))
        {
            // the slot
            tb_fixed_pool_slot_t* slot = (tb_fixed_pool_slot_t*)tb_iterator_item(full_iterator, itor);
            tb_assert_and_check_break(slot);

            // save next before the slot is destroyed
            tb_size_t next = tb_iterator_next(full_iterator, itor);

            // exit data
            tb_fixed_pool_slot_exit(impl, slot);

            // next
            itor = next;
        }
    }

    // clear item count
    impl->item_count = 0;

    // clear current slot
    impl->current_slot = tb_null;

    // clear partial slots
    tb_list_entry_clear(&impl->partial_slots);

    // clear full slots
    tb_list_entry_clear(&impl->full_slots);
}
/*! draw the clip path into the stencil buffer
 *
 * fast paths: a rect-like path clips via bounds, a triangle-like path clips
 * via the triangle clipper; otherwise the path fill is tessellated and each
 * contour is drawn as a triangle fan.
 *
 * fix: `size = tb_iterator_item(...)` assigned a tb_pointer_t to tb_size_t
 * without a cast — an implicit pointer-to-integer conversion (a C constraint
 * violation); an explicit (tb_size_t) cast is now applied.
 *
 * @param draw  the draw context
 * @param item  the clipper item (must be a path item)
 */
static __tb_inline__ tb_void_t g2_gl_draw_stencil_clip_path(g2_gl_draw_t* draw, g2_clipper_item_t const* item)
{
    // check
    tb_assert_and_check_return(item->type == G2_CLIPPER_ITEM_PATH);

    // the clip path
    g2_gl_path_t* path = (g2_gl_path_t*)item->u.path;
    tb_assert_and_check_return(path);

    // null?
    tb_check_return(!g2_path_null(path));

    // like
    g2_gl_path_make_like(path);

    // like rect?
    if (path->like == G2_GL_PATH_LIKE_RECT)
    {
        // clip bounds
        g2_gl_draw_stencil_clip_bounds(draw, &path->rect);

        // ok
        return ;
    }
    // like triangle?
    else if (path->like == G2_GL_PATH_LIKE_TRIG)
    {
        // clip triangle
        g2_clipper_item_t clip = {0};
        clip.type = G2_CLIPPER_ITEM_TRIANGLE;
        clip.mode = item->mode;
        clip.u.triangle = path->trig;
        g2_gl_draw_stencil_clip_triangle(draw, &clip);

        // ok
        return ;
    }

    // make draw
    if (!g2_gl_path_make_fill(path)) return ;

    // check
    tb_assert(path->fill.data && tb_vector_size(path->fill.data));
    tb_assert(path->fill.size && tb_vector_size(path->fill.size));
    tb_check_return(path->fill.rect.x1 < path->fill.rect.x2 && path->fill.rect.y1 < path->fill.rect.y2);

    // init vertices
    if (draw->context->version < 0x20)
        g2_glVertexPointer(2, G2_GL_FLOAT, 0, tb_vector_data(path->fill.data));
    else g2_glVertexAttribPointer(g2_gl_program_location(draw->program, G2_GL_PROGRAM_LOCATION_VERTICES), 2, G2_GL_FLOAT, G2_GL_FALSE, 0, tb_vector_data(path->fill.data));

    // clip path: draw one triangle fan per contour
    tb_size_t head = 0;
    tb_size_t size = 0;
    tb_size_t itor = tb_iterator_head(path->fill.size);
    tb_size_t tail = tb_iterator_tail(path->fill.size);
    for (; itor != tail; itor++)
    {
        // the contour vertex count (stored integer-encoded in the vector)
        size = (tb_size_t)tb_iterator_item(path->fill.size, itor);
        g2_glDrawArrays(G2_GL_TRIANGLE_FAN, (g2_GLint_t)head, (g2_GLint_t)size);
        head += size;
    }
}
/*! the worker thread main loop
 *
 * pulls jobs from the pool's urgent, waiting and pending queues under the
 * pool spinlock, runs them, records per-task timing stats, and sleeps on
 * the pool semaphore while idle; on exit it runs the per-worker private
 * exit callbacks (in reverse order) and frees the job/stats containers.
 *
 * @param priv  the worker (tb_thread_pool_worker_t*)
 *
 * @return      always tb_null
 */
static tb_pointer_t tb_thread_pool_worker_loop(tb_cpointer_t priv)
{
    // the worker
    tb_thread_pool_worker_t* worker = (tb_thread_pool_worker_t*)priv;

    // trace
    tb_trace_d("worker[%lu]: init", worker? worker->id : -1);

    // done
    do
    {
        // check
        tb_assert_and_check_break(worker && !worker->jobs && !worker->stats);

        // the pool
        tb_thread_pool_impl_t* impl = (tb_thread_pool_impl_t*)worker->pool;
        tb_assert_and_check_break(impl && impl->semaphore);

        // wait some time for leaving the lock
        tb_msleep((worker->id + 1)* 20);

        // init jobs
        worker->jobs = tb_vector_init(TB_THREAD_POOL_JOBS_WORKING_GROW, tb_element_ptr(tb_null, tb_null));
        tb_assert_and_check_break(worker->jobs);

        // init stats
        worker->stats = tb_hash_map_init(TB_HASH_MAP_BUCKET_SIZE_MICRO, tb_element_ptr(tb_null, tb_null), tb_element_mem(sizeof(tb_thread_pool_job_stats_t), tb_null, tb_null));
        tb_assert_and_check_break(worker->stats);

        // loop
        while (1)
        {
            // pull jobs if be idle
            if (!tb_vector_size(worker->jobs))
            {
                // enter
                tb_spinlock_enter(&impl->lock);

                // init the pull time
                worker->pull = 0;

                // pull from the urgent jobs
                if (tb_list_entry_size(&impl->jobs_urgent))
                {
                    // trace
                    tb_trace_d("worker[%lu]: try pulling from urgent: %lu", worker->id, tb_list_entry_size(&impl->jobs_urgent));

                    // pull it
                    tb_remove_if_until(tb_list_entry_itor(&impl->jobs_urgent), tb_thread_pool_worker_walk_pull, worker);
                }

                // pull from the waiting jobs
                if (tb_list_entry_size(&impl->jobs_waiting))
                {
                    // trace
                    tb_trace_d("worker[%lu]: try pulling from waiting: %lu", worker->id, tb_list_entry_size(&impl->jobs_waiting));

                    // pull it
                    tb_remove_if_until(tb_list_entry_itor(&impl->jobs_waiting), tb_thread_pool_worker_walk_pull, worker);
                }

                // pull from the pending jobs and clean some finished and killed jobs
                if (tb_list_entry_size(&impl->jobs_pending))
                {
                    // trace
                    tb_trace_d("worker[%lu]: try pulling from pending: %lu", worker->id, tb_list_entry_size(&impl->jobs_pending));

                    // no jobs? try to pull from the pending jobs
                    if (!tb_vector_size(worker->jobs)) tb_remove_if(tb_list_entry_itor(&impl->jobs_pending), tb_thread_pool_worker_walk_pull_and_clean, worker);
                    // clean some finished and killed jobs
                    else tb_remove_if(tb_list_entry_itor(&impl->jobs_pending), tb_thread_pool_worker_walk_clean, worker);
                }

                // leave
                tb_spinlock_leave(&impl->lock);

                // idle? wait it
                if (!tb_vector_size(worker->jobs))
                {
                    // killed?
                    tb_check_break(!tb_atomic_get(&worker->bstoped));

                    // trace
                    tb_trace_d("worker[%lu]: wait: ..", worker->id);

                    // wait some time
                    tb_long_t wait = tb_semaphore_wait(impl->semaphore, -1);
                    tb_assert_and_check_break(wait > 0);

                    // trace
                    tb_trace_d("worker[%lu]: wait: ok", worker->id);

                    // continue it
                    continue;
                }
                else
                {
#ifdef TB_TRACE_DEBUG
                    // update the jobs urgent size
                    tb_size_t jobs_urgent_size = tb_list_entry_size(&impl->jobs_urgent);

                    // update the jobs waiting size
                    tb_size_t jobs_waiting_size = tb_list_entry_size(&impl->jobs_waiting);

                    // update the jobs pending size
                    tb_size_t jobs_pending_size = tb_list_entry_size(&impl->jobs_pending);

                    // trace
                    tb_trace_d("worker[%lu]: pull: jobs: %lu, time: %lu ms, waiting: %lu, pending: %lu, urgent: %lu", worker->id, tb_vector_size(worker->jobs), worker->pull, jobs_waiting_size, jobs_pending_size, jobs_urgent_size);
#endif
                }
            }

            // done jobs
            tb_for_all (tb_thread_pool_job_t*, job, worker->jobs)
            {
                // check
                tb_assert_and_check_continue(job && job->task.done);

                // the job state: atomically claim a waiting job for working
                tb_size_t state = tb_atomic_fetch_and_pset(&job->state, TB_STATE_WAITING, TB_STATE_WORKING);

                // the job is waiting? work it
                if (state == TB_STATE_WAITING)
                {
                    // trace
                    tb_trace_d("worker[%lu]: done: task[%p:%s]: ..", worker->id, job->task.done, job->task.name);

                    // init the time
                    tb_hong_t time = tb_cache_time_spak();

                    // done the job
                    job->task.done((tb_thread_pool_worker_ref_t)worker, job->task.priv);

                    // computate the time
                    time = tb_cache_time_spak() - time;

                    // exists? update time and count
                    tb_size_t               itor;
                    tb_hash_map_item_ref_t  item = tb_null;
                    if (    ((itor = tb_hash_map_find(worker->stats, job->task.done)) != tb_iterator_tail(worker->stats))
                        &&  (item = (tb_hash_map_item_ref_t)tb_iterator_item(worker->stats, itor)))
                    {
                        // the stats
                        tb_thread_pool_job_stats_t* stats = (tb_thread_pool_job_stats_t*)item->data;
                        tb_assert_and_check_break(stats);

                        // update the done count
                        stats->done_count++;

                        // update the total time
                        stats->total_time += time;
                    }

                    // no item? add it
                    if (!item)
                    {
                        // init stats
                        tb_thread_pool_job_stats_t stats = {0};
                        stats.done_count = 1;
                        stats.total_time = time;

                        // add stats
                        tb_hash_map_insert(worker->stats, job->task.done, &stats);
                    }

#ifdef TB_TRACE_DEBUG
                    tb_size_t done_count = 0;
                    tb_hize_t total_time = 0;
                    tb_thread_pool_job_stats_t* stats = (tb_thread_pool_job_stats_t*)tb_hash_map_get(worker->stats, job->task.done);
                    if (stats)
                    {
                        done_count = stats->done_count;
                        total_time = stats->total_time;
                    }

                    // trace
                    tb_trace_d("worker[%lu]: done: task[%p:%s]: time: %lld ms, average: %lld ms, count: %lu", worker->id, job->task.done, job->task.name, time, (total_time / (tb_hize_t)done_count), done_count);
#endif

                    // update the job state
                    tb_atomic_set(&job->state, TB_STATE_FINISHED);
                }
                // the job is killing? work it
                else if (state == TB_STATE_KILLING)
                {
                    // update the job state
                    tb_atomic_set(&job->state, TB_STATE_KILLED);
                }
            }

            // clear jobs
            tb_vector_clear(worker->jobs);
        }

    } while (0);

    // exit worker
    if (worker)
    {
        // trace
        tb_trace_d("worker[%lu]: exit", worker->id);

        // stoped
        tb_atomic_set(&worker->bstoped, 1);

        // exit all private data, in reverse registration order
        tb_size_t i = 0;
        tb_size_t n = tb_arrayn(worker->priv);
        for (i = 0; i < n; i++)
        {
            // the private data
            tb_thread_pool_worker_priv_t* priv = &worker->priv[n - i - 1];

            // exit it
            if (priv->exit) priv->exit((tb_thread_pool_worker_ref_t)worker, priv->priv);

            // clear it
            priv->exit = tb_null;
            priv->priv = tb_null;
        }

        // exit stats
        if (worker->stats) tb_hash_map_exit(worker->stats);
        worker->stats = tb_null;

        // exit jobs
        if (worker->jobs) tb_vector_exit(worker->jobs);
        worker->jobs = tb_null;
    }

    // exit
    tb_thread_return(tb_null);
    return tb_null;
}
/*! get the last item of the circle queue
 *
 * @param self  the circle queue
 *
 * @return      the last item
 */
tb_pointer_t tb_circle_queue_last(tb_circle_queue_ref_t self)
{
    // the queue as an iterator
    tb_iterator_ref_t iterator = (tb_iterator_ref_t)self;

    // the last item
    return tb_iterator_item(iterator, tb_iterator_last(iterator));
}
/*! get the last item of the vector
 *
 * @param vector    the vector
 *
 * @return          the last item
 */
tb_pointer_t tb_vector_last(tb_vector_ref_t vector)
{
    // the last position
    tb_size_t last = tb_iterator_last(vector);

    // the item there
    return tb_iterator_item(vector, last);
}
/*! get the last item of the list
 *
 * @param self  the list
 *
 * @return      the last item
 */
tb_pointer_t tb_list_last(tb_list_ref_t self)
{
    // the last position
    tb_size_t last = tb_iterator_last(self);

    // the item there
    return tb_iterator_item(self, last);
}
/*! read an array object from the binary stream
 *
 * reads `size` items; each item is either a back-reference index into the
 * already-read object list (type 0) or a new object read by the registered
 * reader func for its type.
 *
 * fix: the per-item locals were named `type`/`size` and shadowed the
 * function parameters of the same names (-Wshadow); they are renamed to
 * `item_type`/`item_size`.
 *
 * @param reader    the binary reader
 * @param type      the object type tag (unused here beyond dispatch)
 * @param size      the item count
 *
 * @return          the array object, or tb_null on failure
 */
static tb_object_ref_t tb_object_bin_reader_func_array(tb_object_bin_reader_t* reader, tb_size_t type, tb_uint64_t size)
{
    // check
    tb_assert_and_check_return_val(reader && reader->stream && reader->list, tb_null);

    // empty?
    if (!size) return tb_object_array_init(TB_OBJECT_BIN_READER_ARRAY_GROW, tb_false);

    // init array
    tb_object_ref_t array = tb_object_array_init(TB_OBJECT_BIN_READER_ARRAY_GROW, tb_false);
    tb_assert_and_check_return_val(array, tb_null);

    // walk
    tb_size_t i = 0;
    tb_size_t n = (tb_size_t)size;
    for (i = 0; i < n; i++)
    {
        // the item type & size
        tb_size_t   item_type = 0;
        tb_uint64_t item_size = 0;
        tb_object_reader_bin_type_size(reader->stream, &item_type, &item_size);

        // trace
        tb_trace_d("item: type: %lu, size: %llu", item_type, item_size);

        // is index?
        tb_object_ref_t item = tb_null;
        if (!item_type)
        {
            // the object index
            tb_size_t index = (tb_size_t)item_size;

            // check
            tb_assert_and_check_break(index < tb_vector_size(reader->list));

            // the item
            item = (tb_object_ref_t)tb_iterator_item(reader->list, index);

            // refn++
            if (item) tb_object_inc(item);
        }
        else
        {
            // the reader func
            tb_object_bin_reader_func_t func = tb_object_bin_reader_func(item_type);
            tb_assert_and_check_break(func);

            // read it
            item = func(reader, item_type, item_size);

            // save it
            tb_vector_insert_tail(reader->list, item);
        }

        // check
        tb_assert_and_check_break(item);

        // append item
        tb_object_array_append(array, item);
    }

    // failed?
    if (i != n)
    {
        if (array) tb_object_exit(array);
        array = tb_null;
    }

    // ok?
    return array;
}
/*! wait for aio events using select()
 *
 * snapshots the registered fd sets under the pfds lock, calls select(),
 * then walks the sock/aioo hash under the hash lock and fills the event
 * list; the internal spak socket wakes and/or kills the wait.
 *
 * NOTE(review): the tb_assert_and_check_return_val() calls inside the
 * hash-walk loop return while impl->lock.hash is still held — a lock leak
 * on those (assert-only) failure paths; confirm before relying on it.
 *
 * @param rtor      the reactor
 * @param list      the output event list
 * @param maxn      the event list capacity
 * @param timeout   the timeout in ms, wait forever if < 0
 *
 * @return          the event count, 0 on timeout, -1 on failure/kill
 */
static tb_long_t tb_aiop_rtor_select_wait(tb_aiop_rtor_impl_t* rtor, tb_aioe_ref_t list, tb_size_t maxn, tb_long_t timeout)
{
    // check
    tb_aiop_rtor_select_impl_t* impl = (tb_aiop_rtor_select_impl_t*)rtor;
    tb_assert_and_check_return_val(impl && rtor->aiop && list && maxn, -1);

    // the aiop
    tb_aiop_impl_t* aiop = rtor->aiop;
    tb_assert_and_check_return_val(aiop, tb_false);

    // init time
    struct timeval t = {0};
    if (timeout > 0)
    {
#ifdef TB_CONFIG_OS_WINDOWS
        t.tv_sec = (LONG)(timeout / 1000);
#else
        t.tv_sec = (timeout / 1000);
#endif
        t.tv_usec = (timeout % 1000) * 1000;
    }

    // loop until events arrive, the wait is stopped, or the deadline passes
    tb_long_t wait = 0;
    tb_bool_t stop = tb_false;
    tb_hong_t time = tb_mclock();
    while (!wait && !stop && (timeout < 0 || tb_mclock() < time + timeout))
    {
        // enter
        tb_spinlock_enter(&impl->lock.pfds);

        // init fdo: snapshot the registered sets, select() mutates its args
        tb_size_t sfdm = impl->sfdm;
        tb_memcpy(&impl->rfdo, &impl->rfdi, sizeof(fd_set));
        tb_memcpy(&impl->wfdo, &impl->wfdi, sizeof(fd_set));

        // leave
        tb_spinlock_leave(&impl->lock.pfds);

        // wait
#ifdef TB_CONFIG_OS_WINDOWS
        tb_long_t sfdn = tb_ws2_32()->select((tb_int_t)sfdm + 1, &impl->rfdo, &impl->wfdo, tb_null, timeout >= 0? &t : tb_null);
#else
        tb_long_t sfdn = select(sfdm + 1, &impl->rfdo, &impl->wfdo, tb_null, timeout >= 0? &t : tb_null);
#endif
        tb_assert_and_check_return_val(sfdn >= 0, -1);

        // timeout?
        tb_check_return_val(sfdn, 0);

        // enter
        tb_spinlock_enter(&impl->lock.hash);

        // sync: translate ready fds into aio events
        tb_size_t itor = tb_iterator_head(impl->hash);
        tb_size_t tail = tb_iterator_tail(impl->hash);
        for (; itor != tail && wait >= 0 && (tb_size_t)wait < maxn; itor = tb_iterator_next(impl->hash, itor))
        {
            tb_hash_map_item_ref_t item = (tb_hash_map_item_ref_t)tb_iterator_item(impl->hash, itor);
            if (item)
            {
                // the sock
                tb_socket_ref_t sock = (tb_socket_ref_t)item->name;
                tb_assert_and_check_return_val(sock, -1);

                // spak? drain the wakeup byte; 'k' means the wait was killed
                if (sock == aiop->spak[1] && FD_ISSET(((tb_long_t)aiop->spak[1] - 1), &impl->rfdo))
                {
                    // read spak
                    tb_char_t spak = '\0';
                    if (1 != tb_socket_recv(aiop->spak[1], (tb_byte_t*)&spak, 1)) wait = -1;

                    // killed?
                    if (spak == 'k') wait = -1;
                    tb_check_break(wait >= 0);

                    // stop to wait
                    stop = tb_true;

                    // continue it
                    continue ;
                }

                // filter spak
                tb_check_continue(sock != aiop->spak[1]);

                // the fd (tbox sockets encode fd + 1)
                tb_long_t fd = (tb_long_t)item->name - 1;

                // the aioo
                tb_aioo_impl_t* aioo = (tb_aioo_impl_t*)item->data;
                tb_assert_and_check_return_val(aioo && aioo->sock == sock, -1);

                // init aioe
                tb_aioe_t aioe = {0};
                aioe.priv = aioo->priv;
                aioe.aioo = (tb_aioo_ref_t)aioo;
                if (FD_ISSET(fd, &impl->rfdo))
                {
                    aioe.code |= TB_AIOE_CODE_RECV;
                    if (aioo->code & TB_AIOE_CODE_ACPT) aioe.code |= TB_AIOE_CODE_ACPT;
                }
                if (FD_ISSET(fd, &impl->wfdo))
                {
                    aioe.code |= TB_AIOE_CODE_SEND;
                    if (aioo->code & TB_AIOE_CODE_CONN) aioe.code |= TB_AIOE_CODE_CONN;
                }

                // ok?
                if (aioe.code)
                {
                    // save aioe
                    list[wait++] = aioe;

                    // oneshot? clear it
                    if (aioo->code & TB_AIOE_CODE_ONESHOT)
                    {
                        // clear aioo
                        aioo->code = TB_AIOE_CODE_NONE;
                        aioo->priv = tb_null;

                        // clear events
                        tb_spinlock_enter(&impl->lock.pfds);
                        FD_CLR(fd, &impl->rfdi);
                        FD_CLR(fd, &impl->wfdi);
                        tb_spinlock_leave(&impl->lock.pfds);
                    }
                }
            }
        }

        // leave
        tb_spinlock_leave(&impl->lock.hash);
    }

    // ok
    return wait;
}
/*! read a dictionary object from the binary object stream
 *
 * reads `size` key/value pairs; each key or value is either a back-reference
 * index into the list of previously read objects (encoded type == 0), or an
 * inline object decoded by the reader function registered for its type.
 *
 * @param reader    the binary reader (stream + back-reference object list)
 * @param type      the object type tag of the dictionary itself
 * @param size      the number of key/value pairs; 0 yields an empty dictionary
 *
 * @return          the dictionary object, or tb_null on failure
 */
static tb_object_ref_t tb_object_bin_reader_func_dictionary(tb_object_bin_reader_t* reader, tb_size_t type, tb_uint64_t size)
{
    // check
    tb_assert_and_check_return_val(reader && reader->stream && reader->list, tb_null);

    // empty? return a minimal empty dictionary
    if (!size) return tb_object_dictionary_init(TB_OBJECT_DICTIONARY_SIZE_MICRO, tb_false);

    // init dictionary
    tb_object_ref_t dictionary = tb_object_dictionary_init(0, tb_false);
    tb_assert_and_check_return_val(dictionary, tb_null);

    // walk the key/value pairs; i is checked against n afterwards to detect an early break
    tb_size_t i = 0;
    tb_size_t n = (tb_size_t)size;
    for (i = 0; i < n; i++)
    {
        // read key
        tb_object_ref_t key = tb_null;
        do
        {
            // the type & size of the key entry
            // NOTE(review): these locals shadow the function parameters `type` and `size`
            tb_size_t type = 0;
            tb_uint64_t size = 0;
            tb_object_reader_bin_type_size(reader->stream, &type, &size);

            // trace
            tb_trace_d("key: type: %lu, size: %llu", type, size);

            // is index? (back-reference to a previously read object)
            if (!type)
            {
                // the object index
                tb_size_t index = (tb_size_t)size;

                // check
                tb_assert_and_check_break(index < tb_vector_size(reader->list));

                // the item (borrowed from the back-reference list, not inc'd here)
                key = (tb_object_ref_t)tb_iterator_item(reader->list, index);
            }
            else
            {
                // check: dictionary keys must be strings
                tb_assert_and_check_break(type == TB_OBJECT_TYPE_STRING);

                // the reader func
                tb_object_bin_reader_func_t func = tb_object_bin_reader_func(type);
                tb_assert_and_check_break(func);

                // read it
                key = func(reader, type, size);
                tb_assert_and_check_break(key);

                // save it so later entries can back-reference it
                tb_vector_insert_tail(reader->list, key);

                // refn-- (presumably the list insertion took its own reference — TODO confirm)
                tb_object_dec(key);
            }

        } while (0);

        // check: a valid, non-empty string key is required; otherwise abort the walk
        tb_assert_and_check_break(key && tb_object_type(key) == TB_OBJECT_TYPE_STRING);
        tb_assert_and_check_break(tb_object_string_size(key) && tb_object_string_cstr(key));

        // read val
        tb_object_ref_t val = tb_null;
        do
        {
            // the type & size of the value entry (shadows the parameters, as above)
            tb_size_t type = 0;
            tb_uint64_t size = 0;
            tb_object_reader_bin_type_size(reader->stream, &type, &size);

            // trace
            tb_trace_d("val: type: %lu, size: %llu", type, size);

            // is index? (back-reference to a previously read object)
            if (!type)
            {
                // the object index
                tb_size_t index = (tb_size_t)size;

                // check
                tb_assert_and_check_break(index < tb_vector_size(reader->list));

                // the item
                val = (tb_object_ref_t)tb_iterator_item(reader->list, index);

                // refn++ (take a reference for the dictionary — presumably dictionary_set consumes it; TODO confirm)
                if (val) tb_object_inc(val);
            }
            else
            {
                // the reader func
                tb_object_bin_reader_func_t func = tb_object_bin_reader_func(type);
                tb_assert_and_check_break(func);

                // read it
                val = func(reader, type, size);

                // save it so later entries can back-reference it
                if (val) tb_vector_insert_tail(reader->list, val);
            }

        } while (0);

        // check: stop at the first failed pair
        tb_assert_and_check_break(val);

        // set key => val
        tb_object_dictionary_set(dictionary, tb_object_string_cstr(key), val);
    }

    // failed before reading all pairs? free the partial dictionary
    if (i != n)
    {
        if (dictionary) tb_object_exit(dictionary);
        dictionary = tb_null;
    }

    // ok?
    return dictionary;
}
/*! draw a path with gl
 *
 * fast paths: a path shaped like a rectangle or a triangle is drawn directly;
 * otherwise the tessellated fill data is drawn sub-path by sub-path.
 *
 * @param painter   the gl painter
 * @param mode      the draw mode: G2_STYLE_MODE_FILL or G2_STYLE_MODE_STOK
 * @param path      the gl path
 */
tb_void_t g2_gl_draw_path(g2_gl_painter_t* painter, tb_size_t mode, g2_gl_path_t const* path)
{
    // check
    tb_assert_and_check_return(painter && path);
    tb_assert_and_check_return((mode == G2_STYLE_MODE_FILL) || (mode == G2_STYLE_MODE_STOK));

    // null?
    tb_check_return(!g2_path_null(path));

    // make like
    g2_gl_path_make_like((g2_gl_path_t*)path);

    // make draw
    if (!g2_gl_path_make_fill((g2_gl_path_t*)path)) return ;

    // check the fill data and a non-degenerate bounding rect
    tb_assert(path->fill.data && tb_vector_size(path->fill.data));
    tb_assert(path->fill.size && tb_vector_size(path->fill.size));
    tb_check_return(path->fill.rect.x1 < path->fill.rect.x2 && path->fill.rect.y1 < path->fill.rect.y2);

    // like rect?
    if (path->like == G2_GL_PATH_LIKE_RECT)
    {
        // draw bounds
        g2_gl_draw_bounds(painter, mode, &path->rect);

        // ok
        return ;
    }
    // like triangle?
    else if (path->like == G2_GL_PATH_LIKE_TRIG)
    {
        // draw triangle
        g2_gl_draw_triangle(painter, mode, &path->trig);

        // ok
        return ;
    }

    // init draw
    g2_gl_draw_t draw = {0};
    if (!g2_gl_draw_init(&draw, painter, mode, path->like == G2_GL_PATH_LIKE_CONX? G2_GL_DRAW_FLAG_CONVEX : G2_GL_DRAW_FLAG_NONE)) return ;

    // init bounds; grow them for the stroke width when stroking
    g2_gl_rect_t bounds = path->fill.rect;
    if (draw.mode == G2_STYLE_MODE_STOK) g2_gl_bounds_stok(&bounds, 1);

    // clip draw
    g2_gl_draw_clip(&draw, &bounds);

    // init vertices (fixed pipeline below gl 2.0, shader attributes otherwise)
    if (draw.context->version < 0x20) g2_glVertexPointer(2, G2_GL_FLOAT, 0, tb_vector_data(path->fill.data));
    else g2_glVertexAttribPointer(g2_gl_program_location(draw.program, G2_GL_PROGRAM_LOCATION_VERTICES), 2, G2_GL_FLOAT, G2_GL_FALSE, 0, tb_vector_data(path->fill.data));

    // draw vertices: one glDrawArrays call per sub-path, path->fill.size holds each sub-path's vertex count
    tb_size_t head = 0;
    tb_size_t size = 0;
    tb_size_t itor = tb_iterator_head(path->fill.size);
    tb_size_t tail = tb_iterator_tail(path->fill.size);
    g2_GLenum_t gmode = (draw.mode == G2_STYLE_MODE_FILL)? G2_GL_TRIANGLE_FAN : G2_GL_LINE_STRIP;
    // note: advance with tb_iterator_next (the original used raw itor++, which bypasses
    // the iterator abstraction that head/tail were obtained through)
    for (; itor != tail; itor = tb_iterator_next(path->fill.size, itor))
    {
        // the vertex count of this sub-path (explicit cast: tb_iterator_item returns tb_pointer_t)
        size = (tb_size_t)tb_iterator_item(path->fill.size, itor);

        // draw this sub-path
        g2_glDrawArrays(gmode, (g2_GLint_t)head, (g2_GLint_t)size);

        // offset of the next sub-path
        head += size;
    }

    // draw fill
    g2_gl_draw_fill(&draw, &bounds);

    // exit draw
    g2_gl_draw_exit(&draw);
}