tb_handle_t g2_pcache_init(tb_size_t maxn)
{
    // check
    tb_assert_and_check_return_val(maxn, tb_null);

    // alloc
    g2_pcache_t* pcache = tb_malloc0(sizeof(g2_pcache_t));
    tb_assert_and_check_return_val(pcache, tb_null);

    // init path cache
    pcache->maxn = maxn;
    pcache->cache = tb_stack_init(maxn, tb_item_func_ptr(tb_null, tb_null));
    tb_assert_and_check_goto(pcache->cache, fail);
    while (maxn--)
    {
        // init
        tb_handle_t path = g2_path_init();
        tb_assert_and_check_goto(path, fail);

        // put
        tb_stack_put(pcache->cache, path);
    }

    // init path hash
    pcache->hash = tb_hash_init(tb_isqrti(pcache->maxn), tb_item_func_ifm(sizeof(g2_shape_t), tb_null, tb_null), tb_item_func_ptr(g2_pcache_hash_item_free, pcache));
    tb_assert_and_check_goto(pcache->hash, fail);

    // ok
    return pcache;

fail:
    if (pcache) g2_pcache_exit(pcache);
    return tb_null;
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */
tb_bool_t tb_dns_cache_init()
{
    // enter
    tb_spinlock_enter(&g_lock);

    // done
    tb_bool_t ok = tb_false;
    do
    {
        // init hash
        if (!g_cache.hash) g_cache.hash = tb_hash_init(tb_align8(tb_isqrti(TB_DNS_CACHE_MAXN) + 1), tb_item_func_str(tb_false), tb_item_func_mem(sizeof(tb_dns_cache_addr_t), tb_null, tb_null));
        tb_assert_and_check_break(g_cache.hash);

        // ok
        ok = tb_true;

    } while (0);

    // leave
    tb_spinlock_leave(&g_lock);

    // failed? exit it
    if (!ok) tb_dns_cache_exit();

    // ok?
    return ok;
}
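/* A minimal, self-contained sketch (not tbox code) of the bucket-count heuristic used
 * by tb_dns_cache_init() above (and by the rtor hashes below): take the integer square
 * root of the maximum entry count, add one, and round up to a multiple of 8. isqrt()
 * and align8() are illustrative stand-ins for tb_isqrti() and tb_align8(); build and
 * run this program on its own.
 */
#include <stdio.h>
#include <stddef.h>

static size_t isqrt(size_t n)   // floor(sqrt(n)) by simple counting
{
    size_t x = 0;
    while ((x + 1) * (x + 1) <= n) x++;
    return x;
}

static size_t align8(size_t n)  // round up to a multiple of 8
{
    return (n + 7) & ~(size_t)7;
}

int main(void)
{
    // e.g. a cache limit of 256 entries maps to align8(16 + 1) = 24 buckets
    size_t maxn[] = {64, 256, 1024, 65536};
    size_t i;
    for (i = 0; i < sizeof(maxn) / sizeof(maxn[0]); i++)
        printf("maxn = %zu -> buckets = %zu\n", maxn[i], align8(isqrt(maxn[i]) + 1));
    return 0;
}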
static tb_aiop_rtor_impl_t* tb_aiop_rtor_select_init(tb_aiop_impl_t* aiop)
{
    // check
    tb_assert_and_check_return_val(aiop && aiop->maxn, tb_null);

    // done
    tb_bool_t                   ok = tb_false;
    tb_aiop_rtor_select_impl_t* impl = tb_null;
    do
    {
        // make rtor
        impl = tb_malloc0_type(tb_aiop_rtor_select_impl_t);
        tb_assert_and_check_break(impl);

        // init base
        impl->base.aiop = aiop;
        impl->base.code = TB_AIOE_CODE_EALL | TB_AIOE_CODE_ONESHOT;
        impl->base.exit = tb_aiop_rtor_select_exit;
        impl->base.cler = tb_aiop_rtor_select_cler;
        impl->base.addo = tb_aiop_rtor_select_addo;
        impl->base.delo = tb_aiop_rtor_select_delo;
        impl->base.post = tb_aiop_rtor_select_post;
        impl->base.wait = tb_aiop_rtor_select_wait;

        // init fds
        FD_ZERO(&impl->rfdi);
        FD_ZERO(&impl->wfdi);
        FD_ZERO(&impl->efdi);
        FD_ZERO(&impl->rfdo);
        FD_ZERO(&impl->wfdo);
        FD_ZERO(&impl->efdo);

        // init lock
        if (!tb_spinlock_init(&impl->lock.pfds)) break;
        if (!tb_spinlock_init(&impl->lock.hash)) break;

        // init hash
        impl->hash = tb_hash_init(tb_align8(tb_isqrti(aiop->maxn) + 1), tb_item_func_ptr(tb_null, tb_null), tb_item_func_ptr(tb_null, tb_null));
        tb_assert_and_check_break(impl->hash);

        // ok
        ok = tb_true;

    } while (0);

    // failed?
    if (!ok)
    {
        // exit it
        if (impl) tb_aiop_rtor_select_exit((tb_aiop_rtor_impl_t*)impl);
        impl = tb_null;
    }

    // ok?
    return (tb_aiop_rtor_impl_t*)impl;
}
static tb_aiop_rtor_impl_t* tb_aiop_rtor_poll_init(tb_aiop_impl_t* aiop)
{
    // check
    tb_assert_and_check_return_val(aiop && aiop->maxn, tb_null);

    // done
    tb_bool_t                   ok = tb_false;
    tb_aiop_rtor_poll_impl_t*   impl = tb_null;
    do
    {
        // make rtor
        impl = tb_malloc0_type(tb_aiop_rtor_poll_impl_t);
        tb_assert_and_check_break(impl);

        // init base
        impl->base.aiop = aiop;
        impl->base.code = TB_AIOE_CODE_EALL | TB_AIOE_CODE_ONESHOT;
        impl->base.exit = tb_aiop_rtor_poll_exit;
        impl->base.cler = tb_aiop_rtor_poll_cler;
        impl->base.addo = tb_aiop_rtor_poll_addo;
        impl->base.delo = tb_aiop_rtor_poll_delo;
        impl->base.post = tb_aiop_rtor_poll_post;
        impl->base.wait = tb_aiop_rtor_poll_wait;

        // init lock
        if (!tb_spinlock_init(&impl->lock.pfds)) break;
        if (!tb_spinlock_init(&impl->lock.hash)) break;

        // init pfds
        impl->pfds = tb_vector_init(tb_align8((aiop->maxn >> 3) + 1), tb_item_func_mem(sizeof(struct pollfd), tb_null, tb_null));
        tb_assert_and_check_break(impl->pfds);

        // init cfds
        impl->cfds = tb_vector_init(tb_align8((aiop->maxn >> 3) + 1), tb_item_func_mem(sizeof(struct pollfd), tb_null, tb_null));
        tb_assert_and_check_break(impl->cfds);

        // init hash
        impl->hash = tb_hash_init(tb_align8(tb_isqrti(aiop->maxn) + 1), tb_item_func_ptr(tb_null, tb_null), tb_item_func_ptr(tb_null, tb_null));
        tb_assert_and_check_break(impl->hash);

        // ok
        ok = tb_true;

    } while (0);

    // failed?
    if (!ok)
    {
        // exit it
        if (impl) tb_aiop_rtor_poll_exit((tb_aiop_rtor_impl_t*)impl);
        impl = tb_null;
    }

    // ok
    return (tb_aiop_rtor_impl_t*)impl;
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */
tb_hash_map_ref_t tb_hash_map_init(tb_size_t bucket_size, tb_element_t element_name, tb_element_t element_data)
{
    // check
    tb_assert_and_check_return_val(element_name.size && element_name.hash && element_name.comp && element_name.data && element_name.dupl, tb_null);
    tb_assert_and_check_return_val(element_data.data && element_data.dupl && element_data.repl, tb_null);

    // check bucket size
    if (!bucket_size) bucket_size = TB_HASH_MAP_BUCKET_SIZE_DEFAULT;
    tb_assert_and_check_return_val(bucket_size <= TB_HASH_MAP_BUCKET_SIZE_LARGE, tb_null);

    // done
    tb_bool_t           ok = tb_false;
    tb_hash_map_impl_t* impl = tb_null;
    do
    {
        // make hash_map
        impl = tb_malloc0_type(tb_hash_map_impl_t);
        tb_assert_and_check_break(impl);

        // init hash_map func
        impl->element_name = element_name;
        impl->element_data = element_data;

        // init item itor
        impl->itor.mode         = TB_ITERATOR_MODE_FORWARD | TB_ITERATOR_MODE_MUTABLE;
        impl->itor.priv         = tb_null;
        impl->itor.step         = sizeof(tb_hash_map_item_t);
        impl->itor.size         = tb_hash_map_itor_size;
        impl->itor.head         = tb_hash_map_itor_head;
        impl->itor.tail         = tb_hash_map_itor_tail;
        impl->itor.prev         = tb_null;
        impl->itor.next         = tb_hash_map_itor_next;
        impl->itor.item         = tb_hash_map_itor_item;
        impl->itor.copy         = tb_hash_map_itor_copy;
        impl->itor.comp         = tb_hash_map_itor_comp;
        impl->itor.remove       = tb_hash_map_itor_remove;
        impl->itor.remove_range = tb_hash_map_itor_remove_range;

        // init hash_map size
        impl->hash_size = tb_align_pow2(bucket_size);
        tb_assert_and_check_break(impl->hash_size <= TB_HASH_MAP_BUCKET_MAXN);

        // init hash_map list
        impl->hash_list = (tb_hash_map_item_list_t**)tb_nalloc0(impl->hash_size, sizeof(tb_size_t));
        tb_assert_and_check_break(impl->hash_list);

        // init item grow
        impl->item_grow = tb_isqrti(bucket_size);
        if (impl->item_grow < 8) impl->item_grow = 8;
        impl->item_grow = tb_align_pow2(impl->item_grow);

        // ok
        ok = tb_true;

    } while (0);

    // failed?
    if (!ok)
    {
        // exit it
        if (impl) tb_hash_map_exit((tb_hash_map_ref_t)impl);
        impl = tb_null;
    }

    // ok?
    return (tb_hash_map_ref_t)impl;
}
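/* A minimal, self-contained sketch (not tbox code) of the sizing logic in
 * tb_hash_map_init() above: the bucket count is rounded up to a power of two, and the
 * per-bucket growth step is the integer square root of the requested bucket size,
 * clamped to at least 8 and also rounded up to a power of two. isqrt() and align_pow2()
 * are illustrative stand-ins for tb_isqrti() and tb_align_pow2(); build and run this
 * program on its own.
 */
#include <stdio.h>
#include <stddef.h>

static size_t isqrt(size_t n)       // floor(sqrt(n)) by simple counting
{
    size_t x = 0;
    while ((x + 1) * (x + 1) <= n) x++;
    return x;
}

static size_t align_pow2(size_t n)  // smallest power of two >= n
{
    size_t p = 1;
    while (p < n) p <<= 1;
    return p;
}

int main(void)
{
    size_t bucket_size = 521;  // a hypothetical caller-requested bucket count
    size_t hash_size   = align_pow2(bucket_size);
    size_t item_grow   = isqrt(bucket_size);
    if (item_grow < 8) item_grow = 8;
    item_grow = align_pow2(item_grow);

    // expected: hash_size = 1024, item_grow = 32
    printf("hash_size = %zu, item_grow = %zu\n", hash_size, item_grow);
    return 0;
}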
static __tb_inline__ tb_void_t g2_gl_draw_style_fill_shader_radial(g2_gl_draw_t* draw, g2_gl_rect_t const* bounds)
{
    // enter texture state
    g2_gl_draw_enter_texture_state(draw);

    // enter texture matrix
    g2_gl_draw_enter_texture_matrix(draw);

    // init texcoords
    draw->texcoords[0] = 0.0f;
    draw->texcoords[1] = 0.0f;
    draw->texcoords[2] = 1.0f;
    draw->texcoords[3] = 0.0f;
    draw->texcoords[4] = 0.0f;
    draw->texcoords[5] = 1.0f;
    draw->texcoords[6] = 1.0f;
    draw->texcoords[7] = 1.0f;

    // apply texcoords
    g2_gl_draw_apply_texcoords(draw);

    // init radial variables
    tb_float_t smatrix[16];
    g2_gl_matrix_from(smatrix, &draw->shader->matrix);
    tb_float_t cx = g2_float_to_tb(draw->shader->u.radial.cp.c.x);
    tb_float_t cy = g2_float_to_tb(draw->shader->u.radial.cp.c.y);
    tb_float_t x0 = g2_gl_matrix_apply_x(smatrix, cx, cy);
    tb_float_t y0 = g2_gl_matrix_apply_y(smatrix, cx, cy);

    // init scale factor
    tb_float_t sx = tb_fabs(smatrix[0]);
    tb_float_t sy = tb_fabs(smatrix[5]);
    tb_float_t fs = tb_min(sx, sy);
    if (fs < 1e-9) fs = 1e-9;

    // init maximum radius
    tb_float_t n1 = (x0 - bounds->x1) * (x0 - bounds->x1) + (y0 - bounds->y1) * (y0 - bounds->y1);
    tb_float_t n2 = (x0 - bounds->x2) * (x0 - bounds->x2) + (y0 - bounds->y1) * (y0 - bounds->y1);
    tb_float_t n3 = (x0 - bounds->x1) * (x0 - bounds->x1) + (y0 - bounds->y2) * (y0 - bounds->y2);
    tb_float_t n4 = (x0 - bounds->x2) * (x0 - bounds->x2) + (y0 - bounds->y2) * (y0 - bounds->y2);
    if (n2 > n1) n1 = n2;
    if (n3 > n1) n1 = n3;
    if (n4 > n1) n1 = n4;
    tb_float_t rm = (tb_float_t)(tb_isqrti(tb_ceil(n1)) + 1) / fs;

    // the radial factor
    static g2_gl_draw_radial_factor_t factors[] =
    {
        {0.105396307f, 12.0f, 30}   // rm * sin(6.05)
    ,   {0.070626986f, 8.0f, 45}    // rm * sin(4.05)
    ,   {0.035771616f, 4.0f, 90}    // rm * sin(2.05)
    };
    tb_assert(g2_quality() < tb_arrayn(factors));
    g2_gl_draw_radial_factor_t const* factor = &factors[g2_quality()];

    /* init fragment vertices
     *
     *      fn
     * *****|*****
     *  *   |   *
     *   *rm|  *
     *    * | *
     *     *|*
     *      *
     */
    tb_float_t fn = rm * factor->factor; // rm * sin(x.05)
    draw->vertices[0] = cx - fn;
    draw->vertices[1] = cy - rm;
    draw->vertices[2] = cx + fn;
    draw->vertices[3] = cy - rm;
    draw->vertices[4] = cx;
    draw->vertices[5] = cy;

    // init fragment bounds
    g2_gl_rect_t fbounds;
    g2_gl_bounds_init(&fbounds, draw->vertices[0], draw->vertices[1]);
    g2_gl_bounds_done(&fbounds, draw->vertices[2], draw->vertices[3]);
    g2_gl_bounds_done(&fbounds, draw->vertices[4], draw->vertices[5]);

    // apply vertices
    g2_gl_draw_apply_vertices(draw);

    // apply texture matrix
    g2_gl_draw_apply_texture_matrix(draw, &fbounds);

    // save vertex matrix
    tb_float_t matrix0[16];
    g2_gl_matrix_copy(matrix0, draw->vmatrix);

    // apply shader matrix
    g2_gl_matrix_multiply(draw->vmatrix, smatrix);

    // init rotate matrix: rotate by one fragment step
    tb_float_t matrix1[16];
    g2_gl_matrix_init_rotatep(matrix1, factor->rotation, cx, cy);

    // rotate for drawing all fragments
    tb_size_t n = factor->count;
    while (n--)
    {
        // rotate to the next fragment
        g2_gl_matrix_multiply(draw->vmatrix, matrix1);

        // apply vertex matrix
        g2_gl_draw_apply_vertex_matrix(draw);

        // draw fragment
        g2_glDrawArrays(G2_GL_TRIANGLE_STRIP, 0, 3);
    }

    // restore vertex matrix
    g2_gl_matrix_copy(draw->vmatrix, matrix0);

    // apply vertex matrix
    g2_gl_draw_apply_vertex_matrix(draw);

    // leave texture matrix
    g2_gl_draw_leave_texture_matrix(draw);

    // leave texture state
    g2_gl_draw_leave_texture_state(draw);
}
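/* A minimal, self-contained sketch (not gbox2 code) that checks the radial factor table
 * used above: for each quality level the half-width factor is sin(step/2 + 0.05) in
 * degrees (the extra 0.05 gives adjacent fragments a slight overlap), and step * count
 * always covers a full 360-degree turn. Build and run this program on its own
 * (link with -lm).
 */
#include <stdio.h>
#include <math.h>

int main(void)
{
    const double pi       = 3.14159265358979323846;
    const double steps[]  = {12.0, 8.0, 4.0};   // rotation step per fragment, in degrees
    const int    counts[] = {30, 45, 90};       // fragments per full turn
    int i;
    for (i = 0; i < 3; i++)
    {
        double half = steps[i] / 2.0 + 0.05;    // half step plus a small overlap
        printf("factor = %.9f, coverage = %.0f degrees\n",
               sin(half * pi / 180.0), steps[i] * counts[i]);
    }
    // expected factors: 0.105396307, 0.070626986, 0.035771616 (matching the table)
    return 0;
}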