tb_size_t tb_backtrace_frames(tb_pointer_t* frames, tb_size_t nframe, tb_size_t nskip)
{
    // note: cannot use assert
    tb_check_return_val(frames && nframe, 0);

    // no frames to skip? capture directly into the caller's buffer
    if (!nskip) return backtrace(frames, nframe);

    // capture nskip + nframe frames into a temporary buffer first
    tb_pointer_t temp[256] = {0};
    tb_check_return_val(nframe + nskip < 256, 0);

    // do backtrace
    tb_size_t size = backtrace(temp, nframe + nskip);
    tb_check_return_val(nskip < size, 0);

    // clamp to the frames actually captured beyond the skipped prefix
    nframe = tb_min(nframe, size - nskip);

    // copy the interesting tail into the caller's buffer
    tb_memcpy_(frames, temp + nskip, nframe * sizeof(tb_pointer_t));
    return nframe;
}
tb_pointer_t tb_pool_align_malloc_(tb_pool_ref_t pool, tb_size_t size, tb_size_t align __tb_debug_decl__)
{
    // the alignment must be a multiple of four
    tb_assertf_abort(!(align & 3), "invalid alignment size: %lu", align);
    tb_check_return_val(!(align & 3), tb_null);

    // over-allocate by align bytes so an aligned address always fits inside
    tb_byte_t* base = (tb_byte_t*)tb_pool_malloc_(pool, size + align __tb_debug_args__);
    tb_check_return_val(base, tb_null);

    // offset (in [1, align]) from the raw address to the next aligned one
    tb_byte_t offset = (tb_byte_t)((~(tb_long_t)base) & (align - 1)) + 1;

    // advance to the aligned address
    tb_byte_t* aligned = base + offset;
    tb_assert_abort(!((tb_size_t)aligned & (align - 1)));

    // stash the offset just before the returned block so ralloc/free can recover the raw address
    aligned[-1] = offset;

    // ok?
    return (tb_pointer_t)aligned;
}
/* wait the posix semaphore
 *
 * @param semaphore the semaphore
 * @param timeout   the timeout in milliseconds, < 0 means wait forever
 *
 * @return          1: signaled, 0: timed out, -1: failed
 */
tb_long_t tb_semaphore_wait(tb_semaphore_ref_t semaphore, tb_long_t timeout)
{
    // check
    sem_t* h = (sem_t*)semaphore;
    tb_assert_and_check_return_val(h, -1);

    /* init the absolute deadline
     *
     * use clock_gettime() rather than time() so the current sub-second
     * fraction is kept: time() truncates to whole seconds, which made the
     * deadline up to one second early and made small timeouts expire
     * immediately. sem_timedwait() measures against CLOCK_REALTIME.
     */
    struct timespec t = {0};
    clock_gettime(CLOCK_REALTIME, &t);
    if (timeout > 0)
    {
        t.tv_sec += timeout / 1000;
        t.tv_nsec += (timeout % 1000) * 1000000;

        // carry the nanoseconds overflow into the seconds field
        if (t.tv_nsec >= 1000000000)
        {
            t.tv_sec += 1;
            t.tv_nsec -= 1000000000;
        }
    }
    else if (timeout < 0) t.tv_sec += 12 * 30 * 24 * 3600; // infinity: one year

    // wait semaphore
    tb_long_t r = sem_timedwait(h, &t);

    // ok?
    tb_check_return_val(r, 1);

    // timeout?
    tb_check_return_val(errno != EAGAIN && errno != ETIMEDOUT, 0);

    // error
    return -1;
}
tb_long_t tb_semaphore_wait(tb_semaphore_ref_t semaphore, tb_long_t timeout)
{
    // check
    tb_semaphore_impl_t* impl = (tb_semaphore_impl_t*)semaphore;
    tb_assert_and_check_return_val(semaphore && impl->semaphore && impl->semaphore != INVALID_HANDLE_VALUE, -1);

    // wait it, a negative timeout means wait forever
    tb_long_t r = WaitForSingleObject(impl->semaphore, timeout >= 0? timeout : INFINITE);
    tb_assert_and_check_return_val(r != WAIT_FAILED, -1);

    // timed out?
    tb_check_return_val(r != WAIT_TIMEOUT, 0);

    // any other failure?
    tb_check_return_val(r >= WAIT_OBJECT_0, -1);

    // the counter must still be positive before we consume one unit
    tb_assert_and_check_return_val((tb_long_t)tb_atomic_get(&impl->value) > 0, -1);

    // consume one unit of the counter
    tb_atomic_fetch_and_dec(&impl->value);

    // ok
    return 1;
}
/* format a human-readable name for one captured frame
 *
 * @param symbols   the handle from tb_backtrace_symbols_init(), used as a
 *                  scratch text buffer (presumably 8192 bytes — maxn below
 *                  must not exceed its real size; TODO confirm against init)
 * @param frames    the captured frame addresses
 * @param nframe    the frame count
 * @param iframe    the index of the frame to describe
 *
 * @return          the formatted string "module(symbol+0xoff) [addr]", or tb_null
 */
tb_char_t const* tb_backtrace_symbols_name(tb_handle_t symbols, tb_pointer_t* frames, tb_size_t nframe, tb_size_t iframe)
{
    // check
    tb_check_return_val(symbols && frames && nframe && iframe < nframe, tb_null);

    // the frame address
    tb_pointer_t frame = frames[iframe];
    tb_check_return_val(frame, tb_null);

    // resolve the containing module and nearest symbol for this address
    Dl_info dlinfo = {0};
    if (!dladdr(frame, &dlinfo)) return tb_null;

    // format: module name, then symbol, then offset from symbol, then raw address
    tb_long_t size = 0;
    tb_size_t maxn = 8192;
    if (dlinfo.dli_fname) size = tb_snprintf((tb_char_t*)symbols, maxn, "%s(", dlinfo.dli_fname);
    if (dlinfo.dli_sname && size >= 0) size += tb_snprintf((tb_char_t*)symbols + size, maxn - size, "%s", dlinfo.dli_sname);
    // only append the offset when the frame lies at or past the symbol start
    if (dlinfo.dli_sname && frame >= dlinfo.dli_saddr && size >= 0) size += tb_snprintf((tb_char_t*)symbols + size, maxn - size, "+%#lx", (tb_size_t)(frame - dlinfo.dli_saddr));
    if (size >= 0) size += tb_snprintf((tb_char_t*)symbols + size, maxn - size, ") [%p]", frame);
    // terminate; any snprintf failure above leaves size < 0 and skips this
    if (size >= 0) ((tb_char_t*)symbols)[size] = '\0';

    // ok
    return symbols;
}
/* receive and parse the dns response packet
 *
 * @param impl      the looker
 * @param addr      the output ip address
 *
 * @return          1: finished, 0: no data yet (caller should wait), -1: failed
 */
static tb_long_t tb_dns_looker_resp(tb_dns_looker_impl_t* impl, tb_ipaddr_ref_t addr)
{
    // response already handled? nothing left to do
    tb_check_return_val(!(impl->step & TB_DNS_LOOKER_STEP_RESP), 1);

    // need wait if no data
    impl->step &= ~TB_DNS_LOOKER_STEP_NEVT;

    // recv response data
    tb_byte_t rpkt[4096];
    while (1)
    {
        // read data
        tb_long_t read = tb_socket_urecv(impl->sock, tb_null, rpkt, 4096);
        //tb_trace_d("read %d", read);
        tb_assert_and_check_return_val(read >= 0, -1);

        // no data?
        if (!read)
        {
            // end? read x, read 0 — some data was buffered, go parse it
            tb_check_break(!tb_static_buffer_size(&impl->rpkt));

            // abort? read 0, read 0 — second empty read in a row, give up
            tb_check_return_val(!impl->tryn, -1);

            // tryn++
            impl->tryn++;

            // continue — ask the caller to wait for more data
            return 0;
        }
        else impl->tryn = 0;

        // copy data
        tb_static_buffer_memncat(&impl->rpkt, rpkt, read);
    }

    // done: parse the accumulated response into addr
    if (!tb_dns_looker_resp_done(impl, addr)) return -1;

    // check
    tb_assert_and_check_return_val(tb_static_string_size(&impl->name) && !tb_ipaddr_ip_is_empty(addr), -1);

    // save address to cache
    tb_dns_cache_set(tb_static_string_cstr(&impl->name), addr);

    // finish it
    impl->step |= TB_DNS_LOOKER_STEP_RESP;
    impl->tryn = 0;

    // reset rpkt
    impl->size = 0;
    tb_static_buffer_clear(&impl->rpkt);

    // ok
    tb_trace_d("response: ok");
    return 1;
}
/* find the first free item slot in the used-bits bitmap and mark it used
 *
 * @param pool      the static fixed pool
 *
 * @return          the data head of the allocated item, or tb_null if full
 */
static tb_pool_data_empty_head_t* tb_static_fixed_pool_malloc_find(tb_static_fixed_pool_t* pool)
{
    // check
    tb_assert_and_check_return_val(pool, tb_null);

    // init: scan the used-info bitmap one machine word at a time
    tb_size_t i = 0;
    tb_size_t* p = (tb_size_t*)pool->used_info;
    tb_size_t* e = (tb_size_t*)(pool->used_info + pool->info_size);
    tb_byte_t* d = tb_null;

    // check align
    tb_assert_and_check_return_val(!(((tb_size_t)p) & (TB_CPU_BITBYTE - 1)), tb_null);

    /* find the free chunk, item_space * 32|64 items
     *
     * a word of all-ones (i.e. *p + 1 == 0) is fully used, so skip it
     */
#ifdef __tb_small__
    // equivalent to: while (p < e && *p == 0xffffffff) p++;         (32-bit words)
    //            or: while (p < e && *p == 0xffffffffffffffffL) p++; (64-bit words)
    while (p < e && !((*p) + 1)) p++;
#else
    // unrolled: test eight words per round
    while (p + 7 < e)
    {
        if (p[0] + 1) { p += 0; break; }
        if (p[1] + 1) { p += 1; break; }
        if (p[2] + 1) { p += 2; break; }
        if (p[3] + 1) { p += 3; break; }
        if (p[4] + 1) { p += 4; break; }
        if (p[5] + 1) { p += 5; break; }
        if (p[6] + 1) { p += 6; break; }
        if (p[7] + 1) { p += 7; break; }
        p += 8;
    }
    // handle the trailing (< 8) words
    while (p < e && !(*p + 1)) p++;
#endif
    tb_check_return_val(p < e, tb_null);

    // the item index: bit offset of the word plus the free bit inside it
    tb_size_t m = pool->item_maxn;
    i = (((tb_byte_t*)p - pool->used_info) << 3) + tb_static_fixed_pool_find_free(*p);
    tb_check_return_val(i < m, tb_null);

    // allocate it: compute the item address and mark its bit used
    d = pool->data + i * pool->item_space;
    tb_static_fixed_pool_used_set1(pool->used_info, i);

    // predict this index if no full?
    if ((*p) + 1) tb_static_fixed_pool_cache_pred(pool, i);

    // ok?
    return (tb_pool_data_empty_head_t*)d;
}
tb_long_t tb_thread_wait(tb_thread_ref_t thread, tb_long_t timeout)
{
    // wait for the thread handle, a negative timeout waits forever
    tb_long_t r = WaitForSingleObject((HANDLE)thread, (DWORD)(timeout >= 0? timeout : INFINITE));
    tb_assert_and_check_return_val(r != WAIT_FAILED, -1);

    // timed out?
    if (r == WAIT_TIMEOUT) return 0;

    // any other failure?
    if (r < WAIT_OBJECT_0) return -1;

    // the thread has terminated
    return 1;
}
/* fixed-point division: produce the quotient of x / y carrying nbits
 * fractional bits (presumably equivalent to (x << nbits) / y, saturated —
 * the restoring-division structure below supports this reading; confirm
 * against callers)
 *
 * @param x         the numerator
 * @param y         the denominator, must be non-zero
 * @param nbits     the number of fractional bits in the result
 *
 * @return          the signed quotient; 0 on underflow, saturated to
 *                  TB_MAXS32 (with sign) on overflow
 */
tb_int32_t tb_int32_div(tb_int32_t x, tb_int32_t y, tb_int_t nbits)
{
    tb_assert(y);
    tb_check_return_val(x, 0);

    // the result sign, then work with magnitudes
    tb_int32_t s = tb_int32_get_sign(x ^ y);
    x = tb_abs(x);
    y = tb_abs(y);

    // leading-zero counts minus one: how far each operand can be normalized upward
    tb_int_t xbits = (tb_int_t)tb_bits_cl0_u32_be(x) - 1;
    tb_int_t ybits = (tb_int_t)tb_bits_cl0_u32_be(y) - 1;

    // the number of quotient bits still to produce after the first one
    tb_int_t bits = nbits - xbits + ybits;

    // underflow? the result would be smaller than one ulp
    if (bits < 0) return 0;

    // overflow? saturate
    if (bits > 31) return tb_int32_set_sign(TB_MAXS32, s);

    // normalize both operands toward the top of the word
    x <<= xbits;
    y <<= ybits;

    // do the first quotient bit (restoring division: subtract, restore on borrow)
    tb_int32_t r = 0;
    if ((x -= y) >= 0) r = 1; else x += y;

    // now fall into our switch statement if there are more bits to compute
    if (bits > 0)
    {
        // make room for the rest of the answer bits
        r <<= bits;
        switch (bits)
        {
            /* one restoring-division step: shift the remainder up, try to
             * subtract y, set quotient bit (n - 1) on success; the cases
             * deliberately fall through so `bits` steps run in sequence
             */
#define TB_INT32_DIV_CASE(n) \
            case n: \
                if ((x = (x << 1) - y) >= 0) \
                    r |= 1 << (n - 1); else x += y

            TB_INT32_DIV_CASE(31);
            TB_INT32_DIV_CASE(30);
            TB_INT32_DIV_CASE(29);
            TB_INT32_DIV_CASE(28);
            TB_INT32_DIV_CASE(27);
            TB_INT32_DIV_CASE(26);
            TB_INT32_DIV_CASE(25);
            TB_INT32_DIV_CASE(24);
            TB_INT32_DIV_CASE(23);
            TB_INT32_DIV_CASE(22);
            TB_INT32_DIV_CASE(21);
            TB_INT32_DIV_CASE(20);
            TB_INT32_DIV_CASE(19);
            TB_INT32_DIV_CASE(18);
            TB_INT32_DIV_CASE(17);
            TB_INT32_DIV_CASE(16);
            TB_INT32_DIV_CASE(15);
            TB_INT32_DIV_CASE(14);
            TB_INT32_DIV_CASE(13);
            TB_INT32_DIV_CASE(12);
            TB_INT32_DIV_CASE(11);
            TB_INT32_DIV_CASE(10);
            TB_INT32_DIV_CASE( 9);
            TB_INT32_DIV_CASE( 8);
            TB_INT32_DIV_CASE( 7);
            TB_INT32_DIV_CASE( 6);
            TB_INT32_DIV_CASE( 5);
            TB_INT32_DIV_CASE( 4);
            TB_INT32_DIV_CASE( 3);
            TB_INT32_DIV_CASE( 2);

            // we merge these last two together, makes gcc make better arm
            default:
            TB_INT32_DIV_CASE(1);
        }
    }

    // clamp a negative (overflowed) accumulator to the maximum
    if (r < 0) r = TB_MAXS32;
    return tb_int32_set_sign(r, s);
}
tb_iterator_ref_t tb_ifaddrs_itor(tb_ifaddrs_ref_t ifaddrs, tb_bool_t reload)
{
    // check
    tb_list_ref_t interfaces = (tb_list_ref_t)ifaddrs;
    tb_assert_and_check_return_val(interfaces, tb_null);

    // serve the cached interface list unless a reload was requested
    tb_check_return_val(reload, (tb_iterator_ref_t)interfaces);

    // drop the stale entries before reloading
    tb_list_clear(interfaces);

    // query the kernel over a netlink socket
    tb_long_t sock = tb_ifaddrs_netlink_socket_init();
    tb_assert(sock >= 0);
    if (sock >= 0)
    {
        // load the ip addresses first, then the hardware addresses
        if (tb_ifaddrs_interface_load(interfaces, sock, RTM_GETADDR))
            tb_ifaddrs_interface_load(interfaces, sock, RTM_GETLINK);

        // exit sock
        close(sock);
    }

    // ok?
    return (tb_iterator_ref_t)interfaces;
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */
tb_size_t gb_float_unit_divide(gb_float_t numer, gb_float_t denom, gb_float_t* result)
{
    // check
    tb_assert_abort(result);

    // normalize the sign so the numerator is non-negative
    if (numer < 0)
    {
        numer = -numer;
        denom = -denom;
    }

    // reject zero operands and ratios of one or more
    if (0 == denom || 0 == numer || numer >= denom) return 0;

    // the ratio: numer / denom
    gb_float_t r = gb_div(numer, denom);

    // must be a finite value
    tb_assert_and_check_return_val(gb_isfinite(r), 0);

    // must lie in [0, GB_ONE)
    tb_assert_and_check_return_val(r >= 0 && r < GB_ONE, 0);

    // a ratio of exactly zero is too small to be useful; report failure
    tb_check_return_val(r != 0, 0);

    // save result
    *result = r;

    // ok
    return 1;
}
tb_long_t tb_dns_looker_wait(tb_dns_looker_ref_t looker, tb_long_t timeout)
{
    // check
    tb_dns_looker_impl_t* impl = (tb_dns_looker_impl_t*)looker;
    tb_assert_and_check_return_val(impl && impl->sock, -1);

    // pick the aio event matching the step we are stalled on
    tb_size_t e = TB_AIOE_CODE_NONE;
    if (!(impl->step & TB_DNS_LOOKER_STEP_NEVT))
    {
        if (!(impl->step & TB_DNS_LOOKER_STEP_REQT)) e = TB_AIOE_CODE_SEND;
        else if (!(impl->step & TB_DNS_LOOKER_STEP_RESP)) e = TB_AIOE_CODE_RECV;
    }

    // nothing to wait for?
    tb_check_return_val(e, 0);

    // wait the event: > 0: ready, 0: timeout, < 0: failed
    return tb_aioo_wait(impl->sock, e, timeout);
}
static tb_object_ref_t tb_object_json_reader_done(tb_stream_ref_t stream)
{
    // check
    tb_assert_and_check_return_val(stream, tb_null);

    // init reader
    tb_object_json_reader_t reader = {0};
    reader.stream = stream;

    // scan past leading whitespace to the first significant character
    tb_char_t ch = '\0';
    while (tb_stream_left(stream))
    {
        ch = tb_stream_bread_s8(stream);
        if (!tb_isspace(ch)) break;
    }

    // only whitespace? nothing to parse
    tb_check_return_val(tb_stream_left(stream), tb_null);

    // dispatch on the leading character to the matching value reader
    tb_object_json_reader_func_t func = tb_object_json_reader_func(ch);
    tb_assert_and_check_return_val(func, tb_null);

    // read it
    return func(&reader, ch);
}
static tb_pointer_t tb_memset_u16_impl(tb_pointer_t s, tb_uint16_t c, tb_size_t n)
{
    // check
    tb_assert_and_check_return_val(s, tb_null);

    // nothing to fill?
    tb_check_return_val(n, s);

    // the destination must be 2-byte aligned
    tb_assert(!(((tb_size_t)s) & 0x1));

    // init
    __tb_register__ tb_uint16_t* p = (tb_uint16_t*)s;

    // fill
#ifdef __tb_small__
    // one item at a time
    while (n--) *p++ = c;
#else
    // four items per round, then the remainder
    tb_size_t tail = n & 0x3;
    n = (n - tail) >> 2;
    while (n--)
    {
        p[0] = c;
        p[1] = c;
        p[2] = c;
        p[3] = c;
        p += 4;
    }
    while (tail--) *p++ = c;
#endif

    // ok?
    return s;
}
tb_bool_t tb_dns_looker_done(tb_char_t const* name, tb_ipaddr_ref_t addr)
{
    // check
    tb_assert_and_check_return_val(name && addr, tb_false);

    // a cached answer short-circuits the whole lookup
    if (tb_dns_cache_get(name, addr)) return tb_true;

    // init looker
    tb_dns_looker_ref_t looker = tb_dns_looker_init(name);
    tb_check_return_val(looker, tb_false);

    // drive the lookup state machine until it finishes or fails
    tb_long_t ok = -1;
    while (!(ok = tb_dns_looker_spak(looker, addr)))
    {
        // wait for the socket to become ready
        ok = tb_dns_looker_wait(looker, TB_DNS_LOOKER_TIMEOUT);
        tb_assert_and_check_break(ok >= 0);
    }

    // exit looker
    tb_dns_looker_exit(looker);

    // ok?
    return ok > 0? tb_true : tb_false;
}
static tb_pointer_t tb_memset_impl(tb_pointer_t s, tb_byte_t c, tb_size_t n)
{
    // check
    tb_assert_and_check_return_val(s, tb_null);

    // nothing to fill?
    tb_check_return_val(n, s);

    // init
    __tb_register__ tb_byte_t* p = s;

    // fill
#ifdef __tb_small__
    // one byte at a time
    while (n--) *p++ = c;
#else
    // four bytes per round, then the remainder
    tb_size_t tail = n & 0x3;
    n = (n - tail) >> 2;
    while (n--)
    {
        p[0] = c;
        p[1] = c;
        p[2] = c;
        p[3] = c;
        p += 4;
    }
    while (tail--) *p++ = c;
#endif

    // ok?
    return s;
}
tb_bool_t tb_aicp_post_after_(tb_aicp_ref_t aicp, tb_size_t delay, tb_aice_ref_t aice __tb_debug_decl__)
{
    // check
    tb_aicp_impl_t* impl = (tb_aicp_impl_t*)aicp;
    tb_assert_and_check_return_val(impl && impl->ptor && impl->ptor->post, tb_false);
    tb_assert_and_check_return_val(aice && aice->aico, tb_false);

    // refuse new work once the aicp has been killed
    tb_check_return_val(!tb_atomic_get(&impl->kill_all), tb_false);

    // a zero delay degenerates to an immediate post
    if (!delay) return tb_aicp_post_(aicp, aice __tb_debug_args__);

    // the aico
    tb_aico_impl_t* aico = (tb_aico_impl_t*)aice->aico;
    tb_assert_and_check_return_val(aico, tb_false);

    // copy the aice so it outlives the caller until the delay task fires
    tb_aice_ref_t posted_aice = tb_malloc0_type(tb_aice_t);
    tb_assert_and_check_return_val(posted_aice, tb_false);
    *posted_aice = *aice;

    // schedule the delayed post
    return tb_aico_task_run_((tb_aico_ref_t)aico, delay, tb_aicp_post_after_func, posted_aice __tb_debug_args__);
}
tb_wchar_t* tb_wcsncpy(tb_wchar_t* s1, tb_wchar_t const* s2, tb_size_t n)
{
    // check
    tb_assert_and_check_return_val(s1 && s2, s1);

    // no size or same?
    tb_check_return_val(n && s1 != s2, s1);

    // copy at most n characters of the source
    tb_size_t sn = tb_wcslen(s2);
    tb_size_t copied = tb_min(sn, n);
    tb_memcpy(s1, s2, copied * sizeof(tb_wchar_t));

    // wcsncpy semantics: zero-pad the tail when the source is shorter than n
    if (sn < n) tb_memset(s1 + copied, 0, (n - sn) * sizeof(tb_wchar_t));

    // ok?
    return s1;
}
__tb_no_sanitize_address__ tb_size_t tb_pool_data_size(tb_cpointer_t data)
{
    // check
    tb_check_return_val(data, 0);

    // tbox must be running normally
    tb_check_return_val(tb_state() == TB_STATE_OK, 0);

    // get the global allocator
    tb_allocator_ref_t allocator = tb_allocator();
    tb_check_return_val(allocator, 0);

    // the allocator must own this data address
    tb_check_return_val(tb_allocator_have(allocator, data), 0);

    // the head sits immediately before the user data
    tb_pool_data_head_t* data_head = &(((tb_pool_data_head_t*)data)[-1]);
    tb_check_return_val(data_head->debug.magic == TB_POOL_DATA_MAGIC, 0);

    // ok
    return data_head->size;
}
/* copy the string with a bounded size, always null-terminating (wcslcpy)
 *
 * @param s1        the destination buffer
 * @param s2        the source string
 * @param n         the destination buffer size in characters
 *
 * @return          the length of the source string; a result >= n means
 *                  the copy was truncated
 */
tb_size_t tb_wcslcpy(tb_wchar_t* s1, tb_wchar_t const* s2, tb_size_t n)
{
    // check
    tb_assert_and_check_return_val(s1 && s2, 0);

    // no size or same?
    tb_check_return_val(n && s1 != s2, tb_wcslen(s1));

    /* copy at most n - 1 characters and always null-terminate
     *
     * the previous implementation copied tb_min(sn + 1, n) characters, which
     * left the destination unterminated whenever the source did not fit
     */
    tb_size_t sn = tb_wcslen(s2);
    tb_size_t cn = tb_min(sn, n - 1);
    tb_memcpy(s1, s2, cn * sizeof(tb_wchar_t));
    s1[cn] = (tb_wchar_t)0;

    // wcslcpy semantics: return the full source length so callers can detect truncation
    return sn;
}
tb_long_t tb_dns_looker_wait(tb_dns_looker_ref_t self, tb_long_t timeout)
{
    // check
    tb_dns_looker_t* looker = (tb_dns_looker_t*)self;
    tb_assert_and_check_return_val(looker && looker->sock, -1);

    // pick the socket event matching the step we are stalled on
    tb_size_t e = TB_SOCKET_EVENT_NONE;
    if (!(looker->step & TB_DNS_LOOKER_STEP_NEVT))
    {
        if (!(looker->step & TB_DNS_LOOKER_STEP_REQT)) e = TB_SOCKET_EVENT_SEND;
        else if (!(looker->step & TB_DNS_LOOKER_STEP_RESP)) e = TB_SOCKET_EVENT_RECV;
    }

    // nothing to wait for?
    tb_check_return_val(e, 0);

    // trace
    tb_trace_d("waiting %p ..", looker->sock);

    // wait the event: > 0: ready, 0: timeout, < 0: failed
    return tb_socket_wait(looker->sock, e, timeout);
}
tb_byte_t* tb_queue_buffer_push_init(tb_queue_buffer_ref_t buffer, tb_size_t* size)
{
    // check
    tb_assert_and_check_return_val(buffer && buffer->maxn, tb_null);

    // lazily allocate the backing storage on first use
    if (!buffer->data)
    {
        // make data
        buffer->data = tb_malloc_bytes(buffer->maxn);
        tb_assert_and_check_return_val(buffer->data, tb_null);

        // init
        buffer->head = buffer->data;
        buffer->size = 0;
    }
    tb_assert_and_check_return_val(buffer->data && buffer->head, tb_null);

    // full?
    tb_size_t left = buffer->maxn - buffer->size;
    tb_check_return_val(left, tb_null);

    // compact: slide pending data back to the start so the writable region is contiguous
    if (buffer->head != buffer->data)
    {
        if (buffer->size) tb_memmov(buffer->data, buffer->head, buffer->size);
        buffer->head = buffer->data;
    }

    // report the writable size
    if (size) *size = left;

    // the write position follows the pending data
    return buffer->head + buffer->size;
}
tb_char_t const* tb_string_ltrim(tb_string_ref_t string)
{
    // check
    tb_assert_and_check_return_val(string, tb_null);

    // init
    tb_char_t* s = (tb_char_t*)tb_string_cstr(string);
    tb_size_t n = tb_string_size(string);
    tb_check_return_val(s && n, tb_null);

    // locate the first non-space character
    tb_char_t* p = s;
    tb_char_t* e = s + n;
    while (p < e && tb_isspace(*p)) p++;

    // all spaces? clear the whole string
    if (p == e) tb_string_clear(string);
    // otherwise drop the leading spaces, if any
    else if (p > s) tb_buffer_memmov(string, p - s);

    // ok?
    return tb_string_cstr(string);
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */
tb_bool_t gb_bitmap_biltter_shader_init(gb_bitmap_biltter_ref_t biltter, gb_bitmap_ref_t bitmap, gb_paint_ref_t paint)
{
    // check
    tb_assert_abort(biltter && bitmap && paint);

    // bind the bitmap
    biltter->bitmap = bitmap;

    // look up the pixmap for the bitmap's pixel format and the paint's alpha
    biltter->pixmap = gb_pixmap(gb_bitmap_pixfmt(bitmap), gb_paint_alpha(paint));
    tb_check_return_val(biltter->pixmap, tb_false);

    // cache bytes-per-pixel and the row stride
    biltter->btp = biltter->pixmap->btp;
    biltter->row_bytes = gb_bitmap_row_bytes(biltter->bitmap);

    // init shader
    // TODO

    // init operations
    // TODO

    // shader blitting is not implemented yet, so report failure
    tb_trace_noimpl();
    return tb_false;
}
/* reallocate aligned memory previously returned by the aligned pool
 * allocators, or allocate fresh aligned memory when data is tb_null
 *
 * the block is over-allocated by align bytes; the distance back to the raw
 * pool address is stored in the byte just before the returned pointer
 *
 * @param pool      the pool
 * @param data      the old aligned data address, or tb_null to malloc
 * @param size      the new size
 * @param align     the alignment, a multiple of four (presumably also a power
 *                  of two — the mask arithmetic below only aligns correctly
 *                  for powers of two; TODO confirm callers guarantee this)
 *
 * @return          the new aligned data address, or tb_null
 */
tb_pointer_t tb_pool_align_ralloc_(tb_pool_ref_t pool, tb_pointer_t data, tb_size_t size, tb_size_t align __tb_debug_decl__)
{
    // check align
    tb_assertf_abort(!(align & 3), "invalid alignment size: %lu", align);
    tb_check_return_val(!(align & 3), tb_null);

    // ralloc?
    tb_byte_t diff = 0;
    if (data)
    {
        // check address: the old data must already be aligned
        tb_assertf_abort(!((tb_size_t)data & (align - 1)), "invalid address %p", data);
        tb_check_return_val(!((tb_size_t)data & (align - 1)), tb_null);

        // the different bytes, stored just before the aligned address
        diff = ((tb_byte_t*)data)[-1];

        // adjust the address back to the raw pool pointer
        data = (tb_byte_t*)data - diff;

        // ralloc it
        data = tb_pool_ralloc_(pool, data, size + align __tb_debug_args__);
        tb_check_return_val(data, tb_null);
    }
    // no data?
    else
    {
        // malloc it directly
        data = tb_pool_malloc_(pool, size + align __tb_debug_args__);
        tb_check_return_val(data, tb_null);
    }

    // the different bytes to the next aligned address, in [1, align]
    diff = (tb_byte_t)((~(tb_long_t)data) & (align - 1)) + 1;

    // adjust the address
    data = (tb_byte_t*)data + diff;

    // check
    tb_assert_abort(!((tb_size_t)data & (align - 1)));

    // save the different bytes for the next ralloc/free
    ((tb_byte_t*)data)[-1] = diff;

    // ok?
    return data;
}
tb_handle_t tb_backtrace_symbols_init(tb_pointer_t* frames, tb_size_t nframe)
{
    // check
    tb_check_return_val(frames && nframe, tb_null);

    /* the handle is simply a scratch text buffer that
     * tb_backtrace_symbols_name() formats each symbol into
     */
    return malloc(8192);
}
/* allocate and zero item * size bytes from the native heap
 *
 * @param item      the item count
 * @param size      the item size
 *
 * @return          the allocated data, or tb_null
 */
tb_pointer_t tb_native_memory_nalloc0(tb_size_t item, tb_size_t size)
{
    // check
    tb_check_return_val(item && size, tb_null);

    /* guard against overflow of item * size before allocating,
     * otherwise a wrapped product would silently allocate a tiny block
     */
    tb_check_return_val(size <= ((tb_size_t)-1) / item, tb_null);

    // nalloc0
    return tb_native_memory_malloc0(item * size);
}
/* reverse-resolve an ip address to a host name
 *
 * @param addr      the address
 * @param name      the output name buffer
 * @param maxn      the buffer size
 *
 * @return          name on success, tb_null on failure (or when unsupported)
 */
tb_char_t const* tb_addrinfo_name(tb_ipaddr_ref_t addr, tb_char_t* name, tb_size_t maxn)
{
    // check
    tb_assert_and_check_return_val(addr && name && maxn, tb_null);

#if defined(TB_CONFIG_POSIX_HAVE_GETNAMEINFO)
    // load socket address
    struct sockaddr_storage saddr;
    socklen_t saddrlen = (socklen_t)tb_sockaddr_load(&saddr, addr);
    tb_assert_and_check_return_val(saddrlen, tb_null);

    // get host name from address; NI_NAMEREQD makes it fail rather than return the numeric form
    return !getnameinfo((struct sockaddr const*)&saddr, saddrlen, name, maxn, tb_null, 0, NI_NAMEREQD)? name : tb_null;
#elif defined(TB_CONFIG_POSIX_HAVE_GETHOSTBYNAME)
    // fall back to the legacy (non-reentrant) gethostbyaddr interface
    struct hostent* hostaddr = tb_null;
    switch (tb_ipaddr_family(addr))
    {
    case TB_IPADDR_FAMILY_IPV4:
        {
            // init ip address
            struct in_addr ipaddr = {0};
            ipaddr.s_addr = tb_ipaddr_ip_is_any(addr)? INADDR_ANY : addr->u.ipv4.u32;

            // get host name from address
            hostaddr = gethostbyaddr((tb_char_t const*)&ipaddr, sizeof(ipaddr), AF_INET);
        }
        break;
    case TB_IPADDR_FAMILY_IPV6:
        {
            // init ip address
            struct in6_addr ipaddr;
            tb_memset(&ipaddr, 0, sizeof(ipaddr));

            // save ipv6
            if (tb_ipaddr_ip_is_any(addr)) ipaddr = in6addr_any;
            else tb_memcpy(ipaddr.s6_addr, addr->u.ipv6.addr.u8, sizeof(ipaddr.s6_addr));

            // get host name from address
            hostaddr = gethostbyaddr((tb_char_t const*)&ipaddr, sizeof(ipaddr), AF_INET6);
        }
        break;
    default:
        break;
    }
    tb_check_return_val(hostaddr && hostaddr->h_name, tb_null);

    // save name
    tb_strlcpy(name, hostaddr->h_name, maxn);

    // ok?
    return name;
#else
    // not implemented on this platform
    tb_trace_noimpl();
    return tb_null;
#endif
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */
static tb_bool_t tb_shell32_instance_init(tb_handle_t instance, tb_cpointer_t priv)
{
    // check
    tb_shell32_ref_t shell32 = (tb_shell32_ref_t)instance;
    tb_check_return_val(shell32, tb_false);

    // grab shell32.dll, loading it if it is not already mapped into the process
    HANDLE module = GetModuleHandleA("shell32.dll");
    if (!module) module = tb_dynamic_init("shell32.dll");
    tb_check_return_val(module, tb_false);

    // resolve the interfaces we need
    TB_INTERFACE_LOAD(shell32, SHGetSpecialFolderLocation);
    TB_INTERFACE_LOAD(shell32, SHGetPathFromIDListW);

    // ok
    return tb_true;
}
/* instance initializer for the default allocator singleton
 *
 * @param ppriv     points to a tb_value_t pair: [0] the buffer data,
 *                  [1] its size; cleared here because both values are
 *                  consumed immediately
 *
 * @return          the allocator handle, or tb_null on failure
 */
static tb_handle_t tb_default_allocator_instance_init(tb_cpointer_t* ppriv)
{
    // check
    tb_check_return_val(ppriv, tb_null);

    // the data and size
    tb_value_ref_t tuple = (tb_value_ref_t)*ppriv;
    tb_byte_t* data = (tb_byte_t*)tuple[0].ptr;
    tb_size_t size = tuple[1].ul;

    // clear the private data first
    *ppriv = tb_null;

    // done
    tb_bool_t ok = tb_false;
    tb_allocator_ref_t allocator = tb_null;
    tb_allocator_ref_t large_allocator = tb_null;
    do
    {
        /* init the page first
         *
         * because this allocator may be called before tb_init()
         */
        if (!tb_page_init()) break ;

        /* init the native memory first
         *
         * because this allocator may be called before tb_init()
         */
        if (!tb_native_memory_init()) break ;

        // init large allocator
        large_allocator = tb_large_allocator_init(data, size);
        tb_assert_and_check_break(large_allocator);

        // init allocator on top of the large allocator
        allocator = tb_default_allocator_init(large_allocator);
        tb_assert_and_check_break(allocator);

        // ok
        ok = tb_true;

    } while (0);

    // failed?
    if (!ok)
    {
        // exit large allocator (allocator is still tb_null on every failure path)
        if (large_allocator) tb_allocator_exit(large_allocator);
        large_allocator = tb_null;
    }

    // ok?
    return (tb_handle_t)allocator;
}