/* draw a batch of points on the skia canvas.
 *
 * converts the gbox points into a cached SkPoint buffer (grown on demand)
 * and forwards them to SkCanvas::drawPoints with the current paint.
 *
 * @param device    the device
 * @param points    the points to draw
 * @param count     the points count, must be non-zero
 * @param bounds    the bounds (unused here, kept for the draw interface)
 */
static tb_void_t gb_device_skia_draw_points(gb_device_impl_t* device, gb_point_ref_t points, tb_size_t count, gb_rect_ref_t bounds)
{
    // check
    gb_skia_device_ref_t impl = (gb_skia_device_ref_t)device;
    tb_assert_and_check_return(impl && impl->canvas && points && count);

    // apply matrix
    gb_device_skia_apply_matrix(impl);

    // apply paint
    gb_device_skia_apply_paint(impl);

    // make points
    if (!impl->points) impl->points = tb_nalloc_type(count, SkPoint);
    // not enough? grow points
    else if (count > impl->points_count)
    {
        /* grow via a temporary pointer: assigning a failed tb_ralloc result
         * directly to impl->points would leak the old buffer and leave the
         * cached points_count stale
         */
        SkPoint* points_new = (SkPoint*)tb_ralloc(impl->points, count * sizeof(SkPoint));
        tb_assert_and_check_return(points_new);
        impl->points = points_new;
    }
    tb_assert_and_check_return(impl->points);

    // update points count
    if (count > impl->points_count) impl->points_count = count;

    // convert points
    tb_size_t i = 0;
    for (i = 0; i < count; i++) impl->points[i].set(gb_float_to_sk(points[i].x), gb_float_to_sk(points[i].y));

    // draw it
    impl->canvas->drawPoints(SkCanvas::kPoints_PointMode, count, impl->points, *impl->paint);
}
/* serialize the given object into a new data object.
 *
 * writes the object into a growing byte buffer (doubling on failure)
 * until tb_object_writ_to_data succeeds, then wraps the bytes in a
 * data object. the temporary buffer is always freed before returning.
 *
 * @param object    the object to serialize
 * @param format    the serialization format
 *
 * @return          the data object, or tb_null on failure (caller owns it)
 */
tb_object_ref_t tb_object_data(tb_object_ref_t object, tb_size_t format)
{
    // check
    tb_assert_and_check_return_val(object, tb_null);

    // done
    tb_object_ref_t odata = tb_null;
    tb_size_t       maxn = 4096;
    tb_byte_t*      data = tb_null;
    do
    {
        // make data
        if (!data) data = tb_malloc_bytes(maxn);
        else
        {
            /* grow via a temporary pointer: the original overwrote data with
             * the tb_ralloc result, so a failed grow leaked the old buffer
             * (data became tb_null before the cleanup free below)
             */
            tb_byte_t* data_new = (tb_byte_t*)tb_ralloc(data, maxn);
            tb_assert_and_check_break(data_new);
            data = data_new;
        }
        tb_assert_and_check_break(data);

        // writ object to data
        tb_long_t size = tb_object_writ_to_data(object, data, maxn, format);

        // ok? make the data object
        if (size >= 0) odata = tb_object_data_init_from_data(data, size);
        // failed? grow it
        else maxn <<= 1;

    } while (!odata);

    // exit data
    if (data) tb_free(data);
    data = tb_null;

    // ok?
    return odata;
}
/* replace the c-string stored in the container slot (buff) with the data string.
 *
 * copies in place over the old string first; only reallocates when the old
 * string is shorter than the new one. null data frees or clears the slot.
 *
 * @param element   the element
 * @param buff      the slot holding a tb_char_t* (may hold tb_null)
 * @param data      the new c-string, or tb_null
 */
static tb_void_t tb_element_str_repl(tb_element_ref_t element, tb_pointer_t buff, tb_cpointer_t data)
{
    // check
    tb_assert_and_check_return(element && element->dupl && buff);

#if 0
    // free it
    if (element->free) element->free(element, buff);

    // dupl it
    element->dupl(element, buff, data);
#else
    // replace it
    tb_pointer_t cstr = *((tb_pointer_t*)buff);
    if (cstr && data)
    {
        // attempt to replace it: overwrite the old string in place
        tb_char_t*          p = (tb_char_t*)cstr;
        tb_char_t const*    q = (tb_char_t const*)data;
        while (*p && *q) *p++ = *q++;

        // not enough space? the old string ended before the new one did
        if (!*p && *q)
        {
            // the left size: remaining source characters still to copy
            tb_size_t left = tb_strlen(q);
            tb_assert_abort(left);

            // the copy size: characters already copied in place
            tb_size_t copy = p - (tb_char_t*)cstr;

            // grow size (aborts on failure, so no leak handling needed here)
            cstr = tb_ralloc(cstr, copy + left + 1);
            tb_assert_abort(cstr);

            // copy the left data including the terminator
            tb_strlcpy((tb_char_t*)cstr + copy, q, left + 1);

            // update the cstr
            *((tb_pointer_t*)buff) = cstr;
        }
        // end: new string fit inside the old one, terminate it
        else *p = '\0';
    }
    // duplicate it: slot was empty
    else if (data) element->dupl(element, buff, data);
    // free it: data is null, release the old string
    else if (element->free) element->free(element, buff);
    // clear it
    else *((tb_char_t const**)buff) = tb_null;
#endif
}
/* resize the bitmap to the given width and height.
 *
 * reuses the existing pixel buffer when it is large enough; otherwise grows
 * it, which requires the bitmap to own its data.
 *
 * @param bitmap    the bitmap
 * @param width     the new width
 * @param height    the new height
 *
 * @return          tb_true on success; tb_false leaves the bitmap unchanged
 */
tb_bool_t gb_bitmap_resize(gb_bitmap_ref_t bitmap, tb_size_t width, tb_size_t height)
{
    // check
    gb_bitmap_impl_t* impl = (gb_bitmap_impl_t*)bitmap;
    tb_assert_and_check_return_val(impl && impl->data, tb_false);

    // same? nothing to do
    tb_check_return_val(impl->width != width || impl->height != height, tb_true);

    // the pixmap, only using btp
    gb_pixmap_ref_t pixmap = gb_pixmap(impl->pixfmt, 0xff);
    tb_assert_and_check_return_val(pixmap, tb_false);

    // space enough?
    if (height * width * pixmap->btp <= impl->size)
    {
        // resize in place
        impl->width = (tb_uint16_t)width;
        impl->height = (tb_uint16_t)height;
        if (impl->is_owner) impl->row_bytes = (tb_uint16_t)(width * pixmap->btp);
        impl->size = impl->row_bytes * height;
    }
    // grow?
    else
    {
        // must be owner to reallocate the pixel buffer
        tb_assert_abort(impl->is_owner);
        tb_check_return_val(impl->is_owner, tb_false);

        /* grow the data first through a temporary pointer: the original
         * assigned tb_ralloc directly to impl->data, so a failed grow leaked
         * the old buffer and left the bitmap with updated dimensions but a
         * null data pointer. now a failure leaves the bitmap fully intact.
         */
        tb_uint16_t     row_bytes = (tb_uint16_t)(width * pixmap->btp);
        tb_size_t       size = row_bytes * height;
        tb_pointer_t    data = tb_ralloc(impl->data, size);
        tb_assert_and_check_return_val(data, tb_false);

        // commit the resize only after the allocation succeeded
        impl->width = (tb_uint16_t)width;
        impl->height = (tb_uint16_t)height;
        impl->row_bytes = row_bytes;
        impl->size = size;
        impl->data = data;
    }

    // ok
    return tb_true;
}
/* store the user private data for the given socket in the poller's fd-indexed table.
 *
 * the table is a flat array indexed by fd, lazily created and grown on demand;
 * nothing is stored when priv is null.
 *
 * @param poller    the poller
 * @param sock      the socket (fd must be in (0, TB_MAXS32))
 * @param priv      the user private data to save
 */
static tb_void_t tb_poller_hash_set(tb_poller_poll_ref_t poller, tb_socket_ref_t sock, tb_cpointer_t priv)
{
    // check
    tb_assert(poller && sock);

    // the socket fd
    tb_long_t fd = tb_sock2fd(sock);
    tb_assert(fd > 0 && fd < TB_MAXS32);

    // not null?
    if (priv)
    {
        // no hash? init it first
        tb_size_t need = fd + 1;
        if (!poller->hash)
        {
            // init hash
            poller->hash = tb_nalloc0_type(need, tb_cpointer_t);
            tb_assert_and_check_return(poller->hash);

            // init hash size
            poller->hash_size = need;
        }
        else if (need > poller->hash_size)
        {
            /* grow hash via a temporary pointer: the original overwrote
             * poller->hash with the tb_ralloc result, so a failed grow leaked
             * the old table and dropped every previously stored private data
             */
            tb_cpointer_t* hash = (tb_cpointer_t*)tb_ralloc(poller->hash, need * sizeof(tb_cpointer_t));
            tb_assert_and_check_return(hash);
            poller->hash = hash;

            // init growed space
            tb_memset(poller->hash + poller->hash_size, 0, (need - poller->hash_size) * sizeof(tb_cpointer_t));

            // grow hash size
            poller->hash_size = need;
        }

        // save the user private data
        poller->hash[fd] = priv;
    }
}
/* associate the given private data with a socket in the fd-indexed table.
 *
 * the table is a flat array indexed by fd, lazily created and grown in
 * TB_SOCKDATA_GROW-padded steps; nothing is stored when priv is null.
 *
 * @param sockdata  the sockdata table
 * @param sock      the socket (fd must be in (0, TB_MAXS32))
 * @param priv      the socket private data to save
 */
tb_void_t tb_sockdata_insert(tb_sockdata_ref_t sockdata, tb_socket_ref_t sock, tb_cpointer_t priv)
{
    // check
    tb_long_t fd = tb_sock2fd(sock);
    tb_assert(sockdata && fd > 0 && fd < TB_MAXS32);

    // not null?
    if (priv)
    {
        // no data? init it first
        tb_size_t need = fd + 1;
        if (!sockdata->data)
        {
            // init data
            need += TB_SOCKDATA_GROW;
            sockdata->data = tb_nalloc0_type(need, tb_cpointer_t);
            tb_assert_and_check_return(sockdata->data);

            // init data size
            sockdata->maxn = need;
        }
        else if (need > sockdata->maxn)
        {
            // grow data
            need += TB_SOCKDATA_GROW;

            /* grow via a temporary pointer: the original overwrote
             * sockdata->data with the tb_ralloc result, so a failed grow
             * leaked the old table and dropped all stored private data
             */
            tb_cpointer_t* data = (tb_cpointer_t*)tb_ralloc(sockdata->data, need * sizeof(tb_cpointer_t));
            tb_assert_and_check_return(data);
            sockdata->data = data;

            // init growed space
            tb_memset(sockdata->data + sockdata->maxn, 0, (need - sockdata->maxn) * sizeof(tb_cpointer_t));

            // grow data size
            sockdata->maxn = need;
        }

        // save the socket private data
        sockdata->data[fd] = priv;
    }
}
/* resize the vector to the given item count.
 *
 * frees trailing items when shrinking; grows the backing buffer (aligned and
 * zero-filled) when the new size exceeds the current capacity.
 *
 * @param vector    the vector
 * @param size      the new item count
 *
 * @return          tb_true on success, tb_false on failure
 */
tb_bool_t tb_vector_resize(tb_vector_ref_t vector, tb_size_t size)
{
    // check
    tb_vector_impl_t* impl = (tb_vector_impl_t*)vector;
    tb_assert_and_check_return_val(impl, tb_false);

    // free items if the vector is decreased
    if (size < impl->size)
    {
        // free data
        if (impl->func.nfree) impl->func.nfree(&impl->func, impl->data + size * impl->func.size, impl->size - size);
    }

    // resize buffer
    if (size > impl->maxn)
    {
        tb_size_t maxn = tb_align4(size + impl->grow);
        tb_assert_and_check_return_val(maxn < TB_VECTOR_MAXN, tb_false);

        /* realloc data via a temporary pointer: the original overwrote
         * impl->data with the tb_ralloc result, so a failed grow leaked the
         * old buffer and left the vector pointing at tb_null
         */
        tb_byte_t* data = (tb_byte_t*)tb_ralloc(impl->data, maxn * impl->func.size);
        tb_assert_and_check_return_val(data, tb_false);
        impl->data = data;

        // must be align by 4-bytes
        tb_assert_and_check_return_val(!(((tb_size_t)(impl->data)) & 3), tb_false);

        /* clear the whole grown tail [impl->size, maxn): the original cleared
         * only (maxn - old maxn) items starting at impl->size, which could
         * leave the end of the newly grown region uninitialized when
         * impl->size < old maxn
         */
        tb_memset(impl->data + impl->size * impl->func.size, 0, (maxn - impl->size) * impl->func.size);

        // save maxn
        impl->maxn = maxn;
    }

    // update size
    impl->size = size;
    return tb_true;
}
/* resize the queue buffer capacity to maxn bytes.
 *
 * compacts pending data to the front of the buffer first, then grows the
 * backing storage if the new capacity exceeds the current one.
 *
 * @param buffer    the queue buffer
 * @param maxn      the new capacity, must hold the currently queued bytes
 *
 * @return          the data pointer, or tb_null on failure (buffer unchanged)
 */
tb_byte_t* tb_queue_buffer_resize(tb_queue_buffer_ref_t buffer, tb_size_t maxn)
{
    // check
    tb_assert_and_check_return_val(buffer && maxn && maxn >= buffer->size, tb_null);

    // has data?
    if (buffer->data)
    {
        // move data to head
        if (buffer->head != buffer->data)
        {
            // regions may overlap, so use memmov
            if (buffer->size) tb_memmov(buffer->data, buffer->head, buffer->size);
            buffer->head = buffer->data;
        }

        // realloc
        if (maxn > buffer->maxn)
        {
            /* grow via a temporary pointer: the original nulled head, assigned
             * tb_ralloc straight to buffer->data and returned on failure,
             * leaking the old block and leaving the buffer unusable. now a
             * failed grow leaves data and head pointing at the valid old block.
             */
            tb_byte_t* data = (tb_byte_t*)tb_ralloc(buffer->data, maxn);
            tb_assert_and_check_return_val(data, tb_null);

            // save data and head
            buffer->data = data;
            buffer->head = data;
        }
    }

    // update maxn
    buffer->maxn = maxn;

    // ok
    return buffer->data;
}
/* wait for kqueue events and translate them into aioe events.
 *
 * @param rtor      the reactor
 * @param list      the output aioe list
 * @param maxn      the output list capacity
 * @param timeout   the timeout in milliseconds, <0 waits forever
 *
 * @return          the waited aioe count, 0 on timeout, -1 on failure or kill
 */
static tb_long_t tb_aiop_rtor_kqueue_wait(tb_aiop_rtor_impl_t* rtor, tb_aioe_ref_t list, tb_size_t maxn, tb_long_t timeout)
{
    // check
    tb_aiop_rtor_kqueue_impl_t* impl = (tb_aiop_rtor_kqueue_impl_t*)rtor;
    tb_assert_and_check_return_val(impl && impl->kqfd >= 0 && rtor->aiop && list && maxn, -1);

    // the aiop
    tb_aiop_impl_t* aiop = rtor->aiop;
    tb_assert_and_check_return_val(aiop, -1);

    // init time: convert milliseconds to a timespec for kevent
    struct timespec t = {0};
    if (timeout > 0)
    {
        t.tv_sec = timeout / 1000;
        t.tv_nsec = (timeout % 1000) * 1000000;
    }

    // init grow: one eighth of the aiop capacity, rounded up to 8
    tb_size_t grow = tb_align8((rtor->aiop->maxn >> 3) + 1);

    // init events buffer lazily
    if (!impl->evts)
    {
        impl->evtn = grow;
        impl->evts = tb_nalloc0(impl->evtn, sizeof(struct kevent));
        tb_assert_and_check_return_val(impl->evts, -1);
    }

    // wait events (null timespec means wait forever)
    tb_long_t evtn = kevent(impl->kqfd, tb_null, 0, impl->evts, impl->evtn, timeout >= 0? &t : tb_null);
    tb_assert_and_check_return_val(evtn >= 0 && evtn <= impl->evtn, -1);

    // timeout?
    tb_check_return_val(evtn, 0);

    // grow the buffer for next time if this batch filled it
    if (evtn == impl->evtn)
    {
        // grow size, capped at the aiop capacity
        impl->evtn += grow;
        if (impl->evtn > rtor->aiop->maxn) impl->evtn = rtor->aiop->maxn;

        // grow data
        // NOTE(review): a failed tb_ralloc here overwrites impl->evts and leaks the old buffer — consider a temp pointer
        impl->evts = tb_ralloc(impl->evts, impl->evtn * sizeof(struct kevent));
        tb_assert_and_check_return_val(impl->evts, -1);
    }
    tb_assert(evtn <= impl->evtn);

    // limit to the caller's output capacity
    evtn = tb_min(evtn, maxn);

    // sync: translate each kevent into an aioe entry
    tb_size_t i = 0;
    tb_size_t wait = 0;
    for (i = 0; i < evtn; i++)
    {
        // the kevents
        struct kevent* e = impl->evts + i;

        // the aioo (stored in the kevent user data)
        tb_aioo_impl_t* aioo = (tb_aioo_impl_t*)e->udata;
        tb_assert_and_check_return_val(aioo && aioo->sock, -1);

        // the sock
        tb_socket_ref_t sock = aioo->sock;

        // spak? the internal wake-up pipe
        if (sock == aiop->spak[1] && e->filter == EVFILT_READ)
        {
            // read spak
            tb_char_t spak = '\0';
            if (1 != tb_socket_recv(aiop->spak[1], (tb_byte_t*)&spak, 1)) return -1;

            // killed?
            if (spak == 'k') return -1;

            // continue it
            continue ;
        }

        // skip spak
        tb_check_continue(sock != aiop->spak[1]);

        // init the aioe
        tb_aioe_ref_t aioe = &list[wait++];
        aioe->code = TB_AIOE_CODE_NONE;
        aioe->aioo = (tb_aioo_ref_t)aioo;
        aioe->priv = aioo->priv;
        if (e->filter == EVFILT_READ)
        {
            aioe->code |= TB_AIOE_CODE_RECV;
            // a readable listening socket means accept
            if (aioo->code & TB_AIOE_CODE_ACPT) aioe->code |= TB_AIOE_CODE_ACPT;
        }
        if (e->filter == EVFILT_WRITE)
        {
            aioe->code |= TB_AIOE_CODE_SEND;
            // a writable connecting socket means connect completion
            if (aioo->code & TB_AIOE_CODE_CONN) aioe->code |= TB_AIOE_CODE_CONN;
        }
        // error with no rw event: report both so the caller notices the failure
        if ((e->flags & EV_ERROR) && !(aioe->code & (TB_AIOE_CODE_RECV | TB_AIOE_CODE_SEND)))
            aioe->code |= TB_AIOE_CODE_RECV | TB_AIOE_CODE_SEND;

        // oneshot? clear it
        if (aioo->code & TB_AIOE_CODE_ONESHOT)
        {
            aioo->code = TB_AIOE_CODE_NONE;
            aioo->priv = tb_null;
        }
    }

    // ok
    return wait;
}
/* parse a RTM_NEWADDR netlink response and record ip addresses on the interfaces.
 *
 * resolves the interface name (from the names cache or the IFA_LABEL
 * attribute), then walks the rtattr list saving IPv4/IPv6 addresses into the
 * matching cached interface, creating a new one when needed.
 *
 * @param interfaces    the interfaces list
 * @param names         the ifa_index -> name cache (may take name ownership)
 * @param response      the netlink response message
 */
static tb_void_t tb_ifaddrs_interface_done_ipaddr(tb_list_ref_t interfaces, tb_hash_map_ref_t names, struct nlmsghdr* response)
{
    // check
    tb_assert_and_check_return(interfaces && names && response);

    // the info
    struct ifaddrmsg* info = (struct ifaddrmsg *)NLMSG_DATA(response);

    // must be not link
    tb_assert_and_check_return(info->ifa_family != AF_PACKET);

    // attempt to find the interface name
    tb_bool_t   owner = tb_false;
    tb_char_t*  name = (tb_char_t*)tb_hash_map_get(names, tb_u2p(info->ifa_index));
    if (!name)
    {
        // get the interface name from the attributes
        struct rtattr*  rta = tb_null;
        tb_size_t       rta_size = NLMSG_PAYLOAD(response, sizeof(struct ifaddrmsg));
        for(rta = IFA_RTA(info); RTA_OK(rta, rta_size); rta = RTA_NEXT(rta, rta_size))
        {
            // done
            tb_pointer_t    rta_data = RTA_DATA(rta);
            tb_size_t       rta_data_size = RTA_PAYLOAD(rta);
            switch(rta->rta_type)
            {
            case IFA_LABEL:
                {
                    // make name (name is tb_null here, so this is a fresh allocation)
                    name = (tb_char_t*)tb_ralloc(name, rta_data_size + 1);
                    tb_assert_and_check_break(name);

                    // copy name
                    tb_strlcpy(name, rta_data, rta_data_size + 1);

                    // save name; owner marks that we must free it at exit
                    tb_hash_map_insert(names, tb_u2p(info->ifa_index), name);
                    owner = tb_true;
                }
                break;
            default:
                break;
            }
        }
    }

    // check
    tb_check_return(name);

    // done: walk the attributes again, now saving addresses
    struct rtattr*  rta = tb_null;
    tb_size_t       rta_size = NLMSG_PAYLOAD(response, sizeof(struct ifaddrmsg));
    for(rta = IFA_RTA(info); RTA_OK(rta, rta_size); rta = RTA_NEXT(rta, rta_size))
    {
        /* attempt to get the interface from the cached interfaces
         * and make a new interface if no the cached interface
         */
        tb_ifaddrs_interface_t      interface_new = {0};
        tb_ifaddrs_interface_ref_t  interface = tb_ifaddrs_interface_find((tb_iterator_ref_t)interfaces, name);
        if (!interface) interface = &interface_new;

        // check
        tb_assert(interface == &interface_new || interface->name);

        // done
        tb_pointer_t rta_data = RTA_DATA(rta);
        switch(rta->rta_type)
        {
        case IFA_LOCAL:
        case IFA_ADDRESS:
            {
                // make ipaddr
                tb_ipaddr_t ipaddr;
                if (!tb_ifaddrs_netlink_ipaddr_save(&ipaddr, info->ifa_family, info->ifa_index, rta_data)) break;

                // save flags
                if ((info->ifa_flags & IFF_LOOPBACK) || tb_ipaddr_ip_is_loopback(&ipaddr))
                    interface->flags |= TB_IFADDRS_INTERFACE_FLAG_IS_LOOPBACK;

                // save ipaddr
                switch (tb_ipaddr_family(&ipaddr))
                {
                case TB_IPADDR_FAMILY_IPV4:
                    {
                        interface->flags |= TB_IFADDRS_INTERFACE_FLAG_HAVE_IPADDR4;
                        interface->ipaddr4 = ipaddr.u.ipv4;
                    }
                    break;
                case TB_IPADDR_FAMILY_IPV6:
                    {
                        interface->flags |= TB_IFADDRS_INTERFACE_FLAG_HAVE_IPADDR6;
                        interface->ipaddr6 = ipaddr.u.ipv6;
                    }
                    break;
                default:
                    break;
                }

                // trace
                tb_trace_d("name: %s, ipaddr: %{ipaddr}", name, &ipaddr);

                // new interface? save it to the list (the list copies the struct)
                if (tb_ipaddr_family(&ipaddr) && interface == &interface_new)
                {
                    // save interface name
                    interface->name = tb_strdup(name);
                    tb_assert(interface->name);

                    // save interface
                    tb_list_insert_tail(interfaces, interface);
                }
            }
            break;
        case IFA_LABEL:
        case IFA_BROADCAST:
            break;
        default:
            break;
        }
    }

    // exit name if we allocated it above
    if (name && owner) tb_free(name);
    name = tb_null;
}
/*! put impl
 *
 * sift-up insertion: walk from the new hole at the end of the array toward
 * the root, moving parents down until the new value's position is found.
 *
 * <pre>
 * init:
 *
 *                       1(head)
 *            -------------------------
 *           |                         |
 *           4                         2
 *      --------------            -------------
 *     |              |          |             |
 *     6(parent)      9          7             8
 *  ---------
 * |         |
 * 10(last) (hole) <= 5(val)
 *
 * after:
 *
 *                       1(head)
 *            -------------------------
 *           |                         |
 *           4                         2
 *      --------------            -------------
 *     |              |          |             |
 *     5(hole)        9          7             8
 *  ---------
 * |         |
 * 10(last)  6(last)
 * </pre>
 */
tb_void_t tb_heap_put(tb_heap_ref_t heap, tb_cpointer_t data)
{
    // check
    tb_heap_impl_t* impl = (tb_heap_impl_t*)heap;
    tb_assert_and_check_return(impl && impl->data);

    // full? grow it
    if (impl->size == impl->maxn)
    {
        // the maxn
        tb_size_t maxn = tb_align4(impl->maxn + impl->grow);
        tb_assert_and_check_return(maxn < TB_HEAD_MAXN);

        // realloc data
        // NOTE(review): a failed tb_ralloc here overwrites impl->data and leaks the old buffer — consider a temp pointer
        impl->data = (tb_byte_t*)tb_ralloc(impl->data, maxn * impl->func.size);
        tb_assert_and_check_return(impl->data);

        // must be align by 4-bytes
        tb_assert_and_check_return(!(((tb_size_t)(impl->data)) & 3));

        // clear the grow data
        tb_memset(impl->data + impl->size * impl->func.size, 0, (maxn - impl->maxn) * impl->func.size);

        // save maxn
        impl->maxn = maxn;
    }

    // check
    tb_assert_and_check_return(impl->size < impl->maxn);

    // init func
    tb_item_func_comp_t func_comp = impl->func.comp;
    tb_item_func_data_t func_data = impl->func.data;
    tb_assert_and_check_return(func_comp && func_data);

    /* walk, (hole - 1) / 2: the parent node of the hole
     *
     * the switch specializes the item move by item size so the common fixed
     * sizes use a single word copy instead of tb_memcpy. note that when
     * hole == 0 the parent index underflows, but the `hole &&` guard in each
     * loop condition short-circuits before parent is ever dereferenced.
     */
    tb_size_t   parent = 0;
    tb_byte_t*  head = impl->data;
    tb_size_t   hole = impl->size;
    tb_size_t   step = impl->func.size;
    switch (step)
    {
#ifndef __tb_small__
    case sizeof(tb_uint64_t):
        {
            for (parent = (hole - 1) >> 1; hole && (func_comp(&impl->func, func_data(&impl->func, head + parent * step), data) > 0); parent = (hole - 1) >> 1)
            {
                // move item: parent => hole
                *((tb_uint64_t*)(head + hole * step)) = *((tb_uint64_t*)(head + parent * step));

                // move node: hole => parent
                hole = parent;
            }
        }
        break;
    case sizeof(tb_uint32_t):
        {
            for (parent = (hole - 1) >> 1; hole && (func_comp(&impl->func, func_data(&impl->func, head + parent * step), data) > 0); parent = (hole - 1) >> 1)
            {
                // move item: parent => hole
                *((tb_uint32_t*)(head + hole * step)) = *((tb_uint32_t*)(head + parent * step));

                // move node: hole => parent
                hole = parent;
            }
        }
        break;
    case sizeof(tb_uint16_t):
        {
            for (parent = (hole - 1) >> 1; hole && (func_comp(&impl->func, func_data(&impl->func, head + parent * step), data) > 0); parent = (hole - 1) >> 1)
            {
                // move item: parent => hole
                *((tb_uint16_t*)(head + hole * step)) = *((tb_uint16_t*)(head + parent * step));

                // move node: hole => parent
                hole = parent;
            }
        }
        break;
    case sizeof(tb_uint8_t):
        {
            for (parent = (hole - 1) >> 1; hole && (func_comp(&impl->func, func_data(&impl->func, head + parent * step), data) > 0); parent = (hole - 1) >> 1)
            {
                // move item: parent => hole
                *((tb_uint8_t*)(head + hole * step)) = *((tb_uint8_t*)(head + parent * step));

                // move node: hole => parent
                hole = parent;
            }
        }
        break;
#endif
    default:
        for (parent = (hole - 1) >> 1; hole && (func_comp(&impl->func, func_data(&impl->func, head + parent * step), data) > 0); parent = (hole - 1) >> 1)
        {
            // move item: parent => hole
            tb_memcpy(head + hole * step, head + parent * step, step);

            // move node: hole => parent
            hole = parent;
        }
        break;
    }

    // save data into the final hole position
    impl->func.dupl(&impl->func, head + hole * step, data);

    // size++
    impl->size++;

    // check
//  tb_heap_check(impl);
}
/* parse a RTM_NEWLINK netlink response and record hardware addresses on the interfaces.
 *
 * resolves the interface name (from the names cache or the IFLA_IFNAME
 * attribute), then walks the rtattr list saving the IFLA_ADDRESS hwaddr into
 * the matching cached interface, creating a new one when needed.
 *
 * @param interfaces    the interfaces list
 * @param names         the ifa_index -> name cache (may take name ownership)
 * @param response      the netlink response message
 */
static tb_void_t tb_ifaddrs_interface_done_hwaddr(tb_list_ref_t interfaces, tb_hash_map_ref_t names, struct nlmsghdr* response)
{
    // check
    tb_assert_and_check_return(interfaces && names && response);

    // the info
    struct ifaddrmsg* info = (struct ifaddrmsg *)NLMSG_DATA(response);

    // attempt to find the interface name
    tb_bool_t   owner = tb_false;
    tb_char_t*  name = (tb_char_t*)tb_hash_map_get(names, tb_u2p(info->ifa_index));
    if (!name)
    {
        // get the interface name from the attributes
        struct rtattr*  rta = tb_null;
        tb_size_t       rta_size = NLMSG_PAYLOAD(response, sizeof(struct ifaddrmsg));
        for(rta = IFLA_RTA(info); RTA_OK(rta, rta_size); rta = RTA_NEXT(rta, rta_size))
        {
            // done
            tb_pointer_t    rta_data = RTA_DATA(rta);
            tb_size_t       rta_data_size = RTA_PAYLOAD(rta);
            switch(rta->rta_type)
            {
            case IFLA_IFNAME:
                {
                    // make name (name is tb_null here, so this is a fresh allocation)
                    name = (tb_char_t*)tb_ralloc(name, rta_data_size + 1);
                    tb_assert_and_check_break(name);

                    // copy name
                    tb_strlcpy(name, rta_data, rta_data_size + 1);

                    // save name; owner marks that we must free it at exit
                    tb_hash_map_insert(names, tb_u2p(info->ifa_index), name);
                    owner = tb_true;
                }
                break;
            default:
                break;
            }
        }
    }

    // check
    tb_check_return(name);

    // done: walk the attributes again, now saving the hwaddr
    struct rtattr*  rta = tb_null;
    tb_size_t       rta_size = NLMSG_PAYLOAD(response, sizeof(struct ifaddrmsg));
    for(rta = IFLA_RTA(info); RTA_OK(rta, rta_size); rta = RTA_NEXT(rta, rta_size))
    {
        /* attempt to get the interface from the cached interfaces
         * and make a new interface if no the cached interface
         */
        tb_ifaddrs_interface_t      interface_new = {0};
        tb_ifaddrs_interface_ref_t  interface = tb_ifaddrs_interface_find((tb_iterator_ref_t)interfaces, name);
        if (!interface) interface = &interface_new;

        // check
        tb_assert(interface == &interface_new || interface->name);

        // done
        tb_pointer_t    rta_data = RTA_DATA(rta);
        tb_size_t       rta_data_size = RTA_PAYLOAD(rta);
        switch(rta->rta_type)
        {
        case IFLA_ADDRESS:
            {
                // no hwaddr saved yet?
                if (!(interface->flags & TB_IFADDRS_INTERFACE_FLAG_HAVE_HWADDR))
                {
                    // check: only accept addresses of exactly the hwaddr size
                    tb_check_break(rta_data_size == sizeof(interface->hwaddr.u8));

                    // save flags
                    interface->flags |= TB_IFADDRS_INTERFACE_FLAG_HAVE_HWADDR;
                    if (info->ifa_flags & IFF_LOOPBACK) interface->flags |= TB_IFADDRS_INTERFACE_FLAG_IS_LOOPBACK;

                    // save hwaddr
                    tb_memcpy(interface->hwaddr.u8, rta_data, sizeof(interface->hwaddr.u8));

                    // trace
                    tb_trace_d("name: %s, hwaddr: %{hwaddr}", name, &interface->hwaddr);

                    // new interface? save it to the list (the list copies the struct)
                    if (interface == &interface_new)
                    {
                        // save interface name
                        interface->name = tb_strdup(name);
                        tb_assert(interface->name);

                        // save interface
                        tb_list_insert_tail(interfaces, interface);
                    }
                }
            }
            break;
        case IFLA_IFNAME:
        case IFLA_BROADCAST:
        case IFLA_STATS:
            break;
        default:
            break;
        }
    }

    // exit name if we allocated it above
    if (name && owner) tb_free(name);
    name = tb_null;
}
/* wait for epoll events and translate them into aioe events.
 *
 * @param rtor      the reactor
 * @param list      the output aioe list
 * @param maxn      the output list capacity
 * @param timeout   the timeout in milliseconds, <0 waits forever
 *
 * @return          the waited aioe count, 0 on timeout/EINTR, -1 on failure or kill
 */
static tb_long_t tb_aiop_rtor_epoll_wait(tb_aiop_rtor_impl_t* rtor, tb_aioe_ref_t list, tb_size_t maxn, tb_long_t timeout)
{
    // check
    tb_aiop_rtor_epoll_impl_t* impl = (tb_aiop_rtor_epoll_impl_t*)rtor;
    tb_assert_and_check_return_val(impl && impl->epfd > 0, -1);

    // the aiop
    tb_aiop_impl_t* aiop = rtor->aiop;
    tb_assert_and_check_return_val(aiop, -1);

    // init grow: one eighth of the aiop capacity, rounded up to 8
    tb_size_t grow = tb_align8((rtor->aiop->maxn >> 3) + 1);

    // init events buffer lazily
    if (!impl->evts)
    {
        impl->evtn = grow;
        impl->evts = tb_nalloc0(impl->evtn, sizeof(struct epoll_event));
        tb_assert_and_check_return_val(impl->evts, -1);
    }

    // wait events
    tb_long_t evtn = epoll_wait(impl->epfd, impl->evts, impl->evtn, timeout);

    // interrupted?(for gdb?) continue it
    if (evtn < 0 && errno == EINTR) return 0;

    // check error?
    tb_assert_and_check_return_val(evtn >= 0 && evtn <= impl->evtn, -1);

    // timeout?
    tb_check_return_val(evtn, 0);

    // grow the buffer for next time if this batch filled it
    if (evtn == impl->evtn)
    {
        // grow size, capped at the aiop capacity
        impl->evtn += grow;
        if (impl->evtn > rtor->aiop->maxn) impl->evtn = rtor->aiop->maxn;

        // grow data
        // NOTE(review): a failed tb_ralloc here overwrites impl->evts and leaks the old buffer — consider a temp pointer
        impl->evts = tb_ralloc(impl->evts, impl->evtn * sizeof(struct epoll_event));
        tb_assert_and_check_return_val(impl->evts, -1);
    }
    tb_assert(evtn <= impl->evtn);

    // limit to the caller's output capacity
    evtn = tb_min(evtn, maxn);

    // sync: translate each epoll event into an aioe entry
    tb_size_t i = 0;
    tb_size_t wait = 0;
    for (i = 0; i < evtn; i++)
    {
        // the aioo (stored in the epoll user data)
        tb_aioo_impl_t* aioo = (tb_aioo_impl_t*)tb_u2p(impl->evts[i].data.u64);
        tb_assert_and_check_return_val(aioo, -1);

        // the sock
        tb_socket_ref_t sock = aioo->sock;
        tb_assert_and_check_return_val(sock, -1);

        // the events
        tb_size_t events = impl->evts[i].events;

        // spak? the internal wake-up pipe
        if (sock == aiop->spak[1] && (events & EPOLLIN))
        {
            // read spak
            tb_char_t spak = '\0';
            if (1 != tb_socket_recv(aiop->spak[1], (tb_byte_t*)&spak, 1)) return -1;

            // killed?
            if (spak == 'k') return -1;

            // continue it
            continue ;
        }

        // skip spak
        tb_check_continue(sock != aiop->spak[1]);

        // save aioe
        tb_aioe_ref_t aioe = &list[wait++];
        aioe->code = TB_AIOE_CODE_NONE;
        aioe->priv = aioo->priv;
        aioe->aioo = (tb_aioo_ref_t)aioo;
        if (events & EPOLLIN)
        {
            aioe->code |= TB_AIOE_CODE_RECV;
            // a readable listening socket means accept
            if (aioo->code & TB_AIOE_CODE_ACPT) aioe->code |= TB_AIOE_CODE_ACPT;
        }
        if (events & EPOLLOUT)
        {
            aioe->code |= TB_AIOE_CODE_SEND;
            // a writable connecting socket means connect completion
            if (aioo->code & TB_AIOE_CODE_CONN) aioe->code |= TB_AIOE_CODE_CONN;
        }
        // hup/error with no rw event: report both so the caller notices the failure
        if (events & (EPOLLHUP | EPOLLERR) && !(aioe->code & (TB_AIOE_CODE_RECV | TB_AIOE_CODE_SEND)))
            aioe->code |= TB_AIOE_CODE_RECV | TB_AIOE_CODE_SEND;

        // oneshot? clear it
        if (aioo->code & TB_AIOE_CODE_ONESHOT)
        {
            // clear code
            aioo->code = TB_AIOE_CODE_NONE;
            aioo->priv = tb_null;

            // clear events manually if no epoll oneshot
#ifndef EPOLLONESHOT
            struct epoll_event e = {0};
            if (epoll_ctl(impl->epfd, EPOLL_CTL_DEL, tb_sock2fd(aioo->sock), &e) < 0)
            {
                // trace
                tb_trace_e("clear aioo[%p] failed manually for oneshot, error: %d", aioo, errno);
            }
#endif
        }
    }

    // ok
    return wait;
}
/* receive and dispatch netlink responses for the given request.
 *
 * receives into a growing buffer (doubling when the kernel reports the
 * buffer too small), walks the nlmsghdr chain and dispatches RTM_NEWLINK /
 * RTM_NEWADDR messages to the hwaddr / ipaddr handlers until NLMSG_DONE.
 *
 * @param interfaces    the interfaces list
 * @param names         the ifa_index -> name cache
 * @param sock          the netlink socket (also used as the sequence number)
 * @param request       the request type sent (RTM_GETLINK or RTM_GETADDR)
 *
 * @return              1 on NLMSG_DONE, 0 when the batch ended early, -1 on failure
 */
static tb_long_t tb_ifaddrs_interface_done(tb_list_ref_t interfaces, tb_hash_map_ref_t names, tb_long_t sock, tb_long_t request)
{
    // check
    tb_assert_and_check_return_val(interfaces && names && sock >= 0, -1);

    // done
    tb_size_t       size = 4096;
    tb_pointer_t    data = tb_null;
    tb_long_t       ok = -1;
    pid_t           pid = getpid();
    while (ok < 0)
    {
        // make data
        // NOTE(review): on tb_ralloc failure the old buffer leaks (data is overwritten before the cleanup free) — consider a temp pointer
        data = tb_ralloc(data, size);
        tb_assert_and_check_break(data);

        // trace
        tb_trace_d("netlink: recv: ..");

        // recv response
        tb_long_t recv = tb_ifaddrs_netlink_socket_recv(sock, data, size);

        // trace
        tb_trace_d("netlink: recv: %ld", recv);

        // space not enough?
        if (recv == -1)
        {
            // grow space and continue it
            size <<= 1;
            continue ;
        }

        // check
        tb_assert_and_check_break(recv > 0);

        // done: walk the message chain
        tb_bool_t           failed = tb_false;
        struct nlmsghdr*    response = tb_null;
        for (response = (struct nlmsghdr *)data; NLMSG_OK(response, (tb_uint_t)recv); response = (struct nlmsghdr *)NLMSG_NEXT(response, recv))
        {
            // trace
            tb_trace_d("type: %d, pid: %ld ?= %ld, sock: %ld ?= %ld", response->nlmsg_type, (tb_long_t)response->nlmsg_pid, (tb_long_t)pid, (tb_long_t)response->nlmsg_seq, (tb_long_t)sock);

            // failed?
            tb_check_break_state(response->nlmsg_type != NLMSG_ERROR, failed, tb_true);

            // invalid pid?
            tb_assert_and_check_break_state((tb_long_t)response->nlmsg_pid > 0, failed, tb_true);

            // isn't it? skip messages not addressed to our pid/sequence
            if ((pid_t)response->nlmsg_pid != pid || (tb_long_t)response->nlmsg_seq != sock) continue;

            // done?
            if (response->nlmsg_type == NLMSG_DONE)
            {
                // trace
                tb_trace_d("done");

                // ok
                ok = 1;
                break;
            }

            // get hwaddr?
            if (request == RTM_GETLINK && response->nlmsg_type == RTM_NEWLINK)
            {
                // done hwaddr
                tb_ifaddrs_interface_done_hwaddr(interfaces, names, response);
            }
            // get ipaddr?
            else if (request == RTM_GETADDR && response->nlmsg_type == RTM_NEWADDR)
            {
                // done ipaddr
                tb_ifaddrs_interface_done_ipaddr(interfaces, names, response);
            }
        }

        // failed?
        tb_check_break(!failed);

        // continue if empty? batch processed but no NLMSG_DONE seen yet
        if (ok < 0) ok = 0;
        break;
    }

    // exit data
    if (data) tb_free(data);
    data = tb_null;

    // ok?
    return ok;
}
/* resize the buffer to the given byte size.
 *
 * promotes the inline static buffer to a heap allocation when it must grow,
 * or reallocates the existing heap buffer. all work is done on local copies,
 * so a failed grow leaves the buffer state untouched.
 *
 * @param buffer    the buffer
 * @param size      the new size in bytes, must be non-zero
 *
 * @return          the data pointer, or tb_null on failure
 */
tb_byte_t* tb_buffer_resize(tb_buffer_t* buffer, tb_size_t size)
{
    // check
    tb_assert_and_check_return_val(buffer && size, tb_null);

    // done
    tb_bool_t   ok = tb_false;
    tb_byte_t*  buff_data = buffer->data;
    tb_size_t   buff_size = buffer->size;
    tb_size_t   buff_maxn = buffer->maxn;
    do
    {
        // check
        tb_assert_and_check_break(buff_data);

        // using static buffer?
        if (buff_data == buffer->buff)
        {
            // grow? move from the inline buffer to the heap
            if (size > buff_maxn)
            {
                // grow maxn (padded, overflow-checked)
                buff_maxn = tb_align8(size + TB_BUFFER_GROW_SIZE);
                tb_assert_and_check_break(size <= buff_maxn);

                // grow data
                buff_data = tb_malloc_bytes(buff_maxn);
                tb_assert_and_check_break(buff_data);

                // copy data from the static buffer
                tb_memcpy(buff_data, buffer->buff, buff_size);
            }

            // update the size
            buff_size = size;
        }
        else
        {
            // grow the heap buffer?
            if (size > buff_maxn)
            {
                // grow maxn (padded, overflow-checked)
                buff_maxn = tb_align8(size + TB_BUFFER_GROW_SIZE);
                tb_assert_and_check_break(size <= buff_maxn);

                /* grow data: on failure only the local copy is clobbered,
                 * buffer->data still owns the old (valid) block
                 */
                buff_data = (tb_byte_t*)tb_ralloc(buff_data, buff_maxn);
                tb_assert_and_check_break(buff_data);
            }
#if 0
            // decrease to the static buffer
            else if (size <= sizeof(buffer->buff))
            {
                // update the maxn
                buff_maxn = sizeof(buffer->buff);

                // copy data
                tb_memcpy(buffer->buff, buff_data, size);

                // free data
                tb_free(buff_data);

                // using the static buffer
                buff_data = buffer->buff;
            }
#endif

            // update the size
            buff_size = size;
        }

        // update the buffer (commit only on success)
        buffer->data = buff_data;
        buffer->size = buff_size;
        buffer->maxn = buff_maxn;

        // ok
        ok = tb_true;

    } while (0);

    // trace
    if (!ok) tb_trace_e("resize buffer failed: %lu => %lu", buff_size, size);

    // ok
    return ok? (tb_byte_t*)buffer->data : tb_null;
}
/* insert or replace an item in the hash map.
 *
 * when the name already exists only the data part is replaced; otherwise the
 * item is inserted into the bucket's sorted item list, growing or creating
 * the list as needed. items are stored as [name bytes][data bytes] records
 * in a flexible array following the list header.
 *
 * @param hash_map  the hash map
 * @param name      the item name (key)
 * @param data      the item data (value)
 *
 * @return          the encoded item index (bucket+1, item+1), or 0 on failure
 */
tb_size_t tb_hash_map_insert(tb_hash_map_ref_t hash_map, tb_cpointer_t name, tb_cpointer_t data)
{
    // check
    tb_hash_map_impl_t* impl = (tb_hash_map_impl_t*)hash_map;
    tb_assert_and_check_return_val(impl, 0);

    // the step: total record size of one (name, data) pair
    tb_size_t step = impl->element_name.size + impl->element_data.size;
    tb_assert_and_check_return_val(step, 0);

    // find it: on miss, buck/item give the insertion position
    tb_size_t buck = 0;
    tb_size_t item = 0;
    if (tb_hash_map_item_find(impl, name, &buck, &item))
    {
        // check
        tb_assert_and_check_return_val(buck < impl->hash_size, 0);

        // get list
        tb_hash_map_item_list_t* list = impl->hash_list[buck];
        tb_assert_and_check_return_val(list && list->size && item < list->size, 0);

        // replace data (records start right after the list header, at &list[1])
        impl->element_data.repl(&impl->element_data, ((tb_byte_t*)&list[1]) + item * step + impl->element_name.size, data);
    }
    else
    {
        // check
        tb_assert_and_check_return_val(buck < impl->hash_size, 0);

        // get list
        tb_hash_map_item_list_t* list = impl->hash_list[buck];

        // insert item
        if (list)
        {
            // grow?
            if (list->size >= list->maxn)
            {
                // check
                tb_assert_and_check_return_val(impl->item_grow, 0);

                // resize maxn
                tb_size_t maxn = tb_align_pow2(list->maxn + impl->item_grow);
                tb_assert_and_check_return_val(maxn > list->maxn, 0);

                // realloc it (on failure the old list pointer stays attached in hash_list)
                list = (tb_hash_map_item_list_t*)tb_ralloc(list, sizeof(tb_hash_map_item_list_t) + maxn * step);
                tb_assert_and_check_return_val(list, 0);

                // update the impl item maxn (list->maxn still holds the old value here)
                impl->item_maxn += maxn - list->maxn;

                // update maxn
                list->maxn = maxn;

                // reattach list: realloc may have moved it
                impl->hash_list[buck] = list;
            }
            tb_assert_and_check_return_val(item <= list->size && list->size < list->maxn, 0);

            // move items: shift the tail right to open a slot (overlapping, so memmov)
            if (item != list->size) tb_memmov(((tb_byte_t*)&list[1]) + (item + 1) * step, ((tb_byte_t*)&list[1]) + item * step, (list->size - item) * step);

            // dupl item into the opened slot
            list->size++;
            impl->element_name.dupl(&impl->element_name, ((tb_byte_t*)&list[1]) + item * step, name);
            impl->element_data.dupl(&impl->element_data, ((tb_byte_t*)&list[1]) + item * step + impl->element_name.size, data);
        }
        // create list for adding item
        else
        {
            // check
            tb_assert_and_check_return_val(impl->item_grow, 0);

            // make list
            list = (tb_hash_map_item_list_t*)tb_malloc0(sizeof(tb_hash_map_item_list_t) + impl->item_grow * step);
            tb_assert_and_check_return_val(list, 0);

            // init list with the single new item
            list->size = 1;
            list->maxn = impl->item_grow;
            impl->element_name.dupl(&impl->element_name, ((tb_byte_t*)&list[1]), name);
            impl->element_data.dupl(&impl->element_data, ((tb_byte_t*)&list[1]) + impl->element_name.size, data);

            // attach list
            impl->hash_list[buck] = list;

            // update the impl item maxn
            impl->item_maxn += list->maxn;
        }

        // update the impl item size
        impl->item_size++;
    }

    // ok? encode bucket and item as a 1-based pair
    return tb_hash_map_index_make(buck + 1, item + 1);
}
/* load the IPv6 interfaces (windows) via GetAdaptersAddresses.
 *
 * queries the adapter addresses (growing the buffer once when the api
 * reports ERROR_BUFFER_OVERFLOW), then walks the adapter list saving
 * loopback flag, hwaddr and unicast ip addresses per interface.
 *
 * @param interfaces    the interfaces list to fill
 */
static tb_void_t tb_ifaddrs_interface_load6(tb_list_ref_t interfaces)
{
    // check
    tb_assert_and_check_return(interfaces);

    // done
    PIP_ADAPTER_ADDRESSES addresses = tb_null;
    do
    {
        // make the addresses
        addresses = (PIP_ADAPTER_ADDRESSES)tb_malloc0_type(IP_ADAPTER_ADDRESSES);
        tb_assert_and_check_break(addresses);

        // get the real adapter info size
        ULONG size = sizeof(IP_ADAPTER_ADDRESSES);
        if (tb_iphlpapi()->GetAdaptersAddresses(AF_INET6, GAA_FLAG_SKIP_DNS_SERVER, tb_null, addresses, &size) == ERROR_BUFFER_OVERFLOW)
        {
            // grow the adapter info buffer to the size the api reported
            addresses = (PIP_ADAPTER_ADDRESSES)tb_ralloc(addresses, size);
            tb_assert_and_check_break(addresses);

            // reclear it
            tb_memset(addresses, 0, size);
        }

        // get the addresses
        if (tb_iphlpapi()->GetAdaptersAddresses(AF_INET6, GAA_FLAG_SKIP_DNS_SERVER, tb_null, addresses, &size) != NO_ERROR) break;

        // done: walk the adapter linked list
        PIP_ADAPTER_ADDRESSES address = addresses;
        while (address)
        {
            // check
            tb_assert(address->AdapterName);

            /* attempt to get the interface from the cached interfaces
             * and make a new interface if no the cached interface
             */
            tb_ifaddrs_interface_t      interface_new = {0};
            tb_ifaddrs_interface_ref_t  interface = tb_ifaddrs_interface_find((tb_iterator_ref_t)interfaces, address->AdapterName);
            if (!interface) interface = &interface_new;

            // check
            tb_assert(interface == &interface_new || interface->name);

            // save flags
            if (address->IfType == IF_TYPE_SOFTWARE_LOOPBACK) interface->flags |= TB_IFADDRS_INTERFACE_FLAG_IS_LOOPBACK;

            // save hwaddr (only when the length matches our fixed hwaddr size)
            if (address->PhysicalAddressLength == sizeof(interface->hwaddr.u8))
            {
                interface->flags |= TB_IFADDRS_INTERFACE_FLAG_HAVE_HWADDR;
                tb_memcpy(interface->hwaddr.u8, address->PhysicalAddress, sizeof(interface->hwaddr.u8));
            }

            // save ipaddrs until both ipv4 and ipv6 have been recorded
            PIP_ADAPTER_UNICAST_ADDRESS ipAddress = address->FirstUnicastAddress;
            while (ipAddress && (interface->flags & TB_IFADDRS_INTERFACE_FLAG_HAVE_IPADDR) != TB_IFADDRS_INTERFACE_FLAG_HAVE_IPADDR)
            {
                // done
                tb_ipaddr_t ipaddr;
                struct sockaddr_storage* saddr = (struct sockaddr_storage*)ipAddress->Address.lpSockaddr;
                if (saddr && tb_sockaddr_save(&ipaddr, saddr))
                {
                    if (ipaddr.family == TB_IPADDR_FAMILY_IPV4)
                    {
                        interface->flags |= TB_IFADDRS_INTERFACE_FLAG_HAVE_IPADDR4;
                        interface->ipaddr4 = ipaddr.u.ipv4;
                    }
                    else if (ipaddr.family == TB_IPADDR_FAMILY_IPV6)
                    {
                        interface->flags |= TB_IFADDRS_INTERFACE_FLAG_HAVE_IPADDR6;
                        interface->ipaddr6 = ipaddr.u.ipv6;
                    }
                }

                // the next
                ipAddress = ipAddress->Next;
            }

            // new interface? save it to the list (the list copies the struct)
            if (interface == &interface_new && interface->flags)
            {
                // save interface name
                interface->name = tb_strdup(address->AdapterName);
                tb_assert(interface->name);

                // save interface
                tb_list_insert_tail(interfaces, interface);
            }

            // the next address
            address = address->Next;
        }

    } while (0);

    // exit the addresses
    if (addresses) tb_free(addresses);
    addresses = tb_null;
}
/* find a process id by name (or parse it directly when name is numeric).
 *
 * snapshots the process table via sysctl(KERN_PROC_ALL), retrying with a
 * grown buffer while the kernel reports ENOMEM, then matches the process
 * name exactly first and by prefix (case-insensitive) as a fallback.
 *
 * @param name  the process name or a decimal pid string
 *
 * @return      the pid, or 0 if not found
 */
static pid_t it_pid(tb_char_t const* name)
{
    // check
    tb_assert_and_check_return_val(name, 0);

    // is pid? a numeric name is returned as-is
    tb_size_t pid = tb_atoi(name);
    if (pid) return pid;

    // init: first sysctl call only queries the required buffer size
    struct kinfo_proc*  p = tb_null;
    struct kinfo_proc*  q = tb_null;
    tb_int_t            mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_ALL, 0};
    tb_size_t           miblen = 4;
    tb_size_t           size = 0;
    tb_long_t           ok = sysctl(mib, miblen, tb_null, &size, tb_null, 0);

    // walk: retry with extra headroom while the process table outgrows the buffer
    do
    {
        // grow by 10% headroom (the table may grow between the two sysctl calls)
        size += size / 10;
        q = tb_ralloc(p, size);

        // no memory? (temp pointer keeps p valid so it can be freed here)
        if (!q)
        {
            if (p) tb_free(p);
            return 0;
        }

        // list
        p = q;
        ok = sysctl(mib, miblen, p, &size, tb_null, 0);

    } while (ok == -1 && errno == ENOMEM);

    // ok?
    if (ok == 0)
    {
        // sanity: the returned size must be a whole number of entries
        if (!(size % sizeof(struct kinfo_proc)))
        {
            tb_size_t i = 0;
            tb_size_t n = size / sizeof(struct kinfo_proc);

            // try accurate name first
            for (i = 0; i < n; i++)
            {
                if (!tb_stricmp(p[i].kp_proc.p_comm, name))
                {
                    tb_trace_i("name: %s, pid: %u", p[i].kp_proc.p_comm, p[i].kp_proc.p_pid);
                    pid = p[i].kp_proc.p_pid;
                    break;
                }
            }

            // try prefix match as a fallback
            if (!pid)
            {
                for (i = 0; i < n; i++)
                {
                    if (!tb_strnicmp(p[i].kp_proc.p_comm, name, tb_strlen(name)))
                    {
                        tb_trace_i("name: %s, pid: %u", p[i].kp_proc.p_comm, p[i].kp_proc.p_pid);
                        pid = p[i].kp_proc.p_pid;
                        break;
                    }
                }
            }
        }
    }

    // free
    if (p) tb_free(p);

    // ok
    return pid;
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * private implementation
 */

/* get an environment variable value (windows).
 *
 * reads the variable as UTF-16 via GetEnvironmentVariableW (growing the
 * buffer once when the api reports a larger required size), then converts
 * it to a multi-byte string.
 *
 * @param name      the variable name
 * @param psize     optional output for the value length
 *
 * @return          the value (caller frees), or tb_null if missing/failed
 */
static tb_char_t* tb_environment_get_impl(tb_char_t const* name, tb_size_t* psize)
{
    // check
    tb_assert_and_check_return_val(name, 0);

    // done
    tb_bool_t   ok = tb_false;
    tb_size_t   size = 0;
    tb_size_t   maxn = 256;
    tb_char_t*  value = tb_null;
    tb_wchar_t* value_w = tb_null;
    do
    {
        // make value_w
        value_w = (tb_wchar_t*)tb_malloc0(sizeof(tb_wchar_t) * maxn);
        tb_assert_and_check_break(value_w);

        // make name: convert to UTF-16 for the wide api
        tb_wchar_t name_w[512];
        tb_size_t name_n = tb_atow(name_w, name, tb_arrayn(name_w));
        tb_assert_and_check_break(name_n != -1);

        // get it
        size = (tb_size_t)tb_kernel32()->GetEnvironmentVariableW(name_w, value_w, (DWORD)maxn);
        if (!size)
        {
            // error?
            if (ERROR_ENVVAR_NOT_FOUND == GetLastError())
            {
                // trace
                tb_trace_d("environment variable(%s) does not exist", name);
            }
            break;
        }
        // buffer too small? the api returned the required size including the terminator
        else if (size > maxn)
        {
            // grow space
            value_w = (tb_wchar_t*)tb_ralloc(value_w, sizeof(tb_wchar_t) * (size + 1));
            tb_assert_and_check_break(value_w);

            // get it again with the grown buffer
            size = (tb_size_t)tb_kernel32()->GetEnvironmentVariableW(name_w, value_w, (DWORD)size + 1);
            tb_assert_and_check_break(size);
        }

        // make value
        value = (tb_char_t*)tb_malloc0(sizeof(tb_char_t) * (size + 1));
        tb_assert_and_check_break(value);

        // save value: convert back to a multi-byte string
        if ((size = tb_wtoa(value, value_w, size)) == -1) break;

        // save size
        if (psize) *psize = size;

        // ok
        ok = tb_true;

    } while (0);

    // failed?
    if (!ok)
    {
        // exit value
        if (value) tb_free(value);
        value = tb_null;
    }

    // exit value_w
    if (value_w) tb_free(value_w);
    value_w = tb_null;

    // ok?
    return value;
}
/* load the ipv4/hardware addresses of all adapters into the interfaces list
 *
 * @param interfaces    the interfaces list to fill (tb_ifaddrs_interface_t items)
 */
static tb_void_t tb_ifaddrs_interface_load4(tb_list_ref_t interfaces)
{
    // check
    tb_assert_and_check_return(interfaces);

    // done
    PIP_ADAPTER_INFO adapter_info = tb_null;
    do
    {
        // make the adapter info
        adapter_info = tb_malloc0_type(IP_ADAPTER_INFO);
        tb_assert_and_check_break(adapter_info);

        // get the real adapter info size
        ULONG size = sizeof(IP_ADAPTER_INFO);
        if (tb_iphlpapi()->GetAdaptersInfo(adapter_info, &size) == ERROR_BUFFER_OVERFLOW)
        {
            // grow the adapter info buffer: keep the old pointer so it is not leaked on failure
            PIP_ADAPTER_INFO adapter_info2 = (PIP_ADAPTER_INFO)tb_ralloc(adapter_info, size);
            tb_assert_and_check_break(adapter_info2);
            adapter_info = adapter_info2;

            // reclear it
            tb_memset(adapter_info, 0, size);
        }

        // get the adapter info
        if (tb_iphlpapi()->GetAdaptersInfo(adapter_info, &size) != NO_ERROR) break;

        // walk the adapter list
        PIP_ADAPTER_INFO adapter = adapter_info;
        while (adapter)
        {
            // check
            tb_assert(adapter->AdapterName);

            /* attempt to get the interface from the cached interfaces
             * and make a new interface if no the cached interface
             *
             * note: named iface to avoid the `interface` macro in Windows COM headers
             */
            tb_ifaddrs_interface_t      iface_new = {0};
            tb_ifaddrs_interface_ref_t  iface = tb_ifaddrs_interface_find((tb_iterator_ref_t)interfaces, adapter->AdapterName);
            if (!iface) iface = &iface_new;

            // check
            tb_assert(iface == &iface_new || iface->name);

            // save flags
            if (adapter->Type == MIB_IF_TYPE_LOOPBACK) iface->flags |= TB_IFADDRS_INTERFACE_FLAG_IS_LOOPBACK;

            // save hwaddr
            if (adapter->AddressLength == sizeof(iface->hwaddr.u8))
            {
                iface->flags |= TB_IFADDRS_INTERFACE_FLAG_HAVE_HWADDR;
                tb_memcpy(iface->hwaddr.u8, adapter->Address, sizeof(iface->hwaddr.u8));
            }

            // save ipaddrs until both the ipv4 and ipv6 addresses have been found
            PIP_ADDR_STRING ipAddress = &adapter->IpAddressList;
            while ( ipAddress
                &&  (iface->flags & TB_IFADDRS_INTERFACE_FLAG_HAVE_IPADDR) != TB_IFADDRS_INTERFACE_FLAG_HAVE_IPADDR)
            {
                // parse the address string and save it by family
                tb_ipaddr_t ipaddr;
                if (    ipAddress->IpAddress.String
                    &&  tb_ipaddr_ip_cstr_set(&ipaddr, ipAddress->IpAddress.String, TB_IPADDR_FAMILY_NONE))
                {
                    if (ipaddr.family == TB_IPADDR_FAMILY_IPV4)
                    {
                        iface->flags |= TB_IFADDRS_INTERFACE_FLAG_HAVE_IPADDR4;
                        iface->ipaddr4 = ipaddr.u.ipv4;
                    }
                    else if (ipaddr.family == TB_IPADDR_FAMILY_IPV6)
                    {
                        iface->flags |= TB_IFADDRS_INTERFACE_FLAG_HAVE_IPADDR6;
                        iface->ipaddr6 = ipaddr.u.ipv6;
                    }
                }

                // the next
                ipAddress = ipAddress->Next;
            }

            // new interface? save it
            if (iface == &iface_new && iface->flags)
            {
                // save interface name
                iface->name = tb_strdup(adapter->AdapterName);
                tb_assert(iface->name);

                // save interface
                tb_list_insert_tail(interfaces, iface);
            }

            // the next adapter
            adapter = adapter->Next;
        }

    } while (0);

    // exit the adapter info
    if (adapter_info) tb_free(adapter_info);
    adapter_info = tb_null;
}
/* the aiop spak loop: wait for aio events and dispatch them until the aicp is killed
 *
 * @param priv      the tb_aiop_ptor_impl_t pointer
 *
 * @return          always tb_null (thread return value)
 */
static tb_pointer_t tb_aiop_spak_loop(tb_cpointer_t priv)
{
    // check
    tb_aiop_ptor_impl_t*    impl = (tb_aiop_ptor_impl_t*)priv;
    tb_aicp_impl_t*         aicp = impl? impl->base.aicp : tb_null;

    // done
    do
    {
        // check
        tb_assert_and_check_break(impl && impl->aiop && impl->list && impl->timer && impl->ltimer && aicp);

        // trace
        tb_trace_d("loop: init");

        // loop
        while (!tb_atomic_get(&aicp->kill))
        {
            // the delay
            tb_size_t delay = tb_timer_delay(impl->timer);

            // the ldelay
            tb_size_t ldelay = tb_ltimer_delay(impl->ltimer);
            tb_assert_and_check_break(ldelay != -1);

            // trace
            tb_trace_d("loop: wait: ..");

            // wait aioe with the smaller of the two timer delays
            tb_long_t real = tb_aiop_wait(impl->aiop, impl->list, impl->maxn, tb_min(delay, ldelay));

            // trace
            tb_trace_d("loop: wait: %ld", real);

            // spak ctime
            tb_cache_time_spak();

            // spak timer
            if (!tb_timer_spak(impl->timer)) break;

            // spak ltimer
            if (!tb_ltimer_spak(impl->ltimer)) break;

            // killed?
            tb_check_break(real >= 0);

            // error? out of range
            tb_assert_and_check_break(real <= impl->maxn);

            // timeout?
            tb_check_continue(real);

            // grow it if aioe is full
            if (real == impl->maxn)
            {
                // grow size, capped at the aicp capacity
                impl->maxn += (aicp->maxn >> 4) + 16;
                if (impl->maxn > aicp->maxn) impl->maxn = aicp->maxn;

                // grow list: keep the old list alive so a failed realloc
                // neither leaks it nor leaves impl->list dangling at tb_null
                tb_aioe_ref_t list = (tb_aioe_ref_t)tb_ralloc(impl->list, impl->maxn * sizeof(tb_aioe_t));
                tb_assert_and_check_break(list);
                impl->list = list;
            }

            // walk aioe list
            tb_size_t i = 0;
            tb_bool_t end = tb_false;
            for (i = 0; i < (tb_size_t)real && !end; i++)
            {
                // the aioe
                tb_aioe_ref_t aioe = &impl->list[i];
                tb_assert_and_check_break_state(aioe, end, tb_true);

                // the aice
                tb_aice_ref_t aice = (tb_aice_ref_t)aioe->priv;
                tb_assert_and_check_break_state(aice, end, tb_true);

                // the aico
                tb_aiop_aico_t* aico = (tb_aiop_aico_t*)aice->aico;
                tb_assert_and_check_break_state(aico, end, tb_true);

                // have wait?
                tb_check_continue(aice->code);

                // have been waited ok for the timer timeout/killed func? need not spak it repeatly
                tb_check_continue(!aico->wait_ok);

                // sock?
                if (aico->base.type == TB_AICO_TYPE_SOCK)
                {
                    // push the acpt aice
                    if (aice->code == TB_AICE_CODE_ACPT) end = tb_aiop_push_acpt(impl, aice)? tb_false : tb_true;
                    // push the sock aice
                    else end = tb_aiop_push_sock(impl, aice)? tb_false : tb_true;
                }
                else if (aico->base.type == TB_AICO_TYPE_FILE)
                {
                    // poll file
                    tb_aicp_file_poll(impl);
                }
                else tb_assert(0);
            }

            // end?
            tb_check_break(!end);

            // work it
            tb_aiop_spak_work(impl);
        }

    } while (0);

    // trace
    tb_trace_d("loop: exit");

    // kill the aicp so the other loops stop too
    tb_aicp_kill((tb_aicp_ref_t)aicp);

    // exit this thread
    tb_thread_return(tb_null);
    return tb_null;
}