tb_size_t tb_wcslcpy(tb_wchar_t* s1, tb_wchar_t const* s2, tb_size_t n)
{
    // check
    tb_assert_and_check_return_val(s1 && s2, 0);

    // no size or same?
    tb_check_return_val(n && s1 != s2, tb_wcslen(s1));

    // copy
#if 0
    tb_wchar_t const* s = s2;
    --n;
    while (*s1 = *s2)
    {
        if (n)
        {
            --n;
            ++s1;
        }
        ++s2;
    }
    return s2 - s;
#else
    tb_size_t sn = tb_wcslen(s2);
    tb_memcpy(s1, s2, tb_min(sn + 1, n) * sizeof(tb_wchar_t));
    return tb_min(sn, n);
#endif
}
tb_wchar_t* tb_wcsncpy(tb_wchar_t* s1, tb_wchar_t const* s2, tb_size_t n)
{
    // check
    tb_assert_and_check_return_val(s1 && s2, s1);

    // no size or same?
    tb_check_return_val(n && s1 != s2, s1);

    // copy
#if 0
    tb_wchar_t* s = s1;
    while (n)
    {
        if (*s = *s2) s2++;
        ++s;
        --n;
    }
    return s1;
#else
    // copy the source characters, then zero-fill the rest as strncpy does
    tb_size_t sn = tb_wcslen(s2);
    tb_size_t cn = tb_min(sn, n);
    tb_size_t fn = sn < n? n - sn : 0;
    tb_memcpy(s1, s2, cn * sizeof(tb_wchar_t));
    if (fn) tb_memset(s1 + cn, 0, fn * sizeof(tb_wchar_t));
    return s1;
#endif
}
tb_size_t tb_backtrace_frames(tb_pointer_t* frames, tb_size_t nframe, tb_size_t nskip)
{
    // note: cannot use assert
    tb_check_return_val(frames && nframe, 0);

    // skip some frames?
    if (nskip)
    {
        // init temp frames
        tb_pointer_t temp[256] = {0};
        tb_check_return_val(nframe + nskip < 256, 0);

        // done backtrace
        tb_size_t size = backtrace(temp, nframe + nskip);
        tb_check_return_val(nskip < size, 0);

        // update nframe
        nframe = tb_min(nframe, size - nskip);

        // save to frames
        tb_memcpy_(frames, temp + nskip, nframe * sizeof(tb_pointer_t));
    }
    // backtrace
    else nframe = backtrace(frames, nframe);

    // ok?
    return nframe;
}
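/* Usage sketch (not part of the library): capture the current call stack with
 * tb_backtrace_frames() and print the raw frame pointers. The frame count and
 * the skip of one frame are illustrative choices; tb_trace_i is the tbox trace
 * macro used elsewhere in this section.
 */
static tb_void_t tb_backtrace_frames_example()
{
    // capture up to 16 frames, skipping this function's own frame
    tb_pointer_t frames[16] = {0};
    tb_size_t    nframe = tb_backtrace_frames(frames, 16, 1);

    // print them
    tb_size_t i = 0;
    for (i = 0; i < nframe; i++) tb_trace_i("frame[%lu]: %p", i, frames[i]);
}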
static tb_bool_t tb_directory_walk_impl(tb_char_t const* path, tb_bool_t recursion, tb_bool_t prefix, tb_directory_walk_func_t func, tb_cpointer_t priv)
{
    // check
    tb_assert_and_check_return_val(path && func, tb_false);

    // last
    tb_long_t last = tb_strlen(path) - 1;
    tb_assert_and_check_return_val(last >= 0, tb_false);

    // done
    tb_bool_t   ok = tb_true;
    tb_char_t   temp[4096] = {0};
    DIR*        directory = tb_null;
    if ((directory = opendir(path)))
    {
        // walk
        struct dirent* item = tb_null;
        while ((item = readdir(directory)))
        {
            // check
            tb_assert_and_check_continue(item->d_reclen);

            // the item name
            tb_char_t name[1024] = {0};
            tb_strncpy(name, item->d_name, tb_min(item->d_reclen, sizeof(name) - 1));
            if (tb_strcmp(name, ".") && tb_strcmp(name, ".."))
            {
                // the temp path
                tb_long_t n = tb_snprintf(temp, 4095, "%s%s%s", path, path[last] == '/'? "" : "/", name);
                if (n >= 0) temp[n] = '\0';

                // the file info
                tb_file_info_t info = {0};
                if (tb_file_info(temp, &info))
                {
                    // do callback
                    if (prefix) ok = func(temp, &info, priv);
                    tb_check_break(ok);

                    // walk to the next directory
                    if (info.type == TB_FILE_TYPE_DIRECTORY && recursion) ok = tb_directory_walk_impl(temp, recursion, prefix, func, priv);
                    tb_check_break(ok);

                    // do callback
                    if (!prefix) ok = func(temp, &info, priv);
                    tb_check_break(ok);
                }
            }
        }

        // exit directory
        closedir(directory);
    }

    // continue?
    return ok;
}
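/* Usage sketch (not part of the library): a walk callback following the
 * convention above (return tb_false to stop the walk). The callback signature
 * and a public entry point tb_directory_walk wrapping tb_directory_walk_impl
 * are assumed here; the walked path is a placeholder.
 */
static tb_bool_t tb_demo_walk_func(tb_char_t const* path, tb_file_info_t const* info, tb_cpointer_t priv)
{
    // print each visited entry
    tb_trace_i("%s: %s", info->type == TB_FILE_TYPE_DIRECTORY? "dir " : "file", path);

    // continue walking
    return tb_true;
}

static tb_void_t tb_demo_walk()
{
    // walk /tmp recursively, calling back before descending (prefix order)
    tb_directory_walk("/tmp", tb_true, tb_true, tb_demo_walk_func, tb_null);
}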
/* //////////////////////////////////////////////////////////////////////////////////////
 * interfaces
 */
tb_size_t tb_strlcpy(tb_char_t* s1, tb_char_t const* s2, tb_size_t n)
{
    // check
#ifdef __tb_debug__
    {
        // overflow dst?
        tb_size_t n2 = tb_strlen(s2);

        // strlcpy overflow?
        tb_size_t n1 = tb_pool_data_size(s1);
        if (n1 && tb_min(n2, n) + 1 > n1)
        {
            tb_trace_i("[strlcpy]: [overflow]: [%p, %lu] => [%p, %lu]", s2, tb_min(n2, n), s1, n1);
            tb_backtrace_dump("[strlcpy]: [overflow]: ", tb_null, 10);
            tb_pool_data_dump(s2, tb_true, "\t[malloc]: [from]: ");
            tb_abort();
        }
    }
#endif

    // done
    return tb_strlcpy_impl(s1, s2, n);
}
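/* Usage sketch (not part of the library), assuming tb_strlcpy_impl mirrors the
 * tb_wcslcpy logic above: it copies tb_min(strlen(s2) + 1, n) bytes, so on
 * truncation the terminator is not written. A caller that may truncate should
 * terminate explicitly, as below; the buffer name and size are illustrative.
 */
static tb_void_t tb_strlcpy_example()
{
    tb_char_t path[8];

    // reserve one byte so the buffer can always be terminated
    tb_size_t size = tb_strlcpy(path, "/very/long/path", sizeof(path) - 1);
    path[tb_min(size, sizeof(path) - 1)] = '\0';

    // a result >= the passed limit signals truncation under these semantics
    if (size >= sizeof(path) - 1) tb_trace_i("truncated: %s", path);
}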
static tb_long_t tb_lo_scheduler_io_timer_delay(tb_lo_scheduler_io_ref_t scheduler_io)
{
    // check
    tb_assert(scheduler_io && scheduler_io->timer && scheduler_io->ltimer);

    // the delay
    tb_size_t delay = tb_timer_delay(scheduler_io->timer);

    // the ldelay
    tb_size_t ldelay = tb_ltimer_delay(scheduler_io->ltimer);

    // return the timer delay
    return tb_min(delay, ldelay);
}
void tb_printf(void)
{
    int j, pos;
    unsigned long saved_options = tb_control->options;

    tb_set_option(TB_OPTION_STOP);

    WLAN_OS_REPORT(("Trace Dump:\n"));
    WLAN_OS_REPORT(("===========\n\n"));

    if (tb_control->count < TB_NUM_ENTRIES)
    {
        pos = 0;
    }
    else
    {
        pos = (tb_control->pos + 1) % TB_NUM_ENTRIES;
    }

    for (j = 0; (unsigned int)j < tb_min((unsigned int)TB_NUM_ENTRIES, (unsigned int)tb_control->count); j++)
    {
        WLAN_OS_REPORT(("%4i id=0x%8x %s \n", j, tb_control->entry[pos].loc, tb_control->entry[pos].msg));
        pos = (pos + 1) % TB_NUM_ENTRIES;
    }

    tb_control->options = saved_options;
}
void tb_dump(void)
{
    int j, pos;

    WLAN_OS_REPORT(("Trace Dump:\n"));
    WLAN_OS_REPORT(("===========\n\n"));

    if (tb_control->count < TB_NUM_ENTRIES)
    {
        pos = 0;
    }
    else
    {
        pos = (tb_control->pos + 1) % TB_NUM_ENTRIES;
    }

    for (j = 0; (unsigned int)j < tb_min((unsigned int)TB_NUM_ENTRIES, (unsigned int)tb_control->count); j++)
    {
        WLAN_OS_REPORT(("%4i %08x %08x %08x %08x\n", j,
                        (int)tb_control->entry[pos].ts,
                        (int)tb_control->entry[pos].loc,
                        (int)tb_control->entry[pos].p1,
                        (int)tb_control->entry[pos].p2));
        pos = (pos + 1) % TB_NUM_ENTRIES;
    }
}
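/* A minimal sketch of the ring-buffer read logic shared by tb_printf() and
 * tb_dump() above: before the buffer wraps, entries start at slot 0; after it
 * wraps, the slot just after the most recent write holds the oldest entry.
 */
static int tb_trace_first_index(int count, int pos, int capacity)
{
    // not yet wrapped: the oldest entry is still at slot 0
    if (count < capacity) return 0;

    // wrapped: the slot after the last write is the oldest
    return (pos + 1) % capacity;
}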
static tb_long_t tb_aiop_rtor_epoll_wait(tb_aiop_rtor_impl_t* rtor, tb_aioe_ref_t list, tb_size_t maxn, tb_long_t timeout)
{
    // check
    tb_aiop_rtor_epoll_impl_t* impl = (tb_aiop_rtor_epoll_impl_t*)rtor;
    tb_assert_and_check_return_val(impl && impl->epfd > 0, -1);

    // the aiop
    tb_aiop_impl_t* aiop = rtor->aiop;
    tb_assert_and_check_return_val(aiop, -1);

    // init grow
    tb_size_t grow = tb_align8((rtor->aiop->maxn >> 3) + 1);

    // init events
    if (!impl->evts)
    {
        impl->evtn = grow;
        impl->evts = tb_nalloc0(impl->evtn, sizeof(struct epoll_event));
        tb_assert_and_check_return_val(impl->evts, -1);
    }

    // wait events
    tb_long_t evtn = epoll_wait(impl->epfd, impl->evts, impl->evtn, timeout);

    // interrupted?(for gdb?) continue it
    if (evtn < 0 && errno == EINTR) return 0;

    // check error?
    tb_assert_and_check_return_val(evtn >= 0 && evtn <= impl->evtn, -1);

    // timeout?
    tb_check_return_val(evtn, 0);

    // grow it if events is full
    if (evtn == impl->evtn)
    {
        // grow size
        impl->evtn += grow;
        if (impl->evtn > rtor->aiop->maxn) impl->evtn = rtor->aiop->maxn;

        // grow data
        impl->evts = tb_ralloc(impl->evts, impl->evtn * sizeof(struct epoll_event));
        tb_assert_and_check_return_val(impl->evts, -1);
    }
    tb_assert(evtn <= impl->evtn);

    // limit
    evtn = tb_min(evtn, maxn);

    // sync
    tb_size_t i = 0;
    tb_size_t wait = 0;
    for (i = 0; i < evtn; i++)
    {
        // the aioo
        tb_aioo_impl_t* aioo = (tb_aioo_impl_t*)tb_u2p(impl->evts[i].data.u64);
        tb_assert_and_check_return_val(aioo, -1);

        // the sock
        tb_socket_ref_t sock = aioo->sock;
        tb_assert_and_check_return_val(sock, -1);

        // the events
        tb_size_t events = impl->evts[i].events;

        // spak?
        if (sock == aiop->spak[1] && (events & EPOLLIN))
        {
            // read spak
            tb_char_t spak = '\0';
            if (1 != tb_socket_recv(aiop->spak[1], (tb_byte_t*)&spak, 1)) return -1;

            // killed?
            if (spak == 'k') return -1;

            // continue it
            continue ;
        }

        // skip spak
        tb_check_continue(sock != aiop->spak[1]);

        // save aioe
        tb_aioe_ref_t aioe = &list[wait++];
        aioe->code = TB_AIOE_CODE_NONE;
        aioe->priv = aioo->priv;
        aioe->aioo = (tb_aioo_ref_t)aioo;
        if (events & EPOLLIN)
        {
            aioe->code |= TB_AIOE_CODE_RECV;
            if (aioo->code & TB_AIOE_CODE_ACPT) aioe->code |= TB_AIOE_CODE_ACPT;
        }
        if (events & EPOLLOUT)
        {
            aioe->code |= TB_AIOE_CODE_SEND;
            if (aioo->code & TB_AIOE_CODE_CONN) aioe->code |= TB_AIOE_CODE_CONN;
        }
        if ((events & (EPOLLHUP | EPOLLERR)) && !(aioe->code & (TB_AIOE_CODE_RECV | TB_AIOE_CODE_SEND)))
            aioe->code |= TB_AIOE_CODE_RECV | TB_AIOE_CODE_SEND;

        // oneshot? clear it
        if (aioo->code & TB_AIOE_CODE_ONESHOT)
        {
            // clear code
            aioo->code = TB_AIOE_CODE_NONE;
            aioo->priv = tb_null;

            // clear events manually if no epoll oneshot
#ifndef EPOLLONESHOT
            struct epoll_event e = {0};
            if (epoll_ctl(impl->epfd, EPOLL_CTL_DEL, tb_sock2fd(aioo->sock), &e) < 0)
            {
                // trace
                tb_trace_e("clear aioo[%p] failed manually for oneshot, error: %d", aioo, errno);
            }
#endif
        }
    }

    // ok
    return wait;
}
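/* A minimal sketch of the grow-on-full policy used above: when epoll_wait()
 * fills the whole event array, the array grows by a fixed step derived from
 * the aiop capacity (one eighth, 8-aligned), clamped to that capacity.
 */
static tb_size_t tb_demo_evtn_grow(tb_size_t evtn, tb_size_t maxn)
{
    // the same step as the wait function computes
    tb_size_t grow = tb_align8((maxn >> 3) + 1);

    // grow and clamp to the capacity
    evtn += grow;
    return evtn > maxn? maxn : evtn;
}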
tb_bool_t tb_environment_add(tb_char_t const* name, tb_char_t const* values, tb_bool_t to_head)
{
    // check
    tb_assert_and_check_return_val(name && values, tb_false);

    // find the first separator position
    tb_bool_t ok = tb_false;
    tb_char_t const* p = values? tb_strchr(values, TB_ENVIRONMENT_SEP) : tb_null;
    if (p)
    {
        // init filter
        tb_hash_set_ref_t filter = tb_hash_set_init(8, tb_element_str(tb_true));

        // init environment
        tb_char_t               data[TB_PATH_MAXN];
        tb_environment_ref_t    environment = tb_environment_init();
        if (environment)
        {
            // load the previous values
            tb_environment_load(environment, name);

            // make environment
            tb_char_t const* b = values;
            tb_char_t const* e = b + tb_strlen(values);
            do
            {
                // not empty?
                if (b < p)
                {
                    // the size
                    tb_size_t size = tb_min(p - b, sizeof(data) - 1);

                    // copy it
                    tb_strncpy(data, b, size);
                    data[size] = '\0';

                    // not inserted yet?
                    if (!filter || !tb_hash_set_get(filter, data))
                    {
                        // append the environment
                        tb_environment_insert(environment, data, to_head);

                        // save it to the filter
                        if (filter) tb_hash_set_insert(filter, data);
                    }
                }

                // end?
                tb_check_break(p + 1 < e);

                // find the next separator position
                b = p + 1;
                p = tb_strchr(b, TB_ENVIRONMENT_SEP);
                if (!p) p = e;

            } while (1);

            // set environment variables
            ok = tb_environment_save(environment, name);

            // exit environment
            tb_environment_exit(environment);
        }

        // exit filter
        if (filter) tb_hash_set_exit(filter);
        filter = tb_null;
    }
    // only one?
    else
    {
        // set environment variables
        tb_environment_ref_t environment = tb_environment_init();
        if (environment)
        {
            // load the previous values
            tb_environment_load(environment, name);

            // append the environment
            tb_environment_insert(environment, values, to_head);

            // set environment variables
            ok = tb_environment_save(environment, name);

            // exit environment
            tb_environment_exit(environment);
        }
    }

    // ok?
    return ok;
}
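/* Usage sketch (not part of the library): prepend two directories to PATH in
 * one call. The values string is split on TB_ENVIRONMENT_SEP (':' on posix,
 * ';' on windows) and de-duplicated by the hash-set filter above; the
 * directory names are placeholders.
 */
static tb_void_t tb_environment_add_example()
{
    // insert both entries at the head of PATH, skipping duplicates
    if (!tb_environment_add("PATH", "/opt/bin:/usr/local/bin", tb_true))
        tb_trace_i("add PATH failed");
}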
/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */
tb_size_t tb_directory_temporary(tb_char_t* path, tb_size_t maxn)
{
    // check
    tb_assert_and_check_return_val(path && maxn > 4, 0);

    // the jvm
    JavaVM* jvm = tb_android_jvm();
    tb_assert_and_check_return_val(jvm, 0);

    // attempt to get jni environment first
    JNIEnv*     jenv = tb_null;
    tb_bool_t   jattached = tb_false;
    if ((*jvm)->GetEnv(jvm, (tb_pointer_t*)&jenv, JNI_VERSION_1_4) != JNI_OK)
    {
        // bind jni environment
        if ((*jvm)->AttachCurrentThread(jvm, &jenv, tb_null) != JNI_OK) return 0;

        // attach ok
        jattached = tb_true;
    }
    tb_assert_and_check_return_val(jenv, 0);

    // enter
    if ((*jenv)->PushLocalFrame(jenv, 10) < 0) return 0;

    // done
    tb_size_t   size = 0;
    jboolean    error = tb_false;
    do
    {
        // get the environment class
        jclass environment = (*jenv)->FindClass(jenv, "android/os/Environment");
        tb_assert_and_check_break(!(error = (*jenv)->ExceptionCheck(jenv)) && environment);

        // get the getDownloadCacheDirectory func
        jmethodID getDownloadCacheDirectory_func = (*jenv)->GetStaticMethodID(jenv, environment, "getDownloadCacheDirectory", "()Ljava/io/File;");
        tb_assert_and_check_break(getDownloadCacheDirectory_func);

        // get the download cache directory
        jobject directory = (*jenv)->CallStaticObjectMethod(jenv, environment, getDownloadCacheDirectory_func);
        tb_assert_and_check_break(!(error = (*jenv)->ExceptionCheck(jenv)) && directory);

        // get file class
        jclass file_class = (*jenv)->GetObjectClass(jenv, directory);
        tb_assert_and_check_break(!(error = (*jenv)->ExceptionCheck(jenv)) && file_class);

        // get the getPath func
        jmethodID getPath_func = (*jenv)->GetMethodID(jenv, file_class, "getPath", "()Ljava/lang/String;");
        tb_assert_and_check_break(getPath_func);

        // get the directory path
        jstring path_jstr = (jstring)(*jenv)->CallObjectMethod(jenv, directory, getPath_func);
        tb_assert_and_check_break(!(error = (*jenv)->ExceptionCheck(jenv)) && path_jstr);

        // get the path string length
        size = (tb_size_t)(*jenv)->GetStringLength(jenv, path_jstr);
        tb_assert_and_check_break(size);

        // get the path string
        tb_char_t const* path_cstr = (*jenv)->GetStringUTFChars(jenv, path_jstr, tb_null);
        tb_assert_and_check_break(path_cstr);

        // trace
        tb_trace_d("temp: %s", path_cstr);

        // copy it
        tb_size_t need = tb_min(size + 1, maxn);
        tb_strlcpy(path, path_cstr, need);

        // exit the path string
        (*jenv)->ReleaseStringUTFChars(jenv, path_jstr, path_cstr);

    } while (0);

    // exception? clear it
    if (error) (*jenv)->ExceptionClear(jenv);

    // leave
    (*jenv)->PopLocalFrame(jenv, tb_null);

    // detach it?
    if (jattached)
    {
        // detach jni environment
        if ((*jvm)->DetachCurrentThread(jvm) == JNI_OK) jattached = tb_false;
    }

    // ok?
    return size;
}
static tb_pointer_t tb_aiop_spak_loop(tb_cpointer_t priv)
{
    // check
    tb_aiop_ptor_impl_t*    impl = (tb_aiop_ptor_impl_t*)priv;
    tb_aicp_impl_t*         aicp = impl? impl->base.aicp : tb_null;

    // done
    do
    {
        // check
        tb_assert_and_check_break(impl && impl->aiop && impl->list && impl->timer && impl->ltimer && aicp);

        // trace
        tb_trace_d("loop: init");

        // loop
        while (!tb_atomic_get(&aicp->kill))
        {
            // the delay
            tb_size_t delay = tb_timer_delay(impl->timer);

            // the ldelay
            tb_size_t ldelay = tb_ltimer_delay(impl->ltimer);
            tb_assert_and_check_break(ldelay != -1);

            // trace
            tb_trace_d("loop: wait: ..");

            // wait aioe
            tb_long_t real = tb_aiop_wait(impl->aiop, impl->list, impl->maxn, tb_min(delay, ldelay));

            // trace
            tb_trace_d("loop: wait: %ld", real);

            // spak ctime
            tb_cache_time_spak();

            // spak timer
            if (!tb_timer_spak(impl->timer)) break;

            // spak ltimer
            if (!tb_ltimer_spak(impl->ltimer)) break;

            // killed?
            tb_check_break(real >= 0);

            // error? out of range
            tb_assert_and_check_break(real <= impl->maxn);

            // timeout?
            tb_check_continue(real);

            // grow it if aioe is full
            if (real == impl->maxn)
            {
                // grow size
                impl->maxn += (aicp->maxn >> 4) + 16;
                if (impl->maxn > aicp->maxn) impl->maxn = aicp->maxn;

                // grow list
                impl->list = tb_ralloc(impl->list, impl->maxn * sizeof(tb_aioe_t));
                tb_assert_and_check_break(impl->list);
            }

            // walk aioe list
            tb_size_t i = 0;
            tb_bool_t end = tb_false;
            for (i = 0; i < real && !end; i++)
            {
                // the aioe
                tb_aioe_ref_t aioe = &impl->list[i];
                tb_assert_and_check_break_state(aioe, end, tb_true);

                // the aice
                tb_aice_ref_t aice = (tb_aice_ref_t)aioe->priv;
                tb_assert_and_check_break_state(aice, end, tb_true);

                // the aico
                tb_aiop_aico_t* aico = (tb_aiop_aico_t*)aice->aico;
                tb_assert_and_check_break_state(aico, end, tb_true);

                // have wait?
                tb_check_continue(aice->code);

                // have been waited ok for the timer timeout/killed func? need not spak it repeatedly
                tb_check_continue(!aico->wait_ok);

                // sock?
                if (aico->base.type == TB_AICO_TYPE_SOCK)
                {
                    // push the acpt aice
                    if (aice->code == TB_AICE_CODE_ACPT) end = tb_aiop_push_acpt(impl, aice)? tb_false : tb_true;
                    // push the sock aice
                    else end = tb_aiop_push_sock(impl, aice)? tb_false : tb_true;
                }
                else if (aico->base.type == TB_AICO_TYPE_FILE)
                {
                    // poll file
                    tb_aicp_file_poll(impl);
                }
                else tb_assert(0);
            }

            // end?
            tb_check_break(!end);

            // work it
            tb_aiop_spak_work(impl);
        }

    } while (0);

    // trace
    tb_trace_d("loop: exit");

    // kill
    tb_aicp_kill((tb_aicp_ref_t)aicp);

    // exit
    tb_thread_return(tb_null);
    return tb_null;
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * main
 */
tb_int_t tb_demo_asio_aiopd_main(tb_int_t argc, tb_char_t** argv)
{
    // check
    tb_assert_and_check_return_val(argv[1], 0);

    // done
    tb_socket_ref_t sock = tb_null;
    tb_aiop_ref_t   aiop = tb_null;
    do
    {
        // init sock
        sock = tb_socket_init(TB_SOCKET_TYPE_TCP);
        tb_assert_and_check_break(sock);

        // init aiop
        aiop = tb_aiop_init(16);
        tb_assert_and_check_break(aiop);

        // bind
        if (!tb_socket_bind(sock, tb_null, 9090)) break;

        // listen sock
        if (!tb_socket_listen(sock, 20)) break;

        // addo sock
        if (!tb_aiop_addo(aiop, sock, TB_AIOE_CODE_ACPT, tb_null)) break;

        // accept
        tb_aioe_t list[16];
        while (1)
        {
            // wait
            tb_long_t objn = tb_aiop_wait(aiop, list, 16, -1);
            tb_assert_and_check_break(objn >= 0);

            // walk list
            tb_size_t i = 0;
            for (i = 0; i < objn; i++)
            {
                // the aioo
                tb_aioo_ref_t aioo = list[i].aioo;

                // check
                tb_assert_and_check_break(aioo && tb_aioo_sock(aioo));

                // acpt?
                if (list[i].code & TB_AIOE_CODE_ACPT)
                {
                    // done acpt
                    tb_bool_t           ok = tb_false;
                    tb_demo_context_t*  context = tb_null;
                    do
                    {
                        // make context
                        context = tb_malloc0_type(tb_demo_context_t);
                        tb_assert_and_check_break(context);

                        // init sock
                        context->sock = tb_socket_accept(tb_aioo_sock(aioo), tb_null, tb_null);
                        tb_assert_and_check_break(context->sock);

                        // init file
                        context->file = tb_file_init(argv[1], TB_FILE_MODE_RO);
                        tb_assert_and_check_break(context->file);

                        // init data
                        context->data = tb_malloc_bytes(TB_DEMO_FILE_READ_MAXN);
                        tb_assert_and_check_break(context->data);

                        // addo sock
                        context->aioo = tb_aiop_addo(aiop, context->sock, TB_AIOE_CODE_SEND, context);
                        tb_assert_and_check_break(context->aioo);

                        // trace
                        tb_trace_i("acpt[%p]: ok", context->sock);

                        // init left
                        context->left = tb_file_size(context->file);

                        // done read
                        tb_long_t real = tb_file_read(context->file, context->data, tb_min((tb_size_t)context->left, TB_DEMO_FILE_READ_MAXN));
                        tb_assert_and_check_break(real > 0);

                        // save size
                        context->left -= real;

                        // trace
//                      tb_trace_i("read[%p]: real: %ld", context->file, real);

                        // done send
                        context->send = real;
                        real = tb_socket_send(context->sock, context->data + context->real, context->send - context->real);
                        if (real >= 0)
                        {
                            // save real
                            context->real += real;

                            // trace
//                          tb_trace_i("send[%p]: real: %ld", context->sock, real);
                        }
                        else
                        {
                            // trace
                            tb_trace_i("send[%p]: closed", context->sock);
                            break;
                        }

                        // ok
                        ok = tb_true;

                    } while (0);

                    // failed or closed?
                    if (!ok)
                    {
                        // exit context
                        tb_demo_context_exit(aiop, context);
                        break;
                    }
                }
                // writ?
                else if (list[i].code & TB_AIOE_CODE_SEND)
                {
                    // the context
                    tb_demo_context_t* context = (tb_demo_context_t*)list[i].priv;
                    tb_assert_and_check_break(context);

                    // continue to send it if not finished
                    if (context->real < context->send)
                    {
                        // done send
                        tb_long_t real = tb_socket_send(tb_aioo_sock(aioo), context->data + context->real, context->send - context->real);
                        if (real > 0)
                        {
                            // save real
                            context->real += real;

                            // trace
//                          tb_trace_i("send[%p]: real: %ld", tb_aioo_sock(aioo), real);
                        }
                        else
                        {
                            // trace
                            tb_trace_i("send[%p]: closed", tb_aioo_sock(aioo));

                            // exit context
                            tb_demo_context_exit(aiop, context);
                            break;
                        }
                    }
                    // finished? read the next file block
                    else if (context->left)
                    {
                        // init
                        context->real = 0;
                        context->send = 0;

                        // done read
                        tb_size_t tryn = 1;
                        tb_long_t real = 0;
                        while (!(real = tb_file_read(context->file, context->data, tb_min((tb_size_t)context->left, TB_DEMO_FILE_READ_MAXN))) && tryn--);
                        if (real > 0)
                        {
                            // save left
                            context->left -= real;

                            // trace
//                          tb_trace_i("read[%p]: real: %ld", context->file, real);

                            // done send
                            context->send = real;
                            real = tb_socket_send(tb_aioo_sock(aioo), context->data, context->send);
                            if (real >= 0)
                            {
                                // save real
                                context->real += real;

                                // trace
//                              tb_trace_i("send[%p]: real: %ld", tb_aioo_sock(aioo), real);
                            }
                            else
                            {
                                // trace
                                tb_trace_i("send[%p]: closed", tb_aioo_sock(aioo));

                                // exit context
                                tb_demo_context_exit(aiop, context);
                                break;
                            }
                        }
                        else
                        {
                            // trace
                            tb_trace_i("read[%p]: closed", tb_aioo_sock(aioo));

                            // exit context
                            tb_demo_context_exit(aiop, context);
                            break;
                        }
                    }
                    else
                    {
                        // trace
                        tb_trace_i("read[%p]: closed", tb_aioo_sock(aioo));

                        // exit context
                        tb_demo_context_exit(aiop, context);
                        break;
                    }
                }
                // error?
                else
                {
                    tb_trace_i("aioe[%p]: unknown code: %lu", tb_aioo_sock(aioo), list[i].code);
                    break;
                }
            }
        }

    } while (0);

    // trace
    tb_trace_i("end");

    // exit socket
    if (sock) tb_socket_exit(sock);

    // exit aiop
    if (aiop) tb_aiop_exit(aiop);

    // end
    return 0;
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */
static tb_bool_t tb_demo_spider_parser_open_html(tb_stream_ref_t stream, tb_char_t const* url)
{
    // check
    tb_assert_and_check_return_val(stream && url, tb_false);

    // done
    tb_bool_t ok = tb_false;
    do
    {
        // find the .suffix
        tb_char_t const* p = tb_strrchr(url, '.');
        if (p)
        {
            // not html?
            tb_check_break (    tb_stricmp(p, ".css")
                            &&  tb_stricmp(p, ".js")
                            &&  tb_stricmp(p, ".png")
                            &&  tb_stricmp(p, ".jpg")
                            &&  tb_stricmp(p, ".gif")
                            &&  tb_stricmp(p, ".ico")
                            &&  tb_stricmp(p, ".bmp")
                            &&  tb_stricmp(p, ".mp4")
                            &&  tb_stricmp(p, ".mp3")
                            &&  tb_stricmp(p, ".flv")
                            &&  tb_stricmp(p, ".avi")
                            &&  tb_stricmp(p, ".exe")
                            &&  tb_stricmp(p, ".msi")
                            &&  tb_stricmp(p, ".zip")
                            &&  tb_stricmp(p, ".rar")
                            &&  tb_stricmp(p, ".7z"));
        }

        // ctrl stream
        if (!tb_stream_ctrl(stream, TB_STREAM_CTRL_SET_URL, url)) break;

        // open stream
        if (!tb_stream_open(stream)) break;

        // the stream size
        tb_hong_t size = tb_stream_size(stream);
        tb_check_break(size);

        // prefetch some data
        tb_byte_t*  data = tb_null;
        tb_size_t   need = tb_min((tb_size_t)size, 256);
        if (!tb_stream_need(stream, &data, need)) break;

        // is html?
        if (tb_strnistr((tb_char_t const*)data, need, "<!DOCTYPE html>"))
        {
            ok = tb_true;
            break;
        }

        // is html?
        ok = tb_strnistr((tb_char_t const*)data, need, "<html")? tb_true : tb_false;

    } while (0);

    // failed?
    if (!ok)
    {
        // clos stream
        if (stream) tb_stream_clos(stream);
    }

    // ok?
    return ok;
}
static tb_thread_pool_job_t* tb_thread_pool_jobs_post_task(tb_thread_pool_impl_t* impl, tb_thread_pool_task_t const* task, tb_size_t* post_size)
{
    // check
    tb_assert_and_check_return_val(impl && task && task->done && post_size, tb_null);

    // done
    tb_bool_t               ok = tb_false;
    tb_thread_pool_job_t*   job = tb_null;
    do
    {
        // check
        tb_assert_and_check_break(tb_list_entry_size(&impl->jobs_waiting) + tb_list_entry_size(&impl->jobs_urgent) + 1 < TB_THREAD_POOL_JOBS_WAITING_MAXN);

        // make job
        job = (tb_thread_pool_job_t*)tb_fixed_pool_malloc0(impl->jobs_pool);
        tb_assert_and_check_break(job);

        // init job
        job->refn   = 1;
        job->state  = TB_STATE_WAITING;
        job->task   = *task;

        // non-urgent job?
        if (!task->urgent)
        {
            // post to the waiting jobs
            tb_list_entry_insert_tail(&impl->jobs_waiting, &job->entry);
        }
        else
        {
            // post to the urgent jobs
            tb_list_entry_insert_tail(&impl->jobs_urgent, &job->entry);
        }

        // the waiting jobs count
        tb_size_t jobs_waiting_count = tb_list_entry_size(&impl->jobs_waiting) + tb_list_entry_size(&impl->jobs_urgent);
        tb_assert_and_check_break(jobs_waiting_count);

        // update the post size
        if (*post_size < impl->worker_size) (*post_size)++;

        // trace
        tb_trace_d("task[%p:%s]: post: %lu: ..", task->done, task->name, *post_size);

        // start more workers if the waiting jobs exceed the running workers
        if (impl->worker_size < jobs_waiting_count)
        {
            tb_size_t i = impl->worker_size;
            tb_size_t n = tb_min(jobs_waiting_count, impl->worker_maxn);
            for (; i < n; i++)
            {
                // the worker
                tb_thread_pool_worker_t* worker = &impl->worker_list[i];

                // clear worker
                tb_memset(worker, 0, sizeof(tb_thread_pool_worker_t));

                // init worker
                worker->id      = i;
                worker->pool    = (tb_thread_pool_ref_t)impl;
                worker->loop    = tb_thread_init(__tb_lstring__("thread_pool"), tb_thread_pool_worker_loop, worker, impl->stack);
                tb_assert_and_check_continue(worker->loop);
            }

            // update the worker size
            impl->worker_size = i;
        }

        // ok
        ok = tb_true;

    } while (0);

    // failed? free the job if it was allocated
    if (!ok && job)
    {
        // exit it
        tb_fixed_pool_free(impl->jobs_pool, job);
        job = tb_null;
    }

    // ok?
    return job;
}
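/* Usage sketch (not part of the library): post a task through the public API,
 * which fills a tb_thread_pool_task_t and routes it through the function
 * above. The exact tb_thread_pool_task_post() signature and the tb_thread_pool()
 * global instance getter are assumptions taken from the tbox headers.
 */
static tb_void_t tb_demo_task_done(tb_cpointer_t priv)
{
    // the task body, run on a worker thread
    tb_trace_i("task done: %s", (tb_char_t const*)priv);
}

static tb_void_t tb_demo_task_post()
{
    // post a non-urgent task named "demo" with no exit callback
    tb_thread_pool_task_post(tb_thread_pool(), "demo", tb_demo_task_done, tb_null, "hello", tb_false);
}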
tb_pointer_t tb_pool_ralloc_(tb_pool_ref_t pool, tb_pointer_t data, tb_size_t size __tb_debug_decl__)
{
    // check
    tb_pool_impl_t* impl = (tb_pool_impl_t*)pool;
    tb_assert_and_check_return_val(impl, tb_null);

    // uses allocator?
    if (impl->allocator) return tb_allocator_ralloc_(impl->allocator, data, size __tb_debug_args__);

    // check
    tb_assert_and_check_return_val(impl && impl->large_pool && impl->small_pool && size, tb_null);

    // enter
    tb_spinlock_enter(&impl->lock);

    // done
    tb_pointer_t data_new = tb_null;
    do
    {
        // no data?
        if (!data)
        {
            // malloc it directly
            data_new = size <= TB_SMALL_POOL_DATA_SIZE_MAXN? tb_small_pool_malloc_(impl->small_pool, size __tb_debug_args__) : tb_large_pool_malloc_(impl->large_pool, size, tb_null __tb_debug_args__);
            break;
        }

        // the data head
        tb_pool_data_head_t* data_head = &(((tb_pool_data_head_t*)data)[-1]);
        tb_assertf_break(data_head->debug.magic == TB_POOL_DATA_MAGIC, "ralloc invalid data: %p", data);
        tb_assert_and_check_break(data_head->size);

        // small => small
        if (data_head->size <= TB_SMALL_POOL_DATA_SIZE_MAXN && size <= TB_SMALL_POOL_DATA_SIZE_MAXN)
            data_new = tb_small_pool_ralloc_(impl->small_pool, data, size __tb_debug_args__);
        // small => large
        else if (data_head->size <= TB_SMALL_POOL_DATA_SIZE_MAXN)
        {
            // make the new data
            data_new = tb_large_pool_malloc_(impl->large_pool, size, tb_null __tb_debug_args__);
            tb_assert_and_check_break(data_new);

            // copy the old data
            tb_memcpy_(data_new, data, tb_min(data_head->size, size));

            // free the old data
            tb_small_pool_free_(impl->small_pool, data __tb_debug_args__);
        }
        // large => small
        else if (size <= TB_SMALL_POOL_DATA_SIZE_MAXN)
        {
            // make the new data
            data_new = tb_small_pool_malloc_(impl->small_pool, size __tb_debug_args__);
            tb_assert_and_check_break(data_new);

            // copy the old data
            tb_memcpy_(data_new, data, tb_min(data_head->size, size));

            // free the old data
            tb_large_pool_free_(impl->large_pool, data __tb_debug_args__);
        }
        // large => large
        else data_new = tb_large_pool_ralloc_(impl->large_pool, data, size, tb_null __tb_debug_args__);

    } while (0);

    // failed? dump it
#ifdef __tb_debug__
    if (!data_new)
    {
        // trace
        tb_trace_e("ralloc(%p, %lu) failed! at %s(): %lu, %s", data, size, func_, line_, file_);

        // dump data
        if (data) tb_pool_data_dump((tb_byte_t const*)data, tb_true, "[pool]: [error]: ");

        // abort
        tb_abort();
    }
#endif

    // leave
    tb_spinlock_leave(&impl->lock);

    // ok?
    return data_new;
}
tb_pointer_t tb_small_pool_ralloc_(tb_small_pool_ref_t pool, tb_pointer_t data, tb_size_t size __tb_debug_decl__)
{
    // check
    tb_small_pool_impl_t* impl = (tb_small_pool_impl_t*)pool;
    tb_assert_and_check_return_val(impl && impl->large_pool && data && size, tb_null);
    tb_assert_and_check_return_val(size <= TB_SMALL_POOL_DATA_SIZE_MAXN, tb_null);

    // disable small pool for debug
#ifdef TB_SMALL_POOL_DISABLE
    return tb_large_pool_ralloc(impl->large_pool, data, size, tb_null);
#endif

    // done
    tb_pointer_t data_new = tb_null;
    do
    {
        // the old data head
        tb_pool_data_head_t* data_head_old = &(((tb_pool_data_head_t*)data)[-1]);
        tb_assertf_break(data_head_old->debug.magic == TB_POOL_DATA_MAGIC, "ralloc invalid data: %p", data);

        // the old fixed pool
        tb_fixed_pool_ref_t fixed_pool_old = tb_small_pool_find_fixed(impl, data_head_old->size);
        tb_assert_and_check_break(fixed_pool_old);

        // the old data space
        tb_size_t space_old = tb_fixed_pool_item_size(fixed_pool_old);
        tb_assert_and_check_break(space_old >= data_head_old->size);

        // check underflow
        tb_assertf_break(space_old == data_head_old->size || ((tb_byte_t*)data)[data_head_old->size] == TB_POOL_DATA_PATCH, "data underflow");

        // the new fixed pool
        tb_fixed_pool_ref_t fixed_pool_new = tb_small_pool_find_fixed(impl, size);
        tb_assert_and_check_break(fixed_pool_new);

        // same space?
        if (fixed_pool_old == fixed_pool_new)
        {
#ifdef __tb_debug__
            // fill the patch bytes
            if (data_head_old->size > size) tb_memset_((tb_byte_t*)data + size, TB_POOL_DATA_PATCH, data_head_old->size - size);
#endif
            // only update size
            data_head_old->size = size;

            // ok
            data_new = data;
            break;
        }

        // make the new data
        data_new = tb_fixed_pool_malloc_(fixed_pool_new __tb_debug_args__);
        tb_assert_and_check_break(data_new);

        // the new data head
        tb_pool_data_head_t* data_head_new = &(((tb_pool_data_head_t*)data_new)[-1]);
        tb_assert_abort(data_head_new->debug.magic == TB_POOL_DATA_MAGIC);

#ifdef __tb_debug__
        // fill the patch bytes
        if (data_head_new->size > size) tb_memset_((tb_byte_t*)data_new + size, TB_POOL_DATA_PATCH, data_head_new->size - size);
#endif

        // update size
        data_head_new->size = size;

        // copy the old data
        tb_memcpy_(data_new, data, tb_min(data_head_old->size, size));

        // free the old data
        tb_fixed_pool_free_(fixed_pool_old, data __tb_debug_args__);

    } while (0);

    // failed? dump it
#ifdef __tb_debug__
    if (!data_new)
    {
        // trace
        tb_trace_e("ralloc(%p, %lu) failed! at %s(): %lu, %s", data, size, func_, line_, file_);

        // dump data
        tb_pool_data_dump((tb_byte_t const*)data, tb_true, "[small_pool]: [error]: ");

        // abort
        tb_abort();
    }
#endif

    // ok
    return data_new;
}
tb_long_t tb_filter_spak(tb_filter_ref_t self, tb_byte_t const* data, tb_size_t size, tb_byte_t const** pdata, tb_size_t need, tb_long_t sync)
{
    // check
    tb_filter_t* filter = (tb_filter_t*)self;
    tb_assert_and_check_return_val(filter && filter->spak && pdata, -1);

    // init odata
    *pdata = tb_null;

    // save the input offset
    filter->offset += size;

    // eof?
    if (filter->limit >= 0 && filter->offset == filter->limit)
        filter->beof = tb_true;

    // eof? sync it
    if (filter->beof) sync = -1;

    // the idata
    tb_byte_t const*    idata = tb_buffer_data(&filter->idata);
    tb_size_t           isize = tb_buffer_size(&filter->idata);
    if (data && size)
    {
        // append data to cache if have the cache data
        if (idata && isize)
        {
            // trace
            tb_trace_d("[%p]: append idata: %lu", self, size);

            // append data
            idata = tb_buffer_memncat(&filter->idata, data, size);
            isize = tb_buffer_size(&filter->idata);
        }
        // using the data directly if no cache data
        else
        {
            // trace
            tb_trace_d("[%p]: using idata directly: %lu", self, size);

            // using it directly
            idata = data;
            isize = size;
        }
    }
    // sync data if null
    else
    {
        // check sync
        tb_assert_and_check_return_val(sync, 0);
    }

    // the need
    if (!need) need = tb_max(size, tb_queue_buffer_maxn(&filter->odata));
    tb_assert_and_check_return_val(need, -1);

    // init pull
    tb_size_t   omaxn = 0;
    tb_byte_t*  odata = tb_queue_buffer_pull_init(&filter->odata, &omaxn);
    if (odata)
    {
        // the osize
        tb_long_t osize = omaxn >= need? need : 0;

        // exit pull
        if (odata) tb_queue_buffer_pull_exit(&filter->odata, osize > 0? osize : 0);

        // enough?
        if (osize > 0)
        {
            // append to the cache if idata is not belong to the cache
            if (size && idata == data) tb_buffer_memncat(&filter->idata, data, size);

            // return it directly
            *pdata = odata;
            return osize;
        }
    }

    // grow odata maxn if not enough
    if (need > tb_queue_buffer_maxn(&filter->odata))
        tb_queue_buffer_resize(&filter->odata, need);

    // the odata
    omaxn = 0;
    odata = tb_queue_buffer_push_init(&filter->odata, &omaxn);
    tb_assert_and_check_return_val(odata && omaxn, -1);

    // init stream
    tb_static_stream_t istream = {0};
    tb_static_stream_t ostream = {0};
    if (idata && isize)
    {
        // @note istream maybe null for sync the end data
        if (!tb_static_stream_init(&istream, (tb_byte_t*)idata, isize)) return -1;
    }
    if (!tb_static_stream_init(&ostream, (tb_byte_t*)odata, omaxn)) return -1;

    // trace
    tb_trace_d("[%p]: spak: ileft: %lu, oleft: %lu, offset: %llu, limit: %lld, beof: %d: ..", self, tb_buffer_size(&filter->idata), tb_queue_buffer_size(&filter->odata), filter->offset, filter->limit, filter->beof);

    // spak data
    tb_long_t osize = filter->spak(filter, &istream, &ostream, sync);

    // eof?
    if (osize < 0) filter->beof = tb_true;

    // no data and eof?
    if (!osize && !tb_static_stream_left(&istream) && filter->beof) osize = -1;

    // eof? sync it
    if (filter->beof) sync = -1;

    // exit odata
    tb_queue_buffer_push_exit(&filter->odata, osize > 0? osize : 0);

    // have the left idata?
    tb_size_t left = tb_static_stream_left(&istream);
    if (left)
    {
        // move to the cache head if idata is belong to the cache
        if (idata != data)
        {
            // trace
            tb_trace_d("[%p]: move to the cache head: %lu", self, left);

            tb_buffer_memnmov(&filter->idata, tb_static_stream_offset(&istream), left);
        }
        // append to the cache if idata is not belong to the cache
        else
        {
            // trace
            tb_trace_d("[%p]: append to the cache: %lu", self, left);

            tb_buffer_memncat(&filter->idata, tb_static_stream_pos(&istream), left);
        }
    }
    // clear the cache
    else tb_buffer_clear(&filter->idata);

    // init pull
    omaxn = 0;
    odata = tb_queue_buffer_pull_init(&filter->odata, &omaxn);

    // no sync? cache the output data
    if (!sync) osize = omaxn >= need? need : 0;
    // sync and has data? return it directly
    else if (omaxn) osize = tb_min(omaxn, need);
    // sync, no data or end?
    // else osize = osize;

    // exit pull
    if (odata) tb_queue_buffer_pull_exit(&filter->odata, osize > 0? osize : 0);

    // return it if have the odata
    if (osize > 0) *pdata = odata;

    // trace
    tb_trace_d("[%p]: spak: ileft: %lu, oleft: %lu, offset: %llu, limit: %lld, beof: %d: %ld", self, tb_buffer_size(&filter->idata), tb_queue_buffer_size(&filter->odata), filter->offset, filter->limit, filter->beof, osize);

    // ok?
    return osize;
}
tb_char_t const* tb_path_relative_to(tb_char_t const* root, tb_char_t const* path, tb_char_t* data, tb_size_t maxn)
{
    // check
    tb_assert_and_check_return_val(path && data && maxn, tb_null);

    // trace
    tb_trace_d("path: %s", path);

    // the root is the current directory and the path is not absolute? return the path directly
    if (!root && !tb_path_is_absolute(path))
    {
        // copy it
        tb_strlcpy(data, path, maxn);

        // translate it
        return tb_path_translate(data, 0, maxn)? data : tb_null;
    }

    // get the absolute path
    tb_size_t path_size = 0;
    tb_char_t path_absolute[TB_PATH_MAXN];
    tb_size_t path_maxn = sizeof(path_absolute);
    path = tb_path_absolute(path, path_absolute, path_maxn);
    path_size = path? tb_strlen(path) : 0;
    tb_assert_and_check_return_val(path && path_size && path_size < path_maxn, tb_null);

    // trace
    tb_trace_d("path_absolute: %s", path);

    // get the absolute root
    tb_size_t root_size = 0;
    tb_char_t root_absolute[TB_PATH_MAXN];
    tb_size_t root_maxn = sizeof(root_absolute);
    if (root)
    {
        // get the absolute root
        root = tb_path_absolute(root, root_absolute, root_maxn);
        root_size = root? tb_strlen(root) : 0;
    }
    else
    {
        // get the current directory
        if (!(root_size = tb_directory_current(root_absolute, root_maxn))) return tb_null;

        // translate it
        if (!(root_size = tb_path_translate(root_absolute, root_size, root_maxn))) return tb_null;
        root = root_absolute;
    }
    tb_assert_and_check_return_val(root && root_size && root_size < root_maxn, tb_null);

    // trace
    tb_trace_d("root_absolute: %s", root);

    // same directory? return "."
    if (path_size == root_size && !tb_strncmp(path, root, root_size))
    {
        // check
        tb_assert_and_check_return_val(maxn >= 2, ".");

        // return "."
        data[0] = '.';
        data[1] = '\0';
        return data;
    }

    // append separator
    if (path_size + 1 < path_maxn)
    {
        path_absolute[path_size++] = TB_PATH_SEPARATOR;
        path_absolute[path_size] = '\0';
    }
    if (root_size + 1 < root_maxn)
    {
        root_absolute[root_size++] = TB_PATH_SEPARATOR;
        root_absolute[root_size] = '\0';
    }

    // find the common leading directory
    tb_char_t const*    p = path;
    tb_char_t const*    q = root;
    tb_long_t           last = -1;
    for (; *p && *q && *p == *q; q++, p++)
    {
        // save the last separator
        if (*p == TB_PATH_SEPARATOR) last = q - root;
    }

    // is a different directory or outside the windows drive root? use the absolute path
    if (last <= 0 || (last == 2 && root[1] == ':'))
    {
        // the path size (reserve one byte for the null terminator)
        tb_size_t size = tb_min(path_size - 1, maxn - 1);

        // copy it
        tb_strncpy(data, path, size);
        data[size] = '\0';
    }
    // exists same root?
    else
    {
        // count the remaining levels in root
        tb_size_t count = 0;
        tb_char_t const* l = root + last + 1;
        for (; *l; l++)
        {
            if (*l == TB_PATH_SEPARATOR) count++;
        }

        // append "../" or "..\\"
        tb_char_t* d = data;
        tb_char_t* e = data + maxn;
        while (count--)
        {
            if (d + 3 < e)
            {
                d[0] = '.';
                d[1] = '.';
                d[2] = TB_PATH_SEPARATOR;
                d += 3;
            }
        }

        // append the remaining path
        l = path + last + 1;
        while (*l && d < e) *d++ = *l++;

        // remove the last separator
        if (d > data) d--;

        // end
        *d = '\0';
    }

    // trace
    tb_trace_d("relative: %s", data);

    // ok?
    return data;
}
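/* Usage sketch (not part of the library): with the logic above, a path equal
 * to the root yields "." and a path below the root yields its trailing
 * components; a disjoint root falls back to the absolute path. The inputs are
 * placeholders for a posix layout.
 */
static tb_void_t tb_path_relative_example()
{
    tb_char_t data[TB_PATH_MAXN];

    // expected: "b/c" relative to /tmp/a on posix systems
    tb_char_t const* path = tb_path_relative_to("/tmp/a", "/tmp/a/b/c", data, sizeof(data));
    if (path) tb_trace_i("relative: %s", path);
}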
tb_bool_t tb_file_copy(tb_char_t const* path, tb_char_t const* dest)
{
    // check
    tb_assert_and_check_return_val(path && dest, tb_false);

#ifdef TB_CONFIG_POSIX_HAVE_COPYFILE
    // the full path
    tb_char_t full0[TB_PATH_MAXN];
    path = tb_path_absolute(path, full0, TB_PATH_MAXN);
    tb_assert_and_check_return_val(path, tb_false);

    // the dest path
    tb_char_t full1[TB_PATH_MAXN];
    dest = tb_path_absolute(dest, full1, TB_PATH_MAXN);
    tb_assert_and_check_return_val(dest, tb_false);

    // attempt to copy it directly
    if (!copyfile(path, dest, 0, COPYFILE_ALL)) return tb_true;
    else
    {
        // attempt to copy it again after creating directory
        tb_char_t dir[TB_PATH_MAXN];
        if (tb_directory_create(tb_path_directory(dest, dir, sizeof(dir))))
            return !copyfile(path, dest, 0, COPYFILE_ALL);
    }

    // failed
    return tb_false;
#else
    tb_int_t    ifd = -1;
    tb_int_t    ofd = -1;
    tb_bool_t   ok = tb_false;
    do
    {
        // get the absolute source path
        tb_char_t data[8192];
        path = tb_path_absolute(path, data, sizeof(data));
        tb_assert_and_check_break(path);

        // get stat.st_mode first
#ifdef TB_CONFIG_POSIX_HAVE_STAT64
        struct stat64 st = {0};
        if (stat64(path, &st)) break;
#else
        struct stat st = {0};
        if (stat(path, &st)) break;
#endif

        // open source file
        ifd = open(path, O_RDONLY);
        tb_check_break(ifd >= 0);

        // get the absolute destination path
        dest = tb_path_absolute(dest, data, sizeof(data));
        tb_assert_and_check_break(dest);

        // open the destination file and copy the file mode
        ofd = open(dest, O_RDWR | O_CREAT | O_TRUNC, st.st_mode & (S_IRWXU | S_IRWXG | S_IRWXO));
        if (ofd < 0)
        {
            // attempt to open it again after creating directory
            tb_char_t dir[TB_PATH_MAXN];
            if (tb_directory_create(tb_path_directory(dest, dir, sizeof(dir))))
                ofd = open(dest, O_RDWR | O_CREAT | O_TRUNC, st.st_mode & (S_IRWXU | S_IRWXG | S_IRWXO));
        }
        tb_check_break(ofd >= 0);

        // get file size
        tb_hize_t size = tb_file_size(tb_fd2file(ifd));

        // init write size
        tb_hize_t writ = 0;

        // attempt to copy file using `sendfile`
#ifdef TB_CONFIG_POSIX_HAVE_SENDFILE
        while (writ < size)
        {
            off_t seek = writ;
            tb_hong_t real = sendfile(ofd, ifd, &seek, (size_t)(size - writ));
            if (real > 0) writ += real;
            else break;
        }

        /* fall back to copying directly if sendfile failed
         *
         * sendfile() can write to a regular file only since Linux 2.6.33.
         */
        if (writ != size)
        {
            lseek(ifd, 0, SEEK_SET);
            lseek(ofd, 0, SEEK_SET);
        }
        else
        {
            ok = tb_true;
            break;
        }
#endif

        // copy file using `read` and `write`
        writ = 0;
        while (writ < size)
        {
            // read some data
            tb_int_t real = read(ifd, data, (size_t)tb_min(size - writ, sizeof(data)));
            if (real > 0)
            {
                real = write(ofd, data, real);
                if (real > 0) writ += real;
                else break;
            }
            else break;
        }

        // ok?
        ok = (writ == size);

    } while (0);

    // close the source file
    if (ifd >= 0) close(ifd);
    ifd = -1;

    // close the destination file
    if (ofd >= 0) close(ofd);
    ofd = -1;

    // ok?
    return ok;
#endif
}
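/* Usage sketch (not part of the library): copy a file, relying on the
 * fallback above to create the destination directory when the first attempt
 * fails. The paths are placeholders.
 */
static tb_void_t tb_file_copy_example()
{
    if (!tb_file_copy("/tmp/src.txt", "/tmp/dir/dst.txt"))
        tb_trace_i("copy failed");
}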
static tb_pointer_t tb_default_allocator_ralloc(tb_allocator_ref_t self, tb_pointer_t data, tb_size_t size __tb_debug_decl__)
{
    // check
    tb_default_allocator_ref_t allocator = (tb_default_allocator_ref_t)self;
    tb_assert_and_check_return_val(allocator, tb_null);

    // check
    tb_assert_and_check_return_val(allocator && allocator->large_allocator && allocator->small_allocator && size, tb_null);

    // done
    tb_pointer_t data_new = tb_null;
    do
    {
        // no data?
        if (!data)
        {
            // malloc it directly
            data_new = size <= TB_SMALL_ALLOCATOR_DATA_MAXN? tb_allocator_malloc_(allocator->small_allocator, size __tb_debug_args__) : tb_allocator_large_malloc_(allocator->large_allocator, size, tb_null __tb_debug_args__);
            break;
        }

        // the data head
        tb_pool_data_head_t* data_head = &(((tb_pool_data_head_t*)data)[-1]);
        tb_assertf(data_head->debug.magic == TB_POOL_DATA_MAGIC, "ralloc invalid data: %p", data);
        tb_assert_and_check_break(data_head->size);

        // small => small
        if (data_head->size <= TB_SMALL_ALLOCATOR_DATA_MAXN && size <= TB_SMALL_ALLOCATOR_DATA_MAXN)
            data_new = tb_allocator_ralloc_(allocator->small_allocator, data, size __tb_debug_args__);
        // small => large
        else if (data_head->size <= TB_SMALL_ALLOCATOR_DATA_MAXN)
        {
            // make the new data
            data_new = tb_allocator_large_malloc_(allocator->large_allocator, size, tb_null __tb_debug_args__);
            tb_assert_and_check_break(data_new);

            // copy the old data
            tb_memcpy_(data_new, data, tb_min(data_head->size, size));

            // free the old data
            tb_allocator_free_(allocator->small_allocator, data __tb_debug_args__);
        }
        // large => small
        else if (size <= TB_SMALL_ALLOCATOR_DATA_MAXN)
        {
            // make the new data
            data_new = tb_allocator_malloc_(allocator->small_allocator, size __tb_debug_args__);
            tb_assert_and_check_break(data_new);

            // copy the old data
            tb_memcpy_(data_new, data, tb_min(data_head->size, size));

            // free the old data
            tb_allocator_large_free_(allocator->large_allocator, data __tb_debug_args__);
        }
        // large => large
        else data_new = tb_allocator_large_ralloc_(allocator->large_allocator, data, size, tb_null __tb_debug_args__);

    } while (0);

    // ok?
    return data_new;
}
tb_void_t tb_pool_data_dump(tb_cpointer_t data, tb_bool_t verbose, tb_char_t const* prefix)
{
    // done
    tb_pool_data_head_t* data_head = tb_null;
    do
    {
        // no data?
        tb_assert_and_check_break(data);

        // the data head
        data_head = &(((tb_pool_data_head_t*)data)[-1]);

        // dump the head info
        tb_size_t data_limit = 256;
        if (data_head->debug.magic == TB_POOL_DATA_MAGIC)
        {
            // the data size
            tb_size_t data_size = (tb_size_t)data_head->size;

            // format the backtrace prefix
            tb_char_t backtrace_prefix[256] = {0};
            tb_snprintf(backtrace_prefix, sizeof(backtrace_prefix), "%s ", prefix? prefix : "");

            // dump backtrace
            tb_size_t nframe = 0;
            while (nframe < tb_arrayn(data_head->debug.backtrace) && data_head->debug.backtrace[nframe]) nframe++;
            tb_trace_i("%sdata: from: %s(): %u, %s", prefix? prefix : "", data_head->debug.func, data_head->debug.line, data_head->debug.file);
            tb_backtrace_dump(backtrace_prefix, data_head->debug.backtrace, nframe);

            // dump the data info
            tb_trace_i("%sdata: %p, size: %lu, patch: %x", prefix? prefix : "", data, data_size, ((tb_byte_t const*)data)[data_size]);

            // dump the first 256-bytes data
            if (data_size && verbose)
            {
                // the dump size
                tb_size_t dump_size = tb_min(data_size, data_limit);

                // dump it
                tb_trace_i("%sdata: first %lu-bytes:", prefix? prefix : "", dump_size);
                tb_pool_data_dump_data((tb_byte_t const*)data, dump_size);

                // dump the last 256-bytes data
                if (data_size > dump_size)
                {
                    // the last data
                    tb_byte_t const* data_last = tb_max((tb_byte_t const*)data + data_size - data_limit, (tb_byte_t const*)data + dump_size);

                    // update the dump size
                    dump_size = (tb_byte_t const*)data + data_size - data_last;

                    // dump it
                    tb_trace_i("%sdata: last %lu-bytes:", prefix? prefix : "", dump_size);
                    tb_pool_data_dump_data(data_last, dump_size);
                }
            }
        }
        // for the public fixed_pool
        else if (data_head->debug.magic == TB_POOL_DATA_EMPTY_MAGIC)
        {
            // format the backtrace prefix
            tb_char_t backtrace_prefix[256] = {0};
            tb_snprintf(backtrace_prefix, sizeof(backtrace_prefix), "%s ", prefix? prefix : "");

            // dump backtrace
            tb_size_t nframe = 0;
            while (nframe < tb_arrayn(data_head->debug.backtrace) && data_head->debug.backtrace[nframe]) nframe++;
            tb_trace_i("%sdata: from: %s(): %u, %s", prefix? prefix : "", data_head->debug.func, data_head->debug.line, data_head->debug.file);
            tb_backtrace_dump(backtrace_prefix, data_head->debug.backtrace, nframe);

            // dump the data info
            tb_trace_i("%sdata: %p, size: fixed", prefix? prefix : "", data);
        }
        else
        {
            // dump the data head
            tb_trace_i("%sdata: invalid head:", prefix? prefix : "");
            tb_pool_data_dump_data((tb_byte_t const*)data_head, sizeof(tb_pool_data_head_t));

            // dump the first 256-bytes data
            tb_trace_i("%sdata: first %lu-bytes:", prefix? prefix : "", data_limit);
            tb_pool_data_dump_data((tb_byte_t const*)data, data_limit);
        }

    } while (0);
}
static tb_long_t tb_aiop_rtor_kqueue_wait(tb_aiop_rtor_impl_t* rtor, tb_aioe_ref_t list, tb_size_t maxn, tb_long_t timeout)
{
    // check
    tb_aiop_rtor_kqueue_impl_t* impl = (tb_aiop_rtor_kqueue_impl_t*)rtor;
    tb_assert_and_check_return_val(impl && impl->kqfd >= 0 && rtor->aiop && list && maxn, -1);

    // the aiop
    tb_aiop_impl_t* aiop = rtor->aiop;
    tb_assert_and_check_return_val(aiop, -1);

    // init time
    struct timespec t = {0};
    if (timeout > 0)
    {
        t.tv_sec = timeout / 1000;
        t.tv_nsec = (timeout % 1000) * 1000000;
    }

    // init grow
    tb_size_t grow = tb_align8((rtor->aiop->maxn >> 3) + 1);

    // init events
    if (!impl->evts)
    {
        impl->evtn = grow;
        impl->evts = tb_nalloc0(impl->evtn, sizeof(struct kevent));
        tb_assert_and_check_return_val(impl->evts, -1);
    }

    // wait events
    tb_long_t evtn = kevent(impl->kqfd, tb_null, 0, impl->evts, impl->evtn, timeout >= 0? &t : tb_null);
    tb_assert_and_check_return_val(evtn >= 0 && evtn <= impl->evtn, -1);

    // timeout?
    tb_check_return_val(evtn, 0);

    // grow it if events is full
    if (evtn == impl->evtn)
    {
        // grow size
        impl->evtn += grow;
        if (impl->evtn > rtor->aiop->maxn) impl->evtn = rtor->aiop->maxn;

        // grow data
        impl->evts = tb_ralloc(impl->evts, impl->evtn * sizeof(struct kevent));
        tb_assert_and_check_return_val(impl->evts, -1);
    }
    tb_assert(evtn <= impl->evtn);

    // limit
    evtn = tb_min(evtn, maxn);

    // sync
    tb_size_t i = 0;
    tb_size_t wait = 0;
    for (i = 0; i < evtn; i++)
    {
        // the kevent
        struct kevent* e = impl->evts + i;

        // the aioo
        tb_aioo_impl_t* aioo = (tb_aioo_impl_t*)e->udata;
        tb_assert_and_check_return_val(aioo && aioo->sock, -1);

        // the sock
        tb_socket_ref_t sock = aioo->sock;

        // spak?
        if (sock == aiop->spak[1] && e->filter == EVFILT_READ)
        {
            // read spak
            tb_char_t spak = '\0';
            if (1 != tb_socket_recv(aiop->spak[1], (tb_byte_t*)&spak, 1)) return -1;

            // killed?
            if (spak == 'k') return -1;

            // continue it
            continue ;
        }

        // skip spak
        tb_check_continue(sock != aiop->spak[1]);

        // init the aioe
        tb_aioe_ref_t aioe = &list[wait++];
        aioe->code = TB_AIOE_CODE_NONE;
        aioe->aioo = (tb_aioo_ref_t)aioo;
        aioe->priv = aioo->priv;
        if (e->filter == EVFILT_READ)
        {
            aioe->code |= TB_AIOE_CODE_RECV;
            if (aioo->code & TB_AIOE_CODE_ACPT) aioe->code |= TB_AIOE_CODE_ACPT;
        }
        if (e->filter == EVFILT_WRITE)
        {
            aioe->code |= TB_AIOE_CODE_SEND;
            if (aioo->code & TB_AIOE_CODE_CONN) aioe->code |= TB_AIOE_CODE_CONN;
        }
        if ((e->flags & EV_ERROR) && !(aioe->code & (TB_AIOE_CODE_RECV | TB_AIOE_CODE_SEND)))
            aioe->code |= TB_AIOE_CODE_RECV | TB_AIOE_CODE_SEND;

        // oneshot? clear it
        if (aioo->code & TB_AIOE_CODE_ONESHOT)
        {
            aioo->code = TB_AIOE_CODE_NONE;
            aioo->priv = tb_null;
        }
    }

    // ok
    return wait;
}
static __tb_inline__ tb_void_t g2_gl_draw_style_fill_shader_radial(g2_gl_draw_t* draw, g2_gl_rect_t const* bounds)
{
    // enter texture state
    g2_gl_draw_enter_texture_state(draw);

    // enter texture matrix
    g2_gl_draw_enter_texture_matrix(draw);

    // init texcoords
    draw->texcoords[0] = 0.0f;
    draw->texcoords[1] = 0.0f;
    draw->texcoords[2] = 1.0f;
    draw->texcoords[3] = 0.0f;
    draw->texcoords[4] = 0.0f;
    draw->texcoords[5] = 1.0f;
    draw->texcoords[6] = 1.0f;
    draw->texcoords[7] = 1.0f;

    // apply texcoords
    g2_gl_draw_apply_texcoords(draw);

    // init radial variables
    tb_float_t smatrix[16];
    g2_gl_matrix_from(smatrix, &draw->shader->matrix);
    tb_float_t cx = g2_float_to_tb(draw->shader->u.radial.cp.c.x);
    tb_float_t cy = g2_float_to_tb(draw->shader->u.radial.cp.c.y);
    tb_float_t x0 = g2_gl_matrix_apply_x(smatrix, cx, cy);
    tb_float_t y0 = g2_gl_matrix_apply_y(smatrix, cx, cy);

    // init scale factor
    tb_float_t sx = tb_fabs(smatrix[0]);
    tb_float_t sy = tb_fabs(smatrix[5]);
    tb_float_t fs = tb_min(sx, sy);
    if (fs < 1e-9) fs = 1e-9;

    // init the maximum radius: the farthest bounds corner from the center, unscaled
    tb_float_t n1 = (x0 - bounds->x1) * (x0 - bounds->x1) + (y0 - bounds->y1) * (y0 - bounds->y1);
    tb_float_t n2 = (x0 - bounds->x2) * (x0 - bounds->x2) + (y0 - bounds->y1) * (y0 - bounds->y1);
    tb_float_t n3 = (x0 - bounds->x1) * (x0 - bounds->x1) + (y0 - bounds->y2) * (y0 - bounds->y2);
    tb_float_t n4 = (x0 - bounds->x2) * (x0 - bounds->x2) + (y0 - bounds->y2) * (y0 - bounds->y2);
    if (n2 > n1) n1 = n2;
    if (n3 > n1) n1 = n3;
    if (n4 > n1) n1 = n4;
    tb_float_t rm = (tb_float_t)(tb_isqrti(tb_ceil(n1)) + 1) / fs;

    // the radial factors, selected by quality
    static g2_gl_draw_radial_factor_t factors[] =
    {
        {0.105396307f, 12.0f, 30}   // rm * sin(6.05)
    ,   {0.070626986f, 8.0f, 45}    // rm * sin(4.05)
    ,   {0.035771616f, 4.0f, 90}    // rm * sin(2.05)
    };
    tb_assert(g2_quality() < tb_arrayn(factors));
    g2_gl_draw_radial_factor_t const* factor = &factors[g2_quality()];

    /* init fragment vertices
     *
     *        fn
     *    *****|*****
     *     *   |   *
     *      *rm|  *
     *       * | *
     *        *|*
     *         *
     */
    tb_float_t fn = rm * factor->factor; // rm * sin(x.05)
    draw->vertices[0] = cx - fn;
    draw->vertices[1] = cy - rm;
    draw->vertices[2] = cx + fn;
    draw->vertices[3] = cy - rm;
    draw->vertices[4] = cx;
    draw->vertices[5] = cy;

    // init fragment bounds
    g2_gl_rect_t fbounds;
    g2_gl_bounds_init(&fbounds, draw->vertices[0], draw->vertices[1]);
    g2_gl_bounds_done(&fbounds, draw->vertices[2], draw->vertices[3]);
    g2_gl_bounds_done(&fbounds, draw->vertices[4], draw->vertices[5]);

    // apply vertices
    g2_gl_draw_apply_vertices(draw);

    // apply texture matrix
    g2_gl_draw_apply_texture_matrix(draw, &fbounds);

    // save vertex matrix
    tb_float_t matrix0[16];
    g2_gl_matrix_copy(matrix0, draw->vmatrix);

    // apply shader matrix
    g2_gl_matrix_multiply(draw->vmatrix, smatrix);

    // init the rotate matrix: rotate by one fragment step around the center
    tb_float_t matrix1[16];
    g2_gl_matrix_init_rotatep(matrix1, factor->rotation, cx, cy);

    // rotate for drawing all fragments
    tb_size_t n = factor->count;
    while (n--)
    {
        // rotate by one step
        g2_gl_matrix_multiply(draw->vmatrix, matrix1);

        // apply vertex matrix
        g2_gl_draw_apply_vertex_matrix(draw);

        // draw fragment
        g2_glDrawArrays(G2_GL_TRIANGLE_STRIP, 0, 3);
    }

    // restore vertex matrix
    g2_gl_matrix_copy(draw->vmatrix, matrix0);

    // apply vertex matrix
    g2_gl_draw_apply_vertex_matrix(draw);

    // leave texture matrix
    g2_gl_draw_leave_texture_matrix(draw);

    // leave texture state
    g2_gl_draw_leave_texture_state(draw);
}