/* //////////////////////////////////////////////////////////////////////////////////////
 * main
 */
tb_int_t tb_demo_platform_cache_time_main(tb_int_t argc, tb_char_t** argv)
{
    /* sample the cached time three times, one second apart,
     * printing the spak clock and the cached mclock each time
     */
    tb_size_t i;
    for (i = 0; i < 3; i++)
    {
        // sleep between samples (not before the first one)
        if (i) tb_sleep(1);

        // print the current samples
        tb_trace_i("%lld %lld", tb_cache_time_spak(), tb_cache_time_mclock());
    }
    return 0;
}
/* wait until all jobs in the thread pool are done, or the timeout expires
 *
 * polls the job pool size every 200ms; a job is only removed from the
 * fixed pool once it has fully finished, so size == 0 means all done
 *
 * @param pool      the thread pool
 * @param timeout   the timeout in ms, < 0: wait forever
 *
 * @return          1: all jobs done, 0: timed out with jobs remaining, -1: invalid pool
 */
tb_long_t tb_thread_pool_task_wait_all(tb_thread_pool_ref_t pool, tb_long_t timeout)
{
    // check
    tb_thread_pool_impl_t* impl = (tb_thread_pool_impl_t*)pool;
    tb_assert_and_check_return_val(impl, -1);

    // wait it: poll until the job pool drains or the deadline passes
    tb_size_t size = 0;
    tb_hong_t time = tb_cache_time_spak();
    while ((timeout < 0 || tb_cache_time_spak() < time + timeout))
    {
        // enter
        tb_spinlock_enter(&impl->lock);

        // the jobs count: every live job (waiting, pending or urgent) is allocated from this pool
        size = impl->jobs_pool? tb_fixed_pool_size(impl->jobs_pool) : 0;

        // trace
        tb_trace_d("wait: jobs: %lu, waiting: %lu, pending: %lu, urgent: %lu: .."
                , size
                , tb_list_entry_size(&impl->jobs_waiting)
                , tb_list_entry_size(&impl->jobs_pending)
                , tb_list_entry_size(&impl->jobs_urgent));

        // disabled debug dump of the pending job states
#if 0
        tb_for_all_if (tb_thread_pool_job_t*, job, tb_list_entry_itor(&impl->jobs_pending), job)
        {
            tb_trace_d("wait: job: %s from pending", tb_state_cstr(tb_atomic_get(&job->state)));
        }
#endif

        // leave
        tb_spinlock_leave(&impl->lock);

        // ok? all jobs drained
        tb_check_break(size);

        // wait some time before polling again
        tb_msleep(200);
    }

    // ok?
    return !size? 1 : 0;
}
/* wait the semaphore implemented as a polled atomic counter
 *
 * @param self      the semaphore
 * @param timeout   the timeout in ms, < 0: wait forever
 *
 * @return          > 0: the post count observed (signal received),
 *                  0: timed out, -1: invalid semaphore or negative counter
 */
tb_long_t tb_semaphore_wait(tb_semaphore_ref_t self, tb_long_t timeout)
{
    // check
    tb_atomic_t* semaphore = (tb_atomic_t*)self;
    tb_assert_and_check_return_val(semaphore, -1);

    // init
    tb_long_t r = 0;
    tb_hong_t base = tb_cache_time_spak();

    // wait: poll the counter every 200ms
    while (1)
    {
        // get post
        tb_long_t post = (tb_long_t)tb_atomic_get(semaphore);

        // has signal?
        if (post > 0)
        {
            /* semaphore--
             *
             * NOTE(review): the read above and this decrement are two separate
             * atomic operations, so two concurrent waiters may both observe
             * post == 1 and drive the counter negative — presumably tolerated
             * by callers (the negative value maps to the error branch below);
             * confirm before relying on strict semaphore semantics.
             */
            tb_atomic_fetch_and_dec(semaphore);

            // ok: report the observed post count
            r = post;
            break;
        }
        // no signal?
        else if (!post)
        {
            // timeout? give up with r == 0, else back off and retry
            if (timeout >= 0 && tb_cache_time_spak() - base >= timeout) break;
            else tb_msleep(200);
        }
        // error: counter went negative
        else
        {
            r = -1;
            break;
        }
    }

    return r;
}
/* wait until all transfer tasks in the pool are done, or the timeout expires
 *
 * polls the working list every 200ms
 *
 * @param pool      the transfer pool
 * @param timeout   the timeout in ms, < 0: wait forever
 *
 * @return          1: all tasks done, 0: timed out with tasks still working, -1: invalid pool
 */
tb_long_t tb_transfer_pool_wait_all(tb_transfer_pool_ref_t pool, tb_long_t timeout)
{
    // check
    tb_transfer_pool_impl_t* impl = (tb_transfer_pool_impl_t*)pool;
    tb_assert_and_check_return_val(impl, -1);

    // wait it: poll until the working list drains or the deadline passes
    tb_size_t size = 0;
    tb_hong_t time = tb_cache_time_spak();
    while ((timeout < 0 || tb_cache_time_spak() < time + timeout))
    {
        // enter
        tb_spinlock_enter(&impl->lock);

        /* the working task count
         *
         * fix: assign the outer variable instead of declaring a shadowing
         * local `tb_size_t size`; with the shadow, the final return below
         * always saw 0 and reported success (1) even when the wait timed
         * out with work still pending. this now matches the sibling
         * tb_thread_pool_task_wait_all/tb_aicp_wait_all implementations.
         */
        size = tb_list_entry_size(&impl->work);

        // trace
        tb_trace_d("wait: %lu: ..", size);

        // trace work
#ifdef __tb_debug__
        if (size) tb_walk_all(tb_list_entry_itor(&impl->work), tb_transfer_pool_work_wait, tb_null);
#endif

        // leave
        tb_spinlock_leave(&impl->lock);

        // ok? all work drained
        tb_check_break(size);

        // wait some time before polling again
        tb_msleep(200);
    }

    // ok only if no task remains
    return !size? 1 : 0;
}
/* wait one task of the thread pool until it finishes, is killed, or the timeout expires
 *
 * @param pool      the thread pool
 * @param task      the task to wait for
 * @param timeout   the timeout in ms, < 0: wait forever
 *
 * @return          1: the task finished or was killed, 0: timed out, -1: invalid arguments
 */
tb_long_t tb_thread_pool_task_wait(tb_thread_pool_ref_t pool, tb_thread_pool_task_ref_t task, tb_long_t timeout)
{
    // check
    tb_thread_pool_job_t* job = (tb_thread_pool_job_t*)task;
    tb_assert_and_check_return_val(pool && job, -1);

    // poll the job state until it reaches a terminal state or the deadline passes
    tb_hong_t started = tb_cache_time_spak();
    tb_size_t state   = TB_STATE_WAITING;
    while (1)
    {
        // snapshot the job state
        state = tb_atomic_get(&job->state);

        // terminal? done waiting
        if (state == TB_STATE_FINISHED || state == TB_STATE_KILLED) break;

        // deadline reached? give up
        if (timeout >= 0 && tb_cache_time_spak() >= started + timeout) break;

        // trace
        tb_trace_d("task[%p:%s]: wait: state: %s: ..", job->task.done, job->task.name, tb_state_cstr(state));

        // back off before polling again
        tb_msleep(200);
    }

    // ok only if the task reached a terminal state
    return (state == TB_STATE_FINISHED || state == TB_STATE_KILLED)? 1 : 0;
}
/* wait until all aico objects of the aicp are exited, or the timeout expires
 *
 * @param aicp      the aicp
 * @param timeout   the timeout in ms, < 0: wait forever
 *
 * @return          1: all aico objects exited, 0: timed out, -1: invalid aicp
 */
tb_long_t tb_aicp_wait_all(tb_aicp_ref_t aicp, tb_long_t timeout)
{
    // check
    tb_aicp_impl_t* impl = (tb_aicp_impl_t*)aicp;
    tb_assert_and_check_return_val(impl, -1);

    // trace
    tb_trace_d("wait: all: ..");

    // poll the aico count until it drains or the deadline passes
    tb_size_t count = 0;
    tb_hong_t started = tb_cache_time_spak();
    while (1)
    {
        // deadline reached? give up
        if (timeout >= 0 && tb_cache_time_spak() >= started + timeout) break;

        // snapshot the aico count under the lock
        tb_spinlock_enter(&impl->lock);
        count = impl->pool? tb_fixed_pool_size(impl->pool) : 0;
        tb_spinlock_leave(&impl->lock);

        // trace
        tb_trace_d("wait: count: %lu: ..", count);

        // drained? done waiting
        if (!count) break;

        // back off before polling again
        tb_msleep(200);
    }

    // ok only if no aico object remains
    return !count? 1 : 0;
}
/* refresh the cached time and spank both timers of the io scheduler
 *
 * @param scheduler_io  the io scheduler
 *
 * @return              tb_true on success, tb_false if either timer failed to spak
 */
static tb_bool_t tb_lo_scheduler_io_timer_spak(tb_lo_scheduler_io_ref_t scheduler_io)
{
    // check
    tb_assert(scheduler_io && scheduler_io->timer && scheduler_io->ltimer);

    // refresh the cached time before spanking the timers
    tb_cache_time_spak();

    // spak the high-precision timer first, then the low-precision one;
    // short-circuit stops on the first failure, as the original early returns did
    return tb_timer_spak(scheduler_io->timer) && tb_ltimer_spak(scheduler_io->ltimer);
}
/* the timer worker loop: repeatedly waits for the next task delay and spanks the timer
 *
 * intended to run on a dedicated thread; exits when impl->stop is set,
 * when the event is gone/fails, or when tb_timer_spak() reports failure
 *
 * @param timer     the timer
 */
tb_void_t tb_timer_loop(tb_timer_ref_t timer)
{
    // check
    tb_timer_impl_t* impl = (tb_timer_impl_t*)timer;
    tb_assert_and_check_return(impl);

    // work++: mark this looper as active
    tb_atomic_fetch_and_inc(&impl->work);

    // init event lazily under the lock (another looper may have created it already)
    tb_spinlock_enter(&impl->lock);
    if (!impl->event) impl->event = tb_event_init();
    tb_spinlock_leave(&impl->lock);

    // loop until stopped
    while (!tb_atomic_get(&impl->stop))
    {
        // the delay until the next timer task is due (0: a task is due now, skip the wait)
        tb_size_t delay = tb_timer_delay(timer);
        if (delay)
        {
            // re-read the event under the lock; it may be exited concurrently
            tb_spinlock_enter(&impl->lock);
            tb_event_ref_t event = impl->event;
            tb_spinlock_leave(&impl->lock);
            tb_check_break(event);

            // wait until the delay elapses or the event is posted; < 0 means the event failed
            if (tb_event_wait(event, delay) < 0) break;
        }

        // spak ctime: refresh the cached time if this timer runs on cached time
        if (impl->ctime) tb_cache_time_spak();

        // spak it: dispatch all expired tasks; stop the loop on failure
        if (!tb_timer_spak(timer)) break;
    }

    // work--: this looper is leaving
    tb_atomic_fetch_and_dec(&impl->work);
}
/* the aicp worker loop: pulls completed aice responses from the ptor and
 * dispatches their completion callbacks until the ptor fails or a stop is requested
 *
 * @param aicp      the aicp
 * @param stop      optional predicate checked after each response; returning
 *                  tb_true kills the aicp and so ends all worker loops
 * @param priv      the private data passed to the stop predicate
 */
tb_void_t tb_aicp_loop_util(tb_aicp_ref_t aicp, tb_bool_t (*stop)(tb_cpointer_t priv), tb_cpointer_t priv)
{
    // check
    tb_aicp_impl_t* impl = (tb_aicp_impl_t*)aicp;
    tb_assert_and_check_return(impl);

    // the ptor (the platform proactor backend)
    tb_aicp_ptor_impl_t* ptor = impl->ptor;
    tb_assert_and_check_return(ptor && ptor->loop_spak);

    // the loop spak: cache the function pointer for the hot loop
    tb_long_t (*loop_spak)(tb_aicp_ptor_impl_t* , tb_handle_t, tb_aice_ref_t , tb_long_t ) = ptor->loop_spak;

    // worker++: mark this worker as active
    tb_atomic_fetch_and_inc(&impl->work);

    // init per-worker loop handle if the ptor provides one
    tb_handle_t loop = ptor->loop_init? ptor->loop_init(ptor) : tb_null;

    // trace
    tb_trace_d("loop[%p]: init", loop);

    // spak ctime: refresh the cached time before entering the loop
    tb_cache_time_spak();

    // loop
    while (1)
    {
        // spak: wait (timeout -1) for one completed response
        tb_aice_t resp = {0};
        tb_long_t ok = loop_spak(ptor, loop, &resp, -1);

        // spak ctime: refresh the cached time after a potentially long wait
        tb_cache_time_spak();

        // failed? stop the loop
        tb_check_break(ok >= 0);

        // timeout? no response this round
        tb_check_continue(ok);

        // check aico of the response
        tb_aico_impl_t* aico = (tb_aico_impl_t*)resp.aico;
        tb_assert_and_check_continue(aico);

        // trace
        tb_trace_d("loop[%p]: spak: code: %lu, aico: %p, state: %s: %ld", loop, resp.code, aico, aico? tb_state_cstr(tb_atomic_get(&aico->state)) : "null", ok);

        /* pending? clear state if be not accept or accept failed
         *
         * an accept that succeeded stays OPENED (more connections may follow);
         * everything else atomically flips OPENED -> PENDING and we inspect
         * the previous state
         */
        tb_size_t state = TB_STATE_OPENED;
        state = (resp.code != TB_AICE_CODE_ACPT || resp.state != TB_STATE_OK)? tb_atomic_fetch_and_pset(&aico->state, TB_STATE_PENDING, state) : tb_atomic_get(&aico->state);

        // killed or killing?
        if (state == TB_STATE_KILLED || state == TB_STATE_KILLING)
        {
            // update the aice state so the callback sees the kill
            resp.state = TB_STATE_KILLED;

            // killing? update to the killed state
            tb_atomic_fetch_and_pset(&aico->state, TB_STATE_KILLING, TB_STATE_KILLED);
        }

        // done func, @note maybe the aico exit will be called
        if (resp.func && !resp.func(&resp))
        {
            // trace
#ifdef __tb_debug__
            tb_trace_e("loop[%p]: done aice func failed with code: %lu at line: %lu, func: %s, file: %s!", loop, resp.code, aico->line, aico->func, aico->file);
#else
            tb_trace_e("loop[%p]: done aice func failed with code: %lu!", loop, resp.code);
#endif
        }

        // killing? update to the killed state (again: the kill may have arrived during the callback)
        tb_atomic_fetch_and_pset(&aico->state, TB_STATE_KILLING, TB_STATE_KILLED);

        // stop it? kill the aicp, which makes loop_spak fail and ends all workers
        if (stop && stop(priv)) tb_aicp_kill(aicp);
    }

    // exit the per-worker loop handle
    if (ptor->loop_exit) ptor->loop_exit(ptor, loop);

    // worker--: this worker is leaving
    tb_atomic_fetch_and_dec(&impl->work);

    // trace
    tb_trace_d("loop[%p]: exit", loop);
}
/* the aiop spak thread: waits for i/o events, spanks both timers, and pushes
 * ready aice objects to the spak queue until the aicp is killed or an error occurs
 *
 * @param priv      the tb_aiop_ptor_impl_t* passed at thread creation
 *
 * @return          always tb_null (thread exit value)
 */
static tb_pointer_t tb_aiop_spak_loop(tb_cpointer_t priv)
{
    // check
    tb_aiop_ptor_impl_t*    impl = (tb_aiop_ptor_impl_t*)priv;
    tb_aicp_impl_t*         aicp = impl? impl->base.aicp : tb_null;

    // done
    do
    {
        // check
        tb_assert_and_check_break(impl && impl->aiop && impl->list && impl->timer && impl->ltimer && aicp);

        // trace
        tb_trace_d("loop: init");

        // loop until the aicp is killed
        while (!tb_atomic_get(&aicp->kill))
        {
            // the delay until the next timer task
            tb_size_t delay = tb_timer_delay(impl->timer);

            // the ldelay until the next ltimer task
            // NOTE(review): ldelay is tb_size_t, so `!= -1` compares against SIZE_MAX —
            // presumably the error sentinel of tb_ltimer_delay; confirm.
            tb_size_t ldelay = tb_ltimer_delay(impl->ltimer);
            tb_assert_and_check_break(ldelay != -1);

            // trace
            tb_trace_d("loop: wait: ..");

            // wait aioe: block at most until the earliest timer task is due
            tb_long_t real = tb_aiop_wait(impl->aiop, impl->list, impl->maxn, tb_min(delay, ldelay));

            // trace
            tb_trace_d("loop: wait: %ld", real);

            // spak ctime: refresh the cached time after the wait
            tb_cache_time_spak();

            // spak timer: dispatch expired timer tasks
            if (!tb_timer_spak(impl->timer)) break;

            // spak ltimer: dispatch expired ltimer tasks
            if (!tb_ltimer_spak(impl->ltimer)) break;

            // killed? a negative wait result means the aiop was killed
            tb_check_break(real >= 0);

            // error? out of range
            tb_assert_and_check_break(real <= impl->maxn);

            // timeout? no events this round
            tb_check_continue(real);

            // grow it if aioe is full (the list was completely filled, there may be more)
            if (real == impl->maxn)
            {
                // grow size by 1/16 of the aicp capacity, clamped to it
                impl->maxn += (aicp->maxn >> 4) + 16;
                if (impl->maxn > aicp->maxn) impl->maxn = aicp->maxn;

                // grow list
                impl->list = tb_ralloc(impl->list, impl->maxn * sizeof(tb_aioe_t));
                tb_assert_and_check_break(impl->list);
            }

            // walk aioe list and push each ready event to the spak queue
            tb_size_t i = 0;
            tb_bool_t end = tb_false;
            for (i = 0; i < real && !end; i++)
            {
                // the aioe
                tb_aioe_ref_t aioe = &impl->list[i];
                tb_assert_and_check_break_state(aioe, end, tb_true);

                // the aice attached to this event
                tb_aice_ref_t aice = (tb_aice_ref_t)aioe->priv;
                tb_assert_and_check_break_state(aice, end, tb_true);

                // the aico owning the aice
                tb_aiop_aico_t* aico = (tb_aiop_aico_t*)aice->aico;
                tb_assert_and_check_break_state(aico, end, tb_true);

                // have wait? skip events with no pending aice code
                tb_check_continue(aice->code);

                // have been waited ok for the timer timeout/killed func? need not spak it repeatly
                tb_check_continue(!aico->wait_ok);

                // sock?
                if (aico->base.type == TB_AICO_TYPE_SOCK)
                {
                    // push the acpt aice
                    if (aice->code == TB_AICE_CODE_ACPT) end = tb_aiop_push_acpt(impl, aice)? tb_false : tb_true;
                    // push the sock aice
                    else end = tb_aiop_push_sock(impl, aice)? tb_false : tb_true;
                }
                else if (aico->base.type == TB_AICO_TYPE_FILE)
                {
                    // poll file i/o completions
                    tb_aicp_file_poll(impl);
                }
                else tb_assert(0);
            }

            // end? a push or a check failed above
            tb_check_break(!end);

            // work it: wake the spak workers to process the queued aices
            tb_aiop_spak_work(impl);
        }

    } while (0);

    // trace
    tb_trace_d("loop: exit");

    // kill the whole aicp so the other loops stop too
    tb_aicp_kill((tb_aicp_ref_t)aicp);

    // exit this thread
    tb_thread_return(tb_null);
    return tb_null;
}
/* the SDL window main loop: draws each frame, translates SDL events into
 * gb_event_t and dispatches them, and throttles to the configured framerate
 *
 * runs until SDL_QUIT is received or SDL_Flip fails
 *
 * @param window    the window
 */
static tb_void_t gb_window_sdl_loop(gb_window_ref_t window)
{
    // check
    gb_window_sdl_impl_t* impl = (gb_window_sdl_impl_t*)window;
    tb_assert_and_check_return(impl);

    // init canvas lazily on first entry
    if (!impl->canvas) impl->canvas = gb_canvas_init_from_window(window);
    tb_assert(impl->canvas);

    // done init: run the user init callback; abort the loop if it fails
    if (impl->base.info.init && !impl->base.info.init((gb_window_ref_t)impl, impl->canvas, impl->base.info.priv)) return ;

    // loop state: frame delay in ms derived from the configured (or default) framerate
    SDL_Event evet;
    tb_hong_t time;
    tb_bool_t stop = tb_false;
    tb_size_t delay = 1000 / (impl->base.info.framerate? impl->base.info.framerate : GB_WINDOW_DEFAULT_FRAMERATE);
    while (!stop)
    {
        // spak: start-of-frame bookkeeping; returns the frame start time
        time = gb_window_impl_spak((gb_window_ref_t)impl);

        // lock the surface for direct pixel access
        SDL_LockSurface(impl->surface);

        // draw the frame
        gb_window_impl_draw((gb_window_ref_t)impl, impl->canvas);

        // unlock the surface
        SDL_UnlockSurface(impl->surface);

        // flip the surface to the screen; a failure ends the loop
        if (SDL_Flip(impl->surface) < 0) stop = tb_true;

        // poll and translate all pending SDL events
        while (SDL_PollEvent(&evet))
        {
            // done
            switch (evet.type)
            {
            case SDL_MOUSEMOTION:
                {
                    // init a mouse-move event, keeping the currently held button
                    gb_event_t              event = {0};
                    event.type              = GB_EVENT_TYPE_MOUSE;
                    event.u.mouse.code      = GB_MOUSE_MOVE;
                    event.u.mouse.button    = impl->button;
                    gb_point_imake(&event.u.mouse.cursor, evet.motion.x, evet.motion.y);

                    // done event
                    gb_window_impl_event((gb_window_ref_t)impl, &event);
                }
                break;
            case SDL_MOUSEBUTTONUP:
            case SDL_MOUSEBUTTONDOWN:
                {
                    // init a mouse down/up event
                    gb_event_t          event = {0};
                    event.type          = GB_EVENT_TYPE_MOUSE;
                    event.u.mouse.code  = evet.type == SDL_MOUSEBUTTONDOWN? GB_MOUSE_DOWN : GB_MOUSE_UP;
                    gb_point_imake(&event.u.mouse.cursor, evet.button.x, evet.button.y);

                    // init button: map the SDL button to the gbox button
                    switch (evet.button.button)
                    {
                    case SDL_BUTTON_LEFT: event.u.mouse.button = GB_MOUSE_BUTTON_LEFT; break;
                    case SDL_BUTTON_RIGHT: event.u.mouse.button = GB_MOUSE_BUTTON_RIGHT; break;
                    case SDL_BUTTON_MIDDLE: event.u.mouse.button = GB_MOUSE_BUTTON_MIDDLE; break;
                    default: event.u.mouse.button = GB_MOUSE_BUTTON_NONE; break;
                    }

                    // save button so later motion events carry the held button
                    impl->button = evet.type == SDL_MOUSEBUTTONDOWN? event.u.mouse.button : GB_MOUSE_BUTTON_NONE;

                    // done event
                    gb_window_impl_event((gb_window_ref_t)impl, &event);
                }
                break;
            case SDL_KEYDOWN:
            case SDL_KEYUP:
                {
                    // init a keyboard event
                    gb_event_t              event = {0};
                    event.type              = GB_EVENT_TYPE_KEYBOARD;
                    event.u.keyboard.pressed = evet.type == SDL_KEYDOWN? tb_true : tb_false;

                    // init code: map SDL keysyms to gbox key codes
                    // NOTE(review): 0x136/0x135 are presumably SDLK_RMETA/SDLK_LMETA — confirm
                    switch ((tb_size_t)evet.key.keysym.sym)
                    {
                    case SDLK_F1: event.u.keyboard.code = GB_KEY_F1; break;
                    case SDLK_F2: event.u.keyboard.code = GB_KEY_F2; break;
                    case SDLK_F3: event.u.keyboard.code = GB_KEY_F3; break;
                    case SDLK_F4: event.u.keyboard.code = GB_KEY_F4; break;
                    case SDLK_F5: event.u.keyboard.code = GB_KEY_F5; break;
                    case SDLK_F6: event.u.keyboard.code = GB_KEY_F6; break;
                    case SDLK_F7: event.u.keyboard.code = GB_KEY_F7; break;
                    case SDLK_F8: event.u.keyboard.code = GB_KEY_F8; break;
                    case SDLK_F9: event.u.keyboard.code = GB_KEY_F9; break;
                    case SDLK_F10: event.u.keyboard.code = GB_KEY_F10; break;
                    case SDLK_F11: event.u.keyboard.code = GB_KEY_F11; break;
                    case SDLK_F12: event.u.keyboard.code = GB_KEY_F12; break;

                    case SDLK_LEFT: event.u.keyboard.code = GB_KEY_LEFT; break;
                    case SDLK_UP: event.u.keyboard.code = GB_KEY_UP; break;
                    case SDLK_RIGHT: event.u.keyboard.code = GB_KEY_RIGHT; break;
                    case SDLK_DOWN: event.u.keyboard.code = GB_KEY_DOWN; break;

                    case SDLK_HOME: event.u.keyboard.code = GB_KEY_HOME; break;
                    case SDLK_END: event.u.keyboard.code = GB_KEY_END; break;
                    case SDLK_INSERT: event.u.keyboard.code = GB_KEY_INSERT; break;
                    case SDLK_PAGEUP: event.u.keyboard.code = GB_KEY_PAGEUP; break;
                    case SDLK_PAGEDOWN: event.u.keyboard.code = GB_KEY_PAGEDOWN; break;

                    case SDLK_HELP: event.u.keyboard.code = GB_KEY_HELP; break;
                    case SDLK_PRINT: event.u.keyboard.code = GB_KEY_PRINT; break;
                    case SDLK_SYSREQ: event.u.keyboard.code = GB_KEY_SYSREQ; break;
                    case SDLK_BREAK: event.u.keyboard.code = GB_KEY_BREAK; break;
                    case SDLK_MENU: event.u.keyboard.code = GB_KEY_MENU; break;
                    case SDLK_POWER: event.u.keyboard.code = GB_KEY_POWER; break;
                    case SDLK_EURO: event.u.keyboard.code = GB_KEY_EURO; break;
                    case SDLK_UNDO: event.u.keyboard.code = GB_KEY_UNDO; break;

                    case SDLK_NUMLOCK: event.u.keyboard.code = GB_KEY_NUMLOCK; break;
                    case SDLK_CAPSLOCK: event.u.keyboard.code = GB_KEY_CAPSLOCK; break;
                    case SDLK_SCROLLOCK: event.u.keyboard.code = GB_KEY_SCROLLLOCK; break;
                    case SDLK_RSHIFT: event.u.keyboard.code = GB_KEY_RSHIFT; break;
                    case SDLK_LSHIFT: event.u.keyboard.code = GB_KEY_LSHIFT; break;
                    case SDLK_RCTRL: event.u.keyboard.code = GB_KEY_RCTRL; break;
                    case SDLK_LCTRL: event.u.keyboard.code = GB_KEY_LCTRL; break;
                    case SDLK_RALT: event.u.keyboard.code = GB_KEY_RALT; break;
                    case SDLK_LALT: event.u.keyboard.code = GB_KEY_LALT; break;
                    case 0x136: event.u.keyboard.code = GB_KEY_RCMD; break;
                    case 0x135: event.u.keyboard.code = GB_KEY_LCMD; break;

                    case SDLK_PAUSE: event.u.keyboard.code = GB_KEY_PAUSE; break;

                    default :
                        // printable ascii range maps directly to the char code
                        if (evet.key.keysym.sym < 256)
                        {
                            // the char code
                            event.u.keyboard.code = evet.key.keysym.sym;
                        }
                        break;
                    }

                    // done event only for keys we could map
                    if (event.u.keyboard.code) gb_window_impl_event((gb_window_ref_t)impl, &event);
                }
                break;
            case SDL_VIDEORESIZE:
                {
                    // trace
                    tb_trace_d("resize: type: %d, %dx%d", evet.resize.type, evet.resize.w, evet.resize.h);

                    // TODO: resize handling not implemented yet
                    // ...
                }
                break;
            case SDL_ACTIVEEVENT:
                {
                    // trace
                    tb_trace_d("active: type: %d, gain: %d, state: %d", evet.active.type, evet.active.gain, evet.active.state);

                    // active? only app-level activation changes are forwarded
                    if (evet.active.state == SDL_APPACTIVE)
                    {
                        // init a foreground/background event
                        gb_event_t          event = {0};
                        event.type          = GB_EVENT_TYPE_ACTIVE;
                        event.u.active.code = evet.active.gain? GB_ACTIVE_FOREGROUND : GB_ACTIVE_BACKGROUND;

                        // done event
                        gb_window_impl_event((gb_window_ref_t)impl, &event);
                    }
                }
                break;
            case SDL_QUIT:
                {
                    // stop the loop
                    stop = tb_true;
                }
                break;
            default:
                // trace
                tb_trace_e("unknown event: %x", evet.type);
                break;
            }
        }

        // compute the delta time this frame took
        time = tb_cache_time_spak() - time;

        // wait out the rest of the frame budget
        if (delay > (tb_size_t)time) SDL_Delay(delay - (tb_size_t)time);
    }

    // done exit: run the user exit callback
    if (impl->base.info.exit) impl->base.info.exit((gb_window_ref_t)impl, impl->canvas, impl->base.info.priv);
}
/* the thread pool worker loop: pulls jobs from the urgent/waiting/pending
 * queues, runs them, records per-task timing stats, and exits when the worker
 * is stopped or the semaphore wait fails
 *
 * @param priv      the tb_thread_pool_worker_t* passed at thread creation
 *
 * @return          always tb_null (thread exit value)
 */
static tb_pointer_t tb_thread_pool_worker_loop(tb_cpointer_t priv)
{
    // the worker
    tb_thread_pool_worker_t* worker = (tb_thread_pool_worker_t*)priv;

    // trace
    tb_trace_d("worker[%lu]: init", worker? worker->id : -1);

    // done
    do
    {
        // check: jobs/stats must not be initialized yet
        tb_assert_and_check_break(worker && !worker->jobs && !worker->stats);

        // the pool owning this worker
        tb_thread_pool_impl_t* impl = (tb_thread_pool_impl_t*)worker->pool;
        tb_assert_and_check_break(impl && impl->semaphore);

        // wait some time for leaving the lock (stagger worker startup by id)
        tb_msleep((worker->id + 1)* 20);

        // init jobs: the worker-local list of pulled jobs
        worker->jobs = tb_vector_init(TB_THREAD_POOL_JOBS_WORKING_GROW, tb_element_ptr(tb_null, tb_null));
        tb_assert_and_check_break(worker->jobs);

        // init stats: per-task-function timing, keyed by the done callback pointer
        worker->stats = tb_hash_map_init(TB_HASH_MAP_BUCKET_SIZE_MICRO, tb_element_ptr(tb_null, tb_null), tb_element_mem(sizeof(tb_thread_pool_job_stats_t), tb_null, tb_null));
        tb_assert_and_check_break(worker->stats);

        // loop
        while (1)
        {
            // pull jobs if be idle
            if (!tb_vector_size(worker->jobs))
            {
                // enter
                tb_spinlock_enter(&impl->lock);

                // init the pull time (accumulated by the walk_pull callbacks)
                worker->pull = 0;

                // pull from the urgent jobs first
                if (tb_list_entry_size(&impl->jobs_urgent))
                {
                    // trace
                    tb_trace_d("worker[%lu]: try pulling from urgent: %lu", worker->id, tb_list_entry_size(&impl->jobs_urgent));

                    // pull it
                    tb_remove_if_until(tb_list_entry_itor(&impl->jobs_urgent), tb_thread_pool_worker_walk_pull, worker);
                }

                // pull from the waiting jobs
                if (tb_list_entry_size(&impl->jobs_waiting))
                {
                    // trace
                    tb_trace_d("worker[%lu]: try pulling from waiting: %lu", worker->id, tb_list_entry_size(&impl->jobs_waiting));

                    // pull it
                    tb_remove_if_until(tb_list_entry_itor(&impl->jobs_waiting), tb_thread_pool_worker_walk_pull, worker);
                }

                // pull from the pending jobs and clean some finished and killed jobs
                if (tb_list_entry_size(&impl->jobs_pending))
                {
                    // trace
                    tb_trace_d("worker[%lu]: try pulling from pending: %lu", worker->id, tb_list_entry_size(&impl->jobs_pending));

                    // no jobs? try to pull from the pending jobs
                    if (!tb_vector_size(worker->jobs)) tb_remove_if(tb_list_entry_itor(&impl->jobs_pending), tb_thread_pool_worker_walk_pull_and_clean, worker);
                    // clean some finished and killed jobs
                    else tb_remove_if(tb_list_entry_itor(&impl->jobs_pending), tb_thread_pool_worker_walk_clean, worker);
                }

                // leave
                tb_spinlock_leave(&impl->lock);

                // idle? wait it
                if (!tb_vector_size(worker->jobs))
                {
                    // killed? exit the loop
                    tb_check_break(!tb_atomic_get(&worker->bstoped));

                    // trace
                    tb_trace_d("worker[%lu]: wait: ..", worker->id);

                    // wait until new jobs are posted
                    tb_long_t wait = tb_semaphore_wait(impl->semaphore, -1);
                    tb_assert_and_check_break(wait > 0);

                    // trace
                    tb_trace_d("worker[%lu]: wait: ok", worker->id);

                    // continue it: retry pulling
                    continue;
                }
                else
                {
#ifdef TB_TRACE_DEBUG
                    // update the jobs urgent size
                    tb_size_t jobs_urgent_size = tb_list_entry_size(&impl->jobs_urgent);

                    // update the jobs waiting size
                    tb_size_t jobs_waiting_size = tb_list_entry_size(&impl->jobs_waiting);

                    // update the jobs pending size
                    tb_size_t jobs_pending_size = tb_list_entry_size(&impl->jobs_pending);

                    // trace
                    // NOTE(review): these sizes are read without holding impl->lock — debug-only, so presumably tolerated
                    tb_trace_d("worker[%lu]: pull: jobs: %lu, time: %lu ms, waiting: %lu, pending: %lu, urgent: %lu", worker->id, tb_vector_size(worker->jobs), worker->pull, jobs_waiting_size, jobs_pending_size, jobs_urgent_size);
#endif
                }
            }

            // done jobs: run every pulled job
            tb_for_all (tb_thread_pool_job_t*, job, worker->jobs)
            {
                // check
                tb_assert_and_check_continue(job && job->task.done);

                // the job state: atomically claim WAITING -> WORKING, observing the previous state
                tb_size_t state = tb_atomic_fetch_and_pset(&job->state, TB_STATE_WAITING, TB_STATE_WORKING);

                // the job is waiting? work it
                if (state == TB_STATE_WAITING)
                {
                    // trace
                    tb_trace_d("worker[%lu]: done: task[%p:%s]: ..", worker->id, job->task.done, job->task.name);

                    // init the time
                    tb_hong_t time = tb_cache_time_spak();

                    // done the job
                    job->task.done((tb_thread_pool_worker_ref_t)worker, job->task.priv);

                    // computate the elapsed time
                    time = tb_cache_time_spak() - time;

                    // exists? update time and count for this task function
                    tb_size_t               itor;
                    tb_hash_map_item_ref_t  item = tb_null;
                    if (    ((itor = tb_hash_map_find(worker->stats, job->task.done)) != tb_iterator_tail(worker->stats))
                        &&  (item = (tb_hash_map_item_ref_t)tb_iterator_item(worker->stats, itor)))
                    {
                        // the stats
                        tb_thread_pool_job_stats_t* stats = (tb_thread_pool_job_stats_t*)item->data;
                        tb_assert_and_check_break(stats);

                        // update the done count
                        stats->done_count++;

                        // update the total time
                        stats->total_time += time;
                    }

                    // no item? add it
                    if (!item)
                    {
                        // init stats
                        tb_thread_pool_job_stats_t stats = {0};
                        stats.done_count = 1;
                        stats.total_time = time;

                        // add stats
                        tb_hash_map_insert(worker->stats, job->task.done, &stats);
                    }

#ifdef TB_TRACE_DEBUG
                    tb_size_t done_count = 0;
                    tb_hize_t total_time = 0;
                    tb_thread_pool_job_stats_t* stats = (tb_thread_pool_job_stats_t*)tb_hash_map_get(worker->stats, job->task.done);
                    if (stats)
                    {
                        done_count = stats->done_count;
                        total_time = stats->total_time;
                    }

                    // trace
                    // NOTE(review): if the lookup above fails, done_count stays 0 and the
                    // average divides by zero — the entry was just inserted so this should
                    // not happen in practice, but it is not guarded; confirm.
                    tb_trace_d("worker[%lu]: done: task[%p:%s]: time: %lld ms, average: %lld ms, count: %lu", worker->id, job->task.done, job->task.name, time, (total_time / (tb_hize_t)done_count), done_count);
#endif

                    // update the job state
                    tb_atomic_set(&job->state, TB_STATE_FINISHED);
                }
                // the job is killing? work it
                else if (state == TB_STATE_KILLING)
                {
                    // update the job state
                    tb_atomic_set(&job->state, TB_STATE_KILLED);
                }
            }

            // clear jobs for the next pull round
            tb_vector_clear(worker->jobs);
        }

    } while (0);

    // exit worker
    if (worker)
    {
        // trace
        tb_trace_d("worker[%lu]: exit", worker->id);

        // stoped: publish that this worker has stopped
        tb_atomic_set(&worker->bstoped, 1);

        // exit all private data in reverse registration order
        tb_size_t i = 0;
        tb_size_t n = tb_arrayn(worker->priv);
        for (i = 0; i < n; i++)
        {
            // the private data
            tb_thread_pool_worker_priv_t* priv = &worker->priv[n - i - 1];

            // exit it
            if (priv->exit) priv->exit((tb_thread_pool_worker_ref_t)worker, priv->priv);

            // clear it
            priv->exit = tb_null;
            priv->priv = tb_null;
        }

        // exit stats
        if (worker->stats) tb_hash_map_exit(worker->stats);
        worker->stats = tb_null;

        // exit jobs
        if (worker->jobs) tb_vector_exit(worker->jobs);
        worker->jobs = tb_null;
    }

    // exit this thread
    tb_thread_return(tb_null);
    return tb_null;
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * helper
 */

/* the current cached time in seconds, for dns cache expiry stamps
 *
 * @return  the cached time reduced from milliseconds to seconds
 */
static __tb_inline__ tb_size_t tb_dns_cache_now()
{
    // refresh and fetch the cached time in ms, then reduce to seconds
    tb_hong_t now_ms = tb_cache_time_spak();
    return (tb_size_t)(now_ms / 1000);
}