// // Purpose: thread-local event manager set event (all) // void SB_Ms_Tl_Event_Mgr::set_event_all(int pv_event) { const char *WHERE = "SB_Ms_Event_Mgr::set_event_all"; int lv_status; SB_Ms_Event_Mgr::Map_All_Entry_Type *lp_all_list_entry; lv_status = SB_Ms_Event_Mgr::cv_sl_map.lock(); SB_util_assert_ieq(lv_status, 0); lp_all_list_entry = reinterpret_cast<SB_Ms_Event_Mgr::Map_All_Entry_Type *> (SB_Ms_Event_Mgr::cv_all_list.head()); if (gv_ms_trace_events && (lp_all_list_entry == NULL)) trace_where_printf(WHERE, "all=EMPTY\n"); while (lp_all_list_entry != NULL) { if (gv_ms_trace_events) trace_where_printf(WHERE, "id=%ld, mgr=%p\n", lp_all_list_entry->iv_link.iv_id.l, pfp(lp_all_list_entry->ip_mgr)); lp_all_list_entry->ip_mgr->set_event(pv_event, NULL); lp_all_list_entry = reinterpret_cast<SB_Ms_Event_Mgr::Map_All_Entry_Type *> (lp_all_list_entry->iv_link.ip_next); } lv_status = SB_Ms_Event_Mgr::cv_sl_map.unlock(); SB_util_assert_ieq(lv_status, 0); }
// // Purpose: thread-local event manager set event (registered) // void SB_Ms_Tl_Event_Mgr::set_event_reg(int pv_event, bool *pp_done) { const char *WHERE = "SB_Ms_Event_Mgr::set_event_reg"; int lv_status; int lv_event; switch (pv_event) { case LREQ: lv_event = SB_Ms_Event_Mgr::EVENT_LREQ; break; case LDONE: lv_event = SB_Ms_Event_Mgr::EVENT_LDONE; break; default: lv_event = -1; // touch SB_util_abort("invalid pv_event"); // sw fault } lv_status = SB_Ms_Event_Mgr::cv_sl_map.lock(); SB_util_assert_ieq(lv_status, 0); SB_Lmap_Enum lv_enum(&SB_Ms_Event_Mgr::ca_reg_map[lv_event]); while (lv_enum.more()) { SB_Ms_Event_Mgr::Map_Reg_Entry_Type *lp_reg_entry = reinterpret_cast<SB_Ms_Event_Mgr::Map_Reg_Entry_Type *>(lv_enum.next()); if (gv_ms_trace_events) trace_where_printf(WHERE, "id=%ld, mgr=%p\n", lp_reg_entry->iv_link.iv_id.l, pfp(lp_reg_entry->ip_mgr)); lp_reg_entry->ip_mgr->set_event(pv_event, pp_done); } lv_status = SB_Ms_Event_Mgr::cv_sl_map.unlock(); SB_util_assert_ieq(lv_status, 0); }
// // Purpose: event manager - register event // bool SB_Ms_Event_Mgr::register_event(int pv_event) { const char *WHERE = "SB_Ms_Event_Mgr::register_event"; int lv_status; if (gv_ms_trace_events) trace_where_printf(WHERE, "event=0x%x\n", pv_event); int lv_event; switch (pv_event) { case LREQ: lv_event = EVENT_LREQ; break; case LDONE: lv_event = EVENT_LDONE; break; default: lv_event = -1; // touch SB_util_abort("invalid pv_event"); // sw fault } long lv_id = SB_Thread::Sthr::self_id(); Map_Reg_Entry_Type *lp_reg_entry = static_cast<Map_Reg_Entry_Type *>(ca_reg_map[lv_event].get(lv_id)); if (lp_reg_entry == NULL) { ia_reg_entry[lv_event].iv_link.iv_id.l = lv_id; ia_reg_entry[lv_event].ip_mgr = this; lv_status = cv_sl_map.lock(); SB_util_assert_ieq(lv_status, 0); ca_reg_map[lv_event].put(reinterpret_cast<SB_LML_Type *>(&ia_reg_entry[lv_event].iv_link)); lv_status = cv_sl_map.unlock(); SB_util_assert_ieq(lv_status, 0); return true; } else return false; }
//
// Start (or restart) this timer.
//
// Computes the pop time (now + iv_interval), hashes it to a wheel slot,
// and inserts the timer into that slot's singly-linked list, which is
// kept sorted by ascending pop time. A running timer is first canceled
// so it is never linked twice.
//
void SB_Timer::Timer::start() {
    const char *WHERE = "Timer::start";
    char la_fmt_pop[100];
    Timer *lp_curr;
    Timer *lp_next;
    const char *lp_trace_type;
    Slot_Type lv_slot;
    int lv_status;

    if (iv_running) {
        lp_trace_type = "(re)start";
        // unlink the running timer first; true presumably tells
        // cancel_int to take cv_mutex itself (check_timers passes
        // false while already holding it) — confirm
        cancel_int(true);
    } else
        lp_trace_type = "start";
    iv_pop_time.tic_set_now_add(iv_interval);
    lv_slot = hash(iv_pop_time);
    if (cv_trace_enabled)
        trace_where_printf(WHERE,
                           "%s, timer=%p, user-param=%ld, interval=" PF64
                           ", pop time=%s, slot=%d\n",
                           lp_trace_type, pfp(this), iv_user_param,
                           iv_interval, format_pop_time(la_fmt_pop), lv_slot);
    lv_status = cv_mutex.lock();
    SB_util_assert_ieq(lv_status, 0); // sw fault
    lp_curr = ca_slots[lv_slot];
    // is it the only one in this slot?
    if (lp_curr == NULL) {
        ca_slots[lv_slot] = this;
        ip_next = NULL;
    } else if (iv_pop_time.ts_lt(lp_curr->iv_pop_time)) {
        // should it go first in this slot?
        ca_slots[lv_slot] = this;
        ip_next = lp_curr;
    } else {
        // it goes somewhere after the first
        lp_next = lp_curr->ip_next;
        while ((lp_next != NULL) && (iv_pop_time.ts_ge(lp_next->iv_pop_time))) {
            lp_curr = lp_next;
            lp_next = lp_curr->ip_next;
        }
        lp_curr->ip_next = this;
        ip_next = lp_next;
    }
    iv_running = true;
    lv_status = cv_mutex.unlock();
    SB_util_assert_ieq(lv_status, 0); // sw fault
}
//
// Pop all expired timers.
//
// Scans wheel slots from the last slot checked through the slot for
// "now". Every timer whose pop time is <= now is unlinked and its
// handler's handle_timeout() is invoked; the scan of a slot stops at
// the first timer popping in the future (slot lists are sorted by
// ascending pop time — see start()).
//
// NOTE(review): handle_timeout() runs while cv_mutex is held, so the
// callback must not re-enter timer APIs that take cv_mutex — confirm
// against the handler contract.
//
void SB_Timer::Timer::check_timers() {
    const char *WHERE = "Timer::check_timers";
    char la_fmt_pop[100];
    Timer *lp_curr;
    Timer *lp_next;
    Time_Stamp lv_now;      // presumably captures "now" at construction — confirm
    Slot_Type lv_now_slot;
    Slot_Type lv_slot;
    int lv_status;

    lp_next = NULL;
    lv_now_slot = hash(lv_now);
    lv_status = cv_mutex.lock();
    SB_util_assert_ieq(lv_status, 0); // sw fault
    for (lv_slot = cv_last_slot_checked;
         lv_slot != (lv_now_slot + 1) % MAX_SLOTS;
         lv_slot = (lv_slot + 1) % MAX_SLOTS) {
        lp_curr = ca_slots[lv_slot];
        while (lp_curr != NULL) {
            //
            // we're done if the timer pops later than now.
            //
            if (lp_curr->iv_pop_time.ts_gt(lv_now)) {
                lv_status = cv_mutex.unlock();
                SB_util_assert_ieq(lv_status, 0); // sw fault
                if (cv_trace_enabled)
                    trace_where_printf(WHERE, "no timers ready\n");
                return;
            }
            // grab the link first — cancel_int() unlinks lp_curr
            lp_next = lp_curr->ip_next;
            // false presumably means "cv_mutex already held" (start()
            // passes true before locking) — confirm
            lp_curr->cancel_int(false);
            if (cv_trace_enabled)
                trace_where_printf(WHERE,
                                   "timer=%p, user-param=%ld, pop time=%s\n",
                                   pfp(lp_curr), lp_curr->iv_user_param,
                                   lp_curr->format_pop_time(la_fmt_pop));
            lp_curr->ip_th->handle_timeout(lp_curr);
            lp_curr = lp_next;
        }
        if (lv_slot != lv_now_slot) {
            // Only increment if not at "now". A new timer might be
            // set in the current time slot before "now" goes to the next
            // slot so we want to be sure and check it again.
            cv_last_slot_checked = (cv_last_slot_checked + 1) % MAX_SLOTS;
        }
    }
    lv_status = cv_mutex.unlock();
    SB_util_assert_ieq(lv_status, 0); // sw fault
}
//
// Return the number of tics to wait before the next timer could pop.
//
// Scans the wheel slots between the last slot checked and the default
// wake slot; the first non-empty slot determines the wait (its first
// timer pops soonest since slot lists are sorted — see start()),
// clamped to zero if already due. If no timer is found in range,
// DEFAULT_WAIT_TICS is returned. The very first call always returns
// one second.
//
SB_Timer::Tics SB_Timer::Timer::get_wait_time() {
    const char *WHERE = "Timer::get_wait_time";
    Timer *lp_curr;
    // NOTE(review): read/written without cv_mutex held — benign only
    // if the first call cannot race another call; confirm.
    static bool lv_1st_time = true;
    Slot_Type lv_default_slot;
    Tics lv_result;
    Slot_Type lv_slot;
    int lv_status;

    if (lv_1st_time) {
        // first call: wait one second regardless of timer state
        lv_1st_time = false;
        lv_result = Time_Stamp::TICS_PER_SEC;
        if (cv_trace_enabled)
            trace_where_printf(WHERE, "result=" PF64 "\n", lv_result);
        return lv_result;
    }
    lv_status = cv_mutex.lock();
    SB_util_assert_ieq(lv_status, 0); // sw fault
    lv_result = DEFAULT_WAIT_TICS;
    lv_default_slot = (cv_last_slot_checked + DEFAULT_SLOT_COUNT) % MAX_SLOTS;
    //
    // check for any timers ready to pop between the last slot checked
    // and the slot of the default wake time.
    //
    for (lv_slot = cv_last_slot_checked;
         lv_slot != lv_default_slot;
         lv_slot = (lv_slot + 1) % MAX_SLOTS) {
        lp_curr = ca_slots[lv_slot];
        if (lp_curr != NULL) {
            Time_Stamp lv_now; // presumably captures current time — confirm
            lv_result = lp_curr->iv_pop_time.ts_sub(lv_now);
            if (lv_result < 0)
                lv_result = 0; // already due — don't wait
            if (cv_trace_enabled)
                trace_where_printf(WHERE, "result=" PF64 "\n", lv_result);
            lv_status = cv_mutex.unlock();
            SB_util_assert_ieq(lv_status, 0); // sw fault
            return lv_result;
        }
    }
    if (cv_trace_enabled)
        trace_where_printf(WHERE, "result=" PF64 " (default)\n", lv_result);
    lv_status = cv_mutex.unlock();
    SB_util_assert_ieq(lv_status, 0); // sw fault
    return lv_result;
}
void SB_Timer::Timer::init() { Slot_Type lv_slot; int lv_status; lv_status = cv_mutex.lock(); SB_util_assert_ieq(lv_status, 0); // sw fault for (lv_slot = 0; lv_slot < MAX_SLOTS; lv_slot++) ca_slots[lv_slot] = NULL; lv_status = cv_mutex.unlock(); SB_util_assert_ieq(lv_status, 0); // sw fault }
// // wait for TIMER_SIG and process timer-list // void SB_Timer_Thread::run() { const char *WHERE = "SB_Timer_Thread::run"; int lv_err; int lv_sig; sigset_t lv_set; int lv_status; if (gv_ms_trace_timer) trace_where_printf(WHERE, "timer sig thread started\n"); iv_running = true; iv_cv.signal(true); // need lock sigemptyset(&lv_set); sigaddset(&lv_set, TIMER_SIG); while (!iv_shutdown) { lv_err = sigwait(&lv_set, &lv_sig); SB_util_assert_ieq(lv_err, 0); if (gv_ms_trace_timer) trace_where_printf(WHERE, "sigwait returned sig=%d\n", lv_sig); if (iv_shutdown) break; if (lv_sig != TIMER_SIG) continue; lv_status = gv_timer_mutex.lock(); SB_util_assert_ieq(lv_status, 0); sb_timer_timer_list_complete(WHERE); if (gp_timer_head != NULL) { if (gv_ms_trace_timer) sb_timer_timer_list_print(); for (;;) { if (gp_timer_head == NULL) break; // restart timer if (sb_timer_setitimer(WHERE, gp_timer_head->iv_to)) break; sb_timer_timer_list_complete(WHERE); } } lv_status = gv_timer_mutex.unlock(); SB_util_assert_ieq(lv_status, 0); } if (gv_ms_trace_timer) trace_where_printf(WHERE, "EXITING timer sig thread\n"); iv_running = false; }
// // Purpose: event manager - remove_from_event_all // void SB_Ms_Event_Mgr::remove_from_event_all() { const char *WHERE = "SB_Ms_Event_Mgr::remove_from_event_all"; int lv_status; if (gv_ms_trace_events) trace_where_printf(WHERE, "id=%ld, mgr=%p\n", iv_id, pfp(this)); lv_status = cv_sl_map.lock(); SB_util_assert_ieq(lv_status, 0); cv_all_list.remove_list(&iv_all_list_entry.iv_link); lv_status = cv_sl_map.unlock(); SB_util_assert_ieq(lv_status, 0); }
//
// Return the key of the next item in the map, or NULL when the
// enumeration is exhausted.
//
char *SB_Smap_Enum::next() {
    SB_Smap::SML_Type *lp_item;
    int lv_buckets;
    int lv_hash;

    lp_item = ip_item;
    lv_hash = iv_hash;
    // fail fast if the map was modified since this enum was created
    SB_util_assert_ieq(iv_mod, ip_map->iv_mod); // sw fault
    if (iv_inx >= iv_count)
        return NULL;
    lv_buckets = ip_map->iv_buckets;
    // advance across buckets until a non-empty chain is found
    for (; lv_hash < lv_buckets; lv_hash++) {
        if (lp_item != NULL) {
            iv_inx++;
            break;
        }
        // NOTE(review): reads ipp_HT[lv_hash+1] — in bounds only if the
        // hash table is allocated with iv_buckets+1 entries; confirm
        // against the SB_Smap constructor.
        lp_item = ip_map->ipp_HT[lv_hash+1];
    }
    // persist the cursor position
    ip_item = lp_item;
    iv_hash = lv_hash;
    if (lp_item != NULL) {
        // step the cursor past the item being returned
        ip_item = reinterpret_cast<SB_Smap::SML_Type *>(lp_item->iv_link.ip_next);
        return lp_item->ip_key;
    }
    return NULL;
}
// // set SRE fields // void sb_timer_set_sre_tpop(BMS_SRE_TPOP *pp_sre, void *pp_tle) { int lv_status; Timer_TLE_Type *lp_tle; lp_tle = static_cast<Timer_TLE_Type *>(pp_tle); pp_sre->sre_tleId = lp_tle->iv_tleid; pp_sre->sre_tleTOVal = lp_tle->iv_toval; pp_sre->sre_tleType = 0; pp_sre->sre_tleParm1 = lp_tle->iv_parm1; pp_sre->sre_tleParm2 = lp_tle->iv_parm2; lv_status = gv_timer_mutex.lock(); SB_util_assert_ieq(lv_status, 0); sb_timer_tle_free(lp_tle); lv_status = gv_timer_mutex.unlock(); SB_util_assert_ieq(lv_status, 0); }
//
// Scoped-mutex destructor: release the mutex if this scope still holds it.
//
SB_Thread::Scoped_Mutex::~Scoped_Mutex() {
    if (!iv_lock)
        return; // already released via an explicit unlock
    int lv_unlock_ret = ir_mutex.unlock();
    SB_util_assert_ieq(lv_unlock_ret, 0); // sw fault
    lv_unlock_ret = lv_unlock_ret; // touch (in case assert disabled)
}
//
// External wakeup thread main loop: waits for cv_sig and forwards any
// event value carried in the signal's sival_int payload to all event
// managers. A zero payload is only sent by shutdown() and terminates
// the loop.
//
void Pctl_Ext_Wakeup_Thread::run() {
    const char *WHERE = "Pctl_Ext_Wakeup_Thread::run";
    int lv_err;
    siginfo_t lv_info;
    sigset_t lv_set;

    sigemptyset(&lv_set);
    sigaddset(&lv_set, cv_sig);
    while (!cv_shutdown) {
        // sigwaitinfo returns the signal number on success, -1 on error
        lv_err = sigwaitinfo(&lv_set, &lv_info);
        if (gv_ms_trace_params)
            trace_where_printf(WHERE,
                               "sigwait returned err=%d, sig=%d, val=%d\n",
                               lv_err, lv_info.si_signo, lv_info.si_int);
        if ((lv_err == -1) && (errno == EINTR))
            continue; // interrupted — retry the wait
        SB_util_assert_ieq(lv_err, cv_sig);
        if (lv_info.si_int) {
            // non-zero payload: broadcast the event to every manager
            gv_ms_event_mgr.set_event_all(static_cast<short>(lv_info.si_int));
            continue;
        }
        // zero payload: shutdown poke (see shutdown())
        if (cv_shutdown)
            break;
    }
    if (gv_ms_trace_params)
        trace_where_printf(WHERE, "EXITING ext wakeup thread\n");
}
// // Purpose: destructor event-manager // SB_Ms_Event_Mgr::~SB_Ms_Event_Mgr() { const char *WHERE = "SB_Ms_Event_Mgr::~SB_Ms_Event_Mgr"; int lv_status; if (gv_ms_trace_events) trace_where_printf(WHERE, "ENTER id=%ld, mgr=%p\n", iv_id, pfp(this)); if (!iv_mutex_locked) { lv_status = cv_sl_map.lock(); SB_util_assert_ieq(lv_status, 0); } cv_all_list.remove_list(&iv_all_list_entry.iv_link); cv_all_map.remove(iv_all_map_entry.iv_link.iv_id.l); if (!iv_mutex_locked) { lv_status = cv_sl_map.unlock(); SB_util_assert_ieq(lv_status, 0); } for (int lv_event = 0; lv_event < EVENT_MAX; lv_event++) ca_reg_map[lv_event].remove(ia_reg_entry[lv_event].iv_link.iv_id.l); if (iv_pin >= 0) { Map_Pin_Entry_Type *lp_entry = static_cast<Map_Pin_Entry_Type *>(cv_pin_map.remove(iv_pin)); SB_util_assert_peq(lp_entry, &iv_pin_entry); // sw fault lp_entry = lp_entry; // touch (in case assert disabled) if (gv_ms_trace_events) trace_where_printf(WHERE, "id=%ld, mgr=%p, deleting pin=%d\n", iv_id, pfp(this), iv_pin); } if (iv_group_entry.ip_map != NULL) delete iv_group_entry.ip_map; // Clear TLS if (iv_tls_inx >= 0) { lv_status = SB_Thread::Sthr::specific_set(iv_tls_inx, NULL); SB_util_assert_ieq(lv_status, 0); } // destroy CV do { lv_status = iv_cv.destroy(); if (lv_status == EBUSY) usleep(100); } while (lv_status); if (gv_ms_trace_events) trace_where_printf(WHERE, "EXIT id=%ld, mgr=%p\n", iv_id, pfp(this)); }
//
// Acquire the mutex unless this scope already holds it.
//
void SB_Thread::Scoped_Mutex::lock() {
    if (iv_lock)
        return; // already held by this scope — nothing to do
    int lv_lock_ret = ir_mutex.lock();
    SB_util_assert_ieq(lv_lock_ret, 0); // sw fault
    lv_lock_ret = lv_lock_ret; // touch (in case assert disabled)
    iv_lock = true;
}
//
// Suspend this thread: queue a suspend request on the global rs queue
// and deliver SIGUSR1 so sig_rs() processes it. The rs signal set is
// blocked in the calling thread around the push+kill — presumably so
// the handler cannot run here and consume the queue entry before the
// target sees it; confirm.
//
void SB_Thread::Thread::suspend() {
    int lv_rc = pthread_sigmask(SIG_BLOCK, &gv_rs_sigset, NULL);
    SB_util_assert_ine(lv_rc, -1);
    gv_rs_queue.push(std::make_pair(static_cast<int>(RS_SUSPEND), this));
    // member kill() — presumably delivers SIGUSR1 to this thread's id
    lv_rc = kill(SIGUSR1);
    SB_util_assert_ieq(lv_rc, 0);
    lv_rc = pthread_sigmask(SIG_UNBLOCK, &gv_rs_sigset, NULL);
    SB_util_assert_ine(lv_rc, -1);
}
void *sb_timer_comp_q_remove() { SB_Timer_Comp_Queue *lp_comp_q; Timer_TLE_Type *lp_tle; int lv_status; lv_status = gv_timer_mutex.lock(); SB_util_assert_ieq(lv_status, 0); lp_comp_q = sb_timer_comp_q_get(); lp_tle = static_cast<Timer_TLE_Type *>(lp_comp_q->remove()); if (lp_tle != NULL) lp_tle->iv_on_list = LIST_NONE; lv_status = gv_timer_mutex.unlock(); SB_util_assert_ieq(lv_status, 0); if (gv_ms_trace_timer) sb_timer_comp_q_print(lp_comp_q); return lp_tle; }
// resume/suspend signal-handler static void sig_rs(int, siginfo_t *, void *) { int lv_status; if (!gv_rs_queue.empty()) { Rs_Type lv_front = gv_rs_queue.front(); gv_rs_queue.pop(); int lv_action = lv_front.first; SB_Thread::Thread *lp_thr = lv_front.second; SB_Thread::CV *lp_cv = reinterpret_cast<SB_Thread::CV *>(lp_thr->ip_rs); if (lv_action == RS_SUSPEND) { lv_status = lp_cv->wait(true); // need lock SB_util_assert_ieq(lv_status, 0); } else { // strict signal() does not seem to always work lv_status = lp_cv->broadcast(true); // need lock SB_util_assert_ieq(lv_status, 0); } } }
// // Purpose: return an ms error // short ms_err_rtn(short pv_fserr) { if (pv_fserr != XZFIL_ERR_OK) { SB_UTRACE_API_ADD2(SB_UTRACE_API_OP_MS_EXIT, pv_fserr); if (gv_ms_trace_errors) trace_printf("setting ms ret=%d\n", pv_fserr); if (gv_ms_assert_error) SB_util_assert_ieq(pv_fserr, XZFIL_ERR_OK); // sw fault } return pv_fserr; }
//
// Stop the external wakeup thread: raise the shutdown flag, poke the
// thread with its wakeup signal (zero payload), and join it.
//
void Pctl_Ext_Wakeup_Thread::shutdown() {
    const char *WHERE = "Pctl_Ext_Wakeup_Thread::shutdown";

    if (gv_ms_trace_params)
        trace_where_printf(WHERE, "shutdown\n");
    cv_shutdown = true;
    if (cp_thread != NULL) {
        sigval lv_val;
        lv_val.sival_int = 0; // zero payload => run() treats it as shutdown
        int lv_err = sigqueue(cv_pid, cv_sig, lv_val);
        SB_util_assert_ieq(lv_err, 0);
        void *lp_result;
        int lv_status = cp_thread->join(&lp_result);
        SB_util_assert_ieq(lv_status, 0);
        delete cp_thread;
    }
    cp_thread = NULL;
}
//
// Initialize the external wakeup signal and block it in the calling
// thread (threads created afterwards inherit the mask).
//
void Pctl_Ext_Wakeup_Thread::init() {
    sigset_t lv_block_set;

    cv_sig = SIGRTMAX - 3;
    sigemptyset(&lv_block_set);
    sigaddset(&lv_block_set, cv_sig);
    int lv_err = pthread_sigmask(SIG_BLOCK, &lv_block_set, NULL);
    SB_util_assert_ieq(lv_err, 0); // sw fault
}
void SB_Thread::Thread::allow_suspend() { struct sigaction lv_act; memset(&lv_act, 0, sizeof(struct sigaction)); lv_act.sa_sigaction = sig_rs; // SA_SIGINFO says to use sa_sigaction // SA_NODEFER says to allow signal stacking lv_act.sa_flags = SA_SIGINFO | SA_NODEFER; int lv_rc = sigaction(SIGUSR1, &lv_act, NULL); SB_util_assert_ieq(lv_rc, 0); lv_rc = lv_rc; // touch (in case assert disabled) }
// // Purpose: event manager - shutdown // void SB_Ms_Event_Mgr::shutdown() { bool lv_done = false; int lv_status; lv_status = cv_sl_map.lock(); SB_util_assert_ieq(lv_status, 0); do { SB_Lmap_Enum *lp_enum = cv_all_map.keys(); if (lp_enum->more()) { Map_All_Entry_Type *lp_all_map_entry = reinterpret_cast<Map_All_Entry_Type *>(lp_enum->next()); // entry will be removed from table on delete lp_all_map_entry->ip_mgr->iv_mutex_locked = true; delete lp_all_map_entry->ip_mgr; } else lv_done = true; delete lp_enum; } while (!lv_done); lv_status = cv_sl_map.unlock(); SB_util_assert_ieq(lv_status, 0); }
// // Purpose: constructor event-manager // SB_Ms_Event_Mgr::SB_Ms_Event_Mgr(long pv_id, int pv_tls_inx) : ip_waiter_next(NULL), ip_waiter_prev(NULL), iv_awake(0), iv_awake_event(0), iv_mutex_locked(false), iv_replies(0), iv_tls_inx(pv_tls_inx), iv_wait_start_time(0), iv_wait_us(0) { const char *WHERE = "SB_Ms_Event_Mgr::SB_Ms_Event_Mgr"; int lv_inx; int lv_status; iv_cv.setname("cv-SB_Ms_Event_Mgr::iv_cv"); iv_event_mutex.setname("mutex-SB_Ms_Event_Mgr::iv_event_mutex"); for (lv_inx = 0; lv_inx < TLS_DTOR_MAX; lv_inx++) { ia_tls_dtor[lv_inx].ip_dtor = NULL; ia_tls_dtor_data[lv_inx] = NULL; } iv_group = -1; iv_pin = -1; if (pv_id < 0) iv_id = SB_Thread::Sthr::self_id(); else iv_id = pv_id; iv_all_list_entry.iv_link.iv_id.l = iv_id; iv_all_list_entry.ip_mgr = this; iv_all_map_entry.iv_link.iv_id.l = iv_id; iv_all_map_entry.ip_mgr = this; for (int lv_event = 0; lv_event < EVENT_MAX; lv_event++) memset(&ia_reg_entry[lv_event], 0, sizeof(ia_reg_entry[lv_event])); lv_status = cv_sl_map.lock(); SB_util_assert_ieq(lv_status, 0); cv_all_list.add(&iv_all_list_entry.iv_link); SB_util_assert_peq(cv_all_map.get(iv_id), NULL); cv_all_map.put(reinterpret_cast<SB_LML_Type *>(&iv_all_map_entry.iv_link)); lv_status = cv_sl_map.unlock(); SB_util_assert_ieq(lv_status, 0); iv_group_entry.ip_map = NULL; if (gv_ms_trace_events) trace_where_printf(WHERE, "id=%ld, mgr=%p\n", iv_id, pfp(this)); }
//
// Invoke pv_cb on every timer in every hash-wheel slot (under the mutex).
//
void SB_Timer::Timer::print_timers(Print_Timer_Cb pv_cb) {
    int lv_mutex_status = cv_mutex.lock();
    SB_util_assert_ieq(lv_mutex_status, 0); // sw fault
    for (Slot_Type lv_inx = 0; lv_inx < MAX_SLOTS; lv_inx++) {
        for (Timer *lp_timer = ca_slots[lv_inx];
             lp_timer != NULL;
             lp_timer = lp_timer->ip_next)
            pv_cb(*lp_timer);
    }
    lv_mutex_status = cv_mutex.unlock();
    SB_util_assert_ieq(lv_mutex_status, 0); // sw fault
}
//
// Stop the timer thread: raise the shutdown flag and keep signaling
// TIMER_SIG until the thread notices and clears iv_running.
//
void SB_Timer_Thread::shutdown() {
    iv_shutdown = true;
    while (iv_running) {
        int lv_kill_status = kill(TIMER_SIG);
        if (lv_kill_status == ESRCH)
            SB_util_assert(!iv_running); // thread already exited
        else
            SB_util_assert_ieq(lv_kill_status, 0);
        SB_Thread::Sthr::usleep(100); // shutdown
    }
}
// // Purpose: thread-local event manager destructor // void SB_Ms_Tl_Event_Mgr::dtor(void *pp_mgr) { SB_Ms_Event_Mgr *lp_mgr = static_cast<SB_Ms_Event_Mgr *>(pp_mgr); if (lp_mgr != NULL) { // only delete if it hasn't already been deleted long lv_id = SB_Thread::Sthr::self_id(); if (SB_Ms_Event_Mgr::cv_all_map.get(lv_id) != NULL) { int lv_status = SB_Thread::Sthr::specific_set(lp_mgr->iv_tls_inx, pp_mgr); // put it back! SB_util_assert_ieq(lv_status, 0); lp_mgr->call_tls_dtors(); delete lp_mgr; } } }
void SB_Thread::Thread::stop() { if (ip_id != NULL) { // ignore if nothing started #ifdef SB_THREAD_PRINT_THREAD_CALLS trace_printf("Thread::stop() ENTRY, name=%s\n", ip_name); #endif int lv_ret = Sthr::cancel(ip_id); #ifdef SB_THREAD_PRINT_THREAD_CALLS trace_printf("Thread::stop() EXIT, ret=%d\n", lv_ret); #endif SB_util_assert_ieq(lv_ret, 0); // sw fault lv_ret = lv_ret; // touch (in case assert disabled) ip_id = NULL; } }
// // timer-module init // void sb_timer_init() { int lv_err; sigset_t lv_set; // need to block signal so that other threads will get block too sigemptyset(&lv_set); sigaddset(&lv_set, TIMER_SIG); lv_err = pthread_sigmask(SIG_BLOCK, &lv_set, NULL); SB_util_assert_ieq(lv_err, 0); // it_interval isn't used gv_timer_to.it_interval.tv_sec = 0; gv_timer_to.it_interval.tv_usec = 0; }
// // Purpose: trace msg and return an ms error // short ms_err_rtn_msg(const char *pp_where, const char *pp_msg, short pv_fserr) { if (pv_fserr != XZFIL_ERR_OK) { SB_UTRACE_API_ADD2(SB_UTRACE_API_OP_MS_EXIT, pv_fserr); if (gv_ms_trace_errors) trace_where_printf(pp_where, "setting ms (%s) ret=%d\n", pp_msg, pv_fserr); if (gv_ms_assert_error) SB_util_assert_ieq(pv_fserr, XZFIL_ERR_OK); // sw fault } if (gv_ms_trace_params) trace_where_printf(pp_where, "%s\n", pp_msg); return ms_err_rtn(pv_fserr); }