//
// Purpose: lock-free enqueue (two-CAS algorithm in the style of
// Michael & Scott non-blocking queues).
//
// A new node is allocated for pp_item, linked after the current tail
// node with a CAS on the tail node's next field, and then iv_tail is
// swung forward to the new node with a second CAS.  The (ptr,count)
// pairs (PT) carry a modification counter to avoid the ABA problem.
//
// pp_item - item to enqueue (never freed here; caller retains ownership)
//
void SB_Lf_Queue::add(SB_QL_Type *pp_item) {
#ifdef SB_Q_ALLOW_QALLOC
    const char *WHERE = "SB_Lf_Queue::add";
#endif
    LFNT *lp_node;   // newly allocated queue node
    PT    lv_next;   // snapshot of tail node's next (ptr,count)
    PT    lv_tail;   // snapshot of iv_tail (ptr,count)
    PT    lv_temp;   // candidate value for CAS
    lp_node = new LFNT;
#ifdef SB_Q_ALLOW_QALLOC
    if (gv_ms_trace_qalloc)
        trace_where_printf(WHERE, "this=%p(%s), new=%p\n",
                           pfp(this), ia_q_name, pfp(lp_node));
#endif
    // Initialize the new node: next=(NULL,0), payload=pp_item.
    LFQ_PT_AS3(&lp_node->iv_next, NULL, 0);
    lp_node->ip_data = pp_item;
    for (;;) { // Keep trying until Enqueue is done
        LFQ_PT_AS2(&lv_tail, &iv_tail); // Read Tail.ptr and Tail.count together
        LFQ_PT_AS2(&lv_next, lv_tail.ip_ptr); // Read next ptr and count fields together
        if (lv_tail == iv_tail) { // Are tail and next consistent?
            if (lv_next.ip_ptr == NULL) { // Was Tail pointing to the last node?
                // Attempt to append: CAS the tail node's next field from
                // (NULL,count) to (new node,count+1).
                LFQ_PT_AS3(&lv_temp, &lp_node->iv_next, lv_next.iv_count + 1);
                if (LFQ_CAS(lv_tail.ip_ptr, lv_next, lv_temp)) { // Try to link node at the end of the linked list
                    break; // Enqueue is done. Exit loop
                }
            } else { // Tail was not pointing to the last node
                // Help a lagging enqueuer by advancing iv_tail one node.
                LFQ_PT_AS3(&lv_temp, lv_next.ip_ptr, lv_tail.iv_count + 1);
                LFQ_CAS(&iv_tail, lv_tail, lv_temp); // Try to swing Tail to the next node
            }
        }
    }
    // Best-effort swing of iv_tail to the inserted node; if this CAS
    // fails, some other thread already advanced the tail for us.
    LFQ_PT_AS3(&lv_temp, &lp_node->iv_next, lv_tail.iv_count + 1);
    LFQ_CAS(&iv_tail, lv_tail, lv_temp); // Enqueue is done. Try to swing Tail to the inserted node
}
//
// Print this queue's identity and size; when pv_traverse is set,
// also print each queued item with its position.
//
void SB_Queue::printself(bool pv_traverse) {
    printf("this=%p(%s), size=%d %s\n",
           pfp(this), ia_q_name, iv_count,
           (ip_head == NULL) ? "empty" : "");
    if (!pv_traverse)
        return;
    int lv_inx = 0;
    SB_QL_Type *lp_item = ip_head;
    while (lp_item != NULL) {
        printf(" inx=%d, item=%p\n", lv_inx, pfp(lp_item));
        lv_inx++;
        lp_item = lp_item->ip_next;
    }
}
// // Purpose: format phandle // figure out how much storage is needed for a formatted phandle // 0x0123456789abcdef [ - 20 // 0123456789012345. - 17 * 8 = 136 // 0123456789012345. // 0123456789012345. // 0123456789012345. // 0123456789012345. // 0123456789012345. // 0123456789012345. // 0123456789012345] // (12345678901234567890123456789012- - 34 // 0123456789/ - 11 * 3 = 33 // 0123456789/ // 0123456789) // // 20 + 136 + 34 + 33 = 222 // void msg_util_format_phandle(char *pp_buf, SB_Phandle_Type *pp_phandle) { if (pp_phandle == NULL) strcpy(pp_buf, "<null>"); else { SB_Phandle *lp_phandle = reinterpret_cast<SB_Phandle *>(pp_phandle); char la_name[SB_PHANDLE_NAME_SIZE+1]; la_name[SB_PHANDLE_NAME_SIZE] = '\0'; for (int lv_inx = 0; lv_inx < static_cast<int>(sizeof(lp_phandle->ia_name)); lv_inx++) { char lv_char = lp_phandle->ia_name[lv_inx]; if ((lv_char >= ' ') && (lv_char <= 0x7e)) la_name[lv_inx] = lv_char; else if (lv_char == 0) { la_name[lv_inx] = '\0'; break; } else la_name[lv_inx] = '.'; } #ifdef SQ_PHANDLE_VERIFIER sprintf(pp_buf, "%p [" PF64X "." PF64X "." PF64X "." PF64X "." PF64X "." PF64X "." PF64X "." PF64X "](%s-%d/%d/%d)", pfp(pp_phandle), pp_phandle->_data[0], pp_phandle->_data[1], pp_phandle->_data[2], pp_phandle->_data[3], pp_phandle->_data[4], pp_phandle->_data[5], pp_phandle->_data[6], pp_phandle->_data[7], la_name, lp_phandle->iv_nid, lp_phandle->iv_pid, lp_phandle->iv_verifier); #else sprintf(pp_buf, "%p [" PF64X "." PF64X "." PF64X "." PF64X "." PF64X "." PF64X "." PF64X "." PF64X "](%s-%d/%d)", pfp(pp_phandle), pp_phandle->_data[0], pp_phandle->_data[1], pp_phandle->_data[2], pp_phandle->_data[3], pp_phandle->_data[4], pp_phandle->_data[5], pp_phandle->_data[6], pp_phandle->_data[7], la_name, lp_phandle->iv_nid, lp_phandle->iv_pid); #endif } }
//
// Timer constructor.
//
// pp_th         - timeout handler (must not be NULL)
// pv_user_param - opaque value handed back on timeout
// pv_interval   - pop interval in tics; clamped into a range that
//                 cannot wrap the timer slots array
// pv_start      - when true, the timer is started immediately
//
SB_Timer::Timer::Timer(TH *pp_th, long pv_user_param, Tics pv_interval, bool pv_start) {
    const char *WHERE = "Timer::Timer";
    SB_util_assert_pne(pp_th, NULL);
    ip_th = pp_th;
    iv_user_param = pv_user_param;
    // Clamp the interval into [0, (MAX_SLOTS-1)*TICS_PER_SLOT] so the
    // pop time can never wrap around the slots array.
    const Tics lv_max_interval = (MAX_SLOTS-1) * TICS_PER_SLOT;
    if (pv_interval < 0)
        pv_interval = 0;
    else if (pv_interval > lv_max_interval)
        pv_interval = lv_max_interval;
    iv_interval = pv_interval;
    ip_next = NULL;
    iv_running = false;
    if (cv_trace_enabled)
        trace_where_printf(WHERE,
                           "timer=%p, user-param=%ld, interval=" PF64 ", start=%d\n",
                           pfp(this), iv_user_param, iv_interval, pv_start);
    if (pv_start)
        start();
}
void SB_Smap::printself(bool pv_traverse) { SML_Type *lp_item; int lv_hash; int lv_inx; printf("this=%p, type=%s, name=%s, size=%d, buckets=%d\n", pfp(this), ip_map_type, ia_map_name, iv_count, iv_buckets); if (pv_traverse) { lv_inx = 0; for (lv_hash = 0; lv_hash < iv_buckets; lv_hash++) { lp_item = ipp_HT[lv_hash]; for (; lp_item != NULL; lp_item = reinterpret_cast<SML_Type *>(lp_item->iv_link.ip_next)) { if (lp_item->iv_use_vvalue) printf(" inx=%d, hash=%d, key='%s', value=%p\n", lv_inx, lv_hash, lp_item->ip_key, lp_item->ip_vvalue); else printf(" inx=%d, hash=%d, key='%s', value='%s'\n", lv_inx, lv_hash, lp_item->ip_key, lp_item->ip_value); lv_inx++; } } } }
void handle_read(PacketFrom *udpfrom, const boost::system::error_code& error, const size_t bytes_recvd) { OPENVPN_LOG_UDPLINK_VERBOSE("UDPLink::handle_read: " << error.message()); PacketFrom::SPtr pfp(udpfrom); if (!halt) { if (bytes_recvd) { if (!error) { OPENVPN_LOG_UDPLINK_VERBOSE("UDP from " << pfp->sender_endpoint); pfp->buf.set_size(bytes_recvd); stats->inc_stat(SessionStats::BYTES_IN, bytes_recvd); stats->inc_stat(SessionStats::PACKETS_IN, 1); read_handler->udp_read_handler(pfp); } else { OPENVPN_LOG_UDPLINK_ERROR("UDP recv error: " << error.message()); stats->error(Error::NETWORK_RECV_ERROR); } } queue_read(pfp.release()); // reuse PacketFrom object if still available } }
// // Purpose: thread-local event manager set event (registered) // void SB_Ms_Tl_Event_Mgr::set_event_reg(int pv_event, bool *pp_done) { const char *WHERE = "SB_Ms_Event_Mgr::set_event_reg"; int lv_status; int lv_event; switch (pv_event) { case LREQ: lv_event = SB_Ms_Event_Mgr::EVENT_LREQ; break; case LDONE: lv_event = SB_Ms_Event_Mgr::EVENT_LDONE; break; default: lv_event = -1; // touch SB_util_abort("invalid pv_event"); // sw fault } lv_status = SB_Ms_Event_Mgr::cv_sl_map.lock(); SB_util_assert_ieq(lv_status, 0); SB_Lmap_Enum lv_enum(&SB_Ms_Event_Mgr::ca_reg_map[lv_event]); while (lv_enum.more()) { SB_Ms_Event_Mgr::Map_Reg_Entry_Type *lp_reg_entry = reinterpret_cast<SB_Ms_Event_Mgr::Map_Reg_Entry_Type *>(lv_enum.next()); if (gv_ms_trace_events) trace_where_printf(WHERE, "id=%ld, mgr=%p\n", lp_reg_entry->iv_link.iv_id.l, pfp(lp_reg_entry->ip_mgr)); lp_reg_entry->ip_mgr->set_event(pv_event, pp_done); } lv_status = SB_Ms_Event_Mgr::cv_sl_map.unlock(); SB_util_assert_ieq(lv_status, 0); }
// // Purpose: thread-local event manager set event (all) // void SB_Ms_Tl_Event_Mgr::set_event_all(int pv_event) { const char *WHERE = "SB_Ms_Event_Mgr::set_event_all"; int lv_status; SB_Ms_Event_Mgr::Map_All_Entry_Type *lp_all_list_entry; lv_status = SB_Ms_Event_Mgr::cv_sl_map.lock(); SB_util_assert_ieq(lv_status, 0); lp_all_list_entry = reinterpret_cast<SB_Ms_Event_Mgr::Map_All_Entry_Type *> (SB_Ms_Event_Mgr::cv_all_list.head()); if (gv_ms_trace_events && (lp_all_list_entry == NULL)) trace_where_printf(WHERE, "all=EMPTY\n"); while (lp_all_list_entry != NULL) { if (gv_ms_trace_events) trace_where_printf(WHERE, "id=%ld, mgr=%p\n", lp_all_list_entry->iv_link.iv_id.l, pfp(lp_all_list_entry->ip_mgr)); lp_all_list_entry->ip_mgr->set_event(pv_event, NULL); lp_all_list_entry = reinterpret_cast<SB_Ms_Event_Mgr::Map_All_Entry_Type *> (lp_all_list_entry->iv_link.ip_next); } lv_status = SB_Ms_Event_Mgr::cv_sl_map.unlock(); SB_util_assert_ieq(lv_status, 0); }
// // Purpose: A near close to XSIGNALTIMEOUT, but callback is called on to // SB_Export int timer_start_cb(int pv_toval, short pv_parm1, long pv_parm2, short *pp_tleid, Timer_Cb_Type pv_callback) { const char *WHERE = "timer_start_cb"; void *lp_cb; short lv_fserr; SB_API_CTR (lv_zctr, TIMER_START_CB); if (gv_ms_trace_params) { lp_cb = SB_CB_TO_PTR(pv_callback); trace_where_printf(WHERE, "ENTER toval=%d, parm1=%d(0x%x), parm2=%ld(0x%lx), tleid=%p, cb=%p\n", pv_toval, pv_parm1, pv_parm1, pv_parm2, pv_parm2, pfp(pp_tleid), lp_cb); } lv_fserr = sb_timer_start_com(WHERE, false, TIMER_TLE_KIND_CB, pv_toval, pv_parm1, pv_parm2, pp_tleid, 0, NULL, pv_callback); return ms_err_rtn(lv_fserr); }
// // Purpose: event manager - register group/pin // void SB_Ms_Event_Mgr::register_group_pin(int pv_group, int pv_pin) { const char *WHERE = "SB_Ms_Event_Mgr::register_group_pin"; if (gv_ms_trace_events) trace_where_printf(WHERE, "id=%ld, mgr=%p, group=%d, pin=%d\n", iv_id, pfp(this), pv_group, pv_pin); iv_group = pv_group; iv_pin = pv_pin; if (pv_group >= 0) { cv_group_map.lock(); Map_Group_Entry_Type *lp_group_entry = static_cast<Map_Group_Entry_Type *>(cv_group_map.get_lock(pv_group, false)); if (lp_group_entry == NULL) { iv_group_entry.iv_link.iv_id.i = pv_group; iv_group_entry.ip_map = new SB_Imap(); cv_group_map.put_lock(&iv_group_entry.iv_link, false); lp_group_entry = &iv_group_entry; } cv_group_map.unlock(); iv_group_pin_entry.iv_link.iv_id.i = pv_pin; iv_group_pin_entry.ip_mgr = this; lp_group_entry->ip_map->put(&iv_group_pin_entry.iv_link); } iv_pin_entry.iv_link.iv_id.i = pv_pin; iv_pin_entry.iv_group = pv_group; iv_pin_entry.ip_mgr = this; cv_pin_map.put(&iv_pin_entry.iv_link); }
// // Purpose: destructor event-manager // SB_Ms_Event_Mgr::~SB_Ms_Event_Mgr() { const char *WHERE = "SB_Ms_Event_Mgr::~SB_Ms_Event_Mgr"; int lv_status; if (gv_ms_trace_events) trace_where_printf(WHERE, "ENTER id=%ld, mgr=%p\n", iv_id, pfp(this)); if (!iv_mutex_locked) { lv_status = cv_sl_map.lock(); SB_util_assert_ieq(lv_status, 0); } cv_all_list.remove_list(&iv_all_list_entry.iv_link); cv_all_map.remove(iv_all_map_entry.iv_link.iv_id.l); if (!iv_mutex_locked) { lv_status = cv_sl_map.unlock(); SB_util_assert_ieq(lv_status, 0); } for (int lv_event = 0; lv_event < EVENT_MAX; lv_event++) ca_reg_map[lv_event].remove(ia_reg_entry[lv_event].iv_link.iv_id.l); if (iv_pin >= 0) { Map_Pin_Entry_Type *lp_entry = static_cast<Map_Pin_Entry_Type *>(cv_pin_map.remove(iv_pin)); SB_util_assert_peq(lp_entry, &iv_pin_entry); // sw fault lp_entry = lp_entry; // touch (in case assert disabled) if (gv_ms_trace_events) trace_where_printf(WHERE, "id=%ld, mgr=%p, deleting pin=%d\n", iv_id, pfp(this), iv_pin); } if (iv_group_entry.ip_map != NULL) delete iv_group_entry.ip_map; // Clear TLS if (iv_tls_inx >= 0) { lv_status = SB_Thread::Sthr::specific_set(iv_tls_inx, NULL); SB_util_assert_ieq(lv_status, 0); } // destroy CV do { lv_status = iv_cv.destroy(); if (lv_status == EBUSY) usleep(100); } while (lv_status); if (gv_ms_trace_events) trace_where_printf(WHERE, "EXIT id=%ld, mgr=%p\n", iv_id, pfp(this)); }
//
// Start (or restart) the timer.
//
// Computes the new pop time (now + iv_interval), hashes it to a slot,
// and inserts the timer into that slot's singly linked list, which is
// kept sorted by ascending pop time.  A timer that is already running
// is first cancelled so it cannot appear on two lists.  The slot array
// is protected by cv_mutex.
//
void SB_Timer::Timer::start() {
    const char *WHERE = "Timer::start";
    char la_fmt_pop[100];
    Timer *lp_curr;
    Timer *lp_next;
    const char *lp_trace_type;
    Slot_Type lv_slot;
    int lv_status;
    if (iv_running) {
        // Restart: pull the timer off its current slot list first.
        lp_trace_type = "(re)start";
        cancel_int(true);
    } else
        lp_trace_type = "start";
    // New pop time is "now" plus the configured interval.
    iv_pop_time.tic_set_now_add(iv_interval);
    lv_slot = hash(iv_pop_time);
    if (cv_trace_enabled)
        trace_where_printf(WHERE,
                           "%s, timer=%p, user-param=%ld, interval=" PF64 ", pop time=%s, slot=%d\n",
                           lp_trace_type, pfp(this), iv_user_param, iv_interval,
                           format_pop_time(la_fmt_pop), lv_slot);
    lv_status = cv_mutex.lock();
    SB_util_assert_ieq(lv_status, 0); // sw fault
    lp_curr = ca_slots[lv_slot];
    // is it the only one in this slot?
    if (lp_curr == NULL) {
        ca_slots[lv_slot] = this;
        ip_next = NULL;
    } else if (iv_pop_time.ts_lt(lp_curr->iv_pop_time)) {
        // should it go first in this slot?
        ca_slots[lv_slot] = this;
        ip_next = lp_curr;
    } else {
        // it goes somewhere after the first: advance until the next
        // entry pops later than this timer, then splice in between.
        lp_next = lp_curr->ip_next;
        while ((lp_next != NULL) && (iv_pop_time.ts_ge(lp_next->iv_pop_time))) {
            lp_curr = lp_next;
            lp_next = lp_curr->ip_next;
        }
        lp_curr->ip_next = this;
        ip_next = lp_next;
    }
    iv_running = true;
    lv_status = cv_mutex.unlock();
    SB_util_assert_ieq(lv_status, 0); // sw fault
}
//
// Format this timer's identity (address, user param, interval, pop
// time) into pp_buf and return pp_buf for call chaining.
//
const char *SB_Timer::Timer::format_timer(char *pp_buf) {
    char la_pop[100];
    sprintf(pp_buf,
            "timer=%p, user-param=%ld, interval=" PF64 ", pop-time=%s",
            pfp(this), iv_user_param, iv_interval,
            format_pop_time(la_pop));
    return pp_buf;
}
//
// Scan the timer slots and fire every timer whose pop time has passed.
//
// Walks slots from the last one checked up to (and including) the slot
// for "now"; within a slot, timers are sorted by pop time, so the scan
// of a slot stops (and the whole routine returns) at the first timer
// that pops in the future.  Each expired timer is cancelled and its
// handler's handle_timeout() is invoked.  The slot array is protected
// by cv_mutex.
// NOTE(review): handle_timeout() is called while cv_mutex is held —
// presumably handlers must not re-enter start()/cancel paths that
// relock it; verify.
//
void SB_Timer::Timer::check_timers() {
    const char *WHERE = "Timer::check_timers";
    char la_fmt_pop[100];
    Timer *lp_curr;
    Timer *lp_next;
    Time_Stamp lv_now;     // assumes default ctor captures the current time
    Slot_Type lv_now_slot;
    Slot_Type lv_slot;
    int lv_status;
    lp_next = NULL;
    lv_now_slot = hash(lv_now);
    lv_status = cv_mutex.lock();
    SB_util_assert_ieq(lv_status, 0); // sw fault
    // Walk circularly from the last-checked slot through the slot
    // containing "now".
    for (lv_slot = cv_last_slot_checked;
         lv_slot != (lv_now_slot + 1) % MAX_SLOTS;
         lv_slot = (lv_slot + 1) % MAX_SLOTS) {
        lp_curr = ca_slots[lv_slot];
        while (lp_curr != NULL) {
            //
            // we're done if the timer pops later than now.
            //
            if (lp_curr->iv_pop_time.ts_gt(lv_now)) {
                lv_status = cv_mutex.unlock();
                SB_util_assert_ieq(lv_status, 0); // sw fault
                if (cv_trace_enabled)
                    trace_where_printf(WHERE, "no timers ready\n");
                return;
            }
            // Save the link before cancel_int() unlinks lp_curr.
            lp_next = lp_curr->ip_next;
            lp_curr->cancel_int(false);
            if (cv_trace_enabled)
                trace_where_printf(WHERE,
                                   "timer=%p, user-param=%ld, pop time=%s\n",
                                   pfp(lp_curr), lp_curr->iv_user_param,
                                   lp_curr->format_pop_time(la_fmt_pop));
            lp_curr->ip_th->handle_timeout(lp_curr);
            lp_curr = lp_next;
        }
        if (lv_slot != lv_now_slot) {
            // Only increment if not at "now". A new timer might be
            // set in the current time slot before "now" goes to the next
            // slot so we want to be sure and check it again.
            cv_last_slot_checked = (cv_last_slot_checked + 1) % MAX_SLOTS;
        }
    }
    lv_status = cv_mutex.unlock();
    SB_util_assert_ieq(lv_status, 0); // sw fault
}
//
// Destructor: drain the queue, then free the remaining dummy node.
//
// remove() frees one retired node per successful dequeue; after the
// queue is empty, iv_tail still references the final dummy node, which
// is released here.
// NOTE(review): iv_tail.ip_ptr is a PT* that actually addresses an
// LFNT (iv_next appears to be the node's first member — see the
// reinterpret_cast in remove()); this delete relies on that layout.
//
SB_Lf_Queue::~SB_Lf_Queue() {
#ifdef SB_Q_ALLOW_QALLOC
    const char *WHERE = "SB_Lf_Queue::~SB_Lf_Queue";
#endif
    void *lp_node;
    // Dequeue everything; each removal frees a retired node.
    do {
        lp_node = remove();
    } while (lp_node != NULL);
    if (iv_tail.ip_ptr != NULL) {
#ifdef SB_Q_ALLOW_QALLOC
        if (gv_ms_trace_qalloc)
            trace_where_printf(WHERE, "this=%p(%s), delete=%p\n",
                               pfp(this), ia_q_name, pfp(iv_tail.ip_ptr));
#endif
        delete iv_tail.ip_ptr;
    }
}
// SB_Lf_Queue constructor SB_Lf_Queue::SB_Lf_Queue(const char *pp_name) : SB_Queue(pp_name) { #ifdef SB_Q_ALLOW_QALLOC const char *WHERE = "SB_Lf_Queue::SB_Lf_Queue"; #endif LFNT *lp_dummy; lp_dummy = new LFNT; #ifdef SB_Q_ALLOW_QALLOC if (gv_ms_trace_qalloc) trace_where_printf(WHERE, "this=%p(%s), new=%p\n", pfp(this), ia_q_name, pfp(lp_dummy)); #endif lp_dummy->ip_data = NULL; LFQ_PT_AS3(&lp_dummy->iv_next, NULL, 0); LFQ_PT_AS3(&iv_head, &lp_dummy->iv_next, 0); LFQ_PT_AS3(&iv_tail, &lp_dummy->iv_next, 0); }
//
// Replace the timer's opaque user parameter (traced when enabled).
//
void SB_Timer::Timer::set_param(long pv_user_param) {
    const char *WHERE = "Timer::set_param";
    if (cv_trace_enabled)
        trace_where_printf(WHERE,
                           "timer=%p, user-param=%ld\n",
                           pfp(this),
                           pv_user_param);
    iv_user_param = pv_user_param;
}
// // print timer list // void sb_timer_timer_list_print() { char la_info[50]; char la_to[40]; Timer_TLE_Type *lp_tle; Timer_TLE_Type *lp_tle_prev; lp_tle = gp_timer_head; lp_tle_prev = NULL; if (lp_tle != NULL) trace_printf("TIMER timer list\n"); while (lp_tle != NULL) { sb_timer_to_fmt(la_to, lp_tle->iv_to); switch (lp_tle->iv_kind) { case TIMER_TLE_KIND_CB: sprintf(la_info, "cb=%p", SB_CB_TO_PTR(lp_tle->iv_cb)); break; case TIMER_TLE_KIND_COMPQ: sprintf(la_info, "q=%p", pfp(lp_tle->ip_comp_q)); break; default: strcpy(la_info, "?"); SB_util_abort("invalid iv_kind"); // sw fault break; } trace_printf("tle id=%d, addr=%p, p=%p, n=%p, %s, stid=%d, ttid=%d, mgr=%p, to=%s, toval=%d, p1=%d(0x%x), p2=%ld(0x%lx)\n", lp_tle->iv_tleid, pfp(lp_tle), pfp(lp_tle->ip_prev), pfp(lp_tle->ip_next), la_info, lp_tle->iv_stid, lp_tle->iv_ttid, pfp(lp_tle->ip_mgr), la_to, lp_tle->iv_toval, lp_tle->iv_parm1, lp_tle->iv_parm1, lp_tle->iv_parm2, lp_tle->iv_parm2); SB_util_assert_peq(lp_tle->ip_prev, lp_tle_prev); lp_tle_prev = lp_tle; lp_tle = lp_tle->ip_next; } }
//
// Purpose: lock-free dequeue (Michael & Scott style).
//
// Returns the oldest item's payload, or NULL if the queue is empty.
// The node that held the returned payload becomes the new dummy node;
// the previous dummy (the node head pointed at) is freed after the
// head CAS succeeds.
//
void *SB_Lf_Queue::remove() {
#ifdef SB_Q_ALLOW_QALLOC
    const char *WHERE = "SB_Lf_Queue::remove";
#endif
    LFNT *lp_node;
    void *lp_ret;
    PT lv_head; // snapshot of iv_head (ptr,count)
    PT lv_next; // snapshot of head node's next (ptr,count)
    PT lv_tail; // snapshot of iv_tail (ptr,count)
    PT lv_temp; // candidate value for CAS
    for (;;) { // Keep trying until Dequeue is done
        LFQ_PT_AS2(&lv_head, &iv_head); // Read Head
        LFQ_PT_AS2(&lv_tail, &iv_tail); // Read Tail
        LFQ_PT_AS2(&lv_next, lv_head.ip_ptr); // Read Head.ptr->next
        if (lv_head == iv_head) { // Are head, tail, and next consistent?
            if (lv_head.ip_ptr == lv_tail.ip_ptr) { // Is queue empty or Tail falling behind?
                if (lv_next.ip_ptr == NULL) { // Is queue empty?
                    return NULL; // Queue is empty, couldn't dequeue
                }
                // Help the lagging enqueuer: advance the tail one node.
                LFQ_PT_AS3(&lv_temp, lv_next.ip_ptr, lv_tail.iv_count + 1);
                LFQ_CAS(&iv_tail, lv_tail, lv_temp); // Tail is falling behind. Try to advance it
            } else { // No need to deal with Tail
                // read value before CAS, otherwise another dequeue might free the next node
                lp_node = reinterpret_cast<LFNT *>(lv_next.ip_ptr);
                lp_ret = lp_node->ip_data;
                lp_node->ip_data = NULL; // mark removed
                LFQ_PT_AS3(&lv_temp, lv_next.ip_ptr, lv_head.iv_count + 1);
                if (LFQ_CAS(&iv_head, lv_head, lv_temp)) { // Try to swing Head to the next node
                    break; // Dequeue is done. Exit loop
                }
            }
        }
    }
#ifdef SB_Q_ALLOW_QALLOC
    if (gv_ms_trace_qalloc)
        trace_where_printf(WHERE, "this=%p(%s), delete=%p\n",
                           pfp(this), ia_q_name, pfp(lv_head.ip_ptr));
#endif
    delete lv_head.ip_ptr; // It is safe now to free the old dummy node
    return lp_ret; // Queue was not empty, dequeue succeeded
}
//
// Timer destructor: take the timer off the slot lists (if queued)
// before the object goes away, tracing when enabled.
//
SB_Timer::Timer::~Timer() {
    const char *WHERE = "Timer::~Timer";
    cancel_int(true);
    if (cv_trace_enabled)
        trace_where_printf(WHERE,
                           "timer=%p, user-param=%ld, interval=" PF64 "\n",
                           pfp(this),
                           iv_user_param,
                           iv_interval);
}
// // print completion list // void sb_timer_comp_q_print(SB_Timer_Comp_Queue *pp_comp_q) { char la_to[40]; Timer_TLE_Type *lp_tle; lp_tle = reinterpret_cast<Timer_TLE_Type *>(pp_comp_q->head()); if (lp_tle != NULL) trace_printf("TIMER comp-q q=%p, tid=%d\n", pfp(pp_comp_q), pp_comp_q->get_tid()); while (lp_tle != NULL) { sb_timer_to_fmt(la_to, lp_tle->iv_to); trace_printf("tle id=%d, addr=%p, p=%p, n=%p, q=%p, stid=%d, ttid=%d, mgr=%p, to=%s, toval=%d, p1=%d(0x%x), p2=%ld(0x%lx)\n", lp_tle->iv_tleid, pfp(lp_tle), pfp(lp_tle->iv_link.ip_prev), pfp(lp_tle->iv_link.ip_next), pfp(pp_comp_q), lp_tle->iv_stid, lp_tle->iv_ttid, pfp(lp_tle->ip_mgr), la_to, lp_tle->iv_toval, lp_tle->iv_parm1, lp_tle->iv_parm1, lp_tle->iv_parm2, lp_tle->iv_parm2); lp_tle = reinterpret_cast<Timer_TLE_Type *>(lp_tle->iv_link.ip_next); } }
void SB_Imap::printself(bool pv_traverse) { SB_ML_Type *lp_item; int lv_hash; Key_Type lv_id; int lv_inx; printf("this=%p, type=%s, name=%s, size=%d, buckets=%d\n", pfp(this), ip_map_type, ia_map_name, iv_count, iv_buckets); if (pv_traverse) { lv_inx = 0; for (lv_hash = 0; lv_hash < iv_buckets; lv_hash++) { lp_item = ipp_HT[lv_hash]; for (; lp_item != NULL; lp_item = lp_item->ip_next) { lv_id = lp_item->iv_id.i; printf(" inx=%d, Item=%p, Hash=%d, id=%d (0x%x)\n", lv_inx, pfp(lp_item), lv_hash, lv_id, lv_id); lv_inx++; } } } }
//
// Print the lock-free queue's identity and size; when pv_traverse is
// set, walk the node chain from the head.  The dummy node (NULL
// payload) is printed but not counted in inx.  The verbose variant
// (default, without PRINTSELF_NORMAL) also shows head/tail (ptr,count)
// pairs and per-node link details.
// NOTE(review): the traversal is unsynchronized — only safe while the
// queue is quiescent; verify call sites.
//
void SB_Lf_Queue::printself(bool pv_traverse) {
    LFNT *lp_next;
    LFNT *lp_node;
    SB_QL_Type *lp_item;
    int lv_count;
    int lv_inx;
    lv_count = size();
#ifdef PRINTSELF_NORMAL
    printf("this=%p(%s), size=%d\n", pfp(this), ia_q_name, lv_count);
#else
    printf("this=%p(%s), h=%p-%x, t=%p-%x, size=%d %s\n",
           pfp(this), ia_q_name,
           pfp(iv_head.ip_ptr), iv_head.iv_count,
           pfp(iv_tail.ip_ptr), iv_tail.iv_count,
           lv_count,
           (iv_head.ip_ptr == iv_tail.ip_ptr) ? "empty" : "");
#endif
    if (pv_traverse) {
        // Head points at the dummy node; follow iv_next links.
        lp_node = reinterpret_cast<LFNT *>(iv_head.ip_ptr);
        for (lv_inx = 0; lp_node != NULL; lp_node = lp_next) {
            lp_item = lp_node->ip_data;
            lp_next = reinterpret_cast<LFNT *>(lp_node->iv_next.ip_ptr);
#ifdef PRINTSELF_NORMAL
            printf(" inx=%d, Item=%p\n", lv_inx, pfp(lp_item));
#else
            int lv_id;
            if (lp_item == NULL)
                lv_id = 0;
            else
                lv_id = lp_item->iv_id.i;
            printf(" inx=%d, Node=%p, Next=%p, Count=%x, Item=%p, Id=%x %s\n",
                   lv_inx, pfp(lp_node), pfp(lp_next),
                   lp_node->iv_next.iv_count, pfp(lp_item), lv_id,
                   (lp_item == NULL) ? "dummy" : "");
#endif
            // Dummy nodes do not advance the item index.
            if (lp_item != NULL)
                lv_inx++;
        }
    }
}
//
// Enqueue a timer-list entry on the completion queue.  When parameter
// tracing is on, the on-queue timestamp is recorded (used later by
// remove_account()) and the add is traced.  Delegates the actual
// enqueue to the base class.
//
void SB_Timer_Comp_Queue::add(SB_DQL_Type *pp_item) {
    const char *WHERE = "SB_Timer_Comp_Queue::add";
    Timer_TLE_Type *lp_tle = reinterpret_cast<Timer_TLE_Type *>(pp_item);
    if (gv_ms_trace_params) {
        char la_to[40];
        gettimeofday(&lp_tle->iv_comp_q_on_tod, NULL);
        sb_timer_to_fmt(la_to, lp_tle->iv_to);
        trace_where_printf(WHERE,
                           "adding to comp-q qid=%d(%s), tleid=%d, tle=%p, to=%s\n",
                           iv_qid, ia_d_q_name,
                           lp_tle->iv_tleid, pfp(lp_tle), la_to);
    }
    SB_Ts_D_Queue::add(pp_item);
}
// // Purpose: event manager - remove_from_event_all // void SB_Ms_Event_Mgr::remove_from_event_all() { const char *WHERE = "SB_Ms_Event_Mgr::remove_from_event_all"; int lv_status; if (gv_ms_trace_events) trace_where_printf(WHERE, "id=%ld, mgr=%p\n", iv_id, pfp(this)); lv_status = cv_sl_map.lock(); SB_util_assert_ieq(lv_status, 0); cv_all_list.remove_list(&iv_all_list_entry.iv_link); lv_status = cv_sl_map.unlock(); SB_util_assert_ieq(lv_status, 0); }
//
// Print this nid/pid/verifier-keyed map's identity and size; when
// pv_traverse is set, walk every hash bucket and print each entry's
// nid/pid/verifier id.
//
// Fix: the per-entry format string ended with a stray ")" that had no
// matching "(" (cf. SB_Imap::printself's balanced "id=%d (0x%x)").
//
void SB_NPVmap::printself(bool pv_traverse) {
    SB_NPVML_Type *lp_item;
    int lv_hash;
    int lv_inx;
    printf("this=%p, type=npvmap, name=%s, size=%d, buckets=%d\n",
           pfp(this), ia_map_name, iv_count, iv_buckets);
    if (pv_traverse) {
        lv_inx = 0;
        for (lv_hash = 0; lv_hash < iv_buckets; lv_hash++) {
            lp_item = ipp_HT[lv_hash];
            for (; lp_item != NULL; lp_item = lp_item->ip_next) {
                printf(" inx=%d, Item=%p, Hash=%d, id=%d/%d/" PFVY "\n",
                       lv_inx, pfp(lp_item), lv_hash,
                       lp_item->iv_id.npv.iv_nid,
                       lp_item->iv_id.npv.iv_pid,
                       lp_item->iv_id.npv.iv_verif);
                lv_inx++;
            }
        }
    }
}
//
// Account for a timer-list entry leaving the completion queue: record
// the off-queue timestamp and trace how long (in microseconds) the
// entry spent on the queue (on-time was stamped in add()).
// NOTE(review): the trace is unconditional here, unlike add() which
// guards on gv_ms_trace_params — presumably callers only invoke this
// when tracing is enabled; verify.
//
void SB_Timer_Comp_Queue::remove_account(void *pp_item) {
    const char *WHERE = "SB_Timer_Comp_Queue::remove_account";
    Timer_TLE_Type *lp_tle;
    long long lv_delta; // queue residency in microseconds
    lp_tle = static_cast<Timer_TLE_Type *>(pp_item);
    gettimeofday(&lp_tle->iv_comp_q_off_tod, NULL);
    lv_delta = (lp_tle->iv_comp_q_off_tod.tv_sec * SB_US_PER_SEC +
                lp_tle->iv_comp_q_off_tod.tv_usec) -
               (lp_tle->iv_comp_q_on_tod.tv_sec * SB_US_PER_SEC +
                lp_tle->iv_comp_q_on_tod.tv_usec);
    trace_where_printf(WHERE,
                       "comp-q-to-de-queue qid=%d(%s), (tleid=%d, tle=%p) time=%lld us\n",
                       iv_qid, ia_d_q_name,
                       lp_tle->iv_tleid, pfp(lp_tle), lv_delta);
}
// // Purpose: emulate SIGNALTIMEOUT // SB_Export _xcc_status XSIGNALTIMEOUT(int pv_toval, short pv_parm1, long pv_parm2, short *pp_tleid, pid_t pv_tid) { const char *WHERE = "XSIGNALTIMEOUT"; SB_Ms_Event_Mgr *lp_mgr; int lv_fserr; short lv_tleid; SB_API_CTR (lv_zctr, XSIGNALTIMEOUT); if (gv_ms_trace_params) trace_where_printf(WHERE, "ENTER toval=%d, parm1=%d(0x%x), parm2=%ld(0x%lx), tleid=%p, ttid=%d\n", pv_toval, pv_parm1, pv_parm1, pv_parm2, pv_parm2, pfp(pp_tleid), pv_tid); if (pv_tid == 0) lp_mgr = gv_ms_event_mgr.get_mgr(NULL); else { lp_mgr = gv_ms_event_mgr.get_mgr_tid(pv_tid); if (lp_mgr == NULL) { if (gv_ms_trace_params) trace_where_printf(WHERE, "ttid=%d not registered\n", pv_tid); lv_fserr = XZFIL_ERR_BADERR; // CCL lv_tleid = -1; *pp_tleid = lv_tleid; if (gv_ms_trace_params) trace_where_printf(WHERE, "EXIT tleid=%d, ret=%d\n", lv_tleid, lv_fserr); RETURNFSCC(lv_fserr); } } lv_fserr = sb_timer_start_com(WHERE, true, TIMER_TLE_KIND_COMPQ, pv_toval, pv_parm1, pv_parm2, pp_tleid, pv_tid, lp_mgr, NULL); RETURNFSCC(lv_fserr); }
//
// Return the calling process's phandle in *pp_phandle.
//
// Fix: guard against a NULL pp_phandle (returning XZFIL_ERR_BOUNDSERR,
// consistent with XPROCESSHANDLE_NULLIT_) instead of passing NULL into
// ms_od_get_my_phandle().
//
// Returns XZFIL_ERR_OK on success, XZFIL_ERR_INVALIDSTATE when
// msg_init() has not been called (or shutdown has), and
// XZFIL_ERR_BOUNDSERR for a NULL phandle pointer.
//
SB_Export short XPROCESSHANDLE_GETMINE_(SB_Phandle_Type *pp_phandle) {
    const char *WHERE = "XPROCESSHANDLE_GETMINE_";
    SB_API_CTR (lv_zctr, XPROCESSHANDLE_GETMINE_);
    if (gv_ms_trace_params)
        trace_where_printf(WHERE, "ENTER phandle=%p\n", pfp(pp_phandle));
    if (!gv_ms_calls_ok)
        return ms_err_rtn_msg(WHERE, "msg_init() not called or shutdown",
                              XZFIL_ERR_INVALIDSTATE);
    if (pp_phandle == NULL)
        return ms_err_rtn_msg(WHERE, "invalid phandle",
                              XZFIL_ERR_BOUNDSERR);
    ms_od_get_my_phandle(pp_phandle);
    if (gv_ms_trace_params) {
        char la_phandle[MSG_UTIL_PHANDLE_LEN];
        msg_util_format_phandle(la_phandle, pp_phandle);
        trace_where_printf(WHERE, "EXIT OK, phandle=%s\n", la_phandle);
    }
    return XZFIL_ERR_OK;
}
//
// Null out a phandle by setting every one of its long-long words to -1.
//
// Returns XZFIL_ERR_OK on success, XZFIL_ERR_INVALIDSTATE when
// msg_init() has not been called (or shutdown has), and
// XZFIL_ERR_BOUNDSERR for a NULL phandle pointer.
//
SB_Export short XPROCESSHANDLE_NULLIT_(SB_Phandle_Type *pp_phandle) {
    const char *WHERE = "XPROCESSHANDLE_NULLIT_";
    SB_API_CTR (lv_zctr, XPROCESSHANDLE_NULLIT_);
    if (gv_ms_trace_params)
        trace_where_printf(WHERE, "ENTER phandle=%p\n", pfp(pp_phandle));
    if (!gv_ms_calls_ok)
        return ms_err_rtn_msg(WHERE, "msg_init() not called or shutdown",
                              XZFIL_ERR_INVALIDSTATE);
    if (pp_phandle == NULL)
        return ms_err_rtn_msg(WHERE, "invalid phandle",
                              XZFIL_ERR_BOUNDSERR);
    int lv_inx = 0;
    while (lv_inx < SB_PHANDLE_LL_SIZE) {
        pp_phandle->_data[lv_inx] = -1;
        lv_inx++;
    }
    if (gv_ms_trace_params)
        trace_where_printf(WHERE, "EXIT OK\n");
    return XZFIL_ERR_OK;
}