/*********************************************************************
 *
 *  smthread_t::~smthread_t()
 *
 *  Destroy smthread. The thread is already defunct by the time the
 *  object is destroyed.
 *
 *********************************************************************/
smthread_t::~smthread_t()
{
    user = NULL;

    // Only threads that take part in locking ever initialized a
    // fingerprint; tear it down symmetrically.
    if (lock_timeout() > WAIT_NOT_USED) {
        _uninitialize_fingerprint();
    }

    // By now the thread must have detached from any transaction and
    // released every per-thread cache it owned.
    w_assert2(tcb().xct == NULL);
    w_assert2(tcb().pin_count == 0);
    w_assert2(tcb()._lock_hierarchy == 0);
    w_assert2(tcb()._sdesc_cache == 0);
    w_assert2(tcb()._xct_log == 0);
}
/// Unlink this node from its list; a no-op when the node is not
/// enqueued (a detached node is self-linked). Returns `this` so the
/// call can be chained.
w_link_t* w_link_t::detach()
{
    if (_next == this) {
        // Already detached: self-linked, not on any list.
        return this;
    }

    w_assert2(_prev != this);

    // Splice our two neighbors together, then drop the list's count.
    _prev->_next = _next;
    _next->_prev = _prev;
    _list->_cnt--;

    // An emptied list must have its tail sentinel pointing at itself.
    w_assert2(_list->_cnt ||
              (_list->_tail._prev == &_list->_tail &&
               _list->_tail._next == &_list->_tail));

    // Re-link to self and forget the owning list.
    _next = this;
    _prev = this;
    _list = 0;

    return this;
}
/// A latch must be completely released (no holders, mode LATCH_NL)
/// before it is destroyed; checked only in debug builds.
latch_t::~latch_t()
{
#if W_DEBUG_LEVEL > 1
    // Copy into a local so the symbol survives for debugger inspection.
    int remaining = _total_count;
    if (remaining != 0) {
        fprintf(stderr, "t=%d\n", remaining);
    }
    w_assert2(remaining == 0); // BUG_SEMANTICS_FIX
    w_assert2(mode() == LATCH_NL);
    w_assert2(num_holders() == 0);
#endif
}
/// Remove the frame for `pid` from the buffer-pool hash table, if
/// present. Snapshots this thread's hash-table stats into `s`.
/// Returns true iff a frame was found and removed.
bool htab_remove(bf_core_m *core, bfpid_t const &pid, bf_core_m::Tstats &s)
{
    bool ret(false);
    bfcb_t *cb = core->_htab->lookup(pid);

    if (cb) {
        // find the bucket so we can acquire the lock,
        // necessary for removal.
        // also ensure pin count is zero.
        int idx = core->_htab->hash(cb->hash_func(), pid);
        bf_core_m::htab::bucket &b = core->_htab->_table[idx];

        cb->zero_pin_cnt();

        CRITICAL_SECTION(cs, b._lock);
        // BUG FIX: the result was previously stored in a throwaway
        // local, so this function always returned false and the
        // post-check below was dead code. Record it in `ret`.
        ret = core->_htab->remove(cb);
        w_assert0(ret);
        w_assert1(cb->pin_cnt() == 0);
    }

    // It's possible that it couldn't remove the item
    // because the lock is not held or the pin count is > 0
    if (ret) {
        w_assert2(cb->hash_func() == bf_core_m::htab::HASH_COUNT);
    }

    s = me()->TL_stats().bfht;
    return ret;
}
/// Release one shared (read) hold by subtracting a READER unit from
/// the packed _holders word. Caller must currently hold a read lock.
void mcs_rwlock::release_read()
{
    w_assert2(has_reader());
    // Memory barrier must precede the counter update so readers'
    // effects are visible before the lock appears free.
    membar_exit(); // flush protected modified data before releasing lock;
    // update and complete any loads by others before I do this write
    atomic_add_32(&_holders, -READER);
}
/// Insert a frame for `pid` into the buffer-pool hash table, first
/// removing any existing entry for the same pid (re-using its frame)
/// so a double-insert cannot occur. Snapshots this thread's
/// hash-table stats into `s`. Returns the insert() result, or NULL
/// when no frame could be obtained.
bfcb_t* htab_insert(bf_core_m *core, bfpid_t const &pid, bf_core_m::Tstats &s)
{
    // avoid double-insertions w/o a removal.
    bool already_there(false);
    bfcb_t *ret = core->_htab->lookup(pid);
    if (ret) {
        already_there = true;
        htab_remove(core, pid, s);
    }

    // The pid must be gone now, whether or not it was there before.
    bfcb_t *ret2 = core->_htab->lookup(pid);
    w_assert0(ret2 == NULL);

    bfcb_t *cb;
    if (already_there) {
        // Re-insert the very frame we just removed.
        cb = ret;
    } else {
        ret = NULL;
        cb = core->replacement();
        // BUG FIX: check for a NULL replacement *before* touching its
        // latch; the original dereferenced cb first, so the NULL
        // diagnostic below could never be reached without crashing.
        if (cb) {
            w_assert0(cb->latch.is_mine());
            cb->latch.latch_release();
        }
    }

    if (cb == NULL) {
        cerr << " htab_insert could not get a replacement frame " << endl;
    }

    if (cb) {
        if (cb->old_pid_valid()) {
            // it's a replacement
            // ... obsolete check removed..
        }
        cb->set_pid(pid);
        cb->zero_pin_cnt();
        ret = core->_htab->insert(cb);
        s = me()->TL_stats().bfht;
    }

#if W_DEBUG_LEVEL > 1
    // No bucket lock may leak out of this function.
    int sz = core->_htab->_size;
    for (int i = 0; i < sz; i++) {
        bf_core_m::htab::bucket &b = core->_htab->_table[i];
        w_assert2(b._lock.is_mine() == false);
    }
#endif
    return ret;
}
/// Splice this (currently unlinked) node into a list immediately
/// after prev_link, adopting prev_link's owning list.
void w_link_t::attach(w_link_t* prev_link)
{
    // A detached node is always self-linked.
    w_assert2(_prev == this && _next == this); // not in any list

    // Adopt the destination list, then wire in after prev_link.
    _list = prev_link->_list;
    _next = prev_link->_next;
    _prev = prev_link;
    prev_link->_next = this;
    _next->_prev = this;

    _list->_cnt++;
}
/// Insert latch_holder_t for given latch if not already there. holder_search(latch_t const* l) : _holders(latch_holder_t::thread_local_holders), _freelist(latch_holder_t::thread_local_freelist), _end(_holders.end()), _it(find(_holders, l)) { // if we didn't find the latch in the list, // create a new latch_holder_t (with mode LATCH_NL) // to return, just so that the value() method always // returns a non-null ptr. It might be used, might not. if(_it == _end) { latch_holder_t* h = _freelist; if(h) _freelist = h->_next; // need to clear out the latch either way if(h) // h->latch_holder_t(); // reinit h = new(h) latch_holder_t(); else h = new latch_holder_t; _holders.push_front(h); _it = _holders.begin(); } w_assert2(count(_holders, l) <= 1); }
/// The tracker must already be empty (count drained to zero) when it
/// is destroyed; pop any remaining list nodes regardless.
~old_xct_tracker()
{
    w_assert2(_count == 0);
    for (;;) {
        if (!_list.pop()) {
            break;
        }
    }
}