inline void decrement_running_counter() { // now, a bit of care is needed here size_t r = threads_running.dec(); if (r == 0) { join_lock.lock(); if (join_waiting) { join_cond.signal(); } join_lock.unlock(); } }
// Attempt to terminate worker `cpuid`. The thread first announces itself
// inactive (numactive.dec) and enters the consensus object's "done"
// critical section, then re-checks the queue one last time:
//   - if a job shows up, the termination attempt is cancelled, the job is
//     returned through `job`, and false is reported (keep running);
//   - if the queue is still empty, the consensus object decides whether
//     every thread has agreed to stop (end_done_critical_section).
// Either way the thread is marked active again before returning.
bool try_terminate(size_t cpuid, std::pair<size_t, bool> &job) {
  job.second = false;
  numactive.dec();
  cons.begin_done_critical_section(cpuid);
  job = queue.try_dequeue();
  if (job.second) {
    // Work arrived after all: abort the termination handshake.
    cons.cancel_critical_section(cpuid);
    numactive.inc();
    return false;
  }
  // Still empty: let the consensus protocol decide if we may stop.
  const bool all_done = cons.end_done_critical_section(cpuid);
  numactive.inc();
  return all_done;
}
// Release a read hold on the queued reader/writer lock.
// `I` is this reader's queue node. The statement order and the
// __sync_synchronize() full barriers are load-bearing — do not reorder.
inline void rdunlock(request *I) {
  __sync_synchronize();
  // Try to swing `tail` from our node back to NULL (i.e. we are the last
  // queued request). If a successor already linked in — or links in while
  // the CAS is failing — we must hand off state to it instead.
  if (I->next != NULL || !__sync_bool_compare_and_swap(&tail, I, (request*)NULL)) {
    // A successor enqueued itself but may not have set I->next yet;
    // spin (yielding) until its link is visible.
    while(I->next == NULL) sched_yield();
    if (I->s.state.successor_class == QUEUED_RW_LOCK_REQUEST_WRITE) {
      // Record the pending writer; it is woken only when the last
      // active reader drains (below).
      next_writer = (request*)(I->next);
      __sync_synchronize();
    }
  }
  // If we were the last active reader, wake the pending writer (if any).
  // The atomic test-and-set ensures exactly one releasing reader claims it.
  if (reader_count.dec() == 0) {
    __sync_synchronize();
    request * w = __sync_lock_test_and_set(&next_writer, (request*)NULL);
    if (w != NULL) {
      // Clearing `blocked` is the writer's wakeup condition.
      w->s.state.blocked = false;
      __sync_synchronize();
    }
  }
}