/*
 * Steal one item from the top (thief end) of a Chase-Lev style
 * work-stealing deque.  Thieves operate on 'top' via CAS while the
 * owner pushes/pops at 'bottom'.
 *
 * Returns the stolen item on success, WSD_EMPTY if the deque appeared
 * empty, or WSD_ABORT if the CAS on 'top' lost a race with the owner
 * or another thief (caller may retry).
 */
void* wsd_work_stealing_deque_steal(wsd_work_stealing_deque_t* d)
{
    assert(d);
    const int64_t t = d->top;
    /* 'top' must be read before 'bottom' so the size computed below is
       conservative - it can under- but never over-estimate the number
       of stealable items */
    load_load_barrier();
    const int64_t b = d->bottom;
    wsd_circular_array_t* const a = d->underlying_array;
    const int64_t size = b - t;
    if(size <= 0) {
        return WSD_EMPTY;
    }
    /* read the candidate element first; the CAS below decides whether
       this thread actually owns it */
    void* const ret = wsd_circular_array_get(a, t);
    if(!__sync_bool_compare_and_swap(&d->top, t, t + 1)) {
        /* someone else advanced 'top' concurrently - give up this attempt */
        return WSD_ABORT;
    }
    return ret;
}
/*
 * Block the calling thread for up to seconds + useconds while waiting
 * for events.  Only the final active thread actually runs the blocking
 * event loop; every other thread just sleeps for the timeout so it
 * cannot hold fiber_loop_spinlock and lock out threads trying to
 * register for events.
 *
 * Returns the number of events triggered by the poll, or 0 when this
 * thread only slept or the loop was torn down concurrently.
 * NOTE(review): num_events_triggered is an int copied into a size_t
 * return - presumably callbacks only ever leave it >= 0; confirm.
 */
size_t fiber_poll_events_blocking(uint32_t seconds, uint32_t useconds)
{
    if(!fiber_loop) {
        /* no event loop exists - just sleep for the requested duration */
        fiber_do_real_sleep(seconds, useconds);
        return 0;
    }
    load_load_barrier();//needed to ensure we read fiber_loop first - we need a valid active_thread count,
    //so init writes active_threads then fiber_loop, we read here in the reverse order

    //only allow the final thread to perform a blocking poll - this prevents the
    //thread from locking out other threads trying to register for events
    const int local_count = __sync_sub_and_fetch(&active_threads, 1);
    assert(local_count >= 0);
    if(local_count > 0) {
        /* other threads are still active - sleep instead of polling,
           then restore our slot in the active count */
        fiber_do_real_sleep(seconds, useconds);
        __sync_add_and_fetch(&active_threads, 1);
        return 0;
    }
    fiber_spinlock_lock(&fiber_loop_spinlock);
    /* re-check under the lock: the loop may have been destroyed between
       the check above and acquiring the spinlock */
    if(!fiber_loop) {
        __sync_add_and_fetch(&active_threads, 1);
        fiber_spinlock_unlock(&fiber_loop_spinlock);
        return 0;
    }
    num_events_triggered = 0;
    fiber_manager_get()->poll_count += 1;
    /* run one (possibly blocking) iteration of the libev loop;
       event callbacks presumably update num_events_triggered - verify */
    ev_run(fiber_loop, EVRUN_ONCE);
    /* copy the counter before dropping the lock that guards it */
    const int local_copy = num_events_triggered;
    fiber_spinlock_unlock(&fiber_loop_spinlock);
    __sync_add_and_fetch(&active_threads, 1);
    return local_copy;
}