mm_event_listener_finish(struct mm_event_listener *listener, uint32_t listen_stamp)
{
	// Advertise the start of another working cycle by advancing the
	// listener's stamps past the status bits.
	// NOTE(review): return type / storage class of this definition is
	// above the visible chunk.

	// Bump the listen stamp.
	listen_stamp += MM_EVENT_LISTENER_STATUS + 1;

	// Store it first as the notify stamp then as the proper listen stamp.
	// This order ensures that regardless of a possible race with the CAS
	// operation in the notify function the end result will be correct.
	// The store fence keeps the two plain stores from being reordered
	// with respect to each other.
	mm_memory_store(listener->notify_stamp, listen_stamp);
	mm_memory_store_fence();
	mm_memory_store(listener->listen_stamp, listen_stamp);
}
// Note an access to a cache entry: bump its state by one, but only while
// the state stays within the [MC_ENTRY_USED_MIN, MC_ENTRY_USED_MAX) range.
static void
mc_action_access_entry(struct mc_entry *entry)
{
	const uint8_t current = entry->state;
	if (current < MC_ENTRY_USED_MIN || current >= MC_ENTRY_USED_MAX)
		return;
	// Plain (non-atomic) store, same as the rest of this code path.
	mm_memory_store(entry->state, current + 1);
}
mm_thread_barrier_wait(struct mm_thread_barrier *const barrier, struct mm_thread_barrier_local *local)
{
	// Sense-reversing barrier wait: each arrival flips its expected
	// local sense, the releasing thread resets the counter and publishes
	// the new shared sense, everyone else spins until the shared sense
	// matches their flipped local one.
	// NOTE(review): return type / storage class is above the visible chunk.
	uint32_t sense = ~local->sense;

	// NOTE(review): this branch assumes mm_atomic_uint32_dec_and_test()
	// yields 0 exactly for the thread that completes the barrier (the
	// last arrival) — verify against its definition, as other codebases
	// use the opposite convention for *_dec_and_test.
	if (mm_atomic_uint32_dec_and_test(&barrier->value) == 0) {
		// Reset the counter for the next round, then publish the
		// flipped sense; the store fence keeps the counter reset
		// visible before the release is signaled.
		mm_memory_store(barrier->value, barrier->count);
		mm_memory_store_fence();
		mm_memory_store(barrier->sense, sense);
	} else {
		mm_memory_fence(); // TODO: atomic_load fence
		// Spin until the releasing thread publishes the new sense.
		while (mm_memory_load(barrier->sense) != sense)
			mm_cpu_backoff();
	}

	// Remember the sense for the next barrier round.
	local->sense = sense;
}
mm_event_listener_wait(struct mm_event_listener *listener, mm_timeout_t timeout)
{
	// Put the listener thread to sleep until a notification arrives or
	// the timeout expires, unless a notification is already pending.
	// Two compile-time variants implement the same publish-state /
	// fence / re-check-pending protocol with different stamp storage.
	// NOTE(review): return type / storage class is above the visible chunk.
	ENTER();

#if ENABLE_NOTIFY_STAMP
	// Get the next expected notify stamp.
	const mm_ring_seqno_t stamp = mm_event_listener_dequeue_stamp(listener);

	if (timeout != 0) {
		// Advertise that the thread is about to sleep.
		uintptr_t state = (stamp << 2) | MM_EVENT_LISTENER_WAITING;
		mm_memory_store(listener->state, state);

		// Full fence so the WAITING state is globally visible before
		// the pending-notification check below (store must not be
		// reordered after the load).
		mm_memory_strict_fence(); // TODO: store_load fence

		// Wait for a wake-up notification or timeout unless
		// an already pending notification is detected.
		if (stamp == mm_event_listener_enqueue_stamp(listener))
			mm_event_listener_timedwait(listener, stamp, timeout);
	}

	// Advertise the start of another working cycle.
	mm_event_listener_finish(listener);
#else
	// Get the current listener state.
	const uint32_t listen_stamp = listener->listen_stamp;
	ASSERT((listen_stamp & MM_EVENT_LISTENER_STATE) == MM_EVENT_LISTENER_RUNNING);

	if (timeout != 0) {
		// Advertise that the thread is about to sleep.
		uint32_t wait_stamp = listen_stamp | MM_EVENT_LISTENER_WAITING;
		mm_memory_store(listener->listen_stamp, wait_stamp);

		// Same publish-before-check ordering as the variant above.
		mm_memory_strict_fence(); // TODO: store_load fence

		// Wait for a wake-up notification or timeout unless
		// an already pending notification is detected.
		if (listen_stamp == mm_memory_load(listener->notify_stamp))
			mm_event_listener_timedwait(listener, listen_stamp, timeout);
	}

	// Advertise the start of another working cycle.
	mm_event_listener_finish(listener, listen_stamp);
#endif

	LEAVE();
}
mm_event_dispatch_advance_epoch(struct mm_event_dispatch *dispatch)
{
	// Try to advance the shared reclamation epoch: verify via
	// mm_event_dispatch_check_epoch() that the current epoch may be
	// retired, and if so bump it by one. Returns whether the epoch
	// was advanced.
	// NOTE(review): return type (presumably bool) is above the visible
	// chunk.
	ENTER();

	uint32_t epoch = mm_memory_load(dispatch->reclaim_epoch);
	bool rc = mm_event_dispatch_check_epoch(dispatch, epoch);
	if (rc) {
		// Ensure the epoch check completes before the new epoch
		// becomes visible to other threads.
		mm_memory_fence(); // TODO: load_store fence
		mm_memory_store(dispatch->reclaim_epoch, epoch + 1);
		DEBUG("advance epoch %u", epoch + 1);
	}

	LEAVE();
	return rc;
}
// Publish the result of a completed async syscall to its node and wake
// the fiber that is waiting on it. The store fence guarantees the result
// (and errno capture) is visible before status is cleared to signal
// completion — consumers are expected to read status first.
static void
mm_async_syscall_result(struct mm_async_node *node, intptr_t result)
{
	// Store the result.
	node->result = result;
	// Capture errno on failure (negative result), before any other
	// call can clobber it.
	if (result < 0)
		node->error = errno;

	// Ensure its visibility.
	mm_memory_store_fence();

	// Indicate the operation completion.
	mm_memory_store(node->status, 0);

	// Notify the caller.
	mm_strand_run_fiber(node->fiber);
}
mm_event_listener_poll(struct mm_event_listener *listener, mm_timeout_t timeout)
{
	// Poll the event backend for incoming events, sleeping up to
	// 'timeout' unless a notification is already pending (in which case
	// the backend is polled without blocking). Mirrors the publish-state
	// / fence / re-check protocol of mm_event_listener_wait(), in two
	// compile-time variants.
	// NOTE(review): return type / storage class is above the visible chunk.
	ENTER();

	// Prepare to receive events.
	mm_event_receiver_start(&listener->receiver);

#if ENABLE_NOTIFY_STAMP
	// Get the next expected notify stamp.
	const mm_ring_seqno_t stamp = mm_event_listener_dequeue_stamp(listener);

	if (timeout != 0) {
		// Cleanup stale event notifications.
		mm_event_backend_dampen(&listener->receiver.dispatch->backend);

		// Advertise that the thread is about to sleep.
		uintptr_t state = (stamp << 2) | MM_EVENT_LISTENER_POLLING;
		mm_memory_store(listener->state, state);

		// Full fence so the POLLING state is globally visible before
		// the pending-notification check below.
		mm_memory_strict_fence(); // TODO: store_load fence

		// Wait for a wake-up notification or timeout unless
		// an already pending notification is detected.
		if (stamp != mm_event_listener_enqueue_stamp(listener))
			timeout = 0;
	}

	// Check incoming events and wait for notification/timeout.
	mm_event_backend_listen(&listener->receiver.dispatch->backend, &listener->changes, &listener->receiver, timeout);

	// Advertise the start of another working cycle.
	mm_event_listener_finish(listener);
#else
	// Get the current listener state.
	const uint32_t listen_stamp = listener->listen_stamp;
	ASSERT((listen_stamp & MM_EVENT_LISTENER_STATE) == MM_EVENT_LISTENER_RUNNING);

	if (timeout != 0) {
		// Cleanup stale event notifications.
		mm_event_backend_dampen(&listener->receiver.dispatch->backend);

		// Advertise that the thread is about to sleep.
		uint32_t poll_stamp = listen_stamp | MM_EVENT_LISTENER_POLLING;
		mm_memory_store(listener->listen_stamp, poll_stamp);

		// Same publish-before-check ordering as the variant above.
		mm_memory_strict_fence(); // TODO: store_load fence

		// Wait for a wake-up notification or timeout unless
		// an already pending notification is detected.
		if (listen_stamp != mm_memory_load(listener->notify_stamp))
			timeout = 0;
	}

	// Check incoming events and wait for notification/timeout.
	mm_event_backend_listen(&listener->receiver.dispatch->backend, &listener->changes, &listener->receiver, timeout);

	// Advertise the start of another working cycle.
	mm_event_listener_finish(listener, listen_stamp);
#endif

	// Flush received events.
	mm_event_receiver_finish(&listener->receiver);

	LEAVE();
}
mm_event_listener_finish(struct mm_event_listener *listener)
{
	// ENABLE_NOTIFY_STAMP variant: advertise the start of another
	// working cycle with a single plain store of the RUNNING state.
	// NOTE(review): return type / storage class is above the visible
	// chunk.
	mm_memory_store(listener->state, MM_EVENT_LISTENER_RUNNING);
}