void
mm_event_dispatch_listen(struct mm_event_dispatch *dispatch, mm_thread_t thread,
			 mm_timeout_t timeout)
{
	ENTER();
	ASSERT(thread < dispatch->nlisteners);

	struct mm_event_listener *listener = mm_event_dispatch_listener(dispatch, thread);

	if (listener->busywait) {
		// Presume that if there were incoming events moments ago then
		// there is a chance to get some more immediately. Spin a little
		// bit to avoid context switches.
		listener->busywait--;
		timeout = 0;
	} else if (mm_event_listener_has_changes(listener)) {
		// There may be changes that need to be immediately acknowledged.
		timeout = 0;
	}

	// The first thread that arrives here and is about to sleep is elected
	// to conduct the next event poll.
	bool is_poller_thread = false;
	if (timeout != 0) {
		mm_regular_lock(&dispatch->poller_lock);
		if (dispatch->poller_thread == MM_THREAD_NONE) {
			dispatch->poller_thread = thread;
			is_poller_thread = true;
		}
		mm_regular_unlock(&dispatch->poller_lock);
	}

	if (timeout == 0 || is_poller_thread) {
		if (timeout != 0)
			mm_event_dispatch_advance_epoch(dispatch);

		// Wait for incoming events or timeout expiration.
		mm_event_listener_poll(listener, timeout);

		// Give up the poller thread role.
		if (is_poller_thread) {
			mm_regular_lock(&dispatch->poller_lock);
			dispatch->poller_thread = MM_THREAD_NONE;
			mm_regular_unlock(&dispatch->poller_lock);
		}

		// Forget just-handled change events.
		mm_event_listener_clear_changes(listener);

		// Arm the busy-wait counter if any events were received.
		if (listener->receiver.got_events)
			listener->busywait += mm_events_busywait;
	} else {
		// Wait for forwarded events or timeout expiration.
		mm_event_listener_wait(listener, timeout);
	}

	LEAVE();
}
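/*
 * For context, a minimal sketch of how a per-thread worker might drive the
 * routine above. Everything except mm_event_dispatch_listen() itself is an
 * assumption: mm_worker_loop(), mm_worker_should_stop() and
 * mm_worker_handle_events() are hypothetical placeholders for whatever the
 * surrounding runtime actually provides, and the timeout is caller-chosen.
 */
static void
mm_worker_loop(struct mm_event_dispatch *dispatch, mm_thread_t thread, mm_timeout_t timeout)
{
	while (!mm_worker_should_stop()) {
		// Poll directly or wait for forwarded events, depending on
		// whether this thread wins the poller election above.
		mm_event_dispatch_listen(dispatch, thread, timeout);
		// Process whatever events the listener collected.
		mm_worker_handle_events();
	}
}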
static int
mm_port_send_internal(struct mm_port *port, uint32_t *start, uint32_t count, bool blocking)
{
	ENTER();
	ASSERT(count <= (MM_PORT_SIZE / 2));
	ASSERT(port->task != mm_task_selfptr());

	int rc = 0;

again:
	mm_regular_lock(&port->lock);

	// If there is not enough free room in the ring buffer then either wait
	// for it (blocking mode) or bail out (non-blocking mode).
	if (unlikely((port->count + count) > MM_PORT_SIZE)) {
		if (blocking) {
			mm_waitset_wait(&port->blocked_senders, &port->lock);
			mm_task_testcancel();
			goto again;
		} else {
			mm_regular_unlock(&port->lock);
			rc = -1;
			goto leave;
		}
	}

	uint32_t ring_end = (port->start + port->count) % MM_PORT_SIZE;
	uint32_t *ring_ptr = &port->ring[ring_end];

	port->count += count;

	// Copy the message in, wrapping around the ring buffer end if needed.
	if (unlikely((ring_end + count) > MM_PORT_SIZE)) {
		uint32_t top_count = MM_PORT_SIZE - ring_end;
		count -= top_count;
		while (top_count--) {
			*ring_ptr++ = *start++;
		}
		ring_ptr = &port->ring[0];
	}
	while (count--) {
		*ring_ptr++ = *start++;
	}

	mm_regular_unlock(&port->lock);

	// Let the receiving task know there is a pending message.
	mm_core_run_task(port->task);

leave:
	LEAVE();
	return rc;
}
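/*
 * The blocking flag above suggests a pair of thin public entry points. This is
 * a plausible sketch only: the names mm_port_send() and mm_port_send_blocking()
 * and their exact signatures are assumptions, not taken from the code above.
 */
int
mm_port_send(struct mm_port *port, uint32_t *start, uint32_t count)
{
	// Fail with -1 instead of waiting when the ring buffer lacks room.
	return mm_port_send_internal(port, start, count, false);
}

void
mm_port_send_blocking(struct mm_port *port, uint32_t *start, uint32_t count)
{
	// Block the sending task until the whole message fits.
	mm_port_send_internal(port, start, count, true);
}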
static void
mm_pool_grow_unlock(struct mm_pool *pool)
{
	if (pool->global)
		mm_common_unlock(&pool->global_data.grow_lock);
#if ENABLE_SMP
	else if (pool->shared)
		mm_regular_unlock(&pool->shared_data.grow_lock);
#endif
}
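/*
 * The unlock helper above implies a symmetric acquire helper. A minimal
 * sketch, assuming mm_common_lock() and mm_regular_lock() are the acquire
 * counterparts of the release calls used above; the acquire API names are an
 * assumption, not taken from the code shown here.
 */
static void
mm_pool_grow_lock(struct mm_pool *pool)
{
	if (pool->global)
		mm_common_lock(&pool->global_data.grow_lock);
#if ENABLE_SMP
	else if (pool->shared)
		mm_regular_lock(&pool->shared_data.grow_lock);
#endif
}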
static int
mm_port_receive_internal(struct mm_port *port, uint32_t *start, uint32_t count, bool blocking)
{
	ENTER();
	ASSERT(count <= (MM_PORT_SIZE / 2));
	ASSERT(port->task == mm_task_selfptr());

	int rc = 0;

again:
	mm_regular_lock(&port->lock);

	// If there is not yet enough data in the ring buffer then either block
	// waiting for it (blocking mode) or bail out (non-blocking mode).
	if (port->count < count) {
		mm_regular_unlock(&port->lock);
		if (blocking) {
			mm_task_block();
			mm_task_testcancel();
			goto again;
		} else {
			rc = -1;
			goto leave;
		}
	}

	uint32_t *ring_ptr = &port->ring[port->start];
	port->count -= count;

	// Copy the message out, wrapping around the ring buffer end if needed.
	if (unlikely((port->start + count) > MM_PORT_SIZE)) {
		uint32_t top_count = MM_PORT_SIZE - port->start;
		count -= top_count;
		while (top_count--) {
			*start++ = *ring_ptr++;
		}
		ring_ptr = &port->ring[0];
		port->start = 0;
	}
	port->start = (port->start + count) % MM_PORT_SIZE;
	while (count--) {
		*start++ = *ring_ptr++;
	}

	// Wake up any senders blocked on a full ring buffer.
	mm_waitset_broadcast(&port->blocked_senders, &port->lock);

leave:
	LEAVE();
	return rc;
}
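/*
 * Receive-side counterparts of the hypothetical send wrappers above; again a
 * sketch under the assumption that such wrappers exist, with the names
 * mm_port_receive() and mm_port_receive_blocking() chosen for illustration.
 */
int
mm_port_receive(struct mm_port *port, uint32_t *start, uint32_t count)
{
	// Return -1 immediately if fewer than 'count' words are available.
	return mm_port_receive_internal(port, start, count, false);
}

void
mm_port_receive_blocking(struct mm_port *port, uint32_t *start, uint32_t count)
{
	// Block the owning task until the whole message can be read.
	mm_port_receive_internal(port, start, count, true);
}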