mm_event_dispatch_prepare(struct mm_event_dispatch *dispatch, struct mm_domain *domain,
			  mm_thread_t nthreads, struct mm_thread *threads[])
{
	ENTER();
	ASSERT(nthreads > 0);

	// Bind the dispatcher and its domain to each other.
	dispatch->domain = domain;
	mm_domain_setdispatch(domain, dispatch);

	// Allocate one listener per thread and cross-link each
	// listener with its thread.
	dispatch->nlisteners = nthreads;
	dispatch->listeners = mm_common_calloc(nthreads, sizeof(struct mm_event_listener));
	for (mm_thread_t i = 0; i < nthreads; i++) {
		struct mm_event_listener *listener = &dispatch->listeners[i];
		mm_event_listener_prepare(listener, dispatch, threads[i]);
		mm_thread_setlistener(threads[i], listener);
	}

	// Start with the poller slot unlocked and unowned.
	dispatch->poller_lock = (mm_regular_lock_t) MM_REGULAR_LOCK_INIT;
	dispatch->poller_thread = MM_THREAD_NONE;
	dispatch->reclaim_epoch = 0;

	// Initialize system-specific resources.
	mm_event_backend_prepare(&dispatch->backend);

	LEAVE();
}
mm_event_forward_prepare(struct mm_event_forward_cache *cache, mm_thread_t ntargets)
{
	ENTER();

	// Allocate one forwarding buffer per target thread.
	cache->buffers = mm_common_calloc(ntargets, sizeof(cache->buffers[0]));
	// Reset the per-buffer counters explicitly.
	// NOTE(review): likely redundant if mm_common_calloc zero-fills
	// like calloc(3) -- kept as-is to preserve behavior; confirm.
	for (mm_thread_t target = 0; target < ntargets; target++) {
		cache->buffers[target].nsinks = 0;
		cache->buffers[target].ntotal = 0;
	}

	// Set up the bitset that marks which targets have pending events.
	mm_bitset_prepare(&cache->targets, &mm_common_space.xarena, ntargets);

	LEAVE();
}