Example #1
static bool
mm_event_dispatch_check_epoch(struct mm_event_dispatch *dispatch, uint32_t epoch)
{
	ENTER();
	bool rc = true;

	mm_thread_t n = dispatch->nlisteners;
	struct mm_event_listener *listeners = dispatch->listeners;
	for (mm_thread_t i = 0; i < n; i++) {
		struct mm_event_listener *listener = &listeners[i];
		struct mm_event_receiver *receiver = &listener->receiver;
		uint32_t local = mm_memory_load(receiver->reclaim_epoch);
		if (local == epoch)
			continue;

		mm_memory_load_fence();
		bool active = mm_memory_load(receiver->reclaim_active);
		if (active) {
			mm_thread_post_1(listener->thread, mm_event_dispatch_observe_req,
					 (uintptr_t) receiver);
			rc = false;
			break;
		}
	}

	LEAVE();
	return rc;
}
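The loop above is an epoch-based reclamation check: each listener's reclaim epoch is loaded first, and only after a load fence is its reclaim_active flag examined, so a stale epoch is never mistaken for an idle listener. Below is a minimal stand-alone sketch of the same pattern, written with C11 atomics instead of the project's mm_memory_* macros; the observer struct, g_observers array, and check_epoch name are hypothetical, and the request posted to a lagging listener is omitted.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct observer {
	_Atomic uint32_t epoch;   /* last epoch this thread observed */
	atomic_bool active;       /* thread is inside a critical section */
};

#define NOBSERVERS 4
static struct observer g_observers[NOBSERVERS];

/* Returns true when every active observer has caught up with 'epoch',
 * i.e. the grace period for that epoch has elapsed. */
static bool
check_epoch(uint32_t epoch)
{
	for (int i = 0; i < NOBSERVERS; i++) {
		uint32_t local = atomic_load_explicit(&g_observers[i].epoch,
						      memory_order_relaxed);
		if (local == epoch)
			continue;
		/* Order the epoch load before the activity check, as the
		 * mm_memory_load_fence() call does above. */
		atomic_thread_fence(memory_order_acquire);
		if (atomic_load_explicit(&g_observers[i].active,
					 memory_order_relaxed))
			return false;
	}
	return true;
}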
Example #2
void
mm_event_listener_notify(struct mm_event_listener *listener, mm_ring_seqno_t stamp UNUSED)
{
	ENTER();

#if ENABLE_NOTIFY_STAMP
	uintptr_t state = mm_memory_load(listener->state);
	if ((stamp << 2) == (state & ~MM_EVENT_LISTENER_STATUS)) {
		// Get the current status of the listener. It might
		// become obsolete by the time the notification is
		// sent. This is not a problem, however, as it implies
		// that the listener thread has woken up on its own and
		// seen all the sent data.
		//
		// Sometimes this might lead to an extra listener
		// wake-up (if the listener makes a full cycle) or to
		// the wrong listener being woken (if another listener
		// starts polling). So listeners should be prepared
		// to get spurious wake-up notifications.
		mm_event_listener_status_t status = state & MM_EVENT_LISTENER_STATUS;
		if (status == MM_EVENT_LISTENER_WAITING)
			mm_event_listener_signal(listener);
		else if (status == MM_EVENT_LISTENER_POLLING)
			mm_event_backend_notify(&listener->receiver.dispatch->backend);
	}
#else
	// Compare the notify and listen stamps. If the notify stamp has
	// not already been bumped relative to the listen stamp, then do
	// it now. Do it atomically, so that only the one thread that
	// succeeds is elected to send a wake-up notification to the
	// target listener.
	const uint32_t listen_stamp = mm_memory_load(listener->listen_stamp);
	mm_memory_load_fence();
	const uint32_t notify_stamp = mm_memory_load(listener->notify_stamp);
	if (notify_stamp == (listen_stamp & ~MM_EVENT_LISTENER_STATUS)) {
		uint32_t next = notify_stamp + MM_EVENT_LISTENER_STATUS + 1;
		uint32_t prev = mm_atomic_uint32_cas(&listener->notify_stamp, notify_stamp, next);
		if (prev == notify_stamp) {
			// Get the current state of the listener. It might
			// become obsolete by the time the notification is
			// sent. This is not a problem, however, as it implies
			// that the listener thread has woken up on its own and
			// seen all the sent data.
			//
			// Sometimes this might lead to an extra listener
			// wake-up (if the listener makes a full cycle) or to
			// the wrong listener being woken (if another listener
			// starts polling). So listeners should be prepared
			// to get spurious wake-up notifications.
			mm_event_listener_status_t status = listen_stamp & MM_EVENT_LISTENER_STATUS;
			if (status == MM_EVENT_LISTENER_WAITING)
				mm_event_listener_signal(listener);
			else if (status == MM_EVENT_LISTENER_POLLING)
				mm_event_backend_notify(&listener->receiver.dispatch->backend);
		}
	}
#endif

	LEAVE();
}
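In the #else branch the notify stamp doubles as an election token: several threads may observe the same pair of stamps, but only the one whose compare-and-swap succeeds sends the wake-up, so the target listener is signaled at most once per listen cycle. A minimal sketch of that election with C11 atomics follows; the notifier struct and wake() function are hypothetical, and the status bits folded into the real stamp are left out.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct notifier {
	_Atomic uint32_t notify_stamp;
};

static void
wake(struct notifier *n)
{
	(void) n;
	puts("wake-up sent");
}

/* 'seen' is the listen stamp the caller loaded earlier. If the notify
 * stamp still equals it, try to bump it; the single winner notifies. */
static void
notify(struct notifier *n, uint32_t seen)
{
	uint32_t expected = seen;
	if (atomic_compare_exchange_strong(&n->notify_stamp,
					   &expected, seen + 1))
		wake(n);	/* we won the election */
	/* Losers do nothing: a notification is already on its way. */
}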
Example #3
static struct mm_lock_stat_set *
mm_lock_find_stat_set(uint32_t bucket,
		      const char *location,
		      const char *moreinfo)
{
	// Go through bucket entries trying to find a match.
	struct mm_slink *link = mm_stack_atomic_load_head(&mm_lock_stat_table[bucket]);
	while (link != NULL) {
		struct mm_lock_stat_set *stat_set
			= containerof(link, struct mm_lock_stat_set, bucket_link);
		mm_memory_load_fence();
		// Match the current entry.
		if (mm_lock_match_stat_set(stat_set, location, moreinfo))
			return stat_set;
		link = mm_memory_load(link->next);
	}
	return NULL;
}
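This is a lock-free scan of a hash bucket: the head of the chain is loaded atomically, and the load fence on each iteration ensures an entry's fields are observed only after the entry was fully published. A minimal stand-alone sketch of the same scan, assuming a hypothetical entry type keyed by a string and using C11 atomics in place of the mm_memory_* macros:

#include <stdatomic.h>
#include <stddef.h>
#include <string.h>

struct entry {
	const char *key;
	struct entry *_Atomic next;
};

static struct entry *_Atomic bucket_head;

static struct entry *
find(const char *key)
{
	struct entry *e = atomic_load_explicit(&bucket_head,
					       memory_order_relaxed);
	while (e != NULL) {
		/* Make the entry's fields visible before reading them,
		 * like the mm_memory_load_fence() call above. */
		atomic_thread_fence(memory_order_acquire);
		if (strcmp(e->key, key) == 0)
			return e;
		e = atomic_load_explicit(&e->next, memory_order_relaxed);
	}
	return NULL;
}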
Example #4
struct mm_lock_domain_stat *
mm_lock_find_domain_stat(struct mm_lock_stat_set *stat_set,
			 struct mm_domain *domain)
{
	// Go through domain entries trying to find a match.
	struct mm_slink *link = mm_stack_atomic_load_head(&stat_set->domain_list);
	while (link != NULL) {
		struct mm_lock_domain_stat *dom_stat
			= containerof(link, struct mm_lock_domain_stat, link);
		if (dom_stat->domain == domain) {
			// If the entry is not yet ready, wait a bit until
			// it becomes ready.  It shouldn't take long.  Really.
			while (mm_memory_load(dom_stat->ready) == 0)
				mm_spin_pause();
			mm_memory_load_fence();
			return dom_stat;
		}
		link = mm_memory_load(link->next);
	}
	return NULL;
}
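Here a reader may find an entry that is already linked into the list but not yet fully initialized, so it spins on the ready flag and then issues a load fence before using the entry's contents. A minimal sketch of this ready-flag handshake with C11 atomics; the record type, publish(), and consume() names are hypothetical, and the writer's release store pairs with the reader's acquire fence:

#include <stdatomic.h>

struct record {
	int payload;
	atomic_int ready;
};

static void
publish(struct record *r, int value)
{
	r->payload = value;
	/* Publish the payload before raising the flag. */
	atomic_store_explicit(&r->ready, 1, memory_order_release);
}

static int
consume(struct record *r)
{
	/* Busy-wait; the window between insertion and readiness is short. */
	while (atomic_load_explicit(&r->ready, memory_order_relaxed) == 0)
		;	/* spin */
	/* Order the flag check before reading the payload. */
	atomic_thread_fence(memory_order_acquire);
	return r->payload;
}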
Example #5
void
mm_lock_stats(void)
{
#if ENABLE_LOCK_STATS
	struct mm_slink *set_link = mm_stack_atomic_load_head(&mm_lock_stat_list);
	while (set_link != NULL) {
		struct mm_lock_stat_set *stat_set
			= containerof(set_link, struct mm_lock_stat_set, common_link);
		mm_memory_load_fence();

		struct mm_slink *dom_link = mm_stack_atomic_load_head(&stat_set->domain_list);
		while (dom_link != NULL) {
			struct mm_lock_domain_stat *dom_stat
				= containerof(dom_link, struct mm_lock_domain_stat, link);
			struct mm_domain *domain = dom_stat->domain;
			for (mm_thread_t c = 0; c < domain->nthreads; c++) {
				struct mm_lock_stat *stat
					= MM_THREAD_LOCAL_DEREF(c, dom_stat->stat);
				struct mm_thread *thread = domain->threads[c];
				mm_lock_print_stat(thread, stat_set, stat);
			}
			dom_link = mm_memory_load(dom_link->next);
		}

		struct mm_slink *thr_link = mm_stack_atomic_load_head(&stat_set->thread_list);
		while (thr_link != NULL) {
			struct mm_lock_thread_stat *thr_stat
				= containerof(thr_link, struct mm_lock_thread_stat, link);
			struct mm_thread *thread = thr_stat->thread;
			mm_lock_print_stat(thread, stat_set, &thr_stat->stat);
			thr_link = mm_memory_load(thr_link->next);
		}

		set_link = mm_memory_load(set_link->next);
	}
#endif
}
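The stats dump walks two levels of lock-free lists with the same relaxed-load-plus-fence pattern as the lookups above. Below is a minimal sketch of such a nested walk with C11 atomics, assuming hypothetical set and item types; note that this is only safe if entries are appended and never unlinked, which appears to be the invariant here since nothing in these examples removes an entry.

#include <stdatomic.h>
#include <stdio.h>

struct item {
	int value;
	struct item *_Atomic next;
};

struct set {
	struct item *_Atomic items;
	struct set *_Atomic next;
};

static struct set *_Atomic all_sets;

static void
dump_all(void)
{
	struct set *s = atomic_load_explicit(&all_sets,
					     memory_order_relaxed);
	while (s != NULL) {
		atomic_thread_fence(memory_order_acquire);
		struct item *i = atomic_load_explicit(&s->items,
						      memory_order_relaxed);
		while (i != NULL) {
			atomic_thread_fence(memory_order_acquire);
			printf("%d\n", i->value);
			i = atomic_load_explicit(&i->next,
						 memory_order_relaxed);
		}
		s = atomic_load_explicit(&s->next, memory_order_relaxed);
	}
}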