Example #1
static bool
mm_event_dispatch_check_epoch(struct mm_event_dispatch *dispatch, uint32_t epoch)
{
	ENTER();
	bool rc = true;

	mm_thread_t n = dispatch->nlisteners;
	struct mm_event_listener *listeners = dispatch->listeners;
	for (mm_thread_t i = 0; i < n; i++) {
		struct mm_event_listener *listener = &listeners[i];
		struct mm_event_receiver *receiver = &listener->receiver;
		uint32_t local = mm_memory_load(receiver->reclaim_epoch);
		if (local == epoch)
			continue;

		mm_memory_load_fence();
		bool active = mm_memory_load(receiver->reclaim_active);
		if (active) {
			mm_thread_post_1(listener->thread, mm_event_dispatch_observe_req,
					 (uintptr_t) receiver);
			rc = false;
			break;
		}
	}

	LEAVE();
	return rc;
}
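This check is the validation half of an epoch-based reclamation scheme: memory retired in the current epoch may be freed only after every listener has observed that epoch. For context, here is a minimal sketch of the observer side in portable C11 atomics; the struct and function names are illustrative, not the library's.

#include <stdatomic.h>

/* Hypothetical per-thread observer state, mirroring the reclaim_epoch
 * and reclaim_active fields checked above. */
struct observer {
	atomic_uint local_epoch;	/* last global epoch this thread saw */
	atomic_bool active;		/* holds epoch-protected references */
};

/* Called by each thread at a quiescent point: catch up with the global
 * epoch so that the checker above finds local == epoch and moves on. */
static void
observer_update(struct observer *self, atomic_uint *global_epoch)
{
	unsigned epoch = atomic_load_explicit(global_epoch, memory_order_acquire);
	atomic_store_explicit(&self->local_epoch, epoch, memory_order_release);
}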
Example #2
void
mm_event_listener_notify(struct mm_event_listener *listener, mm_ring_seqno_t stamp UNUSED)
{
	ENTER();

#if ENABLE_NOTIFY_STAMP
	uintptr_t state = mm_memory_load(listener->state);
	if ((stamp << 2) == (state & ~MM_EVENT_LISTENER_STATUS)) {
		// Get the current status of the listener. It might
		// become obsolete by the time the notification is
		// sent. This is not a problem, however, as it implies
		// that the listener thread has woken up on its own and
		// seen all the sent data.
		//
		// Sometimes this might lead to an extra listener
		// wake-up (if the listener makes a full cycle) or
		// the wrong listener being woken (if another listener
		// starts polling). So listeners should be prepared
		// to get spurious wake-up notifications.
		mm_event_listener_status_t status = state & MM_EVENT_LISTENER_STATUS;
		if (status == MM_EVENT_LISTENER_WAITING)
			mm_event_listener_signal(listener);
		else if (status == MM_EVENT_LISTENER_POLLING)
			mm_event_backend_notify(&listener->receiver.dispatch->backend);
	}
#else
	// Compare the notify and listen stamps. If the notify stamp has not
	// already been bumped relative to the listen stamp, then do it now.
	// Do it atomically so that only the thread that succeeds with this
	// is elected to send a wake-up notification to the target listener.
	const uint32_t listen_stamp = mm_memory_load(listener->listen_stamp);
	mm_memory_load_fence();
	const uint32_t notify_stamp = mm_memory_load(listener->notify_stamp);
	if (notify_stamp == (listen_stamp & ~MM_EVENT_LISTENER_STATUS)) {
		uint32_t next = notify_stamp + MM_EVENT_LISTENER_STATUS + 1;
		uint32_t prev = mm_atomic_uint32_cas(&listener->notify_stamp, notify_stamp, next);
		if (prev == notify_stamp) {
			// Get the current state of the listener. It might
			// become obsolete by the time the notification is
			// sent. This is not a problem, however, as it implies
			// that the listener thread has woken up on its own and
			// seen all the sent data.
			//
			// Sometimes this might lead to an extra listener
			// wake-up (if the listener makes a full cycle) or
			// the wrong listener being woken (if another listener
			// starts polling). So listeners should be prepared
			// to get spurious wake-up notifications.
			mm_event_listener_status_t status = listen_stamp & MM_EVENT_LISTENER_STATUS;
			if (status == MM_EVENT_LISTENER_WAITING)
				mm_event_listener_signal(listener);
			else if (status == MM_EVENT_LISTENER_POLLING)
				mm_event_backend_notify(&listener->receiver.dispatch->backend);
		}
	}
#endif

	LEAVE();
}
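In the #else branch, the CAS on notify_stamp elects exactly one of possibly many concurrent notifiers; the losers simply return, knowing the winner will deliver the wake-up. The election pattern in isolation, using C11 atomics (names here are illustrative):

#include <stdatomic.h>
#include <stdbool.h>

/* Returns true for exactly one of the threads that pass the same
 * 'expected' stamp; that thread then performs the actual wake-up. */
static bool
try_elect_notifier(atomic_uint *notify_stamp, unsigned expected)
{
	unsigned next = expected + 1;
	return atomic_compare_exchange_strong_explicit(
		notify_stamp, &expected, next,
		memory_order_acq_rel, memory_order_relaxed);
}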
Example #3
struct mm_lock_stat *
mm_lock_get_thread_stat(struct mm_lock_stat_set *stat_set,
			struct mm_thread *thread)
{
	struct mm_lock_thread_stat *thr_stat;

	// Look for a matching thread entry.
	struct mm_slink *link = mm_stack_atomic_load_head(&stat_set->thread_list);
	while (link != NULL) {
		thr_stat = containerof(link, struct mm_lock_thread_stat, link);
		if (thr_stat->thread == thread)
			return &thr_stat->stat;
		link = mm_memory_load(link->next);
	}

	// If not found, create a new entry.
	thr_stat = mm_global_alloc(sizeof(struct mm_lock_thread_stat));
	thr_stat->thread = thread;
	thr_stat->stat.lock_count = 0;
	thr_stat->stat.fail_count = 0;

	// Link the entry into the list.
	struct mm_slink *head = mm_stack_atomic_load_head(&stat_set->thread_list);
	for (uint32_t b = 0; ; b = mm_thread_backoff(b)) {
		thr_stat->link.next = head;
		link = mm_stack_atomic_cas_head(&stat_set->thread_list, head, &thr_stat->link);
		if (link == head)
			break;
		head = link;
	}

	return &thr_stat->stat;
}
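The insertion loop above is a textbook lock-free (Treiber) stack push: link the node to the observed head, try to swing the head with a CAS, and on failure retry with the freshly observed head, using mm_thread_backoff() to ease contention. The same push in portable C11, as a sketch without the backoff:

#include <stdatomic.h>

struct node {
	struct node *next;
};

static void
stack_push(struct node *_Atomic *head, struct node *node)
{
	struct node *old = atomic_load_explicit(head, memory_order_relaxed);
	do {
		/* Link to the currently observed head; on CAS failure
		 * 'old' is automatically reloaded with the new head. */
		node->next = old;
	} while (!atomic_compare_exchange_weak_explicit(
			head, &old, node,
			memory_order_release, memory_order_relaxed));
}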
Example #4
struct mm_lock_domain_stat *
mm_lock_find_domain_stat(struct mm_lock_stat_set *stat_set,
			 struct mm_domain *domain)
{
	// Go through domain entries trying to find a match.
	struct mm_slink *link = mm_stack_atomic_load_head(&stat_set->domain_list);
	while (link != NULL) {
		struct mm_lock_domain_stat *dom_stat
			= containerof(link, struct mm_lock_domain_stat, link);
		if (dom_stat->domain == domain) {
			// If the entry is not yet ready, then wait a bit until
			// it becomes ready.  It shouldn't take long.  Really.
			while (mm_memory_load(dom_stat->ready) == 0)
				mm_spin_pause();
			mm_memory_load_fence();
			return dom_stat;
		}
		link = mm_memory_load(link->next);
	}
	return NULL;
}
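The spin on dom_stat->ready, followed by a load fence, pairs with a writer that fully initializes the entry and only then sets the flag behind a store fence. A sketch of that publishing side in C11 (the entry type here is hypothetical):

#include <stdatomic.h>

struct entry {
	int payload;		/* initialized before publication */
	atomic_int ready;	/* stays 0 until the entry is usable */
};

static void
entry_publish(struct entry *e, int value)
{
	e->payload = value;
	/* The release store plays the role of the store fence: no reader
	 * that observes ready != 0 can see a stale payload. */
	atomic_store_explicit(&e->ready, 1, memory_order_release);
}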
Example #5
void
mm_lock_stats(void)
{
#if ENABLE_LOCK_STATS
	struct mm_slink *set_link = mm_stack_atomic_load_head(&mm_lock_stat_list);
	while (set_link != NULL) {
		struct mm_lock_stat_set *stat_set
			= containerof(set_link, struct mm_lock_stat_set, common_link);
		mm_memory_load_fence();

		struct mm_slink *dom_link = mm_stack_atomic_load_head(&stat_set->domain_list);
		while (dom_link != NULL) {
			struct mm_lock_domain_stat *dom_stat
				= containerof(dom_link, struct mm_lock_domain_stat, link);
			struct mm_domain *domain = dom_stat->domain;
			for (mm_thread_t c = 0; c < domain->nthreads; c++) {
				struct mm_lock_stat *stat
					= MM_THREAD_LOCAL_DEREF(c, dom_stat->stat);
				struct mm_thread *thread = domain->threads[c];
				mm_lock_print_stat(thread, stat_set, stat);
			}
			dom_link = mm_memory_load(dom_link->next);
		}

		struct mm_slink *thr_link = mm_stack_atomic_load_head(&stat_set->thread_list);
		while (thr_link != NULL) {
			struct mm_lock_thread_stat *thr_stat
				= containerof(thr_link, struct mm_lock_thread_stat, link);
			struct mm_thread *thread = thr_stat->thread;
			mm_lock_print_stat(thread, stat_set, &thr_stat->stat);
			thr_link = mm_memory_load(thr_link->next);
		}

		set_link = mm_memory_load(set_link->next);
	}
#endif
}
Example #6
bool
mm_event_dispatch_advance_epoch(struct mm_event_dispatch *dispatch)
{
	ENTER();

	uint32_t epoch = mm_memory_load(dispatch->reclaim_epoch);
	bool rc = mm_event_dispatch_check_epoch(dispatch, epoch);
	if (rc) {
		mm_memory_fence(); // TODO: load_store fence
		mm_memory_store(dispatch->reclaim_epoch, epoch + 1);
		DEBUG("advance epoch %u", epoch + 1);
	}

	LEAVE();
	return rc;
}
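Together with mm_event_dispatch_check_epoch above, this is the grace-period step of the reclamation scheme: the epoch only advances once every listener has caught up, and a lagging listener gets poked with an observe request. A hypothetical driver that retries the advance might look like this (the loop and mm_thread_yield() are assumptions, not part of the shown code):

/* Sketch: keep trying to advance the reclaim epoch; once it succeeds,
 * memory retired two epochs ago can no longer be referenced. */
static void
reclaim_driver(struct mm_event_dispatch *dispatch)
{
	while (!mm_event_dispatch_advance_epoch(dispatch)) {
		/* A lagging listener was sent an observe request;
		 * yield so it gets a chance to update its epoch. */
		mm_thread_yield();
	}
}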
Example #7
struct mm_lock_stat *
mm_lock_getstat(struct mm_lock_stat_info *info)
{
	// Get statistics collection pertinent to the lock in question.
	struct mm_lock_stat_set *stat_set = mm_memory_load(info->stat);
	if (stat_set == NULL)
		stat_set = mm_lock_get_stat_set(info);

	// Get a statistic entry specific to the calling thread.
	struct mm_thread *thread = mm_thread_selfptr();
	struct mm_domain *domain = mm_thread_getdomain(thread);
	if (domain != NULL)
		return mm_lock_get_domain_stat(stat_set, thread, domain);
	else
		return mm_lock_get_thread_stat(stat_set, thread);
}
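The fast path here is a single atomic load of a cached pointer, with a slow-path lookup only on first use. One common way to build such a slow path so that racing threads converge on a single object is a CAS publish; the sketch below shows the general pattern and is not necessarily how mm_lock_get_stat_set works internally:

#include <stdatomic.h>
#include <stddef.h>

/* Sketch: return the cached object, creating/finding it on first use.
 * If two threads race, the CAS loser adopts the winner's pointer
 * (assumes lookup() is idempotent or its result can be dropped). */
static void *
lazy_get(void *_Atomic *cache, void *(*lookup)(void))
{
	void *obj = atomic_load_explicit(cache, memory_order_acquire);
	if (obj == NULL) {
		void *fresh = lookup();
		if (atomic_compare_exchange_strong(cache, &obj, fresh))
			obj = fresh;
	}
	return obj;
}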
Example #8
void
mm_thread_barrier_wait(struct mm_thread_barrier *const barrier, struct mm_thread_barrier_local *local)
{
	uint32_t sense = ~local->sense;

	if (mm_atomic_uint32_dec_and_test(&barrier->value) == 0) {
		mm_memory_store(barrier->value, barrier->count);
		mm_memory_store_fence();
		mm_memory_store(barrier->sense, sense);
	} else {
		mm_memory_fence(); // TODO: atomic_load fence
		while (mm_memory_load(barrier->sense) != sense)
			mm_cpu_backoff();
	}

	local->sense = sense;
}
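This is a sense-reversing centralized barrier: the last thread to arrive resets the counter and flips the shared sense, while every other thread spins until the shared sense matches its own flipped copy. A usage sketch, where do_phase_work() and the thread count N are assumptions:

/* Sketch: N worker threads meet at the barrier after each phase.
 * Each thread keeps its own mm_thread_barrier_local, so the flipping
 * sense never needs a global reset between phases. */
static void
worker(struct mm_thread_barrier *barrier,
       struct mm_thread_barrier_local *local, int nphases)
{
	for (int phase = 0; phase < nphases; phase++) {
		do_phase_work(phase);			/* hypothetical */
		mm_thread_barrier_wait(barrier, local);	/* all N meet here */
	}
}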
Example #9
void
mm_event_listener_wait(struct mm_event_listener *listener, mm_timeout_t timeout)
{
	ENTER();

#if ENABLE_NOTIFY_STAMP
	// Get the next expected notify stamp.
	const mm_ring_seqno_t stamp = mm_event_listener_dequeue_stamp(listener);

	if (timeout != 0) {
		// Advertise that the thread is about to sleep.
		uintptr_t state = (stamp << 2) | MM_EVENT_LISTENER_WAITING;
		mm_memory_store(listener->state, state);
		mm_memory_strict_fence(); // TODO: store_load fence

		// Wait for a wake-up notification or timeout unless
		// an already pending notification is detected.
		if (stamp == mm_event_listener_enqueue_stamp(listener))
			mm_event_listener_timedwait(listener, stamp, timeout);
	}

	// Advertise the start of another working cycle.
	mm_event_listener_finish(listener);
#else
	// Get the current listener state.
	const uint32_t listen_stamp = listener->listen_stamp;
	ASSERT((listen_stamp & MM_EVENT_LISTENER_STATE) == MM_EVENT_LISTENER_RUNNING);

	if (timeout != 0) {
		// Advertise that the thread is about to sleep.
		uint32_t wait_stamp = listen_stamp | MM_EVENT_LISTENER_WAITING;
		mm_memory_store(listener->listen_stamp, wait_stamp);

		mm_memory_strict_fence(); // TODO: store_load fence

		// Wait for a wake-up notification or timeout unless
		// an already pending notification is detected.
		if (listen_stamp == mm_memory_load(listener->notify_stamp))
			mm_event_listener_timedwait(listener, listen_stamp, timeout);
	}

	// Advertise the start of another working cycle.
	mm_event_listener_finish(listener, listen_stamp);
#endif

	LEAVE();
}
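The strict fence between storing the listener state and loading the enqueue (or notify) stamp is the classic store-load ordering required by any sleep/wake-up protocol: without it, the sleeper's check could read a stale stamp and miss a notification published concurrently. The core of the protocol, distilled into C11 seq_cst atomics (names illustrative):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int waiting;	/* sleeper advertises its intent here */
static atomic_int pending;	/* notifier publishes its work here */

/* Sleeper: advertise, then check. The seq_cst store/load pair acts as
 * the store-load fence; sleeping is safe only if nothing is pending. */
static bool
sleeper_may_sleep(void)
{
	atomic_store(&waiting, 1);
	return atomic_load(&pending) == 0;
}

/* Notifier: publish, then check whether an explicit wake-up is needed.
 * With both sides ordered this way, at least one of them observes the
 * other's store, so a wake-up can never be lost. */
static bool
notifier_must_wake(void)
{
	atomic_store(&pending, 1);
	return atomic_load(&waiting) != 0;
}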
Example #10
static struct mm_lock_stat_set *
mm_lock_find_stat_set(uint32_t bucket,
		      const char *location,
		      const char *moreinfo)
{
	// Go through bucket entries trying to find a match.
	struct mm_slink *link = mm_stack_atomic_load_head(&mm_lock_stat_table[bucket]);
	while (link != NULL) {
		struct mm_lock_stat_set *stat_set
			= containerof(link, struct mm_lock_stat_set, bucket_link);
		mm_memory_load_fence();
		// Match the current entry.
		if (mm_lock_match_stat_set(stat_set, location, moreinfo))
			return stat_set;
		link = mm_memory_load(link->next);
	}
	return NULL;
}
Example #11
static uint32_t
mc_action_get_exp_time(void)
{
	return mm_memory_load(mc_action_exp_time);
}
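mm_memory_load here just guarantees a single, untorn read of a shared timestamp. The portable C11 equivalent is a relaxed atomic load, sketched below assuming the variable were declared _Atomic:

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint32_t exp_time;	/* stands in for mc_action_exp_time */

static uint32_t
get_exp_time(void)
{
	/* Relaxed suffices: the caller only needs a consistent snapshot
	 * of a monotonically updated timestamp, not any ordering. */
	return atomic_load_explicit(&exp_time, memory_order_relaxed);
}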
Example #12
void
mm_event_listener_poll(struct mm_event_listener *listener, mm_timeout_t timeout)
{
	ENTER();

	// Prepare to receive events.
	mm_event_receiver_start(&listener->receiver);

#if ENABLE_NOTIFY_STAMP
	// Get the next expected notify stamp.
	const mm_ring_seqno_t stamp = mm_event_listener_dequeue_stamp(listener);

	if (timeout != 0) {
		// Cleanup stale event notifications.
		mm_event_backend_dampen(&listener->receiver.dispatch->backend);

		// Advertise that the thread is about to sleep.
		uintptr_t state = (stamp << 2) | MM_EVENT_LISTENER_POLLING;
		mm_memory_store(listener->state, state);
		mm_memory_strict_fence(); // TODO: store_load fence

		// Wait for a wake-up notification or timeout unless
		// an already pending notification is detected.
		if (stamp != mm_event_listener_enqueue_stamp(listener))
			timeout = 0;
	}

	// Check incoming events and wait for notification/timeout.
	mm_event_backend_listen(&listener->receiver.dispatch->backend,
				&listener->changes, &listener->receiver, timeout);

	// Advertise the start of another working cycle.
	mm_event_listener_finish(listener);
#else
	// Get the current listener state.
	const uint32_t listen_stamp = listener->listen_stamp;
	ASSERT((listen_stamp & MM_EVENT_LISTENER_STATE) == MM_EVENT_LISTENER_RUNNING);

	if (timeout != 0) {
		// Cleanup stale event notifications.
		mm_event_backend_dampen(&listener->receiver.dispatch->backend);

		// Advertise that the thread is about to sleep.
		uint32_t poll_stamp = listen_stamp | MM_EVENT_LISTENER_POLLING;
		mm_memory_store(listener->listen_stamp, poll_stamp);

		mm_memory_strict_fence(); // TODO: store_load fence

		// Wait for a wake-up notification or timeout unless
		// an already pending notification is detected.
		if (listen_stamp != mm_memory_load(listener->notify_stamp))
			timeout = 0;
	}

	// Check incoming events and wait for notification/timeout.
	mm_event_backend_listen(&listener->receiver.dispatch->backend,
				&listener->changes, &listener->receiver, timeout);

	// Advertise the start of another working cycle.
	mm_event_listener_finish(listener, listen_stamp);
#endif

	// Flush received events.
	mm_event_receiver_finish(&listener->receiver);

	LEAVE();
}