Example #1
struct mm_lock_stat *
mm_lock_get_thread_stat(struct mm_lock_stat_set *stat_set,
			struct mm_thread *thread)
{
	struct mm_lock_thread_stat *thr_stat;

	// Look for a matching thread entry.
	struct mm_slink *link = mm_stack_atomic_load_head(&stat_set->thread_list);
	while (link != NULL) {
		thr_stat = containerof(link, struct mm_lock_thread_stat, link);
		if (thr_stat->thread == thread)
			return &thr_stat->stat;
		link = mm_memory_load(link->next);
	}

	// If not found, create a new entry.
	thr_stat = mm_global_alloc(sizeof(struct mm_lock_thread_stat));
	thr_stat->thread = thread;
	thr_stat->stat.lock_count = 0;
	thr_stat->stat.fail_count = 0;

	// Link the entry into the list.
	struct mm_slink *head = mm_stack_atomic_load_head(&stat_set->thread_list);
	for (uint32_t b = 0; ; b = mm_thread_backoff(b)) {
		thr_stat->link.next = head;
		link = mm_stack_atomic_cas_head(&stat_set->thread_list, head, &thr_stat->link);
		if (link == head)
			break;
		head = link;
	}

	return &thr_stat->stat;
}
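
A minimal usage sketch (not part of the original listing): the caller below and its acquired flag are hypothetical, but the stat fields lock_count and fail_count are the ones initialized above.

static void
update_lock_stat(struct mm_lock_stat_set *stat_set, struct mm_thread *thread,
		 bool acquired)
{
	// Find or lazily create the per-thread entry, then bump a counter.
	struct mm_lock_stat *stat = mm_lock_get_thread_stat(stat_set, thread);
	if (acquired)
		stat->lock_count++;
	else
		stat->fail_count++;
}
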
Example #2
void
mm_chunk_enqueue_deferred(struct mm_thread *thread, bool flush)
{
	if (!flush && thread->deferred_chunks_count < MM_CHUNK_FLUSH_THRESHOLD)
		return;

	// Capture all the deferred chunks.
	struct mm_stack chunks = thread->deferred_chunks;
	mm_stack_prepare(&thread->deferred_chunks);
	thread->deferred_chunks_count = 0;

	// Try to submit the chunks to respective reclamation queues.
	while (!mm_stack_empty(&chunks)) {
		struct mm_chunk *chunk = mm_chunk_stack_remove(&chunks);

		struct mm_domain *domain = mm_regular_domain;
#if ENABLE_SMP
		mm_chunk_t tag = mm_chunk_gettag(chunk);
		struct mm_thread *origin = mm_domain_getthread(domain, tag);
#else
		struct mm_thread *origin = mm_domain_getthread(domain, 0);
#endif
		uint32_t backoff = 0;
		while (!mm_thread_trypost_1(origin, mm_chunk_free_req, (uintptr_t) chunk)) {
			if (backoff >= MM_BACKOFF_SMALL) {
				// If the chunk could not be submitted after a
				// number of attempts, defer it again.
				mm_chunk_stack_insert(&thread->deferred_chunks, chunk);
				thread->deferred_chunks_count++;
				break;
			}
			backoff = mm_thread_backoff(backoff);
		}
	}

	// Report if chunk reclamation consistently runs into problems.
	if (thread->deferred_chunks_count > MM_CHUNK_ERROR_THRESHOLD) {
		if (thread->deferred_chunks_count < MM_CHUNK_FATAL_THRESHOLD)
			mm_error(0, "Problem with chunk reclamation");
		else
			mm_fatal(0, "Problem with chunk reclamation");
	}
}
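
A hedged sketch of possible call sites (not from the original source): both wrapper functions below are hypothetical, and the thread pointer is assumed to be the caller's own thread descriptor.

static void
example_reclaim_tick(struct mm_thread *thread)
{
	// Opportunistic path: deferred chunks are submitted only once the
	// backlog crosses MM_CHUNK_FLUSH_THRESHOLD.
	mm_chunk_enqueue_deferred(thread, false);
}

static void
example_thread_quiesce(struct mm_thread *thread)
{
	// Forced path: push out everything that is still deferred, e.g. at a
	// quiescent point before the thread goes idle.
	mm_chunk_enqueue_deferred(thread, true);
}
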