Example #1
void
mm_chunk_destroy(struct mm_chunk *chunk)
{
	mm_chunk_t tag = mm_chunk_gettag(chunk);

	// A chunk from a shared memory space can be freed by any thread
	// directly, relying on the synchronization built into the
	// corresponding memory allocation routines.
	if (tag == MM_CHUNK_COMMON) {
		mm_common_free(chunk);
		return;
	}
	if (unlikely(tag == MM_CHUNK_GLOBAL)) {
		mm_global_free(chunk);
		return;
	}

	if (tag == MM_CHUNK_REGULAR) {
#if ENABLE_SMP
		// In SMP mode the regular memory space is just another case
		// of shared space with built-in synchronization, so chunks
		// from it can likewise be freed by any thread.
		mm_regular_free(chunk);
		return;
#else
		struct mm_domain *domain = mm_domain_selfptr();
		if (domain == mm_regular_domain) {
			mm_regular_free(chunk);
			return;
		}
#endif
	}

	// A chunk from a private space can be freed immediately by its
	// originating thread, but for any other thread it is subject to
	// the asynchronous memory reclamation mechanism.
	struct mm_thread *thread = mm_thread_selfptr();
	struct mm_domain *domain = mm_thread_getdomain(thread);
	if (domain == mm_regular_domain && tag == mm_thread_getnumber(thread)) {
		mm_private_free(chunk);
		return;
	}

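	// Otherwise defer the chunk for later reclamation by its owner thread.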
	thread->deferred_chunks_count++;
	mm_chunk_stack_insert(&thread->deferred_chunks, chunk);
	mm_chunk_enqueue_deferred(thread, false);
}
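
The tail of mm_chunk_destroy illustrates deferred reclamation: a chunk freed
from a foreign thread is pushed onto a per-thread list and reclaimed later by
its owner. Below is a minimal, single-threaded sketch of that idea with
hypothetical names (struct chunk, defer_free, drain_deferred); the real code
pushes through a synchronized stack (mm_chunk_stack_insert) and notifies the
owner via mm_chunk_enqueue_deferred.

#include <stddef.h>
#include <stdlib.h>

struct chunk {
	struct chunk *next;
	/* ... payload ... */
};

struct thread_ctx {
	struct chunk *deferred;		/* chunks freed by foreign threads */
	size_t deferred_count;
};

/* Called from a non-owner thread: defer instead of freeing in place.
 * A real implementation would need an atomic push here. */
static void
defer_free(struct thread_ctx *owner, struct chunk *c)
{
	c->next = owner->deferred;
	owner->deferred = c;
	owner->deferred_count++;
}

/* Called by the owner thread: reclaim everything deferred so far. */
static void
drain_deferred(struct thread_ctx *owner)
{
	struct chunk *c = owner->deferred;
	owner->deferred = NULL;
	owner->deferred_count = 0;
	while (c != NULL) {
		struct chunk *next = c->next;
		free(c);
		c = next;
	}
}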
Example #2
struct mm_lock_stat *
mm_lock_get_domain_stat(struct mm_lock_stat_set *stat_set,
			struct mm_thread *thread,
			struct mm_domain *domain)
{
	mm_thread_t dom_index = mm_thread_getnumber(thread);

	// Try to find the domain entry optimistically (without acquiring a lock).
	struct mm_lock_domain_stat *dom_stat
		= mm_lock_find_domain_stat(stat_set, domain);
	if (likely(dom_stat != NULL))
		return MM_THREAD_LOCAL_DEREF(dom_index, dom_stat->stat);

	// Allocate a new statistics entry.
	dom_stat = mm_global_alloc(sizeof(struct mm_lock_domain_stat));
	dom_stat->domain = domain;

	// Mark it as not ready.
	dom_stat->ready = 0;

	// Start critical section.
	mm_global_lock(&stat_set->domain_lock);

	// Try to find it again in case it was added concurrently.
	struct mm_lock_domain_stat *recheck_stat
		= mm_lock_find_domain_stat(stat_set, domain);
	if (unlikely(recheck_stat != NULL)) {
		// Bail out if so.
		mm_global_unlock(&stat_set->domain_lock);
		mm_global_free(dom_stat);
		return MM_THREAD_LOCAL_DEREF(dom_index, recheck_stat->stat);
	}

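	// Add the new entry to the list; its ready flag guards access to the
	// per-thread data that is initialized below.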
	mm_stack_insert(&stat_set->domain_list, &dom_stat->link);

	// End critical section.
	mm_global_unlock(&stat_set->domain_lock);

	// Initialize per-thread data.
	char *name;
	if (stat_set->moreinfo != NULL)
		name = mm_format(&mm_global_arena, "lock %s (%s)",
				 stat_set->location, stat_set->moreinfo);
	else
		name = mm_format(&mm_global_arena, "lock %s",
				 stat_set->location);

	MM_THREAD_LOCAL_ALLOC(domain, name, dom_stat->stat);
	for (mm_thread_t c = 0; c < domain->nthreads; c++) {
		struct mm_lock_stat *stat = MM_THREAD_LOCAL_DEREF(c, dom_stat->stat);
		stat->lock_count = 0;
		stat->fail_count = 0;
	}
	mm_global_free(name);

	// Mark it as ready. The store fence ensures that the per-thread
	// data initialized above becomes visible before the ready flag.
	mm_memory_store_fence();
	dom_stat->ready = 1;

	return MM_THREAD_LOCAL_DEREF(dom_index, dom_stat->stat);
}
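
The function above combines an optimistic lock-free lookup, a recheck under
the lock, and publication guarded by a ready flag that is set only after a
store fence. The following sketch expresses the same publication protocol
with C11 atomics; the struct and function names are illustrative, and the
original uses mm_memory_store_fence with a plain flag instead.

#include <stdatomic.h>
#include <stdbool.h>

struct entry {
	int data;		/* stands in for the per-thread statistics */
	atomic_bool ready;
};

/* Writer: initialize the payload first, then set the flag with release
 * semantics so the payload is visible before the flag. */
static void
publish(struct entry *e, int value)
{
	e->data = value;
	atomic_store_explicit(&e->ready, true, memory_order_release);
}

/* Reader: an acquire load of the flag guarantees that a true result
 * makes the initialized payload visible. */
static bool
consume(struct entry *e, int *out)
{
	if (!atomic_load_explicit(&e->ready, memory_order_acquire))
		return false;
	*out = e->data;
	return true;
}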