Code example #1
File: chunk.c  Project: ademakov/MainMemory
void
mm_chunk_destroy(struct mm_chunk *chunk)
{
	mm_chunk_t tag = mm_chunk_gettag(chunk);

	// A chunk from a shared memory space can be freed by any thread in
	// the same manner, relying on the synchronization built into the
	// corresponding memory allocation routines.
	if (tag == MM_CHUNK_COMMON) {
		mm_common_free(chunk);
		return;
	}
	if (unlikely(tag == MM_CHUNK_GLOBAL)) {
		mm_global_free(chunk);
		return;
	}

	if (tag == MM_CHUNK_REGULAR) {
#if ENABLE_SMP
		// In SMP mode the regular memory space is just another case
		// of shared space with built-in synchronization, so it too
		// can be freed by any thread.
		mm_regular_free(chunk);
		return;
#else
		struct mm_thread *thread = mm_thread_selfptr();
		if (mm_thread_ident(thread) == 0) {
			mm_regular_free(chunk);
			return;
		}
#endif
	}

	// A chunk from a private space can be freed immediately by its
	// originating thread, but for any other thread it is subject to
	// the asynchronous memory reclamation mechanism.
	struct mm_thread *thread = mm_thread_selfptr();
	if (tag == mm_thread_ident(thread)) {
		mm_private_free(chunk);
		return;
	}

	thread->deferred_chunks_count++;
	mm_chunk_stack_insert(&thread->deferred_chunks, chunk);
	mm_chunk_enqueue_deferred(thread, false);
}
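
The tail of mm_chunk_destroy shows the deferral path: a thread that cannot free the chunk itself buffers it on its own deferred list and lets mm_chunk_enqueue_deferred hand accumulated chunks over for asynchronous reclamation (the false argument presumably means the flush is not forced). Below is a minimal sketch of that buffering pattern in plain C; all names, the batch threshold, and the post_deferred_batch helper are assumptions made for illustration, not MainMemory APIs.

/* Illustrative sketch, not MainMemory code: a freeing thread buffers
 * chunks it cannot release itself and ships them in batches to their
 * owners for asynchronous reclamation. */
#include <stdbool.h>
#include <stddef.h>

struct chunk_node {
	struct chunk_node *next;
};

struct freeing_thread {
	struct chunk_node *deferred_head;	/* thread-local, no locking needed */
	size_t deferred_count;
};

#define DEFERRED_BATCH_SIZE 64		/* threshold chosen for the example */

/* Hypothetical hand-off to the owning threads; a real implementation
 * would message each owner so it can reclaim its chunks. */
static void post_deferred_batch(struct freeing_thread *self, bool force)
{
	(void) self;
	(void) force;
}

static void defer_chunk(struct freeing_thread *self, struct chunk_node *chunk)
{
	// Buffer the chunk locally; only this thread touches its own list.
	chunk->next = self->deferred_head;
	self->deferred_head = chunk;
	self->deferred_count++;

	// Hand the batch over once it is large enough; a forced flush
	// (force == true) would be used, for instance, on thread shutdown.
	if (self->deferred_count >= DEFERRED_BATCH_SIZE)
		post_deferred_batch(self, false);
}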
Code example #2
mm_lock_getstat(struct mm_lock_stat_info *info)
{
	// Get the statistics collection pertinent to the lock in question.
	struct mm_lock_stat_set *stat_set = mm_memory_load(info->stat);
	if (stat_set == NULL)
		stat_set = mm_lock_get_stat_set(info);

	// Get the statistics entry specific to the calling thread.
	struct mm_thread *thread = mm_thread_selfptr();
	struct mm_domain *domain = mm_thread_getdomain(thread);
	if (domain != NULL)
		return mm_lock_get_domain_stat(stat_set, thread, domain);
	else
		return mm_lock_get_thread_stat(stat_set, thread);
}
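
The first step relies on lazily publishing the per-lock statistics set: mm_memory_load reads info->stat, and mm_lock_get_stat_set presumably creates and installs the set when the pointer is still NULL. A minimal sketch of that publish-or-adopt pattern using C11 atomics follows; the struct layouts and names are assumptions for the example and do not mirror the MainMemory implementation.

/* Illustrative sketch, not the MainMemory API: lazily publish a shared
 * statistics set so concurrent callers either see the installed pointer
 * or race to install one, with exactly one winner. */
#include <stdatomic.h>
#include <stdlib.h>

struct lock_stat_set {
	long contention_count;	/* per-lock counters would live here */
};

struct lock_stat_info {
	_Atomic(struct lock_stat_set *) stat;
};

static struct lock_stat_set *get_stat_set(struct lock_stat_info *info)
{
	// Fast path: the set has already been published.
	struct lock_stat_set *set =
		atomic_load_explicit(&info->stat, memory_order_acquire);
	if (set != NULL)
		return set;

	// Slow path: build a candidate and try to install it
	// (allocation-failure handling is omitted in this sketch).
	struct lock_stat_set *fresh = calloc(1, sizeof *fresh);
	struct lock_stat_set *expected = NULL;
	if (atomic_compare_exchange_strong_explicit(&info->stat, &expected, fresh,
						    memory_order_acq_rel,
						    memory_order_acquire))
		return fresh;

	// Another thread won the race; discard ours and use theirs.
	free(fresh);
	return expected;
}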
Code example #3
File: chunk.c  Project: ademakov/MainMemory
struct mm_chunk * MALLOC
mm_chunk_create(size_t size)
{
	// Prefer private space if available.
#if ENABLE_SMP
	struct mm_private_space *space = mm_private_space_get();
	if (mm_private_space_ready(space))
		return mm_chunk_create_private(size);
#else
	struct mm_thread *thread = mm_thread_selfptr();
	if (mm_thread_ident(thread) == 0 && mm_private_space_ready(&mm_regular_space))
		return mm_chunk_create_regular(size);
#endif

	// Common space can only be used after it has been
	// initialized during bootstrap.
	if (likely(mm_common_space_ready()))
		return mm_chunk_create_common(size);

	// Use the global allocator if everything else fails
	// (that is, during bootstrap and shutdown).
	return mm_chunk_create_global(size);
}
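
For context, here is a minimal usage sketch of the two chunk.c entry points shown above; the forward declarations merely restate the signatures visible in the examples and stand in for whatever header the project actually provides.

/* Usage sketch based only on the signatures shown in the examples above. */
#include <stddef.h>

struct mm_chunk;
struct mm_chunk *mm_chunk_create(size_t size);
void mm_chunk_destroy(struct mm_chunk *chunk);

static void chunk_roundtrip(void)
{
	// mm_chunk_create picks the best available space: private if ready,
	// otherwise common, otherwise the global allocator.
	struct mm_chunk *chunk = mm_chunk_create(256);

	/* ... fill and use the chunk's payload ... */

	// mm_chunk_destroy dispatches on the chunk's tag and either frees
	// the memory directly or defers reclamation as shown in example #1.
	mm_chunk_destroy(chunk);
}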