Example #1
static struct mm_lock_stat_set *
mm_lock_get_stat_set(struct mm_lock_stat_info *info)
{
	ASSERT(info->location != NULL);

	// Find the pertinent hash table bucket.
	uint32_t hash = mm_hash_fnv(info->location, strlen(info->location));
	if (info->moreinfo != NULL)
		hash = mm_hash_fnv_with_seed(info->moreinfo, strlen(info->moreinfo), hash);
	uint32_t bucket = hash % MM_LOCK_STAT_TABLE_SIZE;

	// Try to find statistics optimistically (w/o acquiring a lock).
	struct mm_lock_stat_set *stat_set
		= mm_lock_find_stat_set(bucket, info->location, info->moreinfo);
	if (likely(stat_set != NULL))
		return stat_set;

	// Copy identification information.
	char *location = mm_global_strdup(info->location);
	char *moreinfo = info->moreinfo == NULL ? NULL : mm_global_strdup(info->moreinfo);

	// Allocate a new statistics collection entry.
	stat_set = mm_global_alloc(sizeof(struct mm_lock_stat_set));
	stat_set->location = location;
	stat_set->moreinfo = moreinfo;

	// Initialize thread statistics.
	mm_stack_prepare(&stat_set->domain_list);
	mm_stack_prepare(&stat_set->thread_list);
	stat_set->domain_lock = (mm_lock_t) MM_LOCK_INIT;

	// Start critical section.
	mm_global_lock(&mm_lock_stat_lock);

	// Try to find it again in case it was added concurrently.
	struct mm_lock_stat_set *recheck_stat
		= mm_lock_find_stat_set(bucket, location, moreinfo);
	if (unlikely(recheck_stat != NULL)) {
		// Bail out if so.
		mm_global_unlock(&mm_lock_stat_lock);
		mm_global_free(location);
		mm_global_free(moreinfo);
		mm_global_free(stat_set);
		return recheck_stat;
	}

	// Make the entry globally visible.
	stat_set->common_link.next = mm_lock_stat_list.head.next;
	stat_set->bucket_link.next = mm_lock_stat_table[bucket].head.next;
	mm_memory_store_fence();
	mm_lock_stat_list.head.next = &stat_set->common_link;
	mm_lock_stat_table[bucket].head.next = &stat_set->bucket_link;

	// End critical section.
	mm_global_unlock(&mm_lock_stat_lock);

	return stat_set;
}
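
Example #1 follows an "optimistic lookup, double-checked insert" pattern: the entry is searched for without a lock, fully constructed outside the critical section, re-checked under the lock, and only then published behind a store fence so that concurrent lock-free readers never observe a partially initialized entry. A minimal sketch of the same pattern, using hypothetical names and C11 atomics with pthreads in place of the mm_* primitives, could look like this:

// Hedged sketch: double-checked insertion into a list that is read lock-free.
// All names here are hypothetical; only the pattern mirrors the code above.
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>

struct entry {
	struct entry *_Atomic next;	// traversed by lock-free readers
	char *key;
};

static struct entry *_Atomic head;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

// Lock-free lookup: safe because entries are fully built before publication.
static struct entry *
find(const char *key)
{
	struct entry *e = atomic_load_explicit(&head, memory_order_acquire);
	for (; e != NULL; e = atomic_load_explicit(&e->next, memory_order_acquire))
		if (strcmp(e->key, key) == 0)
			return e;
	return NULL;
}

static struct entry *
get_or_add(const char *key)
{
	// Optimistic attempt without taking the lock.
	struct entry *e = find(key);
	if (e != NULL)
		return e;

	// Construct the entry completely before it can become visible.
	e = malloc(sizeof *e);
	e->key = strdup(key);

	pthread_mutex_lock(&list_lock);
	// Re-check: another thread may have added the entry meanwhile.
	struct entry *recheck = find(key);
	if (recheck != NULL) {
		pthread_mutex_unlock(&list_lock);
		free(e->key);
		free(e);
		return recheck;
	}
	// Publish; the release store plays the role of mm_memory_store_fence().
	atomic_store_explicit(&e->next,
			      atomic_load_explicit(&head, memory_order_relaxed),
			      memory_order_relaxed);
	atomic_store_explicit(&head, e, memory_order_release);
	pthread_mutex_unlock(&list_lock);
	return e;
}
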
Example #2
void
mm_port_destroy(struct mm_port *port)
{
	ENTER();

	mm_list_delete(&port->ports);
	mm_global_free(port);

	LEAVE();
}
Example #3
void
mm_pool_cleanup(struct mm_pool *pool)
{
	ENTER();

	for (uint32_t i = 0; i < pool->block_array_used; i++)
		mm_arena_free(pool->arena, pool->block_array[i]);
	mm_arena_free(pool->arena, pool->block_array);

	mm_global_free(pool->pool_name);

	LEAVE();
}
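
The cleanup order in Example #3 is deliberate: every block is released while the tracking array is still valid, then the array itself, and finally the separately allocated pool name. A generic equivalent, assuming a hypothetical structure and plain malloc/free in place of mm_arena_* and mm_global_free, might read:

// Hedged sketch: freeing an array of blocks plus the array itself
// (hypothetical structure, plain malloc/free instead of the mm_* allocators).
#include <stdint.h>
#include <stdlib.h>

struct block_pool {
	void **block_array;		// heap-allocated array of heap blocks
	uint32_t block_array_used;
	char *pool_name;
};

static void
block_pool_cleanup(struct block_pool *pool)
{
	// Release every block first, while the tracking array is still valid.
	for (uint32_t i = 0; i < pool->block_array_used; i++)
		free(pool->block_array[i]);
	// Only then release the array itself and the pool name.
	free(pool->block_array);
	free(pool->pool_name);
}
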
Example #4
void
mm_chunk_destroy(struct mm_chunk *chunk)
{
	mm_chunk_t tag = mm_chunk_gettag(chunk);

	// A chunk from a shared memory space can be freed by any thread in
	// the same manner, relying on the synchronization mechanisms built
	// into the corresponding memory allocation routines.
	if (tag == MM_CHUNK_COMMON) {
		mm_common_free(chunk);
		return;
	}
	if (unlikely(tag == MM_CHUNK_GLOBAL)) {
		mm_global_free(chunk);
		return;
	}

	if (tag == MM_CHUNK_REGULAR) {
#if ENABLE_SMP
		// In SMP mode the regular memory space is just another case
		// of a shared space with built-in synchronization, so it too
		// can be freed by any thread.
		mm_regular_free(chunk);
		return;
#else
		struct mm_domain *domain = mm_domain_selfptr();
		if (domain == mm_regular_domain) {
			mm_regular_free(chunk);
			return;
		}
#endif
	}

	// A chunk from a private space can be freed immediately by its
	// originating thread, but for any other thread it is subject to
	// the asynchronous memory reclamation mechanism.
	struct mm_thread *thread = mm_thread_selfptr();
	struct mm_domain *domain = mm_thread_getdomain(thread);
	if (domain == mm_regular_domain && tag == mm_thread_getnumber(thread)) {
		mm_private_free(chunk);
		return;
	}

	thread->deferred_chunks_count++;
	mm_chunk_stack_insert(&thread->deferred_chunks, chunk);
	mm_chunk_enqueue_deferred(thread, false);
}
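
The tag dispatch in Example #4 reduces to one rule: a chunk may be freed directly only through an allocator that is safe for the calling thread; anything owned by another thread is handed back through a deferred list that the owner reclaims later. The following sketch shows that hand-off in a simplified form, with hypothetical types, a mutex-protected list, and a direct push instead of the local batching and asynchronous enqueue performed by mm_chunk_enqueue_deferred:

// Hedged sketch: deferred cross-thread reclamation (hypothetical names).
#include <pthread.h>
#include <stdlib.h>

struct deferred_node {
	struct deferred_node *next;
};

struct owner_thread {
	pthread_mutex_t deferred_lock;
	struct deferred_node *deferred_head;	// chunks waiting to be freed
};

// Called by any thread that does not own the chunk's memory space.
static void
defer_free(struct owner_thread *owner, void *chunk)
{
	struct deferred_node *node = chunk;	// reuse the chunk as a list node
	pthread_mutex_lock(&owner->deferred_lock);
	node->next = owner->deferred_head;
	owner->deferred_head = node;
	pthread_mutex_unlock(&owner->deferred_lock);
}

// Called by the owning thread at a convenient point in its loop.
static void
drain_deferred(struct owner_thread *self)
{
	pthread_mutex_lock(&self->deferred_lock);
	struct deferred_node *node = self->deferred_head;
	self->deferred_head = NULL;
	pthread_mutex_unlock(&self->deferred_lock);

	while (node != NULL) {
		struct deferred_node *next = node->next;
		free(node);	// stands in for the owner's private free
		node = next;
	}
}
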
Example #5
struct mm_lock_stat *
mm_lock_get_domain_stat(struct mm_lock_stat_set *stat_set,
			struct mm_thread *thread,
			struct mm_domain *domain)
{
	mm_thread_t dom_index = mm_thread_getnumber(thread);

	// Try to find domain entry optimistically (w/o acquiring a lock).
	struct mm_lock_domain_stat *dom_stat
		= mm_lock_find_domain_stat(stat_set, domain);
	if (likely(dom_stat != NULL))
		return MM_THREAD_LOCAL_DEREF(dom_index, dom_stat->stat);

	// Allocate a new statistics entry.
	dom_stat = mm_global_alloc(sizeof(struct mm_lock_domain_stat));
	dom_stat->domain = domain;

	// Mark it as not ready.
	dom_stat->ready = 0;

	// Start critical section.
	mm_global_lock(&stat_set->domain_lock);

	// Try to find it again in case it was added concurrently.
	struct mm_lock_domain_stat *recheck_stat
		= mm_lock_find_domain_stat(stat_set, domain);
	if (unlikely(recheck_stat != NULL)) {
		// Bail out if so.
		mm_global_unlock(&stat_set->domain_lock);
		mm_global_free(dom_stat);
		return MM_THREAD_LOCAL_DEREF(dom_index, recheck_stat->stat);
	}

	mm_stack_insert(&stat_set->domain_list, &dom_stat->link);

	// End critical section.
	mm_global_unlock(&stat_set->domain_lock);

	// Initialize per-thread data.
	char *name;
	if (stat_set->moreinfo != NULL)
		name = mm_format(&mm_global_arena, "lock %s (%s)",
				 stat_set->location, stat_set->moreinfo);
	else
		name = mm_format(&mm_global_arena, "lock %s",
				 stat_set->location);

	MM_THREAD_LOCAL_ALLOC(domain, name, dom_stat->stat);
	for (mm_thread_t c = 0; c < domain->nthreads; c++) {
		struct mm_lock_stat *stat = MM_THREAD_LOCAL_DEREF(c, dom_stat->stat);
		stat->lock_count = 0;
		stat->fail_count = 0;
	}
	mm_global_free(name);

	// Mark it as ready.
	mm_memory_store_fence();
	dom_stat->ready = 1;

	return MM_THREAD_LOCAL_DEREF(dom_index, dom_stat->stat);
}
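
Unlike Example #1, the entry here is linked into the domain list before its per-thread statistics are allocated, so publication order alone is not enough; visibility is gated by the ready flag: the writer finishes initialization, issues a store fence, and only then sets ready, while a lock-free reader is presumably expected to treat an entry whose flag is still zero as absent. The flag protocol on its own, with hypothetical names and C11 atomics standing in for mm_memory_store_fence(), might be sketched as:

// Hedged sketch: publish-with-ready-flag (hypothetical names).
#include <stdatomic.h>
#include <stdbool.h>

struct dom_stat {
	long lock_count;
	long fail_count;
	atomic_int ready;	// 0 until the fields above are initialized
};

// Writer side: initialize first, then flip the flag with release semantics
// (the counterpart of mm_memory_store_fence() followed by a plain store).
static void
publish(struct dom_stat *stat)
{
	stat->lock_count = 0;
	stat->fail_count = 0;
	atomic_store_explicit(&stat->ready, 1, memory_order_release);
}

// Reader side: only use the entry once the flag has been observed as set.
static bool
try_use(struct dom_stat *stat)
{
	if (atomic_load_explicit(&stat->ready, memory_order_acquire) == 0)
		return false;	// not initialized yet, treat it as absent
	stat->lock_count++;	// per-thread in the original, so a plain increment is fine
	return true;
}
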