Example #1
struct mm_lock_stat *
mm_lock_get_thread_stat(struct mm_lock_stat_set *stat_set,
			struct mm_thread *thread)
{
	struct mm_lock_thread_stat *thr_stat;

	// Look for a matching thread entry.
	struct mm_slink *link = mm_stack_atomic_load_head(&stat_set->thread_list);
	while (link != NULL) {
		thr_stat = containerof(link, struct mm_lock_thread_stat, link);
		if (thr_stat->thread == thread)
			return &thr_stat->stat;
		link = mm_memory_load(link->next);
	}

	// If not found, create a new entry.
	thr_stat = mm_global_alloc(sizeof(struct mm_lock_thread_stat));
	thr_stat->thread = thread;
	thr_stat->stat.lock_count = 0;
	thr_stat->stat.fail_count = 0;

	// Link the entry into the list.
	struct mm_slink *head = mm_stack_atomic_load_head(&stat_set->thread_list);
	for (uint32_t b = 0; ; b = mm_thread_backoff(b)) {
		thr_stat->link.next = head;
		link = mm_stack_atomic_cas_head(&stat_set->thread_list, head, &thr_stat->link);
		if (link == head)
			break;
		head = link;
	}

	return &thr_stat->stat;
}
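
Example #1 pushes the new entry onto a lock-free stack: the CAS loop re-links the entry against a fresh head until the exchange succeeds, with mm_thread_backoff() easing contention between retries. Below is a minimal sketch of the same push pattern in portable C11 atomics; every name in it is illustrative, not part of MainMemory.

#include <stdatomic.h>

struct node { struct node *next; };
struct stack { _Atomic(struct node *) head; };

// Push a node with compare-and-swap, retrying on contention.
static void
stack_push(struct stack *stack, struct node *node)
{
	struct node *head = atomic_load_explicit(&stack->head, memory_order_relaxed);
	do {
		node->next = head;
		// On failure the CAS reloads the current head into 'head'.
	} while (!atomic_compare_exchange_weak_explicit(
			&stack->head, &head, node,
			memory_order_release, memory_order_relaxed));
}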
Example #2
static struct mm_lock_stat_set *
mm_lock_get_stat_set(struct mm_lock_stat_info *info)
{
	ASSERT(info->location != NULL);

	// Find the pertinent hash table bucket.
	uint32_t hash = mm_hash_fnv(info->location, strlen(info->location));
	if (info->moreinfo != NULL)
		hash = mm_hash_fnv_with_seed(info->moreinfo, strlen(info->moreinfo), hash);
	uint32_t bucket = hash % MM_LOCK_STAT_TABLE_SIZE;

	// Try to find statistics optimistically (w/o acquiring a lock).
	struct mm_lock_stat_set *stat_set
		= mm_lock_find_stat_set(bucket, info->location, info->moreinfo);
	if (likely(stat_set != NULL))
		return stat_set;

	// Copy identification information.
	char *location = mm_global_strdup(info->location);
	char *moreinfo = info->moreinfo == NULL ? NULL : mm_global_strdup(info->moreinfo);

	// Allocate a new statistics collection entry.
	stat_set = mm_global_alloc(sizeof(struct mm_lock_stat_set));
	stat_set->location = location;
	stat_set->moreinfo = moreinfo;

	// Initialize thread statistics.
	mm_stack_prepare(&stat_set->domain_list);
	mm_stack_prepare(&stat_set->thread_list);
	stat_set->domain_lock = (mm_lock_t) MM_LOCK_INIT;

	// Start critical section.
	mm_global_lock(&mm_lock_stat_lock);

	// Try to find it again in case it was added concurrently.
	struct mm_lock_stat_set *recheck_stat
		= mm_lock_find_stat_set(bucket, location, moreinfo);
	if (unlikely(recheck_stat != NULL)) {
		// Bail out if so.
		mm_global_unlock(&mm_lock_stat_lock);
		mm_global_free(location);
		mm_global_free(moreinfo);
		mm_global_free(stat_set);
		return recheck_stat;
	}

	// Make the entry globally visible.
	stat_set->common_link.next = mm_lock_stat_list.head.next;
	stat_set->bucket_link.next = mm_lock_stat_table[bucket].head.next;
	mm_memory_store_fence();
	mm_lock_stat_list.head.next = &stat_set->common_link;
	mm_lock_stat_table[bucket].head.next = &stat_set->bucket_link;

	// End critical section.
	mm_global_unlock(&mm_lock_stat_lock);

	return stat_set;
}
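
Example #2 is a double-checked insertion: the lookup runs without the lock, the new entry is allocated and filled outside the critical section, a second lookup under the lock guards against a concurrent insert, and a store fence orders the entry's fields before the pointer stores that publish it. A condensed, self-contained sketch of the same pattern, assuming a pthread mutex in place of mm_global_lock and C11 atomics in place of the explicit fence (all names illustrative):

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct config { int value; };

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static _Atomic(struct config *) global_config;

struct config *
config_get(void)
{
	// Optimistic lock-free read; pairs with the release store below.
	struct config *cfg = atomic_load_explicit(&global_config, memory_order_acquire);
	if (cfg != NULL)
		return cfg;

	// Build the object outside the critical section.
	struct config *fresh = malloc(sizeof *fresh);
	fresh->value = 42;

	pthread_mutex_lock(&init_lock);
	// Re-check under the lock in case another thread won the race.
	cfg = atomic_load_explicit(&global_config, memory_order_relaxed);
	if (cfg == NULL) {
		// The release store plays the role of mm_memory_store_fence():
		// the initialized fields become visible before the pointer does.
		atomic_store_explicit(&global_config, fresh, memory_order_release);
		cfg = fresh;
		fresh = NULL;
	}
	pthread_mutex_unlock(&init_lock);
	free(fresh); // NULL if we won the race; the loser's copy otherwise
	return cfg;
}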
Example #3
struct mm_chunk * MALLOC
mm_chunk_create_global(size_t size)
{
	// Reserve room for the chunk header in front of the payload.
	size += sizeof(struct mm_chunk);
	struct mm_chunk *chunk = mm_global_alloc(size);
	// Tag the chunk as belonging to the global memory space.
	chunk->base.tag = MM_CHUNK_GLOBAL;
	mm_slink_prepare(&chunk->base.slink);
	return chunk;
}
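
Example #3 makes a single allocation that carries both the chunk header and the caller's payload right behind it; the MALLOC marker is presumably an attribute macro (in the spirit of __attribute__((__malloc__))) telling the compiler the returned pointer does not alias existing memory, and the unchecked use of mm_global_alloc suggests the allocator aborts rather than returning NULL. A simplified, self-contained sketch of the header-plus-payload idiom (illustrative names only):

#include <stdlib.h>

struct chunk {
	int tag;     // stands in for chunk->base.tag
	char data[]; // payload starts right after the header
};

static struct chunk *
chunk_create(size_t size)
{
	// One allocation covers the header and 'size' bytes of payload.
	struct chunk *chunk = malloc(sizeof(struct chunk) + size);
	if (chunk != NULL)
		chunk->tag = 0;
	return chunk;
}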
Example #4
struct mm_port *
mm_port_create(struct mm_task *task)
{
	ENTER();

	// Allocate and initialize the port.
	struct mm_port *port = mm_global_alloc(sizeof(struct mm_port));
	port->lock = (mm_regular_lock_t) MM_REGULAR_LOCK_INIT;
	port->task = task;
	port->start = 0;
	port->count = 0;
	mm_waitset_prepare(&port->blocked_senders);

	// Register the port with its owner task.
	mm_list_append(&task->ports, &port->ports);

	LEAVE();
	return port;
}
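
Example #4 initializes the start/count pair, which suggests the port buffers messages in a ring addressed by a start offset and a fill count, with blocked_senders parking senders while the ring is full. A generic sketch of that indexing scheme (illustrative only, not MainMemory's actual buffer code):

#include <stdbool.h>
#include <stdint.h>

#define RING_SIZE 16 // illustrative capacity

struct ring {
	uint32_t start; // index of the oldest element
	uint32_t count; // number of buffered elements
	uint32_t data[RING_SIZE];
};

static bool
ring_put(struct ring *r, uint32_t v)
{
	if (r->count == RING_SIZE)
		return false; // full: the sender would block on the waitset
	r->data[(r->start + r->count) % RING_SIZE] = v;
	r->count++;
	return true;
}

static bool
ring_get(struct ring *r, uint32_t *v)
{
	if (r->count == 0)
		return false; // empty
	*v = r->data[r->start];
	r->start = (r->start + 1) % RING_SIZE;
	r->count--;
	return true;
}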
Example #5
struct mm_lock_stat *
mm_lock_get_domain_stat(struct mm_lock_stat_set *stat_set,
			struct mm_thread *thread,
			struct mm_domain *domain)
{
	mm_thread_t dom_index = mm_thread_getnumber(thread);

	// Try to find the domain entry optimistically (w/o acquiring a lock).
	struct mm_lock_domain_stat *dom_stat
		= mm_lock_find_domain_stat(stat_set, domain);
	if (likely(dom_stat != NULL))
		return MM_THREAD_LOCAL_DEREF(dom_index, dom_stat->stat);

	// Allocate a new statistics entry.
	dom_stat = mm_global_alloc(sizeof(struct mm_lock_domain_stat));
	dom_stat->domain = domain;

	// Mark it as not ready.
	dom_stat->ready = 0;

	// Start critical section.
	mm_global_lock(&stat_set->domain_lock);

	// Try to find it again in case it was added concurrently.
	struct mm_lock_domain_stat *recheck_stat
		= mm_lock_find_domain_stat(stat_set, domain);
	if (unlikely(recheck_stat != NULL)) {
		// Bail out if so.
		mm_global_unlock(&stat_set->domain_lock);
		mm_global_free(dom_stat);
		return MM_THREAD_LOCAL_DEREF(dom_index, recheck_stat->stat);
	}

	mm_stack_insert(&stat_set->domain_list, &dom_stat->link);

	// End critical section.
	mm_global_unlock(&stat_set->domain_lock);

	// Initialize per-thread data.
	char *name;
	if (stat_set->moreinfo != NULL)
		name = mm_format(&mm_global_arena, "lock %s (%s)",
				 stat_set->location, stat_set->moreinfo);
	else
		name = mm_format(&mm_global_arena, "lock %s",
				 stat_set->location);

	MM_THREAD_LOCAL_ALLOC(domain, name, dom_stat->stat);
	for (mm_thread_t c = 0; c < domain->nthreads; c++) {
		struct mm_lock_stat *stat = MM_THREAD_LOCAL_DEREF(c, dom_stat->stat);
		stat->lock_count = 0;
		stat->fail_count = 0;
	}
	mm_global_free(name);

	// Mark it as ready.
	mm_memory_store_fence();
	dom_stat->ready = 1;

	return MM_THREAD_LOCAL_DEREF(dom_index, dom_stat->stat);
}
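
Example #5 publishes the new entry in two steps: it is linked into domain_list early, so concurrent creators find it and bail out, but the ready flag is raised only after the per-thread slots are initialized, with a store fence in between. A reader such as mm_lock_find_domain_stat therefore has to check the flag with matching load-side ordering before touching the statistics; a minimal sketch of such a reader in C11 atomics (all names illustrative):

#include <stdatomic.h>
#include <stddef.h>

struct dom_stat {
	struct dom_stat *next;
	const void *domain;
	_Atomic int ready; // 0 until initialization completes
};

// Return the entry for 'domain' only once it is fully initialized.
static struct dom_stat *
find_ready(struct dom_stat *head, const void *domain)
{
	for (struct dom_stat *e = head; e != NULL; e = e->next) {
		if (e->domain != domain)
			continue;
		// The acquire load pairs with the writer's store fence:
		// seeing ready == 1 guarantees the stats are initialized.
		if (atomic_load_explicit(&e->ready, memory_order_acquire))
			return e;
		return NULL; // found but not ready yet; the caller may retry
	}
	return NULL;
}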