Example #1
static struct mm_lock_stat_set *
mm_lock_get_stat_set(struct mm_lock_stat_info *info)
{
	ASSERT(info->location != NULL);

	// Find the pertinent hash table bucket.
	uint32_t hash = mm_hash_fnv(info->location, strlen(info->location));
	if (info->moreinfo != NULL)
		hash = mm_hash_fnv_with_seed(info->moreinfo, strlen(info->moreinfo), hash);
	uint32_t bucket = hash % MM_LOCK_STAT_TABLE_SIZE;

	// Try to find statistics optimistically (w/o acquiring a lock).
	struct mm_lock_stat_set *stat_set
		= mm_lock_find_stat_set(bucket, info->location, info->moreinfo);
	if (likely(stat_set != NULL))
		return stat_set;

	// Copy identification information.
	char *location = mm_global_strdup(info->location);
	char *moreinfo = info->moreinfo == NULL ? NULL : mm_global_strdup(info->moreinfo);

	// Allocate a new statistics collection entry.
	stat_set = mm_global_alloc(sizeof(struct mm_lock_stat_set));
	stat_set->location = location;
	stat_set->moreinfo = moreinfo;

	// Initialize thread statistics.
	mm_stack_prepare(&stat_set->domain_list);
	mm_stack_prepare(&stat_set->thread_list);
	stat_set->domain_lock = (mm_lock_t) MM_LOCK_INIT;

	// Start critical section.
	mm_global_lock(&mm_lock_stat_lock);

	// Try to find it again in case it was added concurrently.
	struct mm_lock_stat_set *recheck_stat
		= mm_lock_find_stat_set(bucket, location, moreinfo);
	if (unlikely(recheck_stat != NULL)) {
		// Bail out if so.
		mm_global_unlock(&mm_lock_stat_lock);
		mm_global_free(location);
		mm_global_free(moreinfo);
		mm_global_free(stat_set);
		return recheck_stat;
	}

	// Make the entry globally visible.
	stat_set->common_link.next = mm_lock_stat_list.head.next;
	stat_set->bucket_link.next = mm_lock_stat_table[bucket].head.next;
	mm_memory_store_fence();
	mm_lock_stat_list.head.next = &stat_set->common_link;
	mm_lock_stat_table[bucket].head.next = &stat_set->bucket_link;

	// End critical section.
	mm_global_unlock(&mm_lock_stat_lock);

	return stat_set;
}
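
The store fence above is the publish half of a lock-free lookup: the new entry's fields must become globally visible before its links do. For illustration, here is a reader-side sketch of what an optimistic lookup such as mm_lock_find_stat_set could look like under that guarantee (the mm_slink type, the containerof macro, and the omission of the moreinfo comparison are simplifying assumptions, not the project's actual code):

static struct mm_lock_stat_set *
lookup_stat_set_sketch(uint32_t bucket, const char *location)
{
	// Walk the bucket list without taking mm_lock_stat_lock. The writer's
	// store fence ensures a linked entry is always fully initialized.
	struct mm_slink *link = mm_memory_load(mm_lock_stat_table[bucket].head.next);
	while (link != NULL) {
		struct mm_lock_stat_set *set
			= containerof(link, struct mm_lock_stat_set, bucket_link);
		if (strcmp(set->location, location) == 0)
			return set;
		link = mm_memory_load(link->next);
	}
	return NULL;
}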
Example #2
void
mm_event_listener_finish(struct mm_event_listener *listener, uint32_t listen_stamp)
{
	// Bump the listen stamp.
	listen_stamp += MM_EVENT_LISTENER_STATUS + 1;

	// Store it first as the notify stamp then as the proper listen stamp.
	// This order ensures that regardless of a possible race with the CAS
	// operation in the notify function the end result will be correct.
	mm_memory_store(listener->notify_stamp, listen_stamp);
	mm_memory_store_fence();
	mm_memory_store(listener->listen_stamp, listen_stamp);
}
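
The same ordering can be expressed with standard C11 atomics. The stand-alone sketch below (not the project's primitives) shows the release-style pairing that mm_memory_store plus mm_memory_store_fence provides here: any thread that observes the updated listen_stamp is guaranteed to also observe the updated notify_stamp.

#include <stdatomic.h>
#include <stdint.h>

struct listener_sketch {
	_Atomic uint32_t notify_stamp;
	_Atomic uint32_t listen_stamp;
};

// Same store ordering as above: once the new listen_stamp is visible,
// the matching notify_stamp is visible too.
static void
listener_finish_sketch(struct listener_sketch *l, uint32_t stamp)
{
	atomic_store_explicit(&l->notify_stamp, stamp, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);   // analog of mm_memory_store_fence()
	atomic_store_explicit(&l->listen_stamp, stamp, memory_order_relaxed);
}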
Example #3
File: async.c  Project: ademakov/MainMemory
static void
mm_async_syscall_result(struct mm_async_node *node, intptr_t result)
{
	// Store the result.
	node->result = result;
	if (result < 0)
		node->error = errno;

	// Ensure its visibility.
	mm_memory_store_fence();
	// Indicate the operation completion.
	mm_memory_store(node->status, 0);

	// Notify the caller.
	mm_strand_run_fiber(node->fiber);
}
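
The fence guarantees that result and error are visible before status reads as zero. A hypothetical consumer sketch that pairs with it is shown below; a load-side fence, written here as mm_memory_load_fence, is assumed to exist alongside mm_memory_store_fence, and real code would block the fiber rather than spin.

static intptr_t
async_wait_result_sketch(struct mm_async_node *node)
{
	// Wait until the syscall side marks the operation complete.
	while (mm_memory_load(node->status) != 0)
		mm_cpu_backoff();
	// Pair with the store fence in mm_async_syscall_result().
	mm_memory_load_fence();
	return node->result;
}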
Example #4
void
mm_thread_barrier_wait(struct mm_thread_barrier *const barrier, struct mm_thread_barrier_local *local)
{
	uint32_t sense = ~local->sense;

	if (mm_atomic_uint32_dec_and_test(&barrier->value) == 0) {
		mm_memory_store(barrier->value, barrier->count);
		mm_memory_store_fence();
		mm_memory_store(barrier->sense, sense);
	} else {
		mm_memory_fence(); // TODO: atomic_load fence
		while (mm_memory_load(barrier->sense) != sense)
			mm_cpu_backoff();
	}

	local->sense = sense;
}
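
This is a sense-reversing barrier: the last thread to arrive resets the arrival counter and flips the shared sense (the store fence keeps the reset visible before the flip), while every other thread spins until the shared sense matches its own inverted copy. A hypothetical layout consistent with the fields used above (the actual struct definitions and the atomic type name are assumptions):

struct mm_thread_barrier {
	uint32_t count;            // number of participating threads
	mm_atomic_uint32_t value;  // arrival countdown, reset each round
	uint32_t sense;            // shared sense flag, flipped each round
};

struct mm_thread_barrier_local {
	uint32_t sense;            // last sense value this thread completed
};

Flipping the sense on every round is what makes the barrier immediately reusable: a thread still spinning for one round cannot be confused by threads that have already started the next one.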
Example #5
struct mm_lock_stat *
mm_lock_get_domain_stat(struct mm_lock_stat_set *stat_set,
			struct mm_thread *thread,
			struct mm_domain *domain)
{
	mm_thread_t dom_index = mm_thread_getnumber(thread);

	// Try to find domain entry optimistically (w/o acquiring a lock).
	struct mm_lock_domain_stat *dom_stat
		= mm_lock_find_domain_stat(stat_set, domain);
	if (likely(dom_stat != NULL))
		return MM_THREAD_LOCAL_DEREF(dom_index, dom_stat->stat);

	// Allocate a new statistics entry.
	dom_stat = mm_global_alloc(sizeof(struct mm_lock_domain_stat));
	dom_stat->domain = domain;

	// Mark it as not ready.
	dom_stat->ready = 0;

	// Start critical section.
	mm_global_lock(&stat_set->domain_lock);

	// Try to find it again in case it was added concurrently.
	struct mm_lock_domain_stat *recheck_stat
		= mm_lock_find_domain_stat(stat_set, domain);
	if (unlikely(recheck_stat != NULL)) {
		// Bail out if so.
		mm_global_unlock(&stat_set->domain_lock);
		mm_global_free(dom_stat);
		return MM_THREAD_LOCAL_DEREF(dom_index, recheck_stat->stat);
	}

	mm_stack_insert(&stat_set->domain_list, &dom_stat->link);

	// End critical section.
	mm_global_unlock(&stat_set->domain_lock);

	// Initialize per-thread data.
	char *name;
	if (stat_set->moreinfo != NULL)
		name = mm_format(&mm_global_arena, "lock %s (%s)",
				 stat_set->location, stat_set->moreinfo);
	else
		name = mm_format(&mm_global_arena, "lock %s",
				 stat_set->location);

	MM_THREAD_LOCAL_ALLOC(domain, name, dom_stat->stat);
	for (mm_thread_t c = 0; c < domain->nthreads; c++) {
		struct mm_lock_stat *stat = MM_THREAD_LOCAL_DEREF(c, dom_stat->stat);
		stat->lock_count = 0;
		stat->fail_count = 0;
	}
	mm_global_free(name);

	// Mark it as ready.
	mm_memory_store_fence();
	dom_stat->ready = 1;

	return MM_THREAD_LOCAL_DEREF(dom_index, dom_stat->stat);
}
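
Here the new domain entry is linked into domain_list inside the critical section, but its per-thread statistics are filled in only after the lock is released, so the ready flag plus the store fence are what keep lockless readers safe. A hypothetical sketch of the matching lookup (again, mm_slink, containerof, and mm_memory_load_fence are assumptions about the surrounding codebase):

static struct mm_lock_domain_stat *
find_domain_stat_sketch(struct mm_lock_stat_set *stat_set, struct mm_domain *domain)
{
	struct mm_slink *link = mm_memory_load(stat_set->domain_list.head.next);
	while (link != NULL) {
		struct mm_lock_domain_stat *ds
			= containerof(link, struct mm_lock_domain_stat, link);
		if (ds->domain == domain) {
			// Wait until the per-thread data is initialized, then
			// pair with the store fence above before using it.
			while (mm_memory_load(ds->ready) == 0)
				mm_cpu_backoff();
			mm_memory_load_fence();
			return ds;
		}
		link = mm_memory_load(link->next);
	}
	return NULL;
}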