Code example #1
void
mm_task_combiner_prepare(struct mm_task_combiner *combiner, const char *name,
			 size_t size, size_t handoff)
{
	ENTER();

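	// Prepare the underlying combiner with the given size and handoff limit.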
	mm_combiner_prepare(&combiner->combiner, size, handoff);

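	// Allocate a per-core wait queue of pending tasks and initialize each
	// one as an empty list.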
	MM_THREAD_LOCAL_ALLOC(mm_domain_selfptr(), name, combiner->wait_queue);
	for (mm_core_t core = 0; core < mm_core_getnum(); core++) {
		struct mm_list *wait_queue = MM_THREAD_LOCAL_DEREF(core, combiner->wait_queue);
		mm_list_prepare(wait_queue);
	}

	LEAVE();
}
Code example #2
void
mm_task_combiner_execute(struct mm_task_combiner *combiner,
			 mm_combiner_routine_t routine, uintptr_t data)
{
	ENTER();

	// Disable cancellation as the enqueue algorithm cannot be
	// safely undone if interrupted in the middle.
	int cancelstate;
	mm_task_setcancelstate(MM_TASK_CANCEL_DISABLE, &cancelstate);

	// Get per-core queue of pending requests.
	mm_core_t core = mm_core_self();
	struct mm_list *wait_queue = MM_THREAD_LOCAL_DEREF(core, combiner->wait_queue);

	// Add the current request to the per-core queue.
	struct mm_task *task = mm_task_selfptr();
	task->flags |= MM_TASK_COMBINING;
	mm_list_append(wait_queue, &task->wait_queue);

	// Wait until the current request becomes the head of the
	// per-core queue.
	while (mm_list_head(wait_queue) != &task->wait_queue)
		mm_task_block();

	mm_combiner_execute(&combiner->combiner, routine, data);

	// Remove the request from the per-core queue.
	mm_list_delete(&task->wait_queue);
	task->flags &= ~MM_TASK_COMBINING;

	// If the per-core queue is not empty then let its new head take
	// the next turn.
	if (!mm_list_empty(wait_queue)) {
		struct mm_link *link = mm_list_head(wait_queue);
		task = containerof(link, struct mm_task, wait_queue);
		mm_task_run(task);
	}

	// Restore cancellation.
	mm_task_setcancelstate(cancelstate, NULL);

	LEAVE();
}
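Taken together, the two routines above form the public surface of the task combiner: the combiner is prepared once, and afterwards every task funnels its operation through mm_task_combiner_execute(). The sketch below illustrates that calling convention. The counter structure, the counter_* helpers, and the size/handoff values are hypothetical, and the routine signature is assumed to be void (*)(uintptr_t) based on how the routine and data arguments are forwarded to mm_combiner_execute().

#include <stdint.h>
/* Plus the project header that declares struct mm_task_combiner
 * and mm_task_combiner_prepare/execute (path not shown here). */

/* Hypothetical shared state protected by a task combiner. */
struct counter {
	struct mm_task_combiner combiner;
	uint64_t value;
};

/* The combined routine; the combiner guarantees it runs exclusively.
 * Its signature is assumed from the mm_task_combiner_execute() call above. */
static void
counter_increment(uintptr_t data)
{
	struct counter *counter = (struct counter *) data;
	counter->value++;
}

static void
counter_prepare(struct counter *counter)
{
	counter->value = 0;
	/* Ring size and handoff limit are illustrative values only. */
	mm_task_combiner_prepare(&counter->combiner, "counter", 64, 16);
}

static void
counter_add_one(struct counter *counter)
{
	mm_task_combiner_execute(&counter->combiner, counter_increment,
				 (uintptr_t) counter);
}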
Code example #3
void
mm_lock_stats(void)
{
#if ENABLE_LOCK_STATS
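	// Walk every registered lock statistics set.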
	struct mm_slink *set_link = mm_stack_atomic_load_head(&mm_lock_stat_list);
	while (set_link != NULL) {
		struct mm_lock_stat_set *stat_set
			= containerof(set_link, struct mm_lock_stat_set, common_link);
		mm_memory_load_fence();

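		// Print per-domain statistics: one entry per thread of each domain.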
		struct mm_slink *dom_link = mm_stack_atomic_load_head(&stat_set->domain_list);
		while (dom_link != NULL) {
			struct mm_lock_domain_stat *dom_stat
				= containerof(dom_link, struct mm_lock_domain_stat, link);
			struct mm_domain *domain = dom_stat->domain;
			for (mm_thread_t c = 0; c < domain->nthreads; c++) {
				struct mm_lock_stat *stat
					= MM_THREAD_LOCAL_DEREF(c, dom_stat->stat);
				struct mm_thread *thread = domain->threads[c];
				mm_lock_print_stat(thread, stat_set, stat);
			}
			dom_link = mm_memory_load(dom_link->next);
		}

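		// Print statistics gathered by individual threads.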
		struct mm_slink *thr_link = mm_stack_atomic_load_head(&stat_set->thread_list);
		while (thr_link != NULL) {
			struct mm_lock_thread_stat *thr_stat
				= containerof(thr_link, struct mm_lock_thread_stat, link);
			struct mm_thread *thread = thr_stat->thread;
			mm_lock_print_stat(thread, stat_set, &thr_stat->stat);
			thr_link = mm_memory_load(thr_link->next);
		}

		set_link = mm_memory_load(set_link->next);
	}
#endif
}
Code example #4
struct mm_lock_stat *
mm_lock_get_domain_stat(struct mm_lock_stat_set *stat_set,
			struct mm_thread *thread,
			struct mm_domain *domain)
{
	mm_thread_t dom_index = mm_thread_getnumber(thread);

	// Try to find domain entry optimistically (w/o acquiring a lock).
	struct mm_lock_domain_stat *dom_stat
		= mm_lock_find_domain_stat(stat_set, domain);
	if (likely(dom_stat != NULL))
		return MM_THREAD_LOCAL_DEREF(dom_index, dom_stat->stat);

	// Allocate a new statistics entry.
	dom_stat = mm_global_alloc(sizeof(struct mm_lock_domain_stat));
	dom_stat->domain = domain;

	// Mark it as not ready.
	dom_stat->ready = 0;

	// Start critical section.
	mm_global_lock(&stat_set->domain_lock);

	// Try to find it again in case it was added concurrently.
	struct mm_lock_domain_stat *recheck_stat
		= mm_lock_find_domain_stat(stat_set, domain);
	if (unlikely(recheck_stat != NULL)) {
		// Bail out if so.
		mm_global_unlock(&stat_set->domain_lock);
		mm_global_free(dom_stat);
		return MM_THREAD_LOCAL_DEREF(dom_index, recheck_stat->stat);
	}

	mm_stack_insert(&stat_set->domain_list, &dom_stat->link);

	// End critical section.
	mm_global_unlock(&stat_set->domain_lock);

	// Initialize per-thread data.
	char *name;
	if (stat_set->moreinfo != NULL)
		name = mm_format(&mm_global_arena, "lock %s (%s)",
				 stat_set->location, stat_set->moreinfo);
	else
		name = mm_format(&mm_global_arena, "lock %s",
				 stat_set->location);

	MM_THREAD_LOCAL_ALLOC(domain, name, dom_stat->stat);
	for (mm_thread_t c = 0; c < domain->nthreads; c++) {
		struct mm_lock_stat *stat = MM_THREAD_LOCAL_DEREF(c, dom_stat->stat);
		stat->lock_count = 0;
		stat->fail_count = 0;
	}
	mm_global_free(name);

	// Mark it as ready, ensuring the initialization above is visible
	// before the flag is set.
	mm_memory_store_fence();
	dom_stat->ready = 1;

	return MM_THREAD_LOCAL_DEREF(dom_index, dom_stat->stat);
}