Example #1
0
static void
mc_action_bucket_finish(struct mc_action *action, struct mm_stack *freelist)
{
	/* The bucket scan is over — drop the lookup lock first so other
	   lookups on this partition may proceed. */
	mc_table_lookup_unlock(action->part);

	/* No victims collected, nothing more to do. */
	if (mm_stack_empty(freelist))
		return;

	/* Dispose of the collected entries under the free-list lock. */
	mc_table_freelist_lock(action->part);
	mc_action_free_entries(action->part, freelist);
	mc_table_freelist_unlock(action->part);
}
Example #2
0
static void
mc_action_free_entries(struct mc_tpart *part, struct mm_stack *victims)
{
	/* Drain the victim stack one entry at a time. */
	while (!mm_stack_empty(victims)) {
		struct mm_slink *node = mm_stack_remove(victims);
		struct mc_entry *victim = containerof(node, struct mc_entry, link);

		/* Drop our reference; only the last holder reclaims storage. */
		if (!mc_action_unref_entry(victim))
			continue;

		mc_action_free_chunks(part, victim);
		mc_action_free_entry(part, victim);
	}
}
Example #3
0
File: pool.c  Project: ademakov/MainMemory
/* Allocate an item from the pool's thread-local free list, falling back
 * to growing the pool when the free list is empty.
 *
 * Returns a pointer to an item of the pool's element size; ownership
 * passes to the caller until it is returned to the pool.
 */
void *
mm_pool_local_alloc(struct mm_pool *pool)
{
	ENTER();
	void *item;

	/* Reuse a previously freed item if one is available. */
	if (!mm_stack_empty(&pool->free_list))
		item = mm_stack_remove(&pool->free_list);
	else
		item = mm_pool_alloc_new(pool);

	LEAVE();
	return item;
}
Example #4
0
/* Flush the thread's deferred-chunk list, posting each chunk to its
 * origin thread's reclamation queue.
 *
 * Unless 'flush' forces it, nothing happens until the deferred count
 * reaches MM_CHUNK_FLUSH_THRESHOLD. Chunks that cannot be posted after
 * a bounded backoff are put back on the deferred list; a persistently
 * growing backlog is reported (and is fatal past a second threshold).
 *
 * NOTE(review): return type was absent in the source as captured;
 * 'void' restored — no path returns a value, and implicit int is
 * invalid since C99.
 */
void
mm_chunk_enqueue_deferred(struct mm_thread *thread, bool flush)
{
	if (!flush && thread->deferred_chunks_count < MM_CHUNK_FLUSH_THRESHOLD)
		return;

	// Capture all the deferred chunks.
	struct mm_stack chunks = thread->deferred_chunks;
	mm_stack_prepare(&thread->deferred_chunks);
	thread->deferred_chunks_count = 0;

	// Try to submit the chunks to respective reclamation queues.
	while (!mm_stack_empty(&chunks)) {
		struct mm_chunk *chunk = mm_chunk_stack_remove(&chunks);

		struct mm_domain *domain = mm_regular_domain;
#if ENABLE_SMP
		// Route the chunk back to the thread identified by its tag.
		mm_chunk_t tag = mm_chunk_gettag(chunk);
		struct mm_thread *origin = mm_domain_getthread(domain, tag);
#else
		struct mm_thread *origin = mm_domain_getthread(domain, 0);
#endif
		uint32_t backoff = 0;
		while (!mm_thread_trypost_1(origin, mm_chunk_free_req, (uintptr_t) chunk)) {
			if (backoff >= MM_BACKOFF_SMALL) {
				// If failed to submit the chunk after a number
				// of attempts then defer it again.
				mm_chunk_stack_insert(&thread->deferred_chunks, chunk);
				thread->deferred_chunks_count++;
				break;
			}
			backoff = mm_thread_backoff(backoff);
		}
	}

	// Let know if chunk reclamation consistently has problems.
	if (thread->deferred_chunks_count > MM_CHUNK_ERROR_THRESHOLD) {
		if (thread->deferred_chunks_count < MM_CHUNK_FATAL_THRESHOLD)
			mm_error(0, "Problem with chunk reclamation");
		else
			mm_fatal(0, "Problem with chunk reclamation");
	}
}