Example no. 1
void
mm_task_combiner_destroy(struct mm_task_combiner *combiner)
{
	ENTER();

	// Release the combiner memory back to the common space.
	mm_common_free(combiner);

	LEAVE();
}
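
The combiner is handed back to the common space, so it must have been allocated from that space to begin with. A minimal usage sketch, assuming allocation via mm_common_alloc() and the relevant project headers already included (the codebase's actual creation routine may differ):

static void
example_combiner_lifetime(void)
{
	// Hypothetical sketch: allocating with mm_common_alloc() is an
	// assumption made for illustration; the real constructor may
	// allocate and initialize the combiner differently.
	struct mm_task_combiner *combiner = mm_common_alloc(sizeof *combiner);

	// ... initialize and use the combiner ...

	// Releases the memory back to the common space (Example no. 1).
	mm_task_combiner_destroy(combiner);
}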
Example no. 2
void
mm_event_batch_cleanup(struct mm_event_batch *batch)
{
	ENTER();

	// Release the batch's change buffer.
	mm_common_free(batch->changes);

	LEAVE();
}
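
Note the asymmetry with Example no. 1: this is a cleanup routine rather than a destroy routine, so it releases only the internal change buffer and leaves the batch structure itself to whoever owns it. A minimal sketch of an embedding container using it (the container type is hypothetical):

struct example_owner
{
	struct mm_event_batch batch;	// embedded, not separately allocated
};

static void
example_owner_cleanup(struct example_owner *owner)
{
	// Release only the internal buffer; the owner structure itself
	// is freed (or kept) by whoever allocated it.
	mm_event_batch_cleanup(&owner->batch);
}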
Example no. 3
void
mm_event_forward_cleanup(struct mm_event_forward_cache *cache)
{
	ENTER();

	// Release forward buffers.
	mm_common_free(cache->buffers);
	mm_bitset_cleanup(&cache->targets, &mm_common_space.xarena);

	LEAVE();
}
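
The bitset must be torn down with the same arena it was prepared with, here &mm_common_space.xarena. A sketch of the symmetric setup, assuming an mm_bitset_prepare() that takes the arena and the number of bits (the exact signature is an assumption for illustration):

static void
example_targets_setup(struct mm_event_forward_cache *cache, mm_thread_t ntargets)
{
	// Hypothetical call: the prepare arguments are assumed here; the
	// cleanup in Example no. 3 must later pass the same arena.
	mm_bitset_prepare(&cache->targets, &mm_common_space.xarena, ntargets);
}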
Example no. 4
void
mm_event_dispatch_cleanup(struct mm_event_dispatch *dispatch)
{
	ENTER();

	// Release system-specific resources.
	mm_event_backend_cleanup(&dispatch->backend);

	// Release listener info.
	for (mm_thread_t i = 0; i < dispatch->nlisteners; i++)
		mm_event_listener_cleanup(&dispatch->listeners[i]);
	mm_common_free(dispatch->listeners);

	LEAVE();
}
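
The teardown follows the usual two-step pattern: per-element cleanup first, then a single free of the backing array. A mirror-image setup sketch, assuming mm_common_calloc() for the array allocation and a simplified mm_event_listener_prepare() call; both signatures are assumptions made for illustration:

static void
example_listeners_setup(struct mm_event_dispatch *dispatch, mm_thread_t nlisteners)
{
	// Allocate the listener array from the common space; each element
	// is then prepared in turn, mirroring the cleanup loop above.
	dispatch->nlisteners = nlisteners;
	dispatch->listeners = mm_common_calloc(nlisteners, sizeof(struct mm_event_listener));
	for (mm_thread_t i = 0; i < nlisteners; i++) {
		// Hypothetical call: arguments assumed for illustration.
		mm_event_listener_prepare(&dispatch->listeners[i]);
	}
}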
Example no. 5
void
mm_chunk_destroy(struct mm_chunk *chunk)
{
	mm_chunk_t tag = mm_chunk_gettag(chunk);

	// A chunk from a shared memory space can be freed by any thread in
	// the same manner, relying on the synchronization built into the
	// corresponding memory allocation routines.
	if (tag == MM_CHUNK_COMMON) {
		mm_common_free(chunk);
		return;
	}
	if (unlikely(tag == MM_CHUNK_GLOBAL)) {
		mm_global_free(chunk);
		return;
	}

	if (tag == MM_CHUNK_REGULAR) {
#if ENABLE_SMP
		// In SMP mode the regular memory space is just another case of
		// a shared space with built-in synchronization, so a chunk from
		// it can likewise be freed by any thread.
		mm_regular_free(chunk);
		return;
#else
		struct mm_domain *domain = mm_domain_selfptr();
		if (domain == mm_regular_domain) {
			mm_regular_free(chunk);
			return;
		}
#endif
	}

	// A chunk from a private space can be freed immediately by its
	// originating thread, but for any other thread it is subject to the
	// asynchronous memory reclamation mechanism.
	struct mm_thread *thread = mm_thread_selfptr();
	struct mm_domain *domain = mm_thread_getdomain(thread);
	if (domain == mm_regular_domain && tag == mm_thread_getnumber(thread)) {
		mm_private_free(chunk);
		return;
	}

	thread->deferred_chunks_count++;
	mm_chunk_stack_insert(&thread->deferred_chunks, chunk);
	mm_chunk_enqueue_deferred(thread, false);
}
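
In short, mm_chunk_destroy() dispatches on the chunk tag: common and global chunks go straight back to their shared allocators, regular chunks depend on the build mode, and private chunks are either freed in place by their owner or queued for deferred reclamation. A hypothetical usage sketch; the creation call is an assumption made for illustration (the actual creation routines may have different names or arguments):

static void
example_chunk_roundtrip(void)
{
	// Hypothetical creation call, assumed for illustration only.
	struct mm_chunk *chunk = mm_chunk_create_common(128);

	// ... fill the chunk's data area ...

	// A common chunk is released at once with mm_common_free(); a
	// private chunk freed from a foreign thread would instead be
	// queued for deferred reclamation.
	mm_chunk_destroy(chunk);
}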