Example #1
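/*
 * Allocate SIZE bytes of read/write/execute memory for a code chunk,
 * preferably at PREFERRED. Chunks are recycled through a per-size freelist
 * guarded by valloc_mutex; the lazy initialization below assumes the first
 * call happens before any concurrent use.
 */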
static void*
codechunk_valloc (void *preferred, guint32 size)
{
	void *ptr;
	GSList *freelist;

	if (!valloc_freelists) {
		InitializeCriticalSection (&valloc_mutex);
		valloc_freelists = g_hash_table_new (NULL, NULL);
	}

	/*
	 * Keep a small freelist of memory blocks to reduce pressure on the kernel memory subsystem and so avoid #3321.
	 */
	EnterCriticalSection (&valloc_mutex);
	freelist = g_hash_table_lookup (valloc_freelists, GUINT_TO_POINTER (size));
	if (freelist) {
		ptr = freelist->data;
		memset (ptr, 0, size);
		freelist = g_slist_delete_link (freelist, freelist);
		g_hash_table_insert (valloc_freelists, GUINT_TO_POINTER (size), freelist);
	} else {
		ptr = mono_valloc (preferred, size, MONO_PROT_RWX | ARCH_MAP_FLAGS);
		if (!ptr && preferred)
			ptr = mono_valloc (NULL, size, MONO_PROT_RWX | ARCH_MAP_FLAGS);
	}
	LeaveCriticalSection (&valloc_mutex);
	return ptr;
}
Example #2
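/*
 * Same allocator as Example #1, updated to the portable mono_os_mutex API
 * and to the mono_valloc signature that takes a MonoMemAccountType for
 * memory accounting.
 */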
static void*
codechunk_valloc (void *preferred, guint32 size)
{
	void *ptr;
	GSList *freelist;

	if (!valloc_freelists) {
		mono_os_mutex_init_recursive (&valloc_mutex);
		valloc_freelists = g_hash_table_new (NULL, NULL);
	}

	/*
	 * Keep a small freelist of memory blocks to reduce pressure on the kernel memory subsystem and so avoid #3321.
	 */
	mono_os_mutex_lock (&valloc_mutex);
	freelist = (GSList *) g_hash_table_lookup (valloc_freelists, GUINT_TO_POINTER (size));
	if (freelist) {
		ptr = freelist->data;
		memset (ptr, 0, size);
		freelist = g_slist_delete_link (freelist, freelist);
		g_hash_table_insert (valloc_freelists, GUINT_TO_POINTER (size), freelist);
	} else {
		ptr = mono_valloc (preferred, size, MONO_PROT_RWX | ARCH_MAP_FLAGS, MONO_MEM_ACCOUNT_CODE);
		if (!ptr && preferred)
			ptr = mono_valloc (NULL, size, MONO_PROT_RWX | ARCH_MAP_FLAGS, MONO_MEM_ACCOUNT_CODE);
	}
	mono_os_mutex_unlock (&valloc_mutex);
	return ptr;
}
Example #3
/*
 * Allocate a big chunk of memory from the OS (usually 64KB to several megabytes).
 * This must not require any lock.
 */
void*
sgen_alloc_os_memory (size_t size, int activate)
{
	void *ptr = mono_valloc (0, size, prot_flags_for_activate (activate));
	if (ptr)
		SGEN_ATOMIC_ADD_P (total_alloc, size);
	return ptr;
}
Example #4
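/*
 * Allocate SIZE bytes of anonymous private memory from the OS; the pages
 * are mapped readable and writable only when ACTIVATE is set.
 */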
static void*
mono_sgen_alloc_os_memory (size_t size, int activate)
{
	void *ptr;
	unsigned long prot_flags = activate ? MONO_MMAP_READ | MONO_MMAP_WRITE : MONO_MMAP_NONE;

	prot_flags |= MONO_MMAP_PRIVATE | MONO_MMAP_ANON;
	ptr = mono_valloc (0, size, prot_flags);
	return ptr;
}
Example #5
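/*
 * Pop a Descriptor from the lock-free available list, protecting the list
 * head with a hazard pointer so it cannot be freed under us. When the list
 * is empty, allocate a batch of NUM_DESC_BATCH descriptors, keep the first
 * and try to publish the rest with a CAS, freeing the whole batch if another
 * thread won the race.
 */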
static Descriptor*
desc_alloc (MonoMemAccountType type)
{
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
	Descriptor *desc;

	for (;;) {
		gboolean success;

		desc = (Descriptor *) mono_get_hazardous_pointer ((volatile gpointer *)&desc_avail, hp, 1);
		if (desc) {
			Descriptor *next = desc->next;
			success = (mono_atomic_cas_ptr ((volatile gpointer *)&desc_avail, next, desc) == desc);
		} else {
			size_t desc_size = sizeof (Descriptor);
			Descriptor *d;
			int i;

			desc = (Descriptor *) mono_valloc (NULL, desc_size * NUM_DESC_BATCH, prot_flags_for_activate (TRUE), type);
			g_assertf (desc, "Failed to allocate memory for the lock free allocator");

			/* Organize into linked list. */
			d = desc;
			for (i = 0; i < NUM_DESC_BATCH; ++i) {
				Descriptor *next = (i == (NUM_DESC_BATCH - 1)) ? NULL : (Descriptor*)((char*)desc + ((i + 1) * desc_size));
				d->next = next;
				mono_lock_free_queue_node_init (&d->node, TRUE);
				d = next;
			}

			mono_memory_write_barrier ();

			success = (mono_atomic_cas_ptr ((volatile gpointer *)&desc_avail, desc->next, NULL) == NULL);

			if (!success)
				mono_vfree (desc, desc_size * NUM_DESC_BATCH, type);
		}

		mono_hazard_pointer_clear (hp, 1);

		if (success)
			break;
	}

	g_assert (!desc->in_use);
	desc->in_use = TRUE;

	return desc;
}
Example #6
/*
 * Allocate a big chunk of memory from the OS (usually 64KB to several megabytes).
 * This must not require any lock.
 */
void*
sgen_alloc_os_memory (size_t size, SgenAllocFlags flags, const char *assert_description)
{
	void *ptr;

	g_assert (!(flags & ~(SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE)));

	ptr = mono_valloc (0, size, prot_flags_for_activate (flags & SGEN_ALLOC_ACTIVATE));
	sgen_assert_memory_alloc (ptr, size, assert_description);
	if (ptr) {
		SGEN_ATOMIC_ADD_P (total_alloc, size);
		total_alloc_max = MAX (total_alloc_max, total_alloc);
	}
	return ptr;
}
Example #7
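/*
 * Older variant of desc_alloc (see Example #5): the same hazard-pointer
 * protected lock-free pop, using InterlockedCompareExchangePointer and a
 * mono_valloc signature without memory accounting.
 */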
static Descriptor*
desc_alloc (void)
{
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
	Descriptor *desc;

	for (;;) {
		gboolean success;

		desc = (Descriptor *) get_hazardous_pointer ((gpointer * volatile)&desc_avail, hp, 1);
		if (desc) {
			Descriptor *next = desc->next;
			success = (InterlockedCompareExchangePointer ((gpointer * volatile)&desc_avail, next, desc) == desc);
		} else {
			size_t desc_size = sizeof (Descriptor);
			Descriptor *d;
			int i;

			desc = (Descriptor *) mono_valloc (0, desc_size * NUM_DESC_BATCH, prot_flags_for_activate (TRUE));

			/* Organize into linked list. */
			d = desc;
			for (i = 0; i < NUM_DESC_BATCH; ++i) {
				Descriptor *next = (i == (NUM_DESC_BATCH - 1)) ? NULL : (Descriptor*)((char*)desc + ((i + 1) * desc_size));
				d->next = next;
				mono_lock_free_queue_node_init (&d->node, TRUE);
				d = next;
			}

			mono_memory_write_barrier ();

			success = (InterlockedCompareExchangePointer ((gpointer * volatile)&desc_avail, desc->next, NULL) == NULL);

			if (!success)
				mono_vfree (desc, desc_size * NUM_DESC_BATCH);
		}

		mono_hazard_pointer_clear (hp, 1);

		if (success)
			break;
	}

	g_assert (!desc->in_use);
	desc->in_use = TRUE;

	return desc;
}
Example #8
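/*
 * Return SIZE bytes of memory aligned to ALIGNMENT by over-allocating and
 * then unmapping the unused head and tail of the mapping.
 */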
void*
mono_valloc_aligned (size_t size, size_t alignment, int flags)
{
	/* Over-allocate by ALIGNMENT bytes so that an aligned block of SIZE bytes is guaranteed to fit in the mapping */
	char *mem = mono_valloc (NULL, size + alignment, flags);
	char *aligned;

	if (!mem)
		return NULL;

	aligned = aligned_address (mem, size, alignment);

	if (aligned > mem)
		mono_vfree (mem, aligned - mem);
	if (aligned + size < mem + size + alignment)
		mono_vfree (aligned + size, (mem + size + alignment) - (aligned + size));

	return aligned;
}
Example #9
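/*
 * Allocate a superblock for DESC. Page-sized blocks are naturally
 * page-aligned; larger blocks are allocated aligned to their own size so
 * the header can be recovered from any interior address by masking.
 */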
static gpointer
alloc_sb (Descriptor *desc)
{
	static int pagesize = -1;

	gpointer sb_header;

	if (pagesize == -1)
		pagesize = mono_pagesize ();

	sb_header = desc->block_size == pagesize ?
		mono_valloc (0, desc->block_size, prot_flags_for_activate (TRUE)) :
		mono_valloc_aligned (desc->block_size, desc->block_size, prot_flags_for_activate (TRUE));

	g_assert (sb_header == sb_header_for_addr (sb_header, desc->block_size));

	*(Descriptor**)sb_header = desc;
	//g_print ("sb %p for %p\n", sb_header, desc);

	return (char*)sb_header + LOCK_FREE_ALLOC_SB_HEADER_SIZE;
}
Example #10
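/*
 * Same superblock allocator as Example #9, updated to the accounting-aware
 * mono_valloc signature and with an explicit out-of-memory assertion.
 */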
static gpointer
alloc_sb (Descriptor *desc)
{
	static int pagesize = -1;

	gpointer sb_header;

	if (pagesize == -1)
		pagesize = mono_pagesize ();

	sb_header = desc->block_size == pagesize ?
		mono_valloc (NULL, desc->block_size, prot_flags_for_activate (TRUE), desc->heap->account_type) :
		mono_valloc_aligned (desc->block_size, desc->block_size, prot_flags_for_activate (TRUE), desc->heap->account_type);

	g_assertf (sb_header, "Failed to allocate memory for the lock free allocator");
	g_assert (sb_header == sb_header_for_addr (sb_header, desc->block_size));

	*(Descriptor**)sb_header = desc;
	//g_print ("sb %p for %p\n", sb_header, desc);

	return (char*)sb_header + LOCK_FREE_ALLOC_SB_HEADER_SIZE;
}
Example #11
/*
 * Allocate a small thread id.
 *
 * FIXME: The biggest part of this function is very similar to
 * domain_id_alloc() in domain.c and should be merged.
 */
int
mono_thread_small_id_alloc (void)
{
	int i, id = -1;

	mono_os_mutex_lock (&small_id_mutex);

	if (!small_id_table)
		small_id_table = mono_bitset_new (1, 0);

	id = mono_bitset_find_first_unset (small_id_table, small_id_next - 1);
	if (id == -1)
		id = mono_bitset_find_first_unset (small_id_table, -1);

	if (id == -1) {
		MonoBitSet *new_table;
		if (small_id_table->size * 2 >= (1 << 16))
			g_assert_not_reached ();
		new_table = mono_bitset_clone (small_id_table, small_id_table->size * 2);
		id = mono_bitset_find_first_unset (new_table, small_id_table->size - 1);

		mono_bitset_free (small_id_table);
		small_id_table = new_table;
	}

	g_assert (!mono_bitset_test_fast (small_id_table, id));
	mono_bitset_set_fast (small_id_table, id);

	small_id_next++;
	if (small_id_next >= small_id_table->size)
		small_id_next = 0;

	g_assert (id < HAZARD_TABLE_MAX_SIZE);
	if (id >= hazard_table_size) {
#if MONO_SMALL_CONFIG
		hazard_table = g_malloc0 (sizeof (MonoThreadHazardPointers) * HAZARD_TABLE_MAX_SIZE);
		hazard_table_size = HAZARD_TABLE_MAX_SIZE;
#else
		gpointer page_addr;
#if defined(__PASE__)
		/*
		 * HACK: allocating the table with no access protection makes
		 * IBM i 7.1 segfault when the memory is later accessed or
		 * protected
		 */
		int table_prot = MONO_MMAP_READ | MONO_MMAP_WRITE;
#else
		int table_prot = MONO_MMAP_NONE;
#endif
		int pagesize = mono_pagesize ();
		int num_pages = (hazard_table_size * sizeof (MonoThreadHazardPointers) + pagesize - 1) / pagesize;

		if (hazard_table == NULL) {
			hazard_table = (MonoThreadHazardPointers *volatile) mono_valloc (NULL,
				sizeof (MonoThreadHazardPointers) * HAZARD_TABLE_MAX_SIZE,
				table_prot, MONO_MEM_ACCOUNT_HAZARD_POINTERS);
		}

		g_assert (hazard_table != NULL);
		page_addr = (guint8*)hazard_table + num_pages * pagesize;

		mono_mprotect (page_addr, pagesize, MONO_MMAP_READ | MONO_MMAP_WRITE);

		++num_pages;
		hazard_table_size = num_pages * pagesize / sizeof (MonoThreadHazardPointers);

#endif
		g_assert (id < hazard_table_size);
		for (i = 0; i < HAZARD_POINTER_COUNT; ++i)
			hazard_table [id].hazard_pointers [i] = NULL;
	}

	if (id > highest_small_id) {
		highest_small_id = id;
		mono_memory_write_barrier ();
	}

	mono_os_mutex_unlock (&small_id_mutex);

	return id;
}
Example #12
/*
 * Allocate a small thread id.
 *
 * FIXME: The biggest part of this function is very similar to
 * domain_id_alloc() in domain.c and should be merged.
 */
int
mono_thread_small_id_alloc (void)
{
	int i, id = -1;

	EnterCriticalSection (&small_id_mutex);

	if (!small_id_table)
		small_id_table = mono_bitset_new (1, 0);

	id = mono_bitset_find_first_unset (small_id_table, small_id_next);
	if (id == -1)
		id = mono_bitset_find_first_unset (small_id_table, -1);

	if (id == -1) {
		MonoBitSet *new_table;
		if (small_id_table->size * 2 >= (1 << 16))
			g_assert_not_reached ();
		new_table = mono_bitset_clone (small_id_table, small_id_table->size * 2);
		id = mono_bitset_find_first_unset (new_table, small_id_table->size - 1);

		mono_bitset_free (small_id_table);
		small_id_table = new_table;
	}

	g_assert (!mono_bitset_test_fast (small_id_table, id));
	mono_bitset_set_fast (small_id_table, id);

	small_id_next++;
	if (small_id_next >= small_id_table->size)
		small_id_next = 0;

	g_assert (id < HAZARD_TABLE_MAX_SIZE);
	if (id >= hazard_table_size) {
#if MONO_SMALL_CONFIG
		hazard_table = g_malloc0 (sizeof (MonoThreadHazardPointers) * HAZARD_TABLE_MAX_SIZE);
		hazard_table_size = HAZARD_TABLE_MAX_SIZE;
#else
		gpointer page_addr;
		int pagesize = mono_pagesize ();
		int num_pages = (hazard_table_size * sizeof (MonoThreadHazardPointers) + pagesize - 1) / pagesize;

		if (hazard_table == NULL) {
			hazard_table = mono_valloc (NULL,
				sizeof (MonoThreadHazardPointers) * HAZARD_TABLE_MAX_SIZE,
				MONO_MMAP_NONE);
		}

		g_assert (hazard_table != NULL);
		page_addr = (guint8*)hazard_table + num_pages * pagesize;

		mono_mprotect (page_addr, pagesize, MONO_MMAP_READ | MONO_MMAP_WRITE);

		++num_pages;
		hazard_table_size = num_pages * pagesize / sizeof (MonoThreadHazardPointers);

#endif
		g_assert (id < hazard_table_size);
		for (i = 0; i < HAZARD_POINTER_COUNT; ++i)
			hazard_table [id].hazard_pointers [i] = NULL;
	}

	if (id > highest_small_id) {
		highest_small_id = id;
		mono_memory_write_barrier ();
	}

	LeaveCriticalSection (&small_id_mutex);

	return id;
}
Example #13
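/*
 * Minimal variant: a thin wrapper that forwards straight to mono_valloc
 * with protection flags derived from ACTIVATE.
 */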
static void*
mono_sgen_alloc_os_memory (size_t size, int activate)
{
	return mono_valloc (0, size, prot_flags_for_activate (activate));
}