Example #1
/*
Search @list for the element with key @value->key and remove it.
The nodes next, cur and prev are returned in @hp.
Returns true if @value was removed by this call.
This function cannot be called from a signal nor with the world stopped.
*/
gboolean
mono_lls_remove (MonoLinkedListSet *list, MonoThreadHazardPointers *hp, MonoLinkedListSetNode *value)
{
    MonoLinkedListSetNode *cur, **prev, *next;
    while (1) {
        if (!mono_lls_find (list, hp, value->key))
            return FALSE;

        next = mono_hazard_pointer_get_val (hp, 0);
        cur = mono_hazard_pointer_get_val (hp, 1);
        prev = mono_hazard_pointer_get_val (hp, 2);

        g_assert (cur == value);

        if (InterlockedCompareExchangePointer ((volatile gpointer*)&cur->next, mask (next, 1), next) != next)
            continue;
        /* The second CAS must happen after the first. */
        mono_memory_write_barrier ();
        if (InterlockedCompareExchangePointer ((volatile gpointer*)prev, mono_lls_pointer_unmask (next), cur) == cur) {
            /* The CAS must happen before the hazard pointer clear. */
            mono_memory_write_barrier ();
            mono_hazard_pointer_clear (hp, 1);
            if (list->free_node_func)
                mono_thread_hazardous_free_or_queue (value, list->free_node_func, FALSE, TRUE);
        } else
            mono_lls_find (list, hp, value->key);
        return TRUE;
    }
}
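A hedged caller-side sketch of the remove above, assuming my_list is a set the caller owns and node is an element it inserted earlier; the hazard-pointer slots used by the search machinery are cleared conservatively afterwards.

static gboolean
remove_my_node (MonoLinkedListSet *my_list, MonoLinkedListSetNode *node)
{
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
	gboolean removed = mono_lls_remove (my_list, hp, node);

	/* Slots 0-2 were used by the find/remove machinery; clearing an
	 * already-clear slot is harmless. */
	mono_hazard_pointer_clear (hp, 0);
	mono_hazard_pointer_clear (hp, 1);
	mono_hazard_pointer_clear (hp, 2);

	return removed;
}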
Example #2
void
mono_hazard_pointer_restore_for_signal_handler (int small_id)
{
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
	MonoThreadHazardPointers *hp_overflow;
	int i;

	if (small_id < 0)
		return;

	g_assert (small_id < HAZARD_TABLE_OVERFLOW);
	g_assert (overflow_busy [small_id]);

	for (i = 0; i < HAZARD_POINTER_COUNT; ++i)
		g_assert (!hp->hazard_pointers [i]);

	hp_overflow = &hazard_table [small_id];

	*hp = *hp_overflow;

	mono_memory_write_barrier ();

	memset (hp_overflow, 0, sizeof (MonoThreadHazardPointers));

	mono_memory_write_barrier ();

	overflow_busy [small_id] = 0;
}
void
mono_lock_free_queue_enqueue (MonoLockFreeQueue *q, MonoLockFreeQueueNode *node)
{
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
	MonoLockFreeQueueNode *tail;

#ifdef QUEUE_DEBUG
	g_assert (!node->in_queue);
	node->in_queue = TRUE;
	mono_memory_write_barrier ();
#endif

	g_assert (node->next == FREE_NEXT);
	node->next = END_MARKER;
	for (;;) {
		MonoLockFreeQueueNode *next;

		tail = (MonoLockFreeQueueNode *) get_hazardous_pointer ((gpointer volatile*)&q->tail, hp, 0);
		mono_memory_read_barrier ();
		/*
		 * We never dereference next so we don't need a
		 * hazardous load.
		 */
		next = tail->next;
		mono_memory_read_barrier ();

		/* Are tail and next consistent? */
		if (tail == q->tail) {
			g_assert (next != INVALID_NEXT && next != FREE_NEXT);
			g_assert (next != tail);

			if (next == END_MARKER) {
				/*
				 * Here we require that nodes that
				 * have been dequeued don't have
				 * next==END_MARKER.  If they did, we
				 * might append to a node that isn't
				 * in the queue anymore.
				 */
				if (InterlockedCompareExchangePointer ((gpointer volatile*)&tail->next, node, END_MARKER) == END_MARKER)
					break;
			} else {
				/* Try to advance tail */
				InterlockedCompareExchangePointer ((gpointer volatile*)&q->tail, next, tail);
			}
		}

		mono_memory_write_barrier ();
		mono_hazard_pointer_clear (hp, 0);
	}

	/* Try to advance tail */
	InterlockedCompareExchangePointer ((gpointer volatile*)&q->tail, node, tail);

	mono_memory_write_barrier ();
	mono_hazard_pointer_clear (hp, 0);
}
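A hedged producer-side sketch for the enqueue above. The Job struct and submit_job are assumptions, and the FALSE argument to mono_lock_free_queue_node_init is assumed to leave the node in the FREE_NEXT state that the enqueue asserts.

typedef struct {
	MonoLockFreeQueueNode node;	/* first member, so a queue node can be cast back to a Job */
	int payload;
} Job;

static void
submit_job (MonoLockFreeQueue *queue, int payload)
{
	Job *job = g_new0 (Job, 1);

	mono_lock_free_queue_node_init (&job->node, FALSE);
	job->payload = payload;
	mono_lock_free_queue_enqueue (queue, &job->node);
}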
Example #4
mono_handle_new (MonoObject *obj, MonoThreadInfo *info, const char *owner)
#endif
{
	info = info ? info : mono_thread_info_current ();
	HandleStack *handles = info->handle_stack;
	HandleChunk *top = handles->top;
#ifdef MONO_HANDLE_TRACK_SP
	mono_handle_chunk_leak_check (handles);
#endif

retry:
	if (G_LIKELY (top->size < OBJECTS_PER_HANDLES_CHUNK)) {
		int idx = top->size;
		gpointer* objslot = &top->elems [idx].o;
		/* can be interrupted anywhere here, so:
		 * 1. make sure the new slot is null
		 * 2. make the new slot scannable (increment size)
		 * 3. put a valid object in there
		 *
		 * (have to do 1 then 3 so that if we're interrupted
		 * between 1 and 2, the object is still live)
		 */
		*objslot = NULL;
		SET_OWNER (top,idx);
		SET_SP (handles, top, idx);
		mono_memory_write_barrier ();
		top->size++;
		mono_memory_write_barrier ();
		*objslot = obj;
		return objslot;
	}
	if (G_LIKELY (top->next)) {
		top->next->size = 0;
		/* make sure size == 0 is visible to a GC thread before it sees the new top */
		mono_memory_write_barrier ();
		top = top->next;
		handles->top = top;
		goto retry;
	}
	HandleChunk *new_chunk = new_handle_chunk ();
	new_chunk->size = 0;
	new_chunk->prev = top;
	new_chunk->next = NULL;
	/* make sure size == 0 before new chunk is visible */
	mono_memory_write_barrier ();
	top->next = new_chunk;
	handles->top = new_chunk;
	goto retry;
}
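The three-step comment in the fast path above is easier to follow next to the reader it protects against. A hedged sketch of what a GC-side chunk scan presumably looks like (hypothetical helper, not the actual scanning code):

static void
scan_handle_chunk (HandleChunk *chunk, void (*scan_func) (gpointer *slot))
{
	int i;

	/* Only the first size slots are visited, so by the time size covers a
	 * slot (step 2) it is already NULL (step 1), and it may legitimately
	 * still be NULL if the writer was interrupted before step 3. */
	for (i = 0; i < chunk->size; ++i) {
		gpointer *slot = &chunk->elems [i].o;
		if (*slot)
			scan_func (slot);
	}
}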
Example #5
void
sgen_gray_object_enqueue_section (SgenGrayQueue *queue, GrayQueueSection *section, gboolean is_parallel)
{
	STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_ENQUEUED);

	if (queue->first)
		queue->first->size = queue->cursor - queue->first->entries + 1;

	section->next = queue->first;
	section->prev = NULL;
	if (queue->first)
		queue->first->prev = section;
	else
		queue->last = section;
	queue->first = section;
	queue->cursor = queue->first->entries + queue->first->size - 1;
#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
	if (queue->enqueue_check_func) {
		int i;
		for (i = 0; i < section->size; ++i)
			queue->enqueue_check_func (section->entries [i].obj);
	}
#endif
	if (is_parallel) {
		mono_memory_write_barrier ();
		mono_atomic_inc_i32 (&queue->num_sections);
	} else {
		queue->num_sections++;
	}
}
static gboolean
add_stage_entry (int num_entries, volatile gint32 *next_entry, StageEntry *entries, MonoObject *obj, void *user_data)
{
	gint32 index;

	do {
		do {
			index = *next_entry;
			if (index >= num_entries)
				return FALSE;
		} while (InterlockedCompareExchange (next_entry, index + 1, index) != index);

		/*
		 * We don't need a write barrier here.  *next_entry is just a
		 * help for finding an index, its value is irrelevant for
		 * correctness.
		 */
	} while (entries [index].state != STAGE_ENTRY_FREE ||
			InterlockedCompareExchange (&entries [index].state, STAGE_ENTRY_BUSY, STAGE_ENTRY_FREE) != STAGE_ENTRY_FREE);

	entries [index].obj = obj;
	entries [index].user_data = user_data;

	mono_memory_write_barrier ();

	entries [index].state = STAGE_ENTRY_USED;

	return TRUE;
}
/* LOCKING: requires that the GC lock is held */
static void
process_stage_entries (int num_entries, volatile gint32 *next_entry, StageEntry *entries, void (*process_func) (MonoObject*, void*))
{
	int i;
	int num_registered = 0;
	int num_busy = 0;

	for (i = 0; i < num_entries; ++i) {
		gint32 state = entries [i].state;

		if (state == STAGE_ENTRY_BUSY)
			++num_busy;

		if (state != STAGE_ENTRY_USED ||
				InterlockedCompareExchange (&entries [i].state, STAGE_ENTRY_BUSY, STAGE_ENTRY_USED) != STAGE_ENTRY_USED) {
			continue;
		}

		process_func (entries [i].obj, entries [i].user_data);

		entries [i].obj = NULL;
		entries [i].user_data = NULL;

		mono_memory_write_barrier ();

		entries [i].state = STAGE_ENTRY_FREE;

		++num_registered;
	}

	*next_entry = 0;

	/* g_print ("stage busy %d reg %d\n", num_busy, num_registered); */
}
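A hedged sketch of how the two halves above are presumably wired together; the table size and all names below are assumptions.

#define NUM_MY_STAGE_ENTRIES 1024

static volatile gint32 next_my_stage_entry;
static StageEntry my_stage_entries [NUM_MY_STAGE_ENTRIES];

/* Any thread may call this without taking the GC lock. */
static gboolean
stage_register (MonoObject *obj, void *user_data)
{
	return add_stage_entry (NUM_MY_STAGE_ENTRIES, &next_my_stage_entry,
			my_stage_entries, obj, user_data);
}

/* The collector drains the table under the GC lock. */
static void
stage_drain (void (*process_func) (MonoObject*, void*))
{
	process_stage_entries (NUM_MY_STAGE_ENTRIES, &next_my_stage_entry,
			my_stage_entries, process_func);
}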
Example #8
void
sgen_workers_start_all_workers (SgenObjectOperations *object_ops)
{
    idle_func_object_ops = object_ops;
    mono_memory_write_barrier ();

    sgen_workers_ensure_awake ();
}
Example #9
/*
Search @list for element with key @key.
The nodes next, cur and prev are returned in @hp.
Returns true if a node with key @key was found.
This function cannot be called from a signal nor within interrupt context*.
XXX A variant that works within interrupt context is possible if needed.

* interrupt context is when the current thread is responsible for another thread
being suspended at an arbitrary point. This is a limitation of our SMR implementation.
*/
gboolean
mono_lls_find (MonoLinkedListSet *list, MonoThreadHazardPointers *hp, uintptr_t key)
{
	MonoLinkedListSetNode *cur, *next;
	MonoLinkedListSetNode **prev;
	uintptr_t cur_key;

try_again:
	prev = &list->head;

	/*
	 * prev is not really a hazardous pointer, but we return prev
	 * in hazard pointer 2, so we set it here.  Note also that
	 * prev is not a pointer to a node.  We rely here on the fact that
	 * the first element in a node is the next pointer, so it
	 * works, but it's not pretty.
	 */
	mono_hazard_pointer_set (hp, 2, prev);

	cur = get_hazardous_pointer_with_mask ((gpointer*)prev, hp, 1);

	while (1) {
		if (cur == NULL)
			return FALSE;
		next = get_hazardous_pointer_with_mask ((gpointer*)&cur->next, hp, 0);
		cur_key = cur->key;

		/*
		 * We need to make sure that we dereference prev below
		 * after reading cur->next above, so we need a read
		 * barrier.
		 */
		mono_memory_read_barrier ();

		if (*prev != cur)
			goto try_again;

		if (!mono_lls_pointer_get_mark (next)) {
			if (cur_key >= key)
				return cur_key == key;

			prev = &cur->next;
			mono_hazard_pointer_set (hp, 2, cur);
		} else {
			next = mono_lls_pointer_unmask (next);
			if (InterlockedCompareExchangePointer ((volatile gpointer*)prev, next, cur) == cur) {
				/* The hazard pointer must be cleared after the CAS. */
				mono_memory_write_barrier ();
				mono_hazard_pointer_clear (hp, 1);
				if (list->free_node_func)
					mono_thread_hazardous_free_or_queue (cur, list->free_node_func);
			} else
				goto try_again;
		}
		cur = mono_lls_pointer_unmask (next);
		mono_hazard_pointer_set (hp, 1, cur);
	}
}
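A hedged lookup sketch for the find above: on success the matching node is the one protected by hazard pointer 1 and may be dereferenced until that slot is cleared. The list and the assertion are assumptions.

static gboolean
lookup_key (MonoLinkedListSet *my_list, uintptr_t key)
{
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
	gboolean found = mono_lls_find (my_list, hp, key);

	if (found) {
		MonoLinkedListSetNode *node = (MonoLinkedListSetNode *) mono_hazard_pointer_get_val (hp, 1);
		/* node stays valid while hazard pointer 1 points at it */
		g_assert (node->key == key);
	}

	mono_hazard_pointer_clear (hp, 0);
	mono_hazard_pointer_clear (hp, 1);
	mono_hazard_pointer_clear (hp, 2);

	return found;
}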
Example #10
mono_handle_new (MonoObject *object, const char *owner)
#endif
{
	MonoThreadInfo *info = mono_thread_info_current ();
	HandleStack *handles = (HandleStack *)info->handle_stack;
	HandleChunk *top = handles->top;

retry:
	if (G_LIKELY (top->size < OBJECTS_PER_HANDLES_CHUNK)) {
		int idx = top->size;
		MonoObject** objslot = chunk_element_objslot (top, idx);
		/* can be interrupted anywhere here, so:
		 * 1. make sure the new slot is null
		 * 2. make the new slot scannable (increment size)
		 * 3. put a valid object in there
		 *
		 * (have to do 1 then 3 so that if we're interrupted
		 * between 1 and 2, the object is still live)
		 */
		*objslot = NULL;
		mono_memory_write_barrier ();
		top->size++;
		mono_memory_write_barrier ();
		*objslot = object;
		SET_OWNER (top,idx);
		return objslot;
	}
	if (G_LIKELY (top->next)) {
		top->next->size = 0;
		/* make sure size == 0 is visible to a GC thread before it sees the new top */
		mono_memory_write_barrier ();
		top = top->next;
		handles->top = top;
		goto retry;
	}
	HandleChunk *new_chunk = g_new (HandleChunk, 1);
	new_chunk->size = 0;
	new_chunk->prev = top;
	new_chunk->next = NULL;
	/* make sure size == 0 before new chunk is visible */
	mono_memory_write_barrier ();
	top->next = new_chunk;
	handles->top = new_chunk;
	goto retry;
}
Example #11
static void
free_dummy (gpointer _dummy)
{
	MonoLockFreeQueueDummy *dummy = (MonoLockFreeQueueDummy *) _dummy;
	mono_lock_free_queue_node_free (&dummy->node);
	g_assert (dummy->in_use);
	mono_memory_write_barrier ();
	dummy->in_use = 0;
}
Example #12
HandleStack*
mono_handle_stack_alloc (void)
{
	HandleStack *stack = g_new (HandleStack, 1);
	HandleChunk *chunk = g_new (HandleChunk, 1);

	chunk->size = 0;
	chunk->prev = chunk->next = NULL;
	mono_memory_write_barrier ();
	stack->top = stack->bottom = chunk;
	return stack;
}
Example #13
static Descriptor*
desc_alloc (MonoMemAccountType type)
{
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
	Descriptor *desc;

	for (;;) {
		gboolean success;

		desc = (Descriptor *) mono_get_hazardous_pointer ((volatile gpointer *)&desc_avail, hp, 1);
		if (desc) {
			Descriptor *next = desc->next;
			success = (mono_atomic_cas_ptr ((volatile gpointer *)&desc_avail, next, desc) == desc);
		} else {
			size_t desc_size = sizeof (Descriptor);
			Descriptor *d;
			int i;

			desc = (Descriptor *) mono_valloc (NULL, desc_size * NUM_DESC_BATCH, prot_flags_for_activate (TRUE), type);
			g_assertf (desc, "Failed to allocate memory for the lock free allocator");

			/* Organize into linked list. */
			d = desc;
			for (i = 0; i < NUM_DESC_BATCH; ++i) {
				Descriptor *next = (i == (NUM_DESC_BATCH - 1)) ? NULL : (Descriptor*)((char*)desc + ((i + 1) * desc_size));
				d->next = next;
				mono_lock_free_queue_node_init (&d->node, TRUE);
				d = next;
			}

			mono_memory_write_barrier ();

			success = (mono_atomic_cas_ptr ((volatile gpointer *)&desc_avail, desc->next, NULL) == NULL);

			if (!success)
				mono_vfree (desc, desc_size * NUM_DESC_BATCH, type);
		}

		mono_hazard_pointer_clear (hp, 1);

		if (success)
			break;
	}

	g_assert (!desc->in_use);
	desc->in_use = TRUE;

	return desc;
}
Example #14
static void
desc_enqueue_avail (gpointer _desc)
{
	Descriptor *desc = (Descriptor *) _desc;
	Descriptor *old_head;

	g_assert (desc->anchor.data.state == STATE_EMPTY);
	g_assert (!desc->in_use);

	do {
		old_head = desc_avail;
		desc->next = old_head;
		mono_memory_write_barrier ();
	} while (mono_atomic_cas_ptr ((volatile gpointer *)&desc_avail, desc, old_head) != old_head);
}
Example #15
static void
desc_enqueue_avail (gpointer _desc)
{
	Descriptor *desc = (Descriptor *) _desc;
	Descriptor *old_head;

	g_assert (desc->anchor.data.state == STATE_EMPTY);
	g_assert (!desc->in_use);

	do {
		old_head = desc_avail;
		desc->next = old_head;
		mono_memory_write_barrier ();
	} while (InterlockedCompareExchangePointer ((gpointer * volatile)&desc_avail, desc, old_head) != old_head);
}
Example #16
HandleStack*
mono_handle_stack_alloc (void)
{
	HandleStack *stack = new_handle_stack ();
	HandleChunk *chunk = new_handle_chunk ();

	chunk->prev = chunk->next = NULL;
	chunk->size = 0;
	mono_memory_write_barrier ();
	stack->top = stack->bottom = chunk;
#ifdef MONO_HANDLE_TRACK_SP
	stack->stackmark_sp = NULL;
#endif
	return stack;
}
Example #17
static Descriptor*
desc_alloc (void)
{
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
	Descriptor *desc;

	for (;;) {
		gboolean success;

		desc = (Descriptor *) get_hazardous_pointer ((gpointer * volatile)&desc_avail, hp, 1);
		if (desc) {
			Descriptor *next = desc->next;
			success = (InterlockedCompareExchangePointer ((gpointer * volatile)&desc_avail, next, desc) == desc);
		} else {
			size_t desc_size = sizeof (Descriptor);
			Descriptor *d;
			int i;

			desc = (Descriptor *) mono_valloc (0, desc_size * NUM_DESC_BATCH, prot_flags_for_activate (TRUE));

			/* Organize into linked list. */
			d = desc;
			for (i = 0; i < NUM_DESC_BATCH; ++i) {
				Descriptor *next = (i == (NUM_DESC_BATCH - 1)) ? NULL : (Descriptor*)((char*)desc + ((i + 1) * desc_size));
				d->next = next;
				mono_lock_free_queue_node_init (&d->node, TRUE);
				d = next;
			}

			mono_memory_write_barrier ();

			success = (InterlockedCompareExchangePointer ((gpointer * volatile)&desc_avail, desc->next, NULL) == NULL);

			if (!success)
				mono_vfree (desc, desc_size * NUM_DESC_BATCH);
		}

		mono_hazard_pointer_clear (hp, 1);

		if (success)
			break;
	}

	g_assert (!desc->in_use);
	desc->in_use = TRUE;

	return desc;
}
Example #18
void
mono_handle_stack_free (HandleStack *stack)
{
	if (!stack)
		return;
	HandleChunk *c = stack->bottom;
	stack->top = stack->bottom = NULL;
	mono_memory_write_barrier ();
	while (c) {
		HandleChunk *next = c->next;
		free_handle_chunk (c);
		c = next;
	}
	free_handle_stack (stack);
}
Example #19
void
sgen_gray_object_alloc_queue_section (SgenGrayQueue *queue, gboolean is_parallel)
{
	GrayQueueSection *section;

	if (queue->free_list) {
		/* Use the previously allocated queue sections if possible */
		section = queue->free_list;
		queue->free_list = section->next;
		STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FREE_LIST, GRAY_QUEUE_SECTION_STATE_FLOATING);
	} else {
		HEAVY_STAT (stat_gray_queue_section_alloc ++);

		/* Allocate a new section */
		section = (GrayQueueSection *)sgen_alloc_internal (INTERNAL_MEM_GRAY_QUEUE);
		STATE_SET (section, GRAY_QUEUE_SECTION_STATE_FLOATING);
	}

	/* Section is empty */
	section->size = 0;

	STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_ENQUEUED);

	/* Link it with the others */
	section->next = queue->first;
	section->prev = NULL;
	if (queue->first)
		queue->first->prev = section;
	else
		queue->last = section;
	queue->first = section;
	queue->cursor = section->entries - 1;

	if (is_parallel) {
		mono_memory_write_barrier ();
		/*
		 * FIXME
		 * we could probably optimize the code to only rely on the write barrier
		 * for synchronization with the stealer thread. Additionally we could also
		 * do a write barrier once every other gray queue change, and request
		 * to have a minimum of sections before stealing, to keep consistency.
		 */
		mono_atomic_inc_i32 (&queue->num_sections);
	} else {
		queue->num_sections++;
	}
}
Example #20
/*
 * Clean up the threadpool, removing all jobs belonging to the given domain.
 * Can only be called as part of the domain unloading process, as
 * it will wait for all jobs to be visible to the interruption code.
 */
gboolean
mono_thread_pool_remove_domain_jobs (MonoDomain *domain, int timeout)
{
	HANDLE sem_handle;
	int result = TRUE;
	guint32 start_time = 0;

	g_assert (domain->state == MONO_APPDOMAIN_UNLOADING);

	threadpool_clear_queue (&async_tp, domain);
	threadpool_clear_queue (&async_io_tp, domain);

	EnterCriticalSection (&socket_io_data.io_lock);
	if (socket_io_data.sock_to_state)
		mono_g_hash_table_foreach_remove (socket_io_data.sock_to_state, remove_sockstate_for_domain, domain);

	LeaveCriticalSection (&socket_io_data.io_lock);
	
	/*
	 * There might be some threads out there that are about to execute stuff from the given domain.
	 * We avoid that by setting up a semaphore to be pulsed by the thread that brings the job count to zero.
	 */
	sem_handle = CreateSemaphore (NULL, 0, 1, NULL);

	domain->cleanup_semaphore = sem_handle;
	/*
	 * The memory barrier here is required to have global ordering between assigning to cleanup_semaphore
	 * and reading threadpool_jobs.
	 * Otherwise this thread could read a stale version of threadpool_jobs and wait forever.
	 */
	mono_memory_write_barrier ();

	if (domain->threadpool_jobs && timeout != -1)
		start_time = mono_msec_ticks ();
	while (domain->threadpool_jobs) {
		WaitForSingleObject (sem_handle, timeout);
		if (timeout != -1 && (mono_msec_ticks () - start_time) > timeout) {
			result = FALSE;
			break;
		}
	}

	domain->cleanup_semaphore = NULL;
	CloseHandle (sem_handle);
	return result;
}
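The write barrier above is one half of a handshake. A hedged sketch of the worker-side half it implies; the placement and the function are hypothetical, not the actual worker-loop code.

static void
job_finished (MonoDomain *domain)
{
	/* Mirror image of the unload path: drop the job count first, then
	 * look for a cleanup semaphore to pulse. */
	if (InterlockedDecrement (&domain->threadpool_jobs) == 0) {
		mono_memory_read_barrier ();
		if (domain->cleanup_semaphore)
			ReleaseSemaphore (domain->cleanup_semaphore, 1, NULL);
	}
}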
Example #21
int
mono_hazard_pointer_save_for_signal_handler (void)
{
	int small_id, i;
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
	MonoThreadHazardPointers *hp_overflow;

	for (i = 0; i < HAZARD_POINTER_COUNT; ++i)
		if (hp->hazard_pointers [i])
			goto search;
	return -1;

 search:
	for (small_id = 0; small_id < HAZARD_TABLE_OVERFLOW; ++small_id) {
		if (!overflow_busy [small_id])
			break;
	}

	/*
	 * If this assert fails we don't have enough overflow slots.
	 * We should contemplate adding them dynamically.  If we can
	 * make mono_thread_small_id_alloc() lock-free we can just
	 * allocate them on-demand.
	 */
	g_assert (small_id < HAZARD_TABLE_OVERFLOW);

	if (mono_atomic_cas_i32 (&overflow_busy [small_id], 1, 0) != 0)
		goto search;

	hp_overflow = &hazard_table [small_id];

	for (i = 0; i < HAZARD_POINTER_COUNT; ++i)
		g_assert (!hp_overflow->hazard_pointers [i]);
	*hp_overflow = *hp;

	mono_memory_write_barrier ();

	memset (hp, 0, sizeof (MonoThreadHazardPointers));

	return small_id;
}
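A hedged pairing of the save above with mono_hazard_pointer_restore_for_signal_handler from Example #2; the body in the middle is only a placeholder.

static void
signal_handler_body (void)
{
	/* Park any live hazard pointers in an overflow slot so the handler
	 * gets a clean set to work with. */
	int small_id = mono_hazard_pointer_save_for_signal_handler ();

	/* ... hazardous work using mono_hazard_pointer_get () goes here ... */

	/* A negative small_id means nothing was saved; restore handles that. */
	mono_hazard_pointer_restore_for_signal_handler (small_id);
}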
Example #22
static gpointer
alloc_from_new_sb (MonoLockFreeAllocator *heap)
{
	unsigned int slot_size, block_size, count, i;
	Descriptor *desc = desc_alloc (heap->account_type);

	slot_size = desc->slot_size = heap->sc->slot_size;
	block_size = desc->block_size = heap->sc->block_size;
	count = LOCK_FREE_ALLOC_SB_USABLE_SIZE (block_size) / slot_size;

	desc->heap = heap;
	/*
	 * Setting avail to 1 because 0 is the block we're allocating
	 * right away.
	 */
	desc->anchor.data.avail = 1;
	desc->slot_size = heap->sc->slot_size;
	desc->max_count = count;

	desc->anchor.data.count = desc->max_count - 1;
	desc->anchor.data.state = STATE_PARTIAL;

	desc->sb = alloc_sb (desc);

	/* Organize blocks into linked list. */
	for (i = 1; i < count - 1; ++i)
		*(unsigned int*)((char*)desc->sb + i * slot_size) = i + 1;

	*(unsigned int*)((char*)desc->sb + (count - 1) * slot_size) = 0;

	mono_memory_write_barrier ();

	/* Make it active or free it again. */
	if (mono_atomic_cas_ptr ((volatile gpointer *)&heap->active, desc, NULL) == NULL) {
		return desc->sb;
	} else {
		desc->anchor.data.state = STATE_EMPTY;
		desc_retire (desc);
		return NULL;
	}
}
Example #23
/*
Insert @value into @list.
The nodes value, cur and prev are returned in @hp.
Returns true if @value was inserted by this call. If it returns FALSE, it is the
caller's responsibility to release the memory.
This function cannot be called from a signal nor with the world stopped.
*/
gboolean
mono_lls_insert (MonoLinkedListSet *list, MonoThreadHazardPointers *hp, MonoLinkedListSetNode *value)
{
	MonoLinkedListSetNode *cur, **prev;
	/* We must do a store barrier before inserting
	to make sure all values in @value are globally visible. */
	mono_memory_barrier ();

	while (1) {
		if (mono_lls_find (list, hp, value->key))
			return FALSE;
		cur = mono_hazard_pointer_get_val (hp, 1);
		prev = mono_hazard_pointer_get_val (hp, 2);

		value->next = cur;
		mono_hazard_pointer_set (hp, 0, value);
		/* The CAS must happen after setting the hazard pointer. */
		mono_memory_write_barrier ();
		if (InterlockedCompareExchangePointer ((volatile gpointer*)prev, value, cur) == cur)
			return TRUE;
	}
}
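A hedged insertion sketch: per the comment above, a FALSE return means another thread already holds the key and the node stays owned by the caller. my_list and my_free are assumptions.

static gboolean
insert_my_node (MonoLinkedListSet *my_list, MonoLinkedListSetNode *node, void (*my_free) (gpointer))
{
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
	gboolean inserted = mono_lls_insert (my_list, hp, node);

	mono_hazard_pointer_clear (hp, 0);
	mono_hazard_pointer_clear (hp, 1);
	mono_hazard_pointer_clear (hp, 2);

	/* The node was never linked into the list, so a plain free is safe. */
	if (!inserted)
		my_free (node);

	return inserted;
}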
Example #24
static gpointer
alloc_from_new_sb (MonoLockFreeAllocator *heap)
{
	unsigned int slot_size, count, i;
	Descriptor *desc = desc_alloc ();

	desc->sb = alloc_sb (desc);

	slot_size = desc->slot_size = heap->sc->slot_size;
	count = SB_USABLE_SIZE / slot_size;

	/* Organize blocks into linked list. */
	for (i = 1; i < count - 1; ++i)
		*(unsigned int*)((char*)desc->sb + i * slot_size) = i + 1;

	desc->heap = heap;
	/*
	 * Setting avail to 1 because 0 is the block we're allocating
	 * right away.
	 */
	desc->anchor.data.avail = 1;
	desc->slot_size = heap->sc->slot_size;
	desc->max_count = count;

	desc->anchor.data.count = desc->max_count - 1;
	desc->anchor.data.state = STATE_PARTIAL;

	mono_memory_write_barrier ();

	/* Make it active or free it again. */
	if (InterlockedCompareExchangePointer ((gpointer * volatile)&heap->active, desc, NULL) == NULL) {
		return desc->sb;
	} else {
		desc->anchor.data.state = STATE_EMPTY;
		desc_retire (desc);
		return NULL;
	}
}
Example #25
static void*
alloc_from_fragment (Fragment *frag, size_t size)
{
	char *p = frag->fragment_next;
	char *end = p + size;

	if (end > frag->fragment_end)
		return NULL;

	/* The read p = frag->fragment_next above must happen before the CAS below. */
	mono_memory_barrier ();

	if (InterlockedCompareExchangePointer ((volatile gpointer*)&frag->fragment_next, end, p) != p)
		return NULL;

	if (frag->fragment_end - end < SGEN_MAX_NURSERY_WASTE) {
		Fragment *next, **prev_ptr;
		
		/*
		 * Before we clean the remaining nursery, we must claim the remaining space
		 * as it could end up being used by the range allocator, since it can end up
		 * allocating from this dying fragment as it doesn't respect SGEN_MAX_NURSERY_WASTE
		 * when doing second chance allocation.
		 */
		if (mono_sgen_get_nursery_clear_policy () == CLEAR_AT_TLAB_CREATION && claim_remaining_size (frag, end)) {
			/* Clear the remaining space, pinning depends on this. FIXME move this to use phony arrays */
			memset (end, 0, frag->fragment_end - end);
			HEAVY_STAT (InterlockedExchangeAdd (&stat_wasted_bytes_trailer, frag->fragment_end - end));
#ifdef NALLOC_DEBUG
			add_alloc_record (end, frag->fragment_end - end, BLOCK_ZEROING);
#endif
		}

		prev_ptr = find_previous_pointer_fragment (frag);

		/*Use Michael's linked list remove*/

		/*prev_ptr will be null if the fragment was removed concurrently */
		while (prev_ptr) {
			next = frag->next;

			/*already deleted*/
			if (!get_mark (next)) {
				/*frag->next read must happen before the first CAS*/
				mono_memory_write_barrier ();

				/*Fail if the next node is removed concurrently and its CAS wins */
				if (InterlockedCompareExchangePointer ((volatile gpointer*)&frag->next, mask (next, 1), next) != next) {
					continue;
				}
			}

			/* The second CAS must happen after the first CAS or frag->next. */
			mono_memory_write_barrier ();

			/* Fail if the previous node was deleted and its CAS wins */
			if (InterlockedCompareExchangePointer ((volatile gpointer*)prev_ptr, next, frag) != frag) {
				prev_ptr = find_previous_pointer_fragment (frag);
				continue;
			}

			/* No need to membar here since the worst that can happen is a CAS failure. */
			do {
				frag->next_free = fragment_freelist;
			} while (InterlockedCompareExchangePointer ((volatile gpointer*)&fragment_freelist, frag, frag->next_free) != frag->next_free);

			break;
		}
	}

	return p;
}
Example #26
/*
 * Allocate a small thread id.
 *
 * FIXME: The biggest part of this function is very similar to
 * domain_id_alloc() in domain.c and should be merged.
 */
int
mono_thread_small_id_alloc (void)
{
	int i, id = -1;

	mono_os_mutex_lock (&small_id_mutex);

	if (!small_id_table)
		small_id_table = mono_bitset_new (1, 0);

	id = mono_bitset_find_first_unset (small_id_table, small_id_next - 1);
	if (id == -1)
		id = mono_bitset_find_first_unset (small_id_table, -1);

	if (id == -1) {
		MonoBitSet *new_table;
		if (small_id_table->size * 2 >= (1 << 16))
			g_assert_not_reached ();
		new_table = mono_bitset_clone (small_id_table, small_id_table->size * 2);
		id = mono_bitset_find_first_unset (new_table, small_id_table->size - 1);

		mono_bitset_free (small_id_table);
		small_id_table = new_table;
	}

	g_assert (!mono_bitset_test_fast (small_id_table, id));
	mono_bitset_set_fast (small_id_table, id);

	small_id_next++;
	if (small_id_next >= small_id_table->size)
		small_id_next = 0;

	g_assert (id < HAZARD_TABLE_MAX_SIZE);
	if (id >= hazard_table_size) {
#if MONO_SMALL_CONFIG
		hazard_table = g_malloc0 (sizeof (MonoThreadHazardPointers) * HAZARD_TABLE_MAX_SIZE);
		hazard_table_size = HAZARD_TABLE_MAX_SIZE;
#else
		gpointer page_addr;
#if defined(__PASE__)
		/*
		 * HACK: allocating the table with MONO_MMAP_NONE protection will cause
		 * IBM i 7.1 to segfault when accessing or protecting it
		 */
		int table_prot = MONO_MMAP_READ | MONO_MMAP_WRITE;
#else
		int table_prot = MONO_MMAP_NONE;
#endif
		int pagesize = mono_pagesize ();
		int num_pages = (hazard_table_size * sizeof (MonoThreadHazardPointers) + pagesize - 1) / pagesize;

		if (hazard_table == NULL) {
			hazard_table = (MonoThreadHazardPointers *volatile) mono_valloc (NULL,
				sizeof (MonoThreadHazardPointers) * HAZARD_TABLE_MAX_SIZE,
				table_prot, MONO_MEM_ACCOUNT_HAZARD_POINTERS);
		}

		g_assert (hazard_table != NULL);
		page_addr = (guint8*)hazard_table + num_pages * pagesize;

		mono_mprotect (page_addr, pagesize, MONO_MMAP_READ | MONO_MMAP_WRITE);

		++num_pages;
		hazard_table_size = num_pages * pagesize / sizeof (MonoThreadHazardPointers);

#endif
		g_assert (id < hazard_table_size);
		for (i = 0; i < HAZARD_POINTER_COUNT; ++i)
			hazard_table [id].hazard_pointers [i] = NULL;
	}

	if (id > highest_small_id) {
		highest_small_id = id;
		mono_memory_write_barrier ();
	}

	mono_os_mutex_unlock (&small_id_mutex);

	return id;
}
static void*
par_alloc_from_fragment (SgenFragmentAllocator *allocator, SgenFragment *frag, size_t size)
{
	char *p = frag->fragment_next;
	char *end = p + size;

	if (end > frag->fragment_end)
		return NULL;

	/* The read p = frag->fragment_next above must happen before the CAS below. */
	mono_memory_barrier ();

	if (InterlockedCompareExchangePointer ((volatile gpointer*)&frag->fragment_next, end, p) != p)
		return NULL;

	if (frag->fragment_end - end < SGEN_MAX_NURSERY_WASTE) {
		SgenFragment *next, **prev_ptr;
		
		/*
		 * Before we clean the remaining nursery, we must claim the remaining space
		 * as it could end up being used by the range allocator, since it can end up
		 * allocating from this dying fragment as it doesn't respect SGEN_MAX_NURSERY_WASTE
		 * when doing second chance allocation.
		 */
		if ((sgen_get_nursery_clear_policy () == CLEAR_AT_TLAB_CREATION || sgen_get_nursery_clear_policy () == CLEAR_AT_TLAB_CREATION_DEBUG) && claim_remaining_size (frag, end)) {
			sgen_clear_range (end, frag->fragment_end);
			HEAVY_STAT (stat_wasted_bytes_trailer += frag->fragment_end - end);
#ifdef NALLOC_DEBUG
			add_alloc_record (end, frag->fragment_end - end, BLOCK_ZEROING);
#endif
		}

		prev_ptr = find_previous_pointer_fragment (allocator, frag);

		/*Use Michael's linked list remove*/

		/*prev_ptr will be null if the fragment was removed concurrently */
		while (prev_ptr) {
			next = frag->next;

			/*already deleted*/
			if (!get_mark (next)) {
				/*frag->next read must happen before the first CAS*/
				mono_memory_write_barrier ();

				/*Fail if the next node is removed concurrently and its CAS wins */
				if (InterlockedCompareExchangePointer ((volatile gpointer*)&frag->next, mask (next, 1), next) != next) {
					continue;
				}
			}

			/* The second CAS must happen after the first CAS or frag->next. */
			mono_memory_write_barrier ();

			/* Fail if the previous node was deleted and its CAS wins */
			if (InterlockedCompareExchangePointer ((volatile gpointer*)prev_ptr, unmask (next), frag) != frag) {
				prev_ptr = find_previous_pointer_fragment (allocator, frag);
				continue;
			}
			break;
		}
	}

	return p;
}
Example #28
MonoLockFreeQueueNode*
mono_lock_free_queue_dequeue (MonoLockFreeQueue *q)
{
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
	MonoLockFreeQueueNode *head;

 retry:
	for (;;) {
		MonoLockFreeQueueNode *tail, *next;

		head = (MonoLockFreeQueueNode *) get_hazardous_pointer ((gpointer volatile*)&q->head, hp, 0);
		tail = (MonoLockFreeQueueNode*)q->tail;
		mono_memory_read_barrier ();
		next = head->next;
		mono_memory_read_barrier ();

		/* Are head, tail and next consistent? */
		if (head == q->head) {
			g_assert (next != INVALID_NEXT && next != FREE_NEXT);
			g_assert (next != head);

			/* Is queue empty or tail behind? */
			if (head == tail) {
				if (next == END_MARKER) {
					/* Queue is empty */
					mono_hazard_pointer_clear (hp, 0);

					/*
					 * We only continue if we
					 * reenqueue the dummy
					 * ourselves, so as not to
					 * wait for threads that might
					 * not actually run.
					 */
					if (!is_dummy (q, head) && try_reenqueue_dummy (q))
						continue;

					return NULL;
				}

				/* Try to advance tail */
				InterlockedCompareExchangePointer ((gpointer volatile*)&q->tail, next, tail);
			} else {
				g_assert (next != END_MARKER);
				/* Try to dequeue head */
				if (InterlockedCompareExchangePointer ((gpointer volatile*)&q->head, next, head) == head)
					break;
			}
		}

		mono_memory_write_barrier ();
		mono_hazard_pointer_clear (hp, 0);
	}

	/*
	 * The head is dequeued now, so we know it's this thread's
	 * responsibility to free it - no other thread can.
	 */
	mono_memory_write_barrier ();
	mono_hazard_pointer_clear (hp, 0);

	g_assert (head->next);
	/*
	 * Setting next here isn't necessary for correctness, but we
	 * do it to make sure that we catch dereferencing next in a
	 * node that's not in the queue anymore.
	 */
	head->next = INVALID_NEXT;
#if QUEUE_DEBUG
	g_assert (head->in_queue);
	head->in_queue = FALSE;
	mono_memory_write_barrier ();
#endif

	if (is_dummy (q, head)) {
		g_assert (q->has_dummy);
		q->has_dummy = 0;
		mono_memory_write_barrier ();
		mono_thread_hazardous_free_or_queue (head, free_dummy, FALSE, TRUE);
		if (try_reenqueue_dummy (q))
			goto retry;
		return NULL;
	}

	/* The caller must hazardously free the node. */
	return head;
}
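A hedged consumer-side sketch matching the Job layout sketched after the enqueue in Example #2; per the comment above, the node goes through the hazardous-free machinery rather than being freed directly.

static void
free_job (gpointer node)
{
	g_free (node);
}

static gboolean
drain_one_job (MonoLockFreeQueue *queue)
{
	MonoLockFreeQueueNode *node = mono_lock_free_queue_dequeue (queue);

	if (!node)
		return FALSE;

	/* The node is the first member of Job, so the cast is valid. */
	g_print ("payload %d\n", ((Job *) node)->payload);

	mono_thread_hazardous_free_or_queue (node, free_job, FALSE, TRUE);
	return TRUE;
}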
Example #29
/*
 * Allocate a small thread id.
 *
 * FIXME: The biggest part of this function is very similar to
 * domain_id_alloc() in domain.c and should be merged.
 */
int
mono_thread_small_id_alloc (void)
{
	int i, id = -1;

	EnterCriticalSection (&small_id_mutex);

	if (!small_id_table)
		small_id_table = mono_bitset_new (1, 0);

	id = mono_bitset_find_first_unset (small_id_table, small_id_next);
	if (id == -1)
		id = mono_bitset_find_first_unset (small_id_table, -1);

	if (id == -1) {
		MonoBitSet *new_table;
		if (small_id_table->size * 2 >= (1 << 16))
			g_assert_not_reached ();
		new_table = mono_bitset_clone (small_id_table, small_id_table->size * 2);
		id = mono_bitset_find_first_unset (new_table, small_id_table->size - 1);

		mono_bitset_free (small_id_table);
		small_id_table = new_table;
	}

	g_assert (!mono_bitset_test_fast (small_id_table, id));
	mono_bitset_set_fast (small_id_table, id);

	small_id_next++;
	if (small_id_next >= small_id_table->size)
		small_id_next = 0;

	g_assert (id < HAZARD_TABLE_MAX_SIZE);
	if (id >= hazard_table_size) {
#if MONO_SMALL_CONFIG
		hazard_table = g_malloc0 (sizeof (MonoThreadHazardPointers) * HAZARD_TABLE_MAX_SIZE);
		hazard_table_size = HAZARD_TABLE_MAX_SIZE;
#else
		gpointer page_addr;
		int pagesize = mono_pagesize ();
		int num_pages = (hazard_table_size * sizeof (MonoThreadHazardPointers) + pagesize - 1) / pagesize;

		if (hazard_table == NULL) {
			hazard_table = mono_valloc (NULL,
				sizeof (MonoThreadHazardPointers) * HAZARD_TABLE_MAX_SIZE,
				MONO_MMAP_NONE);
		}

		g_assert (hazard_table != NULL);
		page_addr = (guint8*)hazard_table + num_pages * pagesize;

		mono_mprotect (page_addr, pagesize, MONO_MMAP_READ | MONO_MMAP_WRITE);

		++num_pages;
		hazard_table_size = num_pages * pagesize / sizeof (MonoThreadHazardPointers);

#endif
		g_assert (id < hazard_table_size);
		for (i = 0; i < HAZARD_POINTER_COUNT; ++i)
			hazard_table [id].hazard_pointers [i] = NULL;
	}

	if (id > highest_small_id) {
		highest_small_id = id;
		mono_memory_write_barrier ();
	}

	LeaveCriticalSection (&small_id_mutex);

	return id;
}
Example #30
/* We add elements to the table by first making space for them by
 * shifting the elements at the back to the right, one at a time.
 * This results in duplicate entries during the process, but during
 * all the time the table is in a sorted state.  Also, when an element
 * is replaced by another one, the element that replaces it has an end
 * address that is equal to or lower than that of the replaced
 * element.  That property is necessary to guarantee that when
 * searching for an element we end up at a position not higher than
 * the one we're looking for (i.e. we either find the element directly
 * or we end up to the left of it).
 */
static void
jit_info_table_add (MonoDomain *domain, MonoJitInfoTable *volatile *table_ptr, MonoJitInfo *ji)
{
	MonoJitInfoTable *table;
	MonoJitInfoTableChunk *chunk;
	int chunk_pos, pos;
	int num_elements;
	int i;

	table = *table_ptr;

 restart:
	chunk_pos = jit_info_table_index (table, (gint8*)ji->code_start + ji->code_size);
	g_assert (chunk_pos < table->num_chunks);
	chunk = table->chunks [chunk_pos];

	if (chunk->num_elements >= MONO_JIT_INFO_TABLE_CHUNK_SIZE) {
		MonoJitInfoTable *new_table = jit_info_table_chunk_overflow (table, chunk);

		/* Debugging code, should be removed. */
		//jit_info_table_check (new_table);

		*table_ptr = new_table;
		mono_memory_barrier ();
		domain->num_jit_info_tables++;
		mono_thread_hazardous_free_or_queue (table, (MonoHazardousFreeFunc)mono_jit_info_table_free, TRUE, FALSE);
		table = new_table;

		goto restart;
	}

	/* Debugging code, should be removed. */
	//jit_info_table_check (table);

	num_elements = chunk->num_elements;

	pos = jit_info_table_chunk_index (chunk, NULL, (gint8*)ji->code_start + ji->code_size);

	/* First we need to size up the chunk by one, by copying the
	   last item, or inserting the first one, if the table is
	   empty. */
	if (num_elements > 0)
		chunk->data [num_elements] = chunk->data [num_elements - 1];
	else
		chunk->data [0] = ji;
	mono_memory_write_barrier ();
	chunk->num_elements = ++num_elements;

	/* Shift the elements up one by one. */
	for (i = num_elements - 2; i >= pos; --i) {
		mono_memory_write_barrier ();
		chunk->data [i + 1] = chunk->data [i];
	}

	/* Now we have room and can insert the new item. */
	mono_memory_write_barrier ();
	chunk->data [pos] = ji;

	/* Set the high code end address chunk entry. */
	chunk->last_code_end = (gint8*)chunk->data [chunk->num_elements - 1]->code_start
		+ chunk->data [chunk->num_elements - 1]->code_size;

	/* Debugging code, should be removed. */
	//jit_info_table_check (table);
}
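The shifting scheme described in the comment before jit_info_table_add can be seen in isolation. A minimal stand-alone sketch in plain C, not Mono code, with the interleaved write barriers elided:

/* Insert value at pos into a sorted array of *num_elements items.  At every
 * intermediate step the array stays sorted; the only anomaly a concurrent
 * reader can observe is a transient duplicate. */
static void
sorted_insert_with_duplicates (int *data, int *num_elements, int pos, int value)
{
	int i;

	/* Size up by one: duplicate the last element, or place the first one. */
	if (*num_elements > 0)
		data [*num_elements] = data [*num_elements - 1];
	else
		data [0] = value;
	++*num_elements;

	/* Shift the elements up one by one. */
	for (i = *num_elements - 2; i >= pos; --i)
		data [i + 1] = data [i];

	/* Now there is room for the new item. */
	data [pos] = value;
}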