Example 1
void
mono_merp_enable (const char *appBundleID, const char *appSignature, const char *appVersion, const char *merpGUIPath, const char *eventType, const char *appPath, const char *configDir)
{
	mono_memory_barrier ();

	g_assert (!config.enable_merp);

	char *prefix = NULL;

	if (!configDir) {
		const char *home = g_get_home_dir ();
		prefix = g_strdup_printf ("%s/Library/Group Containers/UBF8T346G9.ms/", home);
	} else {
		prefix = g_strdup (configDir);
	}
	config.merpFilePath = g_strdup_printf ("%s%s", prefix, "MERP.uploadparams.txt");
	config.crashLogPath = g_strdup_printf ("%s%s", prefix, "lastcrashlog.txt");
	config.werXmlPath = g_strdup_printf ("%s%s", prefix, "CustomLogsMetadata.xml");
	g_free (prefix);

	config.moduleVersion = mono_get_runtime_callbacks ()->get_runtime_build_info ();

	config.appBundleID = g_strdup (appBundleID);
	config.appSignature = g_strdup (appSignature);
	config.appVersion = g_strdup (appVersion);
	config.merpGUIPath = g_strdup (merpGUIPath);
	config.eventType = g_strdup (eventType);
	config.appPath = g_strdup (appPath);

	config.log = g_getenv ("MONO_MERP_VERBOSE") != NULL;

	config.enable_merp = TRUE;

	mono_memory_barrier ();
}
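A hedged call sketch: every argument value below is invented for illustration, and passing NULL for configDir makes the function default to the group-container path built above.

	/* Hypothetical embedder enabling MERP at startup; all values are illustrative. */
	mono_merp_enable ("com.example.app",           /* appBundleID */
			  "ExampleSignature",          /* appSignature */
			  "1.0.0",                     /* appVersion */
			  "/Applications/Example.app/Contents/MERP.app", /* merpGUIPath */
			  "ExampleCrashEvent",         /* eventType */
			  "/Applications/Example.app", /* appPath */
			  NULL /* configDir: default to the group container */);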
Example 2
/**
 * mono_conc_hashtable_insert:
 *
 * Returns: the existing value if @key is already present, or NULL if the
 * new pair was inserted.
 */
gpointer
mono_conc_hashtable_insert (MonoConcurrentHashTable *hash_table, gpointer key, gpointer value)
{
	conc_table *table;
	key_value_pair *kvs;
	int hash, i, table_mask;

	g_assert (key != NULL && key != TOMBSTONE);
	g_assert (value != NULL);

	hash = mix_hash (hash_table->hash_func (key));
	mono_mutex_lock (hash_table->mutex);

	if (hash_table->element_count >= hash_table->overflow_count)
		expand_table (hash_table);

	table = (conc_table*)hash_table->table;
	kvs = table->kvs;
	table_mask = table->table_size - 1;
	i = hash & table_mask;

	if (!hash_table->equal_func) {
		for (;;) {
			if (!kvs [i].key || kvs [i].key == TOMBSTONE) {
				kvs [i].value = value;
				/* The write to the key must happen after the write to the value */
				mono_memory_barrier ();
				kvs [i].key = key;
				++hash_table->element_count;
				mono_mutex_unlock (hash_table->mutex);
				return NULL;
			}
			if (key == kvs [i].key) {
				gpointer value = kvs [i].value;
				mono_mutex_unlock (hash_table->mutex);
				return value;
			}
			i = (i + 1) & table_mask;
		}
	} else {
		GEqualFunc equal = hash_table->equal_func;
		for (;;) {
			if (!kvs [i].key || kvs [i].key == TOMBSTONE) {
				kvs [i].value = value;
				/* The write to the key must happen after the write to the value */
				mono_memory_barrier ();
				kvs [i].key = key;
				++hash_table->element_count;
				mono_mutex_unlock (hash_table->mutex);
				return NULL;
			}
			if (equal (key, kvs [i].key)) {
				gpointer value = kvs [i].value;
				mono_mutex_unlock (hash_table->mutex);
				return value;
			}
			i = (i + 1) & table_mask;
		}
	}
}
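A usage sketch, assuming the constructor mono_conc_hashtable_new (GHashFunc, GEqualFunc) from mono-conc-hashtable.h (treat that name as an assumption). The return value distinguishes a fresh insert from a key that was already present:

	MonoConcurrentHashTable *table = mono_conc_hashtable_new (g_str_hash, g_str_equal);
	gpointer prev = mono_conc_hashtable_insert (table, (gpointer) "key", (gpointer) "value");
	if (prev) {
		/* Key already present: PREV is the stored value and ours was
		 * not inserted, so the caller still owns it. */
	}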
Example 3
gpointer
mono_conc_hashtable_lookup (MonoConcurrentHashTable *hash_table, gpointer key)
{
	MonoThreadHazardPointers* hp;
	conc_table *table;
	int hash, i, table_mask;
	key_value_pair *kvs;
	hash = mix_hash (hash_table->hash_func (key));
	hp = mono_hazard_pointer_get ();

retry:
	table = (conc_table *)get_hazardous_pointer ((gpointer volatile*)&hash_table->table, hp, 0);
	table_mask = table->table_size - 1;
	kvs = table->kvs;
	i = hash & table_mask;

	if (G_LIKELY (!hash_table->equal_func)) {
		while (kvs [i].key) {
			if (key == kvs [i].key) {
				gpointer value;
				/* The read of keys must happen before the read of values */
				mono_memory_barrier ();
				value = kvs [i].value;
				/* FIXME check for NULL if we add support for removal */
				mono_hazard_pointer_clear (hp, 0);
				return value;
			}
			i = (i + 1) & table_mask;
		}
	} else {
		GEqualFunc equal = hash_table->equal_func;

		while (kvs [i].key) {
			if (kvs [i].key != TOMBSTONE && equal (key, kvs [i].key)) {
				gpointer value;
				/* The read of keys must happen before the read of values */
				mono_memory_barrier ();
				value = kvs [i].value;

			/* We just read a value being deleted; try again. */
				if (G_UNLIKELY (!value))
					goto retry;

				mono_hazard_pointer_clear (hp, 0);
				return value;
			}
			i = (i + 1) & table_mask;
		}
	}

	/* The table might have expanded and the value is now on the newer table */
	mono_memory_barrier ();
	if (hash_table->table != table)
		goto retry;

	mono_hazard_pointer_clear (hp, 0);
	return NULL;
}
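Lookups, by contrast, take no lock; the hazard pointer pins the current table while it is probed. Continuing the sketch above:

	/* Lock-free read; NULL means the key is absent. */
	gpointer v = mono_conc_hashtable_lookup (table, (gpointer) "key");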
Example 4
MonoMethod*
mono_gc_get_managed_allocator_by_type (int atype)
{
#ifdef MANAGED_ALLOCATION
	MonoMethod *res;

	if (!use_managed_allocator)
		return NULL;

	if (!mono_runtime_has_tls_get ())
		return NULL;

	res = alloc_method_cache [atype];
	if (res)
		return res;

	res = create_allocator (atype);
	LOCK_GC;
	if (alloc_method_cache [atype]) {
		mono_free_method (res);
		res = alloc_method_cache [atype];
	} else {
		mono_memory_barrier ();
		alloc_method_cache [atype] = res;
	}
	UNLOCK_GC;

	return res;
#else
	return NULL;
#endif
}
Example 5
/**
 * mono_gc_get_managed_allocator_by_type:
 *
 *   Return a managed allocator method corresponding to allocator type ATYPE.
 */
MonoMethod*
mono_gc_get_managed_allocator_by_type (int atype)
{
    int offset = -1;
    MonoMethod *res;
    MONO_THREAD_VAR_OFFSET (GC_thread_tls, offset);

    mono_tls_key_set_offset (TLS_KEY_BOEHM_GC_THREAD, offset);

    res = alloc_method_cache [atype];
    if (res)
        return res;

    res = create_allocator (atype, TLS_KEY_BOEHM_GC_THREAD);
    mono_mutex_lock (&mono_gc_lock);
    if (alloc_method_cache [atype]) {
        mono_free_method (res);
        res = alloc_method_cache [atype];
    } else {
        mono_memory_barrier ();
        alloc_method_cache [atype] = res;
    }
    mono_mutex_unlock (&mono_gc_lock);
    return res;
}
Example 6
void
ves_icall_System_IOSelector_Add (gpointer handle, MonoIOSelectorJob *job)
{
	ThreadPoolIOUpdate *update;

	g_assert (GPOINTER_TO_INT (handle) >= 0);

	g_assert ((job->operation == EVENT_IN) ^ (job->operation == EVENT_OUT));
	g_assert (job->callback);

	if (mono_runtime_is_shutting_down ())
		return;
	if (mono_domain_is_unloading (mono_object_domain (job)))
		return;

	mono_lazy_initialize (&io_status, initialize);

	mono_mutex_lock (&threadpool_io->updates_lock);

	update = update_get_new ();
	update->type = UPDATE_ADD;
	update->data.add.fd = GPOINTER_TO_INT (handle);
	update->data.add.job = job;
	mono_memory_barrier (); /* Ensure this is safely published before we wake up the selector */

	selector_thread_wakeup ();

	mono_mutex_unlock (&threadpool_io->updates_lock);
}
Example 7
gpointer
get_hazardous_pointer_with_mask (gpointer volatile *pp, MonoThreadHazardPointers *hp, int hazard_index)
{
	gpointer p;

	for (;;) {
		/* Get the pointer */
		p = *pp;
		/* If we don't have hazard pointers just return the
		   pointer. */
		if (!hp)
			return p;
		/* Make it hazardous */
		mono_hazard_pointer_set (hp, hazard_index, mono_lls_pointer_unmask (p));

		mono_memory_barrier ();

		/* Check that it's still the same.  If not, try
		   again. */
		if (*pp != p) {
			mono_hazard_pointer_clear (hp, hazard_index);
			continue;
		}
		break;
	}

	return p;
}
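The read/validate loop above is the core of the hazard-pointer protocol. A minimal self-contained analogue in C11 atomics, to make the ordering explicit (illustrative only, not Mono code):

	#include <stdatomic.h>
	#include <stddef.h>

	typedef struct { _Atomic(void *) slot; } hazard_record;

	static void *
	protect (hazard_record *hr, _Atomic(void *) *pp)
	{
		void *p;
		for (;;) {
			p = atomic_load (pp);
			atomic_store (&hr->slot, p);	/* make it hazardous */
			/* The seq_cst store doubles as the barrier; re-check
			 * that *pp still holds p before trusting the protection. */
			if (atomic_load (pp) == p)
				return p;	/* safe to dereference until cleared */
			atomic_store (&hr->slot, NULL);	/* changed under us: retry */
		}
	}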
Example 8
static void
workers_wait (void)
{
	State old_state, new_state;
	++stat_workers_num_waited;
	do {
		old_state = new_state = workers_state;
		/*
		 * Only the last worker thread awake can set the done
		 * posted flag, and since we're awake and haven't set
		 * it yet, it cannot be set.
		 */
		g_assert (!old_state.data.done_posted);
		++new_state.data.num_waiting;
		/*
		 * This is the only place where we use
		 * workers_gc_in_progress in the worker threads.
		 */
		if (new_state.data.num_waiting == workers_num && !old_state.data.gc_in_progress)
			new_state.data.done_posted = 1;
	} while (!set_state (old_state, new_state));
	mono_memory_barrier ();
	if (new_state.data.done_posted)
		MONO_SEM_POST (&workers_done_sem);
	MONO_SEM_WAIT (&workers_waiting_sem);
}
Example 9
static void
unlock_exclusive (void)
{
	mono_memory_barrier ();
	SGEN_ASSERT (0, binary_protocol_use_count == -1, "Exclusively locked count must be -1");
	if (InterlockedCompareExchange (&binary_protocol_use_count, 0, -1) != -1)
		SGEN_ASSERT (0, FALSE, "Somebody messed with the exclusive lock");
}
Example 10
static gboolean
try_lock_exclusive (void)
{
	do {
		if (binary_protocol_use_count)
			return FALSE;
	} while (InterlockedCompareExchange (&binary_protocol_use_count, -1, 0) != 0);
	mono_memory_barrier ();
	return TRUE;
}
Example 11
static void
unlock_recursive (void)
{
	int old_count;
	mono_memory_barrier ();
	do {
		old_count = binary_protocol_use_count;
		SGEN_ASSERT (0, old_count > 0, "Locked use count must be at least 1");
	} while (InterlockedCompareExchange (&binary_protocol_use_count, old_count - 1, old_count) != old_count);
}
Example 12
void
mono_merp_disable (void)
{
	mono_memory_barrier ();

	if (!config.enable_merp)
		return;

	g_free ((char*)config.appBundleID); // cast away const
	g_free ((char*)config.appSignature);
	g_free ((char*)config.appVersion);
	g_free ((char*)config.merpGUIPath);
	g_free ((char*)config.eventType);
	g_free ((char*)config.appPath); 
	g_free ((char*)config.moduleVersion);
	g_slist_free (config.annotations);
	memset (&config, 0, sizeof (config));

	mono_memory_barrier ();
}
Example 13
static void
lock_recursive (void)
{
	int old_count;
	do {
	retry:
		old_count = binary_protocol_use_count;
		if (old_count < 0) {
			/* Exclusively locked - retry */
			/* FIXME: short back-off */
			goto retry;
		}
	} while (InterlockedCompareExchange (&binary_protocol_use_count, old_count + 1, old_count) != old_count);
	mono_memory_barrier ();
}
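Taken together, the four binary_protocol_use_count functions (Examples 9, 10, 11 and 13) implement a reader/writer discipline over a single counter: any positive count means shared (recursive) holders, -1 means the single exclusive holder. A sketch of hypothetical call sites; the two protected functions are invented names:

	lock_recursive ();
	binary_protocol_write_entry ();	/* shared mode: may run concurrently */
	unlock_recursive ();

	if (try_lock_exclusive ()) {
		binary_protocol_flush_buffers ();	/* exclusive mode: sole owner */
		unlock_exclusive ();
	}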
Example 14
/* LOCKING: Must be called holding hash_table->mutex */
static void
expand_table (MonoConcurrentHashTable *hash_table)
{
	conc_table *old_table = (conc_table*)hash_table->table;
	conc_table *new_table = conc_table_new (old_table->table_size * 2);
	key_value_pair *kvs = old_table->kvs;
	int i;

	for (i = 0; i < old_table->table_size; ++i) {
		if (kvs [i].key && kvs [i].key != TOMBSTONE)
			insert_one_local (new_table, hash_table->hash_func, kvs [i].key, kvs [i].value);
	}
	mono_memory_barrier ();
	hash_table->table = new_table;
	hash_table->overflow_count = (int)(new_table->table_size * LOAD_FACTOR);
	conc_table_lf_free (old_table);
}
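The barrier before the table swap pairs with the retry check at the end of mono_conc_hashtable_lookup (Example 3): a reader either finds the key or notices the table changed and retries on the new one, and the old table is reclaimed only once no hazard pointer still references it. An illustrative interleaving:

	/* writer: expand_table              reader: mono_conc_hashtable_lookup
	 *   fill new_table                    table = hazard-protected load
	 *   mono_memory_barrier ()            probe table ... miss
	 *   hash_table->table = new_table     mono_memory_barrier ()
	 *   conc_table_lf_free (old_table)    hash_table->table != table -> retry
	 */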
Example 15
static void
init_reg_map (void)
{
	int i;

	g_assert (NUM_REGS > 0);
	for (i = 0; i < sizeof (map_hw_reg_to_dwarf_reg) / sizeof (int); ++i) {
		map_dwarf_reg_to_hw_reg [mono_hw_reg_to_dwarf_reg (i)] = i;
	}

#ifdef TARGET_POWERPC
	map_dwarf_reg_to_hw_reg [DWARF_PC_REG] = ppc_lr;
#endif

	mono_memory_barrier ();
	dwarf_reg_to_hw_reg_inited = TRUE;
}
Example 16
/**
 * mono_conc_hashtable_foreach_steal:
 *
 * Calls @func for each entry in the hashtable; if @func returns TRUE, the
 * entry is removed. Requires external locking.
 * Same semantics as g_hash_table_foreach_steal.
 */
void
mono_conc_hashtable_foreach_steal (MonoConcurrentHashTable *hash_table, GHRFunc func, gpointer userdata)
{
	int i;
	conc_table *table = (conc_table*)hash_table->table;
	key_value_pair *kvs = table->kvs;

	for (i = 0; i < table->table_size; ++i) {
		if (kvs [i].key && kvs [i].key != TOMBSTONE) {
			if (func (kvs [i].key, kvs [i].value, userdata)) {
				kvs [i].value = NULL;
				mono_memory_barrier ();
				kvs [i].key = TOMBSTONE;
				--hash_table->element_count;
			}
		}
	}
}
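A callback sketch; the value type and its dead flag are invented for the example:

	typedef struct { gboolean dead; } Session;	/* hypothetical value type */

	/* GHRFunc: returning TRUE steals (removes) the entry. */
	static gboolean
	remove_if_dead (gpointer key, gpointer value, gpointer user_data)
	{
		return ((Session *) value)->dead;
	}

	/* With the caller's external locking held, as required: */
	mono_conc_hashtable_foreach_steal (table, remove_if_dead, NULL);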
Example 17
static void
mono_init_merp (const intptr_t crashed_pid, const char *signal, MonoStackHash *hashes, MERPStruct *merp)
{
	mono_memory_barrier ();
	g_assert (mono_merp_enabled ());

	merp->merpFilePath = config.merpFilePath;
	merp->crashLogPath = config.crashLogPath;
	merp->werXmlPath = config.werXmlPath;

	// If these aren't set, the enabling icall was never made;
	// MERP should not be used in that case
	g_assert (config.appBundleID);
	g_assert (config.appVersion);
	merp->bundleIDArg = config.appSignature;
	merp->versionArg = config.appVersion;

	merp->archArg = get_merp_arch ();
	merp->exceptionArg = parse_exception_type (signal);

	merp->serviceNameArg = config.appBundleID;
	merp->servicePathArg = config.appPath;

	merp->moduleName = "Mono Exception";
	merp->moduleVersion = config.moduleVersion;

	merp->moduleOffset = 0;

	merp->uiLidArg = MONO_LOCALE_INVARIANT;

	merp->osVersion = os_version_string ();

	// FIXME: This is Apple-only for now
	merp->systemManufacturer = "apple";
	get_apple_model ((char *) merp->systemModel, sizeof (merp->systemModel));

	merp->eventType = config.eventType;

	merp->hashes = *hashes;

	merp->annotations = config.annotations;
}
Example 18
void
mono_summarize_toggle_assertions (gboolean enable)
{
#if defined(ENABLE_CHECKED_BUILD_CRASH_REPORTING) && defined (ENABLE_OVERRIDABLE_ALLOCATORS)
	static GMemVTable g_mem_vtable_backup;
	static gboolean saved;

	if (enable) {
		g_mem_get_vtable (&g_mem_vtable_backup);
		saved = TRUE;

		GMemVTable g_mem_vtable_assert = { assert_not_reached_fn_ptr_malloc, assert_not_reached_fn_ptr_realloc, assert_not_reached_fn_ptr_free, assert_not_reached_fn_ptr_calloc };
		g_mem_set_vtable (&g_mem_vtable_assert);
	} else if (saved) {
		g_mem_set_vtable (&g_mem_vtable_backup);
		saved = FALSE;
	}

	mono_memory_barrier ();
#endif
}
Example 19
void
mono_threadpool_ms_io_remove_domain_jobs (MonoDomain *domain)
{
	ThreadPoolIOUpdate *update;

	if (!mono_lazy_is_initialized (&io_status))
		return;

	mono_mutex_lock (&threadpool_io->updates_lock);

	update = update_get_new ();
	update->type = UPDATE_REMOVE_DOMAIN;
	update->data.remove_domain.domain = domain;
	mono_memory_barrier (); /* Ensure this is safely published before we wake up the selector */

	selector_thread_wakeup ();

	mono_cond_wait (&threadpool_io->updates_cond, &threadpool_io->updates_lock);

	mono_mutex_unlock (&threadpool_io->updates_lock);
}
Example 20
void
mono_threadpool_ms_io_remove_socket (int fd)
{
	ThreadPoolIOUpdate *update;

	if (!mono_lazy_is_initialized (&io_status))
		return;

	mono_mutex_lock (&threadpool_io->updates_lock);

	update = update_get_new ();
	update->type = UPDATE_REMOVE_SOCKET;
	update->data.add.fd = fd;
	mono_memory_barrier (); /* Ensure this is safely published before we wake up the selector */

	selector_thread_wakeup ();

	mono_cond_wait (&threadpool_io->updates_cond, &threadpool_io->updates_lock);

	mono_mutex_unlock (&threadpool_io->updates_lock);
}
Example 21
/*
Insert @value into @list.
The node's value, cur and prev are returned in @hp.
Returns TRUE if @value was inserted by this call. If it returns FALSE, it is
the caller's responsibility to release the memory.
This function cannot be called from a signal handler nor with the world stopped.
*/
gboolean
mono_lls_insert (MonoLinkedListSet *list, MonoThreadHazardPointers *hp, MonoLinkedListSetNode *value)
{
	MonoLinkedListSetNode *cur, **prev;
	/* We must do a store barrier before inserting
	 * to make sure all values in @node are globally visible. */
	mono_memory_barrier ();

	while (1) {
		if (mono_lls_find (list, hp, value->key))
			return FALSE;
		cur = mono_hazard_pointer_get_val (hp, 1);
		prev = mono_hazard_pointer_get_val (hp, 2);

		value->next = cur;
		mono_hazard_pointer_set (hp, 0, value);
		/* The CAS must happen after setting the hazard pointer. */
		mono_memory_write_barrier ();
		if (InterlockedCompareExchangePointer ((volatile gpointer*)prev, value, cur) == cur)
			return TRUE;
	}
}
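A usage sketch following the contract in the comment above; the allocation helpers are hypothetical:

	MonoLinkedListSetNode *node = alloc_node ();	/* hypothetical allocator */
	node->key = key;
	if (!mono_lls_insert (list, mono_hazard_pointer_get (), node))
		free_node (node);	/* FALSE: not inserted, we must release it */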
Example 22
void*
mono_sgen_nursery_alloc_range (size_t desired_size, size_t minimum_size, int *out_alloc_size)
{
	Fragment *frag, *min_frag;
	DEBUG (4, fprintf (gc_debug_file, "Searching for byte range desired size: %zu minimum size %zu\n", desired_size, minimum_size));

	HEAVY_STAT (InterlockedIncrement (&stat_nursery_alloc_range_requests));

restart:
	min_frag = NULL;

#ifdef NALLOC_DEBUG
	InterlockedIncrement (&alloc_count);
#endif

	for (frag = unmask (nursery_fragments); frag; frag = unmask (frag->next)) {
		int frag_size = frag->fragment_end - frag->fragment_next;

		HEAVY_STAT (InterlockedIncrement (&stat_alloc_range_iterations));

		if (desired_size <= frag_size) {
			void *p;
			*out_alloc_size = desired_size;

			p = alloc_from_fragment (frag, desired_size);
			if (!p) {
				HEAVY_STAT (InterlockedIncrement (&stat_alloc_range_retries));
				goto restart;
			}
#ifdef NALLOC_DEBUG
			add_alloc_record (p, desired_size, RANGE_ALLOC);
#endif
			return p;
		}
		if (minimum_size <= frag_size)
			min_frag = frag;
	}

	/* The second fragment_next read should be ordered with respect to the first code block */
	mono_memory_barrier ();

	if (min_frag) {
		void *p;
		int frag_size;

		frag_size = min_frag->fragment_end - min_frag->fragment_next;
		if (frag_size < minimum_size)
			goto restart;

		*out_alloc_size = frag_size;

		mono_memory_barrier ();
		p = alloc_from_fragment (min_frag, frag_size);

		/*XXX restarting here is quite dubious given this is already second chance allocation. */
		if (!p) {
			HEAVY_STAT (InterlockedIncrement (&stat_alloc_retries));
			goto restart;
		}
#ifdef NALLOC_DEBUG
		add_alloc_record (p, frag_size, RANGE_ALLOC);
#endif
		return p;
	}

	return NULL;
}
Example 23
/* We add elements to the table by first making space for them by
 * shifting the elements at the back to the right, one at a time.
 * This results in duplicate entries during the process, but the table
 * remains in a sorted state at all times.  Also, when an element
 * is replaced by another one, the element that replaces it has an end
 * address that is equal to or lower than that of the replaced
 * element.  That property is necessary to guarantee that when
 * searching for an element we end up at a position not higher than
 * the one we're looking for (i.e. we either find the element directly
 * or we end up to the left of it).
 */
static void
jit_info_table_add (MonoDomain *domain, MonoJitInfoTable *volatile *table_ptr, MonoJitInfo *ji)
{
	MonoJitInfoTable *table;
	MonoJitInfoTableChunk *chunk;
	int chunk_pos, pos;
	int num_elements;
	int i;

	table = *table_ptr;

 restart:
	chunk_pos = jit_info_table_index (table, (gint8*)ji->code_start + ji->code_size);
	g_assert (chunk_pos < table->num_chunks);
	chunk = table->chunks [chunk_pos];

	if (chunk->num_elements >= MONO_JIT_INFO_TABLE_CHUNK_SIZE) {
		MonoJitInfoTable *new_table = jit_info_table_chunk_overflow (table, chunk);

		/* Debugging code, should be removed. */
		//jit_info_table_check (new_table);

		*table_ptr = new_table;
		mono_memory_barrier ();
		domain->num_jit_info_tables++;
		mono_thread_hazardous_free_or_queue (table, (MonoHazardousFreeFunc)mono_jit_info_table_free, TRUE, FALSE);
		table = new_table;

		goto restart;
	}

	/* Debugging code, should be removed. */
	//jit_info_table_check (table);

	num_elements = chunk->num_elements;

	pos = jit_info_table_chunk_index (chunk, NULL, (gint8*)ji->code_start + ji->code_size);

	/* First we need to size up the chunk by one, by copying the
	   last item, or inserting the first one, if the table is
	   empty. */
	if (num_elements > 0)
		chunk->data [num_elements] = chunk->data [num_elements - 1];
	else
		chunk->data [0] = ji;
	mono_memory_write_barrier ();
	chunk->num_elements = ++num_elements;

	/* Shift the elements up one by one. */
	for (i = num_elements - 2; i >= pos; --i) {
		mono_memory_write_barrier ();
		chunk->data [i + 1] = chunk->data [i];
	}

	/* Now we have room and can insert the new item. */
	mono_memory_write_barrier ();
	chunk->data [pos] = ji;

	/* Set the high code end address chunk entry. */
	chunk->last_code_end = (gint8*)chunk->data [chunk->num_elements - 1]->code_start
		+ chunk->data [chunk->num_elements - 1]->code_size;

	/* Debugging code, should be removed. */
	//jit_info_table_check (table);
}
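A worked trace of the strategy described in the comment above (hypothetical four-slot chunk, inserting X at pos = 1):

	/*
	 *   [A, B, C, -]   num_elements = 3
	 *   [A, B, C, C]   copy the last item; num_elements = 4
	 *   [A, B, B, C]   shift loop copies B one slot to the right
	 *   [A, X, B, C]   store the new item at pos
	 *
	 * Every intermediate state is sorted (with duplicates), so a concurrent
	 * reader always lands at or to the left of the element it searches for.
	 */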
Example 24
MonoArray * 
ves_icall_System_String_InternalSplit (MonoString *me, MonoArray *separator, gint32 count, gint32 options)
{
	static MonoClass *String_array;
	MonoString * tmpstr;
	MonoArray * retarr;
	gunichar2 *src;
	gint32 arrsize, srcsize, splitsize;
	gint32 i, lastpos, arrpos;
	gint32 tmpstrsize;
	gint32 remempty;
	gint32 flag;
	gunichar2 *tmpstrptr;

	remempty = options & STRINGSPLITOPTIONS_REMOVE_EMPTY_ENTRIES;
	src = mono_string_chars (me);
	srcsize = mono_string_length (me);
	arrsize = mono_array_length (separator);

	if (!String_array) {
		MonoClass *klass = mono_array_class_get (mono_get_string_class (), 1);
		mono_memory_barrier ();
		String_array = klass;
	}

	splitsize = 1;
	/* Count the number of elements we will return. Note that this operation
	 * guarantees that we will return exactly splitsize elements, and we will
	 * have enough data to fill each. This allows us to skip some checks later on.
	 */
	if (remempty == 0) {
		for (i = 0; i != srcsize && splitsize < count; i++) {
			if (string_icall_is_in_array (separator, arrsize, src [i]))
				splitsize++;
		}
	} else if (count > 1) {
		/* Require pattern "Nondelim + Delim + Nondelim" to increment counter.
		 * Lastpos != 0 means first nondelim found.
		 * Flag = 0 means last char was delim.
		 * Efficient, though perhaps confusing.
		 */
		lastpos = 0;
		flag = 0;
		for (i = 0; i != srcsize && splitsize < count; i++) {
			if (string_icall_is_in_array (separator, arrsize, src [i])) {
				flag = 0;
			} else if (flag == 0) {
				if (lastpos == 1)
					splitsize++;
				flag = 1;
				lastpos = 1;
			}
		}

		/* Nothing but separators */
		if (lastpos == 0) {
			retarr = mono_array_new_specific (mono_class_vtable (mono_domain_get (), String_array), 0);
			return retarr;
		}
	}

	/* if no split chars found return the string */
	if (splitsize == 1) {
		if (remempty == 0 || count == 1) {
			/* Copy the whole string */
			retarr = mono_array_new_specific (mono_class_vtable (mono_domain_get (), String_array), 1);
			mono_array_setref (retarr, 0, me);
		} else {
			/* otherwise we have to filter out leading & trailing delims */

			/* find first non-delim char */
			for (; srcsize != 0; srcsize--, src++) {
				if (!string_icall_is_in_array (separator, arrsize, src [0]))
					break;
			}
			/* find last non-delim char */
			for (; srcsize != 0; srcsize--) {
				if (!string_icall_is_in_array (separator, arrsize, src [srcsize - 1]))
					break;
			}
			tmpstr = mono_string_new_size (mono_domain_get (), srcsize);
			tmpstrptr = mono_string_chars (tmpstr);

			memcpy (tmpstrptr, src, srcsize * sizeof (gunichar2));
			retarr = mono_array_new_specific (mono_class_vtable (mono_domain_get (), String_array), 1);
			mono_array_setref (retarr, 0, tmpstr);
		}
		return retarr;
	}

	lastpos = 0;
	arrpos = 0;
	
	retarr = mono_array_new_specific (mono_class_vtable (mono_domain_get (), String_array), splitsize);

	for (i = 0; i != srcsize && arrpos != splitsize; i++) {
		if (string_icall_is_in_array (separator, arrsize, src [i])) {
			
			if (lastpos != i || remempty == 0) {
				tmpstrsize = i - lastpos;
				tmpstr = mono_string_new_size (mono_domain_get (), tmpstrsize);
				tmpstrptr = mono_string_chars (tmpstr);

				memcpy (tmpstrptr, src + lastpos, tmpstrsize * sizeof (gunichar2));
				mono_array_setref (retarr, arrpos, tmpstr);
				arrpos++;

				if (arrpos == splitsize - 1) {
					/* Shortcut the last array element */

					lastpos = i + 1;
					if (remempty != 0) {
						/* Search for a non-delim starting char (guaranteed to find one). Note that the loop
						 * condition is only there for safety; it will never actually terminate the loop. */
						for (; lastpos != srcsize ; lastpos++) {
							if (!string_icall_is_in_array (separator, arrsize, src [lastpos])) 
								break;
						}
						if (count > splitsize) {
							/* Since we have fewer results than our limit, we must remove
							 * trailing delimiters as well. 
							 */
							for (; srcsize != lastpos + 1 ; srcsize--) {
								if (!string_icall_is_in_array (separator, arrsize, src [srcsize - 1])) 
									break;
							}
						}
					}

					tmpstrsize = srcsize - lastpos;
					tmpstr = mono_string_new_size (mono_domain_get (), tmpstrsize);
					tmpstrptr = mono_string_chars (tmpstr);

					memcpy (tmpstrptr, src + lastpos, tmpstrsize * sizeof (gunichar2));
					mono_array_setref (retarr, arrpos, tmpstr);

					/* The loop will ALWAYS end here; the test criteria in the FOR loop are technically unnecessary. */
					break;
				}
			}
			lastpos = i + 1;
		}
	}

	return retarr;
}
Example 25
void*
sgen_fragment_allocator_par_range_alloc (SgenFragmentAllocator *allocator, size_t desired_size, size_t minimum_size, size_t *out_alloc_size)
{
	SgenFragment *frag, *min_frag;
	size_t current_minimum;

restart:
	min_frag = NULL;
	current_minimum = minimum_size;

#ifdef NALLOC_DEBUG
	InterlockedIncrement (&alloc_count);
#endif

	for (frag = (SgenFragment *)unmask (allocator->alloc_head); frag; frag = (SgenFragment *)unmask (frag->next)) {
		size_t frag_size = frag->fragment_end - frag->fragment_next;

		HEAVY_STAT (++stat_alloc_range_iterations);

		if (desired_size <= frag_size) {
			void *p;
			*out_alloc_size = desired_size;

			p = par_alloc_from_fragment (allocator, frag, desired_size);
			if (!p) {
				HEAVY_STAT (++stat_alloc_range_retries);
				goto restart;
			}
#ifdef NALLOC_DEBUG
			add_alloc_record (p, desired_size, RANGE_ALLOC);
#endif
			return p;
		}
		if (current_minimum <= frag_size) {
			min_frag = frag;
			current_minimum = frag_size;
		}
	}

	/* The second fragment_next read should be ordered with respect to the first code block */
	mono_memory_barrier ();

	if (min_frag) {
		void *p;
		size_t frag_size;

		frag_size = min_frag->fragment_end - min_frag->fragment_next;
		if (frag_size < minimum_size)
			goto restart;

		*out_alloc_size = frag_size;

		mono_memory_barrier ();
		p = par_alloc_from_fragment (allocator, min_frag, frag_size);

		/*XXX restarting here is quite dubious given this is already second chance allocation. */
		if (!p) {
			HEAVY_STAT (++stat_alloc_retries);
			goto restart;
		}
#ifdef NALLOC_DEBUG
		add_alloc_record (p, frag_size, RANGE_ALLOC);
#endif
		return p;
	}

	return NULL;
}
Example 26
static void*
par_alloc_from_fragment (SgenFragmentAllocator *allocator, SgenFragment *frag, size_t size)
{
	char *p = frag->fragment_next;
	char *end = p + size;

	if (end > frag->fragment_end)
		return NULL;

	/* p = frag->fragment_next must happen before */
	mono_memory_barrier ();

	if (InterlockedCompareExchangePointer ((volatile gpointer*)&frag->fragment_next, end, p) != p)
		return NULL;

	if (frag->fragment_end - end < SGEN_MAX_NURSERY_WASTE) {
		SgenFragment *next, **prev_ptr;
		
		/*
		 * Before we clean the remaining nursery, we must claim the remaining space
		 * as it could end up being used by the range allocator since it can end up
		 * allocating from this dying fragment as it doesn't respect SGEN_MAX_NURSERY_WASTE
		 * when doing second chance allocation.
		 */
		if ((sgen_get_nursery_clear_policy () == CLEAR_AT_TLAB_CREATION || sgen_get_nursery_clear_policy () == CLEAR_AT_TLAB_CREATION_DEBUG) && claim_remaining_size (frag, end)) {
			sgen_clear_range (end, frag->fragment_end);
			HEAVY_STAT (stat_wasted_bytes_trailer += frag->fragment_end - end);
#ifdef NALLOC_DEBUG
			add_alloc_record (end, frag->fragment_end - end, BLOCK_ZEROING);
#endif
		}

		prev_ptr = find_previous_pointer_fragment (allocator, frag);

		/*Use Michael's linked list remove*/

		/*prev_ptr will be null if the fragment was removed concurrently */
		while (prev_ptr) {
			next = frag->next;

			/*already deleted*/
			if (!get_mark (next)) {
				/*frag->next read must happen before the first CAS*/
				mono_memory_write_barrier ();

				/*Fail if the next node is removed concurrently and its CAS wins */
				if (InterlockedCompareExchangePointer ((volatile gpointer*)&frag->next, mask (next, 1), next) != next) {
					continue;
				}
			}

			/* The second CAS must happen after the first CAS or the frag->next read. */
			mono_memory_write_barrier ();

			/* Fail if the previous node was deleted and its CAS wins */
			if (InterlockedCompareExchangePointer ((volatile gpointer*)prev_ptr, unmask (next), frag) != frag) {
				prev_ptr = find_previous_pointer_fragment (allocator, frag);
				continue;
			}
			break;
		}
	}

	return p;
}
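A self-contained analogue of the CAS bump allocation at the top of par_alloc_from_fragment, in C11 atomics (illustrative only, not Mono code):

	#include <stdatomic.h>
	#include <stddef.h>

	typedef struct {
		_Atomic(char *) next;	/* allocation cursor (fragment_next) */
		char *end;		/* fragment_end */
	} bump_region;

	static void *
	bump_alloc (bump_region *r, size_t size)
	{
		char *p = atomic_load (&r->next);
		for (;;) {
			char *new_next = p + size;
			if (new_next > r->end)
				return NULL;	/* region exhausted */
			/* On failure the CAS reloads p with the current cursor,
			 * so we simply retry against the competing allocation. */
			if (atomic_compare_exchange_weak (&r->next, &p, new_next))
				return p;
		}
	}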
Example 27
/**
 * mono_conc_hashtable_remove:
 *
 * Remove a value from the hashtable. Requires external locking.
 *
 * Returns: the old value if @key was present, or NULL otherwise.
 */
gpointer
mono_conc_hashtable_remove (MonoConcurrentHashTable *hash_table, gpointer key)
{
	conc_table *table;
	key_value_pair *kvs;
	int hash, i, table_mask;

	g_assert (key != NULL && key != TOMBSTONE);

	hash = mix_hash (hash_table->hash_func (key));

	table = (conc_table*)hash_table->table;
	kvs = table->kvs;
	table_mask = table->table_size - 1;
	i = hash & table_mask;

	if (!hash_table->equal_func) {
		for (;;) {
			if (!kvs [i].key) {
				return NULL; /*key not found*/
			}

			if (key == kvs [i].key) {
				gpointer value = kvs [i].value;
				kvs [i].value = NULL;
				mono_memory_barrier ();
				kvs [i].key = TOMBSTONE;

				if (hash_table->key_destroy_func != NULL)
					(*hash_table->key_destroy_func) (key);
				if (hash_table->value_destroy_func != NULL)
					(*hash_table->value_destroy_func) (value);

				return value;
			}
			i = (i + 1) & table_mask;
		}
	} else {
		GEqualFunc equal = hash_table->equal_func;
		for (;;) {
			if (!kvs [i].key) {
				return NULL; /*key not found*/
			}

			if (kvs [i].key != TOMBSTONE && equal (key, kvs [i].key)) {
				gpointer old_key = kvs [i].key;
				gpointer value = kvs [i].value;
				kvs [i].value = NULL;
				mono_memory_barrier ();
				kvs [i].key = TOMBSTONE;

				if (hash_table->key_destroy_func != NULL)
					(*hash_table->key_destroy_func) (old_key);
				if (hash_table->value_destroy_func != NULL)
					(*hash_table->value_destroy_func) (value);
				return value;
			}

			i = (i + 1) & table_mask;
		}
	}
}
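Unlike insertion, removal takes no lock internally, so the caller must serialize it; a sketch with a hypothetical external mutex:

	mono_mutex_lock (&owner_lock);	/* hypothetical external lock */
	gpointer removed = mono_conc_hashtable_remove (table, (gpointer) "key");
	mono_mutex_unlock (&owner_lock);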
Example 28
static void*
alloc_from_fragment (Fragment *frag, size_t size)
{
	char *p = frag->fragment_next;
	char *end = p + size;

	if (end > frag->fragment_end)
		return NULL;

	/* p = frag->fragment_next must happen before */
	mono_memory_barrier ();

	if (InterlockedCompareExchangePointer ((volatile gpointer*)&frag->fragment_next, end, p) != p)
		return NULL;

	if (frag->fragment_end - end < SGEN_MAX_NURSERY_WASTE) {
		Fragment *next, **prev_ptr;
		
		/*
		 * Before we clean the remaining nursery, we must claim the remaining space
		 * as it could end up being used by the range allocator since it can end up
		 * allocating from this dying fragment as it doesn't respect SGEN_MAX_NURSERY_WASTE
		 * when doing second chance allocation.
		 */
		if (mono_sgen_get_nursery_clear_policy () == CLEAR_AT_TLAB_CREATION && claim_remaining_size (frag, end)) {
			/* Clear the remaining space, pinning depends on this. FIXME move this to use phony arrays */
			memset (end, 0, frag->fragment_end - end);
			HEAVY_STAT (InterlockedExchangeAdd (&stat_wasted_bytes_trailer, frag->fragment_end - end));
#ifdef NALLOC_DEBUG
			add_alloc_record (end, frag->fragment_end - end, BLOCK_ZEROING);
#endif
		}

		prev_ptr = find_previous_pointer_fragment (frag);

		/*Use Michael's linked list remove*/

		/*prev_ptr will be null if the fragment was removed concurrently */
		while (prev_ptr) {
			next = frag->next;

			/*already deleted*/
			if (!get_mark (next)) {
				/*frag->next read must happen before the first CAS*/
				mono_memory_write_barrier ();

				/*Fail if the next node is removed concurrently and its CAS wins */
				if (InterlockedCompareExchangePointer ((volatile gpointer*)&frag->next, mask (next, 1), next) != next) {
					continue;
				}
			}

			/* The second CAS must happen after the first CAS or the frag->next read. */
			mono_memory_write_barrier ();

			/* Fail if the previous node was deleted and its CAS wins */
			if (InterlockedCompareExchangePointer ((volatile gpointer*)prev_ptr, next, frag) != frag) {
				prev_ptr = find_previous_pointer_fragment (frag);
				continue;
			}

			/* No need to membar here since the worst that can happen is a CAS failure. */
			do {
				frag->next_free = fragment_freelist;
			} while (InterlockedCompareExchangePointer ((volatile gpointer*)&fragment_freelist, frag, frag->next_free) != frag->next_free);

			break;
		}
	}

	return p;
}