Example #1
static mono_native_thread_return_t
thread_func (void *thread_data)
{
	thread_init_func (thread_data);

	mono_mutex_lock (&lock);
	for (;;) {
		/*
		 * It's important that we check the continue idle flag with the lock held.
		 * Suppose we didn't check with the lock held, and the result is FALSE.  The
		 * main thread might then set continue idle and signal us before we can take
		 * the lock, and we'd lose the signal.
		 */
		gboolean do_idle = continue_idle_job ();
		SgenThreadPoolJob *job = get_job_and_set_in_progress ();

		if (!job && !do_idle) {
			/*
			 * pthread_cond_wait() can return successfully despite the condition
			 * not being signalled, so we have to run this in a loop until we
			 * really have work to do.
			 */
			mono_cond_wait (&work_cond, &lock);
			continue;
		}

		mono_mutex_unlock (&lock);

		if (job) {
			job->func (thread_data, job);

			mono_mutex_lock (&lock);

			SGEN_ASSERT (0, job->state == STATE_IN_PROGRESS, "The job should still be in progress.");
			job->state = STATE_DONE;
			remove_job (job);
			/*
			 * Only the main GC thread will ever wait on the done condition, so we don't
			 * have to broadcast.
			 */
			mono_cond_signal (&done_cond);
		} else {
			SGEN_ASSERT (0, do_idle, "Why did we unlock if we still have to wait for idle?");
			SGEN_ASSERT (0, idle_job_func, "Why do we have idle work when there's no idle job function?");
			do {
				idle_job_func (thread_data);
				do_idle = continue_idle_job ();
			} while (do_idle && !job_queue.next_slot);

			mono_mutex_lock (&lock);

			if (!do_idle)
				mono_cond_signal (&done_cond);
		}
	}
}
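The comment at the top of the loop above describes the classic lost-wakeup hazard: the predicate must be tested under the same mutex the signaler holds while changing it. A minimal, self-contained sketch of the matching producer side in plain pthreads (names here are illustrative, not the Mono API):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t sketch_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t sketch_work_cond = PTHREAD_COND_INITIALIZER;
static bool sketch_have_work; /* the predicate; only touched with sketch_lock held */

static void
producer_signal_work (void)
{
	pthread_mutex_lock (&sketch_lock);
	sketch_have_work = true; /* update the predicate under the lock... */
	pthread_cond_signal (&sketch_work_cond); /* ...so the waiter either sees the flag or gets the signal */
	pthread_mutex_unlock (&sketch_lock);
}

Because the flag flips while the lock is held, the unlocked window between the consumer's "test flag" and "cond_wait" never exists, which is exactly what the comment in thread_func is guarding against.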
Example #2
static void mono_portability_remember_string (MonoProfiler *prof, MonoDomain *domain, MonoString *str)
{
	SavedString *head, *entry;

	if (!str || !domain || !runtime_initialized)
		return;

	entry = (SavedString*)g_malloc0 (sizeof (SavedString));
	entry->string = str;
	entry->domain = domain;
	entry->stack_entries = mono_stack_backtrace (prof, domain, entry->stack, BACKTRACE_SIZE);
	if (entry->stack_entries == 0) {
		g_free (entry);
		return;
	}

	mono_mutex_lock (&mismatched_files_section);
	head = (SavedString*)g_hash_table_lookup (prof->saved_strings_hash, (gpointer)str);
	if (head) {
		while (head->next)
			head = head->next;
		head->next = entry;
	} else
		g_hash_table_insert (prof->saved_strings_hash, (gpointer)str, (gpointer)entry);
	mono_mutex_unlock (&mismatched_files_section);
}
Example #3
gboolean
mono_thread_info_resume (MonoNativeThreadId tid)
{
	gboolean result = TRUE;
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();	
	MonoThreadInfo *info = mono_thread_info_lookup (tid); /*info on HP1*/
	if (!info)
		return FALSE;

	mono_mutex_lock (&info->suspend_lock);

	THREADS_DEBUG ("resume %x IN COUNT %d\n", tid, info->suspend_count);

	if (info->suspend_count <= 0) {
		mono_mutex_unlock (&info->suspend_lock);
		mono_hazard_pointer_clear (hp, 1);
		return FALSE;
	}

	/*
	 * The theory here is that if we manage to suspend the thread, it means it has not
	 * started cleanup, since cleanup takes the same lock.
	 */
	g_assert (mono_thread_info_get_tid (info));

	if (--info->suspend_count == 0)
		result = mono_thread_info_resume_internal (info);

	mono_mutex_unlock (&info->suspend_lock);
	mono_hazard_pointer_clear (hp, 1);

	return result;
}
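Every exit path above has to pair the unlock with clearing hazard pointer slot 1. A hedged restructuring sketch (same calls as the function above, not the actual Mono source) showing the single-cleanup-label idiom that makes that pairing harder to get wrong:

static gboolean
resume_sketch (MonoNativeThreadId tid)
{
	gboolean result = FALSE;
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
	MonoThreadInfo *info = mono_thread_info_lookup (tid); /* info on HP1 */
	if (!info)
		return FALSE;

	mono_mutex_lock (&info->suspend_lock);
	if (info->suspend_count <= 0)
		goto done; /* nothing to resume; result stays FALSE */
	result = TRUE;
	if (--info->suspend_count == 0)
		result = mono_thread_info_resume_internal (info);
done:
	mono_mutex_unlock (&info->suspend_lock);
	mono_hazard_pointer_clear (hp, 1); /* one place, every path */
	return result;
}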
Example #4
void
mono_thread_info_self_suspend (void)
{
	gboolean ret;
	MonoThreadInfo *info = mono_thread_info_current ();
	if (!info)
		return;

	mono_mutex_lock (&info->suspend_lock);

	THREADS_DEBUG ("self suspend IN COUNT %d\n", info->suspend_count);

	g_assert (info->suspend_count == 0);
	++info->suspend_count;

	info->thread_state |= STATE_SELF_SUSPENDED;

	ret = mono_threads_get_runtime_callbacks ()->thread_state_init_from_sigctx (&info->suspend_state, NULL);
	g_assert (ret);

	mono_mutex_unlock (&info->suspend_lock);

	while (MONO_SEM_WAIT (&info->resume_semaphore) != 0) {
		/*if (EINTR != errno) ABORT("sem_wait failed"); */
	}

	g_assert (!info->async_target); /*FIXME this should happen normally for suspend. */
	MONO_SEM_POST (&info->finish_resume_semaphore);
}
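The second half of the handshake is not shown here; presumably the resuming thread posts resume_semaphore and then blocks on finish_resume_semaphore until the self-suspended thread acknowledges. A hedged sketch of that assumed counterpart (not copied from the Mono sources):

static void
resume_self_suspended_sketch (MonoThreadInfo *info)
{
	/* release the thread blocked in mono_thread_info_self_suspend */
	MONO_SEM_POST (&info->resume_semaphore);
	/* wait for it to acknowledge before touching its state again */
	while (MONO_SEM_WAIT (&info->finish_resume_semaphore) != 0) {
		/* retry on EINTR */
	}
}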
Example #5
void
sgen_workers_enqueue_job (const char *name, JobFunc func, void *data)
{
	int num_entries;
	JobQueueEntry *entry;

	if (!collection_needs_workers ()) {
		func (NULL, data);
		return;
	}

	entry = sgen_alloc_internal (INTERNAL_MEM_JOB_QUEUE_ENTRY);
	entry->name = name;
	entry->func = func;
	entry->data = data;

	mono_mutex_lock (&workers_job_queue_mutex);
	entry->next = workers_job_queue;
	workers_job_queue = entry;
	num_entries = ++workers_job_queue_num_entries;
	++workers_num_jobs_enqueued;
	mono_mutex_unlock (&workers_job_queue_mutex);

	if (workers_state.data.state != STATE_NURSERY_COLLECTION)
		workers_signal_enqueue_work_if_necessary (num_entries < workers_num ? num_entries : workers_num);
}
Example #6
static void*
codechunk_valloc (void *preferred, guint32 size)
{
	void *ptr;
	GSList *freelist;

	if (!valloc_freelists) {
		mono_mutex_init_recursive (&valloc_mutex);
		valloc_freelists = g_hash_table_new (NULL, NULL);
	}

	/*
	 * Keep a small freelist of memory blocks to decrease pressure on the kernel memory subsystem to avoid #3321.
	 */
	mono_mutex_lock (&valloc_mutex);
	freelist = (GSList *) g_hash_table_lookup (valloc_freelists, GUINT_TO_POINTER (size));
	if (freelist) {
		ptr = freelist->data;
		memset (ptr, 0, size);
		freelist = g_slist_delete_link (freelist, freelist);
		g_hash_table_insert (valloc_freelists, GUINT_TO_POINTER (size), freelist);
	} else {
		ptr = mono_valloc (preferred, size, MONO_PROT_RWX | ARCH_MAP_FLAGS);
		if (!ptr && preferred)
			ptr = mono_valloc (NULL, size, MONO_PROT_RWX | ARCH_MAP_FLAGS);
	}
	mono_mutex_unlock (&valloc_mutex);
	return ptr;
}
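The matching free path is not shown; presumably it pushes blocks back onto the per-size freelist instead of returning them to the kernel, which is what makes the cache above effective. A hedged sketch of that assumed counterpart (the real version would likely also cap the list length):

static void
codechunk_vfree_sketch (void *ptr, guint32 size)
{
	GSList *freelist;

	/* assumes codechunk_valloc already ran once, so the mutex and table exist */
	mono_mutex_lock (&valloc_mutex);
	freelist = (GSList *) g_hash_table_lookup (valloc_freelists, GUINT_TO_POINTER (size));
	freelist = g_slist_prepend (freelist, ptr);
	g_hash_table_insert (valloc_freelists, GUINT_TO_POINTER (size), freelist);
	mono_mutex_unlock (&valloc_mutex);
}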
Example #7
static void mono_portability_iomap_event (MonoProfiler *prof, const char *report, const char *pathname, const char *new_pathname)
{
	guint32 hash, pathnameHash;
	MismatchedFilesStats *stats;

	if (!runtime_initialized)
		return;

	mono_mutex_lock (&mismatched_files_section);
	hash = calc_strings_hash (pathname, new_pathname, &pathnameHash);
	stats = (MismatchedFilesStats*)g_hash_table_lookup (prof->mismatched_files_hash, &hash);
	if (stats == NULL) {
		guint32 *hashptr;

		stats = (MismatchedFilesStats*) g_malloc (sizeof (MismatchedFilesStats));
		stats->count = 1;
		stats->requestedName = g_strdup (pathname);
		stats->actualName = g_strdup (new_pathname);
		hashptr = (guint32*)g_malloc (sizeof (guint32));
		if (hashptr) {
			*hashptr = hash;
			g_hash_table_insert (prof->mismatched_files_hash, (gpointer)hashptr, stats);
		} else
			g_error ("Out of memory allocating integer pointer for mismatched files hash table.");

		store_string_location (prof, (const gchar*)stats->requestedName, pathnameHash, strlen (stats->requestedName));
		mono_mutex_unlock (&mismatched_files_section);

		print_report ("%s -     Found file path: '%s'\n", report, new_pathname);
	} else {
		/* Increment while still holding the lock; other threads can reach
		 * this entry concurrently through the hash table. */
		stats->count++;
		mono_mutex_unlock (&mismatched_files_section);
	}
}
Example #8
/**
 * mono_conc_hashtable_insert:
 *
 * Returns: the previous value if @key was already present, or NULL if it was
 * newly inserted.
 */
gpointer
mono_conc_hashtable_insert (MonoConcurrentHashTable *hash_table, gpointer key, gpointer value)
{
	conc_table *table;
	key_value_pair *kvs;
	int hash, i, table_mask;

	g_assert (key != NULL && key != TOMBSTONE);
	g_assert (value != NULL);

	hash = mix_hash (hash_table->hash_func (key));
	mono_mutex_lock (hash_table->mutex);

	if (hash_table->element_count >= hash_table->overflow_count)
		expand_table (hash_table);

	table = (conc_table*)hash_table->table;
	kvs = table->kvs;
	table_mask = table->table_size - 1;
	i = hash & table_mask;

	if (!hash_table->equal_func) {
		for (;;) {
			if (!kvs [i].key || kvs [i].key == TOMBSTONE) {
				kvs [i].value = value;
				/* The write to values must happen after the write to keys */
				mono_memory_barrier ();
				kvs [i].key = key;
				++hash_table->element_count;
				mono_mutex_unlock (hash_table->mutex);
				return NULL;
			}
			if (key == kvs [i].key) {
				gpointer value = kvs [i].value;
				mono_mutex_unlock (hash_table->mutex);
				return value;
			}
			i = (i + 1) & table_mask;
		}
	} else {
		GEqualFunc equal = hash_table->equal_func;
		for (;;) {
			if (!kvs [i].key || kvs [i].key == TOMBSTONE) {
				kvs [i].value = value;
				/* The write to values must happen after the write to keys */
				mono_memory_barrier ();
				kvs [i].key = key;
				++hash_table->element_count;
				mono_mutex_unlock (hash_table->mutex);
				return NULL;
			}
			if (equal (key, kvs [i].key)) {
				gpointer value = kvs [i].value;
				mono_mutex_unlock (hash_table->mutex);
				return value;
			}
			i = (i + 1) & table_mask;
		}
	}
}
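The barrier between the value and key stores exists for the benefit of readers that probe the table without taking the mutex: once a reader observes a non-empty key, the matching value is guaranteed to be visible. A simplified reader sketch (illustrative only; the real lookup also has to cope with table replacement, and weakly ordered CPUs need a read barrier between the two loads):

static gpointer
lookup_sketch (key_value_pair *kvs, int table_mask, int start, gpointer key)
{
	/* caller computes start as mix_hash (hash_func (key)) & table_mask */
	int i = start;
	while (kvs [i].key) { /* an empty slot terminates the probe sequence */
		if (kvs [i].key == key) /* key was published after its value */
			return kvs [i].value;
		i = (i + 1) & table_mask; /* linear probing, skips tombstones too */
	}
	return NULL;
}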
Example #9
void
sgen_workers_enqueue_job (JobFunc func, void *data)
{
	int num_entries;
	JobQueueEntry *entry;

	if (!collection_needs_workers ()) {
		func (NULL, data);
		return;
	}

	g_assert (workers_state.data.gc_in_progress);

	entry = sgen_alloc_internal (INTERNAL_MEM_JOB_QUEUE_ENTRY);
	entry->func = func;
	entry->data = data;

	mono_mutex_lock (&workers_job_queue_mutex);
	entry->next = workers_job_queue;
	workers_job_queue = entry;
	num_entries = ++workers_job_queue_num_entries;
	++workers_num_jobs_enqueued;
	mono_mutex_unlock (&workers_job_queue_mutex);

	workers_wake_up (num_entries);
}
Example #10
static void
try_steal (MonoWSQ *local_wsq, gpointer *data, gboolean retry)
{
	int i;
	int ms;

	if (wsqs == NULL || data == NULL || *data != NULL)
		return;

	ms = 0;
	do {
		if (mono_runtime_is_shutting_down ())
			return;

		mono_mutex_lock (&wsqs_lock);
		for (i = 0; wsqs != NULL && i < wsqs->len; i++) {
			MonoWSQ *wsq;

			wsq = wsqs->pdata [i];
			if (wsq == local_wsq || mono_wsq_count (wsq) == 0)
				continue;
			mono_wsq_try_steal (wsqs->pdata [i], data, ms);
			if (*data != NULL) {
				mono_mutex_unlock (&wsqs_lock);
				return;
			}
		}
		mono_mutex_unlock (&wsqs_lock);
		ms += 10;
	} while (retry && ms < 11);
}
Example #11
static ThreadPoolDomain *
domain_get_next (ThreadPoolDomain *current)
{
	ThreadPoolDomain *tpdomain = NULL;
	guint len;

	mono_mutex_lock (&threadpool->domains_lock);
	len = threadpool->domains->len;
	if (len > 0) {
		guint i;
		gint current_idx = -1;
		if (current) {
			for (i = 0; i < len; ++i) {
				if (current == g_ptr_array_index (threadpool->domains, i)) {
					current_idx = i;
					break;
				}
			}
			g_assert (current_idx >= 0);
		}
		for (i = current_idx + 1; i < len + current_idx + 1; ++i) {
			ThreadPoolDomain *tmp = g_ptr_array_index (threadpool->domains, i % len);
			if (tmp->outstanding_request > 0) {
				tpdomain = tmp;
				tpdomain->outstanding_request --;
				g_assert (tpdomain->outstanding_request >= 0);
				break;
			}
		}
	}
	mono_mutex_unlock (&threadpool->domains_lock);
	return tpdomain;
}
Example #12
static gboolean
workers_dequeue_and_do_job (WorkerData *data)
{
	JobQueueEntry *entry;

	/*
	 * At this point the GC might not be running anymore.  We
	 * could have been woken up by a job that was then taken by
	 * another thread, after which the collection finished, so we
	 * first have to successfully dequeue a job before doing
	 * anything assuming that the collection is still ongoing.
	 */

	if (!workers_job_queue_num_entries)
		return FALSE;

	mono_mutex_lock (&workers_job_queue_mutex);
	entry = (JobQueueEntry*)workers_job_queue;
	if (entry) {
		workers_job_queue = entry->next;
		--workers_job_queue_num_entries;
	}
	mono_mutex_unlock (&workers_job_queue_mutex);

	if (!entry)
		return FALSE;

	g_assert (sgen_collection_is_parallel ());

	entry->func (data, entry->data);
	sgen_free_internal (entry, INTERNAL_MEM_JOB_QUEUE_ENTRY);
	return TRUE;
}
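Usage sketch (illustrative): given the race described in the comment above, an empty dequeue is the only reliable "nothing left to do" signal, so a worker simply drains until the function returns FALSE:

static void
worker_drain_jobs (WorkerData *data)
{
	while (workers_dequeue_and_do_job (data))
		;
	/* queue observed empty; go back to waiting for work */
}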
Example #13
void
ves_icall_System_IOSelector_Add (gpointer handle, MonoIOSelectorJob *job)
{
	ThreadPoolIOUpdate *update;

	g_assert (GPOINTER_TO_INT (handle) >= 0);

	g_assert ((job->operation == EVENT_IN) ^ (job->operation == EVENT_OUT));
	g_assert (job->callback);

	if (mono_runtime_is_shutting_down ())
		return;
	if (mono_domain_is_unloading (mono_object_domain (job)))
		return;

	mono_lazy_initialize (&io_status, initialize);

	mono_mutex_lock (&threadpool_io->updates_lock);

	update = update_get_new ();
	update->type = UPDATE_ADD;
	update->data.add.fd = GPOINTER_TO_INT (handle);
	update->data.add.job = job;
	mono_memory_barrier (); /* Ensure this is safely published before we wake up the selector */

	selector_thread_wakeup ();

	mono_mutex_unlock (&threadpool_io->updates_lock);
}
Example #14
static void
print_pool_info (ThreadPool *tp)
{

//	if (tp->tail - tp->head == 0)
//		return;

	g_print ("Pool status? %d\n", InterlockedCompareExchange (&tp->pool_status, 0, 0));
	g_print ("Min. threads: %d\n", InterlockedCompareExchange (&tp->min_threads, 0, 0));
	g_print ("Max. threads: %d\n", InterlockedCompareExchange (&tp->max_threads, 0, 0));
	g_print ("nthreads: %d\n", InterlockedCompareExchange (&tp->nthreads, 0, 0));
	g_print ("busy threads: %d\n", InterlockedCompareExchange (&tp->busy_threads, 0, 0));
	g_print ("Waiting: %d\n", InterlockedCompareExchange (&tp->waiting, 0, 0));
	g_print ("Queued: %d\n", (tp->tail - tp->head));
	if (tp == &async_tp) {
		int i;
		mono_mutex_lock (&wsqs_lock);
		for (i = 0; i < wsqs->len; i++) {
			g_print ("\tWSQ %d: %d\n", i, mono_wsq_count (g_ptr_array_index (wsqs, i)));
		}
		mono_mutex_unlock (&wsqs_lock);
	} else {
		g_print ("\tSockets: %d\n", mono_g_hash_table_size (socket_io_data.sock_to_state));
	}
	g_print ("-------------\n");
}
Example #15
/**
 * mono_gc_get_managed_allocator_by_type:
 *
 *   Return a managed allocator method corresponding to allocator type ATYPE.
 */
MonoMethod*
mono_gc_get_managed_allocator_by_type (int atype)
{
	int offset = -1;
	MonoMethod *res;
	MONO_THREAD_VAR_OFFSET (GC_thread_tls, offset);

	mono_tls_key_set_offset (TLS_KEY_BOEHM_GC_THREAD, offset);

	res = alloc_method_cache [atype];
	if (res)
		return res;

	res = create_allocator (atype, TLS_KEY_BOEHM_GC_THREAD);
	mono_mutex_lock (&mono_gc_lock);
	if (alloc_method_cache [atype]) {
		mono_free_method (res);
		res = alloc_method_cache [atype];
	} else {
		mono_memory_barrier ();
		alloc_method_cache [atype] = res;
	}
	mono_mutex_unlock (&mono_gc_lock);
	return res;
}
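The cache logic above is the "create outside the lock, install under it" pattern: creation can race, but the lock arbitrates which copy wins, and the barrier publishes the object before its pointer becomes visible to the unsynchronized fast path. A self-contained sketch with plain pthreads (illustrative names, not the Mono API):

#include <pthread.h>

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static void *cache_slot; /* read without the lock, written only under it */

static void *
get_or_create (void *(*create) (void), void (*destroy) (void *))
{
	void *res = cache_slot; /* unsynchronized fast path */
	if (res)
		return res;

	res = create (); /* may race with another creator */
	pthread_mutex_lock (&cache_lock);
	if (cache_slot) {
		destroy (res); /* lost the race; discard our copy */
		res = cache_slot;
	} else {
		__sync_synchronize (); /* publish the object before the pointer */
		cache_slot = res;
	}
	pthread_mutex_unlock (&cache_lock);
	return res;
}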
Example #16
/*
 * select/poll wake up when a socket is closed, but epoll just removes
 * the socket from its internal list without notification.
 */
void
mono_thread_pool_remove_socket (int sock)
{
	MonoMList *list;
	MonoSocketAsyncResult *state;
	MonoObject *ares;

	if (socket_io_data.inited == 0)
		return;

	mono_mutex_lock (&socket_io_data.io_lock);
	if (socket_io_data.sock_to_state == NULL) {
		mono_mutex_unlock (&socket_io_data.io_lock);
		return;
	}
	list = mono_g_hash_table_lookup (socket_io_data.sock_to_state, GINT_TO_POINTER (sock));
	if (list)
		mono_g_hash_table_remove (socket_io_data.sock_to_state, GINT_TO_POINTER (sock));
	mono_mutex_unlock (&socket_io_data.io_lock);
	
	while (list) {
		state = (MonoSocketAsyncResult *) mono_mlist_get_data (list);
		if (state->operation == AIO_OP_RECEIVE)
			state->operation = AIO_OP_RECV_JUST_CALLBACK;
		else if (state->operation == AIO_OP_SEND)
			state->operation = AIO_OP_SEND_JUST_CALLBACK;

		ares = get_io_event (&list, MONO_POLLIN);
		threadpool_append_job (&async_io_tp, ares);
		if (list) {
			ares = get_io_event (&list, MONO_POLLOUT);
			threadpool_append_job (&async_io_tp, ares);
		}
	}
}
Example #17
static void
socket_io_init (SocketIOData *data)
{
	int inited;

	if (data->inited >= 2) // 2 -> initialized, 3 -> cleaned up
		return;

	inited = InterlockedCompareExchange (&data->inited, 1, 0);
	if (inited >= 1) {
		while (TRUE) {
			if (data->inited >= 2)
				return;
			SleepEx (1, FALSE);
		}
	}

	mono_mutex_lock (&data->io_lock);
	data->sock_to_state = mono_g_hash_table_new_type (g_direct_hash, g_direct_equal, MONO_HASH_VALUE_GC);
#ifdef HAVE_EPOLL
	data->event_system = EPOLL_BACKEND;
#elif defined(USE_KQUEUE_FOR_THREADPOOL)
	data->event_system = KQUEUE_BACKEND;
#else
	data->event_system = POLL_BACKEND;
#endif
	if (g_getenv ("MONO_DISABLE_AIO") != NULL)
		data->event_system = POLL_BACKEND;

	init_event_system (data);
	mono_thread_create_internal (mono_get_root_domain (), data->wait, data, TRUE, SMALL_STACK);
	mono_mutex_unlock (&data->io_lock);
	data->inited = 2;
	threadpool_start_thread (&async_io_tp);
}
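The inited field implements a small state machine: 0 = untouched, 1 = being initialized, 2 = initialized, 3 = cleaned up. The compare-exchange elects exactly one initializer; everyone else spins until the state reaches 2. A self-contained sketch of that pattern (illustrative, using a GCC builtin in place of InterlockedCompareExchange):

#include <unistd.h>

static volatile int state; /* 0 untouched, 1 initializing, 2 ready, 3 torn down */

static void
init_once_sketch (void (*do_init) (void))
{
	if (state >= 2)
		return;
	/* CAS 0 -> 1: the builtin returns the old value, so nonzero means we lost */
	if (__sync_val_compare_and_swap (&state, 0, 1) != 0) {
		while (state < 2)
			usleep (1000); /* another thread is initializing; wait it out */
		return;
	}
	do_init ();
	state = 2; /* a production version would publish with a write barrier */
}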
Example #18
static gboolean
threadpool_start_thread (ThreadPool *tp)
{
	gint n;
	guint32 stack_size;
	MonoInternalThread *thread;

	stack_size = (!tp->is_io) ? 0 : SMALL_STACK;
	while (!mono_runtime_is_shutting_down () && (n = tp->nthreads) < tp->max_threads) {
		if (InterlockedCompareExchange (&tp->nthreads, n + 1, n) == n) {
#ifndef DISABLE_PERFCOUNTERS
			mono_perfcounter_update_value (tp->pc_nthreads, TRUE, 1);
#endif
			if (tp->is_io) {
				thread = mono_thread_create_internal (mono_get_root_domain (), tp->async_invoke, tp, TRUE, stack_size);
			} else {
				mono_mutex_lock (&threads_lock);
				thread = mono_thread_create_internal (mono_get_root_domain (), tp->async_invoke, tp, TRUE, stack_size);
				g_assert (threads != NULL);
				g_ptr_array_add (threads, thread);
				mono_mutex_unlock (&threads_lock);
			}
			return TRUE;
		}
	}

	return FALSE;
}
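The while/CAS pair above is a bounded atomic increment: read the counter, try to reserve slot n by swapping in n + 1, and retry if another thread moved the counter first. A generic sketch of just that reservation step:

static gint32
try_reserve_slot (volatile gint32 *counter, gint32 limit)
{
	gint32 n;
	while ((n = *counter) < limit) {
		if (InterlockedCompareExchange (counter, n + 1, n) == n)
			return n; /* slot n is ours */
	}
	return -1; /* already at the limit */
}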
Example #19
static void
remove_wsq (MonoWSQ *wsq)
{
	gpointer data;

	if (wsq == NULL)
		return;

	mono_mutex_lock (&wsqs_lock);
	if (wsqs == NULL) {
		mono_mutex_unlock (&wsqs_lock);
		return;
	}
	g_ptr_array_remove_fast (wsqs, wsq);
	data = NULL;
	/*
	 * Only clean this up when shutting down, any other case will error out
	 * if we're removing a queue that still has work items.
	 */
	if (mono_runtime_is_shutting_down ()) {
		while (mono_wsq_local_pop (&data)) {
			threadpool_jobs_dec (data);
			data = NULL;
		}
	}
	mono_wsq_destroy (wsq);
	mono_mutex_unlock (&wsqs_lock);
}
Example #20
/**
 * mono_counters_dump:
 * @section_mask: The sections to dump counters for
 * @outfile: a FILE to dump the results to
 *
 * Displays the current values of all the enabled counters that have been registered.
 * To filter by variance, OR one or more variance flags with the specific sections you want.
 * Use MONO_COUNTER_SECTION_MASK to dump all sections of a specific variance.
 */
void
mono_counters_dump (int section_mask, FILE *outfile)
{
	int i, j;
	int variance;
	section_mask &= valid_mask;

	if (!initialized)
		return;

	mono_mutex_lock (&counters_mutex);

	if (!counters) {
		mono_mutex_unlock (&counters_mutex);
		return;
	}

	variance = section_mask & MONO_COUNTER_VARIANCE_MASK;

	/* If no variance mask is supplied, we default to all kinds. */
	if (!variance)
		variance = MONO_COUNTER_VARIANCE_MASK;
	section_mask &= ~MONO_COUNTER_VARIANCE_MASK;

	for (j = 0, i = MONO_COUNTER_JIT; i < MONO_COUNTER_LAST_SECTION; j++, i <<= 1) {
		if ((section_mask & i) && (set_mask & i)) {
			fprintf (outfile, "\n%s statistics\n", section_names [j]);
			mono_counters_dump_section (i, variance, outfile);
		}
	}

	fflush (outfile);
	mono_mutex_unlock (&counters_mutex);
}
Example #21
static void
lock_section_queue (SgenSectionGrayQueue *queue)
{
	if (!queue->locked)
		return;

	mono_mutex_lock (&queue->lock);
}
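The matching unlock presumably mirrors the check (a sketch assuming the same locked flag; not copied from the sources):

static void
unlock_section_queue (SgenSectionGrayQueue *queue)
{
	if (!queue->locked)
		return;

	mono_mutex_unlock (&queue->lock);
}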
Example #22
void
sgen_thread_pool_wait_for_all_jobs (void)
{
	mono_mutex_lock (&lock);

	while (!sgen_pointer_queue_is_empty (&job_queue))
		mono_cond_wait (&done_cond, &lock);

	mono_mutex_unlock (&lock);
}
Example #23
static int
noshm_sem_lock (int sem)
{
	int ret;
	
	DEBUGLOG ("%s: locking nosem %d", __func__, sem);
	
	ret = mono_mutex_lock (&noshm_sems[sem]);
	
	return ret;
}
Example #24
void
sgen_thread_pool_job_wait (SgenThreadPoolJob *job)
{
	SGEN_ASSERT (0, job, "Where's the job?");

	mono_mutex_lock (&lock);

	while (find_job_in_queue (job) >= 0)
		mono_cond_wait (&done_cond, &lock);

	mono_mutex_unlock (&lock);
}
Example #25
void
mono_thread_small_id_free (int id)
{
	/* MonoBitSet operations are not atomic. */
	mono_mutex_lock (&small_id_mutex);

	g_assert (id >= 0 && id < small_id_table->size);
	g_assert (mono_bitset_test_fast (small_id_table, id));
	mono_bitset_clear_fast (small_id_table, id);

	mono_mutex_unlock (&small_id_mutex);
}
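A hedged sketch of the allocation counterpart (illustrative; the real allocator also has to grow the table when every bit is set, and this assumes a find-first-unset helper such as mono_bitset_find_first_unset):

static int
small_id_alloc_sketch (void)
{
	int id;

	mono_mutex_lock (&small_id_mutex);
	id = mono_bitset_find_first_unset (small_id_table, -1);
	g_assert (id >= 0 && id < small_id_table->size);
	mono_bitset_set_fast (small_id_table, id);
	mono_mutex_unlock (&small_id_mutex);

	return id;
}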
Example #26
void
sgen_thread_pool_idle_wait (void)
{
	SGEN_ASSERT (0, idle_job_func, "Why are we waiting for idle without an idle function?");

	mono_mutex_lock (&lock);

	while (continue_idle_job_func ())
		mono_cond_wait (&done_cond, &lock);

	mono_mutex_unlock (&lock);
}
Example #27
void
sgen_thread_pool_idle_signal (void)
{
	SGEN_ASSERT (0, idle_job_func, "Why are we signaling idle without an idle function?");

	mono_mutex_lock (&lock);

	if (continue_idle_job_func ())
		mono_cond_signal (&work_cond);

	mono_mutex_unlock (&lock);
}
Example #28
/**
 * mono_counters_on_register:
 * @callback: function to call whenever a counter is registered
 *
 * Adds a callback that will be invoked every time a counter is registered.
 */
void
mono_counters_on_register (MonoCounterRegisterCallback callback)
{
	if (!initialized) {
		g_warning ("counters not enabled");
		return;
	}

	mono_mutex_lock (&counters_mutex);
	register_callbacks = g_slist_append (register_callbacks, callback);
	mono_mutex_unlock (&counters_mutex);
}
Example #29
void
socket_io_cleanup (SocketIOData *data)
{
	mono_mutex_lock (&data->io_lock);
	if (data->inited != 2) {
		mono_mutex_unlock (&data->io_lock);
		return;
	}
	data->inited = 3;
	data->shutdown (data->event_data);
	mono_mutex_unlock (&data->io_lock);
}
Example #30
static gboolean
domain_remove (ThreadPoolDomain *tpdomain)
{
	gboolean res;

	g_assert (tpdomain);

	mono_mutex_lock (&threadpool->domains_lock);
	res = g_ptr_array_remove (threadpool->domains, tpdomain);
	mono_mutex_unlock (&threadpool->domains_lock);

	return res;
}