/*
 * ensure_cleanedup:
 *
 * Tear down the threadpool exactly once, coordinating with concurrent
 * initializers/cleaners through CAS transitions on the global |status|.
 * Safe to call from multiple threads: losers of the CAS races spin-yield
 * until the winner finishes and then return.
 */
static void
ensure_cleanedup (void)
{
	/* never initialized: jump straight to CLEANED_UP and we are done */
	if (status == STATUS_NOT_INITIALIZED && InterlockedCompareExchange (&status, STATUS_CLEANED_UP, STATUS_NOT_INITIALIZED) == STATUS_NOT_INITIALIZED)
		return;
	/* another thread is still initializing: wait for it to finish */
	if (status == STATUS_INITIALIZING) {
		while (status == STATUS_INITIALIZING)
			mono_thread_info_yield ();
	}
	if (status == STATUS_CLEANED_UP)
		return;
	/* someone else is (or just started) cleaning up: wait until they are done */
	if (status == STATUS_CLEANING_UP || InterlockedCompareExchange (&status, STATUS_CLEANING_UP, STATUS_INITIALIZED) != STATUS_INITIALIZED) {
		while (status == STATUS_CLEANING_UP)
			mono_thread_info_yield ();
		g_assert (status == STATUS_CLEANED_UP);
		return;
	}

	/* we make the assumption along the code that we are
	 * cleaning up only if the runtime is shutting down */
	g_assert (mono_runtime_is_shutting_down ());

	/* Unpark all worker threads */
	mono_mutex_lock (&threadpool->parked_threads_lock);
	for (;;) {
		guint i;
		ThreadPoolCounter counter = COUNTER_READ ();
		/* all workers have exited */
		if (counter._.active == 0 && counter._.parked == 0)
			break;
		if (counter._.active == 1) {
			MonoInternalThread *thread = mono_thread_internal_current ();
			if (thread->threadpool_thread) {
				/* if there is only one active thread
				 * left and it's the current one */
				break;
			}
		}
		/* wake every parked worker so it can observe shutdown and exit */
		for (i = 0; i < threadpool->parked_threads->len; ++i) {
			mono_cond_t *cond = (mono_cond_t*) g_ptr_array_index (threadpool->parked_threads, i);
			mono_cond_signal (cond);
		}
		/* drop the lock while sleeping so workers can unregister themselves */
		mono_mutex_unlock (&threadpool->parked_threads_lock);
		usleep (1000);
		mono_mutex_lock (&threadpool->parked_threads_lock);
	}
	mono_mutex_unlock (&threadpool->parked_threads_lock);

	/* wait for the monitor thread to notice shutdown and stop */
	while (monitor_status != MONITOR_STATUS_NOT_RUNNING)
		usleep (1000);

	/* all threads are gone; free every threadpool-owned resource */
	g_ptr_array_free (threadpool->domains, TRUE);
	mono_mutex_destroy (&threadpool->domains_lock);
	g_ptr_array_free (threadpool->parked_threads, TRUE);
	mono_mutex_destroy (&threadpool->parked_threads_lock);
	g_ptr_array_free (threadpool->working_threads, TRUE);
	mono_mutex_destroy (&threadpool->working_threads_lock);
	mono_mutex_destroy (&threadpool->heuristic_lock);
	g_free (threadpool->heuristic_hill_climbing.samples);
	g_free (threadpool->heuristic_hill_climbing.thread_counts);
	rand_free (threadpool->heuristic_hill_climbing.random_interval_generator);
	g_free (threadpool->cpu_usage_state);
	g_assert (threadpool);
	g_free (threadpool);
	threadpool = NULL;
	g_assert (!threadpool);

	/* publish completion so waiters in the spin loops above can return */
	status = STATUS_CLEANED_UP;
}
/*
 * mono_thread_info_suspend_unlock:
 *
 * Release the process-wide suspend lock.
 */
void
mono_thread_info_suspend_unlock (void)
{
	mono_mutex_unlock (&global_suspend_lock);
}
/*
 * mono_debugger_unlock:
 *
 * Release the debugger lock. Must only be called after the debugger
 * machinery has been initialized (asserted below).
 */
void
mono_debugger_unlock (void)
{
	g_assert (initialized);

	mono_mutex_unlock (&debugger_lock_mutex);
}
/*
 * Allocate a small thread id.
 *
 * Returns a small integer id that is currently unused, marks it as used in
 * the global bitset, and makes sure the hazard-pointer table has a zeroed
 * slot for it. Thread-safe: everything runs under small_id_mutex.
 *
 * FIXME: The biggest part of this function is very similar to
 * domain_id_alloc() in domain.c and should be merged.
 */
int
mono_thread_small_id_alloc (void)
{
	int i, id = -1;

	mono_mutex_lock (&small_id_mutex);

	/* lazily create the bitset on first use */
	if (!small_id_table)
		small_id_table = mono_bitset_new (1, 0);

	/* first search from where the last allocation left off, then wrap */
	id = mono_bitset_find_first_unset (small_id_table, small_id_next - 1);
	if (id == -1)
		id = mono_bitset_find_first_unset (small_id_table, -1);

	if (id == -1) {
		/* bitset is full: double its size (hard cap at 2^16 ids) */
		MonoBitSet *new_table;
		if (small_id_table->size * 2 >= (1 << 16))
			g_assert_not_reached ();
		new_table = mono_bitset_clone (small_id_table, small_id_table->size * 2);
		id = mono_bitset_find_first_unset (new_table, small_id_table->size - 1);
		mono_bitset_free (small_id_table);
		small_id_table = new_table;
	}

	g_assert (!mono_bitset_test_fast (small_id_table, id));
	mono_bitset_set_fast (small_id_table, id);

	/* remember where to start the next search */
	small_id_next++;
	if (small_id_next >= small_id_table->size)
		small_id_next = 0;

	g_assert (id < HAZARD_TABLE_MAX_SIZE);
	if (id >= hazard_table_size) {
#if MONO_SMALL_CONFIG
		/* small config: allocate the whole table up front */
		hazard_table = g_malloc0 (sizeof (MonoThreadHazardPointers) * HAZARD_TABLE_MAX_SIZE);
		hazard_table_size = HAZARD_TABLE_MAX_SIZE;
#else
		/*
		 * Grow the hazard table one page at a time. The full address
		 * range is reserved on first use; pages are made accessible
		 * lazily as higher ids get handed out, so the table base
		 * address never moves (readers never need to re-resolve it).
		 */
		gpointer page_addr;
		int pagesize = mono_pagesize ();
		int num_pages = (hazard_table_size * sizeof (MonoThreadHazardPointers) + pagesize - 1) / pagesize;

		if (hazard_table == NULL) {
			hazard_table = (MonoThreadHazardPointers *volatile) mono_valloc (NULL,
				sizeof (MonoThreadHazardPointers) * HAZARD_TABLE_MAX_SIZE,
				MONO_MMAP_NONE);
		}

		g_assert (hazard_table != NULL);
		page_addr = (guint8*)hazard_table + num_pages * pagesize;

		/* make the next page readable/writable */
		mono_mprotect (page_addr, pagesize, MONO_MMAP_READ | MONO_MMAP_WRITE);

		++num_pages;
		hazard_table_size = num_pages * pagesize / sizeof (MonoThreadHazardPointers);

#endif
		g_assert (id < hazard_table_size);
		/* fresh slot: clear all hazard pointers before the id escapes */
		for (i = 0; i < HAZARD_POINTER_COUNT; ++i)
			hazard_table [id].hazard_pointers [i] = NULL;
	}

	if (id > highest_small_id) {
		highest_small_id = id;
		/* ensure the cleared slot is visible before the new high-water
		 * mark: scanners iterate up to highest_small_id */
		mono_memory_write_barrier ();
	}

	mono_mutex_unlock (&small_id_mutex);

	return id;
}
/**
 * mono_conc_hashtable_remove:
 * @hash_table: the hashtable
 * @key: the key to remove; must not be NULL or TOMBSTONE
 *
 * Removes @key from the table, invoking the key/value destroy functions
 * (if set) on the stored key and value after the table mutex is released.
 *
 * Returns: the value that was associated with @key, or NULL if the key
 * was not present. Note the returned pointer may already have been freed
 * if a value_destroy_func is installed.
 */
gpointer
mono_conc_hashtable_remove (MonoConcurrentHashTable *hash_table, gpointer key)
{
	conc_table *table;
	key_value_pair *kvs;
	int hash, i, table_mask;

	g_assert (key != NULL && key != TOMBSTONE);

	hash = mix_hash (hash_table->hash_func (key));
	mono_mutex_lock (hash_table->mutex);

	table = (conc_table*)hash_table->table;
	kvs = table->kvs;
	table_mask = table->table_size - 1;
	i = hash & table_mask;

	if (!hash_table->equal_func) {
		/* pointer-identity comparison; linear probing */
		for (;;) {
			if (!kvs [i].key) {
				mono_mutex_unlock (hash_table->mutex);
				return NULL; /*key not found*/
			}
			if (key == kvs [i].key) {
				gpointer value = kvs [i].value;
				/* clear the value before tombstoning the key so
				 * lock-free readers never see a stale value for a
				 * dead key; the barrier enforces the order */
				kvs [i].value = NULL;
				mono_memory_barrier ();
				kvs [i].key = TOMBSTONE;

				mono_mutex_unlock (hash_table->mutex);
				/* destroy callbacks run outside the lock */
				if (hash_table->key_destroy_func != NULL)
					(*hash_table->key_destroy_func) (key);
				if (hash_table->value_destroy_func != NULL)
					(*hash_table->value_destroy_func) (value);
				return value;
			}
			i = (i + 1) & table_mask;
		}
	} else {
		/* user-supplied equality; skip tombstoned slots */
		GEqualFunc equal = hash_table->equal_func;

		for (;;) {
			if (!kvs [i].key) {
				mono_mutex_unlock (hash_table->mutex);
				return NULL; /*key not found*/
			}
			if (kvs [i].key != TOMBSTONE && equal (key, kvs [i].key)) {
				/* destroy the key stored in the table, not the
				 * caller's key, since they may differ */
				gpointer old_key = kvs [i].key;
				gpointer value = kvs [i].value;
				/* same ordering protocol as above: value first,
				 * barrier, then tombstone the key */
				kvs [i].value = NULL;
				mono_memory_barrier ();
				kvs [i].key = TOMBSTONE;

				mono_mutex_unlock (hash_table->mutex);
				if (hash_table->key_destroy_func != NULL)
					(*hash_table->key_destroy_func) (old_key);
				if (hash_table->value_destroy_func != NULL)
					(*hash_table->value_destroy_func) (value);
				return value;
			}
			i = (i + 1) & table_mask;
		}
	}
}
/*
 * tp_kqueue_wait:
 * @p: the SocketIOData for this IO threadpool (passed as gpointer)
 *
 * kqueue-based IO wait loop. Blocks in kevent(), translates ready events
 * into async results queued on the IO threadpool, and re-arms one-shot
 * filters for sockets that still have pending interest. Runs until
 * kevent fails (other than EINTR) or cleanup is signalled via
 * socket_io_data->inited == 3.
 */
static void
tp_kqueue_wait (gpointer p)
{
	SocketIOData *socket_io_data;
	int kfd;
	struct kevent *events, *evt;
	int ready = 0, i;
	gpointer async_results [KQUEUE_NEVENTS * 2]; // * 2 because each loop can add up to 2 results here
	gint nresults;
	tp_kqueue_data *data;

	socket_io_data = p;
	data = socket_io_data->event_data;
	kfd = data->fd;
	events = g_new0 (struct kevent, KQUEUE_NEVENTS);

	while (1) {
		/* let the GC ignore this thread while it blocks in kevent */
		mono_gc_set_skip_thread (TRUE);

		do {
			/* on EINTR, give pending thread interruption a chance
			 * to run before retrying the wait */
			if (ready == -1) {
				check_for_interruption_critical ();
			}
			ready = kevent (kfd, NULL, 0, events, KQUEUE_NEVENTS, NULL);
		} while (ready == -1 && errno == EINTR);

		mono_gc_set_skip_thread (FALSE);

		if (ready == -1) {
			/* fatal wait error: EBADF is expected during shutdown
			 * (the kqueue fd was closed), so only warn otherwise */
			int err = errno;
			g_free (events);
			if (err != EBADF)
				g_warning ("kevent wait: %d %s", err, g_strerror (err));
			return;
		}

		mono_mutex_lock (&socket_io_data->io_lock);
		/* inited == 3 flags cleanup — presumably set by the IO
		 * shutdown path; confirm against the cleanup code */
		if (socket_io_data->inited == 3) {
			g_free (events);
			mono_mutex_unlock (&socket_io_data->io_lock);
			return; /* cleanup called */
		}

		nresults = 0;
		for (i = 0; i < ready; i++) {
			int fd;
			MonoMList *list;
			MonoObject *ares;

			evt = &events [i];
			fd = evt->ident;
			list = mono_g_hash_table_lookup (socket_io_data->sock_to_state, GINT_TO_POINTER (fd));
			/* EV_ERROR wakes both directions so errored waiters get
			 * completed rather than stuck */
			if (list != NULL && (evt->filter == EVFILT_READ || (evt->flags & EV_ERROR) != 0)) {
				ares = get_io_event (&list, MONO_POLLIN);
				if (ares != NULL)
					async_results [nresults++] = ares;
			}
			if (list != NULL && (evt->filter == EVFILT_WRITE || (evt->flags & EV_ERROR) != 0)) {
				ares = get_io_event (&list, MONO_POLLOUT);
				if (ares != NULL)
					async_results [nresults++] = ares;
			}

			if (list != NULL) {
				int p;

				/* store back the (possibly shortened) waiter list */
				mono_g_hash_table_replace (socket_io_data->sock_to_state, GINT_TO_POINTER (fd), list);
				p = get_events_from_list (list);
				/* filters were registered one-shot: re-add them for
				 * any direction that still has waiters */
				if (evt->filter == EVFILT_READ && (p & MONO_POLLIN) != 0) {
					EV_SET (evt, fd, EVFILT_READ, EV_ADD | EV_ENABLE | EV_ONESHOT, 0, 0, 0);
					kevent_change (kfd, evt, "READD read");
				}
				if (evt->filter == EVFILT_WRITE && (p & MONO_POLLOUT) != 0) {
					EV_SET (evt, fd, EVFILT_WRITE, EV_ADD | EV_ENABLE | EV_ONESHOT, 0, 0, 0);
					kevent_change (kfd, evt, "READD write");
				}
			} else {
				/* no waiters remain for this fd: drop its state */
				mono_g_hash_table_remove (socket_io_data->sock_to_state, GINT_TO_POINTER (fd));
			}
		}
		mono_mutex_unlock (&socket_io_data->io_lock);
		/* dispatch outside the lock, then clear the GC-visible slots */
		threadpool_append_jobs (&async_io_tp, (MonoObject **) async_results, nresults);
		mono_gc_bzero_aligned (async_results, sizeof (gpointer) * nresults);
	}
}