static void
mono_portability_iomap_event (MonoProfiler *prof, const char *report, const char *pathname, const char *new_pathname)
{
	guint32 hash, pathnameHash;
	MismatchedFilesStats *stats;

	if (!runtime_initialized)
		return;

	mono_os_mutex_lock (&mismatched_files_section);
	hash = calc_strings_hash (pathname, new_pathname, &pathnameHash);
	stats = (MismatchedFilesStats*)g_hash_table_lookup (prof->mismatched_files_hash, &hash);
	if (stats == NULL) {
		guint32 *hashptr;

		stats = (MismatchedFilesStats*) g_malloc (sizeof (MismatchedFilesStats));
		stats->count = 1;
		stats->requestedName = g_strdup (pathname);
		stats->actualName = g_strdup (new_pathname);
		hashptr = (guint32*)g_malloc (sizeof (guint32));
		if (hashptr) {
			*hashptr = hash;
			g_hash_table_insert (prof->mismatched_files_hash, (gpointer)hashptr, stats);
		} else
			g_error ("Out of memory allocating integer pointer for mismatched files hash table.");

		store_string_location (prof, (const gchar*)stats->requestedName, pathnameHash, strlen (stats->requestedName));
		mono_os_mutex_unlock (&mismatched_files_section);

		print_report ("%s - Found file path: '%s'\n", report, new_pathname);
	} else {
		/* Increment while still holding the lock so concurrent events
		 * don't race on the counter. */
		stats->count++;
		mono_os_mutex_unlock (&mismatched_files_section);
	}
}
/**
 * mono_counters_dump:
 * @section_mask: The sections to dump counters for
 * @outfile: a FILE to dump the results to
 *
 * Displays the counts of all the enabled counters registered.
 * To filter by variance, OR one or more variance flags with the specific
 * sections you want. Use MONO_COUNTER_SECTION_MASK to dump all sections
 * of a specific variance.
 */
void
mono_counters_dump (int section_mask, FILE *outfile)
{
	int i, j;
	int variance;

	section_mask &= valid_mask;

	if (!initialized)
		return;

	mono_os_mutex_lock (&counters_mutex);

	if (!counters) {
		mono_os_mutex_unlock (&counters_mutex);
		return;
	}

	variance = section_mask & MONO_COUNTER_VARIANCE_MASK;

	/* If no variance mask is supplied, we default to all kinds. */
	if (!variance)
		variance = MONO_COUNTER_VARIANCE_MASK;
	section_mask &= ~MONO_COUNTER_VARIANCE_MASK;

	for (j = 0, i = MONO_COUNTER_JIT; i < MONO_COUNTER_LAST_SECTION; j++, i <<= 1) {
		if ((section_mask & i) && (set_mask & i)) {
			fprintf (outfile, "\n%s statistics\n", section_names [j]);
			mono_counters_dump_section (i, variance, outfile);
		}
	}

	fflush (outfile);
	mono_os_mutex_unlock (&counters_mutex);
}
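/*
 * Usage sketch (illustrative, not part of this file): dump two sections
 * restricted to one variance. The flags shown are assumed to be the public
 * MONO_COUNTER_* constants from mono-counters.h.
 */
#if 0
static void
dump_jit_and_gc_counters (void)
{
	/* OR the sections of interest with a variance flag; omitting the
	 * variance dumps counters of every kind, as implemented above. */
	mono_counters_dump (MONO_COUNTER_JIT | MONO_COUNTER_GC | MONO_COUNTER_MONOTONIC, stdout);
}
#endif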
static void
register_internal (const char *name, int type, void *addr, int size)
{
	MonoCounter *counter;
	GSList *register_callback;

	g_assert (size >= 0);
	if ((type & MONO_COUNTER_VARIANCE_MASK) == 0)
		type |= MONO_COUNTER_MONOTONIC;

	mono_os_mutex_lock (&counters_mutex);

	for (counter = counters; counter; counter = counter->next) {
		if (counter->addr == addr) {
			g_warning ("you are registering the same counter address twice");
			mono_os_mutex_unlock (&counters_mutex);
			return;
		}
	}

	counter = (MonoCounter *) malloc (sizeof (MonoCounter));
	if (!counter) {
		mono_os_mutex_unlock (&counters_mutex);
		return;
	}
	counter->name = g_strdup (name);
	counter->type = type;
	counter->addr = addr;
	counter->next = NULL;
	counter->size = size;

	set_mask |= type;

	/* Append to the end of the list to preserve registration order. */
	if (counters) {
		MonoCounter *item = counters;
		while (item->next)
			item = item->next;
		item->next = counter;
	} else {
		counters = counter;
	}

	for (register_callback = register_callbacks; register_callback; register_callback = register_callback->next)
		((MonoCounterRegisterCallback)register_callback->data) (counter);

	mono_os_mutex_unlock (&counters_mutex);
}
void
sgen_finish_pinning (void)
{
	last_num_pinned = pin_queue.next_slot;
	sgen_pointer_queue_clear (&pin_queue);
	mono_os_mutex_unlock (&pin_queue_mutex);
}
static void
mono_portability_remember_string (MonoProfiler *prof, MonoDomain *domain, MonoString *str)
{
	SavedString *head, *entry;

	if (!str || !domain || !runtime_initialized)
		return;

	entry = (SavedString*)g_malloc0 (sizeof (SavedString));
	entry->string = str;
	entry->domain = domain;
	entry->stack_entries = mono_stack_backtrace (prof, domain, entry->stack, BACKTRACE_SIZE);
	if (entry->stack_entries == 0) {
		g_free (entry);
		return;
	}

	mono_os_mutex_lock (&mismatched_files_section);
	head = (SavedString*)g_hash_table_lookup (prof->saved_strings_hash, (gpointer)str);
	if (head) {
		while (head->next)
			head = head->next;
		head->next = entry;
	} else
		g_hash_table_insert (prof->saved_strings_hash, (gpointer)str, (gpointer)entry);
	mono_os_mutex_unlock (&mismatched_files_section);
}
static void*
codechunk_valloc (void *preferred, guint32 size)
{
	void *ptr;
	GSList *freelist;

	if (!valloc_freelists) {
		mono_os_mutex_init_recursive (&valloc_mutex);
		valloc_freelists = g_hash_table_new (NULL, NULL);
	}

	/*
	 * Keep a small freelist of memory blocks to decrease pressure on the
	 * kernel memory subsystem to avoid #3321.
	 */
	mono_os_mutex_lock (&valloc_mutex);
	freelist = (GSList *) g_hash_table_lookup (valloc_freelists, GUINT_TO_POINTER (size));
	if (freelist) {
		ptr = freelist->data;
		memset (ptr, 0, size);
		freelist = g_slist_delete_link (freelist, freelist);
		g_hash_table_insert (valloc_freelists, GUINT_TO_POINTER (size), freelist);
	} else {
		ptr = mono_valloc (preferred, size, MONO_PROT_RWX | ARCH_MAP_FLAGS, MONO_MEM_ACCOUNT_CODE);
		if (!ptr && preferred)
			ptr = mono_valloc (NULL, size, MONO_PROT_RWX | ARCH_MAP_FLAGS, MONO_MEM_ACCOUNT_CODE);
	}
	mono_os_mutex_unlock (&valloc_mutex);
	return ptr;
}
static void
mono_backtrace (int size)
{
	void *array [BACKTRACE_DEPTH];
	char **names;
	int i, symbols;
	static gboolean inited;

	if (!inited) {
		mono_os_mutex_init_recursive (&mempool_tracing_lock);
		inited = TRUE;
	}

	mono_os_mutex_lock (&mempool_tracing_lock);
	g_print ("Allocating %d bytes\n", size);
	MONO_ENTER_GC_SAFE;
	symbols = backtrace (array, BACKTRACE_DEPTH);
	names = backtrace_symbols (array, symbols);
	MONO_EXIT_GC_SAFE;
	for (i = 1; i < symbols; ++i) {
		g_print ("\t%s\n", names [i]);
	}
	g_free (names);
	mono_os_mutex_unlock (&mempool_tracing_lock);
}
static void
unlock_section_queue (SgenSectionGrayQueue *queue)
{
	if (!queue->locked)
		return;

	mono_os_mutex_unlock (&queue->lock);
}
GrayQueueEntry
sgen_gray_object_dequeue (SgenGrayQueue *queue, gboolean is_parallel)
{
	GrayQueueEntry entry;

	HEAVY_STAT (stat_gray_queue_dequeue_slow_path ++);

	if (sgen_gray_object_queue_is_empty (queue)) {
		entry.obj = NULL;
		return entry;
	}

	STATE_ASSERT (queue->first, GRAY_QUEUE_SECTION_STATE_ENQUEUED);
	SGEN_ASSERT (9, queue->cursor >= GRAY_FIRST_CURSOR_POSITION (queue->first), "gray queue %p underflow", queue);

	entry = *queue->cursor--;

#ifdef SGEN_HEAVY_BINARY_PROTOCOL
	binary_protocol_gray_dequeue (queue, queue->cursor + 1, entry.obj);
#endif

	if (G_UNLIKELY (queue->cursor < GRAY_FIRST_CURSOR_POSITION (queue->first))) {
		GrayQueueSection *section;
		gint32 old_num_sections = 0;

		if (is_parallel)
			old_num_sections = mono_atomic_dec_i32 (&queue->num_sections);
		else
			queue->num_sections--;

		if (is_parallel && old_num_sections <= 0) {
			mono_os_mutex_lock (&queue->steal_mutex);
		}

		section = queue->first;
		queue->first = section->next;
		if (queue->first) {
			queue->first->prev = NULL;
		} else {
			queue->last = NULL;
			SGEN_ASSERT (0, !old_num_sections, "Why do we have an inconsistent number of sections?");
		}
		section->next = queue->free_list;

		STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_ENQUEUED, GRAY_QUEUE_SECTION_STATE_FREE_LIST);

		queue->free_list = section;
		queue->cursor = queue->first ? queue->first->entries + queue->first->size - 1 : NULL;

		if (is_parallel && old_num_sections <= 0) {
			mono_os_mutex_unlock (&queue->steal_mutex);
		}
	}

	return entry;
}
static int
noshm_sem_unlock (int sem)
{
	int ret;

	DEBUGLOG ("%s: unlocking nosem %d", __func__, sem);

	ret = mono_os_mutex_unlock (&noshm_sems [sem]);

	return ret;
}
static int
mono_w32handle_unlock_signal_mutex (void)
{
#ifdef DEBUG
	g_message ("%s: unlock global signal mutex", __func__);
#endif

	mono_os_mutex_unlock (&global_signal_mutex);

	return 0;
}
static int
single_writer_single_reader (void)
{
	mono_mutex_t mutex;
	MonoConcurrentHashTable *h;
	int res = 0;

	mono_os_mutex_init (&mutex);
	h = mono_conc_hashtable_new (NULL, NULL);

	mono_os_mutex_lock (&mutex);
	mono_conc_hashtable_insert (h, GUINT_TO_POINTER (10), GUINT_TO_POINTER (20));
	mono_os_mutex_unlock (&mutex);

	mono_os_mutex_lock (&mutex);
	mono_conc_hashtable_insert (h, GUINT_TO_POINTER (30), GUINT_TO_POINTER (40));
	mono_os_mutex_unlock (&mutex);

	mono_os_mutex_lock (&mutex);
	mono_conc_hashtable_insert (h, GUINT_TO_POINTER (50), GUINT_TO_POINTER (60));
	mono_os_mutex_unlock (&mutex);

	mono_os_mutex_lock (&mutex);
	mono_conc_hashtable_insert (h, GUINT_TO_POINTER (2), GUINT_TO_POINTER (3));
	mono_os_mutex_unlock (&mutex);

	if (mono_conc_hashtable_lookup (h, GUINT_TO_POINTER (30)) != GUINT_TO_POINTER (40))
		res = 1;
	if (mono_conc_hashtable_lookup (h, GUINT_TO_POINTER (10)) != GUINT_TO_POINTER (20))
		res = 2;
	if (mono_conc_hashtable_lookup (h, GUINT_TO_POINTER (2)) != GUINT_TO_POINTER (3))
		res = 3;
	if (mono_conc_hashtable_lookup (h, GUINT_TO_POINTER (50)) != GUINT_TO_POINTER (60))
		res = 4;

	mono_conc_hashtable_destroy (h);
	mono_os_mutex_destroy (&mutex);

	if (res)
		printf ("SERIAL TEST FAILED %d\n", res);

	return res;
}
/**
 * mono_counters_on_register:
 * @callback: function to call when a counter is registered
 *
 * Add a callback that is going to be called when a counter is registered.
 */
void
mono_counters_on_register (MonoCounterRegisterCallback callback)
{
	if (!initialized) {
		g_debug ("counters not enabled");
		return;
	}

	mono_os_mutex_lock (&counters_mutex);
	register_callbacks = g_slist_append (register_callbacks, (gpointer) callback);
	mono_os_mutex_unlock (&counters_mutex);
}
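/*
 * Usage sketch (illustrative): log each counter as it gets registered. The
 * callback signature matches the invocation in register_internal above;
 * mono_counter_get_name is assumed to be the accessor from mono-counters.h.
 */
#if 0
static void
log_new_counter (MonoCounter *counter)
{
	g_debug ("registered counter: %s", mono_counter_get_name (counter));
}

static void
install_counter_logger (void)
{
	mono_counters_on_register (log_new_counter);
}
#endif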
/**
 * mono_counters_foreach:
 * @cb: The callback that will be called for each counter.
 * @user_data: Value passed as second argument of the callback.
 *
 * Iterate over all counters and call @cb for each one of them. Stop iterating
 * if the callback returns FALSE.
 */
void
mono_counters_foreach (CountersEnumCallback cb, gpointer user_data)
{
	MonoCounter *counter;

	if (!initialized) {
		g_debug ("counters not enabled");
		return;
	}

	mono_os_mutex_lock (&counters_mutex);

	for (counter = counters; counter; counter = counter->next) {
		if (!cb (counter, user_data)) {
			mono_os_mutex_unlock (&counters_mutex);
			return;
		}
	}

	mono_os_mutex_unlock (&counters_mutex);
}
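/*
 * Usage sketch (illustrative): stop the walk at the first counter whose name
 * matches, exercising the early-unlock path above. Returning FALSE from the
 * callback ends the iteration; mono_counter_get_name is assumed to be the
 * accessor from mono-counters.h.
 */
#if 0
#include <string.h>

static gboolean
find_counter_cb (MonoCounter *counter, gpointer user_data)
{
	const char *wanted = (const char *)user_data;
	/* Keep iterating until the names match. */
	return strcmp (mono_counter_get_name (counter), wanted) != 0;
}

static void
find_counter (const char *name)
{
	mono_counters_foreach (find_counter_cb, (gpointer) name);
}
#endif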
void
mono_thread_small_id_free (int id)
{
	/* MonoBitSet operations are not atomic. */
	mono_os_mutex_lock (&small_id_mutex);

	g_assert (id >= 0 && id < small_id_table->size);
	g_assert (mono_bitset_test_fast (small_id_table, id));
	mono_bitset_clear_fast (small_id_table, id);

	mono_os_mutex_unlock (&small_id_mutex);
}
static int
single_writer_parallel_reader (void)
{
	pthread_t a, b, c;
	gpointer ra, rb, rc;
	int i, res = 0;

	ra = rb = rc = GINT_TO_POINTER (1);

	mono_os_mutex_init (&global_mutex);
	hash = mono_conc_hashtable_new (NULL, NULL);

	pthread_create (&a, NULL, pr_sw_thread, GINT_TO_POINTER (0));
	pthread_create (&b, NULL, pr_sw_thread, GINT_TO_POINTER (1));
	pthread_create (&c, NULL, pr_sw_thread, GINT_TO_POINTER (2));

	for (i = 0; i < 100; ++i) {
		mono_os_mutex_lock (&global_mutex);
		mono_conc_hashtable_insert (hash, GINT_TO_POINTER (i + 0 + 1), GINT_TO_POINTER ((i + 0) * 2 + 1));
		mono_os_mutex_unlock (&global_mutex);

		mono_os_mutex_lock (&global_mutex);
		mono_conc_hashtable_insert (hash, GINT_TO_POINTER (i + 100 + 1), GINT_TO_POINTER ((i + 100) * 2 + 1));
		mono_os_mutex_unlock (&global_mutex);

		mono_os_mutex_lock (&global_mutex);
		mono_conc_hashtable_insert (hash, GINT_TO_POINTER (i + 200 + 1), GINT_TO_POINTER ((i + 200) * 2 + 1));
		mono_os_mutex_unlock (&global_mutex);
	}

	pthread_join (a, &ra);
	pthread_join (b, &rb);
	pthread_join (c, &rc);

	res = GPOINTER_TO_INT (ra) + GPOINTER_TO_INT (rb) + GPOINTER_TO_INT (rc);

	mono_conc_hashtable_destroy (hash);
	mono_os_mutex_destroy (&global_mutex);

	if (res)
		printf ("SINGLE_WRITER_PAR_READER TEST FAILED %d\n", res);

	return res;
}
static void*
pw_sr_thread (void *arg)
{
	int i, idx = 1000 * GPOINTER_TO_INT (arg);

	mono_thread_info_attach ();

	for (i = 0; i < 1000; ++i) {
		mono_os_mutex_lock (&global_mutex);
		mono_conc_hashtable_insert (hash, GINT_TO_POINTER (i + idx), GINT_TO_POINTER (i + 1));
		mono_os_mutex_unlock (&global_mutex);
	}
	return NULL;
}
void
sgen_scan_pin_queue_objects (ScanCopyContext ctx)
{
	int i;
	ScanObjectFunc scan_func = ctx.ops->scan_object;

	mono_os_mutex_lock (&pin_queue_mutex);
	for (i = 0; i < pin_queue_objs.next_slot; ++i) {
		GCObject *obj = (GCObject *)pin_queue_objs.data [i];
		scan_func (obj, sgen_obj_get_descriptor_safe (obj), ctx.queue);
	}
	mono_os_mutex_unlock (&pin_queue_mutex);
}
void
mono_os_event_reset (MonoOSEvent *event)
{
	g_assert (mono_lazy_is_initialized (&status));

	g_assert (event);

	mono_os_mutex_lock (&signal_mutex);

	event->signalled = FALSE;

	mono_os_mutex_unlock (&signal_mutex);
}
static void*
pw_pr_w_del_thread (void *arg)
{
	int i, idx = 1000 * GPOINTER_TO_INT (arg);

	mono_thread_info_attach ();

	for (i = idx; i < idx + 1000; i++) {
		mono_os_mutex_lock (&global_mutex);
		mono_conc_hashtable_remove (hash, GINT_TO_POINTER (i + 1));
		mono_os_mutex_unlock (&global_mutex);
	}
	return NULL;
}
static void
codechunk_vfree (void *ptr, guint32 size)
{
	GSList *freelist;

	mono_os_mutex_lock (&valloc_mutex);
	freelist = (GSList *) g_hash_table_lookup (valloc_freelists, GUINT_TO_POINTER (size));
	if (!freelist || g_slist_length (freelist) < VALLOC_FREELIST_SIZE) {
		freelist = g_slist_prepend (freelist, ptr);
		g_hash_table_insert (valloc_freelists, GUINT_TO_POINTER (size), freelist);
	} else {
		mono_vfree (ptr, size, MONO_MEM_ACCOUNT_CODE);
	}
	mono_os_mutex_unlock (&valloc_mutex);
}
GrayQueueSection*
sgen_gray_object_steal_section (SgenGrayQueue *queue)
{
	gint32 sections_remaining;
	GrayQueueSection *section = NULL;

	/*
	 * With each push/pop into the queue we increment the number of sections.
	 * There is only one thread accessing the top (the owner) and potentially
	 * multiple workers trying to steal sections from the bottom, so we need
	 * to lock. A num_sections decrement by the owner means that the first
	 * section is reserved, while a decrement by a stealer means that the
	 * last section is reserved. If, after we decrement num_sections, at
	 * least one more section is present, we can't race with the other
	 * thread. Otherwise, the stealing end abandons the pop and restores
	 * num_sections, while the owner end takes the lock to make sure it is
	 * not racing with a stealer (the stealer might have popped an entry and
	 * be in the process of updating the entry that the owner is trying to
	 * pop).
	 */
	if (queue->num_sections <= 1)
		return NULL;

	/* Give up if there is contention on the last section */
	if (mono_os_mutex_trylock (&queue->steal_mutex) != 0)
		return NULL;

	sections_remaining = mono_atomic_dec_i32 (&queue->num_sections);
	if (sections_remaining <= 0) {
		/* The section that we tried to steal might be the head of the queue. */
		mono_atomic_inc_i32 (&queue->num_sections);
	} else {
		/* We have reserved for us the tail section of the queue */
		section = queue->last;
		SGEN_ASSERT (0, section, "Why don't we have any sections to steal?");
		SGEN_ASSERT (0, !section->next, "Why aren't we stealing the tail?");
		queue->last = section->prev;
		section->prev = NULL;
		SGEN_ASSERT (0, queue->last, "Why are we stealing the last section?");
		queue->last->next = NULL;

		STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_ENQUEUED, GRAY_QUEUE_SECTION_STATE_FLOATING);
	}

	mono_os_mutex_unlock (&queue->steal_mutex);

	return section;
}
gpointer
mono_w32handle_new_fd (MonoW32HandleType type, int fd, gpointer handle_specific)
{
	MonoW32HandleBase *handle_data;
	int fd_index, fd_offset;

	g_assert (!shutting_down);

	g_assert (type_is_fd (type));

	if (fd >= mono_w32handle_fd_reserve) {
		mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_W32HANDLE, "%s: failed to create %s handle, fd is too big",
			__func__, mono_w32handle_ops_typename (type));

		return GUINT_TO_POINTER (INVALID_HANDLE_VALUE);
	}

	fd_index = SLOT_INDEX (fd);
	fd_offset = SLOT_OFFSET (fd);

	/* Initialize the array entries on demand */
	if (!private_handles [fd_index]) {
		mono_os_mutex_lock (&scan_mutex);

		if (!private_handles [fd_index])
			private_handles [fd_index] = g_new0 (MonoW32HandleBase, HANDLE_PER_SLOT);

		mono_os_mutex_unlock (&scan_mutex);
	}

	handle_data = &private_handles [fd_index][fd_offset];

	if (handle_data->type != MONO_W32HANDLE_UNUSED) {
		mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_W32HANDLE, "%s: failed to create %s handle, fd is already in use",
			__func__, mono_w32handle_ops_typename (type));
		/* FIXME: clean up this handle? We can't do anything
		 * with the fd, because that's the new one. */
		return GUINT_TO_POINTER (INVALID_HANDLE_VALUE);
	}

	mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_W32HANDLE, "%s: create %s handle %p",
		__func__, mono_w32handle_ops_typename (type), GUINT_TO_POINTER (fd));

	mono_w32handle_init_handle (handle_data, type, handle_specific);

	return GUINT_TO_POINTER (fd);
}
void
mono_os_event_set (MonoOSEvent *event)
{
	gsize i;

	g_assert (mono_lazy_is_initialized (&status));

	g_assert (event);

	mono_os_mutex_lock (&signal_mutex);

	event->signalled = TRUE;

	for (i = 0; i < event->conds->len; ++i)
		mono_os_cond_signal ((mono_cond_t*) event->conds->pdata [i]);

	mono_os_mutex_unlock (&signal_mutex);
}
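/*
 * Usage sketch (illustrative): flipping an event through its two states.
 * Only mono_os_event_set/mono_os_event_reset from this file are used;
 * mono_os_event_init (event, FALSE) is an assumed initializer from
 * mono-os-event.h, not something defined here.
 */
#if 0
static void
pulse_event (MonoOSEvent *event)
{
	mono_os_event_init (event, FALSE);	/* start unsignalled (assumed initializer) */
	mono_os_event_set (event);		/* signal every cond registered in event->conds */
	mono_os_event_reset (event);		/* return to the unsignalled state */
}
#endif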
gpointer
mono_w32handle_new (MonoW32HandleType type, gpointer handle_specific)
{
	guint32 handle_idx = 0;
	gpointer handle;

	g_assert (!shutting_down);

	g_assert (!type_is_fd (type));

	mono_os_mutex_lock (&scan_mutex);

	while ((handle_idx = mono_w32handle_new_internal (type, handle_specific)) == 0) {
		/* Try and expand the array, and have another go */
		int idx = SLOT_INDEX (private_handles_count);
		if (idx >= SLOT_MAX) {
			break;
		}

		private_handles [idx] = g_new0 (MonoW32HandleBase, HANDLE_PER_SLOT);

		private_handles_count += HANDLE_PER_SLOT;
		private_handles_slots_count ++;
	}

	mono_os_mutex_unlock (&scan_mutex);

	if (handle_idx == 0) {
		/* We ran out of slots */
		handle = INVALID_HANDLE_VALUE;
		mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_W32HANDLE, "%s: failed to create %s handle",
			__func__, mono_w32handle_ops_typename (type));
		goto done;
	}

	/* Make sure we left the space for fd mappings */
	g_assert (handle_idx >= mono_w32handle_fd_reserve);

	handle = GUINT_TO_POINTER (handle_idx);

	mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_W32HANDLE, "%s: create %s handle %p",
		__func__, mono_w32handle_ops_typename (type), handle);

done:
	return handle;
}
void
mono_w32handle_set_signal_state (gpointer handle, gboolean state, gboolean broadcast)
{
	MonoW32HandleBase *handle_data;

	if (!mono_w32handle_lookup_data (handle, &handle_data)) {
		return;
	}

#ifdef DEBUG
	g_message ("%s: setting state of %p to %s (broadcast %s)", __func__,
		handle, state ? "TRUE" : "FALSE", broadcast ? "TRUE" : "FALSE");
#endif

	if (state == TRUE) {
		/* Tell everyone blocking on a single handle */

		/* The condition the global signal cond is waiting on is the
		 * signalling of _any_ handle. So lock it before setting the
		 * signalled state.
		 */
		mono_os_mutex_lock (&global_signal_mutex);

		/* This function _must_ be called with
		 * handle->signal_mutex locked.
		 */
		handle_data->signalled = state;

		if (broadcast == TRUE) {
			mono_os_cond_broadcast (&handle_data->signal_cond);
		} else {
			mono_os_cond_signal (&handle_data->signal_cond);
		}

		/* Tell everyone blocking on multiple handles that something
		 * was signalled */
		mono_os_cond_broadcast (&global_signal_cond);
		mono_os_mutex_unlock (&global_signal_mutex);
	} else {
		handle_data->signalled = state;
	}
}
void
mono_w32handle_foreach (gboolean (*on_each)(gpointer handle, gpointer data, gpointer user_data), gpointer user_data)
{
	guint32 i, k;

	mono_os_mutex_lock (&scan_mutex);

	for (i = SLOT_INDEX (0); i < private_handles_slots_count; i++) {
		if (!private_handles [i])
			continue;
		for (k = SLOT_OFFSET (0); k < HANDLE_PER_SLOT; k++) {
			MonoW32HandleBase *handle_data = NULL;
			gpointer handle;
			gboolean destroy, finished;

			handle_data = &private_handles [i][k];
			if (handle_data->type == MONO_W32HANDLE_UNUSED)
				continue;

			handle = GUINT_TO_POINTER (i * HANDLE_PER_SLOT + k);

			if (!mono_w32handle_ref_core (handle, handle_data)) {
				/* we are racing with mono_w32handle_unref:
				 * the handle ref has been decremented, but it
				 * hasn't yet been destroyed. */
				continue;
			}

			finished = on_each (handle, handle_data->specific, user_data);

			/* we do not want to have to destroy the handle here,
			 * as that would mean the ref/unref are unbalanced */
			destroy = mono_w32handle_unref_core (handle, handle_data, 2);
			g_assert (!destroy);

			if (finished)
				goto done;
		}
	}

done:
	mono_os_mutex_unlock (&scan_mutex);
}
int
mono_w32handle_unlock_handle (gpointer handle)
{
	MonoW32HandleBase *handle_data;

#ifdef DEBUG
	g_message ("%s: unlocking handle %p", __func__, handle);
#endif

	if (!mono_w32handle_lookup_data (handle, &handle_data)) {
		return 0;
	}

	mono_os_mutex_unlock (&handle_data->signal_mutex);

	mono_w32handle_unref (handle);

	return 0;
}
/**
 * mono_counters_cleanup:
 *
 * Perform any needed cleanup at process exit.
 */
void
mono_counters_cleanup (void)
{
	MonoCounter *counter;

	if (!initialized)
		return;

	mono_os_mutex_lock (&counters_mutex);

	counter = counters;
	counters = NULL;
	while (counter) {
		MonoCounter *tmp = counter;
		counter = counter->next;
		free ((void*)tmp->name);
		free (tmp);
	}

	mono_os_mutex_unlock (&counters_mutex);
}
/*
 * Allocate a small thread id.
 *
 * FIXME: The biggest part of this function is very similar to
 * domain_id_alloc() in domain.c and should be merged.
 */
int
mono_thread_small_id_alloc (void)
{
	int i, id = -1;

	mono_os_mutex_lock (&small_id_mutex);

	if (!small_id_table)
		small_id_table = mono_bitset_new (1, 0);

	id = mono_bitset_find_first_unset (small_id_table, small_id_next - 1);
	if (id == -1)
		id = mono_bitset_find_first_unset (small_id_table, -1);

	if (id == -1) {
		MonoBitSet *new_table;
		if (small_id_table->size * 2 >= (1 << 16))
			g_assert_not_reached ();
		new_table = mono_bitset_clone (small_id_table, small_id_table->size * 2);
		id = mono_bitset_find_first_unset (new_table, small_id_table->size - 1);

		mono_bitset_free (small_id_table);
		small_id_table = new_table;
	}

	g_assert (!mono_bitset_test_fast (small_id_table, id));
	mono_bitset_set_fast (small_id_table, id);

	small_id_next++;
	if (small_id_next >= small_id_table->size)
		small_id_next = 0;

	g_assert (id < HAZARD_TABLE_MAX_SIZE);
	if (id >= hazard_table_size) {
#if MONO_SMALL_CONFIG
		hazard_table = g_malloc0 (sizeof (MonoThreadHazardPointers) * HAZARD_TABLE_MAX_SIZE);
		hazard_table_size = HAZARD_TABLE_MAX_SIZE;
#else
		gpointer page_addr;
#if defined(__PASE__)
		/*
		 * HACK: allocating the table with no protection causes
		 * IBM i 7.1 to segfault when accessing or protecting it.
		 */
		int table_prot = MONO_MMAP_READ | MONO_MMAP_WRITE;
#else
		int table_prot = MONO_MMAP_NONE;
#endif
		int pagesize = mono_pagesize ();
		int num_pages = (hazard_table_size * sizeof (MonoThreadHazardPointers) + pagesize - 1) / pagesize;

		if (hazard_table == NULL) {
			hazard_table = (MonoThreadHazardPointers *volatile) mono_valloc (NULL,
				sizeof (MonoThreadHazardPointers) * HAZARD_TABLE_MAX_SIZE,
				table_prot, MONO_MEM_ACCOUNT_HAZARD_POINTERS);
		}

		g_assert (hazard_table != NULL);
		page_addr = (guint8*)hazard_table + num_pages * pagesize;

		mono_mprotect (page_addr, pagesize, MONO_MMAP_READ | MONO_MMAP_WRITE);

		++num_pages;
		hazard_table_size = num_pages * pagesize / sizeof (MonoThreadHazardPointers);
#endif
		g_assert (id < hazard_table_size);
		for (i = 0; i < HAZARD_POINTER_COUNT; ++i)
			hazard_table [id].hazard_pointers [i] = NULL;
	}

	if (id > highest_small_id) {
		highest_small_id = id;
		mono_memory_write_barrier ();
	}

	mono_os_mutex_unlock (&small_id_mutex);

	return id;
}
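/*
 * Usage sketch (illustrative): the allocator above pairs with
 * mono_thread_small_id_free earlier in this file. Each attached thread
 * holds one small id for its lifetime, and hazard_table [id] tracks its
 * hazard pointers.
 */
#if 0
static void
small_id_lifetime_example (void)
{
	int small_id = mono_thread_small_id_alloc ();
	/* ... thread runs, publishing hazard pointers under small_id ... */
	mono_thread_small_id_free (small_id);
}
#endif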