void
sgen_thread_pool_idle_wait (void)
{
	SGEN_ASSERT (0, idle_job_func, "Why are we waiting for idle without an idle function?");

	mono_mutex_lock (&lock);
	while (continue_idle_job_func ())
		mono_cond_wait (&done_cond, &lock);
	mono_mutex_unlock (&lock);
}
static void
sgen_memgov_calculate_minor_collection_allowance (void)
{
	size_t new_major, new_heap_size, allowance_target, allowance;
	size_t decrease;

	if (!need_calculate_minor_collection_allowance)
		return;

	SGEN_ASSERT (0, major_collector.have_swept (), "Can only calculate allowance if heap is swept");

	new_major = major_collector.get_bytes_survived_last_sweep ();
	new_heap_size = new_major + last_collection_los_memory_usage;

	/*
	 * We allow the heap to grow by one third of its current size before we start the
	 * next major collection.
	 */
	allowance_target = new_heap_size * SGEN_DEFAULT_ALLOWANCE_HEAP_SIZE_RATIO;

	allowance = MAX (allowance_target, MIN_MINOR_COLLECTION_ALLOWANCE);

	/*
	 * For the concurrent collector, we decrease the allowance relative to the memory
	 * growth during the M&S phase, the survival rate of the collection, and the
	 * allowance ratio.
	 */
	decrease = (major_pre_sweep_heap_size - major_start_heap_size) * ((float)new_heap_size / major_pre_sweep_heap_size) * (SGEN_DEFAULT_ALLOWANCE_HEAP_SIZE_RATIO + 1);
	if (decrease > allowance)
		decrease = allowance;
	allowance -= decrease;

	if (new_heap_size + allowance > soft_heap_limit) {
		if (new_heap_size > soft_heap_limit)
			allowance = MIN_MINOR_COLLECTION_ALLOWANCE;
		else
			allowance = MAX (soft_heap_limit - new_heap_size, MIN_MINOR_COLLECTION_ALLOWANCE);
	}

	/* FIXME: Why is this here? */
	if (major_collector.free_swept_blocks)
		major_collector.free_swept_blocks (allowance);

	major_collection_trigger_size = new_heap_size + allowance;

	need_calculate_minor_collection_allowance = FALSE;

	if (debug_print_allowance) {
		SGEN_LOG (0, "Surviving sweep: %ld bytes (%ld major, %ld LOS)", (long)new_heap_size, (long)new_major, (long)last_collection_los_memory_usage);
		SGEN_LOG (0, "Allowance: %ld bytes", (long)allowance);
		SGEN_LOG (0, "Trigger size: %ld bytes", (long)major_collection_trigger_size);
	}
}
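A compilable sketch of the allowance arithmetic above, with made-up heap numbers and stand-in constants (RATIO, MIN_ALLOWANCE and SOFT_LIMIT are placeholders, not SGen's real values); the concurrent-collector decrease step is omitted for brevity:

#include <stdio.h>
#include <stddef.h>

#define RATIO          0.33              /* stand-in for SGEN_DEFAULT_ALLOWANCE_HEAP_SIZE_RATIO */
#define MIN_ALLOWANCE  ((size_t)4 << 20) /* stand-in for MIN_MINOR_COLLECTION_ALLOWANCE: 4 MB */
#define SOFT_LIMIT     ((size_t)512 << 20)

int
main (void)
{
	size_t new_major = (size_t)90 << 20;  /* bytes surviving the last sweep */
	size_t los = (size_t)10 << 20;        /* LOS usage at the last collection */
	size_t new_heap_size = new_major + los;

	/* Grow by roughly one third of the current heap before the next major. */
	size_t allowance = (size_t)(new_heap_size * RATIO);
	if (allowance < MIN_ALLOWANCE)
		allowance = MIN_ALLOWANCE;

	/* Clamp against the soft heap limit, as in the function above. */
	if (new_heap_size + allowance > SOFT_LIMIT) {
		if (new_heap_size > SOFT_LIMIT)
			allowance = MIN_ALLOWANCE;
		else
			allowance = (SOFT_LIMIT - new_heap_size > MIN_ALLOWANCE) ? SOFT_LIMIT - new_heap_size : MIN_ALLOWANCE;
	}

	printf ("heap %zu MB -> allowance %zu MB, trigger at %zu MB\n",
	        new_heap_size >> 20, allowance >> 20, (new_heap_size + allowance) >> 20);
	return 0;
}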
void*
sgen_nursery_alloc (size_t size)
{
	SGEN_ASSERT (1, size >= (SGEN_CLIENT_MINIMUM_OBJECT_SIZE + CANARY_SIZE) && size <= (SGEN_MAX_SMALL_OBJ_SIZE + CANARY_SIZE), "Invalid nursery object size");

	SGEN_LOG (4, "Searching nursery for size: %zd", size);
	size = SGEN_ALIGN_UP (size);

	HEAVY_STAT (++stat_nursery_alloc_requests);

	return sgen_fragment_allocator_par_alloc (&mutator_allocator, size);
}
void*
sgen_nursery_alloc (size_t size)
{
	SGEN_ASSERT (1, size >= sizeof (MonoObject) && size <= SGEN_MAX_SMALL_OBJ_SIZE, "Invalid nursery object size");

	SGEN_LOG (4, "Searching nursery for size: %zd", size);
	size = SGEN_ALIGN_UP (size);

	HEAVY_STAT (InterlockedIncrement (&stat_nursery_alloc_requests));

	return sgen_fragment_allocator_par_alloc (&mutator_allocator, size);
}
gboolean
sgen_memgov_try_alloc_space (mword size, int space)
{
	if (sgen_memgov_available_free_space () < size) {
		SGEN_ASSERT (4, !sgen_thread_pool_is_thread_pool_thread (mono_native_thread_id_get ()), "Memory shouldn't run out in worker thread");
		return FALSE;
	}

	SGEN_ATOMIC_ADD_P (allocated_heap, size);
	sgen_client_total_allocated_heap_changed (allocated_heap);
	return TRUE;
}
static char*
filename_for_index (int index)
{
	char *filename;

	SGEN_ASSERT (0, file_size_limit > 0, "Indexed binary protocol filename must only be used with file size limit");

	filename = (char *)sgen_alloc_internal_dynamic (strlen (filename_or_prefix) + 32, INTERNAL_MEM_BINARY_PROTOCOL, TRUE);
	sprintf (filename, "%s.%d", filename_or_prefix, index);

	return filename;
}
void
sgen_thread_pool_job_wait (SgenThreadPoolJob *job)
{
	SGEN_ASSERT (0, job, "Where's the job?");

	mono_mutex_lock (&lock);
	while (find_job_in_queue (job) >= 0)
		mono_cond_wait (&done_cond, &lock);
	mono_mutex_unlock (&lock);
}
void
sgen_thread_pool_idle_signal (void)
{
	SGEN_ASSERT (0, idle_job_func, "Why are we signaling idle without an idle function?");

	mono_mutex_lock (&lock);
	if (continue_idle_job_func ())
		mono_cond_signal (&work_cond);
	mono_mutex_unlock (&lock);
}
/*
 * Mark a given range of memory as invalid.
 *
 * This can be done either by zeroing memory or by placing
 * a phony byte[] array. This keeps the heap forward walkable.
 *
 * This function ignores calls with a zero range, even if
 * both start and end are NULL.
 */
void
sgen_clear_range (char *start, char *end)
{
	size_t size = end - start;

	if ((start && !end) || (start > end))
		g_error ("Invalid range [%p %p]", start, end);

	if (sgen_client_array_fill_range (start, size)) {
		sgen_set_nursery_scan_start (start);
		SGEN_ASSERT (0, start + sgen_safe_object_get_size ((GCObject*)start) == end, "Array fill produced wrong size");
	}
}
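The phony byte[] trick in the comment above can be modeled stand-alone: write a fake array header whose length makes the object span the hole exactly, so a linear heap walk steps over it. Everything here (ToyHeader, toy_fill_range) is a simplified stand-in, not SGen's real object layout:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy object header: a tag plus a payload byte count. */
typedef struct {
	uint32_t tag;        /* 1 == fake byte[] filler */
	uint32_t num_bytes;  /* payload size, so object size = header + payload */
} ToyHeader;

static size_t
toy_object_size (const ToyHeader *h)
{
	return sizeof (ToyHeader) + h->num_bytes;
}

/* Fill [start, end) with one fake array object so the range stays walkable. */
static void
toy_fill_range (char *start, char *end)
{
	ToyHeader *h = (ToyHeader *)start;

	assert ((size_t)(end - start) >= sizeof (ToyHeader));
	h->tag = 1;
	h->num_bytes = (uint32_t)((end - start) - sizeof (ToyHeader));
	/* A heap walker can now step over the hole with:
	 * p += toy_object_size ((ToyHeader *)p); */
	assert (start + toy_object_size (h) == end);
}

int
main (void)
{
	static _Alignas (8) char heap [64];

	memset (heap, 0xAB, sizeof heap);
	toy_fill_range (heap + 8, heap + 40);
	printf ("filler spans %zu bytes\n", toy_object_size ((ToyHeader *)(heap + 8)));
	return 0;
}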
void
sgen_cement_iterate (IterateObjectCallbackFunc callback, void *callback_data)
{
	int i;
	for (i = 0; i < SGEN_CEMENT_HASH_SIZE; ++i) {
		if (!cement_hash [i].count)
			continue;

		SGEN_ASSERT (5, cement_hash [i].count >= SGEN_CEMENT_THRESHOLD, "Cementing hash inconsistent");

		callback (cement_hash [i].obj, 0, callback_data);
	}
}
gboolean
sgen_cement_lookup_or_register (GCObject *obj)
{
	guint hv;
	int i;
	CementHashEntry *hash = cement_hash;

	if (!cement_enabled)
		return FALSE;

	hv = sgen_aligned_addr_hash (obj);
	i = SGEN_CEMENT_HASH (hv);

	SGEN_ASSERT (5, sgen_ptr_in_nursery (obj), "Can only cement pointers to nursery objects");

	if (!hash [i].obj) {
		GCObject *old_obj;
		old_obj = InterlockedCompareExchangePointer ((gpointer*)&hash [i].obj, obj, NULL);
		/* Check if the slot was occupied by some other object */
		if (old_obj != NULL && old_obj != obj)
			return FALSE;
	} else if (hash [i].obj != obj) {
		return FALSE;
	}

	if (hash [i].count >= SGEN_CEMENT_THRESHOLD)
		return TRUE;

	if (InterlockedIncrement ((gint32*)&hash [i].count) == SGEN_CEMENT_THRESHOLD) {
		SGEN_ASSERT (9, sgen_get_current_collection_generation () >= 0, "We can only cement objects when we're in a collection pause.");
		SGEN_ASSERT (9, SGEN_OBJECT_IS_PINNED (obj), "Can only cement pinned objects");
		SGEN_CEMENT_OBJECT (obj);

		binary_protocol_cement (obj, (gpointer)SGEN_LOAD_VTABLE (obj), (int)sgen_safe_object_get_size (obj));
	}

	return FALSE;
}
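The register path above hinges on one CAS to claim a hash slot and an atomic increment to count hits, returning TRUE only on lookups after the threshold has been crossed. A reduced stand-alone model of that protocol, using C11 atomics instead of Mono's Interlocked wrappers (TABLE_SIZE and THRESHOLD are arbitrary here):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define TABLE_SIZE 8
#define THRESHOLD  3

typedef struct {
	_Atomic (void *) obj;
	atomic_int count;
} Slot;

static Slot table [TABLE_SIZE];

/* Returns true once obj has been seen THRESHOLD or more times. */
static bool
lookup_or_register (void *obj)
{
	size_t i = ((uintptr_t)obj >> 3) % TABLE_SIZE; /* crude aligned-address hash */
	void *expected = NULL;

	/* Claim an empty slot with a single CAS, or bail if another object owns it. */
	if (!atomic_compare_exchange_strong (&table [i].obj, &expected, obj)) {
		if (expected != obj)
			return false; /* hash collision: slot taken by a different object */
	}

	if (atomic_load (&table [i].count) >= THRESHOLD)
		return true;

	if (atomic_fetch_add (&table [i].count, 1) + 1 == THRESHOLD) {
		/* Threshold just crossed: this is where the original pins the
		 * object, marks it cemented and logs the binary-protocol entry. */
	}
	return false;
}

int
main (void)
{
	static int dummy;
	for (int n = 0; n < 5; ++n)
		printf ("round %d -> already cemented: %d\n", n, lookup_or_register (&dummy));
	return 0;
}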
static void
marker_idle_func (void *data_untyped)
{
	WorkerData *data = data_untyped;

	SGEN_ASSERT (0, continue_idle_func (), "Why are we called when we're not supposed to work?");
	SGEN_ASSERT (0, sgen_concurrent_collection_in_progress (), "The worker should only mark in concurrent collections.");

	if (workers_state == STATE_WORK_ENQUEUED) {
		set_state (STATE_WORK_ENQUEUED, STATE_WORKING);
		SGEN_ASSERT (0, workers_state != STATE_NOT_WORKING, "How did we get from WORK ENQUEUED to NOT WORKING?");
	}

	if (!sgen_gray_object_queue_is_empty (&data->private_gray_queue) || workers_get_work (data)) {
		ScanCopyContext ctx = CONTEXT_FROM_OBJECT_OPERATIONS (idle_func_object_ops, &data->private_gray_queue);

		SGEN_ASSERT (0, !sgen_gray_object_queue_is_empty (&data->private_gray_queue), "How is our gray queue empty if we just got work?");

		sgen_drain_gray_stack (ctx);
	} else {
		worker_try_finish ();
	}
}
void
sgen_pin_cemented_objects (void)
{
	int i;
	for (i = 0; i < SGEN_CEMENT_HASH_SIZE; ++i) {
		if (!cement_hash [i].count)
			continue;

		SGEN_ASSERT (5, cement_hash [i].count >= SGEN_CEMENT_THRESHOLD, "Cementing hash inconsistent");

		sgen_pin_stage_ptr (cement_hash [i].obj);
		/* FIXME: do pin stats if enabled */
	}
}
static mono_native_thread_return_t
workers_thread_func (void *data_untyped)
{
	WorkerData *data = data_untyped;
	SgenMajorCollector *major = sgen_get_major_collector ();

	mono_thread_info_register_small_id ();

	if (major->init_worker_thread)
		major->init_worker_thread (data->major_collector_data);

	init_private_gray_queue (data);

	for (;;) {
		gboolean did_work = FALSE;

		SGEN_ASSERT (0, sgen_get_current_collection_generation () != GENERATION_NURSERY, "Why are we doing work while there's a nursery collection happening?");

		while (workers_state.data.state == STATE_WORKING && workers_dequeue_and_do_job (data)) {
			did_work = TRUE;
			/* FIXME: maybe distribute the gray queue here? */
		}

		if (!did_work && (!sgen_gray_object_queue_is_empty (&data->private_gray_queue) || workers_get_work (data))) {
			SgenObjectOperations *ops = sgen_concurrent_collection_in_progress ()
				? &major->major_concurrent_ops
				: &major->major_ops;
			ScanCopyContext ctx = { ops->scan_object, NULL, &data->private_gray_queue };

			g_assert (!sgen_gray_object_queue_is_empty (&data->private_gray_queue));

			while (!sgen_drain_gray_stack (32, ctx)) {
				if (workers_state.data.state == STATE_NURSERY_COLLECTION)
					workers_wait ();
			}

			g_assert (sgen_gray_object_queue_is_empty (&data->private_gray_queue));

			init_private_gray_queue (data);

			did_work = TRUE;
		}

		if (!did_work)
			workers_wait ();
	}

	/* dummy return to make compilers happy */
	return NULL;
}
static void
workers_signal_enqueue_work (int num_wake_up, gboolean from_nursery_collection)
{
	State old_state = workers_state;
	State new_state = old_state;
	int i;
	gboolean did_set_state;

	SGEN_ASSERT (0, num_wake_up <= workers_num, "Cannot wake up more workers than are present");

	if (from_nursery_collection)
		assert_nursery_collection (old_state, FALSE);
	else
		assert_not_working (old_state);

	new_state.data.state = STATE_WORKING;
	new_state.data.num_posted = num_wake_up;

	did_set_state = set_state (old_state, new_state);
	SGEN_ASSERT (0, did_set_state, "Nobody else should be mutating the state");

	for (i = 0; i < num_wake_up; ++i)
		MONO_SEM_POST (&workers_waiting_sem);
}
void
sgen_thread_pool_init (int num_threads, SgenThreadPoolThreadInitFunc init_func, SgenThreadPoolIdleJobFunc idle_func, SgenThreadPoolContinueIdleJobFunc continue_idle_func, void **thread_datas)
{
	SGEN_ASSERT (0, num_threads == 1, "We only support 1 thread pool thread for now.");

	mono_mutex_init (&lock);
	mono_cond_init (&work_cond, NULL);
	mono_cond_init (&done_cond, NULL);

	thread_init_func = init_func;
	idle_job_func = idle_func;
	continue_idle_job_func = continue_idle_func;

	mono_native_thread_create (&thread, thread_func, thread_datas ? thread_datas [0] : NULL);
}
static const char*
description_for_type (int type)
{
	switch (type) {
	case INTERNAL_MEM_PIN_QUEUE: return "pin-queue";
	case INTERNAL_MEM_FRAGMENT: return "fragment";
	case INTERNAL_MEM_SECTION: return "section";
	case INTERNAL_MEM_SCAN_STARTS: return "scan-starts";
	case INTERNAL_MEM_FIN_TABLE: return "fin-table";
	case INTERNAL_MEM_FINALIZE_ENTRY: return "finalize-entry";
	case INTERNAL_MEM_FINALIZE_READY: return "finalize-ready";
	case INTERNAL_MEM_DISLINK_TABLE: return "dislink-table";
	case INTERNAL_MEM_DISLINK: return "dislink";
	case INTERNAL_MEM_ROOTS_TABLE: return "roots-table";
	case INTERNAL_MEM_ROOT_RECORD: return "root-record";
	case INTERNAL_MEM_STATISTICS: return "statistics";
	case INTERNAL_MEM_STAT_PINNED_CLASS: return "pinned-class";
	case INTERNAL_MEM_STAT_REMSET_CLASS: return "remset-class";
	case INTERNAL_MEM_GRAY_QUEUE: return "gray-queue";
	case INTERNAL_MEM_MS_TABLES: return "marksweep-tables";
	case INTERNAL_MEM_MS_BLOCK_INFO: return "marksweep-block-info";
	case INTERNAL_MEM_MS_BLOCK_INFO_SORT: return "marksweep-block-info-sort";
	case INTERNAL_MEM_WORKER_DATA: return "worker-data";
	case INTERNAL_MEM_THREAD_POOL_JOB: return "thread-pool-job";
	case INTERNAL_MEM_BRIDGE_DATA: return "bridge-data";
	case INTERNAL_MEM_OLD_BRIDGE_HASH_TABLE: return "old-bridge-hash-table";
	case INTERNAL_MEM_OLD_BRIDGE_HASH_TABLE_ENTRY: return "old-bridge-hash-table-entry";
	case INTERNAL_MEM_BRIDGE_HASH_TABLE: return "bridge-hash-table";
	case INTERNAL_MEM_BRIDGE_HASH_TABLE_ENTRY: return "bridge-hash-table-entry";
	case INTERNAL_MEM_TARJAN_BRIDGE_HASH_TABLE: return "tarjan-bridge-hash-table";
	case INTERNAL_MEM_TARJAN_BRIDGE_HASH_TABLE_ENTRY: return "tarjan-bridge-hash-table-entry";
	case INTERNAL_MEM_TARJAN_OBJ_BUCKET: return "tarjan-bridge-object-buckets";
	case INTERNAL_MEM_BRIDGE_ALIVE_HASH_TABLE: return "bridge-alive-hash-table";
	case INTERNAL_MEM_BRIDGE_ALIVE_HASH_TABLE_ENTRY: return "bridge-alive-hash-table-entry";
	case INTERNAL_MEM_BRIDGE_DEBUG: return "bridge-debug";
	case INTERNAL_MEM_TOGGLEREF_DATA: return "toggleref-data";
	case INTERNAL_MEM_CARDTABLE_MOD_UNION: return "cardtable-mod-union";
	case INTERNAL_MEM_BINARY_PROTOCOL: return "binary-protocol";
	case INTERNAL_MEM_TEMPORARY: return "temporary";
	case INTERNAL_MEM_LOG_ENTRY: return "log-entry";
	case INTERNAL_MEM_COMPLEX_DESCRIPTORS: return "complex-descriptors";
	default: {
		const char *description = sgen_client_description_for_internal_mem_type (type);
		SGEN_ASSERT (0, description, "Unknown internal mem type");
		return description;
	}
	}
}
/*
 * Can only be called if the workers are stopped.
 * If we're stopped, there are also no pending jobs.
 */
gboolean
sgen_workers_have_idle_work (void)
{
	int i;

	SGEN_ASSERT (0, forced_stop && sgen_workers_all_done (), "Checking for idle work should only happen if the workers are stopped.");

	if (!sgen_section_gray_queue_is_empty (&workers_distribute_gray_queue))
		return TRUE;

	for (i = 0; i < workers_num; ++i) {
		if (!sgen_gray_object_queue_is_empty (&workers_data [i].private_gray_queue))
			return TRUE;
	}

	return FALSE;
}
gboolean
sgen_cement_lookup (char *obj)
{
	int i = mono_aligned_addr_hash (obj) % SGEN_CEMENT_HASH_SIZE;

	SGEN_ASSERT (5, sgen_ptr_in_nursery (obj), "Looking up cementing for non-nursery objects makes no sense");

	if (!cement_enabled)
		return FALSE;

	if (!cement_hash [i].obj)
		return FALSE;
	if (cement_hash [i].obj != obj)
		return FALSE;

	return cement_hash [i].count >= SGEN_CEMENT_THRESHOLD;
}
static void
pin_from_hash (CementHashEntry *hash, gboolean has_been_reset)
{
	int i;
	for (i = 0; i < SGEN_CEMENT_HASH_SIZE; ++i) {
		if (!hash [i].count)
			continue;

		if (has_been_reset)
			SGEN_ASSERT (5, hash [i].count >= SGEN_CEMENT_THRESHOLD, "Cementing hash inconsistent");

		sgen_pin_stage_ptr (hash [i].obj);
		/* FIXME: do pin stats if enabled */

		SGEN_CEMENT_OBJECT (hash [i].obj);
	}
}
gboolean
sgen_cement_is_forced (GCObject *obj)
{
	guint hv = sgen_aligned_addr_hash (obj);
	int i = SGEN_CEMENT_HASH (hv);

	SGEN_ASSERT (5, sgen_ptr_in_nursery (obj), "Looking up cementing for non-nursery objects makes no sense");

	if (!cement_enabled)
		return FALSE;

	if (!cement_hash [i].obj)
		return FALSE;
	if (cement_hash [i].obj != obj)
		return FALSE;

	return cement_hash [i].forced;
}
void*
mono_gc_alloc_array (MonoVTable *vtable, size_t size, uintptr_t max_length, uintptr_t bounds_size)
{
	MonoArray *arr;
	MonoArrayBounds *bounds;
	TLAB_ACCESS_INIT;

	if (!SGEN_CAN_ALIGN_UP (size))
		return NULL;

#ifndef DISABLE_CRITICAL_REGION
	ENTER_CRITICAL_REGION;
	arr = mono_gc_try_alloc_obj_nolock (vtable, size);
	if (arr) {
		/* This doesn't require fencing since EXIT_CRITICAL_REGION already does it for us */
		arr->max_length = (mono_array_size_t)max_length;

		bounds = (MonoArrayBounds*)((char*)arr + size - bounds_size);
		arr->bounds = bounds;
		EXIT_CRITICAL_REGION;
		goto done;
	}
	EXIT_CRITICAL_REGION;
#endif

	LOCK_GC;

	arr = mono_gc_alloc_obj_nolock (vtable, size);
	if (G_UNLIKELY (!arr)) {
		UNLOCK_GC;
		return mono_gc_out_of_memory (size);
	}

	arr->max_length = (mono_array_size_t)max_length;

	bounds = (MonoArrayBounds*)((char*)arr + size - bounds_size);
	arr->bounds = bounds;

	UNLOCK_GC;

 done:
	SGEN_ASSERT (6, SGEN_ALIGN_UP (size) == SGEN_ALIGN_UP (sgen_par_object_get_size (vtable, (MonoObject*)arr)), "Array has incorrect size.");
	return arr;
}
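The shape of the allocator above — a lock-free fast path tried first, then a fall-back under the global GC lock — in miniature. The TLAB here is a toy and malloc stands in for mono_gc_alloc_obj_nolock; the real ENTER_CRITICAL_REGION/EXIT_CRITICAL_REGION macros additionally coordinate with stop-the-world:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t gc_lock = PTHREAD_MUTEX_INITIALIZER;

/* Toy thread-local allocation buffer: the lock-free fast path. */
static _Thread_local char tlab [256];
static _Thread_local size_t tlab_pos;

static void *
try_alloc_fast (size_t size)
{
	if (tlab_pos + size > sizeof tlab)
		return NULL; /* fast path failed; caller takes the slow path */
	void *p = &tlab [tlab_pos];
	tlab_pos += size;
	return p;
}

static void *
alloc (size_t size)
{
	void *p = try_alloc_fast (size);
	if (p)
		return p; /* no lock taken, like the critical region above */

	pthread_mutex_lock (&gc_lock);
	p = malloc (size); /* stands in for mono_gc_alloc_obj_nolock */
	pthread_mutex_unlock (&gc_lock);
	return p;
}

int
main (void)
{
	void *a = alloc (100);
	void *b = alloc (100);
	void *c = alloc (100); /* TLAB exhausted: goes through the lock */
	printf ("fast: %p %p, slow: %p\n", a, b, c);
	return 0;
}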
static void
assert_nursery_collection (State state, gboolean from_worker)
{
	SGEN_ASSERT (0, state.data.state == STATE_NURSERY_COLLECTION, "Must be in the nursery collection state");
	if (from_worker) {
		SGEN_ASSERT (0, state.data.num_awake > 0, "We're awake, but num_awake is zero");
		SGEN_ASSERT (0, state.data.post_done, "post_done must be set in the nursery collection state");
	}
	SGEN_ASSERT (0, state.data.num_awake <= workers_num, "There are too many worker threads awake");
	if (!state.data.post_done) {
		SGEN_ASSERT (0, state.data.num_awake == 0, "Once done has been posted no threads can be awake");
		SGEN_ASSERT (0, state.data.num_posted == 0, "Once done has been posted no thread must be awoken");
	}
}
/*
 * Flushing buffers takes an exclusive lock, so it must only be done when the world is
 * stopped, otherwise we might end up with a deadlock because a stopped thread owns the
 * lock.
 *
 * The protocol entries that do flush have `FLUSH()` in their definition.
 */
gboolean
binary_protocol_flush_buffers (gboolean force)
{
#ifdef HAVE_UNISTD_H
	int num_buffers = 0, i;
	BinaryProtocolBuffer *header;
	BinaryProtocolBuffer *buf;
	BinaryProtocolBuffer **bufs;

	if (binary_protocol_file == -1)
		return FALSE;

	if (!force && !try_lock_exclusive ())
		return FALSE;

	header = binary_protocol_buffers;
	for (buf = header; buf != NULL; buf = buf->next)
		++num_buffers;
	bufs = (BinaryProtocolBuffer **)sgen_alloc_internal_dynamic (num_buffers * sizeof (BinaryProtocolBuffer*), INTERNAL_MEM_BINARY_PROTOCOL, TRUE);
	for (buf = header, i = 0; buf != NULL; buf = buf->next, i++)
		bufs [i] = buf;
	SGEN_ASSERT (0, i == num_buffers, "Binary protocol buffer count error");

	/*
	 * This might be incorrect when forcing, but all bets are off in that case, anyway,
	 * because we're trying to figure out a bug in the debugger.
	 */
	binary_protocol_buffers = NULL;

	for (i = num_buffers - 1; i >= 0; --i) {
		binary_protocol_flush_buffer (bufs [i]);
		binary_protocol_check_file_overflow ();
	}

	/* Free the pointer array itself, not the already-flushed buffers. */
	sgen_free_internal_dynamic (bufs, num_buffers * sizeof (BinaryProtocolBuffer*), INTERNAL_MEM_BINARY_PROTOCOL);

	if (!force)
		unlock_exclusive ();

	return TRUE;
#endif
}
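Since the buffer list is prepended to (newest first), the function flushes by collecting the list into an array and walking it backwards, so entries land on disk in chronological order. That inversion in isolation, with toy buffers and no locking:

#include <stdio.h>
#include <stdlib.h>

typedef struct Buf {
	int id;           /* stands in for the buffer payload */
	struct Buf *next; /* list is newest-first, like binary_protocol_buffers */
} Buf;

int
main (void)
{
	/* Build a newest-first list: 2 -> 1 -> 0. */
	Buf *head = NULL;
	for (int id = 0; id < 3; ++id) {
		Buf *b = malloc (sizeof *b);
		b->id = id;
		b->next = head;
		head = b;
	}

	/* Count, collect into an array... */
	int n = 0;
	for (Buf *b = head; b; b = b->next)
		++n;
	Buf **bufs = malloc (n * sizeof *bufs);
	int i = 0;
	for (Buf *b = head; b; b = b->next)
		bufs [i++] = b;

	/* ...and flush oldest-first by walking the array backwards. */
	for (i = n - 1; i >= 0; --i)
		printf ("flush buffer %d\n", bufs [i]->id);

	for (i = 0; i < n; ++i)
		free (bufs [i]);
	free (bufs);
	return 0;
}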
void
sgen_gray_object_queue_init (SgenGrayQueue *queue)
{
	GrayQueueSection *section, *next;
	int i;

	g_assert (sgen_gray_object_queue_is_empty (queue));
	SGEN_ASSERT (9, queue->balance == 0, "unbalanced queue on init %d", queue->balance);

	/* Free the extra sections allocated during the last collection */
	i = 0;
	for (section = queue->free_list; section && i < GRAY_QUEUE_LENGTH_LIMIT - 1; section = section->next)
		i ++;
	if (!section)
		return;
	while (section->next) {
		next = section->next;
		section->next = next->next;
		sgen_gray_object_free_queue_section (next);
	}
}
void*
sgen_alloc_internal (int type)
{
	int index, size;
	void *p;

	index = fixed_type_allocator_indexes [type];
	g_assert (index >= 0 && index < NUM_ALLOCATORS);

#ifdef HEAVY_STATISTICS
	++ allocator_sizes_stats [index];
#endif

	size = allocator_sizes [index];

	p = mono_lock_free_alloc (&allocators [index]);
	memset (p, 0, size);

	SGEN_ASSERT (0, !(((mword)p) & (sizeof (gpointer) - 1)), "Why do we allocate unaligned addresses?");

	return p;
}
char*
sgen_gray_object_dequeue (SgenGrayQueue *queue)
{
	char *obj;

	if (sgen_gray_object_queue_is_empty (queue))
		return NULL;

	SGEN_ASSERT (9, queue->first->end, "gray queue %p underflow, first %p, end %d", queue, queue->first, queue->first->end);

	obj = queue->first->objects [--queue->first->end];

	if (G_UNLIKELY (queue->first->end == 0)) {
		GrayQueueSection *section = queue->first;
		queue->first = section->next;
		section->next = queue->free_list;
		queue->free_list = section;
	}

	SGEN_LOG_DO (9, --queue->balance);

	return obj;
}
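A compact stand-alone model of the section recycling the dequeue above performs: when the head section drains, it moves to a free list and is reused by later enqueues. SECTION_SIZE and the ToyGrayQueue layout are simplifications, not SGen's real gray-queue structures:

#include <stdio.h>
#include <stdlib.h>

#define SECTION_SIZE 4

typedef struct Section {
	void *objects [SECTION_SIZE];
	int end;             /* number of live entries */
	struct Section *next;
} Section;

typedef struct {
	Section *first;     /* head section, popped from */
	Section *free_list; /* drained sections kept for reuse */
} ToyGrayQueue;

static void
enqueue (ToyGrayQueue *q, void *obj)
{
	if (!q->first || q->first->end == SECTION_SIZE) {
		/* Reuse a drained section if we have one, else allocate. */
		Section *s = q->free_list;
		if (s)
			q->free_list = s->next;
		else
			s = calloc (1, sizeof *s);
		s->end = 0;
		s->next = q->first;
		q->first = s;
	}
	q->first->objects [q->first->end++] = obj;
}

static void *
dequeue (ToyGrayQueue *q)
{
	if (!q->first)
		return NULL;
	void *obj = q->first->objects [--q->first->end];
	if (q->first->end == 0) {
		Section *s = q->first;
		q->first = s->next;
		s->next = q->free_list;
		q->free_list = s; /* recycle, mirroring the function above */
	}
	return obj;
}

int
main (void)
{
	ToyGrayQueue q = { NULL, NULL };
	int objs [6];
	for (int i = 0; i < 6; ++i)
		enqueue (&q, &objs [i]);
	int n = 0;
	while (dequeue (&q))
		++n;
	printf ("drained %d objects\n", n);
	return 0;
}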
void*
mono_gc_make_descr_for_object (gsize *bitmap, int numbits, size_t obj_size)
{
	int first_set = -1, num_set = 0, last_set = -1, i;
	mword desc = 0;
	size_t stored_size = obj_size;

	stored_size += SGEN_ALLOC_ALIGN - 1;
	stored_size &= ~(SGEN_ALLOC_ALIGN - 1);

	for (i = 0; i < numbits; ++i) {
		if (bitmap [i / GC_BITS_PER_WORD] & ((gsize)1 << (i % GC_BITS_PER_WORD))) {
			if (first_set < 0)
				first_set = i;
			last_set = i;
			num_set++;
		}
	}

	if (first_set < 0) {
		SGEN_LOG (6, "Ptrfree descriptor %p, size: %zd", (void*)desc, stored_size);
		if (stored_size <= MAX_RUNLEN_OBJECT_SIZE && stored_size <= SGEN_MAX_SMALL_OBJ_SIZE)
			return (void*)(DESC_TYPE_SMALL_PTRFREE | stored_size);
		return (void*)DESC_TYPE_COMPLEX_PTRFREE;
	}

	g_assert (!(stored_size & 0x7));

	SGEN_ASSERT (5, stored_size == SGEN_ALIGN_UP (stored_size), "Size is not aligned");

	/* we know the 2-word header is ptr-free */
	if (last_set < BITMAP_NUM_BITS + OBJECT_HEADER_WORDS && stored_size <= SGEN_MAX_SMALL_OBJ_SIZE) {
		desc = DESC_TYPE_BITMAP | ((*bitmap >> OBJECT_HEADER_WORDS) << LOW_TYPE_BITS);
		SGEN_LOG (6, "Largebitmap descriptor %p, size: %zd, last set: %d", (void*)desc, stored_size, last_set);
		return (void*) desc;
	}

	/*
	 * Fallback: objects too large, or with pointer words beyond the inline
	 * bitmap, get a complex descriptor. This tail assumes the
	 * alloc_complex_descriptor helper from sgen-descriptor.c; the original
	 * excerpt breaks off before it.
	 */
	desc = DESC_TYPE_COMPLEX | (alloc_complex_descriptor (bitmap, last_set + 1) << LOW_TYPE_BITS);
	return (void*) desc;
}
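To make the bitmap scan concrete: for a five-word object whose words 2 and 4 hold pointers, the loop ends with first_set = 2, last_set = 4, num_set = 2. Just that scan, extracted and runnable (the GC_BITS_PER_WORD definition here is a stand-in for the real constant):

#include <stdio.h>

#define GC_BITS_PER_WORD (8 * sizeof (unsigned long))

int
main (void)
{
	/* Words 2 and 4 are pointers. */
	unsigned long bitmap [1] = { (1UL << 2) | (1UL << 4) };
	int numbits = 5, first_set = -1, last_set = -1, num_set = 0;

	for (int i = 0; i < numbits; ++i) {
		if (bitmap [i / GC_BITS_PER_WORD] & (1UL << (i % GC_BITS_PER_WORD))) {
			if (first_set < 0)
				first_set = i;
			last_set = i;
			num_set++;
		}
	}
	printf ("first_set=%d last_set=%d num_set=%d\n", first_set, last_set, num_set);
	return 0;
}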
void
sgen_workers_wait (void)
{
	sgen_thread_pool_idle_wait ();
	SGEN_ASSERT (0, sgen_workers_all_done (), "Why are the workers not done after we wait for them?");
}
void
sgen_pinning_trim_queue_to_section (GCMemSection *section)
{
	SGEN_ASSERT (0, section->pin_queue_first_entry == 0, "Pin queue trimming assumes the whole pin queue is used by the nursery");
	pin_queue.next_slot = section->pin_queue_last_entry;
}