/*
 * Descriptor builders.
 */
SgenDescriptor
mono_gc_make_descr_for_object (gsize *bitmap, int numbits, size_t obj_size)
{
	int first_set = -1, num_set = 0, last_set = -1, i;
	SgenDescriptor desc = 0;
	size_t stored_size = SGEN_ALIGN_UP (obj_size);

	for (i = 0; i < numbits; ++i) {
		if (bitmap [i / GC_BITS_PER_WORD] & ((gsize)1 << (i % GC_BITS_PER_WORD))) {
			if (first_set < 0)
				first_set = i;
			last_set = i;
			num_set++;
		}
	}

	if (first_set < 0) {
		SGEN_LOG (6, "Ptrfree descriptor %p, size: %zd", (void*)desc, stored_size);
		if (stored_size <= MAX_RUNLEN_OBJECT_SIZE && stored_size <= SGEN_MAX_SMALL_OBJ_SIZE)
			return DESC_TYPE_SMALL_PTRFREE | stored_size;
		return DESC_TYPE_COMPLEX_PTRFREE;
	}

	g_assert (!(stored_size & 0x7));
	SGEN_ASSERT (5, stored_size == SGEN_ALIGN_UP (stored_size), "Size is not aligned");

	/* we know the 2-word header is ptr-free */
	if (last_set < BITMAP_NUM_BITS + OBJECT_HEADER_WORDS && stored_size <= SGEN_MAX_SMALL_OBJ_SIZE) {
		desc = DESC_TYPE_BITMAP | ((*bitmap >> OBJECT_HEADER_WORDS) << LOW_TYPE_BITS);
		SGEN_LOG (6, "Largebitmap descriptor %p, size: %zd, last set: %d", (void*)desc, stored_size, last_set);
		return desc;
	}
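/*
 * Illustration (not runtime code): a self-contained model of the small-bitmap
 * encoding used above.  The MODEL_* constants are assumptions made for this
 * example only; the real values come from sgen-descriptor.h.
 */
#include <assert.h>
#include <stdio.h>

#define MODEL_LOW_TYPE_BITS       3	/* assumed: low bits hold the descriptor type */
#define MODEL_DESC_TYPE_BITMAP    5	/* assumed tag value */
#define MODEL_OBJECT_HEADER_WORDS 2	/* the 2-word header is known to be ptr-free */

/* Pack a one-word reference bitmap into a bitmap descriptor, dropping the
 * header words exactly as the DESC_TYPE_BITMAP branch above does. */
static unsigned long
model_make_bitmap_desc (unsigned long bitmap)
{
	return MODEL_DESC_TYPE_BITMAP | ((bitmap >> MODEL_OBJECT_HEADER_WORDS) << MODEL_LOW_TYPE_BITS);
}

int
main (void)
{
	/* An object whose only reference is the first word after the header:
	 * bit 2 of the bitmap is set, which becomes bit 0 of the payload. */
	unsigned long desc = model_make_bitmap_desc (1ul << 2);
	assert ((desc >> MODEL_LOW_TYPE_BITS) == 1);
	printf ("descriptor: %#lx\n", desc);
	return 0;
}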
static MonoGCBridgeObjectKind
class_kind (MonoClass *klass)
{
	MonoGCBridgeObjectKind res = bridge_callbacks.bridge_class_kind (klass);

	/* If it's a bridge, nothing we can do about it. */
	if (res == GC_BRIDGE_TRANSPARENT_BRIDGE_CLASS || res == GC_BRIDGE_OPAQUE_BRIDGE_CLASS)
		return res;

	/* Non-bridge classes with no pointers will never point to a bridge, so we can safely ignore them. */
	if (!klass->has_references) {
		SGEN_LOG (6, "class %s is opaque\n", klass->name);
		return GC_BRIDGE_OPAQUE_CLASS;
	}

	/* Some arrays can be ignored */
	if (klass->rank == 1) {
		MonoClass *elem_class = klass->element_class;

		/* FIXME the bridge check can be quite expensive, cache it at the class level. */
		/* An array of a sealed type that is not a bridge will never get to a bridge */
		if ((elem_class->flags & TYPE_ATTRIBUTE_SEALED) && !elem_class->has_references && !bridge_callbacks.bridge_class_kind (elem_class)) {
			SGEN_LOG (6, "class %s is opaque\n", klass->name);
			return GC_BRIDGE_OPAQUE_CLASS;
		}
	}

	return GC_BRIDGE_TRANSPARENT_CLASS;
}
/* LOCKING: assumes the GC lock is held */
int
sgen_stop_world (int generation)
{
	int count, dead;

	mono_profiler_gc_event (MONO_GC_EVENT_PRE_STOP_WORLD, generation);
	MONO_GC_WORLD_STOP_BEGIN ();
	acquire_gc_locks ();

	/* We start to scan after locks are taken; this ensures we won't be interrupted. */
	sgen_process_togglerefs ();

	update_current_thread_stack (&count);

	sgen_global_stop_count++;
	SGEN_LOG (3, "stopping world n %d from %p %p", sgen_global_stop_count, mono_thread_info_current (), (gpointer)mono_native_thread_id_get ());
	TV_GETTIME (stop_world_time);
	count = sgen_thread_handshake (TRUE);
	dead = restart_threads_until_none_in_managed_allocator ();
	if (count < dead)
		g_error ("More threads have died (%d) than were initially suspended (%d)", dead, count);
	count -= dead;

	SGEN_LOG (3, "world stopped %d thread(s)", count);
	mono_profiler_gc_event (MONO_GC_EVENT_POST_STOP_WORLD, generation);
	MONO_GC_WORLD_STOP_END ();

	sgen_memgov_collection_start (generation);
	sgen_bridge_reset_data ();

	return count;
}
void
sgen_client_clear_togglerefs (char *start, char *end, ScanCopyContext ctx)
{
	CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
	SgenGrayQueue *queue = ctx.queue;
	int i;

	SGEN_LOG (4, "Clearing ToggleRefs %d", toggleref_array_size);

	for (i = 0; i < toggleref_array_size; ++i) {
		if (toggleref_array [i].weak_ref) {
			GCObject *object = toggleref_array [i].weak_ref;
			if ((char*)object >= start && (char*)object < end) {
				if (sgen_gc_is_object_ready_for_finalization (object)) {
					SGEN_LOG (6, "\tcleaning weak slot %d", i);
					/* We defer compaction to only happen on the callback step. */
					toggleref_array [i].weak_ref = NULL;
				} else {
					SGEN_LOG (6, "\tkeeping weak slot %d", i);
					copy_func (&toggleref_array [i].weak_ref, queue);
				}
			}
		}
	}
	sgen_drain_gray_stack (ctx);
}
/* LOCKING: assumes the GC lock is held */
void
sgen_client_stop_world (int generation)
{
	TV_DECLARE (end_handshake);

	/* notify the profiler of the leftovers */
	/* FIXME this is the wrong spot, as we can STW for non-collection reasons. */
	if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_GC_MOVES))
		mono_sgen_gc_event_moves ();

	acquire_gc_locks ();

	mono_profiler_gc_event (MONO_GC_EVENT_PRE_STOP_WORLD_LOCKED, generation);

	/* We start to scan after locks are taken; this ensures we won't be interrupted. */
	sgen_process_togglerefs ();

	update_current_thread_stack (&generation);

	sgen_global_stop_count++;
	SGEN_LOG (3, "stopping world n %d from %p %p", sgen_global_stop_count, mono_thread_info_current (), (gpointer) (gsize) mono_native_thread_id_get ());
	TV_GETTIME (stop_world_time);

	sgen_unified_suspend_stop_world ();

	SGEN_LOG (3, "world stopped");

	TV_GETTIME (end_handshake);
	time_stop_world += TV_ELAPSED (stop_world_time, end_handshake);

	sgen_memgov_collection_start (generation);
	if (sgen_need_bridge_processing ())
		sgen_bridge_reset_data ();
}
void
sgen_find_section_pin_queue_start_end (GCMemSection *section)
{
	SGEN_LOG (6, "Pinning from section %p (%p-%p)", section, section->data, section->end_data);
	section->pin_queue_start = sgen_find_optimized_pin_queue_area (section->data, section->end_data, &section->pin_queue_num_entries);
	SGEN_LOG (6, "Found %d pinning addresses in section %p", section->pin_queue_num_entries, section);
}
/*
 * Return whether ADDR occurs in the remembered sets
 */
static gboolean
sgen_ssb_find_address (char *addr)
{
	int i;
	SgenThreadInfo *info;
	RememberedSet *remset;
	GenericStoreRememberedSet *store_remset;
	mword *p;
	gboolean found = FALSE;

	/* the global one */
	for (remset = global_remset; remset; remset = remset->next) {
		SGEN_LOG (4, "Scanning global remset range: %p-%p, size: %td", remset->data, remset->store_next, remset->store_next - remset->data);
		for (p = remset->data; p < remset->store_next;) {
			p = find_in_remset_loc (p, addr, &found);
			if (found)
				return TRUE;
		}
	}

	/* the generic store ones */
	for (store_remset = generic_store_remsets; store_remset; store_remset = store_remset->next) {
		for (i = 0; i < STORE_REMSET_BUFFER_SIZE - 1; ++i) {
			if (store_remset->data [i] == addr)
				return TRUE;
		}
	}

	/* the per-thread ones */
	FOREACH_THREAD (info) {
		int j;
		for (remset = info->remset; remset; remset = remset->next) {
			SGEN_LOG (4, "Scanning remset for thread %p, range: %p-%p, size: %td", info, remset->data, remset->store_next, remset->store_next - remset->data);
			for (p = remset->data; p < remset->store_next;) {
				p = find_in_remset_loc (p, addr, &found);
				if (found)
					return TRUE;
			}
		}
		for (j = 0; j < *info->store_remset_buffer_index_addr; ++j) {
			if ((*info->store_remset_buffer_addr) [j + 1] == addr)
				return TRUE;
		}
	} END_FOREACH_THREAD

	/* the freed thread ones */
	for (remset = freed_thread_remsets; remset; remset = remset->next) {
		SGEN_LOG (4, "Scanning remset for freed thread, range: %p-%p, size: %td", remset->data, remset->store_next, remset->store_next - remset->data);
		for (p = remset->data; p < remset->store_next;) {
			p = find_in_remset_loc (p, addr, &found);
			if (found)
				return TRUE;
		}
	}

	return FALSE;
}
void
sgen_find_section_pin_queue_start_end (GCMemSection *section)
{
	SGEN_LOG (6, "Pinning from section %p (%p-%p)", section, section->data, section->end_data);

	sgen_find_optimized_pin_queue_area (section->data, section->end_data,
			&section->pin_queue_first_entry, &section->pin_queue_last_entry);

	SGEN_LOG (6, "Found %zd pinning addresses in section %p",
			section->pin_queue_last_entry - section->pin_queue_first_entry, section);
}
/* LOCKING: requires that the GC lock is held */
void
sgen_collect_bridge_objects (int generation, ScanCopyContext ctx)
{
	CopyOrMarkObjectFunc copy_func = ctx.copy_func;
	GrayQueue *queue = ctx.queue;
	SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);
	MonoObject *object;
	gpointer dummy;
	char *copy;

	if (no_finalize)
		return;

	SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
		int tag = tagged_object_get_tag (object);
		object = tagged_object_get_object (object);

		/* Bridge code told us to ignore this one */
		if (tag == BRIDGE_OBJECT_MARKED)
			continue;

		/* Major heap says the object is still live: nothing to do. */
		if (major_collector.is_object_live ((char*)object))
			continue;

		/* Nursery says the object is still live: nothing to do. */
		if (!sgen_gc_is_object_ready_for_finalization (object))
			continue;

		if (!sgen_is_bridge_object (object))
			continue;

		copy = (char*)object;
		copy_func ((void**)&copy, queue);

		sgen_bridge_register_finalized_object ((MonoObject*)copy);

		if (hash_table == &minor_finalizable_hash && !ptr_in_nursery (copy)) {
			/* remove from the list */
			SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);

			/* insert it into the major hash */
			sgen_hash_table_replace (&major_finalizable_hash, tagged_object_apply (copy, tag), NULL, NULL);

			SGEN_LOG (5, "Promoting finalization of object %p (%s) (was at %p) to major table", copy, sgen_safe_name (copy), object);

			continue;
		} else {
			/* update pointer */
			SGEN_LOG (5, "Updating object for finalization: %p (%s) (was at %p)", copy, sgen_safe_name (copy), object);
			SGEN_HASH_TABLE_FOREACH_SET_KEY (tagged_object_apply (copy, tag));
		}
	}
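/*
 * The tagged_object_* helpers used above pack a small tag (such as
 * BRIDGE_OBJECT_MARKED) into the low alignment bits of the object pointer.
 * A minimal model of that trick, assuming 8-byte object alignment:
 */
#include <stdint.h>

#define MODEL_TAG_MASK ((uintptr_t)0x7)

static inline int
model_tagged_get_tag (void *tagged)
{
	return (int)((uintptr_t)tagged & MODEL_TAG_MASK);
}

static inline void*
model_tagged_get_object (void *tagged)
{
	return (void*)((uintptr_t)tagged & ~MODEL_TAG_MASK);
}

static inline void*
model_tagged_apply (void *object, int tag)
{
	return (void*)((uintptr_t)object | (uintptr_t)tag);
}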
static void
sgen_memgov_calculate_minor_collection_allowance (void)
{
	size_t new_major, new_heap_size, allowance_target, allowance;
	size_t decrease;

	if (!need_calculate_minor_collection_allowance)
		return;

	SGEN_ASSERT (0, major_collector.have_swept (), "Can only calculate allowance if heap is swept");

	new_major = major_collector.get_bytes_survived_last_sweep ();
	new_heap_size = new_major + last_collection_los_memory_usage;

	/*
	 * We allow the heap to grow by one third its current size before we start the next
	 * major collection.
	 */
	allowance_target = new_heap_size * SGEN_DEFAULT_ALLOWANCE_HEAP_SIZE_RATIO;

	allowance = MAX (allowance_target, MIN_MINOR_COLLECTION_ALLOWANCE);

	/*
	 * For the concurrent collector, we decrease the allowance relative to the memory
	 * growth during the M&S phase, survival rate of the collection and the allowance
	 * ratio.
	 */
	decrease = (major_pre_sweep_heap_size - major_start_heap_size) * ((float)new_heap_size / major_pre_sweep_heap_size) * (SGEN_DEFAULT_ALLOWANCE_HEAP_SIZE_RATIO + 1);
	if (decrease > allowance)
		decrease = allowance;
	allowance -= decrease;

	if (new_heap_size + allowance > soft_heap_limit) {
		if (new_heap_size > soft_heap_limit)
			allowance = MIN_MINOR_COLLECTION_ALLOWANCE;
		else
			allowance = MAX (soft_heap_limit - new_heap_size, MIN_MINOR_COLLECTION_ALLOWANCE);
	}

	/* FIXME: Why is this here? */
	if (major_collector.free_swept_blocks)
		major_collector.free_swept_blocks (allowance);

	major_collection_trigger_size = new_heap_size + allowance;

	need_calculate_minor_collection_allowance = FALSE;

	if (debug_print_allowance) {
		SGEN_LOG (0, "Surviving sweep: %ld bytes (%ld major, %ld LOS)", (long)new_heap_size, (long)new_major, (long)last_collection_los_memory_usage);
		SGEN_LOG (0, "Allowance: %ld bytes", (long)allowance);
		SGEN_LOG (0, "Trigger size: %ld bytes", (long)major_collection_trigger_size);
	}
}
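/*
 * Worked example (illustrative only, ignoring the concurrent-mode decrease
 * and the soft heap limit), assuming SGEN_DEFAULT_ALLOWANCE_HEAP_SIZE_RATIO
 * is 1/3: a swept heap of 300 MB earns a 100 MB allowance, so the next major
 * collection triggers at roughly 400 MB.
 */
static size_t
model_trigger_size (size_t new_heap_size, size_t min_allowance)
{
	size_t allowance = (size_t)(new_heap_size * (1.0 / 3.0));	/* assumed ratio */
	if (allowance < min_allowance)
		allowance = min_allowance;
	return new_heap_size + allowance;
}

/* model_trigger_size ((size_t)300 << 20, (size_t)4 << 20) == 400 MB */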
static void
sgen_ssb_record_pointer (gpointer ptr)
{
	RememberedSet *rs;
	gboolean lock = sgen_collection_is_parallel ();
	gpointer obj = *(gpointer*)ptr;

	g_assert (!sgen_ptr_in_nursery (ptr) && sgen_ptr_in_nursery (obj));

	if (lock)
		LOCK_GLOBAL_REMSET;

	if (!global_remset_location_was_not_added (ptr))
		goto done;

	if (G_UNLIKELY (do_pin_stats))
		sgen_pin_stats_register_global_remset (obj);

	SGEN_LOG (8, "Adding global remset for %p", ptr);
	binary_protocol_global_remset (ptr, *(gpointer*)ptr, (gpointer)SGEN_LOAD_VTABLE (obj));

	HEAVY_STAT (++stat_global_remsets_added);

	/*
	 * FIXME: If an object remains pinned, we need to add it at every minor collection.
	 * To avoid uncontrolled growth of the global remset, only add each pointer once.
	 */
	if (global_remset->store_next + 3 < global_remset->end_set) {
		*(global_remset->store_next++) = (mword)ptr;
		goto done;
	}
	rs = sgen_alloc_remset (global_remset->end_set - global_remset->data, NULL, TRUE);
	rs->next = global_remset;
	global_remset = rs;
	*(global_remset->store_next++) = (mword)ptr;

#if SGEN_MAX_DEBUG_LEVEL >= 4
	{
		int global_rs_size = 0;

		for (rs = global_remset; rs; rs = rs->next) {
			global_rs_size += rs->store_next - rs->data;
		}
		SGEN_LOG (4, "Global remset now has size %d", global_rs_size);
	}
#endif

done:
	if (lock)
		UNLOCK_GLOBAL_REMSET;
}
void
sgen_process_togglerefs (void)
{
	int i, w;
	int toggle_ref_counts [3] = { 0, 0, 0 };

	SGEN_LOG (4, "Processing ToggleRefs %d", toggleref_array_size);

	for (i = w = 0; i < toggleref_array_size; ++i) {
		int res;
		MonoGCToggleRef r = toggleref_array [i];
		MonoObject *obj;

		if (r.strong_ref)
			obj = r.strong_ref;
		else if (r.weak_ref)
			obj = r.weak_ref;
		else
			continue;

		res = toggleref_callback (obj);
		++toggle_ref_counts [res];
		switch (res) {
		case MONO_TOGGLE_REF_DROP:
			break;
		case MONO_TOGGLE_REF_STRONG:
			toggleref_array [w].strong_ref = obj;
			toggleref_array [w].weak_ref = NULL;
			++w;
			break;
		case MONO_TOGGLE_REF_WEAK:
			toggleref_array [w].strong_ref = NULL;
			toggleref_array [w].weak_ref = obj;
			++w;
			break;
		default:
			g_assert_not_reached ();
		}
	}

	toggleref_array_size = w;

	SGEN_LOG (4, "Done processing ToggleRefs: dropped %d, strong %d, weak %d, final size %d",
		toggle_ref_counts [MONO_TOGGLE_REF_DROP],
		toggle_ref_counts [MONO_TOGGLE_REF_STRONG],
		toggle_ref_counts [MONO_TOGGLE_REF_WEAK],
		w);
}
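/*
 * Sketch of how an embedder drives this machinery, assuming the public
 * toggleref API (mono_gc_toggleref_register_callback and
 * mono_gc_toggleref_add); native_peer_is_alive is a hypothetical
 * embedder-side test.  The callback decides per object which kind of
 * reference the GC keeps; the compaction loop above then rewrites the
 * array in place.
 */
static MonoToggleRefStatus
my_toggleref_callback (MonoObject *obj)
{
	if (native_peer_is_alive (obj))		/* hypothetical helper */
		return MONO_TOGGLE_REF_STRONG;	/* keep the object alive */
	return MONO_TOGGLE_REF_WEAK;		/* let the GC collect it */
}

static void
install_toggleref_support (void)
{
	mono_gc_toggleref_register_callback (my_toggleref_callback);
	/* later, per bridged object: mono_gc_toggleref_add (obj, TRUE); */
}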
/*
 * To be used for interned strings and possibly MonoThread, reflection handles.
 * We may want to explicitly free these objects.
 */
void*
mono_gc_alloc_pinned_obj (MonoVTable *vtable, size_t size)
{
	void **p;

	if (!SGEN_CAN_ALIGN_UP (size))
		return NULL;
	size = ALIGN_UP (size);

	LOCK_GC;

	if (size > SGEN_MAX_SMALL_OBJ_SIZE) {
		/* large objects are always pinned anyway */
		p = sgen_los_alloc_large_inner (vtable, size);
	} else {
		SGEN_ASSERT (9, vtable->klass->inited, "class %s:%s is not initialized", vtable->klass->name_space, vtable->klass->name);
		p = major_collector.alloc_small_pinned_obj (vtable, size, SGEN_VTABLE_HAS_REFERENCES (vtable));
	}
	if (G_LIKELY (p)) {
		SGEN_LOG (6, "Allocated pinned object %p, vtable: %p (%s), size: %zd", p, vtable, vtable->klass->name, size);
		if (size > SGEN_MAX_SMALL_OBJ_SIZE)
			MONO_GC_MAJOR_OBJ_ALLOC_LARGE ((mword)p, size, vtable->klass->name_space, vtable->klass->name);
		else
			MONO_GC_MAJOR_OBJ_ALLOC_PINNED ((mword)p, size, vtable->klass->name_space, vtable->klass->name);
		binary_protocol_alloc_pinned (p, vtable, size);
	}
	UNLOCK_GC;
	return p;
}
int
sgen_thread_handshake (BOOL suspend)
{
	int count, result;
	SgenThreadInfo *info;
	int signum = suspend ? suspend_signal_num : restart_signal_num;

	MonoNativeThreadId me = mono_native_thread_id_get ();

	count = 0;
	mono_thread_info_current ()->client_info.suspend_done = TRUE;
	FOREACH_THREAD_SAFE (info) {
		if (mono_native_thread_id_equals (mono_thread_info_get_tid (info), me)) {
			continue;
		}
		info->client_info.suspend_done = FALSE;
		if (info->client_info.gc_disabled)
			continue;
		/*if (signum == suspend_signal_num && info->stop_count == global_stop_count)
			continue;*/
		result = mono_threads_pthread_kill (info, signum);
		if (result == 0) {
			count++;
		} else {
			info->client_info.skip = 1;
		}
	} END_FOREACH_THREAD_SAFE

	sgen_wait_for_suspend_ack (count);

	SGEN_LOG (4, "%s handshake for %d threads\n", suspend ? "suspend" : "resume", count);

	return count;
}
static void
sgen_ssb_wbarrier_arrayref_copy (gpointer dest_ptr, gpointer src_ptr, int count)
{
	RememberedSet *rs;
	TLAB_ACCESS_INIT;

	LOCK_GC;
	mono_gc_memmove (dest_ptr, src_ptr, count * sizeof (gpointer));

	rs = REMEMBERED_SET;
	SGEN_LOG (8, "Adding remset at %p, %d", dest_ptr, count);
	if (rs->store_next + 1 < rs->end_set) {
		*(rs->store_next++) = (mword)dest_ptr | REMSET_RANGE;
		*(rs->store_next++) = count;
		UNLOCK_GC;
		return;
	}
	rs = sgen_alloc_remset (rs->end_set - rs->data, (void*)1, FALSE);
	rs->next = REMEMBERED_SET;
	REMEMBERED_SET = rs;
#ifdef HAVE_KW_THREAD
	mono_thread_info_current ()->remset = rs;
#endif
	*(rs->store_next++) = (mword)dest_ptr | REMSET_RANGE;
	*(rs->store_next++) = count;
	UNLOCK_GC;
}
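/*
 * Minimal self-contained model (a sketch under stated assumptions, not
 * runtime code) of the sequential-store-buffer pattern shared by the write
 * barriers above and sgen_alloc_remset below: append into the current buffer
 * and chain a fresh buffer of the same size on overflow.
 */
#include <stdint.h>
#include <stdlib.h>

typedef struct ModelRemset {
	struct ModelRemset *next;
	uintptr_t *store_next, *end_set;
	uintptr_t data [];			/* tail array allocated with the struct */
} ModelRemset;

static ModelRemset*
model_alloc_remset (size_t size)
{
	ModelRemset *rs = (ModelRemset *)malloc (sizeof (ModelRemset) + size * sizeof (uintptr_t));
	rs->store_next = rs->data;
	rs->end_set = rs->data + size;
	rs->next = NULL;
	return rs;
}

/* Record one entry, returning the (possibly new) head of the chain. */
static ModelRemset*
model_record (ModelRemset *head, uintptr_t entry)
{
	if (head->store_next == head->end_set) {
		ModelRemset *rs = model_alloc_remset (head->end_set - head->data);
		rs->next = head;
		head = rs;
	}
	*(head->store_next++) = entry;
	return head;
}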
/*
 * We found a fragment of free memory in the nursery: memzero it and if
 * it is big enough, add it to the list of fragments that can be used for
 * allocation.
 */
static void
add_nursery_frag (SgenFragmentAllocator *allocator, size_t frag_size, char* frag_start, char* frag_end)
{
	SGEN_LOG (4, "Found empty fragment: %p-%p, size: %zd", frag_start, frag_end, frag_size);
	binary_protocol_empty (frag_start, frag_size);
	/* Not worth dealing with smaller fragments: need to tune */
	if (frag_size >= SGEN_MAX_NURSERY_WASTE) {
		/* memsetting just the first chunk start is bound to provide better cache locality */
		if (sgen_get_nursery_clear_policy () == CLEAR_AT_GC)
			memset (frag_start, 0, frag_size);
		else if (sgen_get_nursery_clear_policy () == CLEAR_AT_TLAB_CREATION_DEBUG)
			memset (frag_start, 0xff, frag_size);

#ifdef NALLOC_DEBUG
		/* XXX convert this into a flight record entry
		printf ("\tfragment [%p %p] size %zd\n", frag_start, frag_end, frag_size);
		*/
#endif
		sgen_fragment_allocator_add (allocator, frag_start, frag_end);
		fragment_total += frag_size;
	} else {
		/* Clear unused fragments, pinning depends on this */
		sgen_clear_range (frag_start, frag_end);
		HEAVY_STAT (stat_wasted_bytes_small_areas += frag_size);
	}
}
/* LOCKING: assumes the GC lock is held */
int
sgen_restart_world (int generation, GGTimingInfo *timing)
{
	int count;
	SgenThreadInfo *info;
	TV_DECLARE (end_sw);
	TV_DECLARE (end_bridge);
	unsigned long usec, bridge_usec;

	/* notify the profiler of the leftovers */
	/* FIXME this is the wrong spot, as we can STW for non-collection reasons. */
	if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_GC_MOVES))
		sgen_gc_event_moves ();
	mono_profiler_gc_event (MONO_GC_EVENT_PRE_START_WORLD, generation);
	MONO_GC_WORLD_RESTART_BEGIN (generation);
	FOREACH_THREAD (info) {
		info->stack_start = NULL;
#ifdef USE_MONO_CTX
		memset (&info->ctx, 0, sizeof (MonoContext));
#else
		memset (&info->regs, 0, sizeof (info->regs));
#endif
	} END_FOREACH_THREAD

	count = sgen_thread_handshake (FALSE);
	TV_GETTIME (end_sw);
	usec = TV_ELAPSED (stop_world_time, end_sw);
	max_pause_usec = MAX (usec, max_pause_usec);
	SGEN_LOG (2, "restarted %d thread(s) (pause time: %d usec, max: %d)", count, (int)usec, (int)max_pause_usec);
	mono_profiler_gc_event (MONO_GC_EVENT_POST_START_WORLD, generation);
	MONO_GC_WORLD_RESTART_END (generation);

	/*
	 * We must release the thread info suspend lock after doing
	 * the thread handshake.  Otherwise, if the GC stops the world
	 * and a thread is in the process of starting up, but has not
	 * yet registered (it's not in the thread_list), it is
	 * possible that the thread does register while the world is
	 * stopped.  When restarting, the GC will then try to restart
	 * said thread, but since it never got the suspend signal, it
	 * cannot answer the restart signal, so a deadlock results.
	 */
	release_gc_locks ();

	sgen_try_free_some_memory = TRUE;

	sgen_bridge_processing_finish (generation);

	TV_GETTIME (end_bridge);
	bridge_usec = TV_ELAPSED (end_sw, end_bridge);

	if (timing) {
		timing [0].stw_time = usec;
		timing [0].bridge_time = bridge_usec;
	}

	sgen_memgov_collection_end (generation, timing, timing ? 2 : 0);

	return count;
}
/*
 * To be used for interned strings and possibly MonoThread, reflection handles.
 * We may want to explicitly free these objects.
 */
GCObject*
sgen_alloc_obj_pinned (GCVTable vtable, size_t size)
{
	GCObject *p;

	if (!SGEN_CAN_ALIGN_UP (size))
		return NULL;
	size = ALIGN_UP (size);

	LOCK_GC;

	if (size > SGEN_MAX_SMALL_OBJ_SIZE) {
		/* large objects are always pinned anyway */
		p = (GCObject *)sgen_los_alloc_large_inner (vtable, size);
	} else {
		SGEN_ASSERT (9, sgen_client_vtable_is_inited (vtable), "class %s:%s is not initialized", sgen_client_vtable_get_namespace (vtable), sgen_client_vtable_get_name (vtable));
		p = sgen_major_collector.alloc_small_pinned_obj (vtable, size, SGEN_VTABLE_HAS_REFERENCES (vtable));
	}
	if (G_LIKELY (p)) {
		SGEN_LOG (6, "Allocated pinned object %p, vtable: %p (%s), size: %zd", p, vtable, sgen_client_vtable_get_name (vtable), size);
		sgen_binary_protocol_alloc_pinned (p, vtable, size, sgen_client_get_provenance ());
	}
	UNLOCK_GC;
	return p;
}
static void
sgen_ssb_wbarrier_object_copy (MonoObject* obj, MonoObject *src)
{
	int size;
	RememberedSet *rs;
	TLAB_ACCESS_INIT;

	size = mono_object_class (obj)->instance_size;

	rs = REMEMBERED_SET;
	SGEN_LOG (6, "Adding object remset for %p", obj);

	LOCK_GC;
	/* do not copy the sync state */
	mono_gc_memmove ((char*)obj + sizeof (MonoObject), (char*)src + sizeof (MonoObject), size - sizeof (MonoObject));

	if (rs->store_next < rs->end_set) {
		*(rs->store_next++) = (mword)obj | REMSET_OBJECT;
		UNLOCK_GC;
		return;
	}
	rs = sgen_alloc_remset (rs->end_set - rs->data, (void*)1, FALSE);
	rs->next = REMEMBERED_SET;
	REMEMBERED_SET = rs;
#ifdef HAVE_KW_THREAD
	mono_thread_info_current ()->remset = rs;
#endif
	*(rs->store_next++) = (mword)obj | REMSET_OBJECT;
	UNLOCK_GC;
}
/*
 * Clear the info in the remembered sets: we're doing a major collection, so
 * the per-thread ones are not needed and the global ones will be reconstructed
 * during the copy.
 */
static void
sgen_ssb_prepare_for_major_collection (void)
{
	SgenThreadInfo *info;
	RememberedSet *remset, *next;

	sgen_ssb_prepare_for_minor_collection ();

	/* the global list */
	for (remset = global_remset; remset; remset = next) {
		remset->store_next = remset->data;
		next = remset->next;
		remset->next = NULL;
		if (remset != global_remset) {
			SGEN_LOG (4, "Freed remset at %p", remset->data);
			sgen_free_internal_dynamic (remset, remset_byte_size (remset), INTERNAL_MEM_REMSET);
		}
	}
	/* the generic store ones */
	while (generic_store_remsets) {
		GenericStoreRememberedSet *gs_next = generic_store_remsets->next;
		sgen_free_internal (generic_store_remsets, INTERNAL_MEM_STORE_REMSET);
		generic_store_remsets = gs_next;
	}
	/* the per-thread ones */
	FOREACH_THREAD (info) {
		for (remset = info->remset; remset; remset = next) {
			remset->store_next = remset->data;
			next = remset->next;
			remset->next = NULL;
			if (remset != info->remset) {
				SGEN_LOG (3, "Freed remset at %p", remset->data);
				sgen_free_internal_dynamic (remset, remset_byte_size (remset), INTERNAL_MEM_REMSET);
			}
		}
		clear_thread_store_remset_buffer (info);
	} END_FOREACH_THREAD

	/* the freed thread ones */
	while (freed_thread_remsets) {
		next = freed_thread_remsets->next;
		SGEN_LOG (4, "Freed remset at %p", freed_thread_remsets->data);
		sgen_free_internal_dynamic (freed_thread_remsets, remset_byte_size (freed_thread_remsets), INTERNAL_MEM_REMSET);
		freed_thread_remsets = next;
	}
}
static int
alloc_complex_descriptor (gsize *bitmap, int numbits)
{
	int nwords, res, i;

	numbits = ALIGN_TO (numbits, GC_BITS_PER_WORD);
	nwords = numbits / GC_BITS_PER_WORD + 1;

	sgen_gc_lock ();
	res = complex_descriptors_next;
	/* linear search, so we don't have duplicates with domain load/unload
	 * this should not be performance critical or we'd have bigger issues
	 * (the number and size of complex descriptors should be small).
	 */
	for (i = 0; i < complex_descriptors_next; ) {
		if (complex_descriptors [i] == nwords) {
			int j, found = TRUE;
			for (j = 0; j < nwords - 1; ++j) {
				if (complex_descriptors [i + 1 + j] != bitmap [j]) {
					found = FALSE;
					break;
				}
			}
			if (found) {
				sgen_gc_unlock ();
				return i;
			}
		}
		i += (int)complex_descriptors [i];
	}
	if (complex_descriptors_next + nwords > complex_descriptors_size) {
		int new_size = complex_descriptors_size * 2 + nwords;
		complex_descriptors = (gsize *)g_realloc (complex_descriptors, new_size * sizeof (gsize));
		complex_descriptors_size = new_size;
	}
	SGEN_LOG (6, "Complex descriptor %d, size: %d (total desc memory: %d)", res, nwords, complex_descriptors_size);
	complex_descriptors_next += nwords;
	complex_descriptors [res] = nwords;
	for (i = 0; i < nwords - 1; ++i) {
		complex_descriptors [res + 1 + i] = bitmap [i];
		SGEN_LOG (6, "\tvalue: %p", (void*)complex_descriptors [res + 1 + i]);
	}
	sgen_gc_unlock ();
	return res;
}
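/*
 * Sketch (assumptions only) of reading the table that the function above
 * fills in: records are length-prefixed runs [nwords][bitmap words...], and
 * the record's index doubles as the handle returned to the caller, so a walk
 * advances by *p words per record, just like the dedup scan above.
 */
static const gsize*
model_complex_lookup (const gsize *table, int table_len, int handle)
{
	int i = 0;

	while (i < table_len) {
		if (i == handle)
			return &table [i];	/* points at the nwords prefix */
		i += (int)table [i];		/* skip over this record */
	}
	return NULL;				/* not a valid handle */
}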
void*
sgen_nursery_alloc_range (size_t desired_size, size_t minimum_size, size_t *out_alloc_size)
{
	SGEN_LOG (4, "Searching for byte range desired size: %zd minimum size %zd", desired_size, minimum_size);

	HEAVY_STAT (++stat_nursery_alloc_range_requests);

	return sgen_fragment_allocator_par_range_alloc (&mutator_allocator, desired_size, minimum_size, out_alloc_size);
}
void
sgen_dump_pin_queue (void)
{
	int i;

	for (i = 0; i < last_num_pinned; ++i) {
		SGEN_LOG (3, "Bastard pinning obj %p (%s), size: %d", pin_queue [i], sgen_safe_name (pin_queue [i]), sgen_safe_object_get_size (pin_queue [i]));
	}
}
void
sgen_memgov_major_collection_start (void)
{
	need_calculate_minor_collection_allowance = TRUE;
	major_start_heap_size = get_heap_size ();

	if (debug_print_allowance) {
		SGEN_LOG (0, "Starting collection with heap size %ld bytes", (long)major_start_heap_size);
	}
}
void
sgen_dump_pin_queue (void)
{
	int i;

	for (i = 0; i < last_num_pinned; ++i) {
		void *ptr = pin_queue.data [i];
		SGEN_LOG (3, "Bastard pinning obj %p (%s), size: %zd", ptr, sgen_safe_name (ptr), sgen_safe_object_get_size (ptr));
	}
}
void
sgen_client_mark_togglerefs (char *start, char *end, ScanCopyContext ctx)
{
	CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
	SgenGrayQueue *queue = ctx.queue;
	int i;

	SGEN_LOG (4, "Marking ToggleRefs %d", toggleref_array_size);

	for (i = 0; i < toggleref_array_size; ++i) {
		if (toggleref_array [i].strong_ref) {
			GCObject *object = toggleref_array [i].strong_ref;
			if ((char*)object >= start && (char*)object < end) {
				SGEN_LOG (6, "\tcopying strong slot %d", i);
				copy_func (&toggleref_array [i].strong_ref, queue);
			}
		}
	}
	sgen_drain_gray_stack (ctx);
}
void
sgen_dump_pin_queue (void)
{
	int i;

	for (i = 0; i < last_num_pinned; ++i) {
		GCObject *ptr = (GCObject *)pin_queue.data [i];
		SGEN_LOG (3, "Bastard pinning obj %p (%s), size: %zd", ptr, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (ptr)), sgen_safe_object_get_size (ptr));
	}
}
/* FIXME: later choose a size that takes into account the RememberedSet struct
 * and doesn't waste any alloc padding space.
 */
static RememberedSet*
sgen_alloc_remset (int size, gpointer id, gboolean global)
{
	RememberedSet* res = sgen_alloc_internal_dynamic (sizeof (RememberedSet) + (size * sizeof (gpointer)), INTERNAL_MEM_REMSET, TRUE);
	res->store_next = res->data;
	res->end_set = res->data + size;
	res->next = NULL;
	SGEN_LOG (4, "Allocated%s remset size %d at %p for %p", global ? " global" : "", size, res->data, id);
	return res;
}
/* LOCKING: assumes the GC lock is held */
int
sgen_stop_world (int generation)
{
	TV_DECLARE (end_handshake);
	int count, dead;

	mono_profiler_gc_event (MONO_GC_EVENT_PRE_STOP_WORLD, generation);
	MONO_GC_WORLD_STOP_BEGIN ();
	binary_protocol_world_stopping (sgen_timestamp ());
	acquire_gc_locks ();

	/* We start to scan after locks are taken; this ensures we won't be interrupted. */
	sgen_process_togglerefs ();

	update_current_thread_stack (&count);

	sgen_global_stop_count++;
	SGEN_LOG (3, "stopping world n %d from %p %p", sgen_global_stop_count, mono_thread_info_current (), (gpointer)mono_native_thread_id_get ());
	TV_GETTIME (stop_world_time);
	count = sgen_thread_handshake (TRUE);
	dead = restart_threads_until_none_in_managed_allocator ();
	if (count < dead)
		g_error ("More threads have died (%d) than were initially suspended (%d)", dead, count);
	count -= dead;

	SGEN_LOG (3, "world stopped %d thread(s)", count);
	mono_profiler_gc_event (MONO_GC_EVENT_POST_STOP_WORLD, generation);
	MONO_GC_WORLD_STOP_END ();

	if (binary_protocol_is_enabled ()) {
		long long major_total, major_marked, los_total, los_marked;
		count_cards (&major_total, &major_marked, &los_total, &los_marked);
		binary_protocol_world_stopped (sgen_timestamp (), major_total, major_marked, los_total, los_marked);
	}

	TV_GETTIME (end_handshake);
	time_stop_world += TV_ELAPSED (stop_world_time, end_handshake);

	sgen_memgov_collection_start (generation);
	if (sgen_need_bridge_processing ())
		sgen_bridge_reset_data ();

	return count;
}
MONO_SIG_HANDLER_FUNC (static, restart_handler)
{
	SgenThreadInfo *info;
	int old_errno = errno;

	info = mono_thread_info_current ();
	info->client_info.signal = restart_signal_num;
	SGEN_LOG (4, "Restart handler in %p %p", info, (gpointer) (gsize) mono_native_thread_id_get ());

	errno = old_errno;
}