/*
 * Sweep-phase callback for the pinned-object space.  Objects whose pin
 * bit is set survived the collection: clear the bit so it starts fresh
 * for the next cycle.  Objects without the bit were never reached and
 * their storage is returned to the pinned allocator.
 *
 * ptr/size describe the object slot; data is unused by this callback.
 */
static void
sweep_pinned_objects_callback (char *ptr, size_t size, void *data)
{
	if (!SGEN_OBJECT_IS_PINNED (ptr)) {
		/* Not marked during this collection - reclaim it. */
		DEBUG (6, fprintf (gc_debug_file, "Freeing unmarked pinned object %p (%s)\n", ptr, mono_sgen_safe_name (ptr)));
		free_pinned_object (ptr, size);
		return;
	}
	/* Survivor: reset the pin/mark bit for the next collection. */
	SGEN_UNPIN_OBJECT (ptr);
	DEBUG (6, fprintf (gc_debug_file, "Unmarked pinned object %p (%s)\n", ptr, mono_sgen_safe_name (ptr)));
}
/*
 * Process a single remembered-set entry starting at *p and scan/copy the
 * nursery references it records.
 *
 * p             - cursor into the remset buffer; the low bits of *p encode
 *                 the entry type (REMSET_TYPE_MASK).
 * start_nursery /
 * end_nursery   - bounds of the nursery; entries whose target address lies
 *                 inside the nursery are skipped (those objects are scanned
 *                 wholesale anyway).
 * global        - whether this entry comes from the global remset (affects
 *                 statistics and whether we re-add pinned targets to the
 *                 global remset).
 * queue         - gray queue to push copied/marked objects onto.
 *
 * Returns the cursor advanced past this entry (entries are 1, 2 or 4 words
 * wide depending on type), or NULL is unreachable (the default case asserts).
 */
static mword*
handle_remset (mword *p, void *start_nursery, void *end_nursery, gboolean global, SgenGrayQueue *queue)
{
	void **ptr;
	mword count;
	mword desc;

	if (global)
		HEAVY_STAT (++stat_global_remsets_processed);
	else
		HEAVY_STAT (++stat_local_remsets_processed);

	/* FIXME: exclude stack locations */
	switch ((*p) & REMSET_TYPE_MASK) {
	case REMSET_LOCATION:
		/* A single heap slot holding a (possibly) nursery pointer.
		 * REMSET_LOCATION needs no mask stripping - presumably its tag
		 * bits are zero; the other cases strip REMSET_TYPE_MASK. */
		ptr = (void**)(*p);
		//__builtin_prefetch (ptr);
		if (((void*)ptr < start_nursery || (void*)ptr >= end_nursery)) {
			gpointer old = *ptr;
			/* copy_object updates *ptr in place to point at the copy. */
			major_collector.copy_object (ptr, queue);
			DEBUG (9, fprintf (gc_debug_file, "Overwrote remset at %p with %p\n", ptr, *ptr));
			if (old)
				binary_protocol_ptr_update (ptr, old, *ptr, (gpointer)LOAD_VTABLE (*ptr), mono_sgen_safe_object_get_size (*ptr));
			/* *ptr still in the nursery after copying means the target
			 * could not be moved (pinned). */
			if (!global && *ptr >= start_nursery && *ptr < end_nursery) {
				/*
				 * If the object is pinned, each reference to it from nonpinned objects
				 * becomes part of the global remset, which can grow very large.
				 */
				DEBUG (9, fprintf (gc_debug_file, "Add to global remset because of pinning %p (%p %s)\n", ptr, *ptr, mono_sgen_safe_name (*ptr)));
				mono_sgen_add_to_global_remset (ptr);
			}
		} else {
			DEBUG (9, fprintf (gc_debug_file, "Skipping remset at %p holding %p\n", ptr, *ptr));
		}
		return p + 1;
	case REMSET_RANGE:
		/* A contiguous range of p[1] pointer-sized slots starting at the
		 * masked address. */
		ptr = (void**)(*p & ~REMSET_TYPE_MASK);
		if (((void*)ptr >= start_nursery && (void*)ptr < end_nursery))
			return p + 2;
		count = p [1];
		while (count-- > 0) {
			major_collector.copy_object (ptr, queue);
			DEBUG (9, fprintf (gc_debug_file, "Overwrote remset at %p with %p (count: %d)\n", ptr, *ptr, (int)count));
			if (!global && *ptr >= start_nursery && *ptr < end_nursery)
				mono_sgen_add_to_global_remset (ptr);
			++ptr;
		}
		return p + 2;
	case REMSET_OBJECT:
		/* A whole object whose fields must be scanned for nursery refs. */
		ptr = (void**)(*p & ~REMSET_TYPE_MASK);
		if (((void*)ptr >= start_nursery && (void*)ptr < end_nursery))
			return p + 1;
		mono_sgen_get_minor_scan_object () ((char*)ptr, queue);
		return p + 1;
	case REMSET_VTYPE: {
		/* An array of p[2] inline value-type instances at the masked
		 * address, each p[3] bytes apart, scanned with descriptor p[1]. */
		ScanVTypeFunc scan_vtype = mono_sgen_get_minor_scan_vtype ();
		size_t skip_size;

		ptr = (void**)(*p & ~REMSET_TYPE_MASK);
		if (((void*)ptr >= start_nursery && (void*)ptr < end_nursery))
			return p + 4;
		desc = p [1];
		count = p [2];
		skip_size = p [3];
		while (count-- > 0) {
			scan_vtype ((char*)ptr, desc, queue);
			ptr = (void**)((char*)ptr + skip_size);
		}
		return p + 4;
	}
	default:
		g_assert_not_reached ();
	}
	return NULL;
}
/*
 * Root-scanning callback for the pinned-object space: marks (pins) the
 * object at addr, records pinning statistics, and pushes the object onto
 * the gray queue so its children get scanned.
 *
 * addr      - start of the object to pin.
 * slot_size - size of its allocation slot (unused here).
 * queue     - gray queue for subsequent scanning.
 *
 * Note the ordering is deliberate: the binary-protocol record is emitted
 * first, the stats registration checks IS_PINNED *before* PIN so each
 * object is registered at most once per collection.
 */
static void
pin_pinned_object_callback (void *addr, size_t slot_size, SgenGrayQueue *queue)
{
	binary_protocol_pin (addr, (gpointer)SGEN_LOAD_VTABLE (addr), mono_sgen_safe_object_get_size ((MonoObject*)addr));
	/* Only register the object the first time it gets pinned. */
	if (!SGEN_OBJECT_IS_PINNED (addr))
		mono_sgen_pin_stats_register_object ((char*) addr, mono_sgen_safe_object_get_size ((MonoObject*) addr));
	SGEN_PIN_OBJECT (addr);
	GRAY_OBJECT_ENQUEUE (queue, addr);
	DEBUG (6, fprintf (gc_debug_file, "Marked pinned object %p (%s) from roots\n", addr, mono_sgen_safe_name (addr)));
}
/*
 * Copy-or-mark function used during major collections: given a slot
 * (*obj_slot) holding a reference, either copy the referent into to-space
 * and update the slot, or mark/pin it in place when it cannot be moved
 * (pinned, LOS, pinned-chunk, or already in to-space).
 *
 * obj_slot - address of the reference to process; may be overwritten.
 * queue    - gray queue onto which newly pinned objects are pushed.
 */
static void
major_copy_or_mark_object (void **obj_slot, SgenGrayQueue *queue)
{
	char *forwarded;
	char *obj = *obj_slot;
	mword objsize;

	DEBUG (9, g_assert (current_collection_generation == GENERATION_OLD));

	HEAVY_STAT (++stat_copy_object_called_major);

	DEBUG (9, fprintf (gc_debug_file, "Precise copy of %p from %p", obj, obj_slot));

	/*
	 * obj must belong to one of:
	 *
	 * 1. the nursery
	 * 2. the LOS
	 * 3. a pinned chunk
	 * 4. a non-to-space section of the major heap
	 * 5. a to-space section of the major heap
	 *
	 * In addition, objects in 1, 2 and 4 might also be pinned.
	 * Objects in 1 and 4 might be forwarded.
	 *
	 * Before we can copy the object we must make sure that we are
	 * allowed to, i.e. that the object not pinned, not already
	 * forwarded and doesn't belong to the LOS, a pinned chunk, or
	 * a to-space section.
	 *
	 * We are usually called for to-space objects (5) when we have
	 * two remset entries for the same reference.  The first entry
	 * copies the object and updates the reference and the second
	 * calls us with the updated reference that points into
	 * to-space.  There might also be other circumstances where we
	 * get to-space objects.
	 */

	/* Already forwarded: just redirect the slot to the copy. */
	if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) {
		DEBUG (9, g_assert (((MonoVTable*)SGEN_LOAD_VTABLE(obj))->gc_descr));
		DEBUG (9, fprintf (gc_debug_file, " (already forwarded to %p)\n", forwarded));
		HEAVY_STAT (++stat_major_copy_object_failed_forwarded);
		*obj_slot = forwarded;
		return;
	}
	/* Pinned objects stay in place; the slot keeps its value. */
	if (SGEN_OBJECT_IS_PINNED (obj)) {
		DEBUG (9, g_assert (((MonoVTable*)SGEN_LOAD_VTABLE(obj))->gc_descr));
		DEBUG (9, fprintf (gc_debug_file, " (pinned, no change)\n"));
		HEAVY_STAT (++stat_major_copy_object_failed_pinned);
		return;
	}

	/* Nursery objects (1) are always copied. */
	if (ptr_in_nursery (obj))
		goto copy;

	/*
	 * At this point we know obj is not pinned, not forwarded and
	 * belongs to 2, 3, 4, or 5.
	 *
	 * LOS object (2) are simple, at least until we always follow
	 * the rule: if objsize > SGEN_MAX_SMALL_OBJ_SIZE, pin the
	 * object and return it.  At the end of major collections, we
	 * walk the los list and if the object is pinned, it is
	 * marked, otherwise it can be freed.
	 *
	 * Pinned chunks (3) and major heap sections (4, 5) both
	 * reside in blocks, which are always aligned, so once we've
	 * eliminated LOS objects, we can just access the block and
	 * see whether it's a pinned chunk or a major heap section.
	 */

	objsize = SGEN_ALIGN_UP (mono_sgen_safe_object_get_size ((MonoObject*)obj));

	/* LOS objects and pinned-chunk objects are marked in place, never moved. */
	if (G_UNLIKELY (objsize > SGEN_MAX_SMALL_OBJ_SIZE || obj_is_from_pinned_alloc (obj))) {
		if (SGEN_OBJECT_IS_PINNED (obj))
			return;
		DEBUG (9, fprintf (gc_debug_file, " (marked LOS/Pinned %p (%s), size: %zd)\n", obj, mono_sgen_safe_name (obj), objsize));
		binary_protocol_pin (obj, (gpointer)SGEN_LOAD_VTABLE (obj), mono_sgen_safe_object_get_size ((MonoObject*)obj));
		SGEN_PIN_OBJECT (obj);
		GRAY_OBJECT_ENQUEUE (queue, obj);
		HEAVY_STAT (++stat_major_copy_object_failed_large_pinned);
		return;
	}

	/*
	 * Now we know the object is in a major heap section.  All we
	 * need to do is check whether it's already in to-space (5) or
	 * not (4).
	 */
	if (MAJOR_OBJ_IS_IN_TO_SPACE (obj)) {
		DEBUG (9, g_assert (objsize <= SGEN_MAX_SMALL_OBJ_SIZE));
		DEBUG (9, fprintf (gc_debug_file, " (already copied)\n"));
		HEAVY_STAT (++stat_major_copy_object_failed_to_space);
		return;
	}

 copy:
	HEAVY_STAT (++stat_objects_copied_major);

	*obj_slot = copy_object_no_checks (obj, queue);
}
/*
 * Rebuild the nursery free-fragment list after a collection: the gaps
 * between consecutive pinned objects (plus the tail after the last one)
 * become allocatable fragments.
 *
 * nursery_section - the nursery GCMemSection (scan starts are reset here).
 * start           - sorted array of pinned-object addresses in the nursery.
 * num_entries     - number of entries in start.
 *
 * Returns the total number of bytes made available (fragment_total).
 */
mword
mono_sgen_build_nursery_fragments (GCMemSection *nursery_section, void **start, int num_entries)
{
	char *frag_start, *frag_end;
	size_t frag_size;
	int i;

#ifdef NALLOC_DEBUG
	reset_alloc_records ();
#endif

	/* Return all previous fragments to the freelist.  nursery_fragments
	 * is read through unmask() - presumably the low bits carry a tag or
	 * lock-free marker; TODO confirm against the fragment allocator. */
	while (unmask (nursery_fragments)) {
		Fragment *nf = unmask (nursery_fragments);
		Fragment *next = unmask (nf->next);

		nf->next_free = fragment_freelist;
		fragment_freelist = nf;
		nursery_fragments = next;
	}
	frag_start = nursery_start;
	fragment_total = 0;
	/* clear scan starts */
	memset (nursery_section->scan_starts, 0, nursery_section->num_scan_start * sizeof (gpointer));
	for (i = 0; i < num_entries; ++i) {
		frag_end = start [i];
		/* remove the pin bit from pinned objects */
		SGEN_UNPIN_OBJECT (frag_end);
		/* Pinned objects make handy scan-start markers. */
		nursery_section->scan_starts [((char*)frag_end - (char*)nursery_section->data)/SGEN_SCAN_START_SIZE] = frag_end;
		frag_size = frag_end - frag_start;
		if (frag_size)
			add_nursery_frag (frag_size, frag_start, frag_end);
		/* The next fragment can begin only after this pinned object. */
		frag_size = SGEN_ALIGN_UP (mono_sgen_safe_object_get_size ((MonoObject*)start [i]));
#ifdef NALLOC_DEBUG
		add_alloc_record (start [i], frag_size, PINNING);
#endif
		frag_start = (char*)start [i] + frag_size;
	}
	nursery_last_pinned_end = frag_start;
	/* The space after the last pinned object is one more fragment. */
	frag_end = nursery_end;
	frag_size = frag_end - frag_start;
	if (frag_size)
		add_nursery_frag (frag_size, frag_start, frag_end);
	if (!unmask (nursery_fragments)) {
		/* No free space at all: every byte is covered by pinned objects. */
		DEBUG (1, fprintf (gc_debug_file, "Nursery fully pinned (%d)\n", num_entries));
		for (i = 0; i < num_entries; ++i) {
			DEBUG (3, fprintf (gc_debug_file, "Bastard pinning obj %p (%s), size: %d\n", start [i], mono_sgen_safe_name (start [i]), mono_sgen_safe_object_get_size (start [i])));
		}
	}
	return fragment_total;
}