void describe_ptr (char *ptr) { MonoVTable *vtable; mword desc; int type; char *start; if (sgen_ptr_in_nursery (ptr)) { printf ("Pointer inside nursery.\n"); } else { if (sgen_ptr_is_in_los (ptr, &start)) { if (ptr == start) printf ("Pointer is the start of object %p in LOS space.\n", start); else printf ("Pointer is at offset 0x%x of object %p in LOS space.\n", (int)(ptr - start), start); ptr = start; } else if (major_collector.ptr_is_in_non_pinned_space (ptr)) { printf ("Pointer inside oldspace.\n"); } else if (major_collector.obj_is_from_pinned_alloc (ptr)) { printf ("Pointer is inside a pinned chunk.\n"); } else { printf ("Pointer unknown.\n"); return; } } if (object_is_pinned (ptr)) printf ("Object is pinned.\n"); if (object_is_forwarded (ptr)) printf ("Object is forwared.\n"); // FIXME: Handle pointers to the inside of objects vtable = (MonoVTable*)LOAD_VTABLE (ptr); printf ("VTable: %p\n", vtable); if (vtable == NULL) { printf ("VTable is invalid (empty).\n"); return; } if (sgen_ptr_in_nursery (vtable)) { printf ("VTable is invalid (points inside nursery).\n"); return; } printf ("Class: %s\n", vtable->klass->name); desc = ((GCVTable*)vtable)->desc; printf ("Descriptor: %lx\n", (long)desc); type = desc & 0x7; printf ("Descriptor type: %d (%s)\n", type, descriptor_types [type]); }
/*
 * Record a pointer into the global remembered set (sequential store
 * buffer).  PTR is the address of a slot outside the nursery whose stored
 * value points into the nursery; minor collections later scan these slots
 * as roots.
 *
 * When a parallel collection is running the global remset is shared
 * between threads, so all accesses are bracketed by
 * LOCK_GLOBAL_REMSET / UNLOCK_GLOBAL_REMSET.
 */
static void
sgen_ssb_record_pointer (gpointer ptr)
{
	RememberedSet *rs;
	gboolean lock = sgen_collection_is_parallel ();
	gpointer obj = *(gpointer*)ptr;

	/* The slot must be outside the nursery and its target inside it. */
	g_assert (!sgen_ptr_in_nursery (ptr) && sgen_ptr_in_nursery (obj));

	if (lock)
		LOCK_GLOBAL_REMSET;

	/* Skip slots that are already present in the global remset. */
	if (!global_remset_location_was_not_added (ptr))
		goto done;

	if (G_UNLIKELY (do_pin_stats))
		sgen_pin_stats_register_global_remset (obj);

	SGEN_LOG (8, "Adding global remset for %p", ptr);
	binary_protocol_global_remset (ptr, *(gpointer*)ptr, (gpointer)SGEN_LOAD_VTABLE (obj));

	HEAVY_STAT (++stat_global_remsets_added);

	/*
	 * FIXME: If an object remains pinned, we need to add it at every minor collection.
	 * To avoid uncontrolled growth of the global remset, only add each pointer once.
	 */
	if (global_remset->store_next + 3 < global_remset->end_set) {
		/* Fast path: room left in the current buffer. */
		*(global_remset->store_next++) = (mword)ptr;
		goto done;
	}

	/* Current buffer full: prepend a fresh one of the same size and store there. */
	rs = sgen_alloc_remset (global_remset->end_set - global_remset->data, NULL, TRUE);
	rs->next = global_remset;
	global_remset = rs;
	*(global_remset->store_next++) = (mword)ptr;

#if SGEN_MAX_DEBUG_LEVEL >= 4
	{
		int global_rs_size = 0;

		/* Debug-only: total size across all chained remset buffers. */
		for (rs = global_remset; rs; rs = rs->next) {
			global_rs_size += rs->store_next - rs->data;
		}
		SGEN_LOG (4, "Global remset now has size %d", global_rs_size);
	}
#endif

 done:
	if (lock)
		UNLOCK_GLOBAL_REMSET;
}
/* Debug sanity checks on OBJ before it is enqueued during a concurrent collection. */
static void
concurrent_enqueue_check (GCObject *obj)
{
	/* Enqueueing here only makes sense while a concurrent collection runs. */
	g_assert (sgen_concurrent_collection_in_progress ());
	/* Nursery objects are never enqueued through this path. */
	g_assert (!sgen_ptr_in_nursery (obj));
	/* Every valid object carries a vtable. */
	g_assert (SGEN_LOAD_VTABLE (obj) != NULL);
}
/*
 * Look up OBJ in the cement hash, registering it on first sight, and bump
 * its hit count.  Returns TRUE iff OBJ is already cemented (its count had
 * reached SGEN_CEMENT_THRESHOLD before this call); FALSE otherwise,
 * including on hash-bucket collision with a different object.
 */
gboolean
sgen_cement_lookup_or_register (char *obj, gboolean concurrent_cementing)
{
	CementHashEntry *entries;
	int slot;

	if (!cement_enabled)
		return FALSE;

	if (concurrent_cementing)
		SGEN_ASSERT (5, cement_concurrent, "Cementing wasn't inited with concurrent flag");

	/* Concurrent cementing works on its own copy of the hash. */
	entries = concurrent_cementing ? cement_hash_concurrent : cement_hash;

	/*
	 * We use modulus hashing, which is fine with constants as gcc
	 * can optimize them to multiplication, but with variable
	 * values it would be a bad idea given armv7 has no hardware
	 * for division, making it 20x slower than a multiplication.
	 *
	 * This code path can be quite hot, depending on the workload,
	 * so if we make the hash size user-adjustable we should
	 * figure out something not involving division.
	 */
	slot = mono_aligned_addr_hash (obj) % SGEN_CEMENT_HASH_SIZE;

	SGEN_ASSERT (5, sgen_ptr_in_nursery (obj), "Can only cement pointers to nursery objects");

	if (!entries [slot].obj) {
		SGEN_ASSERT (5, !entries [slot].count, "Cementing hash inconsistent");
		entries [slot].obj = obj;
	} else if (entries [slot].obj != obj) {
		/* Bucket collision with a different object: give up on cementing OBJ. */
		return FALSE;
	}

	if (entries [slot].count >= SGEN_CEMENT_THRESHOLD)
		return TRUE;

	if (++entries [slot].count == SGEN_CEMENT_THRESHOLD) {
		/* OBJ just crossed the threshold: emit the tracing/protocol events. */
		if (G_UNLIKELY (MONO_GC_OBJ_CEMENTED_ENABLED())) {
			MonoVTable *vt = (MonoVTable*)SGEN_LOAD_VTABLE (obj);
			MONO_GC_OBJ_CEMENTED ((mword)obj, sgen_safe_object_get_size ((MonoObject*)obj),
					vt->klass->name_space, vt->klass->name);
		}
#ifdef SGEN_BINARY_PROTOCOL
		binary_protocol_cement (obj, (gpointer)SGEN_LOAD_VTABLE (obj),
				sgen_safe_object_get_size ((MonoObject*)obj));
#endif
	}

	return FALSE;
}
static void describe_pointer (char *ptr) { if (sgen_ptr_in_nursery (ptr)) { describe_nursery_ptr (ptr); } else if (major_collector.describe_pointer (ptr)) { //Nothing really } else if (!mono_sgen_los_describe_pointer (ptr)) { fprintf (gc_debug_file, "non-heap-ptr"); } }
/*
 * Parallel allocation for promoting OBJ of size OBJSIZE.  Nursery objects
 * go through the nursery promotion allocator; anything else (only seen
 * during a major collection) is allocated directly in the major heap.
 */
static char*
minor_par_alloc_for_promotion (char *obj, size_t objsize, gboolean has_references)
{
	if (sgen_ptr_in_nursery (obj))
		return par_alloc_for_promotion (obj, objsize, has_references);
	/* We only need to check for a non-nursery object if we're doing a major collection. */
	return major_collector.par_alloc_object (objsize, has_references);
}
/* TRUE iff OBJECT lies in any GC-managed space (nursery, LOS or major heap). */
static gboolean
ptr_in_heap (char *object)
{
	return sgen_ptr_in_nursery (object)
		|| sgen_los_is_valid_object (object)
		|| major_collector.is_valid_object (object);
}
/*
 * TRUE iff OBJECT is a valid object pointer.  Nursery pointers are checked
 * against the nursery dump; LOS and major-heap pointers are validated by
 * their respective collectors.
 */
static gboolean
is_valid_object_pointer (char *object)
{
	if (sgen_ptr_in_nursery (object))
		return find_object_in_nursery_dump (object);
	return sgen_los_is_valid_object (object)
		|| major_collector.is_valid_object (object);
}
gboolean sgen_cement_lookup_or_register (char *obj) { guint hv; int i; CementHashEntry *hash; gboolean concurrent_cementing = sgen_concurrent_collection_in_progress (); if (!cement_enabled) return FALSE; if (concurrent_cementing) SGEN_ASSERT (5, cement_concurrent, "Cementing wasn't inited with concurrent flag"); if (concurrent_cementing) hash = cement_hash_concurrent; else hash = cement_hash; hv = mono_aligned_addr_hash (obj); i = SGEN_CEMENT_HASH (hv); SGEN_ASSERT (5, sgen_ptr_in_nursery (obj), "Can only cement pointers to nursery objects"); if (!hash [i].obj) { SGEN_ASSERT (5, !hash [i].count, "Cementing hash inconsistent"); hash [i].obj = obj; } else if (hash [i].obj != obj) { return FALSE; } if (hash [i].count >= SGEN_CEMENT_THRESHOLD) return TRUE; ++hash [i].count; if (hash [i].count == SGEN_CEMENT_THRESHOLD) { SGEN_ASSERT (9, SGEN_OBJECT_IS_PINNED (obj), "Can only cement pinned objects"); SGEN_CEMENT_OBJECT (obj); if (G_UNLIKELY (MONO_GC_OBJ_CEMENTED_ENABLED())) { MonoVTable *vt G_GNUC_UNUSED = (MonoVTable*)SGEN_LOAD_VTABLE (obj); MONO_GC_OBJ_CEMENTED ((mword)obj, sgen_safe_object_get_size ((MonoObject*)obj), vt->klass->name_space, vt->klass->name); } binary_protocol_cement (obj, (gpointer)SGEN_LOAD_VTABLE (obj), (int)sgen_safe_object_get_size ((MonoObject*)obj)); } return FALSE; }
/* TRUE iff OBJ is currently cemented, i.e. registered in the cement hash
 * with a count at or above SGEN_CEMENT_THRESHOLD. */
gboolean
sgen_cement_lookup (char *obj)
{
	int slot = mono_aligned_addr_hash (obj) % SGEN_CEMENT_HASH_SIZE;

	SGEN_ASSERT (5, sgen_ptr_in_nursery (obj), "Looking up cementing for non-nursery objects makes no sense");

	if (!cement_enabled)
		return FALSE;
	/* Empty slot or a different object in the bucket: not cemented. */
	if (!cement_hash [slot].obj || cement_hash [slot].obj != obj)
		return FALSE;
	return cement_hash [slot].count >= SGEN_CEMENT_THRESHOLD;
}
/* TRUE iff OBJ is registered in the cement hash with its `forced` flag set. */
gboolean
sgen_cement_is_forced (GCObject *obj)
{
	guint hash_val = sgen_aligned_addr_hash (obj);
	int slot = SGEN_CEMENT_HASH (hash_val);

	SGEN_ASSERT (5, sgen_ptr_in_nursery (obj), "Looking up cementing for non-nursery objects makes no sense");

	if (!cement_enabled)
		return FALSE;
	/* Empty slot or a different object in the bucket: not forced. */
	if (!cement_hash [slot].obj || cement_hash [slot].obj != obj)
		return FALSE;
	return cement_hash [slot].forced;
}
/*
 * Look up OBJ in the cement hash, registering it on first sight, and bump
 * its hit count.  Returns TRUE iff OBJ is already cemented (count had
 * reached SGEN_CEMENT_THRESHOLD before this call); FALSE otherwise,
 * including on hash-bucket collision with a different object.
 *
 * Lock-free: the slot is claimed with a compare-and-swap and the count is
 * bumped with an interlocked increment, so exactly one thread observes
 * the transition to the threshold and performs the actual cementing.
 * NOTE(review): the CAS/increment ordering is load-bearing — do not
 * reorder statements here.
 */
gboolean
sgen_cement_lookup_or_register (GCObject *obj)
{
	guint hv;
	int i;
	CementHashEntry *hash = cement_hash;

	if (!cement_enabled)
		return FALSE;

	hv = sgen_aligned_addr_hash (obj);
	i = SGEN_CEMENT_HASH (hv);

	SGEN_ASSERT (5, sgen_ptr_in_nursery (obj), "Can only cement pointers to nursery objects");

	if (!hash [i].obj) {
		GCObject *old_obj;
		/* Try to claim the empty slot for OBJ; NULL result means we won. */
		old_obj = InterlockedCompareExchangePointer ((gpointer*)&hash [i].obj, obj, NULL);
		/* Check if the slot was occupied by some other object */
		if (old_obj != NULL && old_obj != obj)
			return FALSE;
	} else if (hash [i].obj != obj) {
		return FALSE;
	}

	if (hash [i].count >= SGEN_CEMENT_THRESHOLD)
		return TRUE;

	/* Only the thread whose increment reaches the threshold cements OBJ. */
	if (InterlockedIncrement ((gint32*)&hash [i].count) == SGEN_CEMENT_THRESHOLD) {
		SGEN_ASSERT (9, sgen_get_current_collection_generation () >= 0, "We can only cement objects when we're in a collection pause.");
		SGEN_ASSERT (9, SGEN_OBJECT_IS_PINNED (obj), "Can only cement pinned objects");
		SGEN_CEMENT_OBJECT (obj);

		binary_protocol_cement (obj, (gpointer)SGEN_LOAD_VTABLE (obj),
				(int)sgen_safe_object_get_size (obj));
	}

	return FALSE;
}
/*
 * Process every entry of the global remembered set through
 * handle_remset ().  Entries whose location still points into the nursery
 * afterwards are compacted back into the same buffer (in place, via
 * STORE_POS) so they are revisited next time; all other entries are
 * dropped, which keeps the global remset from growing indefinitely
 * between major collections.
 */
static void
sgen_ssb_begin_scan_remsets (void *start_nursery, void *end_nursery, SgenGrayQueue *queue)
{
	RememberedSet *remset;
	mword *p, *next_p, *store_pos;

	/* the global one */
	for (remset = global_remset; remset; remset = remset->next) {
		SGEN_LOG (4, "Scanning global remset range: %p-%p, size: %td", remset->data, remset->store_next, remset->store_next - remset->data);
		/* STORE_POS trails P: surviving entries are compacted to the front. */
		store_pos = remset->data;
		for (p = remset->data; p < remset->store_next; p = next_p) {
			void **ptr = (void**)p [0];

			/*Ignore previously processed remset.*/
			if (!global_remset_location_was_not_added (ptr)) {
				next_p = p + 1;
				continue;
			}

			/* handle_remset returns the address of the next entry. */
			next_p = handle_remset (p, start_nursery, end_nursery, TRUE, queue);

			/*
			 * Clear global remsets of locations which no longer point to the
			 * nursery. Otherwise, they could grow indefinitely between major
			 * collections.
			 *
			 * Since all global remsets are location remsets, we don't need to unmask the pointer.
			 */
			if (sgen_ptr_in_nursery (*ptr)) {
				*store_pos ++ = p [0];
				HEAVY_STAT (++stat_global_remsets_readded);
			}
		}

		/* Truncate the remset */
		remset->store_next = store_pos;
	}
}