gboolean
sgen_cement_lookup_or_register (char *obj, gboolean concurrent_cementing)
{
	int i;
	CementHashEntry *hash;

	if (!cement_enabled)
		return FALSE;

	if (concurrent_cementing)
		SGEN_ASSERT (5, cement_concurrent, "Cementing wasn't inited with concurrent flag");

	if (concurrent_cementing)
		hash = cement_hash_concurrent;
	else
		hash = cement_hash;

	/*
	 * We use modulus hashing, which is fine with constants as gcc
	 * can optimize them to multiplication, but with variable
	 * values it would be a bad idea given armv7 has no hardware
	 * for division, making it 20x slower than a multiplication.
	 *
	 * This code path can be quite hot, depending on the workload,
	 * so if we make the hash size user-adjustable we should
	 * figure out something not involving division.
	 */
	i = mono_aligned_addr_hash (obj) % SGEN_CEMENT_HASH_SIZE;

	SGEN_ASSERT (5, sgen_ptr_in_nursery (obj), "Can only cement pointers to nursery objects");

	if (!hash [i].obj) {
		SGEN_ASSERT (5, !hash [i].count, "Cementing hash inconsistent");
		hash [i].obj = obj;
	} else if (hash [i].obj != obj) {
		return FALSE;
	}

	if (hash [i].count >= SGEN_CEMENT_THRESHOLD)
		return TRUE;

	++hash [i].count;
	if (hash [i].count == SGEN_CEMENT_THRESHOLD) {
		if (G_UNLIKELY (MONO_GC_OBJ_CEMENTED_ENABLED())) {
			MonoVTable *vt = (MonoVTable*)SGEN_LOAD_VTABLE (obj);
			MONO_GC_OBJ_CEMENTED ((mword)obj, sgen_safe_object_get_size ((MonoObject*)obj),
					vt->klass->name_space, vt->klass->name);
		}
#ifdef SGEN_BINARY_PROTOCOL
		binary_protocol_cement (obj, (gpointer)SGEN_LOAD_VTABLE (obj),
				sgen_safe_object_get_size ((MonoObject*)obj));
#endif
	}

	return FALSE;
}
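The comment above explains why a fixed SGEN_CEMENT_HASH_SIZE keeps the modulus cheap: gcc turns a constant divisor into a multiplication, while a run-time divisor would hit armv7's lack of a hardware divider. A minimal sketch of the usual workaround if the size ever became user-adjustable, assuming it were constrained to a power of two (hypothetical helper, not part of the SGen source):

/* Hypothetical: with a power-of-two table size the slot index can be
 * computed with a mask, avoiding the division entirely. */
#define CEMENT_HASH_SIZE_POW2 64   /* assumed power of two */

static inline int
cement_hash_index_pow2 (guint hv)
{
	/* equivalent to hv % CEMENT_HASH_SIZE_POW2 when the size is 2^n */
	return (int)(hv & (CEMENT_HASH_SIZE_POW2 - 1));
}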
Example #2
gboolean
sgen_cement_lookup_or_register (char *obj)
{
	guint hv;
	int i;
	CementHashEntry *hash;
	gboolean concurrent_cementing = sgen_concurrent_collection_in_progress ();

	if (!cement_enabled)
		return FALSE;

	if (concurrent_cementing)
		SGEN_ASSERT (5, cement_concurrent, "Cementing wasn't inited with concurrent flag");

	if (concurrent_cementing)
		hash = cement_hash_concurrent;
	else
		hash = cement_hash;

	hv = mono_aligned_addr_hash (obj);
	i = SGEN_CEMENT_HASH (hv);

	SGEN_ASSERT (5, sgen_ptr_in_nursery (obj), "Can only cement pointers to nursery objects");

	if (!hash [i].obj) {
		SGEN_ASSERT (5, !hash [i].count, "Cementing hash inconsistent");
		hash [i].obj = obj;
	} else if (hash [i].obj != obj) {
		return FALSE;
	}

	if (hash [i].count >= SGEN_CEMENT_THRESHOLD)
		return TRUE;

	++hash [i].count;
	if (hash [i].count == SGEN_CEMENT_THRESHOLD) {
		SGEN_ASSERT (9, SGEN_OBJECT_IS_PINNED (obj), "Can only cement pinned objects");
		SGEN_CEMENT_OBJECT (obj);

		if (G_UNLIKELY (MONO_GC_OBJ_CEMENTED_ENABLED())) {
			MonoVTable *vt G_GNUC_UNUSED = (MonoVTable*)SGEN_LOAD_VTABLE (obj);
			MONO_GC_OBJ_CEMENTED ((mword)obj, sgen_safe_object_get_size ((MonoObject*)obj),
					vt->klass->name_space, vt->klass->name);
		}
		binary_protocol_cement (obj, (gpointer)SGEN_LOAD_VTABLE (obj),
				(int)sgen_safe_object_get_size ((MonoObject*)obj));
	}

	return FALSE;
}
Example #3
static void
concurrent_enqueue_check (GCObject *obj)
{
    g_assert (sgen_concurrent_collection_in_progress ());
    g_assert (!sgen_ptr_in_nursery (obj));
    g_assert (SGEN_LOAD_VTABLE (obj));
}
static void
major_copy_or_mark_object (void **ptr, SgenGrayQueue *queue)
{
	void *obj = *ptr;
	MSBlockInfo *block;

	HEAVY_STAT (++stat_copy_object_called_major);

	DEBUG (9, g_assert (obj));
	DEBUG (9, g_assert (current_collection_generation == GENERATION_OLD));

	if (ptr_in_nursery (obj)) {
		int word, bit;
		char *forwarded;

		if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) {
			*ptr = forwarded;
			return;
		}
		if (SGEN_OBJECT_IS_PINNED (obj))
			return;

		HEAVY_STAT (++stat_objects_copied_major);

		obj = copy_object_no_checks (obj, queue);
		*ptr = obj;

		/*
		 * FIXME: See comment for copy_object_no_checks().  If
		 * we have that, we can let the allocation function
		 * give us the block info, too, and we won't have to
		 * re-fetch it.
		 */
		block = MS_BLOCK_FOR_OBJ (obj);
		MS_CALC_MARK_BIT (word, bit, obj);
		DEBUG (9, g_assert (!MS_MARK_BIT (block, word, bit)));
		MS_SET_MARK_BIT (block, word, bit);
	} else {
#ifdef FIXED_HEAP
		if (MS_PTR_IN_SMALL_MAJOR_HEAP (obj))
#else
		mword objsize;

		objsize = SGEN_ALIGN_UP (mono_sgen_safe_object_get_size ((MonoObject*)obj));

		if (objsize <= SGEN_MAX_SMALL_OBJ_SIZE)
#endif
		{
			block = MS_BLOCK_FOR_OBJ (obj);
			MS_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);
		} else {
			if (SGEN_OBJECT_IS_PINNED (obj))
				return;
			binary_protocol_pin (obj, (gpointer)SGEN_LOAD_VTABLE (obj), mono_sgen_safe_object_get_size ((MonoObject*)obj));
			SGEN_PIN_OBJECT (obj);
			/* FIXME: only enqueue if object has references */
			GRAY_OBJECT_ENQUEUE (queue, obj);
		}
	}
}
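MS_CALC_MARK_BIT and MS_SET_MARK_BIT above split an object's position in its block into a word index and a bit index inside the block's mark bitmap. A generic sketch of that kind of bookkeeping, assuming an 8-byte allocation granule (illustrative only; the real macros are defined by the major collector, not reproduced here):

#include <stdint.h>
#include <stddef.h>

#define OBJ_GRANULE 8   /* assumed allocation granularity in bytes */

static void
mark_bitmap_set (uintptr_t *bitmap, char *block_start, char *obj)
{
	size_t index = (size_t)(obj - block_start) / OBJ_GRANULE;
	size_t word = index / (sizeof (uintptr_t) * 8);
	size_t bit = index % (sizeof (uintptr_t) * 8);
	bitmap [word] |= (uintptr_t)1 << bit;   /* mark the object */
}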
Example #5
static void
pin_pinned_object_callback (void *addr, size_t slot_size, SgenGrayQueue *queue)
{
	binary_protocol_pin (addr, (gpointer)SGEN_LOAD_VTABLE (addr), sgen_safe_object_get_size ((MonoObject*)addr));
	if (!SGEN_OBJECT_IS_PINNED (addr))
		sgen_pin_stats_register_object ((char*) addr, sgen_safe_object_get_size ((MonoObject*) addr));
	SGEN_PIN_OBJECT (addr);
	GRAY_OBJECT_ENQUEUE (queue, addr);
	DEBUG (6, fprintf (gc_debug_file, "Marked pinned object %p (%s) from roots\n", addr, sgen_safe_name (addr)));
}
Example #6
static gboolean
is_opaque_object (GCObject *obj)
{
	MonoVTable *vt = SGEN_LOAD_VTABLE (obj);
	if ((vt->gc_bits & SGEN_GC_BIT_BRIDGE_OPAQUE_OBJECT) == SGEN_GC_BIT_BRIDGE_OPAQUE_OBJECT) {
		SGEN_LOG (6, "ignoring %s\n", vt->klass->name);
		++ignored_objects;
		return TRUE;
	}
	return FALSE;
}
Example #7
static void
sgen_ssb_record_pointer (gpointer ptr)
{
	RememberedSet *rs;
	gboolean lock = sgen_collection_is_parallel ();
	gpointer obj = *(gpointer*)ptr;

	g_assert (!sgen_ptr_in_nursery (ptr) && sgen_ptr_in_nursery (obj));

	if (lock)
		LOCK_GLOBAL_REMSET;

	if (!global_remset_location_was_not_added (ptr))
		goto done;

	if (G_UNLIKELY (do_pin_stats))
		sgen_pin_stats_register_global_remset (obj);

	SGEN_LOG (8, "Adding global remset for %p", ptr);
	binary_protocol_global_remset (ptr, *(gpointer*)ptr, (gpointer)SGEN_LOAD_VTABLE (obj));

	HEAVY_STAT (++stat_global_remsets_added);

	/* 
	 * FIXME: If an object remains pinned, we need to add it at every minor collection.
	 * To avoid uncontrolled growth of the global remset, only add each pointer once.
	 */
	if (global_remset->store_next + 3 < global_remset->end_set) {
		*(global_remset->store_next++) = (mword)ptr;
		goto done;
	}
	rs = sgen_alloc_remset (global_remset->end_set - global_remset->data, NULL, TRUE);
	rs->next = global_remset;
	global_remset = rs;
	*(global_remset->store_next++) = (mword)ptr;

#if SGEN_MAX_DEBUG_LEVEL >= 4
	{
		int global_rs_size = 0;

		for (rs = global_remset; rs; rs = rs->next) {
			global_rs_size += rs->store_next - rs->data;
		}
		SGEN_LOG (4, "Global remset now has size %d", global_rs_size);
	}
#endif

 done:
	if (lock)
		UNLOCK_GLOBAL_REMSET;
}
Example #8
gboolean
sgen_cement_lookup_or_register (GCObject *obj)
{
	guint hv;
	int i;
	CementHashEntry *hash = cement_hash;

	if (!cement_enabled)
		return FALSE;

	hv = sgen_aligned_addr_hash (obj);
	i = SGEN_CEMENT_HASH (hv);

	SGEN_ASSERT (5, sgen_ptr_in_nursery (obj), "Can only cement pointers to nursery objects");

	if (!hash [i].obj) {
		GCObject *old_obj;
		old_obj = InterlockedCompareExchangePointer ((gpointer*)&hash [i].obj, obj, NULL);
		/* Check if the slot was occupied by some other object */
		if (old_obj != NULL && old_obj != obj)
			return FALSE;
	} else if (hash [i].obj != obj) {
		return FALSE;
	}

	if (hash [i].count >= SGEN_CEMENT_THRESHOLD)
		return TRUE;

	if (InterlockedIncrement ((gint32*)&hash [i].count) == SGEN_CEMENT_THRESHOLD) {
		SGEN_ASSERT (9, sgen_get_current_collection_generation () >= 0, "We can only cement objects when we're in a collection pause.");
		SGEN_ASSERT (9, SGEN_OBJECT_IS_PINNED (obj), "Can only cement pinned objects");
		SGEN_CEMENT_OBJECT (obj);

		binary_protocol_cement (obj, (gpointer)SGEN_LOAD_VTABLE (obj),
				(int)sgen_safe_object_get_size (obj));
	}

	return FALSE;
}
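Compared with the earlier variants, this version drops the separate concurrent hash: a slot is claimed with a compare-and-swap and the counter is incremented atomically, so concurrent callers can race on the same entry safely. A sketch of the same slot-claim pattern in portable C11 atomics (illustrative only; not the Mono interlocked API):

#include <stdatomic.h>
#include <stdbool.h>

typedef struct {
	_Atomic(void *) obj;
	atomic_int count;
} slot_t;

/* Returns true if the slot now belongs to obj (either we claimed the empty
 * slot or it already held obj); false if another object owns it. */
static bool
claim_slot (slot_t *slot, void *obj)
{
	void *expected = NULL;
	if (atomic_compare_exchange_strong (&slot->obj, &expected, obj))
		return true;
	return expected == obj;
}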
mword
sgen_build_nursery_fragments (GCMemSection *nursery_section, SgenGrayQueue *unpin_queue)
{
	char *frag_start, *frag_end;
	size_t frag_size;
	SgenFragment *frags_ranges;
	void **pin_start, **pin_entry, **pin_end;

#ifdef NALLOC_DEBUG
	reset_alloc_records ();
#endif
	/* The mutator fragments are done. We no longer need them. */
	sgen_fragment_allocator_release (&mutator_allocator);

	frag_start = sgen_nursery_start;
	fragment_total = 0;

	/* The current nursery might give us a fragment list to exclude [start, next[ */
	frags_ranges = sgen_minor_collector.build_fragments_get_exclude_head ();

	/* clear scan starts */
	memset (nursery_section->scan_starts, 0, nursery_section->num_scan_start * sizeof (gpointer));

	pin_start = pin_entry = sgen_pinning_get_entry (nursery_section->pin_queue_first_entry);
	pin_end = sgen_pinning_get_entry (nursery_section->pin_queue_last_entry);

	while (pin_entry < pin_end || frags_ranges) {
		char *addr0, *addr1;
		size_t size;

		addr0 = addr1 = sgen_nursery_end;
		if (pin_entry < pin_end)
			addr0 = (char *)*pin_entry;
		if (frags_ranges)
			addr1 = frags_ranges->fragment_start;

		if (addr0 < addr1) {
			if (unpin_queue)
				GRAY_OBJECT_ENQUEUE (unpin_queue, (GCObject*)addr0, sgen_obj_get_descriptor_safe ((GCObject*)addr0));
			else
				SGEN_UNPIN_OBJECT (addr0);
			size = SGEN_ALIGN_UP (sgen_safe_object_get_size ((GCObject*)addr0));
			CANARIFY_SIZE (size);
			sgen_set_nursery_scan_start (addr0);
			frag_end = addr0;
			++pin_entry;
		} else {
			frag_end = addr1;
			size = frags_ranges->fragment_next - addr1;
			frags_ranges = frags_ranges->next_in_order;
		}

		frag_size = frag_end - frag_start;

		if (size == 0)
			continue;

		g_assert (frag_size >= 0);
		g_assert (size > 0);
		if (frag_size && size)
			add_nursery_frag (&mutator_allocator, frag_size, frag_start, frag_end);	

		frag_size = size;
#ifdef NALLOC_DEBUG
		add_alloc_record (*pin_entry, frag_size, PINNING);
#endif
		frag_start = frag_end + frag_size;
	}

	nursery_last_pinned_end = frag_start;
	frag_end = sgen_nursery_end;
	frag_size = frag_end - frag_start;
	if (frag_size)
		add_nursery_frag (&mutator_allocator, frag_size, frag_start, frag_end);

	/* Now it's safe to release the fragments exclude list. */
	sgen_minor_collector.build_fragments_release_exclude_head ();

	/* First we reorder the fragment list to be in ascending address order. This makes H/W prefetchers happier. */
	fragment_list_reverse (&mutator_allocator);

	/* The collector might want to do something with the final nursery fragment list. */
	sgen_minor_collector.build_fragments_finish (&mutator_allocator);

	if (!unmask (mutator_allocator.alloc_head)) {
		SGEN_LOG (1, "Nursery fully pinned");
		for (pin_entry = pin_start; pin_entry < pin_end; ++pin_entry) {
			GCObject *p = (GCObject *)*pin_entry;
			SGEN_LOG (3, "Bastard pinning obj %p (%s), size: %zd", p, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (p)), sgen_safe_object_get_size (p));
		}
	}
	return fragment_total;
}
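The loop above walks the sorted pin queue and the collector's exclude ranges in address order, turning every gap between them into a reusable nursery fragment. A stripped-down sketch of that gap-carving idea, ignoring the exclude list and canaries (hypothetical helper, not the SGen API):

#include <stddef.h>

typedef void (*emit_fragment_fn) (char *start, char *end);

static void
carve_fragments (char *nursery_start, char *nursery_end,
		char **pinned, size_t *sizes, size_t n, emit_fragment_fn emit)
{
	char *frag_start = nursery_start;
	size_t i;

	for (i = 0; i < n; ++i) {   /* pinned [] assumed sorted by address */
		if (pinned [i] > frag_start)
			emit (frag_start, pinned [i]);   /* gap before the pinned object */
		frag_start = pinned [i] + sizes [i];
	}
	if (nursery_end > frag_start)
		emit (frag_start, nursery_end);   /* tail fragment */
}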
Example #10
static const char*
safe_name_bridge (GCObject *obj)
{
	GCVTable vt = SGEN_LOAD_VTABLE (obj);
	return vt->klass->name;
}
Example #11
void
sgen_dump_pin_queue (void)
{
	int i;

	for (i = 0; i < last_num_pinned; ++i) {
		GCObject *ptr = (GCObject *)pin_queue.data [i];
		SGEN_LOG (3, "Bastard pinning obj %p (%s), size: %zd", ptr, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (ptr)), sgen_safe_object_get_size (ptr));
	}
}
Example #12
static void
major_copy_or_mark_object (void **obj_slot, SgenGrayQueue *queue)
{
	char *forwarded;
	char *obj = *obj_slot;
	mword objsize;

	DEBUG (9, g_assert (current_collection_generation == GENERATION_OLD));

	HEAVY_STAT (++stat_copy_object_called_major);

	DEBUG (9, fprintf (gc_debug_file, "Precise copy of %p from %p", obj, obj_slot));

	/*
	 * obj must belong to one of:
	 *
	 * 1. the nursery
	 * 2. the LOS
	 * 3. a pinned chunk
	 * 4. a non-to-space section of the major heap
	 * 5. a to-space section of the major heap
	 *
	 * In addition, objects in 1, 2 and 4 might also be pinned.
	 * Objects in 1 and 4 might be forwarded.
	 *
	 * Before we can copy the object we must make sure that we are
	 * allowed to, i.e. that the object is not pinned, not already
	 * forwarded, not in the nursery To Space and doesn't belong
	 * to the LOS, a pinned chunk, or a to-space section.
	 *
	 * We are usually called for to-space objects (5) when we have
	 * two remset entries for the same reference.  The first entry
	 * copies the object and updates the reference and the second
	 * calls us with the updated reference that points into
	 * to-space.  There might also be other circumstances where we
	 * get to-space objects.
	 */

	if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) {
		DEBUG (9, g_assert (((MonoVTable*)SGEN_LOAD_VTABLE(obj))->gc_descr));
		DEBUG (9, fprintf (gc_debug_file, " (already forwarded to %p)\n", forwarded));
		HEAVY_STAT (++stat_major_copy_object_failed_forwarded);
		*obj_slot = forwarded;
		return;
	}
	if (SGEN_OBJECT_IS_PINNED (obj)) {
		DEBUG (9, g_assert (((MonoVTable*)SGEN_LOAD_VTABLE(obj))->gc_descr));
		DEBUG (9, fprintf (gc_debug_file, " (pinned, no change)\n"));
		HEAVY_STAT (++stat_major_copy_object_failed_pinned);
		return;
	}

	if (ptr_in_nursery (obj)) {
		/* A To Space object is already at its final destination for the current collection. */
		if (sgen_nursery_is_to_space (obj))
			return;
		goto copy;
	}

	/*
	 * At this point we know obj is not pinned, not forwarded and
	 * belongs to 2, 3, 4, or 5.
	 *
	 * LOS objects (2) are simple, at least as long as we always follow
	 * the rule: if objsize > SGEN_MAX_SMALL_OBJ_SIZE, pin the
	 * object and return it.  At the end of major collections, we
	 * walk the los list and if the object is pinned, it is
	 * marked, otherwise it can be freed.
	 *
	 * Pinned chunks (3) and major heap sections (4, 5) both
	 * reside in blocks, which are always aligned, so once we've
	 * eliminated LOS objects, we can just access the block and
	 * see whether it's a pinned chunk or a major heap section.
	 */

	objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((MonoObject*)obj));

	if (G_UNLIKELY (objsize > SGEN_MAX_SMALL_OBJ_SIZE || obj_is_from_pinned_alloc (obj))) {
		if (SGEN_OBJECT_IS_PINNED (obj))
			return;
		DEBUG (9, fprintf (gc_debug_file, " (marked LOS/Pinned %p (%s), size: %td)\n", obj, sgen_safe_name (obj), objsize));
		binary_protocol_pin (obj, (gpointer)SGEN_LOAD_VTABLE (obj), sgen_safe_object_get_size ((MonoObject*)obj));
		SGEN_PIN_OBJECT (obj);
		GRAY_OBJECT_ENQUEUE (queue, obj);
		HEAVY_STAT (++stat_major_copy_object_failed_large_pinned);
		return;
	}

	/*
	 * Now we know the object is in a major heap section.  All we
	 * need to do is check whether it's already in to-space (5) or
	 * not (4).
	 */
	if (MAJOR_OBJ_IS_IN_TO_SPACE (obj)) {
		DEBUG (9, g_assert (objsize <= SGEN_MAX_SMALL_OBJ_SIZE));
		DEBUG (9, fprintf (gc_debug_file, " (already copied)\n"));
		HEAVY_STAT (++stat_major_copy_object_failed_to_space);
		return;
	}

 copy:
	HEAVY_STAT (++stat_objects_copied_major);

	*obj_slot = copy_object_no_checks (obj, queue);
}
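The long comment above relies on major-heap blocks and pinned chunks being size-aligned, so the enclosing block of any object can be found without a lookup. A sketch of that alignment trick with an assumed 16 KB, power-of-two block size (illustrative; the real code goes through MS_BLOCK_FOR_OBJ and its block metadata):

#include <stdint.h>

#define BLOCK_SIZE (16 * 1024)   /* assumed power-of-two block size */

typedef struct {
	int is_pinned_chunk;   /* hypothetical header fields */
	int is_to_space;
} BlockHeader;

static inline BlockHeader *
block_for_object (void *obj)
{
	/* masking off the low bits of an interior pointer lands on the block header */
	return (BlockHeader *)((uintptr_t)obj & ~(uintptr_t)(BLOCK_SIZE - 1));
}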
Example #13
static mword*
handle_remset (mword *p, void *start_nursery, void *end_nursery, gboolean global, SgenGrayQueue *queue)
{
	void **ptr;
	mword count;
	mword desc;

	if (global)
		HEAVY_STAT (++stat_global_remsets_processed);
	else
		HEAVY_STAT (++stat_local_remsets_processed);

	/* FIXME: exclude stack locations */
	switch ((*p) & REMSET_TYPE_MASK) {
	case REMSET_LOCATION:
		ptr = (void**)(*p);
		//__builtin_prefetch (ptr);
		if (((void*)ptr < start_nursery || (void*)ptr >= end_nursery)) {
			gpointer old = *ptr;

			sgen_get_current_object_ops ()->copy_or_mark_object (ptr, queue);
			SGEN_LOG (9, "Overwrote remset at %p with %p", ptr, *ptr);
			if (old)
				binary_protocol_ptr_update (ptr, old, *ptr, (gpointer)SGEN_LOAD_VTABLE (*ptr), sgen_safe_object_get_size (*ptr));
			if (!global && *ptr >= start_nursery && *ptr < end_nursery) {
				/*
				 * If the object is pinned, each reference to it from nonpinned objects
				 * becomes part of the global remset, which can grow very large.
				 */
				SGEN_LOG (9, "Add to global remset because of pinning %p (%p %s)", ptr, *ptr, sgen_safe_name (*ptr));
				sgen_add_to_global_remset (ptr);
			}
		} else {
			SGEN_LOG (9, "Skipping remset at %p holding %p", ptr, *ptr);
		}
		return p + 1;
	case REMSET_RANGE: {
		CopyOrMarkObjectFunc copy_func = sgen_get_current_object_ops ()->copy_or_mark_object;

		ptr = (void**)(*p & ~REMSET_TYPE_MASK);
		if (((void*)ptr >= start_nursery && (void*)ptr < end_nursery))
			return p + 2;
		count = p [1];
		while (count-- > 0) {
			copy_func (ptr, queue);
			SGEN_LOG (9, "Overwrote remset at %p with %p (count: %d)", ptr, *ptr, (int)count);
			if (!global && *ptr >= start_nursery && *ptr < end_nursery)
				sgen_add_to_global_remset (ptr);
			++ptr;
		}
		return p + 2;
	}
	case REMSET_OBJECT:
		ptr = (void**)(*p & ~REMSET_TYPE_MASK);
		if (((void*)ptr >= start_nursery && (void*)ptr < end_nursery))
			return p + 1;
		sgen_get_current_object_ops ()->scan_object ((char*)ptr, queue);
		return p + 1;
	case REMSET_VTYPE: {
		size_t skip_size;

		ptr = (void**)(*p & ~REMSET_TYPE_MASK);
		if (((void*)ptr >= start_nursery && (void*)ptr < end_nursery))
			return p + 4;
		desc = p [1];
		count = p [2];
		skip_size = p [3];
		while (count-- > 0) {
			sgen_get_current_object_ops ()->scan_vtype ((char*)ptr, desc, queue);
			ptr = (void**)((char*)ptr + skip_size);
		}
		return p + 4;
	}
	default:
		g_assert_not_reached ();
	}
	return NULL;
}
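handle_remset() above decodes entries whose kind is stored in the low bits of a word-aligned value (REMSET_TYPE_MASK); the REMSET_LOCATION case reads the word directly, which suggests its tag value is zero. A generic sketch of that low-bit pointer tagging (illustrative constants, not the actual REMSET_* values):

#include <stdint.h>

#define TAG_MASK ((uintptr_t)0x3)   /* assumed tag width */

static inline uintptr_t
tag_entry (void *ptr, unsigned tag)
{
	return (uintptr_t)ptr | ((uintptr_t)tag & TAG_MASK);   /* ptr must be word-aligned */
}

static inline void *
entry_ptr (uintptr_t entry)
{
	return (void *)(entry & ~TAG_MASK);
}

static inline unsigned
entry_tag (uintptr_t entry)
{
	return (unsigned)(entry & TAG_MASK);
}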
/* LOCKING: requires that the GC lock is held */
void
sgen_collect_bridge_objects (int generation, ScanCopyContext ctx)
{
	CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
	GrayQueue *queue = ctx.queue;
	SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);
	GCObject *object;
	gpointer dummy G_GNUC_UNUSED;
	GCObject *copy;
	SgenPointerQueue moved_fin_objects;

	sgen_pointer_queue_init (&moved_fin_objects, INTERNAL_MEM_TEMPORARY);

	if (no_finalize)
		return;

	SGEN_HASH_TABLE_FOREACH (hash_table, GCObject *, object, gpointer, dummy) {
		int tag = tagged_object_get_tag (object);
		object = tagged_object_get_object (object);

		/* Bridge code told us to ignore this one */
		if (tag == BRIDGE_OBJECT_MARKED)
			continue;

		/* Object is a bridge object and major heap says it's dead  */
		if (major_collector.is_object_live (object))
			continue;

		/* Nursery says the object is dead. */
		if (!sgen_gc_is_object_ready_for_finalization (object))
			continue;

		if (!sgen_client_bridge_is_bridge_object (object))
			continue;

		copy = object;
		copy_func (&copy, queue);

		sgen_client_bridge_register_finalized_object (copy);
		
		if (hash_table == &minor_finalizable_hash && !ptr_in_nursery (copy)) {
			/* remove from the list */
			SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);

			/* insert it into the major hash */
			sgen_hash_table_replace (&major_finalizable_hash, tagged_object_apply (copy, tag), NULL, NULL);

			SGEN_LOG (5, "Promoting finalization of object %p (%s) (was at %p) to major table", copy, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (copy)), object);

			continue;
		} else if (copy != object) {
			/* update pointer */
			SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);

			/* register for reinsertion */
			sgen_pointer_queue_add (&moved_fin_objects, tagged_object_apply (copy, tag));

			SGEN_LOG (5, "Updating object for finalization: %p (%s) (was at %p)", copy, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (copy)), object);

			continue;
		}
	} SGEN_HASH_TABLE_FOREACH_END;
Example #15
static void
major_copy_or_mark_object (void **ptr, SgenGrayQueue *queue)
{
	void *obj = *ptr;
	MSBlockInfo *block;

	HEAVY_STAT (++stat_copy_object_called_major);

	DEBUG (9, g_assert (obj));
	DEBUG (9, g_assert (current_collection_generation == GENERATION_OLD));

	if (ptr_in_nursery (obj)) {
		int word, bit;
		char *forwarded, *old_obj;

		if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) {
			*ptr = forwarded;
			return;
		}
		if (SGEN_OBJECT_IS_PINNED (obj))
			return;

		HEAVY_STAT (++stat_objects_copied_major);

	do_copy_object:
		old_obj = obj;
		obj = copy_object_no_checks (obj, queue);
		if (G_UNLIKELY (old_obj == obj)) {
			/* If we fail to evacuate an object we just stop doing it for that block size, as all others will surely fail too. */
			if (!ptr_in_nursery (obj)) {
				int size_index;
				block = MS_BLOCK_FOR_OBJ (obj);
				size_index = block->obj_size_index;
				evacuate_block_obj_sizes [size_index] = FALSE;
				MS_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);
			}
			return;
		}
		*ptr = obj;

		/*
		 * FIXME: See comment for copy_object_no_checks().  If
		 * we have that, we can let the allocation function
		 * give us the block info, too, and we won't have to
		 * re-fetch it.
		 */
		block = MS_BLOCK_FOR_OBJ (obj);
		MS_CALC_MARK_BIT (word, bit, obj);
		DEBUG (9, g_assert (!MS_MARK_BIT (block, word, bit)));
		MS_SET_MARK_BIT (block, word, bit);
	} else {
		char *forwarded;
#ifndef FIXED_HEAP
		mword objsize;
#endif

		if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) {
			*ptr = forwarded;
			return;
		}

#ifdef FIXED_HEAP
		if (MS_PTR_IN_SMALL_MAJOR_HEAP (obj))
#else
		objsize = SGEN_ALIGN_UP (mono_sgen_safe_object_get_size ((MonoObject*)obj));

		if (objsize <= SGEN_MAX_SMALL_OBJ_SIZE)
#endif
		{
			int size_index;

			block = MS_BLOCK_FOR_OBJ (obj);
			size_index = block->obj_size_index;

			if (!block->has_pinned && evacuate_block_obj_sizes [size_index]) {
				if (block->is_to_space)
					return;
				HEAVY_STAT (++stat_major_objects_evacuated);
				goto do_copy_object;
			} else {
				MS_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);
			}
		} else {
			if (SGEN_OBJECT_IS_PINNED (obj))
				return;
			binary_protocol_pin (obj, (gpointer)SGEN_LOAD_VTABLE (obj), mono_sgen_safe_object_get_size ((MonoObject*)obj));
			SGEN_PIN_OBJECT (obj);
			/* FIXME: only enqueue if object has references */
			GRAY_OBJECT_ENQUEUE (queue, obj);
		}
	}
}