gboolean
sgen_need_major_collection (mword space_needed)
{
	size_t heap_size;

	if (sgen_concurrent_collection_in_progress ()) {
		heap_size = get_heap_size ();

		if (heap_size <= major_collection_trigger_size)
			return FALSE;

		/*
		 * The more the heap grows, the more we need to decrease the allowance above,
		 * in order to have similar trigger sizes as the synchronous collector.
		 * If the heap grows so much that we would need to have a negative allowance,
		 * we force the finishing of the collection, to avoid increased memory usage.
		 */
		if ((heap_size - major_start_heap_size) > major_start_heap_size * SGEN_DEFAULT_ALLOWANCE_HEAP_SIZE_RATIO)
			return TRUE;
		return FALSE;
	}

	/* FIXME: This is a cop-out. We should have some way of figuring this out. */
	if (!major_collector.have_swept ())
		return FALSE;

	if (space_needed > sgen_memgov_available_free_space ())
		return TRUE;

	sgen_memgov_calculate_minor_collection_allowance ();

	heap_size = get_heap_size ();

	return heap_size > major_collection_trigger_size;
}
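/*
 * Worked example for the forced-finish check above, with an assumed ratio:
 * if major_start_heap_size is 100 MB and SGEN_DEFAULT_ALLOWANCE_HEAP_SIZE_RATIO
 * is taken to be 1/3, the concurrent collection is forced to finish once the
 * heap grows past 100 MB + 100 MB / 3 ≈ 133 MB, i.e. once the shrinking
 * allowance would otherwise have to go negative.
 */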
static void
concurrent_enqueue_check (GCObject *obj)
{
	g_assert (sgen_concurrent_collection_in_progress ());
	g_assert (!sgen_ptr_in_nursery (obj));
	g_assert (SGEN_LOAD_VTABLE (obj));
}
void
sgen_memgov_major_pre_sweep (void)
{
	if (sgen_concurrent_collection_in_progress ()) {
		major_pre_sweep_heap_size = get_heap_size ();
	} else {
		/* We decrease the allowance only in the concurrent case */
		major_pre_sweep_heap_size = major_start_heap_size;
	}
}
gboolean
sgen_need_major_collection (mword space_needed)
{
	mword los_alloced;

	if (sgen_concurrent_collection_in_progress ())
		return FALSE;

	/* LOS memory allocated since the last collection. */
	los_alloced = los_memory_usage - MIN (last_collection_los_memory_usage, los_memory_usage);

	/*
	 * A major collection is needed if the requested space cannot be satisfied from
	 * the available free space, or if the memory consumed since the allowance was
	 * last computed (newly allocated major sections plus LOS growth) exceeds the
	 * minor collection allowance.
	 */
	return (space_needed > sgen_memgov_available_free_space ()) ||
		minor_collection_sections_alloced * major_collector.section_size + los_alloced > minor_collection_allowance;
}
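/*
 * Illustrative only: a minimal sketch of how an allocator slow path might
 * consult sgen_need_major_collection() above before falling back to a major
 * cycle. perform_major_collection() and the reason string are hypothetical
 * placeholders, not part of the SGen API shown in this file.
 */
static void perform_major_collection (const char *reason);	/* hypothetical */

static void
ensure_space_for_allocation (mword size)
{
	/* Ask the memory governor whether allocating `size` more bytes should
	 * trigger a major cycle; otherwise keep allocating as usual. */
	if (sgen_need_major_collection (size))
		perform_major_collection ("allocation pressure");
}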
gboolean
sgen_cement_lookup_or_register (char *obj)
{
	guint hv;
	int i;
	CementHashEntry *hash;
	gboolean concurrent_cementing = sgen_concurrent_collection_in_progress ();

	if (!cement_enabled)
		return FALSE;

	if (concurrent_cementing)
		SGEN_ASSERT (5, cement_concurrent, "Cementing wasn't inited with concurrent flag");

	/* During a concurrent collection we register into a separate table. */
	if (concurrent_cementing)
		hash = cement_hash_concurrent;
	else
		hash = cement_hash;

	hv = mono_aligned_addr_hash (obj);
	i = SGEN_CEMENT_HASH (hv);

	SGEN_ASSERT (5, sgen_ptr_in_nursery (obj), "Can only cement pointers to nursery objects");

	/* The table is direct-mapped: claim an empty slot, or bail out on a collision. */
	if (!hash [i].obj) {
		SGEN_ASSERT (5, !hash [i].count, "Cementing hash inconsistent");
		hash [i].obj = obj;
	} else if (hash [i].obj != obj) {
		return FALSE;
	}

	/* Already at the threshold: the object is considered cemented. */
	if (hash [i].count >= SGEN_CEMENT_THRESHOLD)
		return TRUE;

	++hash [i].count;
	if (hash [i].count == SGEN_CEMENT_THRESHOLD) {
		/* The object has been pinned often enough: cement it now. */
		SGEN_ASSERT (9, SGEN_OBJECT_IS_PINNED (obj), "Can only cement pinned objects");
		SGEN_CEMENT_OBJECT (obj);

		if (G_UNLIKELY (MONO_GC_OBJ_CEMENTED_ENABLED ())) {
			MonoVTable *vt G_GNUC_UNUSED = (MonoVTable*)SGEN_LOAD_VTABLE (obj);
			MONO_GC_OBJ_CEMENTED ((mword)obj, sgen_safe_object_get_size ((MonoObject*)obj),
					vt->klass->name_space, vt->klass->name);
		}
		binary_protocol_cement (obj, (gpointer)SGEN_LOAD_VTABLE (obj),
				(int)sgen_safe_object_get_size ((MonoObject*)obj));
	}

	return FALSE;
}
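/*
 * A self-contained sketch of the cementing scheme used above, with all names
 * local to this example (the real table size, threshold and hash are SGen's
 * SGEN_CEMENT_* macros and mono_aligned_addr_hash): a small direct-mapped
 * table counts how often an object gets registered; once the count reaches
 * the threshold the object is treated as cemented, and colliding objects are
 * simply not tracked.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define EXAMPLE_TABLE_SIZE 61		/* hypothetical; SGen uses SGEN_CEMENT_HASH_SIZE */
#define EXAMPLE_THRESHOLD  1000		/* hypothetical; SGen uses SGEN_CEMENT_THRESHOLD */

typedef struct {
	void *obj;
	unsigned count;
} ExampleCementEntry;

static ExampleCementEntry example_table [EXAMPLE_TABLE_SIZE];

/* Returns true if `obj` was already cemented; like the function above, the
 * call that crosses the threshold still returns false. */
static bool
example_cement_lookup_or_register (void *obj)
{
	size_t i = ((uintptr_t)obj >> 3) % EXAMPLE_TABLE_SIZE;	/* crude address hash */

	if (!example_table [i].obj)
		example_table [i].obj = obj;		/* claim the empty slot */
	else if (example_table [i].obj != obj)
		return false;				/* collision: give up on this object */

	if (example_table [i].count >= EXAMPLE_THRESHOLD)
		return true;				/* already cemented */

	++example_table [i].count;
	return false;
}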
static mono_native_thread_return_t
workers_thread_func (void *data_untyped)
{
	WorkerData *data = data_untyped;
	SgenMajorCollector *major = sgen_get_major_collector ();

	mono_thread_info_register_small_id ();

	if (major->init_worker_thread)
		major->init_worker_thread (data->major_collector_data);

	init_private_gray_queue (data);

	for (;;) {
		gboolean did_work = FALSE;

		SGEN_ASSERT (0, sgen_get_current_collection_generation () != GENERATION_NURSERY, "Why are we doing work while there's a nursery collection happening?");

		while (workers_state.data.state == STATE_WORKING && workers_dequeue_and_do_job (data)) {
			did_work = TRUE;
			/* FIXME: maybe distribute the gray queue here? */
		}

		if (!did_work && (!sgen_gray_object_queue_is_empty (&data->private_gray_queue) || workers_get_work (data))) {
			SgenObjectOperations *ops = sgen_concurrent_collection_in_progress ()
				? &major->major_concurrent_ops
				: &major->major_ops;
			ScanCopyContext ctx = { ops->scan_object, NULL, &data->private_gray_queue };

			g_assert (!sgen_gray_object_queue_is_empty (&data->private_gray_queue));

			while (!sgen_drain_gray_stack (32, ctx)) {
				if (workers_state.data.state == STATE_NURSERY_COLLECTION)
					workers_wait ();
			}

			g_assert (sgen_gray_object_queue_is_empty (&data->private_gray_queue));

			init_private_gray_queue (data);

			did_work = TRUE;
		}

		if (!did_work)
			workers_wait ();
	}

	/* dummy return to make compilers happy */
	return NULL;
}
static mono_native_thread_return_t
workers_thread_func (void *data_untyped)
{
	WorkerData *data = data_untyped;
	SgenMajorCollector *major = sgen_get_major_collector ();

	mono_thread_info_register_small_id ();

	if (major->init_worker_thread)
		major->init_worker_thread (data->major_collector_data);

	init_private_gray_queue (data);

	for (;;) {
		gboolean did_work = FALSE;

		while (workers_dequeue_and_do_job (data)) {
			did_work = TRUE;
			/* FIXME: maybe distribute the gray queue here? */
		}

		if (workers_marking && (!sgen_gray_object_queue_is_empty (&data->private_gray_queue) || workers_get_work (data))) {
			SgenObjectOperations *ops = sgen_concurrent_collection_in_progress ()
				? &major->major_concurrent_ops
				: &major->major_ops;
			ScanCopyContext ctx = { ops->scan_object, NULL, &data->private_gray_queue };

			g_assert (!sgen_gray_object_queue_is_empty (&data->private_gray_queue));

			while (!sgen_drain_gray_stack (32, ctx))
				workers_gray_queue_share_redirect (&data->private_gray_queue);

			g_assert (sgen_gray_object_queue_is_empty (&data->private_gray_queue));

			init_private_gray_queue (data);

			did_work = TRUE;
		}

		if (!did_work)
			workers_wait ();
	}

	/* dummy return to make compilers happy */
	return NULL;
}
static void
marker_idle_func (void *data_untyped)
{
	WorkerData *data = data_untyped;

	SGEN_ASSERT (0, continue_idle_func (), "Why are we called when we're not supposed to work?");
	SGEN_ASSERT (0, sgen_concurrent_collection_in_progress (), "The worker should only mark in concurrent collections.");

	if (workers_state == STATE_WORK_ENQUEUED) {
		set_state (STATE_WORK_ENQUEUED, STATE_WORKING);
		SGEN_ASSERT (0, workers_state != STATE_NOT_WORKING, "How did we get from WORK ENQUEUED to NOT WORKING?");
	}

	if (!sgen_gray_object_queue_is_empty (&data->private_gray_queue) || workers_get_work (data)) {
		ScanCopyContext ctx = CONTEXT_FROM_OBJECT_OPERATIONS (idle_func_object_ops, &data->private_gray_queue);

		SGEN_ASSERT (0, !sgen_gray_object_queue_is_empty (&data->private_gray_queue), "How is our gray queue empty if we just got work?");

		sgen_drain_gray_stack (ctx);
	} else {
		worker_try_finish ();
	}
}
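/*
 * Sketch of the state transition assumed by marker_idle_func() above: the
 * shared worker state is expected to be changed atomically, so that only one
 * racing worker performs the WORK_ENQUEUED -> WORKING switch. This minimal
 * illustration uses C11 atomics and local names; it is not SGen's actual
 * set_state() implementation.
 */
#include <stdatomic.h>
#include <stdbool.h>

enum { EXAMPLE_NOT_WORKING, EXAMPLE_WORKING, EXAMPLE_WORK_ENQUEUED };

static _Atomic int example_workers_state = EXAMPLE_NOT_WORKING;

/* Returns true if this caller won the race to move the state from `expected`
 * to `new_state`; false if another thread changed the state first. */
static bool
example_set_state (int expected, int new_state)
{
	return atomic_compare_exchange_strong (&example_workers_state, &expected, new_state);
}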