/// Run a young-generation (minor) collection.
///
/// @param data   Roots and VM state handed to the collector.
/// @param stats  Optional out-param filled in by the young collector.
///
/// Side effects: clears the collect_young_now flag, prunes C-API handle
/// lists, bumps the young-collection counter, prunes young entries from the
/// global method cache, and refills every managed thread's allocation slab.
void ObjectMemory::collect_young(GCData& data, YoungCollectStats* stats) {
  collect_young_now = false;

  // Accumulates elapsed time (microsecond resolution scaled by 1000000)
  // into young_collection_time via RAII.
  timer::Running<size_t, 1000000> timer(young_collection_time);

  // validate_handles(data.handles());
  // validate_handles(data.cached_handles());

  young_->reset_stats();
  young_->collect(data, stats);

  prune_handles(data.handles(), true);
  prune_handles(data.cached_handles(), true);
  young_collections++;

  data.global_cache()->prune_young();

  if(data.threads()) {
    for(std::list<ManagedThread*>::iterator i = data.threads()->begin();
        i != data.threads()->end();
        ++i) {
      // BUG FIX: refill_slab() was previously invoked inside assert(), so
      // with NDEBUG defined the whole call was compiled away and threads
      // kept stale allocation slabs after a young collection. Hoist the
      // side effect out of the assertion.
      bool refilled = refill_slab((*i)->local_slab());
      assert(refilled);
      (void)refilled; // suppress unused-variable warning in NDEBUG builds
    }
  }
}
/// Run a full (mature/major) collection over the immix and mark-sweep
/// spaces.
///
/// @param data  Roots and VM state handed to the collector.
///
/// Ordering in this function is significant: handle/cache pruning consults
/// mark bits, so it must happen before mark_sweep_->after_marked() frees
/// objects (which invalidates those bits).
void ObjectMemory::collect_mature(GCData& data) {
#ifdef RBX_GC_STATS
  stats::GCStats::get()->objects_seen.start();
  stats::GCStats::get()->collect_mature.start();
#endif

  // validate_handles(data.handles());
  // validate_handles(data.cached_handles());

  // RAII timer accumulating elapsed time into full_collection_time.
  timer::Running<size_t, 1000000> timer(full_collection_time);

  collect_mature_now = false;

  // Clear per-collection mark state before tracing.
  code_manager_.clear_marks();
  immix_->reset_stats();
  immix_->collect(data);
  immix_->clean_weakrefs();

  code_manager_.sweep();

  // Drop global-cache entries and C-API handles whose targets were not
  // marked in this cycle.
  data.global_cache()->prune_unmarked(mark());
  prune_handles(data.handles(), false);
  prune_handles(data.cached_handles(), false);

  // Have to do this after all things that check for mark bits is
  // done, as it free()s objects, invalidating mark bits.
  mark_sweep_->after_marked();

  inflated_headers_->deallocate_headers(mark());

  // We no longer need to unmark all, we use the rotating mark instead.
  // This means that the mark we just set on all reachable objects will
  // be ignored next time anyway.
  //
  // immix_->unmark_all(data);
  rotate_mark();
  full_collections++;

#ifdef RBX_GC_STATS
  stats::GCStats::get()->collect_mature.stop();
  stats::GCStats::get()->objects_seen.stop();
#endif
}
/// Run a young-generation collection: trace from the given roots, prune
/// C-API handle lists for survivors, and drop young entries from the
/// global method cache.
///
/// @param data   Roots and VM state handed to the collector.
/// @param stats  Optional out-param filled in by the young collector.
void ObjectMemory::collect_young(GCData& data, YoungCollectStats* stats) {
  collect_young_now = false;

  // Function-local invocation counter. NOTE(review): incremented but never
  // read here, and not synchronized — presumably a debugging aid.
  static int young_gc_runs = 0;

  // validate_handles(data.handles());
  // validate_handles(data.cached_handles());

  young_->collect(data, stats);

  prune_handles(data.handles(), true);
  prune_handles(data.cached_handles(), true);
  ++young_gc_runs;

  data.global_cache()->prune_young();
}
/// Finish phase of a (possibly concurrent) mature collection: complete the
/// immix trace, sweep dead code and objects, prune caches/handles, rotate
/// the mark value, and notify the finalizer.
///
/// @param state  Current VM state.
/// @param data   Roots and VM state used during this collection.
///
/// Ordering is significant: everything that reads mark bits must run before
/// mark_sweep_->after_marked(), which free()s objects and invalidates them.
void ObjectMemory::collect_mature_finish(STATE, GCData* data) {
  immix_->collect_finish(data);

  code_manager_.sweep();

  // Drop global-cache entries and handles whose targets were not marked.
  data->global_cache()->prune_unmarked(mark());
  prune_handles(data->handles(), data->cached_handles(), NULL);

  // Have to do this after all things that check for mark bits is
  // done, as it free()s objects, invalidating mark bits.
  mark_sweep_->after_marked();

  inflated_headers_->deallocate_headers(mark());

#ifdef RBX_GC_DEBUG
  immix_->verify(data);
#endif
  immix_->sweep();

  // Flip the rotating mark so this cycle's marks are ignored next cycle.
  rotate_mark();
  gc_stats.full_collection_count++;

  if(FinalizerHandler* hdl = state->shared().finalizer_handler()) {
    hdl->finish_collection(state);
  }

  RUBINIUS_GC_END(1);

  // Re-tune young-generation sizing and reset the young-GC-while-marking
  // counter now that the full collection is complete.
  young_autotune();
  young_gc_while_marking_ = 0;
}
/// Run a young-generation collection, updating gc_stats timing and slab
/// accounting, then force every managed thread to re-request an allocation
/// slab on its next allocation.
///
/// @param data   Roots and VM state handed to the collector.
/// @param stats  Optional out-param filled in by the young collector.
void ObjectMemory::collect_young(GCData& data, YoungCollectStats* stats) {
  collect_young_now = false;

  // RAII timer: records this collection's duration into both the running
  // total and the last-collection slot.
  timer::Running<1000000> timer(gc_stats.total_young_collection_time,
                                gc_stats.last_young_collection_time);

  young_->reset_stats();
  young_->collect(data, stats);

  prune_handles(data.handles(), data.cached_handles(), young_);
  gc_stats.young_collection_count++;

  data.global_cache()->prune_young();

  if(data.threads()) {
    std::list<ManagedThread*>::iterator it = data.threads()->begin();
    for(; it != data.threads()->end(); ++it) {
      gc::Slab& thread_slab = (*it)->local_slab();

      // Record how much this thread actually used from its slab.
      gc_stats.slab_allocated(thread_slab.allocations(),
                              thread_slab.byte_used());

      // Reset the slab to a size of 0 so that the thread has to do
      // an allocation to get a proper refill. This keeps the number
      // of threads in the system from starving the available
      // number of slabs.
      thread_slab.refill(0, 0);
    }
  }

  young_->reset();
}
/// Run a young-generation (minor) collection and prune data structures
/// that may reference collected young objects.
///
/// @param data   Roots and VM state handed to the collector.
/// @param stats  Optional out-param filled in by the young collector.
void ObjectMemory::collect_young(GCData& data, YoungCollectStats* stats) {
  collect_young_now = false;

  // RAII timer accumulating this collection's duration into
  // young_collection_time.
  timer::Running<size_t, 1000000> timer(young_collection_time);

  // validate_handles(data.handles());
  // validate_handles(data.cached_handles());

  young_->reset_stats();
  young_->collect(data, stats);

  // Drop C-API handles whose young targets did not survive.
  prune_handles(data.handles(), true);
  prune_handles(data.cached_handles(), true);
  young_collections++;

  // Evict method-cache entries pointing at young objects.
  data.global_cache()->prune_young();
}
/// Run a young-generation collection, publishing timing and memory metrics,
/// resetting per-thread allocation slabs, and notifying the finalizer.
///
/// @param state  Current VM state (provides metrics and the finalizer).
/// @param data   Roots and VM state handed to the collector.
/// @param stats  Optional out-param filled in by the young collector.
void ObjectMemory::collect_young(STATE, GCData* data, YoungCollectStats* stats) {
#ifndef RBX_GC_STRESS_YOUNG
  // Under GC stress the flag stays set so collections keep triggering.
  collect_young_now = false;
#endif

  // RAII stopwatch: records wall time into the last/total young-GC metrics.
  timer::StopWatch<timer::milliseconds> timerx(
      state->vm()->metrics().m.ruby_metrics.gc_young_last_ms,
      state->vm()->metrics().m.ruby_metrics.gc_young_total_ms);

  // Count young collections that happen while concurrent marking runs.
  young_gc_while_marking_++;

  young_->reset_stats();
  young_->collect(data, stats);

  prune_handles(data->handles(), data->cached_handles(), young_);

  metrics::MetricsData& metrics = state->vm()->metrics();
  metrics.m.ruby_metrics.gc_young_count++;
  metrics.m.ruby_metrics.memory_capi_handles = capi_handles_->size();
  metrics.m.ruby_metrics.memory_inflated_headers = inflated_headers_->size();

  data->global_cache()->prune_young();

  if(data->threads()) {
    for(ThreadList::iterator i = data->threads()->begin();
        i != data->threads()->end();
        ++i) {
      gc::Slab& slab = (*i)->local_slab();

      // Reset the slab to a size of 0 so that the thread has to do
      // an allocation to get a proper refill. This keeps the number
      // of threads in the system from starving the available
      // number of slabs.
      slab.refill(0, 0);
    }
  }

  young_->reset();

#ifdef RBX_GC_DEBUG
  young_->verify(data);
#endif

  if(FinalizerThread* hdl = state->shared().finalizer_handler()) {
    hdl->finish_collection(state);
  }
}
/// Run a young-generation collection, updating gc_stats, resetting
/// per-thread allocation slabs, and notifying the finalizer handler.
///
/// @param state  Current VM state (provides the finalizer handler).
/// @param data   Roots and VM state handed to the collector.
/// @param stats  Optional out-param filled in by the young collector.
void ObjectMemory::collect_young(STATE, GCData* data, YoungCollectStats* stats) {
#ifndef RBX_GC_STRESS_YOUNG
  // Under GC stress the flag stays set so collections keep triggering.
  collect_young_now = false;
#endif

  // RAII timer: records duration into the total and last-collection slots.
  timer::Running<1000000> timer(gc_stats.total_young_collection_time,
                                gc_stats.last_young_collection_time);

  // Count young collections that happen while concurrent marking runs.
  young_gc_while_marking_++;

  young_->reset_stats();
  young_->collect(data, stats);

  prune_handles(data->handles(), data->cached_handles(), young_);
  gc_stats.young_collection_count++;

  data->global_cache()->prune_young();

  if(data->threads()) {
    for(ThreadList::iterator i = data->threads()->begin();
        i != data->threads()->end();
        ++i) {
      gc::Slab& slab = (*i)->local_slab();

      // Record how much this thread actually used from its slab.
      gc_stats.slab_allocated(slab.allocations(), slab.byte_used());

      // Reset the slab to a size of 0 so that the thread has to do
      // an allocation to get a proper refill. This keeps the number
      // of threads in the system from starving the available
      // number of slabs.
      slab.refill(0, 0);
    }
  }

  young_->reset();

#ifdef RBX_GC_DEBUG
  young_->verify(data);
#endif

  if(FinalizerHandler* hdl = state->shared().finalizer_handler()) {
    hdl->finish_collection(state);
  }
}
/// Finish phase of a mature collection: complete the immix trace, sweep,
/// prune caches/handles, rotate the mark value, publish memory metrics,
/// and notify the finalizer thread.
///
/// @param state  Current VM state (provides metrics and the finalizer).
/// @param data   Roots and VM state used during this collection.
///
/// Ordering is significant: everything that reads mark bits must run before
/// mark_sweep_->after_marked(), which free()s objects and invalidates them.
void ObjectMemory::collect_mature_finish(STATE, GCData* data) {
  immix_->collect_finish(data);

  code_manager_.sweep();

  // Drop global-cache entries and handles whose targets were not marked.
  data->global_cache()->prune_unmarked(mark());
  prune_handles(data->handles(), data->cached_handles(), NULL);

  // Have to do this after all things that check for mark bits is
  // done, as it free()s objects, invalidating mark bits.
  mark_sweep_->after_marked();

  inflated_headers_->deallocate_headers(mark());

#ifdef RBX_GC_DEBUG
  immix_->verify(data);
#endif
  immix_->sweep();

  // Flip the rotating mark so this cycle's marks are ignored next cycle.
  rotate_mark();

  // Publish post-collection memory usage metrics.
  metrics::MetricsData& metrics = state->vm()->metrics();
  metrics.m.ruby_metrics.gc_immix_count++;
  metrics.m.ruby_metrics.gc_large_count++;
  metrics.m.ruby_metrics.memory_immix_bytes = immix_->bytes_allocated();
  metrics.m.ruby_metrics.memory_large_bytes = mark_sweep_->allocated_bytes;
  metrics.m.ruby_metrics.memory_symbols_bytes = shared_.symbols.bytes_used();
  metrics.m.ruby_metrics.memory_code_bytes = code_manager_.size();
  metrics.m.ruby_metrics.memory_jit_bytes = data->jit_bytes_allocated();

  if(FinalizerThread* hdl = state->shared().finalizer_handler()) {
    hdl->finish_collection(state);
  }

  RUBINIUS_GC_END(1);

  // Re-tune young-generation sizing and reset the young-GC-while-marking
  // counter now that the full collection is complete.
  young_autotune();
  young_gc_while_marking_ = 0;
}
/// Run a full (mature/major) collection over the immix and mark-sweep
/// spaces, updating gc_stats timing counters.
///
/// @param data  Roots and VM state handed to the collector.
///
/// Ordering is significant: handle/cache pruning consults mark bits, so it
/// must happen before mark_sweep_->after_marked() frees objects (which
/// invalidates those bits).
void ObjectMemory::collect_mature(GCData& data) {
  // RAII timer: records duration into the total and last-collection slots.
  timer::Running<1000000> timer(gc_stats.total_full_collection_time,
                                gc_stats.last_full_collection_time);

  collect_mature_now = false;

  // Clear per-collection mark state before tracing.
  code_manager_.clear_marks();
  immix_->reset_stats();
  immix_->collect(data);
  immix_->clean_weakrefs();

  code_manager_.sweep();

  // Drop global-cache entries and handles whose targets were not marked.
  data.global_cache()->prune_unmarked(mark());
  prune_handles(data.handles(), data.cached_handles(), NULL);

  // Have to do this after all things that check for mark bits is
  // done, as it free()s objects, invalidating mark bits.
  mark_sweep_->after_marked();

  inflated_headers_->deallocate_headers(mark());

  // We no longer need to unmark all, we use the rotating mark instead.
  // This means that the mark we just set on all reachable objects will
  // be ignored next time anyway.
  //
  // immix_->unmark_all(data);
  rotate_mark();
  gc_stats.full_collection_count++;
}