// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  GCCause::Cause gc_cause = heap->gc_cause();

  _gc_timer->register_gc_start();
  _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());

  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // CollectorPolicy::_should_clear_all_soft_refs.
  ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(_gc_tracer);

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyBeforeGC:");
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    old_gen->verify_object_start_array();
    perm_gen->verify_object_start_array();
  }

  heap->pre_full_gc_dump(_gc_timer);

  // Filled in below to track the state of the young gen after the collection.
  bool eden_empty;
  bool survivors_empty;
  bool young_gen_empty;

  {
    HandleMark hm;

    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */, gc_cause);

    if (TraceGen1Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    // When collecting the permanent generation methodOops may be moving,
    // so we either have to flush all bcp data or convert it into bci.
    CodeCache::gc_prologue();
    Threads::gc_prologue();

    BiasedLocking::preserve_marks();

    // Capture heap size before collection for printing.
    size_t prev_used = heap->used();
    // Capture perm gen size before collection for sizing.
    size_t perm_gen_prev_used = perm_gen->used_in_bytes();

    // For PrintGCDetails
    size_t old_gen_prev_used = old_gen->used_in_bytes();
    size_t young_gen_prev_used = young_gen->used_in_bytes();

    allocate_stacks();

    COMPILER2_PRESENT(DerivedPointerTable::clear());

    ref_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
    ref_processor()->setup_policy(clear_all_softrefs);

    mark_sweep_phase1(clear_all_softrefs);

    mark_sweep_phase2();

    // Don't add any more derived pointers during phase3
    COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
    COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

    mark_sweep_phase3();

    mark_sweep_phase4();

    restore_marks();

    deallocate_stacks();

    if (ZapUnusedHeapArea) {
      // Do a complete mangle (top to end) because the usage for
      // scratch does not maintain a top pointer.
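      // (Mangling is a debug aid: the dead region is filled with a
      // recognizable bad-value pattern so stray reads or writes of freed
      // space fail loudly instead of silently corrupting the heap.)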
      young_gen->to_space()->mangle_unused_area_complete();
    }

    eden_empty = young_gen->eden_space()->is_empty();
    if (!eden_empty) {
      eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
    }

    // Update heap occupancy information which is used as
    // input to soft ref clearing policy at the next gc.
    Universe::update_heap_info_at_gc();

    survivors_empty = young_gen->from_space()->is_empty() &&
                      young_gen->to_space()->is_empty();
    young_gen_empty = eden_empty && survivors_empty;

    BarrierSet* bs = heap->barrier_set();
    if (bs->is_a(BarrierSet::ModRef)) {
      ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
      MemRegion old_mr = heap->old_gen()->reserved();
      MemRegion perm_mr = heap->perm_gen()->reserved();
      assert(perm_mr.end() <= old_mr.start(), "Generations out of order");

      if (young_gen_empty) {
        modBS->clear(MemRegion(perm_mr.start(), old_mr.end()));
      } else {
        modBS->invalidate(MemRegion(perm_mr.start(), old_mr.end()));
      }
    }

    BiasedLocking::restore_marks();
    Threads::gc_epilogue();
    CodeCache::gc_epilogue();
    JvmtiExport::gc_epilogue();

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    ref_processor()->enqueue_discovered_references(NULL);

    // Update time of last GC
    reset_millis_since_last_gc();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);

    if (UseAdaptiveSizePolicy) {

      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print("AdaptiveSizeStart: ");
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" collection: %d ", heap->total_collections());
        if (Verbose) {
          gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT
                              " young_gen_capacity: " SIZE_FORMAT
                              " perm_gen_capacity: " SIZE_FORMAT,
                              old_gen->capacity_in_bytes(),
                              young_gen->capacity_in_bytes(),
                              perm_gen->capacity_in_bytes());
        }
      }

      // Don't check if the size_policy is ready here.  Let
      // the size_policy check that internally.
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          ((gc_cause != GCCause::_java_lang_system_gc) ||
            UseAdaptiveSizePolicyWithSystemGC)) {
        // Calculate optimal free space amounts
        assert(young_gen->max_size() >
          young_gen->from_space()->capacity_in_bytes() +
          young_gen->to_space()->capacity_in_bytes(),
          "Sizes of space in young gen are out-of-bounds");
        size_t max_eden_size = young_gen->max_size() -
          young_gen->from_space()->capacity_in_bytes() -
          young_gen->to_space()->capacity_in_bytes();
        size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
                                 young_gen->eden_space()->used_in_bytes(),
                                 old_gen->used_in_bytes(),
                                 perm_gen->used_in_bytes(),
                                 young_gen->eden_space()->capacity_in_bytes(),
                                 old_gen->max_gen_size(),
                                 max_eden_size,
                                 true /* full gc*/,
                                 gc_cause,
                                 heap->collector_policy());

        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

        // Don't resize the young generation at a major collection.  A
        // desired young generation size may have been calculated but
        // resizing the young generation complicates the code because the
        // resizing of the old generation may have moved the boundary
        // between the young generation and the old generation.  Let the
        // young generation resizing happen at the minor collections.
      }
      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                               heap->total_collections());
      }
    }

    if (UsePerfData) {
      heap->gc_policy_counters()->update_counters();
      heap->gc_policy_counters()->update_old_capacity(
        old_gen->capacity_in_bytes());
      heap->gc_policy_counters()->update_young_capacity(
        young_gen->capacity_in_bytes());
    }

    heap->resize_all_tlabs();

    // We collected the perm gen, so we'll resize it here.
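    // (compute_new_size() grows or shrinks the perm gen based on its
    // post-GC occupancy; the pre-GC used size is passed so the change can
    // be reported. The exact growth bounds are a policy detail of PSPermGen.)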
    perm_gen->compute_new_size(perm_gen_prev_used);

    if (TraceGen1Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here.  This is after the GC so
        // would be confusing.
        young_gen->print_used_change(young_gen_prev_used);
        old_gen->print_used_change(old_gen_prev_used);
      }
      heap->print_heap_change(prev_used);
      // Do perm gen after heap because prev_used does
      // not include the perm gen (done this way in the other
      // collectors).
      if (PrintGCDetails) {
        perm_gen->print_used_change(perm_gen_prev_used);
      }
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyAfterGC:");
  }

  // Re-verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyAfterGC) {
    old_gen->verify_object_start_array();
    perm_gen->verify_object_start_array();
  }

  if (ZapUnusedHeapArea) {
    old_gen->object_space()->check_mangled_unused_area_complete();
    perm_gen->object_space()->check_mangled_unused_area_complete();
  }

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(_gc_tracer);

  heap->post_full_gc_dump(_gc_timer);

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  _gc_timer->register_gc_end();
  _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());

  return true;
}
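// For reference, the four mark_sweep_phaseN() calls above implement the
// classic LISP2-style sliding compaction:
//   phase1 - recursively mark every object reachable from the GC roots;
//   phase2 - compute the new (compacted) address of each live object;
//   phase3 - adjust all references to point at the new addresses;
//   phase4 - slide the live objects to their new locations.
//
// The variant of invoke_no_policy() below is the same collection after the
// permanent generation was replaced by Metaspace (JDK 8): class metadata is
// no longer compacted by the GC, so the perm gen bookkeeping is replaced by
// ClassLoaderDataGraph::purge() and MetaspaceAux/MetaspaceGC sizing.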
// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();

  _gc_timer->register_gc_start();
  _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());

  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // CollectorPolicy::_should_clear_all_soft_refs.
  ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(_gc_tracer);

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyBeforeGC:");
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    old_gen->verify_object_start_array();
  }

  heap->pre_full_gc_dump(_gc_timer);

  // Filled in below to track the state of the young gen after the collection.
  bool eden_empty;
  bool survivors_empty;
  bool young_gen_empty;

  {
    HandleMark hm;

    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer->gc_id());
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */, gc_cause);

    if (TraceOldGenTime) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    CodeCache::gc_prologue();
    BiasedLocking::preserve_marks();

    // Capture heap size before collection for printing.
    size_t prev_used = heap->used();
    // Capture metadata size before collection for sizing.
    size_t metadata_prev_used = MetaspaceAux::used_bytes();

    // For PrintGCDetails
    size_t old_gen_prev_used = old_gen->used_in_bytes();
    size_t young_gen_prev_used = young_gen->used_in_bytes();

    allocate_stacks();

    COMPILER2_PRESENT(DerivedPointerTable::clear());

    ref_processor()->enable_discovery();
    ref_processor()->setup_policy(clear_all_softrefs);

    mark_sweep_phase1(clear_all_softrefs);

    mark_sweep_phase2();

    // Don't add any more derived pointers during phase3
    COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
    COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

    mark_sweep_phase3();

    mark_sweep_phase4();

    restore_marks();

    deallocate_stacks();

    if (ZapUnusedHeapArea) {
      // Do a complete mangle (top to end) because the usage for
      // scratch does not maintain a top pointer.
      young_gen->to_space()->mangle_unused_area_complete();
    }

    eden_empty = young_gen->eden_space()->is_empty();
    if (!eden_empty) {
      eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
    }

    // Update heap occupancy information which is used as
    // input to soft ref clearing policy at the next gc.
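    // (The default policy keeps a softly reachable object alive for
    // SoftRefLRUPolicyMSPerMB milliseconds per megabyte of free heap, so a
    // larger post-GC free figure makes soft references live longer.)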
    Universe::update_heap_info_at_gc();

    survivors_empty = young_gen->from_space()->is_empty() &&
                      young_gen->to_space()->is_empty();
    young_gen_empty = eden_empty && survivors_empty;

    ModRefBarrierSet* modBS = barrier_set_cast<ModRefBarrierSet>(heap->barrier_set());
    MemRegion old_mr = heap->old_gen()->reserved();
    if (young_gen_empty) {
      modBS->clear(MemRegion(old_mr.start(), old_mr.end()));
    } else {
      modBS->invalidate(MemRegion(old_mr.start(), old_mr.end()));
    }

    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    ClassLoaderDataGraph::purge();
    MetaspaceAux::verify_metrics();

    BiasedLocking::restore_marks();
    CodeCache::gc_epilogue();
    JvmtiExport::gc_epilogue();

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    ref_processor()->enqueue_discovered_references(NULL);

    // Update time of last GC
    reset_millis_since_last_gc();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);

    if (UseAdaptiveSizePolicy) {

      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print("AdaptiveSizeStart: ");
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" collection: %d ", heap->total_collections());
        if (Verbose) {
          gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT
                              " young_gen_capacity: " SIZE_FORMAT,
                              old_gen->capacity_in_bytes(),
                              young_gen->capacity_in_bytes());
        }
      }

      // Don't check if the size_policy is ready here.  Let
      // the size_policy check that internally.
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          ((gc_cause != GCCause::_java_lang_system_gc) ||
            UseAdaptiveSizePolicyWithSystemGC)) {
        // Swap the survivor spaces if from_space is empty. The
        // resize_young_gen() called below is normally used after
        // a successful young GC and swapping of survivor spaces;
        // otherwise, it will fail to resize the young gen with
        // the current implementation.
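        // (That is, resize_young_gen() assumes the space layout a scavenge
        // leaves behind; the clear-and-swap below re-establishes that layout
        // when the compaction has emptied from_space.)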
        if (young_gen->from_space()->is_empty()) {
          young_gen->from_space()->clear(SpaceDecorator::Mangle);
          young_gen->swap_spaces();
        }

        // Calculate optimal free space amounts
        assert(young_gen->max_size() >
          young_gen->from_space()->capacity_in_bytes() +
          young_gen->to_space()->capacity_in_bytes(),
          "Sizes of space in young gen are out-of-bounds");

        size_t young_live = young_gen->used_in_bytes();
        size_t eden_live = young_gen->eden_space()->used_in_bytes();
        size_t old_live = old_gen->used_in_bytes();
        size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
        size_t max_old_gen_size = old_gen->max_gen_size();
        size_t max_eden_size = young_gen->max_size() -
          young_gen->from_space()->capacity_in_bytes() -
          young_gen->to_space()->capacity_in_bytes();

        // Used for diagnostics
        size_policy->clear_generation_free_space_flags();

        size_policy->compute_generations_free_space(young_live,
                                                    eden_live,
                                                    old_live,
                                                    cur_eden,
                                                    max_old_gen_size,
                                                    max_eden_size,
                                                    true /* full gc*/);

        size_policy->check_gc_overhead_limit(young_live,
                                             eden_live,
                                             max_old_gen_size,
                                             max_eden_size,
                                             true /* full gc*/,
                                             gc_cause,
                                             heap->collector_policy());

        size_policy->decay_supplemental_growth(true /* full gc*/);

        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());
      }

      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                               heap->total_collections());
      }
    }

    if (UsePerfData) {
      heap->gc_policy_counters()->update_counters();
      heap->gc_policy_counters()->update_old_capacity(
        old_gen->capacity_in_bytes());
      heap->gc_policy_counters()->update_young_capacity(
        young_gen->capacity_in_bytes());
    }

    heap->resize_all_tlabs();

    // We collected the heap, recalculate the metaspace capacity
    MetaspaceGC::compute_new_size();

    if (TraceOldGenTime) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here.  This is after the GC so
        // would be confusing.
        young_gen->print_used_change(young_gen_prev_used);
        old_gen->print_used_change(old_gen_prev_used);
      }
      heap->print_heap_change(prev_used);
      if (PrintGCDetails) {
        MetaspaceAux::print_metaspace_change(metadata_prev_used);
      }
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyAfterGC:");
  }

  // Re-verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyAfterGC) {
    old_gen->verify_object_start_array();
  }

  if (ZapUnusedHeapArea) {
    old_gen->object_space()->check_mangled_unused_area_complete();
  }

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(_gc_tracer);

  heap->post_full_gc_dump(_gc_timer);

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  _gc_timer->register_gc_end();
  _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());

  return true;
}
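// PSScavenge::invoke_no_policy() below is the parallel young-generation
// copying collection: live objects in eden and from_space are copied into
// to_space, or promoted into the old generation once they have survived
// _tenuring_threshold scavenges (or when to_space overflows). Old-to-young
// pointers are found by scanning the dirty cards of the card table rather
// than the whole old generation.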
// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSScavenge::invoke_no_policy() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(),
         "should be in vm thread");

  assert(_preserved_mark_stack.is_empty(), "should be empty");
  assert(_preserved_oop_stack.is_empty(), "should be empty");

  _gc_timer.register_gc_start();

  TimeStamp scavenge_entry;
  TimeStamp scavenge_midpoint;
  TimeStamp scavenge_exit;

  scavenge_entry.update();

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return false;
  }

  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  heap->increment_total_collections();

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  if ((gc_cause != GCCause::_java_lang_system_gc) ||
       UseAdaptiveSizePolicyWithSystemGC) {
    // Gather the feedback data for eden occupancy.
    young_gen->eden_space()->accumulate_statistics();
  }

  if (ZapUnusedHeapArea) {
    // Save information needed to minimize mangling
    heap->record_gen_tops_before_GC();
  }

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(&_gc_tracer);

  assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  size_t prev_used = heap->used();

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyBeforeGC:");
  }

  {
    ResourceMark rm;
    HandleMark hm;

    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(false /* not full GC */, gc_cause);

    if (TraceGen0Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

    // Verify the object start arrays.
    if (VerifyObjectStartArray &&
        VerifyBeforeGC) {
      old_gen->verify_object_start_array();
    }

    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (!ScavengeWithObjectsInToSpace) {
      assert(young_gen->to_space()->is_empty(),
             "Attempt to scavenge with live objects in to_space");
      young_gen->to_space()->clear(SpaceDecorator::Mangle);
    } else if (ZapUnusedHeapArea) {
      young_gen->to_space()->mangle_unused_area();
    }
    save_to_space_top_before_gc();

    COMPILER2_PRESENT(DerivedPointerTable::clear());

    reference_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
    reference_processor()->setup_policy(false);

    // We track how much was promoted to the next generation for
    // the AdaptiveSizePolicy.
    size_t old_gen_used_before = old_gen->used_in_bytes();

    // For PrintGCDetails
    size_t young_gen_used_before = young_gen->used_in_bytes();

    // Reset our survivor overflow.
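    // (The flag is set during the scavenge if copying into to_space fails
    // and an object has to be promoted early; the adaptive size policy
    // reads it below when recomputing the survivor size and tenuring
    // threshold.)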
    set_survivor_overflow(false);

    // We need to save the old top values before
    // creating the promotion_manager. We pass the top
    // values to the card_table, to prevent it from
    // straying into the promotion labs.
    HeapWord* old_top = old_gen->object_space()->top();

    // Release all previously held resources
    gc_task_manager()->release_all_resources();

    // Set the number of GC threads to be used in this collection
    gc_task_manager()->set_active_gang();
    gc_task_manager()->task_idle_workers();
    // Get the active number of workers here and use that value
    // throughout the methods.
    uint active_workers = gc_task_manager()->active_workers();
    heap->set_par_threads(active_workers);

    PSPromotionManager::pre_scavenge();

    // We'll use the promotion manager again later.
    PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
    {
      GCTraceTime tm("Scavenge", false, false, &_gc_timer);
      ParallelScavengeHeap::ParStrongRootsScope psrs;

      GCTaskQueue* q = GCTaskQueue::create();

      if (!old_gen->object_space()->is_empty()) {
        // There are only old-to-young pointers if there are objects
        // in the old gen.
        uint stripe_total = active_workers;
        for (uint i = 0; i < stripe_total; i++) {
          q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i, stripe_total));
        }
      }

      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
      // We scan the thread roots in parallel
      Threads::create_thread_roots_tasks(q);
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::class_loader_data));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache));

      ParallelTaskTerminator terminator(
        active_workers,
        (TaskQueueSetSuper*) promotion_manager->stack_array_depth());
      if (active_workers > 1) {
        for (uint j = 0; j < active_workers; j++) {
          q->enqueue(new StealTask(&terminator));
        }
      }

      gc_task_manager()->execute_and_wait(q);
    }

    scavenge_midpoint.update();

    // Process reference objects discovered during scavenge
    {
      GCTraceTime tm("References", false, false, &_gc_timer);

      reference_processor()->setup_policy(false); // not always_clear
      reference_processor()->set_active_mt_degree(active_workers);
      PSKeepAliveClosure keep_alive(promotion_manager);
      PSEvacuateFollowersClosure evac_followers(promotion_manager);
      ReferenceProcessorStats stats;
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        stats = reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, &task_executor,
          &_gc_timer);
      } else {
        stats = reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, NULL, &_gc_timer);
      }

      _gc_tracer.report_gc_reference_stats(stats);

      // Enqueue reference objects discovered during scavenge.
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        reference_processor()->enqueue_discovered_references(&task_executor);
      } else {
        reference_processor()->enqueue_discovered_references(NULL);
      }
    }

    {
      GCTraceTime tm("StringTable", false, false, &_gc_timer);
      // Unlink any dead interned Strings and process the remaining live ones.
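      // (Entries whose String is no longer reachable are unlinked from the
      // table; surviving entries are treated as strong roots here, so the
      // interned String is scavenged and the table entry updated in place.)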
      PSScavengeRootsClosure root_closure(promotion_manager);
      StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure);
    }

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    promotion_failure_occurred = PSPromotionManager::post_scavenge(_gc_tracer);
    if (promotion_failure_occurred) {
      clean_up_failed_promotion();
      if (PrintGC) {
        gclog_or_tty->print("--");
      }
    }

    // Let the size policy know we're done.  Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're
    // implicitly saying it's mutator time).
    size_policy->minor_collection_end(gc_cause);

    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.
      young_gen->eden_space()->clear(SpaceDecorator::Mangle);
      young_gen->from_space()->clear(SpaceDecorator::Mangle);
      young_gen->swap_spaces();

      size_t survived = young_gen->from_space()->used_in_bytes();
      size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;
      size_policy->update_averages(_survivor_overflow, survived, promoted);

      // A successful scavenge should restart the GC time limit count which is
      // for full GC's.
      size_policy->reset_gc_overhead_limit_count();
      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print("AdaptiveSizeStart: ");
          gclog_or_tty->stamp();
          gclog_or_tty->print_cr(" collection: %d ", heap->total_collections());

          if (Verbose) {
            gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT
                                " young_gen_capacity: " SIZE_FORMAT,
                                old_gen->capacity_in_bytes(),
                                young_gen->capacity_in_bytes());
          }
        }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_old_eden_size(
            size_policy->calculated_eden_size_in_bytes());
          counters->update_old_promo_size(
            size_policy->calculated_promo_size_in_bytes());
          counters->update_old_capacity(old_gen->capacity_in_bytes());
          counters->update_young_capacity(young_gen->capacity_in_bytes());
          counters->update_survived(survived);
          counters->update_promoted(promoted);
          counters->update_survivor_overflowed(_survivor_overflow);
        }

        size_t max_young_size = young_gen->max_size();

        // Deciding a free ratio in the young generation is tricky, so if
        // MinHeapFreeRatio or MaxHeapFreeRatio are in use (implying that the
        // old generation size may have been limited because of them), we
        // should then limit our young generation size using NewRatio to have
        // it follow the old generation size.
        if (MinHeapFreeRatio != 0 || MaxHeapFreeRatio != 100) {
          max_young_size = MIN2(old_gen->capacity_in_bytes() / NewRatio,
                                young_gen->max_size());
        }

        size_t survivor_limit =
          size_policy->max_survivor_size(max_young_size);
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(
                                                           _survivor_overflow,
                                                           _tenuring_threshold,
                                                           survivor_limit);

        if (PrintTenuringDistribution) {
          gclog_or_tty->cr();
          gclog_or_tty->print_cr("Desired survivor size " SIZE_FORMAT
                                 " bytes, new threshold %u (max %u)",
                                 size_policy->calculated_survivor_size_in_bytes(),
                                 _tenuring_threshold, MaxTenuringThreshold);
        }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_tenuring_threshold(_tenuring_threshold);
          counters->update_survivor_size_counters();
        }

        // Do call at minor collections?
        // Don't check if the size_policy is ready at this
        // level.  Let the size_policy check that internally.
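        // (In the block below, eden's allowed maximum is the young gen
        // maximum minus both survivor spaces, since all three spaces share
        // the young generation's reserved range:
        //   max_eden_size = max_young_size - from_capacity - to_capacity.)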
        if (UseAdaptiveGenerationSizePolicyAtMinorCollection &&
            ((gc_cause != GCCause::_java_lang_system_gc) ||
              UseAdaptiveSizePolicyWithSystemGC)) {

          // Calculate optimal free space amounts
          assert(young_gen->max_size() >
            young_gen->from_space()->capacity_in_bytes() +
            young_gen->to_space()->capacity_in_bytes(),
            "Sizes of space in young gen are out-of-bounds");

          size_t young_live = young_gen->used_in_bytes();
          size_t eden_live = young_gen->eden_space()->used_in_bytes();
          size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
          size_t max_old_gen_size = old_gen->max_gen_size();
          size_t max_eden_size = max_young_size -
            young_gen->from_space()->capacity_in_bytes() -
            young_gen->to_space()->capacity_in_bytes();

          // Used for diagnostics
          size_policy->clear_generation_free_space_flags();

          size_policy->compute_eden_space_size(young_live,
                                               eden_live,
                                               cur_eden,
                                               max_eden_size,
                                               false /* not full gc*/);

          size_policy->check_gc_overhead_limit(young_live,
                                               eden_live,
                                               max_old_gen_size,
                                               max_eden_size,
                                               false /* not full gc*/,
                                               gc_cause,
                                               heap->collector_policy());

          size_policy->decay_supplemental_growth(false /* not full gc*/);
        }
        // Resize the young generation at every collection
        // even if new sizes have not been calculated.  This is
        // to allow resizes that may have been inhibited by the
        // relative location of the "to" and "from" spaces.

        // Resizing the old gen at minor collections can cause increases
        // that don't feed back to the generation sizing policy until
        // a major collection.  Don't resize the old gen here.

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                                 heap->total_collections());
        }
      }

      // Update the structure of the eden. With NUMA-eden, CPU hotplugging or
      // offlining can change the heap layout. Make sure eden is reshaped if
      // that's the case. Also, update() will cause adaptive NUMA chunk
      // resizing.
      assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->gc_policy_counters()->update_counters();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());

    {
      GCTraceTime tm("Prune Scavenge Root Methods", false, false, &_gc_timer);
      CodeCache::prune_scavenge_root_nmethods();
    }

    // Re-verify object start arrays
    if (VerifyObjectStartArray &&
        VerifyAfterGC) {
      old_gen->verify_object_start_array();
    }

    // Verify all old -> young cards are now precise
    if (VerifyRememberedSets) {
      // Precise verification will give false positives. Until this is fixed,
      // use imprecise verification.
      // CardTableExtension::verify_all_young_refs_precise();
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (TraceGen0Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here.  This is after the GC so
        // would be confusing.
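        // (print_used_change() emits the usual "before->after(capacity)"
        // figure for the space, e.g. "PSYoungGen: 524800K->1024K(611840K)";
        // the numbers here are illustrative.)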
        young_gen->print_used_change(young_gen_used_before);
      }
      heap->print_heap_change(prev_used);
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    gc_task_manager()->release_idle_workers();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyAfterGC:");
  }

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(&_gc_tracer);
  _gc_tracer.report_tenuring_threshold(tenuring_threshold());

  if (ZapUnusedHeapArea) {
    young_gen->eden_space()->check_mangled_unused_area_complete();
    young_gen->from_space()->check_mangled_unused_area_complete();
    young_gen->to_space()->check_mangled_unused_area_complete();
  }

  scavenge_exit.update();

  if (PrintGCTaskTimeStamps) {
    tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " " INT64_FORMAT,
                  scavenge_entry.ticks(), scavenge_midpoint.ticks(),
                  scavenge_exit.ticks());
    gc_task_manager()->print_task_time_stamps();
  }

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  _gc_timer.register_gc_end();
  _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());

  return !promotion_failure_occurred;
}
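// Note on the return value: invoke_no_policy() returns false when the
// GC locker blocked the collection or should_attempt_scavenge() vetoed it,
// and !promotion_failure_occurred otherwise; a false return lets the caller
// (invoke()) escalate to a full collection.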