// This method contains all heap specific policy for invoking mark sweep.
// PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
// the heap. It will do nothing further. If we need to bail out for policy
// reasons, scavenge before full gc, or any other specialized behavior, it
// needs to be added here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // Before each allocation/collection attempt, find out from the
  // policy object if GCs are, on the whole, taking too long. If so,
  // bail out without attempting a collection. The exceptions are
  // for explicitly requested GC's.
  if (!policy->gc_time_limit_exceeded() ||
      GCCause::is_user_requested_gc(gc_cause) ||
      GCCause::is_serviceability_requested_gc(gc_cause)) {
    IsGCActiveMark mark;

    if (ScavengeBeforeFullGC) {
      PSScavenge::invoke_no_policy();
    }

    int count = (maximum_heap_compaction) ? 1 : MarkSweepAlwaysCompactCount;
    IntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
    PSMarkSweep::invoke_no_policy(maximum_heap_compaction);
  }
}
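// IntFlagSetting above (and UIntFlagSetting in the later revision further
// down) acts as a scoped override of MarkSweepAlwaysCompactCount: the old
// value comes back when the guard leaves scope, so the override only applies
// to this one invocation. The following is a minimal sketch of that
// save-and-restore pattern, not the actual HotSpot utility class.

// Scoped save-and-restore for a global tuning flag. The constructor records
// the current value and installs the override; the destructor puts the
// original value back, even on early return.
template <typename T>
class ScopedFlagSetting {
 public:
  ScopedFlagSetting(T& flag, T new_value) : _flag(flag), _saved(flag) {
    _flag = new_value;
  }
  ~ScopedFlagSetting() { _flag = _saved; }
 private:
  T& _flag;
  T  _saved;
};

// Usage mirroring the call site above (hypothetical flag variable):
//   int MarkSweepAlwaysCompactCount = 4;
//   {
//     ScopedFlagSetting<int> guard(MarkSweepAlwaysCompactCount, 1);
//     // ... full collection runs with the override ...
//   }  // original value restored here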
// This method contains all heap specific policy for invoking scavenge.
// PSScavenge::invoke_no_policy() will do nothing but attempt to
// scavenge. It will not clean up after failed promotions, bail out if
// we've exceeded policy time limits, or any other special behavior.
// All such policy should be placed here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
void PSScavenge::invoke() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  bool scavenge_was_done = PSScavenge::invoke_no_policy();

  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
  if (UsePerfData) counters->update_full_follows_scavenge(0);

  if (!scavenge_was_done ||
      policy->should_full_GC(heap->old_gen()->free_in_bytes())) {
    if (UsePerfData) counters->update_full_follows_scavenge(full_follows_scavenge);

    GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
    CollectorPolicy* cp = heap->collector_policy();
    const bool clear_all_softrefs = cp->should_clear_all_soft_refs();

    if (UseParallelOldGC) {
      PSParallelCompact::invoke_no_policy(clear_all_softrefs);
    } else {
      PSMarkSweep::invoke_no_policy(clear_all_softrefs);
    }
  }
}
bool PSScavenge::should_attempt_scavenge() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();

  if (UsePerfData) {
    counters->update_scavenge_skipped(not_skipped);
  }

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  if (!ScavengeWithObjectsInToSpace) {
    // Do not attempt to promote unless to_space is empty
    if (!young_gen->to_space()->is_empty()) {
      _consecutive_skipped_scavenges++;
      if (UsePerfData) {
        counters->update_scavenge_skipped(to_space_not_empty);
      }
      return false;
    }
  }

  // Test to see if the scavenge will likely fail.
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // A similar test is done in the policy's should_full_GC(). If this is
  // changed, decide if that test should also be changed.
  size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
  size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
  bool result = promotion_estimate < old_gen->free_in_bytes();

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(result ? " do scavenge: " : " skip scavenge: ");
    gclog_or_tty->print_cr(" average_promoted " SIZE_FORMAT
      " padded_average_promoted " SIZE_FORMAT
      " free in old gen " SIZE_FORMAT,
      (size_t) policy->average_promoted_in_bytes(),
      (size_t) policy->padded_average_promoted_in_bytes(),
      old_gen->free_in_bytes());
    if (young_gen->used_in_bytes() <
        (size_t) policy->padded_average_promoted_in_bytes()) {
      gclog_or_tty->print_cr(" padded_promoted_average is greater"
        " than maximum promotion = " SIZE_FORMAT,
        young_gen->used_in_bytes());
    }
  }

  if (result) {
    _consecutive_skipped_scavenges = 0;
  } else {
    _consecutive_skipped_scavenges++;
    if (UsePerfData) {
      counters->update_scavenge_skipped(promoted_too_large);
    }
  }
  return result;
}
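// The decision above boils down to one comparison: the promotion estimate
// (the padded average promoted, capped by what is actually live in the young
// gen) must fit into the old gen's free space. Below is a standalone sketch
// of just that comparison, using made-up sizes rather than real heap values.

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Simplified version of the feasibility test: scavenge only if the expected
// promotion volume fits into the old generation's free space.
static bool scavenge_likely_to_succeed(std::size_t padded_avg_promoted,
                                       std::size_t young_used,
                                       std::size_t old_free) {
  const std::size_t promotion_estimate =
      std::min(padded_avg_promoted, young_used);
  return promotion_estimate < old_free;
}

int main() {
  // Hypothetical sizes in bytes: 8 MB average promoted, 32 MB live young gen.
  printf("%d\n", scavenge_likely_to_succeed(8u << 20, 32u << 20, 16u << 20)); // 1: attempt
  printf("%d\n", scavenge_likely_to_succeed(8u << 20, 32u << 20,  4u << 20)); // 0: skip
  return 0;
}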
size_t ASPSOldGen::available_for_contraction() {
  size_t uncommitted_bytes = virtual_space()->uncommitted_size();
  if (uncommitted_bytes != 0) {
    return uncommitted_bytes;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  const size_t gen_alignment = heap->old_gen_alignment();
  PSAdaptiveSizePolicy* policy = heap->size_policy();
  const size_t working_size =
    used_in_bytes() + (size_t) policy->avg_promoted()->padded_average();
  const size_t working_aligned = align_size_up(working_size, gen_alignment);
  const size_t working_or_min = MAX2(working_aligned, min_gen_size());
  if (working_or_min > reserved().byte_size()) {
    // If the used or minimum gen size (aligned up) is greater
    // than the total reserved size, then the space available
    // for contraction should (after proper alignment) be 0
    return 0;
  }
  const size_t max_contraction = reserved().byte_size() - working_or_min;

  // Use the "increment" fraction instead of the "decrement" fraction
  // to allow the other gen to expand more aggressively. The
  // "decrement" fraction is conservative because its intent is to
  // only reduce the footprint.
  size_t result = policy->promo_increment_aligned_down(max_contraction);
  // Also adjust for inter-generational alignment
  size_t result_aligned = align_size_down(result, gen_alignment);

  if (PrintAdaptiveSizePolicy && Verbose) {
    gclog_or_tty->print_cr("\nASPSOldGen::available_for_contraction:"
      " %d K / 0x%x", result_aligned/K, result_aligned);
    gclog_or_tty->print_cr(" reserved().byte_size() %d K / 0x%x ",
      reserved().byte_size()/K, reserved().byte_size());
    size_t working_promoted = (size_t) policy->avg_promoted()->padded_average();
    gclog_or_tty->print_cr(" padded promoted %d K / 0x%x",
      working_promoted/K, working_promoted);
    gclog_or_tty->print_cr(" used %d K / 0x%x",
      used_in_bytes()/K, used_in_bytes());
    gclog_or_tty->print_cr(" min_gen_size() %d K / 0x%x",
      min_gen_size()/K, min_gen_size());
    gclog_or_tty->print_cr(" max_contraction %d K / 0x%x",
      max_contraction/K, max_contraction);
    gclog_or_tty->print_cr(" without alignment %d K / 0x%x",
      policy->promo_increment(max_contraction)/K,
      policy->promo_increment(max_contraction));
    gclog_or_tty->print_cr(" alignment 0x%x", gen_alignment);
  }

  assert(result_aligned <= max_contraction, "arithmetic is wrong");
  return result_aligned;
}
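// The contraction math above leans on align_size_up()/align_size_down(),
// which round to a generation alignment that is a power of two. This is a
// minimal sketch of that arithmetic only; the real HotSpot helpers carry
// additional debug checks that are omitted here.

#include <cassert>
#include <cstddef>

// Round down / up to a power-of-two alignment.
inline std::size_t align_down(std::size_t size, std::size_t alignment) {
  assert((alignment & (alignment - 1)) == 0 && "alignment must be a power of two");
  return size & ~(alignment - 1);
}

inline std::size_t align_up(std::size_t size, std::size_t alignment) {
  assert((alignment & (alignment - 1)) == 0 && "alignment must be a power of two");
  return (size + alignment - 1) & ~(alignment - 1);
}

// e.g. with a 64 KB generation alignment:
//   align_up(100000, 65536)   == 131072
//   align_down(100000, 65536) ==  65536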
// Return the number of bytes the young gen is willing to give up.
//
// Future implementations could check the survivors and if to_space is in the
// right place (below from_space), take a chunk from to_space.
size_t ASPSYoungGen::available_for_contraction() {
  size_t uncommitted_bytes = virtual_space()->uncommitted_size();
  if (uncommitted_bytes != 0) {
    return uncommitted_bytes;
  }

  if (eden_space()->is_empty()) {
    // Respect the minimum size for eden and for the young gen as a whole.
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    const size_t eden_alignment = heap->intra_heap_alignment();
    const size_t gen_alignment = heap->young_gen_alignment();

    assert(eden_space()->capacity_in_bytes() >= eden_alignment,
      "Alignment is wrong");
    size_t eden_avail = eden_space()->capacity_in_bytes() - eden_alignment;
    eden_avail = align_size_down(eden_avail, gen_alignment);

    assert(virtual_space()->committed_size() >= min_gen_size(),
      "minimum gen size is wrong");
    size_t gen_avail = virtual_space()->committed_size() - min_gen_size();
    assert(virtual_space()->is_aligned(gen_avail), "not aligned");

    const size_t max_contraction = MIN2(eden_avail, gen_avail);

    // See comment for ASPSOldGen::available_for_contraction()
    // for reasons the "increment" fraction is used.
    PSAdaptiveSizePolicy* policy = heap->size_policy();
    size_t result = policy->eden_increment_aligned_down(max_contraction);
    size_t result_aligned = align_size_down(result, gen_alignment);

    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr("ASPSYoungGen::available_for_contraction: %d K",
        result_aligned/K);
      gclog_or_tty->print_cr(" max_contraction %d K", max_contraction/K);
      gclog_or_tty->print_cr(" eden_avail %d K", eden_avail/K);
      gclog_or_tty->print_cr(" gen_avail %d K", gen_avail/K);
    }
    return result_aligned;
  }
  return 0;
}
void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  if (ScavengeBeforeFullGC) {
    PSScavenge::invoke_no_policy();
  }

  const bool clear_all_soft_refs =
    heap->collector_policy()->should_clear_all_soft_refs();

  uint count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
  UIntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
  PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
}
// This method contains all heap specific policy for invoking scavenge.
// PSScavenge::invoke_no_policy() will do nothing but attempt to
// scavenge. It will not clean up after failed promotions, bail out if
// we've exceeded policy time limits, or any other special behavior.
// All such policy should be placed here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
void PSScavenge::invoke() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // Before each allocation/collection attempt, find out from the
  // policy object if GCs are, on the whole, taking too long. If so,
  // bail out without attempting a collection.
  if (!policy->gc_time_limit_exceeded()) {
    IsGCActiveMark mark;

    bool scavenge_was_done = PSScavenge::invoke_no_policy();

    PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
    if (UsePerfData) counters->update_full_follows_scavenge(0);

    if (!scavenge_was_done ||
        policy->should_full_GC(heap->old_gen()->free_in_bytes())) {
      if (UsePerfData) counters->update_full_follows_scavenge(full_follows_scavenge);

      GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
      if (UseParallelOldGC) {
        PSParallelCompact::invoke_no_policy(false);
      } else {
        PSMarkSweep::invoke_no_policy(false);
      }
    }
  }
}
// This method contains no policy. You should probably // be calling invoke() instead. bool PSScavenge::invoke_no_policy() { assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread"); elapsedTimer scavenge_time; TimeStamp scavenge_entry; TimeStamp scavenge_midpoint; TimeStamp scavenge_exit; scavenge_entry.update(); if (GC_locker::check_active_before_gc()) { return false; } ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); GCCause::Cause gc_cause = heap->gc_cause(); assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); // Check for potential problems. if (!should_attempt_scavenge()) { return false; } bool promotion_failure_occurred = false; PSYoungGen* young_gen = heap->young_gen(); PSOldGen* old_gen = heap->old_gen(); PSPermGen* perm_gen = heap->perm_gen(); PSAdaptiveSizePolicy* size_policy = heap->size_policy(); heap->increment_total_collections(); AdaptiveSizePolicyOutput(size_policy, heap->total_collections()); if ((gc_cause != GCCause::_java_lang_system_gc) || UseAdaptiveSizePolicyWithSystemGC) { // Gather the feedback data for eden occupancy. young_gen->eden_space()->accumulate_statistics(); } // We need to track unique scavenge invocations as well. _total_invocations++; if (PrintHeapAtGC) { Universe::print_heap_before_gc(); } assert(!NeverTenure||_tenuring_threshold==markWord::max_age+1,"Sanity"); assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity"); size_t prev_used = heap->used(); assert(promotion_failed() == false, "Sanity"); // Fill in TLABs heap->accumulate_statistics_all_tlabs(); heap->ensure_parsability(true); // retire TLABs if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification gclog_or_tty->print(" VerifyBeforeGC:"); Universe::verify(true); } { ResourceMark rm; HandleMark hm; gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); TraceTime t1("GC", PrintGC, !PrintGCDetails, gclog_or_tty); TraceCollectorStats tcs(counters()); TraceMemoryManagerStats tms(false /* not full GC */); if (TraceGen0Time) scavenge_time.start(); // Let the size policy know we're starting size_policy->minor_collection_begin(); // Verify the object start arrays. if (VerifyObjectStartArray && VerifyBeforeGC) { old_gen->verify_object_start_array(); perm_gen->verify_object_start_array(); } // Verify no unmarked old->young roots if (VerifyRememberedSets) { CardTableExtension::verify_all_young_refs_imprecise(); } if (!ScavengeWithObjectsInToSpace) { assert(young_gen->to_space()->is_empty(), "Attempt to scavenge with live objects in to_space"); young_gen->to_space()->clear(); } else if (ZapUnusedHeapArea) { young_gen->to_space()->mangle_unused_area(); } save_to_space_top_before_gc(); NOT_PRODUCT(reference_processor()->verify_no_references_recorded()); DerivedPointerTable::clear(); reference_processor()->enable_discovery(); // We track how much was promoted to the next generation for // the AdaptiveSizePolicy. size_t old_gen_used_before = old_gen->used_in_bytes(); // For PrintGCDetails size_t young_gen_used_before = young_gen->used_in_bytes(); // Reset our survivor overflow. set_survivor_overflow(false); // We need to save the old/perm top values before // creating the promotion_manager. We pass the top // values to the card_table, to prevent it from // straying into the promotion labs. 
HeapWord* old_top = old_gen->object_space()->top(); HeapWord* perm_top = perm_gen->object_space()->top(); // Release all previously held resources gc_task_manager()->release_all_resources(); PSPromotionManager::pre_scavenge(); // We'll use the promotion manager again later. PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager(); { // TraceTime("Roots"); GCTaskQueue* q = GCTaskQueue::create(); for(uint i=0; i<ParallelGCThreads; i++) { q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i)); q->enqueue(new OldToYoungRootsTask(perm_gen,perm_top,i)); } // q->enqueue(new SerialOldToYoungRootsTask(perm_gen, perm_top)); q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe)); q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles)); // We scan the thread roots in parallel // FIX ME! We should have a NoResourceMarkVerifier here! Threads::create_thread_roots_tasks(q); q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer)); q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management)); q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary)); q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti)); // NOTE! ArtaObjects are not normal roots. During scavenges, they are // considered strong roots. During a mark sweep they are weak roots. q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::arta_objects)); ParallelTaskTerminator terminator( gc_task_manager()->workers(), promotion_manager->depth_first() ? (TaskQueueSetSuper*)promotion_manager->stack_array_depth() : (TaskQueueSetSuper*)promotion_manager->stack_array_breadth()); if (ParallelGCThreads>1) { for (uint j=0; j<ParallelGCThreads; j++) { q->enqueue(new StealTask(&terminator)); } } gc_task_manager()->execute_and_wait(q); } scavenge_midpoint.update(); // Process reference objects discovered during scavenge { ReferencePolicy *soft_ref_policy = new LRUMaxHeapPolicy(); PSKeepAliveClosure keep_alive(promotion_manager); PSEvacuateFollowersClosure evac_followers(promotion_manager); assert(soft_ref_policy != NULL,"No soft reference policy"); if (reference_processor()->processing_is_mt()) { PSRefProcTaskExecutor task_executor; reference_processor()->process_discovered_references( soft_ref_policy, &_is_alive_closure, &keep_alive, &evac_followers, &task_executor); } else { reference_processor()->process_discovered_references( soft_ref_policy, &_is_alive_closure, &keep_alive, &evac_followers, NULL); } } // Enqueue reference objects discovered during scavenge. if (reference_processor()->processing_is_mt()) { PSRefProcTaskExecutor task_executor; reference_processor()->enqueue_discovered_references(&task_executor); } else { reference_processor()->enqueue_discovered_references(NULL); } // Finally, flush the promotion_manager's labs, and deallocate its stacks. assert(promotion_manager->claimed_stack_empty(), "Sanity"); PSPromotionManager::post_scavenge(); promotion_failure_occurred = promotion_failed(); if (promotion_failure_occurred) { _total_promotion_failures++; clean_up_failed_promotion(); if (PrintGC) { gclog_or_tty->print("--"); } } // Let the size policy know we're done. Note that we count promotion // failure cleanup time as part of the collection (otherwise, we're // implicitly saying it's mutator time). size_policy->minor_collection_end(gc_cause); if (!promotion_failure_occurred) { // Swap the survivor spaces. 
young_gen->eden_space()->clear(); young_gen->from_space()->clear(); young_gen->swap_spaces(); size_t survived = young_gen->from_space()->used_in_bytes(); size_t promoted = old_gen->used_in_bytes() - old_gen_used_before; size_policy->update_averages(_survivor_overflow, survived, promoted); if (UseAdaptiveSizePolicy) { // Calculate the new survivor size and tenuring threshold if (PrintAdaptiveSizePolicy) { gclog_or_tty->print("AdaptiveSizeStart: "); gclog_or_tty->stamp(); gclog_or_tty->print_cr(" collection: %d ", heap->total_collections()); if (Verbose) { gclog_or_tty->print("old_gen_capacity: %zd young_gen_capacity: %zd" " perm_gen_capacity: %zd ", old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(), perm_gen->capacity_in_bytes()); } } if (UsePerfData) { PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters(); counters->update_old_eden_size( size_policy->calculated_eden_size_in_bytes()); counters->update_old_promo_size( size_policy->calculated_promo_size_in_bytes()); counters->update_old_capacity(old_gen->capacity_in_bytes()); counters->update_young_capacity(young_gen->capacity_in_bytes()); counters->update_survived(survived); counters->update_promoted(promoted); counters->update_survivor_overflowed(_survivor_overflow); } size_t survivor_limit = size_policy->max_survivor_size(young_gen->max_size()); _tenuring_threshold = size_policy->compute_survivor_space_size_and_threshold( _survivor_overflow, _tenuring_threshold, survivor_limit); if (PrintTenuringDistribution) { gclog_or_tty->cr(); gclog_or_tty->print_cr("Desired survivor size %ld bytes, new threshold %d (max %ld)", size_policy->calculated_survivor_size_in_bytes(), _tenuring_threshold, MaxTenuringThreshold); } if (UsePerfData) { PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters(); counters->update_tenuring_threshold(_tenuring_threshold); counters->update_survivor_size_counters(); } // Do call at minor collections? // Don't check if the size_policy is ready at this // level. Let the size_policy check that internally. if (UseAdaptiveSizePolicy && UseAdaptiveGenerationSizePolicyAtMinorCollection && ((gc_cause != GCCause::_java_lang_system_gc) || UseAdaptiveSizePolicyWithSystemGC)) { // Calculate optimial free space amounts assert(young_gen->max_size() > young_gen->from_space()->capacity_in_bytes() + young_gen->to_space()->capacity_in_bytes(), "Sizes of space in young gen are out-of-bounds"); size_t max_eden_size = young_gen->max_size() - young_gen->from_space()->capacity_in_bytes() - young_gen->to_space()->capacity_in_bytes(); size_policy->compute_generation_free_space(young_gen->used_in_bytes(), young_gen->eden_space()->used_in_bytes(), old_gen->used_in_bytes(), perm_gen->used_in_bytes(), young_gen->eden_space()->capacity_in_bytes(), old_gen->max_gen_size(), max_eden_size, false /* full gc*/, gc_cause); } // Resize the young generation at every collection // even if new sizes have not been calculated. This is // to allow resizes that may have been inhibited by the // relative location of the "to" and "from" spaces. // Resizing the old gen at minor collects can cause increases // that don't feed back to the generation sizing policy until // a major collection. Don't resize the old gen here. heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(), size_policy->calculated_survivor_size_in_bytes()); if (PrintAdaptiveSizePolicy) { gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ", heap->total_collections()); } } // Update the structure of the eden. 
With NUMA-eden CPU hotplugging or offlining can // cause the change of the heap layout. Make sure eden is reshaped if that's the case. // Also update() will case adaptive NUMA chunk resizing. assert(young_gen->eden_space()->is_empty(), "eden space should be empty now"); young_gen->eden_space()->update(); heap->gc_policy_counters()->update_counters(); heap->resize_all_tlabs(); assert(young_gen->to_space()->is_empty(), "to space should be empty now"); } DerivedPointerTable::update_pointers(); NOT_PRODUCT(reference_processor()->verify_no_references_recorded()); // Re-verify object start arrays if (VerifyObjectStartArray && VerifyAfterGC) { old_gen->verify_object_start_array(); perm_gen->verify_object_start_array(); } // Verify all old -> young cards are now precise if (VerifyRememberedSets) { // Precise verification will give false positives. Until this is fixed, // use imprecise verification. // CardTableExtension::verify_all_young_refs_precise(); CardTableExtension::verify_all_young_refs_imprecise(); } if (TraceGen0Time) { scavenge_time.stop(); if (promotion_failure_occurred) accumulated_undo_time()->add(scavenge_time); else accumulated_gc_time()->add(scavenge_time); } if (PrintGC) { if (PrintGCDetails) { // Don't print a GC timestamp here. This is after the GC so // would be confusing. young_gen->print_used_change(young_gen_used_before); } heap->print_heap_change(prev_used); } // Track memory usage and detect low memory MemoryService::track_memory_usage(); heap->update_counters(); } if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification gclog_or_tty->print(" VerifyAfterGC:"); Universe::verify(false); } if (PrintHeapAtGC) { Universe::print_heap_after_gc(); } scavenge_exit.update(); if (PrintGCTaskTimeStamps) { tty->print_cr("VM-Thread %lld %lld %lld", scavenge_entry.ticks(), scavenge_midpoint.ticks(), scavenge_exit.ticks()); gc_task_manager()->print_task_time_stamps(); } return !promotion_failure_occurred; }
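// For the adaptive sizing call in the scavenge above, eden's maximum is
// derived by subtracting both survivor spaces from the young gen's maximum
// (see the max_eden_size computation and its assert earlier in that
// function). The sketch below just works that arithmetic with made-up
// capacities; none of the numbers come from a real heap.

#include <cassert>
#include <cstddef>
#include <cstdio>

int main() {
  // Hypothetical capacities, in bytes.
  const std::size_t young_max  = 64 * 1024 * 1024;  // maximum young gen size
  const std::size_t from_space =  4 * 1024 * 1024;  // current from-space capacity
  const std::size_t to_space   =  4 * 1024 * 1024;  // current to-space capacity

  // Mirrors the assert in invoke_no_policy(): the survivors must fit
  // strictly inside the young gen's maximum.
  assert(young_max > from_space + to_space);

  // Eden may grow at most to whatever the survivors do not occupy.
  const std::size_t max_eden_size = young_max - from_space - to_space;
  printf("max_eden_size = %zu bytes (%zu MB)\n",
         max_eden_size, max_eden_size / (1024 * 1024));
  return 0;
}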
// This method contains no policy. You should probably // be calling invoke() instead. void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) { assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint"); assert(ref_processor() != NULL, "Sanity"); if (GC_locker::check_active_before_gc()) { return; } ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); GCCause::Cause gc_cause = heap->gc_cause(); assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); PSAdaptiveSizePolicy* size_policy = heap->size_policy(); PSYoungGen* young_gen = heap->young_gen(); PSOldGen* old_gen = heap->old_gen(); PSPermGen* perm_gen = heap->perm_gen(); // Increment the invocation count heap->increment_total_collections(true /* full */); // Save information needed to minimize mangling heap->record_gen_tops_before_GC(); // We need to track unique mark sweep invocations as well. _total_invocations++; AdaptiveSizePolicyOutput(size_policy, heap->total_collections()); if (PrintHeapAtGC) { Universe::print_heap_before_gc(); } // Fill in TLABs heap->accumulate_statistics_all_tlabs(); heap->ensure_parsability(true); // retire TLABs if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification gclog_or_tty->print(" VerifyBeforeGC:"); Universe::verify(true); } // Verify object start arrays if (VerifyObjectStartArray && VerifyBeforeGC) { old_gen->verify_object_start_array(); perm_gen->verify_object_start_array(); } heap->pre_full_gc_dump(); // Filled in below to track the state of the young gen after the collection. bool eden_empty; bool survivors_empty; bool young_gen_empty; { HandleMark hm; const bool is_system_gc = gc_cause == GCCause::_java_lang_system_gc; // This is useful for debugging but don't change the output the // the customer sees. const char* gc_cause_str = "Full GC"; if (is_system_gc && PrintGCDetails) { gc_cause_str = "Full GC (System)"; } gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); TraceTime t1(gc_cause_str, PrintGC, !PrintGCDetails, gclog_or_tty); TraceCollectorStats tcs(counters()); TraceMemoryManagerStats tms(true /* Full GC */); if (TraceGen1Time) accumulated_time()->start(); // Let the size policy know we're starting size_policy->major_collection_begin(); // When collecting the permanent generation methodOops may be moving, // so we either have to flush all bcp data or convert it into bci. CodeCache::gc_prologue(); Threads::gc_prologue(); BiasedLocking::preserve_marks(); // Capture heap size before collection for printing. size_t prev_used = heap->used(); // Capture perm gen size before collection for sizing. 
size_t perm_gen_prev_used = perm_gen->used_in_bytes(); // For PrintGCDetails size_t old_gen_prev_used = old_gen->used_in_bytes(); size_t young_gen_prev_used = young_gen->used_in_bytes(); allocate_stacks(); NOT_PRODUCT(ref_processor()->verify_no_references_recorded()); COMPILER2_PRESENT(DerivedPointerTable::clear()); ref_processor()->enable_discovery(); ref_processor()->setup_policy(clear_all_softrefs); mark_sweep_phase1(clear_all_softrefs); mark_sweep_phase2(); // Don't add any more derived pointers during phase3 COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity")); COMPILER2_PRESENT(DerivedPointerTable::set_active(false)); mark_sweep_phase3(); mark_sweep_phase4(); restore_marks(); deallocate_stacks(); if (ZapUnusedHeapArea) { // Do a complete mangle (top to end) because the usage for // scratch does not maintain a top pointer. young_gen->to_space()->mangle_unused_area_complete(); } eden_empty = young_gen->eden_space()->is_empty(); if (!eden_empty) { eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen); } // Update heap occupancy information which is used as // input to soft ref clearing policy at the next gc. Universe::update_heap_info_at_gc(); survivors_empty = young_gen->from_space()->is_empty() && young_gen->to_space()->is_empty(); young_gen_empty = eden_empty && survivors_empty; BarrierSet* bs = heap->barrier_set(); if (bs->is_a(BarrierSet::ModRef)) { ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs; MemRegion old_mr = heap->old_gen()->reserved(); MemRegion perm_mr = heap->perm_gen()->reserved(); assert(perm_mr.end() <= old_mr.start(), "Generations out of order"); if (young_gen_empty) { modBS->clear(MemRegion(perm_mr.start(), old_mr.end())); } else { modBS->invalidate(MemRegion(perm_mr.start(), old_mr.end())); } } BiasedLocking::restore_marks(); Threads::gc_epilogue(); CodeCache::gc_epilogue(); COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); ref_processor()->enqueue_discovered_references(NULL); // Update time of last GC reset_millis_since_last_gc(); // Let the size policy know we're done size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause); if (UseAdaptiveSizePolicy) { if (PrintAdaptiveSizePolicy) { gclog_or_tty->print("AdaptiveSizeStart: "); gclog_or_tty->stamp(); gclog_or_tty->print_cr(" collection: %d ", heap->total_collections()); if (Verbose) { gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d" " perm_gen_capacity: %d ", old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(), perm_gen->capacity_in_bytes()); } } // Don't check if the size_policy is ready here. Let // the size_policy check that internally. if (UseAdaptiveGenerationSizePolicyAtMajorCollection && ((gc_cause != GCCause::_java_lang_system_gc) || UseAdaptiveSizePolicyWithSystemGC)) { // Calculate optimal free space amounts assert(young_gen->max_size() > young_gen->from_space()->capacity_in_bytes() + young_gen->to_space()->capacity_in_bytes(), "Sizes of space in young gen are out-of-bounds"); size_t max_eden_size = young_gen->max_size() - young_gen->from_space()->capacity_in_bytes() - young_gen->to_space()->capacity_in_bytes(); size_policy->compute_generation_free_space(young_gen->used_in_bytes(), young_gen->eden_space()->used_in_bytes(), old_gen->used_in_bytes(), perm_gen->used_in_bytes(), young_gen->eden_space()->capacity_in_bytes(), old_gen->max_gen_size(), max_eden_size, true /* full gc*/, gc_cause); heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes()); // Don't resize the young generation at an major collection. 
A // desired young generation size may have been calculated but // resizing the young generation complicates the code because the // resizing of the old generation may have moved the boundary // between the young generation and the old generation. Let the // young generation resizing happen at the minor collections. } if (PrintAdaptiveSizePolicy) { gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ", heap->total_collections()); } } if (UsePerfData) { heap->gc_policy_counters()->update_counters(); heap->gc_policy_counters()->update_old_capacity( old_gen->capacity_in_bytes()); heap->gc_policy_counters()->update_young_capacity( young_gen->capacity_in_bytes()); } heap->resize_all_tlabs(); // We collected the perm gen, so we'll resize it here. perm_gen->compute_new_size(perm_gen_prev_used); if (TraceGen1Time) accumulated_time()->stop(); if (PrintGC) { if (PrintGCDetails) { // Don't print a GC timestamp here. This is after the GC so // would be confusing. young_gen->print_used_change(young_gen_prev_used); old_gen->print_used_change(old_gen_prev_used); } heap->print_heap_change(prev_used); // Do perm gen after heap becase prev_used does // not include the perm gen (done this way in the other // collectors). if (PrintGCDetails) { perm_gen->print_used_change(perm_gen_prev_used); } } // Track memory usage and detect low memory MemoryService::track_memory_usage(); heap->update_counters(); if (PrintGCDetails) { if (size_policy->print_gc_time_limit_would_be_exceeded()) { if (size_policy->gc_time_limit_exceeded()) { gclog_or_tty->print_cr(" GC time is exceeding GCTimeLimit " "of %d%%", GCTimeLimit); } else { gclog_or_tty->print_cr(" GC time would exceed GCTimeLimit " "of %d%%", GCTimeLimit); } } size_policy->set_print_gc_time_limit_would_be_exceeded(false); } } if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification gclog_or_tty->print(" VerifyAfterGC:"); Universe::verify(false); } // Re-verify object start arrays if (VerifyObjectStartArray && VerifyAfterGC) { old_gen->verify_object_start_array(); perm_gen->verify_object_start_array(); } if (ZapUnusedHeapArea) { old_gen->object_space()->check_mangled_unused_area_complete(); perm_gen->object_space()->check_mangled_unused_area_complete(); } NOT_PRODUCT(ref_processor()->verify_no_references_recorded()); if (PrintHeapAtGC) { Universe::print_heap_after_gc(); } heap->post_full_gc_dump(); #ifdef TRACESPINNING ParallelTaskTerminator::print_termination_counts(); #endif }
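// The GCTimeLimit messages printed at the end of the full GC above reflect
// the adaptive policy's overhead check: GC time as a fraction of elapsed time
// compared against GCTimeLimit percent. The real bookkeeping lives in
// PSAdaptiveSizePolicy; the sketch below is only an assumed illustration of
// the ratio, with hypothetical inputs, not HotSpot's implementation.

#include <cstdio>

// Hypothetical check: report whether GC is consuming more than
// gc_time_limit_percent of total elapsed time.
static bool gc_time_limit_exceeded(double gc_seconds,
                                   double elapsed_seconds,
                                   unsigned gc_time_limit_percent) {
  if (elapsed_seconds <= 0.0) return false;
  const double gc_fraction = gc_seconds / elapsed_seconds;
  return gc_fraction * 100.0 > (double)gc_time_limit_percent;
}

int main() {
  // e.g. 9.9s of GC out of 10s elapsed, against the default GCTimeLimit of 98%
  printf("%s\n", gc_time_limit_exceeded(9.9, 10.0, 98) ? "exceeded" : "ok");
  return 0;
}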
// This method contains no policy. You should probably // be calling invoke() instead. bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) { assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint"); assert(ref_processor() != NULL, "Sanity"); if (GC_locker::check_active_before_gc()) { return false; } ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); GCCause::Cause gc_cause = heap->gc_cause(); _gc_timer->register_gc_start(); _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start()); PSAdaptiveSizePolicy* size_policy = heap->size_policy(); // The scope of casr should end after code that can change // CollectorPolicy::_should_clear_all_soft_refs. ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy()); PSYoungGen* young_gen = heap->young_gen(); PSOldGen* old_gen = heap->old_gen(); // Increment the invocation count heap->increment_total_collections(true /* full */); // Save information needed to minimize mangling heap->record_gen_tops_before_GC(); // We need to track unique mark sweep invocations as well. _total_invocations++; AdaptiveSizePolicyOutput(size_policy, heap->total_collections()); heap->print_heap_before_gc(); heap->trace_heap_before_gc(_gc_tracer); // Fill in TLABs heap->accumulate_statistics_all_tlabs(); heap->ensure_parsability(true); // retire TLABs if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification Universe::verify(" VerifyBeforeGC:"); } // Verify object start arrays if (VerifyObjectStartArray && VerifyBeforeGC) { old_gen->verify_object_start_array(); } heap->pre_full_gc_dump(_gc_timer); // Filled in below to track the state of the young gen after the collection. bool eden_empty; bool survivors_empty; bool young_gen_empty; { HandleMark hm; TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer->gc_id()); TraceCollectorStats tcs(counters()); TraceMemoryManagerStats tms(true /* Full GC */,gc_cause); if (TraceOldGenTime) accumulated_time()->start(); // Let the size policy know we're starting size_policy->major_collection_begin(); CodeCache::gc_prologue(); BiasedLocking::preserve_marks(); // Capture heap size before collection for printing. size_t prev_used = heap->used(); // Capture metadata size before collection for sizing. size_t metadata_prev_used = MetaspaceAux::used_bytes(); // For PrintGCDetails size_t old_gen_prev_used = old_gen->used_in_bytes(); size_t young_gen_prev_used = young_gen->used_in_bytes(); allocate_stacks(); COMPILER2_PRESENT(DerivedPointerTable::clear()); ref_processor()->enable_discovery(); ref_processor()->setup_policy(clear_all_softrefs); mark_sweep_phase1(clear_all_softrefs); mark_sweep_phase2(); // Don't add any more derived pointers during phase3 COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity")); COMPILER2_PRESENT(DerivedPointerTable::set_active(false)); mark_sweep_phase3(); mark_sweep_phase4(); restore_marks(); deallocate_stacks(); if (ZapUnusedHeapArea) { // Do a complete mangle (top to end) because the usage for // scratch does not maintain a top pointer. young_gen->to_space()->mangle_unused_area_complete(); } eden_empty = young_gen->eden_space()->is_empty(); if (!eden_empty) { eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen); } // Update heap occupancy information which is used as // input to soft ref clearing policy at the next gc. 
Universe::update_heap_info_at_gc(); survivors_empty = young_gen->from_space()->is_empty() && young_gen->to_space()->is_empty(); young_gen_empty = eden_empty && survivors_empty; ModRefBarrierSet* modBS = barrier_set_cast<ModRefBarrierSet>(heap->barrier_set()); MemRegion old_mr = heap->old_gen()->reserved(); if (young_gen_empty) { modBS->clear(MemRegion(old_mr.start(), old_mr.end())); } else { modBS->invalidate(MemRegion(old_mr.start(), old_mr.end())); } // Delete metaspaces for unloaded class loaders and clean up loader_data graph ClassLoaderDataGraph::purge(); MetaspaceAux::verify_metrics(); BiasedLocking::restore_marks(); CodeCache::gc_epilogue(); JvmtiExport::gc_epilogue(); COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); ref_processor()->enqueue_discovered_references(NULL); // Update time of last GC reset_millis_since_last_gc(); // Let the size policy know we're done size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause); if (UseAdaptiveSizePolicy) { if (PrintAdaptiveSizePolicy) { gclog_or_tty->print("AdaptiveSizeStart: "); gclog_or_tty->stamp(); gclog_or_tty->print_cr(" collection: %d ", heap->total_collections()); if (Verbose) { gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT " young_gen_capacity: " SIZE_FORMAT, old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes()); } } // Don't check if the size_policy is ready here. Let // the size_policy check that internally. if (UseAdaptiveGenerationSizePolicyAtMajorCollection && ((gc_cause != GCCause::_java_lang_system_gc) || UseAdaptiveSizePolicyWithSystemGC)) { // Swap the survivor spaces if from_space is empty. The // resize_young_gen() called below is normally used after // a successful young GC and swapping of survivor spaces; // otherwise, it will fail to resize the young gen with // the current implementation. 
if (young_gen->from_space()->is_empty()) { young_gen->from_space()->clear(SpaceDecorator::Mangle); young_gen->swap_spaces(); } // Calculate optimal free space amounts assert(young_gen->max_size() > young_gen->from_space()->capacity_in_bytes() + young_gen->to_space()->capacity_in_bytes(), "Sizes of space in young gen are out-of-bounds"); size_t young_live = young_gen->used_in_bytes(); size_t eden_live = young_gen->eden_space()->used_in_bytes(); size_t old_live = old_gen->used_in_bytes(); size_t cur_eden = young_gen->eden_space()->capacity_in_bytes(); size_t max_old_gen_size = old_gen->max_gen_size(); size_t max_eden_size = young_gen->max_size() - young_gen->from_space()->capacity_in_bytes() - young_gen->to_space()->capacity_in_bytes(); // Used for diagnostics size_policy->clear_generation_free_space_flags(); size_policy->compute_generations_free_space(young_live, eden_live, old_live, cur_eden, max_old_gen_size, max_eden_size, true /* full gc*/); size_policy->check_gc_overhead_limit(young_live, eden_live, max_old_gen_size, max_eden_size, true /* full gc*/, gc_cause, heap->collector_policy()); size_policy->decay_supplemental_growth(true /* full gc*/); heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes()); heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(), size_policy->calculated_survivor_size_in_bytes()); } if (PrintAdaptiveSizePolicy) { gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ", heap->total_collections()); } } if (UsePerfData) { heap->gc_policy_counters()->update_counters(); heap->gc_policy_counters()->update_old_capacity( old_gen->capacity_in_bytes()); heap->gc_policy_counters()->update_young_capacity( young_gen->capacity_in_bytes()); } heap->resize_all_tlabs(); // We collected the heap, recalculate the metaspace capacity MetaspaceGC::compute_new_size(); if (TraceOldGenTime) accumulated_time()->stop(); if (PrintGC) { if (PrintGCDetails) { // Don't print a GC timestamp here. This is after the GC so // would be confusing. young_gen->print_used_change(young_gen_prev_used); old_gen->print_used_change(old_gen_prev_used); } heap->print_heap_change(prev_used); if (PrintGCDetails) { MetaspaceAux::print_metaspace_change(metadata_prev_used); } } // Track memory usage and detect low memory MemoryService::track_memory_usage(); heap->update_counters(); } if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification Universe::verify(" VerifyAfterGC:"); } // Re-verify object start arrays if (VerifyObjectStartArray && VerifyAfterGC) { old_gen->verify_object_start_array(); } if (ZapUnusedHeapArea) { old_gen->object_space()->check_mangled_unused_area_complete(); } NOT_PRODUCT(ref_processor()->verify_no_references_recorded()); heap->print_heap_after_gc(); heap->trace_heap_after_gc(_gc_tracer); heap->post_full_gc_dump(_gc_timer); #ifdef TRACESPINNING ParallelTaskTerminator::print_termination_counts(); #endif _gc_timer->register_gc_end(); _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions()); return true; }
// This method contains no policy. You should probably // be calling invoke() instead. bool PSScavenge::invoke_no_policy() { assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread"); assert(_preserved_mark_stack.is_empty(), "should be empty"); assert(_preserved_oop_stack.is_empty(), "should be empty"); _gc_timer.register_gc_start(); TimeStamp scavenge_entry; TimeStamp scavenge_midpoint; TimeStamp scavenge_exit; scavenge_entry.update(); if (GC_locker::check_active_before_gc()) { return false; } ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); GCCause::Cause gc_cause = heap->gc_cause(); assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); // Check for potential problems. if (!should_attempt_scavenge()) { return false; } _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start()); bool promotion_failure_occurred = false; PSYoungGen* young_gen = heap->young_gen(); PSOldGen* old_gen = heap->old_gen(); PSAdaptiveSizePolicy* size_policy = heap->size_policy(); heap->increment_total_collections(); AdaptiveSizePolicyOutput(size_policy, heap->total_collections()); if ((gc_cause != GCCause::_java_lang_system_gc) || UseAdaptiveSizePolicyWithSystemGC) { // Gather the feedback data for eden occupancy. young_gen->eden_space()->accumulate_statistics(); } if (ZapUnusedHeapArea) { // Save information needed to minimize mangling heap->record_gen_tops_before_GC(); } heap->print_heap_before_gc(); heap->trace_heap_before_gc(&_gc_tracer); assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity"); assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity"); size_t prev_used = heap->used(); // Fill in TLABs heap->accumulate_statistics_all_tlabs(); heap->ensure_parsability(true); // retire TLABs if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification Universe::verify(" VerifyBeforeGC:"); } { ResourceMark rm; HandleMark hm; gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL); TraceCollectorStats tcs(counters()); TraceMemoryManagerStats tms(false /* not full GC */,gc_cause); if (TraceGen0Time) accumulated_time()->start(); // Let the size policy know we're starting size_policy->minor_collection_begin(); // Verify the object start arrays. if (VerifyObjectStartArray && VerifyBeforeGC) { old_gen->verify_object_start_array(); } // Verify no unmarked old->young roots if (VerifyRememberedSets) { CardTableExtension::verify_all_young_refs_imprecise(); } if (!ScavengeWithObjectsInToSpace) { assert(young_gen->to_space()->is_empty(), "Attempt to scavenge with live objects in to_space"); young_gen->to_space()->clear(SpaceDecorator::Mangle); } else if (ZapUnusedHeapArea) { young_gen->to_space()->mangle_unused_area(); } save_to_space_top_before_gc(); COMPILER2_PRESENT(DerivedPointerTable::clear()); reference_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/); reference_processor()->setup_policy(false); // We track how much was promoted to the next generation for // the AdaptiveSizePolicy. size_t old_gen_used_before = old_gen->used_in_bytes(); // For PrintGCDetails size_t young_gen_used_before = young_gen->used_in_bytes(); // Reset our survivor overflow. 
set_survivor_overflow(false); // We need to save the old top values before // creating the promotion_manager. We pass the top // values to the card_table, to prevent it from // straying into the promotion labs. HeapWord* old_top = old_gen->object_space()->top(); // Release all previously held resources gc_task_manager()->release_all_resources(); // Set the number of GC threads to be used in this collection gc_task_manager()->set_active_gang(); gc_task_manager()->task_idle_workers(); // Get the active number of workers here and use that value // throughout the methods. uint active_workers = gc_task_manager()->active_workers(); heap->set_par_threads(active_workers); PSPromotionManager::pre_scavenge(); // We'll use the promotion manager again later. PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager(); { GCTraceTime tm("Scavenge", false, false, &_gc_timer); ParallelScavengeHeap::ParStrongRootsScope psrs; GCTaskQueue* q = GCTaskQueue::create(); if (!old_gen->object_space()->is_empty()) { // There are only old-to-young pointers if there are objects // in the old gen. uint stripe_total = active_workers; for(uint i=0; i < stripe_total; i++) { q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i, stripe_total)); } } q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe)); q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles)); // We scan the thread roots in parallel Threads::create_thread_roots_tasks(q); q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer)); q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler)); q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management)); q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary)); q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::class_loader_data)); q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti)); q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache)); ParallelTaskTerminator terminator( active_workers, (TaskQueueSetSuper*) promotion_manager->stack_array_depth()); if (active_workers > 1) { for (uint j = 0; j < active_workers; j++) { q->enqueue(new StealTask(&terminator)); } } gc_task_manager()->execute_and_wait(q); } scavenge_midpoint.update(); // Process reference objects discovered during scavenge { GCTraceTime tm("References", false, false, &_gc_timer); reference_processor()->setup_policy(false); // not always_clear reference_processor()->set_active_mt_degree(active_workers); PSKeepAliveClosure keep_alive(promotion_manager); PSEvacuateFollowersClosure evac_followers(promotion_manager); ReferenceProcessorStats stats; if (reference_processor()->processing_is_mt()) { PSRefProcTaskExecutor task_executor; stats = reference_processor()->process_discovered_references( &_is_alive_closure, &keep_alive, &evac_followers, &task_executor, &_gc_timer); } else { stats = reference_processor()->process_discovered_references( &_is_alive_closure, &keep_alive, &evac_followers, NULL, &_gc_timer); } _gc_tracer.report_gc_reference_stats(stats); // Enqueue reference objects discovered during scavenge. if (reference_processor()->processing_is_mt()) { PSRefProcTaskExecutor task_executor; reference_processor()->enqueue_discovered_references(&task_executor); } else { reference_processor()->enqueue_discovered_references(NULL); } } { GCTraceTime tm("StringTable", false, false, &_gc_timer); // Unlink any dead interned Strings and process the remaining live ones. 
PSScavengeRootsClosure root_closure(promotion_manager); StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure); } // Finally, flush the promotion_manager's labs, and deallocate its stacks. promotion_failure_occurred = PSPromotionManager::post_scavenge(_gc_tracer); if (promotion_failure_occurred) { clean_up_failed_promotion(); if (PrintGC) { gclog_or_tty->print("--"); } } // Let the size policy know we're done. Note that we count promotion // failure cleanup time as part of the collection (otherwise, we're // implicitly saying it's mutator time). size_policy->minor_collection_end(gc_cause); if (!promotion_failure_occurred) { // Swap the survivor spaces. young_gen->eden_space()->clear(SpaceDecorator::Mangle); young_gen->from_space()->clear(SpaceDecorator::Mangle); young_gen->swap_spaces(); size_t survived = young_gen->from_space()->used_in_bytes(); size_t promoted = old_gen->used_in_bytes() - old_gen_used_before; size_policy->update_averages(_survivor_overflow, survived, promoted); // A successful scavenge should restart the GC time limit count which is // for full GC's. size_policy->reset_gc_overhead_limit_count(); if (UseAdaptiveSizePolicy) { // Calculate the new survivor size and tenuring threshold if (PrintAdaptiveSizePolicy) { gclog_or_tty->print("AdaptiveSizeStart: "); gclog_or_tty->stamp(); gclog_or_tty->print_cr(" collection: %d ", heap->total_collections()); if (Verbose) { gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d", old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes()); } } if (UsePerfData) { PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters(); counters->update_old_eden_size( size_policy->calculated_eden_size_in_bytes()); counters->update_old_promo_size( size_policy->calculated_promo_size_in_bytes()); counters->update_old_capacity(old_gen->capacity_in_bytes()); counters->update_young_capacity(young_gen->capacity_in_bytes()); counters->update_survived(survived); counters->update_promoted(promoted); counters->update_survivor_overflowed(_survivor_overflow); } size_t max_young_size = young_gen->max_size(); // Deciding a free ratio in the young generation is tricky, so if // MinHeapFreeRatio or MaxHeapFreeRatio are in use (implicating // that the old generation size may have been limited because of them) we // should then limit our young generation size using NewRatio to have it // follow the old generation size. if (MinHeapFreeRatio != 0 || MaxHeapFreeRatio != 100) { max_young_size = MIN2(old_gen->capacity_in_bytes() / NewRatio, young_gen->max_size()); } size_t survivor_limit = size_policy->max_survivor_size(max_young_size); _tenuring_threshold = size_policy->compute_survivor_space_size_and_threshold( _survivor_overflow, _tenuring_threshold, survivor_limit); if (PrintTenuringDistribution) { gclog_or_tty->cr(); gclog_or_tty->print_cr("Desired survivor size " SIZE_FORMAT " bytes, new threshold %u (max %u)", size_policy->calculated_survivor_size_in_bytes(), _tenuring_threshold, MaxTenuringThreshold); } if (UsePerfData) { PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters(); counters->update_tenuring_threshold(_tenuring_threshold); counters->update_survivor_size_counters(); } // Do call at minor collections? // Don't check if the size_policy is ready at this // level. Let the size_policy check that internally. 
if (UseAdaptiveGenerationSizePolicyAtMinorCollection && ((gc_cause != GCCause::_java_lang_system_gc) || UseAdaptiveSizePolicyWithSystemGC)) { // Calculate optimial free space amounts assert(young_gen->max_size() > young_gen->from_space()->capacity_in_bytes() + young_gen->to_space()->capacity_in_bytes(), "Sizes of space in young gen are out-of-bounds"); size_t young_live = young_gen->used_in_bytes(); size_t eden_live = young_gen->eden_space()->used_in_bytes(); size_t cur_eden = young_gen->eden_space()->capacity_in_bytes(); size_t max_old_gen_size = old_gen->max_gen_size(); size_t max_eden_size = max_young_size - young_gen->from_space()->capacity_in_bytes() - young_gen->to_space()->capacity_in_bytes(); // Used for diagnostics size_policy->clear_generation_free_space_flags(); size_policy->compute_eden_space_size(young_live, eden_live, cur_eden, max_eden_size, false /* not full gc*/); size_policy->check_gc_overhead_limit(young_live, eden_live, max_old_gen_size, max_eden_size, false /* not full gc*/, gc_cause, heap->collector_policy()); size_policy->decay_supplemental_growth(false /* not full gc*/); } // Resize the young generation at every collection // even if new sizes have not been calculated. This is // to allow resizes that may have been inhibited by the // relative location of the "to" and "from" spaces. // Resizing the old gen at minor collects can cause increases // that don't feed back to the generation sizing policy until // a major collection. Don't resize the old gen here. heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(), size_policy->calculated_survivor_size_in_bytes()); if (PrintAdaptiveSizePolicy) { gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ", heap->total_collections()); } } // Update the structure of the eden. With NUMA-eden CPU hotplugging or offlining can // cause the change of the heap layout. Make sure eden is reshaped if that's the case. // Also update() will case adaptive NUMA chunk resizing. assert(young_gen->eden_space()->is_empty(), "eden space should be empty now"); young_gen->eden_space()->update(); heap->gc_policy_counters()->update_counters(); heap->resize_all_tlabs(); assert(young_gen->to_space()->is_empty(), "to space should be empty now"); } COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); NOT_PRODUCT(reference_processor()->verify_no_references_recorded()); { GCTraceTime tm("Prune Scavenge Root Methods", false, false, &_gc_timer); CodeCache::prune_scavenge_root_nmethods(); } // Re-verify object start arrays if (VerifyObjectStartArray && VerifyAfterGC) { old_gen->verify_object_start_array(); } // Verify all old -> young cards are now precise if (VerifyRememberedSets) { // Precise verification will give false positives. Until this is fixed, // use imprecise verification. // CardTableExtension::verify_all_young_refs_precise(); CardTableExtension::verify_all_young_refs_imprecise(); } if (TraceGen0Time) accumulated_time()->stop(); if (PrintGC) { if (PrintGCDetails) { // Don't print a GC timestamp here. This is after the GC so // would be confusing. 
young_gen->print_used_change(young_gen_used_before); } heap->print_heap_change(prev_used); } // Track memory usage and detect low memory MemoryService::track_memory_usage(); heap->update_counters(); gc_task_manager()->release_idle_workers(); } if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification Universe::verify(" VerifyAfterGC:"); } heap->print_heap_after_gc(); heap->trace_heap_after_gc(&_gc_tracer); _gc_tracer.report_tenuring_threshold(tenuring_threshold()); if (ZapUnusedHeapArea) { young_gen->eden_space()->check_mangled_unused_area_complete(); young_gen->from_space()->check_mangled_unused_area_complete(); young_gen->to_space()->check_mangled_unused_area_complete(); } scavenge_exit.update(); if (PrintGCTaskTimeStamps) { tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " " INT64_FORMAT, scavenge_entry.ticks(), scavenge_midpoint.ticks(), scavenge_exit.ticks()); gc_task_manager()->print_task_time_stamps(); } #ifdef TRACESPINNING ParallelTaskTerminator::print_termination_counts(); #endif _gc_timer.register_gc_end(); _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions()); return !promotion_failure_occurred; }
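// The GCTraceTime scopes used throughout the scavenge above ("Scavenge",
// "References", "StringTable", "Prune Scavenge Root Methods") time each phase
// by construction and destruction. Below is a minimal sketch of that
// scoped-timer pattern, not HotSpot's GCTraceTime class.

#include <chrono>
#include <cstdio>

// Scoped phase timer: start on construction, report elapsed time on
// destruction, so the timed region is exactly the enclosing block.
class ScopedPhaseTimer {
 public:
  explicit ScopedPhaseTimer(const char* name)
      : _name(name), _start(std::chrono::steady_clock::now()) {}
  ~ScopedPhaseTimer() {
    const auto elapsed = std::chrono::steady_clock::now() - _start;
    const double secs =
        std::chrono::duration_cast<std::chrono::duration<double>>(elapsed).count();
    printf("[%s, %.4f secs]\n", _name, secs);
  }
 private:
  const char* _name;
  std::chrono::steady_clock::time_point _start;
};

// Usage: scope the timer around a phase, the same way the scavenge code
// scopes GCTraceTime around reference processing.
// {
//   ScopedPhaseTimer t("References");
//   // ... process discovered references ...
// }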
// This method contains no policy. You should probably // be calling invoke() instead. void PSMarkSweep::invoke_no_policy(bool& notify_ref_lock, bool clear_all_softrefs) { assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint"); assert(ref_processor() != NULL, "Sanity"); if (GC_locker::is_active()) return; ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); PSYoungGen* young_gen = heap->young_gen(); PSOldGen* old_gen = heap->old_gen(); PSPermGen* perm_gen = heap->perm_gen(); // Increment the invocation count heap->increment_total_collections(); // We need to track unique mark sweep invocations as well. _total_invocations++; if (PrintHeapAtGC) { gclog_or_tty->print_cr(" {Heap before GC invocations=%d:", heap->total_collections()); Universe::print(); } // Fill in TLABs heap->ensure_parseability(); if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification tty->print(" VerifyBeforeGC:"); Universe::verify(true); } { HandleMark hm; TraceTime t1("Full GC", PrintGC, true, gclog_or_tty); TraceCollectorStats tcs(counters()); if (TraceGen1Time) accumulated_time()->start(); // Let the size policy know we're starting AdaptiveSizePolicy* size_policy = heap->size_policy(); size_policy->major_collection_begin(); // When collecting the permanent generation methodOops may be moving, // so we either have to flush all bcp data or convert it into bci. NOT_CORE(CodeCache::gc_prologue()); Threads::gc_prologue(); // Capture heap size before collection for printing. size_t prev_used = heap->used(); // Capture perm gen size before collection for sizing. size_t perm_gen_prev_used = perm_gen->used_in_bytes(); bool marked_for_unloading = false; allocate_stacks(); NOT_PRODUCT(ref_processor()->verify_no_references_recorded()); COMPILER2_ONLY(DerivedPointerTable::clear()); ref_processor()->enable_discovery(); mark_sweep_phase1(marked_for_unloading, clear_all_softrefs); mark_sweep_phase2(); // Don't add any more derived pointers during phase3 COMPILER2_ONLY(assert(DerivedPointerTable::is_active(), "Sanity")); COMPILER2_ONLY(DerivedPointerTable::set_active(false)); mark_sweep_phase3(); mark_sweep_phase4(); restore_marks(); deallocate_stacks(); // "free at last gc" is calculated from these. 
    Universe::set_heap_capacity_at_last_gc(Universe::heap()->capacity());
    Universe::set_heap_used_at_last_gc(Universe::heap()->used());

    bool all_empty = young_gen->eden_space()->is_empty() &&
                     young_gen->from_space()->is_empty() &&
                     young_gen->to_space()->is_empty();

    BarrierSet* bs = heap->barrier_set();
    if (bs->is_a(BarrierSet::ModRef)) {
      ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
      MemRegion old_mr = heap->old_gen()->reserved();
      MemRegion perm_mr = heap->perm_gen()->reserved();
      assert(old_mr.end() <= perm_mr.start(), "Generations out of order");

      if (all_empty) {
        modBS->clear(MemRegion(old_mr.start(), perm_mr.end()));
      } else {
        modBS->invalidate(MemRegion(old_mr.start(), perm_mr.end()));
      }
    }

    Threads::gc_epilogue();
    NOT_CORE(CodeCache::gc_epilogue());

    COMPILER2_ONLY(DerivedPointerTable::update_pointers());

    notify_ref_lock |= ref_processor()->enqueue_discovered_references();

    // Update time of last GC
    reset_millis_since_last_gc();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes());

    if (UseAdaptiveSizePolicy) {

      if (PrintAdaptiveSizePolicy) {
        tty->print_cr("AdaptiveSizeStart: collection: %d ",
                      heap->total_collections());
      }

      // Calculate optimal free space amounts
      size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
                                                 old_gen->used_in_bytes(),
                                                 perm_gen->used_in_bytes(),
                                                 true /* full gc*/);

      // Resize old and young generations
      old_gen->resize(size_policy->calculated_old_free_size_in_bytes());

      young_gen->resize(size_policy->calculated_eden_size_in_bytes(),
                        size_policy->calculated_survivor_size_in_bytes());

      if (PrintAdaptiveSizePolicy) {
        tty->print_cr("AdaptiveSizeStop: collection: %d ",
                      heap->total_collections());
      }
    }

    // We collected the perm gen, so we'll resize it here.
    perm_gen->compute_new_size(perm_gen_prev_used);

    if (TraceGen1Time) accumulated_time()->stop();

    if (PrintGC) {
      heap->print_heap_change(prev_used);
    }

    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    tty->print(" VerifyAfterGC:");
    Universe::verify(false);
  }

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

  if (PrintHeapAtGC) {
    gclog_or_tty->print_cr(" Heap after GC invocations=%d:",
                           heap->total_collections());
    Universe::print();
    gclog_or_tty->print("} ");
  }
}
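// Illustrative only -- a hedged sketch, not taken from the original sources,
// of how a caller in the VM-operation layer might drive the no-policy entry
// point above and consume its notify_ref_lock out-parameter. The function
// name example_full_gc_driver and the surrounding behavior are assumptions
// made for this example.
static void example_full_gc_driver(bool clear_all_softrefs) {
  bool notify_ref_lock = false;
  // Mark-sweep-compact the whole heap; the flag records whether any
  // discovered references were enqueued during the collection.
  PSMarkSweep::invoke_no_policy(notify_ref_lock, clear_all_softrefs);
  if (notify_ref_lock) {
    // A real caller would notify the java.lang.ref pending-list lock here,
    // so that reference handling can resume after the safepoint.
  }
}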
// This method contains no policy. You should probably
// be calling invoke() instead.
void PSScavenge::invoke_no_policy(bool& notify_ref_lock) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  TimeStamp scavenge_entry;
  TimeStamp scavenge_midpoint;
  TimeStamp scavenge_exit;

  scavenge_entry.update();

  if (GC_locker::is_active()) return;

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return;
  }

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();
  AdaptiveSizePolicy* size_policy = heap->size_policy();

  heap->increment_total_collections();

  if (PrintHeapAtGC) {
    gclog_or_tty->print_cr(" {Heap before GC invocations=%d:",
                           heap->total_collections());
    Universe::print();
  }

  assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  size_t prev_used = heap->used();
  assert(promotion_failed() == false, "Sanity");

  // Fill in TLABs
  heap->ensure_parseability();

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    tty->print(" VerifyBeforeGC:");
    Universe::verify(true);
  }

  {
    ResourceMark rm;
    HandleMark hm;

    TraceTime t1("GC", PrintGC, true, gclog_or_tty);
    TraceCollectorStats tcs(counters());
    if (TraceGen0Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      old_gen->verify_object_start_array();
      perm_gen->verify_object_start_array();
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    assert(young_gen->to_space()->is_empty(),
           "Attempt to scavenge with live objects in to_space");
    young_gen->to_space()->clear();

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
    COMPILER2_ONLY(DerivedPointerTable::clear(););
    reference_processor()->enable_discovery();

    // We track how much was promoted to the next generation for
    // the AdaptiveSizePolicy.
    size_t old_gen_used_before = old_gen->object_space()->used_in_bytes();

    // Reset our survivor overflow.
    set_survivor_overflow(false);

    // We need to save the old/perm top values before
    // creating the promotion_manager. We pass the top
    // values to the card_table, to prevent it from
    // straying into the promotion labs.
    HeapWord* old_top = old_gen->object_space()->top();
    HeapWord* perm_top = perm_gen->object_space()->top();

    // Release all previously held resources
    gc_task_manager()->release_all_resources();

    PSPromotionManager::pre_scavenge();

    // We'll use the promotion manager again later.
    PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();

    {
      // TraceTime("Roots");

      GCTaskQueue* q = GCTaskQueue::create();

      for (uint i = 0; i < ParallelGCThreads; i++) {
        q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i));
      }

      q->enqueue(new SerialOldToYoungRootsTask(perm_gen, perm_top));

      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
      // We scan the thread roots in parallel
      Threads::create_thread_roots_tasks(q);
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));

      if (ParallelGCThreads > 1) {
        for (uint j = 0; j < ParallelGCThreads - 1; j++) {
          q->enqueue(new StealTask(false));
        }
        q->enqueue(new StealTask(true));
      }

      WaitForBarrierGCTask* fin = WaitForBarrierGCTask::create();
      q->enqueue(fin);

      gc_task_manager()->add_list(q);

      fin->wait_for();

      // We have to release the barrier tasks!
      WaitForBarrierGCTask::destroy(fin);
    }

    scavenge_midpoint.update();

    NOT_COMPILER2(ReferencePolicy *soft_ref_policy = new LRUCurrentHeapPolicy());
    COMPILER2_ONLY(ReferencePolicy *soft_ref_policy = new LRUMaxHeapPolicy());

    PSIsAliveClosure is_alive;
    PSKeepAliveClosure keep_alive(promotion_manager);
    PSEvacuateFollowersClosure evac_followers(promotion_manager);

    // Process reference objects discovered during scavenge
    reference_processor()->process_discovered_references(soft_ref_policy,
                                                         &is_alive,
                                                         &keep_alive,
                                                         &evac_followers);

    // Enqueue reference objects discovered during scavenge.
    notify_ref_lock = reference_processor()->enqueue_discovered_references();

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    assert(promotion_manager->claimed_stack()->size() == 0, "Sanity");
    PSPromotionManager::post_scavenge();

    bool scavenge_promotion_failure = promotion_failed();
    if (scavenge_promotion_failure) {
      clean_up_failed_promotion();
      if (PrintGC) {
        gclog_or_tty->print("--");
      }
    }

    // Let the size policy know we're done. Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're
    // implicitly saying it's mutator time).
    size_policy->minor_collection_end();

    if (!scavenge_promotion_failure) {
      // Swap the survivor spaces.
      young_gen->eden_space()->clear();
      young_gen->from_space()->clear();
      young_gen->swap_spaces();

      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold
        size_t survived = young_gen->from_space()->used_in_bytes();
        size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;

        if (PrintAdaptiveSizePolicy) {
          tty->print_cr("AdaptiveSizeStart: collection: %d ",
                        heap->total_collections());
        }

        size_t survivor_limit = size_policy->max_survivor_size(young_gen->max_size());
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(survived,
                                                                 promoted,
                                                                 _survivor_overflow,
                                                                 _tenuring_threshold,
                                                                 survivor_limit);

        // Calculate optimal free space amounts
        size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
                                                   old_gen->used_in_bytes(),
                                                   perm_gen->used_in_bytes(),
                                                   false /* full gc*/);

        // Resize the old and young generations
        old_gen->resize(size_policy->calculated_old_free_size_in_bytes());

        young_gen->resize(size_policy->calculated_eden_size_in_bytes(),
                          size_policy->calculated_survivor_size_in_bytes());

        if (PrintAdaptiveSizePolicy) {
          tty->print_cr("AdaptiveSizeStop: collection: %d ",
                        heap->total_collections());
        }
      }

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

    COMPILER2_ONLY(DerivedPointerTable::update_pointers());

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());

    // Verify all old -> young cards are now precise
    if (VerifyRememberedSets) {
      // Precise verification will give false positives. Until this is fixed,
      // use imprecise verification.
      // CardTableExtension::verify_all_young_refs_precise();
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (TraceGen0Time) accumulated_time()->stop();

    if (PrintGC) {
      heap->print_heap_change(prev_used);
    }

    heap->update_counters();
  }