void DefNewGeneration::gc_epilogue(bool full) {
  // Check if the heap is approaching full after a collection has
  // been done. Generally the young generation is empty at
  // a minimum at the end of a collection. If it is not, then
  // the heap is approaching full.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  clear_should_allocate_from_space();
  if (collection_attempt_is_safe()) {
    gch->clear_incremental_collection_will_fail();
  } else {
    gch->set_incremental_collection_will_fail();
    if (full) { // we seem to be running out of space
      set_should_allocate_from_space();
    }
  }

  if (ZapUnusedHeapArea) {
    eden()->check_mangled_unused_area_complete();
    from()->check_mangled_unused_area_complete();
    to()->check_mangled_unused_area_complete();
  }

  // update the generation and space performance counters
  update_counters();
  gch->collector_policy()->counters()->update_counters();
}
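collection_attempt_is_safe() is the gate used both here and in collect() further below: to-space must be empty, and the next generation must be able to absorb a worst-case promotion of everything currently used in the young generation. Here is a standalone sketch of that check; the stand-in struct names and the plain free-space heuristic for promotion_attempt_is_safe() are simplifying assumptions, not HotSpot declarations.

#include <cstddef>

struct OldGen {
  std::size_t free_bytes;
  // Worst case: assume every used byte of the young generation survives
  // the scavenge and has to be promoted here.
  bool promotion_attempt_is_safe(std::size_t young_used) const {
    return young_used <= free_bytes;
  }
};

struct YoungGen {
  std::size_t to_space_used;  // to-space must be empty before a scavenge
  std::size_t used;           // total used bytes in the young generation
  OldGen*     next_gen;

  bool collection_attempt_is_safe() const {
    if (to_space_used != 0) return false;
    return next_gen != NULL && next_gen->promotion_attempt_is_safe(used);
  }
};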
/**
 * Reclaims garbage objects from the heap by mark-sweep-compact:
 * 1. Phase 1: mark all live objects
 * 2. Phase 2: compute each live object's post-compaction offset within its region
 * 3. Phase 3: walk all live objects and update the references they contain
 * 4. Phase 4: move the live objects to compact the region
 */
void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp,
                                       bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  GenCollectedHeap* gch = GenCollectedHeap::heap();
#ifdef ASSERT
  if (gch->collector_policy()->should_clear_all_soft_refs()) {
    assert(clear_all_softrefs, "Policy should have been checked earlier");
  }
#endif

  // hook up weak ref data so it can be used during Mark-Sweep
  assert(ref_processor() == NULL, "no stomping");
  assert(rp != NULL, "should be non-NULL");
  _ref_processor = rp;
  rp->setup_policy(clear_all_softrefs);

  TraceTime t1("Full GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);

  // When collecting the permanent generation methodOops may be moving,
  // so we either have to flush all bcp data or convert it into bci.
  CodeCache::gc_prologue();
  Threads::gc_prologue();

  // Increment the invocation count for the permanent generation, since it is
  // implicitly collected whenever we do a full mark sweep collection.
  gch->perm_gen()->stat_record()->invocations++;

  // Heap usage before this GC
  size_t gch_prev_used = gch->used();

  // Some of the card table updates below assume that the perm gen is
  // also being collected.
  assert(level == gch->n_gens() - 1,
         "All generations are being collected, ergo perm gen too.");

  // Capture used regions for each generation that will be
  // subject to collection, so that card table adjustments can
  // be made intelligently (see clear / invalidate further below).
  gch->save_used_regions(level, true /* perm */);

  allocate_stacks();

  mark_sweep_phase1(level, clear_all_softrefs);

  mark_sweep_phase2();

  // Don't add any more derived pointers during phase3
  COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
  COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

  mark_sweep_phase3(level);

  VALIDATE_MARK_SWEEP_ONLY(
    if (ValidateMarkSweep) {
      guarantee(_root_refs_stack->length() == 0, "should be empty by now");
    }
  )
  // ... (remainder of the function elided in this excerpt)
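The four numbered phases in the comment above correspond to mark_sweep_phase1() through mark_sweep_phase4(). As a rough illustration of the same sequence, here is a minimal standalone mark-compact sketch over a toy heap; the Obj layout, index-valued references, and vector-backed heap are simplifying assumptions, not HotSpot structures.

#include <cstddef>
#include <vector>

struct Obj {
  bool marked = false;
  std::size_t forward = 0;        // phase-2 result: post-compaction index
  std::vector<std::size_t> refs;  // outgoing references (heap indices)
};

void mark_compact(std::vector<Obj>& heap, std::vector<std::size_t>& roots) {
  // Phase 1: mark everything transitively reachable from the roots.
  std::vector<std::size_t> stack(roots.begin(), roots.end());
  while (!stack.empty()) {
    std::size_t i = stack.back(); stack.pop_back();
    if (heap[i].marked) continue;
    heap[i].marked = true;
    for (std::size_t r : heap[i].refs) stack.push_back(r);
  }
  // Phase 2: compute each survivor's slid-down (compacted) position.
  std::size_t next = 0;
  for (Obj& o : heap)
    if (o.marked) o.forward = next++;
  // Phase 3: rewrite every reference (roots and fields) to its forwardee,
  // while the objects are still at their old positions.
  for (std::size_t& r : roots) r = heap[r].forward;
  for (Obj& o : heap)
    if (o.marked)
      for (std::size_t& r : o.refs) r = heap[r].forward;
  // Phase 4: slide survivors to their new positions and clear marks.
  std::vector<Obj> compacted;
  for (Obj& o : heap)
    if (o.marked) { o.marked = false; compacted.push_back(o); }
  heap.swap(compacted);
}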
DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   const char* policy)
  : Generation(rs, initial_size),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  gch->barrier_set()->resize_covered_region(cmr);

  _eden_space = new ContiguousSpace();
  _from_space = new ContiguousSpace();
  _to_space   = new ContiguousSpace();

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) {
    vm_exit_during_initialization("Could not allocate a new gen space");
  }

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  uintx alignment = gch->collector_policy()->space_alignment();
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, alignment);
  _max_eden_size = size - (2*_max_survivor_size);

  // allocate the performance counters
  GenCollectorPolicy* gcp = gch->gen_policy();

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters("new", 0, 3,
      gcp->min_young_size(), gcp->max_young_size(), &_virtual_space);
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  update_counters();
  _old_gen = NULL;
  _tenuring_threshold = MaxTenuringThreshold;
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;

  _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
}
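The split computed above for _max_survivor_size and _max_eden_size follows SurvivorRatio. Assuming compute_survivor_size() divides the reserved size by (SurvivorRatio + 2) and aligns the result down, which matches its usual behavior, a back-of-envelope computation for a 64 MB young generation looks like this; the 64 KB space alignment is an assumed value.

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t reserved       = 64ull * 1024 * 1024;  // 64 MB young gen
  const uint64_t survivor_ratio = 8;                    // -XX:SurvivorRatio=8
  const uint64_t alignment      = 64 * 1024;            // assumed alignment

  // eden : survivor : survivor = SurvivorRatio : 1 : 1
  uint64_t survivor = reserved / (survivor_ratio + 2);
  survivor -= survivor % alignment;            // align down
  uint64_t eden = reserved - 2 * survivor;     // matches _max_eden_size

  std::printf("eden=%llu KB, each survivor=%llu KB\n",
              (unsigned long long)(eden / 1024),
              (unsigned long long)(survivor / 1024));
  return 0;
}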
void ASParNewGeneration::compute_new_size() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "not a CMS generational heap");

  CMSAdaptiveSizePolicy* size_policy =
    (CMSAdaptiveSizePolicy*)gch->gen_policy()->size_policy();
  assert(size_policy->is_gc_cms_adaptive_size_policy(),
         "Wrong type of size policy");

  size_t survived = from()->used();
  if (!survivor_overflow()) {
    // Keep running averages on how much survived
    size_policy->avg_survived()->sample(survived);
  } else {
    size_t promoted =
      (size_t) next_gen()->gc_stats()->avg_promoted()->last_sample();
    assert(promoted < gch->capacity(), "Conversion problem?");
    size_t survived_guess = survived + promoted;
    size_policy->avg_survived()->sample(survived_guess);
  }

  size_t survivor_limit = max_survivor_size();
  _tenuring_threshold =
    size_policy->compute_survivor_space_size_and_threshold(
                                                     _survivor_overflow,
                                                     _tenuring_threshold,
                                                     survivor_limit);
  size_policy->avg_young_live()->sample(used());
  size_policy->avg_eden_live()->sample(eden()->used());

  size_policy->compute_young_generation_free_space(eden()->capacity(),
                                                   max_gen_size());

  resize(size_policy->calculated_eden_size_in_bytes(),
         size_policy->calculated_survivor_size_in_bytes());

  if (UsePerfData) {
    CMSGCAdaptivePolicyCounters* counters =
      (CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
    assert(counters->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
           "Wrong kind of counters");

    counters->update_tenuring_threshold(_tenuring_threshold);
    counters->update_survivor_overflowed(_survivor_overflow);
    counters->update_young_capacity(capacity());
  }
}
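avg_survived(), avg_young_live(), and the other samplers above are exponentially decaying averages in the AdaptiveWeightedAverage family: each new sample pulls the running mean toward it by a fixed weight, so older observations decay geometrically. A standalone sketch of that scheme; the 25% weight is chosen only for illustration, not HotSpot's tuned value.

struct WeightedAverage {
  double   avg    = 0.0;
  unsigned weight = 25;  // percent given to each new sample (assumed)

  void sample(double v) {
    // avg <- avg + weight% * (sample - avg)
    avg += (double)weight / 100.0 * (v - avg);
  }
  double average() const { return avg; }
};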
void CardTableRS::verify() {
  // At present, we only know how to verify the card table RS for
  // generational heaps.
  VerifyCTGenClosure blk(this);
  CollectedHeap* ch = Universe::heap();
  // We will do the perm-gen portion of the card table, too.
  Generation* pg = SharedHeap::heap()->perm_gen();
  HeapWord* pg_boundary = pg->reserved().start();

  if (ch->kind() == CollectedHeap::GenCollectedHeap) {
    GenCollectedHeap::heap()->generation_iterate(&blk, false);
    _ct_bs->verify();

    // If the old gen collections also collect perm, then we are only
    // interested in perm-to-young pointers, not perm-to-old pointers.
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    CollectorPolicy* cp = gch->collector_policy();
    if (cp->is_mark_sweep_policy() || cp->is_concurrent_mark_sweep_policy()) {
      pg_boundary = gch->get_gen(1)->reserved().start();
    }
  }
  VerifyCTSpaceClosure perm_space_blk(this, pg_boundary);
  SharedHeap::heap()->perm_gen()->space_iterate(&perm_space_blk, true);
}
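For context, the remembered set being verified here is a card table: the heap is divided into fixed-size cards, and a byte map records which cards may contain old-to-young (or perm-to-young) pointers, so a minor GC only has to scan dirty cards instead of the whole old generation. A standalone sketch of the mapping and the post-write barrier; the 512-byte card size and the dirty/clean byte values mirror HotSpot's defaults, but the code itself is illustrative.

#include <cstddef>
#include <cstdint>

const int     kCardShift = 9;    // 2^9 = 512-byte cards (HotSpot default)
const uint8_t kDirtyCard = 0;    // HotSpot marks dirty cards with 0
const uint8_t kCleanCard = 0xff;

struct CardTable {
  uint8_t*  byte_map;    // one byte per card
  uintptr_t heap_start;  // low boundary of the covered region

  // Post-write barrier: after a reference is stored into *field, mark
  // the card covering 'field' so the next young collection scans it.
  void dirty_card_for(const void* field) {
    uintptr_t offset = (uintptr_t)field - heap_start;
    byte_map[offset >> kCardShift] = kDirtyCard;
  }

  bool is_dirty(std::size_t card_index) const {
    return byte_map[card_index] == kDirtyCard;
  }
};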
void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  _gc_timer->register_gc_start();
  DefNewTracer gc_tracer;
  gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());

  _next_gen = gch->next_gen(this);

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: Collection attempt not safe :: ");
    }
    gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()),
                 PrintGC && !PrintGCDetails, true, NULL);
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  gch->trace_heap_before_gc(&gc_tracer);

  SpecializationStats::clear();

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();

  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
                                      gch->rem_set()->klass_rem_set());

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;

  gch->gen_process_strong_roots(_level,
                                true,  // Process younger gens, if any,
                                       // as strong roots.
                                true,  // activate StrongRootsScope
                                true,  // is scavenging
                                SharedHeap::ScanningOption(so),
                                &fsc_with_no_gc_barrier,
                                true,  // walk *all* scavengable nmethods
                                &fsc_with_gc_barrier,
                                &klass_scan_closure);

  // "evacuate followers".
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  const ReferenceProcessorStats& stats =
    rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                      NULL, _gc_timer);
  gc_tracer.report_gc_reference_stats(stats);

  if (!_promotion_failed) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s). When a minor collection fails to collect
      // sufficient space resizing of the young generation can occur
      // and redistribute the spaces in the young generation. Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
    size_policy->reset_gc_overhead_limit_count();
    if (PrintGC && !PrintGCDetails) {
      gch->print_heap_change(gch_prev_used);
    }
    assert(!gch->incremental_collection_failed(), "Should be clear");
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (promotion failed) ");
    }
    // Add to-space to the list of space to compact
    // when a promotion failure has occurred. In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces(); // For uniformity wrt ParNewGeneration.
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_failed();

    // Inform the next generation that a promotion failure occurred.
    _next_gen->promotion_failure_occurred();
    gc_tracer.report_promotion_failed(_promotion_failed_info);

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());
  SpecializationStats::print();

  // We need to use a monotonically non-decreasing time in ms
  // or we will see time-warp warnings and os::javaTimeMillis()
  // does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  gch->trace_heap_after_gc(&gc_tracer);
  gc_tracer.report_tenuring_threshold(tenuring_threshold());

  _gc_timer->register_gc_end();

  gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}
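On the success path, swap_spaces() is essentially a pointer flip: the space just evacuated into becomes the new from-space, so the next scavenge finds this cycle's survivors there. A simplified standalone sketch; the Space type is a stand-in, and the real method also rebinds counters and compaction-space links.

struct Space { /* bump-pointer space: bottom, top, end, ... */ };

struct YoungGenSpaces {
  Space* _from_space;
  Space* _to_space;

  void swap_spaces() {
    Space* s    = _from_space;
    _from_space = _to_space;   // now holds this cycle's survivors
    _to_space   = s;           // empty again, target of the next copy
  }
};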
void GenMarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
                                       bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  GenCollectedHeap* gch = GenCollectedHeap::heap();
#ifdef ASSERT
  if (gch->collector_policy()->should_clear_all_soft_refs()) {
    assert(clear_all_softrefs, "Policy should have been checked earlier");
  }
#endif

  // hook up weak ref data so it can be used during Mark-Sweep
  assert(ref_processor() == NULL, "no stomping");
  assert(rp != NULL, "should be non-NULL");
  set_ref_processor(rp);
  rp->setup_policy(clear_all_softrefs);

  gch->trace_heap_before_gc(_gc_tracer);

  // When collecting the permanent generation Method*s may be moving,
  // so we either have to flush all bcp data or convert it into bci.
  CodeCache::gc_prologue();

  // Increment the invocation count
  _total_invocations++;

  // Capture used regions for each generation that will be
  // subject to collection, so that card table adjustments can
  // be made intelligently (see clear / invalidate further below).
  gch->save_used_regions();

  allocate_stacks();

  mark_sweep_phase1(clear_all_softrefs);

  mark_sweep_phase2();

  // Don't add any more derived pointers during phase3
#if defined(COMPILER2) || INCLUDE_JVMCI
  assert(DerivedPointerTable::is_active(), "Sanity");
  DerivedPointerTable::set_active(false);
#endif

  mark_sweep_phase3();

  mark_sweep_phase4();

  restore_marks();

  // Set saved marks for allocation profiler (and other things? -- dld)
  // (Should this be in general part?)
  gch->save_marks();

  deallocate_stacks();

  // If compaction completely evacuated the young generation then we
  // can clear the card table. Otherwise, we must invalidate
  // it (consider all cards dirty). In the future, we might consider doing
  // compaction within generations only, and doing card-table sliding.
  CardTableRS* rs = gch->rem_set();
  Generation* old_gen = gch->old_gen();

  // Clear/invalidate below make use of the "prev_used_regions" saved earlier.
  if (gch->young_gen()->used() == 0) {
    // We've evacuated the young generation.
    rs->clear_into_younger(old_gen);
  } else {
    // Invalidate the cards corresponding to the currently used
    // region and clear those corresponding to the evacuated region.
    rs->invalidate_or_clear(old_gen);
  }

  CodeCache::gc_epilogue();
  JvmtiExport::gc_epilogue();

  // refs processing: clean slate
  set_ref_processor(NULL);

  // Update heap occupancy information which is used as
  // input to soft ref clearing policy at the next gc.
  Universe::update_heap_info_at_gc();

  // Update time of last gc for all generations we collected
  // (which currently is all the generations in the heap).
  // We need to use a monotonically non-decreasing time in ms
  // or we will see time-warp warnings and os::javaTimeMillis()
  // does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  gch->update_time_of_last_gc(now);

  gch->trace_heap_after_gc(_gc_tracer);
}
void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  _next_gen = gch->next_gen(this);
  assert(_next_gen != NULL,
         "This must be the youngest gen, and not the only gen");

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    gch->set_incremental_collection_will_fail();
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  TraceTime t1("GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  SpecializationStats::clear();

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();

  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  gch->gen_process_strong_roots(_level,
                                true,  // Process younger gens, if any,
                                       // as strong roots.
                                true,  // activate StrongRootsScope
                                false, // not collecting perm generation.
                                SharedHeap::SO_AllClasses,
                                &fsc_with_no_gc_barrier,
                                true,  // walk *all* scavengable nmethods
                                &fsc_with_gc_barrier);

  // "evacuate followers".
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                    NULL);
  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s). When a minor collection fails to collect
      // sufficient space resizing of the young generation can occur
      // and redistribute the spaces in the young generation. Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    // Set the desired survivor size to half the real survivor space
    _tenuring_threshold =
      age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);

    if (PrintGC && !PrintGCDetails) {
      gch->print_heap_change(gch_prev_used);
    }
  } else {
    assert(HandlePromotionFailure,
           "Should not be here unless promotion failure handling is on");
    assert(_promo_failure_scan_stack != NULL &&
           _promo_failure_scan_stack->length() == 0, "post condition");

    // deallocate the stack and its elements
    delete _promo_failure_scan_stack;
    _promo_failure_scan_stack = NULL;

    remove_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (promotion failed) ");
    }
    // Add to-space to the list of space to compact
    // when a promotion failure has occurred. In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces(); // For the sake of uniformity wrt ParNewGeneration::collect().
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_will_fail();

    // Inform the next generation that a promotion failure occurred.
    _next_gen->promotion_failure_occurred();

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());
  SpecializationStats::print();
  update_time_of_last_gc(os::javaTimeMillis());
}
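compute_tenuring_threshold(), called on the success path above, accumulates the byte counts of survivors at each age and returns the first age at which the running total would exceed the desired survivor occupancy. A standalone sketch, assuming the default TargetSurvivorRatio of 50 and a 16-entry age table; both are assumptions mirroring common HotSpot defaults.

#include <cstddef>

const int kTableSize = 16;  // one slot per object age; HotSpot uses
                            // MaxTenuringThreshold + 1 entries

// sizes[age] holds the total words of surviving objects with that age.
int compute_tenuring_threshold(const std::size_t sizes[kTableSize],
                               std::size_t survivor_capacity_words) {
  const std::size_t target_ratio = 50;  // assumed -XX:TargetSurvivorRatio
  std::size_t desired = survivor_capacity_words * target_ratio / 100;
  std::size_t total = 0;
  int age = 1;
  while (age < kTableSize) {
    total += sizes[age];
    // Stop at the first age where the accumulated survivors would
    // overflow the target occupancy; objects older than this get promoted.
    if (total > desired) break;
    age++;
  }
  return age;
}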
void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp,
                                       bool clear_all_softrefs) {
  guarantee(level == 1, "We always collect both old and young.");
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  GenCollectedHeap* gch = GenCollectedHeap::heap();
#ifdef ASSERT
  if (gch->collector_policy()->should_clear_all_soft_refs()) {
    assert(clear_all_softrefs, "Policy should have been checked earlier");
  }
#endif

  // hook up weak ref data so it can be used during Mark-Sweep
  assert(ref_processor() == NULL, "no stomping");
  assert(rp != NULL, "should be non-NULL");
  _ref_processor = rp;
  rp->setup_policy(clear_all_softrefs);

  GCTraceTime t1(GCCauseString("Full GC", gch->gc_cause()),
                 PrintGC && !PrintGCDetails, true, NULL, _gc_tracer->gc_id());

  gch->trace_heap_before_gc(_gc_tracer);

  // When collecting the permanent generation Method*s may be moving,
  // so we either have to flush all bcp data or convert it into bci.
  CodeCache::gc_prologue();
  Threads::gc_prologue();

  // Increment the invocation count
  _total_invocations++;

  // Capture heap size before collection for printing.
  size_t gch_prev_used = gch->used();

  // Capture used regions for each generation that will be
  // subject to collection, so that card table adjustments can
  // be made intelligently (see clear / invalidate further below).
  gch->save_used_regions(level);

  allocate_stacks();

  mark_sweep_phase1(level, clear_all_softrefs);

  mark_sweep_phase2();

  // Don't add any more derived pointers during phase3
  COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
  COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

  mark_sweep_phase3(level);

  mark_sweep_phase4();

  restore_marks();

  // Set saved marks for allocation profiler (and other things? -- dld)
  // (Should this be in general part?)
  gch->save_marks();

  deallocate_stacks();

  // If compaction completely evacuated all generations younger than this
  // one, then we can clear the card table. Otherwise, we must invalidate
  // it (consider all cards dirty). In the future, we might consider doing
  // compaction within generations only, and doing card-table sliding.
  bool all_empty = true;
  for (int i = 0; all_empty && i < level; i++) {
    Generation* g = gch->get_gen(i);
    all_empty = all_empty && gch->get_gen(i)->used() == 0;
  }
  GenRemSet* rs = gch->rem_set();
  Generation* old_gen = gch->get_gen(level);
  // Clear/invalidate below make use of the "prev_used_regions" saved earlier.
  if (all_empty) {
    // We've evacuated all generations below us.
    rs->clear_into_younger(old_gen);
  } else {
    // Invalidate the cards corresponding to the currently used
    // region and clear those corresponding to the evacuated region.
    rs->invalidate_or_clear(old_gen);
  }

  Threads::gc_epilogue();
  CodeCache::gc_epilogue();
  JvmtiExport::gc_epilogue();

  if (PrintGC && !PrintGCDetails) {
    gch->print_heap_change(gch_prev_used);
  }

  // refs processing: clean slate
  _ref_processor = NULL;

  // Update heap occupancy information which is used as
  // input to soft ref clearing policy at the next gc.
  Universe::update_heap_info_at_gc();

  // Update time of last gc for all generations we collected
  // (which currently is all the generations in the heap).
  // We need to use a monotonically non-decreasing time in ms
  // or we will see time-warp warnings and os::javaTimeMillis()
  // does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  gch->update_time_of_last_gc(now);

  gch->trace_heap_after_gc(_gc_tracer);
}
void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_large_noref,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  _next_gen = gch->next_gen(this);
  assert(_next_gen != NULL,
         "This must be the youngest gen, and not the only gen");

  // If the next generation is too full to accommodate worst-case promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!full_promotion_would_succeed()) {
    gch->set_incremental_collection_will_fail();
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("DefNewGeneration::collect"
                             " contiguous_available: " SIZE_FORMAT
                             " < used: " SIZE_FORMAT,
                             _next_gen->max_contiguous_available(), used());
    }
    return;
  }

  TraceTime t1("GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  SpecializationStats::clear();

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear();

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  // Weak refs.
  // FIXME: Are these storage leaks, or are they resource objects?
  NOT_COMPILER2(ReferencePolicy *soft_ref_policy = new LRUCurrentHeapPolicy());
  COMPILER2_ONLY(ReferencePolicy *soft_ref_policy = new LRUMaxHeapPolicy());

  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();
  if (!cp->is_train_policy()) {
    FastScanClosure fsc_with_no_gc_barrier(this, false);
    FastScanClosure fsc_with_gc_barrier(this, true);

    FastEvacuateFollowersClosure evacuate_followers(gch, _level,
                                                    &fsc_with_no_gc_barrier,
                                                    &fsc_with_gc_barrier);

    assert(gch->no_allocs_since_save_marks(0),
           "save marks have not been newly set.");

    gch->process_strong_roots(_level,
                              true, // Process younger gens, if any, as
                                    // strong roots.
                              false,// not collecting permanent generation.
                              GenCollectedHeap::CSO_AllClasses,
                              &fsc_with_gc_barrier,
                              &fsc_with_no_gc_barrier);

    // "evacuate followers".
    evacuate_followers.do_void();

    FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
    ref_processor()->process_discovered_references(soft_ref_policy,
                                                   &is_alive,
                                                   &keep_alive,
                                                   &evacuate_followers);
  } else { // Train policy
    ScanClosure sc_with_no_gc_barrier(this, false);
    ScanClosure sc_with_gc_barrier(this, true);

    EvacuateFollowersClosure evacuate_followers(gch, _level,
                                                &sc_with_no_gc_barrier,
                                                &sc_with_gc_barrier);

    gch->process_strong_roots(_level,
                              true, // Process younger gens, if any, as
                                    // strong roots.
                              false,// not collecting perm generation.
                              GenCollectedHeap::CSO_AllClasses,
                              &sc_with_gc_barrier,
                              &sc_with_no_gc_barrier);

    // "evacuate followers".
    evacuate_followers.do_void();

    TrainPolicyKeepAliveClosure keep_alive((TrainGeneration*)_next_gen,
                                           &scan_weak_ref);
    ref_processor()->process_discovered_references(soft_ref_policy,
                                                   &is_alive,
                                                   &keep_alive,
                                                   &evacuate_followers);
  }

  // Swap the survivor spaces.
  eden()->clear();
  from()->clear();
  swap_spaces();

  assert(to()->is_empty(), "to space should be empty now");

  // Set the desired survivor size to half the real survivor space
  _tenuring_threshold =
    age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);

  if (PrintGC && !PrintGCDetails) {
    gch->print_heap_change(gch_prev_used);
  }
  SpecializationStats::print();
  update_time_of_last_gc(os::javaTimeMillis());
}