void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  _next_gen = gch->next_gen(this);
  assert(_next_gen != NULL,
    "This must be the youngest gen, and not the only gen");

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    gch->set_incremental_collection_will_fail();
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  TraceTime t1("GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  SpecializationStats::clear();

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();

  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  gch->gen_process_strong_roots(_level,
                                true,  // Process younger gens, if any,
                                       // as strong roots.
                                true,  // activate StrongRootsScope
                                false, // not collecting perm generation.
                                SharedHeap::SO_AllClasses,
                                &fsc_with_no_gc_barrier,
                                true,   // walk *all* scavengable nmethods
                                &fsc_with_gc_barrier);

  // "evacuate followers".
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                    NULL);
  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a minor collection fails to collect
      // sufficient space resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    // Set the desired survivor size to half the real survivor space
    _tenuring_threshold =
      age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);

    if (PrintGC && !PrintGCDetails) {
      gch->print_heap_change(gch_prev_used);
    }
  } else {
    assert(HandlePromotionFailure,
      "Should not be here unless promotion failure handling is on");
    assert(_promo_failure_scan_stack != NULL &&
      _promo_failure_scan_stack->length() == 0, "post condition");

    // deallocate stack and its elements
    delete _promo_failure_scan_stack;
    _promo_failure_scan_stack = NULL;

    remove_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (promotion failed) ");
    }
    // Add to-space to the list of space to compact
    // when a promotion failure has occurred.  In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For the sake of uniformity wrt ParNewGeneration::collect().
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_will_fail();

    // Inform the next generation that a promotion failure occurred.
    _next_gen->promotion_failure_occurred();

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());
  SpecializationStats::print();
  update_time_of_last_gc(os::javaTimeMillis());
}

void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  _gc_timer->register_gc_start();
  DefNewTracer gc_tracer;
  gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());

  _next_gen = gch->next_gen(this);

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: Collection attempt not safe :: ");
    }
    gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  gch->trace_heap_before_gc(&gc_tracer);

  SpecializationStats::clear();

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();

  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
                                      gch->rem_set()->klass_rem_set());

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;

  gch->gen_process_strong_roots(_level,
                                true,  // Process younger gens, if any,
                                       // as strong roots.
                                true,  // activate StrongRootsScope
                                true,  // is scavenging
                                SharedHeap::ScanningOption(so),
                                &fsc_with_no_gc_barrier,
                                true,   // walk *all* scavengable nmethods
                                &fsc_with_gc_barrier,
                                &klass_scan_closure);

  // "evacuate followers".
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  const ReferenceProcessorStats& stats =
    rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                      NULL, _gc_timer);
  gc_tracer.report_gc_reference_stats(stats);

  if (!_promotion_failed) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a minor collection fails to collect
      // sufficient space resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
    size_policy->reset_gc_overhead_limit_count();
    if (PrintGC && !PrintGCDetails) {
      gch->print_heap_change(gch_prev_used);
    }
    assert(!gch->incremental_collection_failed(), "Should be clear");
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (promotion failed) ");
    }
    // Add to-space to the list of space to compact
    // when a promotion failure has occurred.  In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For uniformity wrt ParNewGeneration.
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_failed();

    // Inform the next generation that a promotion failure occurred.
    _next_gen->promotion_failure_occurred();
    gc_tracer.report_promotion_failed(_promotion_failed_info);

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());
  SpecializationStats::print();

  // We need to use a monotonically non-decreasing time in ms
  // or we will see time-warp warnings and os::javaTimeMillis()
  // does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  gch->trace_heap_after_gc(&gc_tracer);
  gc_tracer.report_tenuring_threshold(tenuring_threshold());

  _gc_timer->register_gc_end();

  gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}

void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_large_noref,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  _next_gen = gch->next_gen(this);
  assert(_next_gen != NULL,
    "This must be the youngest gen, and not the only gen");

  // If the next generation is too full to accommodate worst-case promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!full_promotion_would_succeed()) {
    gch->set_incremental_collection_will_fail();
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("DefNewGeneration::collect"
                             " contiguous_available: " SIZE_FORMAT
                             " < used: " SIZE_FORMAT,
                             _next_gen->max_contiguous_available(), used());
    }
    return;
  }

  TraceTime t1("GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  SpecializationStats::clear();

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear();

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  // Weak refs.
  // FIXME: Are these storage leaks, or are they resource objects?
  NOT_COMPILER2(ReferencePolicy *soft_ref_policy = new LRUCurrentHeapPolicy());
  COMPILER2_ONLY(ReferencePolicy *soft_ref_policy = new LRUMaxHeapPolicy());

  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();
  if (!cp->is_train_policy()) {
    FastScanClosure fsc_with_no_gc_barrier(this, false);
    FastScanClosure fsc_with_gc_barrier(this, true);
    FastEvacuateFollowersClosure evacuate_followers(gch, _level,
                                                    &fsc_with_no_gc_barrier,
                                                    &fsc_with_gc_barrier);

    assert(gch->no_allocs_since_save_marks(0),
           "save marks have not been newly set.");

    gch->process_strong_roots(_level,
                              true, // Process younger gens, if any, as
                                    // strong roots.
                              false,// not collecting permanent generation.
                              GenCollectedHeap::CSO_AllClasses,
                              &fsc_with_gc_barrier,
                              &fsc_with_no_gc_barrier);

    // "evacuate followers".
    evacuate_followers.do_void();

    FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
    ref_processor()->process_discovered_references(soft_ref_policy,
                                                   &is_alive,
                                                   &keep_alive,
                                                   &evacuate_followers);
  } else { // Train policy
    ScanClosure sc_with_no_gc_barrier(this, false);
    ScanClosure sc_with_gc_barrier(this, true);
    EvacuateFollowersClosure evacuate_followers(gch, _level,
                                                &sc_with_no_gc_barrier,
                                                &sc_with_gc_barrier);

    gch->process_strong_roots(_level,
                              true, // Process younger gens, if any, as
                                    // strong roots.
                              false,// not collecting perm generation.
                              GenCollectedHeap::CSO_AllClasses,
                              &sc_with_gc_barrier,
                              &sc_with_no_gc_barrier);

    // "evacuate followers".
    evacuate_followers.do_void();

    TrainPolicyKeepAliveClosure keep_alive((TrainGeneration*)_next_gen,
                                           &scan_weak_ref);
    ref_processor()->process_discovered_references(soft_ref_policy,
                                                   &is_alive,
                                                   &keep_alive,
                                                   &evacuate_followers);
  }

  // Swap the survivor spaces.
  eden()->clear();
  from()->clear();
  swap_spaces();

  assert(to()->is_empty(), "to space should be empty now");

  // Set the desired survivor size to half the real survivor space
  _tenuring_threshold =
    age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);

  if (PrintGC && !PrintGCDetails) {
    gch->print_heap_change(gch_prev_used);
  }
  SpecializationStats::print();
  update_time_of_last_gc(os::javaTimeMillis());
}