void DefNewGeneration::gc_epilogue(bool full) {
  // Check if the heap is approaching full after a collection has
  // been done.  Generally the young generation is empty at
  // a minimum at the end of a collection.  If it is not, then
  // the heap is approaching full.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  clear_should_allocate_from_space();
  if (collection_attempt_is_safe()) {
    gch->clear_incremental_collection_will_fail();
  } else {
    gch->set_incremental_collection_will_fail();
    if (full) { // we seem to be running out of space
      set_should_allocate_from_space();
    }
  }

  if (ZapUnusedHeapArea) {
    eden()->check_mangled_unused_area_complete();
    from()->check_mangled_unused_area_complete();
    to()->check_mangled_unused_area_complete();
  }

  // update the generation and space performance counters
  update_counters();
  gch->collector_policy()->counters()->update_counters();
}
void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  _next_gen = gch->next_gen(this);
  assert(_next_gen != NULL,
    "This must be the youngest gen, and not the only gen");

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    gch->set_incremental_collection_will_fail();
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  TraceTime t1("GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  SpecializationStats::clear();

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();

  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  gch->gen_process_strong_roots(_level,
                                true,  // Process younger gens, if any,
                                       // as strong roots.
                                true,  // activate StrongRootsScope
                                false, // not collecting perm generation.
                                SharedHeap::SO_AllClasses,
                                &fsc_with_no_gc_barrier,
                                true,  // walk *all* scavengable nmethods
                                &fsc_with_gc_barrier);

  // "evacuate followers".
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                    NULL);
  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a minor collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    // Set the desired survivor size to half the real survivor space
    _tenuring_threshold =
      age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);

    if (PrintGC && !PrintGCDetails) {
      gch->print_heap_change(gch_prev_used);
    }
  } else {
    assert(HandlePromotionFailure,
      "Should not be here unless promotion failure handling is on");
    assert(_promo_failure_scan_stack != NULL &&
      _promo_failure_scan_stack->length() == 0, "post condition");

    // deallocate stack and its elements
    delete _promo_failure_scan_stack;
    _promo_failure_scan_stack = NULL;

    remove_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (promotion failed) ");
    }
    // Add to-space to the list of spaces to compact
    // when a promotion failure has occurred.  In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For the sake of uniformity wrt ParNewGeneration::collect().
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_will_fail();

    // Inform the next generation that a promotion failure occurred.
    _next_gen->promotion_failure_occurred();

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());
  SpecializationStats::print();
  update_time_of_last_gc(os::javaTimeMillis());
}
void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_large_noref,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  _next_gen = gch->next_gen(this);
  assert(_next_gen != NULL,
    "This must be the youngest gen, and not the only gen");

  // If the next generation is too full to accommodate worst-case promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!full_promotion_would_succeed()) {
    gch->set_incremental_collection_will_fail();
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("DefNewGeneration::collect"
          " contiguous_available: " SIZE_FORMAT " < used: " SIZE_FORMAT,
          _next_gen->max_contiguous_available(), used());
    }
    return;
  }

  TraceTime t1("GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  SpecializationStats::clear();

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear();

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  // Weak refs.
  // FIXME: Are these storage leaks, or are they resource objects?
  NOT_COMPILER2(ReferencePolicy *soft_ref_policy = new LRUCurrentHeapPolicy());
  COMPILER2_ONLY(ReferencePolicy *soft_ref_policy = new LRUMaxHeapPolicy());

  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();
  if (!cp->is_train_policy()) {
    FastScanClosure fsc_with_no_gc_barrier(this, false);
    FastScanClosure fsc_with_gc_barrier(this, true);
    FastEvacuateFollowersClosure evacuate_followers(gch, _level,
                                                    &fsc_with_no_gc_barrier,
                                                    &fsc_with_gc_barrier);

    assert(gch->no_allocs_since_save_marks(0),
           "save marks have not been newly set.");

    gch->process_strong_roots(_level,
                              true, // Process younger gens, if any, as
                                    // strong roots.
                              false,// not collecting permanent generation.
                              GenCollectedHeap::CSO_AllClasses,
                              &fsc_with_gc_barrier,
                              &fsc_with_no_gc_barrier);

    // "evacuate followers".
    evacuate_followers.do_void();

    FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
    ref_processor()->process_discovered_references(soft_ref_policy,
                                                   &is_alive,
                                                   &keep_alive,
                                                   &evacuate_followers);
  } else { // Train policy
    ScanClosure sc_with_no_gc_barrier(this, false);
    ScanClosure sc_with_gc_barrier(this, true);
    EvacuateFollowersClosure evacuate_followers(gch, _level,
                                                &sc_with_no_gc_barrier,
                                                &sc_with_gc_barrier);

    gch->process_strong_roots(_level,
                              true, // Process younger gens, if any, as
                                    // strong roots.
                              false,// not collecting perm generation.
                              GenCollectedHeap::CSO_AllClasses,
                              &sc_with_gc_barrier,
                              &sc_with_no_gc_barrier);

    // "evacuate followers".
    evacuate_followers.do_void();

    TrainPolicyKeepAliveClosure keep_alive((TrainGeneration*)_next_gen,
                                           &scan_weak_ref);
    ref_processor()->process_discovered_references(soft_ref_policy,
                                                   &is_alive,
                                                   &keep_alive,
                                                   &evacuate_followers);
  }

  // Swap the survivor spaces.
  eden()->clear();
  from()->clear();
  swap_spaces();

  assert(to()->is_empty(), "to space should be empty now");

  // Set the desired survivor size to half the real survivor space
  _tenuring_threshold =
    age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);

  if (PrintGC && !PrintGCDetails) {
    gch->print_heap_change(gch_prev_used);
  }
  SpecializationStats::print();
  update_time_of_last_gc(os::javaTimeMillis());
}
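// --- Illustrative sketch (not part of the original source above) ------------
// Both collect() variants end by deriving _tenuring_threshold from the age
// table once the survivor spaces have been swapped. As a rough, self-contained
// illustration of that idea only (ToyAgeTable and its fields are assumed names,
// not HotSpot's AgeTable), a tenuring threshold can be computed by accumulating
// the volume of surviving objects per age until a desired survivor occupancy
// would be exceeded; objects at or above that age are promoted instead of
// being copied to the survivor space again.

#include <cstddef>
#include <algorithm>

struct ToyAgeTable {
  static const int table_size = 16;   // stand-in for a maximum tenuring threshold
  size_t sizes[table_size];           // volume of live objects observed at each age

  // Return the smallest age at which the accumulated survivor volume exceeds
  // the desired survivor size, capped at the table size.
  int compute_tenuring_threshold(size_t desired_survivor_size) const {
    size_t total = 0;
    int age = 1;
    while (age < table_size) {
      total += sizes[age];
      if (total > desired_survivor_size) break;
      age++;
    }
    return std::min(age, table_size - 1);
  }
};

// Usage (hypothetical): threshold = table.compute_tenuring_threshold(capacity / 2),
// mirroring the "half the real survivor space" intent noted in the code above.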