void GenMarkSweep::mark_sweep_phase3() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  // Adjust the pointers to reflect the new locations
  GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", gc_timer());

  // Need new claim bits for the pointer adjustment tracing.
  ClassLoaderDataGraph::clear_claimed_marks();

  // Because the closure below is created statically, we cannot
  // use OopsInGenClosure constructor which takes a generation,
  // as the Universe has not been created when the static constructors
  // are run.
  adjust_pointer_closure.set_orig_generation(gch->old_gen());

  {
    StrongRootsScope srs(1);

    gch->gen_process_roots(&srs,
                           GenCollectedHeap::OldGen,
                           false, // Younger gens are not roots.
                           GenCollectedHeap::SO_AllCodeCache,
                           GenCollectedHeap::StrongAndWeakRoots,
                           &adjust_pointer_closure,
                           &adjust_pointer_closure,
                           &adjust_cld_closure);
  }

  gch->gen_process_weak_roots(&adjust_pointer_closure);

  adjust_marks();
  GenAdjustPointersClosure blk;
  gch->generation_iterate(&blk, true);
}
// Older variant of the same phase, taking an explicit generation level
// (pre-unified-logging GCTraceTime and the SharedHeap root-processing API):
void GenMarkSweep::mark_sweep_phase3(int level) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  // Adjust the pointers to reflect the new locations
  GCTraceTime tm("phase 3", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
  trace("3");

  // Need new claim bits for the pointer adjustment tracing.
  ClassLoaderDataGraph::clear_claimed_marks();

  // Because the closure below is created statically, we cannot
  // use OopsInGenClosure constructor which takes a generation,
  // as the Universe has not been created when the static constructors
  // are run.
  adjust_pointer_closure.set_orig_generation(gch->get_gen(level));

  gch->gen_process_roots(level,
                         false, // Younger gens are not roots.
                         true,  // activate StrongRootsScope
                         SharedHeap::SO_AllCodeCache,
                         GenCollectedHeap::StrongAndWeakRoots,
                         &adjust_pointer_closure,
                         &adjust_pointer_closure,
                         &adjust_cld_closure);

  gch->gen_process_weak_roots(&adjust_pointer_closure);

  adjust_marks();
  GenAdjustPointersClosure blk;
  gch->generation_iterate(&blk, true);
}
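// The sketch below is illustrative only and is not HotSpot code: ToyObject,
// adjust_sketch and the field layout are hypothetical names. It shows the
// shape of the adjust-pointers phase used by both variants above: by the
// time phase 3 runs, every live object already knows its destination (its
// forwarding address), and the phase rewrites roots and reference fields to
// those destinations before anything is actually moved.
#include <vector>

namespace adjust_sketch {

struct ToyObject {
  ToyObject* forwardee = nullptr;    // destination chosen by the earlier "compute addresses" phase
  std::vector<ToyObject*> fields;    // outgoing reference fields
};

// Return the post-compaction address for a reference, if one was assigned.
inline ToyObject* adjusted(ToyObject* ref) {
  return (ref != nullptr && ref->forwardee != nullptr) ? ref->forwardee : ref;
}

// Phase-3 analogue: visit the roots and every live object's fields and
// rewrite each slot to point at the referent's future location.
inline void adjust_pointers(std::vector<ToyObject*>& roots,
                            const std::vector<ToyObject*>& live_objects) {
  for (ToyObject*& root : roots) {
    root = adjusted(root);
  }
  for (ToyObject* obj : live_objects) {
    for (ToyObject*& field : obj->fields) {
      field = adjusted(field);
    }
  }
}

} // namespace adjust_sketch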
void GenMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime(Trace, gc) tm("Phase 1: Mark live objects", _gc_timer);

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  // Because follow_root_closure is created statically, cannot
  // use OopsInGenClosure constructor which takes a generation,
  // as the Universe has not been created when the static constructors
  // are run.
  follow_root_closure.set_orig_generation(gch->old_gen());

  // Need new claim bits before marking starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  {
    StrongRootsScope srs(1);

    gch->gen_process_roots(&srs,
                           GenCollectedHeap::OldGen,
                           false, // Younger gens are not roots.
                           GenCollectedHeap::SO_None,
                           ClassUnloading,
                           &follow_root_closure,
                           &follow_root_closure,
                           &follow_cld_closure);
  }

  // Process reference objects found during marking
  {
    ref_processor()->setup_policy(clear_all_softrefs);
    const ReferenceProcessorStats& stats =
      ref_processor()->process_discovered_references(
        &is_alive, &keep_alive, &follow_stack_closure, NULL, _gc_timer);
    gc_tracer()->report_gc_reference_stats(stats);
  }

  // This is the point where the entire marking should have completed.
  assert(_marking_stack.is_empty(), "Marking should have completed");

  // Unload classes and purge the SystemDictionary.
  bool purged_class = SystemDictionary::do_unloading(&is_alive);

  // Unload nmethods.
  CodeCache::do_unloading(&is_alive, purged_class);

  // Prune dead klasses from subklass/sibling/implementor lists.
  Klass::clean_weak_klass_links(&is_alive);

  // Delete entries for dead interned strings.
  StringTable::unlink(&is_alive);

  // Clean up unreferenced symbols in symbol table.
  SymbolTable::unlink();

  gc_tracer()->report_object_count_after_gc(&is_alive);
}
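// Illustrative sketch only, not HotSpot code: ToyObject and mark_sketch are
// hypothetical names. It shows the structure phase 1 relies on: mark each
// root, then drain a marking stack so every reachable object is visited
// exactly once; the assert in the function above corresponds to that stack
// being empty once tracing (including reference processing) finishes.
#include <vector>

namespace mark_sketch {

struct ToyObject {
  bool marked = false;
  std::vector<ToyObject*> fields;    // outgoing reference fields
};

inline void mark_live_objects(const std::vector<ToyObject*>& roots) {
  std::vector<ToyObject*> marking_stack;

  // Push every unmarked root.
  for (ToyObject* root : roots) {
    if (root != nullptr && !root->marked) {
      root->marked = true;
      marking_stack.push_back(root);
    }
  }

  // Drain the stack: follow fields transitively, marking on first visit.
  while (!marking_stack.empty()) {
    ToyObject* obj = marking_stack.back();
    marking_stack.pop_back();
    for (ToyObject* field : obj->fields) {
      if (field != nullptr && !field->marked) {
        field->marked = true;
        marking_stack.push_back(field);
      }
    }
  }
}

} // namespace mark_sketch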
void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  _gc_timer->register_gc_start();
  DefNewTracer gc_tracer;
  gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());

  _old_gen = gch->old_gen();

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    log_trace(gc)(":: Collection attempt not safe ::");
    gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  GCTraceTime(Trace, gc) tm("DefNew", NULL, gch->gc_cause());

  gch->trace_heap_before_gc(&gc_tracer);

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(),
         "save marks have not been newly set.");

  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();

  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
                                      gch->rem_set()->klass_rem_set());
  CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
                                           &fsc_with_no_gc_barrier,
                                           false);

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(),
         "save marks have not been newly set.");

  {
    // DefNew needs to run with n_threads == 0, to make sure the serial
    // version of the card table scanning code is used.
    // See: CardTableModRefBSForCTRS::non_clean_card_iterate_possibly_parallel.
    StrongRootsScope srs(0);

    gch->gen_process_roots(&srs,
                           GenCollectedHeap::YoungGen,
                           true,  // Process younger gens, if any,
                                  // as strong roots.
                           GenCollectedHeap::SO_ScavengeCodeCache,
                           GenCollectedHeap::StrongAndWeakRoots,
                           &fsc_with_no_gc_barrier,
                           &fsc_with_gc_barrier,
                           &cld_scan_closure);
  }

  // "evacuate followers".
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  const ReferenceProcessorStats& stats =
    rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                      NULL, _gc_timer);
  gc_tracer.report_gc_reference_stats(stats);
  gc_tracer.report_tenuring_threshold(tenuring_threshold());

  if (!_promotion_failed) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a young collection fails to collect
      // sufficient space resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
    size_policy->reset_gc_overhead_limit_count();
    assert(!gch->incremental_collection_failed(), "Should be clear");
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    log_debug(gc)("Promotion failed");
    // Add to-space to the list of space to compact
    // when a promotion failure has occurred.  In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For uniformity wrt ParNewGeneration.
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_failed();

    // Inform the next generation that a promotion failure occurred.
    _old_gen->promotion_failure_occurred();
    gc_tracer.report_promotion_failed(_promotion_failed_info);

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(gch->reset_promotion_should_fail();)
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());

  // We need to use a monotonically non-decreasing time in ms
  // or we will see time-warp warnings and os::javaTimeMillis()
  // does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  gch->trace_heap_after_gc(&gc_tracer);

  _gc_timer->register_gc_end();

  gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}
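// Illustrative sketch only, not HotSpot code: ToySpace, ToyYoungGen and the
// member names are hypothetical. It shows the two outcomes handled above:
// on a successful scavenge eden and from-space are emptied and the survivor
// spaces swap roles, while an object that has survived at least
// tenuring_threshold collections would be promoted to the old generation
// instead of being copied to to-space.
#include <utility>
#include <vector>

namespace scavenge_sketch {

struct ToySpace {
  std::vector<unsigned char> bytes;    // stand-in for the space's contents
  void clear() { bytes.clear(); }
  bool is_empty() const { return bytes.empty(); }
};

struct ToyYoungGen {
  ToySpace eden;
  ToySpace from;
  ToySpace to;
  int tenuring_threshold = 15;

  // Copy-scavenge placement decision: survivors that are old enough are
  // promoted; the rest stay in the young generation's to-space.
  bool should_promote(int object_age) const {
    return object_age >= tenuring_threshold;
  }

  // Successful-scavenge epilogue: eden and from-space become empty and the
  // freshly populated to-space becomes the new from-space.
  void finish_successful_scavenge() {
    eden.clear();
    from.clear();
    std::swap(from, to);
  }
};

} // namespace scavenge_sketch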