void ASParNewGeneration::compute_new_size() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "not a CMS generational heap");

  CMSAdaptiveSizePolicy* size_policy =
    (CMSAdaptiveSizePolicy*)gch->gen_policy()->size_policy();
  assert(size_policy->is_gc_cms_adaptive_size_policy(),
         "Wrong type of size policy");

  size_t survived = from()->used();
  if (!survivor_overflow()) {
    // Keep running averages on how much survived
    size_policy->avg_survived()->sample(survived);
  } else {
    size_t promoted =
      (size_t) next_gen()->gc_stats()->avg_promoted()->last_sample();
    assert(promoted < gch->capacity(), "Conversion problem?");
    size_t survived_guess = survived + promoted;
    size_policy->avg_survived()->sample(survived_guess);
  }

  size_t survivor_limit = max_survivor_size();
  _tenuring_threshold =
    size_policy->compute_survivor_space_size_and_threshold(
                                                     _survivor_overflow,
                                                     _tenuring_threshold,
                                                     survivor_limit);
  size_policy->avg_young_live()->sample(used());
  size_policy->avg_eden_live()->sample(eden()->used());

  size_policy->compute_young_generation_free_space(eden()->capacity(),
                                                   max_gen_size());

  resize(size_policy->calculated_eden_size_in_bytes(),
         size_policy->calculated_survivor_size_in_bytes());

  if (UsePerfData) {
    CMSGCAdaptivePolicyCounters* counters =
      (CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
    assert(counters->kind() ==
           GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
           "Wrong kind of counters");
    counters->update_tenuring_threshold(_tenuring_threshold);
    counters->update_survivor_overflowed(_survivor_overflow);
    counters->update_young_capacity(capacity());
  }
}
// Changes from PSYoungGen version
//  value of "alignment"
void ASParNewGeneration::space_invariants() {
  const size_t alignment = os::vm_page_size();

  // Currently, our eden size cannot shrink to zero
  guarantee(eden()->capacity() >= alignment, "eden too small");
  guarantee(from()->capacity() >= alignment, "from too small");
  guarantee(to()->capacity() >= alignment, "to too small");

  // Relationship of spaces to each other
  char* eden_start = (char*)eden()->bottom();
  char* eden_end   = (char*)eden()->end();
  char* from_start = (char*)from()->bottom();
  char* from_end   = (char*)from()->end();
  char* to_start   = (char*)to()->bottom();
  char* to_end     = (char*)to()->end();

  guarantee(eden_start >= virtual_space()->low(), "eden bottom");
  guarantee(eden_start < eden_end, "eden space consistency");
  guarantee(from_start < from_end, "from space consistency");
  guarantee(to_start < to_end, "to space consistency");

  // Check whether from space is below to space
  if (from_start < to_start) {
    // Eden, from, to
    guarantee(eden_end <= from_start, "eden/from boundary");
    guarantee(from_end <= to_start, "from/to boundary");
    guarantee(to_end <= virtual_space()->high(), "to end");
  } else {
    // Eden, to, from
    guarantee(eden_end <= to_start, "eden/to boundary");
    guarantee(to_end <= from_start, "to/from boundary");
    guarantee(from_end <= virtual_space()->high(), "from end");
  }

  // More checks that the virtual space is consistent with the spaces
  assert(virtual_space()->committed_size() >=
         (eden()->capacity() + to()->capacity() + from()->capacity()),
         "Committed size is inconsistent");
  assert(virtual_space()->committed_size() <= virtual_space()->reserved_size(),
         "Space invariant");
  char* eden_top = (char*)eden()->top();
  char* from_top = (char*)from()->top();
  char* to_top   = (char*)to()->top();
  assert(eden_top <= virtual_space()->high(), "eden top");
  assert(from_top <= virtual_space()->high(), "from top");
  assert(to_top   <= virtual_space()->high(), "to top");
}
void DefNewGeneration::gc_epilogue(bool full) {
  // Check if the heap is approaching full after a collection has
  // been done.  Generally the young generation is empty at
  // a minimum at the end of a collection.  If it is not, then
  // the heap is approaching full.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  clear_should_allocate_from_space();
  if (collection_attempt_is_safe()) {
    gch->clear_incremental_collection_will_fail();
  } else {
    gch->set_incremental_collection_will_fail();
    if (full) { // we seem to be running out of space
      set_should_allocate_from_space();
    }
  }

  if (ZapUnusedHeapArea) {
    eden()->check_mangled_unused_area_complete();
    from()->check_mangled_unused_area_complete();
    to()->check_mangled_unused_area_complete();
  }

  // update the generation and space performance counters
  update_counters();
  gch->collector_policy()->counters()->update_counters();
}
void DefNewGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) { blk->do_space(eden()); blk->do_space(from()); if (!usedOnly) blk->do_space(to()); }
HeapWord* DefNewGeneration::allocate(size_t word_size,
                                     bool is_large_noref,
                                     bool is_tlab) {
  // This is the slow-path allocation for the DefNewGeneration.
  // Most allocations are fast-path in compiled code.
  // We try to allocate from the eden.  If that works, we are happy.
  // Note that since DefNewGeneration supports lock-free allocation, we
  // have to use it here, as well.
  HeapWord* result = eden()->par_allocate(word_size);
  if (result == NULL) {
    // Tell the next generation we reached a limit.
    HeapWord* new_limit =
      next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
    if (new_limit != NULL) {
      eden()->set_soft_end(new_limit);
      result = eden()->par_allocate(word_size);
    } else {
      assert(eden()->soft_end() == eden()->end(),
             "invalid state after allocation_limit_reached returned null");
    }
    // If the eden is full and the last collection bailed out, we are running
    // out of heap space, and we try to allocate the from-space, too.
    // allocate_from_space can't be inlined because that would introduce a
    // circular dependency at compile time.
    if (result == NULL) {
      result = allocate_from_space(word_size);
    }
  }
  return result;
}
void DefNewGeneration::print_on(outputStream* st) const { Generation::print_on(st); st->print(" eden"); eden()->print_on(st); st->print(" from"); from()->print_on(st); st->print(" to "); to()->print_on(st); }
HeapWord* DefNewGeneration::allocate(size_t word_size, bool is_tlab) {
  // This is the slow-path allocation for the DefNewGeneration.
  // Most allocations are fast-path in compiled code.
  // We try to allocate from the eden.  If that works, we are happy.
  // Note that since DefNewGeneration supports lock-free allocation, we
  // have to use it here, as well.
  HeapWord* result = eden()->par_allocate(word_size);
  if (result != NULL) {
    return result;
  }
  do {
    HeapWord* old_limit = eden()->soft_end();
    if (old_limit < eden()->end()) {
      // Tell the next generation we reached a limit.
      HeapWord* new_limit =
        next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
      if (new_limit != NULL) {
        Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
      } else {
        assert(eden()->soft_end() == eden()->end(),
               "invalid state after allocation_limit_reached returned null");
      }
    } else {
      // The allocation failed and the soft limit is equal to the hard limit,
      // there are no reasons to do an attempt to allocate
      assert(old_limit == eden()->end(), "sanity check");
      break;
    }
    // Try to allocate until succeeded or the soft limit can't be adjusted
    result = eden()->par_allocate(word_size);
  } while (result == NULL);

  // If the eden is full and the last collection bailed out, we are running
  // out of heap space, and we try to allocate the from-space, too.
  // allocate_from_space can't be inlined because that would introduce a
  // circular dependency at compile time.
  if (result == NULL) {
    result = allocate_from_space(word_size);
  }
  return result;
}
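// The retry loop above relies on eden()->par_allocate() being a lock-free
// bump-pointer allocator and on the soft limit being advanced with a CAS.
// The standalone sketch below illustrates that pattern only; it is not
// HotSpot code, and BumpRegion, its fields, and its par_allocate() are
// simplified stand-ins invented for illustration.
#include <atomic>
#include <cstddef>

struct BumpRegion {
  std::atomic<char*> top;       // current allocation pointer
  std::atomic<char*> soft_end;  // soft limit, may sit below the hard end
  char*              end;       // hard limit of the region

  // Lock-free bump-pointer allocation: claim [old_top, old_top + size)
  // with a CAS on top; return nullptr if the soft limit would be exceeded
  // so the caller can raise soft_end (itself via CAS) and retry.
  void* par_allocate(std::size_t size) {
    for (;;) {
      char* old_top = top.load(std::memory_order_relaxed);
      char* new_top = old_top + size;
      if (new_top > soft_end.load(std::memory_order_relaxed)) {
        return nullptr;  // caller decides whether to adjust the soft limit
      }
      if (top.compare_exchange_weak(old_top, new_top,
                                    std::memory_order_acq_rel)) {
        return old_top;  // successfully claimed the block
      }
      // CAS lost to a concurrent allocator; retry with the fresh top.
    }
  }
};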
int DefNewGeneration::addr_to_arena_id(void* addr) {
  if (eden()->contains(addr)) return 0;
  if (from()->contains(addr)) {
    return (from()->bottom() < to()->bottom()) ? 1 : 2;
  }
  if (to()->contains(addr)) {
    return (from()->bottom() < to()->bottom()) ? 2 : 1;
  }
  // Otherwise...
  return -3;
}
void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size) {
  // All space sizes must be multiples of car size in order for the
  // CarTable to work.  Note that the CarTable is used with and without
  // train gc (for fast lookup).
  uintx alignment = CarSpace::car_size();

  // Compute sizes
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, alignment);
  uintx eden_size = size - (2*survivor_size);
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  if (eden_size < minimum_eden_size) {
    // May happen due to 64Kb rounding, if so adjust eden size back up
    minimum_eden_size = align_size_up(minimum_eden_size, alignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size =
      align_size_down(maximum_survivor_size, alignment);
    survivor_size = MAX2(unaligned_survivor_size, alignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }

  char *eden_start = _virtual_space.low();
  char *from_start = eden_start + eden_size;
  char *to_start   = from_start + survivor_size;
  char *to_end     = to_start   + survivor_size;

  assert(to_end == _virtual_space.high(), "just checking");
  assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)to_start),   "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start,   (HeapWord*)to_end);

  eden()->initialize(edenMR, (minimum_eden_size == 0));
  from()->initialize(fromMR, true);
  to()->initialize(toMR, true);
  if (jvmpi::is_event_enabled(JVMPI_EVENT_ARENA_NEW)) {
    CollectedHeap* ch = Universe::heap();
    jvmpi::post_arena_new_event(ch->addr_to_arena_id(eden_start), "Eden");
    jvmpi::post_arena_new_event(ch->addr_to_arena_id(from_start), "Semi");
    jvmpi::post_arena_new_event(ch->addr_to_arena_id(to_start),   "Semi");
  }
}
void DefNewGeneration::remove_forwarding_pointers() {
  RemoveForwardPointerClosure rspc;
  eden()->object_iterate(&rspc);
  from()->object_iterate(&rspc);

  // Now restore saved marks, if any.
  assert(_objs_with_preserved_marks.size() == _preserved_marks_of_objs.size(),
         "should be the same");
  while (!_objs_with_preserved_marks.is_empty()) {
    oop obj   = _objs_with_preserved_marks.pop();
    markOop m = _preserved_marks_of_objs.pop();
    obj->set_mark(m);
  }
  _objs_with_preserved_marks.clear(true);
  _preserved_marks_of_objs.clear(true);
}
void DefNewGeneration::swap_spaces() {
  ContiguousSpace* s = from();
  _from_space        = to();
  _to_space          = s;
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);

  if (UsePerfData) {
    CSpaceCounters* c = _from_counters;
    _from_counters = _to_counters;
    _to_counters = c;
  }
}
int main() {
  world odd;
  world even;
  int i;

  init(odd);
  init(even);
  eden(even);

  // simulation
  for (i = 0; i < CYCLES; ++i) {
    if ((i % 2)) {
      update(even, odd);
      dele(odd);
    } else {
      update(odd, even);
      dele(even);
    }
  }
}
void DefNewGeneration::remove_forwarding_pointers() {
  RemoveForwardPointerClosure rspc;
  eden()->object_iterate(&rspc);
  from()->object_iterate(&rspc);
  // Now restore saved marks, if any.
  if (_objs_with_preserved_marks != NULL) {
    assert(_preserved_marks_of_objs != NULL, "Both or none.");
    assert(_objs_with_preserved_marks->length() ==
           _preserved_marks_of_objs->length(), "Both or none.");
    for (int i = 0; i < _objs_with_preserved_marks->length(); i++) {
      oop obj   = _objs_with_preserved_marks->at(i);
      markOop m = _preserved_marks_of_objs->at(i);
      obj->set_mark(m);
    }
    delete _objs_with_preserved_marks;
    delete _preserved_marks_of_objs;
    _objs_with_preserved_marks = NULL;
    _preserved_marks_of_objs   = NULL;
  }
}
void DefNewGeneration::object_iterate(ObjectClosure* blk) { eden()->object_iterate(blk); from()->object_iterate(blk); }
size_t DefNewGeneration::contiguous_available() const { return eden()->free(); }
size_t DefNewGeneration::capacity_before_gc() const { return eden()->capacity(); }
size_t DefNewGeneration::unsafe_max_alloc_nogc() const { return eden()->free(); }
size_t DefNewGeneration::free() const {
  return eden()->free()
       + from()->free();      // to() is only used during scavenge
}
void DefNewGeneration::compute_new_size() {
  // This is called after a gc that includes the following generation
  // (which is required to exist.)  So from-space will normally be empty.
  // Note that we check both spaces, since if scavenge failed they revert roles.
  // If not we bail out (otherwise we would have to relocate the objects)
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  int next_level = level() + 1;
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(next_level < gch->_n_gens,
         "DefNewGeneration cannot be an oldest gen");

  Generation* next_gen = gch->_gens[next_level];
  size_t old_size = next_gen->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = spec()->init_size();
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  // Compute desired new generation size based on NewRatio and
  // NewSizeThreadIncrease
  size_t desired_new_size = old_size/NewRatio;
  int threads_count = Threads::number_of_non_daemon_threads();
  size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
  desired_new_size = align_size_up(desired_new_size + thread_increase_size,
                                   alignment);

  // Adjust new generation size
  desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

  bool changed = false;
  if (desired_new_size > new_size_before) {
    size_t change = desired_new_size - new_size_before;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
      changed = true;
    }
    // If the heap failed to expand to the desired size,
    // "changed" will be false.  If the expansion failed
    // (and at this point it was expected to succeed),
    // ignore the failure (leaving "changed" as false).
  }
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // bail out of shrinking if objects in eden
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
    _virtual_space.shrink_by(change);
    changed = true;
  }
  if (changed) {
    // The spaces have already been mangled at this point but
    // may not have been cleared (set top = bottom) and should be.
    // Mangling was done when the heap was being expanded.
    compute_space_boundaries(eden()->used(),
                             SpaceDecorator::Clear,
                             SpaceDecorator::DontMangle);
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
    Universe::heap()->barrier_set()->resize_covered_region(cmr);
    if (Verbose && PrintGC) {
      size_t new_size_after  = _virtual_space.committed_size();
      size_t eden_size_after = eden()->capacity();
      size_t survivor_size_after = from()->capacity();
      gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
        SIZE_FORMAT "K [eden=" SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
        new_size_before/K, new_size_after/K,
        eden_size_after/K, survivor_size_after/K);
      if (WizardMode) {
        gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
          thread_increase_size/K, threads_count);
      }
      gclog_or_tty->cr();
    }
  }
}
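// Worked example of the sizing rule above.  This is a standalone sketch with
// made-up input values (old_size, NewRatio, thread count, alignment, and the
// min/max bounds are all illustrative, not taken from any real VM run); it
// only reproduces the desired_new_size arithmetic from compute_new_size().
#include <algorithm>
#include <cstddef>
#include <cstdio>

// Hypothetical stand-in for align_size_up.
static std::size_t align_up(std::size_t n, std::size_t a) {
  return (n + a - 1) / a * a;
}

int main() {
  const std::size_t old_size        = 192 * 1024 * 1024;  // next_gen->capacity()
  const std::size_t new_ratio       = 2;                   // NewRatio
  const std::size_t thread_increase = 16 * 1024;           // NewSizeThreadIncrease
  const int         threads         = 50;                  // non-daemon threads
  const std::size_t alignment       = 64 * 1024;           // GenGrain stand-in
  const std::size_t min_new         = 32 * 1024 * 1024;    // spec()->init_size()
  const std::size_t max_new         = 128 * 1024 * 1024;   // reserved().byte_size()

  // desired = old/NewRatio + threads * NewSizeThreadIncrease, aligned up,
  // then clamped into [min_new, max_new], as in compute_new_size().
  std::size_t desired = old_size / new_ratio +
                        std::size_t(threads) * thread_increase;
  desired = align_up(desired, alignment);
  desired = std::max(std::min(desired, max_new), min_new);

  std::printf("desired new generation size = %zuK\n", desired / 1024);
  return 0;
}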
// Moved from inline file as they are not called inline
CompactibleSpace* DefNewGeneration::first_compaction_space() const {
  return eden();
}
void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
                                                bool clear_space,
                                                bool mangle_space) {
  uintx alignment =
    GenCollectedHeap::heap()->collector_policy()->space_alignment();

  // If the spaces are being cleared (only done at heap initialization
  // currently), the survivor spaces need not be empty.
  // Otherwise, no care is taken for used areas in the survivor spaces
  // so check.
  assert(clear_space || (to()->is_empty() && from()->is_empty()),
         "Initialization of the survivor spaces assumes these are empty");

  // Compute sizes
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, alignment);
  uintx eden_size = size - (2*survivor_size);
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  if (eden_size < minimum_eden_size) {
    // May happen due to 64Kb rounding, if so adjust eden size back up
    minimum_eden_size = align_size_up(minimum_eden_size, alignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size =
      align_size_down(maximum_survivor_size, alignment);
    survivor_size = MAX2(unaligned_survivor_size, alignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }

  char *eden_start = _virtual_space.low();
  char *from_start = eden_start + eden_size;
  char *to_start   = from_start + survivor_size;
  char *to_end     = to_start   + survivor_size;

  assert(to_end == _virtual_space.high(), "just checking");
  assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)to_start),   "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start,   (HeapWord*)to_end);

  // A minimum eden size implies that there is a part of eden that
  // is being used and that affects the initialization of any
  // newly formed eden.
  bool live_in_eden = minimum_eden_size > 0;

  // If not clearing the spaces, do some checking to verify that
  // the space are already mangled.
  if (!clear_space) {
    // Must check mangling before the spaces are reshaped.  Otherwise,
    // the bottom or end of one space may have moved into another
    // a failure of the check may not correctly indicate which space
    // is not properly mangled.
    if (ZapUnusedHeapArea) {
      HeapWord* limit = (HeapWord*) _virtual_space.high();
      eden()->check_mangled_unused_area(limit);
      from()->check_mangled_unused_area(limit);
      to()->check_mangled_unused_area(limit);
    }
  }

  // Reset the spaces for their new regions.
  eden()->initialize(edenMR,
                     clear_space && !live_in_eden,
                     SpaceDecorator::Mangle);
  // If clear_space and live_in_eden, we will not have cleared any
  // portion of eden above its top.  This can cause newly
  // expanded space not to be mangled if using ZapUnusedHeapArea.
  // We explicitly do such mangling here.
  if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
    eden()->mangle_unused_area();
  }
  from()->initialize(fromMR, clear_space, mangle_space);
  to()->initialize(toMR, clear_space, mangle_space);

  // Set next compaction spaces.
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);
}
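// Illustration of the eden/survivor split computed above.  This standalone
// sketch is not HotSpot code: the internals of compute_survivor_size are an
// assumption here (one part per survivor space and SurvivorRatio parts for
// eden, aligned down, never below the alignment); only the
// eden = committed - 2 * survivor relationship and the minimum-eden
// adjustment are taken from compute_space_boundaries().
#include <algorithm>
#include <cstddef>
#include <cstdio>

// Hypothetical stand-ins for align_size_down / align_size_up.
static std::size_t align_down(std::size_t n, std::size_t a) { return n - (n % a); }
static std::size_t align_up(std::size_t n, std::size_t a)   { return align_down(n + a - 1, a); }

int main() {
  const std::size_t alignment      = 64 * 1024;         // space alignment (64K)
  const std::size_t committed      = 16 * 1024 * 1024;  // committed young gen
  const std::size_t survivor_ratio = 8;                 // assumed SurvivorRatio

  std::size_t survivor = std::max(
      align_down(committed / (survivor_ratio + 2), alignment), alignment);
  std::size_t eden = committed - 2 * survivor;

  // Minimum-eden adjustment: if eden came out too small, shrink the
  // survivors so eden keeps at least minimum_eden_size.
  std::size_t minimum_eden_size = 4 * 1024 * 1024;
  if (eden < minimum_eden_size) {
    minimum_eden_size = align_up(minimum_eden_size, alignment);
    std::size_t max_survivor = (committed - minimum_eden_size) / 2;
    survivor = std::max(align_down(max_survivor, alignment), alignment);
    eden = committed - 2 * survivor;
  }

  std::printf("eden = %zuK, each survivor = %zuK\n",
              eden / 1024, survivor / 1024);
  return 0;
}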
void DefNewGeneration::object_iterate_since_last_GC(ObjectClosure* cl) {
  // $$$ This may be wrong in case of "scavenge failure"?
  eden()->object_iterate(cl);
}
void DefNewGeneration::reset_saved_marks() { eden()->reset_saved_mark(); to()->reset_saved_mark(); from()->reset_saved_mark(); }
HeapWord* DefNewGeneration::par_allocate(size_t word_size, bool is_tlab) { return eden()->par_allocate(word_size); }
void DefNewGeneration::record_spaces_top() { assert(ZapUnusedHeapArea, "Not mangling unused space"); eden()->set_top_for_allocations(); to()->set_top_for_allocations(); from()->set_top_for_allocations(); }
void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  _gc_timer->register_gc_start();
  DefNewTracer gc_tracer;
  gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());

  _next_gen = gch->next_gen(this);

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: Collection attempt not safe :: ");
    }
    gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()),
                 PrintGC && !PrintGCDetails, true, NULL);
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  gch->trace_heap_before_gc(&gc_tracer);

  SpecializationStats::clear();

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();

  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
                                      gch->rem_set()->klass_rem_set());

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;

  gch->gen_process_strong_roots(_level,
                                true,  // Process younger gens, if any,
                                       // as strong roots.
                                true,  // activate StrongRootsScope
                                true,  // is scavenging
                                SharedHeap::ScanningOption(so),
                                &fsc_with_no_gc_barrier,
                                true,   // walk *all* scavengable nmethods
                                &fsc_with_gc_barrier,
                                &klass_scan_closure);

  // "evacuate followers".
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  const ReferenceProcessorStats& stats =
    rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                      NULL, _gc_timer);
  gc_tracer.report_gc_reference_stats(stats);

  if (!_promotion_failed) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a minor collection fails to collect
      // sufficient space resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
    size_policy->reset_gc_overhead_limit_count();
    if (PrintGC && !PrintGCDetails) {
      gch->print_heap_change(gch_prev_used);
    }
    assert(!gch->incremental_collection_failed(), "Should be clear");
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (promotion failed) ");
    }
    // Add to-space to the list of space to compact
    // when a promotion failure has occurred.  In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For uniformity wrt ParNewGeneration.
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_failed();

    // Inform the next generation that a promotion failure occurred.
    _next_gen->promotion_failure_occurred();
    gc_tracer.report_promotion_failed(_promotion_failed_info);

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());
  SpecializationStats::print();

  // We need to use a monotonically non-decreasing time in ms
  // or we will see time-warp warnings and os::javaTimeMillis()
  // does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  gch->trace_heap_after_gc(&gc_tracer);
  gc_tracer.report_tenuring_threshold(tenuring_threshold());

  _gc_timer->register_gc_end();

  gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}
void DefNewGeneration::gc_prologue(bool full) {
  // Ensure that _end and _soft_end are the same in eden space.
  eden()->set_soft_end(eden()->end());
}
void DefNewGeneration::verify(bool allow_dirty) { eden()->verify(allow_dirty); from()->verify(allow_dirty); to()->verify(allow_dirty); }
bool DefNewGeneration::no_allocs_since_save_marks() { assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden"); assert(from()->saved_mark_at_top(), "Violated spec - alloc in from"); return to()->saved_mark_at_top(); }
size_t DefNewGeneration::tlab_capacity() const { return eden()->capacity(); }