// Construct the young ("new") generation: register its address range with the
// barrier set (card table), create the eden and two survivor spaces, compute
// the maximum space sizes, and set up the jstat performance counters.
// `rs`/`initial_size` describe the reserved and initially committed memory,
// `level` is this generation's index in the generation hierarchy, and
// `policy` names the collector for the counter namespace.
DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   int level,
                                   const char* policy)
  : Generation(rs, initial_size, level),
    _objs_with_preserved_marks(NULL),
    _preserved_marks_of_objs(NULL),
    _promo_failure_scan_stack(NULL),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
{
  // Tell the barrier set which region it must cover before any space in this
  // generation is used.
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  // A "soft ended" eden (ConcEdenSpace) is used when the collector policy
  // supports concurrently adjusting eden's allocation limit; otherwise a
  // plain EdenSpace suffices.
  if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
    _eden_space = new ConcEdenSpace(this);
  } else {
    _eden_space = new EdenSpace(this);
  }
  _from_space = new ContiguousSpace();
  _to_space = new ContiguousSpace();

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
    vm_exit_during_initialization("Could not allocate a new gen space");

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, alignment);
  // Eden gets whatever is left after the two survivor spaces.
  _max_eden_size = size - (2*_max_survivor_size);

  // allocate the performance counters

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters("new", 0, 3, &_virtual_space);
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  // Lay out eden/from/to within the committed region (cleared and mangled
  // per the SpaceDecorator flags), then publish the initial counter values.
  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  update_counters();
  _next_gen = NULL;  // linked up later by the heap
  _tenuring_threshold = MaxTenuringThreshold;
  // Convert the byte-valued PretenureSizeThreshold flag to heap words.
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
}
// Construct the young ("new") generation: register its address range with the
// barrier set (card table), create eden and the two survivor spaces, compute
// the maximum space sizes, and set up the jstat performance counters plus the
// stop-the-world GC timer. `rs`/`initial_size` describe the reserved and
// initially committed memory; `policy` names the collector for the counter
// namespace.
DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   const char* policy)
  : Generation(rs, initial_size),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
{
  // Tell the barrier set which region it must cover before any space in this
  // generation is used.
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->barrier_set()->resize_covered_region(cmr);

  _eden_space = new ContiguousSpace();
  _from_space = new ContiguousSpace();
  _to_space = new ContiguousSpace();

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) {
    vm_exit_during_initialization("Could not allocate a new gen space");
  }

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  uintx alignment = gch->collector_policy()->space_alignment();
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, alignment);
  // Eden gets whatever is left after the two survivor spaces.
  _max_eden_size = size - (2*_max_survivor_size);

  // allocate the performance counters
  GenCollectorPolicy* gcp = gch->gen_policy();

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters("new", 0, 3,
      gcp->min_young_size(), gcp->max_young_size(), &_virtual_space);
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  // Lay out eden/from/to within the committed region (cleared and mangled
  // per the SpaceDecorator flags), then publish the initial counter values.
  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  update_counters();
  _old_gen = NULL;  // linked up later by the heap
  _tenuring_threshold = MaxTenuringThreshold;
  // Convert the byte-valued PretenureSizeThreshold flag to heap words.
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;

  // Timer used to report pause times for this generation's STW collections.
  _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
}
// Construct the young ("new") generation: register its address range with the
// barrier set, create eden and the two survivor spaces, compute the maximum
// space sizes, and set up the PERF_GC performance counters.
// `rs`/`initial_size` describe the reserved and initially committed memory,
// `level` is this generation's index in the generation hierarchy, and
// `policy` names the collector for the counter namespace.
DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   int level,
                                   const char* policy)
  : Generation(rs, initial_size, level)
{
  // Tell the barrier set which region it must cover before any space in this
  // generation is used.
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  _eden_space = new EdenSpace(this);
  _from_space = new ContiguousSpace();
  _to_space = new ContiguousSpace();

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
    vm_exit_during_initialization("Could not allocate a new gen space");

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  // NOTE(review): alignment comes from CarSpace::car_size() here — presumably
  // a train-collector-era constraint; confirm against the surrounding build.
  uintx alignment = CarSpace::car_size();
  uintx size = _virtual_space.reserved_size();
  uintx max_survivor_size = compute_survivor_size(size, alignment);
  // Eden gets whatever is left after the two survivor spaces.
  uintx max_eden_size = size - (2*max_survivor_size);

  // allocate the performance counters

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters(PERF_GC, "new", 0, 3, &_virtual_space);
  _gc_counters = new CollectorCounters(PERF_GC, policy, 0);

  // Space counters share the generation counters' name space.
  const char* ns = _gen_counters->name_space();
  _eden_counters = new CSpaceCounters(ns, "eden", 0, max_eden_size, _eden_space);
  _from_counters = new CSpaceCounters(ns, "s0", 1, max_survivor_size, _from_space);
  _to_counters = new CSpaceCounters(ns, "s1", 2, max_survivor_size, _to_space);

  // Lay out eden/from/to within the committed region, then publish the
  // initial counter values.
  compute_space_boundaries(0);
  update_counters();
  _next_gen = NULL;  // linked up later by the heap
  _tenuring_threshold = MaxTenuringThreshold;
  // Convert the byte-valued PretenureSizeThreshold flag to heap words.
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
}
// Resize this generation after a full collection, based on the next (older)
// generation's capacity (NewRatio) plus a per-thread allowance
// (NewSizeThreadIncrease). Expansion failure is tolerated; shrinking is only
// attempted when eden is empty. On any size change the space boundaries and
// the barrier set's covered region are recomputed.
void DefNewGeneration::compute_new_size() {
  // This is called after a gc that includes the following generation
  // (which is required to exist.) So from-space will normally be empty.
  // Note that we check both spaces, since if scavenge failed they revert roles.
  // If not we bail out (otherwise we would have to relocate the objects)
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  int next_level = level() + 1;
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(next_level < gch->_n_gens, "DefNewGeneration cannot be an oldest gen");

  Generation* next_gen = gch->_gens[next_level];
  size_t old_size = next_gen->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = spec()->init_size();
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size, "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  // Compute desired new generation size based on NewRatio and
  // NewSizeThreadIncrease
  size_t desired_new_size = old_size/NewRatio;
  int threads_count = Threads::number_of_non_daemon_threads();
  size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
  desired_new_size = align_size_up(desired_new_size + thread_increase_size,
                                   alignment);

  // Adjust new generation size: clamp to [min_new_size, max_new_size].
  desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

  bool changed = false;
  if (desired_new_size > new_size_before) {
    size_t change = desired_new_size - new_size_before;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
      changed = true;
    }
    // If the heap failed to expand to the desired size,
    // "changed" will be false. If the expansion failed
    // (and at this point it was expected to succeed),
    // ignore the failure (leaving "changed" as false).
  }
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // bail out of shrinking if objects in eden
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
    _virtual_space.shrink_by(change);
    changed = true;
  }
  if (changed) {
    // The spaces have already been mangled at this point but
    // may not have been cleared (set top = bottom) and should be.
    // Mangling was done when the heap was being expanded.
    compute_space_boundaries(eden()->used(),
                             SpaceDecorator::Clear,
                             SpaceDecorator::DontMangle);
    // Re-cover the (possibly resized) committed region with the card table.
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
    Universe::heap()->barrier_set()->resize_covered_region(cmr);
    if (Verbose && PrintGC) {
      size_t new_size_after = _virtual_space.committed_size();
      size_t eden_size_after = eden()->capacity();
      size_t survivor_size_after = from()->capacity();
      gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
        SIZE_FORMAT "K [eden=" SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
        new_size_before/K, new_size_after/K,
        eden_size_after/K, survivor_size_after/K);
      if (WizardMode) {
        gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
          thread_increase_size/K, threads_count);
      }
      gclog_or_tty->cr();
    }
  }
}
void DefNewGeneration::compute_new_size() { // This is called after a gc that includes the following generation // (which is required to exist.) So from-space will normally be empty. // Note that we check both spaces, since if scavenge failed they revert roles. // If not we bail out (otherwise we would have to relocate the objects) assert(to()->is_empty(), "to-space should be empty here"); if (!from()->is_empty()) return; int next_level = level() + 1; GenCollectedHeap* gch = GenCollectedHeap::heap(); assert(next_level < gch->_n_gens, "DefNewGeneration cannot be an oldest gen"); Generation* next_gen = gch->_gens[next_level]; size_t old_size = next_gen->capacity(); size_t new_size_before = _virtual_space.committed_size(); size_t min_new_size = spec()->init_size(); size_t max_new_size = reserved().byte_size(); assert(min_new_size <= new_size_before && new_size_before <= max_new_size, "just checking"); // All space sizes must be multiples of Generation::GenGrain. size_t alignment = Generation::GenGrain; // Compute desired new generation size based on NewRatio and // NewSizeThreadIncrease size_t desired_new_size = old_size/NewRatio; int threads_count = Threads::number_of_non_daemon_threads(); size_t thread_increase_size = threads_count * NewSizeThreadIncrease; desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment); // Adjust new generation size desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size); assert(desired_new_size <= max_new_size, "just checking"); bool changed = false; if (desired_new_size > new_size_before) { size_t change = desired_new_size - new_size_before; assert(change % alignment == 0, "just checking"); changed = _virtual_space.expand_by(change); if (!changed) { // Do better than this for Merlin vm_exit_out_of_memory(change, "heap expansion"); } } if (desired_new_size < new_size_before && eden()->is_empty()) { // bail out of shrinking if objects in eden size_t change = new_size_before - 
desired_new_size; assert(change % alignment == 0, "just checking"); _virtual_space.shrink_by(change); changed = true; } if (changed) { compute_space_boundaries(eden()->used()); MemRegion cmr((HeapWord*)_virtual_space.low(), (HeapWord*)_virtual_space.high()); Universe::heap()->barrier_set()->resize_covered_region(cmr); if (Verbose && PrintGC) { size_t new_size_after = _virtual_space.committed_size(); size_t eden_size_after = eden()->capacity(); size_t survivor_size_after = from()->capacity(); gclog_or_tty->print("New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden=" SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]", new_size_before/K, new_size_after/K, eden_size_after/K, survivor_size_after/K); if (WizardMode) { gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]", thread_increase_size/K, threads_count); } tty->cr(); } } }