// This method assumes that from-space has live data and that
// any shrinkage of the young gen is limited by location of
// from-space.
size_t PSYoungGen::available_to_live() {
  size_t delta_in_survivor = 0;
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  const size_t space_alignment = heap->intra_heap_alignment();
  const size_t gen_alignment = heap->young_gen_alignment();

  MutableSpace* space_shrinking = NULL;
  if (from_space()->end() > to_space()->end()) {
    space_shrinking = from_space();
  } else {
    space_shrinking = to_space();
  }

  // Include any space that is committed but not included in
  // the survivor spaces.
  assert(((HeapWord*)virtual_space()->high()) >= space_shrinking->end(),
         "Survivor space beyond high end");
  size_t unused_committed = pointer_delta(virtual_space()->high(),
                                          space_shrinking->end(),
                                          sizeof(char));

  if (space_shrinking->is_empty()) {
    // Don't let the space shrink to 0
    assert(space_shrinking->capacity_in_bytes() >= space_alignment,
           "Space is too small");
    delta_in_survivor = space_shrinking->capacity_in_bytes() - space_alignment;
  } else {
    delta_in_survivor = pointer_delta(space_shrinking->end(),
                                      space_shrinking->top(),
                                      sizeof(char));
  }

  size_t delta_in_bytes = unused_committed + delta_in_survivor;
  delta_in_bytes = align_size_down(delta_in_bytes, gen_alignment);
  return delta_in_bytes;
}
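
// Illustrative sketch only (not HotSpot code): the computation above reduces
// to "committed bytes beyond the higher survivor, plus the unused tail of
// that survivor, rounded down to the generation alignment". The helper names
// and sample sizes below are hypothetical; align_size_down is assumed to
// round down to a multiple of a power-of-two alignment.
static size_t sketch_align_size_down(size_t size, size_t alignment) {
  return size & ~(alignment - 1);   // alignment must be a power of two
}

static size_t sketch_available_to_live(size_t unused_committed,
                                       size_t free_in_shrinking_survivor,
                                       size_t gen_alignment) {
  // e.g. 512K unused committed + 300K free in from-space, 64K alignment
  //      => align_size_down(812K, 64K) == 768K available to give back
  return sketch_align_size_down(unused_committed + free_in_shrinking_survivor,
                                gen_alignment);
}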
size_t ASPSYoungGen::available_for_expansion() {
  size_t current_committed_size = virtual_space()->committed_size();
  assert((gen_size_limit() >= current_committed_size),
         "generation size limit is wrong");
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  size_t result = gen_size_limit() - current_committed_size;
  size_t result_aligned = align_size_down(result, heap->young_gen_alignment());
  return result_aligned;
}
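
// Illustrative sketch only (hypothetical names, reusing
// sketch_align_size_down from the sketch above): expansion headroom is the
// gap between the generation's size limit and what is already committed,
// rounded down to the young-gen alignment so only whole alignment units are
// ever committed.
static size_t sketch_available_for_expansion(size_t gen_size_limit,
                                             size_t committed_size,
                                             size_t young_gen_alignment) {
  // e.g. limit 96M, committed 64M, alignment 2M => 32M available to expand
  return sketch_align_size_down(gen_size_limit - committed_size,
                                young_gen_alignment);
}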
// Return the number of bytes the young gen is willing to give up.
//
// Future implementations could check the survivors and if to_space is in the
// right place (below from_space), take a chunk from to_space.
size_t ASPSYoungGen::available_for_contraction() {
  size_t uncommitted_bytes = virtual_space()->uncommitted_size();
  if (uncommitted_bytes != 0) {
    return uncommitted_bytes;
  }

  if (eden_space()->is_empty()) {
    // Respect the minimum size for eden and for the young gen as a whole.
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    const size_t eden_alignment = heap->intra_heap_alignment();
    const size_t gen_alignment = heap->young_gen_alignment();

    assert(eden_space()->capacity_in_bytes() >= eden_alignment,
           "Alignment is wrong");
    size_t eden_avail = eden_space()->capacity_in_bytes() - eden_alignment;
    eden_avail = align_size_down(eden_avail, gen_alignment);

    assert(virtual_space()->committed_size() >= min_gen_size(),
           "minimum gen size is wrong");
    size_t gen_avail = virtual_space()->committed_size() - min_gen_size();
    assert(virtual_space()->is_aligned(gen_avail), "not aligned");

    const size_t max_contraction = MIN2(eden_avail, gen_avail);
    // See comment for ASPSOldGen::available_for_contraction()
    // for reasons the "increment" fraction is used.
    PSAdaptiveSizePolicy* policy = heap->size_policy();
    size_t result = policy->eden_increment_aligned_down(max_contraction);
    size_t result_aligned = align_size_down(result, gen_alignment);
    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr("ASPSYoungGen::available_for_contraction: "
        SIZE_FORMAT " K", result_aligned/K);
      gclog_or_tty->print_cr("  max_contraction " SIZE_FORMAT " K",
        max_contraction/K);
      gclog_or_tty->print_cr("  eden_avail " SIZE_FORMAT " K", eden_avail/K);
      gclog_or_tty->print_cr("  gen_avail " SIZE_FORMAT " K", gen_avail/K);
    }
    return result_aligned;
  }
  return 0;
}
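
// Illustrative sketch only (hypothetical names, reusing
// sketch_align_size_down from the first sketch): when nothing is uncommitted
// and eden is empty, the contraction amount is capped by both the eden
// headroom and the headroom above the minimum generation size, then scaled
// back so the policy shrinks the generation gradually. The 1/4 fraction below
// is a hypothetical stand-in for the percentage-based scaling that
// PSAdaptiveSizePolicy::eden_increment_aligned_down applies.
static size_t sketch_available_for_contraction(size_t eden_avail,
                                               size_t gen_avail,
                                               size_t gen_alignment) {
  size_t max_contraction = eden_avail < gen_avail ? eden_avail : gen_avail;
  size_t result = max_contraction / 4;   // hypothetical "increment" fraction
  // e.g. eden_avail 40M, gen_avail 24M, alignment 2M
  //      => max_contraction 24M, result 6M, aligned result 6M
  return sketch_align_size_down(result, gen_alignment);
}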