size_t ASPSOldGen::available_for_expansion() {
  assert(virtual_space()->is_aligned(gen_size_limit()), "not aligned");
  assert(gen_size_limit() >= virtual_space()->committed_size(), "bad gen size");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  size_t result =  gen_size_limit() - virtual_space()->committed_size();
  size_t result_aligned = align_size_down(result, heap->old_gen_alignment());
  return result_aligned;
}
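Every example in this listing leans on the same rounding helpers. As a point of reference, here is a minimal standalone sketch of that arithmetic for power-of-two alignments, together with the headroom calculation available_for_expansion() performs; the helper names and all sizes below are invented for illustration, not HotSpot code.

#include <cassert>
#include <cstddef>
#include <cstdio>

// Round size down (or up) to a multiple of alignment; alignment must be a power of two.
static size_t align_down(size_t size, size_t alignment) {
  assert((alignment & (alignment - 1)) == 0 && "power-of-two alignment expected");
  return size & ~(alignment - 1);
}
static size_t align_up(size_t size, size_t alignment) {
  return align_down(size + alignment - 1, alignment);
}

int main() {
  const size_t gen_alignment  = 64 * 1024;                // assumed old gen alignment
  const size_t gen_size_limit = 256 * 1024 * 1024;        // assumed size limit
  const size_t committed      = 100 * 1024 * 1024 + 12345;
  // Same shape as available_for_expansion(): remaining headroom, rounded down.
  size_t available = align_down(gen_size_limit - committed, gen_alignment);
  printf("available for expansion: %zu bytes\n", available);
  printf("align_up example: %zu\n", align_up(100000, 64 * 1024));  // prints 131072
  return 0;
}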
Example #2
size_t ASPSYoungGen::available_for_expansion() {

  size_t current_committed_size = virtual_space()->committed_size();
  assert((gen_size_limit() >= current_committed_size),
    "generation size limit is wrong");
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  size_t result =  gen_size_limit() - current_committed_size;
  size_t result_aligned = align_size_down(result, heap->young_gen_alignment());
  return result_aligned;
}
Example #3
// Return the number of bytes the young gen is willing to give up.
//
// Future implementations could check the survivors and if to_space is in the
// right place (below from_space), take a chunk from to_space.
size_t ASPSYoungGen::available_for_contraction() {

  size_t uncommitted_bytes = virtual_space()->uncommitted_size();
  if (uncommitted_bytes != 0) {
    return uncommitted_bytes;
  }

  if (eden_space()->is_empty()) {
    // Respect the minimum size for eden and for the young gen as a whole.
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    const size_t eden_alignment = heap->intra_heap_alignment();
    const size_t gen_alignment = heap->young_gen_alignment();

    assert(eden_space()->capacity_in_bytes() >= eden_alignment,
      "Alignment is wrong");
    size_t eden_avail = eden_space()->capacity_in_bytes() - eden_alignment;
    eden_avail = align_size_down(eden_avail, gen_alignment);

    assert(virtual_space()->committed_size() >= min_gen_size(),
      "minimum gen size is wrong");
    size_t gen_avail = virtual_space()->committed_size() - min_gen_size();
    assert(virtual_space()->is_aligned(gen_avail), "not aligned");

    const size_t max_contraction = MIN2(eden_avail, gen_avail);
    // See comment for ASPSOldGen::available_for_contraction()
    // for reasons the "increment" fraction is used.
    PSAdaptiveSizePolicy* policy = heap->size_policy();
    size_t result = policy->eden_increment_aligned_down(max_contraction);
    size_t result_aligned = align_size_down(result, gen_alignment);
    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr("ASPSYoungGen::available_for_contraction: %d K",
        result_aligned/K);
      gclog_or_tty->print_cr("  max_contraction %d K", max_contraction/K);
      gclog_or_tty->print_cr("  eden_avail %d K", eden_avail/K);
      gclog_or_tty->print_cr("  gen_avail %d K", gen_avail/K);
    }
    return result_aligned;

  }

  return 0;
}
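A worked version of the bound above with invented sizes; the eden_increment_aligned_down() fraction is omitted here, so this only shows how the eden limit and the generation limit are combined and re-aligned.

#include <algorithm>
#include <cstddef>
#include <cstdio>

static size_t align_down(size_t size, size_t alignment) {
  return size & ~(alignment - 1);   // power-of-two alignment assumed
}

int main() {
  const size_t eden_alignment = 64 * 1024;        // assumed intra-heap alignment
  const size_t gen_alignment  = 128 * 1024;       // assumed young gen alignment
  const size_t eden_capacity  = 8 * 1024 * 1024;
  const size_t committed      = 12 * 1024 * 1024;
  const size_t min_gen_size   = 4 * 1024 * 1024;

  // Keep at least one aligned chunk of eden, and never drop below the minimum gen size.
  size_t eden_avail = align_down(eden_capacity - eden_alignment, gen_alignment);
  size_t gen_avail  = committed - min_gen_size;
  size_t max_contraction = std::min(eden_avail, gen_avail);
  printf("max contraction: %zu K\n", align_down(max_contraction, gen_alignment) / 1024);
  return 0;
}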
Flag::Error InitialBootClassLoaderMetaspaceSizeConstraintFunc(size_t value, bool verbose) {
  size_t aligned_max = (size_t)align_size_down(max_uintx/2, Metaspace::reserve_alignment_words());
  if (value > aligned_max) {
    CommandLineError::print(verbose,
                            "InitialBootClassLoaderMetaspaceSize (" SIZE_FORMAT ") must be "
                            "less than or equal to aligned maximum value (" SIZE_FORMAT ")\n",
                            value, aligned_max);
    return Flag::VIOLATES_CONSTRAINT;
  }
  return Flag::SUCCESS;
}
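The same bound check in isolation, with the Flag/CommandLineError machinery replaced by a plain boolean and an assumed 1 MB reserve alignment; only the shape of the aligned-maximum comparison is meant to carry over.

#include <cstddef>
#include <cstdint>
#include <cstdio>

static size_t align_down(size_t size, size_t alignment) {
  return size & ~(alignment - 1);   // power-of-two alignment assumed
}

// Accept 'value' only if it does not exceed the largest aligned value we allow.
static bool metaspace_size_ok(size_t value) {
  const size_t reserve_alignment = 1024 * 1024;            // assumed alignment
  const size_t aligned_max = align_down(SIZE_MAX / 2, reserve_alignment);
  if (value > aligned_max) {
    fprintf(stderr, "value %zu exceeds aligned maximum %zu\n", value, aligned_max);
    return false;
  }
  return true;
}

int main() {
  printf("512M ok? %d\n", metaspace_size_ok(512 * 1024 * 1024));  // prints 1
  return 0;
}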
Example #5
// Return the maximum size of a survivor space if the young generation were of
// size gen_size.
size_t max_survivor_size(size_t gen_size) {
  // Never allow the target survivor size to grow more than MinSurvivorRatio
  // of the young generation size.  We cannot grow into a two semi-space
  // system, with Eden zero sized.  Even if the survivor space grows, from()
  // might grow by moving the bottom boundary "down" -- so from space will
  // remain almost full anyway (top() will be near end(), but there will be a
  // large filler object at the bottom).
  const size_t sz = gen_size / MinSurvivorRatio;
  const size_t alignment = _intra_generation_alignment;
  return sz > alignment ? align_size_down(sz, alignment) : alignment;
}
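A worked instance of this cap with made-up numbers: MinSurvivorRatio = 3 and a 64K intra-generation alignment bound each survivor space at roughly a third of the generation, rounded down to the alignment and never below one alignment unit.

#include <cstddef>
#include <cstdio>

static size_t align_down(size_t size, size_t alignment) {
  return size & ~(alignment - 1);   // power-of-two alignment assumed
}

static size_t max_survivor_size(size_t gen_size, size_t min_survivor_ratio,
                                size_t alignment) {
  const size_t sz = gen_size / min_survivor_ratio;
  return sz > alignment ? align_down(sz, alignment) : alignment;
}

int main() {
  const size_t alignment = 64 * 1024;
  // 10 MB / 3 = 3495253 bytes, rounded down to 64K -> 3473408 bytes (3392 K).
  printf("%zu K\n", max_survivor_size(10 * 1024 * 1024, 3, alignment) / 1024);
  return 0;
}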
Example #6
size_t ASPSOldGen::available_for_contraction() {
  size_t uncommitted_bytes = virtual_space()->uncommitted_size();
  if (uncommitted_bytes != 0) {
    return uncommitted_bytes;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  const size_t gen_alignment = heap->old_gen_alignment();
  PSAdaptiveSizePolicy* policy = heap->size_policy();
  const size_t working_size =
    used_in_bytes() + (size_t) policy->avg_promoted()->padded_average();
  const size_t working_aligned = align_size_up(working_size, gen_alignment);
  const size_t working_or_min = MAX2(working_aligned, min_gen_size());
  if (working_or_min > reserved().byte_size()) {
    // If the used or minimum gen size (aligned up) is greater
    // than the total reserved size, then the space available
    // for contraction should (after proper alignment) be 0
    return 0;
  }
  const size_t max_contraction =
    reserved().byte_size() - working_or_min;

  // Use the "increment" fraction instead of the "decrement" fraction
  // to allow the other gen to expand more aggressively.  The
  // "decrement" fraction is conservative because its intent is to
  // only reduce the footprint.

  size_t result = policy->promo_increment_aligned_down(max_contraction);
  // Also adjust for inter-generational alignment
  size_t result_aligned = align_size_down(result, gen_alignment);
  if (PrintAdaptiveSizePolicy && Verbose) {
    gclog_or_tty->print_cr("\nASPSOldGen::available_for_contraction:"
      " %d K / 0x%x", result_aligned/K, result_aligned);
    gclog_or_tty->print_cr(" reserved().byte_size() %d K / 0x%x ",
      reserved().byte_size()/K, reserved().byte_size());
    size_t working_promoted = (size_t) policy->avg_promoted()->padded_average();
    gclog_or_tty->print_cr(" padded promoted %d K / 0x%x",
      working_promoted/K, working_promoted);
    gclog_or_tty->print_cr(" used %d K / 0x%x",
      used_in_bytes()/K, used_in_bytes());
    gclog_or_tty->print_cr(" min_gen_size() %d K / 0x%x",
      min_gen_size()/K, min_gen_size());
    gclog_or_tty->print_cr(" max_contraction %d K / 0x%x",
      max_contraction/K, max_contraction);
    gclog_or_tty->print_cr("    without alignment %d K / 0x%x",
      policy->promo_increment(max_contraction)/K,
      policy->promo_increment(max_contraction));
    gclog_or_tty->print_cr(" alignment 0x%x", gen_alignment);
  }
  assert(result_aligned <= max_contraction, "arithmetic is wrong");
  return result_aligned;
}
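Restating the core bound with hypothetical numbers (the promo_increment_aligned_down() fraction is dropped): the working set is the used bytes plus the padded average promotion, aligned up, and contraction can never take the committed size below that working set or below the minimum generation size.

#include <algorithm>
#include <cstddef>
#include <cstdio>

static size_t align_down(size_t s, size_t a) { return s & ~(a - 1); }
static size_t align_up(size_t s, size_t a)   { return align_down(s + a - 1, a); }

int main() {
  const size_t gen_alignment   = 64 * 1024;
  const size_t reserved        = 256 * 1024 * 1024;
  const size_t used            = 180 * 1024 * 1024;
  const size_t padded_promoted = 10 * 1024 * 1024;   // assumed padded avg promotion
  const size_t min_gen_size    = 64 * 1024 * 1024;

  size_t working = align_up(used + padded_promoted, gen_alignment);
  size_t working_or_min = std::max(working, min_gen_size);
  if (working_or_min > reserved) {
    printf("no room to contract\n");
    return 0;
  }
  size_t max_contraction = align_down(reserved - working_or_min, gen_alignment);
  printf("max contraction: %zu K\n", max_contraction / 1024);   // 67584 K here
  return 0;
}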
Example #7
// This method assumes that from-space has live data and that
// any shrinkage of the young gen is limited by location of
// from-space.
size_t ASParNewGeneration::available_to_live() const {
#undef SHRINKS_AT_END_OF_EDEN
#ifdef SHRINKS_AT_END_OF_EDEN
  size_t delta_in_survivor = 0;
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  const size_t space_alignment = heap->intra_heap_alignment();
  const size_t gen_alignment = heap->object_heap_alignment();

  MutableSpace* space_shrinking = NULL;
  if (from_space()->end() > to_space()->end()) {
    space_shrinking = from_space();
  } else {
    space_shrinking = to_space();
  }

  // Include any space that is committed but not included in
  // the survivor spaces.
  assert(((HeapWord*)virtual_space()->high()) >= space_shrinking->end(),
    "Survivor space beyond high end");
  size_t unused_committed = pointer_delta(virtual_space()->high(),
    space_shrinking->end(), sizeof(char));

  if (space_shrinking->is_empty()) {
    // Don't let the space shrink to 0
    assert(space_shrinking->capacity_in_bytes() >= space_alignment,
      "Space is too small");
    delta_in_survivor = space_shrinking->capacity_in_bytes() - space_alignment;
  } else {
    delta_in_survivor = pointer_delta(space_shrinking->end(),
                                      space_shrinking->top(),
                                      sizeof(char));
  }

  size_t delta_in_bytes = unused_committed + delta_in_survivor;
  delta_in_bytes = align_size_down(delta_in_bytes, gen_alignment);
  return delta_in_bytes;
#else
  // The only space available for shrinking is in to-space if it
  // is above from-space.
  if (to()->bottom() > from()->bottom()) {
    const size_t alignment = os::vm_page_size();
    if (to()->capacity() < alignment) {
      return 0;
    } else {
      return to()->capacity() - alignment;
    }
  } else {
    return 0;
  }
#endif
}
Example #8
void TwoGenerationCollectorPolicy::initialize_flags() {
  GenCollectorPolicy::initialize_flags();

  OldSize = align_size_down(OldSize, min_alignment());
  if (NewSize + OldSize > MaxHeapSize) {
    MaxHeapSize = NewSize + OldSize;
  }
  MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());

  always_do_update_barrier = UseConcMarkSweepGC;

  // Check validity of heap flags
  assert(OldSize     % min_alignment() == 0, "old space alignment");
  assert(MaxHeapSize % max_alignment() == 0, "maximum heap alignment");
}
Example #9
// Call this method during the sizing of the gen1 to make
// adjustments to gen0 because of gen1 sizing policy.  gen0 initially has
// the most freedom in sizing because it is done before the
// policy for gen1 is applied.  Once gen1 policies have been applied,
// there may be conflicts in the shape of the heap and this method
// is used to make the needed adjustments.  The application of the
// policies could be more sophisticated (iterative for example) but
// keeping it simple also seems a worthwhile goal.
bool TwoGenerationCollectorPolicy::adjust_gen0_sizes(size_t* gen0_size_ptr,
                                                     size_t* gen1_size_ptr,
                                                     size_t heap_size,
                                                     size_t min_gen0_size) {
  bool result = false;
  if ((*gen1_size_ptr + *gen0_size_ptr) > heap_size) {
    if (((*gen0_size_ptr + OldSize) > heap_size) &&
       (heap_size - min_gen0_size) >= min_alignment()) {
      // Adjust gen0 down to accommodate OldSize
      *gen0_size_ptr = heap_size - min_gen0_size;
      *gen0_size_ptr =
        MAX2((size_t)align_size_down(*gen0_size_ptr, min_alignment()),
             min_alignment());
      assert(*gen0_size_ptr > 0, "Min gen0 is too large");
      result = true;
    } else {
      *gen1_size_ptr = heap_size - *gen0_size_ptr;
      *gen1_size_ptr =
        MAX2((size_t)align_size_down(*gen1_size_ptr, min_alignment()),
                       min_alignment());
    }
  }
  return result;
}
void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size) {
  // All space sizes must be multiples of car size in order for the CarTable to work.
  // Note that the CarTable is used with and without train gc (for fast lookup).
  uintx alignment = CarSpace::car_size();

  // Compute sizes
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, alignment);
  uintx eden_size = size - (2*survivor_size);
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  if (eden_size < minimum_eden_size) {
    // May happen due to 64Kb rounding, if so adjust eden size back up
    minimum_eden_size = align_size_up(minimum_eden_size, alignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size = 
      align_size_down(maximum_survivor_size, alignment);
    survivor_size = MAX2(unaligned_survivor_size, alignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }

  char *eden_start = _virtual_space.low();
  char *from_start = eden_start + eden_size;
  char *to_start   = from_start + survivor_size;
  char *to_end     = to_start   + survivor_size;

  assert(to_end == _virtual_space.high(), "just checking");
  assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)to_start),   "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start, (HeapWord*)to_end);

  eden()->initialize(edenMR, (minimum_eden_size == 0));
  from()->initialize(fromMR, true);
    to()->initialize(toMR  , true);

  if (jvmpi::is_event_enabled(JVMPI_EVENT_ARENA_NEW)) {
    CollectedHeap* ch = Universe::heap();
    jvmpi::post_arena_new_event(ch->addr_to_arena_id(eden_start), "Eden");
    jvmpi::post_arena_new_event(ch->addr_to_arena_id(from_start), "Semi");
    jvmpi::post_arena_new_event(ch->addr_to_arena_id(to_start), "Semi");
  }
}
const size_t ThreadLocalAllocBuffer::max_size() {

  // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
  // This restriction could be removed by enabling filling with multiple arrays.
  // If we compute that the reasonable way as
  //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
  // we'll overflow on the multiply, so we do the divide first.
  // We actually lose a little by dividing first,
  // but that just makes the TLAB somewhat smaller than the biggest array,
  // which is fine, since we'll be able to fill that.

  size_t unaligned_max_size = typeArrayOopDesc::header_size(T_INT) +
                              sizeof(jint) *
                              ((juint) max_jint / (size_t) HeapWordSize);
  return align_size_down(unaligned_max_size, MinObjAlignment);
}
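The overflow concern in the comment is specific to 32-bit arithmetic: sizeof(jint) * max_jint is about 8 GB, which does not fit in a 32-bit size_t, so the division has to happen first. A small demonstration using explicit 32-bit types; the header size and HeapWord size are assumed values.

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t max_jint       = 0x7fffffff;   // Integer.MAX_VALUE
  const uint32_t heap_word_size = 8;            // assumed 64-bit HeapWord
  const uint32_t header_words   = 2;            // assumed int[] header size in words

  // Divide first: the intermediate values stay inside 32 bits.
  uint32_t divide_first = header_words + sizeof(int32_t) * (max_jint / heap_word_size);
  // Multiply first: sizeof(jint) * max_jint is about 8G, so forcing it into
  // 32 bits wraps around and the result comes out wrong.
  uint32_t multiply_first = header_words + (uint32_t)(sizeof(int32_t) * max_jint) / heap_word_size;

  printf("divide first:   %u words\n", divide_first);    // 1073741822
  printf("multiply first: %u words\n", multiply_first);  // 536870913 after wrap-around
  return 0;
}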
Example #12
void CollectorPolicy::initialize_flags() {
  assert(max_alignment() >= min_alignment(),
      err_msg("max_alignment: " SIZE_FORMAT " less than min_alignment: " SIZE_FORMAT,
          max_alignment(), min_alignment()));
  assert(max_alignment() % min_alignment() == 0,
      err_msg("max_alignment: " SIZE_FORMAT " not aligned by min_alignment: " SIZE_FORMAT,
          max_alignment(), min_alignment()));

  if (PermSize > MaxPermSize) {
    MaxPermSize = PermSize;
  }
  PermSize = MAX2(min_alignment(), align_size_down_(PermSize, min_alignment()));
  // Don't increase Perm size limit above specified.
  MaxPermSize = align_size_down(MaxPermSize, max_alignment());
  if (PermSize > MaxPermSize) {
    PermSize = MaxPermSize;
  }

  MinPermHeapExpansion = MAX2(min_alignment(), align_size_down_(MinPermHeapExpansion, min_alignment()));
  MaxPermHeapExpansion = MAX2(min_alignment(), align_size_down_(MaxPermHeapExpansion, min_alignment()));

  MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, min_alignment());

  SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment());
  SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment());
  SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment());

  assert(PermSize    % min_alignment() == 0, "permanent space alignment");
  assert(MaxPermSize % max_alignment() == 0, "maximum permanent space alignment");
  assert(SharedReadOnlySize % max_alignment() == 0, "read-only space alignment");
  assert(SharedReadWriteSize % max_alignment() == 0, "read-write space alignment");
  assert(SharedMiscDataSize % max_alignment() == 0, "misc-data space alignment");
  if (PermSize < M) {
    vm_exit_during_initialization("Too small initial permanent heap");
  }
}
Example #13
void GenCollectorPolicy::initialize_flags() {
  CollectorPolicy::initialize_flags();

  assert(_gen_alignment != 0, "Generation alignment not set up properly");
  assert(_heap_alignment >= _gen_alignment,
         err_msg("heap_alignment: " SIZE_FORMAT " less than gen_alignment: " SIZE_FORMAT,
                 _heap_alignment, _gen_alignment));
  assert(_gen_alignment % _space_alignment == 0,
         err_msg("gen_alignment: " SIZE_FORMAT " not aligned by space_alignment: " SIZE_FORMAT,
                 _gen_alignment, _space_alignment));
  assert(_heap_alignment % _gen_alignment == 0,
         err_msg("heap_alignment: " SIZE_FORMAT " not aligned by gen_alignment: " SIZE_FORMAT,
                 _heap_alignment, _gen_alignment));

  // All generational heaps have a youngest gen; handle those flags here

  // Make sure the heap is large enough for two generations
  uintx smallest_new_size = young_gen_size_lower_bound();
  uintx smallest_heap_size = align_size_up(smallest_new_size + align_size_up(_space_alignment, _gen_alignment),
                                           _heap_alignment);
  if (MaxHeapSize < smallest_heap_size) {
    FLAG_SET_ERGO(uintx, MaxHeapSize, smallest_heap_size);
    _max_heap_byte_size = MaxHeapSize;
  }
  // If needed, synchronize _min_heap_byte size and _initial_heap_byte_size
  if (_min_heap_byte_size < smallest_heap_size) {
    _min_heap_byte_size = smallest_heap_size;
    if (InitialHeapSize < _min_heap_byte_size) {
      FLAG_SET_ERGO(uintx, InitialHeapSize, smallest_heap_size);
      _initial_heap_byte_size = smallest_heap_size;
    }
  }

  // Now take the actual NewSize into account. We will silently increase NewSize
  // if the user specified a smaller or unaligned value.
  smallest_new_size = MAX2(smallest_new_size, (uintx)align_size_down(NewSize, _gen_alignment));
  if (smallest_new_size != NewSize) {
    // Do not use FLAG_SET_ERGO to update NewSize here, since that would override
    // the record of whether NewSize was set on the command line or not. This
    // information is needed later when setting the initial and minimum young
    // generation size.
    NewSize = smallest_new_size;
  }
  _initial_gen0_size = NewSize;

  if (!FLAG_IS_DEFAULT(MaxNewSize)) {
    uintx min_new_size = MAX2(_gen_alignment, _min_gen0_size);

    if (MaxNewSize >= MaxHeapSize) {
      // Make sure there is room for an old generation
      uintx smaller_max_new_size = MaxHeapSize - _gen_alignment;
      if (FLAG_IS_CMDLINE(MaxNewSize)) {
        warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or greater than the entire "
                "heap (" SIZE_FORMAT "k).  A new max generation size of " SIZE_FORMAT "k will be used.",
                MaxNewSize/K, MaxHeapSize/K, smaller_max_new_size/K);
      }
      FLAG_SET_ERGO(uintx, MaxNewSize, smaller_max_new_size);
      if (NewSize > MaxNewSize) {
        FLAG_SET_ERGO(uintx, NewSize, MaxNewSize);
        _initial_gen0_size = NewSize;
      }
    } else if (MaxNewSize < min_new_size) {
      FLAG_SET_ERGO(uintx, MaxNewSize, min_new_size);
    } else if (!is_size_aligned(MaxNewSize, _gen_alignment)) {
      FLAG_SET_ERGO(uintx, MaxNewSize, align_size_down(MaxNewSize, _gen_alignment));
    }
    _max_gen0_size = MaxNewSize;
  }

  if (NewSize > MaxNewSize) {
    // At this point this should only happen if the user specifies a large NewSize and/or
    // a small (but not too small) MaxNewSize.
    if (FLAG_IS_CMDLINE(MaxNewSize)) {
      warning("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). "
              "A new max generation size of " SIZE_FORMAT "k will be used.",
              NewSize/K, MaxNewSize/K, NewSize/K);
    }
    FLAG_SET_ERGO(uintx, MaxNewSize, NewSize);
    _max_gen0_size = MaxNewSize;
  }

  if (SurvivorRatio < 1 || NewRatio < 1) {
    vm_exit_during_initialization("Invalid young gen ratio specified");
  }

  DEBUG_ONLY(GenCollectorPolicy::assert_flags();)
}
Example #14
// Values set on the command line win over any ergonomically
// set command line parameters.
// Ergonomic choice of parameters are done before this
// method is called.  Values for command line parameters such as NewSize
// and MaxNewSize feed those ergonomic choices into this method.
// This method makes the final generation sizings consistent with
// themselves and with overall heap sizings.
// In the absence of explicitly set command line flags, policies
// such as the use of NewRatio are used to size the generation.
void GenCollectorPolicy::initialize_size_info() {
  CollectorPolicy::initialize_size_info();

  // min_alignment() is used for alignment within a generation.
  // There is additional alignment done downstream for some
  // collectors that sometimes causes unwanted rounding up of
  // generation sizes.

  // Determine maximum size of gen0

  size_t max_new_size = 0;
  if (FLAG_IS_CMDLINE(MaxNewSize)) {
    if (MaxNewSize < min_alignment()) {
      max_new_size = min_alignment();
    } else if (MaxNewSize >= max_heap_byte_size()) {
      max_new_size = align_size_down(max_heap_byte_size() - min_alignment(),
                                     min_alignment());
      warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or "
        "greater than the entire heap (" SIZE_FORMAT "k).  A "
        "new generation size of " SIZE_FORMAT "k will be used.",
        MaxNewSize/K, max_heap_byte_size()/K, max_new_size/K);
    } else {
      max_new_size = align_size_down(MaxNewSize, min_alignment());
    }

  // The case for FLAG_IS_ERGO(MaxNewSize) could be treated
  // specially at this point to just use an ergonomically set
  // MaxNewSize to set max_new_size.  For cases with small
  // heaps such a policy often did not work because the MaxNewSize
  // was larger than the entire heap.  The interpretation given
  // to ergonomically set flags is that the flags are set
  // by different collectors for their own special needs but
  // are not allowed to badly shape the heap.  This allows the
  // different collectors to decide what's best for themselves
  // without having to factor in the overall heap shape.  It
  // can be the case in the future that the collectors would
  // only make "wise" ergonomics choices and this policy could
  // just accept those choices.  The choices currently made are
  // not always "wise".
  } else {
    max_new_size = scale_by_NewRatio_aligned(max_heap_byte_size());
    // Bound the maximum size by NewSize below (since it historically
    // would have been NewSize and because the NewRatio calculation could
    // yield a size that is too small) and bound it by MaxNewSize above.
    // Ergonomics plays here by previously calculating the desired
    // NewSize and MaxNewSize.
    max_new_size = MIN2(MAX2(max_new_size, (size_t)NewSize), (size_t)MaxNewSize);
  }
  assert(max_new_size > 0, "All paths should set max_new_size");

  // Given the maximum gen0 size, determine the initial and
  // minimum sizes.

  if (max_heap_byte_size() == min_heap_byte_size()) {
    // The maximum and minimum heap sizes are the same so
    // the generations minimum and initial must be the
    // same as its maximum.
    set_min_gen0_size(max_new_size);
    set_initial_gen0_size(max_new_size);
    set_max_gen0_size(max_new_size);
  } else {
    size_t desired_new_size = 0;
    if (!FLAG_IS_DEFAULT(NewSize)) {
      // If NewSize is set ergonomically (for example by cms), it
      // would make sense to use it.  If it is used, also use it
      // to set the initial size.  Although there is no reason
      // the minimum size and the initial size have to be the same,
      // the current implementation gets into trouble during the calculation
      // of the tenured generation sizes if they are different.
      // Note that this makes the initial size and the minimum size
      // generally small compared to the NewRatio calculation.
      _min_gen0_size = NewSize;
      desired_new_size = NewSize;
      max_new_size = MAX2(max_new_size, (size_t) NewSize);
    } else {
      // For the case where NewSize is the default, use NewRatio
      // to size the minimum and initial generation sizes.
      // Use the default NewSize as the floor for these values.  If
      // NewRatio is overly large, the resulting sizes can be too
      // small.
      _min_gen0_size = MAX2(scale_by_NewRatio_aligned(min_heap_byte_size()),
                          (size_t) NewSize);
      desired_new_size =
        MAX2(scale_by_NewRatio_aligned(initial_heap_byte_size()),
             (size_t) NewSize);
    }

    assert(_min_gen0_size > 0, "Sanity check");
    set_initial_gen0_size(desired_new_size);
    set_max_gen0_size(max_new_size);

    // At this point the desirable initial and minimum sizes have been
    // determined without regard to the maximum sizes.

    // Bound the sizes by the corresponding overall heap sizes.
    set_min_gen0_size(
      bound_minus_alignment(_min_gen0_size, min_heap_byte_size()));
    set_initial_gen0_size(
      bound_minus_alignment(_initial_gen0_size, initial_heap_byte_size()));
    set_max_gen0_size(
      bound_minus_alignment(_max_gen0_size, max_heap_byte_size()));

    // At this point all three sizes have been checked against the
    // maximum sizes but have not been checked for consistency
    // among the three.

    // Final check min <= initial <= max
    set_min_gen0_size(MIN2(_min_gen0_size, _max_gen0_size));
    set_initial_gen0_size(
      MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size));
    set_min_gen0_size(MIN2(_min_gen0_size, _initial_gen0_size));
  }

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
      SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
      min_gen0_size(), initial_gen0_size(), max_gen0_size());
  }
}
int ZeroStack::suggest_size(Thread *thread) const {
  assert(needs_setup(), "already set up");
  return align_size_down(abi_stack_available(thread) / 2, wordSize);
}
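In isolation, the suggestion is just half of the available ABI stack rounded down to a whole machine word; the stack figure below is invented.

#include <cstddef>
#include <cstdio>

int main() {
  const size_t word_size = sizeof(void*);              // 8 on a 64-bit host
  const size_t abi_stack_available = 517 * 1024 + 3;   // hypothetical odd amount
  // Mirrors align_size_down(available / 2, wordSize).
  size_t suggested = (abi_stack_available / 2) & ~(word_size - 1);
  printf("suggested Zero stack size: %zu bytes\n", suggested);
  return 0;
}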
Example #16
void ParGCAllocBufferWithBOT::retire(bool end_of_gc, bool retain) {
  assert(!retain || end_of_gc, "Can only retain at GC end.");
  if (_retained) {
    // We're about to make the retained_filler into a block.
    _bt.BlockOffsetArray::alloc_block(_retained_filler.start(),
                                      _retained_filler.end());
  }
  // Reset _hard_end to _true_end (and update _end)
  if (retain && _hard_end != NULL) {
    assert(_hard_end <= _true_end, "Invariant.");
    _hard_end = _true_end;
    _end      = MAX2(_top, _hard_end - AlignmentReserve);
    assert(_end <= _hard_end, "Invariant.");
  }
  _true_end = _hard_end;
  HeapWord* pre_top = _top;

  ParGCAllocBuffer::retire(end_of_gc, retain);
  // Now any old _retained_filler is cut back to size, the free part is
  // filled with a filler object, and top is past the header of that
  // object.

  if (retain && _top < _end) {
    assert(end_of_gc && retain, "Or else retain should be false.");
    // If the lab does not start on a card boundary, we don't want to
    // allocate onto that card, since that might lead to concurrent
    // allocation and card scanning, which we don't support.  So we fill
    // the first card with a garbage object.
    size_t first_card_index = _bsa->index_for(pre_top);
    HeapWord* first_card_start = _bsa->address_for_index(first_card_index);
    if (first_card_start < pre_top) {
      HeapWord* second_card_start =
        _bsa->inc_by_region_size(first_card_start);

      // Ensure enough room to fill with the smallest block
      second_card_start = MAX2(second_card_start, pre_top + AlignmentReserve);

      // If the end is already in the first card, don't go beyond it!
      // Or if the remainder is too small for a filler object, gobble it up.
      if (_hard_end < second_card_start ||
          pointer_delta(_hard_end, second_card_start) < AlignmentReserve) {
        second_card_start = _hard_end;
      }
      if (pre_top < second_card_start) {
        MemRegion first_card_suffix(pre_top, second_card_start);
        fill_region_with_block(first_card_suffix, true);
      }
      pre_top = second_card_start;
      _top = pre_top;
      _end = MAX2(_top, _hard_end - AlignmentReserve);
    }

    // If the lab does not end on a card boundary, we don't want to
    // allocate onto that card, since that might lead to concurrent
    // allocation and card scanning, which we don't support.  So we fill
    // the last card with a garbage object.
    size_t last_card_index = _bsa->index_for(_hard_end);
    HeapWord* last_card_start = _bsa->address_for_index(last_card_index);
    if (last_card_start < _hard_end) {

      // Ensure enough room to fill with the smallest block
      last_card_start = MIN2(last_card_start, _hard_end - AlignmentReserve);

      // If the top is already in the last card, don't go back beyond it!
      // Or if the remainder is too small for a filler object, gobble it up.
      if (_top > last_card_start ||
          pointer_delta(last_card_start, _top) < AlignmentReserve) {
        last_card_start = _top;
      }
      if (last_card_start < _hard_end) {
        MemRegion last_card_prefix(last_card_start, _hard_end);
        fill_region_with_block(last_card_prefix, false);
      }
      _hard_end = last_card_start;
      _end      = MAX2(_top, _hard_end - AlignmentReserve);
      _true_end = _hard_end;
      assert(_end <= _hard_end, "Invariant.");
    }

    // At this point:
    //   1) we had a filler object from the original top to hard_end.
    //   2) We've filled in any partial cards at the front and back.
    if (pre_top < _hard_end) {
      // Now we can reset the _bt to do allocation in the given area.
      MemRegion new_filler(pre_top, _hard_end);
      fill_region_with_block(new_filler, false);
      _top = pre_top + ParGCAllocBuffer::FillerHeaderSize;
      // If there's no space left, don't retain.
      if (_top >= _end) {
        _retained = false;
        invalidate();
        return;
      }
      _retained_filler = MemRegion(pre_top, _top);
      _bt.set_region(MemRegion(_top, _hard_end));
      _bt.initialize_threshold();
      assert(_bt.threshold() > _top, "initialize_threshold failed!");

      // There may be other reasons for queries into the middle of the
      // filler object.  When such queries are done in parallel with
      // allocation, bad things can happen, if the query involves object
      // iteration.  So we ensure that such queries do not involve object
      // iteration, by putting another filler object on the boundaries of
      // such queries.  One such is the object spanning a parallel card
      // chunk boundary.

      // "chunk_boundary" is the address of the first chunk boundary less
      // than "hard_end".
      HeapWord* chunk_boundary =
        (HeapWord*)align_size_down(intptr_t(_hard_end-1), ChunkSizeInBytes);
      assert(chunk_boundary < _hard_end, "Or else above did not work.");
      assert(pointer_delta(_true_end, chunk_boundary) >= AlignmentReserve,
             "Consequence of last card handling above.");

      if (_top <= chunk_boundary) {
        assert(_true_end == _hard_end, "Invariant.");
        while (_top <= chunk_boundary) {
          assert(pointer_delta(_hard_end, chunk_boundary) >= AlignmentReserve,
                 "Consequence of last card handling above.");
          _bt.BlockOffsetArray::alloc_block(chunk_boundary, _hard_end);
          CollectedHeap::fill_with_object(chunk_boundary, _hard_end);
          _hard_end = chunk_boundary;
          chunk_boundary -= ChunkSizeInWords;
        }
        _end = _hard_end - AlignmentReserve;
        assert(_top <= _end, "Invariant.");
        // Now reset the initial filler chunk so it doesn't overlap with
        // the one(s) inserted above.
        MemRegion new_filler(pre_top, _hard_end);
        fill_region_with_block(new_filler, false);
      }
    } else {
      _retained = false;
      invalidate();
    }
  } else {
    assert(!end_of_gc ||
           (!_retained && _true_end == _hard_end), "Checking.");
  }
  assert(_end <= _hard_end, "Invariant.");
  assert(_top < _end || _top == _hard_end, "Invariant");
}
Example #17
void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
                                                bool clear_space,
                                                bool mangle_space) {
  uintx alignment =
    GenCollectedHeap::heap()->collector_policy()->space_alignment();

  // If the spaces are being cleared (only done at heap initialization
  // currently), the survivor spaces need not be empty.
  // Otherwise, no care is taken for used areas in the survivor spaces
  // so check.
  assert(clear_space || (to()->is_empty() && from()->is_empty()),
    "Initialization of the survivor spaces assumes these are empty");

  // Compute sizes
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, alignment);
  uintx eden_size = size - (2*survivor_size);
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  if (eden_size < minimum_eden_size) {
    // May happen due to 64Kb rounding, if so adjust eden size back up
    minimum_eden_size = align_size_up(minimum_eden_size, alignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size =
      align_size_down(maximum_survivor_size, alignment);
    survivor_size = MAX2(unaligned_survivor_size, alignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }

  char *eden_start = _virtual_space.low();
  char *from_start = eden_start + eden_size;
  char *to_start   = from_start + survivor_size;
  char *to_end     = to_start   + survivor_size;

  assert(to_end == _virtual_space.high(), "just checking");
  assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)to_start),   "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start, (HeapWord*)to_end);

  // A minimum eden size implies that there is a part of eden that
  // is being used and that affects the initialization of any
  // newly formed eden.
  bool live_in_eden = minimum_eden_size > 0;

  // If not clearing the spaces, do some checking to verify that
  // the space are already mangled.
  if (!clear_space) {
    // Must check mangling before the spaces are reshaped.  Otherwise,
    // the bottom or end of one space may have moved into another
    // a failure of the check may not correctly indicate which space
    // is not properly mangled.
    if (ZapUnusedHeapArea) {
      HeapWord* limit = (HeapWord*) _virtual_space.high();
      eden()->check_mangled_unused_area(limit);
      from()->check_mangled_unused_area(limit);
        to()->check_mangled_unused_area(limit);
    }
  }

  // Reset the spaces for their new regions.
  eden()->initialize(edenMR,
                     clear_space && !live_in_eden,
                     SpaceDecorator::Mangle);
  // If clear_space and live_in_eden, we will not have cleared any
  // portion of eden above its top. This can cause newly
  // expanded space not to be mangled if using ZapUnusedHeapArea.
  // We explicitly do such mangling here.
  if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
    eden()->mangle_unused_area();
  }
  from()->initialize(fromMR, clear_space, mangle_space);
  to()->initialize(toMR, clear_space, mangle_space);

  // Set next compaction spaces.
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);
}
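A compact sketch of the layout this function establishes: eden sits at the low end of the committed region and the two equally sized survivor spaces follow back to back, so to_end must land exactly on the high boundary. The committed size, survivor size, and alignment below are assumptions.

#include <cassert>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t alignment     = 64 * 1024;          // assumed space alignment
  const size_t committed     = 16 * 1024 * 1024;   // committed young gen size
  const size_t survivor_size = 2 * 1024 * 1024;    // already aligned
  const size_t eden_size     = committed - 2 * survivor_size;
  assert(eden_size > 0 && survivor_size <= eden_size);
  assert(eden_size % alignment == 0 && survivor_size % alignment == 0);

  // Lay the spaces out the same way compute_space_boundaries() does.
  const size_t eden_start = 0;                     // offsets from _virtual_space.low()
  const size_t from_start = eden_start + eden_size;
  const size_t to_start   = from_start + survivor_size;
  const size_t to_end     = to_start   + survivor_size;
  assert(to_end == committed);                     // to_end lines up with the high boundary

  printf("eden [%zu, %zu)  from [%zu, %zu)  to [%zu, %zu)\n",
         eden_start, from_start, from_start, to_start, to_start, to_end);
  return 0;
}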
Example #18
void CodeCache::initialize_heaps() {
  bool non_nmethod_set      = FLAG_IS_CMDLINE(NonNMethodCodeHeapSize);
  bool profiled_set         = FLAG_IS_CMDLINE(ProfiledCodeHeapSize);
  bool non_profiled_set     = FLAG_IS_CMDLINE(NonProfiledCodeHeapSize);
  size_t min_size           = os::vm_page_size();
  size_t cache_size         = ReservedCodeCacheSize;
  size_t non_nmethod_size   = NonNMethodCodeHeapSize;
  size_t profiled_size      = ProfiledCodeHeapSize;
  size_t non_profiled_size  = NonProfiledCodeHeapSize;
  // Check if total size set via command line flags exceeds the reserved size
  check_heap_sizes((non_nmethod_set  ? non_nmethod_size  : min_size),
                   (profiled_set     ? profiled_size     : min_size),
                   (non_profiled_set ? non_profiled_size : min_size),
                   cache_size,
                   non_nmethod_set && profiled_set && non_profiled_set);

  // Determine size of compiler buffers
  size_t code_buffers_size = 0;
#ifdef COMPILER1
  // C1 temporary code buffers (see Compiler::init_buffer_blob())
  const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
  code_buffers_size += c1_count * Compiler::code_buffer_size();
#endif
#ifdef COMPILER2
  // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
  const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
  // Initial size of constant table (this may be increased if a compiled method needs more space)
  code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
#endif

  // Increase default non_nmethod_size to account for compiler buffers
  if (!non_nmethod_set) {
    non_nmethod_size += code_buffers_size;
  }
  // Calculate default CodeHeap sizes if not set by user
  if (!non_nmethod_set && !profiled_set && !non_profiled_set) {
    // Check if we have enough space for the non-nmethod code heap
    if (cache_size > non_nmethod_size) {
      // Use the default value for non_nmethod_size and one half of the
      // remaining size for non-profiled and one half for profiled methods
      size_t remaining_size = cache_size - non_nmethod_size;
      profiled_size = remaining_size / 2;
      non_profiled_size = remaining_size - profiled_size;
    } else {
      // Use all space for the non-nmethod heap and set other heaps to minimal size
      non_nmethod_size = cache_size - 2 * min_size;
      profiled_size = min_size;
      non_profiled_size = min_size;
    }
  } else if (!non_nmethod_set || !profiled_set || !non_profiled_set) {
    // The user explicitly set some code heap sizes. Increase or decrease the (default)
    // sizes of the other code heaps accordingly. First adapt non-profiled and profiled
    // code heap sizes and then only change non-nmethod code heap size if still necessary.
    intx diff_size = cache_size - (non_nmethod_size + profiled_size + non_profiled_size);
    if (non_profiled_set) {
      if (!profiled_set) {
        // Adapt size of profiled code heap
        if (diff_size < 0 && ((intx)profiled_size + diff_size) <= 0) {
          // Not enough space available, set to minimum size
          diff_size += profiled_size - min_size;
          profiled_size = min_size;
        } else {
          profiled_size += diff_size;
          diff_size = 0;
        }
      }
    } else if (profiled_set) {
      // Adapt size of non-profiled code heap
      if (diff_size < 0 && ((intx)non_profiled_size + diff_size) <= 0) {
        // Not enough space available, set to minimum size
        diff_size += non_profiled_size - min_size;
        non_profiled_size = min_size;
      } else {
        non_profiled_size += diff_size;
        diff_size = 0;
      }
    } else if (non_nmethod_set) {
      // Distribute remaining size between profiled and non-profiled code heaps
      diff_size = cache_size - non_nmethod_size;
      profiled_size = diff_size / 2;
      non_profiled_size = diff_size - profiled_size;
      diff_size = 0;
    }
    if (diff_size != 0) {
      // Use non-nmethod code heap for remaining space requirements
      assert(!non_nmethod_set && ((intx)non_nmethod_size + diff_size) > 0, "sanity");
      non_nmethod_size += diff_size;
    }
  }

  // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
  if(!heap_available(CodeBlobType::MethodProfiled)) {
    non_profiled_size += profiled_size;
    profiled_size = 0;
  }
  // We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
  if(!heap_available(CodeBlobType::MethodNonProfiled)) {
    non_nmethod_size += non_profiled_size;
    non_profiled_size = 0;
  }
  // Make sure we have enough space for VM internal code
  uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
  if (non_nmethod_size < (min_code_cache_size + code_buffers_size)) {
    vm_exit_during_initialization(err_msg(
        "Not enough space in non-nmethod code heap to run VM: %zuK < %zuK",
        non_nmethod_size/K, (min_code_cache_size + code_buffers_size)/K));
  }

  // Verify sizes and update flag values
  assert(non_profiled_size + profiled_size + non_nmethod_size == cache_size, "Invalid code heap sizes");
  FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, non_nmethod_size);
  FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
  FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);

  // Align CodeHeaps
  size_t alignment = heap_alignment();
  non_nmethod_size = align_size_up(non_nmethod_size, alignment);
  profiled_size   = align_size_down(profiled_size, alignment);

  // Reserve one continuous chunk of memory for CodeHeaps and split it into
  // parts for the individual heaps. The memory layout looks like this:
  // ---------- high -----------
  //    Non-profiled nmethods
  //      Profiled nmethods
  //         Non-nmethods
  // ---------- low ------------
  ReservedCodeSpace rs = reserve_heap_memory(cache_size);
  ReservedSpace non_method_space    = rs.first_part(non_nmethod_size);
  ReservedSpace rest                = rs.last_part(non_nmethod_size);
  ReservedSpace profiled_space      = rest.first_part(profiled_size);
  ReservedSpace non_profiled_space  = rest.last_part(profiled_size);

  // Non-nmethods (stubs, adapters, ...)
  add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
  // Tier 2 and tier 3 (profiled) methods
  add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
  // Tier 1 and tier 4 (non-profiled) methods and native methods
  add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
}
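The default sizing branch reduces to simple arithmetic: keep the non-nmethod heap at its buffer-adjusted default and split whatever remains evenly between the profiled and non-profiled heaps, so the three sizes always sum back to the reserved cache size. A sketch with invented numbers:

#include <cassert>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t cache_size        = 240 * 1024 * 1024;  // hypothetical ReservedCodeCacheSize
  size_t non_nmethod_size        = 5 * 1024 * 1024;    // assumed default non-nmethod heap
  const size_t code_buffers_size = 1 * 1024 * 1024;    // assumed compiler buffer total
  non_nmethod_size += code_buffers_size;

  // No code heap size set on the command line: split the remainder in half.
  size_t remaining_size    = cache_size - non_nmethod_size;
  size_t profiled_size     = remaining_size / 2;
  size_t non_profiled_size = remaining_size - profiled_size;

  assert(non_nmethod_size + profiled_size + non_profiled_size == cache_size);
  printf("non-nmethod %zuK, profiled %zuK, non-profiled %zuK\n",
         non_nmethod_size / 1024, profiled_size / 1024, non_profiled_size / 1024);
  return 0;
}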
Example #19
void ASParNewGeneration::resize_spaces(size_t requested_eden_size,
                                       size_t requested_survivor_size) {
  assert(UseAdaptiveSizePolicy, "sanity check");
  assert(requested_eden_size > 0  && requested_survivor_size > 0,
         "just checking");
  CollectedHeap* heap = Universe::heap();
  assert(heap->kind() == CollectedHeap::GenCollectedHeap, "Sanity");


  // We require eden and to space to be empty
  if ((!eden()->is_empty()) || (!to()->is_empty())) {
    return;
  }

  size_t cur_eden_size = eden()->capacity();

  if (PrintAdaptiveSizePolicy && Verbose) {
    gclog_or_tty->print_cr("ASParNew::resize_spaces(requested_eden_size: "
                  SIZE_FORMAT
                  ", requested_survivor_size: " SIZE_FORMAT ")",
                  requested_eden_size, requested_survivor_size);
    gclog_or_tty->print_cr("    eden: [" PTR_FORMAT ".." PTR_FORMAT ") "
                  SIZE_FORMAT,
                  eden()->bottom(),
                  eden()->end(),
                  pointer_delta(eden()->end(),
                                eden()->bottom(),
                                sizeof(char)));
    gclog_or_tty->print_cr("    from: [" PTR_FORMAT ".." PTR_FORMAT ") "
                  SIZE_FORMAT,
                  from()->bottom(),
                  from()->end(),
                  pointer_delta(from()->end(),
                                from()->bottom(),
                                sizeof(char)));
    gclog_or_tty->print_cr("      to: [" PTR_FORMAT ".." PTR_FORMAT ") "
                  SIZE_FORMAT,
                  to()->bottom(),
                  to()->end(),
                  pointer_delta(  to()->end(),
                                  to()->bottom(),
                                  sizeof(char)));
  }

  // There's nothing to do if the new sizes are the same as the current
  if (requested_survivor_size == to()->capacity() &&
      requested_survivor_size == from()->capacity() &&
      requested_eden_size == eden()->capacity()) {
    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr("    capacities are the right sizes, returning");
    }
    return;
  }

  char* eden_start = (char*)eden()->bottom();
  char* eden_end   = (char*)eden()->end();
  char* from_start = (char*)from()->bottom();
  char* from_end   = (char*)from()->end();
  char* to_start   = (char*)to()->bottom();
  char* to_end     = (char*)to()->end();

  const size_t alignment = os::vm_page_size();
  const bool maintain_minimum =
    (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();

  // Check whether from space is below to space
  if (from_start < to_start) {
    // Eden, from, to
    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr("  Eden, from, to:");
    }

    // Set eden
    // "requested_eden_size" is a goal for the size of eden
    // and may not be attainable.  "eden_size" below is
    // calculated based on the location of from-space and
    // the goal for the size of eden.  from-space is
    // fixed in place because it contains live data.
    // The calculation is done this way to avoid 32-bit
    // overflow (i.e., eden_start + requested_eden_size
    // may be too large for representation in 32 bits).
    size_t eden_size;
    if (maintain_minimum) {
      // Only make eden larger than the requested size if
      // the minimum size of the generation has to be maintained.
      // This could be done in general but policy at a higher
      // level is determining a requested size for eden and that
      // should be honored unless there is a fundamental reason.
      eden_size = pointer_delta(from_start,
                                eden_start,
                                sizeof(char));
    } else {
      eden_size = MIN2(requested_eden_size,
                       pointer_delta(from_start, eden_start, sizeof(char)));
    }

    eden_size = align_size_down(eden_size, alignment);
    eden_end = eden_start + eden_size;
    assert(eden_end >= eden_start, "addition overflowed");

    // To may resize into from space as long as it is clear of live data.
    // From space must remain page aligned, though, so we need to do some
    // extra calculations.

    // First calculate an optimal to-space
    to_end   = (char*)virtual_space()->high();
    to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size,
                                    sizeof(char));

    // Does the optimal to-space overlap from-space?
    if (to_start < (char*)from()->end()) {
      // Calculate the minimum offset possible for from_end
      size_t from_size = pointer_delta(from()->top(), from_start, sizeof(char));

      // Should we be in this method if from_space is empty? Why not the set_space method? FIX ME!
      if (from_size == 0) {
        from_size = alignment;
      } else {
        from_size = align_size_up(from_size, alignment);
      }

      from_end = from_start + from_size;
      assert(from_end > from_start, "addition overflow or from_size problem");

      guarantee(from_end <= (char*)from()->end(), "from_end moved to the right");

      // Now update to_start with the new from_end
      to_start = MAX2(from_end, to_start);
    } else {
      // If shrinking, move to-space down to abut the end of from-space
      // so that shrinking will move to-space down.  If not shrinking,
      // to-space is moving up to allow for growth on the next expansion.
      if (requested_eden_size <= cur_eden_size) {
        to_start = from_end;
        if (to_start + requested_survivor_size > to_start) {
          to_end = to_start + requested_survivor_size;
        }
      }
      // else leave to_end pointing to the high end of the virtual space.
    }

    guarantee(to_start != to_end, "to space is zero sized");

    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr("    [eden_start .. eden_end): "
                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                    eden_start,
                    eden_end,
                    pointer_delta(eden_end, eden_start, sizeof(char)));
      gclog_or_tty->print_cr("    [from_start .. from_end): "
                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                    from_start,
                    from_end,
                    pointer_delta(from_end, from_start, sizeof(char)));
      gclog_or_tty->print_cr("    [  to_start ..   to_end): "
                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                    to_start,
                    to_end,
                    pointer_delta(  to_end,   to_start, sizeof(char)));
    }
  } else {
    // Eden, to, from
    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr("  Eden, to, from:");
    }

    // Calculate the to-space boundaries based on
    // the start of from-space.
    to_end = from_start;
    to_start = (char*)pointer_delta(from_start,
                                    (char*)requested_survivor_size,
                                    sizeof(char));
    // Calculate the ideal eden boundaries.
    // eden_start is already at the bottom of the generation
    assert(eden_start == virtual_space()->low(),
      "Eden is not starting at the low end of the virtual space");
    if (eden_start + requested_eden_size >= eden_start) {
      eden_end = eden_start + requested_eden_size;
    } else {
      eden_end = to_start;
    }

    // Does eden intrude into to-space?  to-space
    // gets priority but eden is not allowed to shrink
    // to 0.
    if (eden_end > to_start) {
      eden_end = to_start;
    }

    // Don't let eden shrink down to 0 or less.
    eden_end = MAX2(eden_end, eden_start + alignment);
    assert(eden_start + alignment >= eden_start, "Overflow");

    size_t eden_size;
    if (maintain_minimum) {
      // Use all the space available.
      eden_end = MAX2(eden_end, to_start);
      eden_size = pointer_delta(eden_end, eden_start, sizeof(char));
      eden_size = MIN2(eden_size, cur_eden_size);
    } else {
      eden_size = pointer_delta(eden_end, eden_start, sizeof(char));
    }
    eden_size = align_size_down(eden_size, alignment);
    assert(maintain_minimum || eden_size <= requested_eden_size,
      "Eden size is too large");
    assert(eden_size >= alignment, "Eden size is too small");
    eden_end = eden_start + eden_size;

    // Move to-space down to eden.
    if (requested_eden_size < cur_eden_size) {
      to_start = eden_end;
      if (to_start + requested_survivor_size > to_start) {
        to_end = MIN2(from_start, to_start + requested_survivor_size);
      } else {
        to_end = from_start;
      }
    }

    // eden_end may have moved so again make sure
    // the to-space and eden don't overlap.
    to_start = MAX2(eden_end, to_start);

    // from-space
    size_t from_used = from()->used();
    if (requested_survivor_size > from_used) {
      if (from_start + requested_survivor_size >= from_start) {
        from_end = from_start + requested_survivor_size;
      }
      if (from_end > virtual_space()->high()) {
        from_end = virtual_space()->high();
      }
    }

    assert(to_start >= eden_end, "to-space should be above eden");
    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr("    [eden_start .. eden_end): "
                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                    eden_start,
                    eden_end,
                    pointer_delta(eden_end, eden_start, sizeof(char)));
      gclog_or_tty->print_cr("    [  to_start ..   to_end): "
                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                    to_start,
                    to_end,
                    pointer_delta(  to_end,   to_start, sizeof(char)));
      gclog_or_tty->print_cr("    [from_start .. from_end): "
                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                    from_start,
                    from_end,
                    pointer_delta(from_end, from_start, sizeof(char)));
    }
  }


  guarantee((HeapWord*)from_start <= from()->bottom(),
            "from start moved to the right");
  guarantee((HeapWord*)from_end >= from()->top(),
            "from end moved into live data");
  assert(is_object_aligned((intptr_t)eden_start), "checking alignment");
  assert(is_object_aligned((intptr_t)from_start), "checking alignment");
  assert(is_object_aligned((intptr_t)to_start), "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)eden_end);
  MemRegion toMR  ((HeapWord*)to_start,   (HeapWord*)to_end);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)from_end);

  // Let's make sure the call to initialize doesn't reset "top"!
  HeapWord* old_from_top = from()->top();

  // For PrintAdaptiveSizePolicy block  below
  size_t old_from = from()->capacity();
  size_t old_to   = to()->capacity();

  // If not clearing the spaces, do some checking to verify that
  // the spaces are already mangled.

  // Must check mangling before the spaces are reshaped.  Otherwise,
  // the bottom or end of one space may have moved into another space, and
  // a failure of the check may not correctly indicate which space
  // is not properly mangled.
  if (ZapUnusedHeapArea) {
    HeapWord* limit = (HeapWord*) virtual_space()->high();
    eden()->check_mangled_unused_area(limit);
    from()->check_mangled_unused_area(limit);
      to()->check_mangled_unused_area(limit);
  }

  // The call to initialize NULL's the next compaction space
  eden()->initialize(edenMR,
                     SpaceDecorator::Clear,
                     SpaceDecorator::DontMangle);
  eden()->set_next_compaction_space(from());
    to()->initialize(toMR  ,
                     SpaceDecorator::Clear,
                     SpaceDecorator::DontMangle);
  from()->initialize(fromMR,
                     SpaceDecorator::DontClear,
                     SpaceDecorator::DontMangle);

  assert(from()->top() == old_from_top, "from top changed!");

  if (PrintAdaptiveSizePolicy) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    assert(gch->kind() == CollectedHeap::GenCollectedHeap, "Sanity");

    gclog_or_tty->print("AdaptiveSizePolicy::survivor space sizes: "
                  "collection: %d "
                  "(" SIZE_FORMAT ", " SIZE_FORMAT ") -> "
                  "(" SIZE_FORMAT ", " SIZE_FORMAT ") ",
                  gch->total_collections(),
                  old_from, old_to,
                  from()->capacity(),
                  to()->capacity());
    gclog_or_tty->cr();
  }
}
Example #20
// Return the number of bytes available for resizing down the young
// generation.  This is the minimum of
//      input "bytes"
//      bytes to the minimum young gen size
//      bytes to the size currently being used + some small extra
size_t ASParNewGeneration::limit_gen_shrink (size_t bytes) {
  // Allow shrinkage into the current eden but keep eden large enough
  // to maintain the minimum young gen size
  bytes = MIN3(bytes, available_to_min_gen(), available_to_live());
  return align_size_down(bytes, os::vm_page_size());
}
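With the three limits written out (values invented), the shrink request is clamped to the tightest of the bounds and then rounded down to a whole page, mirroring the MIN3 plus align_size_down above.

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t page_size         = 4096;
  const size_t requested_bytes   = 9 * 1024 * 1024 + 100;
  const size_t available_to_min  = 6 * 1024 * 1024;       // room above the minimum gen size
  const size_t available_to_live = 5 * 1024 * 1024 + 7;   // room above live data plus slack

  size_t bytes = std::min({requested_bytes, available_to_min, available_to_live});
  bytes &= ~(page_size - 1);                               // align down to a whole page
  printf("young gen may shrink by %zu K\n", bytes / 1024); // 5120 K here
  return 0;
}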
Example #21
void TwoGenerationCollectorPolicy::initialize_size_info() {
  GenCollectorPolicy::initialize_size_info();

  // At this point the minimum, initial and maximum sizes
  // of the overall heap and of gen0 have been determined.
  // The maximum gen1 size can be determined from the maximum gen0
  // and maximum heap size since no explicit flags exist
  // for setting the gen1 maximum.
  _max_gen1_size = max_heap_byte_size() - _max_gen0_size;
  _max_gen1_size =
    MAX2((size_t)align_size_down(_max_gen1_size, min_alignment()),
         min_alignment());
  // If no explicit command line flag has been set for the
  // gen1 size, use what is left for gen1.
  if (FLAG_IS_DEFAULT(OldSize) || FLAG_IS_ERGO(OldSize)) {
    // The user has not specified any value or ergonomics
    // has chosen a value (which may or may not be consistent
    // with the overall heap size).  In either case make
    // the minimum, maximum and initial sizes consistent
    // with the gen0 sizes and the overall heap sizes.
    assert(min_heap_byte_size() > _min_gen0_size,
      "gen0 has an unexpected minimum size");
    set_min_gen1_size(min_heap_byte_size() - min_gen0_size());
    set_min_gen1_size(
      MAX2((size_t)align_size_down(_min_gen1_size, min_alignment()),
           min_alignment()));
    set_initial_gen1_size(initial_heap_byte_size() - initial_gen0_size());
    set_initial_gen1_size(
      MAX2((size_t)align_size_down(_initial_gen1_size, min_alignment()),
           min_alignment()));

  } else {
    // It's been explicitly set on the command line.  Use the
    // OldSize and then determine the consequences.
    set_min_gen1_size(OldSize);
    set_initial_gen1_size(OldSize);

    // If the user has explicitly set an OldSize that is inconsistent
    // with other command line flags, issue a warning.
    // The generation minimums and the overall heap minimum should
    // be within one heap alignment.
    if ((_min_gen1_size + _min_gen0_size + min_alignment()) <
           min_heap_byte_size()) {
      warning("Inconsistency between minimum heap size and minimum "
          "generation sizes: using minimum heap = " SIZE_FORMAT,
          min_heap_byte_size());
    }
    if (OldSize > _max_gen1_size) {
      warning("Inconsistency between maximum heap size and maximum "
          "generation sizes: using maximum heap = " SIZE_FORMAT
          " -XX:OldSize flag is being ignored",
          max_heap_byte_size());
    }
    // If there is an inconsistency between the OldSize and the minimum and/or
    // initial size of gen0, since OldSize was explicitly set, OldSize wins.
    if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size,
                          min_heap_byte_size(), OldSize)) {
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
              SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
              min_gen0_size(), initial_gen0_size(), max_gen0_size());
      }
    }
    // Initial size
    if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size,
                         initial_heap_byte_size(), OldSize)) {
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
          SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
          min_gen0_size(), initial_gen0_size(), max_gen0_size());
      }
    }
  }
  // Enforce the maximum gen1 size.
  set_min_gen1_size(MIN2(_min_gen1_size, _max_gen1_size));

  // Check that min gen1 <= initial gen1 <= max gen1
  set_initial_gen1_size(MAX2(_initial_gen1_size, _min_gen1_size));
  set_initial_gen1_size(MIN2(_initial_gen1_size, _max_gen1_size));

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Minimum gen1 " SIZE_FORMAT "  Initial gen1 "
      SIZE_FORMAT "  Maximum gen1 " SIZE_FORMAT,
      min_gen1_size(), initial_gen1_size(), max_gen1_size());
  }
}
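
The closing MIN2/MAX2 sequence is just a bounds check: cap the minimum at the maximum, then clamp the initial size into [min, max]. A hedged standalone sketch of that invariant using C++17's std::clamp follows; the struct and function names are illustrative and the byte values are made up.

#include <algorithm>
#include <cassert>
#include <cstddef>

struct Gen1Sizes {
  size_t min;
  size_t initial;
  size_t max;
};

// Mirror of the closing MIN2/MAX2 sequence above: cap the minimum at the
// maximum, then clamp the initial size into [min, max].
static Gen1Sizes enforce_bounds(Gen1Sizes s) {
  s.min     = std::min(s.min, s.max);
  s.initial = std::clamp(s.initial, s.min, s.max);
  return s;
}

int main() {
  // A deliberately inconsistent input: minimum larger than maximum.
  Gen1Sizes s = enforce_bounds({64u << 20, 32u << 20, 48u << 20});
  assert(s.min == (48u << 20) && s.initial == (48u << 20) && s.max == (48u << 20));
  return 0;
}
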
void PSYoungGen::initialize_work() {

  _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
                        (HeapWord*)virtual_space()->high_boundary());

  MemRegion cmr((HeapWord*)virtual_space()->low(),
                (HeapWord*)virtual_space()->high());
  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  if (ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply than after the new
    // spaces have been computed.
    SpaceMangler::mangle_region(cmr);
  }

  if (UseNUMA) {
    _eden_space = new MutableNUMASpace(virtual_space()->alignment());
  } else {
    _eden_space = new MutableSpace(virtual_space()->alignment());
  }
  _from_space = new MutableSpace(virtual_space()->alignment());
  _to_space   = new MutableSpace(virtual_space()->alignment());

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) {
    vm_exit_during_initialization("Could not allocate a young gen space");
  }

  // Allocate the mark sweep views of spaces
  _eden_mark_sweep =
      new PSMarkSweepDecorator(_eden_space, NULL, MarkSweepDeadRatio);
  _from_mark_sweep =
      new PSMarkSweepDecorator(_from_space, NULL, MarkSweepDeadRatio);
  _to_mark_sweep =
      new PSMarkSweepDecorator(_to_space, NULL, MarkSweepDeadRatio);

  if (_eden_mark_sweep == NULL ||
      _from_mark_sweep == NULL ||
      _to_mark_sweep == NULL) {
    vm_exit_during_initialization("Could not complete allocation"
                                  " of the young generation");
  }

  // Generation Counters - generation 0, 3 subspaces
  _gen_counters = new PSGenerationCounters("new", 0, 3, _virtual_space);

  // Compute maximum space sizes for performance counters
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  size_t alignment = heap->intra_heap_alignment();
  size_t size = virtual_space()->reserved_size();

  size_t max_survivor_size;
  size_t max_eden_size;

  if (UseAdaptiveSizePolicy) {
    max_survivor_size = size / MinSurvivorRatio;

    // round the survivor space size down to the nearest alignment
    // and make sure its size is greater than 0.
    max_survivor_size = align_size_down(max_survivor_size, alignment);
    max_survivor_size = MAX2(max_survivor_size, alignment);

    // set the maximum size of eden to be the size of the young gen
    // less two times the minimum survivor size. The minimum survivor
    // size for UseAdaptiveSizePolicy is one alignment.
    max_eden_size = size - 2 * alignment;
  } else {
    max_survivor_size = size / InitialSurvivorRatio;

    // round the survivor space size down to the nearest alignment
    // and make sure its size is greater than 0.
    max_survivor_size = align_size_down(max_survivor_size, alignment);
    max_survivor_size = MAX2(max_survivor_size, alignment);

    // set the maximum size of eden to be the size of the young gen
    // less two times the survivor size when the generation is 100%
    // committed. The minimum survivor size for -UseAdaptiveSizePolicy
    // is dependent on the committed portion (current capacity) of the
    // generation - the less space committed, the smaller the survivor
    // space, possibly as small as an alignment. However, we are interested
    // in the case where the young generation is 100% committed, as this
    // is the point where eden reaches its maximum size. At this point,
    // the size of a survivor space is max_survivor_size.
    max_eden_size = size - 2 * max_survivor_size;
  }

  _eden_counters = new SpaceCounters("eden", 0, max_eden_size, _eden_space,
                                     _gen_counters);
  _from_counters = new SpaceCounters("s0", 1, max_survivor_size, _from_space,
                                     _gen_counters);
  _to_counters = new SpaceCounters("s1", 2, max_survivor_size, _to_space,
                                   _gen_counters);

  compute_initial_space_boundaries();
}
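
For the performance counters, the maximum survivor size is the reserved size divided by the relevant ratio, rounded down to the alignment but never zero, and the maximum eden size is what remains after two survivors. Below is a standalone sketch of the non-adaptive branch of that computation; the reservation, alignment, and ratio values are assumptions chosen for illustration.

#include <algorithm>
#include <cstddef>
#include <cstdio>

static size_t align_down(size_t size, size_t alignment) {
  return size & ~(alignment - 1);
}

int main() {
  const size_t reserved       = 64 * 1024 * 1024;  // assumed young gen reservation
  const size_t alignment      = 64 * 1024;         // assumed intra-heap alignment
  const size_t survivor_ratio = 8;                 // assumed InitialSurvivorRatio

  // Survivor: reserved size divided by the ratio, aligned down, never zero.
  size_t max_survivor = std::max(align_down(reserved / survivor_ratio, alignment),
                                 alignment);
  // Eden: what remains when both survivor spaces are at their maximum.
  size_t max_eden = reserved - 2 * max_survivor;

  std::printf("max survivor = %zu, max eden = %zu\n", max_survivor, max_eden);
  return 0;
}
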
// Return the number of bytes available for resizing down the young
// generation.  This is the minimum of
//      input "bytes"
//      bytes to the minimum young gen size
//      bytes to the size currently being used + some small extra
size_t PSYoungGen::limit_gen_shrink(size_t bytes) {
  // Allow shrinkage into the current eden but keep eden large enough
  // to maintain the minimum young gen size
  bytes = MIN3(bytes, available_to_min_gen(), available_to_live());
  return align_size_down(bytes, virtual_space()->alignment());
}
Ejemplo n.º 24
 // Return the size of a survivor space if this generation were of size
 // gen_size.
 size_t compute_survivor_size(size_t gen_size, size_t alignment) const {
   size_t n = gen_size / (SurvivorRatio + 2);
   return n > alignment ? align_size_down(n, alignment) : alignment;
 }
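
Taken on its own, the helper divides the generation into SurvivorRatio + 2 equal parts (eden takes SurvivorRatio of them, each survivor one) and aligns the result down, falling back to one alignment unit for tiny generations. A minimal usage sketch with assumed values follows; the free function survivor_size is illustrative, not the class member.

#include <cstddef>
#include <cstdio>

static size_t align_down(size_t size, size_t alignment) {
  return size & ~(alignment - 1);
}

// Same formula as compute_survivor_size above, lifted out of its class.
static size_t survivor_size(size_t gen_size, size_t survivor_ratio, size_t alignment) {
  size_t n = gen_size / (survivor_ratio + 2);
  return n > alignment ? align_down(n, alignment) : alignment;
}

int main() {
  // Assumed values: 40 MiB young gen, SurvivorRatio = 8, 64 KiB alignment.
  std::printf("%zu\n", survivor_size(40u << 20, 8, 64 * 1024));  // prints 4194304 (4 MiB)
  return 0;
}
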
Ejemplo n.º 25
void PSPermGen::compute_new_size(size_t used_before_collection) {
  // Update our padded average of objects allocated in perm
  // gen between collections.
  assert(used_before_collection >= _last_used,
                                "negative allocation amount since last GC?");

  const size_t alloc_since_last_gc = used_before_collection - _last_used;
  _avg_size->sample(alloc_since_last_gc);

  const size_t current_live = used_in_bytes();
  // Stash away the current amount live for the next call to this method.
  _last_used = current_live;

  // We have different alignment constraints than the rest of the heap.
  const size_t alignment = MAX2(MinPermHeapExpansion,
                                virtual_space()->alignment());

  // Compute the desired size:
  //  The free space is the newly computed padded average,
  //  so the desired size is what's live + the free space.
  size_t desired_size = current_live + (size_t)_avg_size->padded_average();
  desired_size = align_size_up(desired_size, alignment);

  // ...and no larger or smaller than our max and min allowed.
  desired_size = MAX2(MIN2(desired_size, _max_gen_size), _min_gen_size);
  assert(desired_size <= _max_gen_size, "just checking");

  const size_t size_before = _virtual_space->committed_size();

  if (desired_size == size_before) {
    // no change, we're done
    return;
  }

  {
    // We'll be growing or shrinking the heap:  in either case,
    // we need to hold a lock.
    MutexLocker x(ExpandHeap_lock);
    if (desired_size > size_before) {
      const size_t change_bytes = desired_size - size_before;
      const size_t aligned_change_bytes =
        align_size_up(change_bytes, alignment);
      expand_by(aligned_change_bytes);
    } else {
      // Shrinking
      const size_t change_bytes =
        size_before - desired_size;
      const size_t aligned_change_bytes = align_size_down(change_bytes, alignment);
      shrink(aligned_change_bytes);
    }
  }

  // While this code isn't controlled by AdaptiveSizePolicy, it's
  // convenient to see all resizing decisions under the same flag.
  if (PrintAdaptiveSizePolicy) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

    gclog_or_tty->print_cr("AdaptiveSizePolicy::perm generation size: "
                           "collection: %d "
                           "(" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",
                           heap->total_collections(),
                           size_before, _virtual_space->committed_size());
  }
}
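
Stripped of the averaging machinery, the resize decision is: desired = live data plus the padded allocation average, aligned up and clamped to the generation limits, then expand or shrink by the aligned difference. A hedged sketch of just that arithmetic is below (C++17 for std::clamp); the constants and the padded-average stand-in are assumptions for illustration.

#include <algorithm>
#include <cstddef>
#include <cstdio>

static size_t align_up(size_t size, size_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}

static size_t align_down(size_t size, size_t alignment) {
  return size & ~(alignment - 1);
}

int main() {
  const size_t alignment = 64 * 1024;   // assumed expansion alignment
  const size_t min_gen   = 16u << 20;
  const size_t max_gen   = 64u << 20;
  const size_t committed = 24u << 20;

  size_t live             = 20u << 20;  // stands in for used_in_bytes()
  size_t padded_avg_alloc = 3u << 20;   // stands in for _avg_size->padded_average()

  // Desired size: live data plus the padded allocation average, aligned up
  // and clamped to the generation's limits.
  size_t desired = align_up(live + padded_avg_alloc, alignment);
  desired = std::clamp(desired, min_gen, max_gen);

  if (desired > committed) {
    std::printf("expand by %zu\n", align_up(desired - committed, alignment));
  } else if (desired < committed) {
    std::printf("shrink by %zu\n", align_down(committed - desired, alignment));
  }
  return 0;
}
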
Ejemplo n.º 26
static void current_stack_region(address *bottom, size_t *size) {
  pthread_attr_t attr;
  int res = pthread_getattr_np(pthread_self(), &attr);
  if (res != 0) {
    if (res == ENOMEM) {
      vm_exit_out_of_memory(0, "pthread_getattr_np");
    }
    else {
      fatal(err_msg("pthread_getattr_np failed with errno = %d", res));
    }
  }

  address stack_bottom;
  size_t stack_bytes;
  res = pthread_attr_getstack(&attr, (void **) &stack_bottom, &stack_bytes);
  if (res != 0) {
    fatal(err_msg("pthread_attr_getstack failed with errno = %d", res));
  }
  address stack_top = stack_bottom + stack_bytes;

  // The block of memory returned by pthread_attr_getstack() includes
  // guard pages where present.  We need to trim these off.
  size_t page_bytes = os::Linux::page_size();
  assert(((intptr_t) stack_bottom & (page_bytes - 1)) == 0, "unaligned stack");

  size_t guard_bytes;
  res = pthread_attr_getguardsize(&attr, &guard_bytes);
  if (res != 0) {
    fatal(err_msg("pthread_attr_getguardsize failed with errno = %d", res));
  }
  int guard_pages = align_size_up(guard_bytes, page_bytes) / page_bytes;
  assert(guard_bytes == guard_pages * page_bytes, "unaligned guard");

#ifdef IA64
  // IA64 has two stacks sharing the same area of memory, a normal
  // stack growing downwards and a register stack growing upwards.
  // Guard pages, if present, are in the centre.  This code splits
  // the stack in two even without guard pages, though in theory
  // there's nothing to stop us allocating more to the normal stack
  // or more to the register stack if one or the other were found
  // to grow faster.
  int total_pages = align_size_down(stack_bytes, page_bytes) / page_bytes;
  stack_bottom += (total_pages - guard_pages) / 2 * page_bytes;
#endif // IA64

  stack_bottom += guard_bytes;

  pthread_attr_destroy(&attr);

  // The initial thread has a growable stack, and the size reported
  // by pthread_attr_getstack is the maximum size it could possibly
  // be, given what is currently mapped.  This can be huge, so we cap it.
  if (os::Linux::is_initial_thread()) {
    stack_bytes = stack_top - stack_bottom;

    if (stack_bytes > JavaThread::stack_size_at_create())
      stack_bytes = JavaThread::stack_size_at_create();

    stack_bottom = stack_top - stack_bytes;
  }

  assert(os::current_stack_pointer() >= stack_bottom, "should do");
  assert(os::current_stack_pointer() < stack_top, "should do");

  *bottom = stack_bottom;
  *size = stack_top - stack_bottom;
}
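
This is not HotSpot code, but the same attribute queries can be exercised in a minimal Linux-only program (glibc extensions pthread_getattr_np, pthread_attr_getstack, pthread_attr_getguardsize; compile with -pthread). Like the code above, the sketch treats the guard region as part of the extent reported at the low end and trims it off.

// Linux-only sketch using the same glibc calls as the code above.
#ifndef _GNU_SOURCE
#define _GNU_SOURCE 1
#endif
#include <pthread.h>
#include <cstddef>
#include <cstdio>

int main() {
  pthread_attr_t attr;
  if (pthread_getattr_np(pthread_self(), &attr) != 0) return 1;

  void*  stack_addr = nullptr;   // lowest address of the reported stack extent
  size_t stack_size = 0;
  size_t guard_size = 0;
  pthread_attr_getstack(&attr, &stack_addr, &stack_size);
  pthread_attr_getguardsize(&attr, &guard_size);
  pthread_attr_destroy(&attr);

  // As in the code above, skip the guard region at the low end.
  char*  usable_bottom = static_cast<char*>(stack_addr) + guard_size;
  size_t usable_bytes  = stack_size - guard_size;

  std::printf("usable stack: %p .. %p (%zu bytes)\n",
              static_cast<void*>(usable_bottom),
              static_cast<void*>(usable_bottom + usable_bytes),
              usable_bytes);
  return 0;
}
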
Ejemplo n.º 27
size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_size_down(size, os::vm_page_size());
}
Ejemplo n.º 28
void TwoGenerationCollectorPolicy::initialize_flags() {
  // All space sizes must be multiples of car size in order for the
  // CarTable to work.  Similarly with the should_scavenge region size.
  set_min_alignment(MAX2((uintx) CarSpace::car_size(), (uintx) Generation::GenGrain));

  // The card marking array and the offset arrays for old generations are
  // committed in os pages as well. Make sure they are entirely full (to
  // avoid partial page problems), e.g. if 512 bytes heap corresponds to 1
  // byte entry and the os page size is 4096, the maximum heap size should
  // be 512*4096 = 2MB aligned.
  set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name()));
  assert(max_alignment() >= min_alignment() && max_alignment() % min_alignment() == 0, "invalid alignment constraints");

  // Adjust max size parameters
  if (NewSize > MaxNewSize) {
    MaxNewSize = NewSize;
  }
  NewSize = align_size_down(NewSize, min_alignment());
  MaxNewSize = align_size_down(MaxNewSize, min_alignment());

  OldSize = align_size_down(OldSize, min_alignment());
  if (NewSize + OldSize > MaxHeapSize) {
    MaxHeapSize = NewSize + OldSize;
  }
  MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());

  if (PermSize > MaxPermSize) {
    MaxPermSize = PermSize;
  }
  PermSize = align_size_down(PermSize, min_alignment());
  MaxPermSize = align_size_down(MaxPermSize, max_alignment());

  MinPermHeapExpansion = align_size_down(MinPermHeapExpansion, min_alignment());
  MaxPermHeapExpansion = align_size_down(MaxPermHeapExpansion, min_alignment());

  MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, min_alignment());

  always_do_update_barrier = UseConcMarkSweepGC;
  BlockOffsetArrayUseUnallocatedBlock |= ParallelGCThreads > 0;

  SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment());
  SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment());
  SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment());

  // Check validity of heap flags
  assert(NewSize     % min_alignment() == 0, "eden space alignment");
  assert(MaxNewSize  % min_alignment() == 0, "survivor space alignment");
  assert(OldSize     % min_alignment() == 0, "old space alignment");
  assert(MaxHeapSize % max_alignment() == 0, "maximum heap alignment");
  assert(PermSize    % min_alignment() == 0, "permanent space alignment");
  assert(MaxPermSize % max_alignment() == 0, "maximum permanent space alignment");
  assert(SharedReadOnlySize % max_alignment() == 0, "read-only space alignment");
  assert(SharedReadWriteSize % max_alignment() == 0, "read-write space alignment");
  assert(SharedMiscDataSize % max_alignment() == 0, "misc-data space alignment");

  if (NewSize < 3 * min_alignment()) {
    // make sure there is room for eden and two survivor spaces
    vm_exit_during_initialization("Too small new size specified");
  }
  if (PermSize < M) {
    vm_exit_during_initialization("Too small initial permanent heap");
  }
  if (SurvivorRatio < 1 || NewRatio < 1) {
    vm_exit_during_initialization("Invalid heap ratio specified");
  }
}
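
The card-table constraint in the comment above is easy to check numerically: with one card byte covering 512 heap bytes and 4 KiB OS pages, a fully populated card-table page covers 512 * 4096 bytes of heap, so the heap should be sized in 2 MiB steps. A small sketch of that arithmetic follows, using only the values quoted in the comment; the helper name is illustrative.

#include <cstddef>
#include <cstdio>

static size_t align_up(size_t size, size_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}

int main() {
  const size_t bytes_per_card = 512;   // heap bytes covered by one card-table byte
  const size_t page_size      = 4096;  // OS page size used in the comment
  const size_t max_alignment  = bytes_per_card * page_size;   // 2 MiB

  // Rounding an odd maximum heap size up to the card-table-friendly step.
  std::printf("max_alignment = %zu, (100 MiB + 1) aligned up = %zu\n",
              max_alignment, align_up(100 * 1024 * 1024 + 1, max_alignment));
  return 0;
}
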
void PSAdaptiveSizePolicy::compute_generation_free_space(
                                           size_t young_live,
                                           size_t eden_live,
                                           size_t old_live,
                                           size_t perm_live,
                                           size_t cur_eden,
                                           size_t max_old_gen_size,
                                           size_t max_eden_size,
                                           bool   is_full_gc,
                                           GCCause::Cause gc_cause,
                                           CollectorPolicy* collector_policy) {

  // Update statistics
  // Time statistics are updated as we go, update footprint stats here
  _avg_base_footprint->sample(BaseFootPrintEstimate + perm_live);
  avg_young_live()->sample(young_live);
  avg_eden_live()->sample(eden_live);
  if (is_full_gc) {
    // old_live is only accurate after a full gc
    avg_old_live()->sample(old_live);
  }

  // This code used to return if the policy was not ready, i.e.,
  // policy_is_ready() returning false.  The intent was that
  // decisions below needed major collection times and so could
  // not be made before two major collections.  A consequence was
  // adjustments to the young generation were not done until after
  // two major collections even if the minor collections times
  // exceeded the requested goals.  Now let the young generation
  // adjust for the minor collection times.  Major collection times
  // will be zero for the first collection and will naturally be
  // ignored.  Tenured generation adjustments are only made at the
  // full collections so until the second major collection has
  // been reached, no tenured generation adjustments will be made.

  // Until we know better, desired promotion size uses the last calculation
  size_t desired_promo_size = _promo_size;

  // Start eden at the current value.  The desired value that is stored
  // in _eden_size is not bounded by constraints of the heap and can
  // run away.
  //
  // As expected, using the previously stored desired_eden_size as the
  // starting point caused desired_eden_size to grow far too large and
  // caused an overflow downstream.  It may have improved performance in
  // some cases but is dangerous.
  size_t desired_eden_size = cur_eden;

#ifdef ASSERT
  size_t original_promo_size = desired_promo_size;
  size_t original_eden_size = desired_eden_size;
#endif

  // Cache some values. There's a bit of work getting these, so
  // we might save a little time.
  const double major_cost = major_gc_cost();
  const double minor_cost = minor_gc_cost();

  // Used for diagnostics
  clear_generation_free_space_flags();

  // Limits on our growth
  size_t promo_limit = (size_t)(max_old_gen_size - avg_old_live()->average());

  // This method sets the desired eden size.  That plus the
  // desired survivor space sizes sets the desired young generation
  // size.  This method does not know what the desired survivor
  // size is but expects that other policy will attempt to make
  // the survivor sizes compatible with the live data in the
  // young generation.  This limit is an estimate of the space left
  // in the young generation after the survivor spaces have been
  // subtracted out.
  size_t eden_limit = max_eden_size;

  // But don't force a promo size below the current promo size. Otherwise,
  // the promo size will shrink for no good reason.
  promo_limit = MAX2(promo_limit, _promo_size);

  const double gc_cost_limit = GCTimeLimit/100.0;

  // Which way should we go?
  // if pause requirement is not met
  //   adjust size of any generation with average pause exceeding
  //   the pause limit.  Adjust one pause at a time (the larger)
  //   and only make adjustments for the major pause at full collections.
  // else if throughput requirement not met
  //   adjust the size of the generation with larger gc time.  Only
  //   adjust one generation at a time.
  // else
  //   adjust down the total heap size.  Adjust down the larger of the
  //   generations.

  // Add some checks for a threshold for a change.  For example,
  // a change less than the necessary alignment is probably not worth
  // attempting.


  if ((_avg_minor_pause->padded_average() > gc_pause_goal_sec()) ||
      (_avg_major_pause->padded_average() > gc_pause_goal_sec())) {
    //
    // Check pauses
    //
    // Make changes only to affect one of the pauses (the larger)
    // at a time.
    adjust_for_pause_time(is_full_gc, &desired_promo_size, &desired_eden_size);

  } else if (_avg_minor_pause->padded_average() > gc_minor_pause_goal_sec()) {
    // Adjust only for the minor pause time goal
    adjust_for_minor_pause_time(is_full_gc, &desired_promo_size, &desired_eden_size);

  } else if (adjusted_mutator_cost() < _throughput_goal) {
    // This branch used to require that mutator_cost() > 0.0 in 1.4.2.
    // This sometimes resulted in skipping to the minimize footprint
    // code.  Change this to try and reduce GC time if mutator time is
    // negative for whatever reason.  Or for future consideration,
    // bail out of the code if mutator time is negative.
    //
    // Throughput
    //
    assert(major_cost >= 0.0, "major cost is < 0.0");
    assert(minor_cost >= 0.0, "minor cost is < 0.0");
    // Try to reduce the GC times.
    adjust_for_throughput(is_full_gc, &desired_promo_size, &desired_eden_size);

  } else {

    // Be conservative about reducing the footprint.
    //   Do a minimum number of major collections first.
    //   Have reasonable averages for major and minor collections costs.
    if (UseAdaptiveSizePolicyFootprintGoal &&
        young_gen_policy_is_ready() &&
        avg_major_gc_cost()->average() >= 0.0 &&
        avg_minor_gc_cost()->average() >= 0.0) {
      size_t desired_sum = desired_eden_size + desired_promo_size;
      desired_eden_size = adjust_eden_for_footprint(desired_eden_size,
                                                    desired_sum);
      if (is_full_gc) {
        set_decide_at_full_gc(decide_at_full_gc_true);
        desired_promo_size = adjust_promo_for_footprint(desired_promo_size,
                                                        desired_sum);
      }
    }
  }

  // Note we make the same tests as in the code block below;  the code
  // seems a little easier to read with the printing in another block.
  if (PrintAdaptiveSizePolicy) {
    if (desired_promo_size > promo_limit)  {
      // "free_in_old_gen" was the original value for used for promo_limit
      size_t free_in_old_gen = (size_t)(max_old_gen_size - avg_old_live()->average());
      gclog_or_tty->print_cr(
            "PSAdaptiveSizePolicy::compute_generation_free_space limits:"
            " desired_promo_size: " SIZE_FORMAT
            " promo_limit: " SIZE_FORMAT
            " free_in_old_gen: " SIZE_FORMAT
            " max_old_gen_size: " SIZE_FORMAT
            " avg_old_live: " SIZE_FORMAT,
            desired_promo_size, promo_limit, free_in_old_gen,
            max_old_gen_size, (size_t) avg_old_live()->average());
    }
    if (desired_eden_size > eden_limit) {
      gclog_or_tty->print_cr(
            "AdaptiveSizePolicy::compute_generation_free_space limits:"
            " desired_eden_size: " SIZE_FORMAT
            " old_eden_size: " SIZE_FORMAT
            " eden_limit: " SIZE_FORMAT
            " cur_eden: " SIZE_FORMAT
            " max_eden_size: " SIZE_FORMAT
            " avg_young_live: " SIZE_FORMAT,
            desired_eden_size, _eden_size, eden_limit, cur_eden,
            max_eden_size, (size_t)avg_young_live()->average());
    }
    if (gc_cost() > gc_cost_limit) {
      gclog_or_tty->print_cr(
            "AdaptiveSizePolicy::compute_generation_free_space: gc time limit"
            " gc_cost: %f "
            " GCTimeLimit: %d",
            gc_cost(), GCTimeLimit);
    }
  }

  // Align everything and make a final limit check
  const size_t alignment = _intra_generation_alignment;
  desired_eden_size  = align_size_up(desired_eden_size, alignment);
  desired_eden_size  = MAX2(desired_eden_size, alignment);
  desired_promo_size = align_size_up(desired_promo_size, alignment);
  desired_promo_size = MAX2(desired_promo_size, alignment);

  eden_limit  = align_size_down(eden_limit, alignment);
  promo_limit = align_size_down(promo_limit, alignment);

  // Is too much time being spent in GC?
  //   Is the heap trying to grow beyond its limits?

  const size_t free_in_old_gen =
    (size_t)(max_old_gen_size - avg_old_live()->average());
  if (desired_promo_size > free_in_old_gen && desired_eden_size > eden_limit) {
    check_gc_overhead_limit(young_live,
                            eden_live,
                            max_old_gen_size,
                            max_eden_size,
                            is_full_gc,
                            gc_cause,
                            collector_policy);
  }


  // And one last limit check, now that we've aligned things.
  if (desired_eden_size > eden_limit) {
    // If the policy says to get a larger eden but
    // is hitting the limit, don't decrease eden.
    // This can lead to a general drifting down of the
    // eden size.  Let the tenuring calculation push more
    // into the old gen.
    desired_eden_size = MAX2(eden_limit, cur_eden);
  }
  desired_promo_size = MIN2(desired_promo_size, promo_limit);


  if (PrintAdaptiveSizePolicy) {
    // Timing stats
    gclog_or_tty->print(
               "PSAdaptiveSizePolicy::compute_generation_free_space: costs"
               " minor_time: %f"
               " major_cost: %f"
               " mutator_cost: %f"
               " throughput_goal: %f",
               minor_gc_cost(), major_gc_cost(), mutator_cost(),
               _throughput_goal);

    // We give more details if Verbose is set
    if (Verbose) {
      gclog_or_tty->print( " minor_pause: %f"
                  " major_pause: %f"
                  " minor_interval: %f"
                  " major_interval: %f"
                  " pause_goal: %f",
                  _avg_minor_pause->padded_average(),
                  _avg_major_pause->padded_average(),
                  _avg_minor_interval->average(),
                  _avg_major_interval->average(),
                  gc_pause_goal_sec());
    }

    // Footprint stats
    gclog_or_tty->print( " live_space: " SIZE_FORMAT
                " free_space: " SIZE_FORMAT,
                live_space(), free_space());
    // More detail
    if (Verbose) {
      gclog_or_tty->print( " base_footprint: " SIZE_FORMAT
                  " avg_young_live: " SIZE_FORMAT
                  " avg_old_live: " SIZE_FORMAT,
                  (size_t)_avg_base_footprint->average(),
                  (size_t)avg_young_live()->average(),
                  (size_t)avg_old_live()->average());
    }

    // And finally, our old and new sizes.
    gclog_or_tty->print(" old_promo_size: " SIZE_FORMAT
               " old_eden_size: " SIZE_FORMAT
               " desired_promo_size: " SIZE_FORMAT
               " desired_eden_size: " SIZE_FORMAT,
               _promo_size, _eden_size,
               desired_promo_size, desired_eden_size);
    gclog_or_tty->cr();
  }

  decay_supplemental_growth(is_full_gc);

  set_promo_size(desired_promo_size);
  set_eden_size(desired_eden_size);
}
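
Stripped of the statistics plumbing, the policy is essentially a three-way decision: fix the worst pause if the pause goal is being missed, otherwise grow for throughput if the throughput goal is being missed, otherwise shrink the footprint, and finally align and clamp the results. The schematic sketch below mirrors only that control flow; the struct, thresholds, and messages are illustrative, and the separate minor-pause goal and footprint preconditions are omitted.

#include <cstdio>

struct Costs {
  double minor_pause;    // padded average minor pause (seconds)
  double major_pause;    // padded average major pause (seconds)
  double mutator_cost;   // fraction of time spent in the mutator
};

enum class Action { AdjustForPause, AdjustForThroughput, ShrinkFootprint };

// Simplified mirror of the three-way branch above.
static Action choose_action(const Costs& c, double pause_goal_sec,
                            double throughput_goal) {
  if (c.minor_pause > pause_goal_sec || c.major_pause > pause_goal_sec)
    return Action::AdjustForPause;
  if (c.mutator_cost < throughput_goal)
    return Action::AdjustForThroughput;
  return Action::ShrinkFootprint;
}

int main() {
  Costs c{0.02, 0.30, 0.97};
  switch (choose_action(c, /*pause_goal_sec=*/0.20, /*throughput_goal=*/0.99)) {
    case Action::AdjustForPause:
      std::puts("adjust the generation with the larger pause");
      break;
    case Action::AdjustForThroughput:
      std::puts("grow the generation with the larger GC cost");
      break;
    case Action::ShrinkFootprint:
      std::puts("shrink the larger generation");
      break;
  }
  return 0;
}
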
Ejemplo n.º 30
size_t ReservedSpace::allocation_align_size_down(size_t size) {
  return align_size_down(size, os::vm_allocation_granularity());
}
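
Both of these one-line helpers wrap the same power-of-two rounding trick; the only difference is the granularity passed in (the OS page size versus the allocation granularity, which on Linux is usually the page size and on Windows is typically 64 KiB). A standalone sketch of that helper, with assumed granularity values:

#include <cassert>
#include <cstddef>

// Round size down to a multiple of a power-of-two granularity,
// as page_align_size_down / allocation_align_size_down do above.
static size_t align_size_down_to(size_t size, size_t granularity) {
  assert((granularity & (granularity - 1)) == 0 && "granularity must be a power of two");
  return size & ~(granularity - 1);
}

int main() {
  assert(align_size_down_to(10000, 4096) == 8192);         // page-style alignment
  assert(align_size_down_to(100000, 64 * 1024) == 65536);  // allocation-granularity style
  return 0;
}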