Code example #1
File: psMarkSweep.cpp Project: AllenWeb/openjdk-1
// This method contains all heap specific policy for invoking mark sweep.
// PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
// the heap. It will do nothing further. If we need to bail out for policy
// reasons, scavenge before full gc, or any other specialized behavior, it
// needs to be added here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // Before each allocation/collection attempt, find out from the
  // policy object if GCs are, on the whole, taking too long. If so,
  // bail out without attempting a collection.  The exceptions are
  // for explicitly requested GC's.
  if (!policy->gc_time_limit_exceeded() ||
      GCCause::is_user_requested_gc(gc_cause) ||
      GCCause::is_serviceability_requested_gc(gc_cause)) {
    IsGCActiveMark mark;

    if (ScavengeBeforeFullGC) {
      PSScavenge::invoke_no_policy();
    }

  int count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
    IntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
    PSMarkSweep::invoke_no_policy(maximum_heap_compaction);
  }
}
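
The policy gate above can be summarized in isolation. The following is a minimal standalone sketch, not HotSpot code: PolicyView, GcRequest and should_run_full_gc are hypothetical names standing in for PSAdaptiveSizePolicy, GCCause and the guarding condition in PSMarkSweep::invoke(), namely that an explicit GC request overrides the "GC time limit exceeded" bail-out.

#include <cstdio>

// Hypothetical mirror of the inputs PSMarkSweep::invoke() consults.
struct PolicyView {
  bool gc_time_limit_exceeded;   // cf. PSAdaptiveSizePolicy::gc_time_limit_exceeded()
};

struct GcRequest {
  bool user_requested;           // e.g. an explicit System.gc()
  bool serviceability_requested; // e.g. a heap-dump or JVMTI request
};

// Same shape as the condition guarding the collection above: collect unless
// GC overhead is already too high, but always honor explicit requests.
bool should_run_full_gc(const PolicyView& policy, const GcRequest& cause) {
  return !policy.gc_time_limit_exceeded ||
         cause.user_requested ||
         cause.serviceability_requested;
}

int main() {
  PolicyView overloaded{true};
  std::printf("implicit cause -> %d\n", should_run_full_gc(overloaded, {false, false})); // 0: skip
  std::printf("System.gc()    -> %d\n", should_run_full_gc(overloaded, {true, false}));  // 1: run
  return 0;
}
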
Code example #2
// This method contains all heap specific policy for invoking scavenge.
// PSScavenge::invoke_no_policy() will do nothing but attempt to
// scavenge. It will not clean up after failed promotions, bail out if
// we've exceeded policy time limits, or any other special behavior.
// All such policy should be placed here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
void PSScavenge::invoke() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  bool scavenge_was_done = PSScavenge::invoke_no_policy();

  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
  if (UsePerfData)
    counters->update_full_follows_scavenge(0);
  if (!scavenge_was_done ||
      policy->should_full_GC(heap->old_gen()->free_in_bytes())) {
    if (UsePerfData)
      counters->update_full_follows_scavenge(full_follows_scavenge);
    GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
    CollectorPolicy* cp = heap->collector_policy();
    const bool clear_all_softrefs = cp->should_clear_all_soft_refs();

    if (UseParallelOldGC) {
      PSParallelCompact::invoke_no_policy(clear_all_softrefs);
    } else {
      PSMarkSweep::invoke_no_policy(clear_all_softrefs);
    }
  }
}
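
The decision made after the scavenge above reduces to two inputs: whether the scavenge completed, and whether the policy's padded promotion estimate still fits in the old generation's free space. The sketch below is not HotSpot code; run_scavenge and run_full_gc are hypothetical stand-ins for PSScavenge::invoke_no_policy() and the mark-sweep / parallel-compact entry points, and should_full_gc only approximates PSAdaptiveSizePolicy::should_full_GC().

#include <cstddef>

struct SizePolicyView {
  std::size_t padded_average_promoted; // bytes the next scavenge is expected to promote
};

// Rough stand-in for PSAdaptiveSizePolicy::should_full_GC(): a full collection
// is warranted once the expected promotion no longer fits in the old gen.
bool should_full_gc(const SizePolicyView& policy, std::size_t old_gen_free_bytes) {
  return policy.padded_average_promoted > old_gen_free_bytes;
}

// Same control flow as the tail of PSScavenge::invoke() above.
void collect(bool (*run_scavenge)(), void (*run_full_gc)(),
             const SizePolicyView& policy, std::size_t old_gen_free_bytes) {
  bool scavenge_was_done = run_scavenge();
  if (!scavenge_was_done || should_full_gc(policy, old_gen_free_bytes)) {
    run_full_gc(); // PSParallelCompact or PSMarkSweep in the real code
  }
}

static bool fake_scavenge() { return true; }
static void fake_full_gc()  { }

int main() {
  // Expected promotion 8 MB, only 4 MB free in the old gen: a full GC follows.
  collect(&fake_scavenge, &fake_full_gc, SizePolicyView{8u << 20}, 4u << 20);
  return 0;
}
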
Code example #3
bool PSScavenge::should_attempt_scavenge() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
  
  if (UsePerfData) {
    counters->update_scavenge_skipped(not_skipped);
  }

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  if (!ScavengeWithObjectsInToSpace) {
    // Do not attempt to promote unless to_space is empty
    if (!young_gen->to_space()->is_empty()) {
      _consecutive_skipped_scavenges++;
      if (UsePerfData) {
        counters->update_scavenge_skipped(to_space_not_empty);
      }
      return false;
    }
  }

  // Test to see if the scavenge will likely fail.
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // A similar test is done in the policy's should_full_GC().  If this is
  // changed, decide if that test should also be changed.
  size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
  size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
  bool result = promotion_estimate < old_gen->free_in_bytes();

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(result ? "  do scavenge: " : "  skip scavenge: ");
    gclog_or_tty->print_cr(" average_promoted " SIZE_FORMAT
      " padded_average_promoted " SIZE_FORMAT
      " free in old gen " SIZE_FORMAT,
      (size_t) policy->average_promoted_in_bytes(),
      (size_t) policy->padded_average_promoted_in_bytes(),
      old_gen->free_in_bytes());
    if (young_gen->used_in_bytes() < 
        (size_t) policy->padded_average_promoted_in_bytes()) {
      gclog_or_tty->print_cr(" padded_promoted_average is greater"
	" than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes());
    }
  }

  if (result) {
    _consecutive_skipped_scavenges = 0;
  } else {
    _consecutive_skipped_scavenges++;
    if (UsePerfData) {
      counters->update_scavenge_skipped(promoted_too_large);
    }
  }
  return result;
}
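
The feasibility test above boils down to a single comparison: the promotion estimate, capped by what the young generation actually holds, must fit in the old generation's free space. A minimal sketch with made-up sizes, not HotSpot code:

#include <algorithm>
#include <cstddef>
#include <cstdio>

bool scavenge_likely_to_succeed(std::size_t padded_avg_promoted_bytes,
                                std::size_t young_used_bytes,
                                std::size_t old_free_bytes) {
  // The estimate is capped by the young gen's current usage: a scavenge can
  // never promote more than is live there right now.
  std::size_t promotion_estimate = std::min(padded_avg_promoted_bytes, young_used_bytes);
  return promotion_estimate < old_free_bytes;
}

int main() {
  const std::size_t M = 1u << 20;
  // Padded average promotion 12 MB, young gen holds 8 MB, old gen has 10 MB free:
  // estimate = min(12, 8) = 8 MB < 10 MB, so the scavenge is attempted.
  std::printf("%d\n", scavenge_likely_to_succeed(12 * M, 8 * M, 10 * M));
  return 0;
}
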
Code example #4
size_t ASPSOldGen::available_for_contraction() {
  size_t uncommitted_bytes = virtual_space()->uncommitted_size();
  if (uncommitted_bytes != 0) {
    return uncommitted_bytes;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  const size_t gen_alignment = heap->old_gen_alignment();
  PSAdaptiveSizePolicy* policy = heap->size_policy();
  const size_t working_size =
    used_in_bytes() + (size_t) policy->avg_promoted()->padded_average();
  const size_t working_aligned = align_size_up(working_size, gen_alignment);
  const size_t working_or_min = MAX2(working_aligned, min_gen_size());
  if (working_or_min > reserved().byte_size()) {
    // If the used or minimum gen size (aligned up) is greater
    // than the total reserved size, then the space available
    // for contraction should (after proper alignment) be 0
    return 0;
  }
  const size_t max_contraction =
    reserved().byte_size() - working_or_min;

  // Use the "increment" fraction instead of the "decrement" fraction
  // to allow the other gen to expand more aggressively.  The
  // "decrement" fraction is conservative because its intent is to
  // only reduce the footprint.

  size_t result = policy->promo_increment_aligned_down(max_contraction);
  // Also adjust for inter-generational alignment
  size_t result_aligned = align_size_down(result, gen_alignment);
  if (PrintAdaptiveSizePolicy && Verbose) {
    gclog_or_tty->print_cr("\nASPSOldGen::available_for_contraction:"
      " %d K / 0x%x", result_aligned/K, result_aligned);
    gclog_or_tty->print_cr(" reserved().byte_size() %d K / 0x%x ",
      reserved().byte_size()/K, reserved().byte_size());
    size_t working_promoted = (size_t) policy->avg_promoted()->padded_average();
    gclog_or_tty->print_cr(" padded promoted %d K / 0x%x",
      working_promoted/K, working_promoted);
    gclog_or_tty->print_cr(" used %d K / 0x%x",
      used_in_bytes()/K, used_in_bytes());
    gclog_or_tty->print_cr(" min_gen_size() %d K / 0x%x",
      min_gen_size()/K, min_gen_size());
    gclog_or_tty->print_cr(" max_contraction %d K / 0x%x",
      max_contraction/K, max_contraction);
    gclog_or_tty->print_cr("    without alignment %d K / 0x%x",
      policy->promo_increment(max_contraction)/K,
      policy->promo_increment(max_contraction));
    gclog_or_tty->print_cr(" alignment 0x%x", gen_alignment);
  }
  assert(result_aligned <= max_contraction, "arithmetic is wrong");
  return result_aligned;
}
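
A worked numeric pass over the arithmetic above, with made-up sizes (not HotSpot code). align_up and align_down round to a multiple of the alignment, and a 20% fraction is assumed for promo_increment_aligned_down(), whose real fraction is owned by the adaptive size policy.

#include <algorithm>
#include <cstddef>
#include <cstdio>

static std::size_t align_down(std::size_t v, std::size_t a) { return v - (v % a); }
static std::size_t align_up(std::size_t v, std::size_t a)   { return align_down(v + a - 1, a); }

int main() {
  const std::size_t M = 1u << 20;
  const std::size_t gen_alignment       = 1 * M;
  const std::size_t reserved            = 128 * M;
  const std::size_t min_gen_size        = 32 * M;
  const std::size_t used                = 60 * M;
  const std::size_t padded_avg_promoted = 5 * M;

  // Keep room for what is live plus what the next scavenge is expected to promote.
  std::size_t working        = align_up(used + padded_avg_promoted, gen_alignment); // 65 MB
  std::size_t working_or_min = std::max(working, min_gen_size);                     // 65 MB
  std::size_t max_contraction = reserved - working_or_min;                          // 63 MB

  // Give back only a fraction per adjustment (20% assumed here), aligned down.
  std::size_t result = align_down(max_contraction / 5, gen_alignment);              // 12 MB
  std::printf("available for contraction: %zu MB\n", result / M);
  return 0;
}
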
Code example #5
// Return the number of bytes the young gen is willing to give up.
//
// Future implementations could check the survivors and if to_space is in the
// right place (below from_space), take a chunk from to_space.
size_t ASPSYoungGen::available_for_contraction() {

  size_t uncommitted_bytes = virtual_space()->uncommitted_size();
  if (uncommitted_bytes != 0) {
    return uncommitted_bytes;
  }

  if (eden_space()->is_empty()) {
    // Respect the minimum size for eden and for the young gen as a whole.
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    const size_t eden_alignment = heap->intra_heap_alignment();
    const size_t gen_alignment = heap->young_gen_alignment();

    assert(eden_space()->capacity_in_bytes() >= eden_alignment,
      "Alignment is wrong");
    size_t eden_avail = eden_space()->capacity_in_bytes() - eden_alignment;
    eden_avail = align_size_down(eden_avail, gen_alignment);

    assert(virtual_space()->committed_size() >= min_gen_size(),
      "minimum gen size is wrong");
    size_t gen_avail = virtual_space()->committed_size() - min_gen_size();
    assert(virtual_space()->is_aligned(gen_avail), "not aligned");

    const size_t max_contraction = MIN2(eden_avail, gen_avail);
    // See comment for ASPSOldGen::available_for_contraction()
    // for reasons the "increment" fraction is used.
    PSAdaptiveSizePolicy* policy = heap->size_policy();
    size_t result = policy->eden_increment_aligned_down(max_contraction);
    size_t result_aligned = align_size_down(result, gen_alignment);
    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr("ASPSYoungGen::available_for_contraction: %d K",
        result_aligned/K);
      gclog_or_tty->print_cr("  max_contraction %d K", max_contraction/K);
      gclog_or_tty->print_cr("  eden_avail %d K", eden_avail/K);
      gclog_or_tty->print_cr("  gen_avail %d K", gen_avail/K);
    }
    return result_aligned;
  }

  return 0;
}
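
The young-gen bound above combines two limits: eden must keep at least one alignment unit of capacity, and the generation must not shrink below its minimum committed size. A sketch with made-up sizes, not HotSpot code; the 25% fraction stands in for eden_increment_aligned_down(), whose real value comes from the adaptive size policy.

#include <algorithm>
#include <cstddef>
#include <cstdio>

static std::size_t align_down(std::size_t v, std::size_t a) { return v - (v % a); }

std::size_t young_available_for_contraction(std::size_t eden_capacity, std::size_t eden_alignment,
                                            std::size_t committed, std::size_t min_gen_size,
                                            std::size_t gen_alignment, bool eden_is_empty) {
  if (!eden_is_empty) return 0;  // never shrink while eden holds objects
  std::size_t eden_avail = align_down(eden_capacity - eden_alignment, gen_alignment);
  std::size_t gen_avail  = committed - min_gen_size;
  std::size_t max_contraction = std::min(eden_avail, gen_avail);
  return align_down(max_contraction / 4, gen_alignment);  // assumed 25% fraction
}

int main() {
  const std::size_t M = 1u << 20;
  // Eden 40 MB, alignment 1 MB, 96 MB committed, 24 MB minimum young gen:
  // eden_avail = 39 MB, gen_avail = 72 MB, max_contraction = 39 MB, result = 9 MB.
  std::printf("%zu MB\n",
              young_available_for_contraction(40 * M, 1 * M, 96 * M, 24 * M, 1 * M, true) / M);
  return 0;
}
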
Code example #6
// This method contains all heap specific policy for invoking scavenge.
// PSScavenge::invoke_no_policy() will do nothing but attempt to
// scavenge. It will not clean up after failed promotions, bail out if
// we've exceeded policy time limits, or any other special behavior.
// All such policy should be placed here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
void PSScavenge::invoke()
{
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // Before each allocation/collection attempt, find out from the
  // policy object if GCs are, on the whole, taking too long. If so,
  // bail out without attempting a collection.
  if (!policy->gc_time_limit_exceeded()) {
    IsGCActiveMark mark;

    bool scavenge_was_done = PSScavenge::invoke_no_policy();

    PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
    if (UsePerfData)
      counters->update_full_follows_scavenge(0);
    if (!scavenge_was_done ||
        policy->should_full_GC(heap->old_gen()->free_in_bytes())) {
      if (UsePerfData)
        counters->update_full_follows_scavenge(full_follows_scavenge);

      GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
      if (UseParallelOldGC) {
        PSParallelCompact::invoke_no_policy(false);
      } else {
        PSMarkSweep::invoke_no_policy(false);
      }
    }
  }
}