void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
  young_gen()->from_space()->ensure_parsability();
  young_gen()->to_space()->ensure_parsability();
  old_gen()->object_space()->ensure_parsability();
}
void ParallelScavengeHeap::gen_mangle_unused_area() {
  if (ZapUnusedHeapArea) {
    young_gen()->eden_space()->mangle_unused_area();
    young_gen()->to_space()->mangle_unused_area();
    young_gen()->from_space()->mangle_unused_area();
    old_gen()->object_space()->mangle_unused_area();
  }
}
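// A minimal standalone sketch of the mangling idea above (editor's
// illustration, not HotSpot code): overwrite the unused tail of a space
// with a recognizable poison pattern so stale data stands out in a
// debugger. `Space` and the 0xBAADBABE... value are stand-ins, not the
// real types or constants.
#include <cstddef>
#include <cstdint>
#include <vector>

struct Space {
  std::vector<uintptr_t> words;
  size_t top;  // index of the first unused word
};

void mangle_unused_area_sketch(Space& s) {
  const uintptr_t badHeapWord = 0xBAADBABEBAADBABEull;  // assumed pattern
  for (size_t i = s.top; i < s.words.size(); ++i) {
    s.words[i] = badHeapWord;
  }
}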
size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}
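// Why a survivor space is subtracted above: at any moment only eden and
// one survivor space can hold objects, so the other survivor space is
// dead capacity. A standalone sketch of the same estimate (editor's
// illustration; the parameter names are stand-ins):
#include <algorithm>
#include <cstddef>

size_t estimated_max_capacity(size_t reserved_bytes,
                              size_t to_space_bytes,
                              size_t current_capacity) {
  size_t estimated = reserved_bytes - to_space_bytes;
  // Never report less than what is currently committed.
  return std::max(estimated, current_capacity);
}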
// Make checks on the current sizes of the generations and
// the constraints on the sizes of the generations.  Push
// up the boundary within the constraints.  A partial
// push can occur.
void AdjoiningGenerations::request_old_gen_expansion(size_t expand_in_bytes) {
  assert(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary, "runtime check");

  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  // These sizes limit the amount the boundaries can move.  Effectively,
  // the generation says how much it is willing to yield to the other
  // generation.
  const size_t young_gen_available = young_gen()->available_for_contraction();
  const size_t old_gen_available = old_gen()->available_for_expansion();
  const size_t alignment = virtual_spaces()->alignment();
  size_t change_in_bytes = MIN3(young_gen_available,
                                old_gen_available,
                                align_size_up_(expand_in_bytes, alignment));

  if (change_in_bytes == 0) {
    return;
  }

  if (TraceAdaptiveGCBoundary) {
    gclog_or_tty->print_cr("Before expansion of old gen with boundary move");
    gclog_or_tty->print_cr("  Requested change: " SIZE_FORMAT_HEX
                           "  Attempted change: " SIZE_FORMAT_HEX,
      expand_in_bytes, change_in_bytes);
    if (!PrintHeapAtGC) {
      Universe::print_on(gclog_or_tty);
    }
    gclog_or_tty->print_cr("  PSOldGen max size: " SIZE_FORMAT "K",
      old_gen()->max_gen_size()/K);
  }

  // Move the boundary between the generations up (smaller young gen).
  if (virtual_spaces()->adjust_boundary_up(change_in_bytes)) {
    young_gen()->reset_after_change();
    old_gen()->reset_after_change();
  }

  // The total reserved for the generations should match the sum
  // of the two even if the boundary is moving.
  assert(reserved_byte_size() ==
         old_gen()->max_gen_size() + young_gen()->max_size(),
         "Space is missing");
  young_gen()->space_invariants();
  old_gen()->space_invariants();

  if (TraceAdaptiveGCBoundary) {
    gclog_or_tty->print_cr("After expansion of old gen with boundary move");
    if (!PrintHeapAtGC) {
      Universe::print_on(gclog_or_tty);
    }
    gclog_or_tty->print_cr("  PSOldGen max size: " SIZE_FORMAT "K",
      old_gen()->max_gen_size()/K);
  }
}
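// The heart of the move above is a clamp: take the smallest of what the
// young gen will yield, what the old gen can absorb, and the aligned
// request. A standalone sketch (editor's illustration; align_up mirrors
// the align_size_up_ arithmetic for power-of-two alignments):
#include <algorithm>
#include <cstddef>

size_t align_up(size_t x, size_t alignment) {
  return (x + alignment - 1) & ~(alignment - 1);
}

size_t boundary_change(size_t young_available,
                       size_t old_available,
                       size_t requested,
                       size_t alignment) {
  return std::min({young_available, old_available,
                   align_up(requested, alignment)});
}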
// Failed allocation policy. Must be called from the VM thread, and
// only at a safepoint! Note that this method implements allocation
// policy, NOT collection policy: we do not check here whether the GC
// time limit has been exceeded; that is the responsibility of the
// heap-specific collection methods. This method decides where to
// attempt allocations and when to attempt collections, but carries no
// collection-specific policy.
HeapWord* ParallelScavengeHeap::failed_mem_allocate(bool* notify_ref_lock,
                                                    size_t size,
                                                    bool is_noref,
                                                    bool is_tlab) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  // Note that this assert assumes no from-space allocation in eden
  assert(size > young_gen()->eden_space()->free_in_words(), "Allocation should fail");

  size_t mark_sweep_invocation_count = PSMarkSweep::total_invocations();

  // We assume (and assert!) that an allocation at this point will fail
  // unless we collect.

  // First level allocation failure, scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  PSScavenge::invoke(notify_ref_lock);
  HeapWord* result = young_gen()->allocate(size, is_noref, is_tlab);

  // Second level allocation failure. 
  //   Mark sweep and allocate in young generation.  
  if (result == NULL) {
    // There is some chance the scavenge method decided to invoke mark_sweep.
    // Don't mark sweep twice if so.
    if (mark_sweep_invocation_count == PSMarkSweep::total_invocations()) {
      PSMarkSweep::invoke(notify_ref_lock, false);
      result = young_gen()->allocate(size, is_noref, is_tlab);
    }
  }

  // Third level allocation failure.
  //   After mark sweep and young generation allocation failure, 
  //   allocate in old generation.
  if (result == NULL && !is_tlab) {
    result = old_gen()->allocate(size, is_noref, is_tlab);
  }

  // Fourth level allocation failure. We're running out of memory.
  //   More complete mark sweep and allocate in young generation.
  if (result == NULL) {
    PSMarkSweep::invoke(notify_ref_lock, true /* max_heap_compaction */);
    result = young_gen()->allocate(size, is_noref, is_tlab);
  }

  // Fifth level allocation failure.
  //   After more complete mark sweep, allocate in old generation.
  if (result == NULL && !is_tlab) {
    result = old_gen()->allocate(size, is_noref, is_tlab);
  }

  return result;
}
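// The five levels above form an escalation ladder: each rung runs only
// if the previous allocation attempt returned NULL. A compressed
// standalone sketch (editor's illustration; Gen, minor_gc and full_gc
// are trivial stand-ins, not the HotSpot API):
#include <cstddef>
#include <cstdlib>

struct Gen {
  void* allocate(size_t size) { return std::malloc(size); }  // stand-in
};
static void minor_gc() {}                        // scavenge (no-op here)
static void full_gc(bool /*max_compaction*/) {}  // mark-sweep (no-op here)

void* failed_allocate_sketch(Gen& young, Gen& old, size_t size, bool is_tlab) {
  minor_gc();                                                    // level 1
  void* p = young.allocate(size);
  if (p == NULL) { full_gc(false); p = young.allocate(size); }   // level 2
  if (p == NULL && !is_tlab) { p = old.allocate(size); }         // level 3
  if (p == NULL) { full_gc(true); p = young.allocate(size); }    // level 4
  if (p == NULL && !is_tlab) { p = old.allocate(size); }         // level 5
  return p;
}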
HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // called from os::print_location by find or VMError
    if (Debugging || VMError::fatal_error_in_progress())  return NULL;
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return 0;
}
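// object_start() above relies on the old gen's start array to map an
// interior address back to the object containing it. A miniature
// analogue (editor's illustration): with the sorted object start offsets
// in hand, the covering object begins at the largest recorded start <=
// addr. The real ObjectStartArray answers this with a per-card table
// rather than a binary search.
#include <algorithm>
#include <cstddef>
#include <vector>

// Precondition: starts is sorted and starts.front() <= addr.
size_t object_start_sketch(const std::vector<size_t>& starts, size_t addr) {
  std::vector<size_t>::const_iterator it =
      std::upper_bound(starts.begin(), starts.end(), addr);
  return *(it - 1);
}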
void ParallelScavengeHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    young_gen()->record_spaces_top();
    old_gen()->record_spaces_top();
    perm_gen()->record_spaces_top();
  }
}
// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(size_t size, bool is_noref, bool is_tlab) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  HeapWord* result;

  uint loop_count = 0;

  do {
    result = young_gen()->allocate(size, is_noref, is_tlab);

    // In some cases, the requested object will be too large to easily
    // fit in the young_gen. Rather than force a safepoint and collection
    // for each one, try allocation in old_gen for objects likely to fail
    // allocation in eden.
    if (result == NULL && size >= (young_gen()->eden_space()->capacity_in_words() / 2) && !is_tlab) {
      MutexLocker ml(Heap_lock);
      result = old_gen()->allocate(size, is_noref, is_tlab);
    }

    if (result == NULL) {
      // Generate a VM operation
      VM_ParallelGCFailedAllocation op(size, is_noref, is_tlab);
      VMThread::execute(&op);
      
      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until timeout on requests that
      // cannot be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_or_null(op.result()), "result not in heap");
        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) && (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
              " size=%d %s", loop_count, size, is_tlab ? "(TLAB)" : "");
    }
  } while (result == NULL && !size_policy()->gc_time_limit_exceeded());

  return result;
}
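// The warning cadence above fires on every QueuedAllocationWarningCount-th
// failed iteration, and a value of zero disables it. A standalone sketch
// of just that predicate (editor's illustration):
bool should_warn(unsigned loop_count, unsigned warning_count) {
  // warning_count == 0 disables the warning entirely.
  return warning_count > 0 && loop_count % warning_count == 0;
}
// e.g. with warning_count == 4, this is true at loop_count 4, 8, 12, ...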
// See comments on request_old_gen_expansion()
bool AdjoiningGenerations::request_young_gen_expansion(size_t expand_in_bytes) {
  assert(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary, "runtime check");

  // If eden is not empty, the boundary can be moved, but the move
  // gains nothing because eden itself cannot be relocated.
  if (!young_gen()->eden_space()->is_empty()) {
    return false;
  }

  bool result = false;
  const size_t young_gen_available = young_gen()->available_for_expansion();
  const size_t old_gen_available = old_gen()->available_for_contraction();
  const size_t alignment = virtual_spaces()->alignment();
  size_t change_in_bytes = MIN3(young_gen_available,
                                old_gen_available,
                                align_size_up_(expand_in_bytes, alignment));

  if (change_in_bytes == 0) {
    return false;
  }

  if (TraceAdaptiveGCBoundary) {
    gclog_or_tty->print_cr("Before expansion of young gen with boundary move");
gclog_or_tty->print_cr("  Requested change: 0x%zx  Attempted change: 0x%zx",
      expand_in_bytes, change_in_bytes);
    if (!PrintHeapAtGC) {
      Universe::print_on(gclog_or_tty);
    }
    gclog_or_tty->print_cr("  PSYoungGen max size: " SIZE_FORMAT "K", 
      young_gen()->max_size()/K);
  }

  // Move the boundary between the generations down (smaller old gen).
  MutexLocker x(ExpandHeap_lock);
  if (virtual_spaces()->adjust_boundary_down(change_in_bytes)) {
    young_gen()->reset_after_change();
    old_gen()->reset_after_change();
    result = true;
  }

  // The total reserved for the generations should match the sum
  // of the two even if the boundary is moving.
  assert(reserved_byte_size() ==
         old_gen()->max_gen_size() + young_gen()->max_size(),
         "Space is missing");
  young_gen()->space_invariants();
  old_gen()->space_invariants();

  if (TraceAdaptiveGCBoundary) {
    gclog_or_tty->print_cr("After expansion of young gen with boundary move");
    if (!PrintHeapAtGC) {
      Universe::print_on(gclog_or_tty);
    }
    gclog_or_tty->print_cr("  PSYoungGen max size: " SIZE_FORMAT "K", 
      young_gen()->max_size()/K);
  }

  return result;
}
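// The assert above pins down the boundary-move invariant: capacity moves
// between the generations, but their sum always equals the reservation.
// A standalone sketch (editor's illustration; the parameters stand in
// for max_gen_size()/max_size()):
#include <cassert>
#include <cstddef>

void move_boundary_down(size_t& old_max, size_t& young_max, size_t change,
                        size_t reserved) {
  old_max -= change;    // old gen shrinks by the change...
  young_max += change;  // ...and the young gen grows by the same amount.
  assert(old_max + young_max == reserved && "Space is missing");
}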
// Failed allocation policy. Must be called from the VM thread, and
// only at a safepoint! Note that this method implements allocation
// policy, NOT collection policy: we do not check here whether the GC
// time limit has been exceeded; that is the responsibility of the
// heap-specific collection methods. This method decides where to
// attempt allocations and when to attempt collections, but carries no
// collection-specific policy.
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // We assume that allocation in eden will fail unless we collect.

  // First level allocation failure, scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  const bool invoked_full_gc = PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size);

  // Second level allocation failure.
  //   Mark sweep and allocate in young generation.
  if (result == NULL && !invoked_full_gc) {
    do_full_collection(false);
    result = young_gen()->allocate(size);
  }

  death_march_check(result, size);

  // Third level allocation failure.
  //   After mark sweep and young generation allocation failure,
  //   allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  // Fourth level allocation failure. We're running out of memory.
  //   More complete mark sweep and allocate in young generation.
  if (result == NULL) {
    do_full_collection(true);
    result = young_gen()->allocate(size);
  }

  // Fifth level allocation failure.
  //   After more complete mark sweep, allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  return result;
}
HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in(addr)) {
    Unimplemented();
  } else if (old_gen()->is_in(addr)) {
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  } else if (perm_gen()->is_in(addr)) {
    return perm_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return 0;
}
void ParallelScavengeHeap::verify(VerifyOption option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    log_debug(gc, verify)("Tenured");
    old_gen()->verify();

    log_debug(gc, verify)("Eden");
    young_gen()->verify();
  }
}
// See comment on adjust_boundary_for_old_gen_needs().
// Adjust boundary down only.
void AdjoiningGenerations::adjust_boundary_for_young_gen_needs(size_t eden_size,
    size_t survivor_size) {

  assert(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary, "runtime check");

  // Stress testing.
  if (PSAdaptiveSizePolicyResizeVirtualSpaceAlot == 0) {
    request_young_gen_expansion(virtual_spaces()->alignment() * 3 / 2);
    eden_size = young_gen()->eden_space()->capacity_in_bytes();
  }

  // Expand only if the entire generation is already committed.
  if (young_gen()->virtual_space()->uncommitted_size() == 0) {
    size_t desired_size = eden_size + 2 * survivor_size;
    const size_t committed = young_gen()->virtual_space()->committed_size();
    if (desired_size > committed) {
      request_young_gen_expansion(desired_size - committed);
    }
  }
}
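// The sizing rule above in isolation: the young gen wants eden plus two
// survivor spaces, and only the shortfall past what is already committed
// is requested. A standalone sketch (editor's illustration):
#include <cstddef>

size_t expansion_needed(size_t eden_size, size_t survivor_size,
                        size_t committed) {
  const size_t desired = eden_size + 2 * survivor_size;
  return desired > committed ? desired - committed : 0;
}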
bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  if (young_gen()->is_in_reserved(p)) {
    return true;
  }

  if (old_gen()->is_in_reserved(p)) {
    return true;
  }

  return false;
}
PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
  PSOldGen* old = old_gen();
  HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
  VirtualSpaceSummary old_summary(old->reserved().start(), old_committed_end, old->reserved().end());
  SpaceSummary old_space(old->reserved().start(), old_committed_end, old->used_in_bytes());

  PSYoungGen* young = young_gen();
  VirtualSpaceSummary young_summary(young->reserved().start(),
    (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());

  MutableSpace* eden = young_gen()->eden_space();
  SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());

  MutableSpace* from = young_gen()->from_space();
  SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes());

  MutableSpace* to = young_gen()->to_space();
  SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());

  VirtualSpaceSummary heap_summary = create_heap_space_summary();
  return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
}
void ParallelScavengeHeap::verify(bool silent, VerifyOption option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    if (!silent) {
      gclog_or_tty->print("tenured ");
    }
    old_gen()->verify();

    if (!silent) {
      gclog_or_tty->print("eden ");
    }
    young_gen()->verify();
  }
}
// Return the color of the subspace, within the given colored space, that
// contains obj.
static HeapColor color_of(MutableSpace* space, oop obj) {
  MutableColoredSpace* cs = (MutableColoredSpace*)space;
  if (cs->colored_spaces()->at(HC_RED)->space()->contains(obj)) {
    return HC_RED;
  }
  guarantee(cs->colored_spaces()->at(HC_BLUE)->space()->contains(obj),
            "obj must be in the red or blue subspace");
  return HC_BLUE;
}

HeapColor ParallelScavengeHeap::get_current_color(oop obj) {
  guarantee(UseColoredSpaces, "not using colored spaces!");
  guarantee(is_in(obj), "obj not in a colored space!");

  if (is_in_young(obj)) {
    if (young_gen()->eden_space()->contains(obj)) {
      return color_of(young_gen()->eden_space(), obj);
    } else if (young_gen()->from_space()->contains(obj)) {
      return color_of(young_gen()->from_space(), obj);
    } else {
      return color_of(young_gen()->to_space(), obj);
    }
  }
  return color_of(old_gen()->object_space(), obj);
}
void ParallelScavengeHeap::verify(bool allow_dirty, bool silent) {
  // Really stupid. Can't fill the tlabs, can't verify. FIX ME!
  if (total_collections() > 0) {
    if (!silent) {
      gclog_or_tty->print("permanent ");
    }
    perm_gen()->verify(allow_dirty);

    if (!silent) {
      gclog_or_tty->print("tenured ");
    }
    old_gen()->verify(allow_dirty);

    if (!silent) {
      gclog_or_tty->print("eden ");
    }
    young_gen()->verify(allow_dirty);
  }
}
void ParallelScavengeHeap::verify(bool allow_dirty, bool silent, bool option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    if (!silent) {
      gclog_or_tty->print("permanent ");
    }
    perm_gen()->verify(allow_dirty);

    if (!silent) {
      gclog_or_tty->print("tenured ");
    }
    old_gen()->verify(allow_dirty);

    if (!silent) {
      gclog_or_tty->print("eden ");
    }
    young_gen()->verify(allow_dirty);
  }
  if (!silent) {
    gclog_or_tty->print("ref_proc ");
  }
  ReferenceProcessor::verify();
}
size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}
void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  perm_gen()->print_on(st);
}
void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
  perm_gen()->object_iterate(cl);
}
HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
  return young_gen()->allocate(size, true);
}
size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}
size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}
size_t ParallelScavengeHeap::unsafe_max_alloc() {
  return young_gen()->eden_space()->free_in_bytes();
}
jint ParallelScavengeHeap::initialize() {
  CollectedHeap::pre_initialize();

  // Cannot be initialized until after the flags are parsed
  // GenerationSizer flag_parser;
  _collector_policy = new GenerationSizer();

  size_t yg_min_size = _collector_policy->min_young_gen_size();
  size_t yg_max_size = _collector_policy->max_young_gen_size();
  size_t og_min_size = _collector_policy->min_old_gen_size();
  size_t og_max_size = _collector_policy->max_old_gen_size();
  // Why isn't there a min_perm_gen_size()?
  size_t pg_min_size = _collector_policy->perm_gen_size();
  size_t pg_max_size = _collector_policy->max_perm_gen_size();

  trace_gen_sizes("ps heap raw",
                  pg_min_size, pg_max_size,
                  og_min_size, og_max_size,
                  yg_min_size, yg_max_size);

  // The ReservedSpace ctor used below requires that the page size for the perm
  // gen is <= the page size for the rest of the heap (young + old gens).
  const size_t og_page_sz = os::page_size_for_region(yg_min_size + og_min_size,
                                                     yg_max_size + og_max_size,
                                                     8);
  const size_t pg_page_sz = MIN2(os::page_size_for_region(pg_min_size,
                                                          pg_max_size, 16),
                                 og_page_sz);

  const size_t pg_align = set_alignment(_perm_gen_alignment,  pg_page_sz);
  const size_t og_align = set_alignment(_old_gen_alignment,   og_page_sz);
  const size_t yg_align = set_alignment(_young_gen_alignment, og_page_sz);

  // Update sizes to reflect the selected page size(s).
  //
  // NEEDS_CLEANUP.  The default TwoGenerationCollectorPolicy uses NewRatio; it
  // should check UseAdaptiveSizePolicy.  Changes from generationSizer could
  // move to the common code.
  yg_min_size = align_size_up(yg_min_size, yg_align);
  yg_max_size = align_size_up(yg_max_size, yg_align);
  size_t yg_cur_size =
    align_size_up(_collector_policy->young_gen_size(), yg_align);
  yg_cur_size = MAX2(yg_cur_size, yg_min_size);

  og_min_size = align_size_up(og_min_size, og_align);
  og_max_size = align_size_up(og_max_size, og_align);
  size_t og_cur_size =
    align_size_up(_collector_policy->old_gen_size(), og_align);
  og_cur_size = MAX2(og_cur_size, og_min_size);

  pg_min_size = align_size_up(pg_min_size, pg_align);
  pg_max_size = align_size_up(pg_max_size, pg_align);
  size_t pg_cur_size = pg_min_size;

  trace_gen_sizes("ps heap rnd",
                  pg_min_size, pg_max_size,
                  og_min_size, og_max_size,
                  yg_min_size, yg_max_size);

  const size_t total_reserved = pg_max_size + og_max_size + yg_max_size;
  char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);

  // The main part of the heap (old gen + young gen) can often use a larger page
  // size than is needed or wanted for the perm gen.  Use the "compound
  // alignment" ReservedSpace ctor to avoid having to use the same page size for
  // all gens.

  ReservedHeapSpace heap_rs(pg_max_size, pg_align, og_max_size + yg_max_size,
                            og_align, addr);

  if (UseCompressedOops) {
    if (addr != NULL && !heap_rs.is_reserved()) {
      // Failed to reserve at specified address - the requested memory
      // region is taken already, for example, by 'java' launcher.
      // Try again to reserve the heap at a higher address.
      addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
      ReservedHeapSpace heap_rs0(pg_max_size, pg_align, og_max_size + yg_max_size,
                                 og_align, addr);
      if (addr != NULL && !heap_rs0.is_reserved()) {
        // Failed to reserve at specified address again - give up.
        addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
        assert(addr == NULL, "");
        ReservedHeapSpace heap_rs1(pg_max_size, pg_align, og_max_size + yg_max_size,
                                   og_align, addr);
        heap_rs = heap_rs1;
      } else {
        heap_rs = heap_rs0;
      }
    }
  }

  os::trace_page_sizes("ps perm", pg_min_size, pg_max_size, pg_page_sz,
                       heap_rs.base(), pg_max_size);
  os::trace_page_sizes("ps main", og_min_size + yg_min_size,
                       og_max_size + yg_max_size, og_page_sz,
                       heap_rs.base() + pg_max_size,
                       heap_rs.size() - pg_max_size);
  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

  CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3);
  _barrier_set = barrier_set;
  // Check for allocation failure before installing the barrier set globally.
  if (_barrier_set == NULL) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for barrier set");
    return JNI_ENOMEM;
  }
  oopDesc::set_bs(_barrier_set);

  // Initial young gen size is 4 Mb
  //
  // XXX - what about flag_parser.young_gen_size()?
  const size_t init_young_size = align_size_up(4 * M, yg_align);
  yg_cur_size = MAX2(MIN2(init_young_size, yg_max_size), yg_cur_size);

  // Split the reserved space into perm gen and the main heap (everything else).
  // The main heap uses a different alignment.
  ReservedSpace perm_rs = heap_rs.first_part(pg_max_size);
  ReservedSpace main_rs = heap_rs.last_part(pg_max_size, og_align);

  // Make up the generations.
  //
  // Calculate the maximum size that each generation can grow, including
  // growth into the other generation. Note that the parameter
  // _max_gen_size still holds the maximum size of the generation as the
  // boundaries currently stand.
  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

  _gens = new AdjoiningGenerations(main_rs,
                                   og_cur_size,
                                   og_min_size,
                                   og_max_size,
                                   yg_cur_size,
                                   yg_min_size,
                                   yg_max_size,
                                   yg_align);

  _old_gen = _gens->old_gen();
  _young_gen = _gens->young_gen();

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             intra_heap_alignment(),
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio
                             );

  _perm_gen = new PSPermGen(perm_rs,
                            pg_align,
                            pg_cur_size,
                            pg_cur_size,
                            pg_max_size,
                            "perm", 2);

  assert(!UseAdaptiveGCBoundary ||
    (old_gen()->virtual_space()->high_boundary() ==
     young_gen()->virtual_space()->low_boundary()),
    "Boundaries must meet");
  // initialize the policy counters - 2 collectors, 3 generations
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);
  _psh = this;

  // Set up the GCTaskManager
  _gc_task_manager = GCTaskManager::create(ParallelGCThreads);

  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  return JNI_OK;
}
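// The first_part/last_part calls in initialize() carve one contiguous
// reservation into a perm-gen prefix and a main-heap suffix. A standalone
// sketch of that carving (editor's illustration; Range is a stand-in for
// ReservedSpace and ignores the suffix re-alignment the real ctor does):
#include <cstddef>

struct Range { char* base; size_t size; };

Range first_part(Range r, size_t n) { return Range{ r.base, n }; }
Range last_part(Range r, size_t n)  { return Range{ r.base + n, r.size - n }; }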
// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(
                                     size_t size,
                                     bool is_noref,
                                     bool is_tlab,
                                     bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // In general gc_overhead_limit_was_exceeded should be false, so
  // set it to false here and reset it to true only if the gc time
  // limit is exceeded, as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size, is_tlab);

  uint loop_count = 0;
  uint gc_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();

      result = young_gen()->allocate(size, is_tlab);

      // (1) If the requested object is too large to easily fit in the
      //     young_gen, or
      // (2) If GC is locked out via GCLocker, young gen is full and
      //     the need for a GC already signalled to GCLocker (done
      //     at a safepoint),
      // ... then, rather than force a safepoint and (a potentially futile)
      // collection (attempt) for each allocation, try allocation directly
      // in old_gen. For case (2) above, we may in the future allow
      // TLAB allocation directly in the old gen.
      if (result != NULL) {
        return result;
      }
      if (!is_tlab &&
          size >= (young_gen()->eden_space()->capacity_in_words(Thread::current()) / 2)) {
        result = old_gen()->allocate(size, is_tlab);
        if (result != NULL) {
          return result;
        }
      }
      if (GC_locker::is_active_and_needs_gc()) {
        // GC is locked out. If this is a TLAB allocation,
        // return NULL; the requestor will retry allocation
        // of individual objects, one at a time.
        if (is_tlab) {
          return NULL;
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {

      // Generate a VM operation
      VM_ParallelGCFailedAllocation op(size, is_tlab, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until timeout on requests that
      // cannot be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_or_null(op.result()),
          "result not in heap");

        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }

        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above ("result" guarding
        // this path is NULL) and the most recent collection has exceeded the
        // gc overhead limit (although enough may have been collected to
        // satisfy the allocation).  Exit the loop so that an out-of-memory
        // error will be thrown (return NULL, ignoring the contents of
        // op.result()), but clear gc_overhead_limit_exceeded so that the
        // next collection starts with a clean slate (i.e., forgets about
        // previous overhead excesses).  Fill op.result() with a filler
        // object so that the heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = collector_policy()->all_soft_refs_clear();
        assert(!limit_exceeded || softrefs_clear, "Should have been cleared");
        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          if (PrintGCDetails && Verbose) {
            gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
              "return NULL because gc_overhead_limit_exceeded is set");
          }
          if (op.result() != NULL) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return NULL;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
              " size=%d %s", loop_count, size, is_tlab ? "(TLAB)" : "");
    }
  }

  return result;
}
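// The overhead-limit bailout above, reduced to its essentials (editor's
// illustration, not the HotSpot API): if the last collection blew the GC
// time budget and soft references were already cleared, tell the caller,
// reset the sticky flag so the next cycle starts clean, and have the
// caller return NULL so an OutOfMemoryError can be thrown.
struct OverheadPolicy {
  bool limit_exceeded;
  bool should_give_up(bool softrefs_clear, bool* out_exceeded) {
    if (limit_exceeded && softrefs_clear) {
      *out_exceeded = true;
      limit_exceeded = false;  // forget previous overhead excesses
      return true;             // caller returns NULL -> OutOfMemoryError
    }
    return false;
  }
};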
bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}
size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}