// This method iterates over all objects in the young generation,
// unforwarding markOops. It then restores any preserved mark oops,
// and clears the _preserved_mark_stack.
void PSScavenge::clean_up_failed_promotion() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  {
    ResourceMark rm;

    // Unforward all pointers in the young gen.
    PSPromotionFailedClosure unforward_closure;
    young_gen->object_iterate(&unforward_closure);

    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Restoring " SIZE_FORMAT " marks", _preserved_oop_stack.size());
    }

    // Restore any saved marks.
    while (!_preserved_oop_stack.is_empty()) {
      oop obj      = _preserved_oop_stack.pop();
      markOop mark = _preserved_mark_stack.pop();
      obj->set_mark(mark);
    }

    // Clear the preserved mark and oop stack caches.
    _preserved_mark_stack.clear(true);
    _preserved_oop_stack.clear(true);
  }

  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
}
void PSYoungGen::compute_initial_space_boundaries() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Compute sizes
  size_t alignment = heap->intra_heap_alignment();
  size_t size = virtual_space()->committed_size();

  size_t survivor_size = size / InitialSurvivorRatio;
  survivor_size = align_size_down(survivor_size, alignment);
  // ... but never less than an alignment
  survivor_size = MAX2(survivor_size, alignment);
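  // Illustrative arithmetic (assumed numbers): with a 16M committed young
  // gen and InitialSurvivorRatio = 8, each survivor space is 2M after
  // alignment, leaving 12M for eden below.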

  // Young generation is eden + 2 survivor spaces
  size_t eden_size = size - (2 * survivor_size);

  // Now go ahead and set 'em.
  set_space_boundaries(eden_size, survivor_size);
  space_invariants();

  if (UsePerfData) {
    _eden_counters->update_capacity();
    _from_counters->update_capacity();
    _to_counters->update_capacity();
  }
}
void PSPromotionManager::initialize() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  _old_gen = heap->old_gen();
  _young_space = heap->young_gen()->to_space();

  assert(_manager_array == NULL, "Attempt to initialize twice");
  _manager_array = NEW_C_HEAP_ARRAY(PSPromotionManager*, ParallelGCThreads+1, mtGC);
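  // One slot per GC worker thread, plus one extra slot for the VMThread's
  // manager (registered at the end of this method).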
  guarantee(_manager_array != NULL, "Could not initialize promotion manager");

  _stack_array_depth = new OopStarTaskQueueSet(ParallelGCThreads);
  guarantee(_stack_array_depth != NULL, "Could not initialize promotion manager");

  // Create and register the PSPromotionManager(s) for the worker threads.
  for(uint i=0; i<ParallelGCThreads; i++) {
    _manager_array[i] = new PSPromotionManager();
    guarantee(_manager_array[i] != NULL, "Could not create PSPromotionManager");
    stack_array_depth()->register_queue(i, _manager_array[i]->claimed_stack_depth());
  }

  // The VMThread gets its own PSPromotionManager, which is not available
  // for work stealing.
  _manager_array[ParallelGCThreads] = new PSPromotionManager();
  guarantee(_manager_array[ParallelGCThreads] != NULL, "Could not create PSPromotionManager");
}
Example #4
void PSMarkSweep::mark_sweep_phase2() {
  EventMark m("2 compute new addresses");
  TraceTime tm("phase 2", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("2");

  // Now all live objects are marked, compute the new object addresses.

  // It is imperative that we traverse perm_gen LAST. If dead space is
  // allowed, a range of dead objects may get overwritten by a dead int
  // array. If perm_gen is not traversed last, a klassOop may get
  // overwritten. This is fine since it is dead, but if the class has dead
  // instances we have to skip them, and in order to find their size we
  // need the klassOop!
  //
  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Begin compacting into the old gen
  PSMarkSweepDecorator::set_destination_decorator_tenured();

  // This will also compact the young gen spaces.
  old_gen->precompact();

  // Compact the perm gen into the perm gen
  PSMarkSweepDecorator::set_destination_decorator_perm_gen();

  perm_gen->precompact();
}
Example #5
void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer);
  trace(" 1");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // General strong roots.
  {
    ParallelScavengeHeap::ParStrongRootsScope psrs;
    Universe::oops_do(mark_and_push_closure());
    JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
    CodeBlobToOopClosure each_active_code_blob(mark_and_push_closure(), /*do_marking=*/ true);
    Threads::oops_do(mark_and_push_closure(), &each_active_code_blob);
    ObjectSynchronizer::oops_do(mark_and_push_closure());
    FlatProfiler::oops_do(mark_and_push_closure());
    Management::oops_do(mark_and_push_closure());
    JvmtiExport::oops_do(mark_and_push_closure());
    SystemDictionary::always_strong_oops_do(mark_and_push_closure());
    // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
    //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
  }

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking
  {
    ref_processor()->setup_policy(clear_all_softrefs);
    const ReferenceProcessorStats& stats =
      ref_processor()->process_discovered_references(
        is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer);
    gc_tracer()->report_gc_reference_stats(stats);
  }

  // Follow system dictionary roots and unload classes
  bool purged_class = SystemDictionary::do_unloading(is_alive_closure());

  // Follow code cache roots
  CodeCache::do_unloading(is_alive_closure(), mark_and_push_closure(),
                          purged_class);
  follow_stack(); // Flush marking stack

  // Update subklass/sibling/implementor links of live klasses
  follow_weak_klass_links();
  assert(_marking_stack.is_empty(), "just drained");

  // Visit memoized mdo's and clear unmarked weak refs
  follow_mdo_weak_refs();
  assert(_marking_stack.is_empty(), "just drained");

  // Visit interned string tables and delete unmarked oops
  StringTable::unlink(is_alive_closure());
  // Clean up unreferenced symbols in symbol table.
  SymbolTable::unlink();

  assert(_marking_stack.is_empty(), "stack should be empty by now");
  _gc_tracer->report_object_count_after_gc(is_alive_closure());
}
// This method contains all heap specific policy for invoking scavenge.
// PSScavenge::invoke_no_policy() will do nothing but attempt to
// scavenge. It will not clean up after failed promotions, bail out if
// we've exceeded policy time limits, or perform any other special behavior.
// All such policy should be placed here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
void PSScavenge::invoke() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  bool scavenge_was_done = PSScavenge::invoke_no_policy();

  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
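  // Record "no full GC follows" up front; corrected below if a full
  // collection is actually triggered.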
  if (UsePerfData)
    counters->update_full_follows_scavenge(0);
  if (!scavenge_was_done ||
      policy->should_full_GC(heap->old_gen()->free_in_bytes())) {
    if (UsePerfData)
      counters->update_full_follows_scavenge(full_follows_scavenge);
    GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
    CollectorPolicy* cp = heap->collector_policy();
    const bool clear_all_softrefs = cp->should_clear_all_soft_refs();

    if (UseParallelOldGC) {
      PSParallelCompact::invoke_no_policy(clear_all_softrefs);
    } else {
      PSMarkSweep::invoke_no_policy(clear_all_softrefs);
    }
  }
}
 PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
   _to_space = heap->young_gen()->to_space();
   
   assert(_promotion_manager != NULL, "Sanity");
 }
Example #8
void PSMarkSweep::allocate_stacks() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  MutableSpace* to_space = young_gen->to_space();
  _preserved_marks = (PreservedMark*)to_space->top();
  _preserved_count = 0;

  // We want to calculate the size in bytes first.
  _preserved_count_max  = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
  // Now divide by the size of a PreservedMark
  _preserved_count_max /= sizeof(PreservedMark);
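  // (pointer_delta with sizeof(jbyte) measures the free space in bytes;
  //  the division converts that byte count into a PreservedMark count.)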

  _preserved_mark_stack = NULL;
  _preserved_oop_stack = NULL;

  _marking_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);

  int size = SystemDictionary::number_of_classes() * 2;
  _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
  // (#klass/k)^2, for k ~ 10 appears a better setting, but this will have to do for
  // now until we investigate a more optimal setting.
  _revisit_mdo_stack   = new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(size*2, true);
}
Example #9
void PSMarkSweep::mark_sweep_phase1(bool& marked_for_unloading, bool clear_all_softrefs) {
    // Recursively traverse all live objects and mark them
    EventMark m("1 mark object");
    TraceTime tm("phase 1", PrintGC && Verbose, true, gclog_or_tty);
    trace(" 1");

    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

    // General strong roots.
    Universe::oops_do(mark_and_push_closure());
    JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
    Threads::oops_do(mark_and_push_closure());
    ObjectSynchronizer::oops_do(mark_and_push_closure());
    FlatProfiler::oops_do(mark_and_push_closure());
    SystemDictionary::always_strong_oops_do(mark_and_push_closure());

    guarantee(!jvmdi::enabled(), "Should not be used with jvmdi");
    vmSymbols::oops_do(mark_and_push_closure());

    // Flush marking stack.
    follow_stack();

    // Process reference objects found during marking
    ReferencePolicy *soft_ref_policy;
    if (clear_all_softrefs) {
        soft_ref_policy = new AlwaysClearPolicy();
    } else {
        NOT_COMPILER2(soft_ref_policy = new LRUCurrentHeapPolicy();)
        COMPILER2_ONLY(soft_ref_policy = new LRUMaxHeapPolicy();)
    }
    // ... (rest of this older variant elided)
}
void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
  trace(" 1");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  {
    ParallelScavengeHeap::ParStrongRootsScope psrs;
    Universe::oops_do(mark_and_push_closure());
    JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
    CLDToOopClosure mark_and_push_from_cld(mark_and_push_closure());
    MarkingCodeBlobClosure each_active_code_blob(mark_and_push_closure(), !CodeBlobToOopClosure::FixRelocations);
    Threads::oops_do(mark_and_push_closure(), &mark_and_push_from_cld, &each_active_code_blob);
    ObjectSynchronizer::oops_do(mark_and_push_closure());
    FlatProfiler::oops_do(mark_and_push_closure());
    Management::oops_do(mark_and_push_closure());
    JvmtiExport::oops_do(mark_and_push_closure());
    SystemDictionary::always_strong_oops_do(mark_and_push_closure());
    ClassLoaderDataGraph::always_strong_cld_do(follow_cld_closure());
    // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
    //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
  }

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking
  {
    ref_processor()->setup_policy(clear_all_softrefs);
    const ReferenceProcessorStats& stats =
      ref_processor()->process_discovered_references(
        is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer, _gc_tracer->gc_id());
    gc_tracer()->report_gc_reference_stats(stats);
  }

  // This is the point where the entire marking should have completed.
  assert(_marking_stack.is_empty(), "Marking should have completed");

  // Unload classes and purge the SystemDictionary.
  bool purged_class = SystemDictionary::do_unloading(is_alive_closure());

  // Unload nmethods.
  CodeCache::do_unloading(is_alive_closure(), purged_class);

  // Prune dead klasses from subklass/sibling/implementor lists.
  Klass::clean_weak_klass_links(is_alive_closure());

  // Delete entries for dead interned strings.
  StringTable::unlink(is_alive_closure());

  // Clean up unreferenced symbols in symbol table.
  SymbolTable::unlink();
  _gc_tracer->report_object_count_after_gc(is_alive_closure());
}
Example #11
  CheckForUnmarkedObjects() {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

    _young_gen = heap->young_gen();
    _card_table = (CardTableExtension*)heap->barrier_set();
    // No point in asserting barrier set type here. Need to make CardTableExtension
    // a unique barrier set type.
  }
bool PSScavenge::should_attempt_scavenge() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
  
  if (UsePerfData) {
    counters->update_scavenge_skipped(not_skipped);
  }

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  if (!ScavengeWithObjectsInToSpace) {
    // Do not attempt to promote unless to_space is empty
    if (!young_gen->to_space()->is_empty()) {
      _consecutive_skipped_scavenges++;
      if (UsePerfData) {
        counters->update_scavenge_skipped(to_space_not_empty);
      }
      return false;
    }
  }

  // Test to see if the scavenge will likely fail.
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // A similar test is done in the policy's should_full_GC().  If this is
  // changed, decide if that test should also be changed.
  size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
  size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
  bool result = promotion_estimate < old_gen->free_in_bytes();
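  // That is, scavenge only when the padded expected promotion volume,
  // capped by what the young gen actually holds, fits in the old gen's
  // free space.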

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(result ? "  do scavenge: " : "  skip scavenge: ");
    gclog_or_tty->print_cr(" average_promoted " SIZE_FORMAT
      " padded_average_promoted " SIZE_FORMAT
      " free in old gen " SIZE_FORMAT,
      (size_t) policy->average_promoted_in_bytes(),
      (size_t) policy->padded_average_promoted_in_bytes(),
      old_gen->free_in_bytes());
    if (young_gen->used_in_bytes() <
        (size_t) policy->padded_average_promoted_in_bytes()) {
      gclog_or_tty->print_cr(" padded_promoted_average is greater"
        " than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes());
    }
  }

  if (result) {
    _consecutive_skipped_scavenges = 0;
  } else {
    _consecutive_skipped_scavenges++;
    if (UsePerfData) {
      counters->update_scavenge_skipped(promoted_too_large);
    }
  }
  return result;
}
Example #13
void PSPromotionManager::pre_scavenge() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  _young_space = heap->young_gen()->to_space();

  for(uint i=0; i<ParallelGCThreads+1; i++) {
    manager_array(i)->reset();
  }
}
Example #14
void PSOldGen::precompact() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Reset start array first.
  _start_array.reset();

  object_mark_sweep()->precompact();

  // Now compact the young gen
  heap->young_gen()->precompact();
}
Example #15
bool PSYoungPromotionLAB::lab_is_valid(MemRegion lab) {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  MutableSpace* to_space = heap->young_gen()->to_space();
  MemRegion used = to_space->used_region();
  if (used.contains(lab)) {
    return true;
  }

  return false;
}
// Static method
bool ParallelScavengeHeap::is_in_young(oop* p) {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, 
                                            "Must be ParallelScavengeHeap");

  PSYoungGen* young_gen = heap->young_gen();

  if (young_gen->is_in(p)) {
    return true;
  }

  return false;
}
ParCompactionManager::ParCompactionManager() :
    _action(CopyAndUpdate),
    _region_stack(NULL),
    _region_stack_index((uint)max_uintx) {

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  _old_gen = heap->old_gen();
  _start_array = old_gen()->start_array();

  marking_stack()->initialize();
  _objarray_stack.initialize();
}
Example #18
bool PSOldPromotionLAB::lab_is_valid(MemRegion lab) {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  assert(_start_array->covered_region().contains(lab), "Sanity");    

  PSOldGen* old_gen = heap->old_gen();
  MemRegion used = old_gen->object_space()->used_region();
  
  if (used.contains(lab)) {
    return true;
  }

  return false;
}
Example #19
void PSMarkSweep::allocate_stacks() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  MutableSpace* to_space = young_gen->to_space();
  _preserved_marks = (PreservedMark*)to_space->top();
  _preserved_count = 0;

  // We want to calculate the size in bytes first.
  _preserved_count_max  = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
  // Now divide by the size of a PreservedMark
  _preserved_count_max /= sizeof(PreservedMark);
}
Example #20
void PSMarkSweep::mark_sweep_phase4() {
  EventMark m("4 compact heap");
  TraceTime tm("phase 4", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("4");

  // All pointers are now adjusted, move objects accordingly

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  old_gen->compact();
  young_gen->compact();
}
Example #21
void PSScavenge::initialize() {
  // Arguments must have been parsed

  if (AlwaysTenure) {
    _tenuring_threshold = 0;
  } else if (NeverTenure) {
    _tenuring_threshold = markOopDesc::max_age + 1;
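    // max_age is the largest age a mark word can encode, so a threshold of
    // max_age + 1 is never reached and objects are never tenured.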
  } else {
    // We want to smooth out our startup times for the AdaptiveSizePolicy
    _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
                                                    MaxTenuringThreshold;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Set boundary between young_gen and old_gen
  assert(perm_gen->reserved().end() <= old_gen->object_space()->bottom(),
         "perm above old");
  assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
         "old above young");
  _young_generation_boundary = young_gen->eden_space()->bottom();
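  // (Fast in-young tests during scavenge treat any address at or above
  //  this boundary as young.)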

  // Initialize ref handling object for scavenging.
  MemRegion mr = young_gen->reserved();

  _ref_processor =
    new ReferenceProcessor(mr,                         // span
                           ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
                           (int) ParallelGCThreads,    // mt processing degree
                           true,                       // mt discovery
                           (int) ParallelGCThreads,    // mt discovery degree
                           true,                       // atomic_discovery
                           NULL,                       // header provides liveness info
                           false);                     // next field updates do not need write barrier

  // Cache the cardtable
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
  _card_table = (CardTableExtension*)bs;

  _counters = new CollectorCounters("PSScavenge", 0);
}
// Static method
bool ParallelScavengeHeap::is_in_old_or_perm(oop* p) {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, 
                                            "Must be ParallelScavengeHeap");

  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  if (old_gen->is_in(p)) {
    return true;
  }

  if (perm_gen->is_in(p)) {
    return true;
  }

  return false;
}
Example #23
void PSMarkSweep::mark_sweep_phase3() {
  // Adjust the pointers to reflect the new locations
  EventMark m("3 adjust pointers");
  TraceTime tm("phase 3", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("3");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // General strong roots.
  Universe::oops_do(adjust_root_pointer_closure());
  ReferenceProcessor::oops_do(adjust_root_pointer_closure());
  JNIHandles::oops_do(adjust_root_pointer_closure());   // Global (strong) JNI handles
  Threads::oops_do(adjust_root_pointer_closure(), NULL);
  ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
  FlatProfiler::oops_do(adjust_root_pointer_closure());
  Management::oops_do(adjust_root_pointer_closure());
  JvmtiExport::oops_do(adjust_root_pointer_closure());
  // SO_AllClasses
  SystemDictionary::oops_do(adjust_root_pointer_closure());
  vmSymbols::oops_do(adjust_root_pointer_closure());
  //CodeCache::scavenge_root_nmethods_oops_do(adjust_root_pointer_closure());

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure());

  CodeCache::oops_do(adjust_pointer_closure());
  SymbolTable::oops_do(adjust_root_pointer_closure());
  StringTable::oops_do(adjust_root_pointer_closure());
  ref_processor()->weak_oops_do(adjust_root_pointer_closure());
  PSScavenge::reference_processor()->weak_oops_do(adjust_root_pointer_closure());

  adjust_marks();
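  // (adjust_marks() updates the oops saved on the preserved-mark stacks.)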

  young_gen->adjust_pointers();
  old_gen->adjust_pointers();
  perm_gen->adjust_pointers();
}
void PSMarkSweep::mark_sweep_phase3() {
  // Adjust the pointers to reflect the new locations
  GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
  trace("3");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  Universe::oops_do(adjust_pointer_closure());
  JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
  CLDToOopClosure adjust_from_cld(adjust_pointer_closure());
  Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL);
  ObjectSynchronizer::oops_do(adjust_pointer_closure());
  FlatProfiler::oops_do(adjust_pointer_closure());
  Management::oops_do(adjust_pointer_closure());
  JvmtiExport::oops_do(adjust_pointer_closure());
  SystemDictionary::oops_do(adjust_pointer_closure());
  ClassLoaderDataGraph::cld_do(adjust_cld_closure());

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure());

  CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
  CodeCache::blobs_do(&adjust_from_blobs);
  StringTable::oops_do(adjust_pointer_closure());
  ref_processor()->weak_oops_do(adjust_pointer_closure());
  PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());

  adjust_marks();

  young_gen->adjust_pointers();
  old_gen->adjust_pointers();
}
void PSMarkSweepDecorator::advance_destination_decorator() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  assert(_destination_decorator != NULL, "Sanity");

  PSMarkSweepDecorator* first = heap->old_gen()->object_mark_sweep();
  PSMarkSweepDecorator* second = heap->young_gen()->eden_mark_sweep();
  PSMarkSweepDecorator* third = heap->young_gen()->from_mark_sweep();
  PSMarkSweepDecorator* fourth = heap->young_gen()->to_mark_sweep();
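  // Destinations advance in a fixed order:
  // old gen object space -> eden -> from-space -> to-space.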

  if ( _destination_decorator == first ) {
    _destination_decorator = second;
  } else if ( _destination_decorator == second ) {
    _destination_decorator = third;
  } else if ( _destination_decorator == third ) {
    _destination_decorator = fourth;
  } else {
    fatal("PSMarkSweep attempting to advance past last compaction area");
  }
}
Example #26
void PSMarkSweep::mark_sweep_phase2() {
  TraceTime tm("phase 2", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("2");

  // Now all live objects are marked, compute the new object addresses.

  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();

  // Begin compacting into the old gen
  PSMarkSweepDecorator::set_destination_decorator_tenured();

  // This will also compact the young gen spaces.
  old_gen->precompact();
}
Example #27
void PSMarkSweep::mark_sweep_phase4() {
  EventMark m("4 compact heap");
  TraceTime tm("phase 4", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("4");

  // All pointers are now adjusted, move objects accordingly

  // It is imperative that we traverse perm_gen first in phase4. All
  // classes must be allocated earlier than their instances, and traversing
  // perm_gen first makes sure that all klassOops have moved to their new
  // location before any instance does a dispatch through its klass!
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  perm_gen->compact();
  old_gen->compact();
  young_gen->compact();
}
// This method iterates over all objects in the young generation,
// unforwarding markOops. It then restores any preserved mark oops,
// and clears the _preserved_mark_stack.
void PSScavenge::clean_up_failed_promotion() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  assert(promotion_failed(), "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  {
    ResourceMark rm;

    // Unforward all pointers in the young gen.
    PSPromotionFailedClosure unforward_closure;
    young_gen->object_iterate(&unforward_closure);

    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Restoring %d marks", 
                              _preserved_oop_stack->length());
    }

    // Restore any saved marks.
    for (int i = 0; i < _preserved_oop_stack->length(); i++) {
      oop obj      = _preserved_oop_stack->at(i);
      markOop mark = _preserved_mark_stack->at(i);
      obj->set_mark(mark);
    }
 
    // Deallocate the preserved mark and oop stacks.
    // The stacks were allocated as CHeap objects, so
    // we must call delete to prevent mem leaks.
    delete _preserved_mark_stack;
    _preserved_mark_stack = NULL;
    delete _preserved_oop_stack;
    _preserved_oop_stack = NULL;
  }

  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
}
Example #29
void PSMarkSweep::allocate_stacks() {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

    PSYoungGen* young_gen = heap->young_gen();

    MutableSpace* to_space = young_gen->to_space();
    _preserved_marks = (PreservedMark*)to_space->top();
    _preserved_count = 0;

    // We want to calculate the size in bytes first.
    _preserved_count_max  = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
    // Now divide by the size of a PreservedMark
    _preserved_count_max /= sizeof(PreservedMark);

    _preserved_mark_stack = NULL;
    _preserved_oop_stack = NULL;

    _marking_stack = new GrowableArray<oop>(4000, true);

    int size = SystemDictionary::number_of_classes() * 2;
    _revisit_klass_stack = new GrowableArray<Klass*>(size, true);
}
Example #30
void PSOldGen::resize(size_t desired_free_space) {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  const size_t alignment = heap->min_alignment();

  const size_t size_before = _virtual_space.committed_size();
  size_t new_size = used_in_bytes() + desired_free_space;
  new_size = align_size_up(new_size, alignment);

  assert(_max_gen_size == reserved().byte_size(), "max new size problem?");

  // Adjust according to our min and max
  new_size = MAX2(MIN2(new_size, _max_gen_size), _min_gen_size);
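  // i.e. clamp the requested size to [_min_gen_size, _max_gen_size].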

  const size_t current_size = capacity_in_bytes();
  if (new_size == current_size) {
    // No change requested
    return;
  }
  if (new_size > current_size) {
    size_t change_bytes = new_size - current_size;
    expand(change_bytes);
  } else {
    size_t change_bytes = current_size - new_size;
    // shrink doesn't grab this lock, expand does. Is that right?
    MutexLocker x(ExpandHeap_lock);
    shrink(change_bytes);
  }

  if (PrintAdaptiveSizePolicy) {
    gclog_or_tty->print_cr("AdaptiveSizePolicy::old generation size: "
                  "collection: %d "
                  "(" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",
                  heap->total_collections(),
                  size_before, _virtual_space.committed_size());
  }
}