Example 1
void GenMarkSweep::mark_sweep_phase3() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  // Adjust the pointers to reflect the new locations
  GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", gc_timer());

  // Need new claim bits for the pointer adjustment tracing.
  ClassLoaderDataGraph::clear_claimed_marks();

  // Because the closure below is created statically, we cannot
  // use the OopsInGenClosure constructor which takes a generation,
  // as the Universe has not been created when the static constructors
  // are run.
  adjust_pointer_closure.set_orig_generation(gch->old_gen());

  {
    StrongRootsScope srs(1);

    gch->gen_process_roots(&srs,
                           GenCollectedHeap::OldGen,
                           false, // Younger gens are not roots.
                           GenCollectedHeap::SO_AllCodeCache,
                           GenCollectedHeap::StrongAndWeakRoots,
                           &adjust_pointer_closure,
                           &adjust_pointer_closure,
                           &adjust_cld_closure);
  }

  gch->gen_process_weak_roots(&adjust_pointer_closure);

  adjust_marks();
  GenAdjustPointersClosure blk;
  gch->generation_iterate(&blk, true);
}
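Every phase above opens with a GCTraceTime declaration such as "GCTraceTime(Info, gc, phases) tm(...)". This is an RAII scope timer: the constructor emits a start line under the given log tags, and the destructor re-emits the title with the elapsed time appended (the GCTraceTimeTest cases below show the exact output). A minimal standalone sketch of the same pattern, assuming nothing from HotSpot (ScopedPhaseTimer is a hypothetical stand-in, not the real implementation):

#include <chrono>
#include <cstdio>

// Scoped timer: the constructor logs a start line, the destructor logs the
// same title with the elapsed time, mirroring the "[gc,start] ..." /
// "[gc] ... 0.026ms" line pairs seen in the tests below.
class ScopedPhaseTimer {
  const char* _title;
  std::chrono::steady_clock::time_point _start;
 public:
  explicit ScopedPhaseTimer(const char* title)
    : _title(title), _start(std::chrono::steady_clock::now()) {
    std::printf("[gc,start] %s\n", _title);
  }
  ~ScopedPhaseTimer() {
    double ms = std::chrono::duration<double, std::milli>(
                    std::chrono::steady_clock::now() - _start).count();
    std::printf("[gc      ] %s %.3fms\n", _title, ms);
  }
};

int main() {
  ScopedPhaseTimer tm("Phase 3: Adjust pointers");
  // Phase work runs here; the closing line is emitted when tm leaves
  // scope, on every exit path.
  return 0;
}

Because the report comes from the destructor, each phase is timed simply by declaring the timer at the top of the phase function, with no explicit stop call.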
Example 2
void G1MarkSweep::mark_sweep_phase3() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Adjust the pointers to reflect the new locations
  GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", gc_timer());

  // Need cleared claim bits for the roots processing
  ClassLoaderDataGraph::clear_claimed_marks();

  CodeBlobToOopClosure adjust_code_closure(&GenMarkSweep::adjust_pointer_closure, CodeBlobToOopClosure::FixRelocations);
  {
    G1RootProcessor root_processor(g1h, 1);
    root_processor.process_all_roots(&GenMarkSweep::adjust_pointer_closure,
                                     &GenMarkSweep::adjust_cld_closure,
                                     &adjust_code_closure);
  }

  assert(GenMarkSweep::ref_processor() == g1h->ref_processor_stw(), "Sanity");
  g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_pointer_closure);

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  JNIHandles::weak_oops_do(&GenMarkSweep::adjust_pointer_closure);

  if (G1StringDedup::is_enabled()) {
    G1StringDedup::oops_do(&GenMarkSweep::adjust_pointer_closure);
  }

  GenMarkSweep::adjust_marks();

  G1AdjustPointersClosure blk;
  g1h->heap_region_iterate(&blk);
}
Example 3
TEST_VM_F(GCTraceTimeTest, no_heap) {
  set_log_config(TestLogFileName, "gc=debug,gc+start=debug");

  LogTarget(Debug, gc) gc_debug;
  LogTarget(Debug, gc, start) gc_start_debug;

  EXPECT_TRUE(gc_debug.is_enabled());
  EXPECT_TRUE(gc_start_debug.is_enabled());

  {
    GCTraceTime(Debug, gc) timer("Test GC", NULL, GCCause::_allocation_failure, false);
  }

  const char* expected[] = {
    // [2.975s][debug][gc,start] Test GC (Allocation Failure)
    "[gc,start", "] Test GC (Allocation Failure)",
    // [2.975s][debug][gc      ] Test GC (Allocation Failure) 0.026ms
    "[gc", "] Test GC (Allocation Failure) ", "ms",
    NULL
  };
  EXPECT_TRUE(file_contains_substrings_in_order(TestLogFileName, expected));

  const char* not_expected[] = {
      // [2.975s][debug][gc      ] Test GC 59M->59M(502M) 0.026ms
      "[gc", "] Test GC ", "M) ", "ms",
      NULL
  };
  EXPECT_FALSE(file_contains_substrings_in_order(TestLogFileName, not_expected));
}
Example 4
void GenMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime(Trace, gc) tm("Phase 1: Mark live objects", _gc_timer);

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  // Because follow_root_closure is created statically, we cannot
  // use the OopsInGenClosure constructor which takes a generation,
  // as the Universe has not been created when the static constructors
  // are run.
  follow_root_closure.set_orig_generation(gch->old_gen());

  // Need new claim bits before marking starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  {
    StrongRootsScope srs(1);

    gch->gen_process_roots(&srs,
                           GenCollectedHeap::OldGen,
                           false, // Younger gens are not roots.
                           GenCollectedHeap::SO_None,
                           ClassUnloading,
                           &follow_root_closure,
                           &follow_root_closure,
                           &follow_cld_closure);
  }

  // Process reference objects found during marking
  {
    ref_processor()->setup_policy(clear_all_softrefs);
    const ReferenceProcessorStats& stats =
      ref_processor()->process_discovered_references(
        &is_alive, &keep_alive, &follow_stack_closure, NULL, _gc_timer);
    gc_tracer()->report_gc_reference_stats(stats);
  }

  // This is the point where the entire marking should have completed.
  assert(_marking_stack.is_empty(), "Marking should have completed");

  // Unload classes and purge the SystemDictionary.
  bool purged_class = SystemDictionary::do_unloading(&is_alive);

  // Unload nmethods.
  CodeCache::do_unloading(&is_alive, purged_class);

  // Prune dead klasses from subklass/sibling/implementor lists.
  Klass::clean_weak_klass_links(&is_alive);

  // Delete entries for dead interned strings.
  StringTable::unlink(&is_alive);

  // Clean up unreferenced symbols in symbol table.
  SymbolTable::unlink();

  gc_tracer()->report_object_count_after_gc(&is_alive);
}
Example 5
void VM_CMS_Operation::verify_after_gc() {
  if (VerifyAfterGC &&
      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
    GCTraceTime(Info, gc, phases, verify) tm("Verify After", _collector->_gc_timer_cm);
    HandleMark hm;
    FreelistLocker x(_collector);
    MutexLockerEx  y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
    Universe::verify();
  }
}
Example 6
void G1MarkSweep::mark_sweep_phase2() {
  // Now all live objects are marked, compute the new object addresses.

  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", gc_timer());

  prepare_compaction();
}
Example 7
void G1MarkSweep::mark_sweep_phase4() {
  // All pointers are now adjusted, move objects accordingly

  // The ValidateMarkSweep live oops tracking expects us to traverse spaces
  // in the same order in phase2, phase3 and phase4. We don't quite do that
  // here (code and comment not fixed for perm removal), so we tell the validate code
  // to use a higher index (saved from phase2) when verifying perm_gen.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", gc_timer());

  G1SpaceCompactClosure blk;
  g1h->heap_region_iterate(&blk);

}
Example 8
void GenMarkSweep::mark_sweep_phase4() {
  // All pointers are now adjusted, move objects accordingly

  // It is imperative that we traverse perm_gen first in phase4. All
  // classes must be allocated earlier than their instances, and traversing
  // perm_gen first makes sure that all Klass*s have moved to their new
  // location before any instance does a dispatch through its klass!

  // The ValidateMarkSweep live oops tracking expects us to traverse spaces
  // in the same order in phase2, phase3 and phase4. We don't quite do that
  // here (perm_gen first rather than last), so we tell the validate code
  // to use a higher index (saved from phase2) when verifying perm_gen.
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);

  GenCompactClosure blk;
  gch->generation_iterate(&blk, true);
}
Example 9
void GenMarkSweep::mark_sweep_phase2() {
  // Now all live objects are marked, compute the new object addresses.

  // It is imperative that we traverse perm_gen LAST. If dead space is
  // allowed, a range of dead objects may get overwritten by a dead int
  // array. If perm_gen is not traversed last, a Klass* may get
  // overwritten. This is fine since it is dead, but if the class has dead
  // instances we have to skip them, and in order to find their size we
  // need the Klass*!
  //
  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer);

  gch->prepare_for_compaction();
}
Example 10
TEST_VM_F(GCTraceTimeTest, full_multitag) {
  set_log_config(TestLogFileName, "gc+ref=debug,gc+ref+start=debug");

  LogTarget(Debug, gc, ref) gc_debug;
  LogTarget(Debug, gc, ref, start) gc_start_debug;

  EXPECT_TRUE(gc_debug.is_enabled());
  EXPECT_TRUE(gc_start_debug.is_enabled());

  {
    ThreadInVMfromNative tvn(JavaThread::current());
    MutexLocker lock(Heap_lock); // Needed to read heap usage
    GCTraceTime(Debug, gc, ref) timer("Test GC", NULL, GCCause::_allocation_failure, true);
  }

  const char* expected[] = {
    "[gc,ref,start", "] Test GC (Allocation Failure)",
    "[gc,ref", "] Test GC (Allocation Failure) ", "M) ", "ms",
    NULL
  };
  EXPECT_TRUE(file_contains_substrings_in_order(TestLogFileName, expected));
}
Example 11
TEST_VM_F(GCTraceTimeTest, no_cause) {
  set_log_config(TestLogFileName, "gc=debug,gc+start=debug");

  LogTarget(Debug, gc) gc_debug;
  LogTarget(Debug, gc, start) gc_start_debug;

  EXPECT_TRUE(gc_debug.is_enabled());
  EXPECT_TRUE(gc_start_debug.is_enabled());

  {
    ThreadInVMfromNative tvn(JavaThread::current());
    MutexLocker lock(Heap_lock); // Needed to read heap usage
    GCTraceTime(Debug, gc) timer("Test GC", NULL, GCCause::_no_gc, true);
  }

  const char* expected[] = {
    // [2.975s][debug][gc,start] Test GC
    "[gc,start", "] Test GC",
    // [2.975s][debug][gc      ] Test GC 59M->59M(502M) 0.026ms
    "[gc", "] Test GC ", "M) ", "ms",
    NULL
  };
  EXPECT_TRUE(file_contains_substrings_in_order(TestLogFileName, expected));
}
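Taken together, the GCTraceTimeTest cases pin down the constructor used throughout these examples: a title, an optional GCTimer*, a GCCause, and a flag that requests heap-usage reporting. A hedged restatement of that usage, assuming the HotSpot-internal headers the tests compile against (the argument roles are inferred from the tests, not quoted from a header):

{
  // The trailing 'true' asks for heap usage in the closing line; reading
  // the heap requires Heap_lock, hence the MutexLocker in the tests above.
  MutexLocker lock(Heap_lock);
  GCTraceTime(Debug, gc) t("Test GC", NULL, GCCause::_no_gc, true);
}
// With -Xlog:gc=debug,gc+start=debug this produces lines of the form:
//   [gc,start] Test GC
//   [gc      ] Test GC 59M->59M(502M) 0.026ms
// A cause other than GCCause::_no_gc is appended in parentheses, e.g.
// "(Allocation Failure)", and a NULL GCTimer simply means no time
// partitions are registered.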
Example 12
void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
                                    bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime(Info, gc, phases) tm("Phase 1: Mark live objects", gc_timer());

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Need cleared claim bits for the roots processing
  ClassLoaderDataGraph::clear_claimed_marks();

  MarkingCodeBlobClosure follow_code_closure(&GenMarkSweep::follow_root_closure, !CodeBlobToOopClosure::FixRelocations);
  {
    G1RootProcessor root_processor(g1h, 1);
    if (ClassUnloading) {
      root_processor.process_strong_roots(&GenMarkSweep::follow_root_closure,
                                          &GenMarkSweep::follow_cld_closure,
                                          &follow_code_closure);
    } else {
      root_processor.process_all_roots_no_string_table(
                                          &GenMarkSweep::follow_root_closure,
                                          &GenMarkSweep::follow_cld_closure,
                                          &follow_code_closure);
    }
  }

  {
    GCTraceTime(Debug, gc, phases) trace("Reference Processing", gc_timer());

    // Process reference objects found during marking
    ReferenceProcessor* rp = GenMarkSweep::ref_processor();
    assert(rp == g1h->ref_processor_stw(), "Sanity");

    rp->setup_policy(clear_all_softrefs);
    const ReferenceProcessorStats& stats =
        rp->process_discovered_references(&GenMarkSweep::is_alive,
                                          &GenMarkSweep::keep_alive,
                                          &GenMarkSweep::follow_stack_closure,
                                          NULL,
                                          gc_timer());
    gc_tracer()->report_gc_reference_stats(stats);
  }

  // This is the point where the entire marking should have completed.
  assert(GenMarkSweep::_marking_stack.is_empty(), "Marking should have completed");

  if (ClassUnloading) {
    GCTraceTime(Debug, gc, phases) trace("Class Unloading", gc_timer());

    // Unload classes and purge the SystemDictionary.
    bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive);

    // Unload nmethods.
    CodeCache::do_unloading(&GenMarkSweep::is_alive, purged_class);

    // Prune dead klasses from subklass/sibling/implementor lists.
    Klass::clean_weak_klass_links(&GenMarkSweep::is_alive);
  }

  {
    GCTraceTime(Debug, gc, phases) trace("Scrub String and Symbol Tables", gc_timer());
    // Delete entries for dead interned string and clean up unreferenced symbols in symbol table.
    g1h->unlink_string_and_symbol_table(&GenMarkSweep::is_alive);
  }

  if (G1StringDedup::is_enabled()) {
    GCTraceTime(Debug, gc, phases) trace("String Deduplication Unlink", gc_timer());
    G1StringDedup::unlink(&GenMarkSweep::is_alive);
  }

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTableDeactivate dpt_deact;
#endif
    g1h->prepare_for_verify();
    // Note: we can verify only the heap here. When an object is
    // marked, the previous value of the mark word (including
    // identity hash values, ages, etc) is preserved, and the mark
    // word is set to markOop::marked_value - effectively removing
    // any hash values from the mark word. These hash values are
    // used when verifying the dictionaries and so removing them
    // from the mark word can make verification of the dictionaries
    // fail. At the end of the GC, the original mark word values
    // (including hash values) are restored to the appropriate
    // objects.
    GCTraceTime(Info, gc, verify)("During GC (full)");
    g1h->verify(VerifyOption_G1UseMarkWord);
  }

  gc_tracer()->report_object_count_after_gc(&GenMarkSweep::is_alive);
}
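This phase also illustrates how the timers nest: the Info-level "Phase 1" timer brackets Debug-level sub-timers for reference processing, class unloading, and table scrubbing, so raising the log level adds detail without disturbing the phase total. The same nesting, sketched with the hypothetical ScopedPhaseTimer from the snippet after Example 1 (same assumptions apply; the class definition is not repeated here):

int main() {
  ScopedPhaseTimer phase("Phase 1: Mark live objects");  // Info-level total
  {
    ScopedPhaseTimer sub("Reference Processing");        // Debug-level detail
    // process discovered references here
  }
  {
    ScopedPhaseTimer sub("Class Unloading");             // Debug-level detail
    // unload classes, nmethods, and weak klass links here
  }
  return 0;
}

Running with only gc+phases=info enabled would then report the phase total while suppressing the sub-phase lines, matching the Info/Debug split used above.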
Example 13
void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  _gc_timer->register_gc_start();
  DefNewTracer gc_tracer;
  gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());

  _old_gen = gch->old_gen();

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    log_trace(gc)(":: Collection attempt not safe ::");
    gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  GCTraceTime(Trace, gc) tm("DefNew", NULL, gch->gc_cause());

  gch->trace_heap_before_gc(&gc_tracer);

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(),
         "save marks have not been newly set.");

  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();

  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
                                      gch->rem_set()->klass_rem_set());
  CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
                                           &fsc_with_no_gc_barrier,
                                           false);

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(),
         "save marks have not been newly set.");

  {
    // DefNew needs to run with n_threads == 0, to make sure the serial
    // version of the card table scanning code is used.
    // See: CardTableModRefBSForCTRS::non_clean_card_iterate_possibly_parallel.
    StrongRootsScope srs(0);

    gch->gen_process_roots(&srs,
                           GenCollectedHeap::YoungGen,
                           true,  // Process younger gens, if any,
                                  // as strong roots.
                           GenCollectedHeap::SO_ScavengeCodeCache,
                           GenCollectedHeap::StrongAndWeakRoots,
                           &fsc_with_no_gc_barrier,
                           &fsc_with_gc_barrier,
                           &cld_scan_closure);
  }

  // "evacuate followers".
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  const ReferenceProcessorStats& stats =
  rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                    NULL, _gc_timer);
  gc_tracer.report_gc_reference_stats(stats);
  gc_tracer.report_tenuring_threshold(tenuring_threshold());

  if (!_promotion_failed) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a young collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();

    // A successful scavenge should restart the GC time limit count, which
    // is for full GCs.
    AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
    size_policy->reset_gc_overhead_limit_count();
    assert(!gch->incremental_collection_failed(), "Should be clear");
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    log_debug(gc)("Promotion failed");
    // Add to-space to the list of space to compact
    // when a promotion failure has occurred.  In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For uniformity wrt ParNewGeneration.
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_failed();

    // Inform the next generation that a promotion failure occurred.
    _old_gen->promotion_failure_occurred();
    gc_tracer.report_promotion_failed(_promotion_failed_info);

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(gch->reset_promotion_should_fail();)
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());

  // We need to use a monotonically non-decreasing time in ms
  // or we will see time-warp warnings and os::javaTimeMillis()
  // does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  gch->trace_heap_after_gc(&gc_tracer);

  _gc_timer->register_gc_end();

  gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}