void VM_GenCollectForPermanentAllocation::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);

  SharedHeap* heap = (SharedHeap*)Universe::heap();
  GCCauseSetter gccs(heap, _gc_cause);
  switch (heap->kind()) {
    case (CollectedHeap::GenCollectedHeap): {
      GenCollectedHeap* gch = (GenCollectedHeap*)heap;
      gch->do_full_collection(gch->must_clear_all_soft_refs(),
                              gch->n_gens() - 1);
      break;
    }
#ifndef SERIALGC
    case (CollectedHeap::G1CollectedHeap): {
      G1CollectedHeap* g1h = (G1CollectedHeap*)heap;
      g1h->do_full_collection(_gc_cause == GCCause::_last_ditch_collection);
      break;
    }
#endif // SERIALGC
    default:
      ShouldNotReachHere();
  }
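  // Retry the permanent-generation allocation now that a full collection
  // has completed and (hopefully) freed space.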
  _res = heap->perm_gen()->allocate(_size, false);
  assert(heap->is_in_reserved_or_null(_res), "result not in heap");
  if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}
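
// For context, a minimal sketch of how this VM operation is typically
// driven. The constructor arguments and the result() accessor follow the
// usual VM_GC_Operation pattern of this era and are assumptions, not
// quotes from this file:
//
//   VM_GenCollectForPermanentAllocation op(size, gc_count_before,
//                                          full_gc_count_before,
//                                          GCCause::_permanent_generation_full);
//   VMThread::execute(&op);          // doit() runs at a safepoint on the VM thread
//   HeapWord* result = op.result();  // NULL if the retry allocation failed
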
HeapWord* HeapInspection::start_of_perm_gen() {
  if (is_shared_heap()) {
    SharedHeap* sh = SharedHeap::heap();
    return sh->perm_gen()->used_region().start();
  }
#ifndef SERIALGC
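  // Not a shared heap: in a non-serial build the only remaining
  // possibility is the parallel scavenge heap.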
  ParallelScavengeHeap* psh = (ParallelScavengeHeap*)Universe::heap();
  return psh->perm_gen()->object_space()->used_region().start();
#else
  ShouldNotReachHere();
  return NULL;
#endif // SERIALGC
}

void HeapInspection::heap_inspection(outputStream* st, bool need_prologue) {
  ResourceMark rm;
  HeapWord* ref = NULL; // start of the perm gen; reference point for the klass table below

  CollectedHeap* heap = Universe::heap();
  bool is_shared_heap = false;
  switch (heap->kind()) {
    case CollectedHeap::G1CollectedHeap:
    case CollectedHeap::GenCollectedHeap: {
      is_shared_heap = true;
      SharedHeap* sh = (SharedHeap*)heap;
      if (need_prologue) {
        sh->gc_prologue(false /* !full */); // get any necessary locks, etc.
      }
      ref = sh->perm_gen()->used_region().start();
      break;
    }
#ifndef SERIALGC
    case CollectedHeap::ParallelScavengeHeap: {
      ParallelScavengeHeap* psh = (ParallelScavengeHeap*)heap;
      ref = psh->perm_gen()->object_space()->used_region().start();
      break;
    }
#endif // SERIALGC
    default:
      ShouldNotReachHere(); // Unexpected heap kind for this op
  }
  // Collect klass instance info
  KlassInfoTable cit(KlassInfoTable::cit_size, ref);
  if (!cit.allocation_failed()) {
    // Iterate over objects in the heap
    RecordInstanceClosure ric(&cit);
    // If this operation encounters a bad object when using CMS,
    // consider using safe_object_iterate() which avoids perm gen
    // objects that may contain bad references.
    Universe::heap()->object_iterate(&ric);

    // Report if certain classes are not counted because of
    // running out of C-heap for the histogram.
    size_t missed_count = ric.missed_count();
    if (missed_count != 0) {
      st->print_cr("WARNING: Ran out of C-heap; undercounted " SIZE_FORMAT
                   " total instances in data below",
                   missed_count);
    }
    // Sort and print klass instance info
    KlassInfoHisto histo("\n"
                     " num     #instances         #bytes  class name\n"
                     "----------------------------------------------",
                     KlassInfoHisto::histo_initial_size);
    HistoClosure hc(&histo);
    cit.iterate(&hc);
    histo.sort();
    histo.print_on(st);
  } else {
    st->print_cr("WARNING: Ran out of C-heap; histogram not generated");
  }
  st->flush();

  if (need_prologue && is_shared_heap) {
    SharedHeap* sh = (SharedHeap*)heap;
    sh->gc_epilogue(false /* !full */); // release all acquired locks, etc.
  }
}
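
// For context, heap_inspection() is normally reached through a VM operation
// rather than called directly. A minimal sketch of the trigger, modeled on
// the jmap -histo / attach-listener path of this era; the
// VM_GC_HeapInspection constructor arguments are an assumption:
//
//   VM_GC_HeapInspection op(tty, true /* request_full_gc */,
//                           true /* need_prologue */);
//   VMThread::execute(&op);  // doit() calls HeapInspection::heap_inspection()
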
void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
                                      bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  SharedHeap* sh = SharedHeap::heap();
#ifdef ASSERT
  if (sh->collector_policy()->should_clear_all_soft_refs()) {
    assert(clear_all_softrefs, "Policy should have been checked earlier");
  }
#endif
  // hook up weak ref data so it can be used during Mark-Sweep
  assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
  assert(rp != NULL, "should be non-NULL");
  GenMarkSweep::_ref_processor = rp;
  rp->setup_policy(clear_all_softrefs);

  // When collecting the permanent generation, methodOops may be moving,
  // so we either have to flush all bcp data or convert it into bci.
  CodeCache::gc_prologue();
  Threads::gc_prologue();

  // Increment the invocation count for the permanent generation, since it is
  // implicitly collected whenever we do a full mark sweep collection.
  sh->perm_gen()->stat_record()->invocations++;

  bool marked_for_unloading = false;

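  // Allocate the marking and preserved-mark stacks used by the phases below.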
  allocate_stacks();

  // We should save the marks of the currently locked biased monitors.
  // The marking doesn't preserve the marks of biased objects.
  BiasedLocking::preserve_marks();

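  // Phase 1: mark all reachable (live) objects.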
  mark_sweep_phase1(marked_for_unloading, clear_all_softrefs);

  if (VerifyDuringGC) {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    g1h->checkConcurrentMark();
  }

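  // Phase 2: compute new (forwarding) addresses for live objects.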
  mark_sweep_phase2();

  // Don't add any more derived pointers during phase3
  COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

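  // Phase 3: adjust all pointers to refer to the objects' new locations.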
  mark_sweep_phase3();

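  // Phase 4: move (compact) the live objects to their new locations.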
  mark_sweep_phase4();

  GenMarkSweep::restore_marks();
  BiasedLocking::restore_marks();
  GenMarkSweep::deallocate_stacks();

  // We must invalidate the perm-gen rs, so that it gets rebuilt.
  GenRemSet* rs = sh->rem_set();
  rs->invalidate(sh->perm_gen()->used_region(), true /*whole_heap*/);

  // "free at last gc" is calculated from these.
  // CHF: cheating for now!!!
  //  Universe::set_heap_capacity_at_last_gc(Universe::heap()->capacity());
  //  Universe::set_heap_used_at_last_gc(Universe::heap()->used());

  Threads::gc_epilogue();
  CodeCache::gc_epilogue();

  // refs processing: clean slate
  GenMarkSweep::_ref_processor = NULL;
}
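
// For context, a minimal sketch of the usual caller: G1's full-collection
// path invokes this at a safepoint. The surrounding call is an assumption
// drawn from the G1 full-GC code of this era, not quoted from this file:
//
//   // inside G1CollectedHeap's full-collection routine, at a safepoint:
//   G1MarkSweep::invoke_at_safepoint(g1h->ref_processor(), clear_all_soft_refs);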