Code example #1
void VM_GC_HeapInspection::doit() {
  HandleMark hm;
  CollectedHeap* ch = Universe::heap();
  ch->ensure_parsability(false); // must happen, even if collection does
                                 // not happen (e.g. due to GC_locker)
  if (_full_gc) {
    // The collection attempt below would be skipped anyway if
    // the gc locker is held. The following dump may then be a tad
    // misleading to someone expecting only live objects to show
    // up in the dump (see CR 6944195). Just issue a suitable warning
    // in that case and do not attempt to do a collection.
    // The latter is a subtle point, because even a failed attempt
    // to GC will, in fact, induce one in the future, which we
    // probably want to avoid in this case because the GC that we may
    // be about to attempt holds value for us only
    // if it happens now and not if it happens in the eventual
    // future.
    if (GC_locker::is_active()) {
      warning("GC locker is held; pre-dump GC was skipped");
    } else {
      ch->collect_as_vm_thread(GCCause::_heap_inspection);
    }
  }
  HeapInspection::heap_inspection(_out, _need_prologue /* need_prologue */);
}
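This VM operation runs at a safepoint on the VM thread. A hedged sketch of how a Java thread submits it (the constructor argument list matches this vintage of the class, but treat the exact signature as an assumption):

// Hypothetical submission; the real jcmd/attach handlers do the equivalent.
void request_heap_histogram(outputStream* out) {
  VM_GC_HeapInspection op(out, true /* request_full_gc */,
                          true /* need_prologue */);
  VMThread::execute(&op);  // blocks until the safepoint operation completes
}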
Code example #2
File: gcLocker.cpp Project: AllenWeb/openjdk-1
Pause_No_GC_Verifier::~Pause_No_GC_Verifier() {
  if (_ngcv->_verifygc) {
    // if we were verifying before, then reenable verification
    CollectedHeap* h = Universe::heap();
    assert(!h->is_gc_active(), "GC active during No_GC_Verifier");
    _ngcv->_old_invocations = h->total_collections();
  }
}
Code example #3
File: gcLocker.cpp Project: AllenWeb/openjdk-1
No_GC_Verifier::No_GC_Verifier(bool verifygc) {
  _verifygc = verifygc;
  if (_verifygc) {
    CollectedHeap* h = Universe::heap();
    assert(!h->is_gc_active(), "GC active during No_GC_Verifier");
    _old_invocations = h->total_collections();
  }
}
Code example #4
File: gcLocker.cpp Project: AllenWeb/openjdk-1
No_GC_Verifier::~No_GC_Verifier() {
  if (_verifygc) {
    CollectedHeap* h = Universe::heap();
    assert(!h->is_gc_active(), "GC active during No_GC_Verifier");
    if (_old_invocations != h->total_collections()) {
      fatal("collection in a No_GC_Verifier secured function");
    }
  }
}
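Examples #3 and #4 form an RAII pair: the constructor snapshots the heap's collection count and the destructor calls fatal() if it changed. A minimal usage sketch (hypothetical function body):

void update_tables_assuming_no_gc() {
  No_GC_Verifier ngcv(true);  // record total_collections() now
  // ... code that must not trigger a collection ...
}                             // destructor re-checks the count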
Code example #5
void VM_GC_ObjectAddressInfoCollection::doit() {
  HandleMark hm;
  CollectedHeap* ch = Universe::heap();
  ch->ensure_parsability(false); // must happen, even if collection does
                                 // not happen (e.g. due to GC_locker)

  ObjectAddressInfoCollection::collect_object_address_info(_addrinfo_log,
    _krinfo_log, _reason);
}
Code example #6
 // The special value of a zero count can be used to ignore
 // the count test.
 AdaptiveSizePolicyOutput(uint count) {
   if (UseAdaptiveSizePolicy && (AdaptiveSizePolicyOutputInterval > 0)) {
     CollectedHeap* heap = Universe::heap();
     _size_policy = heap->size_policy();
     _do_print = print_test(count);
   } else {
     _size_policy = NULL;
     _do_print = false;
   }
 } 
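The constructor only decides whether output is due; printing happens elsewhere in the class (its destructor, not shown here). A hedged usage sketch, assuming the current collection number is passed as the count:

// Hypothetical call site at the start of a collection.
{
  AdaptiveSizePolicyOutput output(heap->total_collections());
  // ... do the collection; statistics are emitted when `output`
  //     goes out of scope, if the interval test passed ...
}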
Code example #7
void CardTableRS::verify() {
  // At present, we only know how to verify the card table RS for
  // generational heaps.
  VerifyCTGenClosure blk(this);
  CollectedHeap* ch = Universe::heap();

  if (ch->kind() == CollectedHeap::GenCollectedHeap) {
    GenCollectedHeap::heap()->generation_iterate(&blk, false);
    _ct_bs->verify();
  }
}
Code example #8
File: gcLocker.cpp Project: AllenWeb/openjdk-1
Pause_No_GC_Verifier::Pause_No_GC_Verifier(No_GC_Verifier * ngcv) {
  _ngcv = ngcv;
  if (_ngcv->_verifygc) {
    // if we were verifying, then make sure that nothing is
    // wrong before we "pause" verification
    CollectedHeap* h = Universe::heap();
    assert(!h->is_gc_active(), "GC active during No_GC_Verifier");
    if (_ngcv->_old_invocations != h->total_collections()) {
      fatal("collection in a No_GC_Verifier secured function");
    }
  }
}
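Example #2 (the destructor) and this constructor form the matching pause/resume guard. A hedged sketch of nesting it inside a verified scope (hypothetical names):

void mostly_gc_free_work() {
  No_GC_Verifier ngcv(true);
  // ... work that must not trigger a collection ...
  {
    Pause_No_GC_Verifier pause(&ngcv);  // verify, then suspend checking
    // ... work that may legitimately trigger a GC ...
  }                                     // resume with a fresh baseline
}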
Code example #9
void VM_GC_ObjectInfoCollection::doit() {
  HandleMark hm;
  CollectedHeap* ch = Universe::heap();
  ch->ensure_parsability(false); // must happen, even if collection does
                                 // not happen (e.g. due to GC_locker)

  ObjectInfoCollection::collect_object_info(_objinfo_log, _apinfo_log, _reason);

  if (CrashOnObjectInfoDump) {
    guarantee(false, "requested crash after object info dump");
  }
}
Code example #10
inline void update_barrier_set(oop *p, oop v) {
  assert(oopDesc::bs() != NULL, "Uninitialized bs in oop!");
  oopDesc::bs()->write_ref_field(p, v);

  if (UseTrainGC) {
    // Each generation has a chance to examine the oop.
    CollectedHeap* gch = Universe::heap();
    // This is even more bogus.
    if (gch->kind() == CollectedHeap::GenCollectedHeap) {
      ((GenCollectedHeap*)gch)->examine_modified_oop(p);
    }
  }
}
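A barrier like this runs after every reference store. A minimal sketch of the store-then-barrier sequence (hypothetical wrapper; HotSpot's real entry point is the oop_store family):

inline void store_oop_field(oop* field, oop value) {
  *field = value;                    // perform the reference store first
  update_barrier_set(field, value);  // then tell the collector about it
}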
Code example #11
File: methodDataOop.cpp Project: AllenWeb/openjdk-1
void ReceiverTypeData::oop_iterate_m(OopClosure* blk, MemRegion mr) {
  // Currently, this interface is called only during card-scanning for
  // a young gen gc, in which case this object cannot contribute anything,
  // since it does not contain any references that cross out of
  // the perm gen. However, for future more general use we allow
  // the possibility of calling for instance from more general
  // iterators (for example, a future regionalized perm gen for G1,
  // or the possibility of moving some references out of perm in
  // the case of other collectors). In that case, you will need
  // to relax or remove some of the assertions below.
#ifdef ASSERT
  // Verify that none of the embedded oop references cross out of
  // this generation.
  for (uint row = 0; row < row_limit(); row++) {
    if (receiver(row) != NULL) {
      oop* adr = adr_receiver(row);
      CollectedHeap* h = Universe::heap();
      assert(h->is_permanent(adr) && h->is_permanent_or_null(*adr), "Not intra-perm");
    }
  }
#endif // ASSERT
  assert(!blk->should_remember_mdo(), "Not expected to remember MDO");
  return;   // Nothing to do, see comment above
#if 0
  if (blk->should_remember_mdo()) {
    // This is a set of weak references that need
    // to be followed at the end of the strong marking
    // phase. Memoize this object so it can be visited
    // in the weak roots processing phase.
    blk->remember_mdo(data());
  } else { // normal scan
    for (uint row = 0; row < row_limit(); row++) {
      if (receiver(row) != NULL) {
        oop* adr = adr_receiver(row);
        if (mr.contains(adr)) {
          blk->do_oop(adr);
        } else if ((HeapWord*)adr >= mr.end()) {
          // Test that the current cursor and the two ends of the range
          // that we may have skipped iterating over are monotonically ordered;
          // this is just a paranoid assertion, just in case representations
          // should change in the future rendering the short-circuit return
          // here invalid.
          assert((row+1 >= row_limit() || adr_receiver(row+1) > adr) &&
                 (row+2 >= row_limit() || adr_receiver(row_limit()-1) > adr_receiver(row+1)), "Reducing?");
          break; // remaining should be outside this mr too
        }
      }
    }
  }
#endif
}
Code example #12
void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size) {
  // All space sizes must be multiples of car size in order for the CarTable to work.
  // Note that the CarTable is used with and without train gc (for fast lookup).
  uintx alignment = CarSpace::car_size();

  // Compute sizes
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, alignment);
  uintx eden_size = size - (2*survivor_size);
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  if (eden_size < minimum_eden_size) {
    // May happen due to 64Kb rounding, if so adjust eden size back up
    minimum_eden_size = align_size_up(minimum_eden_size, alignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size = 
      align_size_down(maximum_survivor_size, alignment);
    survivor_size = MAX2(unaligned_survivor_size, alignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }

  char *eden_start = _virtual_space.low();
  char *from_start = eden_start + eden_size;
  char *to_start   = from_start + survivor_size;
  char *to_end     = to_start   + survivor_size;

  assert(to_end == _virtual_space.high(), "just checking");
  assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)to_start),   "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start, (HeapWord*)to_end);

  eden()->initialize(edenMR, (minimum_eden_size == 0));
  from()->initialize(fromMR, true);
    to()->initialize(toMR  , true);

  if (jvmpi::is_event_enabled(JVMPI_EVENT_ARENA_NEW)) {
    CollectedHeap* ch = Universe::heap();
    jvmpi::post_arena_new_event(ch->addr_to_arena_id(eden_start), "Eden");
    jvmpi::post_arena_new_event(ch->addr_to_arena_id(from_start), "Semi");
    jvmpi::post_arena_new_event(ch->addr_to_arena_id(to_start), "Semi");
  }
}
Code example #13
void DefNewGeneration::swap_spaces() {
  CollectedHeap* ch = Universe::heap();
  if (jvmpi::is_event_enabled(JVMPI_EVENT_ARENA_DELETE)) {
    jvmpi::post_arena_delete_event(ch->addr_to_arena_id(from()->bottom()));
  }
  if (jvmpi::is_event_enabled(JVMPI_EVENT_ARENA_NEW)) {
    jvmpi::post_arena_new_event(ch->addr_to_arena_id(from()->bottom()), "Semi");
  }
  ContiguousSpace* s = from();
  _from_space        = to();
  _to_space          = s;

  if (UsePerfData) {
    CSpaceCounters* c = _from_counters;
    _from_counters = _to_counters;
    _to_counters = c;
  }
}
Code example #14
TEST_VM(CollectedHeap, is_in) {
  CollectedHeap* heap = Universe::heap();

  uintptr_t epsilon = (uintptr_t) MinObjAlignment;
  uintptr_t heap_start = (uintptr_t) heap->reserved_region().start();
  uintptr_t heap_end = (uintptr_t) heap->reserved_region().end();

  // Test that NULL is not in the heap.
  ASSERT_FALSE(heap->is_in(NULL)) << "NULL is unexpectedly in the heap";

  // Test that a pointer to before the heap start is reported as outside the heap.
  ASSERT_GE(heap_start, ((uintptr_t) NULL + epsilon))
          << "Sanity check - heap should not start at 0";

  void* before_heap = (void*) (heap_start - epsilon);
  ASSERT_FALSE(heap->is_in(before_heap)) << "before_heap: " << p2i(before_heap)
          << " is unexpectedly in the heap";

  // Test that a pointer to after the heap end is reported as outside the heap.
  ASSERT_LE(heap_end, ((uintptr_t)-1 - epsilon))
          << "Sanity check - heap should not end at the end of address space";

  void* after_heap = (void*) (heap_end + epsilon);
  ASSERT_FALSE(heap->is_in(after_heap)) << "after_heap: " << p2i(after_heap)
          << " is unexpectedly in the heap";
}
Code example #15
File: debug.cpp Project: LeLiKa/openjdk
static void findref(intptr_t x) {
  CollectedHeap *ch = Universe::heap();
  LookForRefInGenClosure lookFor;
  lookFor.target = (oop) x;
  LookForRefInObjectClosure look_in_object((oop) x);

  tty->print_cr("Searching heap:");
  ch->object_iterate(&look_in_object);

  tty->print_cr("Searching strong roots:");
  Universe::oops_do(&lookFor, false);
  JNIHandles::oops_do(&lookFor);   // Global (strong) JNI handles
  Threads::oops_do(&lookFor, NULL);
  ObjectSynchronizer::oops_do(&lookFor);
  //FlatProfiler::oops_do(&lookFor);
  SystemDictionary::oops_do(&lookFor);

  tty->print_cr("Searching code cache:");
  CodeCache::oops_do(&lookFor);

  tty->print_cr("Done.");
}
Code example #16
typeArrayOop typeArrayKlass::allocate(int length, TRAPS) {
  assert(log2_element_size() >= 0, "bad scale");
  if (length >= 0) {
    if (length <= max_length()) {
      size_t size = typeArrayOopDesc::object_size(layout_helper(), length);
      KlassHandle h_k(THREAD, as_klassOop());
      typeArrayOop t;
      CollectedHeap* ch = Universe::heap();
      if (size < ch->large_typearray_limit()) {
        t = (typeArrayOop)CollectedHeap::array_allocate(h_k, (int)size, length, CHECK_NULL);
      } else {
        t = (typeArrayOop)CollectedHeap::large_typearray_allocate(h_k, (int)size, length, CHECK_NULL);
      }
      assert(t->is_parsable(), "Don't publish unless parsable");
      return t;
    } else {
      THROW_OOP_0(Universe::out_of_memory_error_array_size());
    }
  } else {
    THROW_0(vmSymbols::java_lang_NegativeArraySizeException());
  }
}
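A hypothetical call site, using HotSpot's TRAPS/CHECK_NULL exception macros (the supported entry points are the oopFactory::new_* helpers; this direct form is for illustration only):

typeArrayOop new_int_array(int length, TRAPS) {
  typeArrayKlass* tak = typeArrayKlass::cast(Universe::intArrayKlassObj());
  return tak->allocate(length, CHECK_NULL);
}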
Code example #17
inline void GCTraceTimeImpl::log_stop(jlong start_counter, jlong stop_counter) {
  double duration_in_ms = TimeHelper::counter_to_millis(stop_counter - start_counter);
  double start_time_in_secs = TimeHelper::counter_to_seconds(start_counter);
  double stop_time_in_secs = TimeHelper::counter_to_seconds(stop_counter);

  LogStream out(_out_stop);

  out.print("%s", _title);

  if (_gc_cause != GCCause::_no_gc) {
    out.print(" (%s)", GCCause::to_string(_gc_cause));
  }

  if (_heap_usage_before != SIZE_MAX) {
    CollectedHeap* heap = Universe::heap();
    size_t used_before_m = _heap_usage_before / M;
    size_t used_m = heap->used() / M;
    size_t capacity_m = heap->capacity() / M;
    out.print(" " LOG_STOP_HEAP_FORMAT, used_before_m, used_m, capacity_m);
  }

  out.print_cr(" " LOG_STOP_TIME_FORMAT, start_time_in_secs, stop_time_in_secs, duration_in_ms);
}
Code example #18
void CardTableRS::verify() {
  // At present, we only know how to verify the card table RS for
  // generational heaps.
  VerifyCTGenClosure blk(this);
  CollectedHeap* ch = Universe::heap();
  // We will do the perm-gen portion of the card table, too.
  Generation* pg = SharedHeap::heap()->perm_gen();
  HeapWord* pg_boundary = pg->reserved().start();

  if (ch->kind() == CollectedHeap::GenCollectedHeap) {
    GenCollectedHeap::heap()->generation_iterate(&blk, false);
    _ct_bs->verify();

    // If the old gen collections also collect perm, then we are only
    // interested in perm-to-young pointers, not perm-to-old pointers.
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    CollectorPolicy* cp = gch->collector_policy();
    if (cp->is_mark_sweep_policy() || cp->is_concurrent_mark_sweep_policy()) {
      pg_boundary = gch->get_gen(1)->reserved().start();
    }
  }
  VerifyCTSpaceClosure perm_space_blk(this, pg_boundary);
  SharedHeap::heap()->perm_gen()->space_iterate(&perm_space_blk, true);
}
Code example #19
ParallelScavengeHeap* ParallelScavengeHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Not a ParallelScavengeHeap");
  return (ParallelScavengeHeap*)heap;
}
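The checked accessor saves callers from casting Universe::heap() by hand, with the asserts catching misuse. A hypothetical caller:

void print_ps_capacity() {
  ParallelScavengeHeap* psh = ParallelScavengeHeap::heap();  // asserts the heap kind
  tty->print_cr("PS heap capacity: " SIZE_FORMAT, psh->capacity());
}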
Code example #20
void VM_CollectForMetadataAllocation::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);

  CollectedHeap* heap = Universe::heap();
  GCCauseSetter gccs(heap, _gc_cause);

  // Check again if the space is available.  Another thread
  // may have similarly failed a metadata allocation and induced
  // a GC that freed space for the allocation.
  if (!MetadataAllocationFailALot) {
    _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
  }

  if (_result == NULL) {
    if (UseConcMarkSweepGC) {
      if (CMSClassUnloadingEnabled) {
        MetaspaceGC::set_should_concurrent_collect(true);
      }
      // For CMS expand since the collection is going to be concurrent.
      _result =
        _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
    }
    if (_result == NULL) {
      // Don't clear the soft refs yet.
      if (Verbose && PrintGCDetails && UseConcMarkSweepGC) {
        gclog_or_tty->print_cr("\nCMS full GC for Metaspace");
      }
      heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
      // After a GC try to allocate without expanding.  Could fail
      // and expansion will be tried below.
      _result =
        _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
    }
    if (_result == NULL) {
      // If still failing, allow the Metaspace to expand.
      // See delta_capacity_until_GC() for explanation of the
      // amount of the expansion.
      // This should work unless there really is no more space
      // or a MaxMetaspaceSize has been specified on the command line.
      _result =
        _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
      if (_result == NULL) {
        // If expansion failed, do a last-ditch collection and try allocating
        // again.  A last-ditch collection will clear softrefs.  This
        // behavior is similar to the last-ditch collection done for perm
        // gen when it was full and a collection for failed allocation
        // did not free perm gen space.
        heap->collect_as_vm_thread(GCCause::_last_ditch_collection);
        _result =
          _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
      }
    }
    if (Verbose && PrintGCDetails && _result == NULL) {
      gclog_or_tty->print_cr("\nAfter Metaspace GC failed to allocate size "
                             SIZE_FORMAT, _size);
    }
  }

  if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}
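A hedged sketch of how the allocator submits this operation after a failed metadata allocation (the constructor argument list is an assumption based on this vintage of the class):

// Hypothetical retry path inside the metaspace allocator.
VM_CollectForMetadataAllocation op(loader_data, word_size, mdtype,
                                   gc_count, full_gc_count,
                                   GCCause::_metadata_GC_threshold);
VMThread::execute(&op);
if (op.gc_locked()) {
  // GC_locker was held; the caller must stall and retry.
}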
Code example #21
void HeapInspection::heap_inspection(outputStream* st, bool need_prologue) {
  ResourceMark rm;
  HeapWord* ref;

  CollectedHeap* heap = Universe::heap();
  bool is_shared_heap = false;
  switch (heap->kind()) {
    case CollectedHeap::G1CollectedHeap:
    case CollectedHeap::GenCollectedHeap: {
      is_shared_heap = true;
      SharedHeap* sh = (SharedHeap*)heap;
      if (need_prologue) {
        sh->gc_prologue(false /* !full */); // get any necessary locks, etc.
      }
      ref = sh->perm_gen()->used_region().start();
      break;
    }
#ifndef SERIALGC
    case CollectedHeap::ParallelScavengeHeap: {
      ParallelScavengeHeap* psh = (ParallelScavengeHeap*)heap;
      ref = psh->perm_gen()->object_space()->used_region().start();
      break;
    }
#endif // SERIALGC
    default:
      ShouldNotReachHere(); // Unexpected heap kind for this op
  }
  // Collect klass instance info
  KlassInfoTable cit(KlassInfoTable::cit_size, ref);
  if (!cit.allocation_failed()) {
    // Iterate over objects in the heap
    RecordInstanceClosure ric(&cit);
    // If this operation encounters a bad object when using CMS,
    // consider using safe_object_iterate() which avoids perm gen
    // objects that may contain bad references.
    Universe::heap()->object_iterate(&ric);

    // Report if certain classes are not counted because of
    // running out of C-heap for the histogram.
    size_t missed_count = ric.missed_count();
    if (missed_count != 0) {
      st->print_cr("WARNING: Ran out of C-heap; undercounted " SIZE_FORMAT
                   " total instances in data below",
                   missed_count);
    }
    // Sort and print klass instance info
    KlassInfoHisto histo("\n"
                     " num     #instances         #bytes  class name\n"
                     "----------------------------------------------",
                     KlassInfoHisto::histo_initial_size);
    HistoClosure hc(&histo);
    cit.iterate(&hc);
    histo.sort();
    histo.print_on(st);
  } else {
    st->print_cr("WARNING: Ran out of C-heap; histogram not generated");
  }
  st->flush();

  if (need_prologue && is_shared_heap) {
    SharedHeap* sh = (SharedHeap*)heap;
    sh->gc_epilogue(false /* !full */); // release all acquired locks, etc.
  }
}
Code example #22
 ~VM_GC_Operation() {
   CollectedHeap* ch = Universe::heap();
   ch->collector_policy()->set_all_soft_refs_clear(false);
 }
Code example #23
File: isGCActiveMark.hpp Project: dain/graal
 ~IsGCActiveMark() {
   CollectedHeap* heap = Universe::heap();
   assert(heap->is_gc_active(), "Sanity");
   heap->_is_gc_active = false;
 }
Code example #24
File: isGCActiveMark.hpp Project: dain/graal
 IsGCActiveMark() {
   CollectedHeap* heap = Universe::heap();
   assert(!heap->is_gc_active(), "Not reentrant");
   heap->_is_gc_active = true;
 }
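Examples #23 and #24 are another RAII guard, flipping _is_gc_active around a collection on the VM thread. A minimal usage sketch (hypothetical body):

void do_collection_at_safepoint() {
  IsGCActiveMark mark;  // asserts !is_gc_active(), then sets the flag
  // ... perform the collection ...
}                       // destructor asserts the flag and clears it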
Code example #25
void SharedUserData::task() {
#ifdef AZ_PROXIED
  // Static variables store peak values seen during the life of the run.
  static volatile sud_jvm_heap_rev1_t peak_jvm_heap;
  static sud_io_rev1_t io_stats;
  static volatile bool initialized = false;
  if (!initialized) {
    memset ((void*)(&peak_jvm_heap), 0, sizeof(peak_jvm_heap));
    initialized = true;
  }

  if (SafepointSynchronize::is_at_safepoint()) return;

  CollectedHeap *heap = Universe::heap();
  if (!heap) return;

  size_t l = heap->last_gc_live_bytes();
  size_t u = heap->used();
  size_t c = heap->capacity();
  size_t m = heap->max_capacity();
  size_t pu = heap->permanent_used();
  size_t pc = heap->permanent_capacity();

  // Make sure that the numbers make sense when graphing.
  c = (u > c) ? u : c;
  m = (c > m) ? c : m;
  pc = (pu > pc) ? pu : pc;

  sud_jvm_heap_rev1_t jvm_heap;
  memset(&jvm_heap, 0, sizeof(jvm_heap));
  jvm_heap.revision = SUD_JVM_HEAP_REVISION;
  switch (heap->kind()) {
  case CollectedHeap::GenCollectedHeap: strcpy(jvm_heap.name, "GenCollectedHeap"); break;
  case CollectedHeap::ParallelScavengeHeap: strcpy(jvm_heap.name, "ParallelScavengeHeap"); break;
  case CollectedHeap::PauselessHeap: strcpy(jvm_heap.name, "PauselessHeap"); break;
  default: strcpy(jvm_heap.name, "");
  }
  if (heap->supports_tlab_allocation()) jvm_heap.flags |= SUD_JVM_HEAP_FLAG_TLAB_ALLOCATION;
  if (heap->supports_inline_contig_alloc()) jvm_heap.flags |= SUD_JVM_HEAP_FLAG_INLINE_CONTIG_ALLOC;

  uint64_t now = (uint64_t) os::javaTimeMillis();
  jvm_heap.timestamp_ms = now;
  jvm_heap.live_bytes = l;
  jvm_heap.used_bytes = u;
  jvm_heap.capacity_bytes = c;
  jvm_heap.max_capacity_bytes = m;
  jvm_heap.permanent_used_bytes = pu;
  jvm_heap.permanent_capacity_bytes = pc;
  jvm_heap.total_collections = heap->total_collections();

  libos::AccountInfo ai;
  az_allocid_t allocid = process_get_allocationid();
  sys_return_t ret = ai.inspectProcess (allocid);
  if (ret == SYSERR_NONE) {
    // Copy memory_accounting information into the sud structure.
    // Take care not to overflow the accounts past the maximum storable.
    const account_info_t *account_info = ai.getAccountInfo();
    uint64_t count =
      (account_info->ac_count < SUD_MAX_ACCOUNTS) ?
      account_info->ac_count :
      SUD_MAX_ACCOUNTS;
    jvm_heap.account_info.ac_count = count;
    for (uint64_t i = 0; i < count; i++) {
      jvm_heap.account_info.ac_array[i] = account_info->ac_array[i];
    }
  } else {
    warning("Failed to inspect memory accounting info (%d)", ret);
  }

#define UPDATE_PEAK(struct_member,value) \
  if (peak_jvm_heap.peak_ ## struct_member ## _bytes < value) { \
    peak_jvm_heap.peak_ ## struct_member ## _bytes = value; \
    peak_jvm_heap.peak_ ## struct_member ## _timestamp_ms = now; \
  } \
  jvm_heap.peak_ ## struct_member ## _bytes = peak_jvm_heap.peak_ ## struct_member ## _bytes; \
  jvm_heap.peak_ ## struct_member ## _timestamp_ms = peak_jvm_heap.peak_ ## struct_member ## _timestamp_ms;

  UPDATE_PEAK (live,l);
  UPDATE_PEAK (used,u);
  UPDATE_PEAK (capacity,c);
  UPDATE_PEAK (max_capacity,m);
  UPDATE_PEAK (permanent_used,pu);
  UPDATE_PEAK (permanent_capacity,pc);

  UPDATE_PEAK (allocated,ai.getAllocatedBytes());
  UPDATE_PEAK (funded,ai.getFundedBytes());
  UPDATE_PEAK (overdraft,ai.getOverdraftBytes());
  UPDATE_PEAK (footprint,ai.getFootprintBytes());

  UPDATE_PEAK (committed,ai.getCommittedBytes());
  UPDATE_PEAK (grant,ai.getGrantBytes());
  UPDATE_PEAK (allocated_from_committed,ai.getAllocatedFromCommittedBytes());

  UPDATE_PEAK (default_allocated,ai.getDefaultAllocatedBytes());
  UPDATE_PEAK (default_committed,ai.getDefaultCommittedBytes());
  UPDATE_PEAK (default_footprint,ai.getDefaultFootprintBytes());
  UPDATE_PEAK (default_grant,ai.getDefaultGrantBytes());

  UPDATE_PEAK (heap_allocated,ai.getHeapAllocatedBytes());
  UPDATE_PEAK (heap_committed,ai.getHeapCommittedBytes());
  UPDATE_PEAK (heap_footprint,ai.getHeapFootprintBytes());
  UPDATE_PEAK (heap_grant,ai.getHeapGrantBytes());

  ret = shared_user_data_set_jvm_heap_rev1 (allocid, &jvm_heap);
  if (ret != SYSERR_NONE) warning("Failed to set jvm_heap shared user data (%d)", ret);

  memset ((void*)(&io_stats), 0, sizeof(io_stats));
  io_stats.revision = SUD_IO_REVISION;
  atcpn_stats_get_io_rev1(&io_stats);
  ret = shared_user_data_set_io_rev1 (allocid, &io_stats);
  if (ret != SYSERR_NONE) warning("Failed to set io_stats shared user data (%d)", ret);
#endif // AZ_PROXIED
}
Code example #26
bool HeapInspection::is_shared_heap() {
  CollectedHeap* heap = Universe::heap();
  return heap->kind() == CollectedHeap::G1CollectedHeap ||
         heap->kind() == CollectedHeap::GenCollectedHeap;
}
Code example #27
File: forte.cpp Project: AllenWeb/openjdk-1
static void forte_fill_call_trace_given_top(JavaThread* thd,
                                            ASGCT_CallTrace* trace,
                                            int depth,
                                            frame top_frame) {
  NoHandleMark nhm;

  frame initial_Java_frame;
  methodOop method;
  int bci;
  int count;

  count = 0;
  assert(trace->frames != NULL, "trace->frames must be non-NULL");

  bool fully_decipherable = find_initial_Java_frame(thd, &top_frame, &initial_Java_frame, &method, &bci);

  // The frame might not be walkable but still recovered a method
  // (e.g. an nmethod with no scope info for the pc).

  if (method == NULL) return;

  CollectedHeap* ch = Universe::heap();

  // The method is not stored GC safe so see if GC became active
  // after we entered AsyncGetCallTrace() and before we try to
  // use the methodOop.
  // Yes, there is still a window after this check and before
  // we use methodOop below, but we can't lock out GC so that
  // has to be an acceptable risk.
  if (!ch->is_valid_method(method)) {
    trace->num_frames = ticks_GC_active; // -2
    return;
  }

  // We got a Java frame however it isn't fully decipherable
  // so it won't necessarily be safe to use it for the
  // initial frame in the vframe stream.

  if (!fully_decipherable) {
    // Take whatever method the top-frame decoder managed to scrape up.
    // We look further at the top frame only if non-safepoint
    // debugging information is available.
    count++;
    trace->num_frames = count;
    trace->frames[0].method_id = method->find_jmethod_id_or_null();
    if (!method->is_native()) {
      trace->frames[0].lineno = bci;
    } else {
      trace->frames[0].lineno = -3;
    }

    if (!initial_Java_frame.safe_for_sender(thd)) return;

    RegisterMap map(thd, false);
    initial_Java_frame = initial_Java_frame.sender(&map);
  }

  vframeStreamForte st(thd, initial_Java_frame, false);

  for (; !st.at_end() && count < depth; st.forte_next(), count++) {
    bci = st.bci();
    method = st.method();

    // The method is not stored GC safe so see if GC became active
    // after we entered AsyncGetCallTrace() and before we try to
    // use the methodOop.
    // Yes, there is still a window after this check and before
    // we use methodOop below, but we can't lock out GC so that
    // has to be an acceptable risk.
    if (!ch->is_valid_method(method)) {
      // we throw away everything we've gathered in this sample since
      // none of it is safe
      trace->num_frames = ticks_GC_active; // -2
      return;
    }

    trace->frames[count].method_id = method->find_jmethod_id_or_null();
    if (!method->is_native()) {
      trace->frames[count].lineno = bci;
    } else {
      trace->frames[count].lineno = -3;
    }
  }
  trace->num_frames = count;
  return;
}
Code example #28
void ASParNewGeneration::resize_spaces(size_t requested_eden_size,
                                       size_t requested_survivor_size) {
  assert(UseAdaptiveSizePolicy, "sanity check");
  assert(requested_eden_size > 0  && requested_survivor_size > 0,
         "just checking");
  CollectedHeap* heap = Universe::heap();
  assert(heap->kind() == CollectedHeap::GenCollectedHeap, "Sanity");


  // We require eden and to space to be empty
  if ((!eden()->is_empty()) || (!to()->is_empty())) {
    return;
  }

  size_t cur_eden_size = eden()->capacity();

  if (PrintAdaptiveSizePolicy && Verbose) {
    gclog_or_tty->print_cr("ASParNew::resize_spaces(requested_eden_size: "
                  SIZE_FORMAT
                  ", requested_survivor_size: " SIZE_FORMAT ")",
                  requested_eden_size, requested_survivor_size);
    gclog_or_tty->print_cr("    eden: [" PTR_FORMAT ".." PTR_FORMAT ") "
                  SIZE_FORMAT,
                  eden()->bottom(),
                  eden()->end(),
                  pointer_delta(eden()->end(),
                                eden()->bottom(),
                                sizeof(char)));
    gclog_or_tty->print_cr("    from: [" PTR_FORMAT ".." PTR_FORMAT ") "
                  SIZE_FORMAT,
                  from()->bottom(),
                  from()->end(),
                  pointer_delta(from()->end(),
                                from()->bottom(),
                                sizeof(char)));
    gclog_or_tty->print_cr("      to: [" PTR_FORMAT ".." PTR_FORMAT ") "
                  SIZE_FORMAT,
                  to()->bottom(),
                  to()->end(),
                  pointer_delta(  to()->end(),
                                  to()->bottom(),
                                  sizeof(char)));
  }

  // There's nothing to do if the new sizes are the same as the current
  if (requested_survivor_size == to()->capacity() &&
      requested_survivor_size == from()->capacity() &&
      requested_eden_size == eden()->capacity()) {
    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr("    capacities are the right sizes, returning");
    }
    return;
  }

  char* eden_start = (char*)eden()->bottom();
  char* eden_end   = (char*)eden()->end();
  char* from_start = (char*)from()->bottom();
  char* from_end   = (char*)from()->end();
  char* to_start   = (char*)to()->bottom();
  char* to_end     = (char*)to()->end();

  const size_t alignment = os::vm_page_size();
  const bool maintain_minimum =
    (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();

  // Check whether from space is below to space
  if (from_start < to_start) {
    // Eden, from, to
    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr("  Eden, from, to:");
    }

    // Set eden
    // "requested_eden_size" is a goal for the size of eden
    // and may not be attainable.  "eden_size" below is
    // calculated based on the location of from-space and
    // the goal for the size of eden.  from-space is
    // fixed in place because it contains live data.
    // The calculation is done this way to avoid 32bit
    // overflow (i.e., eden_start + requested_eden_size
    // may be too large for representation in 32 bits).
    size_t eden_size;
    if (maintain_minimum) {
      // Only make eden larger than the requested size if
      // the minimum size of the generation has to be maintained.
      // This could be done in general but policy at a higher
      // level is determining a requested size for eden and that
      // should be honored unless there is a fundamental reason.
      eden_size = pointer_delta(from_start,
                                eden_start,
                                sizeof(char));
    } else {
      eden_size = MIN2(requested_eden_size,
                       pointer_delta(from_start, eden_start, sizeof(char)));
    }

    eden_size = align_size_down(eden_size, alignment);
    eden_end = eden_start + eden_size;
    assert(eden_end >= eden_start, "addition overflowed");

    // To may resize into from space as long as it is clear of live data.
    // From space must remain page aligned, though, so we need to do some
    // extra calculations.

    // First calculate an optimal to-space
    to_end   = (char*)virtual_space()->high();
    to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size,
                                    sizeof(char));

    // Does the optimal to-space overlap from-space?
    if (to_start < (char*)from()->end()) {
      // Calculate the minimum offset possible for from_end
      size_t from_size = pointer_delta(from()->top(), from_start, sizeof(char));

      // Should we be in this method if from_space is empty? Why not the set_space method? FIX ME!
      if (from_size == 0) {
        from_size = alignment;
      } else {
        from_size = align_size_up(from_size, alignment);
      }

      from_end = from_start + from_size;
      assert(from_end > from_start, "addition overflow or from_size problem");

      guarantee(from_end <= (char*)from()->end(), "from_end moved to the right");

      // Now update to_start with the new from_end
      to_start = MAX2(from_end, to_start);
    } else {
      // If shrinking, move to-space down to abut the end of from-space
      // so that shrinking will move to-space down.  If not shrinking
      // to-space is moving up to allow for growth on the next expansion.
      if (requested_eden_size <= cur_eden_size) {
        to_start = from_end;
        if (to_start + requested_survivor_size > to_start) {
          to_end = to_start + requested_survivor_size;
        }
      }
      // else leave to_end pointing to the high end of the virtual space.
    }

    guarantee(to_start != to_end, "to space is zero sized");

    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr("    [eden_start .. eden_end): "
                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                    eden_start,
                    eden_end,
                    pointer_delta(eden_end, eden_start, sizeof(char)));
      gclog_or_tty->print_cr("    [from_start .. from_end): "
                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                    from_start,
                    from_end,
                    pointer_delta(from_end, from_start, sizeof(char)));
      gclog_or_tty->print_cr("    [  to_start ..   to_end): "
                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                    to_start,
                    to_end,
                    pointer_delta(  to_end,   to_start, sizeof(char)));
    }
  } else {
    // Eden, to, from
    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr("  Eden, to, from:");
    }

    // Calculate the to-space boundaries based on
    // the start of from-space.
    to_end = from_start;
    to_start = (char*)pointer_delta(from_start,
                                    (char*)requested_survivor_size,
                                    sizeof(char));
    // Calculate the ideal eden boundaries.
    // eden_end is already at the bottom of the generation
    assert(eden_start == virtual_space()->low(),
      "Eden is not starting at the low end of the virtual space");
    if (eden_start + requested_eden_size >= eden_start) {
      eden_end = eden_start + requested_eden_size;
    } else {
      eden_end = to_start;
    }

    // Does eden intrude into to-space?  to-space
    // gets priority but eden is not allowed to shrink
    // to 0.
    if (eden_end > to_start) {
      eden_end = to_start;
    }

    // Don't let eden shrink down to 0 or less.
    eden_end = MAX2(eden_end, eden_start + alignment);
    assert(eden_start + alignment >= eden_start, "Overflow");

    size_t eden_size;
    if (maintain_minimum) {
      // Use all the space available.
      eden_end = MAX2(eden_end, to_start);
      eden_size = pointer_delta(eden_end, eden_start, sizeof(char));
      eden_size = MIN2(eden_size, cur_eden_size);
    } else {
      eden_size = pointer_delta(eden_end, eden_start, sizeof(char));
    }
    eden_size = align_size_down(eden_size, alignment);
    assert(maintain_minimum || eden_size <= requested_eden_size,
      "Eden size is too large");
    assert(eden_size >= alignment, "Eden size is too small");
    eden_end = eden_start + eden_size;

    // Move to-space down to eden.
    if (requested_eden_size < cur_eden_size) {
      to_start = eden_end;
      if (to_start + requested_survivor_size > to_start) {
        to_end = MIN2(from_start, to_start + requested_survivor_size);
      } else {
        to_end = from_start;
      }
    }

    // eden_end may have moved so again make sure
    // the to-space and eden don't overlap.
    to_start = MAX2(eden_end, to_start);

    // from-space
    size_t from_used = from()->used();
    if (requested_survivor_size > from_used) {
      if (from_start + requested_survivor_size >= from_start) {
        from_end = from_start + requested_survivor_size;
      }
      if (from_end > virtual_space()->high()) {
        from_end = virtual_space()->high();
      }
    }

    assert(to_start >= eden_end, "to-space should be above eden");
    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr("    [eden_start .. eden_end): "
                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                    eden_start,
                    eden_end,
                    pointer_delta(eden_end, eden_start, sizeof(char)));
      gclog_or_tty->print_cr("    [  to_start ..   to_end): "
                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                    to_start,
                    to_end,
                    pointer_delta(  to_end,   to_start, sizeof(char)));
      gclog_or_tty->print_cr("    [from_start .. from_end): "
                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                    from_start,
                    from_end,
                    pointer_delta(from_end, from_start, sizeof(char)));
    }
  }


  guarantee((HeapWord*)from_start <= from()->bottom(),
            "from start moved to the right");
  guarantee((HeapWord*)from_end >= from()->top(),
            "from end moved into live data");
  assert(is_object_aligned((intptr_t)eden_start), "checking alignment");
  assert(is_object_aligned((intptr_t)from_start), "checking alignment");
  assert(is_object_aligned((intptr_t)to_start), "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)eden_end);
  MemRegion toMR  ((HeapWord*)to_start,   (HeapWord*)to_end);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)from_end);

  // Let's make sure the call to initialize doesn't reset "top"!
  HeapWord* old_from_top = from()->top();

  // For the PrintAdaptiveSizePolicy block below
  size_t old_from = from()->capacity();
  size_t old_to   = to()->capacity();

  // If not clearing the spaces, do some checking to verify that
  // the spaces are already mangled.

  // Must check mangling before the spaces are reshaped.  Otherwise,
  // the bottom or end of one space may have moved into another;
  // a failure of the check may not correctly indicate which space
  // is not properly mangled.
  if (ZapUnusedHeapArea) {
    HeapWord* limit = (HeapWord*) virtual_space()->high();
    eden()->check_mangled_unused_area(limit);
    from()->check_mangled_unused_area(limit);
      to()->check_mangled_unused_area(limit);
  }

  // The call to initialize NULL's the next compaction space
  eden()->initialize(edenMR,
                     SpaceDecorator::Clear,
                     SpaceDecorator::DontMangle);
  eden()->set_next_compaction_space(from());
    to()->initialize(toMR  ,
                     SpaceDecorator::Clear,
                     SpaceDecorator::DontMangle);
  from()->initialize(fromMR,
                     SpaceDecorator::DontClear,
                     SpaceDecorator::DontMangle);

  assert(from()->top() == old_from_top, "from top changed!");

  if (PrintAdaptiveSizePolicy) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    assert(gch->kind() == CollectedHeap::GenCollectedHeap, "Sanity");

    gclog_or_tty->print("AdaptiveSizePolicy::survivor space sizes: "
                  "collection: %d "
                  "(" SIZE_FORMAT ", " SIZE_FORMAT ") -> "
                  "(" SIZE_FORMAT ", " SIZE_FORMAT ") ",
                  gch->total_collections(),
                  old_from, old_to,
                  from()->capacity(),
                  to()->capacity());
    gclog_or_tty->cr();
  }
}