MemoryUsage EdenMutableSpacePool::get_memory_usage() {
  size_t maxSize   = (available_for_allocation() ? max_size() : 0);
  size_t used      = used_in_bytes();
  size_t committed = _space->capacity_in_bytes();

  return MemoryUsage(initial_size(), used, committed, maxSize);
}
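All of the pool snippets here build the same four-field value. A minimal illustrative mirror of the MemoryUsage shape they assume (field order init/used/committed/max, with max reported as 0 when the pool cannot currently satisfy allocations; the accessors match the ones used in record_peak_memory_usage below — this is a sketch, not HotSpot's actual class):

#include <cstddef>

// Illustrative mirror only: the (init, used, committed, max) tuple
// these get_memory_usage() implementations construct.
class MemoryUsage {
 public:
  MemoryUsage(size_t init, size_t used, size_t committed, size_t max)
      : _init(init), _used(used), _committed(committed), _max(max) {}
  size_t init_size() const { return _init; }
  size_t used() const      { return _used; }
  size_t committed() const { return _committed; }
  size_t max_size() const  { return _max; }
 private:
  size_t _init, _used, _committed, _max;
};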
Example 2
MemoryUsage CodeHeapPool::get_memory_usage() {
  size_t used      = used_in_bytes();
  size_t committed = _codeHeap->capacity();
  size_t maxSize   = (available_for_allocation() ? max_size() : 0);

  return MemoryUsage(initial_size(), used, committed, maxSize);
}
Example 3
/**
 * Copies data from this buffer to the "dst" address. The buffer is
 * shrunk if possible. If the "dst" address is NULL, then the message
 * is dequeued but is not copied.
 */
void
circular_buffer::dequeue(void *dst) {
    assert(unit_sz > 0);
    assert(_unread >= unit_sz);
    assert(_unread <= _buffer_sz);
    assert(_buffer);

    KLOG(kernel, mem,
             "circular_buffer dequeue "
             "unread: %d, next: %d, buffer_sz: %d, unit_sz: %d",
             _unread, _next, _buffer_sz, unit_sz);

    assert(_next + unit_sz <= _buffer_sz);
    if (dst != NULL) {
        memcpy(dst, &_buffer[_next], unit_sz);
    }
    KLOG(kernel, mem, "shifted data from index %d", _next);
    _unread -= unit_sz;
    _next += unit_sz;
    if (_next == _buffer_sz) {
        _next = 0;
    }

    // Shrink if possible.
    if (_buffer_sz > initial_size() && _unread <= _buffer_sz / 4) {
        shrink();
    }
}
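A hypothetical usage sketch of the dequeue contract above (the constructor appears in a later example; enqueue(void *src) is assumed here, not shown anywhere in this listing):

// Hypothetical usage; enqueue(void *src) is assumed, not shown above.
circular_buffer buf(kernel, sizeof(int));   // one int per message
int in = 42, out = 0;
buf.enqueue(&in);    // copy unit_sz bytes into the ring
buf.enqueue(&in);
buf.dequeue(&out);   // copy the oldest message out; may shrink the ring
buf.dequeue(NULL);   // drop the next message without copying it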
Example 4
MemoryUsage PSGenerationPool::get_memory_usage() {
  size_t maxSize   = (available_for_allocation() ? max_size() : 0);
  size_t used      = used_in_bytes();
  size_t committed = _old_gen->capacity_in_bytes();

  return MemoryUsage(initial_size(), used, committed, maxSize);
}
Example 5
MemoryUsage SurvivorContiguousSpacePool::get_memory_usage() {
  size_t maxSize   = (available_for_allocation() ? max_size() : 0);
  size_t used      = used_in_bytes();
  size_t committed = committed_in_bytes();

  return MemoryUsage(initial_size(), used, committed, maxSize);
}
Example 6
MemoryUsage G1OldGenPool::get_memory_usage() {
  size_t initial_sz = initial_size();
  size_t max_sz     = max_size();
  size_t used       = used_in_bytes();
  size_t committed  = _g1mm->old_space_committed();

  return MemoryUsage(initial_sz, used, committed, max_sz);
}
Example 7
void MemoryPool::record_peak_memory_usage() {
  // Caller in JDK is responsible for synchronization -
  // acquire the lock for this memory pool before calling VM
  MemoryUsage usage = get_memory_usage();
  size_t peak_used = get_max_value(usage.used(), _peak_usage.used());
  size_t peak_committed = get_max_value(usage.committed(), _peak_usage.committed());
  size_t peak_max_size = get_max_value(usage.max_size(), _peak_usage.max_size());

  _peak_usage = MemoryUsage(initial_size(), peak_used, peak_committed, peak_max_size);
}
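get_max_value is not included in these examples; a minimal sketch, assuming it is the obvious elementwise maximum over the two usage snapshots:

// Assumed helper: keep the larger of the current and recorded peak values.
static size_t get_max_value(size_t current, size_t peak) {
  return current > peak ? current : peak;
}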
Example 8
void
circular_buffer::shrink() {
    size_t new_buffer_sz = _buffer_sz / 2;
    I(dom, initial_size() <= new_buffer_sz);
    DLOG(dom, mem, "circular_buffer is shrinking to %d bytes", new_buffer_sz);
    void *new_buffer = dom->malloc(new_buffer_sz);
    transfer(new_buffer);
    dom->free(_buffer);
    _buffer = (uint8_t *)new_buffer;
    _next = 0;
    _buffer_sz = new_buffer_sz;
}
Example 9
void
circular_buffer::shrink() {
    size_t new_buffer_sz = _buffer_sz / 2;
    assert(initial_size() <= new_buffer_sz);
    KLOG(kernel, mem, "circular_buffer is shrinking to %d bytes",
         new_buffer_sz);
    void *new_buffer = kernel->malloc(new_buffer_sz,
                                      "new circular_buffer (shrink)");
    transfer(new_buffer);
    kernel->free(_buffer);
    _buffer = (uint8_t *)new_buffer;
    _next = 0;
    _buffer_sz = new_buffer_sz;
}
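Both shrink variants rely on transfer(), which is not shown in this listing. A minimal sketch of what it has to do, assuming the unread region may wrap around the end of the ring and that the destination becomes the new index-0 base (consistent with shrink() setting _next = 0 afterwards):

// Sketch only: linearize the unread bytes into dst, starting at _next.
void
circular_buffer::transfer(void *dst) {
    assert(dst);
    size_t contig = _buffer_sz - _next;      // bytes before the ring wraps
    if (_unread <= contig) {
        memcpy(dst, &_buffer[_next], _unread);
    } else {
        memcpy(dst, &_buffer[_next], contig);
        memcpy((uint8_t *)dst + contig, _buffer, _unread - contig);
    }
}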
Example 10
circular_buffer::circular_buffer(rust_kernel *kernel, size_t unit_sz) :
    kernel(kernel),
    unit_sz(unit_sz),
    _buffer_sz(initial_size()),
    _next(0),
    _unread(0),
    _buffer((uint8_t *)kernel->malloc(_buffer_sz, "circular_buffer")) {

    assert(unit_sz && "Unit size must be larger than zero.");

    KLOG(kernel, mem, "new circular_buffer(buffer_sz=%d, unread=%d) "
         "-> circular_buffer=0x%" PRIxPTR,
         _buffer_sz, _unread, this);

    assert(_buffer && "Failed to allocate buffer.");
}
Example 11
circular_buffer::circular_buffer(rust_dom *dom, size_t unit_sz) :
    dom(dom),
    unit_sz(unit_sz),
    _buffer_sz(initial_size()),
    _next(0),
    _unread(0),
    _buffer((uint8_t *)dom->malloc(_buffer_sz)) {

    A(dom, unit_sz, "Unit size must be larger than zero.");

    DLOG(dom, mem, "new circular_buffer(buffer_sz=%d, unread=%d) "
         "-> circular_buffer=0x%" PRIxPTR,
         _buffer_sz, _unread, this);

    A(dom, _buffer, "Failed to allocate buffer.");
}
Example 12
MemoryUsage CompressedKlassSpacePool::get_memory_usage() {
  size_t committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
  return MemoryUsage(initial_size(), used_in_bytes(), committed, max_size());
}
Example 13
MemoryUsage MetaspacePool::get_memory_usage() {
  size_t committed = MetaspaceAux::committed_bytes();
  return MemoryUsage(initial_size(), used_in_bytes(), committed, max_size());
}
Example 14
void DefNewGeneration::compute_new_size() {
  // This is called after a GC that includes the old generation, so from-space
  // will normally be empty. We check both spaces because, if the scavenge
  // failed, they swap roles. If either is non-empty we bail out, since
  // resizing would require relocating the objects.
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  size_t old_size = gch->old_gen()->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = initial_size();
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  // Compute desired new generation size based on NewRatio and
  // NewSizeThreadIncrease
  size_t desired_new_size = old_size/NewRatio;
  int threads_count = Threads::number_of_non_daemon_threads();
  size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
  desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);

  // Adjust new generation size
  desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

  bool changed = false;
  if (desired_new_size > new_size_before) {
    size_t change = desired_new_size - new_size_before;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
       changed = true;
    }
    // If the heap failed to expand to the desired size,
    // "changed" will be false.  If the expansion failed
    // (and at this point it was expected to succeed),
    // ignore the failure (leaving "changed" as false).
  }
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // bail out of shrinking if objects in eden
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
    _virtual_space.shrink_by(change);
    changed = true;
  }
  if (changed) {
    // The spaces have already been mangled at this point but
    // may not have been cleared (set top = bottom) and should be.
    // Mangling was done when the heap was being expanded.
    compute_space_boundaries(eden()->used(),
                             SpaceDecorator::Clear,
                             SpaceDecorator::DontMangle);
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
    gch->barrier_set()->resize_covered_region(cmr);

    log_debug(gc, heap, ergo)(
        "New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden=" SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
        new_size_before/K, _virtual_space.committed_size()/K,
        eden()->capacity()/K, from()->capacity()/K);
    log_trace(gc, heap, ergo)(
        "  [allowed " SIZE_FORMAT "K extra for %d threads]",
        thread_increase_size/K, threads_count);
  }
}
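A worked instance of the sizing arithmetic above, with hypothetical values for the old generation size, NewRatio, thread count, and NewSizeThreadIncrease chosen only for illustration (K and M are HotSpot's usual byte-size constants):

// Hypothetical numbers, purely to illustrate the arithmetic above.
size_t old_size             = 512 * M;       // old generation capacity
size_t min_new_size         = 32 * M;        // stand-in for initial_size()
size_t max_new_size         = 512 * M;       // stand-in for reserved().byte_size()
size_t desired_new_size     = old_size / 2;  // NewRatio = 2 -> 256M
size_t thread_increase_size = 10 * 16 * K;   // 10 threads * 16K NewSizeThreadIncrease
desired_new_size = align_size_up(desired_new_size + thread_increase_size,
                                 Generation::GenGrain);
desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
// -> roughly 256M + 160K, rounded up to GenGrain and clamped to the bounds.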