Example #1
void ContiguousSpace::allocate_temporary_filler(int factor) {
  // allocate temporary type array decreasing free size with factor 'factor'
  assert(factor >= 0, "just checking");
  size_t size = pointer_delta(end(), top());

  // if space is full, return
  if (size == 0) return;

  if (factor > 0) {
    size -= size/factor;
  }
  size = align_object_size(size);

  const size_t array_header_size = typeArrayOopDesc::header_size(T_INT);
  if (size >= (size_t)align_object_size(array_header_size)) {
    size_t length = (size - array_header_size) * (HeapWordSize / sizeof(jint));
    // allocate uninitialized int array
    typeArrayOop t = (typeArrayOop) allocate(size);
    assert(t != NULL, "allocation should succeed");
    t->set_mark(markOopDesc::prototype());
    t->set_klass(Universe::intArrayKlassObj());
    t->set_length((int)length);
  } else {
    assert(size == CollectedHeap::min_fill_size(),
           "size for smallest fake object doesn't match");
    instanceOop obj = (instanceOop) allocate(size);
    obj->set_mark(markOopDesc::prototype());
    obj->set_klass_gap(0);
    obj->set_klass(SystemDictionary::Object_klass());
  }
}
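Every example in this listing centers on the same helper: align_object_size rounds a size given in HeapWords up to the VM's minimum object alignment. Below is a minimal standalone sketch of that rounding arithmetic, assuming a 2-word alignment (the common LP64 value); it is illustrative, not HotSpot's actual header.

// Illustrative sketch only: rounds a word count up to a power-of-two
// alignment, the same arithmetic align_object_size performs.
#include <cstddef>

const size_t kMinObjAlignmentWords = 2;  // assumed: 2 HeapWords (16 bytes on LP64)

inline size_t align_object_size_sketch(size_t word_size) {
  // Round word_size up to the next multiple of kMinObjAlignmentWords.
  return (word_size + kMinObjAlignmentWords - 1) & ~(kMinObjAlignmentWords - 1);
}

// e.g. align_object_size_sketch(3) == 4, align_object_size_sketch(8) == 8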
Example #2
// This is the shared initialization code. It sets up the basic pointers,
// and allows enough extra space for a filler object. We call a virtual
// method, "lab_is_valid()" to handle the different asserts the old/young
// labs require.
void PSPromotionLAB::initialize(MemRegion lab) {
  assert(lab_is_valid(lab), "Sanity");

  HeapWord* bottom = lab.start();
  HeapWord* end    = lab.end();

  set_bottom(bottom);
  set_end(end);
  set_top(bottom);

  // Initialize after VM starts up because header_size depends on compressed
  // oops.
  filler_header_size = align_object_size(typeArrayOopDesc::header_size(T_INT));

  // We can be initialized to a zero size!
  if (free() > 0) {
    if (ZapUnusedHeapArea) {
      debug_only(Copy::fill_to_words(top(), free()/HeapWordSize, badHeapWord));
    }

    // NOTE! We need to allow space for a filler object.
    assert(lab.word_size() >= filler_header_size, "lab is too small");
    end = end - filler_header_size;
    set_end(end);

    _state = needs_flush;
  } else {
    _state = zero_size;
  }

  assert(this->top() <= this->end(), "pointers out of order");
}
Example #3
int instanceMirrorKlass::instance_size(KlassHandle k) {
  if (k() != NULL && k->oop_is_instance()) {
    return align_object_size(size_helper() + instanceKlass::cast(k())->static_field_size());
  }

  return size_helper();
}
Example #4
// Compute desired plab size and latch result for later
// use. This should be called once at the end of parallel
// scavenge; it clears the sensor accumulators.
void PLABStats::adjust_desired_plab_sz() {
  assert(ResizePLAB, "Not set");
  if (_allocated == 0) {
    assert(_unused == 0, "Inconsistency in PLAB stats");
    _allocated = 1;
  }
  double wasted_frac    = (double)_unused/(double)_allocated;
  size_t target_refills = (size_t)((wasted_frac*TargetSurvivorRatio)/
                                   TargetPLABWastePct);
  if (target_refills == 0) {
    target_refills = 1;
  }
  _used = _allocated - _wasted - _unused;
  size_t plab_sz = _used/(target_refills*ParallelGCThreads);
  if (PrintPLAB) gclog_or_tty->print(" (plab_sz = " SIZE_FORMAT " ", plab_sz);
  // Take historical weighted average
  _filter.sample(plab_sz);
  // Clip from above and below, and align to object boundary
  plab_sz = MAX2(min_size(), (size_t)_filter.average());
  plab_sz = MIN2(max_size(), plab_sz);
  plab_sz = align_object_size(plab_sz);
  // Latch the result
  if (PrintPLAB) gclog_or_tty->print(" desired_plab_sz = " SIZE_FORMAT ") ", plab_sz);
  if (ResizePLAB) {
    _desired_plab_sz = plab_sz;
  }
  // Now clear the accumulators for next round:
  // note this needs to be fixed in the case where we
  // are retaining across scavenges. FIX ME !!! XXX
  _allocated = 0;
  _wasted    = 0;
  _unused    = 0;
}
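To make the sizing arithmetic in Example #4 concrete, here is a hypothetical walk-through with invented sensor values, assuming the usual defaults TargetSurvivorRatio = 50 and TargetPLABWastePct = 10 (both assumptions).

#include <cstdio>
#include <cstddef>

int main() {
  // Invented sensor readings, in HeapWords.
  size_t allocated = 100000, wasted = 1000, unused = 4000;
  double wasted_frac = (double)unused / (double)allocated;         // 0.04
  size_t target_refills = (size_t)((wasted_frac * 50) / 10);       // 0.2 truncates to 0
  if (target_refills == 0) target_refills = 1;                     // clamped, as above
  size_t used = allocated - wasted - unused;                       // 95000
  size_t plab_sz = used / (target_refills * 8 /* assumed ParallelGCThreads */);
  printf("plab_sz = %zu words\n", plab_sz);                        // 11875
  return 0;
}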
Example #5
arrayKlassHandle arrayKlass::base_create_array_klass(
const Klass_vtbl& cplusplus_vtbl, int header_size, KlassHandle klass, TRAPS) {
  // Allocation
  // Note: because the Java vtable must start at the same offset in all klasses,
  // we must insert filler fields into arrayKlass to make it the same size as instanceKlass.
  // If this assert fails, add filler to instanceKlass to make it bigger.
  assert(header_size <= instanceKlass::header_size(),
         "array klasses must be same size as instanceKlass");
  header_size = instanceKlass::header_size();
  // Arrays don't add any new methods, so their vtable is the same size as
  // the vtable of klass Object.
  int vtable_size = Universe::base_vtable_size();
  arrayKlassHandle k;
  KlassHandle base_klass = Klass::base_create_klass(klass,
                                                 header_size + vtable_size,
                                                 cplusplus_vtbl, CHECK_(k));
  k = arrayKlassHandle(THREAD, base_klass());

  assert(!k()->is_parsable(), "not expecting parsability yet.");
  k->set_super(Universe::is_bootstrapping() ? NULL : SystemDictionary::object_klass());
  k->set_size_helper(0);
  k->set_dimension(1);
  k->set_higher_dimension(NULL);
  k->set_lower_dimension(NULL);
  k->set_vtable_length(vtable_size);
  k->set_is_cloneable(); // All arrays are considered to be cloneable (See JLS 20.1.5)

  assert(k()->is_parsable(), "should be parsable here.");
  // Make sure size calculation is right
  assert(k()->size() == align_object_size(header_size + vtable_size), "wrong size for object");
  
  return k;
}
Example #6
/**
 * Compute the new size of the current thread's thread-local allocation buffer.
 */
inline size_t ThreadLocalAllocBuffer::compute_size(size_t obj_size) {
  const size_t aligned_obj_size = align_object_size(obj_size);

  // Compute the size for the new TLAB.
  // The "last" tlab may be smaller to reduce fragmentation.
  // unsafe_max_tlab_alloc is just a hint.
  const size_t available_size = Universe::heap()->unsafe_max_tlab_alloc(myThread()) / HeapWordSize;
  size_t new_tlab_size = MIN2(available_size, desired_size() + aligned_obj_size);

  // Make sure there's enough room for object and filler int[].
  const size_t obj_plus_filler_size = aligned_obj_size + alignment_reserve();
  if (new_tlab_size < obj_plus_filler_size) {
    // If there isn't enough room for the allocation, return failure.
    if (PrintTLAB && Verbose) {
      gclog_or_tty->print_cr("ThreadLocalAllocBuffer::compute_size(" SIZE_FORMAT ")"
                    " returns failure",
                    obj_size);
    }
    return 0;
  }

  if (PrintTLAB && Verbose) {
    gclog_or_tty->print_cr("ThreadLocalAllocBuffer::compute_size(" SIZE_FORMAT ")"
                  " returns " SIZE_FORMAT,
                  obj_size, new_tlab_size);
  }
  return new_tlab_size;
}
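The guard against obj_plus_filler_size ensures a new TLAB can hold both the requested object and, later, a filler array for the unused tail. A condensed sketch of that decision, with assumed word counts:

#include <algorithm>
#include <cstddef>

// All sizes in HeapWords; the constants are assumptions for illustration.
size_t compute_size_sketch(size_t aligned_obj_size) {
  const size_t available_size    = 4096;  // assumed unsafe_max_tlab_alloc hint
  const size_t desired_size      = 1024;  // assumed current desired TLAB size
  const size_t alignment_reserve = 4;     // assumed space kept for a filler int[]

  size_t new_tlab_size = std::min(available_size, desired_size + aligned_obj_size);
  if (new_tlab_size < aligned_obj_size + alignment_reserve) {
    return 0;  // too small to hold the object plus a filler: report failure
  }
  return new_tlab_size;
}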
Example #7
void SharedHeap::fill_region_with_object(MemRegion mr) {
  // Disable allocation events, since this isn't a "real" allocation.
  JVMPIAllocEventDisabler dis;  

  size_t word_size = mr.word_size();
  size_t aligned_array_header_size =
    align_object_size(typeArrayOopDesc::header_size(T_INT));

  if (word_size >= aligned_array_header_size) {
    const size_t array_length =
      pointer_delta(mr.end(), mr.start()) -
      typeArrayOopDesc::header_size(T_INT);
    const size_t array_length_words =
      array_length * (HeapWordSize/sizeof(jint));
    post_allocation_setup_array(Universe::intArrayKlassObj(),
                                mr.start(),
                                mr.word_size(),
                                (int)array_length_words);
#ifdef ASSERT
    HeapWord* elt_words = (mr.start() + typeArrayOopDesc::header_size(T_INT));
    Memory::set_words(elt_words, array_length, 0xDEAFBABE);
#endif
  } else {
    assert(word_size == (size_t)oopDesc::header_size(), "Unaligned?");
    post_allocation_setup_obj(SystemDictionary::object_klass(),
                              mr.start(),
                              mr.word_size());
  }
}
Example #8
/**
 * Adjust the baseline size of the current thread's thread-local allocation
 * buffer based on allocation statistics.
 */
void ThreadLocalAllocBuffer::resize() {

  if (ResizeTLAB) {  // TLAB resizing is enabled
    // Compute the next tlab size using expected allocation amount
    size_t alloc = (size_t)(_allocation_fraction.average() *
                            (Universe::heap()->tlab_capacity(myThread()) / HeapWordSize));
    size_t new_size = alloc / _target_refills;

    // Clamp the new size to the allowed minimum/maximum TLAB sizes
    new_size = MIN2(MAX2(new_size, min_size()), max_size());

    // Size after object alignment
    size_t aligned_new_size = align_object_size(new_size);

    if (PrintTLAB && Verbose) {
      gclog_or_tty->print("TLAB new size: thread: " INTPTR_FORMAT " [id: %2d]"
                          " refills %d  alloc: %8.6f desired_size: " SIZE_FORMAT " -> " SIZE_FORMAT "\n",
                          myThread(), myThread()->osthread()->thread_id(),
                          _target_refills, _allocation_fraction.average(), desired_size(), aligned_new_size);
    }

    set_desired_size(aligned_new_size);

    set_refill_waste_limit(initial_refill_waste_limit());
  }
}
Example #9
klassOop typeArrayKlassKlass::create_klass(TRAPS) {
  typeArrayKlassKlass o;
  KlassHandle h_this_klass(THREAD, Universe::klassKlassObj());  
  KlassHandle k = base_create_klass(h_this_klass, header_size(), o.vtbl_value(), CHECK_0);
  assert(k()->size() == align_object_size(header_size()), "wrong size for object");
  java_lang_Class::create_mirror(k, CHECK_0); // Allocate mirror
  return k();
}
Example #10
// There may be unallocated holes in the middle chunks
// that should be filled with dead objects to ensure parsability.
void MutableNUMASpace::ensure_parsability() {
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    if (s->top() < top()) { // For all spaces preceding the one containing top()
      if (s->free_in_words() > 0) {
        intptr_t cur_top = (intptr_t)s->top();
        size_t words_left_to_fill = pointer_delta(s->end(), s->top());
        while (words_left_to_fill > 0) {
          size_t words_to_fill = MIN2(words_left_to_fill, CollectedHeap::filler_array_max_size());
          assert(words_to_fill >= CollectedHeap::min_fill_size(),
                 "Remaining size (" SIZE_FORMAT ") is too small to fill (based on " SIZE_FORMAT " and " SIZE_FORMAT ")",
                 words_to_fill, words_left_to_fill, CollectedHeap::filler_array_max_size());
          CollectedHeap::fill_with_object((HeapWord*)cur_top, words_to_fill);
          if (!os::numa_has_static_binding()) {
            size_t touched_words = words_to_fill;
#ifndef ASSERT
            if (!ZapUnusedHeapArea) {
              touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)),
                touched_words);
            }
#endif
            MemRegion invalid;
            HeapWord *crossing_start = (HeapWord*)round_to(cur_top, os::vm_page_size());
            HeapWord *crossing_end = (HeapWord*)round_to(cur_top + touched_words, os::vm_page_size());
            if (crossing_start != crossing_end) {
              // If object header crossed a small page boundary we mark the area
              // as invalid rounding it to a page_size().
              HeapWord *start = MAX2((HeapWord*)round_down(cur_top, page_size()), s->bottom());
              HeapWord *end = MIN2((HeapWord*)round_to(cur_top + touched_words, page_size()), s->end());
              invalid = MemRegion(start, end);
            }

            ls->add_invalid_region(invalid);
          }
          cur_top = cur_top + (words_to_fill * HeapWordSize);
          words_left_to_fill -= words_to_fill;
        }
      }
    } else {
      if (!os::numa_has_static_binding()) {
#ifdef ASSERT
        MemRegion invalid(s->top(), s->end());
        ls->add_invalid_region(invalid);
#else
        if (ZapUnusedHeapArea) {
          MemRegion invalid(s->top(), s->end());
          ls->add_invalid_region(invalid);
        } else {
          return;
        }
#endif
      } else {
          return;
      }
    }
  }
}
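The inner loop above carves a large hole into chunks no bigger than the maximum filler-array size. Here is the chunking pattern in isolation, with an assumed limit (the real code additionally guarantees each chunk stays at or above min_fill_size):

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Assumed limit, in HeapWords; the value is illustrative only.
const size_t kFillerArrayMaxWords = 1 << 20;

// Fill a hole of `words_left_to_fill` words by carving off max-sized chunks.
void fill_hole_sketch(size_t words_left_to_fill) {
  while (words_left_to_fill > 0) {
    size_t words_to_fill = std::min(words_left_to_fill, kFillerArrayMaxWords);
    printf("fill chunk of %zu words\n", words_to_fill);  // stands in for fill_with_object()
    words_left_to_fill -= words_to_fill;
  }
}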
Example #11
klassOop arrayKlassKlass::create_klass(TRAPS) {
  arrayKlassKlass o;
  KlassHandle h_this_klass(THREAD, Universe::klassKlassObj());
  KlassHandle k = base_create_klass(h_this_klass, header_size(), o.vtbl_value(), CHECK_NULL);
  // Make sure size calculation is right
  assert(k()->size() == align_object_size(header_size()), "wrong size for object");
  java_lang_Class::create_mirror(k, CHECK_NULL); // Allocate mirror, make links
  return k();
}
Example #12
klassOop methodDataKlass::create_klass(TRAPS) {
  methodDataKlass o;
  KlassHandle h_this_klass(THREAD, Universe::klassKlassObj());  
  KlassHandle k = base_create_klass(h_this_klass, header_size(),
                                    o.vtbl_value(), CHECK_0);
  // Make sure size calculation is right
  assert(k()->size() == align_object_size(header_size()),
         "wrong size for object");
  return k();
}
Example #13
PLAB::PLAB(size_t desired_plab_sz_) :
  _word_sz(desired_plab_sz_), _bottom(NULL), _top(NULL),
  _end(NULL), _hard_end(NULL), _allocated(0), _wasted(0), _undo_wasted(0)
{
  // ArrayOopDesc::header_size depends on command line initialization.
  AlignmentReserve = oopDesc::header_size() > MinObjAlignment ? align_object_size(arrayOopDesc::header_size(T_INT)) : 0;
  assert(min_size() > AlignmentReserve,
         err_msg("Minimum PLAB size " SIZE_FORMAT " must be larger than alignment reserve " SIZE_FORMAT " "
                 "to be able to contain objects", min_size(), AlignmentReserve));
}
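Example #13 reserves filler-header space only when a bare object header is wider than the alignment unit; otherwise any alignment gap at the end of a PLAB is already large enough to hold a filler object. The same decision restated with assumed sizes:

#include <cstddef>

// Assumed sizes in HeapWords, chosen only to illustrate the branch.
const size_t kOopHeaderWords        = 2;  // assumption: plain object header
const size_t kMinObjAlignWords      = 2;  // assumption: minimum object alignment
const size_t kFillerHdrAlignedWords = 4;  // assumption: aligned int[] header

// Reserve room for a filler array header only when a header alone could
// overflow the alignment gap at the end of the buffer.
const size_t kAlignmentReserve =
    (kOopHeaderWords > kMinObjAlignWords) ? kFillerHdrAlignedWords : 0;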
Example #14
void constMethodKlass::oop_verify_on(oop obj, outputStream* st) {
  Klass::oop_verify_on(obj, st);
  guarantee(obj->is_constMethod(), "object must be constMethod");
  constMethodOop m = constMethodOop(obj);
  guarantee(m->is_perm(),                            "should be in permspace");

  // Verification can occur during oop construction before the method or
  // other fields have been initialized.
  if (!obj->partially_loaded()) {
    guarantee(m->method()->is_perm(), "should be in permspace");
    guarantee(m->method()->is_method(), "should be method");
    typeArrayOop stackmap_data = m->stackmap_data();
    guarantee(stackmap_data == NULL ||
              stackmap_data->is_perm(),  "should be in permspace");
    guarantee(m->exception_table()->is_perm(), "should be in permspace");
    guarantee(m->exception_table()->is_typeArray(), "should be type array");

    address m_end = (address)((oop*) m + m->size());
    address compressed_table_start = m->code_end();
    guarantee(compressed_table_start <= m_end, "invalid method layout");
    address compressed_table_end = compressed_table_start;
    // Verify line number table
    if (m->has_linenumber_table()) {
      CompressedLineNumberReadStream stream(m->compressed_linenumber_table());
      while (stream.read_pair()) {
        guarantee(stream.bci() >= 0 && stream.bci() <= m->code_size(), "invalid bci in line number table");
      }
      compressed_table_end += stream.position();
    }
    guarantee(compressed_table_end <= m_end, "invalid method layout");
    // Verify checked exceptions and local variable tables
    if (m->has_checked_exceptions()) {
      u2* addr = m->checked_exceptions_length_addr();
      guarantee(*addr > 0 && (address) addr >= compressed_table_end && (address) addr < m_end, "invalid method layout");
    }
    if (m->has_localvariable_table()) {
      u2* addr = m->localvariable_table_length_addr();
      guarantee(*addr > 0 && (address) addr >= compressed_table_end && (address) addr < m_end, "invalid method layout");
    }
    // Check compressed_table_end relative to uncompressed_table_start
    u2* uncompressed_table_start;
    if (m->has_localvariable_table()) {
      uncompressed_table_start = (u2*) m->localvariable_table_start();
    } else {
      if (m->has_checked_exceptions()) {
        uncompressed_table_start = (u2*) m->checked_exceptions_start();
      } else {
        uncompressed_table_start = (u2*) m_end;
      }
    }
    int gap = (intptr_t) uncompressed_table_start - (intptr_t) compressed_table_end;
    int max_gap = align_object_size(1)*BytesPerWord;
    guarantee(gap >= 0 && gap < max_gap, "invalid method layout");
  }
}
Example #15
ParGCAllocBuffer::ParGCAllocBuffer(size_t desired_plab_sz_) :
  _word_sz(desired_plab_sz_), _bottom(NULL), _top(NULL),
  _end(NULL), _hard_end(NULL),
  _retained(false), _retained_filler(),
  _allocated(0), _wasted(0)
{
  assert (min_size() > AlignmentReserve, "Inconsistency!");
  // arrayOopDesc::header_size depends on command line initialization.
  FillerHeaderSize = align_object_size(arrayOopDesc::header_size(T_INT));
  AlignmentReserve = oopDesc::header_size() > MinObjAlignment ? FillerHeaderSize : 0;
}
Example #16
CollectedHeap::CollectedHeap() : _n_par_threads(0)
{
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);

  _barrier_set = NULL;
  _is_gc_active = false;
  _total_collections = _total_full_collections = 0;
  _gc_cause = _gc_lastcause = GCCause::_no_gc;
  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
}
Example #17
int arrayKlass::object_size(int header_size) const {
  // size of an array klass object
  assert(header_size <= instanceKlass::header_size(), "bad header size");
  // If this assert fails, see comments in base_create_array_klass.
  header_size = instanceKlass::header_size();
#ifdef _LP64
  int size = header_size + align_object_offset(vtable_length());
#else
  int size = header_size + vtable_length();
#endif
  return align_object_size(size);
}
Example #18
// There may be unallocated holes in the middle chunks
// that should be filled with dead objects to ensure parseability.
void MutableNUMASpace::ensure_parsability() {
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    if (s->top() < top()) { // For all spaces preceding the one containing top()
      if (s->free_in_words() > 0) {
        size_t area_touched_words = pointer_delta(s->end(), s->top());
        CollectedHeap::fill_with_object(s->top(), area_touched_words);
#ifndef ASSERT
        if (!ZapUnusedHeapArea) {
          area_touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)),
                                    area_touched_words);
        }
#endif
        if (!os::numa_has_static_binding()) {
          MemRegion invalid;
          HeapWord *crossing_start = (HeapWord*)round_to((intptr_t)s->top(), os::vm_page_size());
          HeapWord *crossing_end = (HeapWord*)round_to((intptr_t)(s->top() + area_touched_words),
                                                       os::vm_page_size());
          if (crossing_start != crossing_end) {
            // If object header crossed a small page boundary we mark the area
            // as invalid rounding it to a page_size().
            HeapWord *start = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());
            HeapWord *end = MIN2((HeapWord*)round_to((intptr_t)(s->top() + area_touched_words), page_size()),
                                 s->end());
            invalid = MemRegion(start, end);
          }

          ls->add_invalid_region(invalid);
        }
      }
    } else {
      if (!os::numa_has_static_binding()) {
#ifdef ASSERT
        MemRegion invalid(s->top(), s->end());
        ls->add_invalid_region(invalid);
#else
        if (ZapUnusedHeapArea) {
          MemRegion invalid(s->top(), s->end());
          ls->add_invalid_region(invalid);
        } else {
          return;
        }
#endif
      } else {
          return;
      }
    }
  }
}
Example #19
CollectedHeap::CollectedHeap() :
  _barrier_set(NULL),
  _is_gc_active(false),
  _total_collections(0),
  _total_full_collections(0),
  _gc_cause(GCCause::_no_gc),
  _gc_lastcause(GCCause::_no_gc),
  _defer_initial_card_mark(false) // strengthened by subclass in pre_initialize() below.
{
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);

  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
}
Example #20
size_t ThreadLocalAllocBuffer::initial_desired_size() {
  size_t init_sz = 0;

  if (TLABSize > 0) {
    init_sz = TLABSize / HeapWordSize;
  } else if (global_stats() != NULL) {
    // Initial size is a function of the average number of allocating threads.
    unsigned nof_threads = global_stats()->allocating_threads_avg();

    init_sz  = (Universe::heap()->tlab_capacity(myThread()) / HeapWordSize) /
                      (nof_threads * target_refills());
    init_sz = align_object_size(init_sz);
  }
  init_sz = MIN2(MAX2(init_sz, min_size()), max_size());
  return init_sz;
}
Example #21
// Compute desired plab size for one gc worker thread and latch result for later
// use. This should be called once at the end of parallel
// scavenge; it clears the sensor accumulators.
void PLABStats::adjust_desired_plab_sz() {
  log_plab_allocation();

  if (!ResizePLAB) {
    // Clear accumulators for next round.
    reset();
    return;
  }

  assert(is_object_aligned(max_size()) && min_size() <= max_size(),
         "PLAB clipping computation may be incorrect");

  if (_allocated == 0) {
    assert(_unused == 0,
           "Inconsistency in PLAB stats: "
           "_allocated: " SIZE_FORMAT ", "
           "_wasted: " SIZE_FORMAT ", "
           "_unused: " SIZE_FORMAT ", "
           "_undo_wasted: " SIZE_FORMAT,
           _allocated, _wasted, _unused, _undo_wasted);

    _allocated = 1;
  }
  double wasted_frac    = (double)_unused / (double)_allocated;
  size_t target_refills = (size_t)((wasted_frac * TargetSurvivorRatio) / TargetPLABWastePct);
  if (target_refills == 0) {
    target_refills = 1;
  }
  size_t used = _allocated - _wasted - _unused;
  // Assumed to have 1 gc worker thread
  size_t recent_plab_sz = used / target_refills;
  // Take historical weighted average
  _filter.sample(recent_plab_sz);
  // Clip from above and below, and align to object boundary
  size_t new_plab_sz = MAX2(min_size(), (size_t)_filter.average());
  new_plab_sz = MIN2(max_size(), new_plab_sz);
  new_plab_sz = align_object_size(new_plab_sz);
  // Latch the result
  _desired_net_plab_sz = new_plab_sz;

  log_sizing(recent_plab_sz, new_plab_sz);

  reset();
}
Example #22
// Compute desired plab size and latch result for later
// use. This should be called once at the end of parallel
// scavenge; it clears the sensor accumulators.
void PLABStats::adjust_desired_plab_sz(uint no_of_gc_workers) {
  assert(ResizePLAB, "Not set");

  assert(is_object_aligned(max_size()) && min_size() <= max_size(),
         "PLAB clipping computation may be incorrect");

  if (_allocated == 0) {
    assert(_unused == 0,
           err_msg("Inconsistency in PLAB stats: "
                   "_allocated: "SIZE_FORMAT", "
                   "_wasted: "SIZE_FORMAT", "
                   "_unused: "SIZE_FORMAT", "
                   "_used  : "SIZE_FORMAT,
                   _allocated, _wasted, _unused, _used));

    _allocated = 1;
  }
  double wasted_frac    = (double)_unused/(double)_allocated;
  size_t target_refills = (size_t)((wasted_frac*TargetSurvivorRatio)/
                                   TargetPLABWastePct);
  if (target_refills == 0) {
    target_refills = 1;
  }
  _used = _allocated - _wasted - _unused;
  size_t plab_sz = _used/(target_refills*no_of_gc_workers);
  if (PrintPLAB) gclog_or_tty->print(" (plab_sz = " SIZE_FORMAT " ", plab_sz);
  // Take historical weighted average
  _filter.sample(plab_sz);
  // Clip from above and below, and align to object boundary
  plab_sz = MAX2(min_size(), (size_t)_filter.average());
  plab_sz = MIN2(max_size(), plab_sz);
  plab_sz = align_object_size(plab_sz);
  // Latch the result
  if (PrintPLAB) gclog_or_tty->print(" desired_plab_sz = " SIZE_FORMAT ") ", plab_sz);
  _desired_plab_sz = plab_sz;
  // Now clear the accumulators for next round:
  // note this needs to be fixed in the case where we
  // are retaining across scavenges. FIX ME !!! XXX
  _allocated = 0;
  _wasted    = 0;
  _unused    = 0;
}
Example #23
int constMethodOopDesc::object_size(int code_size,
                                    int compressed_line_number_size,
                                    int local_variable_table_length,
                                    int checked_exceptions_length) {
  int extra_bytes = code_size;
  if (compressed_line_number_size > 0) {
    extra_bytes += compressed_line_number_size;
  }
  if (checked_exceptions_length > 0) {
    extra_bytes += sizeof(u2);
    extra_bytes += checked_exceptions_length * sizeof(CheckedExceptionElement);
  }
  if (local_variable_table_length > 0) {
    extra_bytes += sizeof(u2);
    extra_bytes +=
              local_variable_table_length * sizeof(LocalVariableTableElement);
  }
  int extra_words = align_size_up(extra_bytes, BytesPerWord) / BytesPerWord;
  return align_object_size(header_size() + extra_words);
}
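The last two lines of object_size above round a byte count up to whole words before aligning to the object boundary. A tiny sketch of that byte-to-word rounding, assuming an 8-byte HeapWord:

#include <cstddef>

const size_t kBytesPerWord = 8;  // assumed 64-bit HeapWord

// Equivalent to align_size_up(bytes, BytesPerWord) / BytesPerWord.
inline size_t bytes_to_words_rounded_up(size_t bytes) {
  return (bytes + kBytesPerWord - 1) / kBytesPerWord;
}

// e.g. bytes_to_words_rounded_up(17) == 3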
Example #24
void ThreadLocalAllocBuffer::resize() {
  // Compute the next tlab size using expected allocation amount
  assert(ResizeTLAB, "Should not call this otherwise");
  size_t alloc = (size_t)(_allocation_fraction.average() *
                          (Universe::heap()->tlab_capacity(myThread()) / HeapWordSize));
  size_t new_size = alloc / _target_refills;

  new_size = MIN2(MAX2(new_size, min_size()), max_size());

  size_t aligned_new_size = align_object_size(new_size);

  if (PrintTLAB && Verbose) {
    gclog_or_tty->print("TLAB new size: thread: " INTPTR_FORMAT " [id: %2d]"
                        " refills %d  alloc: %8.6f desired_size: " SIZE_FORMAT " -> " SIZE_FORMAT "\n",
                        p2i(myThread()), myThread()->osthread()->thread_id(),
                        _target_refills, _allocation_fraction.average(), desired_size(), aligned_new_size);
  }
  set_desired_size(aligned_new_size);
  set_refill_waste_limit(initial_refill_waste_limit());
}
Example #25
klassOop Klass::base_create_klass_oop(KlassHandle& klass, int size,
                                      const Klass_vtbl& vtbl, TRAPS) {
  size = align_object_size(size);
  // allocate and initialize vtable
  Klass*   kl = (Klass*) vtbl.allocate_permanent(klass, size, CHECK_NULL);
  klassOop k  = kl->as_klassOop();

  { // Preinitialize supertype information.
    // A later call to initialize_supers() may update these settings:
    kl->set_super(NULL);
    for (juint i = 0; i < Klass::primary_super_limit(); i++) {
      kl->_primary_supers[i] = NULL;
    }
    kl->set_secondary_supers(NULL);
    oop_store_without_check((oop*) &kl->_primary_supers[0], k);
    kl->set_super_check_offset(in_bytes(primary_supers_offset()));
  }

  kl->set_java_mirror(NULL);
  kl->set_modifier_flags(0);
  kl->set_layout_helper(Klass::_lh_neutral_value);
  kl->set_name(NULL);
  AccessFlags af;
  af.set_flags(0);
  kl->set_access_flags(af);
  kl->set_subklass(NULL);
  kl->set_next_sibling(NULL);
  kl->set_alloc_count(0);
  kl->set_alloc_size(0);
  TRACE_SET_KLASS_TRACE_ID(kl, 0);

  kl->set_prototype_header(markOopDesc::prototype());
  kl->set_biased_lock_revocation_count(0);
  kl->set_last_biased_lock_bulk_revocation_time(0);

  return k;
}
Example #26
File: plab.cpp Project: shelan/jdk9-mirror
// Compute desired plab size and latch result for later
// use. This should be called once at the end of parallel
// scavenge; it clears the sensor accumulators.
void PLABStats::adjust_desired_plab_sz(uint no_of_gc_workers) {
  assert(ResizePLAB, "Not set");

  assert(is_object_aligned(max_size()) && min_size() <= max_size(),
         "PLAB clipping computation may be incorrect");

  if (_allocated == 0) {
    assert(_unused == 0,
           err_msg("Inconsistency in PLAB stats: "
                   "_allocated: "SIZE_FORMAT", "
                   "_wasted: "SIZE_FORMAT", "
                   "_unused: "SIZE_FORMAT,
                   _allocated, _wasted, _unused));

    _allocated = 1;
  }
  double wasted_frac    = (double)_unused / (double)_allocated;
  size_t target_refills = (size_t)((wasted_frac * TargetSurvivorRatio) / TargetPLABWastePct);
  if (target_refills == 0) {
    target_refills = 1;
  }
  size_t used = _allocated - _wasted - _unused;
  size_t recent_plab_sz = used / (target_refills * no_of_gc_workers);
  // Take historical weighted average
  _filter.sample(recent_plab_sz);
  // Clip from above and below, and align to object boundary
  size_t new_plab_sz = MAX2(min_size(), (size_t)_filter.average());
  new_plab_sz = MIN2(max_size(), new_plab_sz);
  new_plab_sz = align_object_size(new_plab_sz);
  // Latch the result
  if (PrintPLAB) {
    gclog_or_tty->print(" (plab_sz = " SIZE_FORMAT" desired_plab_sz = " SIZE_FORMAT") ", recent_plab_sz, new_plab_sz);
  }
  _desired_plab_sz = new_plab_sz;

  reset();
}
Example #27
size_t PLAB::min_size() {
  // Make sure that we return something that is larger than AlignmentReserve
  return align_object_size(MAX2(MinTLABSize / HeapWordSize, (uintx)oopDesc::header_size())) + AlignmentReserve;
}
Example #28
// Calculates plab size for current number of gc worker threads.
size_t PLABStats::desired_plab_sz(uint no_of_gc_workers) {
  return MAX2(min_size(), (size_t)align_object_size(_desired_net_plab_sz / no_of_gc_workers));
}
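A hypothetical reading of Example #28: with _desired_net_plab_sz latched at 16384 words and 8 GC workers (both values invented), each worker receives an aligned 2048-word PLAB unless min_size() is larger.

// Hypothetical values only:
// desired_plab_sz(8) == MAX2(min_size(), align_object_size(16384 / 8))
//                    == MAX2(min_size(), 2048)   // in HeapWords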
Example #29
// Compute the size of the methodDataOop necessary to store
// profiling information about a given method.  Size is in words
int methodDataOopDesc::compute_allocation_size_in_words(methodHandle method) {
  int byte_size = compute_allocation_size_in_bytes(method);
  int word_size = align_size_up(byte_size, BytesPerWord) / BytesPerWord;
  return align_object_size(word_size);
}
Example #30
int object_size() const {
  return align_object_size(header_size());
}