Example #1
CardGeneration::CardGeneration(ReservedSpace rs,
                               size_t initial_byte_size,
                               CardTableRS* remset) :
  Generation(rs, initial_byte_size), _rs(remset),
  _shrink_factor(0), _min_heap_delta_bytes(), _capacity_at_prologue(),
  _used_at_prologue()
{
  HeapWord* start = (HeapWord*)rs.base();
  size_t reserved_byte_size = rs.size();
  assert((uintptr_t(start) & 3) == 0, "bad alignment");
  assert((reserved_byte_size & 3) == 0, "bad alignment");
  MemRegion reserved_mr(start, heap_word_size(reserved_byte_size));
  _bts = new BlockOffsetSharedArray(reserved_mr,
                                    heap_word_size(initial_byte_size));
  MemRegion committed_mr(start, heap_word_size(initial_byte_size));
  _rs->resize_covered_region(committed_mr);
  if (_bts == NULL) {
    vm_exit_during_initialization("Could not allocate a BlockOffsetArray");
  }

  // Verify that the start and end of this generation is the start of a card.
  // If this wasn't true, a single card could span more than one generation,
  // which would cause problems when we commit/uncommit memory, and when we
  // clear and dirty cards.
  guarantee(_rs->is_aligned(reserved_mr.start()), "generation must be card aligned");
  if (reserved_mr.end() != GenCollectedHeap::heap()->reserved_region().end()) {
    // Don't check at the very end of the heap as we'll assert that we're probing off
    // the end if we try.
    guarantee(_rs->is_aligned(reserved_mr.end()), "generation must be card aligned");
  }
  _min_heap_delta_bytes = MinHeapDeltaBytes;
  _capacity_at_prologue = initial_byte_size;
  _used_at_prologue = 0;
}
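
The card-alignment guarantee above can be pictured with a small arithmetic sketch. The 512-byte card size and the helper below are illustrative assumptions (the real value comes from the card table implementation), but they show why a misaligned boundary would let a single card straddle two generations:

#include <cstdint>

// Illustrative only: assumes 512-byte cards, HotSpot's usual card size.
// If a generation boundary fell inside a card, e.g. at heap offset 0x340,
// the card covering [0x200, 0x400) would map onto both generations, so
// clearing or dirtying that card would touch memory owned by each of them.
const uintptr_t assumed_card_size = 512;

bool is_card_aligned(const void* boundary) {
  return ((uintptr_t)boundary & (assumed_card_size - 1)) == 0;
}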
Example #2
CardGeneration::CardGeneration(ReservedSpace rs, size_t initial_byte_size,
                               int level,
                               GenRemSet* remset) :
    Generation(rs, initial_byte_size, level), _rs(remset)
{
    HeapWord* start = (HeapWord*)rs.base();
    size_t reserved_byte_size = rs.size();
    assert((uintptr_t(start) & 3) == 0, "bad alignment");
    assert((reserved_byte_size & 3) == 0, "bad alignment");
    MemRegion reserved_mr(start, heap_word_size(reserved_byte_size));
    _bts = new BlockOffsetSharedArray(reserved_mr,
                                      heap_word_size(initial_byte_size));
    MemRegion committed_mr(start, heap_word_size(initial_byte_size));
    _rs->resize_covered_region(committed_mr);
    if (_bts == NULL)
        vm_exit_during_initialization("Could not allocate a BlockOffsetArray");

    // Verify that the start and end of this generation is the start of a card.
    // If this wasn't true, a single card could span more than one generation,
    // which would cause problems when we commit/uncommit memory, and when we
    // clear and dirty cards.
    guarantee(_rs->is_aligned(reserved_mr.start()), "generation must be card aligned");
    if (reserved_mr.end() != Universe::heap()->reserved_region().end()) {
        // Don't check at the very end of the heap as we'll assert that we're probing off
        // the end if we try.
        guarantee(_rs->is_aligned(reserved_mr.end()), "generation must be card aligned");
    }
}
Example #3
bool CardGeneration::grow_by(size_t bytes) {
  assert_correct_size_change_locking();
  bool result = _virtual_space.expand_by(bytes);
  if (result) {
    size_t new_word_size =
       heap_word_size(_virtual_space.committed_size());
    MemRegion mr(space()->bottom(), new_word_size);
    // Expand card table
    GenCollectedHeap::heap()->barrier_set()->resize_covered_region(mr);
    // Expand shared block offset array
    _bts->resize(new_word_size);

    // Fix for bug #4668531
    if (ZapUnusedHeapArea) {
      MemRegion mangle_region(space()->end(),
                              (HeapWord*)_virtual_space.high());
      SpaceMangler::mangle_region(mangle_region);
    }

    // Expand space -- also expands space's BOT
    // (which uses (part of) shared array above)
    space()->set_end((HeapWord*)_virtual_space.high());

    // update the space and generation capacity counters
    update_counters();

    size_t new_mem_size = _virtual_space.committed_size();
    size_t old_mem_size = new_mem_size - bytes;
    log_trace(gc, heap)("Expanding %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                        name(), old_mem_size/K, bytes/K, new_mem_size/K);
  }
  return result;
}
Example #4
/**
 * Should we try to allocate in the old generation right now?
 *   1) the requested allocation is larger than the young generation's capacity, or
 *   2) the heap currently needs a GC, or
 *   3) an incremental collection of the heap has just failed.
 */
bool GenCollectorPolicy::should_try_older_generation_allocation(
        size_t word_size) const {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  size_t gen0_capacity = gch->get_gen(0)->capacity_before_gc();
  return (word_size > heap_word_size(gen0_capacity))
      || GC_locker::is_active_and_needs_gc()
      || gch->incremental_collection_failed();
}
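
For orientation, here is a rough sketch of where such a predicate sits in an allocation slow path. It is hypothetical, not the actual GenCollectorPolicy caller: the helper name and control flow are invented for illustration, and the generation allocation entry points are assumed from Generation's interface.

// Hypothetical slow-path sketch (not actual HotSpot code): try the young
// generation first, and fall through to the old generation only when the
// predicate above says the request is oversized, a GC is already pending,
// or an incremental collection has just failed.
HeapWord* allocate_with_fallback(GenCollectorPolicy* policy,
                                 size_t word_size, bool is_tlab) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  HeapWord* result = gch->get_gen(0)->par_allocate(word_size, is_tlab);
  if (result != NULL) {
    return result;                  // young-gen fast path succeeded
  }
  if (policy->should_try_older_generation_allocation(word_size)) {
    result = gch->get_gen(1)->allocate(word_size, is_tlab);
  }
  return result;                    // NULL: the caller must collect and retry
}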
Example #5
bool OneContigSpaceCardGeneration::grow_by(size_t bytes) {
    assert_locked_or_safepoint(ExpandHeap_lock);
    bool result = _virtual_space.expand_by(bytes);
    if (result) {
        size_t new_word_size =
            heap_word_size(_virtual_space.committed_size());
        MemRegion mr(_the_space->bottom(), new_word_size);
        // Expand card table
        Universe::heap()->barrier_set()->resize_covered_region(mr);
        // Expand shared block offset array
        _bts->resize(new_word_size);

        // Fix for bug #4668531
        if (ZapUnusedHeapArea) {
            MemRegion mangle_region(_the_space->end(),
                                    (HeapWord*)_virtual_space.high());
            SpaceMangler::mangle_region(mangle_region);
        }

        // Expand space -- also expands space's BOT
        // (which uses (part of) shared array above)
        _the_space->set_end((HeapWord*)_virtual_space.high());

        // update the space and generation capacity counters
        update_counters();

        if (Verbose && PrintGC) {
            size_t new_mem_size = _virtual_space.committed_size();
            size_t old_mem_size = new_mem_size - bytes;
            gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
                                   SIZE_FORMAT "K to " SIZE_FORMAT "K",
                                   name(), old_mem_size/K, bytes/K, new_mem_size/K);
        }
    }
    return result;
}
Example #6
// NOTE! We need to be careful about resizing. During a GC, multiple
// allocators may be active during heap expansion. If we allow the
// heap resizing to become visible before we have correctly resized
// all heap related data structures, we may cause program failures.
void PSOldGen::post_resize() {
  // First construct a memregion representing the new size
  MemRegion new_memregion((HeapWord*)_virtual_space.low(), (HeapWord*)_virtual_space.high());
  size_t new_word_size = new_memregion.word_size();

  // Block offset table resize FIX ME!!!!!
  // _bts->resize(new_word_size);
  Universe::heap()->barrier_set()->resize_covered_region(new_memregion);

  // Did we expand?
  if (object_space()->end() < (HeapWord*) _virtual_space.high()) {
    // We need to mangle the newly expanded area. The memregion spans
    // end -> new_end, we assume that top -> end is already mangled.
    // This cannot be safely tested for, as allocation may be taking
    // place.
    MemRegion mangle_region(object_space()->end(),(HeapWord*) _virtual_space.high());
    object_space()->mangle_region(mangle_region); 
  }

  // ALWAYS do this last!!
  object_space()->set_end((HeapWord*) _virtual_space.high());

  assert(new_word_size == heap_word_size(object_space()->capacity_in_bytes()), "Sanity");
}
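
The NOTE at the top of this example is the key invariant: every covering data structure must be resized before set_end() makes the expansion visible. A condensed sketch of that publish-last ordering, with hypothetical helper and parameter names, might look like this:

// Minimal "publish last" sketch (hypothetical helper, mirroring post_resize()):
// grow the covering structures first so a racing allocator never sees an end()
// that outruns the card table or block offset coverage.
static void publish_expanded_end(MutableSpace* space, HeapWord* low,
                                 HeapWord* new_high, BarrierSet* bs) {
  MemRegion covered(low, new_high);
  bs->resize_covered_region(covered);   // 1. card table covers the new range
  // 2. the block offset table resize would go here (see the FIX ME above)
  space->set_end(new_high);             // 3. only now is the expansion visible
}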
Example #7
CompactingPermGenGen::CompactingPermGenGen(ReservedSpace rs,
                                           ReservedSpace shared_rs,
                                           size_t initial_byte_size,
                                           int level, GenRemSet* remset,
                                           ContiguousSpace* space,
                                           PermanentGenerationSpec* spec_) :
  OneContigSpaceCardGeneration(rs, initial_byte_size, MinPermHeapExpansion,
                               level, remset, space) {

  set_spec(spec_);
  if (!UseSharedSpaces && !DumpSharedSpaces) {
    spec()->disable_sharing();
  }

  // Break virtual space into address ranges for all spaces.

  if (spec()->enable_shared_spaces()) {
    shared_end = (HeapWord*)(shared_rs.base() + shared_rs.size());
      misccode_end = shared_end;
      misccode_bottom = misccode_end - heap_word_size(spec()->misc_code_size());
      miscdata_end = misccode_bottom;
      miscdata_bottom = miscdata_end - heap_word_size(spec()->misc_data_size());
      readwrite_end = miscdata_bottom;
      readwrite_bottom =
        readwrite_end - heap_word_size(spec()->read_write_size());
      readonly_end = readwrite_bottom;
      readonly_bottom =
        readonly_end - heap_word_size(spec()->read_only_size());
    shared_bottom = readonly_bottom;
    unshared_end = shared_bottom;
    assert((char*)shared_bottom == shared_rs.base(), "shared space mismatch");
  } else {
    shared_end = (HeapWord*)(rs.base() + rs.size());
      misccode_end = shared_end;
      misccode_bottom = shared_end;
      miscdata_end = shared_end;
      miscdata_bottom = shared_end;
      readwrite_end = shared_end;
      readwrite_bottom = shared_end;
      readonly_end = shared_end;
      readonly_bottom = shared_end;
    shared_bottom = shared_end;
    unshared_end = shared_bottom;
  }
  unshared_bottom = (HeapWord*) rs.base();

  // Verify shared and unshared spaces adjacent.
  assert((char*)shared_bottom == rs.base()+rs.size(), "shared space mismatch");
  assert(unshared_end > unshared_bottom, "shared space mismatch");

  // Split reserved memory into pieces.

  ReservedSpace ro_rs   = shared_rs.first_part(spec()->read_only_size(),
                                              UseSharedSpaces);
  ReservedSpace tmp_rs1 = shared_rs.last_part(spec()->read_only_size());
  ReservedSpace rw_rs   = tmp_rs1.first_part(spec()->read_write_size(),
                                             UseSharedSpaces);
  ReservedSpace tmp_rs2 = tmp_rs1.last_part(spec()->read_write_size());
  ReservedSpace md_rs   = tmp_rs2.first_part(spec()->misc_data_size(),
                                             UseSharedSpaces);
  ReservedSpace mc_rs   = tmp_rs2.last_part(spec()->misc_data_size());

  _shared_space_size = spec()->read_only_size()
                     + spec()->read_write_size()
                     + spec()->misc_data_size()
                     + spec()->misc_code_size();

  // Allocate the unshared (default) space.
  _the_space = new ContigPermSpace(_bts,
               MemRegion(unshared_bottom, heap_word_size(initial_byte_size)));
  if (_the_space == NULL)
    vm_exit_during_initialization("Could not allocate an unshared"
                                  " CompactingPermGen Space");

  // Allocate shared spaces
  if (spec()->enable_shared_spaces()) {

    // If mapping a shared file, the space is not committed, don't
    // mangle.
    NOT_PRODUCT(bool old_ZapUnusedHeapArea = ZapUnusedHeapArea;)
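
Read bottom-up, the pointer arithmetic above carves the reserved memory into the following layout (derived directly from the assignments in this snippet, low addresses first):

// Address-space layout implied by the assignments above (low to high):
//
//   unshared_bottom = rs.base()
//     [ unshared (default) perm gen space ]
//   unshared_end == shared_bottom == readonly_bottom
//     [ read-only shared space  : spec()->read_only_size()  ]
//     [ read-write shared space : spec()->read_write_size() ]
//     [ misc data               : spec()->misc_data_size()  ]
//     [ misc code               : spec()->misc_code_size()  ]
//   misccode_end == shared_end = shared_rs.base() + shared_rs.size()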
Example #8
 static int size(int length) {
   size_t sz = heap_word_size(sizeof(SymbolBase) + (length > 0 ? length : 0));
   return align_object_size(sz);
 }
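
Every snippet in this listing funnels byte counts through heap_word_size(), which rounds a byte size up to a whole number of HeapWords. The worked sketch below assumes a 64-bit VM (HeapWordSize == 8) and the usual round-up-to-words definition; the 10-byte header figure is invented purely for illustration.

#include <cstddef>

// Assumed for illustration: 64-bit VM, so a HeapWord is 8 bytes.
const size_t kHeapWordSize    = 8;
const size_t kLogHeapWordSize = 3;

// Round a byte count up to whole HeapWords.
size_t heap_word_size_demo(size_t byte_size) {
  return (byte_size + (kHeapWordSize - 1)) >> kLogHeapWordSize;
}

// Example: a (hypothetical) 10-byte SymbolBase header plus a 5-character name
// is 15 bytes, which rounds up to 2 HeapWords; align_object_size() would then
// round that word count up to the minimum object alignment.
//   heap_word_size_demo(15) == 2
//   heap_word_size_demo(16) == 2
//   heap_word_size_demo(17) == 3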
Example #9
// Return true if
//  - the allocation won't fit into the maximum young gen heap, or
//  - Universe::jvmpi_slow_allocation() is in effect.
bool TwoGenerationCollectorPolicy::should_try_older_generation_allocation(
    size_t word_size) const {
  return (word_size > heap_word_size(_max_gen0_size)) ||
    Universe::jvmpi_slow_allocation();
}