void G1PageBasedVirtualSpace::initialize_with_page_size(ReservedSpace rs, size_t used_size, size_t page_size) {
  guarantee(rs.is_reserved(), "Given reserved space must have been reserved already.");

  vmassert(_low_boundary == NULL, "VirtualSpace already initialized");
  vmassert(page_size > 0, "Page size must be non-zero.");

  guarantee(is_ptr_aligned(rs.base(), page_size),
            "Reserved space base " PTR_FORMAT " is not aligned to requested page size " SIZE_FORMAT, p2i(rs.base()), page_size);
  guarantee(is_size_aligned(used_size, os::vm_page_size()),
            "Given used reserved space size needs to be OS page size aligned (%d bytes) but is " SIZE_FORMAT, os::vm_page_size(), used_size);
  guarantee(used_size <= rs.size(),
            "Used size of reserved space " SIZE_FORMAT " bytes must not exceed the reservation of " SIZE_FORMAT " bytes", used_size, rs.size());
  guarantee(is_size_aligned(rs.size(), page_size),
            "Expected that the virtual space is size aligned, but " SIZE_FORMAT " is not aligned to page size " SIZE_FORMAT, rs.size(), page_size);

  _low_boundary  = rs.base();
  _high_boundary = _low_boundary + used_size;

  _special = rs.special();
  _executable = rs.executable();

  _page_size = page_size;

  vmassert(_committed.size() == 0, "virtual space initialized more than once");
  BitMap::idx_t size_in_pages = rs.size() / page_size;
  _committed.initialize(size_in_pages);
  if (_special) {
    _dirty.initialize(size_in_pages);
  }

  _tail_size = used_size % _page_size;
}
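
A quick standalone sketch (plain C++ with hypothetical sizes, not HotSpot code) of the bookkeeping above: the bitmaps track whole pages of the reservation, while _tail_size records the partial last page of the used portion.

#include <cstddef>
#include <cstdio>

int main() {
  // Hypothetical values: a 10 MB reservation backed by 2 MB pages,
  // of which only 7 MB are currently used.
  const size_t page_size = 2 * 1024 * 1024;
  const size_t reserved  = 10 * 1024 * 1024;  // rs.size(), page aligned
  const size_t used_size = 7 * 1024 * 1024;   // OS-page aligned, <= reserved

  const size_t size_in_pages = reserved / page_size;   // bits in _committed (and _dirty)
  const size_t tail_size     = used_size % page_size;  // bytes on the partial last page

  std::printf("pages tracked: %zu, tail bytes: %zu\n", size_in_pages, tail_size);
  return 0;
}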
  G1RegionsSmallerThanCommitSizeMapper(ReservedSpace rs,
                                       size_t os_commit_granularity,
                                       size_t alloc_granularity,
                                       size_t commit_factor,
                                       MemoryType type) :
     G1RegionToSpaceMapper(rs, os_commit_granularity, alloc_granularity, type),
    _regions_per_page((os_commit_granularity * commit_factor) / alloc_granularity), _refcounts() {

    guarantee((os_commit_granularity * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity");
    _refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + rs.size()), os_commit_granularity);
    _commit_map.resize(rs.size() * commit_factor / alloc_granularity, /* in_resource_area */ false);
  }
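
The _regions_per_page computation is easiest to check with concrete numbers. A minimal standalone sketch, assuming hypothetical granularities (4 MB commit pages, commit factor 1, 1 MB heap regions), not actual G1 values:

#include <cstddef>
#include <cassert>

int main() {
  // Hypothetical granularities for illustration only.
  const size_t os_commit_granularity = 4 * 1024 * 1024;  // one committed page
  const size_t commit_factor         = 1;
  const size_t alloc_granularity     = 1 * 1024 * 1024;  // one heap region

  // Mirrors the guarantee and the initializer list above.
  assert(os_commit_granularity * commit_factor >= alloc_granularity);
  const size_t regions_per_page =
      (os_commit_granularity * commit_factor) / alloc_granularity;
  assert(regions_per_page == 4);  // four regions share one committed page
  return 0;
}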
Example #3
CardGeneration::CardGeneration(ReservedSpace rs, size_t initial_byte_size,
                               int level,
                               GenRemSet* remset) :
    Generation(rs, initial_byte_size, level), _rs(remset)
{
    HeapWord* start = (HeapWord*)rs.base();
    size_t reserved_byte_size = rs.size();
    assert((uintptr_t(start) & 3) == 0, "bad alignment");
    assert((reserved_byte_size & 3) == 0, "bad alignment");
    MemRegion reserved_mr(start, heap_word_size(reserved_byte_size));
    _bts = new BlockOffsetSharedArray(reserved_mr,
                                      heap_word_size(initial_byte_size));
    MemRegion committed_mr(start, heap_word_size(initial_byte_size));
    _rs->resize_covered_region(committed_mr);
    if (_bts == NULL)
        vm_exit_during_initialization("Could not allocate a BlockOffsetArray");

    // Verify that the start and end of this generation is the start of a card.
    // If this wasn't true, a single card could span more than one generation,
    // which would cause problems when we commit/uncommit memory, and when we
    // clear and dirty cards.
    guarantee(_rs->is_aligned(reserved_mr.start()), "generation must be card aligned");
    if (reserved_mr.end() != Universe::heap()->reserved_region().end()) {
        // Don't check at the very end of the heap as we'll assert that we're probing off
        // the end if we try.
        guarantee(_rs->is_aligned(reserved_mr.end()), "generation must be card aligned");
    }
}
Example #4
CardGeneration::CardGeneration(ReservedSpace rs,
                               size_t initial_byte_size,
                               CardTableRS* remset) :
  Generation(rs, initial_byte_size), _rs(remset),
  _shrink_factor(0), _min_heap_delta_bytes(), _capacity_at_prologue(),
  _used_at_prologue()
{
  HeapWord* start = (HeapWord*)rs.base();
  size_t reserved_byte_size = rs.size();
  assert((uintptr_t(start) & 3) == 0, "bad alignment");
  assert((reserved_byte_size & 3) == 0, "bad alignment");
  MemRegion reserved_mr(start, heap_word_size(reserved_byte_size));
  _bts = new BlockOffsetSharedArray(reserved_mr,
                                    heap_word_size(initial_byte_size));
  MemRegion committed_mr(start, heap_word_size(initial_byte_size));
  _rs->resize_covered_region(committed_mr);
  if (_bts == NULL) {
    vm_exit_during_initialization("Could not allocate a BlockOffsetArray");
  }

  // Verify that the start and end of this generation is the start of a card.
  // If this wasn't true, a single card could span more than one generation,
  // which would cause problems when we commit/uncommit memory, and when we
  // clear and dirty cards.
  guarantee(_rs->is_aligned(reserved_mr.start()), "generation must be card aligned");
  if (reserved_mr.end() != GenCollectedHeap::heap()->reserved_region().end()) {
    // Don't check at the very end of the heap as we'll assert that we're probing off
    // the end if we try.
    guarantee(_rs->is_aligned(reserved_mr.end()), "generation must be card aligned");
  }
  _min_heap_delta_bytes = MinHeapDeltaBytes;
  _capacity_at_prologue = initial_byte_size;
  _used_at_prologue = 0;
}
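
The card-alignment guarantees only hold when both generation boundaries fall on card boundaries. A minimal sketch of the same check, assuming the usual 512-byte HotSpot card size (a standalone model, not the CardTableRS API):

#include <cstdint>
#include <cassert>

int main() {
  const uint64_t card_size = 512;  // 1 << 9, the typical HotSpot card size

  // Hypothetical generation bounds; both must be card aligned so that no
  // single card spans two generations.
  const uint64_t gen_start = 0x100000000ULL;
  const uint64_t gen_end   = gen_start + 64 * 1024 * 1024;

  assert(gen_start % card_size == 0);
  assert(gen_end   % card_size == 0);
  return 0;
}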
Example #5
bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  if(!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically,  we see that with a 4MB
  // page size, the only spaces that get handled this way are codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = os::page_size_for_region(rs.size(), rs.size(), 1);
  _upper_alignment  = os::vm_page_size();

  // End of each region
  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // commit to initial size
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}
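
The lower/middle/upper split can be illustrated with a standalone sketch: the middle region is the part of the space that can be covered by large pages, while the unaligned prefix and suffix stay on small pages. Hypothetical addresses and a 4 MB large-page size; align_up/align_down stand in for round_to/round_down:

#include <cstdint>
#include <cstdio>

static uint64_t align_up(uint64_t p, uint64_t a)   { return (p + a - 1) & ~(a - 1); }
static uint64_t align_down(uint64_t p, uint64_t a) { return p & ~(a - 1); }

int main() {
  // Hypothetical space: starts 1 MB into a 4 MB large page and is 16 MB long.
  const uint64_t large_page = 4 * 1024 * 1024;
  const uint64_t low  = 0x40100000;              // low_boundary()
  const uint64_t high = low + 16 * 1024 * 1024;  // high_boundary()

  const uint64_t lower_high_boundary  = align_up(low, large_page);     // end of the small-page prefix
  const uint64_t middle_high_boundary = align_down(high, large_page);  // end of the large-page middle
  const uint64_t upper_high_boundary  = high;                          // end of the small-page suffix

  std::printf("lower:  [%#llx, %#llx)\n", (unsigned long long)low, (unsigned long long)lower_high_boundary);
  std::printf("middle: [%#llx, %#llx)\n", (unsigned long long)lower_high_boundary, (unsigned long long)middle_high_boundary);
  std::printf("upper:  [%#llx, %#llx)\n", (unsigned long long)middle_high_boundary, (unsigned long long)upper_high_boundary);
  return 0;
}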
Example #6
// Deprecated.
bool PSVirtualSpace::initialize(ReservedSpace rs,
                                size_t commit_size) {
  set_reserved(rs);
  set_committed(reserved_low_addr(), reserved_low_addr());

  // Commit to initial size.
  assert(commit_size <= rs.size(), "commit_size too big");
  bool result = commit_size > 0 ? expand_by(commit_size) : true;
  DEBUG_ONLY(verify());
  return result;
}
  G1RegionsLargerThanCommitSizeMapper(ReservedSpace rs,
                                      size_t os_commit_granularity,
                                      size_t alloc_granularity,
                                      size_t commit_factor,
                                      MemoryType type) :
     G1RegionToSpaceMapper(rs, os_commit_granularity, alloc_granularity, type),
    _pages_per_region(alloc_granularity / (os_commit_granularity * commit_factor)) {

    guarantee(alloc_granularity >= os_commit_granularity, "allocation granularity smaller than commit granularity");
    _commit_map.resize(rs.size() * commit_factor / alloc_granularity, /* in_resource_area */ false);
  }
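
For contrast with the smaller-than-commit-size mapper earlier, here a region is the larger unit and spans several committed pages. A standalone sketch of the _pages_per_region arithmetic with hypothetical granularities (not HotSpot code):

#include <cstddef>
#include <cassert>

int main() {
  // Hypothetical: 32 MB heap regions committed in 4 MB pages, commit factor 1.
  const size_t alloc_granularity     = 32 * 1024 * 1024;
  const size_t os_commit_granularity = 4 * 1024 * 1024;
  const size_t commit_factor         = 1;

  assert(alloc_granularity >= os_commit_granularity);
  const size_t pages_per_region =
      alloc_granularity / (os_commit_granularity * commit_factor);
  assert(pages_per_region == 8);  // eight committed pages back one region
  return 0;
}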
bool G1PageBasedVirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t page_size) {
  if (!rs.is_reserved()) {
    return false;  // Allocation failed.
  }
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  assert(page_size > 0, "Granularity must be non-zero.");

  _low_boundary  = rs.base();
  _high_boundary = _low_boundary + rs.size();

  _special = rs.special();
  _executable = rs.executable();

  _page_size = page_size;

  assert(_committed.size() == 0, "virtual space initialized more than once");
  uintx size_in_bits = rs.size() / page_size;
  _committed.resize(size_in_bits, /* in_resource_area */ false);

  return true;
}
G1RegionToSpaceMapper::G1RegionToSpaceMapper(ReservedSpace rs,
                                             size_t used_size,
                                             size_t page_size,
                                             size_t region_granularity,
                                             size_t commit_factor,
                                             MemoryType type) :
  _storage(rs, used_size, page_size),
  _region_granularity(region_granularity),
  _listener(NULL),
  _commit_map(rs.size() * commit_factor / region_granularity) {
  guarantee(is_power_of_2(page_size), "must be");
  guarantee(is_power_of_2(region_granularity), "must be");

  MemTracker::record_virtual_memory_type((address)rs.base(), type);
}
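
The commit map gets one bit per region-granularity chunk of the (scaled) reservation. A standalone sketch of that sizing, assuming a hypothetical 1 GB reservation, 8 MB region granularity, and commit factor 1:

#include <cstddef>
#include <cassert>

int main() {
  // Hypothetical values; in G1 the region granularity is HeapRegion::GrainBytes.
  const size_t reserved           = 1024 * 1024 * 1024;  // rs.size()
  const size_t region_granularity = 8 * 1024 * 1024;     // power of two
  const size_t commit_factor      = 1;

  // Mirrors the _commit_map construction above: one bit per region.
  const size_t commit_map_bits = reserved * commit_factor / region_granularity;
  assert(commit_map_bits == 128);
  return 0;
}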
void CodeCache::add_heap(ReservedSpace rs, const char* name, int code_blob_type) {
  // Check if heap is needed
  if (!heap_available(code_blob_type)) {
    return;
  }

  // Create CodeHeap
  CodeHeap* heap = new CodeHeap(name, code_blob_type);
  _heaps->append(heap);

  // Reserve Space
  size_t size_initial = MIN2(InitialCodeCacheSize, rs.size());
  size_initial = round_to(size_initial, os::vm_page_size());
  if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  // Register the CodeHeap
  MemoryService::add_code_heap_memory_pool(heap, name);
}
CompactingPermGenGen::CompactingPermGenGen(ReservedSpace rs,
                                           ReservedSpace shared_rs,
                                           size_t initial_byte_size,
                                           int level, GenRemSet* remset,
                                           ContiguousSpace* space,
                                           PermanentGenerationSpec* spec_) :
  OneContigSpaceCardGeneration(rs, initial_byte_size, MinPermHeapExpansion,
                               level, remset, space) {

  set_spec(spec_);
  if (!UseSharedSpaces && !DumpSharedSpaces) {
    spec()->disable_sharing();
  }

  // Break virtual space into address ranges for all spaces.

  if (spec()->enable_shared_spaces()) {
    shared_end = (HeapWord*)(shared_rs.base() + shared_rs.size());
      misccode_end = shared_end;
      misccode_bottom = misccode_end - heap_word_size(spec()->misc_code_size());
      miscdata_end = misccode_bottom;
      miscdata_bottom = miscdata_end - heap_word_size(spec()->misc_data_size());
      readwrite_end = miscdata_bottom;
      readwrite_bottom =
        readwrite_end - heap_word_size(spec()->read_write_size());
      readonly_end = readwrite_bottom;
      readonly_bottom =
        readonly_end - heap_word_size(spec()->read_only_size());
    shared_bottom = readonly_bottom;
    unshared_end = shared_bottom;
    assert((char*)shared_bottom == shared_rs.base(), "shared space mismatch");
  } else {
    shared_end = (HeapWord*)(rs.base() + rs.size());
      misccode_end = shared_end;
      misccode_bottom = shared_end;
      miscdata_end = shared_end;
      miscdata_bottom = shared_end;
      readwrite_end = shared_end;
      readwrite_bottom = shared_end;
      readonly_end = shared_end;
      readonly_bottom = shared_end;
    shared_bottom = shared_end;
    unshared_end = shared_bottom;
  }
  unshared_bottom = (HeapWord*) rs.base();

  // Verify shared and unshared spaces adjacent.
  assert((char*)shared_bottom == rs.base()+rs.size(), "shared space mismatch");
  assert(unshared_end > unshared_bottom, "shared space mismatch");

  // Split reserved memory into pieces.

  ReservedSpace ro_rs   = shared_rs.first_part(spec()->read_only_size(),
                                              UseSharedSpaces);
  ReservedSpace tmp_rs1 = shared_rs.last_part(spec()->read_only_size());
  ReservedSpace rw_rs   = tmp_rs1.first_part(spec()->read_write_size(),
                                             UseSharedSpaces);
  ReservedSpace tmp_rs2 = tmp_rs1.last_part(spec()->read_write_size());
  ReservedSpace md_rs   = tmp_rs2.first_part(spec()->misc_data_size(),
                                             UseSharedSpaces);
  ReservedSpace mc_rs   = tmp_rs2.last_part(spec()->misc_data_size());

  _shared_space_size = spec()->read_only_size()
                     + spec()->read_write_size()
                     + spec()->misc_data_size()
                     + spec()->misc_code_size();

  // Allocate the unshared (default) space.
  _the_space = new ContigPermSpace(_bts,
               MemRegion(unshared_bottom, heap_word_size(initial_byte_size)));
  if (_the_space == NULL)
    vm_exit_during_initialization("Could not allocate an unshared"
                                  " CompactingPermGen Space");

  // Allocate shared spaces
  if (spec()->enable_shared_spaces()) {

    // If mapping a shared file, the space is not committed, don't
    // mangle.
    NOT_PRODUCT(bool old_ZapUnusedHeapArea = ZapUnusedHeapArea;)
Example #12
inline void PSVirtualSpace::set_reserved(ReservedSpace rs) {
  set_reserved(rs.base(), rs.base() + rs.size(), rs.special());
}
AdjoiningGenerations::AdjoiningGenerations(ReservedSpace old_young_rs,
                                           size_t init_low_byte_size,
                                           size_t min_low_byte_size,
                                           size_t max_low_byte_size,
                                           size_t init_high_byte_size,
                                           size_t min_high_byte_size,
                                           size_t max_high_byte_size,
                                           size_t alignment) :
  _virtual_spaces(old_young_rs, min_low_byte_size,
                  min_high_byte_size, alignment) {
  assert(min_low_byte_size <= init_low_byte_size &&
         init_low_byte_size <= max_low_byte_size, "Parameter check");
  assert(min_high_byte_size <= init_high_byte_size &&
         init_high_byte_size <= max_high_byte_size, "Parameter check");
  // Create the generations differently based on the option to
  // move the boundary.
  if (UseAdaptiveGCBoundary) {
    // Initialize the adjoining virtual spaces.  Then pass a virtual
    // space to each generation for initialization of the generation.

    // Does the actual creation of the virtual spaces
    _virtual_spaces.initialize(max_low_byte_size,
                               init_low_byte_size,
                               init_high_byte_size);

    // Place the young gen at the high end.  Passes in the virtual space.
    _young_gen = new ASPSYoungGen(_virtual_spaces.high(),
                                  _virtual_spaces.high()->committed_size(),
                                  min_high_byte_size,
                                  _virtual_spaces.high_byte_size_limit());

    // Place the old gen at the low end. Passes in the virtual space.
    _old_gen = new ASPSOldGen(_virtual_spaces.low(),
                              _virtual_spaces.low()->committed_size(),
                              min_low_byte_size,
                              _virtual_spaces.low_byte_size_limit(),
                              "old", 1);

    // AZUL - Initialize in this order - PERM OLD YOUNG
    old_gen()->initialize_work("old", 1);
    assert(old_gen()->reserved().byte_size() <= old_gen()->gen_size_limit(),
           "Consistency check");
    assert(old_young_rs.size() >= old_gen()->gen_size_limit(),
           "Consistency check");

    young_gen()->initialize_work();
    assert(young_gen()->reserved().byte_size() <= young_gen()->gen_size_limit(),
           "Consistency check");
    assert(old_young_rs.size() >= young_gen()->gen_size_limit(),
           "Consistency check");

  } else {

    // Layout the reserved space for the generations.
    ReservedSpace old_rs   =
      virtual_spaces()->reserved_space().first_part(max_low_byte_size);
    ReservedSpace heap_rs  =
      virtual_spaces()->reserved_space().last_part(max_low_byte_size);
    ReservedSpace young_rs = heap_rs.first_part(max_high_byte_size);
    assert(young_rs.size() == heap_rs.size(), "Didn't reserve all of the heap");

    // Create the generations.  Virtual spaces are not passed in.
    _young_gen = new PSYoungGen(init_high_byte_size,
                                min_high_byte_size,
                                max_high_byte_size);
    _old_gen = new PSOldGen(init_low_byte_size,
                            min_low_byte_size,
                            max_low_byte_size,
                            "old", 1);

    // The virtual spaces are created by the initialization of the gens.
    // AZUL - Initialize in this order - PERM OLD YOUNG
    _old_gen->initialize(old_rs, alignment, "old", 1);
    assert(old_gen()->gen_size_limit() == old_rs.size(), "Consistency check");
    _young_gen->initialize(young_rs, alignment);
    assert(young_gen()->gen_size_limit() == young_rs.size(),
           "Consistency check");
  }
}
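
In the non-adaptive branch above, the single reservation is carved into a low (old) part and a high (young) part via first_part/last_part. A standalone sketch of that split with made-up sizes, modelling the two calls as plain offset arithmetic:

#include <cstddef>
#include <cassert>

int main() {
  // Hypothetical heap reservation: 256 MB old (low) + 64 MB young (high).
  const size_t max_low_byte_size  = 256 * 1024 * 1024;  // old gen maximum
  const size_t max_high_byte_size = 64 * 1024 * 1024;   // young gen maximum
  const size_t reserved           = max_low_byte_size + max_high_byte_size;

  // first_part(max_low_byte_size) -> [0, max_low) for the old gen,
  // last_part(max_low_byte_size)  -> [max_low, reserved) for the young gen.
  const size_t old_rs_size   = max_low_byte_size;
  const size_t young_rs_size = reserved - max_low_byte_size;

  assert(old_rs_size + young_rs_size == reserved);
  assert(young_rs_size == max_high_byte_size);  // matches the "Didn't reserve all of the heap" assert
  return 0;
}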
bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  const size_t max_commit_granularity = os::page_size_for_region(rs.size(), rs.size(), 1);
  return initialize_with_granularity(rs, committed_size, max_commit_granularity);
}
  G1RegionsSmallerThanCommitSizeMapper(ReservedSpace rs,
                                       size_t actual_size,
                                       size_t page_size,
                                       size_t alloc_granularity,
                                       size_t commit_factor,
                                       MemoryType type) :
    G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, commit_factor, type),
    _regions_per_page((page_size * commit_factor) / alloc_granularity), _refcounts() {

    guarantee((page_size * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity");
    _refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + align_size_up(rs.size(), page_size)), page_size);
  }