Example 1
void G1PageBasedVirtualSpace::initialize_with_page_size(ReservedSpace rs, size_t used_size, size_t page_size) {
  guarantee(rs.is_reserved(), "Given reserved space must have been reserved already.");

  vmassert(_low_boundary == NULL, "VirtualSpace already initialized");
  vmassert(page_size > 0, "Page size must be non-zero.");

  guarantee(is_ptr_aligned(rs.base(), page_size),
            "Reserved space base " PTR_FORMAT " is not aligned to requested page size " SIZE_FORMAT, p2i(rs.base()), page_size);
  guarantee(is_size_aligned(used_size, os::vm_page_size()),
            "Given used reserved space size needs to be OS page size aligned (%d bytes) but is " SIZE_FORMAT, os::vm_page_size(), used_size);
  guarantee(used_size <= rs.size(),
            "Used size of reserved space " SIZE_FORMAT " bytes is larger than the reservation at " SIZE_FORMAT " bytes", used_size, rs.size());
  guarantee(is_size_aligned(rs.size(), page_size),
            "Expected that the virtual space is size aligned, but " SIZE_FORMAT " is not aligned to page size " SIZE_FORMAT, rs.size(), page_size);

  _low_boundary  = rs.base();
  _high_boundary = _low_boundary + used_size;

  _special = rs.special();
  _executable = rs.executable();

  _page_size = page_size;

  vmassert(_committed.size() == 0, "virtual space initialized more than once");
  BitMap::idx_t size_in_pages = rs.size() / page_size;
  _committed.initialize(size_in_pages);
  if (_special) {
    _dirty.initialize(size_in_pages);
  }

  _tail_size = used_size % _page_size;
}
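
For reference, here is a minimal standalone sketch (not HotSpot code) of the bookkeeping initialize_with_page_size() performs: assuming a reservation described only by a base address and a size, it reproduces the alignment preconditions, the one-bit-per-page sizing of the commit bitmap, and the tail-size computation for a partially used last page. All values and helper names below are made-up examples.

// Hedged sketch: illustrates the arithmetic only; page_size, os_page, base and
// used_size are assumed example values, and size_is_aligned() is a stand-in
// for HotSpot's is_size_aligned()/is_ptr_aligned().
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>

static bool size_is_aligned(size_t size, size_t alignment) {  // alignment: power of two
  return (size & (alignment - 1)) == 0;
}

int main() {
  const size_t    page_size = 2 * 1024 * 1024;              // e.g. a 2M large page
  const size_t    os_page   = 4 * 1024;                     // assumed OS page size
  const size_t    reserved  = 16 * page_size;               // rs.size()
  const size_t    used_size = 5 * page_size + 3 * os_page;  // OS-page aligned, not page aligned
  const uintptr_t base      = 0x100000000ULL;               // pretend rs.base()

  // The same preconditions the guarantees above enforce.
  assert(size_is_aligned(base, page_size));
  assert(size_is_aligned(used_size, os_page));
  assert(used_size <= reserved);
  assert(size_is_aligned(reserved, page_size));

  // One bit per page for the commit bitmap, plus the partial last page.
  size_t size_in_pages = reserved / page_size;
  size_t tail_size     = used_size % page_size;

  printf("pages=%zu tail=%zu bytes\n", size_in_pages, tail_size);
  return 0;
}
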
bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
  if (!rs.is_reserved()) return false;  // Allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  assert(max_commit_granularity > 0, "Granularity must be non-zero.");

  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = max_commit_granularity;
  _upper_alignment  = os::vm_page_size();

  // End of each region
  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // commit to initial size
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}
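
To make the large-page comment above concrete, the following is a minimal standalone sketch (not HotSpot code) of the three-region split this function sets up: a small-page lower prefix up to the first large-page boundary, a large-page-aligned middle body, and a small-page upper suffix. round_up_() and round_down_() are hypothetical stand-ins for round_to() and round_down(), and all addresses are made-up examples.

// Hedged sketch: addresses are plain integers; vm_page, large and low are
// assumed example values, not taken from a real ReservedSpace.
#include <cinttypes>
#include <cstdint>
#include <cstdio>

static uintptr_t round_up_(uintptr_t v, uintptr_t alignment) {    // stand-in for round_to()
  return (v + alignment - 1) & ~(alignment - 1);
}
static uintptr_t round_down_(uintptr_t v, uintptr_t alignment) {  // stand-in for round_down()
  return v & ~(alignment - 1);
}

int main() {
  const uintptr_t vm_page = 4 * 1024;           // os::vm_page_size()
  const uintptr_t large   = 4 * 1024 * 1024;    // max_commit_granularity, e.g. 4M pages
  const uintptr_t low     = 0x7f0000001000ULL;  // rs.base(), only vm_page aligned
  const uintptr_t high    = low + 64 * 1024 * 1024 + 2 * vm_page;

  // End of each region, as computed in the function above.
  uintptr_t lower_high_boundary  = round_up_(low, large);     // first large-page boundary
  uintptr_t middle_high_boundary = round_down_(high, large);  // last large-page boundary
  uintptr_t upper_high_boundary  = high;

  printf("lower : [%#" PRIxPTR ", %#" PRIxPTR ") small pages\n", low, lower_high_boundary);
  printf("middle: [%#" PRIxPTR ", %#" PRIxPTR ") large pages\n", lower_high_boundary, middle_high_boundary);
  printf("upper : [%#" PRIxPTR ", %#" PRIxPTR ") small pages\n", middle_high_boundary, upper_high_boundary);
  return 0;
}
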
bool G1PageBasedVirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t page_size) {
  if (!rs.is_reserved()) {
    return false;  // Allocation failed.
  }
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  assert(page_size > 0, "Granularity must be non-zero.");

  _low_boundary  = rs.base();
  _high_boundary = _low_boundary + rs.size();

  _special = rs.special();
  _executable = rs.executable();

  _page_size = page_size;

  assert(_committed.size() == 0, "virtual space initialized more than once");
  uintx size_in_bits = rs.size() / page_size;
  _committed.resize(size_in_bits, /* in_resource_area */ false);

  return true;
}
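
As a closing illustration, here is a minimal standalone sketch (not HotSpot code) of the per-page commit bookkeeping behind _committed: one bit per page of the reservation, where bit i covers [base + i*page_size, base + (i+1)*page_size). std::vector<bool> stands in for HotSpot's BitMap, and commit_pages() is a hypothetical helper, not part of the real class.

// Hedged sketch: the base address, reservation size and page size are assumed
// example values; no memory is actually reserved or committed.
#include <cinttypes>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

struct PageBitmapSketch {
  uintptr_t base;
  size_t page_size;
  std::vector<bool> committed;   // stands in for _committed: one bit per page

  PageBitmapSketch(uintptr_t base_, size_t reserved, size_t page_size_)
      : base(base_), page_size(page_size_), committed(reserved / page_size_, false) {}

  // Mark pages [start, start + count) committed, as a commit routine would
  // after the underlying memory commit succeeded.
  void commit_pages(size_t start, size_t count) {
    for (size_t i = start; i < start + count; ++i) {
      committed[i] = true;
    }
  }

  uintptr_t page_start(size_t index) const { return base + index * page_size; }
};

int main() {
  PageBitmapSketch vs(0x100000000ULL, 8 * 1024 * 1024, 1024 * 1024);  // 8 one-megabyte pages
  vs.commit_pages(0, 3);
  for (size_t i = 0; i < vs.committed.size(); ++i) {
    printf("page %zu @ %#" PRIxPTR " %s\n", i, vs.page_start(i),
           vs.committed[i] ? "committed" : "uncommitted");
  }
  return 0;
}
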