Example #1
void CodeCache::initialize_heaps() {
  bool non_nmethod_set      = FLAG_IS_CMDLINE(NonNMethodCodeHeapSize);
  bool profiled_set         = FLAG_IS_CMDLINE(ProfiledCodeHeapSize);
  bool non_profiled_set     = FLAG_IS_CMDLINE(NonProfiledCodeHeapSize);
  size_t min_size           = os::vm_page_size();
  size_t cache_size         = ReservedCodeCacheSize;
  size_t non_nmethod_size   = NonNMethodCodeHeapSize;
  size_t profiled_size      = ProfiledCodeHeapSize;
  size_t non_profiled_size  = NonProfiledCodeHeapSize;
  // Check if total size set via command line flags exceeds the reserved size
  check_heap_sizes((non_nmethod_set  ? non_nmethod_size  : min_size),
                   (profiled_set     ? profiled_size     : min_size),
                   (non_profiled_set ? non_profiled_size : min_size),
                   cache_size,
                   non_nmethod_set && profiled_set && non_profiled_set);

  // Determine size of compiler buffers
  size_t code_buffers_size = 0;
#ifdef COMPILER1
  // C1 temporary code buffers (see Compiler::init_buffer_blob())
  const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
  code_buffers_size += c1_count * Compiler::code_buffer_size();
#endif
#ifdef COMPILER2
  // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
  const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
  // Initial size of constant table (this may be increased if a compiled method needs more space)
  code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
#endif

  // Increase default non_nmethod_size to account for compiler buffers
  if (!non_nmethod_set) {
    non_nmethod_size += code_buffers_size;
  }
  // Calculate default CodeHeap sizes if not set by user
  if (!non_nmethod_set && !profiled_set && !non_profiled_set) {
    // Check if we have enough space for the non-nmethod code heap
    if (cache_size > non_nmethod_size) {
      // Use the default value for non_nmethod_size and one half of the
      // remaining size for non-profiled and one half for profiled methods
      size_t remaining_size = cache_size - non_nmethod_size;
      profiled_size = remaining_size / 2;
      non_profiled_size = remaining_size - profiled_size;
    } else {
      // Use all space for the non-nmethod heap and set other heaps to minimal size
      non_nmethod_size = cache_size - 2 * min_size;
      profiled_size = min_size;
      non_profiled_size = min_size;
    }
  } else if (!non_nmethod_set || !profiled_set || !non_profiled_set) {
    // The user explicitly set some code heap sizes. Increase or decrease the (default)
    // sizes of the other code heaps accordingly. First adapt non-profiled and profiled
    // code heap sizes and then only change non-nmethod code heap size if still necessary.
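    // Note that diff_size is signed: it is negative when the sizes chosen so
    // far (explicit + default) already exceed the reserved cache size.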
    intx diff_size = cache_size - (non_nmethod_size + profiled_size + non_profiled_size);
    if (non_profiled_set) {
      if (!profiled_set) {
        // Adapt size of profiled code heap
        if (diff_size < 0 && ((intx)profiled_size + diff_size) <= 0) {
          // Not enough space available, set to minimum size
          diff_size += profiled_size - min_size;
          profiled_size = min_size;
        } else {
          profiled_size += diff_size;
          diff_size = 0;
        }
      }
    } else if (profiled_set) {
      // Adapt size of non-profiled code heap
      if (diff_size < 0 && ((intx)non_profiled_size + diff_size) <= 0) {
        // Not enough space available, set to minimum size
        diff_size += non_profiled_size - min_size;
        non_profiled_size = min_size;
      } else {
        non_profiled_size += diff_size;
        diff_size = 0;
      }
    } else if (non_nmethod_set) {
      // Distribute remaining size between profiled and non-profiled code heaps
      diff_size = cache_size - non_nmethod_size;
      profiled_size = diff_size / 2;
      non_profiled_size = diff_size - profiled_size;
      diff_size = 0;
    }
    if (diff_size != 0) {
      // Use non-nmethod code heap for remaining space requirements
      assert(!non_nmethod_set && ((intx)non_nmethod_size + diff_size) > 0, "sanity");
      non_nmethod_size += diff_size;
    }
  }

  // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
  if (!heap_available(CodeBlobType::MethodProfiled)) {
    non_profiled_size += profiled_size;
    profiled_size = 0;
  }
  // We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
  if (!heap_available(CodeBlobType::MethodNonProfiled)) {
    non_nmethod_size += non_profiled_size;
    non_profiled_size = 0;
  }
  // Make sure we have enough space for VM internal code
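  // (debug builds emit considerably larger code, hence the DEBUG_ONLY factor below)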
  uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
  if (non_nmethod_size < (min_code_cache_size + code_buffers_size)) {
    vm_exit_during_initialization(err_msg(
        "Not enough space in non-nmethod code heap to run VM: %zuK < %zuK",
        non_nmethod_size/K, (min_code_cache_size + code_buffers_size)/K));
  }

  // Verify sizes and update flag values
  assert(non_profiled_size + profiled_size + non_nmethod_size == cache_size, "Invalid code heap sizes");
  FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, non_nmethod_size);
  FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
  FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);

  // Align CodeHeaps
  size_t alignment = heap_alignment();
  non_nmethod_size = align_size_up(non_nmethod_size, alignment);
  profiled_size    = align_size_down(profiled_size, alignment);

  // Reserve one continuous chunk of memory for CodeHeaps and split it into
  // parts for the individual heaps. The memory layout looks like this:
  // ---------- high -----------
  //    Non-profiled nmethods
  //      Profiled nmethods
  //         Non-nmethods
  // ---------- low ------------
  ReservedCodeSpace rs = reserve_heap_memory(cache_size);
  ReservedSpace non_method_space    = rs.first_part(non_nmethod_size);
  ReservedSpace rest                = rs.last_part(non_nmethod_size);
  ReservedSpace profiled_space      = rest.first_part(profiled_size);
  ReservedSpace non_profiled_space  = rest.last_part(profiled_size);

  // Non-nmethods (stubs, adapters, ...)
  add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
  // Tier 2 and tier 3 (profiled) methods
  add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
  // Tier 1 and tier 4 (non-profiled) methods and native methods
  add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
}
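
To see what the default-sizing branch above produces, here is a minimal standalone sketch of the same arithmetic, assuming a 240M reserved cache and illustrative buffer/default sizes (the constants below are stand-ins, not the real VM globals):

#include <cstdio>
#include <cstddef>

static const size_t K = 1024;
static const size_t M = 1024 * K;

int main() {
  // Illustrative stand-ins for ReservedCodeCacheSize, NonNMethodCodeHeapSize,
  // and the computed compiler buffer total; the real defaults differ by build.
  size_t cache_size        = 240 * M;
  size_t non_nmethod_size  = 5 * M;
  size_t code_buffers_size = 1 * M;

  // No heap sizes set on the command line: grow non-nmethod by the buffers,
  // then split the remainder evenly, as in initialize_heaps().
  non_nmethod_size += code_buffers_size;
  size_t remaining_size    = cache_size - non_nmethod_size;
  size_t profiled_size     = remaining_size / 2;
  size_t non_profiled_size = remaining_size - profiled_size;

  printf("non-nmethod:  %zuK\n", non_nmethod_size / K);
  printf("profiled:     %zuK\n", profiled_size / K);
  printf("non-profiled: %zuK\n", non_profiled_size / K);
  return 0;
}

With these inputs the non-nmethod heap absorbs the compiler buffers and the two nmethod heaps split the remainder evenly, matching the branch taken when none of the three flags is set on the command line.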
Example #2
CompactingPermGenGen::CompactingPermGenGen(ReservedSpace rs,
                                           ReservedSpace shared_rs,
                                           size_t initial_byte_size,
                                           int level, GenRemSet* remset,
                                           ContiguousSpace* space,
                                           PermanentGenerationSpec* spec_) :
  OneContigSpaceCardGeneration(rs, initial_byte_size, MinPermHeapExpansion,
                               level, remset, space) {

  set_spec(spec_);
  if (!UseSharedSpaces && !DumpSharedSpaces) {
    spec()->disable_sharing();
  }

  // Break virtual space into address ranges for all spaces.

  if (spec()->enable_shared_spaces()) {
    shared_end = (HeapWord*)(shared_rs.base() + shared_rs.size());
      misccode_end = shared_end;
      misccode_bottom = misccode_end - heap_word_size(spec()->misc_code_size());
      miscdata_end = misccode_bottom;
      miscdata_bottom = miscdata_end - heap_word_size(spec()->misc_data_size());
      readwrite_end = miscdata_bottom;
      readwrite_bottom =
        readwrite_end - heap_word_size(spec()->read_write_size());
      readonly_end = readwrite_bottom;
      readonly_bottom =
        readonly_end - heap_word_size(spec()->read_only_size());
    shared_bottom = readonly_bottom;
    unshared_end = shared_bottom;
    assert((char*)shared_bottom == shared_rs.base(), "shared space mismatch");
  } else {
    shared_end = (HeapWord*)(rs.base() + rs.size());
      misccode_end = shared_end;
      misccode_bottom = shared_end;
      miscdata_end = shared_end;
      miscdata_bottom = shared_end;
      readwrite_end = shared_end;
      readwrite_bottom = shared_end;
      readonly_end = shared_end;
      readonly_bottom = shared_end;
    shared_bottom = shared_end;
    unshared_end = shared_bottom;
  }
  unshared_bottom = (HeapWord*) rs.base();

  // Verify shared and unshared spaces adjacent.
  assert((char*)shared_bottom == rs.base()+rs.size(), "shared space mismatch");
  assert(unshared_end > unshared_bottom, "shared space mismatch");

  // Split reserved memory into pieces.

  ReservedSpace ro_rs   = shared_rs.first_part(spec()->read_only_size(),
                                              UseSharedSpaces);
  ReservedSpace tmp_rs1 = shared_rs.last_part(spec()->read_only_size());
  ReservedSpace rw_rs   = tmp_rs1.first_part(spec()->read_write_size(),
                                             UseSharedSpaces);
  ReservedSpace tmp_rs2 = tmp_rs1.last_part(spec()->read_write_size());
  ReservedSpace md_rs   = tmp_rs2.first_part(spec()->misc_data_size(),
                                             UseSharedSpaces);
  ReservedSpace mc_rs   = tmp_rs2.last_part(spec()->misc_data_size());

  _shared_space_size = spec()->read_only_size()
                     + spec()->read_write_size()
                     + spec()->misc_data_size()
                     + spec()->misc_code_size();

  // Allocate the unshared (default) space.
  _the_space = new ContigPermSpace(_bts,
               MemRegion(unshared_bottom, heap_word_size(initial_byte_size)));
  if (_the_space == NULL)
    vm_exit_during_initialization("Could not allocate an unshared"
                                  " CompactingPermGen Space");

  // Allocate shared spaces
  if (spec()->enable_shared_spaces()) {

    // If mapping a shared file, the space is not committed, don't
    // mangle.
    NOT_PRODUCT(bool old_ZapUnusedHeapArea = ZapUnusedHeapArea;)
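
The shared-spaces branch above carves the shared region from its high end downward: misc code, then misc data, then read-write, then read-only. Below is a minimal sketch of that boundary arithmetic with hypothetical sizes (none of these names are the real PermanentGenerationSpec API):

#include <cstdio>
#include <cstddef>
#include <cstdint>

int main() {
  // Hypothetical shared-region layout, carved from the high end downward in
  // the same order as the constructor above. All sizes are illustrative.
  uintptr_t shared_base = 0x40000000;      // assumed shared_rs.base()
  size_t    misc_code   = 4 * 1024 * 1024;
  size_t    misc_data   = 4 * 1024 * 1024;
  size_t    read_write  = 12 * 1024 * 1024;
  size_t    read_only   = 8 * 1024 * 1024;
  size_t    total       = misc_code + misc_data + read_write + read_only;

  uintptr_t shared_end       = shared_base + total;
  uintptr_t misccode_bottom  = shared_end - misc_code;
  uintptr_t miscdata_bottom  = misccode_bottom - misc_data;
  uintptr_t readwrite_bottom = miscdata_bottom - read_write;
  uintptr_t readonly_bottom  = readwrite_bottom - read_only;

  // If the four sizes sum to the region size, the lowest boundary lands
  // back on the base, which is what the constructor's assert checks.
  printf("readonly_bottom == shared_base? %s\n",
         readonly_bottom == shared_base ? "yes" : "no");
  return 0;
}

If the four sizes sum to the size of the shared region, the lowest boundary lands exactly on shared_rs.base(), which is the condition the "shared space mismatch" assert verifies.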
Example #3
void CodeCache::initialize_heaps() {
  // Determine size of compiler buffers
  size_t code_buffers_size = 0;
#ifdef COMPILER1
  // C1 temporary code buffers (see Compiler::init_buffer_blob())
  const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
  code_buffers_size += c1_count * Compiler::code_buffer_size();
#endif
#ifdef COMPILER2
  // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
  const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
  // Initial size of constant table (this may be increased if a compiled method needs more space)
  code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
#endif

  // Calculate default CodeHeap sizes if not set by user
  if (!FLAG_IS_CMDLINE(NonNMethodCodeHeapSize) && !FLAG_IS_CMDLINE(ProfiledCodeHeapSize)
      && !FLAG_IS_CMDLINE(NonProfiledCodeHeapSize)) {
    // Increase default NonNMethodCodeHeapSize to account for compiler buffers
    FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, NonNMethodCodeHeapSize + code_buffers_size);

    // Check if we have enough space for the non-nmethod code heap
    if (ReservedCodeCacheSize > NonNMethodCodeHeapSize) {
      // Use the default value for NonNMethodCodeHeapSize and one half of the
      // remaining size for non-profiled methods and one half for profiled methods
      size_t remaining_size = ReservedCodeCacheSize - NonNMethodCodeHeapSize;
      size_t profiled_size = remaining_size / 2;
      size_t non_profiled_size = remaining_size - profiled_size;
      FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
      FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);
    } else {
      // Use all space for the non-nmethod heap and set other heaps to minimal size
      FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, ReservedCodeCacheSize - os::vm_page_size() * 2);
      FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, os::vm_page_size());
      FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, os::vm_page_size());
    }
  }

  // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
  if (!heap_available(CodeBlobType::MethodProfiled)) {
    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, NonProfiledCodeHeapSize + ProfiledCodeHeapSize);
    FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0);
  }
  // We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
  if (!heap_available(CodeBlobType::MethodNonProfiled)) {
    FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, NonNMethodCodeHeapSize + NonProfiledCodeHeapSize);
    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
  }

  // Make sure we have enough space for VM internal code
  uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
  if (NonNMethodCodeHeapSize < (min_code_cache_size + code_buffers_size)) {
    vm_exit_during_initialization("Not enough space in non-nmethod code heap to run VM.");
  }
  guarantee(NonProfiledCodeHeapSize + ProfiledCodeHeapSize + NonNMethodCodeHeapSize <= ReservedCodeCacheSize, "Size check");

  // Align CodeHeaps
  size_t alignment = heap_alignment();
  size_t non_method_size = align_size_up(NonNMethodCodeHeapSize, alignment);
  size_t profiled_size   = align_size_down(ProfiledCodeHeapSize, alignment);

  // Reserve one continuous chunk of memory for CodeHeaps and split it into
  // parts for the individual heaps. The memory layout looks like this:
  // ---------- high -----------
  //    Non-profiled nmethods
  //      Profiled nmethods
  //         Non-nmethods
  // ---------- low ------------
  ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
  ReservedSpace non_method_space    = rs.first_part(non_method_size);
  ReservedSpace rest                = rs.last_part(non_method_size);
  ReservedSpace profiled_space      = rest.first_part(profiled_size);
  ReservedSpace non_profiled_space  = rest.last_part(profiled_size);

  // Non-nmethods (stubs, adapters, ...)
  add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
  // Tier 2 and tier 3 (profiled) methods
  add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
  // Tier 1 and tier 4 (non-profiled) methods and native methods
  add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
}
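
The first_part/last_part slicing used in both versions can be sketched with a plain offset-based split; the Range struct below is a hypothetical stand-in for the VM-internal ReservedSpace API, mimicking only the splitting semantics:

#include <cstdio>
#include <cstddef>

// Hypothetical stand-in for ReservedSpace's splitting operations.
struct Range {
  size_t base;
  size_t size;
  Range first_part(size_t split) const { return Range{base, split}; }
  Range last_part(size_t split) const  { return Range{base + split, size - split}; }
};

int main() {
  // Split one contiguous range low-to-high into non-nmethod, profiled, and
  // non-profiled parts, mirroring the layout comment in initialize_heaps().
  Range rs{0, 240 * 1024 * 1024};
  size_t non_method_size = 8 * 1024 * 1024;    // illustrative, already aligned
  size_t profiled_size   = 116 * 1024 * 1024;  // illustrative, already aligned

  Range non_method_space   = rs.first_part(non_method_size);
  Range rest               = rs.last_part(non_method_size);
  Range profiled_space     = rest.first_part(profiled_size);
  Range non_profiled_space = rest.last_part(profiled_size);

  printf("non-nmethod:  [%zu, %zu)\n", non_method_space.base,
         non_method_space.base + non_method_space.size);
  printf("profiled:     [%zu, %zu)\n", profiled_space.base,
         profiled_space.base + profiled_space.size);
  printf("non-profiled: [%zu, %zu)\n", non_profiled_space.base,
         non_profiled_space.base + non_profiled_space.size);
  return 0;
}

Each last_part call starts where the preceding first_part ends, so the three heaps tile the reserved chunk from low (non-nmethods) to high (non-profiled nmethods), exactly as the memory-layout comment describes.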