CardGeneration::CardGeneration(ReservedSpace rs, size_t initial_byte_size,
                               int level, GenRemSet* remset) :
  Generation(rs, initial_byte_size, level), _rs(remset)
{
  HeapWord* start = (HeapWord*)rs.base();
  size_t reserved_byte_size = rs.size();
  assert((uintptr_t(start) & 3) == 0, "bad alignment");
  assert((reserved_byte_size & 3) == 0, "bad alignment");
  MemRegion reserved_mr(start, heap_word_size(reserved_byte_size));
  _bts = new BlockOffsetSharedArray(reserved_mr,
                                    heap_word_size(initial_byte_size));
  MemRegion committed_mr(start, heap_word_size(initial_byte_size));
  _rs->resize_covered_region(committed_mr);
  if (_bts == NULL)
    vm_exit_during_initialization("Could not allocate a BlockOffsetArray");

  // Verify that the start and end of this generation is the start of a card.
  // If this wasn't true, a single card could span more than one generation,
  // which would cause problems when we commit/uncommit memory, and when we
  // clear and dirty cards.
  guarantee(_rs->is_aligned(reserved_mr.start()), "generation must be card aligned");
  if (reserved_mr.end() != Universe::heap()->reserved_region().end()) {
    // Don't check at the very end of the heap as we'll assert that we're probing off
    // the end if we try.
    guarantee(_rs->is_aligned(reserved_mr.end()), "generation must be card aligned");
  }
}
CardGeneration::CardGeneration(ReservedSpace rs, size_t initial_byte_size,
                               CardTableRS* remset) :
  Generation(rs, initial_byte_size), _rs(remset), _shrink_factor(0),
  _min_heap_delta_bytes(), _capacity_at_prologue(), _used_at_prologue()
{
  HeapWord* start = (HeapWord*)rs.base();
  size_t reserved_byte_size = rs.size();
  assert((uintptr_t(start) & 3) == 0, "bad alignment");
  assert((reserved_byte_size & 3) == 0, "bad alignment");
  MemRegion reserved_mr(start, heap_word_size(reserved_byte_size));
  _bts = new BlockOffsetSharedArray(reserved_mr,
                                    heap_word_size(initial_byte_size));
  MemRegion committed_mr(start, heap_word_size(initial_byte_size));
  _rs->resize_covered_region(committed_mr);
  if (_bts == NULL) {
    vm_exit_during_initialization("Could not allocate a BlockOffsetArray");
  }

  // Verify that the start and end of this generation is the start of a card.
  // If this wasn't true, a single card could span more than one generation,
  // which would cause problems when we commit/uncommit memory, and when we
  // clear and dirty cards.
  guarantee(_rs->is_aligned(reserved_mr.start()), "generation must be card aligned");
  if (reserved_mr.end() != GenCollectedHeap::heap()->reserved_region().end()) {
    // Don't check at the very end of the heap as we'll assert that we're probing off
    // the end if we try.
    guarantee(_rs->is_aligned(reserved_mr.end()), "generation must be card aligned");
  }

  _min_heap_delta_bytes = MinHeapDeltaBytes;
  _capacity_at_prologue = initial_byte_size;
  _used_at_prologue = 0;
}
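Both CardGeneration constructors above guarantee that the generation's start and (except at the top of the heap) its end are card aligned. A minimal standalone sketch of why that matters, using an assumed 512-byte card size for illustration only (the real size comes from the remembered set):

#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t card_size = 512;          // assumed card size, illustration only
  uintptr_t gen_start = 0x20000000;         // card aligned
  uintptr_t gen_end   = gen_start + 1000;   // not card aligned
  // If gen_end were a real generation boundary, the card containing it would
  // cover memory from two generations, so clearing or dirtying that card
  // (e.g. around commit/uncommit) would touch both generations.
  printf("start aligned: %s\n", (gen_start % card_size) == 0 ? "yes" : "no");
  printf("end aligned:   %s\n", (gen_end   % card_size) == 0 ? "yes" : "no");
  return 0;
}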
G1RegionsSmallerThanCommitSizeMapper(ReservedSpace rs,
                                     size_t os_commit_granularity,
                                     size_t alloc_granularity,
                                     size_t commit_factor,
                                     MemoryType type) :
  G1RegionToSpaceMapper(rs, os_commit_granularity, alloc_granularity, type),
  _regions_per_page((os_commit_granularity * commit_factor) / alloc_granularity),
  _refcounts()
{
  guarantee((os_commit_granularity * commit_factor) >= alloc_granularity,
            "allocation granularity smaller than commit granularity");
  _refcounts.initialize((HeapWord*)rs.base(),
                        (HeapWord*)(rs.base() + rs.size()),
                        os_commit_granularity);
  _commit_map.resize(rs.size() * commit_factor / alloc_granularity,
                     /* in_resource_area */ false);
}
G1RegionsSmallerThanCommitSizeMapper(ReservedSpace rs,
                                     size_t actual_size,
                                     size_t page_size,
                                     size_t alloc_granularity,
                                     size_t commit_factor,
                                     MemoryType type) :
  G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, commit_factor, type),
  _regions_per_page((page_size * commit_factor) / alloc_granularity),
  _refcounts()
{
  guarantee((page_size * commit_factor) >= alloc_granularity,
            "allocation granularity smaller than commit granularity");
  _refcounts.initialize((HeapWord*)rs.base(),
                        (HeapWord*)(rs.base() + align_size_up(rs.size(), page_size)),
                        page_size);
}
void G1PageBasedVirtualSpace::initialize_with_page_size(ReservedSpace rs, size_t used_size, size_t page_size) {
  guarantee(rs.is_reserved(), "Given reserved space must have been reserved already.");

  vmassert(_low_boundary == NULL, "VirtualSpace already initialized");
  vmassert(page_size > 0, "Page size must be non-zero.");

  guarantee(is_ptr_aligned(rs.base(), page_size),
            "Reserved space base " PTR_FORMAT " is not aligned to requested page size " SIZE_FORMAT,
            p2i(rs.base()), page_size);
  guarantee(is_size_aligned(used_size, os::vm_page_size()),
            "Given used reserved space size needs to be OS page size aligned (%d bytes) but is " SIZE_FORMAT,
            os::vm_page_size(), used_size);
  guarantee(used_size <= rs.size(),
            "Used size of reserved space " SIZE_FORMAT " bytes is smaller than reservation at " SIZE_FORMAT " bytes",
            used_size, rs.size());
  guarantee(is_size_aligned(rs.size(), page_size),
            "Expected that the virtual space is size aligned, but " SIZE_FORMAT " is not aligned to page size " SIZE_FORMAT,
            rs.size(), page_size);

  _low_boundary  = rs.base();
  _high_boundary = _low_boundary + used_size;

  _special = rs.special();
  _executable = rs.executable();

  _page_size = page_size;

  vmassert(_committed.size() == 0, "virtual space initialized more than once");
  BitMap::idx_t size_in_pages = rs.size() / page_size;
  _committed.initialize(size_in_pages);
  if (_special) {
    _dirty.initialize(size_in_pages);
  }

  _tail_size = used_size % _page_size;
}
G1RegionToSpaceMapper::G1RegionToSpaceMapper(ReservedSpace rs,
                                             size_t used_size,
                                             size_t page_size,
                                             size_t region_granularity,
                                             size_t commit_factor,
                                             MemoryType type) :
  _storage(rs, used_size, page_size),
  _region_granularity(region_granularity),
  _listener(NULL),
  _commit_map(rs.size() * commit_factor / region_granularity)
{
  guarantee(is_power_of_2(page_size), "must be");
  guarantee(is_power_of_2(region_granularity), "must be");

  MemTracker::record_virtual_memory_type((address)rs.base(), type);
}
bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
  if (!rs.is_reserved()) return false;  // Allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  assert(max_commit_granularity > 0, "Granularity must be non-zero.");

  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = max_commit_granularity;
  _upper_alignment  = os::vm_page_size();

  // End of each region
  _lower_high_boundary  = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary  = high_boundary();

  // High address of each region
  _lower_high  = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high  = middle_high_boundary();

  // commit to initial size
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}
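initialize_with_granularity above carves the reserved range into lower, middle, and upper regions so that only the middle region is aligned to the large-page (commit) granularity at both ends. A standalone sketch of that boundary arithmetic, with assumed addresses and an assumed 2 MB granularity; round_to/round_down are replaced by plain power-of-two alignment math here:

#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t M = 1024 * 1024;
  uintptr_t low_boundary     = 0x7f0000010000;   // assumed reservation base (small-page aligned)
  uintptr_t high_boundary    = low_boundary + 64 * M;
  uintptr_t middle_alignment = 2 * M;            // assumed max commit granularity (large page)

  // End of the lower region: base rounded up to the next large-page boundary.
  uintptr_t lower_high_boundary  = (low_boundary + middle_alignment - 1) & ~(middle_alignment - 1);
  // End of the middle region: top rounded down to a large-page boundary.
  uintptr_t middle_high_boundary = high_boundary & ~(middle_alignment - 1);

  // The lower and upper slivers are committed with small pages; only the
  // middle region can be backed by large pages.
  printf("lower  region: %zu KB\n", (size_t)(lower_high_boundary - low_boundary) / 1024);
  printf("middle region: %zu MB\n", (size_t)(middle_high_boundary - lower_high_boundary) / M);
  printf("upper  region: %zu KB\n", (size_t)(high_boundary - middle_high_boundary) / 1024);
  return 0;
}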
G1RegionsLargerThanCommitSizeMapper(ReservedSpace rs,
                                    size_t os_commit_granularity,
                                    size_t alloc_granularity,
                                    size_t commit_factor,
                                    MemoryType type) :
  G1RegionToSpaceMapper(rs, os_commit_granularity, alloc_granularity, type),
  _pages_per_region(alloc_granularity / (os_commit_granularity * commit_factor))
{
  guarantee(alloc_granularity >= os_commit_granularity,
            "allocation granularity smaller than commit granularity");
  _commit_map.resize(rs.size() * commit_factor / alloc_granularity,
                     /* in_resource_area */ false);
}
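The two mapper flavors above differ only in which granularity is larger: regions larger than the commit size need several committed pages per region, while regions smaller than the commit size share one committed page and therefore need reference counting before uncommit. A small standalone sketch of the arithmetic, with assumed example sizes (4 KB commit pages, 1 MB heap regions, commit factor 1), not the values G1 computes at runtime:

#include <cstddef>
#include <cstdio>

int main() {
  const size_t K = 1024;
  size_t commit_granularity = 4 * K;     // assumed OS commit/page granularity
  size_t alloc_granularity  = 1024 * K;  // assumed heap region size
  size_t commit_factor      = 1;

  if (alloc_granularity >= commit_granularity * commit_factor) {
    // "Larger than commit size" case: each region spans several committed pages.
    size_t pages_per_region = alloc_granularity / (commit_granularity * commit_factor);
    printf("pages per region: %zu\n", pages_per_region);   // 256 with these numbers
  } else {
    // "Smaller than commit size" case: one committed page backs several regions,
    // so the mapper tracks a refcount of live regions per page.
    size_t regions_per_page = (commit_granularity * commit_factor) / alloc_granularity;
    printf("regions per page: %zu\n", regions_per_page);
  }
  return 0;
}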
// Deprecated.
bool PSVirtualSpace::initialize(ReservedSpace rs, size_t commit_size) {
  set_reserved(rs);
  set_committed(reserved_low_addr(), reserved_low_addr());

  // Commit to initial size.
  assert(commit_size <= rs.size(), "commit_size too big");
  bool result = commit_size > 0 ? expand_by(commit_size) : true;
  DEBUG_ONLY(verify());
  return result;
}
G1RegionToSpaceMapper::G1RegionToSpaceMapper(ReservedSpace rs,
                                             size_t commit_granularity,
                                             size_t region_granularity,
                                             MemoryType type) :
  _storage(),
  _commit_granularity(commit_granularity),
  _region_granularity(region_granularity),
  _listener(NULL),
  _commit_map()
{
  guarantee(is_power_of_2(commit_granularity), "must be");
  guarantee(is_power_of_2(region_granularity), "must be");

  _storage.initialize_with_granularity(rs, commit_granularity);

  MemTracker::record_virtual_memory_type((address)rs.base(), type);
}
void CodeCache::add_heap(ReservedSpace rs, const char* name, int code_blob_type) {
  // Check if heap is needed
  if (!heap_available(code_blob_type)) {
    return;
  }

  // Create CodeHeap
  CodeHeap* heap = new CodeHeap(name, code_blob_type);
  _heaps->append(heap);

  // Reserve Space
  size_t size_initial = MIN2(InitialCodeCacheSize, rs.size());
  size_initial = round_to(size_initial, os::vm_page_size());
  if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  // Register the CodeHeap
  MemoryService::add_code_heap_memory_pool(heap, name);
}
bool G1PageBasedVirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t page_size) {
  if (!rs.is_reserved()) {
    return false;  // Allocation failed.
  }
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  assert(page_size > 0, "Granularity must be non-zero.");

  _low_boundary  = rs.base();
  _high_boundary = _low_boundary + rs.size();

  _special = rs.special();
  _executable = rs.executable();

  _page_size = page_size;

  assert(_committed.size() == 0, "virtual space initialized more than once");
  uintx size_in_bits = rs.size() / page_size;
  _committed.resize(size_in_bits, /* in_resource_area */ false);

  return true;
}
CompactingPermGenGen::CompactingPermGenGen(ReservedSpace rs,
                                           ReservedSpace shared_rs,
                                           size_t initial_byte_size,
                                           int level, GenRemSet* remset,
                                           ContiguousSpace* space,
                                           PermanentGenerationSpec* spec_) :
  OneContigSpaceCardGeneration(rs, initial_byte_size, MinPermHeapExpansion,
                               level, remset, space) {

  set_spec(spec_);
  if (!UseSharedSpaces && !DumpSharedSpaces) {
    spec()->disable_sharing();
  }

  // Break virtual space into address ranges for all spaces.
  if (spec()->enable_shared_spaces()) {
    shared_end = (HeapWord*)(shared_rs.base() + shared_rs.size());
    misccode_end = shared_end;
    misccode_bottom = misccode_end - heap_word_size(spec()->misc_code_size());
    miscdata_end = misccode_bottom;
    miscdata_bottom = miscdata_end - heap_word_size(spec()->misc_data_size());
    readwrite_end = miscdata_bottom;
    readwrite_bottom = readwrite_end - heap_word_size(spec()->read_write_size());
    readonly_end = readwrite_bottom;
    readonly_bottom = readonly_end - heap_word_size(spec()->read_only_size());
    shared_bottom = readonly_bottom;
    unshared_end = shared_bottom;
    assert((char*)shared_bottom == shared_rs.base(), "shared space mismatch");
  } else {
    shared_end = (HeapWord*)(rs.base() + rs.size());
    misccode_end = shared_end;
    misccode_bottom = shared_end;
    miscdata_end = shared_end;
    miscdata_bottom = shared_end;
    readwrite_end = shared_end;
    readwrite_bottom = shared_end;
    readonly_end = shared_end;
    readonly_bottom = shared_end;
    shared_bottom = shared_end;
    unshared_end = shared_bottom;
  }
  unshared_bottom = (HeapWord*) rs.base();

  // Verify shared and unshared spaces adjacent.
  assert((char*)shared_bottom == rs.base()+rs.size(), "shared space mismatch");
  assert(unshared_end > unshared_bottom, "shared space mismatch");

  // Split reserved memory into pieces.
  ReservedSpace ro_rs   = shared_rs.first_part(spec()->read_only_size(),
                                               UseSharedSpaces);
  ReservedSpace tmp_rs1 = shared_rs.last_part(spec()->read_only_size());
  ReservedSpace rw_rs   = tmp_rs1.first_part(spec()->read_write_size(),
                                             UseSharedSpaces);
  ReservedSpace tmp_rs2 = tmp_rs1.last_part(spec()->read_write_size());
  ReservedSpace md_rs   = tmp_rs2.first_part(spec()->misc_data_size(),
                                             UseSharedSpaces);
  ReservedSpace mc_rs   = tmp_rs2.last_part(spec()->misc_data_size());

  _shared_space_size = spec()->read_only_size()
                     + spec()->read_write_size()
                     + spec()->misc_data_size()
                     + spec()->misc_code_size();

  // Allocate the unshared (default) space.
  _the_space = new ContigPermSpace(_bts,
               MemRegion(unshared_bottom, heap_word_size(initial_byte_size)));
  if (_the_space == NULL)
    vm_exit_during_initialization("Could not allocate an unshared"
                                  " CompactingPermGen Space");

  // Allocate shared spaces
  if (spec()->enable_shared_spaces()) {

    // If mapping a shared file, the space is not committed, don't
    // mangle.
    NOT_PRODUCT(bool old_ZapUnusedHeapArea = ZapUnusedHeapArea;)
bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  const size_t max_commit_granularity = os::page_size_for_region(rs.size(), rs.size(), 1);
  return initialize_with_granularity(rs, committed_size, max_commit_granularity);
}
inline void PSVirtualSpace::set_reserved(ReservedSpace rs) {
  set_reserved(rs.base(), rs.base() + rs.size(), rs.special());
}
void CodeCache::initialize_heaps() {
  // Determine size of compiler buffers
  size_t code_buffers_size = 0;
#ifdef COMPILER1
  // C1 temporary code buffers (see Compiler::init_buffer_blob())
  const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
  code_buffers_size += c1_count * Compiler::code_buffer_size();
#endif
#ifdef COMPILER2
  // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
  const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
  // Initial size of constant table (this may be increased if a compiled method needs more space)
  code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
#endif

  // Calculate default CodeHeap sizes if not set by user
  if (!FLAG_IS_CMDLINE(NonNMethodCodeHeapSize) && !FLAG_IS_CMDLINE(ProfiledCodeHeapSize)
      && !FLAG_IS_CMDLINE(NonProfiledCodeHeapSize)) {
    // Increase default NonNMethodCodeHeapSize to account for compiler buffers
    FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, NonNMethodCodeHeapSize + code_buffers_size);

    // Check if we have enough space for the non-nmethod code heap
    if (ReservedCodeCacheSize > NonNMethodCodeHeapSize) {
      // Use the default value for NonNMethodCodeHeapSize and one half of the
      // remaining size for non-profiled methods and one half for profiled methods
      size_t remaining_size = ReservedCodeCacheSize - NonNMethodCodeHeapSize;
      size_t profiled_size = remaining_size / 2;
      size_t non_profiled_size = remaining_size - profiled_size;
      FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
      FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);
    } else {
      // Use all space for the non-nmethod heap and set other heaps to minimal size
      FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, ReservedCodeCacheSize - os::vm_page_size() * 2);
      FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, os::vm_page_size());
      FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, os::vm_page_size());
    }
  }

  // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
  if (!heap_available(CodeBlobType::MethodProfiled)) {
    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, NonProfiledCodeHeapSize + ProfiledCodeHeapSize);
    FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0);
  }
  // We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
  if (!heap_available(CodeBlobType::MethodNonProfiled)) {
    FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, NonNMethodCodeHeapSize + NonProfiledCodeHeapSize);
    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
  }

  // Make sure we have enough space for VM internal code
  uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
  if (NonNMethodCodeHeapSize < (min_code_cache_size + code_buffers_size)) {
    vm_exit_during_initialization("Not enough space in non-nmethod code heap to run VM.");
  }
  guarantee(NonProfiledCodeHeapSize + ProfiledCodeHeapSize + NonNMethodCodeHeapSize <= ReservedCodeCacheSize, "Size check");

  // Align CodeHeaps
  size_t alignment = heap_alignment();
  size_t non_method_size = align_size_up(NonNMethodCodeHeapSize, alignment);
  size_t profiled_size   = align_size_down(ProfiledCodeHeapSize, alignment);

  // Reserve one continuous chunk of memory for CodeHeaps and split it into
  // parts for the individual heaps. The memory layout looks like this:
  // ---------- high -----------
  //    Non-profiled nmethods
  //      Profiled nmethods
  //         Non-nmethods
  // ---------- low ------------
  ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
  ReservedSpace non_method_space   = rs.first_part(non_method_size);
  ReservedSpace rest               = rs.last_part(non_method_size);
  ReservedSpace profiled_space     = rest.first_part(profiled_size);
  ReservedSpace non_profiled_space = rest.last_part(profiled_size);

  // Non-nmethods (stubs, adapters, ...)
  add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
  // Tier 2 and tier 3 (profiled) methods
  add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
  // Tier 1 and tier 4 (non-profiled) methods and native methods
  add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
}
AdjoiningGenerations::AdjoiningGenerations(ReservedSpace old_young_rs,
                                           size_t init_low_byte_size,
                                           size_t min_low_byte_size,
                                           size_t max_low_byte_size,
                                           size_t init_high_byte_size,
                                           size_t min_high_byte_size,
                                           size_t max_high_byte_size,
                                           size_t alignment) :
  _virtual_spaces(old_young_rs, min_low_byte_size,
                  min_high_byte_size, alignment) {
  assert(min_low_byte_size <= init_low_byte_size &&
         init_low_byte_size <= max_low_byte_size, "Parameter check");
  assert(min_high_byte_size <= init_high_byte_size &&
         init_high_byte_size <= max_high_byte_size, "Parameter check");

  // Create the generations differently based on the option to
  // move the boundary.
  if (UseAdaptiveGCBoundary) {
    // Initialize the adjoining virtual spaces.  Then pass a virtual space
    // to each generation for initialization of that generation.

    // Does the actual creation of the virtual spaces
    _virtual_spaces.initialize(max_low_byte_size,
                               init_low_byte_size,
                               init_high_byte_size);

    // Place the young gen at the high end.  Passes in the virtual space.
    _young_gen = new ASPSYoungGen(_virtual_spaces.high(),
                                  _virtual_spaces.high()->committed_size(),
                                  min_high_byte_size,
                                  _virtual_spaces.high_byte_size_limit());

    // Place the old gen at the low end.  Passes in the virtual space.
    _old_gen = new ASPSOldGen(_virtual_spaces.low(),
                              _virtual_spaces.low()->committed_size(),
                              min_low_byte_size,
                              _virtual_spaces.low_byte_size_limit(),
                              "old", 1);

    // AZUL - Initialize in this order - PERM OLD YOUNG
    old_gen()->initialize_work("old", 1);
    assert(old_gen()->reserved().byte_size() <= old_gen()->gen_size_limit(),
           "Consistency check");
    assert(old_young_rs.size() >= old_gen()->gen_size_limit(),
           "Consistency check");

    young_gen()->initialize_work();
    assert(young_gen()->reserved().byte_size() <= young_gen()->gen_size_limit(),
           "Consistency check");
    assert(old_young_rs.size() >= young_gen()->gen_size_limit(),
           "Consistency check");
  } else {
    // Layout the reserved space for the generations.
    ReservedSpace old_rs  =
      virtual_spaces()->reserved_space().first_part(max_low_byte_size);
    ReservedSpace heap_rs =
      virtual_spaces()->reserved_space().last_part(max_low_byte_size);
    ReservedSpace young_rs = heap_rs.first_part(max_high_byte_size);
    assert(young_rs.size() == heap_rs.size(), "Didn't reserve all of the heap");

    // Create the generations.  Virtual spaces are not passed in.
    _young_gen = new PSYoungGen(init_high_byte_size,
                                min_high_byte_size,
                                max_high_byte_size);
    _old_gen = new PSOldGen(init_low_byte_size,
                            min_low_byte_size,
                            max_low_byte_size,
                            "old", 1);

    // The virtual spaces are created by the initialization of the gens.
    // AZUL - Initialize in this order - PERM OLD YOUNG
    _old_gen->initialize(old_rs, alignment, "old", 1);
    assert(old_gen()->gen_size_limit() == old_rs.size(), "Consistency check");

    _young_gen->initialize(young_rs, alignment);
    assert(young_gen()->gen_size_limit() == young_rs.size(), "Consistency check");
  }
}
void CodeCache::initialize_heaps() {
  bool non_nmethod_set     = FLAG_IS_CMDLINE(NonNMethodCodeHeapSize);
  bool profiled_set        = FLAG_IS_CMDLINE(ProfiledCodeHeapSize);
  bool non_profiled_set    = FLAG_IS_CMDLINE(NonProfiledCodeHeapSize);
  size_t min_size          = os::vm_page_size();
  size_t cache_size        = ReservedCodeCacheSize;
  size_t non_nmethod_size  = NonNMethodCodeHeapSize;
  size_t profiled_size     = ProfiledCodeHeapSize;
  size_t non_profiled_size = NonProfiledCodeHeapSize;

  // Check if total size set via command line flags exceeds the reserved size
  check_heap_sizes((non_nmethod_set  ? non_nmethod_size  : min_size),
                   (profiled_set     ? profiled_size     : min_size),
                   (non_profiled_set ? non_profiled_size : min_size),
                   cache_size,
                   non_nmethod_set && profiled_set && non_profiled_set);

  // Determine size of compiler buffers
  size_t code_buffers_size = 0;
#ifdef COMPILER1
  // C1 temporary code buffers (see Compiler::init_buffer_blob())
  const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
  code_buffers_size += c1_count * Compiler::code_buffer_size();
#endif
#ifdef COMPILER2
  // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
  const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
  // Initial size of constant table (this may be increased if a compiled method needs more space)
  code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
#endif

  // Increase default non_nmethod_size to account for compiler buffers
  if (!non_nmethod_set) {
    non_nmethod_size += code_buffers_size;
  }

  // Calculate default CodeHeap sizes if not set by user
  if (!non_nmethod_set && !profiled_set && !non_profiled_set) {
    // Check if we have enough space for the non-nmethod code heap
    if (cache_size > non_nmethod_size) {
      // Use the default value for non_nmethod_size and one half of the
      // remaining size for non-profiled and one half for profiled methods
      size_t remaining_size = cache_size - non_nmethod_size;
      profiled_size = remaining_size / 2;
      non_profiled_size = remaining_size - profiled_size;
    } else {
      // Use all space for the non-nmethod heap and set other heaps to minimal size
      non_nmethod_size = cache_size - 2 * min_size;
      profiled_size = min_size;
      non_profiled_size = min_size;
    }
  } else if (!non_nmethod_set || !profiled_set || !non_profiled_set) {
    // The user explicitly set some code heap sizes. Increase or decrease the (default)
    // sizes of the other code heaps accordingly. First adapt non-profiled and profiled
    // code heap sizes and then only change non-nmethod code heap size if still necessary.
    intx diff_size = cache_size - (non_nmethod_size + profiled_size + non_profiled_size);
    if (non_profiled_set) {
      if (!profiled_set) {
        // Adapt size of profiled code heap
        if (diff_size < 0 && ((intx)profiled_size + diff_size) <= 0) {
          // Not enough space available, set to minimum size
          diff_size += profiled_size - min_size;
          profiled_size = min_size;
        } else {
          profiled_size += diff_size;
          diff_size = 0;
        }
      }
    } else if (profiled_set) {
      // Adapt size of non-profiled code heap
      if (diff_size < 0 && ((intx)non_profiled_size + diff_size) <= 0) {
        // Not enough space available, set to minimum size
        diff_size += non_profiled_size - min_size;
        non_profiled_size = min_size;
      } else {
        non_profiled_size += diff_size;
        diff_size = 0;
      }
    } else if (non_nmethod_set) {
      // Distribute remaining size between profiled and non-profiled code heaps
      diff_size = cache_size - non_nmethod_size;
      profiled_size = diff_size / 2;
      non_profiled_size = diff_size - profiled_size;
      diff_size = 0;
    }
    if (diff_size != 0) {
      // Use non-nmethod code heap for remaining space requirements
      assert(!non_nmethod_set && ((intx)non_nmethod_size + diff_size) > 0, "sanity");
      non_nmethod_size += diff_size;
    }
  }

  // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
  if (!heap_available(CodeBlobType::MethodProfiled)) {
    non_profiled_size += profiled_size;
    profiled_size = 0;
  }
  // We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
  if (!heap_available(CodeBlobType::MethodNonProfiled)) {
    non_nmethod_size += non_profiled_size;
    non_profiled_size = 0;
  }

  // Make sure we have enough space for VM internal code
  uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
  if (non_nmethod_size < (min_code_cache_size + code_buffers_size)) {
    vm_exit_during_initialization(err_msg(
        "Not enough space in non-nmethod code heap to run VM: %zuK < %zuK",
        non_nmethod_size/K, (min_code_cache_size + code_buffers_size)/K));
  }

  // Verify sizes and update flag values
  assert(non_profiled_size + profiled_size + non_nmethod_size == cache_size, "Invalid code heap sizes");
  FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, non_nmethod_size);
  FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
  FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);

  // Align CodeHeaps
  size_t alignment = heap_alignment();
  non_nmethod_size = align_size_up(non_nmethod_size, alignment);
  profiled_size    = align_size_down(profiled_size, alignment);

  // Reserve one continuous chunk of memory for CodeHeaps and split it into
  // parts for the individual heaps. The memory layout looks like this:
  // ---------- high -----------
  //    Non-profiled nmethods
  //      Profiled nmethods
  //         Non-nmethods
  // ---------- low ------------
  ReservedCodeSpace rs = reserve_heap_memory(cache_size);
  ReservedSpace non_method_space   = rs.first_part(non_nmethod_size);
  ReservedSpace rest               = rs.last_part(non_nmethod_size);
  ReservedSpace profiled_space     = rest.first_part(profiled_size);
  ReservedSpace non_profiled_space = rest.last_part(profiled_size);

  // Non-nmethods (stubs, adapters, ...)
  add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
  // Tier 2 and tier 3 (profiled) methods
  add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
  // Tier 1 and tier 4 (non-profiled) methods and native methods
  add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
}
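The default-sizing branch of initialize_heaps splits whatever remains after the non-nmethod heap evenly between the profiled and non-profiled heaps. A small standalone sketch of that arithmetic with assumed sizes (a 240 MB code cache and an 8 MB non-nmethod heap chosen for illustration, not the VM's actual defaults):

#include <cstddef>
#include <cstdio>

int main() {
  const size_t M = 1024 * 1024;
  size_t cache_size       = 240 * M;  // assumed ReservedCodeCacheSize
  size_t non_nmethod_size = 8 * M;    // assumed NonNMethodCodeHeapSize plus compiler buffers

  // Default case: no code heap size was set on the command line, so the
  // remainder is split evenly between profiled and non-profiled nmethods.
  size_t remaining_size    = cache_size - non_nmethod_size;
  size_t profiled_size     = remaining_size / 2;
  size_t non_profiled_size = remaining_size - profiled_size;

  printf("non-nmethod:  %zu MB\n", non_nmethod_size / M);   // 8 MB
  printf("profiled:     %zu MB\n", profiled_size / M);      // 116 MB
  printf("non-profiled: %zu MB\n", non_profiled_size / M);  // 116 MB
  return 0;
}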