Code Example #1
  static void test() {
    size_t flag_value;

    save_flags();
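    // save_flags()/restore_flags() and the verify_*() calls below are
    // presumably test helpers defined elsewhere in this file (not shown
    // in this snippet).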

    // Set some limits that make the math simple.
    FLAG_SET_ERGO(uintx, MaxHeapSize, 180 * M);
    FLAG_SET_ERGO(uintx, InitialHeapSize, 120 * M);
    Arguments::set_min_heap_size(40 * M);

    // If NewSize is set on the command line, it should be used
    // for both min and initial young size if less than min heap.
    flag_value = 20 * M;
    FLAG_SET_CMDLINE(uintx, NewSize, flag_value);
    verify_min(flag_value);
    verify_initial(flag_value);

    // If NewSize is set on the command line but is larger than the min
    // heap size, it should only be used for the initial young size.
    flag_value = 80 * M;
    FLAG_SET_CMDLINE(uintx, NewSize, flag_value);
    verify_initial(flag_value);

    // If NewSize has been ergonomically set, the collector policy
    // should use it for min but calculate the initial young size
    // using NewRatio.
    flag_value = 20 * M;
    FLAG_SET_ERGO(uintx, NewSize, flag_value);
    verify_min(flag_value);
    verify_scaled_initial(InitialHeapSize);

    restore_flags();

  }
Code Example #2
File: collectorPolicy.cpp Project: gaoxiaojun/dync
 static void set_basic_flag_values() {
   FLAG_SET_ERGO(size_t, MaxHeapSize, 180 * M);
   FLAG_SET_ERGO(size_t, InitialHeapSize, 100 * M);
   FLAG_SET_ERGO(size_t, OldSize, 4 * M);
   FLAG_SET_ERGO(size_t, NewSize, 1 * M);
   FLAG_SET_ERGO(size_t, MaxNewSize, 80 * M);
   Arguments::set_min_heap_size(40 * M);
 }
Code Example #3
void AdvancedThresholdPolicy::initialize() {
  // Turn on ergonomic compiler count selection
  if (FLAG_IS_DEFAULT(CICompilerCountPerCPU) && FLAG_IS_DEFAULT(CICompilerCount)) {
    FLAG_SET_DEFAULT(CICompilerCountPerCPU, true);
  }
  int count = CICompilerCount;
  if (CICompilerCountPerCPU) {
    // Simple log n seems to grow too slowly for tiered; try something faster: log n * log log n
    int log_cpu = log2_intptr(os::active_processor_count());
    int loglog_cpu = log2_intptr(MAX2(log_cpu, 1));
    count = MAX2(log_cpu * loglog_cpu, 1) * 3 / 2;
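    // Worked example (illustrative): with 32 active processors, log_cpu = 5
    // and loglog_cpu = 2, so count = MAX2(5 * 2, 1) * 3 / 2 = 15, which the
    // code below splits into c1_count = 5 and c2_count = 10.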
  }

  set_c1_count(MAX2(count / 3, 1));
  set_c2_count(MAX2(count - c1_count(), 1));
  FLAG_SET_ERGO(intx, CICompilerCount, c1_count() + c2_count());

  // Some inlining tuning
#ifdef X86
  if (FLAG_IS_DEFAULT(InlineSmallCode)) {
    FLAG_SET_DEFAULT(InlineSmallCode, 2000);
  }
#endif

#if defined SPARC || defined AARCH64
  if (FLAG_IS_DEFAULT(InlineSmallCode)) {
    FLAG_SET_DEFAULT(InlineSmallCode, 2500);
  }
#endif

  set_increase_threshold_at_ratio();
  set_start_time(os::javaTimeMillis());
}
Code Example #4
File: vm_version_ppc.cpp Project: netroby/jdk9-dev
void VM_Version::initialize() {

  // Test which instructions are supported and measure cache line size.
  determine_features();

  // If PowerArchitecturePPC64 hasn't been specified explicitly, determine it from the features.
  if (FLAG_IS_DEFAULT(PowerArchitecturePPC64)) {
    if (VM_Version::has_lqarx()) {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 8);
    } else if (VM_Version::has_popcntw()) {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 7);
    } else if (VM_Version::has_cmpb()) {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 6);
    } else if (VM_Version::has_popcntb()) {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 5);
    } else {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 0);
    }
  }

  bool PowerArchitecturePPC64_ok = false;
  switch (PowerArchitecturePPC64) {
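    // Note: the cases below intentionally fall through. Selecting level N
    // requires every feature of the levels below it, so a missing feature at
    // any level breaks out with PowerArchitecturePPC64_ok still false.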
    case 8: if (!VM_Version::has_lqarx()  ) break;
    case 7: if (!VM_Version::has_popcntw()) break;
    case 6: if (!VM_Version::has_cmpb()   ) break;
    case 5: if (!VM_Version::has_popcntb()) break;
    case 0: PowerArchitecturePPC64_ok = true; break;
    default: break;
  }
  guarantee(PowerArchitecturePPC64_ok, "PowerArchitecturePPC64 cannot be set to "
            UINTX_FORMAT " on this machine", PowerArchitecturePPC64);

  // Power 8: Configure Data Stream Control Register.
  if (has_mfdscr()) {
    config_dscr();
  }

  if (!UseSIGTRAP) {
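    // MSG is presumably a file-local macro (not shown in this snippet) that
    // reports when one of these trap-based flags is being overridden.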
    MSG(TrapBasedICMissChecks);
    MSG(TrapBasedNotEntrantChecks);
    MSG(TrapBasedNullChecks);
    FLAG_SET_ERGO(bool, TrapBasedNotEntrantChecks, false);
    FLAG_SET_ERGO(bool, TrapBasedNullChecks,       false);
    FLAG_SET_ERGO(bool, TrapBasedICMissChecks,     false);
  }
Code Example #5
void SimpleThresholdPolicy::initialize() {
  if (FLAG_IS_DEFAULT(CICompilerCount)) {
    FLAG_SET_DEFAULT(CICompilerCount, 3);
  }
  int count = CICompilerCount;
  if (CICompilerCountPerCPU) {
    count = MAX2(log2_intptr(os::active_processor_count()), 1) * 3 / 2;
  }
  set_c1_count(MAX2(count / 3, 1));
  set_c2_count(MAX2(count - c1_count(), 1));
  FLAG_SET_ERGO(intx, CICompilerCount, c1_count() + c2_count());
}
Code Example #6
void NonTieredCompPolicy::initialize() {
  // Setup the compiler thread numbers
  if (CICompilerCountPerCPU) {
    // Example: if CICompilerCountPerCPU is true, then we get
    // max(log2(8)-1,1) = 2 compiler threads on an 8-way machine.
    // May help big-app startup time.
    _compiler_count = MAX2(log2_intptr(os::active_processor_count())-1,1);
    FLAG_SET_ERGO(intx, CICompilerCount, _compiler_count);
  } else {
    _compiler_count = CICompilerCount;
  }
}
Code Example #7
void VM_Version::initialize() {

  // Test which instructions are supported and measure cache line size.
  determine_features();

  // If PowerArchitecturePPC64 hasn't been specified explicitly, determine it from the features.
  if (FLAG_IS_DEFAULT(PowerArchitecturePPC64)) {
    if (VM_Version::has_lqarx()) {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 8);
    } else if (VM_Version::has_popcntw()) {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 7);
    } else if (VM_Version::has_cmpb()) {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 6);
    } else if (VM_Version::has_popcntb()) {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 5);
    } else {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 0);
    }
  }
  guarantee(PowerArchitecturePPC64 == 0 || PowerArchitecturePPC64 == 5 ||
            PowerArchitecturePPC64 == 6 || PowerArchitecturePPC64 == 7 ||
            PowerArchitecturePPC64 == 8,
            "PowerArchitecturePPC64 should be 0, 5, 6, 7, or 8");

  // Power 8: Configure Data Stream Control Register.
  if (PowerArchitecturePPC64 >= 8) {
    config_dscr();
  }

  if (!UseSIGTRAP) {
    MSG(TrapBasedICMissChecks);
    MSG(TrapBasedNotEntrantChecks);
    MSG(TrapBasedNullChecks);
    FLAG_SET_ERGO(bool, TrapBasedNotEntrantChecks, false);
    FLAG_SET_ERGO(bool, TrapBasedNullChecks,       false);
    FLAG_SET_ERGO(bool, TrapBasedICMissChecks,     false);
  }
Code Example #8
File: collectorPolicy.cpp Project: gaoxiaojun/dync
  static void test_new_size() {
    size_t flag_value;

    save_flags();

    // If NewSize is set on the command line, it should be used
    // for both min and initial young size if less than min heap.
    flag_value = 20 * M;
    set_basic_flag_values();
    FLAG_SET_CMDLINE(size_t, NewSize, flag_value);
    verify_young_min(flag_value);

    set_basic_flag_values();
    FLAG_SET_CMDLINE(size_t, NewSize, flag_value);
    verify_young_initial(flag_value);

    // If NewSize is set on the command line but is larger than the min
    // heap size, it should only be used for the initial young size.
    flag_value = 80 * M;
    set_basic_flag_values();
    FLAG_SET_CMDLINE(size_t, NewSize, flag_value);
    verify_young_initial(flag_value);

    // If NewSize has been ergonomically set, the collector policy
    // should use it for min but calculate the initial young size
    // using NewRatio.
    flag_value = 20 * M;
    set_basic_flag_values();
    FLAG_SET_ERGO(size_t, NewSize, flag_value);
    verify_young_min(flag_value);

    set_basic_flag_values();
    FLAG_SET_ERGO(size_t, NewSize, flag_value);
    verify_scaled_young_initial(InitialHeapSize);

    restore_flags();
  }
Code Example #9
void TwoGenerationCollectorPolicy::initialize_flags() {
  GenCollectorPolicy::initialize_flags();

  if (!is_size_aligned(OldSize, _gen_alignment)) {
    FLAG_SET_ERGO(uintx, OldSize, align_size_down(OldSize, _gen_alignment));
  }

  if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(MaxHeapSize)) {
    // NewRatio will be used later to set the young generation size so we use
    // it to calculate how big the heap should be based on the requested OldSize
    // and NewRatio.
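    // Since young = heap / (NewRatio + 1) = OldSize / NewRatio, the total is
    // OldSize + OldSize / NewRatio = (OldSize / NewRatio) * (NewRatio + 1).
    // E.g. (illustrative): OldSize = 60M with NewRatio = 2 yields a 90M heap.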
    assert(NewRatio > 0, "NewRatio should have been set up earlier");
    size_t calculated_heapsize = (OldSize / NewRatio) * (NewRatio + 1);

    calculated_heapsize = align_size_up(calculated_heapsize, _heap_alignment);
    FLAG_SET_ERGO(uintx, MaxHeapSize, calculated_heapsize);
    _max_heap_byte_size = MaxHeapSize;
    FLAG_SET_ERGO(uintx, InitialHeapSize, calculated_heapsize);
    _initial_heap_byte_size = InitialHeapSize;
  }

  // adjust max heap size if necessary
  if (NewSize + OldSize > MaxHeapSize) {
    if (_max_heap_size_cmdline) {
      // Somebody set a maximum heap size with the intention that we should not
      // exceed it. Adjust New/OldSize as necessary.
      uintx calculated_size = NewSize + OldSize;
      double shrink_factor = (double) MaxHeapSize / calculated_size;
      uintx smaller_new_size = align_size_down((uintx)(NewSize * shrink_factor), _gen_alignment);
      FLAG_SET_ERGO(uintx, NewSize, MAX2(young_gen_size_lower_bound(), smaller_new_size));
      _initial_gen0_size = NewSize;

      // OldSize is already aligned because above we aligned MaxHeapSize to
      // _heap_alignment, and we just made sure that NewSize is aligned to
      // _gen_alignment. In initialize_flags() we verified that _heap_alignment
      // is a multiple of _gen_alignment.
      FLAG_SET_ERGO(uintx, OldSize, MaxHeapSize - NewSize);
    } else {
      FLAG_SET_ERGO(uintx, MaxHeapSize, align_size_up(NewSize + OldSize, _heap_alignment));
      _max_heap_byte_size = MaxHeapSize;
    }
  }

  always_do_update_barrier = UseConcMarkSweepGC;

  DEBUG_ONLY(TwoGenerationCollectorPolicy::assert_flags();)
}
Code Example #10
void SimpleThresholdPolicy::initialize() {
  if (FLAG_IS_DEFAULT(CICompilerCount)) {
    FLAG_SET_DEFAULT(CICompilerCount, 3);
  }
  int count = CICompilerCount;
#ifdef _LP64
  // On 64-bit systems, scale the number of compiler threads with
  // the number of cores available on the system. Scaling is not
  // performed on 32-bit systems because it can lead to exhaustion
  // of the virtual memory address space available to the JVM.
  if (CICompilerCountPerCPU) {
    count = MAX2(log2_intptr(os::active_processor_count()) * 3 / 2, 2);
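    // E.g. (illustrative): 16 active processors give log2_intptr(16) = 4,
    // so count = MAX2(4 * 3 / 2, 2) = 6 compiler threads.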
    FLAG_SET_ERGO(intx, CICompilerCount, count);
  }
#endif
  if (TieredStopAtLevel < CompLevel_full_optimization) {
    // No C2 compiler thread required
    set_c1_count(count);
  } else {
    set_c1_count(MAX2(count / 3, 1));
    set_c2_count(MAX2(count - c1_count(), 1));
  }
  assert(count == c1_count() + c2_count(), "inconsistent compiler thread count");
}
Code Example #11
void GenCollectorPolicy::initialize_flags() {
  CollectorPolicy::initialize_flags();

  assert(_gen_alignment != 0, "Generation alignment not set up properly");
  assert(_heap_alignment >= _gen_alignment,
         err_msg("heap_alignment: " SIZE_FORMAT " less than gen_alignment: " SIZE_FORMAT,
                 _heap_alignment, _gen_alignment));
  assert(_gen_alignment % _space_alignment == 0,
         err_msg("gen_alignment: " SIZE_FORMAT " not aligned by space_alignment: " SIZE_FORMAT,
                 _gen_alignment, _space_alignment));
  assert(_heap_alignment % _gen_alignment == 0,
         err_msg("heap_alignment: " SIZE_FORMAT " not aligned by gen_alignment: " SIZE_FORMAT,
                 _heap_alignment, _gen_alignment));

  // All generational heaps have a youngest gen; handle those flags here

  // Make sure the heap is large enough for two generations
  uintx smallest_new_size = young_gen_size_lower_bound();
  uintx smallest_heap_size = align_size_up(smallest_new_size + align_size_up(_space_alignment, _gen_alignment),
                                           _heap_alignment);
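  // The inner align_size_up term reserves minimal room for an old generation:
  // one space alignment's worth of memory, rounded up to the gen alignment.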
  if (MaxHeapSize < smallest_heap_size) {
    FLAG_SET_ERGO(uintx, MaxHeapSize, smallest_heap_size);
    _max_heap_byte_size = MaxHeapSize;
  }
  // If needed, synchronize _min_heap_byte size and _initial_heap_byte_size
  if (_min_heap_byte_size < smallest_heap_size) {
    _min_heap_byte_size = smallest_heap_size;
    if (InitialHeapSize < _min_heap_byte_size) {
      FLAG_SET_ERGO(uintx, InitialHeapSize, smallest_heap_size);
      _initial_heap_byte_size = smallest_heap_size;
    }
  }

  // Now take the actual NewSize into account. We will silently increase NewSize
  // if the user specified a smaller or unaligned value.
  smallest_new_size = MAX2(smallest_new_size, (uintx)align_size_down(NewSize, _gen_alignment));
  if (smallest_new_size != NewSize) {
    // Do not use FLAG_SET_ERGO to update NewSize here, since that would
    // clobber the record of whether NewSize was set on the command line.
    // This information is needed later when setting the initial and minimum
    // young generation size.
    NewSize = smallest_new_size;
  }
  _initial_gen0_size = NewSize;

  if (!FLAG_IS_DEFAULT(MaxNewSize)) {
    uintx min_new_size = MAX2(_gen_alignment, _min_gen0_size);

    if (MaxNewSize >= MaxHeapSize) {
      // Make sure there is room for an old generation
      uintx smaller_max_new_size = MaxHeapSize - _gen_alignment;
      if (FLAG_IS_CMDLINE(MaxNewSize)) {
        warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or greater than the entire "
                "heap (" SIZE_FORMAT "k).  A new max generation size of " SIZE_FORMAT "k will be used.",
                MaxNewSize/K, MaxHeapSize/K, smaller_max_new_size/K);
      }
      FLAG_SET_ERGO(uintx, MaxNewSize, smaller_max_new_size);
      if (NewSize > MaxNewSize) {
        FLAG_SET_ERGO(uintx, NewSize, MaxNewSize);
        _initial_gen0_size = NewSize;
      }
    } else if (MaxNewSize < min_new_size) {
      FLAG_SET_ERGO(uintx, MaxNewSize, min_new_size);
    } else if (!is_size_aligned(MaxNewSize, _gen_alignment)) {
      FLAG_SET_ERGO(uintx, MaxNewSize, align_size_down(MaxNewSize, _gen_alignment));
    }
    _max_gen0_size = MaxNewSize;
  }

  if (NewSize > MaxNewSize) {
    // At this point this should only happen if the user specifies a large NewSize and/or
    // a small (but not too small) MaxNewSize.
    if (FLAG_IS_CMDLINE(MaxNewSize)) {
      warning("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). "
              "A new max generation size of " SIZE_FORMAT "k will be used.",
              NewSize/K, MaxNewSize/K, NewSize/K);
    }
    FLAG_SET_ERGO(uintx, MaxNewSize, NewSize);
    _max_gen0_size = MaxNewSize;
  }

  if (SurvivorRatio < 1 || NewRatio < 1) {
    vm_exit_during_initialization("Invalid young gen ratio specified");
  }

  DEBUG_ONLY(GenCollectorPolicy::assert_flags();)
}
Code Example #12
void CollectorPolicy::initialize_flags() {
  assert(_space_alignment != 0, "Space alignment not set up properly");
  assert(_heap_alignment != 0, "Heap alignment not set up properly");
  assert(_heap_alignment >= _space_alignment,
         err_msg("heap_alignment: " SIZE_FORMAT " less than space_alignment: " SIZE_FORMAT,
                 _heap_alignment, _space_alignment));
  assert(_heap_alignment % _space_alignment == 0,
         err_msg("heap_alignment: " SIZE_FORMAT " not aligned by space_alignment: " SIZE_FORMAT,
                 _heap_alignment, _space_alignment));

  if (FLAG_IS_CMDLINE(MaxHeapSize)) {
    if (FLAG_IS_CMDLINE(InitialHeapSize) && InitialHeapSize > MaxHeapSize) {
      vm_exit_during_initialization("Initial heap size set to a larger value than the maximum heap size");
    }
    if (_min_heap_byte_size != 0 && MaxHeapSize < _min_heap_byte_size) {
      vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified");
    }
    _max_heap_size_cmdline = true;
  }

  // Check heap parameter properties
  if (InitialHeapSize < M) {
    vm_exit_during_initialization("Too small initial heap");
  }
  if (_min_heap_byte_size < M) {
    vm_exit_during_initialization("Too small minimum heap");
  }

  // User inputs from -Xmx and -Xms must be aligned
  _min_heap_byte_size = align_size_up(_min_heap_byte_size, _heap_alignment);
  uintx aligned_initial_heap_size = align_size_up(InitialHeapSize, _heap_alignment);
  uintx aligned_max_heap_size = align_size_up(MaxHeapSize, _heap_alignment);
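  // E.g. (illustrative): with a 2M _heap_alignment, -Xms101m is rounded up
  // to 102M while an already-aligned -Xms100m is left unchanged.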

  // Write back to flags if the values changed
  if (aligned_initial_heap_size != InitialHeapSize) {
    FLAG_SET_ERGO(uintx, InitialHeapSize, aligned_initial_heap_size);
  }
  if (aligned_max_heap_size != MaxHeapSize) {
    FLAG_SET_ERGO(uintx, MaxHeapSize, aligned_max_heap_size);
  }

  if (FLAG_IS_CMDLINE(InitialHeapSize) && _min_heap_byte_size != 0 &&
      InitialHeapSize < _min_heap_byte_size) {
    vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified");
  }
  if (!FLAG_IS_DEFAULT(InitialHeapSize) && InitialHeapSize > MaxHeapSize) {
    FLAG_SET_ERGO(uintx, MaxHeapSize, InitialHeapSize);
  } else if (!FLAG_IS_DEFAULT(MaxHeapSize) && InitialHeapSize > MaxHeapSize) {
    FLAG_SET_ERGO(uintx, InitialHeapSize, MaxHeapSize);
    if (InitialHeapSize < _min_heap_byte_size) {
      _min_heap_byte_size = InitialHeapSize;
    }
  }

  _initial_heap_byte_size = InitialHeapSize;
  _max_heap_byte_size = MaxHeapSize;

  FLAG_SET_ERGO(uintx, MinHeapDeltaBytes, align_size_up(MinHeapDeltaBytes, _space_alignment));

  DEBUG_ONLY(CollectorPolicy::assert_flags();)
}
Code Example #13
File: vm_version_sparc.cpp Project: gaoxiaojun/dync
void VM_Version::initialize() {
  _features = determine_features();
  PrefetchCopyIntervalInBytes = prefetch_copy_interval_in_bytes();
  PrefetchScanIntervalInBytes = prefetch_scan_interval_in_bytes();
  PrefetchFieldsAhead         = prefetch_fields_ahead();

  // Allocation prefetch settings
  intx cache_line_size = prefetch_data_size();
  if( cache_line_size > AllocatePrefetchStepSize )
    AllocatePrefetchStepSize = cache_line_size;

  assert(AllocatePrefetchLines > 0, "invalid value");
  if( AllocatePrefetchLines < 1 )     // set valid value in product VM
    AllocatePrefetchLines = 3;
  assert(AllocateInstancePrefetchLines > 0, "invalid value");
  if( AllocateInstancePrefetchLines < 1 ) // set valid value in product VM
    AllocateInstancePrefetchLines = 1;

  AllocatePrefetchDistance = allocate_prefetch_distance();
  AllocatePrefetchStyle    = allocate_prefetch_style();

  if (AllocatePrefetchStyle == 3 && !has_blk_init()) {
    warning("BIS instructions are not available on this CPU");
    FLAG_SET_DEFAULT(AllocatePrefetchStyle, 1);
  }

  guarantee(VM_Version::has_v9(), "only SPARC v9 is supported");

  UseSSE = 0; // Only on x86 and x64

  _supports_cx8 = has_v9();
  _supports_atomic_getset4 = true; // swap instruction

  if (is_niagara()) {
    // Indirect branch is the same cost as direct
    if (FLAG_IS_DEFAULT(UseInlineCaches)) {
      FLAG_SET_DEFAULT(UseInlineCaches, false);
    }
    // Align loops on a single instruction boundary.
    if (FLAG_IS_DEFAULT(OptoLoopAlignment)) {
      FLAG_SET_DEFAULT(OptoLoopAlignment, 4);
    }
#ifdef _LP64
    // 32-bit oops don't make sense for the 64-bit VM on sparc
    // since the 32-bit VM has the same registers and smaller objects.
    Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
    Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
#endif // _LP64
#ifdef COMPILER2
    // Indirect branch is the same cost as direct
    if (FLAG_IS_DEFAULT(UseJumpTables)) {
      FLAG_SET_DEFAULT(UseJumpTables, true);
    }
    // Single-issue, so entry and loop tops are
    // aligned on a single instruction boundary
    if (FLAG_IS_DEFAULT(InteriorEntryAlignment)) {
      FLAG_SET_DEFAULT(InteriorEntryAlignment, 4);
    }
    if (is_niagara_plus()) {
      if (has_blk_init() && UseTLAB &&
          FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
        // Use BIS instruction for TLAB allocation prefetch.
        FLAG_SET_ERGO(intx, AllocatePrefetchInstr, 1);
        if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
          FLAG_SET_ERGO(intx, AllocatePrefetchStyle, 3);
        }
        if (FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
          // Use smaller prefetch distance with BIS
          FLAG_SET_DEFAULT(AllocatePrefetchDistance, 64);
        }
      }
      if (is_T4()) {
        // Double number of prefetched cache lines on T4
        // since L2 cache line size is smaller (32 bytes).
        if (FLAG_IS_DEFAULT(AllocatePrefetchLines)) {
          FLAG_SET_ERGO(intx, AllocatePrefetchLines, AllocatePrefetchLines*2);
        }
        if (FLAG_IS_DEFAULT(AllocateInstancePrefetchLines)) {
          FLAG_SET_ERGO(intx, AllocateInstancePrefetchLines, AllocateInstancePrefetchLines*2);
        }
      }
      if (AllocatePrefetchStyle != 3 && FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
        // Use different prefetch distance without BIS
        FLAG_SET_DEFAULT(AllocatePrefetchDistance, 256);
      }
      if (AllocatePrefetchInstr == 1) {
        // Need a space at the end of TLAB for BIS since it
        // will fault when accessing memory outside of heap.

        // +1 for rounding up to next cache line, +1 to be safe
        int lines = AllocatePrefetchLines + 2;
        int step_size = AllocatePrefetchStepSize;
        int distance = AllocatePrefetchDistance;
        _reserve_for_allocation_prefetch = (distance + step_size*lines)/(int)HeapWordSize;
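        // E.g. (illustrative, 64-bit VM with HeapWordSize == 8): distance = 64,
        // step_size = 64 and lines = 3 + 2 = 5 reserve (64 + 64*5)/8 = 48 words.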
      }
    }
#endif
  }

  // Use hardware population count instruction if available.
  if (has_hardware_popc()) {
    if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
      FLAG_SET_DEFAULT(UsePopCountInstruction, true);
    }
  } else if (UsePopCountInstruction) {
    warning("POPC instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UsePopCountInstruction, false);
  }

  // T4 and newer Sparc cpus have new compare and branch instruction.
  if (has_cbcond()) {
    if (FLAG_IS_DEFAULT(UseCBCond)) {
      FLAG_SET_DEFAULT(UseCBCond, true);
    }
  } else if (UseCBCond) {
    warning("CBCOND instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UseCBCond, false);
  }

  assert(BlockZeroingLowLimit > 0, "invalid value");
  if (has_block_zeroing() && cache_line_size > 0) {
    if (FLAG_IS_DEFAULT(UseBlockZeroing)) {
      FLAG_SET_DEFAULT(UseBlockZeroing, true);
    }
  } else if (UseBlockZeroing) {
    warning("BIS zeroing instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseBlockZeroing, false);
  }

  assert(BlockCopyLowLimit > 0, "invalid value");
  if (has_block_zeroing() && cache_line_size > 0) { // has_blk_init() && is_T4(): core's local L2 cache
    if (FLAG_IS_DEFAULT(UseBlockCopy)) {
      FLAG_SET_DEFAULT(UseBlockCopy, true);
    }
  } else if (UseBlockCopy) {
    warning("BIS instructions are not available or expensive on this CPU");
    FLAG_SET_DEFAULT(UseBlockCopy, false);
  }

#ifdef COMPILER2
  // T4 and newer Sparc cpus have fast RDPC.
  if (has_fast_rdpc() && FLAG_IS_DEFAULT(UseRDPCForConstantTableBase)) {
    FLAG_SET_DEFAULT(UseRDPCForConstantTableBase, true);
  }

  // Currently not supported anywhere.
  FLAG_SET_DEFAULT(UseFPUForSpilling, false);

  MaxVectorSize = 8;

  assert((InteriorEntryAlignment % relocInfo::addr_unit()) == 0, "alignment is not a multiple of NOP size");
#endif

  assert((CodeEntryAlignment % relocInfo::addr_unit()) == 0, "alignment is not a multiple of NOP size");
  assert((OptoLoopAlignment % relocInfo::addr_unit()) == 0, "alignment is not a multiple of NOP size");

  char buf[512];
  jio_snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
               (has_v9() ? ", v9" : (has_v8() ? ", v8" : "")),
               (has_hardware_popc() ? ", popc" : ""),
               (has_vis1() ? ", vis1" : ""),
               (has_vis2() ? ", vis2" : ""),
               (has_vis3() ? ", vis3" : ""),
               (has_blk_init() ? ", blk_init" : ""),
               (has_cbcond() ? ", cbcond" : ""),
               (has_aes() ? ", aes" : ""),
               (has_sha1() ? ", sha1" : ""),
               (has_sha256() ? ", sha256" : ""),
               (has_sha512() ? ", sha512" : ""),
               (has_crc32c() ? ", crc32c" : ""),
               (is_ultra3() ? ", ultra3" : ""),
               (is_sun4v() ? ", sun4v" : ""),
               (is_niagara_plus() ? ", niagara_plus" : (is_niagara() ? ", niagara" : "")),
               (is_sparc64() ? ", sparc64" : ""),
               (!has_hardware_mul32() ? ", no-mul32" : ""),
               (!has_hardware_div32() ? ", no-div32" : ""),
               (!has_hardware_fsmuld() ? ", no-fsmuld" : ""));

  // buf starts with ", " or is empty
  _features_str = os::strdup(strlen(buf) > 2 ? buf + 2 : buf);

  // UseVIS is set to the smaller of what the hardware supports and what
  // the command line requires, i.e., you cannot set UseVIS to 3 on an
  // older UltraSparc that does not support it.
  if (UseVIS > 3) UseVIS=3;
  if (UseVIS < 0) UseVIS=0;
  if (!has_vis3()) // Drop to 2 if no VIS3 support
    UseVIS = MIN2((intx)2,UseVIS);
  if (!has_vis2()) // Drop to 1 if no VIS2 support
    UseVIS = MIN2((intx)1,UseVIS);
  if (!has_vis1()) // Drop to 0 if no VIS1 support
    UseVIS = 0;

  // SPARC T4 and above should have support for AES instructions
  if (has_aes()) {
    if (FLAG_IS_DEFAULT(UseAES)) {
      FLAG_SET_DEFAULT(UseAES, true);
    }
    if (!UseAES) {
      if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
        warning("AES intrinsics require UseAES flag to be enabled. Intrinsics will be disabled.");
      }
      FLAG_SET_DEFAULT(UseAESIntrinsics, false);
    } else {
      // The AES intrinsic stubs require AES instruction support (of course)
      // but also require VIS3 mode or higher for the instructions they use.
      if (UseVIS > 2) {
        if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
          FLAG_SET_DEFAULT(UseAESIntrinsics, true);
        }
      } else {
        if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
          warning("SPARC AES intrinsics require VIS3 instructions. Intrinsics will be disabled.");
        }
        FLAG_SET_DEFAULT(UseAESIntrinsics, false);
      }
    }
  } else if (UseAES || UseAESIntrinsics) {
    if (UseAES && !FLAG_IS_DEFAULT(UseAES)) {
      warning("AES instructions are not available on this CPU");
      FLAG_SET_DEFAULT(UseAES, false);
    }
    if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
      warning("AES intrinsics are not available on this CPU");
      FLAG_SET_DEFAULT(UseAESIntrinsics, false);
    }
  }

  // GHASH/GCM intrinsics
  if (has_vis3() && (UseVIS > 2)) {
    if (FLAG_IS_DEFAULT(UseGHASHIntrinsics)) {
      UseGHASHIntrinsics = true;
    }
  } else if (UseGHASHIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseGHASHIntrinsics))
      warning("GHASH intrinsics require VIS3 instruction support. Intrinsics will be disabled");
    FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
  }

  // SHA1, SHA256, and SHA512 instructions were added to SPARC T-series at different times
  if (has_sha1() || has_sha256() || has_sha512()) {
    if (UseVIS > 0) { // SHA intrinsics use VIS1 instructions
      if (FLAG_IS_DEFAULT(UseSHA)) {
        FLAG_SET_DEFAULT(UseSHA, true);
      }
    } else {
      if (UseSHA) {
        warning("SPARC SHA intrinsics require VIS1 instruction support. Intrinsics will be disabled.");
        FLAG_SET_DEFAULT(UseSHA, false);
      }
    }
  } else if (UseSHA) {
    warning("SHA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseSHA, false);
  }

  if (UseSHA && has_sha1()) {
    if (FLAG_IS_DEFAULT(UseSHA1Intrinsics)) {
      FLAG_SET_DEFAULT(UseSHA1Intrinsics, true);
    }
  } else if (UseSHA1Intrinsics) {
    warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
  }

  if (UseSHA && has_sha256()) {
    if (FLAG_IS_DEFAULT(UseSHA256Intrinsics)) {
      FLAG_SET_DEFAULT(UseSHA256Intrinsics, true);
    }
  } else if (UseSHA256Intrinsics) {
    warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
  }

  if (UseSHA && has_sha512()) {
    if (FLAG_IS_DEFAULT(UseSHA512Intrinsics)) {
      FLAG_SET_DEFAULT(UseSHA512Intrinsics, true);
    }
  } else if (UseSHA512Intrinsics) {
    warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
  }

  if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) {
    FLAG_SET_DEFAULT(UseSHA, false);
  }

  // SPARC T4 and above should have support for CRC32C instruction
  if (has_crc32c()) {
    if (UseVIS > 2) { // CRC32C intrinsics use VIS3 instructions
      if (FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
        FLAG_SET_DEFAULT(UseCRC32CIntrinsics, true);
      }
    } else {
      if (UseCRC32CIntrinsics) {
        warning("SPARC CRC32C intrinsics require VIS3 instruction support. Intrinsics will be disabled.");
        FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
      }
    }
  } else if (UseCRC32CIntrinsics) {
    warning("CRC32C instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
  }

  if (UseVIS > 2) {
    if (FLAG_IS_DEFAULT(UseAdler32Intrinsics)) {
      FLAG_SET_DEFAULT(UseAdler32Intrinsics, true);
    }
  } else if (UseAdler32Intrinsics) {
    warning("SPARC Adler32 intrinsics require VIS3 instruction support. Intrinsics will be disabled.");
    FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
  }

  if (UseVIS > 2) {
    if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
      FLAG_SET_DEFAULT(UseCRC32Intrinsics, true);
    }
  } else if (UseCRC32Intrinsics) {
    warning("SPARC CRC32 intrinsics require VIS3 insructions support. Intriniscs will be disabled");
    FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
  }

  if (FLAG_IS_DEFAULT(ContendedPaddingWidth) &&
    (cache_line_size > ContendedPaddingWidth))
    ContendedPaddingWidth = cache_line_size;

  // This machine does not allow unaligned memory accesses
  if (UseUnalignedAccesses) {
    if (!FLAG_IS_DEFAULT(UseUnalignedAccesses))
      warning("Unaligned memory access is not available on this CPU");
    FLAG_SET_DEFAULT(UseUnalignedAccesses, false);
  }

  if (PrintMiscellaneous && Verbose) {
    tty->print_cr("L1 data cache line size: %u", L1_data_cache_line_size());
    tty->print_cr("L2 data cache line size: %u", L2_data_cache_line_size());
    tty->print("Allocation");
    if (AllocatePrefetchStyle <= 0) {
      tty->print_cr(": no prefetching");
    } else {
      tty->print(" prefetching: ");
      if (AllocatePrefetchInstr == 0) {
          tty->print("PREFETCH");
      } else if (AllocatePrefetchInstr == 1) {
          tty->print("BIS");
      }
      if (AllocatePrefetchLines > 1) {
        tty->print_cr(" at distance %d, %d lines of %d bytes", (int) AllocatePrefetchDistance, (int) AllocatePrefetchLines, (int) AllocatePrefetchStepSize);
      } else {
        tty->print_cr(" at distance %d, one line of %d bytes", (int) AllocatePrefetchDistance, (int) AllocatePrefetchStepSize);
      }
    }
    if (PrefetchCopyIntervalInBytes > 0) {
      tty->print_cr("PrefetchCopyIntervalInBytes %d", (int) PrefetchCopyIntervalInBytes);
    }
    if (PrefetchScanIntervalInBytes > 0) {
      tty->print_cr("PrefetchScanIntervalInBytes %d", (int) PrefetchScanIntervalInBytes);
    }
    if (PrefetchFieldsAhead > 0) {
      tty->print_cr("PrefetchFieldsAhead %d", (int) PrefetchFieldsAhead);
    }
    if (ContendedPaddingWidth > 0) {
      tty->print_cr("ContendedPaddingWidth %d", (int) ContendedPaddingWidth);
    }
  }
}
Code Example #14
File: collectorPolicy.cpp Project: netroby/jdk9-dev
// The minimum sizes of the generations may differ from the initial sizes.
// An inconsistency is permitted here between the total size specified
// explicitly on the command line via OldSize and NewSize and the total
// specified via -Xms. Issue a warning, but allow the values to pass.
void GenCollectorPolicy::initialize_size_info() {
  CollectorPolicy::initialize_size_info();

  _initial_young_size = NewSize;
  _max_young_size = MaxNewSize;
  _initial_old_size = OldSize;

  // Determine maximum size of the young generation.

  if (FLAG_IS_DEFAULT(MaxNewSize)) {
    _max_young_size = scale_by_NewRatio_aligned(_max_heap_byte_size);
    // Bound the maximum size by NewSize below (since it historically
    // would have been NewSize and because the NewRatio calculation could
    // yield a size that is too small) and bound it by MaxNewSize above.
    // Ergonomics plays here by previously calculating the desired
    // NewSize and MaxNewSize.
    _max_young_size = MIN2(MAX2(_max_young_size, _initial_young_size), MaxNewSize);
  }

  // Given the maximum young size, determine the initial and
  // minimum young sizes.

  if (_max_heap_byte_size == _initial_heap_byte_size) {
    // The maximum and initial heap sizes are the same, so the generation's
    // initial size must be the same as its maximum size. Use NewSize as the
    // size if set on the command line.
    _max_young_size = FLAG_IS_CMDLINE(NewSize) ? NewSize : _max_young_size;
    _initial_young_size = _max_young_size;

    // Also update the minimum size if min == initial == max.
    if (_max_heap_byte_size == _min_heap_byte_size) {
      _min_young_size = _max_young_size;
    }
  } else {
    if (FLAG_IS_CMDLINE(NewSize)) {
      // If NewSize is set on the command line, we should use it as
      // the initial size, but make sure it is within the heap bounds.
      _initial_young_size =
        MIN2(_max_young_size, bound_minus_alignment(NewSize, _initial_heap_byte_size));
      _min_young_size = bound_minus_alignment(_initial_young_size, _min_heap_byte_size);
    } else {
      // For the case where NewSize is not set on the command line, use
      // NewRatio to size the initial generation size. Use the current
      // NewSize as the floor, because if NewRatio is overly large, the resulting
      // size can be too small.
      _initial_young_size =
        MIN2(_max_young_size, MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize));
    }
  }

  log_trace(gc, heap)("1: Minimum young " SIZE_FORMAT "  Initial young " SIZE_FORMAT "  Maximum young " SIZE_FORMAT,
                      _min_young_size, _initial_young_size, _max_young_size);

  // At this point the minimum, initial and maximum sizes
  // of the overall heap and of the young generation have been determined.
  // The maximum old size can be determined from the maximum young
  // and maximum heap size since no explicit flags exist
  // for setting the old generation maximum.
  _max_old_size = MAX2(_max_heap_byte_size - _max_young_size, _gen_alignment);

  // If no explicit command line flag has been set for the
  // old generation size, use what is left.
  if (!FLAG_IS_CMDLINE(OldSize)) {
    // The user has not specified any value but the ergonomics
    // may have chosen a value (which may or may not be consistent
    // with the overall heap size).  In either case make
    // the minimum, maximum and initial sizes consistent
    // with the young sizes and the overall heap sizes.
    _min_old_size = _gen_alignment;
    _initial_old_size = MIN2(_max_old_size, MAX2(_initial_heap_byte_size - _initial_young_size, _min_old_size));
    // _max_old_size has already been made consistent above.
  } else {
    // OldSize has been explicitly set on the command line. Use it
    // for the initial size, but make sure the minimum allows a young
    // generation to fit as well.
    // If the user has explicitly set an OldSize that is inconsistent
    // with other command line flags, issue a warning.
    // The generation minimums and the overall heap minimum should
    // be within one generation alignment.
    if (_initial_old_size > _max_old_size) {
      log_warning(gc, ergo)("Inconsistency between maximum heap size and maximum "
                            "generation sizes: using maximum heap = " SIZE_FORMAT
                            ", -XX:OldSize flag is being ignored",
                            _max_heap_byte_size);
      _initial_old_size = _max_old_size;
    }

    _min_old_size = MIN2(_initial_old_size, _min_heap_byte_size - _min_young_size);
  }

  // The initial generation sizes should match the initial heap size,
  // if not issue a warning and resize the generations. This behavior
  // differs from JDK8 where the generation sizes have higher priority
  // than the initial heap size.
  if ((_initial_old_size + _initial_young_size) != _initial_heap_byte_size) {
    log_warning(gc, ergo)("Inconsistency between generation sizes and heap size, resizing "
                          "the generations to fit the heap.");

    size_t desired_young_size = _initial_heap_byte_size - _initial_old_size;
    if (_initial_heap_byte_size < _initial_old_size) {
      // Old gen wants all the memory; use the minimum for young and the rest for old
      _initial_young_size = _min_young_size;
      _initial_old_size = _initial_heap_byte_size - _min_young_size;
    } else if (desired_young_size > _max_young_size) {
      // Need to increase both young and old generation
      _initial_young_size = _max_young_size;
      _initial_old_size = _initial_heap_byte_size - _max_young_size;
    } else if (desired_young_size < _min_young_size) {
      // Need to decrease both young and old generation
      _initial_young_size = _min_young_size;
      _initial_old_size = _initial_heap_byte_size - _min_young_size;
    } else {
      // The young generation boundaries allow us to only update the
      // young generation.
      _initial_young_size = desired_young_size;
    }

    log_trace(gc, heap)("2: Minimum young " SIZE_FORMAT "  Initial young " SIZE_FORMAT "  Maximum young " SIZE_FORMAT,
                    _min_young_size, _initial_young_size, _max_young_size);
  }

  // Write back to flags if necessary.
  if (NewSize != _initial_young_size) {
    FLAG_SET_ERGO(size_t, NewSize, _initial_young_size);
  }

  if (MaxNewSize != _max_young_size) {
    FLAG_SET_ERGO(size_t, MaxNewSize, _max_young_size);
  }

  if (OldSize != _initial_old_size) {
    FLAG_SET_ERGO(size_t, OldSize, _initial_old_size);
  }

  log_trace(gc, heap)("Minimum old " SIZE_FORMAT "  Initial old " SIZE_FORMAT "  Maximum old " SIZE_FORMAT,
                  _min_old_size, _initial_old_size, _max_old_size);

  DEBUG_ONLY(GenCollectorPolicy::assert_size_info();)
}
Code Example #15
File: collectorPolicy.cpp Project: netroby/jdk9-dev
void GenCollectorPolicy::initialize_flags() {
  CollectorPolicy::initialize_flags();

  assert(_gen_alignment != 0, "Generation alignment not set up properly");
  assert(_heap_alignment >= _gen_alignment,
         "heap_alignment: " SIZE_FORMAT " less than gen_alignment: " SIZE_FORMAT,
         _heap_alignment, _gen_alignment);
  assert(_gen_alignment % _space_alignment == 0,
         "gen_alignment: " SIZE_FORMAT " not aligned by space_alignment: " SIZE_FORMAT,
         _gen_alignment, _space_alignment);
  assert(_heap_alignment % _gen_alignment == 0,
         "heap_alignment: " SIZE_FORMAT " not aligned by gen_alignment: " SIZE_FORMAT,
         _heap_alignment, _gen_alignment);

  // All generational heaps have a young gen; handle those flags here

  // Make sure the heap is large enough for two generations
  size_t smallest_new_size = young_gen_size_lower_bound();
  size_t smallest_heap_size = align_size_up(smallest_new_size + old_gen_size_lower_bound(),
                                           _heap_alignment);
  if (MaxHeapSize < smallest_heap_size) {
    FLAG_SET_ERGO(size_t, MaxHeapSize, smallest_heap_size);
    _max_heap_byte_size = MaxHeapSize;
  }
  // If needed, synchronize _min_heap_byte size and _initial_heap_byte_size
  if (_min_heap_byte_size < smallest_heap_size) {
    _min_heap_byte_size = smallest_heap_size;
    if (InitialHeapSize < _min_heap_byte_size) {
      FLAG_SET_ERGO(size_t, InitialHeapSize, smallest_heap_size);
      _initial_heap_byte_size = smallest_heap_size;
    }
  }

  // Make sure NewSize allows an old generation to fit even if set on the command line
  if (FLAG_IS_CMDLINE(NewSize) && NewSize >= _initial_heap_byte_size) {
    log_warning(gc, ergo)("NewSize was set larger than initial heap size, will use initial heap size.");
    FLAG_SET_ERGO(size_t, NewSize, bound_minus_alignment(NewSize, _initial_heap_byte_size));
  }

  // Now take the actual NewSize into account. We will silently increase NewSize
  // if the user specified a smaller or unaligned value.
  size_t bounded_new_size = bound_minus_alignment(NewSize, MaxHeapSize);
  bounded_new_size = MAX2(smallest_new_size, (size_t)align_size_down(bounded_new_size, _gen_alignment));
  if (bounded_new_size != NewSize) {
    FLAG_SET_ERGO(size_t, NewSize, bounded_new_size);
  }
  _min_young_size = smallest_new_size;
  _initial_young_size = NewSize;

  if (!FLAG_IS_DEFAULT(MaxNewSize)) {
    if (MaxNewSize >= MaxHeapSize) {
      // Make sure there is room for an old generation
      size_t smaller_max_new_size = MaxHeapSize - _gen_alignment;
      if (FLAG_IS_CMDLINE(MaxNewSize)) {
        log_warning(gc, ergo)("MaxNewSize (" SIZE_FORMAT "k) is equal to or greater than the entire "
                              "heap (" SIZE_FORMAT "k).  A new max generation size of " SIZE_FORMAT "k will be used.",
                              MaxNewSize/K, MaxHeapSize/K, smaller_max_new_size/K);
      }
      FLAG_SET_ERGO(size_t, MaxNewSize, smaller_max_new_size);
      if (NewSize > MaxNewSize) {
        FLAG_SET_ERGO(size_t, NewSize, MaxNewSize);
        _initial_young_size = NewSize;
      }
    } else if (MaxNewSize < _initial_young_size) {
      FLAG_SET_ERGO(size_t, MaxNewSize, _initial_young_size);
    } else if (!is_size_aligned(MaxNewSize, _gen_alignment)) {
      FLAG_SET_ERGO(size_t, MaxNewSize, align_size_down(MaxNewSize, _gen_alignment));
    }
    _max_young_size = MaxNewSize;
  }

  if (NewSize > MaxNewSize) {
    // At this point this should only happen if the user specifies a large NewSize and/or
    // a small (but not too small) MaxNewSize.
    if (FLAG_IS_CMDLINE(MaxNewSize)) {
      log_warning(gc, ergo)("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). "
                            "A new max generation size of " SIZE_FORMAT "k will be used.",
                            NewSize/K, MaxNewSize/K, NewSize/K);
    }
    FLAG_SET_ERGO(size_t, MaxNewSize, NewSize);
    _max_young_size = MaxNewSize;
  }

  if (SurvivorRatio < 1 || NewRatio < 1) {
    vm_exit_during_initialization("Invalid young gen ratio specified");
  }

  if (OldSize < old_gen_size_lower_bound()) {
    FLAG_SET_ERGO(size_t, OldSize, old_gen_size_lower_bound());
  }
  if (!is_size_aligned(OldSize, _gen_alignment)) {
    FLAG_SET_ERGO(size_t, OldSize, align_size_down(OldSize, _gen_alignment));
  }

  if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(MaxHeapSize)) {
    // NewRatio will be used later to set the young generation size so we use
    // it to calculate how big the heap should be based on the requested OldSize
    // and NewRatio.
    assert(NewRatio > 0, "NewRatio should have been set up earlier");
    size_t calculated_heapsize = (OldSize / NewRatio) * (NewRatio + 1);

    calculated_heapsize = align_size_up(calculated_heapsize, _heap_alignment);
    FLAG_SET_ERGO(size_t, MaxHeapSize, calculated_heapsize);
    _max_heap_byte_size = MaxHeapSize;
    FLAG_SET_ERGO(size_t, InitialHeapSize, calculated_heapsize);
    _initial_heap_byte_size = InitialHeapSize;
  }

  // Adjust NewSize and OldSize or MaxHeapSize to match each other
  if (NewSize + OldSize > MaxHeapSize) {
    if (FLAG_IS_CMDLINE(MaxHeapSize)) {
      // Somebody has set a maximum heap size with the intention that we should not
      // exceed it. Adjust New/OldSize as necessary.
      size_t calculated_size = NewSize + OldSize;
      double shrink_factor = (double) MaxHeapSize / calculated_size;
      size_t smaller_new_size = align_size_down((size_t)(NewSize * shrink_factor), _gen_alignment);
      FLAG_SET_ERGO(size_t, NewSize, MAX2(young_gen_size_lower_bound(), smaller_new_size));
      _initial_young_size = NewSize;

      // OldSize is already aligned because above we aligned MaxHeapSize to
      // _heap_alignment, and we just made sure that NewSize is aligned to
      // _gen_alignment. In initialize_flags() we verified that _heap_alignment
      // is a multiple of _gen_alignment.
      FLAG_SET_ERGO(size_t, OldSize, MaxHeapSize - NewSize);
    } else {
      FLAG_SET_ERGO(size_t, MaxHeapSize, align_size_up(NewSize + OldSize, _heap_alignment));
      _max_heap_byte_size = MaxHeapSize;
    }
  }

  // Update NewSize, if possible, to avoid sizing the young gen too small when only
  // OldSize is set on the command line.
  if (FLAG_IS_CMDLINE(OldSize) && !FLAG_IS_CMDLINE(NewSize)) {
    if (OldSize < _initial_heap_byte_size) {
      size_t new_size = _initial_heap_byte_size - OldSize;
      // Need to compare against the flag value for max since _max_young_size
      // might not have been set yet.
      if (new_size >= _min_young_size && new_size <= MaxNewSize) {
        FLAG_SET_ERGO(size_t, NewSize, new_size);
        _initial_young_size = NewSize;
      }
    }
  }

  always_do_update_barrier = UseConcMarkSweepGC;

  DEBUG_ONLY(GenCollectorPolicy::assert_flags();)
}
Code Example #16
// Values set on the command line win over any ergonomically
// set command line parameters.
// Ergonomic choice of parameters are done before this
// method is called.  Values for command line parameters such as NewSize
// and MaxNewSize feed those ergonomic choices into this method.
// This method makes the final generation sizings consistent with
// themselves and with overall heap sizings.
// In the absence of explicitly set command line flags, policies
// such as the use of NewRatio are used to size the generation.
void GenCollectorPolicy::initialize_size_info() {
  CollectorPolicy::initialize_size_info();

  // _space_alignment is used for alignment within a generation.
  // There is additional alignment done downstream for some
  // collectors that sometimes causes unwanted rounding up of
  // generation sizes.

  // Determine maximum size of gen0

  size_t max_new_size = 0;
  if (!FLAG_IS_DEFAULT(MaxNewSize)) {
    max_new_size = MaxNewSize;
  } else {
    max_new_size = scale_by_NewRatio_aligned(_max_heap_byte_size);
    // Bound the maximum size by NewSize below (since it historically
    // would have been NewSize and because the NewRatio calculation could
    // yield a size that is too small) and bound it by MaxNewSize above.
    // Ergonomics plays here by previously calculating the desired
    // NewSize and MaxNewSize.
    max_new_size = MIN2(MAX2(max_new_size, NewSize), MaxNewSize);
  }
  assert(max_new_size > 0, "All paths should set max_new_size");

  // Given the maximum gen0 size, determine the initial and
  // minimum gen0 sizes.

  if (_max_heap_byte_size == _min_heap_byte_size) {
    // The maximum and minimum heap sizes are the same, so
    // the generation's minimum and initial sizes must be the
    // same as its maximum.
    _min_gen0_size = max_new_size;
    _initial_gen0_size = max_new_size;
    _max_gen0_size = max_new_size;
  } else {
    size_t desired_new_size = 0;
    if (FLAG_IS_CMDLINE(NewSize)) {
      // If NewSize is set on the command line, we must use it as
      // the initial size and it also makes sense to use it as the
      // lower limit.
      _min_gen0_size = NewSize;
      desired_new_size = NewSize;
      max_new_size = MAX2(max_new_size, NewSize);
    } else if (FLAG_IS_ERGO(NewSize)) {
      // If NewSize is set ergonomically, we should use it as a lower
      // limit, but use NewRatio to calculate the initial size.
      _min_gen0_size = NewSize;
      desired_new_size =
        MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize);
      max_new_size = MAX2(max_new_size, NewSize);
    } else {
      // For the case where NewSize is the default, use NewRatio
      // to size the minimum and initial generation sizes.
      // Use the default NewSize as the floor for these values.  If
      // NewRatio is overly large, the resulting sizes can be too
      // small.
      _min_gen0_size = MAX2(scale_by_NewRatio_aligned(_min_heap_byte_size), NewSize);
      desired_new_size =
        MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize);
    }

    assert(_min_gen0_size > 0, "Sanity check");
    _initial_gen0_size = desired_new_size;
    _max_gen0_size = max_new_size;

    // At this point the desirable initial and minimum sizes have been
    // determined without regard to the maximum sizes.

    // Bound the sizes by the corresponding overall heap sizes.
    _min_gen0_size = bound_minus_alignment(_min_gen0_size, _min_heap_byte_size);
    _initial_gen0_size = bound_minus_alignment(_initial_gen0_size, _initial_heap_byte_size);
    _max_gen0_size = bound_minus_alignment(_max_gen0_size, _max_heap_byte_size);

    // At this point all three sizes have been checked against the
    // maximum sizes but have not been checked for consistency
    // among the three.

    // Final check min <= initial <= max
    _min_gen0_size = MIN2(_min_gen0_size, _max_gen0_size);
    _initial_gen0_size = MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size);
    _min_gen0_size = MIN2(_min_gen0_size, _initial_gen0_size);
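    // After these three clamps the invariant
    // _min_gen0_size <= _initial_gen0_size <= _max_gen0_size holds.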
  }

  // Write back to flags if necessary
  if (NewSize != _initial_gen0_size) {
    FLAG_SET_ERGO(uintx, NewSize, _initial_gen0_size);
  }

  if (MaxNewSize != _max_gen0_size) {
    FLAG_SET_ERGO(uintx, MaxNewSize, _max_gen0_size);
  }

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("1: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
      SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
      _min_gen0_size, _initial_gen0_size, _max_gen0_size);
  }

  DEBUG_ONLY(GenCollectorPolicy::assert_size_info();)
}
Code Example #17
void TwoGenerationCollectorPolicy::initialize_size_info() {
  GenCollectorPolicy::initialize_size_info();

  // At this point the minimum, initial and maximum sizes
  // of the overall heap and of gen0 have been determined.
  // The maximum gen1 size can be determined from the maximum gen0
  // and maximum heap size since no explicit flags exist
  // for setting the gen1 maximum.
  _max_gen1_size = MAX2(_max_heap_byte_size - _max_gen0_size, _gen_alignment);

  // If no explicit command line flag has been set for the
  // gen1 size, use what is left for gen1.
  if (!FLAG_IS_CMDLINE(OldSize)) {
    // The user has not specified any value but the ergonomics
    // may have chosen a value (which may or may not be consistent
    // with the overall heap size).  In either case make
    // the minimum, maximum and initial sizes consistent
    // with the gen0 sizes and the overall heap sizes.
    _min_gen1_size = MAX2(_min_heap_byte_size - _min_gen0_size, _gen_alignment);
    _initial_gen1_size = MAX2(_initial_heap_byte_size - _initial_gen0_size, _gen_alignment);
    // _max_gen1_size has already been made consistent above
    FLAG_SET_ERGO(uintx, OldSize, _initial_gen1_size);
  } else {
    // It's been explicitly set on the command line.  Use the
    // OldSize and then determine the consequences.
    _min_gen1_size = MIN2(OldSize, _min_heap_byte_size - _min_gen0_size);
    _initial_gen1_size = OldSize;

    // If the user has explicitly set an OldSize that is inconsistent
    // with other command line flags, issue a warning.
    // The generation minimums and the overall heap minimum should
    // be within one generation alignment.
    if ((_min_gen1_size + _min_gen0_size + _gen_alignment) < _min_heap_byte_size) {
      warning("Inconsistency between minimum heap size and minimum "
              "generation sizes: using minimum heap = " SIZE_FORMAT,
              _min_heap_byte_size);
    }
    if (OldSize > _max_gen1_size) {
      warning("Inconsistency between maximum heap size and maximum "
              "generation sizes: using maximum heap = " SIZE_FORMAT
              " -XX:OldSize flag is being ignored",
              _max_heap_byte_size);
    }
    // If there is an inconsistency between the OldSize and the minimum and/or
    // initial size of gen0, since OldSize was explicitly set, OldSize wins.
    if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size, _min_heap_byte_size)) {
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("2: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
              SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
              _min_gen0_size, _initial_gen0_size, _max_gen0_size);
      }
    }
    // Initial size
    if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size,
                          _initial_heap_byte_size)) {
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("3: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
          SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
          _min_gen0_size, _initial_gen0_size, _max_gen0_size);
      }
    }
  }
  // Enforce the maximum gen1 size.
  _min_gen1_size = MIN2(_min_gen1_size, _max_gen1_size);

  // Check that min gen1 <= initial gen1 <= max gen1
  _initial_gen1_size = MAX2(_initial_gen1_size, _min_gen1_size);
  _initial_gen1_size = MIN2(_initial_gen1_size, _max_gen1_size);

  // Write back to flags if necessary
  if (NewSize != _initial_gen0_size) {
    FLAG_SET_ERGO(uintx, NewSize, _initial_gen0_size);
  }

  if (MaxNewSize != _max_gen0_size) {
    FLAG_SET_ERGO(uintx, MaxNewSize, _max_gen0_size);
  }

  if (OldSize != _initial_gen1_size) {
    FLAG_SET_ERGO(uintx, OldSize, _initial_gen1_size);
  }

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Minimum gen1 " SIZE_FORMAT "  Initial gen1 "
      SIZE_FORMAT "  Maximum gen1 " SIZE_FORMAT,
      _min_gen1_size, _initial_gen1_size, _max_gen1_size);
  }

  DEBUG_ONLY(TwoGenerationCollectorPolicy::assert_size_info();)
}
Code Example #18
void CodeCache::initialize_heaps() {
  // Determine size of compiler buffers
  size_t code_buffers_size = 0;
#ifdef COMPILER1
  // C1 temporary code buffers (see Compiler::init_buffer_blob())
  const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
  code_buffers_size += c1_count * Compiler::code_buffer_size();
#endif
#ifdef COMPILER2
  // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
  const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
  // Initial size of constant table (this may be increased if a compiled method needs more space)
  code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
#endif

  // Calculate default CodeHeap sizes if not set by user
  if (!FLAG_IS_CMDLINE(NonNMethodCodeHeapSize) && !FLAG_IS_CMDLINE(ProfiledCodeHeapSize)
      && !FLAG_IS_CMDLINE(NonProfiledCodeHeapSize)) {
    // Increase default NonNMethodCodeHeapSize to account for compiler buffers
    FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, NonNMethodCodeHeapSize + code_buffers_size);

    // Check if we have enough space for the non-nmethod code heap
    if (ReservedCodeCacheSize > NonNMethodCodeHeapSize) {
      // Use the default value for NonNMethodCodeHeapSize and one half of the
      // remaining size for non-profiled methods and one half for profiled methods
      size_t remaining_size = ReservedCodeCacheSize - NonNMethodCodeHeapSize;
      size_t profiled_size = remaining_size / 2;
      size_t non_profiled_size = remaining_size - profiled_size;
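      // E.g. (illustrative numbers): ReservedCodeCacheSize = 240M and
      // NonNMethodCodeHeapSize = 16M leave 224M, split into 112M for
      // profiled and 112M for non-profiled methods.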
      FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
      FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);
    } else {
      // Use all space for the non-nmethod heap and set other heaps to minimal size
      FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, ReservedCodeCacheSize - os::vm_page_size() * 2);
      FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, os::vm_page_size());
      FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, os::vm_page_size());
    }
  }

  // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
  if(!heap_available(CodeBlobType::MethodProfiled)) {
    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, NonProfiledCodeHeapSize + ProfiledCodeHeapSize);
    FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0);
  }
  // We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
  if(!heap_available(CodeBlobType::MethodNonProfiled)) {
    FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, NonNMethodCodeHeapSize + NonProfiledCodeHeapSize);
    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
  }

  // Make sure we have enough space for VM internal code
  uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
  if (NonNMethodCodeHeapSize < (min_code_cache_size + code_buffers_size)) {
    vm_exit_during_initialization("Not enough space in non-nmethod code heap to run VM.");
  }
  guarantee(NonProfiledCodeHeapSize + ProfiledCodeHeapSize + NonNMethodCodeHeapSize <= ReservedCodeCacheSize, "Size check");

  // Align CodeHeaps
  size_t alignment = heap_alignment();
  size_t non_method_size = align_size_up(NonNMethodCodeHeapSize, alignment);
  size_t profiled_size   = align_size_down(ProfiledCodeHeapSize, alignment);

  // Reserve one contiguous chunk of memory for CodeHeaps and split it into
  // parts for the individual heaps. The memory layout looks like this:
  // ---------- high -----------
  //    Non-profiled nmethods
  //      Profiled nmethods
  //         Non-nmethods
  // ---------- low ------------
  ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
  ReservedSpace non_method_space    = rs.first_part(non_method_size);
  ReservedSpace rest                = rs.last_part(non_method_size);
  ReservedSpace profiled_space      = rest.first_part(profiled_size);
  ReservedSpace non_profiled_space  = rest.last_part(profiled_size);

  // Non-nmethods (stubs, adapters, ...)
  add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
  // Tier 2 and tier 3 (profiled) methods
  add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
  // Tier 1 and tier 4 (non-profiled) methods and native methods
  add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
}
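
As a sanity check of the default split above, here is the same arithmetic in a standalone sketch. The numbers are assumed for illustration: a 240M ReservedCodeCacheSize and a non-nmethod heap that comes to 8M after adding compiler buffers; the real values come from the flags and from Compiler::code_buffer_size() / C2Compiler::initial_code_buffer_size().

#include <cstdio>

int main() {
  const size_t M = 1024 * 1024;
  // Assumed inputs for illustration only.
  size_t reserved_code_cache = 240 * M;
  size_t non_nmethod         = 6 * M + 2 * M;  // default + compiler buffers

  if (reserved_code_cache > non_nmethod) {
    size_t remaining    = reserved_code_cache - non_nmethod;
    size_t profiled     = remaining / 2;
    size_t non_profiled = remaining - profiled;  // absorbs any odd byte
    std::printf("non-nmethod %zuM, profiled %zuM, non-profiled %zuM\n",
                non_nmethod / M, profiled / M, non_profiled / M);
  }
  return 0;
}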
Code Example #19
void VM_Version::initialize() {
  _features = determine_features();
  PrefetchCopyIntervalInBytes = prefetch_copy_interval_in_bytes();
  PrefetchScanIntervalInBytes = prefetch_scan_interval_in_bytes();
  PrefetchFieldsAhead         = prefetch_fields_ahead();

  assert(0 <= AllocatePrefetchInstr && AllocatePrefetchInstr <= 1, "invalid value");
  if( AllocatePrefetchInstr < 0 ) AllocatePrefetchInstr = 0;
  if( AllocatePrefetchInstr > 1 ) AllocatePrefetchInstr = 0;

  // Allocation prefetch settings
  intx cache_line_size = prefetch_data_size();
  if( cache_line_size > AllocatePrefetchStepSize )
    AllocatePrefetchStepSize = cache_line_size;

  assert(AllocatePrefetchLines > 0, "invalid value");
  if( AllocatePrefetchLines < 1 )     // set valid value in product VM
    AllocatePrefetchLines = 3;
  assert(AllocateInstancePrefetchLines > 0, "invalid value");
  if( AllocateInstancePrefetchLines < 1 ) // set valid value in product VM
    AllocateInstancePrefetchLines = 1;

  AllocatePrefetchDistance = allocate_prefetch_distance();
  AllocatePrefetchStyle    = allocate_prefetch_style();

  assert((AllocatePrefetchDistance % AllocatePrefetchStepSize) == 0 &&
         (AllocatePrefetchDistance > 0), "invalid value");
  if ((AllocatePrefetchDistance % AllocatePrefetchStepSize) != 0 ||
      (AllocatePrefetchDistance <= 0)) {
    AllocatePrefetchDistance = AllocatePrefetchStepSize;
  }

  if (AllocatePrefetchStyle == 3 && !has_blk_init()) {
    warning("BIS instructions are not available on this CPU");
    FLAG_SET_DEFAULT(AllocatePrefetchStyle, 1);
  }

  guarantee(VM_Version::has_v9(), "only SPARC v9 is supported");

  assert(ArraycopySrcPrefetchDistance < 4096, "invalid value");
  if (ArraycopySrcPrefetchDistance >= 4096)
    ArraycopySrcPrefetchDistance = 4064;
  assert(ArraycopyDstPrefetchDistance < 4096, "invalid value");
  if (ArraycopyDstPrefetchDistance >= 4096)
    ArraycopyDstPrefetchDistance = 4064;

  UseSSE = 0; // Only on x86 and x64

  _supports_cx8 = has_v9();
  _supports_atomic_getset4 = true; // swap instruction

  // There are Fujitsu Sparc64 CPUs which support blk_init as well so
  // we have to take this check out of the 'is_niagara()' block below.
  if (has_blk_init()) {
    // When using CMS or G1, we cannot use memset() in BOT updates
    // because the sun4v/CMT version in libc_psr uses BIS which
    // exposes "phantom zeros" to concurrent readers. See 6948537.
    if (FLAG_IS_DEFAULT(UseMemSetInBOT) && (UseConcMarkSweepGC || UseG1GC)) {
      FLAG_SET_DEFAULT(UseMemSetInBOT, false);
    }
    // Issue a stern warning if the user has explicitly set
    // UseMemSetInBOT (it is known to cause issues), but allow
    // use for experimentation and debugging.
    if (UseConcMarkSweepGC || UseG1GC) {
      if (UseMemSetInBOT) {
        assert(!FLAG_IS_DEFAULT(UseMemSetInBOT), "Error");
        warning("Experimental flag -XX:+UseMemSetInBOT is known to cause instability"
                " on sun4v; please understand that you are using at your own risk!");
      }
    }
  }

  if (is_niagara()) {
    // Indirect branch is the same cost as direct
    if (FLAG_IS_DEFAULT(UseInlineCaches)) {
      FLAG_SET_DEFAULT(UseInlineCaches, false);
    }
    // Align loops on a single instruction boundary.
    if (FLAG_IS_DEFAULT(OptoLoopAlignment)) {
      FLAG_SET_DEFAULT(OptoLoopAlignment, 4);
    }
#ifdef _LP64
    // 32-bit oops don't make sense for the 64-bit VM on sparc
    // since the 32-bit VM has the same registers and smaller objects.
    Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
    Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
#endif // _LP64
#ifdef COMPILER2
    // Indirect branch is the same cost as direct
    if (FLAG_IS_DEFAULT(UseJumpTables)) {
      FLAG_SET_DEFAULT(UseJumpTables, true);
    }
    // Single-issue, so entry and loop tops are
    // aligned on a single instruction boundary
    if (FLAG_IS_DEFAULT(InteriorEntryAlignment)) {
      FLAG_SET_DEFAULT(InteriorEntryAlignment, 4);
    }
    if (is_niagara_plus()) {
      if (has_blk_init() && UseTLAB &&
          FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
        // Use BIS instruction for TLAB allocation prefetch.
        FLAG_SET_ERGO(intx, AllocatePrefetchInstr, 1);
        if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
          FLAG_SET_ERGO(intx, AllocatePrefetchStyle, 3);
        }
        if (FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
          // Use smaller prefetch distance with BIS
          FLAG_SET_DEFAULT(AllocatePrefetchDistance, 64);
        }
      }
      if (is_T4()) {
        // Double number of prefetched cache lines on T4
        // since L2 cache line size is smaller (32 bytes).
        if (FLAG_IS_DEFAULT(AllocatePrefetchLines)) {
          FLAG_SET_ERGO(intx, AllocatePrefetchLines, AllocatePrefetchLines*2);
        }
        if (FLAG_IS_DEFAULT(AllocateInstancePrefetchLines)) {
          FLAG_SET_ERGO(intx, AllocateInstancePrefetchLines, AllocateInstancePrefetchLines*2);
        }
      }
      if (AllocatePrefetchStyle != 3 && FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
        // Use different prefetch distance without BIS
        FLAG_SET_DEFAULT(AllocatePrefetchDistance, 256);
      }
      if (AllocatePrefetchInstr == 1) {
        // Need a space at the end of TLAB for BIS since it
        // will fault when accessing memory outside of heap.

        // +1 for rounding up to next cache line, +1 to be safe
        int lines = AllocatePrefetchLines + 2;
        int step_size = AllocatePrefetchStepSize;
        int distance = AllocatePrefetchDistance;
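        // Convert bytes to heap words; this reserve is left free at the TLAB end.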
        _reserve_for_allocation_prefetch = (distance + step_size*lines)/(int)HeapWordSize;
      }
    }
#endif
  }

  // Use hardware population count instruction if available.
  if (has_hardware_popc()) {
    if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
      FLAG_SET_DEFAULT(UsePopCountInstruction, true);
    }
  } else if (UsePopCountInstruction) {
    warning("POPC instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UsePopCountInstruction, false);
  }

  // T4 and newer Sparc cpus have new compare and branch instruction.
  if (has_cbcond()) {
    if (FLAG_IS_DEFAULT(UseCBCond)) {
      FLAG_SET_DEFAULT(UseCBCond, true);
    }
  } else if (UseCBCond) {
    warning("CBCOND instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UseCBCond, false);
  }

  assert(BlockZeroingLowLimit > 0, "invalid value");
  if (has_block_zeroing()) {
    if (FLAG_IS_DEFAULT(UseBlockZeroing)) {
      FLAG_SET_DEFAULT(UseBlockZeroing, true);
    }
  } else if (UseBlockZeroing) {
    warning("BIS zeroing instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseBlockZeroing, false);
  }

  assert(BlockCopyLowLimit > 0, "invalid value");
  if (has_block_zeroing()) { // has_blk_init() && is_T4(): core's local L2 cache
    if (FLAG_IS_DEFAULT(UseBlockCopy)) {
      FLAG_SET_DEFAULT(UseBlockCopy, true);
    }
  } else if (UseBlockCopy) {
    warning("BIS instructions are not available or expensive on this CPU");
    FLAG_SET_DEFAULT(UseBlockCopy, false);
  }

#ifdef COMPILER2
  // T4 and newer Sparc cpus have fast RDPC.
  if (has_fast_rdpc() && FLAG_IS_DEFAULT(UseRDPCForConstantTableBase)) {
    FLAG_SET_DEFAULT(UseRDPCForConstantTableBase, true);
  }

  // Currently not supported anywhere.
  FLAG_SET_DEFAULT(UseFPUForSpilling, false);

  MaxVectorSize = 8;

  assert((InteriorEntryAlignment % relocInfo::addr_unit()) == 0, "alignment is not a multiple of NOP size");
#endif

  assert((CodeEntryAlignment % relocInfo::addr_unit()) == 0, "alignment is not a multiple of NOP size");
  assert((OptoLoopAlignment % relocInfo::addr_unit()) == 0, "alignment is not a multiple of NOP size");

  char buf[512];
  jio_snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
               (has_v9() ? ", v9" : (has_v8() ? ", v8" : "")),
               (has_hardware_popc() ? ", popc" : ""),
               (has_vis1() ? ", vis1" : ""),
               (has_vis2() ? ", vis2" : ""),
               (has_vis3() ? ", vis3" : ""),
               (has_blk_init() ? ", blk_init" : ""),
               (has_cbcond() ? ", cbcond" : ""),
               (has_aes() ? ", aes" : ""),
               (is_ultra3() ? ", ultra3" : ""),
               (is_sun4v() ? ", sun4v" : ""),
               (is_niagara_plus() ? ", niagara_plus" : (is_niagara() ? ", niagara" : "")),
               (is_sparc64() ? ", sparc64" : ""),
               (!has_hardware_mul32() ? ", no-mul32" : ""),
               (!has_hardware_div32() ? ", no-div32" : ""),
               (!has_hardware_fsmuld() ? ", no-fsmuld" : ""));

  // buf starts with ", " or is empty
  _features_str = strdup(strlen(buf) > 2 ? buf + 2 : buf);

  // UseVIS is set to the smaller of what the hardware supports and what
  // the command line requires, i.e., you cannot set UseVIS to 3 on an
  // older UltraSparc that does not support it.
  if (UseVIS > 3) UseVIS=3;
  if (UseVIS < 0) UseVIS=0;
  if (!has_vis3()) // Drop to 2 if no VIS3 support
    UseVIS = MIN2((intx)2,UseVIS);
  if (!has_vis2()) // Drop to 1 if no VIS2 support
    UseVIS = MIN2((intx)1,UseVIS);
  if (!has_vis1()) // Drop to 0 if no VIS1 support
    UseVIS = 0;

  // T2 and above should have support for AES instructions
  if (has_aes()) {
    if (UseVIS > 0) { // AES intrinsics use FXOR instruction which is VIS1
      if (FLAG_IS_DEFAULT(UseAES)) {
        FLAG_SET_DEFAULT(UseAES, true);
      }
      if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
        FLAG_SET_DEFAULT(UseAESIntrinsics, true);
      }
      // Disable both AES flags if either is disabled on the command line.
      if (!UseAES || !UseAESIntrinsics) {
        FLAG_SET_DEFAULT(UseAES, false);
        FLAG_SET_DEFAULT(UseAESIntrinsics, false);
      }
    } else {
      if (UseAES || UseAESIntrinsics) {
        warning("SPARC AES intrinsics require VIS1 instruction support. Intrinsics will be disabled.");
        if (UseAES) {
          FLAG_SET_DEFAULT(UseAES, false);
        }
        if (UseAESIntrinsics) {
          FLAG_SET_DEFAULT(UseAESIntrinsics, false);
        }
      }
    }
  } else if (UseAES || UseAESIntrinsics) {
    warning("AES instructions are not available on this CPU");
    if (UseAES) {
      FLAG_SET_DEFAULT(UseAES, false);
    }
    if (UseAESIntrinsics) {
      FLAG_SET_DEFAULT(UseAESIntrinsics, false);
    }
  }

  if (FLAG_IS_DEFAULT(ContendedPaddingWidth) &&
    (cache_line_size > ContendedPaddingWidth))
    ContendedPaddingWidth = cache_line_size;

#ifndef PRODUCT
  if (PrintMiscellaneous && Verbose) {
    tty->print("Allocation");
    if (AllocatePrefetchStyle <= 0) {
      tty->print_cr(": no prefetching");
    } else {
      tty->print(" prefetching: ");
      if (AllocatePrefetchInstr == 0) {
        tty->print("PREFETCH");
      } else if (AllocatePrefetchInstr == 1) {
        tty->print("BIS");
      }
      if (AllocatePrefetchLines > 1) {
        tty->print_cr(" at distance %d, %d lines of %d bytes", AllocatePrefetchDistance, AllocatePrefetchLines, AllocatePrefetchStepSize);
      } else {
        tty->print_cr(" at distance %d, one line of %d bytes", AllocatePrefetchDistance, AllocatePrefetchStepSize);
      }
    }
    if (PrefetchCopyIntervalInBytes > 0) {
      tty->print_cr("PrefetchCopyIntervalInBytes %d", PrefetchCopyIntervalInBytes);
    }
    if (PrefetchScanIntervalInBytes > 0) {
      tty->print_cr("PrefetchScanIntervalInBytes %d", PrefetchScanIntervalInBytes);
    }
    if (PrefetchFieldsAhead > 0) {
      tty->print_cr("PrefetchFieldsAhead %d", PrefetchFieldsAhead);
    }
    if (ContendedPaddingWidth > 0) {
      tty->print_cr("ContendedPaddingWidth %d", ContendedPaddingWidth);
    }
  }
#endif // PRODUCT
}
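
One step in the T4 path above that is easy to misread is the end-of-TLAB reserve for BIS: two extra lines are added for safety before the byte count is converted to heap words. A quick standalone check of that arithmetic, with all values assumed for illustration: a default AllocatePrefetchLines of 3 doubled to 6 on T4, a 64-byte step size, a BIS prefetch distance of 64, and 8-byte heap words on a 64-bit VM.

#include <cstdio>

int main() {
  // Assumed values mirroring the T4 path above.
  int allocate_prefetch_lines = 6;
  int step_size = 64;
  int distance  = 64;
  int heap_word_size = 8;

  // +1 line for rounding up to the next cache line, +1 to be safe.
  int lines = allocate_prefetch_lines + 2;
  int reserve_words = (distance + step_size * lines) / heap_word_size;
  std::printf("reserve %d heap words (%d bytes) at the TLAB end\n",
              reserve_words, reserve_words * heap_word_size);
  return 0;
}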
Code Example #20
void CodeCache::initialize_heaps() {
  bool non_nmethod_set      = FLAG_IS_CMDLINE(NonNMethodCodeHeapSize);
  bool profiled_set         = FLAG_IS_CMDLINE(ProfiledCodeHeapSize);
  bool non_profiled_set     = FLAG_IS_CMDLINE(NonProfiledCodeHeapSize);
  size_t min_size           = os::vm_page_size();
  size_t cache_size         = ReservedCodeCacheSize;
  size_t non_nmethod_size   = NonNMethodCodeHeapSize;
  size_t profiled_size      = ProfiledCodeHeapSize;
  size_t non_profiled_size  = NonProfiledCodeHeapSize;
  // Check if total size set via command line flags exceeds the reserved size
  check_heap_sizes((non_nmethod_set  ? non_nmethod_size  : min_size),
                   (profiled_set     ? profiled_size     : min_size),
                   (non_profiled_set ? non_profiled_size : min_size),
                   cache_size,
                   non_nmethod_set && profiled_set && non_profiled_set);

  // Determine size of compiler buffers
  size_t code_buffers_size = 0;
#ifdef COMPILER1
  // C1 temporary code buffers (see Compiler::init_buffer_blob())
  const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
  code_buffers_size += c1_count * Compiler::code_buffer_size();
#endif
#ifdef COMPILER2
  // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
  const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
  // Initial size of constant table (this may be increased if a compiled method needs more space)
  code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
#endif

  // Increase default non_nmethod_size to account for compiler buffers
  if (!non_nmethod_set) {
    non_nmethod_size += code_buffers_size;
  }
  // Calculate default CodeHeap sizes if not set by user
  if (!non_nmethod_set && !profiled_set && !non_profiled_set) {
    // Check if we have enough space for the non-nmethod code heap
    if (cache_size > non_nmethod_size) {
      // Use the default value for non_nmethod_size and one half of the
      // remaining size for non-profiled and one half for profiled methods
      size_t remaining_size = cache_size - non_nmethod_size;
      profiled_size = remaining_size / 2;
      non_profiled_size = remaining_size - profiled_size;
    } else {
      // Use all space for the non-nmethod heap and set other heaps to minimal size
      non_nmethod_size = cache_size - 2 * min_size;
      profiled_size = min_size;
      non_profiled_size = min_size;
    }
  } else if (!non_nmethod_set || !profiled_set || !non_profiled_set) {
    // The user explicitly set some code heap sizes. Increase or decrease the (default)
    // sizes of the other code heaps accordingly. First adapt non-profiled and profiled
    // code heap sizes and then only change non-nmethod code heap size if still necessary.
    intx diff_size = cache_size - (non_nmethod_size + profiled_size + non_profiled_size);
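    // A positive diff_size is unclaimed space to hand out; a negative one
    // means the flag values oversubscribe the reserved cache.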
    if (non_profiled_set) {
      if (!profiled_set) {
        // Adapt size of profiled code heap
        if (diff_size < 0 && ((intx)profiled_size + diff_size) <= 0) {
          // Not enough space available, set to minimum size
          diff_size += profiled_size - min_size;
          profiled_size = min_size;
        } else {
          profiled_size += diff_size;
          diff_size = 0;
        }
      }
    } else if (profiled_set) {
      // Adapt size of non-profiled code heap
      if (diff_size < 0 && ((intx)non_profiled_size + diff_size) <= 0) {
        // Not enough space available, set to minimum size
        diff_size += non_profiled_size - min_size;
        non_profiled_size = min_size;
      } else {
        non_profiled_size += diff_size;
        diff_size = 0;
      }
    } else if (non_nmethod_set) {
      // Distribute remaining size between profiled and non-profiled code heaps
      diff_size = cache_size - non_nmethod_size;
      profiled_size = diff_size / 2;
      non_profiled_size = diff_size - profiled_size;
      diff_size = 0;
    }
    if (diff_size != 0) {
      // Use non-nmethod code heap for remaining space requirements
      assert(!non_nmethod_set && ((intx)non_nmethod_size + diff_size) > 0, "sanity");
      non_nmethod_size += diff_size;
    }
  }

  // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
  if (!heap_available(CodeBlobType::MethodProfiled)) {
    non_profiled_size += profiled_size;
    profiled_size = 0;
  }
  // We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
  if (!heap_available(CodeBlobType::MethodNonProfiled)) {
    non_nmethod_size += non_profiled_size;
    non_profiled_size = 0;
  }
  // Make sure we have enough space for VM internal code
  uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
  if (non_nmethod_size < (min_code_cache_size + code_buffers_size)) {
    vm_exit_during_initialization(err_msg(
        "Not enough space in non-nmethod code heap to run VM: %zuK < %zuK",
        non_nmethod_size/K, (min_code_cache_size + code_buffers_size)/K));
  }

  // Verify sizes and update flag values
  assert(non_profiled_size + profiled_size + non_nmethod_size == cache_size, "Invalid code heap sizes");
  FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, non_nmethod_size);
  FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
  FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);

  // Align CodeHeaps
  size_t alignment = heap_alignment();
  non_nmethod_size = align_size_up(non_nmethod_size, alignment);
  profiled_size   = align_size_down(profiled_size, alignment);

  // Reserve one contiguous chunk of memory for CodeHeaps and split it into
  // parts for the individual heaps. The memory layout looks like this:
  // ---------- high -----------
  //    Non-profiled nmethods
  //      Profiled nmethods
  //         Non-nmethods
  // ---------- low ------------
  ReservedCodeSpace rs = reserve_heap_memory(cache_size);
  ReservedSpace non_method_space    = rs.first_part(non_nmethod_size);
  ReservedSpace rest                = rs.last_part(non_nmethod_size);
  ReservedSpace profiled_space      = rest.first_part(profiled_size);
  ReservedSpace non_profiled_space  = rest.last_part(profiled_size);

  // Non-nmethods (stubs, adapters, ...)
  add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
  // Tier 2 and tier 3 (profiled) methods
  add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
  // Tier 1 and tier 4 (non-profiled) methods and native methods
  add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
}
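
To see the redistribution branch at work, suppose only ProfiledCodeHeapSize was set on the command line. With assumed numbers (a 100M cache, 8M non-nmethod and 40M non-profiled defaults, 60M profiled requested), the slack is -8M and the non-profiled heap shrinks to fit. A standalone sketch of just that branch:

#include <cstdio>
#include <cstdint>

int main() {
  const int64_t M = 1024 * 1024;
  // Assumed scenario: the user set only ProfiledCodeHeapSize; the other
  // two sizes are still at their (hypothetical) defaults.
  int64_t cache_size   = 100 * M;
  int64_t non_nmethod  = 8 * M;
  int64_t profiled     = 60 * M;  // command-line value
  int64_t non_profiled = 40 * M;  // default
  int64_t min_size     = 4096;    // one page

  // Slack (positive) or oversubscription (negative) left by the flags.
  int64_t diff = cache_size - (non_nmethod + profiled + non_profiled);

  // profiled was set, non-profiled was not: adapt the non-profiled heap,
  // mirroring the `else if (profiled_set)` branch above.
  if (diff < 0 && non_profiled + diff <= 0) {
    diff += non_profiled - min_size;
    non_profiled = min_size;  // not enough room, clamp to the minimum
  } else {
    non_profiled += diff;     // absorb the slack or deficit
    diff = 0;
  }
  std::printf("non-nmethod %lldM, profiled %lldM, non-profiled %lldM\n",
              (long long)(non_nmethod / M), (long long)(profiled / M),
              (long long)(non_profiled / M));
  return 0;
}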