Example no. 1
void PSYoungGen::initialize_work() {

  _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
                        (HeapWord*)virtual_space()->high_boundary());

  MemRegion cmr((HeapWord*)virtual_space()->low(),
                (HeapWord*)virtual_space()->high());
  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  if (ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply than after the new
    // spaces have been computed.
    SpaceMangler::mangle_region(cmr);
  }

  if (UseNUMA) {
    _eden_space = new MutableNUMASpace(virtual_space()->alignment());
  } else {
    _eden_space = new MutableSpace(virtual_space()->alignment());
  }
  _from_space = new MutableSpace(virtual_space()->alignment());
  _to_space   = new MutableSpace(virtual_space()->alignment());

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) {
    vm_exit_during_initialization("Could not allocate a young gen space");
  }

  // Allocate the mark sweep views of spaces
  _eden_mark_sweep =
      new PSMarkSweepDecorator(_eden_space, NULL, MarkSweepDeadRatio);
  _from_mark_sweep =
      new PSMarkSweepDecorator(_from_space, NULL, MarkSweepDeadRatio);
  _to_mark_sweep =
      new PSMarkSweepDecorator(_to_space, NULL, MarkSweepDeadRatio);

  if (_eden_mark_sweep == NULL ||
      _from_mark_sweep == NULL ||
      _to_mark_sweep == NULL) {
    vm_exit_during_initialization("Could not complete allocation"
                                  " of the young generation");
  }

  // Generation Counters - generation 0, 3 subspaces
  _gen_counters = new PSGenerationCounters("new", 0, 3, _virtual_space);

  // Compute maximum space sizes for performance counters
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  size_t alignment = heap->intra_heap_alignment();
  size_t size = virtual_space()->reserved_size();

  size_t max_survivor_size;
  size_t max_eden_size;

  if (UseAdaptiveSizePolicy) {
    max_survivor_size = size / MinSurvivorRatio;

    // round the survivor space size down to the nearest alignment
    // and make sure its size is greater than 0.
    max_survivor_size = align_size_down(max_survivor_size, alignment);
    max_survivor_size = MAX2(max_survivor_size, alignment);

    // set the maximum size of eden to be the size of the young gen
    // less two times the minimum survivor size. The minimum survivor
    // size for UseAdaptiveSizePolicy is one alignment.
    max_eden_size = size - 2 * alignment;
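    // Example with illustrative numbers: size = 64M, alignment = 64K and
    // MinSurvivorRatio = 3 give max_survivor_size of roughly 21M (64M/3,
    // aligned down) and max_eden_size of 64M - 128K.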
  } else {
    max_survivor_size = size / InitialSurvivorRatio;

    // round the survivor space size down to the nearest alignment
    // and make sure its size is greater than 0.
    max_survivor_size = align_size_down(max_survivor_size, alignment);
    max_survivor_size = MAX2(max_survivor_size, alignment);

    // set the maximum size of eden to be the size of the young gen
    // less two times the survivor size when the generation is 100%
    // committed. The minimum survivor size for -UseAdaptiveSizePolicy
    // is dependent on the committed portion (current capacity) of the
    // generation - the less space committed, the smaller the survivor
    // space, possibly as small as an alignment. However, we are interested
    // in the case where the young generation is 100% committed, as this
    // is the point where eden reaches its maximum size. At this point,
    // the size of a survivor space is max_survivor_size.
    max_eden_size = size - 2 * max_survivor_size;
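    // Example with illustrative numbers: size = 64M and
    // InitialSurvivorRatio = 8 give max_survivor_size = 8M (already
    // aligned), so max_eden_size = 64M - 2 * 8M = 48M.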
  }

  _eden_counters = new SpaceCounters("eden", 0, max_eden_size, _eden_space,
                                     _gen_counters);
  _from_counters = new SpaceCounters("s0", 1, max_survivor_size, _from_space,
                                     _gen_counters);
  _to_counters = new SpaceCounters("s1", 2, max_survivor_size, _to_space,
                                   _gen_counters);

  compute_initial_space_boundaries();
}
Example no. 2
void ParallelScavengeHeap::initialize() {
  // Cannot be initialized until after the flags are parsed
  GenerationSizer flag_parser;

  const size_t alignment = min_alignment();

  // Check alignments
  // NEEDS_CLEANUP   The default TwoGenerationCollectorPolicy uses
  //   NewRatio;  it should check UseAdaptiveSizePolicy. Changes from
  //   generationSizer could move to the common code.
  size_t young_size = align_size_up(flag_parser.young_gen_size(), alignment);
  size_t max_young_size = align_size_up(flag_parser.max_young_gen_size(),
                                        alignment);

  size_t old_size = align_size_up(flag_parser.old_gen_size(), alignment);
  size_t max_old_size = align_size_up(flag_parser.max_old_gen_size(),
                                      alignment);

  size_t perm_size = align_size_up(flag_parser.perm_gen_size(), alignment);
  size_t max_perm_size = align_size_up(flag_parser.max_perm_gen_size(),
                                       alignment);

  // Calculate the total size.
  size_t total_reserved = max_young_size + max_old_size + max_perm_size;

  if (UseISM || UsePermISM) {
    total_reserved = round_to(total_reserved, LargePageSizeInBytes);
  }

  ReservedSpace heap_rs(total_reserved, alignment, UseISM || UsePermISM);
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for "
                                  "object heap");
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

  HeapWord* boundary = (HeapWord*)(heap_rs.base() + max_young_size);
  CardTableExtension* card_table_barrier_set = new CardTableExtension(_reserved, 3);
  _barrier_set = card_table_barrier_set;
  // Check for allocation failure before publishing the barrier set.
  if (_barrier_set == NULL) {
    vm_exit_during_initialization("Could not reserve enough space for "
                                  "barrier set");
  }
  oopDesc::set_bs(_barrier_set);

  // Initial young gen size is 4 MB
  size_t init_young_size = align_size_up(4 * M, alignment);
  init_young_size = MAX2(MIN2(init_young_size, max_young_size), young_size);
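  // i.e. clamp the 4 MB default into [young_size, max_young_size]
  // (assuming young_size <= max_young_size, which holds for sane flag input).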

  ReservedSpace generation_rs = heap_rs.first_part(max_young_size);
  _young_gen = new PSYoungGen(generation_rs, 
                              init_young_size, 
                              young_size, 
                              max_young_size);
  heap_rs = heap_rs.last_part(max_young_size);

  generation_rs = heap_rs.first_part(max_old_size);
  _old_gen = new PSOldGen(generation_rs,
                          old_size,
                          old_size,
                          max_old_size,
                          "old", 1);

  heap_rs = heap_rs.last_part(max_old_size);

  _perm_gen = new PSPermGen(heap_rs,
                            perm_size,
                            perm_size,
                            max_perm_size,
                            "perm", 2);

  _size_policy = new AdaptiveSizePolicy(young_gen()->eden_space()->capacity_in_bytes(),
                                        old_gen()->capacity_in_bytes(),
                                        young_gen()->to_space()->capacity_in_bytes(),
                                        max_young_size,
                                        max_old_size,
                                        alignment);

  // initialize the policy counters - 2 collectors, 3 generations
  _gc_policy_counters = new GCPolicyCounters(PERF_GC, "ParScav:MSC", 2, 3);
}
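
Both functions above lean on align_size_up()/align_size_down(). As a point of
reference, here is a minimal sketch of the power-of-two alignment arithmetic
they implement (an illustration with hypothetical names, not the HotSpot
definitions, which also carry debug-mode checks):

#include <cstddef>

// Round 'size' down/up to a multiple of 'alignment', assuming 'alignment'
// is a power of two (true for the VM's heap/space alignments).
static inline size_t my_align_size_down(size_t size, size_t alignment) {
  return size & ~(alignment - 1);
}

static inline size_t my_align_size_up(size_t size, size_t alignment) {
  return my_align_size_down(size + alignment - 1, alignment);
}

For example, my_align_size_up(100, 64) yields 128, while
my_align_size_down(100, 64) yields 64.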
Example no. 3
void VM_Version::get_processor_features() {

  _cpu = 4; // 486 by default
  _model = 0;
  _stepping = 0;
  _cpuFeatures = 0;
  _logical_processors_per_package = 1;

  if (!Use486InstrsOnly) {
    // Get raw processor info
    getPsrInfo_stub(&_cpuid_info);
    assert_is_initialized();
    _cpu = extended_cpu_family();
    _model = extended_cpu_model();
    _stepping = cpu_stepping();

    if (cpu_family() > 4) { // it supports CPUID
      _cpuFeatures = feature_flags();
      // Logical processors are only available on P4s and above,
      // and only if hyperthreading is available.
      _logical_processors_per_package = logical_processor_count();
    }
  }

  _supports_cx8 = supports_cmpxchg8();

#ifdef _LP64
  // OS should support SSE for x64 and hardware should support at least SSE2.
  if (!VM_Version::supports_sse2()) {
    vm_exit_during_initialization("Unknown x64 processor: SSE2 not supported");
  }
  // in 64 bit the use of SSE2 is the minimum
  if (UseSSE < 2) UseSSE = 2;
#endif

  // If the OS doesn't support SSE, we can't use this feature even if the HW does
  if (!os::supports_sse())
    _cpuFeatures &= ~(CPU_SSE|CPU_SSE2|CPU_SSE3|CPU_SSSE3|CPU_SSE4A|CPU_SSE4_1|CPU_SSE4_2);

  if (UseSSE < 4) {
    _cpuFeatures &= ~CPU_SSE4_1;
    _cpuFeatures &= ~CPU_SSE4_2;
  }

  if (UseSSE < 3) {
    _cpuFeatures &= ~CPU_SSE3;
    _cpuFeatures &= ~CPU_SSSE3;
    _cpuFeatures &= ~CPU_SSE4A;
  }

  if (UseSSE < 2)
    _cpuFeatures &= ~CPU_SSE2;

  if (UseSSE < 1)
    _cpuFeatures &= ~CPU_SSE;

  if (logical_processors_per_package() == 1) {
    // HT processor could be installed on a system which doesn't support HT.
    _cpuFeatures &= ~CPU_HT;
  }

  char buf[256];
  jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
               cores_per_cpu(), threads_per_core(),
               cpu_family(), _model, _stepping,
               (supports_cmov() ? ", cmov" : ""),
               (supports_cmpxchg8() ? ", cx8" : ""),
               (supports_fxsr() ? ", fxsr" : ""),
               (supports_mmx()  ? ", mmx"  : ""),
               (supports_sse()  ? ", sse"  : ""),
               (supports_sse2() ? ", sse2" : ""),
               (supports_sse3() ? ", sse3" : ""),
               (supports_ssse3()? ", ssse3": ""),
               (supports_sse4_1() ? ", sse4.1" : ""),
               (supports_sse4_2() ? ", sse4.2" : ""),
               (supports_popcnt() ? ", popcnt" : ""),
               (supports_mmx_ext() ? ", mmxext" : ""),
               (supports_3dnow_prefetch() ? ", 3dnowpref" : ""),
               (supports_lzcnt()   ? ", lzcnt": ""),
               (supports_sse4a()   ? ", sse4a": ""),
               (supports_ht() ? ", ht": ""));
  _features_str = strdup(buf);

  // UseSSE is set to the smaller of what hardware supports and what
  // the command line requires.  I.e., you cannot set UseSSE to 2 on
  // older Pentiums which do not support it.
  if( UseSSE > 4 ) UseSSE=4;
  if( UseSSE < 0 ) UseSSE=0;
  if( !supports_sse4_1() ) // Drop to 3 if no SSE4 support
    UseSSE = MIN2((intx)3,UseSSE);
  if( !supports_sse3() ) // Drop to 2 if no SSE3 support
    UseSSE = MIN2((intx)2,UseSSE);
  if( !supports_sse2() ) // Drop to 1 if no SSE2 support
    UseSSE = MIN2((intx)1,UseSSE);
  if( !supports_sse () ) // Drop to 0 if no SSE  support
    UseSSE = 0;
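  // Net effect of the cascade above: UseSSE is min(requested level, highest
  // level the CPU supports), e.g. requesting 4 on a CPU without SSE4.1
  // leaves UseSSE at 3.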

  // On new cpus instructions which update whole XMM register should be used
  // to prevent partial register stall due to dependencies on high half.
  //
  // UseXmmLoadAndClearUpper == true  --> movsd(xmm, mem)
  // UseXmmLoadAndClearUpper == false --> movlpd(xmm, mem)
  // UseXmmRegToRegMoveAll == true  --> movaps(xmm, xmm), movapd(xmm, xmm).
  // UseXmmRegToRegMoveAll == false --> movss(xmm, xmm),  movsd(xmm, xmm).

  if( is_amd() ) { // AMD cpus specific settings
    if( supports_sse2() && FLAG_IS_DEFAULT(UseAddressNop) ) {
      // Use it on new AMD cpus starting from Opteron.
      UseAddressNop = true;
    }
    if( supports_sse2() && FLAG_IS_DEFAULT(UseNewLongLShift) ) {
      // Use it on new AMD cpus starting from Opteron.
      UseNewLongLShift = true;
    }
    if( FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper) ) {
      if( supports_sse4a() ) {
        UseXmmLoadAndClearUpper = true; // use movsd only on '10h' Opteron
      } else {
        UseXmmLoadAndClearUpper = false;
      }
    }
    if( FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll) ) {
      if( supports_sse4a() ) {
        UseXmmRegToRegMoveAll = true; // use movaps, movapd only on '10h'
      } else {
        UseXmmRegToRegMoveAll = false;
      }
    }
    if( FLAG_IS_DEFAULT(UseXmmI2F) ) {
      if( supports_sse4a() ) {
        UseXmmI2F = true;
      } else {
        UseXmmI2F = false;
      }
    }
    if( FLAG_IS_DEFAULT(UseXmmI2D) ) {
      if( supports_sse4a() ) {
        UseXmmI2D = true;
      } else {
        UseXmmI2D = false;
      }
    }
    if( FLAG_IS_DEFAULT(UseSSE42Intrinsics) ) {
      if( supports_sse4_2() && UseSSE >= 4 ) {
        UseSSE42Intrinsics = true;
      }
    }

    // Use the count-leading-zeros instruction if available.
    if (supports_lzcnt()) {
      if (FLAG_IS_DEFAULT(UseCountLeadingZerosInstruction)) {
        UseCountLeadingZerosInstruction = true;
      }
    }

    // some defaults for AMD family 15h
    if ( cpu_family() == 0x15 ) {
      // On family 15h processors default is no sw prefetch
      if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
        AllocatePrefetchStyle = 0;
      }
      // Also, if some other prefetch style is specified, default instruction type is PREFETCHW
      if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
        AllocatePrefetchInstr = 3;
      }
      // On family 15h processors use XMM and UnalignedLoadStores for Array Copy
      if( FLAG_IS_DEFAULT(UseXMMForArrayCopy) ) {
        UseXMMForArrayCopy = true;
      }
      if( FLAG_IS_DEFAULT(UseUnalignedLoadStores) && UseXMMForArrayCopy ) {
        UseUnalignedLoadStores = true;
      }
    }

  }

  if( is_intel() ) { // Intel cpus specific settings
    if( FLAG_IS_DEFAULT(UseStoreImmI16) ) {
      UseStoreImmI16 = false; // don't use it on Intel cpus
    }
    if( cpu_family() == 6 || cpu_family() == 15 ) {
      if( FLAG_IS_DEFAULT(UseAddressNop) ) {
        // Use it on all Intel cpus starting from PentiumPro
        UseAddressNop = true;
      }
    }
    if( FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper) ) {
      UseXmmLoadAndClearUpper = true; // use movsd on all Intel cpus
    }
    if( FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll) ) {
      if( supports_sse3() ) {
        UseXmmRegToRegMoveAll = true; // use movaps, movapd on new Intel cpus
      } else {
        UseXmmRegToRegMoveAll = false;
      }
    }
    if( cpu_family() == 6 && supports_sse3() ) { // New Intel cpus
#ifdef COMPILER2
      if( FLAG_IS_DEFAULT(MaxLoopPad) ) {
        // For new Intel cpus do the next optimization:
        // don't align the beginning of a loop if there are enough instructions
        // left (NumberOfLoopInstrToAlign defined in c2_globals.hpp)
        // in current fetch line (OptoLoopAlignment) or the padding
        // is big (> MaxLoopPad).
        // Set MaxLoopPad to 11 for new Intel cpus to reduce number of
        // generated NOP instructions. 11 is the largest size of one
        // address NOP instruction '0F 1F' (see Assembler::nop(i)).
        MaxLoopPad = 11;
      }
#endif // COMPILER2
      if( FLAG_IS_DEFAULT(UseXMMForArrayCopy) ) {
        UseXMMForArrayCopy = true; // use SSE2 movq on new Intel cpus
      }
      if( supports_sse4_2() && supports_ht() ) { // Newest Intel cpus
        if( FLAG_IS_DEFAULT(UseUnalignedLoadStores) && UseXMMForArrayCopy ) {
          UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
        }
      }
      if( supports_sse4_2() && UseSSE >= 4 ) {
        if( FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
          UseSSE42Intrinsics = true;
        }
      }
    }
  }

  // Use population count instruction if available.
  if (supports_popcnt()) {
    if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
      UsePopCountInstruction = true;
    }
  }

#ifdef COMPILER2
  if (UseFPUForSpilling) {
    if (UseSSE < 2) {
      // Only supported with SSE2+
      FLAG_SET_DEFAULT(UseFPUForSpilling, false);
    }
  }
#endif

  assert(0 <= ReadPrefetchInstr && ReadPrefetchInstr <= 3, "invalid value");
  assert(0 <= AllocatePrefetchInstr && AllocatePrefetchInstr <= 3, "invalid value");

  // set valid Prefetch instruction
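  // (Encoding, per the diagnostic output below: 0 = PREFETCHNTA,
  //  1 = PREFETCHT0, 2 = PREFETCHT2, 3 = PREFETCHW.)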
  if( ReadPrefetchInstr < 0 ) ReadPrefetchInstr = 0;
  if( ReadPrefetchInstr > 3 ) ReadPrefetchInstr = 3;
  if( ReadPrefetchInstr == 3 && !supports_3dnow_prefetch() ) ReadPrefetchInstr = 0;
  if( !supports_sse() && supports_3dnow_prefetch() ) ReadPrefetchInstr = 3;

  if( AllocatePrefetchInstr < 0 ) AllocatePrefetchInstr = 0;
  if( AllocatePrefetchInstr > 3 ) AllocatePrefetchInstr = 3;
  if( AllocatePrefetchInstr == 3 && !supports_3dnow_prefetch() ) AllocatePrefetchInstr=0;
  if( !supports_sse() && supports_3dnow_prefetch() ) AllocatePrefetchInstr = 3;

  // Allocation prefetch settings
  intx cache_line_size = L1_data_cache_line_size();
  if( cache_line_size > AllocatePrefetchStepSize )
    AllocatePrefetchStepSize = cache_line_size;
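  // Prefetches operate on whole cache lines, so stepping by less than one
  // line would just re-touch the same line.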
  if( FLAG_IS_DEFAULT(AllocatePrefetchLines) )
    AllocatePrefetchLines = 3; // Optimistic value
  assert(AllocatePrefetchLines > 0, "invalid value");
  if( AllocatePrefetchLines < 1 ) // set valid value in product VM
    AllocatePrefetchLines = 1; // Conservative value

  AllocatePrefetchDistance = allocate_prefetch_distance();
  AllocatePrefetchStyle    = allocate_prefetch_style();

  if( is_intel() && cpu_family() == 6 && supports_sse3() ) {
    if( AllocatePrefetchStyle == 2 ) { // watermark prefetching on Core
#ifdef _LP64
      AllocatePrefetchDistance = 384;
#else
      AllocatePrefetchDistance = 320;
#endif
    }
    if( supports_sse4_2() && supports_ht() ) { // Nehalem based cpus
      AllocatePrefetchDistance = 192;
      AllocatePrefetchLines = 4;
#ifdef COMPILER2
      if (AggressiveOpts && FLAG_IS_DEFAULT(UseFPUForSpilling)) {
        FLAG_SET_DEFAULT(UseFPUForSpilling, true);
      }
#endif
    }
  }
  assert(AllocatePrefetchDistance % AllocatePrefetchStepSize == 0, "invalid value");

#ifdef _LP64
  // Prefetch settings
  PrefetchCopyIntervalInBytes = prefetch_copy_interval_in_bytes();
  PrefetchScanIntervalInBytes = prefetch_scan_interval_in_bytes();
  PrefetchFieldsAhead         = prefetch_fields_ahead();
#endif

#ifndef PRODUCT
  if (PrintMiscellaneous && Verbose) {
    tty->print_cr("Logical CPUs per core: %u",
                  logical_processors_per_package());
    tty->print_cr("UseSSE=%d",UseSSE);
    tty->print("Allocation");
    if (AllocatePrefetchStyle <= 0 || (UseSSE == 0 && !supports_3dnow_prefetch())) {
      tty->print_cr(": no prefetching");
    } else {
      tty->print(" prefetching: ");
      if (UseSSE == 0 && supports_3dnow_prefetch()) {
        tty->print("PREFETCHW");
      } else if (UseSSE >= 1) {
        if (AllocatePrefetchInstr == 0) {
          tty->print("PREFETCHNTA");
        } else if (AllocatePrefetchInstr == 1) {
          tty->print("PREFETCHT0");
        } else if (AllocatePrefetchInstr == 2) {
          tty->print("PREFETCHT2");
        } else if (AllocatePrefetchInstr == 3) {
          tty->print("PREFETCHW");
        }
      }
      if (AllocatePrefetchLines > 1) {
        tty->print_cr(" " INTX_FORMAT ", " INTX_FORMAT " lines with step " INTX_FORMAT " bytes",
                      AllocatePrefetchDistance, AllocatePrefetchLines,
                      AllocatePrefetchStepSize);
      } else {
        tty->print_cr(" " INTX_FORMAT ", one line", AllocatePrefetchDistance);
      }
    }

    if (PrefetchCopyIntervalInBytes > 0) {
      tty->print_cr("PrefetchCopyIntervalInBytes " INTX_FORMAT, PrefetchCopyIntervalInBytes);
    }
    if (PrefetchScanIntervalInBytes > 0) {
      tty->print_cr("PrefetchScanIntervalInBytes " INTX_FORMAT, PrefetchScanIntervalInBytes);
    }
    if (PrefetchFieldsAhead > 0) {
      tty->print_cr("PrefetchFieldsAhead " INTX_FORMAT, PrefetchFieldsAhead);
    }
  }
#endif // !PRODUCT
}
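
The UseSSE negotiation in this example follows a "requested level capped by
supported level" pattern. A self-contained sketch of that pattern (the
supports[] table and function name are hypothetical, not the VM_Version API):

#include <cstdio>

// Hypothetical illustration: the effective SSE level is the command-line
// request, clamped to [0, 4] and then lowered until a supported level is
// reached (index i in supports[] means "level i is usable").
static int effective_sse_level(int requested, const bool supports[5]) {
  int level = requested < 0 ? 0 : (requested > 4 ? 4 : requested);
  while (level > 0 && !supports[level]) {
    --level;  // drop to the next lower level, as the cascade above does
  }
  return level;
}

int main() {
  const bool supports[5] = { true, true, true, true, false };  // no SSE4.x
  std::printf("UseSSE=%d\n", effective_sse_level(4, supports)); // prints 3
  return 0;
}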
Example no. 4
// Starts the Attach Listener thread
void AttachListener::init() {
    EXCEPTION_MARK;
    Klass* k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_Thread(), true, CHECK);
    instanceKlassHandle klass (THREAD, k);
    instanceHandle thread_oop = klass->allocate_instance_handle(CHECK);

    const char thread_name[] = "Attach Listener";
    Handle string = java_lang_String::create_from_str(thread_name, CHECK);

    // Initialize thread_oop to put it into the system threadGroup
    Handle thread_group (THREAD, Universe::system_thread_group());
    JavaValue result(T_VOID);
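    // Invokes java.lang.Thread.<init>(ThreadGroup, String) on the freshly
    // allocated thread object.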
    JavaCalls::call_special(&result, thread_oop,
                            klass,
                            vmSymbols::object_initializer_name(),
                            vmSymbols::threadgroup_string_void_signature(),
                            thread_group,
                            string,
                            THREAD);

    if (HAS_PENDING_EXCEPTION) {
        tty->print_cr("Exception in VM (AttachListener::init) : ");
        java_lang_Throwable::print(PENDING_EXCEPTION, tty);
        tty->cr();

        CLEAR_PENDING_EXCEPTION;

        return;
    }

    KlassHandle group(THREAD, SystemDictionary::ThreadGroup_klass());
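    // Invokes ThreadGroup.add(Thread) so the new thread is registered with
    // the system thread group.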
    JavaCalls::call_special(&result,
                            thread_group,
                            group,
                            vmSymbols::add_method_name(),
                            vmSymbols::thread_void_signature(),
                            thread_oop,             // ARG 1
                            THREAD);

    if (HAS_PENDING_EXCEPTION) {
        tty->print_cr("Exception in VM (AttachListener::init) : ");
        java_lang_Throwable::print(PENDING_EXCEPTION, tty);
        tty->cr();

        CLEAR_PENDING_EXCEPTION;

        return;
    }

    {   MutexLocker mu(Threads_lock);
        JavaThread* listener_thread = new JavaThread(&attach_listener_thread_entry);

        // Check that thread and osthread were created
        if (listener_thread == NULL || listener_thread->osthread() == NULL) {
            vm_exit_during_initialization("java.lang.OutOfMemoryError",
                                          "unable to create new native thread");
        }

        java_lang_Thread::set_thread(thread_oop(), listener_thread);
        java_lang_Thread::set_daemon(thread_oop());

        listener_thread->set_threadObj(thread_oop());
        Threads::add(listener_thread);
        Thread::start(listener_thread);
    }
}
Example no. 5
PSOldGen::PSOldGen(ReservedSpace rs, size_t initial_size,
                   size_t min_size, size_t max_size,
                   const char* gen_name, int level):
  _init_gen_size(initial_size), _min_gen_size(min_size),
  _max_gen_size(max_size)
{
  //
  // Basic memory initialization
  //

  if (!_virtual_space.initialize(rs, initial_size)) {
    vm_exit_during_initialization("Could not reserve enough space for "
                                  "object heap");
  }

  _reserved = MemRegion((HeapWord*)_virtual_space.low_boundary(),
                        (HeapWord*)_virtual_space.high_boundary());

  //
  // Object start stuff
  //

  _start_array.initialize(_reserved);

  //
  // Card table stuff
  //

  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  CardTableModRefBS* ct = (CardTableModRefBS*)Universe::heap()->barrier_set();
  assert(ct->kind() == BarrierSet::CardTableModRef, "Sanity");

  // Verify that the start and end of this generation is the start of a card.
  // If this wasn't true, a single card could span more than one generation,
  // which would cause problems when we commit/uncommit memory, and when we
  // clear and dirty cards.
  guarantee(ct->is_card_aligned(_reserved.start()), "generation must be card aligned");
  if (_reserved.end() != Universe::heap()->reserved_region().end()) {
    // Don't check at the very end of the heap as we'll assert that we're probing off
    // the end if we try.
    guarantee(ct->is_card_aligned(_reserved.end()), "generation must be card aligned");
  }

  //
  // ObjectSpace stuff
  //

  _object_space = new MutableSpace();

  if (_object_space == NULL) {
    vm_exit_during_initialization("Could not allocate an old gen space");
  }

  object_space()->initialize(cmr, true);

  _object_mark_sweep = new PSMarkSweepDecorator(_object_space, start_array(), MarkSweepDeadRatio);

  if (_object_mark_sweep == NULL) {
    vm_exit_during_initialization("Could not complete allocation of old generation");
  }

  // Generation Counters, generation 'level', 1 subspace
  _gen_counters = new GenerationCounters(PERF_GC, gen_name, level, 1, &_virtual_space);
  _space_counters = new SpaceCounters(_gen_counters->name_space(), gen_name,
                                      0, _virtual_space.reserved_size(),
                                      _object_space);
}
Example no. 6
void CollectorPolicy::initialize_flags() {
  assert(_space_alignment != 0, "Space alignment not set up properly");
  assert(_heap_alignment != 0, "Heap alignment not set up properly");
  assert(_heap_alignment >= _space_alignment,
         "heap_alignment: " SIZE_FORMAT " less than space_alignment: " SIZE_FORMAT,
         _heap_alignment, _space_alignment);
  assert(_heap_alignment % _space_alignment == 0,
         "heap_alignment: " SIZE_FORMAT " not aligned by space_alignment: " SIZE_FORMAT,
         _heap_alignment, _space_alignment);

  if (FLAG_IS_CMDLINE(MaxHeapSize)) {
    if (FLAG_IS_CMDLINE(InitialHeapSize) && InitialHeapSize > MaxHeapSize) {
      vm_exit_during_initialization("Initial heap size set to a larger value than the maximum heap size");
    }
    if (_min_heap_byte_size != 0 && MaxHeapSize < _min_heap_byte_size) {
      vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified");
    }
    _max_heap_size_cmdline = true;
  }

  // Check heap parameter properties
  if (InitialHeapSize < M) {
    vm_exit_during_initialization("Too small initial heap");
  }
  if (_min_heap_byte_size < M) {
    vm_exit_during_initialization("Too small minimum heap");
  }

  // User inputs from -Xmx and -Xms must be aligned
  _min_heap_byte_size = align_size_up(_min_heap_byte_size, _heap_alignment);
  size_t aligned_initial_heap_size = align_size_up(InitialHeapSize, _heap_alignment);
  size_t aligned_max_heap_size = align_size_up(MaxHeapSize, _heap_alignment);

  // Write back to flags if the values changed
  if (aligned_initial_heap_size != InitialHeapSize) {
    FLAG_SET_ERGO(size_t, InitialHeapSize, aligned_initial_heap_size);
  }
  if (aligned_max_heap_size != MaxHeapSize) {
    FLAG_SET_ERGO(size_t, MaxHeapSize, aligned_max_heap_size);
  }

  if (FLAG_IS_CMDLINE(InitialHeapSize) && _min_heap_byte_size != 0 &&
      InitialHeapSize < _min_heap_byte_size) {
    vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified");
  }
  if (!FLAG_IS_DEFAULT(InitialHeapSize) && InitialHeapSize > MaxHeapSize) {
    FLAG_SET_ERGO(size_t, MaxHeapSize, InitialHeapSize);
  } else if (!FLAG_IS_DEFAULT(MaxHeapSize) && InitialHeapSize > MaxHeapSize) {
    FLAG_SET_ERGO(size_t, InitialHeapSize, MaxHeapSize);
    if (InitialHeapSize < _min_heap_byte_size) {
      _min_heap_byte_size = InitialHeapSize;
    }
  }

  _initial_heap_byte_size = InitialHeapSize;
  _max_heap_byte_size = MaxHeapSize;

  FLAG_SET_ERGO(size_t, MinHeapDeltaBytes, align_size_up(MinHeapDeltaBytes, _space_alignment));

  DEBUG_ONLY(CollectorPolicy::assert_flags();)
}
Example no. 7
void GenCollectorPolicy::initialize_flags() {
  CollectorPolicy::initialize_flags();

  assert(_gen_alignment != 0, "Generation alignment not set up properly");
  assert(_heap_alignment >= _gen_alignment,
         "heap_alignment: " SIZE_FORMAT " less than gen_alignment: " SIZE_FORMAT,
         _heap_alignment, _gen_alignment);
  assert(_gen_alignment % _space_alignment == 0,
         "gen_alignment: " SIZE_FORMAT " not aligned by space_alignment: " SIZE_FORMAT,
         _gen_alignment, _space_alignment);
  assert(_heap_alignment % _gen_alignment == 0,
         "heap_alignment: " SIZE_FORMAT " not aligned by gen_alignment: " SIZE_FORMAT,
         _heap_alignment, _gen_alignment);

  // All generational heaps have a youngest gen; handle those flags here

  // Make sure the heap is large enough for two generations
  size_t smallest_new_size = young_gen_size_lower_bound();
  size_t smallest_heap_size = align_size_up(smallest_new_size + old_gen_size_lower_bound(),
                                            _heap_alignment);
  if (MaxHeapSize < smallest_heap_size) {
    FLAG_SET_ERGO(size_t, MaxHeapSize, smallest_heap_size);
    _max_heap_byte_size = MaxHeapSize;
  }
  // If needed, synchronize _min_heap_byte_size and _initial_heap_byte_size
  if (_min_heap_byte_size < smallest_heap_size) {
    _min_heap_byte_size = smallest_heap_size;
    if (InitialHeapSize < _min_heap_byte_size) {
      FLAG_SET_ERGO(size_t, InitialHeapSize, smallest_heap_size);
      _initial_heap_byte_size = smallest_heap_size;
    }
  }

  // Make sure NewSize allows an old generation to fit even if set on the command line
  if (FLAG_IS_CMDLINE(NewSize) && NewSize >= _initial_heap_byte_size) {
    warning("NewSize was set larger than initial heap size, will use initial heap size.");
    NewSize = bound_minus_alignment(NewSize, _initial_heap_byte_size);
  }

  // Now take the actual NewSize into account. We will silently increase NewSize
  // if the user specified a smaller or unaligned value.
  size_t bounded_new_size = bound_minus_alignment(NewSize, MaxHeapSize);
  bounded_new_size = MAX2(smallest_new_size, (size_t)align_size_down(bounded_new_size, _gen_alignment));
  if (bounded_new_size != NewSize) {
    // Do not use FLAG_SET_ERGO to update NewSize here, since that would
    // clobber the record of whether NewSize was set on the command line.
    // This information is needed later when setting the initial and minimum
    // young generation size.
    NewSize = bounded_new_size;
  }
  _min_young_size = smallest_new_size;
  _initial_young_size = NewSize;

  if (!FLAG_IS_DEFAULT(MaxNewSize)) {
    if (MaxNewSize >= MaxHeapSize) {
      // Make sure there is room for an old generation
      size_t smaller_max_new_size = MaxHeapSize - _gen_alignment;
      if (FLAG_IS_CMDLINE(MaxNewSize)) {
        warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or greater than the entire "
                "heap (" SIZE_FORMAT "k).  A new max generation size of " SIZE_FORMAT "k will be used.",
                MaxNewSize/K, MaxHeapSize/K, smaller_max_new_size/K);
      }
      FLAG_SET_ERGO(size_t, MaxNewSize, smaller_max_new_size);
      if (NewSize > MaxNewSize) {
        FLAG_SET_ERGO(size_t, NewSize, MaxNewSize);
        _initial_young_size = NewSize;
      }
    } else if (MaxNewSize < _initial_young_size) {
      FLAG_SET_ERGO(size_t, MaxNewSize, _initial_young_size);
    } else if (!is_size_aligned(MaxNewSize, _gen_alignment)) {
      FLAG_SET_ERGO(size_t, MaxNewSize, align_size_down(MaxNewSize, _gen_alignment));
    }
    _max_young_size = MaxNewSize;
  }

  if (NewSize > MaxNewSize) {
    // At this point this should only happen if the user specifies a large NewSize and/or
    // a small (but not too small) MaxNewSize.
    if (FLAG_IS_CMDLINE(MaxNewSize)) {
      warning("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). "
              "A new max generation size of " SIZE_FORMAT "k will be used.",
              NewSize/K, MaxNewSize/K, NewSize/K);
    }
    FLAG_SET_ERGO(size_t, MaxNewSize, NewSize);
    _max_young_size = MaxNewSize;
  }

  if (SurvivorRatio < 1 || NewRatio < 1) {
    vm_exit_during_initialization("Invalid young gen ratio specified");
  }

  OldSize = MAX2(OldSize, old_gen_size_lower_bound());
  if (!is_size_aligned(OldSize, _gen_alignment)) {
    // Setting OldSize directly to preserve information about the possible
    // setting of OldSize on the command line.
    OldSize = align_size_down(OldSize, _gen_alignment);
  }

  if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(MaxHeapSize)) {
    // NewRatio will be used later to set the young generation size so we use
    // it to calculate how big the heap should be based on the requested OldSize
    // and NewRatio.
    assert(NewRatio > 0, "NewRatio should have been set up earlier");
    size_t calculated_heapsize = (OldSize / NewRatio) * (NewRatio + 1);
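    // E.g. (illustrative): OldSize = 512M and NewRatio = 2 give
    // (512M / 2) * 3 = 768M, i.e. the old gen plus a young gen of
    // OldSize / NewRatio.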

    calculated_heapsize = align_size_up(calculated_heapsize, _heap_alignment);
    FLAG_SET_ERGO(size_t, MaxHeapSize, calculated_heapsize);
    _max_heap_byte_size = MaxHeapSize;
    FLAG_SET_ERGO(size_t, InitialHeapSize, calculated_heapsize);
    _initial_heap_byte_size = InitialHeapSize;
  }

  // Adjust NewSize and OldSize or MaxHeapSize to match each other
  if (NewSize + OldSize > MaxHeapSize) {
    if (_max_heap_size_cmdline) {
      // Somebody has set a maximum heap size with the intention that we should not
      // exceed it. Adjust New/OldSize as necessary.
      size_t calculated_size = NewSize + OldSize;
      double shrink_factor = (double) MaxHeapSize / calculated_size;
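      // shrink_factor < 1 here (NewSize + OldSize exceeds MaxHeapSize), so
      // NewSize is scaled down proportionally and OldSize takes the rest.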
      size_t smaller_new_size = align_size_down((size_t)(NewSize * shrink_factor), _gen_alignment);
      FLAG_SET_ERGO(size_t, NewSize, MAX2(young_gen_size_lower_bound(), smaller_new_size));
      _initial_young_size = NewSize;

      // OldSize is already aligned because above we aligned MaxHeapSize to
      // _heap_alignment, and we just made sure that NewSize is aligned to
      // _gen_alignment. In initialize_flags() we verified that _heap_alignment
      // is a multiple of _gen_alignment.
      FLAG_SET_ERGO(size_t, OldSize, MaxHeapSize - NewSize);
    } else {
      FLAG_SET_ERGO(size_t, MaxHeapSize, align_size_up(NewSize + OldSize, _heap_alignment));
      _max_heap_byte_size = MaxHeapSize;
    }
  }

  // Update NewSize, if possible, to avoid sizing the young gen too small when only
  // OldSize is set on the command line.
  if (FLAG_IS_CMDLINE(OldSize) && !FLAG_IS_CMDLINE(NewSize)) {
    if (OldSize < _initial_heap_byte_size) {
      size_t new_size = _initial_heap_byte_size - OldSize;
      // Need to compare against the flag value for max since _max_young_size
      // might not have been set yet.
      if (new_size >= _min_young_size && new_size <= MaxNewSize) {
        FLAG_SET_ERGO(size_t, NewSize, new_size);
        _initial_young_size = NewSize;
      }
    }
  }

  always_do_update_barrier = UseConcMarkSweepGC;

  DEBUG_ONLY(GenCollectorPolicy::assert_flags();)
}