size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}
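// Refreshes the per-generation, Metaspace and compressed class space
// performance counters (the hsperfdata counters read by jvmstat tools
// such as jstat).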
void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  MetaspaceCounters::update_performance_counters();
  CompressedClassSpaceCounters::update_performance_counters();
}
size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}
void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
  perm_gen()->object_iterate(cl);
}
void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  perm_gen()->print_on(st);
}
void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  perm_gen()->update_counters();
}
// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(size_t size, bool is_noref, bool is_tlab) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  HeapWord* result = young_gen()->allocate(size, is_noref, is_tlab);

  uint loop_count = 0;
  uint gc_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();

      result = young_gen()->allocate(size, is_noref, is_tlab);
  
      // In some cases, the requested object will be too large to easily
      // fit in the young_gen. Rather than force a safepoint and collection
      // for each one, try allocation in old_gen for objects likely to fail
      // allocation in eden.
      if (result == NULL && size >= 
         (young_gen()->eden_space()->capacity_in_words() / 2) && !is_tlab) {
        result = old_gen()->allocate(size, is_noref, is_tlab);
      }
    }

    if (result == NULL) {

      // Exit the loop if the gc time limit has been exceeded.
      // The allocation must have failed above (result must be NULL),
      // and the most recent collection must have exceeded the
      // gc time limit.  Exit the loop so that an out-of-memory
      // will be thrown (returning a NULL will do that), but
      // clear gc_time_limit_exceeded so that the next collection
      // will succeed if the application decides to handle the
      // out-of-memory and tries to go on.
      if (size_policy()->gc_time_limit_exceeded()) {
        size_policy()->set_gc_time_limit_exceeded(false);
        if (PrintGCDetails && Verbose) {
          gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
            "return NULL because gc_time_limit_exceeded is set");
        }
        return NULL;
      }

      // Generate a VM operation
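      // The gc_count read above under the Heap_lock is passed to the
      // operation so its prologue can notice that another thread's
      // collection has already completed; in that case the operation is
      // skipped, prologue_succeeded() returns false, and we retry the
      // allocation on the next loop iteration.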
      VM_ParallelGCFailedAllocation op(size, is_noref, is_tlab, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_or_null(op.result()), 
          "result not in heap");
        // If a NULL result is being returned, an out-of-memory
        // will be thrown now.  Clear the gc_time_limit_exceeded
        // flag to avoid the following situation:
        //   gc_time_limit_exceeded is set during a collection;
        //   the collection fails to return enough space and an OOM is thrown;
        //   the next GC is skipped because the gc_time_limit_exceeded
        //     flag is set and another OOM is thrown.
        if (op.result() == NULL) {
          size_policy()->set_gc_time_limit_exceeded(false);
        }
        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) && 
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
              " size=%d %s", loop_count, size, is_tlab ? "(TLAB)" : "");
    }
  }

  return result;
}
void ParallelScavengeHeap::initialize() {
  // Cannot be initialized until after the flags are parsed
  GenerationSizer flag_parser;

  const size_t alignment = min_alignment();

  // Check alignments
  // NEEDS_CLEANUP  The default TwoGenerationCollectorPolicy uses
  //   NewRatio; it should check UseAdaptiveSizePolicy. Changes from
  //   generationSizer could move to the common code.
  size_t young_size = align_size_up(flag_parser.young_gen_size(), alignment);
  size_t max_young_size = align_size_up(flag_parser.max_young_gen_size(), 
                                                                  alignment);

  size_t old_size = align_size_up(flag_parser.old_gen_size(), alignment);
  size_t max_old_size = align_size_up(flag_parser.max_old_gen_size(), 
                                                                  alignment);

  size_t perm_size = align_size_up(flag_parser.perm_gen_size(), alignment);
  size_t max_perm_size = align_size_up(flag_parser.max_perm_gen_size(), 
                                                                  alignment);

  // Calculate the total size.
  size_t total_reserved = max_young_size + max_old_size + max_perm_size;

  if (UseISM || UsePermISM) {
    total_reserved = round_to(total_reserved, LargePageSizeInBytes);
  }
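  // An ISM-backed reservation must span a whole number of large pages,
  // hence the rounding of total_reserved above.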

  ReservedSpace heap_rs(total_reserved, alignment, UseISM || UsePermISM);
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for "
                                  "object heap");
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
			(HeapWord*)(heap_rs.base() + heap_rs.size()));

  HeapWord* boundary = (HeapWord*)(heap_rs.base() + max_young_size);
  CardTableExtension* card_table_barrier_set = new CardTableExtension(_reserved, 3);
  _barrier_set = card_table_barrier_set;

  oopDesc::set_bs(_barrier_set);
  if (_barrier_set == NULL) {
    vm_exit_during_initialization("Could not reserve enough space for "
                                  "barrier set"); 
  }

  // Initial young gen size is 4 Mb
  size_t init_young_size = align_size_up(4 * M, alignment);
  init_young_size = MAX2(MIN2(init_young_size, max_young_size), young_size);

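  // Carve the single reservation in address order: young gen at the lowest
  // addresses, then the old gen, with the remainder used for the perm gen.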
  ReservedSpace generation_rs = heap_rs.first_part(max_young_size);
  _young_gen = new PSYoungGen(generation_rs, 
                              init_young_size, 
                              young_size, 
                              max_young_size);
  heap_rs = heap_rs.last_part(max_young_size);

  generation_rs = heap_rs.first_part(max_old_size);
  _old_gen = new PSOldGen(generation_rs,
                          old_size,
                          old_size,
                          max_old_size,
                          "old", 1);

  heap_rs = heap_rs.last_part(max_old_size);

  _perm_gen = new PSPermGen(heap_rs,
                            perm_size,
                            perm_size,
                            max_perm_size,
                            "perm", 2);

  _size_policy = new AdaptiveSizePolicy(young_gen()->eden_space()->capacity_in_bytes(),
                                        old_gen()->capacity_in_bytes(),
                                        young_gen()->to_space()->capacity_in_bytes(),
                                        max_young_size,
                                        max_old_size,
                                        alignment);

  // initialize the policy counters - 2 collectors, 3 generations
  _gc_policy_counters = new GCPolicyCounters(PERF_GC, "ParScav:MSC", 2, 3);
}
jint ParallelScavengeHeap::initialize() {
  // Cannot be initialized until after the flags are parsed
  GenerationSizer flag_parser;

  size_t max_young_size = flag_parser.max_young_gen_size();
  size_t max_old_size = flag_parser.max_old_gen_size();
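  // With MPSS, bump the generation alignment to the large page size when
  // the combined young + old maximum is large enough to benefit from
  // large pages.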
  if (UseMPSS && max_young_size + max_old_size >= LargePageHeapSizeThreshold) {
    set_generation_alignment(LargePageSizeInBytes);
  }
  const size_t alignment = generation_alignment();

  // Check alignments
  // NEEDS_CLEANUP  The default TwoGenerationCollectorPolicy uses
  //   NewRatio; it should check UseAdaptiveSizePolicy. Changes from
  //   generationSizer could move to the common code.
  size_t min_young_size = 
    align_size_up(flag_parser.min_young_gen_size(), alignment);
  size_t young_size = align_size_up(flag_parser.young_gen_size(), alignment);
  max_young_size = align_size_up(max_young_size, alignment);

  size_t min_old_size = 
    align_size_up(flag_parser.min_old_gen_size(), alignment);
  size_t old_size = align_size_up(flag_parser.old_gen_size(), alignment);
  old_size = MAX2(old_size, min_old_size);
  max_old_size = align_size_up(max_old_size, alignment);

  size_t perm_size = align_size_up(flag_parser.perm_gen_size(), alignment);
  size_t max_perm_size = align_size_up(flag_parser.max_perm_gen_size(), 
                                                                  alignment);

  // Calculate the total size.
  size_t total_reserved = max_young_size + max_old_size + max_perm_size;

  if (UseISM || UsePermISM) {
    total_reserved = round_to(total_reserved, LargePageSizeInBytes);
  }

  ReservedSpace heap_rs(total_reserved, alignment, UseISM || UsePermISM);
  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved_byte_size = heap_rs.size();
  _reserved = MemRegion((HeapWord*)heap_rs.base(),
			(HeapWord*)(heap_rs.base() + heap_rs.size()));

  HeapWord* boundary = (HeapWord*)(heap_rs.base() + max_young_size);
  CardTableExtension* card_table_barrier_set = new CardTableExtension(_reserved, 3);
  _barrier_set = card_table_barrier_set;

  oopDesc::set_bs(_barrier_set);
  if (_barrier_set == NULL) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for barrier set"); 
    return JNI_ENOMEM;
  }

  // Initial young gen size is 4 Mb
  size_t init_young_size = align_size_up(4 * M, alignment);
  init_young_size = MAX2(MIN2(init_young_size, max_young_size), young_size);

  // Divide up the reserved space: perm, old, young
  ReservedSpace perm_rs  = heap_rs.first_part(max_perm_size);
  ReservedSpace old_young_rs = heap_rs.last_part(max_perm_size);
  ReservedSpace old_rs   = old_young_rs.first_part(max_old_size);
  heap_rs                = old_young_rs.last_part(max_old_size);
  ReservedSpace young_rs = heap_rs.first_part(max_young_size);
  assert(young_rs.size() == heap_rs.size(), "Didn't reserve all of the heap");

  // Make up the generations
  // Calculate the maximum size that a generation can grow.  This
  // includes growth into the other generation.  Note that the
  // parameter _max_gen_size is kept as the maximum 
  // size of the generation as the boundaries currently stand.
  // _max_gen_size is still used as that value.
  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

// Regarding SEPARATE_PATHS.  If SEPARATE_PATHS is defined, then
// the generations are created without the use of AdjoiningGenerations
// in the case where boundary moving is not an option.  This is
// being kept until the code review in case there is some desire
// to keep the new code out of the path of the previous code.
// One effect of using AdjoiningGenerations for both cases is that
// the generations in AdjoiningGenerations need to be
// PSOldGen and PSYoungGen as opposed to ASPSOldGen and ASPSYoungGen.
// The latter means that methods such as available_for_expansion()
// need to be defined in PSOldGen.

#undef SEPARATE_PATHS
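// SEPARATE_PATHS is left undefined here, so the #ifdef blocks below compile
// out and only the AdjoiningGenerations path is built.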
#ifdef SEPARATE_PATHS
  if (UseAdaptiveSizePolicy && UseAdaptiveGCBoundary) {
#endif
    _gens = new AdjoiningGenerations(old_young_rs,
                                     old_size,
                                     min_old_size,
                                     max_old_size,
                                     init_young_size,
                                     min_young_size,
                                     max_young_size,
                                     alignment);

    _old_gen = _gens->old_gen();
    _young_gen = _gens->young_gen();

    _size_policy =
      new PSAdaptiveSizePolicy(young_gen()->eden_space()->capacity_in_bytes(),
			       old_gen()->capacity_in_bytes(),
			       young_gen()->to_space()->capacity_in_bytes(),
			       generation_alignment(),
			       intra_generation_alignment(),
			       max_gc_pause_sec,
			       max_gc_minor_pause_sec,
			       GCTimeRatio
			       );

#ifdef SEPARATE_PATHS
  } else {
    // Same as for case where boundary does not move.
    size_t old_size_limit, young_size_limit;
    old_size_limit = max_old_size;
    young_size_limit = max_young_size;

    _young_gen = new PSYoungGen(init_young_size,
                              min_young_size,
                              max_young_size);
    _old_gen = new PSOldGen(old_size,
                          min_old_size,
                          max_old_size,
                          "old", 1);
    _gens = 0;
    _young_gen->initialize(young_rs, alignment);
    _old_gen->initialize(old_rs, alignment, "old", 1);

    _size_policy = 
      new PSAdaptiveSizePolicy(young_gen()->eden_space()->capacity_in_bytes(),
                               old_gen()->capacity_in_bytes(),
                               young_gen()->to_space()->capacity_in_bytes(),
                               generation_alignment(),
			       intra_generation_alignment(),
			       max_gc_pause_sec,
			       max_gc_minor_pause_sec,
			       GCTimeRatio
			       );
  }
#endif

  _perm_gen = new PSPermGen(perm_rs,
			    alignment,
                            perm_size,
                            perm_size,
                            max_perm_size,
                            "perm", 2);

  assert(!UseAdaptiveGCBoundary ||
    (old_gen()->virtual_space()->high_boundary() == 
     young_gen()->virtual_space()->low_boundary()),
    "Boundaries must meet");
  // initialize the policy counters - 2 collectors, 3 generations
  _gc_policy_counters = 
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);
  _psh = this;

  return JNI_OK;
}
jint ParallelScavengeHeap::initialize() {
    CollectedHeap::pre_initialize();

    // Cannot be initialized until after the flags are parsed
    // GenerationSizer flag_parser;
    _collector_policy = new GenerationSizer();

    size_t yg_min_size = _collector_policy->min_young_gen_size();
    size_t yg_max_size = _collector_policy->max_young_gen_size();
    size_t og_min_size = _collector_policy->min_old_gen_size();
    size_t og_max_size = _collector_policy->max_old_gen_size();
    // Why isn't there a min_perm_gen_size()?
    size_t pg_min_size = _collector_policy->perm_gen_size();
    size_t pg_max_size = _collector_policy->max_perm_gen_size();

    trace_gen_sizes("ps heap raw",
                    pg_min_size, pg_max_size,
                    og_min_size, og_max_size,
                    yg_min_size, yg_max_size);

    // The ReservedSpace ctor used below requires that the page size for the perm
    // gen is <= the page size for the rest of the heap (young + old gens).
    const size_t og_page_sz = os::page_size_for_region(yg_min_size + og_min_size,
                              yg_max_size + og_max_size,
                              8);
    const size_t pg_page_sz = MIN2(os::page_size_for_region(pg_min_size,
                                   pg_max_size, 16),
                                   og_page_sz);
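    // os::page_size_for_region() picks a page size suited to each region
    // (the trailing 8 and 16 are minimum page counts); pg_page_sz is then
    // clamped to og_page_sz so the perm gen page size stays <= that of the
    // rest of the heap, as the ReservedSpace ctor comment above requires.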

    const size_t pg_align = set_alignment(_perm_gen_alignment,  pg_page_sz);
    const size_t og_align = set_alignment(_old_gen_alignment,   og_page_sz);
    const size_t yg_align = set_alignment(_young_gen_alignment, og_page_sz);

    // Update sizes to reflect the selected page size(s).
    //
    // NEEDS_CLEANUP.  The default TwoGenerationCollectorPolicy uses NewRatio; it
    // should check UseAdaptiveSizePolicy.  Changes from generationSizer could
    // move to the common code.
    yg_min_size = align_size_up(yg_min_size, yg_align);
    yg_max_size = align_size_up(yg_max_size, yg_align);
    size_t yg_cur_size =
        align_size_up(_collector_policy->young_gen_size(), yg_align);
    yg_cur_size = MAX2(yg_cur_size, yg_min_size);

    og_min_size = align_size_up(og_min_size, og_align);
    // Align old gen size down to preserve specified heap size.
    assert(og_align == yg_align, "sanity");
    og_max_size = align_size_down(og_max_size, og_align);
    og_max_size = MAX2(og_max_size, og_min_size);
    size_t og_cur_size =
        align_size_down(_collector_policy->old_gen_size(), og_align);
    og_cur_size = MAX2(og_cur_size, og_min_size);

    pg_min_size = align_size_up(pg_min_size, pg_align);
    pg_max_size = align_size_up(pg_max_size, pg_align);
    size_t pg_cur_size = pg_min_size;

    trace_gen_sizes("ps heap rnd",
                    pg_min_size, pg_max_size,
                    og_min_size, og_max_size,
                    yg_min_size, yg_max_size);

    const size_t total_reserved = pg_max_size + og_max_size + yg_max_size;
    char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
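    // With compressed oops, preferred_heap_base() proposes a base address
    // for the cheapest narrow-oop encoding; the fallback code below retries
    // with zero-based and then heap-based narrow oops if the reservation
    // at that address fails.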

    // The main part of the heap (old gen + young gen) can often use a larger page
    // size than is needed or wanted for the perm gen.  Use the "compound
    // alignment" ReservedSpace ctor to avoid having to use the same page size for
    // all gens.

    ReservedHeapSpace heap_rs(pg_max_size, pg_align, og_max_size + yg_max_size,
                              og_align, addr);

    if (UseCompressedOops) {
        if (addr != NULL && !heap_rs.is_reserved()) {
            // Failed to reserve at specified address - the requested memory
            // region is taken already, for example, by 'java' launcher.
            // Try again to reserve the heap higher.
            addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
            ReservedHeapSpace heap_rs0(pg_max_size, pg_align, og_max_size + yg_max_size,
                                       og_align, addr);
            if (addr != NULL && !heap_rs0.is_reserved()) {
                // Failed to reserve at specified address again - give up.
                addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
                assert(addr == NULL, "");
                ReservedHeapSpace heap_rs1(pg_max_size, pg_align, og_max_size + yg_max_size,
                                           og_align, addr);
                heap_rs = heap_rs1;
            } else {
                heap_rs = heap_rs0;
            }
        }
    }

    os::trace_page_sizes("ps perm", pg_min_size, pg_max_size, pg_page_sz,
                         heap_rs.base(), pg_max_size);
    os::trace_page_sizes("ps main", og_min_size + yg_min_size,
                         og_max_size + yg_max_size, og_page_sz,
                         heap_rs.base() + pg_max_size,
                         heap_rs.size() - pg_max_size);
    if (!heap_rs.is_reserved()) {
        vm_shutdown_during_initialization(
            "Could not reserve enough space for object heap");
        return JNI_ENOMEM;
    }

    _reserved = MemRegion((HeapWord*)heap_rs.base(),
                          (HeapWord*)(heap_rs.base() + heap_rs.size()));

    CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3);
    _barrier_set = barrier_set;
    oopDesc::set_bs(_barrier_set);
    if (_barrier_set == NULL) {
        vm_shutdown_during_initialization(
            "Could not reserve enough space for barrier set");
        return JNI_ENOMEM;
    }

    // Initial young gen size is 4 Mb
    //
    // XXX - what about flag_parser.young_gen_size()?
    const size_t init_young_size = align_size_up(4 * M, yg_align);
    yg_cur_size = MAX2(MIN2(init_young_size, yg_max_size), yg_cur_size);
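    // The MIN2/MAX2 above caps the 4 MB default at yg_max_size while never
    // shrinking yg_cur_size below the value derived from the flags.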

    // Split the reserved space into perm gen and the main heap (everything else).
    // The main heap uses a different alignment.
    ReservedSpace perm_rs = heap_rs.first_part(pg_max_size);
    ReservedSpace main_rs = heap_rs.last_part(pg_max_size, og_align);

    // Make up the generations
    // Calculate the maximum size that a generation can grow.  This
    // includes growth into the other generation.  Note that the
    // parameter _max_gen_size is kept as the maximum
    // size of the generation as the boundaries currently stand.
    // _max_gen_size is still used as that value.
    double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
    double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

    _gens = new AdjoiningGenerations(main_rs,
                                     og_cur_size,
                                     og_min_size,
                                     og_max_size,
                                     yg_cur_size,
                                     yg_min_size,
                                     yg_max_size,
                                     yg_align);

    _old_gen = _gens->old_gen();
    _young_gen = _gens->young_gen();

    const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
    const size_t old_capacity = _old_gen->capacity_in_bytes();
    const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
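    // Seed the adaptive size policy with the smaller of eden and old gen
    // capacity as its initial promotion-size estimate.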
    _size_policy =
        new PSAdaptiveSizePolicy(eden_capacity,
                                 initial_promo_size,
                                 young_gen()->to_space()->capacity_in_bytes(),
                                 intra_heap_alignment(),
                                 max_gc_pause_sec,
                                 max_gc_minor_pause_sec,
                                 GCTimeRatio
                                );

    _perm_gen = new PSPermGen(perm_rs,
                              pg_align,
                              pg_cur_size,
                              pg_cur_size,
                              pg_max_size,
                              "perm", 2);

    assert(!UseAdaptiveGCBoundary ||
           (old_gen()->virtual_space()->high_boundary() ==
            young_gen()->virtual_space()->low_boundary()),
           "Boundaries must meet");
    // initialize the policy counters - 2 collectors, 3 generations
    _gc_policy_counters =
        new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);
    _psh = this;

    // Set up the GCTaskManager
    _gc_task_manager = GCTaskManager::create(ParallelGCThreads);
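    // The GCTaskManager owns the ParallelGCThreads worker threads used by
    // both the scavenge and the parallel compaction phases.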

    if (UseParallelOldGC && !PSParallelCompact::initialize()) {
        return JNI_ENOMEM;
    }

    return JNI_OK;
}
// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(
    size_t size,
    bool is_noref,
    bool is_tlab,
    bool* gc_overhead_limit_was_exceeded) {
    assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
    assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
    assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

    // In general gc_overhead_limit_was_exceeded should be false, so set
    // it to false here, and reset it to true only if the gc time limit is
    // being exceeded as checked below.
    *gc_overhead_limit_was_exceeded = false;

    HeapWord* result = young_gen()->allocate(size, is_tlab);

    uint loop_count = 0;
    uint gc_count = 0;

    while (result == NULL) {
        // We don't want to have multiple collections for a single filled generation.
        // To prevent this, each thread tracks the total_collections() value, and if
        // the count has changed, does not do a new collection.
        //
        // The collection count must be read only while holding the heap lock. VM
        // operations also hold the heap lock during collections. There is a lock
        // contention case where thread A blocks waiting on the Heap_lock, while
        // thread B is holding it doing a collection. When thread A gets the lock,
        // the collection count has already changed. To prevent duplicate collections,
        // the policy MUST attempt allocations during the same period it reads the
        // total_collections() value!
        {
            MutexLocker ml(Heap_lock);
            gc_count = Universe::heap()->total_collections();

            result = young_gen()->allocate(size, is_tlab);

            // (1) If the requested object is too large to easily fit in the
            //     young_gen, or
            // (2) If GC is locked out via GCLocker, young gen is full and
            //     the need for a GC already signalled to GCLocker (done
            //     at a safepoint),
            // ... then, rather than force a safepoint and (a potentially futile)
            // collection (attempt) for each allocation, try allocation directly
            // in old_gen. For case (2) above, we may in the future allow
            // TLAB allocation directly in the old gen.
            if (result != NULL) {
                return result;
            }
            if (!is_tlab &&
                    size >= (young_gen()->eden_space()->capacity_in_words(Thread::current()) / 2)) {
                result = old_gen()->allocate(size, is_tlab);
                if (result != NULL) {
                    return result;
                }
            }
            if (GC_locker::is_active_and_needs_gc()) {
                // GC is locked out. If this is a TLAB allocation,
                // return NULL; the requestor will retry allocation
                // of an individual object at a time.
                if (is_tlab) {
                    return NULL;
                }

                // If this thread is not in a jni critical section, we stall
                // the requestor until the critical section has cleared and
                // GC allowed. When the critical section clears, a GC is
                // initiated by the last thread exiting the critical section; so
                // we retry the allocation sequence from the beginning of the loop,
                // rather than causing more, now probably unnecessary, GC attempts.
                JavaThread* jthr = JavaThread::current();
                if (!jthr->in_critical()) {
                    MutexUnlocker mul(Heap_lock);
                    GC_locker::stall_until_clear();
                    continue;
                } else {
                    if (CheckJNICalls) {
                        fatal("Possible deadlock due to allocating while"
                              " in jni critical section");
                    }
                    return NULL;
                }
            }
        }

        if (result == NULL) {

            // Generate a VM operation
            VM_ParallelGCFailedAllocation op(size, is_tlab, gc_count);
            VMThread::execute(&op);

            // Did the VM operation execute? If so, return the result directly.
            // This prevents us from looping until time out on requests that can
            // not be satisfied.
            if (op.prologue_succeeded()) {
                assert(Universe::heap()->is_in_or_null(op.result()),
                       "result not in heap");

                // If GC was locked out during VM operation then retry allocation
                // and/or stall as necessary.
                if (op.gc_locked()) {
                    assert(op.result() == NULL, "must be NULL if gc_locked() is true");
                    continue;  // retry and/or stall as necessary
                }

                // Exit the loop if the gc time limit has been exceeded.
                // The allocation must have failed above ("result" guarding
                // this path is NULL) and the most recent collection has exceeded the
                // gc overhead limit (although enough may have been collected to
                // satisfy the allocation).  Exit the loop so that an out-of-memory
                // will be thrown (return a NULL ignoring the contents of
                // op.result()),
                // but clear gc_overhead_limit_exceeded so that the next collection
                // starts with a clean slate (i.e., forgets about previous overhead
                // excesses).  Fill op.result() with a filler object so that the
                // heap remains parsable.
                const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
                const bool softrefs_clear = collector_policy()->all_soft_refs_clear();
                assert(!limit_exceeded || softrefs_clear, "Should have been cleared");
                if (limit_exceeded && softrefs_clear) {
                    *gc_overhead_limit_was_exceeded = true;
                    size_policy()->set_gc_overhead_limit_exceeded(false);
                    if (PrintGCDetails && Verbose) {
                        gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
                                               "return NULL because gc_overhead_limit_exceeded is set");
                    }
                    if (op.result() != NULL) {
                        CollectedHeap::fill_with_object(op.result(), size);
                    }
                    return NULL;
                }

                return op.result();
            }
        }

        // The policy object will prevent us from looping forever. If the
        // time spent in gc crosses a threshold, we will bail out.
        loop_count++;
        if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
                (loop_count % QueuedAllocationWarningCount == 0)) {
            warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
                    " size=%d %s", loop_count, size, is_tlab ? "(TLAB)" : "");
        }
    }

    return result;
}
// Don't implement this by using is_in_young().  This method is used
// in some cases to check that is_in_young() is correct.
bool ParallelScavengeHeap::is_in_partial_collection(const void *p) {
    assert(is_in_reserved(p) || p == NULL,
           "Does not work if address is non-null and outside of the heap");
    // The order of the generations is perm (low addr), old, young (high addr)
    return p >= old_gen()->reserved().end();
}
bool ParallelScavengeHeap::is_maximal_no_gc() const {
    return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}
AdjoiningGenerations::AdjoiningGenerations(ReservedSpace old_young_rs,
                                           size_t init_low_byte_size,
                                           size_t min_low_byte_size,
                                           size_t max_low_byte_size,
                                           size_t init_high_byte_size,
                                           size_t min_high_byte_size,
                                           size_t max_high_byte_size,
                                           size_t alignment) :
  _virtual_spaces(old_young_rs, min_low_byte_size,
                  min_high_byte_size, alignment) {
  assert(min_low_byte_size <= init_low_byte_size &&
         init_low_byte_size <= max_low_byte_size, "Parameter check");
  assert(min_high_byte_size <= init_high_byte_size &&
         init_high_byte_size <= max_high_byte_size, "Parameter check");
  // Create the generations differently based on the option to
  // move the boundary.
  if (UseAdaptiveGCBoundary) {
    // Initialize the adjoining virtual spaces.  Then pass a
    // virtual space to each generation for initialization of
    // the generation.

    // Does the actual creation of the virtual spaces
    _virtual_spaces.initialize(max_low_byte_size,
                               init_low_byte_size,
                               init_high_byte_size);
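    // The low virtual space backs the old gen and the high one backs the
    // young gen; with UseAdaptiveGCBoundary the boundary between them can
    // later move to shift reserved space from one generation to the other.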

    // Place the young gen at the high end.  Passes in the virtual space.
    _young_gen = new ASPSYoungGen(_virtual_spaces.high(),
                                  _virtual_spaces.high()->committed_size(),
                                  min_high_byte_size,
                                  _virtual_spaces.high_byte_size_limit());

    // Place the old gen at the low end. Passes in the virtual space.
    _old_gen = new ASPSOldGen(_virtual_spaces.low(),
                              _virtual_spaces.low()->committed_size(),
                              min_low_byte_size,
                              _virtual_spaces.low_byte_size_limit(),
                              "old", 1);

    young_gen()->initialize_work();
    assert(young_gen()->reserved().byte_size() <= young_gen()->gen_size_limit(),
     "Consistency check");
    assert(old_young_rs.size() >= young_gen()->gen_size_limit(),
     "Consistency check");

    old_gen()->initialize_work("old", 1);
    assert(old_gen()->reserved().byte_size() <= old_gen()->gen_size_limit(),
     "Consistency check");
    assert(old_young_rs.size() >= old_gen()->gen_size_limit(),
     "Consistency check");
  } else {

    // Layout the reserved space for the generations.
    ReservedSpace old_rs   =
      virtual_spaces()->reserved_space().first_part(max_low_byte_size);
    ReservedSpace heap_rs  =
      virtual_spaces()->reserved_space().last_part(max_low_byte_size);
    ReservedSpace young_rs = heap_rs.first_part(max_high_byte_size);
    assert(young_rs.size() == heap_rs.size(), "Didn't reserve all of the heap");

    // Create the generations.  Virtual spaces are not passed in.
    _young_gen = new PSYoungGen(init_high_byte_size,
                                min_high_byte_size,
                                max_high_byte_size);
    _old_gen = new PSOldGen(init_low_byte_size,
                            min_low_byte_size,
                            max_low_byte_size,
                            "old", 1);

    // The virtual spaces are created by the initialization of the gens.
    _young_gen->initialize(young_rs, alignment);
    assert(young_gen()->gen_size_limit() == young_rs.size(),
      "Consistency check");
    _old_gen->initialize(old_rs, alignment, "old", 1);
    assert(old_gen()->gen_size_limit() == old_rs.size(), "Consistency check");
  }
}