HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in(addr)) {
    Unimplemented();
  } else if (old_gen()->is_in(addr)) {
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  } else if (perm_gen()->is_in(addr)) {
    return perm_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return 0;
}
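// [Illustrative sketch, not VM code] The start arrays consulted above are
// block-offset-style tables: the covered space is split into fixed-size
// blocks, each block remembers an object start at or below it, and
// object_start() walks forward from there to the object containing the
// address. A minimal, self-contained version of the idea; BlockStartTable
// and its layout are hypothetical stand-ins for the VM's ObjectStartArray.
#include <cassert>
#include <cstddef>
#include <vector>

class BlockStartTable {
  static const std::size_t block_words = 8;            // words per block (hypothetical)
  static const std::size_t no_start    = (std::size_t)-1;
  std::vector<std::size_t> _starts;    // per block: lowest object start in it
  std::vector<std::size_t> _obj_size;  // word index -> object size in words

public:
  explicit BlockStartTable(std::size_t heap_words)
    : _starts(heap_words / block_words + 1, no_start),
      _obj_size(heap_words, 0) {}

  // Record a bump-pointer allocation of 'size' words at word index 'start'.
  void record_allocation(std::size_t start, std::size_t size) {
    _obj_size[start] = size;
    std::size_t block = start / block_words;
    if (_starts[block] == no_start || _starts[block] > start) {
      _starts[block] = start;
    }
  }

  // Back up to a block whose recorded start is at or below 'addr', then
  // walk forward object to object (allocation is contiguous).
  std::size_t object_start(std::size_t addr) const {
    std::size_t block = addr / block_words;
    while (_starts[block] == no_start || _starts[block] > addr) {
      assert(block > 0 && "no object covers addr");
      --block;
    }
    std::size_t obj = _starts[block];
    while (obj + _obj_size[obj] <= addr) {
      obj += _obj_size[obj];
    }
    return obj;
  }
};

int main() {
  BlockStartTable t(64);
  t.record_allocation(0, 5);          // object A occupies words [0, 5)
  t.record_allocation(5, 20);         // object B occupies words [5, 25)
  assert(t.object_start(17) == 5);    // word 17 lies inside B
  return 0;
}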
void ParallelScavengeHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    young_gen()->record_spaces_top();
    old_gen()->record_spaces_top();
    perm_gen()->record_spaces_top();
  }
}
VirtualSpaceSummary ParallelScavengeHeap::create_perm_gen_space_summary() {
  PSVirtualSpace* space = perm_gen()->virtual_space();
  return VirtualSpaceSummary(
    (HeapWord*)space->low_boundary(),
    (HeapWord*)space->high(),
    (HeapWord*)space->high_boundary());
}
HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // called from os::print_location by find or VMError
    if (Debugging || VMError::fatal_error_in_progress())  return NULL;
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  } else if (perm_gen()->is_in_reserved(addr)) {
    assert(perm_gen()->is_in(addr),
           "addr should be in allocated part of perm gen");
    return perm_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return 0;
}
void ParallelScavengeHeap::gen_mangle_unused_area() {
  if (ZapUnusedHeapArea) {
    young_gen()->eden_space()->mangle_unused_area();
    young_gen()->to_space()->mangle_unused_area();
    young_gen()->from_space()->mangle_unused_area();
    old_gen()->object_space()->mangle_unused_area();
    perm_gen()->object_space()->mangle_unused_area();
  }
}
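// [Illustrative sketch, not VM code] Mangling fills dead heap words with a
// recognizable sentinel so stray reads of unused memory fail fast in debug
// runs; the record_spaces_top() calls earlier remember the tops so later
// checks know where mangling should still be intact. A minimal version of
// the idea, with a hypothetical sentinel (HotSpot uses its own
// bad-heap-word pattern):
#include <cassert>
#include <cstdint>
#include <vector>

static const std::uintptr_t kMangleWord = (std::uintptr_t)0xBAADBABEBAADBABEULL;

// Fill the unused tail [top, end) of a space with the sentinel.
static void mangle_unused_area(std::uintptr_t* top, std::uintptr_t* end) {
  for (std::uintptr_t* p = top; p < end; ++p) *p = kMangleWord;
}

// Debug check: the unused tail must still be entirely mangled.
static bool is_mangled(const std::uintptr_t* top, const std::uintptr_t* end) {
  for (const std::uintptr_t* p = top; p < end; ++p)
    if (*p != kMangleWord) return false;
  return true;
}

int main() {
  std::vector<std::uintptr_t> space(64, 0);
  std::uintptr_t* top = space.data() + 16;   // words below 'top' are live
  mangle_unused_area(top, space.data() + space.size());
  assert(is_mangled(top, space.data() + space.size()));
  return 0;
}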
size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  estimated -= perm_gen()->reserved().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}
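// Worked example with hypothetical sizes: if the whole reservation is
// 1024 MB, the perm gen reservation is 64 MB, and to-space is 16 MB, the
// estimate is 1024 - 64 - 16 = 944 MB. To-space is subtracted because one
// survivor space is always empty, so it can never hold application data at
// the same time as the rest of the heap; MAX2 then keeps the estimate from
// dipping below the currently committed capacity().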
//
// This is the policy code for permanent allocations which have failed
// and require a collection. Note that just as in failed_mem_allocate,
// we do not set collection policy, only where & when to allocate and
// collect.
HeapWord* ParallelScavengeHeap::failed_permanent_mem_allocate(bool& notify_ref_lock, size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  assert(size > perm_gen()->free_in_words(), "Allocation should fail");

  // We assume (and assert!) that an allocation at this point will fail
  // unless we collect.

  // First level allocation failure.  Mark-sweep and allocate in perm gen.
  PSMarkSweep::invoke(notify_ref_lock, false);
  HeapWord* result = perm_gen()->allocate_permanent(size);

  // Second level allocation failure. We're running out of memory.
  if (result == NULL) {
    PSMarkSweep::invoke(notify_ref_lock, true /* max_heap_compaction */);
    result = perm_gen()->allocate_permanent(size);
  }

  return result;
}
//
// This is the policy code for permanent allocations which have failed
// and require a collection. Note that just as in failed_mem_allocate,
// we do not set collection policy, only where & when to allocate and
// collect.
HeapWord* ParallelScavengeHeap::failed_permanent_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  assert(size > perm_gen()->free_in_words(), "Allocation should fail");

  // We assume (and assert!) that an allocation at this point will fail
  // unless we collect.

  // First level allocation failure.  Mark-sweep and allocate in perm gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  invoke_full_gc(false);
  HeapWord* result = perm_gen()->allocate_permanent(size);

  // Second level allocation failure. We're running out of memory.
  if (result == NULL) {
    invoke_full_gc(true);
    result = perm_gen()->allocate_permanent(size);
  }

  return result;
}
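// [Hedged sketch] For context, the VM-operation side that reaches the
// policy above looks roughly like this once the VM thread is at a
// safepoint. The exact VM_ParallelGCFailedPermanentAllocation::doit() body
// in this codebase may differ; _size and _result are assumed fields of the
// operation.
void VM_ParallelGCFailedPermanentAllocation::doit() {
  // Runs on the VM thread at a safepoint, matching the asserts above.
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap,
         "must be a ParallelScavengeHeap");
  _result = heap->failed_permanent_mem_allocate(_size);
}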
bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  if (young_gen()->is_in_reserved(p)) {
    return true;
  }

  if (old_gen()->is_in_reserved(p)) {
    return true;
  }

  if (perm_gen()->is_in_reserved(p)) {
    return true;
  }

  return false;
}
void ParallelScavengeHeap::verify(bool silent, VerifyOption option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    if (!silent) {
      gclog_or_tty->print("permanent ");
    }
    perm_gen()->verify();

    if (!silent) {
      gclog_or_tty->print("tenured ");
    }
    old_gen()->verify();

    if (!silent) {
      gclog_or_tty->print("eden ");
    }
    young_gen()->verify();
  }
}
void ParallelScavengeHeap::verify(bool allow_dirty, bool silent) {
  // Can't fill the tlabs, so we can't verify before the first collection.  FIX ME!
  if (total_collections() > 0) {
    if (!silent) {
      gclog_or_tty->print("permanent ");
    }
    perm_gen()->verify(allow_dirty);

    if (!silent) {
      gclog_or_tty->print("tenured ");
    }
    old_gen()->verify(allow_dirty);

    if (!silent) {
      gclog_or_tty->print("eden ");
    }
    young_gen()->verify(allow_dirty);
  }
}
//
// This is the policy loop for allocating in the permanent generation.
// If the initial allocation fails, we create a vm operation which will
// cause a collection.
HeapWord* ParallelScavengeHeap::permanent_mem_allocate(size_t size) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  HeapWord* result;
  uint loop_count = 0;

  do {
    {
      MutexLocker ml(Heap_lock);
      result = perm_gen()->allocate_permanent(size);
    }

    if (result == NULL) {
      // Generate a VM operation
      VM_ParallelGCFailedPermanentAllocation op(size);
      VMThread::execute(&op);
      
      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_permanent_or_null(op.result()), "result not in heap");
        return op.result();
      }
    }
    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) && (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::permanent_mem_allocate retries %d times \n\t"
              " size=" SIZE_FORMAT, loop_count, size);
    }
  } while (result == NULL && !size_policy()->gc_time_limit_exceeded());

  return result;
}
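// [Illustrative sketch, standard C++] VMThread::execute() above hands the
// operation to a dedicated thread and blocks the requester until it has
// run. The same handoff pattern in miniature, with std::packaged_task
// standing in for the VM operation and the hypothetical MiniVMThread for
// the VM thread (no safepoints here, just the queue-and-wait protocol):
#include <condition_variable>
#include <future>
#include <mutex>
#include <queue>
#include <thread>

class MiniVMThread {
  std::queue<std::packaged_task<void*()> > _ops;
  std::mutex _m;
  std::condition_variable _cv;
  bool _done = false;
  std::thread _thr{[this] { run(); }};   // declared last: starts after the rest

  void run() {
    for (;;) {
      std::packaged_task<void*()> op;
      {
        std::unique_lock<std::mutex> l(_m);
        _cv.wait(l, [this] { return _done || !_ops.empty(); });
        if (_ops.empty()) return;        // shut down once drained
        op = std::move(_ops.front());
        _ops.pop();
      }
      op();   // the real VM thread would run this at a safepoint
    }
  }

public:
  // Blocks the caller until the operation has run, like VMThread::execute.
  void* execute(std::packaged_task<void*()> op) {
    std::future<void*> f = op.get_future();
    { std::lock_guard<std::mutex> l(_m); _ops.push(std::move(op)); }
    _cv.notify_one();
    return f.get();
  }

  ~MiniVMThread() {
    { std::lock_guard<std::mutex> l(_m); _done = true; }
    _cv.notify_one();
    _thr.join();
  }
};

int main() {
  MiniVMThread vm;
  void* r = vm.execute(std::packaged_task<void*()>([] { return (void*)0; }));
  return r == 0 ? 0 : 1;
}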
void ParallelScavengeHeap::verify(bool allow_dirty, bool silent, bool option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    if (!silent) {
      gclog_or_tty->print("permanent ");
    }
    perm_gen()->verify(allow_dirty);

    if (!silent) {
      gclog_or_tty->print("tenured ");
    }
    old_gen()->verify(allow_dirty);

    if (!silent) {
      gclog_or_tty->print("eden ");
    }
    young_gen()->verify(allow_dirty);
  }
  if (!silent) {
    gclog_or_tty->print("ref_proc ");
  }
  ReferenceProcessor::verify();
}
void SharedHeap::ref_processing_init() {
  perm_gen()->ref_processor_init();
}
//
// This is the policy loop for allocating in the permanent generation.
// If the initial allocation fails, we create a vm operation which will
// cause a collection.
HeapWord* ParallelScavengeHeap::permanent_mem_allocate(size_t size) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  HeapWord* result;

  uint loop_count = 0;
  uint gc_count = 0;

  do {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();

      result = perm_gen()->allocate_permanent(size);
    }

    if (result == NULL) {

      // Exit the loop if the gc time limit has been exceeded.
      // The allocation must have failed above (result must be NULL),
      // and the most recent collection must have exceeded the
      // gc time limit.  Exit the loop so that an out-of-memory
      // will be thrown (returning a NULL will do that), but
      // clear gc_time_limit_exceeded so that the next collection
      // will succeed if the application decides to handle the
      // out-of-memory and tries to go on.
      if (size_policy()->gc_time_limit_exceeded()) {
        size_policy()->set_gc_time_limit_exceeded(false);
        if (PrintGCDetails && Verbose) {
          gclog_or_tty->print_cr("ParallelScavengeHeap::permanent_mem_allocate: "
            "return NULL because gc_time_limit_exceeded is set");
        }
        assert(result == NULL, "Allocation did not fail");
        return NULL;
      }

      // Generate a VM operation
      VM_ParallelGCFailedPermanentAllocation op(size, gc_count);
      VMThread::execute(&op);
        
      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_permanent_or_null(op.result()),
               "result not in heap");
        // If a NULL result is being returned, an out-of-memory
        // will be thrown now.  Clear the gc_time_limit_exceeded
        // flag to avoid the following situation:
        //   gc_time_limit_exceeded is set during a collection;
        //   the collection fails to return enough space and an OOM is thrown;
        //   the next GC is skipped because the gc_time_limit_exceeded
        //     flag is set and another OOM is thrown.
        if (op.result() == NULL) {
          size_policy()->set_gc_time_limit_exceeded(false);
        }
        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::permanent_mem_allocate retries %d times \n\t"
              " size=" SIZE_FORMAT, loop_count, size);
    }
  } while (result == NULL);

  return result;
}
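// [Illustrative sketch, standard C++] The gc_count protocol described in
// the comment above, in miniature: the failed allocation attempt and the
// collection-count read happen under one lock, and the quoted count lets
// the collector turn away requests that raced with another thread's GC
// (the "prologue failed" case). MiniHeap and its sizes are hypothetical.
#include <cstddef>
#include <mutex>

class MiniHeap {
  std::mutex  _heap_lock;
  unsigned    _total_collections = 0;
  std::size_t _words[1024];
  std::size_t _top = 1024;               // start full: first attempt fails

public:
  std::size_t* allocate(std::size_t size) {
    unsigned gc_count;
    {
      std::lock_guard<std::mutex> l(_heap_lock);
      gc_count = _total_collections;     // read under the same lock
      if (1024 - _top >= size) { std::size_t* p = &_words[_top]; _top += size; return p; }
    }
    // Request a collection, quoting the count we allocated under.
    return collect_and_allocate(size, gc_count);
  }

  std::size_t* collect_and_allocate(std::size_t size, unsigned requested_count) {
    std::lock_guard<std::mutex> l(_heap_lock);
    if (_total_collections != requested_count) {
      // Someone collected since our attempt: retry, don't collect again.
      return 0;
    }
    ++_total_collections;
    _top = 0;                            // pretend the collection freed everything
    if (1024 - _top >= size) { std::size_t* p = &_words[_top]; _top += size; return p; }
    return 0;
  }
};

int main() {
  MiniHeap heap;
  std::size_t* p = heap.allocate(16);    // fails, collects once, succeeds
  return p != 0 ? 0 : 1;
}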
void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  perm_gen()->update_counters();
}
  void permanent_oop_iterate(OopClosure* cl) {
    assert(perm_gen(), "NULL perm gen");
    _perm_gen->oop_iterate(cl);
  }
  // Different from is_in_permanent in that is_in_permanent
  // only checks if p is in the reserved area of the heap
  // and this checks to see if it is in the committed area.
  // This is typically used by things like the forte stackwalker
  // during verification of suspicious frame values.
  bool is_permanent(const void *p) const {
    assert(perm_gen(), "NULL perm gen");
    return perm_gen()->is_in(p);
  }
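// [Illustrative sketch, POSIX] The reserved/committed distinction above
// maps directly onto virtual memory: a reservation is address space only,
// and committing makes a subrange usable. On POSIX that is mmap with
// PROT_NONE followed by mprotect (Windows would use VirtualAlloc with
// MEM_RESERVE and MEM_COMMIT):
#include <cassert>
#include <cstddef>
#include <sys/mman.h>

int main() {
  const std::size_t page = 4096;
  const std::size_t reserved_bytes = 16 * page;

  // Reserve address space only: nothing is usable yet.
  void* res = mmap(0, reserved_bytes, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (res == MAP_FAILED) return 1;
  char* base = (char*)res;

  // Commit (make usable) just the first four pages.
  if (mprotect(base, 4 * page, PROT_READ | PROT_WRITE) != 0) return 1;

  char* p = base + 2 * page;   // inside the committed prefix
  char* q = base + 8 * page;   // reserved but not committed

  // is_in_reserved-style check: both addresses pass.
  assert(p >= base && p < base + reserved_bytes);
  assert(q >= base && q < base + reserved_bytes);

  // is_in / is_permanent-style check (committed part only): p qualifies.
  *p = 1;                      // fine: committed
  // *q = 1;                   // would fault: reserved but uncommitted

  munmap(base, reserved_bytes);
  return 0;
}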
void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
  perm_gen()->object_iterate(cl);
}
//
// This is the policy loop for allocating in the permanent generation.
// If the initial allocation fails, we create a vm operation which will
// cause a collection.
HeapWord* ParallelScavengeHeap::permanent_mem_allocate(size_t size) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  HeapWord* result;

  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  do {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count      = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();

      result = perm_gen()->allocate_permanent(size);

      if (result != NULL) {
        return result;
      }

      if (GC_locker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {

      // Exit the loop if the gc time limit has been exceeded.
      // The allocation must have failed above (result must be NULL),
      // and the most recent collection must have exceeded the
      // gc time limit.  Exit the loop so that an out-of-memory
      // will be thrown (returning a NULL will do that), but
      // clear gc_overhead_limit_exceeded so that the next collection
      // will succeed if the application decides to handle the
      // out-of-memory and tries to go on.
      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
      if (limit_exceeded) {
        size_policy()->set_gc_overhead_limit_exceeded(false);
        if (PrintGCDetails && Verbose) {
          gclog_or_tty->print_cr("ParallelScavengeHeap::permanent_mem_allocate:"
            " return NULL because gc_overhead_limit_exceeded is set");
        }
        assert(result == NULL, "Allocation did not fail");
        return NULL;
      }

      // Generate a VM operation
      VM_ParallelGCFailedPermanentAllocation op(size, gc_count, full_gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_permanent_or_null(op.result()),
          "result not in heap");
        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }
        // If a NULL result is being returned, an out-of-memory
        // will be thrown now.  Clear the gc_overhead_limit_exceeded
        // flag to avoid the following situation.
        //      gc_overhead_limit_exceeded is set during a collection
        //      the collection fails to return enough space and an OOM is thrown
        //      a subsequent GC prematurely throws an out-of-memory because
        //        the gc_overhead_limit_exceeded counts did not start
        //        again from 0.
        if (op.result() == NULL) {
          size_policy()->reset_gc_overhead_limit_count();
        }
        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::permanent_mem_allocate retries %d times \n\t"
              " size=" SIZE_FORMAT, loop_count, size);
    }
  } while (result == NULL);

  return result;
}
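// [Illustrative sketch, standard C++] The GC_locker interplay above in
// miniature: while any thread sits in a JNI-style critical section, GC is
// deferred; a non-critical allocator that needs a GC stalls until the
// section clears, and the last thread out triggers the deferred GC rather
// than every stalled allocator forcing its own. MiniGCLocker is a
// hypothetical stand-in for GC_locker.
#include <condition_variable>
#include <mutex>

class MiniGCLocker {
  std::mutex _m;
  std::condition_variable _cv;
  int  _critical_count = 0;
  bool _needs_gc = false;

public:
  void enter_critical() {
    std::lock_guard<std::mutex> l(_m);
    ++_critical_count;
  }

  // Returns true if this thread, as the last one out, should run the GC.
  bool exit_critical() {
    std::lock_guard<std::mutex> l(_m);
    if (--_critical_count == 0 && _needs_gc) {
      _needs_gc = false;
      _cv.notify_all();   // wake stalled allocators so they retry
      return true;        // caller initiates the deferred collection
    }
    return false;
  }

  bool is_active_and_needs_gc() {
    std::lock_guard<std::mutex> l(_m);
    return _critical_count > 0 && _needs_gc;
  }

  // Called by an allocator that failed while the locker was active.
  void stall_until_clear() {
    std::unique_lock<std::mutex> l(_m);
    _needs_gc = true;
    _cv.wait(l, [this] { return _critical_count == 0; });
  }
};

int main() {
  MiniGCLocker locker;
  locker.enter_critical();
  bool run_gc = locker.exit_critical();   // no GC was requested: false
  return run_gc ? 1 : 0;
}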
void SharedHeap::process_strong_roots(bool activate_scope,
                                      bool collecting_perm_gen,
                                      ScanningOption so,
                                      OopClosure* roots,
                                      CodeBlobClosure* code_roots,
                                      OopsInGenClosure* perm_blk) {
    StrongRootsScope srs(this, activate_scope);
    // General strong roots.
    assert(_strong_roots_parity != 0, "must have called prologue code");
    if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) {
        Universe::oops_do(roots);
        ReferenceProcessor::oops_do(roots);
        // Consider perm-gen discovered lists to be strong.
        perm_gen()->ref_processor()->weak_oops_do(roots);
    }
    // Global (strong) JNI handles
    if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do))
        JNIHandles::oops_do(roots);
    // All threads execute this; the individual threads are task groups.
    if (ParallelGCThreads > 0) {
        Threads::possibly_parallel_oops_do(roots, code_roots);
    } else {
        Threads::oops_do(roots, code_roots);
    }
    if (!_process_strong_tasks->is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
        ObjectSynchronizer::oops_do(roots);
    if (!_process_strong_tasks->is_task_claimed(SH_PS_FlatProfiler_oops_do))
        FlatProfiler::oops_do(roots);
    if (!_process_strong_tasks->is_task_claimed(SH_PS_Management_oops_do))
        Management::oops_do(roots);
    if (!_process_strong_tasks->is_task_claimed(SH_PS_jvmti_oops_do))
        JvmtiExport::oops_do(roots);

    if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) {
        if (so & SO_AllClasses) {
            SystemDictionary::oops_do(roots);
        } else if (so & SO_SystemClasses) {
            SystemDictionary::always_strong_oops_do(roots);
        }
    }

    if (!_process_strong_tasks->is_task_claimed(SH_PS_StringTable_oops_do)) {
        if ((so & SO_Strings) || (!collecting_perm_gen && !JavaObjectsInPerm)) {
            StringTable::oops_do(roots);
        }
        if (JavaObjectsInPerm) {
            // Verify the string table contents are in the perm gen
            NOT_PRODUCT(StringTable::oops_do(&assert_is_perm_closure));
        }
    }

    if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {
        if (so & SO_CodeCache) {
            // (Currently, CMSCollector uses this to do intermediate-strength collections.)
            assert(collecting_perm_gen, "scanning all of code cache");
            assert(code_roots != NULL, "must supply closure for code cache");
            if (code_roots != NULL) {
                CodeCache::blobs_do(code_roots);
            }
        } else if (so & (SO_SystemClasses|SO_AllClasses)) {
            if (!collecting_perm_gen) {
                // If we are collecting from class statics, but we are not going to
                // visit all of the CodeCache, collect from the non-perm roots if any.
                // This makes the code cache function temporarily as a source of strong
                // roots for oops, until the next major collection.
                //
                // If collecting_perm_gen is true, we require that this phase will call
                // CodeCache::do_unloading.  This will kill off nmethods with expired
                // weak references, such as stale invokedynamic targets.
                CodeCache::scavenge_root_nmethods_do(code_roots);
            }
        }
        // Verify that the code cache contents are not subject to
        // movement by a scavenging collection.
        DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, /*do_marking=*/ false));
        DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
    }

    if (!collecting_perm_gen) {
        // All threads perform this; coordination is handled internally.
        rem_set()->younger_refs_iterate(perm_gen(), perm_blk);
    }
    _process_strong_tasks->all_tasks_completed();
}
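// [Illustrative sketch, standard C++] The is_task_claimed() calls above
// give exactly-once root processing across parallel workers: every worker
// races through the same task list, and whichever one claims a task first
// does the work. A minimal claim table with std::atomic; the real
// SubTasksDone protocol adds completion bookkeeping on top.
#include <atomic>
#include <cstddef>
#include <cstdio>
#include <thread>
#include <vector>

class ClaimTable {
  std::vector<std::atomic<bool> > _claimed;
public:
  explicit ClaimTable(std::size_t n) : _claimed(n) {
    for (std::size_t i = 0; i < n; ++i) _claimed[i].store(false);
  }
  // Returns true if someone else already claimed task 'i'.
  bool is_task_claimed(std::size_t i) {
    return _claimed[i].exchange(true);   // only the first caller sees false
  }
};

int main() {
  const std::size_t num_tasks = 6;
  ClaimTable tasks(num_tasks);
  std::atomic<int> executed{0};

  auto worker = [&] {
    for (std::size_t t = 0; t < num_tasks; ++t) {
      if (!tasks.is_task_claimed(t)) {
        ++executed;                      // "process root group t" here
      }
    }
  };

  std::vector<std::thread> pool;
  for (int i = 0; i < 4; ++i) pool.emplace_back(worker);
  for (std::size_t i = 0; i < pool.size(); ++i) pool[i].join();

  std::printf("tasks executed: %d (expected %zu)\n",
              executed.load(), num_tasks);
  return executed.load() == (int)num_tasks ? 0 : 1;
}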
size_t ParallelScavengeHeap::permanent_used() const {
  return perm_gen()->used_in_bytes();
}
size_t ParallelScavengeHeap::permanent_capacity() const {
  return perm_gen()->capacity_in_bytes();
}
  size_t permanent_capacity() const {
    assert(perm_gen(), "NULL perm gen");
    return perm_gen()->capacity();
  }
  size_t permanent_used() const {
    assert(perm_gen(), "NULL perm gen");
    return perm_gen()->used();
  }
void ParallelScavengeHeap::permanent_object_iterate(ObjectClosure* cl) {
  perm_gen()->object_iterate(cl);
}
  HeapWord* permanent_mem_allocate(size_t size) {
    assert(perm_gen(), "NULL perm gen");
    return _perm_gen->mem_allocate(size);
  }
void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  perm_gen()->print_on(st);
}
  void permanent_object_iterate(ObjectClosure* cl) {
    assert(perm_gen(), "NULL perm gen");
    _perm_gen->object_iterate(cl);
  }
  bool is_in_permanent(const void *p) const {
    return perm_gen()->reserved().contains(p);
  }