void PSMarkSweep::allocate_stacks() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  MutableSpace* to_space = young_gen->to_space();
  _preserved_marks = (PreservedMark*)to_space->top();
  _preserved_count = 0;

  // We want to calculate the size in bytes first.
  _preserved_count_max = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
  // Now divide by the size of a PreservedMark
  _preserved_count_max /= sizeof(PreservedMark);

  _preserved_mark_stack = NULL;
  _preserved_oop_stack = NULL;

  _marking_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);

  int size = SystemDictionary::number_of_classes() * 2;
  _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
  // (#klass/k)^2, for k ~ 10 appears a better setting, but this will have to do for
  // now until we investigate a more optimal setting.
  _revisit_mdo_stack = new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(size*2, true);
}
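// The sizing above first measures the free tail of to-space in bytes and then
// divides by the record size. The following is a standalone illustrative
// sketch of that arithmetic (not HotSpot code; FakePreservedMark and the
// sizes are hypothetical stand-ins):
#include <cstddef>
#include <cstdio>

struct FakePreservedMark { void* obj; void* mark; };  // stand-in record type

// How many records fit between 'top' and 'end' of a scratch area:
// size in bytes first, then divide by the size of one record.
static size_t preserved_capacity(const char* top, const char* end) {
  size_t bytes = (size_t)(end - top);
  return bytes / sizeof(FakePreservedMark);
}

int main() {
  char scratch[4096];
  printf("capacity = %zu records\n",
         preserved_capacity(scratch, scratch + sizeof(scratch)));
  return 0;
}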
// This method contains all heap specific policy for invoking mark sweep.
// PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
// the heap. It will do nothing further. If we need to bail out for policy
// reasons, scavenge before full gc, or any other specialized behavior, it
// needs to be added here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // Before each allocation/collection attempt, find out from the
  // policy object if GCs are, on the whole, taking too long. If so,
  // bail out without attempting a collection. The exceptions are
  // for explicitly requested GC's.
  if (!policy->gc_time_limit_exceeded() ||
      GCCause::is_user_requested_gc(gc_cause) ||
      GCCause::is_serviceability_requested_gc(gc_cause)) {
    IsGCActiveMark mark;

    if (ScavengeBeforeFullGC) {
      PSScavenge::invoke_no_policy();
    }

    int count = (maximum_heap_compaction) ? 1 : MarkSweepAlwaysCompactCount;
    IntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
    PSMarkSweep::invoke_no_policy(maximum_heap_compaction);
  }
}
void VM_GC_RegularScavenge::doit() {
  HandleMark hm;
  ParallelScavengeHeap* psh = (ParallelScavengeHeap*)Universe::heap();
  psh->ensure_parsability(false);  // must happen, even if collection does
                                   // not happen (e.g. due to GC_locker)
  psh->young_collect_as_vm_thread(GCCause::_regular_scavenge);
}
CheckForUnmarkedObjects() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  _young_gen = heap->young_gen();
  _card_table = barrier_set_cast<CardTableExtension>(heap->barrier_set());
  // No point in asserting barrier set type here. Need to make
  // CardTableExtension a unique barrier set type.
}
void PSMarkSweep::mark_sweep_phase2() {
  EventMark m("2 compute new addresses");
  TraceTime tm("phase 2", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("2");

  // Now all live objects are marked, compute the new object addresses.

  // It is imperative that we traverse perm_gen LAST. If dead space is
  // allowed a range of dead object may get overwritten by a dead int
  // array. If perm_gen is not traversed last a klassOop may get
  // overwritten. This is fine since it is dead, but if the class has dead
  // instances we have to skip them, and in order to find their size we
  // need the klassOop!
  //
  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Begin compacting into the old gen
  PSMarkSweepDecorator::set_destination_decorator_tenured();

  // This will also compact the young gen spaces.
  old_gen->precompact();

  // Compact the perm gen into the perm gen
  PSMarkSweepDecorator::set_destination_decorator_perm_gen();
  perm_gen->precompact();
}
// This method iterates over all objects in the young generation,
// unforwarding markOops. It then restores any preserved mark oops,
// and clears the _preserved_mark_stack.
void PSScavenge::clean_up_failed_promotion() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  {
    ResourceMark rm;

    // Unforward all pointers in the young gen.
    PSPromotionFailedClosure unforward_closure;
    young_gen->object_iterate(&unforward_closure);

    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Restoring %d marks", _preserved_oop_stack.size());
    }

    // Restore any saved marks.
    while (!_preserved_oop_stack.is_empty()) {
      oop obj      = _preserved_oop_stack.pop();
      markOop mark = _preserved_mark_stack.pop();
      obj->set_mark(mark);
    }

    // Clear the preserved mark and oop stack caches.
    _preserved_mark_stack.clear(true);
    _preserved_oop_stack.clear(true);
  }

  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
}
PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  _to_space = heap->young_gen()->to_space();

  assert(_promotion_manager != NULL, "Sanity");
}
// This method assumes that from-space has live data and that
// any shrinkage of the young gen is limited by location of
// from-space.
size_t PSYoungGen::available_to_live() {
  size_t delta_in_survivor = 0;
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  const size_t space_alignment = heap->intra_heap_alignment();
  const size_t gen_alignment = heap->young_gen_alignment();

  MutableSpace* space_shrinking = NULL;
  if (from_space()->end() > to_space()->end()) {
    space_shrinking = from_space();
  } else {
    space_shrinking = to_space();
  }

  // Include any space that is committed but not included in
  // the survivor spaces.
  assert(((HeapWord*)virtual_space()->high()) >= space_shrinking->end(),
    "Survivor space beyond high end");
  size_t unused_committed = pointer_delta(virtual_space()->high(),
    space_shrinking->end(), sizeof(char));

  if (space_shrinking->is_empty()) {
    // Don't let the space shrink to 0
    assert(space_shrinking->capacity_in_bytes() >= space_alignment,
      "Space is too small");
    delta_in_survivor = space_shrinking->capacity_in_bytes() - space_alignment;
  } else {
    delta_in_survivor = pointer_delta(space_shrinking->end(),
                                      space_shrinking->top(),
                                      sizeof(char));
  }

  size_t delta_in_bytes = unused_committed + delta_in_survivor;
  delta_in_bytes = align_size_down(delta_in_bytes, gen_alignment);
  return delta_in_bytes;
}
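// available_to_live() above sums two components: committed space beyond the
// higher survivor space, plus either that space's free tail or (if it is
// empty) its capacity less one alignment unit, and aligns the sum down.
// Standalone illustrative sketch with hypothetical sizes (not HotSpot code):
#include <cstddef>
#include <cstdio>

static size_t align_down(size_t size, size_t alignment) {
  return size & ~(alignment - 1);  // alignment assumed to be a power of two
}

static size_t available_to_live_sketch(size_t unused_committed,  // bytes above the space
                                       size_t space_capacity,    // capacity of the shrinking space
                                       size_t space_free_tail,   // end - top of that space
                                       bool   space_is_empty,
                                       size_t space_alignment,
                                       size_t gen_alignment) {
  size_t delta_in_survivor = space_is_empty
      ? space_capacity - space_alignment   // never let the space shrink to zero
      : space_free_tail;
  return align_down(unused_committed + delta_in_survivor, gen_alignment);
}

int main() {
  // 512K unused committed, empty 1M survivor space, 64K space / 128K gen alignment.
  printf("%zuK available to live\n",
         available_to_live_sketch(512*1024, 1024*1024, 0, true, 64*1024, 128*1024) / 1024);
  return 0;
}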
void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer);
  trace(" 1");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // General strong roots.
  {
    ParallelScavengeHeap::ParStrongRootsScope psrs;
    Universe::oops_do(mark_and_push_closure());
    JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
    CodeBlobToOopClosure each_active_code_blob(mark_and_push_closure(), /*do_marking=*/ true);
    Threads::oops_do(mark_and_push_closure(), &each_active_code_blob);
    ObjectSynchronizer::oops_do(mark_and_push_closure());
    FlatProfiler::oops_do(mark_and_push_closure());
    Management::oops_do(mark_and_push_closure());
    JvmtiExport::oops_do(mark_and_push_closure());
    SystemDictionary::always_strong_oops_do(mark_and_push_closure());
    // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
    //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
  }

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking
  {
    ref_processor()->setup_policy(clear_all_softrefs);
    const ReferenceProcessorStats& stats =
      ref_processor()->process_discovered_references(
        is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer);
    gc_tracer()->report_gc_reference_stats(stats);
  }

  // Follow system dictionary roots and unload classes
  bool purged_class = SystemDictionary::do_unloading(is_alive_closure());

  // Follow code cache roots
  CodeCache::do_unloading(is_alive_closure(), mark_and_push_closure(), purged_class);
  follow_stack(); // Flush marking stack

  // Update subklass/sibling/implementor links of live klasses
  follow_weak_klass_links();
  assert(_marking_stack.is_empty(), "just drained");

  // Visit memoized mdo's and clear unmarked weak refs
  follow_mdo_weak_refs();
  assert(_marking_stack.is_empty(), "just drained");

  // Visit interned string tables and delete unmarked oops
  StringTable::unlink(is_alive_closure());

  // Clean up unreferenced symbols in symbol table.
  SymbolTable::unlink();

  assert(_marking_stack.is_empty(), "stack should be empty by now");

  _gc_tracer->report_object_count_after_gc(is_alive_closure());
}
void PSMarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
                                    bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  EventMark m("1 mark object");
  TraceTime tm("phase 1", PrintGC && Verbose, true, gclog_or_tty);
  trace(" 1");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // General strong roots.
  Universe::oops_do(mark_and_push_closure());
  JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
  Threads::oops_do(mark_and_push_closure());
  ObjectSynchronizer::oops_do(mark_and_push_closure());
  FlatProfiler::oops_do(mark_and_push_closure());
  SystemDictionary::always_strong_oops_do(mark_and_push_closure());
  guarantee(!jvmdi::enabled(), "Should not be used with jvmdi");
  vmSymbols::oops_do(mark_and_push_closure());

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking
  ReferencePolicy* soft_ref_policy;
  if (clear_all_softrefs) {
    soft_ref_policy = new AlwaysClearPolicy();
  } else {
    NOT_COMPILER2(soft_ref_policy = new LRUCurrentHeapPolicy();)
    COMPILER2_ONLY(soft_ref_policy = new LRUMaxHeapPolicy();)
  }
template <class T>
inline bool PSScavenge::should_scavenge(T* p, bool check_to_space) {
  if (check_to_space) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    return should_scavenge(p, heap->young_gen()->to_space());
  }
  return should_scavenge(p);
}
void PSPromotionManager::initialize() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  _old_gen = heap->old_gen();
  _young_space = heap->young_gen()->to_space();

  assert(_manager_array == NULL, "Attempt to initialize twice");
  _manager_array = NEW_C_HEAP_ARRAY(PSPromotionManager*, ParallelGCThreads+1, mtGC);
  guarantee(_manager_array != NULL, "Could not initialize promotion manager");

  _stack_array_depth = new OopStarTaskQueueSet(ParallelGCThreads);
  guarantee(_stack_array_depth != NULL, "Could not initialize promotion manager");

  // Create and register the PSPromotionManager(s) for the worker threads.
  for (uint i = 0; i < ParallelGCThreads; i++) {
    _manager_array[i] = new PSPromotionManager();
    guarantee(_manager_array[i] != NULL, "Could not create PSPromotionManager");
    stack_array_depth()->register_queue(i, _manager_array[i]->claimed_stack_depth());
  }

  // The VMThread gets its own PSPromotionManager, which is not available
  // for work stealing.
  _manager_array[ParallelGCThreads] = new PSPromotionManager();
  guarantee(_manager_array[ParallelGCThreads] != NULL, "Could not create PSPromotionManager");
}
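// The initialization above follows a common layout: one manager per GC worker
// thread, each registered with a shared task-queue set for work stealing, plus
// one extra slot for the VMThread that stays out of the stealing set. A
// generic standalone sketch of that layout (plain containers, not HotSpot's
// allocators or queues; the worker count is hypothetical):
#include <cstdio>
#include <vector>

struct ManagerSketch {
  bool steals;  // whether this manager's queue participates in work stealing
  explicit ManagerSketch(bool s) : steals(s) {}
};

int main() {
  const unsigned parallel_gc_threads = 4;  // hypothetical worker count
  std::vector<ManagerSketch> managers;
  managers.reserve(parallel_gc_threads + 1);

  // Slots 0..N-1: worker managers, registered for stealing.
  for (unsigned i = 0; i < parallel_gc_threads; i++) {
    managers.emplace_back(/*steals=*/true);
  }
  // Slot N: the VMThread's manager, not available for stealing.
  managers.emplace_back(/*steals=*/false);

  printf("%zu managers, last steals: %s\n",
         managers.size(), managers.back().steals ? "yes" : "no");
  return 0;
}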
void PSYoungGen::compute_initial_space_boundaries() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Compute sizes
  size_t alignment = heap->intra_heap_alignment();
  size_t size = virtual_space()->committed_size();

  size_t survivor_size = size / InitialSurvivorRatio;
  survivor_size = align_size_down(survivor_size, alignment);
  // ... but never less than an alignment
  survivor_size = MAX2(survivor_size, alignment);

  // Young generation is eden + 2 survivor spaces
  size_t eden_size = size - (2 * survivor_size);

  // Now go ahead and set 'em.
  set_space_boundaries(eden_size, survivor_size);
  space_invariants();

  if (UsePerfData) {
    _eden_counters->update_capacity();
    _from_counters->update_capacity();
    _to_counters->update_capacity();
  }
}
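// The survivor/eden split above is plain integer arithmetic on the committed
// size. A standalone worked example with hypothetical values (not the real
// InitialSurvivorRatio default or a real alignment):
#include <cstddef>
#include <cstdio>

static size_t align_down(size_t size, size_t alignment) {
  return size & ~(alignment - 1);  // alignment assumed to be a power of two
}

int main() {
  const size_t committed = 8 * 1024 * 1024;  // hypothetical committed young gen: 8M
  const size_t ratio     = 3;                // hypothetical survivor ratio
  const size_t alignment = 64 * 1024;        // hypothetical space alignment: 64K

  size_t survivor = align_down(committed / ratio, alignment);
  if (survivor < alignment) survivor = alignment;  // ... but never less than an alignment
  size_t eden = committed - 2 * survivor;          // eden + 2 survivor spaces

  printf("survivor = %zuK, eden = %zuK\n", survivor / 1024, eden / 1024);
  return 0;
}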
void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
  trace(" 1");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  {
    ParallelScavengeHeap::ParStrongRootsScope psrs;
    Universe::oops_do(mark_and_push_closure());
    JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
    CLDToOopClosure mark_and_push_from_cld(mark_and_push_closure());
    MarkingCodeBlobClosure each_active_code_blob(mark_and_push_closure(), !CodeBlobToOopClosure::FixRelocations);
    Threads::oops_do(mark_and_push_closure(), &mark_and_push_from_cld, &each_active_code_blob);
    ObjectSynchronizer::oops_do(mark_and_push_closure());
    FlatProfiler::oops_do(mark_and_push_closure());
    Management::oops_do(mark_and_push_closure());
    JvmtiExport::oops_do(mark_and_push_closure());
    SystemDictionary::always_strong_oops_do(mark_and_push_closure());
    ClassLoaderDataGraph::always_strong_cld_do(follow_cld_closure());
    // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
    //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
  }

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking
  {
    ref_processor()->setup_policy(clear_all_softrefs);
    const ReferenceProcessorStats& stats =
      ref_processor()->process_discovered_references(
        is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer, _gc_tracer->gc_id());
    gc_tracer()->report_gc_reference_stats(stats);
  }

  // This is the point where the entire marking should have completed.
  assert(_marking_stack.is_empty(), "Marking should have completed");

  // Unload classes and purge the SystemDictionary.
  bool purged_class = SystemDictionary::do_unloading(is_alive_closure());

  // Unload nmethods.
  CodeCache::do_unloading(is_alive_closure(), purged_class);

  // Prune dead klasses from subklass/sibling/implementor lists.
  Klass::clean_weak_klass_links(is_alive_closure());

  // Delete entries for dead interned strings.
  StringTable::unlink(is_alive_closure());

  // Clean up unreferenced symbols in symbol table.
  SymbolTable::unlink();

  _gc_tracer->report_object_count_after_gc(is_alive_closure());
}
size_t ASPSOldGen::available_for_expansion() {
  assert(virtual_space()->is_aligned(gen_size_limit()), "not aligned");
  assert(gen_size_limit() >= virtual_space()->committed_size(), "bad gen size");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  size_t result = gen_size_limit() - virtual_space()->committed_size();
  size_t result_aligned = align_size_down(result, heap->old_gen_alignment());
  return result_aligned;
}
CheckForUnmarkedObjects() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  _young_gen = heap->young_gen();
  _card_table = (CardTableExtension*)heap->barrier_set();
  // No point in asserting barrier set type here. Need to make
  // CardTableExtension a unique barrier set type.
}
size_t ASPSYoungGen::available_for_expansion() {
  size_t current_committed_size = virtual_space()->committed_size();
  assert((gen_size_limit() >= current_committed_size),
    "generation size limit is wrong");

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  size_t result = gen_size_limit() - current_committed_size;
  size_t result_aligned = align_size_down(result, heap->generation_alignment());
  return result_aligned;
}
void PSPromotionManager::pre_scavenge() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  _young_space = heap->young_gen()->to_space();

  for (uint i = 0; i < ParallelGCThreads + 1; i++) {
    manager_array(i)->reset();
  }
}
void RefProcTaskExecutor::execute(EnqueueTask& task) {
  ParallelScavengeHeap* heap = PSParallelCompact::gc_heap();
  uint parallel_gc_threads = heap->gc_task_manager()->workers();
  GCTaskQueue* q = GCTaskQueue::create();
  for (uint i = 0; i < parallel_gc_threads; i++) {
    q->enqueue(new RefEnqueueTaskProxy(task, i));
  }
  PSParallelCompact::gc_task_manager()->execute_and_wait(q);
}
bool PSYoungPromotionLAB::lab_is_valid(MemRegion lab) {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  MutableSpace* to_space = heap->young_gen()->to_space();
  MemRegion used = to_space->used_region();
  if (used.contains(lab)) {
    return true;
  }
  return false;
}
bool PSYoungPromotionLAB::lab_is_valid(MemRegion lab) {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  MutableSpace* to_space = heap->young_gen()->to_space();
  MemRegion used = to_space->used_region();
  if (used.contains(lab)) {
    return true;
  }
  return false;
}
void PSOldGen::precompact() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Reset start array first.
  _start_array.reset();

  object_mark_sweep()->precompact();

  // Now compact the young gen
  heap->young_gen()->precompact();
}
size_t ASPSOldGen::available_for_contraction() {
  size_t uncommitted_bytes = virtual_space()->uncommitted_size();
  if (uncommitted_bytes != 0) {
    return uncommitted_bytes;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  const size_t gen_alignment = heap->old_gen_alignment();
  PSAdaptiveSizePolicy* policy = heap->size_policy();
  const size_t working_size =
    used_in_bytes() + (size_t) policy->avg_promoted()->padded_average();
  const size_t working_aligned = align_size_up(working_size, gen_alignment);
  const size_t working_or_min = MAX2(working_aligned, min_gen_size());
  if (working_or_min > reserved().byte_size()) {
    // If the used or minimum gen size (aligned up) is greater
    // than the total reserved size, then the space available
    // for contraction should (after proper alignment) be 0
    return 0;
  }
  const size_t max_contraction = reserved().byte_size() - working_or_min;

  // Use the "increment" fraction instead of the "decrement" fraction
  // to allow the other gen to expand more aggressively. The
  // "decrement" fraction is conservative because its intent is to
  // only reduce the footprint.
  size_t result = policy->promo_increment_aligned_down(max_contraction);
  // Also adjust for inter-generational alignment
  size_t result_aligned = align_size_down(result, gen_alignment);
  if (PrintAdaptiveSizePolicy && Verbose) {
    gclog_or_tty->print_cr("\nASPSOldGen::available_for_contraction:"
      " %d K / 0x%x", result_aligned/K, result_aligned);
    gclog_or_tty->print_cr(" reserved().byte_size() %d K / 0x%x ",
      reserved().byte_size()/K, reserved().byte_size());
    size_t working_promoted = (size_t) policy->avg_promoted()->padded_average();
    gclog_or_tty->print_cr(" padded promoted %d K / 0x%x",
      working_promoted/K, working_promoted);
    gclog_or_tty->print_cr(" used %d K / 0x%x",
      used_in_bytes()/K, used_in_bytes());
    gclog_or_tty->print_cr(" min_gen_size() %d K / 0x%x",
      min_gen_size()/K, min_gen_size());
    gclog_or_tty->print_cr(" max_contraction %d K / 0x%x",
      max_contraction/K, max_contraction);
    gclog_or_tty->print_cr(" without alignment %d K / 0x%x",
      policy->promo_increment(max_contraction)/K,
      policy->promo_increment(max_contraction));
    gclog_or_tty->print_cr(" alignment 0x%x", gen_alignment);
  }
  assert(result_aligned <= max_contraction, "arithmetic is wrong");
  return result_aligned;
}
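// The contraction computation above keeps room for the current use plus the
// padded average promotion volume (and at least the minimum gen size), and
// only offers the remainder, scaled and aligned, for contraction. A
// standalone worked example with hypothetical sizes (the real code also
// applies the policy's "increment" fraction, omitted here):
#include <algorithm>
#include <cstddef>
#include <cstdio>

static size_t align_up(size_t size, size_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}
static size_t align_down(size_t size, size_t alignment) {
  return size & ~(alignment - 1);
}

int main() {
  const size_t reserved      = 256 * 1024 * 1024;  // hypothetical reserved old gen: 256M
  const size_t used          = 80  * 1024 * 1024;  // hypothetical live data: 80M
  const size_t padded_promo  = 12  * 1024 * 1024;  // hypothetical padded avg promoted: 12M
  const size_t min_gen_size  = 32  * 1024 * 1024;  // hypothetical minimum gen size: 32M
  const size_t gen_alignment = 1   * 1024 * 1024;  // hypothetical alignment: 1M

  // Keep enough room for current use plus expected promotions, never below the minimum.
  size_t working = align_up(used + padded_promo, gen_alignment);
  working = std::max(working, min_gen_size);
  if (working > reserved) {
    printf("no room to contract\n");
    return 0;
  }
  size_t max_contraction = reserved - working;
  printf("available for contraction: %zuM\n",
         align_down(max_contraction, gen_alignment) / (1024 * 1024));
  return 0;
}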
// Static method
bool ParallelScavengeHeap::is_in_young(oop* p) {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap,
    "Must be ParallelScavengeHeap");

  PSYoungGen* young_gen = heap->young_gen();

  if (young_gen->is_in(p)) {
    return true;
  }

  return false;
}
HeapWord* HeapInspection::start_of_perm_gen() {
  if (is_shared_heap()) {
    SharedHeap* sh = SharedHeap::heap();
    return sh->perm_gen()->used_region().start();
  }
#ifndef SERIALGC
  ParallelScavengeHeap* psh = (ParallelScavengeHeap*)Universe::heap();
  return psh->perm_gen()->object_space()->used_region().start();
#else
  ShouldNotReachHere();
  return NULL;
#endif // SERIALGC
}
bool PSOldPromotionLAB::lab_is_valid(MemRegion lab) {
  assert(_start_array->covered_region().contains(lab), "Sanity");

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSOldGen* old_gen = heap->old_gen();
  MemRegion used = old_gen->object_space()->used_region();

  if (used.contains(lab)) {
    return true;
  }
  return false;
}
ParCompactionManager::ParCompactionManager() :
    _action(CopyAndUpdate),
    _region_stack(NULL),
    _region_stack_index((uint)max_uintx) {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  _old_gen = heap->old_gen();
  _start_array = old_gen()->start_array();

  marking_stack()->initialize();
  _objarray_stack.initialize();
}
void PSMarkSweep::allocate_stacks() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();

  MutableSpace* to_space = young_gen->to_space();
  _preserved_marks = (PreservedMark*)to_space->top();
  _preserved_count = 0;

  // We want to calculate the size in bytes first.
  _preserved_count_max = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
  // Now divide by the size of a PreservedMark
  _preserved_count_max /= sizeof(PreservedMark);
}
void PSMarkSweep::mark_sweep_phase4() {
  EventMark m("4 compact heap");
  GCTraceTime tm("phase 4", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());

  // All pointers are now adjusted, move objects accordingly

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  old_gen->compact();
  young_gen->compact();
}
void PSYoungGen::space_invariants() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  const size_t alignment = heap->intra_heap_alignment();

  // Currently, our eden size cannot shrink to zero
  guarantee(eden_space()->capacity_in_bytes() >= alignment, "eden too small");
  guarantee(from_space()->capacity_in_bytes() >= alignment, "from too small");
  guarantee(to_space()->capacity_in_bytes() >= alignment, "to too small");

  // Relationship of spaces to each other
  char* eden_start = (char*)eden_space()->bottom();
  char* eden_end   = (char*)eden_space()->end();
  char* from_start = (char*)from_space()->bottom();
  char* from_end   = (char*)from_space()->end();
  char* to_start   = (char*)to_space()->bottom();
  char* to_end     = (char*)to_space()->end();

  guarantee(eden_start >= virtual_space()->low(), "eden bottom");
  guarantee(eden_start < eden_end, "eden space consistency");
  guarantee(from_start < from_end, "from space consistency");
  guarantee(to_start < to_end, "to space consistency");

  // Check whether from space is below to space
  if (from_start < to_start) {
    // Eden, from, to
    guarantee(eden_end <= from_start, "eden/from boundary");
    guarantee(from_end <= to_start, "from/to boundary");
    guarantee(to_end <= virtual_space()->high(), "to end");
  } else {
    // Eden, to, from
    guarantee(eden_end <= to_start, "eden/to boundary");
    guarantee(to_end <= from_start, "to/from boundary");
    guarantee(from_end <= virtual_space()->high(), "from end");
  }

  // More checks that the virtual space is consistent with the spaces
  assert(virtual_space()->committed_size() >=
    (eden_space()->capacity_in_bytes() +
     to_space()->capacity_in_bytes() +
     from_space()->capacity_in_bytes()), "Committed size is inconsistent");
  assert(virtual_space()->committed_size() <= virtual_space()->reserved_size(),
    "Space invariant");
  char* eden_top = (char*)eden_space()->top();
  char* from_top = (char*)from_space()->top();
  char* to_top   = (char*)to_space()->top();
  assert(eden_top <= virtual_space()->high(), "eden top");
  assert(from_top <= virtual_space()->high(), "from top");
  assert(to_top   <= virtual_space()->high(), "to top");

  virtual_space()->verify();
}
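// The invariants above admit exactly two layouts of the committed young-gen
// range, depending on which survivor space currently sits lower in memory:
// eden, from, to -- or -- eden, to, from. A standalone sketch of that
// ordering check using plain addresses (hypothetical values, not the
// MutableSpace API):
#include <cassert>
#include <cstdint>

static bool young_layout_ok(uintptr_t eden_start, uintptr_t eden_end,
                            uintptr_t from_start, uintptr_t from_end,
                            uintptr_t to_start,   uintptr_t to_end,
                            uintptr_t low,        uintptr_t high) {
  if (eden_start < low || eden_start >= eden_end) return false;
  if (from_start >= from_end || to_start >= to_end) return false;
  if (from_start < to_start) {
    // Eden, from, to
    return eden_end <= from_start && from_end <= to_start && to_end <= high;
  } else {
    // Eden, to, from
    return eden_end <= to_start && to_end <= from_start && from_end <= high;
  }
}

int main() {
  // eden [0x1000,0x5000), from [0x5000,0x6000), to [0x6000,0x7000)
  assert(young_layout_ok(0x1000, 0x5000, 0x5000, 0x6000, 0x6000, 0x7000,
                         0x1000, 0x7000));
  return 0;
}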