GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap,
                                           int max_covered_regions) {
  switch (rem_set_name()) {
  case GenRemSet::CardTable: {
    if (barrier_set_name() != BarrierSet::CardTableModRef)
      vm_exit_during_initialization("Mismatch between RS and BS.");
    CardTableRS* res = new CardTableRS(whole_heap, max_covered_regions);
    return res;
  }
  default:
    guarantee(false, "unrecognized GenRemSet::Name");
    return NULL;
  }
}
size_t GenCollectorPolicy::compute_max_alignment() {
  // The card marking array and the offset arrays for old generations are
  // committed in os pages as well. Make sure they are entirely full (to
  // avoid partial page problems), e.g. if 512 bytes heap corresponds to 1
  // byte entry and the os page size is 4096, the maximum heap size should
  // be 512*4096 = 2MB aligned.
  size_t alignment = GenRemSet::max_alignment_constraint(rem_set_name());

  // Parallel GC does its own alignment of the generations to avoid requiring a
  // large page (256M on some platforms) for the permanent generation. The
  // other collectors should also be updated to do their own alignment and then
  // this use of lcm() should be removed.
  if (UseLargePages && !UseParallelGC) {
    // In the presence of large pages we have to make sure that our
    // alignment is large page aware.
    alignment = lcm(os::large_page_size(), alignment);
  }

  return alignment;
}
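// For illustration, using the example figures from the comments in
// compute_max_alignment() above: a card table alignment constraint of 2M
// combined with a 256M large page gives lcm(256M, 2M) == 256M, so in that
// case the maximum heap size ends up aligned to the large page size.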
void TwoGenerationCollectorPolicy::initialize_flags() {
  // All space sizes must be multiples of car size in order for the
  // CarTable to work. Similarly with the should_scavenge region size.
  set_min_alignment(MAX2((uintx) CarSpace::car_size(),
                         (uintx) Generation::GenGrain));

  // The card marking array and the offset arrays for old generations are
  // committed in os pages as well. Make sure they are entirely full (to
  // avoid partial page problems), e.g. if 512 bytes heap corresponds to 1
  // byte entry and the os page size is 4096, the maximum heap size should
  // be 512*4096 = 2MB aligned.
  set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name()));
  assert(max_alignment() >= min_alignment() &&
         max_alignment() % min_alignment() == 0,
         "invalid alignment constraints");

  // Adjust max size parameters
  if (NewSize > MaxNewSize) {
    MaxNewSize = NewSize;
  }
  NewSize = align_size_down(NewSize, min_alignment());
  MaxNewSize = align_size_down(MaxNewSize, min_alignment());
  OldSize = align_size_down(OldSize, min_alignment());
  if (NewSize + OldSize > MaxHeapSize) {
    MaxHeapSize = NewSize + OldSize;
  }
  MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());

  if (PermSize > MaxPermSize) {
    MaxPermSize = PermSize;
  }
  PermSize = align_size_down(PermSize, min_alignment());
  MaxPermSize = align_size_down(MaxPermSize, max_alignment());

  MinPermHeapExpansion = align_size_down(MinPermHeapExpansion, min_alignment());
  MaxPermHeapExpansion = align_size_down(MaxPermHeapExpansion, min_alignment());

  MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, min_alignment());

  always_do_update_barrier = UseConcMarkSweepGC;
  BlockOffsetArrayUseUnallocatedBlock |= ParallelGCThreads > 0;

  SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment());
  SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment());
  SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment());

  // Check validity of heap flags
  assert(NewSize % min_alignment() == 0, "eden space alignment");
  assert(MaxNewSize % min_alignment() == 0, "survivor space alignment");
  assert(OldSize % min_alignment() == 0, "old space alignment");
  assert(MaxHeapSize % max_alignment() == 0, "maximum heap alignment");
  assert(PermSize % min_alignment() == 0, "permanent space alignment");
  assert(MaxPermSize % max_alignment() == 0, "maximum permanent space alignment");
  assert(SharedReadOnlySize % max_alignment() == 0, "read-only space alignment");
  assert(SharedReadWriteSize % max_alignment() == 0, "read-write space alignment");
  assert(SharedMiscDataSize % max_alignment() == 0, "misc-data space alignment");

  if (NewSize < 3*min_alignment()) {
    // Make sure there is room for eden and two survivor spaces.
    vm_exit_during_initialization("Too small new size specified");
  }
  if (PermSize < M) {
    vm_exit_during_initialization("Too small initial permanent heap");
  }
  if (SurvivorRatio < 1 || NewRatio < 1) {
    vm_exit_during_initialization("Invalid heap ratio specified");
  }
}
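// For illustration of the rounding in initialize_flags() above, with
// hypothetical values: given a 1M alignment, align_size_down(5*M + 1, M)
// yields 5*M and align_size_up(5*M + 1, M) yields 6*M, i.e. sizes are
// rounded down to the nearest aligned multiple and limits are rounded up.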