HeapWord* GenCollectorPolicy::expand_heap_and_allocate(size_t size, bool is_tlab) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  HeapWord* result = NULL;
  Generation* old = gch->old_gen();
  if (old->should_allocate(size, is_tlab)) {
    result = old->expand_and_allocate(size, is_tlab);
  }
  if (result == NULL) {
    Generation* young = gch->young_gen();
    if (young->should_allocate(size, is_tlab)) {
      result = young->expand_and_allocate(size, is_tlab);
    }
  }
  assert(result == NULL || gch->is_in_reserved(result), "result not in heap");
  return result;
}
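// A minimal sketch (not HotSpot source) of how a caller typically uses the
// routine above: expansion is a fallback, tried only after an ordinary
// allocation attempt has failed and the heap is not yet at its maximum
// size, exactly as mem_allocate_work() does further below.
//
//   HeapWord* result = gch->attempt_allocation(size, is_tlab, false /* first_only */);
//   if (result == NULL && !gch->is_maximal_no_gc()) {
//     result = expand_heap_and_allocate(size, is_tlab);
//   }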
HeapWord* GenCollectorPolicy::expand_heap_and_allocate(size_t size, bool is_tlab) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  HeapWord* result = NULL;
  for (int i = number_of_generations() - 1; i >= 0 && result == NULL; i--) {
    Generation* gen = gch->get_gen(i);
    if (gen->should_allocate(size, is_tlab)) {
      result = gen->expand_and_allocate(size, is_tlab);
    }
  }
  assert(result == NULL || gch->is_in_reserved(result), "result not in heap");
  return result;
}
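// For the common two-generation case (number_of_generations() == 2) the loop
// above visits get_gen(1), the old generation, before get_gen(0), the young
// generation: it expands oldest-first, exactly like the explicit
// old-then-young variant before it. A sketch of the unrolled loop, assuming
// two generations:
//
//   Generation* old   = gch->get_gen(1);   // oldest generation
//   Generation* young = gch->get_gen(0);   // youngest generation
//   if (old->should_allocate(size, is_tlab)) {
//     result = old->expand_and_allocate(size, is_tlab);
//   }
//   if (result == NULL && young->should_allocate(size, is_tlab)) {
//     result = young->expand_and_allocate(size, is_tlab);
//   }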
HeapWord* TwoGenerationCollectorPolicy::satisfy_failed_allocation(size_t size,
                                                                  bool is_large_noref,
                                                                  bool is_tlab,
                                                                  bool* notify_ref_lock) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter x(gch, GCCause::_allocation_failure);
  HeapWord* result = NULL;

  // The gc_prologues have not executed yet. The value
  // for incremental_collection_will_fail() is the remnant
  // of the last collection.
  if (!gch->incremental_collection_will_fail()) {
    // Do an incremental collection.
    gch->do_collection(false            /* full */,
                       false            /* clear_all_soft_refs */,
                       size             /* size */,
                       is_large_noref   /* is_large_noref */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */,
                       notify_ref_lock  /* notify_ref_lock */);
  } else {
    // The incremental_collection_will_fail flag is set if the
    // next incremental collection will not succeed (e.g., the
    // DefNewGeneration didn't think it had space to promote all
    // its objects). However, that last incremental collection
    // continued, allowing all older generations to collect (and
    // perhaps change the state of the flag).
    //
    // If we reach here, we know that an incremental collection of
    // all generations left us in the state where incremental collections
    // will fail, so we just try allocating the requested space.
    // If the allocation fails everywhere, force a full collection.
    // We're probably very close to being out of memory, so forcing many
    // collections now probably won't help.
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("TwoGenerationCollectorPolicy::satisfy_failed_allocation:"
                             " attempting allocation anywhere before full collection");
    }
    result = gch->attempt_allocation(size, is_large_noref, is_tlab,
                                     false /* first_only */);
    if (result != NULL) {
      assert(gch->is_in(result), "result not in heap");
      return result;
    }

    // Allocation request hasn't yet been met; try a full collection.
    gch->do_collection(true             /* full */,
                       false            /* clear_all_soft_refs */,
                       size             /* size */,
                       is_large_noref   /* is_large_noref */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */,
                       notify_ref_lock  /* notify_ref_lock */);
  }

  result = gch->attempt_allocation(size, is_large_noref, is_tlab,
                                   false /* first_only */);
  if (result != NULL) {
    assert(gch->is_in(result), "result not in heap");
    return result;
  }

  // OK, collection failed, try expansion.
  for (int i = number_of_generations() - 1; i >= 0; i--) {
    Generation* gen = gch->get_gen(i);
    if (gen->should_allocate(size, is_large_noref, is_tlab)) {
      result = gen->expand_and_allocate(size, is_large_noref, is_tlab);
      if (result != NULL) {
        assert(gch->is_in(result), "result not in heap");
        return result;
      }
    }
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    IntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted
    gch->do_collection(true             /* full */,
                       true             /* clear_all_soft_refs */,
                       size             /* size */,
                       is_large_noref   /* is_large_noref */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */,
                       notify_ref_lock  /* notify_ref_lock */);
  }

  result = gch->attempt_allocation(size, is_large_noref, is_tlab,
                                   false /* first_only */);
  if (result != NULL) {
    assert(gch->is_in(result), "result not in heap");
    return result;
  }

  // What else? We might try synchronous finalization later. If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return NULL;
}
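// satisfy_failed_allocation() above escalates through progressively more
// expensive remedies: incremental collection, allocation anywhere, full
// collection, heap expansion, and finally a fully compacting collection
// that also clears soft references; only then does it give up and let the
// caller raise an out-of-memory error.
//
// GCCauseSetter and IntFlagSetting are RAII guards: they install a value
// for the enclosing scope and restore the previous one on destruction.
// A minimal sketch of the idiom (hypothetical, for illustration only;
// the real helpers live in HotSpot's utility headers):
//
//   class IntFlagSetting {
//     intx* _flag;
//     intx  _saved;
//    public:
//     IntFlagSetting(intx& flag, intx value) : _flag(&flag), _saved(flag) {
//       flag = value;                        // Install the scoped value.
//     }
//     ~IntFlagSetting() { *_flag = _saved; } // Restore on scope exit.
//   };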
HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
                                                bool is_tlab,
                                                bool* gc_overhead_limit_was_exceeded) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  debug_only(gch->check_for_valid_allocation_state());
  assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");

  // In general gc_overhead_limit_was_exceeded should be false so
  // set it so here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = NULL;

  // Loop until the allocation is satisfied, or unsatisfied after GC.
  for (uint try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
    HandleMark hm; // Discard any handles allocated in each iteration.

    // First allocation attempt is lock-free.
    Generation* young = gch->young_gen();
    assert(young->supports_inline_contig_alloc(),
           "Otherwise, must do alloc within heap lock");
    if (young->should_allocate(size, is_tlab)) {
      result = young->par_allocate(size, is_tlab);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }
    }

    uint gc_count_before; // Read inside the Heap_lock locked region.
    {
      MutexLocker ml(Heap_lock);
      log_trace(gc, alloc)("GenCollectorPolicy::mem_allocate_work: attempting locked slow path allocation");
      // Note that only large objects get a shot at being
      // allocated in later generations.
      bool first_only = !should_try_older_generation_allocation(size);

      result = gch->attempt_allocation(size, is_tlab, first_only);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }

      if (GCLocker::is_active_and_needs_gc()) {
        if (is_tlab) {
          return NULL; // Caller will retry allocating individual object.
        }
        if (!gch->is_maximal_no_gc()) {
          // Try and expand heap to satisfy request.
          result = expand_heap_and_allocate(size, is_tlab);
          // Result could be null if we are out of space.
          if (result != NULL) {
            return result;
          }
        }

        if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
          return NULL; // We didn't get to do a GC and we didn't get any memory.
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GCLocker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = gch->total_collections();
    }

    VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      if (op.gc_locked()) {
        assert(result == NULL, "must be NULL if gc_locked() is true");
        continue; // Retry and/or stall as necessary.
      }

      // Allocation has failed and a collection
      // has been done. If the gc time limit was exceeded
      // this time, return NULL so that an out-of-memory error
      // will be thrown. Clear gc_overhead_limit_exceeded
      // so that the overhead-exceeded state does not persist.
      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
      const bool softrefs_clear = all_soft_refs_clear();

      if (limit_exceeded && softrefs_clear) {
        *gc_overhead_limit_was_exceeded = true;
        size_policy()->set_gc_overhead_limit_exceeded(false);
        if (op.result() != NULL) {
          CollectedHeap::fill_with_object(op.result(), size);
        }
        return NULL;
      }
      assert(result == NULL || gch->is_in_reserved(result),
             "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc, ergo)("GenCollectorPolicy::mem_allocate_work retries %d times,"
                            " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  }
}
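// mem_allocate_work() above follows the usual HotSpot slow-path shape: a
// lock-free fast path in the young generation, a locked slow path under
// Heap_lock, and finally a VM operation that performs the collection at a
// safepoint. The gc count is sampled while Heap_lock is still held so that
// the VM operation's prologue can detect a collection that another thread
// completed between lock release and safepoint entry and skip a redundant
// GC. A condensed sketch of that retry handshake, using the same names as
// above:
//
//   uint gc_count_before;
//   {
//     MutexLocker ml(Heap_lock);
//     // ... allocation attempts under the lock ...
//     gc_count_before = gch->total_collections();
//   }
//   VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
//   VMThread::execute(&op);
//   if (!op.prologue_succeeded()) {
//     // Another GC ran first; fall through and retry the whole loop.
//   }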