HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
                                                        bool   is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  GCCauseSetter x(gch, GCCause::_allocation_failure);
  HeapWord* result = NULL;

  assert(size != 0, "Precondition violated");
  if (GC_locker::is_active_and_needs_gc()) {
    // GC locker is active; instead of a collection we will attempt
    // to expand the heap, if there's room for expansion.
    if (!gch->is_maximal_no_gc()) {
      result = expand_heap_and_allocate(size, is_tlab);
    }
    return result;   // could be null if we are out of space
  } else if (!gch->incremental_collection_will_fail(false /* don't consult_young */)) {
    // Do an incremental collection.
    gch->do_collection(false            /* full */,
                       false            /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  } else {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: Trying full because partial may fail :: ");
    }
    // Try a full collection; see delta for bug id 6266275
    // for the original code and why this has been simplified
    // with from-space allocation criteria modified and
    // such allocation moved out of the safepoint path.
    gch->do_collection(true             /* full */,
                       false            /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  }

  result = gch->attempt_allocation(size, is_tlab, false /*first_only*/);

  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  // OK, collection failed, try expansion.
  result = expand_heap_and_allocate(size, is_tlab);
  if (result != NULL) {
    return result;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted

    gch->do_collection(true             /* full */,
                       true             /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  }

  result = gch->attempt_allocation(size, is_tlab, false /* first_only */);
  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  assert(!should_clear_all_soft_refs(),
    "Flag should have been handled and cleared prior to this point");

  // What else? We might try synchronous finalization later. If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return NULL;
}
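Both expansion attempts above go through expand_heap_and_allocate. For reference, a minimal sketch of that helper, based on the JDK 8-era GenCollectorPolicy sources: it walks the generations from oldest to youngest and asks each generation that would accept the request to grow and allocate.

// Sketch of the expansion helper called above (JDK 8-era collectorPolicy.cpp).
HeapWord* GenCollectorPolicy::expand_heap_and_allocate(size_t size, bool is_tlab) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  HeapWord* result = NULL;
  // Ask the oldest generation first; only generations willing to accept an
  // allocation of this size are asked to expand.
  for (int i = number_of_generations() - 1; i >= 0 && result == NULL; i--) {
    Generation* gen = gch->get_gen(i);
    if (gen->should_allocate(size, is_tlab)) {
      result = gen->expand_and_allocate(size, is_tlab);
    }
  }
  assert(result == NULL || gch->is_in_reserved(result), "result not in heap");
  return result;
}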
HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
                                                bool is_tlab,
                                                bool* gc_overhead_limit_was_exceeded) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();

  debug_only(gch->check_for_valid_allocation_state());
  assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");

  // In general gc_overhead_limit_was_exceeded should be false so
  // set it so here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = NULL;

  // Loop until the allocation is satisfied, or unsatisfied after GC.
  for (int try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
    HandleMark hm; // discard any handles allocated in each iteration

    // First allocation attempt is lock-free.
    Generation *gen0 = gch->get_gen(0);
    assert(gen0->supports_inline_contig_alloc(),
      "Otherwise, must do alloc within heap lock");
    if (gen0->should_allocate(size, is_tlab)) {
      result = gen0->par_allocate(size, is_tlab);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }
    }
    unsigned int gc_count_before;  // read inside the Heap_lock locked region
    {
      MutexLocker ml(Heap_lock);
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr("GenCollectorPolicy::mem_allocate_work:"
                               " attempting locked slow path allocation");
      }
      // Note that only large objects get a shot at being
      // allocated in later generations.
      bool first_only = ! should_try_older_generation_allocation(size);

      result = gch->attempt_allocation(size, is_tlab, first_only);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }

      if (GC_locker::is_active_and_needs_gc()) {
        if (is_tlab) {
          return NULL;  // Caller will retry allocating individual object
        }
        if (!gch->is_maximal_no_gc()) {
          // Try and expand heap to satisfy request
          result = expand_heap_and_allocate(size, is_tlab);
          // result could be null if we are out of space
          if (result != NULL) {
            return result;
          }
        }

        if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
          return NULL; // we didn't get to do a GC and we didn't get any memory
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GC_locker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = Universe::heap()->total_collections();
    }

    VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      if (op.gc_locked()) {
        assert(result == NULL, "must be NULL if gc_locked() is true");
        continue;  // retry and/or stall as necessary
      }

      // Allocation has failed and a collection has been done. If the gc
      // time limit was exceeded this time, return NULL so that an
      // out-of-memory will be thrown. Clear gc_overhead_limit_exceeded
      // so that the overhead-exceeded state does not persist.
      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
      const bool softrefs_clear = all_soft_refs_clear();

      if (limit_exceeded && softrefs_clear) {
        *gc_overhead_limit_was_exceeded = true;
        size_policy()->set_gc_overhead_limit_exceeded(false);
        if (op.result() != NULL) {
          CollectedHeap::fill_with_object(op.result(), size);
        }
        return NULL;
      }
      assert(result == NULL || gch->is_in_reserved(result),
             "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      warning("GenCollectorPolicy::mem_allocate_work retries %d times \n\t"
              " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  }
}
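The first_only decision in the slow path above comes from should_try_older_generation_allocation. A sketch of that predicate, again based on the JDK 8-era sources (where it lives in TwoGenerationCollectorPolicy): older generations are tried only when the request cannot fit in gen0's capacity, a pending GC is blocked by the GC locker, or the last incremental collection failed.

// Sketch of the predicate behind first_only (JDK 8-era collectorPolicy.cpp).
bool TwoGenerationCollectorPolicy::should_try_older_generation_allocation(
    size_t word_size) const {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  size_t gen0_capacity = gch->get_gen(0)->capacity_before_gc();
  return (word_size > heap_word_size(gen0_capacity))  // too large for the young gen
      || GC_locker::is_active_and_needs_gc()          // a GC is pending but blocked
      || gch->incremental_collection_failed();        // last minor GC did not succeed
}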
HeapWord* TwoGenerationCollectorPolicy::satisfy_failed_allocation(size_t size,
                                                                  bool   is_large_noref,
                                                                  bool   is_tlab,
                                                                  bool*  notify_ref_lock) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  GCCauseSetter x(gch, GCCause::_allocation_failure);
  HeapWord* result = NULL;

  // The gc_prologues have not executed yet. The value
  // for incremental_collection_will_fail() is the remnant
  // of the last collection.
  if (!gch->incremental_collection_will_fail()) {
    // Do an incremental collection.
    gch->do_collection(false            /* full */,
                       false            /* clear_all_soft_refs */,
                       size             /* size */,
                       is_large_noref   /* is_large_noref */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */,
                       notify_ref_lock  /* notify_ref_lock */);
  } else {
    // The incremental_collection_will_fail flag is set if the
    // next incremental collection will not succeed (e.g., the
    // DefNewGeneration didn't think it had space to promote all
    // its objects). However, that last incremental collection
    // continued, allowing all older generations to collect (and
    // perhaps change the state of the flag).
    //
    // If we reach here, we know that an incremental collection of
    // all generations left us in the state where incremental collections
    // will fail, so we just try allocating the requested space.
    // If the allocation fails everywhere, force a full collection.
    // We're probably very close to being out of memory, so forcing many
    // collections now probably won't help.
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("TwoGenerationCollectorPolicy::satisfy_failed_allocation:"
                             " attempting allocation anywhere before full collection");
    }
    result = gch->attempt_allocation(size,
                                     is_large_noref,
                                     is_tlab,
                                     false /* first_only */);
    if (result != NULL) {
      assert(gch->is_in(result), "result not in heap");
      return result;
    }

    // Allocation request hasn't yet been met; try a full collection.
    gch->do_collection(true             /* full */,
                       false            /* clear_all_soft_refs */,
                       size             /* size */,
                       is_large_noref   /* is_large_noref */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */,
                       notify_ref_lock  /* notify_ref_lock */);
  }

  result = gch->attempt_allocation(size, is_large_noref, is_tlab, false /*first_only*/);

  if (result != NULL) {
    assert(gch->is_in(result), "result not in heap");
    return result;
  }

  // OK, collection failed, try expansion.
  for (int i = number_of_generations() - 1; i >= 0; i--) {
    Generation *gen = gch->get_gen(i);
    if (gen->should_allocate(size, is_large_noref, is_tlab)) {
      result = gen->expand_and_allocate(size, is_large_noref, is_tlab);
      if (result != NULL) {
        assert(gch->is_in(result), "result not in heap");
        return result;
      }
    }
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    IntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted

    gch->do_collection(true             /* full */,
                       true             /* clear_all_soft_refs */,
                       size             /* size */,
                       is_large_noref   /* is_large_noref */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */,
                       notify_ref_lock  /* notify_ref_lock */);
  }

  result = gch->attempt_allocation(size, is_large_noref, is_tlab, false /* first_only */);
  if (result != NULL) {
    assert(gch->is_in(result), "result not in heap");
    return result;
  }

  // What else? We might try synchronous finalization later. If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return NULL;
}
HeapWord* TwoGenerationCollectorPolicy::mem_allocate_work(size_t size,
                                                          bool is_large_noref,
                                                          bool is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();

  debug_only(gch->check_for_valid_allocation_state());
  assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");
  HeapWord* result = NULL;

  // Loop until the allocation is satisfied, or unsatisfied after GC.
  for (int try_count = 1; /* return or throw */; try_count += 1) {

    // First allocation attempt is lock-free.
    Generation *gen0 = gch->get_gen(0);
    assert(gen0->supports_inline_contig_alloc(),
      "Otherwise, must do alloc within heap lock");
    if (gen0->should_allocate(size, is_large_noref, is_tlab)) {
      result = gen0->par_allocate(size, is_large_noref, is_tlab);
      if (result != NULL) {
        assert(gch->is_in(result), "result not in heap");
        return result;
      }
    }
    int gc_count_before;  // read inside the Heap_lock locked region
    {
      MutexLocker ml(Heap_lock);
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr("TwoGenerationCollectorPolicy::mem_allocate_work:"
                               " attempting locked slow path allocation");
      }
      // Note that only large objects get a shot at being
      // allocated in later generations. If jvmpi slow allocation
      // is enabled, allocate in later generations (since the
      // first generation is always full).
      bool first_only = ! should_try_older_generation_allocation(size);

      result = gch->attempt_allocation(size, is_large_noref, is_tlab, first_only);
      if (result != NULL) {
        assert(gch->is_in(result), "result not in heap");
        return result;
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = Universe::heap()->total_collections();
    }

    VM_GenCollectForAllocation op(size, is_large_noref, is_tlab, gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      assert(result == NULL || gch->is_in(result), "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      warning("TwoGenerationCollectorPolicy::mem_allocate_work retries %d times \n\t"
              " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  }
}
/**
 * Handles a failed allocation request from an application (Java-level) thread:
 * 1. Choose the type of GC to run:
 *    1) if a GC has been requested but cannot run yet (GC_locker active), skip the collection;
 *    2) if an incremental (minor) collection is expected to succeed, run one minor GC;
 *    3) otherwise run a full GC.
 * 2. Try to allocate from each generation in turn, youngest to oldest.
 * 3. Try to expand the generations' capacity, oldest to youngest, and allocate.
 * 4. Run one final, thorough full GC (clearing all soft references).
 * 5. Try to allocate from each generation one last time, youngest to oldest.
 */
HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
                                                        bool   is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  GCCauseSetter x(gch, GCCause::_allocation_failure);
  HeapWord* result = NULL;

  assert(size != 0, "Precondition violated");
  if (GC_locker::is_active_and_needs_gc()) {
    // A GC has been requested but cannot run yet.
    if (!gch->is_maximal_no_gc()) {
      // Some generation can still expand; try to allocate by growing its capacity.
      result = expand_heap_and_allocate(size, is_tlab);
    }
    return result;   // could be null if we are out of space
  } else if (!gch->incremental_collection_will_fail(false /* don't consult_young */)) {
    // An incremental collection is expected to succeed, so trigger only a minor GC.
    gch->do_collection(false            /* full */,
                       false            /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  } else {
    // Fall back to a full GC.
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: Trying full because partial may fail :: ");
    }
    // Try a full collection; see delta for bug id 6266275
    // for the original code and why this has been simplified
    // with from-space allocation criteria modified and
    // such allocation moved out of the safepoint path.
    gch->do_collection(true             /* full */,
                       false            /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  }

  // After the collection, try each generation in turn for a block of the requested size.
  result = gch->attempt_allocation(size, is_tlab, false /*first_only*/);
  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  // The collection may have freed enough room to grow the generations,
  // so retry the allocation while allowing the generations to expand.
  result = expand_heap_and_allocate(size, is_tlab);
  if (result != NULL) {
    return result;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted

    // As a last resort, run a thorough GC: collect all generations and clear all soft references.
    gch->do_collection(true             /* full */,
                       true             /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  }

  // After the thorough GC, make one final pass over the generations.
  result = gch->attempt_allocation(size, is_tlab, false /* first_only */);
  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  assert(!should_clear_all_soft_refs(),
    "Flag should have been handled and cleared prior to this point");

  // What else? We might try synchronous finalization later. If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return NULL;
}
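Every allocation pass above funnels into GenCollectedHeap::attempt_allocation, which simply offers the request to each generation from youngest to oldest. A sketch, based on the JDK 8-era genCollectedHeap.cpp (_gens/_n_gens are the heap's generation array and its length):

// Sketch of the per-generation allocation loop (JDK 8-era genCollectedHeap.cpp).
HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                               bool is_tlab,
                                               bool first_only) {
  HeapWord* res;
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->should_allocate(size, is_tlab)) {
      res = _gens[i]->allocate(size, is_tlab);
      if (res != NULL) {
        return res;
      } else if (first_only) {
        break;   // only the first willing generation gets a shot
      }
    }
  }
  return NULL;   // no generation could satisfy the request
}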
/**
 * Allocates a block of the given size, following the generational
 * allocation policy (a retry loop):
 * 1. (Lock-free) fast-path allocation in the young generation.
 * 2. (Locked slow path)
 *    1) acquire the global heap lock (Heap_lock);
 *    2) if the request is larger than the young generation's capacity, or a GC
 *       has been requested but cannot run yet, or an incremental GC would fail,
 *       try each generation from youngest to oldest; otherwise try only the
 *       young generation;
 *    3) if a GC has been requested but cannot run yet:
 *       a) if some generation can still expand, try expand-and-allocate from
 *          oldest to youngest;
 *       b) otherwise release the heap lock and wait for the pending GC to run;
 *    4) release the heap lock, submit a GC request, and wait for it to be
 *       executed or abandoned;
 *    5) if the GC was abandoned or blocked by the GC locker, go back to step 1;
 *    6) if the GC exceeded its overhead limit, return NULL; otherwise return
 *       the allocated block.
 *
 * @param size    the requested size
 * @param is_tlab false: allocate an object directly from the heap
 *                true:  allocate a new thread-local allocation buffer (TLAB)
 * @param gc_overhead_limit_was_exceeded set if the GC exceeded its overhead limit
 */
HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
                                                bool is_tlab,
                                                bool* gc_overhead_limit_was_exceeded) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();

  debug_only(gch->check_for_valid_allocation_state());
  // Make sure no GC is currently in progress.
  assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");

  // In general gc_overhead_limit_was_exceeded should be false so
  // set it so here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = NULL;

  // Loop until the allocation is satisfied, or unsatisfied after GC.
  for (int try_count = 1; /* return or throw */; try_count += 1) {
    HandleMark hm; // discard any handles allocated in each iteration

    // The young generation must support lock-free, inline contiguous allocation.
    Generation *gen0 = gch->get_gen(0);
    assert(gen0->supports_inline_contig_alloc(),
      "Otherwise, must do alloc within heap lock");
    // Should this request be served from the young generation first?
    if (gen0->should_allocate(size, is_tlab)) {
      // Fast path: try a lock-free allocation in the young generation.
      result = gen0->par_allocate(size, is_tlab);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }
    }
    unsigned int gc_count_before;  // read inside the Heap_lock locked region
    {
      MutexLocker ml(Heap_lock);
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr("GenCollectorPolicy::mem_allocate_work:"
                               " attempting locked slow path allocation");
      }
      // Note that only large objects get a shot at being
      // allocated in later generations.
      // Should allocation be restricted to the young generation?
      bool first_only = ! should_try_older_generation_allocation(size);

      // Try each generation of the heap in turn.
      result = gch->attempt_allocation(size, is_tlab, first_only);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }

      if (GC_locker::is_active_and_needs_gc()) {
        // Another thread has already requested a GC.
        if (is_tlab) {
          // The request is for a new TLAB; return NULL so the caller retries
          // the individual object allocation directly in the heap.
          return NULL;
        }
        if (!gch->is_maximal_no_gc()) {
          // Some generation can still grow; retry the allocation, this time
          // allowing the generations to expand.
          result = expand_heap_and_allocate(size, is_tlab);
          // result could be null if we are out of space
          if (result != NULL) {
            return result;
          }
        }
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait until all threads have left their JNI critical sections
          // and the pending GC has run.
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while in jni critical section");
          }
          return NULL;
        }
      }

      // Allocation failed; decide to request a GC.
      // Read the gc count while the heap lock is held.
      gc_count_before = Universe::heap()->total_collections();
    }

    // Request a GC by enqueuing a GC VM operation on the VMThread's queue;
    // the collection itself is carried out by the VMThread (or dedicated GC threads).
    VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      // The GC operation has completed.
      result = op.result();
      if (op.gc_locked()) {
        // This thread's GC did not run (one may have just been triggered by
        // another thread); retry the allocation.
        assert(result == NULL, "must be NULL if gc_locked() is true");
        continue;  // retry and/or stall as necessary
      }

      // Allocation has failed and a collection has been done. If the gc
      // time limit was exceeded this time, return NULL so that an
      // out-of-memory will be thrown. Clear gc_overhead_limit_exceeded
      // so that the overhead-exceeded state does not persist.

      // Did this GC exceed the configured GC overhead (time) limit?
      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
      const bool softrefs_clear = all_soft_refs_clear();
      // Exceeding the limit implies soft references have already been cleared.
      assert(!limit_exceeded || softrefs_clear, "Should have been cleared");
      if (limit_exceeded && softrefs_clear) {
        *gc_overhead_limit_was_exceeded = true;
        size_policy()->set_gc_overhead_limit_exceeded(false);
        if (op.result() != NULL) {
          CollectedHeap::fill_with_object(op.result(), size);
        }
        // Return NULL so the caller throws an OutOfMemoryError.
        return NULL;
      }
      // On success, the returned block must lie within the heap.
      assert(result == NULL || gch->is_in_reserved(result),
             "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      warning("GenCollectorPolicy::mem_allocate_work retries %d times \n\t"
              " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  } // for
}
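The VM operation submitted by mem_allocate_work is what eventually reaches satisfy_failed_allocation at the top of this section: the VMThread runs the operation at a safepoint, and its doit delegates the failed allocation to the heap, which in turn calls the policy. A sketch, based on the JDK 8-era vmGCOperations.cpp:

// Sketch of the VM operation that links mem_allocate_work to
// satisfy_failed_allocation (JDK 8-era vmGCOperations.cpp).
void VM_GenCollectForAllocation::doit() {
  SvcGCMarker sgcm(SvcGCMarker::MINOR);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  // Runs at a safepoint on the VMThread: collect, then retry the allocation.
  _res = gch->satisfy_failed_allocation(_size, _tlab);
  assert(gch->is_in_reserved_or_null(_res), "result not in heap");

  if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
    // The GC locker blocked the collection; tell the caller to retry or stall.
    set_gc_locked();
  }
}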