// Prologue run by the requesting Java thread before the VM operation is
// enqueued.  Acquires the reference pending-list lock and then Heap_lock
// (in that order -- the release path and the epilogue must mirror it).
// Returns false, with both locks released, if another thread already
// performed the requested collection (skip_operation()).
bool VM_GC_Operation::doit_prologue() {
  assert(Thread::current()->is_Java_thread(), "just checking");
  assert(((_gc_cause != GCCause::_no_gc) &&
          (_gc_cause != GCCause::_no_cause_specified)), "Illegal GCCause");

  // To be able to handle a GC the VM initialization needs to be completed.
  // A GC this early means the young generation filled before startup
  // finished, so the advice is to enlarge NewSize.
  if (!is_init_completed()) {
    vm_exit_during_initialization(
      err_msg("GC triggered before VM initialization completed. Try increasing "
              "NewSize, current value " UINTX_FORMAT "%s.",
              byte_size_in_proper_unit(NewSize),
              proper_unit_for_byte_size(NewSize)));
  }

  acquire_pending_list_lock();
  // If the GC count has changed someone beat us to the collection
  // Get the Heap_lock after the pending_list_lock.
  Heap_lock->lock();

  // Check invocations
  if (skip_operation()) {
    // Skip collection: another thread satisfied this request already,
    // so back out in the reverse order of acquisition.
    Heap_lock->unlock();
    release_and_notify_pending_list_lock();
    _prologue_succeeded = false;
  } else {
    _prologue_succeeded = true;
    // Record that this thread holds Heap_lock on behalf of the GC so the
    // VM thread / heap code can assert proper lock ownership.
    SharedHeap* sh = SharedHeap::heap();
    if (sh != NULL) sh->_thread_holds_heap_lock_for_gc = true;
  }
  return _prologue_succeeded;
}
// Epilogue run by the requesting Java thread after the VM operation has
// executed.  Releases the locks taken in the prologue, then blocks until
// a full collection (concurrent or stop-world) that started after our
// request has completed, and finally re-enables iCMS.
void VM_GenCollectFullConcurrent::doit_epilogue() {
  Thread* thr = Thread::current();
  assert(thr->is_Java_thread(), "just checking");
  JavaThread* jt = (JavaThread*)thr;
  // Release the Heap_lock first.
  Heap_lock->unlock();
  release_and_notify_pending_list_lock();

  // It is fine to test whether completed collections has
  // exceeded our request count without locking because
  // the completion count is monotonically increasing;
  // this will break for very long-running apps when the
  // count overflows and wraps around. XXX fix me !!!
  // e.g. at the rate of 1 full gc per ms, this could
  // overflow in about 1000 years.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (gch->total_full_collections_completed() <= _full_gc_count_before) {
    // Now, wait for witnessing concurrent gc cycle to complete,
    // but do so in native mode, because we want to lock the
    // FullGCCount_lock, which may be needed by the VM thread
    // or by the CMS thread, so we do not want to be suspended
    // while holding that lock.
    ThreadToNativeFromVM native(jt);
    MutexLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
    // Either a concurrent or a stop-world full gc is sufficient
    // witness to our request.  Re-check the count under the lock and
    // wait for notification; the loop guards against spurious wakeups.
    while (gch->total_full_collections_completed() <= _full_gc_count_before) {
      FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  // Enable iCMS back.
  CMSCollector::enable_icms();
}
// Epilogue run by the requesting Java thread after the VM operation has
// executed.  Clears the heap-lock-ownership flag set in the prologue and
// releases the locks in the reverse order of acquisition: Heap_lock,
// then the reference pending-list lock.
void VM_GC_Operation::doit_epilogue() {
  assert(Thread::current()->is_Java_thread(), "just checking");
  // Release the Heap_lock first.
  SharedHeap* sh = SharedHeap::heap();
  if (sh != NULL) sh->_thread_holds_heap_lock_for_gc = false;
  Heap_lock->unlock();
  release_and_notify_pending_list_lock();
}
// Epilogue for a concurrent-GC VM operation: release Heap_lock, then the
// pending-list lock if this operation took it (_needs_pll).
void VM_CGC_Operation::doit_epilogue() {
  // Note the relative order of the unlocks must match that in
  // VM_GC_Operation::doit_epilogue()
  Heap_lock->unlock();
  if (_needs_pll) {
    release_and_notify_pending_list_lock();
  }
}
// Epilogue for a concurrent-GC VM operation: clear the heap-lock-ownership
// flag, release Heap_lock, then the pending-list lock if this operation
// took it (_needs_pll).
void VM_CGC_Operation::doit_epilogue() {
  // Note the relative order of the unlocks must match that in
  // VM_GC_Operation::doit_epilogue()
  SharedHeap::heap()->_thread_holds_heap_lock_for_gc = false;
  Heap_lock->unlock();
  if (_needs_pll) {
    release_and_notify_pending_list_lock();
  }
}
// Epilogue run by the CMS (concurrent GC) thread after its VM operation
// has executed.  Releases the locks taken in doit_prologue() in reverse
// order of acquisition: Heap_lock first, then the pending-list lock if
// this operation needs it.
void VM_CMS_Operation::doit_epilogue() {
  assert(Thread::current()->is_ConcurrentGC_thread(), "just checking");
  // Holding the CMS token or being in the foreground-wait state while
  // releasing these locks could deadlock against the VM thread.
  assert(!CMSCollector::foregroundGCShouldWait(), "Possible deadlock");
  assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "Possible deadlock");

  // Release the Heap_lock first.
  Heap_lock->unlock();
  if (needs_pll()) {
    release_and_notify_pending_list_lock();
  }
}
// Prologue run by the CMS (concurrent GC) thread before its VM operation
// is enqueued.  Acquires the pending-list lock (if needed) and then
// Heap_lock, in that order.  Returns false, with both locks released, if
// this operation lost the race to another collection (lost_race()).
bool VM_CMS_Operation::doit_prologue() {
  assert(Thread::current()->is_ConcurrentGC_thread(), "just checking");
  // Holding the CMS token or being in the foreground-wait state while
  // taking these locks could deadlock against the VM thread.
  assert(!CMSCollector::foregroundGCShouldWait(), "Possible deadlock");
  assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "Possible deadlock");

  if (needs_pll()) {
    acquire_pending_list_lock();
  }
  // Get the Heap_lock after the pending_list_lock.
  Heap_lock->lock();
  if (lost_race()) {
    assert(_prologue_succeeded == false, "Initialized in c'tor");
    // Back out in the reverse order of acquisition.
    Heap_lock->unlock();
    if (needs_pll()) {
      release_and_notify_pending_list_lock();
    }
  } else {
    _prologue_succeeded = true;
  }
  return _prologue_succeeded;
}
// Prologue run by the requesting Java thread before the VM operation is
// enqueued.  Acquires the reference pending-list lock and then Heap_lock
// (in that order -- the release path and the epilogue must mirror it).
// Returns false, with both locks released, if another thread already
// performed the requested collection (skip_operation()).
bool VM_GC_Operation::doit_prologue() {
  assert(Thread::current()->is_Java_thread(), "just checking");
  assert(((_gc_cause != GCCause::_no_gc) &&
          (_gc_cause != GCCause::_no_cause_specified)), "Illegal GCCause");

  acquire_pending_list_lock();
  // If the GC count has changed someone beat us to the collection
  // Get the Heap_lock after the pending_list_lock.
  Heap_lock->lock();

  // Check invocations
  if (skip_operation()) {
    // Skip collection: another thread satisfied this request already,
    // so back out in the reverse order of acquisition.
    Heap_lock->unlock();
    release_and_notify_pending_list_lock();
    _prologue_succeeded = false;
  } else {
    _prologue_succeeded = true;
    // Record that this thread holds Heap_lock on behalf of the GC so the
    // VM thread / heap code can assert proper lock ownership.
    SharedHeap* sh = SharedHeap::heap();
    if (sh != NULL) sh->_thread_holds_heap_lock_for_gc = true;
  }
  return _prologue_succeeded;
}
// Epilogue run by the requesting Java thread after the VM operation has
// executed.  Releases the locks taken in doit_prologue() in reverse order
// of acquisition: Heap_lock first, then the reference pending-list lock.
void VM_GC_Operation::doit_epilogue() {
  assert(Thread::current()->is_Java_thread(), "just checking");
  // Release the Heap_lock first.
  Heap_lock->unlock();
  release_and_notify_pending_list_lock();
}