////////////////////////////////////////////////////////// // Methods in class VM_CMS_Final_Remark_Operation ////////////////////////////////////////////////////////// void VM_CMS_Final_Remark::doit() { if (lost_race()) { // Nothing to do. return; } HS_PRIVATE_CMS_REMARK_BEGIN(); GCIdMark gc_id_mark(_gc_id); _collector->_gc_timer_cm->register_gc_pause_start("Final Mark"); GenCollectedHeap* gch = GenCollectedHeap::heap(); GCCauseSetter gccs(gch, GCCause::_cms_final_remark); VM_CMS_Operation::verify_before_gc(); IsGCActiveMark x; // stop-world GC active _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsFinal, gch->gc_cause()); VM_CMS_Operation::verify_after_gc(); _collector->save_heap_summary(); _collector->_gc_timer_cm->register_gc_pause_end(); HS_PRIVATE_CMS_REMARK_END(); }
// Execute one dispatched task on this worker: establish the task's GC
// id as the logging context, trace entry and exit, and invoke the
// task's work() with this worker's id.
void GangWorker::run_task(WorkData data) {
  // Tag logging done while the task runs with the task's GC id.
  GCIdMark gc_id_mark(data._task->gc_id());

  log_develop_trace(gc, workgang)("Running work gang: %s task: %s worker: %u",
                                  name(), data._task->name(), data._worker_id);

  // The worker id is handed to work() — presumably so the task can
  // pick its share of the parallel work; see the task implementations.
  data._task->work(data._worker_id);

  log_develop_trace(gc, workgang)("Finished work gang: %s task: %s worker: %u thread: " PTR_FORMAT,
                                  name(), data._task->name(), data._worker_id, p2i(Thread::current()));
}
///////////////////////////////
// YieldingFlexibleGangWorker
///////////////////////////////

// Main loop of a yielding-flexible gang worker.  The gang monitor is
// held for the whole loop except while actually running a task (see
// the MutexUnlockerEx scope below) and while blocked in wait().  The
// worker polls the gang for work and uses the task's sequence number
// to tell a freshly dispatched task apart from one it has already
// seen.  After running a task it updates the task's status based on
// the gang's finished/yielded/active worker counts, notifying the
// overseer through the monitor when the task is fully completed,
// aborted, or has fully yielded.
void YieldingFlexibleGangWorker::loop() {
  // Sequence number of the last task dispatch this worker acted on;
  // 0 matches no dispatch, so the first poll can pick up work.
  int previous_sequence_number = 0;
  Monitor* gang_monitor = yf_gang()->monitor();
  // Acquire the gang monitor for the lifetime of the loop (no
  // safepoint check: this is a non-Java worker thread).
  MutexLockerEx ml(gang_monitor, Mutex::_no_safepoint_check_flag);
  YieldingWorkData data;
  int id;
  while (true) {
    // Check if there is work to do.
    yf_gang()->internal_worker_poll(&data);
    if (data.task() != NULL && data.sequence_number() != previous_sequence_number) {
      // There is work to be done.
      // First check if we need to become active or if there
      // are already the requisite number of workers
      if (yf_gang()->started_workers() == yf_gang()->active_workers()) {
        // There are already enough workers, we do not need
        // to run; fall through and wait on monitor.
      } else {
        // We need to pitch in and do the work.
        assert(yf_gang()->started_workers() < yf_gang()->active_workers(), "Unexpected state");
        // Our worker id is the number of workers started before us.
        id = yf_gang()->started_workers();
        yf_gang()->internal_note_start();
        // Now, release the gang mutex and do the work.
        {
          MutexUnlockerEx mul(gang_monitor, Mutex::_no_safepoint_check_flag);
          // Tag logging during the task with its GC id.
          GCIdMark gc_id_mark(data.task()->gc_id());
          data.task()->work(id); // This might include yielding
        }
        // Reacquire monitor and note completion of this worker
        yf_gang()->internal_note_finish();
        // Update status of task based on whether all workers have
        // finished or some have yielded
        assert(data.task() == yf_gang()->task(), "Confused task binding");
        if (yf_gang()->finished_workers() == yf_gang()->active_workers()) {
          // We are the last worker to finish: move the task to its
          // terminal state and wake the overseer.
          switch (data.yf_task()->status()) {
            case ABORTING: {
              data.yf_task()->set_status(ABORTED);
              break;
            }
            case ACTIVE:
            case COMPLETING: {
              data.yf_task()->set_status(COMPLETED);
              break;
            }
            default:
              ShouldNotReachHere();
          }
          gang_monitor->notify_all(); // Notify overseer
        } else { // at least one worker is still working or yielded
          assert(yf_gang()->finished_workers() < yf_gang()->active_workers(), "Counts inconsistent");
          switch (data.yf_task()->status()) {
            case ACTIVE: {
              // first, but not only thread to complete
              data.yf_task()->set_status(COMPLETING);
              break;
            }
            case YIELDING: {
              // If every non-finished worker has yielded, the task as
              // a whole has yielded; tell the overseer.
              if (yf_gang()->finished_workers() + yf_gang()->yielded_workers() == yf_gang()->active_workers()) {
                data.yf_task()->set_status(YIELDED);
                gang_monitor->notify_all(); // notify overseer
              }
              break;
            }
            case ABORTING:
            case COMPLETING: {
              break; // nothing to do
            }
            default: // everything else: INACTIVE, YIELDED, ABORTED, COMPLETED
              ShouldNotReachHere();
          }
        }
      }
    }
    // Remember the sequence number so this dispatch is not re-run on
    // the next iteration.
    previous_sequence_number = data.sequence_number();
    // Wait for more work (releases the monitor while blocked).
    gang_monitor->wait(Mutex::_no_safepoint_check_flag);
  }
}