// Block the caller (which must already hold the gang monitor) until the
// bound task reaches a terminal-or-parked state: COMPLETED, ABORTED, or
// YIELDED. On completion/abort the gang<->task binding is released via
// reset(); on yield the gang stays bound so the task can be continued.
void YieldingFlexibleWorkGang::wait_for_gang() {

  assert(monitor()->owned_by_self(), "Data race");
  // Wait for task to complete or yield
  for (Status status = yielding_task()->status();
       status != COMPLETED && status != YIELDED && status != ABORTED;
       status = yielding_task()->status()) {
    // Counter sanity: no more workers can have started, finished, or
    // yielded than are active for this task.
    assert(started_workers() <= active_workers(), "invariant");
    assert(finished_workers() <= active_workers(), "invariant");
    assert(yielded_workers() <= active_workers(), "invariant");
    monitor()->wait(Mutex::_no_safepoint_check_flag);
  }
  switch (yielding_task()->status()) {
    case COMPLETED:
    case ABORTED: {
      // All workers must have finished; nobody may still be parked in yield().
      assert(finished_workers() == active_workers(), "Inconsistent status");
      assert(yielded_workers() == 0, "Invariant");
      reset();   // for next task; gang<->task binding released
      break;
    }
    case YIELDED: {
      // Every active worker is accounted for as either yielded or finished.
      assert(yielded_workers() > 0, "Invariant");
      assert(yielded_workers() + finished_workers() == active_workers(),
             "Inconsistent counts");
      break;
    }
    case ACTIVE:
    case INACTIVE:
    case COMPLETING:
    case YIELDING:
    case ABORTING:
    default:
      // Transient states are excluded by the wait loop above.
      ShouldNotReachHere();
  }
}
// Request that the currently bound task abort: move it to ABORTING so
// workers bail out at their next check, and wake any workers parked in
// yield() so they can observe the state change promptly.
void YieldingFlexibleWorkGang::abort() {
  assert(task() != NULL, "Inconsistency; should have task binding");
  MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
  assert(yielded_workers() < active_workers(), "Consistency check");
#ifndef PRODUCT
  // Debug-only state check: abort() is only legal while the task is
  // in flight (or already winding down), never before start or after end.
  switch (yielding_task()->status()) {
    // allowed states
    case ACTIVE:
    case ABORTING:
    case COMPLETING:
    case YIELDING:
      break;
    // not allowed states
    case INACTIVE:
    case ABORTED:
    case COMPLETED:
    case YIELDED:
    default:
      ShouldNotReachHere();
  }
#endif // !PRODUCT
  Status prev_status = yielding_task()->status();
  yielding_task()->set_status(ABORTING);
  if (prev_status == YIELDING) {
    assert(yielded_workers() > 0, "Inconsistency");
    // At least one thread has yielded, wake it up
    // so it can go back to waiting stations ASAP.
    monitor()->notify_all();
  }
}
// Bind 'new_task' to this gang, size the active worker set, wake the
// workers, and block (via wait_for_gang) until the task completes,
// aborts, or yields. The gang must not already be bound to a task.
void YieldingFlexibleWorkGang::start_task(YieldingFlexibleGangTask* new_task) {
  MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
  assert(task() == NULL, "Gang currently tied to a task");
  assert(new_task != NULL, "Null task");
  // Bind task to gang
  _task = new_task;
  new_task->set_gang(this);  // Establish 2-way binding to support yielding
  _sequence_number++;

  // Size the active worker set. A requested size of 0 means "use the
  // gang's current active size"; otherwise cap the request at the total
  // number of workers. (requested_size is unsigned, so a ">= 0" check
  // would be vacuously true — the tautological assert was removed.)
  uint requested_size = new_task->requested_size();
  if (requested_size != 0) {
    _active_workers = MIN2(requested_size, total_workers());
  } else {
    _active_workers = active_workers();
  }
  new_task->set_actual_size(_active_workers);
  new_task->set_for_termination(_active_workers);

  // Fresh task: all per-task worker counters must have been reset.
  assert(_started_workers == 0, "Tabula rasa non");
  assert(_finished_workers == 0, "Tabula rasa non");
  assert(_yielded_workers == 0, "Tabula rasa non");
  yielding_task()->set_status(ACTIVE);

  // Wake up all the workers, the first few will get to work,
  // and the rest will go back to sleep
  monitor()->notify_all();
  wait_for_gang();
}
// Run 'task' across the gang's currently-configured active worker count.
// If active_workers() is passed, _finished_workers
// must only be incremented for workers that find non_null
// work (as opposed to all those that just check that the
// task is not null).
void FlexibleWorkGang::run_task(AbstractGangTask* task) {
  WorkGang::run_task(task, (uint) active_workers());
}
// Called by a worker thread that wants to yield. The worker parks here
// (counted in _yielded_workers) until the task is resumed, aborted, or
// completing; the last worker to yield flips the task to YIELDED and
// wakes the coordinator blocked in wait_for_gang(). If an abort is
// already in progress the yield request is ignored so the worker can
// unwind as quickly as possible.
void YieldingFlexibleWorkGang::yield() {
  assert(task() != NULL, "Inconsistency; should have task binding");
  MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
  assert(yielded_workers() < active_workers(), "Consistency check");
  if (yielding_task()->status() == ABORTING) {
    // Do not yield; we need to abort as soon as possible
    // XXX NOTE: This can cause a performance pathology in the
    // current implementation in Mustang, as of today, and
    // pre-Mustang in that as soon as an overflow occurs,
    // yields will not be honoured. The right way to proceed
    // of course is to fix bug # TBF, so that abort's cause
    // us to return at each potential yield point.
    return;
  }
  // If every active worker is now either yielded or finished, the task
  // as a whole has yielded; otherwise record that a yield is in progress.
  if (++_yielded_workers + finished_workers() == active_workers()) {
    yielding_task()->set_status(YIELDED);
    monitor()->notify_all();
  } else {
    yielding_task()->set_status(YIELDING);
  }
  while (true) {
    switch (yielding_task()->status()) {
      case YIELDING:
      case YIELDED: {
        // Stay parked until the coordinator changes the task state.
        monitor()->wait(Mutex::_no_safepoint_check_flag);
        break;                    // from switch
      }
      case ACTIVE:
      case ABORTING:
      case COMPLETING: {
        // Task resumed (or is winding down): un-count ourselves and return
        // to the work loop.
        assert(_yielded_workers > 0, "Else why am i here?");
        _yielded_workers--;
        return;
      }
      case INACTIVE:
      case ABORTED:
      case COMPLETED:
      default: {
        // Terminal states cannot be observed while a worker is still parked.
        ShouldNotReachHere();
      }
    }
  }
  // Only return is from inside switch statement above
  ShouldNotReachHere();
}
void AbstractWorkGang::stop() { // Tell all workers to terminate, then wait for them to become inactive. MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag); if (TraceWorkGang) { tty->print_cr("Stopping work gang %s task %s", name(), task()->name()); } _task = NULL; _terminate = true; monitor()->notify_all(); while (finished_workers() < active_workers()) { if (TraceWorkGang) { tty->print_cr("Waiting in work gang %s: %d/%d finished", name(), finished_workers(), active_workers()); } monitor()->wait(/* no_safepoint_check */ true); } }
// Convenience overload: run 'task' using all currently active workers.
void WorkGang::run_task(AbstractGangTask* task) {
  run_task(task, active_workers());
}