// Block (on the gang monitor, which the caller must already hold) until the
// currently bound yielding task reaches a terminal-or-paused state
// (COMPLETED, ABORTED, or YIELDED), then validate the gang's worker
// counters for that outcome and, on completion/abort, release the
// gang<->task binding.
void YieldingFlexibleWorkGang::wait_for_gang() {
  // We must already own the monitor: the status reads and counter
  // invariants below are only consistent under the lock.
  assert(monitor()->owned_by_self(), "Data race");
  // Wait for task to complete or yield
  for (Status status = yielding_task()->status();
       status != COMPLETED && status != YIELDED && status != ABORTED;
       status = yielding_task()->status()) {
    // While the task is still in flight, no per-worker counter may
    // exceed the number of workers participating in this task.
    assert(started_workers() <= active_workers(), "invariant");
    assert(finished_workers() <= active_workers(), "invariant");
    assert(yielded_workers() <= active_workers(), "invariant");
    // Workers notify the monitor on state transitions; re-check on wakeup.
    monitor()->wait(Mutex::_no_safepoint_check_flag);
  }
  switch (yielding_task()->status()) {
    case COMPLETED:
    case ABORTED: {
      // Terminal states: every active worker must have finished and
      // none may still be parked in a yield.
      assert(finished_workers() == active_workers(), "Inconsistent status");
      assert(yielded_workers() == 0, "Invariant");
      reset();   // for next task; gang<->task binding released
      break;
    }
    case YIELDED: {
      // Paused state: at least one worker yielded, and every active
      // worker is accounted for as either yielded or finished.
      assert(yielded_workers() > 0, "Invariant");
      assert(yielded_workers() + finished_workers() == active_workers(),
             "Inconsistent counts");
      break;
    }
    case ACTIVE:
    case INACTIVE:
    case COMPLETING:
    case YIELDING:
    case ABORTING:
    default:
      // The wait loop above cannot exit in any of these states.
      ShouldNotReachHere();
  }
}
// Dispatch |task| to every worker in the gang and block until all
// total_workers() of them have finished it.  Must only be entered by a
// thread that may skip safepoint checks (the VM thread).
void WorkGang::run_task(AbstractGangTask* task) {
  // This thread is executed by the VM thread which does not block
  // on ordinary MutexLocker's.
  MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
  if (TraceWorkGang) {
    tty->print_cr("Running work gang %s task %s", name(), task->name());
  }
  // Tell all the workers to run a task.
  assert(task != NULL, "Running a null task");
  // Initialize the per-dispatch bookkeeping the workers key off of.
  _task = task;
  _sequence_number += 1;   // distinguishes this dispatch from earlier ones
  _started_workers = 0;
  _finished_workers = 0;
  // Tell the workers to get to work.
  monitor()->notify_all();
  // Wait for them to be finished; workers notify as they complete.
  while (finished_workers() < total_workers()) {
    if (TraceWorkGang) {
      tty->print_cr("Waiting in work gang %s: %d/%d finished sequence %d",
                    name(), finished_workers(), total_workers(),
                    _sequence_number);
    }
    monitor()->wait(/* no_safepoint_check */ true);
  }
  _task = NULL;
  if (TraceWorkGang) {
    // BUGFIX: was "/nFinished..." (a literal slash-n); use the newline
    // escape as in the other run_task overload.
    tty->print_cr("\nFinished work gang %s: %d/%d sequence %d",
                  name(), finished_workers(), total_workers(),
                  _sequence_number);
  }
}
void AbstractWorkGang::stop() { // Tell all workers to terminate, then wait for them to become inactive. MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag); if (TraceWorkGang) { tty->print_cr("Stopping work gang %s task %s", name(), task()->name()); } _task = NULL; _terminate = true; monitor()->notify_all(); while (finished_workers() < active_workers()) { if (TraceWorkGang) { tty->print_cr("Waiting in work gang %s: %d/%d finished", name(), finished_workers(), active_workers()); } monitor()->wait(/* no_safepoint_check */ true); } }
/**
 * Hand the task to no_of_parallel_workers worker threads and wait
 * until that many of them have executed it.
 */
void WorkGang::run_task(AbstractGangTask* task, uint no_of_parallel_workers) {
  task->set_for_termination(no_of_parallel_workers);
  // Hold the monitor so no other thread can submit a task concurrently.
  MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
  if (TraceWorkGang) {
    tty->print_cr("Running work gang %s task %s", name(), task->name());
  }
  // Tell all the workers to run a task.
  assert(task != NULL, "Running a null task");
  // Initialize the bookkeeping for this dispatch.
  _task = task;
  _sequence_number += 1;   // task sequence number
  _started_workers = 0;    // workers that have started the current task
  _finished_workers = 0;   // workers that have finished the current task
  // Wake all worker threads.
  monitor()->notify_all();
  // Wait for the requested number of workers to finish the task.
  // BUGFIX: removed a leftover debug printf() here that wrote directly
  // to stdout (bypassing tty) on every wait iteration.
  while (finished_workers() < no_of_parallel_workers) {
    if (TraceWorkGang) {
      tty->print_cr("Waiting in work gang %s: %d/%d finished sequence %d",
                    name(), finished_workers(), no_of_parallel_workers,
                    _sequence_number);
    }
    monitor()->wait(/* no_safepoint_check */ true);
  }
  _task = NULL;
  if (TraceWorkGang) {
    tty->print_cr("\nFinished work gang %s: %d/%d sequence %d",
                  name(), finished_workers(), no_of_parallel_workers,
                  _sequence_number);
    Thread* me = Thread::current();
    tty->print_cr(" T: 0x%x VM_thread: %d", me, me->is_VM_thread());
  }
}
// Run |task| on no_of_parallel_workers workers of this gang and block
// here until that many workers have completed it.
void WorkGang::run_task(AbstractGangTask* task, uint no_of_parallel_workers) {
  task->set_for_termination(no_of_parallel_workers);
  // The caller is the VM thread, which must not block on an ordinary
  // MutexLocker; use the no-safepoint-check variant instead.
  MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
  if (TraceWorkGang) {
    tty->print_cr("Running work gang %s task %s", name(), task->name());
  }
  assert(task != NULL, "Running a null task");
  // Set up the per-dispatch bookkeeping, then wake every worker.
  _task = task;
  _sequence_number += 1;
  _started_workers = 0;
  _finished_workers = 0;
  monitor()->notify_all();
  // Park until enough workers have checked in as finished; each worker
  // notifies the monitor when it completes.
  for (;;) {
    if (finished_workers() >= no_of_parallel_workers) {
      break;
    }
    if (TraceWorkGang) {
      tty->print_cr("Waiting in work gang %s: %d/%d finished sequence %d",
                    name(), finished_workers(), no_of_parallel_workers,
                    _sequence_number);
    }
    monitor()->wait(/* no_safepoint_check */ true);
  }
  _task = NULL;
  if (TraceWorkGang) {
    tty->print_cr("\nFinished work gang %s: %d/%d sequence %d",
                  name(), finished_workers(), no_of_parallel_workers,
                  _sequence_number);
    Thread* self_thread = Thread::current();
    tty->print_cr(" T: 0x%x VM_thread: %d",
                  self_thread, self_thread->is_VM_thread());
  }
}
// Called by a worker that wants to pause the current yielding task.
// The worker registers itself as yielded; when the last non-finished
// worker yields, the task transitions to YIELDED and waiters are woken.
// The worker then parks until the task is resumed (ACTIVE), aborted
// (ABORTING), or wrapped up (COMPLETING), at which point it deregisters
// and returns to continue running the task.
void YieldingFlexibleWorkGang::yield() {
  assert(task() != NULL, "Inconsistency; should have task binding");
  MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
  // This worker has not yielded yet, so the yielded count must still
  // leave room for it.
  assert(yielded_workers() < active_workers(), "Consistency check");
  if (yielding_task()->status() == ABORTING) {
    // Do not yield; we need to abort as soon as possible
    // XXX NOTE: This can cause a performance pathology in the
    // current implementation in Mustang, as of today, and
    // pre-Mustang in that as soon as an overflow occurs,
    // yields will not be honoured. The right way to proceed
    // of course is to fix bug # TBF, so that abort's cause
    // us to return at each potential yield point.
    return;
  }
  // Register this worker as yielded; if every active worker is now
  // either yielded or finished, the whole task is YIELDED and anyone
  // waiting on the gang (e.g. wait_for_gang) must be notified.
  if (++_yielded_workers + finished_workers() == active_workers()) {
    yielding_task()->set_status(YIELDED);
    monitor()->notify_all();
  } else {
    yielding_task()->set_status(YIELDING);
  }
  while (true) {
    switch (yielding_task()->status()) {
      case YIELDING:
      case YIELDED: {
        // Still paused (or waiting for the remaining workers to pause);
        // park until the status changes.
        monitor()->wait(Mutex::_no_safepoint_check_flag);
        break;                    // from switch
      }
      case ACTIVE:
      case ABORTING:
      case COMPLETING: {
        // Resumed (or being torn down): deregister this worker's yield
        // and go back to executing the task.
        assert(_yielded_workers > 0, "Else why am i here?");
        _yielded_workers--;
        return;
      }
      case INACTIVE:
      case ABORTED:
      case COMPLETED:
      default: {
        // The task cannot reach a fully-terminal or unbound state while
        // this worker is still parked in a yield.
        ShouldNotReachHere();
      }
    }
  }
  // Only return is from inside switch statement above
  ShouldNotReachHere();
}