void ZTaskManager::Run(float fDelta) { if (m_pCurrTask) { // 현재 태스크가 있으면 실행 ZTaskResult ret = m_pCurrTask->Run(fDelta); switch (ret) { case ZTR_RUNNING: { } break; case ZTR_COMPLETED: { CompleteCurrTask(); } break; case ZTR_CANCELED: { CancelCurrTask(); } break; } } else { if (PopTask()) { // 현재 태스크가 없으면 새로운 태스크를 꺼내서 시작한다. m_pCurrTask->Start(); } } }
void ThreadPool::ThreadFunc() { while ( bRunning() ) { boost::function<void () > func; while (PopTask(func)) { func(); } } }
//============================================================================ // NThreadPool::ExecuteTasks : Execute the tasks. //---------------------------------------------------------------------------- void NThreadPool::ExecuteTasks(void) { NThreadTask *theTask; bool areDone; // Update the pool NThreadUtilities::AtomicAdd32(mActiveThreads, 1); // Execute the tasks do { // Wait for the semaphore mSemaphore.Wait(); // Get the next task mLock.Lock(); areDone = mStopThreads; theTask = NULL; if (!areDone && !mTasks.empty()) { theTask = PopTask(mTasks, mHavePushed); mHavePushed = false; } mLock.Unlock(); // Execute the task if (theTask != NULL) { NN_MEMORY_POOL thePool; NThreadUtilities::AtomicAdd32(mActiveTasks, 1); if (!theTask->IsStopped()) theTask->Run(); delete theTask; NThreadUtilities::AtomicAdd32(mActiveTasks, -1); } } while (!areDone); // Update the pool NThreadUtilities::AtomicAdd32(mActiveThreads, -1); }
// Try to obtain the next task for a worker; if none arrives, retire the
// worker: unregister it from the pool, delete it, and — if it was the last
// one — wake anyone waiting for the pool to empty out.
// NOTE(review): PopTask(10) — presumably a timeout of 10 (units defined by
// PopTask); confirm.
TaskCallback WorkerThreadPool::PopTaskOrQuit(WorkerThread *wt)
{
    TaskCallback cb = PopTask(10);
    if (cb.IsValid())
        return cb;

    // No task within the timeout: tear this worker down under the pool lock.
    {
        util::Mutex::Lock lock(m_mutex);
        m_threads.remove(wt);
        delete wt;
        // TRACE << "-Now " << m_threads.size() << " threads\n";
        if (m_threads.empty())
            m_threads_empty.NotifyAll();
    }
    return cb;
}
// Execute _cnGroup groups of tasks on the worker-thread pool, one group at a
// time: push the group's tasks, wake the workers, and busy-wait until every
// task of the group has been reported finished before moving to the next.
//
// _bUseCallerThread : if true, the calling thread also pops and executes
//                     tasks instead of only waiting for the workers.
// _aGroup / _cnGroup: array of task groups and its length.
void TaskProxy::ExecuteTaskGroups( bool _bUseCallerThread, TaskGroup* _aGroup, const uint32 _cnGroup )
{
    const Array<WorkerThread*>& rvpThreadPool = WorkerThreadManager::s_GetInstance()->_GetPool();
    uint32 i, j, nThread = rvpThreadPool.size();

    // Define proxy for threads — workers will pull tasks from this proxy.
    for( i = 0; i < nThread; i++ )
        rvpThreadPool[i]->SetTaskProxy( this );

    for( j = 0; j < _cnGroup; j++ )
    {
        // Queue every task of the current group.
        const Array<Task*>& rvpTask = _aGroup[j].m_vpTask;
        uint32 k, nTask = rvpTask.size();
        for( k = 0; k < nTask; k++ )
            PushTask( rvpTask[k] );

        // Signal threads so they start consuming the freshly pushed tasks.
        for( i = 0; i < nThread; i++ )
            rvpThreadPool[i]->ForceBusy();

        uint32 iFinishedTask = 0;
        if( _bUseCallerThread )
        {
            // Let the caller drain the queue alongside the workers; each
            // task it executes counts directly toward the finished total.
            Task* pTask = NULL;
            while( (pTask = PopTask()) != NULL )
            {
                pTask->Execute();
                iFinishedTask++;
            }
        }

        // Wait other threads completion — busy-wait polling each worker's
        // finished-task counter until the whole group is accounted for.
        while( iFinishedTask != nTask )
        {
            for( i = 0; i < nThread; i++ )
            {
                if( rvpThreadPool[i]->PopFinishedTask() )
                    iFinishedTask++;
            }
        }
    }

    // Remove proxy once finished
    for( i = 0; i < nThread; i++ )
        rvpThreadPool[i]->SetTaskProxy( NULL );
}
void WorkerThread::DoTasks() { ThreadTask* task = 0; do { //Process any tasks in our queue while (task = PopTask()) { task->Run(); delete task; } //We ran out of tasks, so try to see if we can steal one task = StealTask(); if (task) { task->Run(); delete task; } } while (task); }
void TaskProxy::ExecuteTasks( bool _bUseCallerThread, const uint32 _cnWaitTask ) { const Array<WorkerThread*>& rvpThreadPool = WorkerThreadManager::s_GetInstance()->_GetPool(); uint32 i, nThread = rvpThreadPool.size(); uint32 iFinishedTask = 0; // Define proxy for threads for( i = 0; i < nThread; i++ ) rvpThreadPool[i]->SetTaskProxy( this ); if( _bUseCallerThread ) { Task* pTask = NULL; while( (pTask = PopTask()) != NULL ) { pTask->Execute(); iFinishedTask++; } } // Wait other threads completion while( iFinishedTask != _cnWaitTask ) { for( i = 0; i < nThread; i++ ) { if( rvpThreadPool[i]->PopFinishedTask() ) iFinishedTask++; } } // Remove proxy once finished for( i = 0; i < nThread; i++ ) rvpThreadPool[i]->SetTaskProxy( NULL ); }