void DynamicTask::inlineOrEnqueueTask() { #ifdef INLINE_NULL_TASKS if(isNullTask()) runWrapper(); //dont bother enqueuing just run it else #endif { WorkerThread* self = WorkerThread::self(); #ifdef DEBUG JASSERT(self!=NULL); #endif self->pushLocal(this); } }
void DynamicTask::inlineOrEnqueueTask() { #ifdef INLINE_NULL_TASKS if(isNullTask()) runWrapper(); //dont bother enqueuing just run it else #endif { WorkerThread* self = WorkerThread::self(); if(self!=NULL) { self->pushLocal(this); }else{ DynamicScheduler::cpuScheduler().injectWork(this); } } }
void petabricks::DynamicTask::runWrapper(bool isAborting){ JASSERT(((_state==S_READY && _type==TYPE_CPU) || (_state==S_REMOTE_READY && _type==TYPE_OPENCL)) && _numPredecessors==0)(_state)(_numPredecessors); if (!isAborting) { #ifdef DISTRIBUTED_CACHE if(!isNullTask()) { WorkerThread::self()->cache()->invalidate(); } #endif _continuation = run(); } else { _continuation = NULL; } completeTaskDeps(isAborting); }
// Execute the task body (unless aborting), then complete the task: either
// transfer its dependents to the continuation task returned by run(), or
// notify each dependent directly.  Lock-ordering note: this task's _lock is
// taken first (to swap out dependents and publish the final state), and only
// after it is released is the continuation's _lock taken -- the two locks are
// never held at once.
void petabricks::DynamicTask::runWrapper(bool isAborting){
  JASSERT(_state==S_READY && _numPredecessors==0)(_state)(_numPredecessors);
  if (!isAborting) {
    // run() returns NULL when the task finished, or a continuation task
    // that logically carries on this task's work
    _continuation = run();
  } else {
    _continuation = NULL;
  }
  std::vector<DynamicTask*> tmp;
  {
    JLOCKSCOPE(_lock);
    // steal the dependent list and publish the terminal state atomically,
    // so late-arriving dependents see S_CONTINUED/S_COMPLETE consistently
    _dependents.swap(tmp);
    if(_continuation) _state = S_CONTINUED;
    else              _state = S_COMPLETE;
  }
  if(_continuation){
#ifdef VERBOSE
    JTRACE("task complete, continued")(tmp.size());
#endif
    {
      JLOCKSCOPE(_continuation->_lock);
      // hand our dependents over to the continuation; they will be
      // notified when the continuation itself completes
      if(_continuation->_dependents.empty()){
        //swap is faster than insert
        _continuation->_dependents.swap(tmp);
      }else if(!tmp.empty()){
        _continuation->_dependents.insert(_continuation->_dependents.end(), tmp.begin(), tmp.end());
      }
    }
    _continuation->enqueue();
  }else{
#ifdef VERBOSE
    if(!isNullTask()) JTRACE("task complete")(tmp.size());
#endif
    // no continuation: this task is done, so release every dependent now
    std::vector<DynamicTask*>::iterator it;
    for(it = tmp.begin(); it != tmp.end(); ++it) {
#ifdef DEBUG
      JASSERT(*it != 0)(tmp.size());
#endif
      (*it)->decrementPredecessors(isAborting);
    }
  }
  decRefCount(); //matches with enqueue();
}