/*! run this task */
__dllexport void TaskSchedulerTBB::Task::run (Thread& thread) // FIXME: avoid as many __dllexports as possible
{
  /* try to run if not already stolen: atomically move INITIALIZED -> DONE so
     exactly one thread (owner or thief) executes the closure */
  if (try_switch_state(INITIALIZED,DONE))
  {
    Task* prevTask = thread.task;  // remember enclosing task so nesting restores correctly
    thread.task = this;
    try {
      /* once any task has raised, the scheduler is cancelling — skip execution */
      if (thread.scheduler->cancellingException == nullptr)
        closure->execute();
    } catch (...) {
      /* keep only the first exception; later ones are dropped */
      if (thread.scheduler->cancellingException == nullptr)
        thread.scheduler->cancellingException = std::current_exception();
    }
    thread.task = prevTask;
    add_dependencies(-1);  // our own execution no longer pending
  }

  /* steal until all dependencies have completed: while children are still
     running, help by draining this task's local queue */
  steal_loop(thread,
             [&] () { return dependencies>0; },
             [&] () { while (thread.tasks.execute_local(thread,this)); });

  /* now signal our parent task that we are finished */
  if (parent)
    parent->add_dependencies(-1);
}
std::exception_ptr TaskSchedulerTBB::thread_loop(size_t threadIndex) { /* allocate thread structure */ std::unique_ptr<Thread> mthread(new Thread(threadIndex,this)); // too large for stack allocation Thread& thread = *mthread; threadLocal[threadIndex] = &thread; Thread* oldThread = swapThread(&thread); /* main thread loop */ while (anyTasksRunning) { steal_loop(thread, [&] () { return anyTasksRunning > 0; }, [&] () { atomic_add(&anyTasksRunning,+1); while (thread.tasks.execute_local(thread,nullptr)); atomic_add(&anyTasksRunning,-1); }); } threadLocal[threadIndex] = nullptr; swapThread(oldThread); /* remember exception to throw */ std::exception_ptr except = nullptr; if (cancellingException != nullptr) except = cancellingException; /* wait for all threads to terminate */ atomic_add(&threadCounter,-1); while (threadCounter > 0) yield(); return except; }
void TaskSchedulerTBB::thread_loop(size_t threadIndex) { /* allocate thread structure */ Thread thread(threadIndex,this); threadLocal[threadIndex] = &thread; Thread* oldThread = swapThread(&thread); /* main thread loop */ while (anyTasksRunning > 0) { steal_loop(thread, [&] () { return anyTasksRunning > 0; }, [&] () { atomic_add(&anyTasksRunning,+1); while (thread.tasks.execute_local(thread,nullptr)); atomic_add(&anyTasksRunning,-1); }); } threadLocal[threadIndex] = nullptr; swapThread(oldThread); /* wait for all threads to terminate */ atomic_add(&threadCounter,-1); while (threadCounter > 0) yield(); }
/*! worker main loop: registers a per-thread structure, steals/executes tasks
    until no work remains, and returns any exception that cancelled the run */
std::exception_ptr TaskScheduler::thread_loop(size_t threadIndex)
{
  /* allocate thread structure */
  std::unique_ptr<Thread> mthread(new Thread(threadIndex,this)); // too large for stack allocation
  Thread& thread = *mthread;
  threadLocal[threadIndex].store(&thread);  // threadLocal holds atomics here (cf. plain assignment in other variants)
  Thread* oldThread = swapThread(&thread);

  /* main thread loop: keep going while any task is in flight */
  while (anyTasksRunning)
  {
    steal_loop(thread,
               [&] () { return anyTasksRunning > 0; },
               [&] () {
                 anyTasksRunning++;  // atomic counter — marks this worker as actively executing
                 while (thread.tasks.execute_local_internal(thread,nullptr));
                 anyTasksRunning--;
               });
  }
  threadLocal[threadIndex].store(nullptr);
  swapThread(oldThread);

  /* remember exception to throw */
  std::exception_ptr except = nullptr;
  if (cancellingException != nullptr) except = cancellingException;

  /* wait for all threads to terminate */
  threadCounter--;
#if defined(__WIN32__)
  size_t loopIndex = 1;
#endif
#define LOOP_YIELD_THRESHOLD (4096)
  while (threadCounter > 0)
  {
#if defined(__WIN32__)
    /* on Windows mostly pause the pipeline and only yield the timeslice every
       4096 iterations — presumably because yield() is costly there; TODO confirm */
    if ((loopIndex % LOOP_YIELD_THRESHOLD) == 0)
      yield();
    else
      _mm_pause();
    loopIndex++;
#else
    yield();
#endif
  }
  return except;
}
/*! persistent worker main loop: sleeps (or spins) until tasks arrive, executes
    them, and repeats until the scheduler is terminated. Uses a function-try-block
    so no exception escapes the thread entry point. */
void TaskSchedulerTBB::thread_loop(size_t threadIndex) try
{
#if defined(__MIC__)
  setAffinity(threadIndex);  // pin worker to a core on Xeon Phi
#endif

  /* allocate thread structure */
  Thread thread(threadIndex,this);
  threadLocal[threadIndex] = &thread;
  thread_local_thread = &thread;

  /* main thread loop */
  while (!terminate)
  {
    /* wake-up condition: work became available or shutdown was requested */
    auto predicate = [&] () { return anyTasksRunning || terminate; };

    /* all threads are either spinning ... */
    if (spinning)
    {
      while (!predicate()) __pause_cpu(32);
    }
    /* ... or waiting inside some condition variable */
    else
    {
      //std::unique_lock<std::mutex> lock(mutex);
      Lock<MutexSys> lock(mutex);
      condition.wait(mutex, predicate);
    }
    if (terminate) break;

    /* special static load balancing for top level task sets */
#if TASKSCHEDULER_STATIC_LOAD_BALANCING
    if (executeTaskSet(thread)) continue;
#endif

    /* work on available task: mark this worker active while draining its queue */
    steal_loop(thread,
               [&] () { return anyTasksRunning > 0; },
               [&] () {
                 atomic_add(&anyTasksRunning,+1);
                 while (thread.tasks.execute_local(thread,nullptr));
                 atomic_add(&anyTasksRunning,-1);
               });
  }

  /* decrement threadCount again */
  atomic_add(&threadCounter,-1);

  /* wait for all threads to terminate before the stack-allocated Thread dies */
  while (threadCounter > 0)
    yield();
  threadLocal[threadIndex] = nullptr;
}
catch (const std::exception& e)
{
  /* last-resort handler: report and abort the whole process */
  std::cout << "Error: " << e.what() << std::endl; // FIXME: propagate to main thread
  threadLocal[threadIndex] = nullptr;
  exit(1);
}