// Runs the engine in the calling thread, blocking until all work is done.
// The calling thread participates as a worker: it loops on threadFunction()
// itself while startThreads() fans additional workers out to the pool.
// NOTE(review): `semaphore` appears to count active workers — acquire()
// registers this thread, release() deregisters it, wait() blocks until all
// workers are gone; confirm against the barrier/semaphore class used here.
void ThreadEngineBase::startBlocking()
{
    start();
    semaphore.acquire();
    startThreads();
    bool throttled = false;
#ifndef QT_NO_EXCEPTIONS
    try {
#endif
        // Keep running work items until the user functor asks to be throttled.
        while (threadFunction() == ThrottleThread) {
            if (threadThrottleExit()) {
                // This thread is allowed to stop; remember that we did NOT
                // finish normally so we skip the release() below (the
                // throttle-exit path presumably handled it).
                throttled = true;
                break;
            }
        }
#ifndef QT_NO_EXCEPTIONS
    } catch (QtConcurrent::Exception &e) {
        handleException(e);
    } catch (...) {
        // Wrap unknown exceptions so they can be rethrown in the caller.
        handleException(QtConcurrent::UnhandledException());
    }
#endif
    if (throttled == false) {
        semaphore.release();
    }
    // Block until every worker thread has deregistered, then tear down and
    // rethrow any exception captured by a worker.
    semaphore.wait();
    finish();
    exceptionStore.throwPossibleException();
}
// Runs the engine to completion entirely in the calling thread: no worker
// threads are started, threadFunction() is simply invoked repeatedly until
// it reports ThreadFinished.
void ThreadEngineBase::startSingleThreaded()
{
    start();
    for (;;) {
        if (threadFunction() == ThreadFinished)
            break;
    }
    finish();
}
// Worker-thread entry point (QRunnable::run). Each pool thread pulls work
// via threadFunction() until the work is done or the engine is throttled.
void ThreadEngineBase::run() // implements QRunnable.
{
    // Bail out early if the computation was canceled before this worker
    // got scheduled; threadExit() still deregisters this thread.
    if (this->isCanceled()) {
        threadExit();
        return;
    }

    // Ask for more workers if there is enough work to go around.
    startThreads();

#ifndef QT_NO_EXCEPTIONS
    try {
#endif
        while (threadFunction() == ThrottleThread) {
            // threadFunction returning ThrottleThread means it that the user
            // struct wants to be throttled by making a worker thread exit.
            // Respect that request unless this is the only worker thread left
            // running, in which case it has to keep going.
            if (threadThrottleExit())
                return;
        }

#ifndef QT_NO_EXCEPTIONS
    } catch (QtConcurrent::Exception &e) {
        // Store the exception so the thread that collects results can rethrow.
        handleException(e);
    } catch (...) {
        handleException(QtConcurrent::UnhandledException());
    }
#endif
    threadExit();
}
/// Constructs the pool and immediately spawns `size_` worker threads,
/// each running this pool's threadFunction() until shutdown.
BackgroundProcessingPool::BackgroundProcessingPool(int size_) : size(size_)
{
    LOG_INFO(&Logger::get("BackgroundProcessingPool"), "Create BackgroundProcessingPool with " << size << " threads");

    threads.resize(size);
    for (size_t i = 0; i < threads.size(); ++i)
        threads[i] = std::thread([this] { threadFunction(); });
}
/* Win32 thread trampoline: unpacks a heap-allocated threadFuncInvocation,
 * frees it, and forwards control to the user's thread function, returning
 * its result as the thread exit code. */
static DWORD WINAPI callThreadFunc(LPVOID context)
{
    struct threadFuncInvocation * invocation = context;
    int (* func)(void * context) = invocation->threadFunction;
    void * arg = invocation->context;

    /* The invocation record belongs to us now; release it before running
     * the (potentially long-lived) user function. */
    free(invocation);

    return (DWORD) func(arg);
}
/* POSIX thread trampoline: unpacks a heap-allocated threadFuncInvocation,
 * frees it, and forwards control to the user's thread function, returning
 * its int result cast to the pthread-style void * exit value. */
static void * callThreadFunc(void * context)
{
    struct threadFuncInvocation * invocation = context;
    int (* func)(void * context) = invocation->threadFunction;
    void * arg = invocation->context;

    /* The invocation record belongs to us now; release it before running
     * the (potentially long-lived) user function. */
    free(invocation);

    return (void *) func(arg);
}
/// Constructs the pool: spawns `size` worker threads running threadFunction()
/// plus one dedicated thread that services delayed (scheduled-in-the-future)
/// tasks via delayExecutionThreadFunction().
BackgroundSchedulePool::BackgroundSchedulePool(size_t size) : size(size)
{
    LOG_INFO(&Logger::get("BackgroundSchedulePool"), "Create BackgroundSchedulePool with " << size << " threads");

    threads.resize(size);
    for (size_t i = 0; i < threads.size(); ++i)
        threads[i] = std::thread([this] { threadFunction(); });

    delayed_thread = std::thread([this] { delayExecutionThreadFunction(); });
}
// Benchmarks one barrier round-trip with nThreads participants: spawns
// nThreads-1 helper threads (ids 1..nThreads-1), takes part itself as id 0,
// and returns the elapsed time reported by the second timer() call.
double testBarrier(IBarrier &b, size_t nThreads)
{
    std::vector<std::thread> helpers;
    for (size_t idx = 0; idx < nThreads - 1; ++idx)
        helpers.emplace_back(threadFunction, std::ref(b), idx + 1, nThreads);

    timer();                          // start the clock
    threadFunction(b, 0, nThreads);   // this thread is participant 0
    double elapsed = timer();         // stop the clock

    for (auto & h : helpers)
        h.join();

    return elapsed;
}
void MillerRabinParallelPrimes::calcPrimes() { if (mNumberOfThreads > 1) { std::future<size_t> f[mNumberOfThreads]; size_t blockSize = (mCheckLimit / mNumberOfThreads); size_t remaining = (mCheckLimit - (blockSize * mNumberOfThreads)); unsigned long low = 0, high = blockSize; ssize_t t = mNumberOfThreads - 1; start = std::chrono::steady_clock::now(); do { f[t] = std::async(std::launch::async, &MillerRabinParallelPrimes::threadFunction, this, low, high); t--; // prepare for next loop: low = high + 1; high = low + blockSize -1; if (!(!!t)) // is this the next thread the last thread? { // if it's the last, give it the remaining numbers high += remaining; } } while(t >= 0); std::cout << "waiting for threads..." << std::endl; for (ssize_t i = mNumberOfThreads - 1; i >= 0 ; i--) { f[i].wait(); mNumberOfPrimes += f[i].get(); } end = std::chrono::steady_clock::now(); } else { std::cout << "running single threaded!" << std::endl; start = std::chrono::steady_clock::now(); mNumberOfPrimes = threadFunction(0, mCheckLimit); end = std::chrono::steady_clock::now(); } }