int main() {
    TaskScheduler scheduler;
    for (size_t i = 0; i < 10; i++) {
        scheduler.schedule({ TaskCategory::Standard, TaskPriority::Standard, [&scheduler, i]() {
            std::cout << "[standard] hello from standard task " << i << "!\nscheduling new task\n";
            scheduler.schedule({ TaskCategory::IO, TaskPriority::Standard, [i] {
                std::cout << "[io] hello from io task " << i << "!\n[io] sleeping...\n";
                std::this_thread::sleep_for(7s);
            } });
            std::cout << "[standard] sleeping...\n";
            std::this_thread::sleep_for(5s);
            std::cout << "[standard] Done sleeping!\n";
        } });
    }

    scheduler.schedule({ TaskCategory::LongComputation, TaskPriority::Standard, [] {
        std::cout << "[long_comp] hello from long_comp task!\n[long_comp] sleeping...\n";
        std::this_thread::sleep_for(3s);
        std::cout << "[long_comp] Done sleeping!\n";
    } });

    std::cin.get();
    return 0;
}
void TaskScheduler::TaskingThreadFunction( const ThreadArgs& args_ )
{
    CoInitializeEx(NULL, COINIT_MULTITHREADED);
    uint32_t threadNum = args_.threadNum;
    TaskScheduler* pTS = args_.pTaskScheduler;
    gtl_threadNum = threadNum;
    gtl_pCurrTS   = pTS;

    pTS->m_NumThreadsRunning.fetch_add(1, std::memory_order_relaxed );

    uint32_t spinCount = 0;
    uint32_t hintPipeToCheck_io = threadNum + 1;    // does not need to be clamped.
    while( pTS->m_bRunning.load( std::memory_order_relaxed ) )
    {
        if( !pTS->TryRunTask( threadNum, hintPipeToCheck_io ) )
        {
            // no tasks, will spin then wait
            ++spinCount;
            if( spinCount > SPIN_COUNT )
            {
                pTS->WaitForTasks<false>( threadNum );
            }
        }
        else
        {
            spinCount = 0;
        }
    }

    pTS->m_NumThreadsRunning.fetch_sub( 1, std::memory_order_relaxed );
    gtl_threadNum = NO_THREAD_NUM;
    gtl_pCurrTS   = NULL;
    return;
}
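// The spin-then-wait idiom above (try work, spin a bounded number of times,
// then block) can be reproduced with standard primitives. A minimal sketch,
// not enkiTS code; SPIN_COUNT, the tryRunTask callback, and the condition
// variable are stand-ins for illustration:
#include <condition_variable>
#include <cstdint>
#include <mutex>

struct WorkerSync {
    std::mutex m;
    std::condition_variable cv;
    bool workAvailable = false;
};

void worker_loop(WorkerSync &sync, bool (*tryRunTask)(), const bool &running) {
    const uint32_t SPIN_COUNT = 100; // illustrative bound
    uint32_t spinCount = 0;
    while (running) {
        if (tryRunTask()) {
            spinCount = 0;            // found work, reset the spin budget
        } else if (++spinCount > SPIN_COUNT) {
            // spun long enough with no work: block until woken
            std::unique_lock<std::mutex> lock(sync.m);
            sync.cv.wait(lock, [&] { return sync.workAvailable || !running; });
            spinCount = 0;            // woke up, spin again before re-blocking
        }
    }
}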
OSTHREAD_FUNC test10_testThread(void *parm){
    TaskInfo *ti;
    TaskScheduler *ts = tgetTaskScheduler();
    ti = ts->createTask(test10_PROGTest, 0);
    assert(ti);
    ts->run();
    return (void*)-1;
}
DWORD WINAPI TaskScheduler::ThreadMain(VOID* thread_instance)
{
    TaskScheduler *pScheduler = reinterpret_cast<TaskScheduler*>(thread_instance);
    pScheduler->ExecuteTasks();
    return 0;
}
void RTSPManager::createRTSPServer(unsigned int id, unsigned int port, volatile char* watcher)
{
    std::unique_lock<std::mutex> lock(_lock);
    TaskScheduler* taskScheduler = BasicTaskScheduler::createNew();
    BasicUsageEnvironment* usageEnvironment = BasicUsageEnvironment::createNew(*taskScheduler);
    RTSPServer* rtspServer = RTSPServer::createNew(*usageEnvironment, port, NULL);
    if (rtspServer == NULL)
    {
        logger::log(usageEnvironment->getResultMsg(), logger::logType::FAILURE);
        *watcher = -1;
        this->_done = true;
        this->_condition.notify_all();
        return;
    }

    H264LiveServerMediaSession* liveSubSession = H264LiveServerMediaSession::createNew(*usageEnvironment, true, id);
    std::string streamName = "camera_" + std::to_string(id);
    ServerMediaSession* sms = ServerMediaSession::createNew(*usageEnvironment, streamName.c_str(), streamName.c_str(), "Live H264 Stream");
    sms->addSubsession(liveSubSession);
    rtspServer->addServerMediaSession(sms);

    char* url = rtspServer->rtspURL(sms);
    logger::log(INFO_RTSP_URL(url), logger::logType::PRIORITY);
    delete[] url;

    this->_done = true;
    this->_condition.notify_all();
    lock.unlock();
    taskScheduler->doEventLoop(watcher);
    return;
}
void TaskScheduler::TaskFunc(Task *task)
{
    assert(task->m_State == Task::State::Scheduled || task->m_State == Task::State::Runnable);
    TaskScheduler *scheduler = task->m_Scheduler;
    scheduler->m_Current = task;
    task->m_Func(*task);
    scheduler->SetState(*task, Task::State::Finished);
}
// The following code would be called to signal that a new frame of data has become available.
// This (unlike other "LIVE555 Streaming Media" library code) may be called from a separate thread.
// (Note, however, that "triggerEvent()" cannot be called with the same 'event trigger id' from different threads.
// Also, if you want to have multiple device threads, each one using a different 'event trigger id', then you will need
// to make "eventTriggerId" a non-static member variable of "DeviceSource".)
void signalNewFrameData() {
  TaskScheduler* ourScheduler = NULL; //%%% TO BE WRITTEN %%%
  DeviceSource* ourDevice = NULL; //%%% TO BE WRITTEN %%%

  if (ourScheduler != NULL) { // sanity check
    ourScheduler->triggerEvent(DeviceSource::eventTriggerId, ourDevice);
  }
}
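// A minimal sketch of one way the two "TO BE WRITTEN" slots above could be
// filled in: stash pointers in globals when the scheduler and device are
// created. The names g_scheduler and g_device are assumptions for
// illustration, not part of the LIVE555 API; only triggerEvent() and
// DeviceSource::eventTriggerId come from the library.
static TaskScheduler* g_scheduler = NULL; // set once in main(), before any device thread starts
static DeviceSource*  g_device    = NULL; // set when DeviceSource::createNew() returns

void signalNewFrameData() {
  if (g_scheduler != NULL && g_device != NULL) { // sanity check
    g_scheduler->triggerEvent(DeviceSource::eventTriggerId, g_device);
  }
}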
int main(int argc, const char * argv[])
{
    uint32_t maxThreads = GetNumHardwareThreads();
    double* avSpeedUps = new double[ maxThreads ];

    for( uint32_t numThreads = 1; numThreads <= maxThreads; ++numThreads )
    {
        g_TS.Initialize(numThreads);
        double avSpeedUp = 0.0;
        for( int run = 0; run < REPEATS; ++run )
        {
            printf("Run %d.....\n", run);
            Timer tParallel;
            tParallel.Start();

            ParallelReductionSumTaskSet m_ParallelReductionSumTaskSet( 10 * 1024 * 1024 );
            g_TS.AddTaskSetToPipe( &m_ParallelReductionSumTaskSet );
            g_TS.WaitforTaskSet( &m_ParallelReductionSumTaskSet );

            tParallel.Stop();
            printf("Parallel Example complete in \t%fms,\t sum: %" PRIu64 "\n", tParallel.GetTimeMS(), m_ParallelReductionSumTaskSet.m_FinalSum );

            Timer tSerial;
            tSerial.Start();
            uint64_t sum = 0;
            for( uint64_t i = 0; i < (uint64_t)m_ParallelReductionSumTaskSet.m_ParallelSumTaskSet.m_SetSize; ++i )
            {
                sum += i + 1;
            }
            tSerial.Stop();

            if( run >= WARMUPS )
            {
                avSpeedUp += tSerial.GetTimeMS() / tParallel.GetTimeMS() / RUNS;
            }

            printf("Serial Example complete in \t%fms,\t sum: %" PRIu64 "\n", tSerial.GetTimeMS(), sum );
            printf("Speed Up Serial / Parallel: %f\n\n", tSerial.GetTimeMS() / tParallel.GetTimeMS() );
        }
        avSpeedUps[numThreads-1] = avSpeedUp;
        printf("\nAverage Speed Up for %d Hardware Threads Serial / Parallel: %f\n", numThreads, avSpeedUp );
    }

    printf("\nHardware Threads, Av Speed Up/s\n" );
    for( uint32_t numThreads = 1; numThreads <= maxThreads; ++numThreads )
    {
        printf("%d, %f\n", numThreads, avSpeedUps[numThreads-1] );
    }

    delete[] avSpeedUps; // release the per-thread-count results buffer
    return 0;
}
void * TaskScheduler::thread_func(void * arg)
{
    boost::shared_ptr<ThreadParameters> tp = *(boost::shared_ptr<ThreadParameters>*)arg;
    TaskScheduler *that = tp->that;
    pthread_cleanup_push(cleanitup, &tp);
    that->runTask(tp);
    pthread_cleanup_pop(1);
    return NULL;
}
static void signalNewVideoFrameData(int channelId) {
  TaskScheduler* ourScheduler = (TaskScheduler*) liveserver_taskscheduler(); //%%% TO BE WRITTEN %%%
  GAVideoLiveSource* ourDevice = vLiveSource[channelId]; //%%% TO BE WRITTEN %%%

  if (ourScheduler != NULL) { // sanity check
    ourScheduler->triggerEvent(eventTriggerId[channelId], ourDevice);
  }
}
OSTHREAD_FUNC test10_incThread(void *parm){
    TaskInfo *ti;
    TaskScheduler *ts = tgetTaskScheduler();
    ti = ts->createTask(test10_PROGIncrement, 0);
    ts->assignFixedTask(1, ti);
    test10_event.set();
    ts->run();
    return (void*)-1;
}
virtual void ExecuteRange( TaskSetPartition range, uint32_t threadnum )
{
    g_TS.AddTaskSetToPipe( &m_ParallelSumTaskSet );
    g_TS.WaitforTaskSet( &m_ParallelSumTaskSet );

    for( uint32_t i = 0; i < m_ParallelSumTaskSet.m_NumPartialSums; ++i )
    {
        m_FinalSum += m_ParallelSumTaskSet.m_pPartialSums[i].count;
    }
}
void RepetitiveTask(Task::TaskData & parTask)
{
    TaskScheduler * scheduler = parTask.RepetetiveTaskData.Scheduler;
    TaskId taskToExec = parTask.RepetetiveTaskData.RepetiveTask;
    std::chrono::milliseconds repeatTimer = parTask.RepetetiveTaskData.RepeatTimer;

    Task * task = scheduler->GetTask(taskToExec);
    assert(task != nullptr);
    task->Run();
    scheduler->ScheduleEvery(repeatTimer, taskToExec, false);
}
FIBER_START_FUNCTION_CLASS_IMPL(TaskScheduler, FiberStart) {
    GlobalArgs *globalArgs = reinterpret_cast<GlobalArgs *>(arg);
    TaskScheduler *taskScheduler = &globalArgs->g_taskScheduler;

    while (!taskScheduler->m_quit.load()) {
        // Check if any of the waiting tasks are ready
        WaitingTask waitingTask;
        bool waitingTaskReady = false;

        taskScheduler->m_waitingTaskLock.lock();
        auto iter = taskScheduler->m_waitingTasks.begin();
        for (; iter != taskScheduler->m_waitingTasks.end(); ++iter) {
            if (iter->Counter->load() == iter->Value) {
                waitingTaskReady = true;
                break;
            }
        }

        if (waitingTaskReady) {
            waitingTask = *iter;

            // Optimization for removing an item from a vector as suggested by ryeguy on reddit
            // Explained here: http://stackoverflow.com/questions/4442477/remove-ith-item-from-c-stdvector/4442529#4442529
            // Essentially, rather than forcing a memcpy to shift all the remaining elements down after the erase,
            // we move the last element into the place where the erased element was. Then we pop off the last element

            // Check that we're not already the last item
            // Move assignment to self is not defined
            if (iter != (--taskScheduler->m_waitingTasks.end())) {
                *iter = std::move(taskScheduler->m_waitingTasks.back());
            }
            taskScheduler->m_waitingTasks.pop_back();
        }
        taskScheduler->m_waitingTaskLock.unlock();

        if (waitingTaskReady) {
            taskScheduler->SwitchFibers(waitingTask.Fiber);
        }

        TaskBundle nextTask;
        if (!taskScheduler->GetNextTask(&nextTask)) {
            std::this_thread::yield();
        } else {
            nextTask.TaskToExecute.Function(&globalArgs->g_taskScheduler, &globalArgs->g_heap, &globalArgs->g_allocator, nextTask.TaskToExecute.ArgData);
            nextTask.Counter->fetch_sub(1);
        }
    }

    FTLConvertFiberToThread(FTLGetCurrentFiber());
    globalArgs->g_taskScheduler.m_numActiveWorkerThreads.fetch_sub(1);
    FTLEndCurrentThread();
}
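// The swap-and-pop erase used above is worth seeing in isolation. A minimal,
// self-contained sketch (not FiberTaskingLib code): replace the erased slot
// with the last element, then pop the tail, turning an O(n) erase into O(1)
// at the cost of element order.
#include <utility>
#include <vector>

template <typename T>
void unordered_erase(std::vector<T> &v, typename std::vector<T>::iterator it) {
    // Move assignment to self is undefined, so skip the move for the tail element.
    if (it != v.end() - 1) {
        *it = std::move(v.back());
    }
    v.pop_back(); // drop the (now moved-from) last element
}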
void UIWindow::setOutput()
{
    TaskScheduler tsd;
    QString qstr = textEdit->toPlainText();
    tsd.input(1, qstr.toStdString());

    string str = "";
    tsd.execute();
    tsd.output(str);

    QString qst = "";
    qst = qst + str.c_str();
    output->setText(qst);
    outputLabel->setText("Output");
    repaint();
}
void Init()
{
    delete[] m_pPartialSums;
    m_NumPartialSums = g_TS.GetNumTaskThreads();
    m_pPartialSums = new Count[ m_NumPartialSums ];
    memset( m_pPartialSums, 0, sizeof(Count)*m_NumPartialSums );
}
int main(int argc, const char * argv[])
{
    Remotery* rmt;
    rmt_CreateGlobalInstance(&rmt);

    // Set the callbacks BEFORE Initialize, or we will get no threadStart nor first waitStart calls
    g_TS.GetProfilerCallbacks()->threadStart = threadStartCallback;
    g_TS.GetProfilerCallbacks()->waitStart   = waitStartCallback;
    g_TS.GetProfilerCallbacks()->waitStop    = waitStopCallback;

    g_TS.Initialize();

    rmt_SetCurrentThreadName("Main");

    double avSpeedUp = 0.0;
    for( int run = 0; run < RUNS; ++run )
    {
        rmt_ScopedCPUSample(Run);
        printf("Run %d.....\n", run);

        ParallelReductionSumTaskSet m_ParallelReductionSumTaskSet( SUMS );
        {
            rmt_ScopedCPUSample(Parallel);
            m_ParallelReductionSumTaskSet.Init();
            g_TS.AddTaskSetToPipe(&m_ParallelReductionSumTaskSet);
            g_TS.WaitforTaskSet(&m_ParallelReductionSumTaskSet);
        }

        volatile uint64_t sum = 0;
        {
            rmt_ScopedCPUSample(Serial);
            for (uint64_t i = 0; i < (uint64_t)m_ParallelReductionSumTaskSet.m_ParallelSumTaskSet.m_SetSize; ++i)
            {
                sum += i + 1;
            }
        }
    }

    rmt_DestroyGlobalInstance(rmt);
    return 0;
}
template <typename Lambda>
void parfor(std::size_t idx_start, std::size_t idx_end, Lambda &&loopBody,
            TaskScheduler &scheduler, std::size_t blockSize = 32)
{
    static_assert(std::is_same<void, typename std::result_of<Lambda(std::size_t)>::type>::value,
                  "Loop body must return void");

    auto loopLen = (idx_end - idx_start);

    // Execute short loops in serial
    if (loopLen < 10 * blockSize) {
        for (std::size_t i = idx_start; i < idx_end; ++i) {
            loopBody(i);
        }
        return;
    }

    auto full_blocks = loopLen / blockSize;
    auto cleanup_start = full_blocks * blockSize + idx_start;
    auto Nblocks = full_blocks + ((cleanup_start < idx_end) ? 1 : 0);

    std::vector<std::future<void>> futs;
    futs.reserve(Nblocks);

    for (std::size_t iblock = 0; iblock < Nblocks; ++iblock) {
        std::size_t i_start = idx_start + iblock * blockSize;
        std::size_t i_end = i_start + blockSize;
        i_end = (i_end < idx_end) ? i_end : idx_end;

        auto [task, fut] = scheduler.createTask([&loopBody, i_start, i_end]() {
            for (auto i = i_start; i < i_end; ++i) {
                loopBody(i);
            }
        });
        scheduler.enqueue(task);
        futs.push_back(std::move(fut));
    }

    wait_all(futs);
    //return futs;
}
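// A quick usage sketch for the parfor above, assuming the same TaskScheduler
// createTask/enqueue interface and wait_all helper the snippet itself relies
// on: scale a vector in parallel, one block of indices per task.
#include <vector>

void scale_all(TaskScheduler &scheduler, std::vector<double> &data) {
    parfor(0, data.size(), [&data](std::size_t i) {
        data[i] *= 2.0; // each index is handled by exactly one block/task
    }, scheduler);
}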
void TaskScheduler::MainFiberStart(intptr_t arg) {
    MainFiberStartArgs *mainFiberArgs = reinterpret_cast<MainFiberStartArgs *>(arg);
    TaskScheduler *taskScheduler = mainFiberArgs->taskScheduler;

    // Call the main task procedure
    mainFiberArgs->MainTask(taskScheduler, mainFiberArgs->Arg);

    // Request that all the threads quit
    taskScheduler->m_quit.store(true, std::memory_order_release);

    // Switch to the thread fibers
    ThreadLocalStorage &tls = taskScheduler->m_tls[taskScheduler->GetCurrentThreadIndex()];
    taskScheduler->m_fibers[tls.CurrentFiberIndex].SwitchToFiber(&tls.ThreadFiber);

    // We should never get here
    printf("Error: FiberStart should never return");
}
void cPluginRestfulapi::MainThreadHook(void)
{
  // Perform actions in the context of the main program thread.
  // WARNING: Use with great care - see PLUGINS.html!
  TaskScheduler* scheduler = TaskScheduler::get();
  scheduler->DoTasks();

  tChannelID channelID = scheduler->SwitchableChannel();
  if ( !( channelID == tChannelID::InvalidID ) ) {
     cChannel* channel = Channels.GetByChannelID(channelID);
     if (channel != NULL) {
        Channels.SwitchTo( channel->Number() );
        scheduler->SwitchableChannel(tChannelID::InvalidID);
     }
  }

  cRecording* recording = scheduler->SwitchableRecording();
  if (recording != NULL) {
#if APIVERSNUM > 10727
     cReplayControl::SetRecording(recording->FileName());
#else
     cReplayControl::SetRecording(recording->FileName(), recording->Title());
#endif
     scheduler->SwitchableRecording(NULL);
     cControl::Shutdown();
     cControl::Launch(new cReplayControl);
  }
}
// realtime thread for handling all scheduled tasks
void* TaskScheduler::queue_thread_callback(void* obj_p)
{
    // if TIME_DIVISION == 1 then lock to audiodevice.
    if (UNIV::TIME_DIVISION == 1) {
        TaskScheduler* sched = static_cast<TaskScheduler*>(obj_p);
        EXTMonitor* guard = sched->getGuard();
        while (true) {
#ifdef EXT_BOOST
            sched->timeSlice();
            guard->wait();
#else
            sched->timeSlice();
            guard->lock();
            guard->wait();
            guard->unlock();
#endif
        }
        return obj_p;
    } else {
        // otherwise if TIME_DIVISION > 1 then timeSlice never returns!
        TaskScheduler* sched = static_cast<TaskScheduler*>(obj_p);
        sched->timeSlice();
        // should never return from timeSlice
        return NULL;
    }
}
void SearchQualifierDialog::search(bool searchAll /* = false*/) {
    QString name = AVQualifierItem::simplifyText(ui->nameEdit->text());
    QString val = AVQualifierItem::simplifyText(ui->valueEdit->text());

    if (!(name.length() < 20 && TextUtils::fits(TextUtils::QUALIFIER_NAME_CHARS, name.toLatin1().data(), name.length()))) {
        QMessageBox::critical(this, tr("Error!"), tr("Illegal qualifier name"));
        return;
    }
    if (!Annotation::isValidQualifierValue(val)) {
        QMessageBox::critical(this, tr("Error!"), tr("Illegal qualifier value"));
        return;
    }

    if (searchAll) {
        clearPrevResults();
    }

    FindQualifierTaskSettings settings(groupToSearchIn, name, val, ui->exactButton->isChecked(), searchAll, parentAnnotationofPrevResult, indexOfPrevResult);
    FindQualifierTask* findTask = new FindQualifierTask(treeView, settings);
    connect(findTask, SIGNAL(si_stateChanged()), SLOT(sl_searchTaskStateChanged()));

    TaskScheduler* s = AppContext::getTaskScheduler();
    s->registerTopLevelTask(findTask);
}
void TaskScheduler::FiberStart(void *arg) {
    GlobalArgs *globalArgs = (GlobalArgs *)arg;
    TaskScheduler *taskScheduler = &globalArgs->TaskScheduler;

    while (!taskScheduler->m_quit.load()) {
        // Check if any of the waiting tasks are ready
        WaitingTask waitingTask;
        bool waitingTaskReady = false;

        EnterCriticalSection(&taskScheduler->m_waitingTaskLock);
        auto iter = taskScheduler->m_waitingTasks.begin();
        for ( ; iter != taskScheduler->m_waitingTasks.end(); ++iter) {
            if (iter->Counter->load() == iter->Value) {
                waitingTaskReady = true;
                break;
            }
        }
        if (waitingTaskReady) {
            waitingTask = *iter;
            taskScheduler->m_waitingTasks.erase(iter);
        }
        LeaveCriticalSection(&taskScheduler->m_waitingTaskLock);

        if (waitingTaskReady) {
            taskScheduler->SwitchFibers(waitingTask.Fiber);
        }

        TaskBundle nextTask;
        if (!taskScheduler->GetNextTask(&nextTask)) {
            SwitchToThread();
        } else {
            nextTask.Task.Function(&globalArgs->TaskScheduler, &globalArgs->Heap, &globalArgs->Allocator, nextTask.Task.ArgData);
            nextTask.Counter->fetch_sub(1);
        }
    }
}
THREAD_FUNC_RETURN_TYPE TaskScheduler::ThreadStart(void *arg) {
    ThreadStartArgs *threadArgs = reinterpret_cast<ThreadStartArgs *>(arg);
    TaskScheduler *taskScheduler = threadArgs->taskScheduler;
    uint index = threadArgs->threadIndex;

    // Clean up
    delete threadArgs;

    // Get a free fiber to switch to
    std::size_t freeFiberIndex = taskScheduler->GetNextFreeFiberIndex();

    // Initialize tls
    taskScheduler->m_tls[index].CurrentFiberIndex = freeFiberIndex;
    // Switch
    taskScheduler->m_tls[index].ThreadFiber.SwitchToFiber(&taskScheduler->m_fibers[freeFiberIndex]);

    // And we've returned

    // Cleanup and shutdown
    FTLEndCurrentThread();
    THREAD_FUNC_END;
}
void TaskScheduler::cleanitup(void * arg)
{
    boost::shared_ptr<ThreadParameters> tp = *(boost::shared_ptr<ThreadParameters>*)arg;
    TaskScheduler *that = tp->that;
    that->cleanupTask(tp);
}
static void registerCoreServices() {
    ServiceRegistry* sr = AppContext::getServiceRegistry();
    TaskScheduler* ts = AppContext::getTaskScheduler();

    ts->registerTopLevelTask(sr->registerServiceTask(new PluginViewerImpl()));
    ts->registerTopLevelTask(sr->registerServiceTask(new ProjectViewImpl()));
}
void * TaskScheduler::scheduler_thread(void *arg)
{
    TaskScheduler *that = (TaskScheduler*)arg;
    that->runSchedulerLoop();
    return NULL;
}
OSTHREAD_FUNC TCPDatagramCommunication::serverThread(void *parm){
  TCPDatagramCommunication *tdc = (TCPDatagramCommunication*) parm;
  int fdaccept;
  int res=0;
  TaskScheduler *ts;
  NewServer *ns, *epollptr;
  NewServer ServerEventFdNs; // epoll data for the ServerEventFd
  int workerno;

  SLauncher->initThreadContext("SERVER", 0); // allows this thread to send messages to others
  ts = tgetTaskScheduler();
  tsetSharedSpace(THREADCONTEXT_SPACE_TCPDATAGRAM, tdc);
  // *!* increase thread priority above normal?
  //WARNING_INIT(); // initializes warning task

  int i, n, epfd;
  struct epoll_event ev;
  struct epoll_event *epevents=0;
  eventfd_t eventdummy;

  epevents = new epoll_event[MAX_EPOLL_EVENTS];
  ServerEventFdNs.fd = tdc->ServerEventFd;
  ServerEventFdNs.handlerid = -1;

  epfd = epoll_create1(0); assert(epfd != -1);
  ev.events = EPOLLIN;
  ev.data.ptr = (void*) &ServerEventFdNs;
  res = epoll_ctl(epfd, EPOLL_CTL_ADD, tdc->ServerEventFd, &ev); assert(res==0);

  UDPDest ud;
  while (!tdc->ForceEndThreads){
    ts->runOnce();
    n = epoll_wait(epfd, epevents, MAX_EPOLL_EVENTS, -1);
    for (i=0; i < n; ++i){
      epollptr = (NewServer*) epevents[i].data.ptr;
      if (epollptr->fd == tdc->ServerEventFd){ // used just to wake us up
        eventfd_read(tdc->ServerEventFd, &eventdummy);
        while (!tdc->newServerQueue.empty()){
          ns = tdc->newServerQueue.dequeue();
          ev.events = EPOLLIN;
          ev.data.ptr = (void*) ns;
          res = epoll_ctl(epfd, EPOLL_CTL_ADD, ns->fd, &ev); assert(res==0);
          res = listen(ns->fd, SOMAXCONN);
          if (res == -1) {
            printf("listen() failed: %d\n", errno);
            continue;
          }
        }
        continue;
      }
      if (epevents[i].events & EPOLLIN){ // read available, new connection to accept
        ud.sockaddr_len = sizeof(sockaddr_in);
        fdaccept = accept(epollptr->fd, (sockaddr*) &ud.destaddr, &ud.sockaddr_len);
        if (fdaccept == -1){
          int err = errno;
          if (err != 0){
            printf("%016llx accept:socket_error %d from %08x\n", (long long) Time::now(), errno, *(int*)&ud.destaddr.sin_addr);
          }
          continue;
        }
        //printf("accept:connection from %08x\n", *(int*)&ud.destaddr.sin_addr);
        setnonblock(fdaccept);
#ifdef DISABLE_NAGLE
        int value = 1;
        res = setsockopt(fdaccept, IPPROTO_TCP, TCP_NODELAY, (char*) &value, sizeof(int));
        if (res) printf("setsockopt on TCP_NODELAY of accept socket: error %d\n", errno);
#endif
        // determines which worker will handle this fd
        workerno = tdc->ClientCount++;
        //printf("Fd %d going to worker %d\n", fdaccept, workerno);
        tdc->startReceiving(ud.getIPPort(), fdaccept, epollptr->handlerid, workerno);
      } // if
      if (epevents[i].events & (EPOLLRDHUP | EPOLLHUP | EPOLLERR)){ // problem with fd
        if (epollptr->fd != tdc->ServerEventFd){
          printf("Problem with fd, closing it\n");
          close(epollptr->fd);
          delete epollptr;
        }
      } // if
    } // for
  }
  delete [] epevents;
  return 0;
}
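// The eventfd trick above (register an eventfd in the epoll set purely so
// other threads can wake this loop by writing to it) shown in isolation. A
// minimal sketch using the same Linux APIs; error handling is trimmed and the
// function name is an illustration, not part of the code above:
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <cassert>

int make_wakeable_epoll(int &wakeFdOut) {
    int epfd = epoll_create1(0);
    assert(epfd != -1);
    wakeFdOut = eventfd(0, EFD_NONBLOCK);
    assert(wakeFdOut != -1);

    struct epoll_event ev;
    ev.events = EPOLLIN;
    ev.data.fd = wakeFdOut;  // lets the loop distinguish wakeups from real sockets
    int res = epoll_ctl(epfd, EPOLL_CTL_ADD, wakeFdOut, &ev);
    assert(res == 0);
    return epfd;
}

// Any thread can now interrupt an epoll_wait() on epfd with
//   eventfd_write(wakeFd, 1);
// and the loop drains it with eventfd_read(wakeFd, &dummy) before continuing.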
OSTHREAD_FUNC TCPDatagramCommunication::workerThread(void *parm){
  TCPDatagramCommunication *tdc = (TCPDatagramCommunication *) parm;
  TaskScheduler *ts = tgetTaskScheduler();
  int epfd = epoll_create1(0); assert(epfd != -1);
  eventfd_t eventdummy;
  int myworkerno = gContext.indexWithinClass(TCLASS_WORKER, tgetThreadNo());

  tsetSharedSpace(THREADCONTEXT_SPACE_TCPDATAGRAM, tdc);
  tsetSharedSpace(THREADCONTEXT_SPACE_TCPDATAGRAM_WORKER, (void*)(long long) epfd);
  ts->assignImmediateFunc(IMMEDIATEFUNC_ADDIPPORTFD, immediateFuncAddIPPortFd);
  ts->assignImmediateFunc(IMMEDIATEFUNC_SEND, immediateFuncSend);
  tdc->startupWorkerThread(); // invokes startup code

  int i, n, res;
  struct epoll_event *epevents=0;
  TCPStreamState *tss=0;
  TCPStreamState sleepeventtss;
  epevents = new epoll_event[MAX_EPOLL_EVENTS];
  int nread=0;

  int sleepeventfd = ts->getSleepEventFd();
  struct epoll_event ev;
  sleepeventtss.fd = sleepeventfd;
  ev.events = EPOLLIN;
  ev.data.ptr = (void*) &sleepeventtss;
  res = epoll_ctl(epfd, EPOLL_CTL_ADD, sleepeventfd, &ev); assert(res==0);

  int something;
  int timeout=0;

  tdc->workerInitSync.signal(); // indicate that we have initialized

  while (!tdc->ForceEndThreads){
    something = ts->runOnce(); // run event schedule loop once
    // this will handle immediate functions to add new things to the epoll set:
    // after adding, must receive anything that already exists

    // go through pendingsends and send anything for which we did not get EAGAIN before
    if (!tdc->PendingSendsBeforeEpoll[myworkerno].empty()){
      SetNode<TCPStreamStatePtr> *it;
      TCPStreamState *tssptr;
      for (it = tdc->PendingSendsBeforeEpoll[myworkerno].getFirst();
           it != tdc->PendingSendsBeforeEpoll[myworkerno].getLast();
           it = tdc->PendingSendsBeforeEpoll[myworkerno].getNext(it)){
        tssptr = it->key.tssptr;
        assert(!tssptr->sendeagain);
        tdc->sendTss(tssptr);
      }
      tdc->PendingSendsBeforeEpoll[myworkerno].clear();
    }

    if (!something){ // start sleep cycle
      ts->setAsleep(1);
      timeout = ts->findSleepTimeout();
      //printf("Going to sleep for %d\n", timeout);
    }
    else timeout = 0;

    n = epoll_wait(epfd, epevents, MAX_EPOLL_EVENTS, timeout);
    if (!something) ts->setAsleep(0);

    for (i=0; i < n; ++i){
      tss = (TCPStreamState*) epevents[i].data.ptr;
      if (tss->fd == sleepeventfd){ // used just to wake us up
        //printf("Woken from sleep\n");
        eventfd_read(sleepeventfd, &eventdummy);
        continue;
      }
      if (epevents[i].events & EPOLLIN){ // read available
        //if (ts->checkSendQueuesAlmostFull()){
        //  **!** if internal send queues are almost full, do not receive TCP packets
        //  do something here
        //}
        while (1){
          nread = read(tss->fd, tss->rstate.Ptr, tss->rstate.Buflen - tss->rstate.Filled);
          if (nread < 0){
            if (errno == EAGAIN || errno == EWOULDBLOCK) break;
          }
          else if (nread == 0) break;
          else {
            // update the state of this stream (and if entire message received, invoke handler)
            tdc->updateState(tss->handlerid, tss->rstate, tss->ipport, nread);
          }
        }
      }
      if (epevents[i].events & EPOLLOUT){ // write available
        tdc->sendTss(tss);
      }
      if (epevents[i].events & (EPOLLRDHUP | EPOLLHUP | EPOLLERR)){ // problem with fd
        close(tss->fd);
      }
    }
  }
  tdc->finishWorkerThread(); // invokes cleaning up code
  delete [] epevents;
  return 0;
}
void TaskScheduler::FiberStart(intptr_t arg) {
    TaskScheduler *taskScheduler = reinterpret_cast<TaskScheduler *>(arg);

    while (!taskScheduler->m_quit.load(std::memory_order_acquire)) {
        // Clean up from the last fiber to run on this thread
        taskScheduler->CleanUpOldFiber();

        // Check if any of the waiting tasks are ready
        std::size_t waitingFiberIndex = FTL_INVALID_INDEX;

        for (std::size_t i = 0; i < taskScheduler->m_fiberPoolSize; ++i) {
            // Double lock
            if (!taskScheduler->m_waitingFibers[i].load(std::memory_order_relaxed)) {
                continue;
            }

            if (!taskScheduler->m_waitingFibers[i].load(std::memory_order_acquire)) {
                continue;
            }

            // Found a waiting fiber
            // Test if it's ready
            WaitingBundle *bundle = &taskScheduler->m_waitingBundles[i];
            if (bundle->Counter->load(std::memory_order_relaxed) != bundle->TargetValue) {
                continue;
            }

            bool expected = true;
            if (std::atomic_compare_exchange_weak_explicit(&taskScheduler->m_waitingFibers[i], &expected, false, std::memory_order_release, std::memory_order_relaxed)) {
                waitingFiberIndex = i;
                break;
            }
        }

        if (waitingFiberIndex != FTL_INVALID_INDEX) {
            // Found a waiting task that is ready to continue
            ThreadLocalStorage &tls = taskScheduler->m_tls[taskScheduler->GetCurrentThreadIndex()];

            tls.OldFiberIndex = tls.CurrentFiberIndex;
            tls.CurrentFiberIndex = waitingFiberIndex;
            tls.OldFiberDestination = FiberDestination::ToPool;

            // Switch
            taskScheduler->m_fibers[tls.OldFiberIndex].SwitchToFiber(&taskScheduler->m_fibers[tls.CurrentFiberIndex]);

            // And we're back
        } else {
            // Get a new task from the queue, and execute it
            TaskBundle nextTask;
            if (!taskScheduler->GetNextTask(&nextTask)) {
                // Spin
            } else {
                nextTask.TaskToExecute.Function(taskScheduler, nextTask.TaskToExecute.ArgData);
                nextTask.Counter->fetch_sub(1);
            }
        }
    }

    // Start the quit sequence
    // Switch to the thread fibers
    ThreadLocalStorage &tls = taskScheduler->m_tls[taskScheduler->GetCurrentThreadIndex()];
    taskScheduler->m_fibers[tls.CurrentFiberIndex].SwitchToFiber(&tls.ThreadFiber);

    // We should never get here
    printf("Error: FiberStart should never return");
}
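// The "double lock" above (a relaxed load to cheaply skip empty slots, an
// acquire load to synchronize before inspecting the bundle, then a CAS to
// claim the slot exactly once) is a general pattern. A minimal sketch,
// independent of FiberTaskingLib, that claims one ready slot from a flag array:
#include <atomic>
#include <cstddef>

constexpr std::size_t kInvalidIndex = static_cast<std::size_t>(-1);

std::size_t claim_ready_slot(std::atomic<bool> *flags, std::size_t count) {
    for (std::size_t i = 0; i < count; ++i) {
        // Cheap filter: most slots are false, so skip them without ordering cost.
        if (!flags[i].load(std::memory_order_relaxed)) {
            continue;
        }
        // Acquire load: makes writes published before the flag was set visible here.
        if (!flags[i].load(std::memory_order_acquire)) {
            continue;
        }
        // The CAS claims the slot; only one thread can flip it true -> false.
        bool expected = true;
        if (flags[i].compare_exchange_weak(expected, false,
                                           std::memory_order_release,
                                           std::memory_order_relaxed)) {
            return i;
        }
    }
    return kInvalidIndex; // nothing ready
}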