inline void executeThreadWork()
{
    job_type* job = NULL;
    m_numthrs = omp_get_num_threads();

    m_timers.change(TM_WORK);
    m_logger.start();
    m_work_logger.start();

    while (true)
    {
        while (m_queue.try_pop(job))
        {
            m_logger << m_queue.unsafe_size();
            if (job->run(m_cookie))
                delete job;
        }

        DBG(debug_queue, "Queue" << m_id << " is empty");

        // no more jobs -> switch to idle
        m_timers.change(TM_IDLE);
        ++m_idle_count;
        m_logger << m_queue.unsafe_size();
        m_work_logger << (m_numthrs - m_idle_count);

        while (!m_queue.try_pop(job))
        {
            DBG(debug_queue, "Idle thread - m_idle_count: " << m_idle_count);

            if ( //!m_group->assist(m_id) &&
                m_idle_count == m_numthrs)
            {
                // assist other JobQueues before terminating.
                while (m_group->assist(m_id)) { }
                return;
            }
        }

        // got a new job -> not idle anymore
        m_timers.change(TM_WORK);
        --m_idle_count;
        m_logger << m_queue.unsafe_size();
        m_work_logger << (m_numthrs - m_idle_count);

        if (job->run(m_cookie))
            delete job;
    }
}
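// A minimal, self-contained sketch of the idle-count termination protocol used
// by executeThreadWork() above, assuming tbb::concurrent_queue and std::thread.
// The JobQueue members (m_timers, m_logger, m_group, the assist() call) are not
// reproduced, and the names queue/idle_count/worker/num_threads are placeholders,
// not part of the original code.
#include <tbb/concurrent_queue.h>
#include <atomic>
#include <thread>
#include <vector>

static tbb::concurrent_queue<int> queue;
static std::atomic<unsigned> idle_count(0);
static const unsigned num_threads = 4;

static void worker()
{
    int job;
    while (true)
    {
        // Work phase: drain the queue.
        while (queue.try_pop(job)) {
            /* process job */
        }

        // Queue looks empty: count ourselves as idle and poll for more work.
        ++idle_count;
        while (!queue.try_pop(job)) {
            if (idle_count == num_threads)
                return;                 // every thread is idle -> terminate
        }

        // Got a job after all: leave the idle state and process it.
        --idle_count;
        /* process job */
    }
}

int main()
{
    for (int i = 0; i < 1000; ++i)
        queue.push(i);

    std::vector<std::thread> threads;
    for (unsigned t = 0; t < num_threads; ++t)
        threads.emplace_back(worker);
    for (auto &t : threads)
        t.join();
    return 0;
}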
void do_work()
{
    Task task;

    // Main loop: process tasks while the pool is still accepting work.
    while (likely(m_accepting.load()))
    {
        if (m_queue.try_pop(task)) {
            m_processor(task);
        }
        else {
            // Queue is empty: sleep until a producer notifies us,
            // or at most 60 seconds as a safety net.
            std::unique_lock<std::mutex> lock(m_mutex);
            m_condition.wait_for(lock, std::chrono::seconds(60));
        }
    }

    // Shutting down: drain whatever is still queued.
    while (m_queue.try_pop(task)) {
        m_processor(task);
    }
}
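// A minimal, self-contained sketch of the pattern above: a worker thread that
// polls a tbb::concurrent_queue and parks on a condition variable when it is
// empty. The class name WorkerPool, the submit() method and main() are
// assumptions for illustration; only the member names (m_queue, m_mutex,
// m_condition, m_accepting) and the 60-second timed wait mirror the code.
// Task is modeled here as std::function<void()>, so m_processor is folded
// into the task itself.
#include <tbb/concurrent_queue.h>
#include <atomic>
#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <functional>
#include <mutex>
#include <thread>

class WorkerPool
{
public:
    using Task = std::function<void()>;

    WorkerPool() : m_accepting(true), m_worker([this] { do_work(); }) {}

    ~WorkerPool()
    {
        m_accepting.store(false);
        {
            // Notify under the mutex so the worker cannot miss the wakeup.
            std::lock_guard<std::mutex> lock(m_mutex);
            m_condition.notify_all();
        }
        m_worker.join();
    }

    void submit(Task task)
    {
        m_queue.push(std::move(task));
        std::lock_guard<std::mutex> lock(m_mutex);
        m_condition.notify_one();   // cut the 60 s wait short
    }

private:
    void do_work()
    {
        Task task;
        while (m_accepting.load()) {
            if (m_queue.try_pop(task)) {
                task();
            }
            else {
                std::unique_lock<std::mutex> lock(m_mutex);
                // Re-check under the lock before sleeping; the 60 s timeout
                // is only a safety net.
                if (m_accepting.load() && m_queue.empty())
                    m_condition.wait_for(lock, std::chrono::seconds(60));
            }
        }
        // Shutting down: drain the remaining tasks.
        while (m_queue.try_pop(task))
            task();
    }

    tbb::concurrent_queue<Task> m_queue;
    std::mutex m_mutex;
    std::condition_variable m_condition;
    std::atomic<bool> m_accepting;
    std::thread m_worker;
};

int main()
{
    WorkerPool pool;
    for (int i = 0; i < 8; ++i)
        pool.submit([i] { std::printf("task %d\n", i); });
    return 0;   // ~WorkerPool drains the queue and joins the worker
}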
void printResult()
{
    float res = 0.0;

    // get result from queue (res stays 0.0 if the queue is empty)
    m_queue.try_pop(res);

    fprintf(m_ofile, "%f\n", res);
}
void do_work()
{
    while (g_alive)
    {
        size_t i;
        std::unique_lock<std::mutex> lock(g_work_mutex);

        if (g_work_queue.try_pop(i)) {
            // Run the task (note: g_work_mutex stays locked while task(i) runs),
            // then wake one more waiting worker in case more work is queued.
            task(i);
            g_work_condition.notify_one();
        }
        else {
            // Queue is empty: sleep until a producer signals new work.
            g_work_condition.wait(lock);
        }
    }
}
//! Try to run one job from the queue. Returns false if the queue is finished,
//! true if a job was run or the queue is not yet finished.
bool try_run()
{
    job_type* job = NULL;

    if (!m_queue.try_pop(job))
        return (m_idle_count != m_numthrs);

    m_logger << m_queue.unsafe_size();

    if (job->run(m_cookie))
        delete job;

    return true;
}
void regionDataPublisher(zmqpp::socket &publisher,
                         PATH_FIND_NODE &pathFindNode,
                         tbb::concurrent_queue<PathEntity*> &solvedPathQueue,
                         tbb::concurrent_vector<Entity*> entities)
{
    using namespace std;

    std::chrono::steady_clock clock;

    // Initialize paths: request an initial path for every entity.
    for (auto entity : entities) {
        pathFindNode.try_put(std::tuple<size_t, glm::vec2>(entity->id, entity->position));
    }

    while (1)
    {
        auto start = clock.now();

        // Grab a batch of solved paths (at most 200 per frame).
        {
            for (size_t i = 0; i < 200; ++i) {
                PathEntity* pathEntity;
                if (solvedPathQueue.try_pop(pathEntity)) {
                    entities[pathEntity->id]->pathNodes = pathEntity->pathNodes;
                    entities[pathEntity->id]->currentNode = 0;
                }
            }
        }

        // Traverse nodes: advance each entity along its path, or request a new one.
        {
            for (auto entity : entities) {
                if (entity->pathNodes != 0) {
                    if (entity->currentNode < entity->pathNodes->size()) {
                        size_t currentIndex = (size_t)(*entity->pathNodes)[entity->currentNode++];
                        wss::Utils::indexToXY(currentIndex, MAP_W, entity->position);
                    }
                    else {
                        entity->pathNodes = 0;
                        pathFindNode.try_put(std::tuple<size_t, glm::vec2>(entity->id, entity->position));
                    }
                }
            }
        }

        // Serialize all entities to JSON and publish over ZeroMQ.
        {
            rapidjson::Document document;
            document.SetObject();

            serializeEntities(document, 0, entities.size(), entities);

            rapidjson::StringBuffer sb;
            rapidjson::Writer<rapidjson::StringBuffer> writer(sb);
            document.Accept(writer);

            publisher.send(sb.GetString());
        }

        // Throttle the loop to roughly 5 updates per second.
        std::chrono::duration<double> elapsed = clock.now() - start;
        if (elapsed.count() < 1.0 / 5.0) {
            std::this_thread::sleep_for(
                std::chrono::milliseconds(1000 / 5 - (size_t)(elapsed.count() * 1000.0)));
        }
    }
}
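// The 5 Hz pacing at the end of regionDataPublisher(), shown in isolation
// (a sketch; run_at_5hz() and the tick callback are placeholders and not
// part of the original code).
#include <chrono>
#include <thread>

template <typename Tick>
void run_at_5hz(Tick tick)
{
    using clock = std::chrono::steady_clock;
    const std::chrono::milliseconds period(1000 / 5);    // 200 ms per frame

    while (true) {
        auto start = clock::now();
        tick();                                          // per-frame work

        // Sleep only for whatever is left of the 200 ms frame, if anything.
        auto elapsed = clock::now() - start;
        if (elapsed < period)
            std::this_thread::sleep_for(period - elapsed);
    }
}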