Example #1
    //! Try to run one job from the queue. Returns false if the queue is
    //! finished, true if a job was run or the queue is not finished yet.
    bool try_run()
    {
        job_type* job = NULL;

        if (!m_queue.try_pop(job))
            return (m_idle_count != m_numthrs);

        m_logger << m_queue.unsafe_size();

        if (job->run(m_cookie))
            delete job;

        return true;
    }
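A minimal sketch of a loop that could drive try_run from a worker thread; the function name worker_loop and the yield back-off are assumptions, not part of the original pool:

    void worker_loop()
    {
        // keep pulling jobs until try_run() reports the queue is finished
        while (try_run())
            std::this_thread::yield();   // brief back-off between attempts
    }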
Example #2
 void do_work ()
 {
     Task task;
     while (likely(m_accepting.load())) {
         if (m_queue.try_pop(task)) {
             m_processor(task);
         } else {
             // queue momentarily empty: sleep until new work is scheduled,
             // with a timeout so shutdown is noticed even without a signal
             std::unique_lock<std::mutex> lock(m_mutex);
             m_condition.wait_for(lock, std::chrono::seconds(60));
         }
     }
     // drain any tasks that were enqueued before shutdown
     while (m_queue.try_pop(task)) {
         m_processor(task);
     }
 }
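A minimal companion sketch of a shutdown path, assuming the same members (m_accepting, m_mutex, m_condition) seen above; taking the lock before notifying narrows the window in which a worker between try_pop and wait_for could miss the signal:

 void stop ()
 {
     m_accepting.store(false);
     std::unique_lock<std::mutex> lock(m_mutex);
     m_condition.notify_all();   // wake all workers so they fall through to the drain loop
 }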
Example #3
void *estimate(void *param)
{
  FILE *fptr = (FILE *)param;
  char coeff[MAX_CHAR];
  int t = 1;

  // each input line holds cubic coefficients "a,b,c,d", comma-separated
  while(NULL != fgets(coeff, MAX_CHAR, fptr)) {
    float a = atof(strtok(coeff, ","));
    float b = atof(strtok(NULL, ","));
    float c = atof(strtok(NULL, ","));
    float d = atof(strtok(NULL, ","));

    // evaluate the cubic at t + 0.1 ... t + 0.9 and queue each estimate
    for(int i=1 ; i < 10 ; ++i) {
      m_queue.push(a * pow(t + i * 0.1, 3) +
		   b * pow(t + i * 0.1, 2) +
		   c * (t + i * 0.1) +
		   d);
    }

    // increase time
    ++t;
  }

  // mark estimation as finished
  m_est_cstat = false;

  // wait for the consumer's signal; the mutex must be held around pthread_cond_wait
  pthread_mutex_lock(&m_est_mutex);
  pthread_cond_wait(&m_est_cvar, &m_est_mutex);
  pthread_mutex_unlock(&m_est_mutex);

  // exit from thread (the return below is never reached, but keeps compilers quiet)
  pthread_exit(NULL);

  return NULL;
}
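A hedged sketch of how this entry point might be launched; the file name is hypothetical, and the signal releases the estimator from its final pthread_cond_wait:

  FILE *fptr = fopen("coeffs.csv", "r");   // hypothetical input file
  pthread_t tid;
  pthread_create(&tid, NULL, estimate, fptr);

  /* ... consume estimates from m_queue ... */

  pthread_mutex_lock(&m_est_mutex);
  pthread_cond_signal(&m_est_cvar);        // release the waiting estimator
  pthread_mutex_unlock(&m_est_mutex);
  pthread_join(tid, NULL);
  fclose(fptr);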
Example #4
void printResult()
{
  float res = 0.0;

  // print only when a result was actually popped from the queue;
  // otherwise there is nothing to report
  if (m_queue.try_pop(res))
    fprintf(m_ofile, "%f\n", res);
}
Example #5
void do_work() {
    while (g_alive) {
        size_t i;
        // try_pop on a concurrent queue is thread-safe, so pop and run
        // the task without holding the mutex
        if (g_work_queue.try_pop(i)) {
            task(i);
            g_work_condition.notify_one();
        } else {
            // queue empty: block until a producer signals new work
            std::unique_lock<std::mutex> lock(g_work_mutex);
            g_work_condition.wait(lock);
        }
    }
}
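A matching producer sketch under the same assumed globals (g_work_queue, g_work_mutex, g_work_condition); a hardened version would also make the consumer wait on a predicate so a signal arriving between try_pop and wait cannot be lost:

void submit(size_t i) {
    g_work_queue.push(i);             // concurrent_queue::push is thread-safe on its own
    std::lock_guard<std::mutex> lock(g_work_mutex);
    g_work_condition.notify_one();    // wake one idle worker
}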
Example #6
    void numaLoop(int numaNode, int numberOfThreads)
    {
        m_timers.start(omp_get_max_threads());

#pragma omp parallel num_threads(numberOfThreads)
        {
            // tie thread to a NUMA node
            numa_run_on_node(numaNode);
            numa_set_preferred(numaNode);

            executeThreadWork();
        }   // end omp parallel

        m_timers.stop();

        assert(m_queue.unsafe_size() == 0);
    }
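A hedged sketch of a driver that runs numaLoop once per configured node; the driver name and thread-count policy are assumptions, while numa_num_configured_nodes comes from libnuma:

    void loopAllNodes(int threadsPerNode)
    {
        // visit every NUMA node in turn with its own pinned thread team
        for (int node = 0; node < numa_num_configured_nodes(); ++node)
            numaLoop(node, threadsPerNode);
    }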
Example #7
    void loop()
    {
        m_timers.start(omp_get_max_threads());
        m_idle_count = 0;

#pragma omp parallel
        {
            if (gopt_memory_type == "mmap_node0")
            {
                // tie thread to first NUMA node
                numa_run_on_node(0);
                numa_set_preferred(0);
            }

            executeThreadWork();
        }   // end omp parallel

        m_timers.stop();

        assert(m_queue.unsafe_size() == 0);
    }
Example #8
		void operator() (const tbb::blocked_range<int>& range) const {
			// push every out-edge of the current node, one level deeper
			for (int i = range.begin(); i != range.end(); ++i)
				todo->push(std::make_pair((*nodes)[curr.first].edges[i], curr.second + 1));
		}
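A sketch of how such a range body is typically driven with tbb::parallel_for; the class name Body and its constructor are assumptions:

		// hypothetical call site: expand all edges of the current node in parallel
		tbb::parallel_for(
			tbb::blocked_range<int>(0, (int)(*nodes)[curr.first].edges.size()),
			Body(nodes, todo, curr));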
Example #9
 void enqueue(job_type* job)
 {
     m_queue.push(job);
     m_logger << m_queue.unsafe_size();
 }
Example #10
void TestEmptyQueue() {
    const tbb::concurrent_queue<T> queue;
    ASSERT( queue.size()==0, NULL );
    ASSERT( queue.capacity()>0, NULL );
    ASSERT( size_t(queue.capacity())>=size_t(-1)/(sizeof(void*)+sizeof(T)), NULL );
}
Example #11
 void schedule (const Task & task)
 {
     POMAGMA_ASSERT(m_accepting.load(), "pool is not accepting work");
     m_queue.push(task);
     m_condition.notify_one();
 }
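This scheduler is the producing half of the worker loop in Example #2: the same m_accepting, m_queue, and m_condition members appear there, and notify_one here wakes a worker blocked in m_condition.wait_for.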
Example #12
void regionDataPublisher(zmqpp::socket &publisher, PATH_FIND_NODE &pathFindNode, tbb::concurrent_queue<PathEntity*> &solvedPathQueue, tbb::concurrent_vector<Entity*> &entities) {
	using namespace std;

	// Initialize path.
	for (auto entity : entities) {
		pathFindNode.try_put(std::tuple<size_t, glm::vec2>(entity->id, entity->position));
	}

	while (1) {
		auto start = std::chrono::steady_clock::now();

		// Grab a batch of solved paths
		{
			for (size_t i = 0; i < 200; ++i) {
				PathEntity* pathEntity;
				if (solvedPathQueue.try_pop(pathEntity)) {
					entities[pathEntity->id]->pathNodes = pathEntity->pathNodes;
					entities[pathEntity->id]->currentNode = 0;
				}
			}
		}

		// Traverse nodes
		{
			for (auto entity : entities) {
				if (entity->pathNodes != 0) {
					if (entity->currentNode < entity->pathNodes->size()) {
						size_t currentIndex = (size_t)(*entity->pathNodes)[entity->currentNode++];
						wss::Utils::indexToXY(currentIndex, MAP_W, entity->position);
					}
					else {
						entity->pathNodes = 0;
						pathFindNode.try_put(std::tuple<size_t, glm::vec2>(entity->id, entity->position));
					}
				}
			}
		}

		{
			rapidjson::Document document;
			document.SetObject();
			serializeEntities(document, 0, entities.size(), entities);

			rapidjson::StringBuffer sb;
			rapidjson::Writer<rapidjson::StringBuffer> writer(sb);

			document.Accept(writer);

			publisher.send(sb.GetString());
		}
		std::chrono::duration<double> elapsed = std::chrono::steady_clock::now() - start;
		if (elapsed.count() < 1.0/5.0) {
			// sleep out the rest of the 200 ms frame to hold a 5 Hz publish rate
			std::this_thread::sleep_for(std::chrono::milliseconds(1000/5 - (size_t)(elapsed.count() * 1000.0)));
		}

	}
}