Example #1
void execute() {
    while (!requires_stop_) {
        std::function<void(void)> function;
        {
            std::unique_lock<std::mutex> lock(works_mutex_);
            // Loop rather than a single if, so that a spurious wakeup with an
            // empty queue cannot fall through to front() on an empty deque.
            while (works_.empty()) {
                workable_.wait(lock);
            }
            function = works_.front();
            works_.pop_front();
        }
        // MEMO: Do not catch exceptions from the async task here.
        function();
    }
}
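The predicate overload of wait expresses the same re-check loop more compactly and can fold in the stop flag as well. A minimal sketch of that variant, reusing the member names from the example above (works_, works_mutex_, workable_, requires_stop_); it assumes the stop flag is set while holding works_mutex_ and followed by notify_all(), which the original class is not shown to do:

void execute() {
    while (true) {
        std::function<void(void)> function;
        {
            std::unique_lock<std::mutex> lock(works_mutex_);
            // The predicate is re-evaluated after every wakeup, so spurious
            // wakeups and stop requests are both handled in one place.
            workable_.wait(lock, [&] { return !works_.empty() || requires_stop_; });
            if (requires_stop_)
                return;
            function = works_.front();
            works_.pop_front();
        }
        function();
    }
}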
Example #2
std::pair<size_t, ExternCompiler*> CompilerPool::getCompiler() {
  std::unique_lock<std::mutex> l(m_compilerLock);

  m_compilerCv.wait(l, [&] {
    return m_freeCount.load(std::memory_order_relaxed) != 0;
  });
  m_freeCount -= 1;

  for (size_t id = 0; id < m_compilers.size(); ++id) {
    auto ret = m_compilers.exchange(id, nullptr);
    if (ret) return std::make_pair(id, ret);
  }

  not_reached();
}
Example #3
void worker_2() {
    for (int i = 0; i < 8; i += 1) {
        std::unique_lock<std::mutex> lock(mtx);
        condition.wait(lock, [=]() -> bool { return produced; });

        auto consumer = [=]() {
            produced = false;
            consumed = true;
            std::cout << "consuming " << i << std::endl << std::flush;
        };
        consumer();

        condition.notify_one();
    }
}
Example #4
 /** Thread function */
 void Run()
 {
     while (true) {
         std::unique_ptr<WorkItem> i;
         {
             std::unique_lock<std::mutex> lock(cs);
             while (running && queue.empty())
                 cond.wait(lock);
             if (!running)
                 break;
             i = std::move(queue.front());
             queue.pop_front();
         }
         (*i)();
     }
 }
Example #5
void produce()
{
  for (auto i = 0u; i < 2 * BUFFER_SIZE; ++i) {
    std::unique_lock<std::mutex> locker(mu);
    full.wait(locker, [] {return vec.size() < BUFFER_SIZE;});
    auto was_empty = vec.empty();
    vec.push_back(i);
    locker.unlock();

    if (was_empty) {
      empty.notify_one();
    }

    std::this_thread::sleep_for(slp);
  }
}
Example #6
template <typename Func, typename... Args>
std::future<typename std::result_of<Func(Args...)>::type> StartJob(Func func, Args... args) {

	// wait until the thread count drops below the maximum
	std::unique_lock<std::mutex> lock(g_threadcount_mutex);
	while(g_threadcount >= g_threadcount_max) {
		g_threadcount_cv.wait(lock);
	}

	// increment the thread count
	++g_threadcount;

	// start the thread
	return std::async(std::launch::async, JobWrapper<Func, Args...>,
					  func, std::forward<Args>(args)...);

}
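StartJob only acquires a slot; it relies on a JobWrapper (not shown in this example) to release it when the job finishes. A hedged guess at what such a wrapper might look like, reusing the globals above (g_threadcount, g_threadcount_mutex, g_threadcount_cv); this is an assumption for illustration, not the original code:

template <typename Func, typename... Args>
typename std::result_of<Func(Args...)>::type JobWrapper(Func func, Args... args) {
	// Hypothetical implementation: release the slot even if the job throws,
	// and wake one waiter blocked in StartJob.
	struct SlotRelease {
		~SlotRelease() {
			std::unique_lock<std::mutex> lock(g_threadcount_mutex);
			--g_threadcount;
			g_threadcount_cv.notify_one();
		}
	} release;
	return func(std::forward<Args>(args)...);
}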
Example #7
	void thread_fn()
	{
		while (enabled)
		{
			std::unique_lock<std::mutex> locker(mutex);
			cv.wait(locker, [&](){ return !fqueue.empty() || !enabled; });
			while (!fqueue.empty())
			{
				fn_type fn = fqueue.front();
				fqueue.pop();
				locker.unlock();
				fn();
				locker.lock();
			}
		}
	}
Example #8
 void wait() {
     std::unique_lock<std::mutex> lk(mtx);
     ++wait_count;
     if(wait_count != target_wait_count) {
         // not all threads have arrived yet; go to sleep until they do
         cond_var.wait(lk, 
             [this]() { return wait_count == target_wait_count; });
     } else {
         // we are the last thread to arrive; wake the others and go on
         cond_var.notify_all();
     }
     // note that if you want to reuse the barrier, you will have to
     // reset wait_count to 0 now before calling wait again
     // if you do this, be aware that the reset must be synchronized with
     // threads that are still stuck in the wait
 }
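As the closing comment says, reusing this barrier means resetting wait_count in a way that cannot race with threads still blocked in wait. A minimal self-contained sketch of the usual fix, a generation counter (the same idea appears in a later example in this list); the class and member names here are illustrative:

#include <condition_variable>
#include <mutex>

class reusable_barrier {
public:
    explicit reusable_barrier(unsigned count)
        : threshold(count), remaining(count) {}

    void wait() {
        std::unique_lock<std::mutex> lk(mtx);
        const unsigned my_generation = generation;
        if (--remaining == 0) {
            // Last thread to arrive: open the barrier for this generation
            // and reset the count for the next round.
            ++generation;
            remaining = threshold;
            cond_var.notify_all();
            return;
        }
        // Waiting on the generation rather than on the count means a thread
        // from the previous round can never be trapped by the reset.
        cond_var.wait(lk, [&] { return generation != my_generation; });
    }

private:
    std::mutex mtx;
    std::condition_variable cond_var;
    const unsigned threshold;
    unsigned remaining;
    unsigned generation = 0;
};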
Example #9
void bar2()
{
	//Acquire the lock
	std::unique_lock<std::mutex> lck(m);

	//Wait until the first thread is done, so that this one may proceed
	cv.wait(lck);

	//Random task that this thread is doing
	for (int i = 100; i > 0; --i)
	{
		printf("Random Other Tasking: %d\n", i);
	}

	printf("Finally ready!\n");
}
Example #10
void MergeWorkerThreadStats() {
    std::unique_lock<std::mutex> lock(workListMutex);
    std::unique_lock<std::mutex> doneLock(reportDoneMutex);
    // Set up state so that the worker threads will know that we would like
    // them to report their thread-specific stats when they wake up.
    reportWorkerStats = true;
    reporterCount = threads.size();

    // Wake up the worker threads.
    workListCondition.notify_all();

    // Wait for all of them to merge their stats.
    reportDoneCondition.wait(lock, []() { return reporterCount == 0; });

    reportWorkerStats = false;
}
Example #11
int
main(int argc, char **argv)
{
    auto params = parseArgs(argc, argv);

    // TODO: remove with GnuTLS >= 3.3
    int rc = gnutls_global_init();
    if (rc != GNUTLS_E_SUCCESS)
        throw std::runtime_error(std::string("Error initializing GnuTLS: ")+gnutls_strerror(rc));

    auto ca_tmp = dht::crypto::generateIdentity("DHT Node CA");
    auto crt_tmp = dht::crypto::generateIdentity("Scanner node", ca_tmp);

    DhtRunner dht;
    dht.run(params.port, crt_tmp, true, params.network);

    if (not params.bootstrap.first.empty())
        dht.bootstrap(params.bootstrap.first.c_str(), params.bootstrap.second.c_str());

    std::cout << "OpenDht node " << dht.getNodeId() << " running on port " <<  params.port << std::endl;
    std::cout << "Scanning network..." << std::endl;
    auto all_nodes = std::make_shared<NodeSet>();

    dht::InfoHash cur_h {};
    cur_h.setBit(8*HASH_LEN-1, 1);

    std::this_thread::sleep_for(std::chrono::seconds(2));

    std::atomic_uint done {false};
    step(dht, done, all_nodes, cur_h, 0);

    {
        std::mutex m;
        std::unique_lock<std::mutex> lk(m);
        cv.wait(lk, [&](){
            return done.load() == 0;
        });
    }

    std::cout << std::endl << "Scan ended: " << all_nodes->size() << " nodes found." << std::endl;
    for (const auto& n : *all_nodes)
        std::cout << "Node " << *n << std::endl;

    dht.join();
    gnutls_global_deinit();
    return 0;
}
Example #12
	bool wait()
	{
          std::unique_lock<std::mutex> lock(m_mutex);
          unsigned int gen = m_generation;

          if (--m_count == 0)
          {
            m_generation++;
            m_count = m_threshold;
            m_cond.notify_all();
            return true;
          }

          while (gen == m_generation)
            m_cond.wait(lock);
          return false;
	}
Example #13
			T get()
			{
				T front;

				{
					auto_lock lock(mutex_);
					while(queue_.empty())
						not_empty_.wait(mutex_, std::chrono::milliseconds(INFINITE));

					assert(!queue_.empty());
					front = std::move(queue_.front());
					queue_.pop_front();
				}

				not_full_.notify_one();
				return front;
			}
Example #14
int fakeSocketConnect(int fd1, int fd2)
{
    std::vector<FakeSocketPair>& fds = getFds();
    std::unique_lock<std::mutex> lock(theMutex);
    if (fd1 < 0 || fd2 < 0 || static_cast<unsigned>(fd1/2) >= fds.size() || static_cast<unsigned>(fd2/2) >= fds.size())
    {
        loggingBuffer << "FakeSocket EBADF: Connect #" << fd1 << " to #" << fd2 << flush();
        errno = EBADF;
        return -1;
    }
    if (fd1/2 == fd2/2)
    {
        loggingBuffer << "FakeSocket EBADF: Connect #" << fd1 << " to #" << fd2 << flush();
        errno = EBADF;
        return -1;
    }

    FakeSocketPair& pair1 = fds[fd1/2];
    FakeSocketPair& pair2 = fds[fd2/2];

    if ((fd1&1) || (fd2&1))
    {
        loggingBuffer << "FakeSocket EISCONN: Connect #" << fd1 << " to #" << fd2 << flush();
        errno = EISCONN;
        return -1;
    }

    if (!pair2.listening || pair2.connectingFd != -1)
    {
        loggingBuffer << "FakeSocket ECONNREFUSED: Connect #" << fd1 << " to #" << fd2 << flush();
        errno = ECONNREFUSED;
        return -1;
    }

    pair2.connectingFd = fd1;
    theCV.notify_all();

    while (pair1.fd[1] == -1)
        theCV.wait(lock);

    assert(pair1.fd[1] == pair1.fd[0] + 1);

    loggingBuffer << "FakeSocket Connect #" << fd1 << " to #" << fd2 << ": #" << pair1.fd[1] << flush();

    return 0;
}
Example #15
	void run(processing_function func) {
		proc = func;

		srand(time(NULL));

		const int concurrency_limit = 1;

		while(1) {
			while(running_requests < concurrency_limit) {
				++running_requests;
				queue_peek(client, next_request_id++, request_size);
				//fprintf(stderr, "%d running_requests\n", (int)running_requests);
			}
			std::unique_lock<std::mutex> lock(mutex);
			condition.wait(lock, [this]{return running_requests < concurrency_limit;});
		}
	}
Example #16
 void loop() {
     while (true) {
         std::function<void()> f;
         {
             std::unique_lock<std::mutex> lk(mutex);
             while (todo.empty() && running) {
                 modified.wait(lk);
             }
             if (!running) {
                 return;
             }
             f = todo.front()->task;
             todo.pop_front();
         }
         f();
     }
 }
Example #17
    void Join(std::function<void()> reportFn = nullptr)
    {
        unique_lock lock(_mutex);
        while (true)
        {
            // Wait for the queue to become empty or having completed tasks.
            _condComplete.wait(lock, [this]()
            {
                return (_pending.empty() && _processing == 0) ||
                        !_completed.empty();
            });

            // Dispatch all completion callbacks if there are any.
            while (!_completed.empty())
            {
                auto taskData = _completed.front();
                _completed.pop_front();

                if (taskData.CompletionFn)
                {
                    lock.unlock();

                    taskData.CompletionFn();

                    lock.lock();
                }
            }

            if (reportFn)
            {
                lock.unlock();

                reportFn();

                lock.lock();
            }

            // If everything is empty and no more work has to be done we can stop waiting.
            if (_completed.empty() &&
                _pending.empty() &&
                _processing == 0)
            {
                break;
            }
        }
    }
Example #18
		inline result_type blockingDequeue(T & elem)
		{
			if (shouldQuit)
				return FAILURE;

			std::unique_lock<std::mutex> uniqueLock(queueAccessMutex);
			while (q.empty()) {
				queueAccessCondVar.wait(uniqueLock);
				if (shouldQuit) {
					uniqueLock.unlock();
					return FAILURE;
				}
			}
			internalDequeue(elem);
			uniqueLock.unlock();
			return SUCCESS;
		}
Example #19
	std::shared_ptr<T> getNext()
	{
		std::unique_lock<std::mutex> lock(mutex);
		condition.wait(lock, [this]() -> bool 
		{
			return !queue.empty() || isClosed;
		});

		if (!queue.empty())
		{
			auto ptr = queue.front();
			queue.pop();
			return ptr;
		}
		
		return std::shared_ptr<T>();
	}	
Example #20
void boost_song()
{
    using namespace std::chrono ;

    while( true )
    {
        std::unique_lock<std::mutex> lk{m} ;
        cv.wait( lk ) ;

        std::atomic<unsigned> x{0} ;
        while( high_resolution_clock::now() < mid )
        {
            ++x ;
        }
        std::this_thread::sleep_until( reset ) ;
    }
}
Example #21
      /*! \brief Thread worker loop, called by the threads beginThreadFunc.
       */
      inline void beginThread()
      {
	try
	  {
	    std::unique_lock<std::mutex> lock1(_queue_mutex);
	    
	    while (!_stop_flag)
	      {
		if (_waitingFunctors.empty())
		  {
		    ++_idlingThreads;
		    //Let whoever is waiting know that the thread is now available
		    _threadAvailable_condition.notify_all();
		    //And send it to sleep
		    _need_thread_mutex.wait(lock1);
		    --_idlingThreads;
		    continue;
		  }
		
		std::function<void()> func = _waitingFunctors.front();
		_waitingFunctors.pop();
		
		lock1.unlock();
		
		try { func(); }
		catch(std::exception& cep)
		  {
		    //Mark the main process to throw an exception as soon as possible
		    std::lock_guard<std::mutex> lock2(_exception_mutex);
		    
		    _exception_data << "\nTHREAD: Task threw an exception:-"
				    << cep.what();
		    
		    _exception_flag = true;
		  }
		lock1.lock();
	      }
	  }
	catch (std::exception& p)
	  {
	    std::cout << "\nTHREAD :Catastrophic Failure of thread!!! System will Hang, Aborting!"
		      << p.what();
	    throw;
	  }
      }
Example #22
void workThread() {
	{
		std::unique_lock<std::mutex> lock(queue_mutex);
		queue_condition.wait(lock);
	}
	while (1) {
		queue_mutex.lock();
		if (queue.size() == 0) {
			queue_mutex.unlock();
			break;
		}

		RayData d = queue.front();
		queue.pop();
		queue_mutex.unlock();
		rayThread(d.i, d.j, d.cur_pixel_pos, d.ray, d.x_grid_size, d.y_grid_size);
	}
}
Example #23
void consumer()
{

    while (true)
    {
        std::unique_lock<std::mutex> guard_consumer(lock_buffer);
        // Use a loop, not a single if, so a spurious wakeup cannot pop from an empty buffer.
        while (shared_buffer.empty())
            cond.wait(guard_consumer);

        int item_consumed = shared_buffer.back(); // gets item
        shared_buffer.pop_back(); // removes item

        if (shared_buffer.size() == N - 1)
            cond.notify_one();
        std::cout << "Item " << item_consumed
                  << " was removed from the buffer" << std::endl;
    }
}
Example #24
void waitEnd(
    std::mutex &                _mutex
    , std::condition_variable & _cond
    , const dp::Bool &          _ENDED
)
{
    std::unique_lock< std::mutex >  lock( _mutex );

    _cond.wait(
        lock
        , [
            &_ENDED
        ]
        {
            return _ENDED;
        }
    );
}
Example #25
void
Worker::threadMain(std::condition_variable& cv, std::mutex& condvarMutex) {
    while (this->running.load()) {
        {
            std::unique_lock<std::mutex> lock(condvarMutex);

            cv.wait(lock,
                    [this] {
                        std::lock_guard<std::mutex> guard(this->taskMutex);
                        return (not this->running.load() || this->task != nullptr);
                    });
            if (not this->running.load()) return;
        }
        this->task();
        this->setReserved(false);
        this->setTask(Task(nullptr));
    }
}
Example #26
void producer()
{

    while (true)
    {
        std::unique_lock<std::mutex> guard_producer(lock_buffer);
        // Use a loop, not a single if, so a spurious wakeup cannot push into a full buffer.
        while (shared_buffer.size() == N)
            cond.wait(guard_producer);

        int item_produced = rand() % 100 + 1; // ranges 1 to 100
        shared_buffer.push_back(item_produced);

        if (shared_buffer.size() == 1)
            cond.notify_one();
        std::cout << "Item " << item_produced
                  << " was inserted into the buffer" << std::endl;
    }
}
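The producer above and the consumer a few examples earlier share a single condition variable and only notify on the empty-to-non-empty and full-to-non-full transitions, which becomes fragile with more than one producer or consumer. A minimal sketch of the same bounded buffer using predicate waits and two dedicated condition variables; the names buffer_not_full and buffer_not_empty are introduced here and are not part of the original examples:

#include <condition_variable>
#include <cstddef>
#include <cstdlib>
#include <iostream>
#include <mutex>
#include <vector>

const std::size_t N = 10;
std::vector<int> shared_buffer;
std::mutex lock_buffer;
std::condition_variable buffer_not_full;   // signalled when space becomes available
std::condition_variable buffer_not_empty;  // signalled when an item becomes available

void producer_checked()
{
    while (true)
    {
        std::unique_lock<std::mutex> guard(lock_buffer);
        buffer_not_full.wait(guard, [] { return shared_buffer.size() < N; });

        int item = std::rand() % 100 + 1; // ranges 1 to 100
        shared_buffer.push_back(item);
        std::cout << "Item " << item << " was inserted into the buffer" << std::endl;

        buffer_not_empty.notify_one();
    }
}

void consumer_checked()
{
    while (true)
    {
        std::unique_lock<std::mutex> guard(lock_buffer);
        buffer_not_empty.wait(guard, [] { return !shared_buffer.empty(); });

        int item = shared_buffer.back();
        shared_buffer.pop_back();
        std::cout << "Item " << item << " was removed from the buffer" << std::endl;

        buffer_not_full.notify_one();
    }
}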
Example #27
void pong()
{  
    std::unique_lock<std::mutex> lock(m);
    pong_cond.notify_one();
    
    while(!done)
    {
        ping_cond.wait(lock);
        
        if(need_to_pong)
        {
            std::cout << "Pong!\n";
            need_to_pong = false;
        }
        
        pong_cond.notify_one();
    }
}
Example #28
    void run() {
        while(!_isQuiting) {
            std::shared_ptr<DThreadPoolTask> task(nullptr);
            {
                std::unique_lock<std::mutex> lck(_threadCvMtx);
                _threadCv.wait(lck,[&]() { return _taskList.size() > 0; });
                auto taskBegin = _taskList.begin();
                if (taskBegin != _taskList.end()) {
                    task = *taskBegin;
                    _taskList.erase(taskBegin);
                }
            }

            if (task) {
                task->Run();
            }
        }
    }
Example #29
bool TestCommonData::CheckErrors()
{
//    Exceptions.Read();
    auto& mem = cpu::CpuMemory::Instance().M1;

    QString str_errs = ErrMsg();

    if ( str_errs.isEmpty() )
        return true;

    std::mutex mutex;
    std::unique_lock< std::mutex > lock( mutex );
    Launcher( std::bind( &TestCommonData::ShowErrors, this, str_errs ) );

    CondVar.wait( lock );
    mem.SetKvitir_Osch( true );
    return false;
}
Example #30
bool FSEventsWatcher::start(const std::shared_ptr<w_root_t>& root) {
  // Spin up the fsevents processing thread; it owns a ref on the root

  auto self = std::dynamic_pointer_cast<FSEventsWatcher>(shared_from_this());
  try {
    // Acquire the mutex so thread initialization waits until we release it
    auto wlock = items_.wlock();

    std::thread thread([self, root]() {
      try {
        self->FSEventsThread(root);
      } catch (const std::exception& e) {
        watchman::log(watchman::ERR, "uncaught exception: ", e.what());
        root->cancel();
      }

      // Ensure that we signal the condition variable before we
      // finish this thread.  That ensures that we don't get stuck
      // waiting in FSEventsWatcher::start if something unexpected happens.
      self->fse_cond.notify_one();
    });
    // We have to detach because the readChangesThread may wind up
    // being the last thread to reference the watcher state and
    // cannot join itself.
    thread.detach();

    // Allow thread init to proceed; wait for its signal
    fse_cond.wait(wlock.getUniqueLock());

    if (root->failure_reason) {
      w_log(
          W_LOG_ERR,
          "failed to start fsevents thread: %s\n",
          root->failure_reason.c_str());
      return false;
    }

    return true;
  } catch (const std::exception& e) {
    watchman::log(
        watchman::ERR, "failed to start fsevents thread: ", e.what(), "\n");
    return false;
  }
}
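The comments in this last example describe a common start-up handshake: the caller holds the lock while it launches the worker, and the worker always signals the condition variable so that start() cannot hang. A minimal stand-alone sketch of that handshake using plain std::mutex and std::condition_variable instead of watchman's wrappers; the type and member names below are illustrative, not watchman's:

#include <condition_variable>
#include <mutex>
#include <thread>

struct WatcherSketch {
    std::mutex mtx;
    std::condition_variable started_cond;
    bool init_done = false;
    bool init_failed = false;

    bool start() {
        // Holding the lock here means the worker cannot signal before we wait.
        std::unique_lock<std::mutex> lock(mtx);

        std::thread worker([this] {
            bool ok = true;
            // ... thread-specific initialization would go here; set ok = false on error ...
            {
                std::lock_guard<std::mutex> lk(mtx);
                init_done = true;
                init_failed = !ok;
            }
            // Always signal, even on failure, so start() never gets stuck.
            started_cond.notify_one();
            // ... the worker's main loop would run here ...
        });
        // Detached, as in the original, so start() does not have to join the worker.
        // (In a real watcher the worker would also keep the object alive, as the
        // original does by capturing a shared_ptr to itself.)
        worker.detach();

        started_cond.wait(lock, [this] { return init_done; });
        return !init_failed;
    }
};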