// work_queue work items are automatically dequeued and called by proton
 // This function is called because it was queued by send()
 void do_send(const proton::message& m) {
     sender_.send(m);
     std::lock_guard<std::mutex> l(lock_);
     --queued_;                    // work item was consumed from the work_queue
     credit_ = sender_.credit();   // update credit
     sender_ready_.notify_all();       // Notify senders we have space on queue
 }
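The comments above imply a producer side that blocks while the queue is full. Below is a minimal sketch of that counterpart, in the spirit of Proton's multithreaded client examples; the work_queue_ member and the exact wait predicate are assumptions, not taken from the code shown here.
 // Hypothetical producer counterpart (a sketch, not the original source):
 // wait until the sender has credit for another message, then hand the real
 // send over to the connection's work_queue so do_send() runs on the proton thread.
 void send(const proton::message& m) {
     {
         std::unique_lock<std::mutex> l(lock_);
         sender_ready_.wait(l, [this] { return queued_ < credit_; });
         ++queued_;                // one more work item will be on the work_queue
     }
     work_queue_->add([this, m]() { do_send(m); });  // work_queue_ is an assumed proton::work_queue*
 }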
Example no. 2
    /// Blocks until all N threads reach here
    void Sync()
    {
        std::unique_lock<std::mutex> lock{ m_mutex };

        if (m_state == State::Down)
        {
            // Counting down the number of syncing threads
            if (--m_count == 0) {
                m_state = State::Up;
                m_cv.notify_all();
            }
            else {
                m_cv.wait(lock, [this] { return m_state == State::Up; });
            }
        }

        else // (m_state == State::Up)
        {
            // Counting back up for Auto reset
            if (++m_count == m_initial) {
                m_state = State::Down;
                m_cv.notify_all();
            }
            else {
                m_cv.wait(lock, [this] { return m_state == State::Down; });
            }
        }
    }
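A brief usage sketch for the Sync() barrier above, assuming it lives in a class (here called CyclicBarrier) constructed with the number of participating threads; the class name and constructor are assumptions.
// Hypothetical usage (CyclicBarrier and its constructor are assumed):
#include <functional>
#include <iostream>
#include <thread>
#include <vector>

void worker(CyclicBarrier& barrier, int id)
{
    // ... per-thread setup work ...
    barrier.Sync();                     // nobody proceeds until all N threads arrive
    std::cout << "thread " << id << " passed the barrier\n";
}

int main()
{
    constexpr int N = 4;
    CyclicBarrier barrier{ N };         // assumed: initialises m_count and m_initial to N
    std::vector<std::thread> threads;
    for (int i = 0; i < N; ++i)
        threads.emplace_back(worker, std::ref(barrier), i);
    for (auto& t : threads)
        t.join();
}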
Example no. 3
		/**
		 * @brief Unlock on writing
		 */
		void writeUnlock()
		{
			writing_ = false;
			
			read_cv_.notify_all();
			write_cv_.notify_all();
		}
Example no. 4
 void halt() {
     {
         std::lock_guard<std::mutex> lk(mutex);
         if (!running) return;
         running = false;
     }
     tasks_updated.notify_all();
     modified.notify_all();
 }
Example no. 5
void signals()
{
    std::this_thread::sleep_for(std::chrono::milliseconds(120));
    std::cerr << "Notifying...\n";
    cv.notify_all();
    std::this_thread::sleep_for(std::chrono::milliseconds(100));
    i = 1;
    std::cerr << "Notifying again...\n";
    cv.notify_all();
}
Example no. 6
void signals()
{
    wait(120);
    std::cerr << "Notifying...\n";
    cv.notify_all();
    wait(120);
    i = 1;
    std::cerr << "Notifying again...\n";
    cv.notify_all();
}
Example no. 7
    void ThreadMain(ComPtr<ISwapChainPanel> swapChainPanel)
    {
        auto lock = GetLock();

        m_dispatcher = CreateCoreDispatcher(swapChainPanel.Get());
        swapChainPanel.Reset(); // we only needed this to create the dispatcher
        m_conditionVariable.notify_all();

        lock.unlock();
        m_client->OnGameLoopStarting();
        lock.lock();

        m_started = true;
        m_conditionVariable.notify_all();        

        for (;;)
        {
            m_conditionVariable.wait(lock, [=] { return m_shutdownRequested || !m_pendingActions.empty() || m_startDispatcher; });

            if (m_shutdownRequested)
                break;

            if (!m_pendingActions.empty())
            {
                std::vector<ComPtr<AnimatedControlAsyncAction>> actions;
                std::swap(actions, m_pendingActions);
                
                lock.unlock();
                RunActions(std::move(actions));
                lock.lock();
            }
            else if (m_startDispatcher)
            {
                m_dispatcherStarted = true;
                m_conditionVariable.notify_all();
                
                lock.unlock();
                ThrowIfFailed(m_dispatcher->ProcessEvents(CoreProcessEventsOption_ProcessUntilQuit));
                lock.lock();

                m_dispatcherStarted = false;
                m_conditionVariable.notify_all();
            }
        }

        // Cancel any remaining actions
        CancelActions(lock);

        lock.unlock();
        m_client->OnGameLoopStopped();

        // falling out of ThreadMain will cause ThreadCompleted to be called,
        // which will mark the thread as shutdown.
    }
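The wait in the loop above wakes for three reasons: shutdown, pending actions, or a dispatcher start request. A minimal sketch of how the shutdown flag might be raised from another thread, reusing the GetLock() helper seen at the top of ThreadMain; this method is an illustration, not necessarily the library's actual code.
    // Hypothetical helper (an illustration): ask ThreadMain to leave its loop
    // and run the cancellation/cleanup path at the bottom.
    void RequestShutdown()
    {
        auto lock = GetLock();
        m_shutdownRequested = true;
        m_conditionVariable.notify_all();   // wakes the wait() inside the for(;;) loop
    }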
Example no. 8
void signals()
{
	std::this_thread::sleep_for(std::chrono::seconds(1));
	std::cerr << "Notifying...\n";
	cv.notify_all();

	std::this_thread::sleep_for(std::chrono::seconds(1));
	std::unique_lock<std::mutex> lk(cv_m);
	i = 1;
	std::cerr << "Notifying again...\n";
	cv.notify_all();
}
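All three signals() variants pair with a waiter that sleeps on cv until i becomes 1. A matching waiter sketch, built on the shared globals std::condition_variable cv, std::mutex cv_m and int i used above (note that only this last variant also writes i while holding cv_m); the function name and body are a sketch, not taken from the source page.
// Hypothetical waiter (a sketch built on the globals assumed above):
void waits()
{
	std::unique_lock<std::mutex> lk(cv_m);
	std::cerr << "Waiting...\n";
	cv.wait(lk, [] { return i == 1; });   // ignores the first notification, since i is still 0
	std::cerr << "...finished waiting; i == 1\n";
}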
Example no. 9
  /** Process the read reservations that are ready to be released in the
      reservation queue
  */
  void move_read_reservation_forward() {
    // Lock the pipe to avoid nuisance
    std::lock_guard<std::mutex> lg { cb_mutex };

    for (;;) {
      if (r_rid_q.empty())
        // No pending reservation, so nothing to do
        break;
      if (!r_rid_q.front().ready)
        /* If the first reservation is not ready to be released, stop
           because it is blocking all the following in the queue
           anyway */
        break;
      // Remove the reservation to be released from the queue
      r_rid_q.pop_front();
      std::size_t n_to_pop;
      if (r_rid_q.empty())
        // If it was the last one, remove all the reservation
        n_to_pop = read_reserved_frozen;
      else
        // Else remove everything up to the next reservation
        n_to_pop =  r_rid_q.front().start - cb.begin();
      // No longer take into account these reserved slots
      read_reserved_frozen -= n_to_pop;
      // Release the elements from the FIFO
      while (n_to_pop--)
        cb.pop_front();
      // Notify the clients waiting for some room to write in the pipe
      read_done.notify_all();
      /* ...and process the next reservation to see if it is ready to
         be released too */
    }
  }
Example no. 10
  /** Try to read a value from the pipe

      \param[out] value is the reference to where to store what is
      read

      \param[in] blocking specifies whether the call waits for the operation
      to succeed

      \return true on success
  */
  bool read(T &value, bool blocking = false) {
    // Lock the pipe to avoid being disturbed
    std::unique_lock<std::mutex> ul { cb_mutex };
    TRISYCL_DUMP_T("Read pipe empty = " << empty());

    if (blocking)
      /* If in blocking mode, wait for the not empty condition, that
         may be changed when a write is done */
      write_done.wait(ul, [&] { return !empty(); });
    else if (empty())
      return false;

    TRISYCL_DUMP_T("Read pipe front = " << cb.front()
                   << " back = " << cb.back()
                   << " reserved_for_reading() = " << reserved_for_reading());
    if (read_reserved_frozen)
      /** If there is a pending reservation, read the next element to
          be read and update the number of reserved elements */
      value = cb.begin()[read_reserved_frozen++];
    else {
      /* There is no pending read reservation, so pop the read value
         from the pipe */
      value = cb.front();
      cb.pop_front();
    }

    TRISYCL_DUMP_T("Read pipe value = " << value);
    // Notify the clients waiting for some room to write in the pipe
    read_done.notify_all();
    return true;
  }
Example no. 11
  /** Try to write a value to the pipe

      \param[in] value is what we want to write

      \param[in] blocking specifies whether the call waits for the operation
      to succeed

      \return true on success

      \todo provide a && version
  */
  bool write(const T &value, bool blocking = false) {
    // Lock the pipe to avoid being disturbed
    std::unique_lock<std::mutex> ul { cb_mutex };
    TRISYCL_DUMP_T("Write pipe full = " << full()
                   << " value = " << value);

    if (blocking)
      /* If in blocking mode, wait for the not full condition, that
         may be changed when a read is done */
      read_done.wait(ul, [&] { return !full(); });
    else if (full())
      return false;

    cb.push_back(value);
    TRISYCL_DUMP_T("Write pipe front = " << cb.front()
                   << " back = " << cb.back()
                   << " cb.begin() = " << (void *)&*cb.begin()
                   << " cb.size() = " << cb.size()
                   << " cb.end() = " << (void *)&*cb.end()
                   << " reserved_for_reading() = " << reserved_for_reading()
                   << " reserved_for_writing() = " << reserved_for_writing());
    // Notify the clients waiting to read something from the pipe
    write_done.notify_all();
    return true;
  }
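A short usage sketch of the blocking read/write pair above, assuming a shared pipe-like object named p that exposes these two member functions with T = int; the object, its type, and the process() call are assumptions.
// Hypothetical usage (p, its type, and process() are assumed):
void producer() {
  for (int v = 0; v < 100; ++v)
    p.write(v, /* blocking = */ true);   // waits while the FIFO is full
}

void consumer() {
  for (int n = 0; n < 100; ++n) {
    int v;
    p.read(v, /* blocking = */ true);    // waits while the FIFO is empty
    process(v);                          // assumed user function
  }
}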
Example no. 12
    std::size_t set_concurrency(std::size_t n)
    {
        std::unique_lock<std::mutex> g(mutex_);

        if ( concurrency_ != spawned_threads_ )
        {
            return concurrency_;
        }

        std::size_t to_spawn = (n > concurrency_) ? ( n - concurrency_ ) : 0;
        concurrency_ = n;

        for ( std::size_t i = 0; i < to_spawn; ++i )
        {
            //std::thread t(&dfs_task_manager::worker_loop, this);
            //t.detach();
            global_task_manager.schedule(&dfs_task_manager::worker_loop, this);
        }

        workers_cv_.notify_all();

        while ( concurrency_ != spawned_threads_ )
        {
            manager_cv_.wait(g);
        }

        return concurrency_;
    }
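set_concurrency() blocks on manager_cv_ until spawned_threads_ matches concurrency_, which implies each worker updates that counter under the same mutex and notifies the manager. A rough sketch of that handshake inside worker_loop(); everything here beyond the member names already visible above is an assumption.
    // Hypothetical outline of worker_loop()'s bookkeeping (not the original code):
    void worker_loop()
    {
        {
            std::unique_lock<std::mutex> g(mutex_);
            ++spawned_threads_;
            manager_cv_.notify_all();    // lets set_concurrency() see the new count
        }

        // ... pull and run tasks, sleeping on workers_cv_ while idle ...

        {
            std::unique_lock<std::mutex> g(mutex_);
            --spawned_threads_;
            manager_cv_.notify_all();    // also reached when concurrency_ shrinks
        }
    }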
Example no. 13
 /// <summary>
 /// Invoked to cause Run to continue its processing
 /// </summary>
 void Proceed(void) {
   std::lock_guard<std::mutex> lk(m_lock);
   if(m_barrierDone)
     return;
   m_barrierDone = true;
   m_continueCond.notify_all();
 }
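Proceed() flips m_barrierDone and wakes whatever is parked inside Run(); the waiting side presumably looks like the sketch below, built on the same m_lock, m_continueCond and m_barrierDone members (the method itself is an assumption, not the original Run()).
 // Hypothetical waiting side (a sketch, not the original Run()):
 void WaitForProceed(void) {
   std::unique_lock<std::mutex> lk(m_lock);
   m_continueCond.wait(lk, [this] { return m_barrierDone; });
 }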
Example no. 14
	void Save(Data &&data)
	{
		std::unique_lock<std::mutex> lock(m_queueGuard);
		m_queue.emplace();
		m_queue.back().swap(data);
		m_condition.notify_all();
	}
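Save() notifies m_condition while still holding m_queueGuard, so a consumer can wait on the same pair. A minimal consumer sketch, assuming the same members, that Data is default-constructible, and that m_queue is a std::queue<Data>; the method name is an assumption.
	// Hypothetical consumer (a sketch, not from the original source):
	Data Take()
	{
		std::unique_lock<std::mutex> lock(m_queueGuard);
		m_condition.wait(lock, [this] { return !m_queue.empty(); });
		Data data;
		data.swap(m_queue.front());   // mirrors the swap-based Save() above
		m_queue.pop();
		return data;
	}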
Example no. 15
 virtual void onAdminQueryMessage(const std::string& message)
 {
     std::unique_lock<std::mutex> lock(_messageReceivedMutex);
     _messageReceivedCV.notify_all();
     _messageReceived = message;
     Log::info("UnitAdmin:: onAdminQueryMessage: " + message);
 }
Example no. 16
void mcuMessageHandler2(const U8*, const std::size_t) {
    std::unique_lock<std::mutex> l(mcuMessageGuard);
    gmcu->send<>(mcu2local2);
    if(++stats.mcu.message2 == LOAD_N) {
        mcuCond.notify_all();
    }
}
Example no. 17
    void stop ()
    {
        std::unique_lock<std::mutex> lock (mutex);
        running = false;

        flag.notify_all ();
    }
Example no. 18
	void	process(int thread_index)
	{
		(void)(thread_index); // currently unused, but may be useful for debugging.

		for( ; ; ) {
			if(is_terminated()) {
				break;
			}

			task_ptr_t task = task_queue_.dequeue();

			bool should_notify = false;

			task->run();

			{
				task_count_lock_t lock(task_count_mutex_);
				--task_count_;

				if(is_waiting() && task_count_ == 0) {
					should_notify = true;
				}
			}

			if(should_notify) {
				c_task_.notify_all();
			}
        }
	}
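The notification at the end of process() only fires when a waiter is registered and task_count_ has reached zero, which suggests a companion call that blocks until the pool drains. A sketch of such a wait, assuming task_count_mutex_ is a std::mutex and a waiting_ flag backs is_waiting(); those details are assumptions.
	// Hypothetical companion (not the original code): block until every
	// queued task has completed.
	void	wait_all()
	{
		std::unique_lock<std::mutex> lock(task_count_mutex_);
		waiting_ = true;                  // assumed flag behind is_waiting()
		c_task_.wait(lock, [this] { return task_count_ == 0; });
		waiting_ = false;
	}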
Example no. 19
    inline void open()
    {
        std::lock_guard<std::mutex> lock(this->mutex);

        closed = false;
        cond.notify_all();
    }
Example no. 20
void square_am_signal(float time, float frequency)
{
    using namespace std::chrono ;
    
    std::cout << "Playing / " << time << " seconds / " << frequency << " Hz\n" ;

    seconds const sec{1} ;
    nanoseconds const nsec{ sec } ;
    using rep = nanoseconds::rep ;
    auto nsec_per_sec = nsec.count() ;

    nanoseconds const period( static_cast<rep>( nsec_per_sec / frequency) ) ;

    auto start = high_resolution_clock::now() ;
    auto const end = start + nanoseconds( static_cast<rep>(time * nsec_per_sec) ) ;

    while (high_resolution_clock::now() < end)
    {
        mid = start + period / 2 ;
        reset = start + period ;

        cv.notify_all() ;
        std::this_thread::sleep_until( reset ) ;
        start = reset;
    }
}
Example no. 21
        void run() {
            {
                std::lock_guard<std::mutex> lk(mutex);
                if (running) return;
                running = true;
                for (auto& t : threads) {
                    t = std::thread([this]{this->loop();});
                }
            }
            while (true) {
                std::unique_lock<std::mutex> lk(mutex);
                if (!running) break;

                auto task_it = min_element(tasks.begin(), tasks.end());
                time_point next_task = task_it == tasks.end() ? clock_type::time_point::max() : task_it->start;
                if (tasks_updated.wait_until(lk, next_task) == std::cv_status::timeout) {
                    if (task_it->repeat != clock_type::duration::zero()) {
                        task_it->start += task_it->repeat;
                    }
                    else {
                        handles.remove(task_it);
                        tasks.erase(task_it);
                    }
                    todo.push_back(task_it);
                    modified.notify_all();
                }
            }
            for (auto& t : threads) {
                t.join();
            }
        }
Example no. 22
 // Called when the read op terminates
 void
 on_read_done()
 {
     std::lock_guard<std::mutex> lock(m0_);
     b0_ = true;
     cv0_.notify_all();
 }
Example no. 23
/// "Do on main thread" support.
static void iothread_service_main_thread_requests(void) {
    ASSERT_IS_MAIN_THREAD();

    // Move the queue to a local variable.
    std::queue<main_thread_request_t *> request_queue;
    {
        scoped_lock queue_lock(s_main_thread_request_q_lock);
        request_queue.swap(s_main_thread_request_queue);
    }

    if (!request_queue.empty()) {
        // Perform each of the functions. Note we are NOT responsible for deleting these. They are
        // stack allocated in their respective threads!
        while (!request_queue.empty()) {
            main_thread_request_t *req = request_queue.front();
            request_queue.pop();
            req->func();
            req->done = true;
        }

        // Ok, we've handled everybody. Announce the good news, and allow ourselves to be unlocked.
        // Note we must do this while holding the lock. Otherwise we race with the waiting threads:
        //
        // 1. waiting thread checks for done, sees false
        // 2. main thread performs request, sets done to true, posts to condition
        // 3. waiting thread unlocks lock, waits on condition (forever)
        //
        // Because the waiting thread performs step 1 under the lock, if we take the lock, we avoid
        // posting before the waiting thread is waiting.
        scoped_lock broadcast_lock(s_main_thread_performer_lock);
        s_main_thread_performer_cond.notify_all();
    }
}
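The comment above spells out the waiting thread's protocol: check done and wait on s_main_thread_performer_cond while holding s_main_thread_performer_lock, so the main thread's post can never slip in between the check and the wait. A sketch of that waiting side, assuming the lock is a plain std::mutex; the function below is an illustration, not necessarily fish's actual code.
// Hypothetical waiting side (an illustration of the protocol described above):
static void wait_for_main_thread_request(main_thread_request_t *req) {
    std::unique_lock<std::mutex> lock(s_main_thread_performer_lock);
    while (!req->done) {
        // The check and the wait happen under the performer lock, so the main
        // thread cannot set done and notify in between them.
        s_main_thread_performer_cond.wait(lock);
    }
}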
Example no. 24
 void abort()
 {
     std::unique_lock<std::mutex> lock(_waitMutex);
     _abort = true;
     _done = _done || _startedSubTasks == 0;
     _waitCond.notify_all();
 }
Example no. 25
int fakeSocketClose(int fd)
{
    std::vector<FakeSocketPair>& fds = getFds();
    std::unique_lock<std::mutex> lock(theMutex);
    if (fd < 0 || static_cast<unsigned>(fd/2) >= fds.size())
    {
        loggingBuffer << "FakeSocket EBADF: Close #" << fd << flush();
        errno = EBADF;
        return -1;
    }

    FakeSocketPair& pair = fds[fd/2];

    const int K = (fd&1);
    const int N = 1 - K;

    if (pair.fd[K] == -1)
    {
        loggingBuffer << "FakeSocket EBADF: Close #" << fd << flush();
        errno = EBADF;
        return -1;
    }

    assert(pair.fd[K] == fd);

    pair.fd[K] = -1;
    pair.buffer[K].resize(0);
    pair.readable[N] = true;

    theCV.notify_all();

    loggingBuffer << "FakeSocket Close #" << fd << flush();

    return 0;
}
Example no. 26
File: main.cpp Project: CCJY/coliru
	void emplace(Args&&... args)
	{
		bool empty_ = empty();
		internal.emplace(std::forward<Args>(args)...);
		if (empty_)
			not_empty.notify_all();
	}
Example no. 27
File: main.cpp Project: CCJY/coliru
	void push(const typename std::stack<T>::value_type& val)
	{
		bool empty_ = empty();
		internal.push(val);
		if (empty_)
			not_empty.notify_all();
	}
Example no. 28
File: main.cpp Project: CCJY/coliru
	void pop()
	{
		internal.pop();
		if (empty())
			is_empty.notify_all();

	}
Example no. 29
/*********************************************************************
 * This function traverses all the pixels and casts rays. It calls the
 * recursive ray tracer and assigns the returned color to the frame.
 *
 * You should not need to change it except for the call to the recursive
 * ray tracer. Feel free to change other parts of the function, however,
 * if you must.
 *********************************************************************/
void ray_trace() {
	int i, j;
	float x_grid_size = image_width / float(win_width);
	float y_grid_size = image_height / float(win_height);
	float x_start = -0.5 * image_width;
	float y_start = -0.5 * image_height;
	Point cur_pixel_pos;
	Vector ray;

	// ray is cast through center of pixel
	cur_pixel_pos.x = x_start + 0.5 * x_grid_size;
	cur_pixel_pos.y = y_start + 0.5 * y_grid_size;
	cur_pixel_pos.z = image_plane;

	for (unsigned int i = 0; i < std::thread::hardware_concurrency(); ++i) {
		std::thread t(workThread);
		threads.push_back(std::move(t));
	}

	for (i=0; i<win_height; i++) {
		for (j=0; j<win_width; j++) {
			ray = get_vec(eye_pos, cur_pixel_pos);
			ray = normalize(ray);

			queueRay(i, j, cur_pixel_pos, ray, x_grid_size, y_grid_size);

			cur_pixel_pos.x += x_grid_size;
		}

		cur_pixel_pos.y += y_grid_size;
		cur_pixel_pos.x = x_start;
	}
	queue_condition.notify_all();
}
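ray_trace() queues one item per pixel with queueRay() and only wakes the workers once everything is queued, via queue_condition.notify_all(). The worker side is not shown; below is a rough sketch under heavy assumptions (a shared ray_queue guarded by queue_mutex, an all_queued flag, a RayJob struct matching queueRay()'s arguments, and a recursive_ray_trace() that returns the pixel color; every one of those names is hypothetical).
// Hypothetical worker (all names except queue_condition are assumed):
void workThread() {
	for (;;) {
		RayJob job;                       // assumed: holds i, j, pixel_pos, ray, grid sizes
		{
			std::unique_lock<std::mutex> lk(queue_mutex);
			queue_condition.wait(lk, [] { return all_queued || !ray_queue.empty(); });
			if (ray_queue.empty()) return;                  // nothing left to trace
			job = ray_queue.front();
			ray_queue.pop();
		}
		frame[job.i][job.j] = recursive_ray_trace(job.pixel_pos, job.ray, 0);  // assumed tracer
	}
}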
Example no. 30
File: main.cpp Project: CCJY/coliru
	void push(typename std::stack<T>::value_type&& val)
	{
		bool empty_ = empty();
		internal.push(std::forward<typename std::stack<T>::value_type>(val));
		if (empty_)
			not_empty.notify_all();
	}
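The push/emplace wrappers above only notify not_empty on the empty-to-non-empty transition, which pairs naturally with a consumer that waits on that condition. The snippets show no mutex, so the sketch below assumes one (mutex_) exists alongside internal, not_empty and is_empty; the method and member names added here are assumptions.
	// Hypothetical consumer (a sketch; mutex_ and this method are assumed):
	T wait_and_pop()
	{
		std::unique_lock<std::mutex> lock(mutex_);
		not_empty.wait(lock, [this] { return !internal.empty(); });
		T value = internal.top();         // internal is the wrapped std::stack<T>
		internal.pop();
		if (internal.empty())
			is_empty.notify_all();        // mirrors pop() above
		return value;
	}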