std::shared_ptr<T> wait_and_pop()
{
    std::unique_lock<std::mutex> lk(mut);
    data_cond.wait(lk, [this]{ return !data_queue.empty(); });
    std::shared_ptr<T> res(std::make_shared<T>(data_queue.front()));
    data_queue.pop();
    empty_cond.notify_one();
    return res;
}
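// A possible producer-side counterpart to wait_and_pop() above (a sketch, not the
// original class): it assumes the same members mut, data_queue, data_cond and
// empty_cond, plus a hypothetical capacity limit that empty_cond guards.
void push(T new_value)
{
    std::unique_lock<std::mutex> lk(mut);
    // wait until a consumer has made room in the bounded queue
    empty_cond.wait(lk, [this]{ return data_queue.size() < capacity; });
    data_queue.push(std::move(new_value));
    // wake one consumer blocked in wait_and_pop()
    data_cond.notify_one();
}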
static void notify(TestData &test, TestResult result)
{
    {
        std::lock_guard<std::mutex> lk(m_mtx);
        --m_running_tests;
        test.result = result;
    }
    m_cndvar.notify_one();
}
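// A sketch of the waiting side that notify() above could pair with (assumed, not
// taken from the original source): it blocks on m_cndvar until m_running_tests
// drops to zero.
static void wait_for_all_tests()
{
    std::unique_lock<std::mutex> lk(m_mtx);
    m_cndvar.wait(lk, []{ return m_running_tests == 0; });
}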
void provider(int val)
{
    // push different values (val to val+5), pausing val milliseconds after each push
    for (int i = 0; i < 6; ++i) {
        {
            std::lock_guard<std::mutex> lg(queueMutex);
            queue.push(val + i);
        } // release lock before notifying
        queueConVar.notify_one();
        std::this_thread::sleep_for(std::chrono::milliseconds(val));
    }
}
void CompilerPool::releaseCompiler(size_t id, ExternCompiler* ptr)
{
    std::unique_lock<std::mutex> l(m_compilerLock);
    m_compilers[id].store(ptr, std::memory_order_relaxed);
    m_freeCount += 1;
    l.unlock();
    m_compilerCv.notify_one();
}
void signals()
{
    std::this_thread::sleep_for(std::chrono::seconds(1));
    std::cout << "Notifying falsely...\n";
    cv.notify_one(); // Most probably, the waiting thread is woken up unnecessarily.

    std::unique_lock<std::mutex> lk(cv_m);
    i = 1;
    while (!done) {
        std::cout << "Notifying true change...\n";
        lk.unlock(); // It is a good idea to unlock before notifying.
        cv.notify_one();
        std::this_thread::sleep_for(std::chrono::seconds(1));
        lk.lock();
    }
}
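// The waiting side that signals() above is written against might look like this
// (a sketch, assuming the shared cv, cv_m, i and done used above): it ignores the
// "false" notification because the predicate i == 1 is still false at that point.
void waits()
{
    std::unique_lock<std::mutex> lk(cv_m);
    std::cout << "Waiting...\n";
    cv.wait(lk, []{ return i == 1; });
    std::cout << "...finished waiting; i == 1\n";
    done = true;
}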
void add(int id, int data)
{
    std::unique_lock<std::mutex> l(lock_);
    // wait until there is room in the ring buffer
    notFull_.wait(l, [this]() { return size_ < capacity_; });
    buffer_[rear_] = data;
    rear_ = (rear_ + 1) % capacity_;
    ++size_;
    std::cout << "Producer " << id << " produced " << data << std::endl;
    notEmpty_.notify_one();
}
~FrozenCleanupCheck()
{
    if (should_freeze) {
        std::unique_lock<std::mutex> l(m);
        nFrozen = 1;
        cv.notify_one();
        cv.wait(l, []{ return nFrozen == 0; });
    }
}
~ManagerImp () override
{
    {
        std::lock_guard<std::mutex> lock(mutex_);
        stop_ = true;
        cond_.notify_one();
    }
    thread_.join();
}
~FrozenCleanupCheck()
{
    if (should_freeze) {
        std::unique_lock<std::mutex> l(m);
        nFrozen.store(1, std::memory_order_relaxed);
        cv.notify_one();
        cv.wait(l, []{ return nFrozen.load(std::memory_order_relaxed) == 0; });
    }
}
void pong()
{
    std::unique_lock<std::mutex> lock(m);
    pong_cond.notify_one();
    while (!done) {
        ping_cond.wait(lock);
        if (need_to_pong) {
            std::cout << "Pong!\n";
            need_to_pong = false;
        }
        pong_cond.notify_one();
    }
}
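// A plausible ping() counterpart for pong() above (a sketch, assuming the same m,
// done, need_to_pong, ping_cond and pong_cond): it raises the flag, wakes the pong
// thread, and then waits for the reply.
void ping()
{
    std::unique_lock<std::mutex> lock(m);
    while (!done) {
        need_to_pong = true;
        std::cout << "Ping!\n";
        ping_cond.notify_one();
        pong_cond.wait(lock);
    }
}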
void ComeToWork()
{
    std::cout << "Hey security, please open the door!\n";
    g_Bell.notify_one();
    std::mutex mutex;
    mutex.lock();
    // note: this waits on a mutex created locally in this call; it compiles only if
    // g_Door is a std::condition_variable_any, which accepts a plain std::mutex
    g_Door.wait(mutex);
    mutex.unlock();
}
void one_done()
{
    std::unique_lock<std::mutex> g(mutex_);
    ++current_;
    if (current_ == required_) {
        cv_.notify_one();
    }
}
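// A sketch of the wait side that one_done() above presumably unblocks; the name
// wait_all_done is hypothetical, and the members mutex_, cv_, current_ and
// required_ are assumed to be those used above.
void wait_all_done()
{
    std::unique_lock<std::mutex> g(mutex_);
    cv_.wait(g, [this]{ return current_ == required_; });
}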
void eat(int* part)
{
    while (!stopped) {
        std::unique_lock<std::mutex> lock(mutex);
        BIG_PIE -= 100;
        *part += 100;
        cond.notify_one();
        cond.wait_for(lock, std::chrono::milliseconds(100));
    }
}
int get(int id)
{
    std::unique_lock<std::mutex> l(lock_);
    // wait until the ring buffer holds at least one element
    notEmpty_.wait(l, [this]() { return size_ > 0; });
    int data = buffer_[front_];
    front_ = (front_ + 1) % capacity_;
    --size_;
    std::cout << "Consumer " << id << " fetched " << data << std::endl;
    notFull_.notify_one();
    return data;
}
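// The state that add() and get() above appear to share, sketched as a guess at the
// surrounding class; the member names are taken from the two functions, but the
// container type and constructor are assumptions.
class BoundedBuffer {
    std::mutex lock_;
    std::condition_variable notFull_;   // signalled by get() when a slot frees up
    std::condition_variable notEmpty_;  // signalled by add() when data arrives
    std::vector<int> buffer_;
    size_t capacity_ = 0;
    size_t front_ = 0, rear_ = 0, size_ = 0;
public:
    explicit BoundedBuffer(size_t capacity) : buffer_(capacity), capacity_(capacity) {}
    void add(int id, int data);  // as above
    int get(int id);             // as above
};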
void print(int max, bool isEven)
{
    for (int i = isEven ? 0 : 1; i < max; i += 2) {
        std::unique_lock<std::mutex> lock(mtx);
        cv.wait(lock); // wait for the other thread's notification before printing
        std::cout << i + 1 << std::endl;
        lock.unlock();
        cv.notify_one();
    }
}
int down()
{
    std::unique_lock<std::mutex> lock(mtx);
    while (value <= 0) {
        cv.wait(lock);
    }
    if (--value > 0) {
        cv.notify_one();
    }
    return value;
}
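// A matching up() for the semaphore-style down() above (a sketch over the same
// mtx, cv and value members; not taken from the original source):
int up()
{
    std::unique_lock<std::mutex> lock(mtx);
    ++value;
    cv.notify_one(); // wake one thread blocked in down()
    return value;
}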
/** Enqueue a work item */
bool Enqueue(WorkItem* item)
{
    std::unique_lock<std::mutex> lock(cs);
    if (queue.size() >= maxDepth) {
        return false;
    }
    queue.emplace_back(std::unique_ptr<WorkItem>(item));
    cond.notify_one();
    return true;
}
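/** A sketch of the worker loop that Enqueue() above could feed; the `running` flag
 *  and the assumption that WorkItem is callable are not from the original source. */
void Run()
{
    while (true) {
        std::unique_ptr<WorkItem> item;
        {
            std::unique_lock<std::mutex> lock(cs);
            // sleep until Enqueue() adds work or the queue is shut down
            cond.wait(lock, [this]{ return !running || !queue.empty(); });
            if (!running && queue.empty())
                break;
            item = std::move(queue.front());
            queue.pop_front();
        }
        (*item)();
    }
}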
template <typename U>
inline std::enable_if_t<std::is_base_of<T, std::decay_t<U>>::value> push(U&& value)
{
    std::unique_lock<std::mutex> lock(mut);
    // only add the value if there is room (or we are shutting down)
    data_cond.wait(lock, [this]{ return (data_queue.size() < capacity) || shutdownFlag; });
    data_queue.emplace(std::make_shared<std::decay_t<U>>(std::forward<U>(value)));
    data_cond.notify_one();
}
App42HttpClient::~App42HttpClient()
{
    s_need_quit = true;
    if (s_requestQueue != nullptr) {
        s_SleepCondition.notify_one();
    }
    s_pHttpClient = nullptr;
}
void onStop () override
{
    JLOG (j_.info()) << "Stopping";
    {
        std::lock_guard<std::mutex> lock (mutex_);
        shouldExit_ = true;
        wakeup_.notify_one();
    }
    thread_.join();
}
HttpClient::~HttpClient()
{
    s_need_quit = true;
    if (s_requestQueue != NULL) {
        s_SleepCondition.notify_one();
    }
    s_pHttpClient = NULL;
}
void sendJob(igcl::peer_id id, uint sendIndex)
{
    //cout << "sendJob() to " << id << " index: " << sendIndex << endl;
    if (id == 0) {
        std::lock_guard<std::mutex> threadQueueLock(threadJobsAccessMutex);
        threadJobs.push(sendIndex);
        threadJobsAccessCondVar.notify_one();
    } else {
        coord->sendTo(id, sendIndex);
    }
}
void f()
{
    std::unique_lock<std::mutex> lk(mut);
    assert(test2 == 0);
    test1 = 1;
    cv.notify_one();
    while (test2 == 0)
        cv.wait(lk);
    assert(test2 != 0);
}
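// The other half of the handshake f() above takes part in could look like the
// hypothetical g() below (a sketch, assuming the shared mut, cv, test1 and test2):
// it waits for test1 to be set, then sets test2 and notifies so f() can exit its loop.
void g()
{
    std::unique_lock<std::mutex> lk(mut);
    while (test1 == 0)
        cv.wait(lk);
    test2 = 1;
    lk.unlock();
    cv.notify_one();
}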
void put(messageT msg)
{
    // Put a single message into the mailbox
    std::unique_lock<std::mutex> lock(queue_mutex);
    // Push message into mailbox
    messages.push(msg);
    // Signal there is a message in the mailbox
    msg_available_cv.notify_one();
}
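// A sketch of the receiving side of the mailbox (assumed, not from the original
// source; it reuses the queue_mutex, messages and msg_available_cv seen in put()):
messageT get()
{
    std::unique_lock<std::mutex> lock(queue_mutex);
    // Sleep until put() signals that a message is available
    msg_available_cv.wait(lock, [this]{ return !messages.empty(); });
    messageT msg = messages.front();
    messages.pop();
    return msg;
}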
void TestCommonData::ShowErrors( QString const& err )
{
    QMessageBox msg;
    msg.setWindowTitle( "Testing errors" );
    msg.setText( err.left(200) + "..." );
    msg.addButton( QMessageBox::Ok );
    msg.setModal( true );
    msg.exec();
    CondVar.notify_one();
}
~AsyncCRC32()
{
    mutex.lock();
    finished = true;
    mutex.unlock();
    readCond.notify_one();
    thread.join();
    for (auto buf : queue)
        delete buf;
}
/**
 * Push an item into the queue.
 *
 * @param value The item to push into the queue.
 * @param num   Number to describe ordering for the items.
 *              It must increase monotonically.
 */
void push(T value, size_type num)
{
    std::lock_guard<std::mutex> lock(m_mutex);
    num -= m_offset;
    if (m_queue.size() <= num + 1) {
        m_queue.resize(num + 2);
    }
    m_queue[num] = std::move(value);
    m_data_available.notify_one();
}
void request_complete(std::shared_ptr<request> req, const ioremap::elliptics::error_info &error)
{
    if (error) {
        fprintf(stderr, "%s %d: queue request completion error: %s\n",
                dnet_dump_id(&req->id), req->src_key, error.message().c_str());
    } else {
        //fprintf(stderr, "%s %d: queue request completed\n", dnet_dump_id(&req->id), req->src_key);
    }
    --running_requests;
    condition.notify_one();
}
void thread_pool::post( Handler handler, taskData *arg )
{
    tasksLock.lock();
    _tasks.push(task( handler, arg ));
    tasksLock.unlock();
    tasksAmount++;
    coutLock.lock();
    coutLock.unlock();
    cvTaskCheck.notify_one();
}
shared_ptr<RunItem> post(std::function<void()> task, std::chrono::steady_clock::duration delay)
{
    shared_ptr<RunItem> retval(new RunItem(task, delay, false));
    lock_guard<mutex> lock(m);
    if (delay == std::chrono::steady_clock::duration(0)) {
        workqueue.push_back(retval);
    } else {
        delayworkqueue.push(retval);
    }
    cv.notify_one();
    return retval;
}