void resized(const boo::SWindowRect& rect) {
    std::unique_lock<std::mutex> lk(m_mt);
    m_latestResize = rect;
    m_hasResize = true;
    m_resizeCv.wait_for(lk, std::chrono::milliseconds(500));
}
/**
 * Push an element onto the queue. If the queue has a max size,
 * this call will block if the queue is full.
 */
void push(T value) {
    constexpr const std::chrono::milliseconds max_wait{10};
#ifdef OSMIUM_DEBUG_QUEUE_SIZE
    ++m_push_counter;
#endif
    if (m_max_size) {
        while (size() >= m_max_size) {
            std::unique_lock<std::mutex> lock{m_mutex};
            m_space_available.wait_for(lock, max_wait, [this] {
                return m_queue.size() < m_max_size;
            });
#ifdef OSMIUM_DEBUG_QUEUE_SIZE
            ++m_full_counter;
#endif
        }
    }
    std::lock_guard<std::mutex> lock{m_mutex};
    m_queue.push(std::move(value));
#ifdef OSMIUM_DEBUG_QUEUE_SIZE
    if (m_largest_size < m_queue.size()) {
        m_largest_size = m_queue.size();
    }
#endif
    m_data_available.notify_one();
}
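// A minimal, self-contained sketch (not taken from the library above) of the
// same bounded-queue idea: the producer re-checks the "space available"
// predicate in a loop, capping each wait at 10 ms just like push() does, and
// the consumer notifies after popping. All names below are invented for
// illustration.
#include <chrono>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <queue>
#include <thread>

std::mutex mtx;
std::condition_variable space_available;
std::condition_variable data_available;
std::queue<int> q;
constexpr std::size_t max_size = 4;

void producer() {
    for (int i = 0; i < 20; ++i) {
        std::unique_lock<std::mutex> lock{mtx};
        while (q.size() >= max_size) {
            // Bounded wait: a missed notification can only stall us for 10 ms.
            space_available.wait_for(lock, std::chrono::milliseconds(10),
                                     [] { return q.size() < max_size; });
        }
        q.push(i);
        data_available.notify_one();
    }
}

void consumer() {
    for (int received = 0; received < 20; ++received) {
        std::unique_lock<std::mutex> lock{mtx};
        data_available.wait(lock, [] { return !q.empty(); });
        std::cout << q.front() << ' ';
        q.pop();
        space_available.notify_one();
    }
    std::cout << '\n';
}

int main() {
    std::thread p(producer), c(consumer);
    p.join();
    c.join();
}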
TestResult testDocCount() {
    _messageReceived.clear();
    const std::string queryMessage = "active_docs_count";
    _adminWs->sendFrame(queryMessage.data(), queryMessage.size());

    std::unique_lock<std::mutex> lock(_messageReceivedMutex);
    if (_messageReceived.empty() &&
        _messageReceivedCV.wait_for(lock, std::chrono::milliseconds(_messageTimeoutMilliSeconds)) == std::cv_status::timeout) {
        Log::info("testDocCount: Timed out waiting for admin console message");
        return TestResult::TEST_TIMED_OUT;
    }
    lock.unlock();

    StringTokenizer tokens(_messageReceived, " ",
                           StringTokenizer::TOK_IGNORE_EMPTY | StringTokenizer::TOK_TRIM);
    if (tokens.count() != 2 || tokens[0] != "active_docs_count") {
        Log::info("testDocCount: Unrecognized message format");
        return TestResult::TEST_FAILED;
    }
    else if (std::stoi(tokens[1]) != _docsCount) {
        Log::info("testDocCount: Incorrect doc count, expected: " +
                  std::to_string(_docsCount) + ", actual: " + tokens[1]);
        return TestResult::TEST_FAILED;
    }

    Log::info(std::string("testDocCount: OK"));
    return TestResult::TEST_OK;
}
TestResult testRmDocNotify() {
    _messageReceived.clear();

    // subscribe to rmdoc notification on admin websocket
    const std::string subscribeMessage = "subscribe rmdoc";
    _adminWs->sendFrame(subscribeMessage.data(), subscribeMessage.size());

    _docWs1->close();

    std::unique_lock<std::mutex> lock(_messageReceivedMutex);
    if (_messageReceived.empty() &&
        _messageReceivedCV.wait_for(lock, std::chrono::milliseconds(_messageTimeoutMilliSeconds)) == std::cv_status::timeout) {
        Log::info("testRmDocNotify: Timed out waiting for admin console message");
        return TestResult::TEST_TIMED_OUT;
    }
    lock.unlock();

    StringTokenizer tokens(_messageReceived, " ",
                           StringTokenizer::TOK_IGNORE_EMPTY | StringTokenizer::TOK_TRIM);
    if (tokens.count() != 3 || tokens[0] != "rmdoc" || stoi(tokens[1]) != _docPid1) {
        Log::info("testRmDocNotify: Invalid message format");
        return TestResult::TEST_FAILED;
    }
    _usersCount--;

    Log::info(std::string("testRmDocNotify: OK"));
    return TestResult::TEST_OK;
}
std::shared_ptr<WdtAbortChecker> setupAbortChecker() {
    int abortSeconds = FLAGS_abort_after_seconds;
    if (abortSeconds <= 0) {
        return nullptr;
    }
    LOG(INFO) << "Setting up abort after " << abortSeconds << " seconds.";
    static std::atomic<bool> abortTrigger{false};
    auto res = std::make_shared<WdtAbortChecker>(abortTrigger);
    auto lambda = [=] {
        LOG(INFO) << "Will abort in " << abortSeconds << " seconds.";
        std::unique_lock<std::mutex> lk(abortMutex);
        bool isNotAbort =
            abortCondVar.wait_for(lk, std::chrono::seconds(abortSeconds),
                                  [&]() -> bool { return isAbortCancelled; });
        if (isNotAbort) {
            LOG(INFO) << "Already finished normally, no abort.";
        } else {
            LOG(INFO) << "Requesting abort.";
            abortTrigger.store(true);
        }
    };
    // Run this in a separate thread concurrently with sender/receiver
    static auto f = std::async(std::launch::async, lambda);
    return res;
}
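// Hedged sketch of the cancellation path implied above: when the transfer
// finishes normally, something sets isAbortCancelled under abortMutex and
// notifies abortCondVar so the waiting lambda wakes early instead of firing
// the abort. The variable names come from the snippet; cancelAbort() itself is
// an assumption for illustration.
void cancelAbort() {
    {
        std::unique_lock<std::mutex> lk(abortMutex);
        isAbortCancelled = true;   // predicate tested by wait_for() in the lambda
    }
    abortCondVar.notify_one();     // wake the abort-checker thread
}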
//! Returns true if timeout hit
bool timedWaitOnMutex(std::condition_variable &waitCond, std::mutex &waitMutex,
                      int secs, int msecs) {
    auto span = std::chrono::seconds(secs) + std::chrono::milliseconds(msecs);
    std::unique_lock<std::mutex> lock(waitMutex);
    return waitCond.wait_for(lock, span) == std::cv_status::timeout;
}
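// Hedged usage sketch for the helper above (all names below are invented):
// a worker notifies when it is done, and the caller learns from the return
// value whether it gave up after 1 s 500 ms instead.
#include <chrono>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

int main() {
    std::condition_variable doneCond;
    std::mutex doneMutex;

    std::thread worker([&] {
        std::this_thread::sleep_for(std::chrono::milliseconds(200));
        doneCond.notify_one();  // signal completion
    });

    bool timedOut = timedWaitOnMutex(doneCond, doneMutex, 1, 500);
    std::cout << "timed out: " << std::boolalpha << timedOut << '\n';
    worker.join();
}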
void run() {
    std::unique_lock<std::mutex> lock(mutex_);
    while (!done_) {
        cond_.wait_for(lock, std::chrono::milliseconds(interval_));
        print();
    }
}
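// Hedged sketch of a stop() that could pair with run() above: set done_ under
// the same mutex, then notify so the loop exits without sleeping out the full
// interval. The member names mirror the snippet; the function itself is an
// assumption.
void stop() {
    {
        std::lock_guard<std::mutex> lock(mutex_);
        done_ = true;          // checked by run() after every wakeup
    }
    cond_.notify_one();        // cut the current wait_for() short
}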
boost::optional<Json::Value>
findMsg(std::chrono::milliseconds const& timeout,
        std::function<bool(Json::Value const&)> pred) override {
    std::shared_ptr<msg> m;
    {
        std::unique_lock<std::mutex> lock(m_);
        if (!cv_.wait_for(lock, timeout, [&] {
                for (auto it = msgs_.begin(); it != msgs_.end(); ++it) {
                    if (pred((*it)->jv)) {
                        m = std::move(*it);
                        msgs_.erase(it);
                        return true;
                    }
                }
                return false;
            })) {
            return boost::none;
        }
    }
    return std::move(m->jv);
}
/**
 * This case tests if multiple requests are being sent up to AVS with a reset during the process.
 */
TEST_F(UserInactivityMonitorTest, sendMultipleReportsWithReset) {
    InSequence s;
    std::mutex exitMutex;
    std::unique_lock<std::mutex> exitLock(exitMutex);
    int repetitionCount = 5;
    EXPECT_CALL(*m_mockMessageSender, sendMessage(ResultOf(&checkMessageRequest, Eq(true))))
        .Times(AtLeast(repetitionCount - 1));
    EXPECT_CALL(*m_mockMessageSender, sendMessage(ResultOf(&checkMessageRequestAndReleaseTrigger, Eq(true)))).Times(1);
    auto userInactivityMonitor = UserInactivityMonitor::create(
        m_mockMessageSender, m_mockExceptionEncounteredSender, USER_INACTIVITY_REPORT_PERIOD);
    ASSERT_NE(nullptr, userInactivityMonitor);
    auto directiveSequencer = adsl::DirectiveSequencer::create(m_mockExceptionEncounteredSender);
    directiveSequencer->addDirectiveHandler(userInactivityMonitor);

    auto userInactivityDirectiveHeader = std::make_shared<AVSMessageHeader>(
        USER_INACTIVITY_RESET_NAMESPACE, USER_INACTIVITY_RESET_NAME, USER_INACTIVITY_MESSAGE_ID);
    auto attachmentManager = std::make_shared<StrictMock<attachment::test::MockAttachmentManager>>();
    std::shared_ptr<AVSDirective> userInactivityDirective =
        AVSDirective::create("", userInactivityDirectiveHeader, "", attachmentManager, "");

    std::this_thread::sleep_for(2 * USER_INACTIVITY_REPORT_PERIOD + USER_INACTIVITY_REPORT_PERIOD / 2);
    directiveSequencer->onDirective(userInactivityDirective);
    exitTrigger.wait_for(exitLock, repetitionCount * USER_INACTIVITY_REPORT_PERIOD + USER_INACTIVITY_REPORT_PERIOD / 2);
    directiveSequencer->shutdown();
}
void f() {
    typedef std::chrono::system_clock Clock;
    typedef std::chrono::milliseconds milliseconds;
    std::unique_lock<std::mutex> lk(mut);
    assert(test2 == 0);
    test1 = 1;
    cv.notify_one();
    Clock::time_point t0 = Clock::now();
    while (test2 == 0 &&
           cv.wait_for(lk, milliseconds(250)) == std::cv_status::no_timeout)
        ;
    Clock::time_point t1 = Clock::now();
    if (runs == 0) {
        assert(t1 - t0 < milliseconds(250));
        assert(test2 != 0);
    } else {
        assert(t1 - t0 - milliseconds(250) < milliseconds(5));
        assert(test2 == 0);
    }
    ++runs;
}
void waitForUpdate(std::mutex& m, std::condition_variable& cv, bool& x) {
    std::unique_lock<std::mutex> lk(m);
    if (!x) {
        cv.wait_for(lk, std::chrono::seconds(5), [&] { return x; });
    }
    ASSERT_TRUE(x);
    x = false;
}
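// Hedged counterpart to waitForUpdate() above: the producer publishes the flag
// under the same mutex and notifies. The parameter names mirror the snippet;
// publishUpdate() itself is an assumption for illustration.
#include <condition_variable>
#include <mutex>

void publishUpdate(std::mutex& m, std::condition_variable& cv, bool& x) {
    {
        std::lock_guard<std::mutex> lk(m);
        x = true;            // state change observed by the waiter's predicate
    }
    cv.notify_one();         // wake the waiter; the predicate re-check handles spurious wakeups
}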
void waits(int idx) {
    std::unique_lock<std::mutex> lk(cv_m);
    if (cv.wait_for(lk, std::chrono::milliseconds(idx * 100), []() { return i == 1; }))
        std::cerr << "Thread " << idx << " finished waiting. i == " << i << '\n';
    else
        std::cerr << "Thread " << idx << " timed out. i == " << i << '\n';
}
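// Hedged companion for waits() above: plausible shared state and a signalling
// thread. The names cv, cv_m and i match the snippet (and would have to be
// declared before waits() in a full program); everything else is an assumption
// for illustration.
#include <chrono>
#include <condition_variable>
#include <mutex>
#include <thread>

std::condition_variable cv;
std::mutex cv_m;
int i = 0;

void signals() {
    std::this_thread::sleep_for(std::chrono::milliseconds(120));
    {
        std::lock_guard<std::mutex> lk(cv_m);
        i = 1;               // makes the predicate in waits() true
    }
    cv.notify_all();         // wake every waiter so the predicate is re-checked
}

int main() {
    std::thread t1(waits, 1), t2(waits, 2), t3(waits, 3), t4(signals);
    t1.join(); t2.join(); t3.join(); t4.join();
}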
bool wait(int seconds) {
    std::unique_lock<std::mutex> lk(m);
    cv.wait_for(lk, std::chrono::seconds(seconds), std::bind(&cracker::isFound, this));
    lk.unlock();
    return found;
}
void task1() {
    log("Starting task 1. Waiting on cv for 2 secs.");
    lock_type lck(mtx);
    bool done = cv.wait_for(lck, 2 * sec, [] { return ready; });
    std::stringstream ss;
    ss << "Task 1 finished, done==" << (done ? "true" : "false") << ", "
       << (lck.owns_lock() ? "lock owned" : "lock not owned");
    log(ss.str());
}
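// Hedged companion to task1() above: a task that makes the predicate true and
// wakes the waiter before the 2*sec deadline, so task1 reports done==true with
// the lock re-acquired. The names mtx, cv, ready, lock_type, sec and log()
// come from the snippet; task2() itself is an assumption for illustration.
void task2() {
    log("Starting task 2. Sleeping 1 sec, then setting ready.");
    std::this_thread::sleep_for(1 * sec);
    {
        lock_type lck(mtx);
        ready = true;          // predicate checked by task1's wait_for
    }
    cv.notify_one();           // task1 wakes with done==true
}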
void interruptible_wait(std::condition_variable& cv, std::unique_lock<std::mutex>& lk) {
    interruption_point();
    this_thread_interrupt_flag.set_condition_variable(cv);
    clear_cv_on_destruct guard;
    interruption_point();
    cv.wait_for(lk, std::chrono::milliseconds(1));
    interruption_point();
}
bool wait_for(const std::chrono::duration<Rep, Period>& time) {
    if (!flag.load(std::memory_order_consume)) {
        std::unique_lock<std::mutex> lock(mutex);
        return condition.wait_for(lock, time,
                                  std::bind(&request_t<Work, Result>::ready, this));
    }
    return true;  // result already available, nothing to wait for
}
void eat(int* part) {
    while (!stopped) {
        std::unique_lock<std::mutex> lock(mutex);
        BIG_PIE -= 100;
        *part += 100;
        cond.notify_one();
        cond.wait_for(lock, std::chrono::milliseconds(100));
    }
}
int waitForCompletion(long milliseconds) {
    std::unique_lock<std::mutex> lock(mMutex);
    if (mCompletedCount) {
        return true;
    }
    mCondition.wait_for(lock, std::chrono::milliseconds(milliseconds));
    return mCompletedCount;
}
void wait_and_pop_with_timeout(T& value) {
    std::unique_lock<std::mutex> lock(m_mutex);
    if (!m_data_available.wait_for(lock, std::chrono::seconds(1),
                                   [this] { return !m_queue.empty(); })) {
        return;
    }
    value = std::move(m_queue.front());
    m_queue.pop();
}
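// Hedged sketch of a push() that could pair with the timed pop above, reusing
// the member names m_mutex, m_queue and m_data_available from the snippet; the
// function itself is an assumption for illustration.
void push(T new_value) {
    std::lock_guard<std::mutex> lock(m_mutex);
    m_queue.push(std::move(new_value));
    m_data_available.notify_one();   // wake one waiter blocked in wait_and_pop_with_timeout()
}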
/**
 * This case tests if @c UserInactivityMonitor basic create function works properly
 */
TEST_F(UserInactivityMonitorTest, createSuccessfully) {
    std::mutex exitMutex;
    std::unique_lock<std::mutex> exitLock(exitMutex);
    EXPECT_CALL(*m_mockMessageSender, sendMessage(ResultOf(&checkMessageRequestAndReleaseTrigger, Eq(true))));
    auto userInactivityMonitor = UserInactivityMonitor::create(
        m_mockMessageSender, m_mockExceptionEncounteredSender, USER_INACTIVITY_REPORT_PERIOD);
    ASSERT_NE(nullptr, userInactivityMonitor);
    exitTrigger.wait_for(exitLock, USER_INACTIVITY_REPORT_PERIOD + USER_INACTIVITY_REPORT_PERIOD / 2);
}
void interruptible_wait(std::condition_variable& cv, std::unique_lock<std::mutex>& lk,
                        Predicate pred) {
    interruption_point();
    this_thread_interrupt_flag.set_condition_variable(cv);
    interrupt_flag::clear_cv_on_destruct guard;
    while (!this_thread_interrupt_flag.is_set() && !pred()) {
        cv.wait_for(lk, std::chrono::milliseconds(1));
    }
    interruption_point();
}
int ThreadSimSafeSpace(int nListenChanl) {
#if 0
    sockaddr_in AddrMainBd;
    nListenChanl = ::socket(PF_INET, SOCK_DGRAM, 0);
    if (INVALID_SOCKET == nListenChanl) {
        std::lock_guard<std::mutex> Lock(g_mutex_Printout);
        cout << "socket() error" << endl;
        return -1;
    }
    AddrMainBd.sin_family = AF_INET;
    AddrMainBd.sin_addr.s_addr = inet_addr("192.168.1.128");
    AddrMainBd.sin_port = htons(5001);

    CommTestFrame DataToSend;
    memset(&DataToSend, 0x00, sizeof(CommTestFrame));

    unsigned int nLoopCount = 0;
    uint32_t nPrevCounter = 0;
    while (1) {
        std::this_thread::sleep_for(std::chrono::milliseconds(5));
        nLoopCount++;
        DataToSend.StartFrame = htonl(STARTFRAME);
        DataToSend.EndFrame = htonl(ENDFRAME);
        DataToSend.nCounter = htonl(nLoopCount);

        // Send test-data to main B/D
        int nSendByte = ::sendto(nListenChanl, reinterpret_cast<char*>(&DataToSend),
                                 sizeof(CommTestFrame), 0,
                                 reinterpret_cast<struct sockaddr*>(&AddrMainBd),
                                 sizeof(struct sockaddr));
        if (SOCKET_ERROR == nSendByte) {
            cout << "[ThreadSimSafeSpace] fail to sendto" << endl;
        }

        {
            std::unique_lock<std::mutex> lock(g_mutex_UserTerminate);
            bool bRetValue = g_condition_UserTerminate.wait_for(
                lock, std::chrono::microseconds(0),
                []() { return (true == g_bTerminate); });
            if (true == bRetValue) {
                cout << "[ThreadSimSpaceReceiver] this loop ends" << endl;
                break;
            }
        }
    }
#endif
    return 0;
}
void run() {
    beast::setCurrentThreadName("Resource::Manager");
    for (;;) {
        logic_.periodicActivity();
        std::unique_lock<std::mutex> lock(mutex_);
        cond_.wait_for(lock, std::chrono::seconds(1));
        if (stop_)
            break;
    }
}
BluetoothObject *wait_for(std::chrono::milliseconds timeout) {
    if (result != nullptr)
        return result;
    if (!triggered) {
        waiting++;
        std::unique_lock<std::mutex> lk(lock);
        cv.wait_for(lk, timeout);
        waiting--;
    }
    return result;
}
int main() {
    std::thread t(test);
    std::unique_lock<std::mutex> lk(m);
    while (!is_ready) {
        cv.wait_for(lk, std::chrono::milliseconds(20));
        if (!is_ready)
            std::cout << "Spurious wake up!\n";
    }
    t.join();
}
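// Hedged sketch of the shared state and the test() thread that main() above
// starts and joins. The names m, cv and is_ready match the snippet (and would
// live above main() in a full program); the body is an assumption for
// illustration.
#include <chrono>
#include <condition_variable>
#include <mutex>
#include <thread>

std::mutex m;
std::condition_variable cv;
bool is_ready = false;

void test() {
    std::this_thread::sleep_for(std::chrono::milliseconds(50));
    {
        std::lock_guard<std::mutex> lk(m);
        is_ready = true;    // set under the same mutex main() holds while waiting
    }
    cv.notify_one();        // wake main(); its while loop filters spurious wakeups
}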
void put(T &&x, std::chrono::milliseconds time_out = INFINITE) {
    {
        auto_lock lock(mutex_);
        while (queue_.size() == max_size_)
            not_full_.wait_for(lock, time_out);  // wait on the lock, not the raw mutex
        assert(queue_.size() != max_size_);
        queue_.push_back(std::forward<T>(x));
    }
    not_empty_.notify_one();
}
/// the ticker function
void Tick(std::chrono::milliseconds period) {
    try {
        std::unique_lock<std::mutex> ul(tickingMutex_);
        while (std::cv_status::timeout == tickingCondition_.wait_for(ul, period)) {
            tickPublisher();
        }
    } catch (...) {
        exceptionPtr_ = std::current_exception();
    }
}
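// Hedged sketch of how the ticker above could be stopped: any notification on
// tickingCondition_ makes wait_for() return something other than
// cv_status::timeout, which ends the loop. This assumes Tick() is already
// waiting; a flag checked by the loop would be more robust. The member names
// mirror the snippet; StopTicking() itself is an assumption for illustration.
void StopTicking() {
    std::lock_guard<std::mutex> lock(tickingMutex_);  // serialise with the waiter
    tickingCondition_.notify_one();                   // Tick() sees no_timeout and returns
}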
/**
 * This case tests if multiple requests are being sent up to AVS.
 */
TEST_F(UserInactivityMonitorTest, sendMultipleReports) {
    InSequence s;
    std::mutex exitMutex;
    std::unique_lock<std::mutex> exitLock(exitMutex);
    int repetitionCount = 3;
    EXPECT_CALL(*m_mockMessageSender, sendMessage(ResultOf(&checkMessageRequest, Eq(true))))
        .Times(repetitionCount - 1);
    EXPECT_CALL(*m_mockMessageSender, sendMessage(ResultOf(&checkMessageRequestAndReleaseTrigger, Eq(true)))).Times(1);
    auto userInactivityMonitor = UserInactivityMonitor::create(
        m_mockMessageSender, m_mockExceptionEncounteredSender, USER_INACTIVITY_REPORT_PERIOD);
    ASSERT_NE(nullptr, userInactivityMonitor);
    exitTrigger.wait_for(exitLock, repetitionCount * USER_INACTIVITY_REPORT_PERIOD + USER_INACTIVITY_REPORT_PERIOD / 2);
}
bool fiber_waiter::wait_ready(std::chrono::steady_clock::duration timeout_duration) noexcept {
    if (gth_thread_type == thread_type::thread) {
        std::unique_lock<std::mutex> lk(m_thread_mutex);
        return m_thread_var.wait_for(lk, timeout_duration,
                                     [this] { return m_ready.load(std::memory_order_relaxed); });
    } else {
        std::unique_lock<boost::fibers::mutex> lk(m_fiber_mutex);
        return m_fiber_var.wait_for(lk, timeout_duration,
                                    [this] { return m_ready.load(std::memory_order_relaxed); });
    }
}
boost::optional<Json::Value> getMsg(std::chrono::milliseconds const& timeout) override {
    std::shared_ptr<msg> m;
    {
        std::unique_lock<std::mutex> lock(m_);
        if (!cv_.wait_for(lock, timeout, [&] { return !msgs_.empty(); }))
            return boost::none;
        m = std::move(msgs_.back());
        msgs_.pop_back();
    }
    return std::move(m->jv);
}