int main()
{
    {
        L1 lk(m0);
        std::thread t(f);
        assert(test1 == 0);
        while (test1 == 0)
            cv.wait(lk);
        assert(test1 != 0);
        test2 = 1;
        lk.unlock();
        cv.notify_one();
        t.join();
    }
    test1 = 0;
    test2 = 0;
    {
        L1 lk(m0);
        std::thread t(f);
        assert(test1 == 0);
        while (test1 == 0)
            cv.wait(lk);
        assert(test1 != 0);
        lk.unlock();
        t.join();
    }
}
void waits()
{
    std::unique_lock<std::mutex> lk(cv_m);
    std::cout << "Waiting... \n";
    cv.wait(lk, []{ return i == 1; });
    std::cout << "...finished waiting. i == 1\n";
}
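waits() implies a signalling side; a minimal sketch of it, assuming the same globals cv, cv_m, and i that the snippet references:

// Sketch of the signalling thread assumed by waits(); cv, cv_m, and i are
// the globals used in the snippet above.
void signals()
{
    std::this_thread::sleep_for(std::chrono::seconds(1));
    {
        std::lock_guard<std::mutex> lk(cv_m);
        i = 1;                 // make the predicate true under the lock
        std::cout << "Notifying...\n";
    }
    cv.notify_all();           // wake waits(); its predicate now holds
}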
/* Worker routine */
void ThreadPool::process()
{
    while (running) {
        try {
            threadMtx.lock();
            cond_var.wait(threadMtx);
            threadMtx.unlock();
            Message message = msgQueue.pop();  // take a message from the message queue
            int socket = message.getFd();
            // read the message length
            uint32 size = 0;
            Recv(socket, &size, sizeof(size), 0);
            // read the message body
            char *buffer = new char[size];
            Recv(socket, buffer, size, MSG_WAITALL);
            // store the body in the message
            message.setContent(buffer, size);
            delete[] buffer;
            parse(message);  // hand off to parse() for parsing
            rmWork();
        } catch (Exception &error) {
            error.exit();
        }
    }
}
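A sketch of the producer side this loop presumably pairs with, assuming msgQueue has a push() and that cond_var is a std::condition_variable_any (the loop above waits on a raw mutex); neither is confirmed by the snippet:

// Hypothetical enqueue path; msgQueue.push() and the method name addWork
// are assumptions, not a confirmed API.
void ThreadPool::addWork(const Message &message)
{
    msgQueue.push(message);  // hand the message to a worker
    cond_var.notify_one();   // wake one thread blocked in process()
}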
void ComeToWork()
{
    std::cout << "Hey security, please open the door!\n";
    g_Bell.notify_one();

    std::mutex mutex;
    mutex.lock();
    g_Door.wait(mutex);
    mutex.unlock();
}
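For context, a sketch of the matching guard thread, assuming g_Bell and g_Door are std::condition_variable_any (the snippet waits on a plain std::mutex, which only compiles for the _any variant); the function name is made up:

// Hypothetical counterpart to ComeToWork(): the guard waits for the bell,
// then opens the door. It mirrors the same lock-and-wait pattern as above.
void SecurityGuard()
{
    std::mutex m;
    m.lock();
    g_Bell.wait(m);      // wait for an employee to ring
    m.unlock();
    g_Door.notify_one(); // let the employee in
}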
void wait()
{
    std::unique_lock<std::mutex> lock(mutex_);
    //while (!count_)
    while (!work_waiting)
        condition_.wait(lock);
    work_waiting = false;
    //--count_;
}
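A plausible notify() counterpart for this flag-based wait, assuming the same mutex_, condition_, and work_waiting members; a sketch, not the original code:

void notify()
{
    std::lock_guard<std::mutex> lock(mutex_);
    work_waiting = true;     // publish the flag under the lock
    condition_.notify_one(); // wake one thread blocked in wait()
}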
void f()
{
    L1 lk(m0);
    assert(test2 == 0);
    test1 = 1;
    cv.notify_one();
    cv.wait(lk, Pred(test2));
    assert(test2 != 0);
}
void f2()
{
    L1 lk(m0);
    assert(test2 == 0);
    while (test2 == 0)
        cv.wait(lk);
    assert(test2 == 1);
    test2 = 2;
}
void f1()
{
    L1 lk(m0);
    assert(test1 == 0);
    while (test1 == 0)
        cv.wait(lk);
    assert(test1 == 1);
    test1 = 2;
}
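These test helpers (and the main() above) rely on shared declarations that the snippets omit; a minimal set that makes them compile, with Pred's shape inferred from its use in f():

// Assumed surrounding declarations for the test snippets; the typedef L1
// and the Pred functor are inferred from usage, not taken from a source.
#include <cassert>
#include <condition_variable>
#include <mutex>
#include <thread>

typedef std::unique_lock<std::mutex> L1;

std::mutex m0;
std::condition_variable cv;
int test1 = 0;
int test2 = 0;

struct Pred {
    int& i_;
    explicit Pred(int& i) : i_(i) {}
    bool operator()() const { return i_ != 0; }
};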
//---------------------------------------------------------------------------
//  function : wait_no_readers
/// @brief Waits until the number of readers on the data structure is 0
//---------------------------------------------------------------------------
void wait_no_readers()
{
    struct shuttle
    {
        rw_mutex_data &r;
        shuttle(rw_mutex_data &Alfa) : r(Alfa) {}
        bool operator()(void) { return r.no_readers(); }
    } S(*this);
    cv_no_readers.wait(mtx_no_readers, S);
}
void insert(T t)
{
    std::unique_lock<M> lock{mutex};
    producers.wait(lock, [this]() { return begin != (end + 1) % SIZE; });
    buffer[end] = t;
    end = (end + 1) % SIZE;
    consumers.notify_all();
}
//---------------------------------------------------------------------------
//  function : lock_read
/// @brief Locks for a read operation; blocks until the read lock is taken.
///        The operation is atomic
//---------------------------------------------------------------------------
void lock_read(void)
{
    //--------------------- begin ----------------------------
    struct shuttle
    {
        rw_mutex_data &r;
        shuttle(rw_mutex_data &Alfa) : r(Alfa) {}
        bool operator()(void) { return r.try_lock_read(); }
    } S(*this);
    cv_read.wait(mtx_read, S);
}
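Both shuttle functors above predate routine lambda use; with C++11 lambdas the same wait reads more compactly. A sketch assuming lock_read() is a member of rw_mutex_data, as its use of *this suggests:

// Equivalent formulation of lock_read() with a lambda replacing the
// hand-written shuttle functor; member names come from the snippet above.
void lock_read(void)
{
    cv_read.wait(mtx_read, [this] { return try_lock_read(); });
}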
// Worker thread
void HttpClient::networkThread()
{
    auto scheduler = Director::getInstance()->getScheduler();

    while (true) {
        HttpRequest *request;

        // step 1: send http request if the requestQueue isn't empty
        {
            std::lock_guard<std::mutex> lock(s_requestQueueMutex);
            while (s_requestQueue->empty()) {
                s_SleepCondition.wait(s_requestQueueMutex);
            }
            request = s_requestQueue->at(0);
            s_requestQueue->erase(0);
        }

        if (request == s_requestSentinel) {
            break;
        }

        // step 2: libcurl sync access
        // Create a HttpResponse object; its default state is "http access failed"
        HttpResponse *response = new (std::nothrow) HttpResponse(request);
        processResponse(response, s_errorBuffer);

        // add the response packet into the queue
        s_responseQueueMutex.lock();
        s_responseQueue->pushBack(response);
        s_responseQueueMutex.unlock();

        if (nullptr != s_pHttpClient) {
            scheduler->performFunctionInCocosThread(CC_CALLBACK_0(HttpClient::dispatchResponseCallbacks, this));
        }
    }

    // cleanup: if the worker thread received the quit signal, clean up the uncompleted request queue
    s_requestQueueMutex.lock();
    s_requestQueue->clear();
    s_requestQueueMutex.unlock();

    if (s_requestQueue != nullptr) {
        delete s_requestQueue;
        s_requestQueue = nullptr;
        delete s_responseQueue;
        s_responseQueue = nullptr;
    }
}
T extract()
{
    std::unique_lock<M> lock{mutex};
    consumers.wait(lock, [this]() { return begin != end; });
    T t = buffer[begin];
    begin = (begin + 1) % SIZE;
    producers.notify_all();
    return t;
}
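insert() and extract() above form a classic bounded ring buffer. A short usage sketch, assuming they are members of a class template holding the mutex, the two condition variables, and the SIZE-element buffer; the name bounded_buffer is made up here:

// Hypothetical driver for the insert()/extract() pair; bounded_buffer is an
// assumed name for the class the two methods belong to.
#include <iostream>
#include <thread>

int main()
{
    bounded_buffer<int> q;
    std::thread producer([&] {
        for (int i = 0; i < 100; ++i)
            q.insert(i);                      // blocks while the ring is full
    });
    std::thread consumer([&] {
        for (int i = 0; i < 100; ++i)
            std::cout << q.extract() << '\n'; // blocks while the ring is empty
    });
    producer.join();
    consumer.join();
}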
void enqueue_io(IoRef &&io)
{
    int id = (io->offset / (OBJECT_SIZE / BLOCK_SIZE)) % concurrence;
    std::unique_lock<std::mutex> lock(io_datas[id]->mtx);
    lock.unlock();
    io_datas[id]->cond.notify_one();
    perf.io_started++;
    // throttle the producer when too many IOs are in flight
    if (perf.get_io_flight() > io_capacity) {
        std::unique_lock<std::mutex> prod_lock(prod_mtx);
        prod_cond.wait(prod_lock);
    }
}
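The final wait throttles submission, so presumably the completion path wakes it. A sketch of that side, assuming perf tracks in-flight IOs and the same prod_mtx/prod_cond pair; none of this is confirmed by the snippet:

// Hypothetical completion handler; perf.io_finished and the function name
// are assumptions mirroring the snippet above.
void on_io_complete()
{
    perf.io_finished++;
    if (perf.get_io_flight() <= io_capacity) {
        std::lock_guard<std::mutex> lock(prod_mtx);
        prod_cond.notify_one(); // release a producer blocked in enqueue_io()
    }
}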
void Worker::run()
{
    QueryQueue query_queue;
    QueryPtr follower_query;

    while (working_) {
        assert(query_queue.empty());
        {
            std::unique_lock<std::mutex> lock(mutex_);
            if (!follower_query_ && query_queue_.empty()) {
                cond_.wait(lock);
            }
            if (follower_query_) {
                follower_query = follower_query_;
                follower_query_.reset();
            }
            if (!query_queue_.empty()) {
                query_queue_.swap(query_queue);
            }
        }
        while (!query_queue.empty()) {
            handle_query(query_queue.front());
            query_queue.pop_front();
        }
        if (follower_query != nullptr) {
            handle_query(follower_query);
            follower_query.reset();
            on_query_finished(shared_from_this());
        }
    }
}
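A sketch of the submission side that would pair with Worker::run(), assuming the same mutex_, cond_, and query_queue_ members and a deque-like QueryQueue; the method name submit is made up:

// Hypothetical producer for Worker::run(); only the member names are taken
// from the snippet above.
void Worker::submit(QueryPtr query)
{
    {
        std::lock_guard<std::mutex> lock(mutex_);
        query_queue_.push_back(std::move(query));
    }
    cond_.notify_one(); // wake the worker if it is blocked in cond_.wait()
}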
void wait(std::condition_variable_any& cv, Lockable& lk)
{
    struct custom_lock
    {
        interrupt_flag* self;
        Lockable& lk;

        custom_lock(interrupt_flag* self_, std::condition_variable_any& cond, Lockable& lk_)
            : self(self_)
            , lk(lk_)
        {
            self->set_clear_mutex.lock();
            self->thread_cond_any = &cond;
        }

        void unlock()
        {
            lk.unlock();
            self->set_clear_mutex.unlock();
        }

        void lock()
        {
            std::lock(self->set_clear_mutex, lk);
        }

        ~custom_lock()
        {
            self->thread_cond_any = 0;
            self->set_clear_mutex.unlock();
        }
    };

    custom_lock cl(this, cv, lk);
    interruption_point();
    cv.wait(cl);
    interruption_point();
}
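This is the interruptible-wait pattern built on std::condition_variable_any, with custom_lock bridging the user's lock and the flag's set_clear_mutex. A sketch of the free function that typically forwards to it, assuming a thread-local interrupt_flag named this_thread_interrupt_flag as in the pattern's usual presentation:

// Hedged sketch: this_thread_interrupt_flag is assumed to be the calling
// thread's interrupt_flag instance.
template <typename Lockable>
void interruptible_wait(std::condition_variable_any& cv, Lockable& lk)
{
    this_thread_interrupt_flag.wait(cv, lk);
}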
// Main routine
void run(const std::string& url, const std::string& name)
{
    // Register listeners for connection and error events
    client.set_close_listener(std::bind(&SampleClient::on_close, this));
    client.set_fail_listener(std::bind(&SampleClient::on_fail, this));
    client.set_open_listener(std::bind(&SampleClient::on_open, this));

    // Issue the connection request
    client.connect(url);

    {
        // Wait until the connection handling, which runs on another thread, has finished
        std::unique_lock<std::mutex> lock(sio_mutex);
        while (!is_connected) {
            sio_cond.wait(lock);
        }
    }

    // Register the listener for the "run" command
    socket = client.socket();
    socket->on("run", std::bind(&SampleClient::on_run, this, std::placeholders::_1));

    {
        sio::message::ptr send_data(sio::object_message::create());
        std::map<std::string, sio::message::ptr>& map = send_data->get_map();

        // Set the object's "type" and "name" members
        map.insert(std::make_pair("type", sio::string_message::create("native")));
        map.insert(std::make_pair("name", sio::string_message::create(name)));

        // Send the "join" command to the server
        socket->emit("join", send_data);
    }

    while (true) {
        // If the event queue is empty, wait until it is refilled
        std::unique_lock<std::mutex> lock(sio_mutex);
        while (sio_queue.empty()) {
            sio_cond.wait(lock);
        }

        // Take the queued data off the event queue
        sio::message::ptr recv_data(sio_queue.front());
        std::stringstream output;
        char buf[1024];
        FILE* fp = nullptr;

        // Get the value of the object's "command" member
        std::string command = recv_data->get_map().at("command")->get_string();
        std::cout << "run:" << command << std::endl;

        // Run the command and capture its output as a string
        if ((fp = popen(command.c_str(), "r")) != nullptr) {
            while (!feof(fp)) {
                size_t len = fread(buf, 1, sizeof(buf), fp);
                output << std::string(buf, len);
            }
            pclose(fp);
        } else {
            // On failure, capture the error message instead
            output << strerror(errno);
        }

        sio::message::ptr send_data(sio::object_message::create());
        std::map<std::string, sio::message::ptr>& map = send_data->get_map();

        // Put the command's output into the object's "output" member
        map.insert(std::make_pair("output", sio::string_message::create(output.str())));

        // Send the sio::message to the server
        socket->emit("reply", send_data);

        // Remove the handled event from the queue
        sio_queue.pop();
    }
}