void ChunkModelFactory::worker() {
    while (1) {
        std::unique_lock<std::mutex> ulock(mutex);
        chunks_condition.wait(ulock, [&]{ return !chunks.empty(); });
        auto it = chunks.begin();
        auto position = *it;
        chunks.erase(it);
        auto itr = model_data.find(position);
        if (itr == model_data.end())
            continue;
        auto data = itr->second;
        model_data.erase(itr);
        ulock.unlock();
        auto result = compute_chunk(data, block_data);
        if (result->size > 0) {
            std::lock_guard<std::mutex> lock(mutex);
            models.push_back(result);
            created++;
            processed++;
        } else {
            std::lock_guard<std::mutex> lock(mutex);
            empty++;
            processed++;
        }
    }
}
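// The enqueueing side of this worker is not shown; a minimal sketch of what
// it could look like (the method name create_chunk and the parameter types
// are assumptions; chunks, model_data, mutex, and chunks_condition are the
// members used by the worker above):
void ChunkModelFactory::create_chunk(const Position& position, const ChunkData& data) {
    {
        std::lock_guard<std::mutex> lock(mutex);
        chunks.insert(position);        // queue the position for the worker
        model_data[position] = data;    // stash the data it will consume
    }
    chunks_condition.notify_one();      // wake one waiting worker
}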
long waitFor(long sequence, const Sequence& cursor, const SequenceGroup& dependent,
             const AlertableBarrier& barrier) {
    long availableSequence;
    if ((availableSequence = cursor.get()) < sequence) {
        std::unique_lock<std::mutex> ulock(m_mutex);
        while ((availableSequence = cursor.get()) < sequence) {
            barrier.checkAlert();
            std::cv_status status =
                m_condition.wait_for(ulock, std::chrono::microseconds(m_timeout_micros));
            if (status == std::cv_status::timeout) {
                break;
            }
        }
    }
    if (!dependent.isEmpty()) {
        while ((availableSequence = dependent.get()) < sequence) {
            barrier.checkAlert();
        }
    }
    return availableSequence;
}
// This only returns when either:
// 1. we should stop, OR
// 2. a call arrives, OR
// 3. we have waited more than _max_wait_rep times.
// Returns - true : a call arrived
//         - false: we should stop, or
//                  we waited more than _max_wait_rep times
bool Client::Connection::waitForWork() {
    int rep = 0;
    while (rep < _max_wait_rep) {
        std::unique_lock<std::mutex> ulock(_mutex_conn);
        if (_cond_conn.wait_for(ulock, chrono::milliseconds(_call_wait_time),
                                [this] { return _calls.size() > 0; })) {
            // Reset rep to 0 and start counting again;
            // this tracks the last active time.
            rep = 0;
            return true;
        } else if (_should_stop) {
            return false;
        }
        rep++;
    }
    // Mark the connection as closed: we have been waiting and no
    // call has arrived.
    markClosed();
    return false;
}
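// A hypothetical run loop that could drive waitForWork() (a sketch, not
// part of the original source): keep serving calls while the connection is
// active, then tear down once waitForWork() reports stop or idle timeout.
// processPendingCalls() is an assumed helper; cleanupCalls() is shown
// further below in this listing.
void Client::Connection::run() {
    while (waitForWork()) {
        processPendingCalls();  // hypothetical helper that drains _calls
    }
    cleanupCalls();
}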
/*protected virtual */void ASDDeviceTransceiver::update(ptr_type device) {
    DataRec data;
    tgs::TGSError error;
    boost::upgrade_lock<boost::shared_mutex> ulock(_mutex);
    data = _data;
    if ((error = device->getFrequencySender(&data.frequencySender)) == tgs::TGSERROR_NO_RESULT) {
        data.frequencySender = -1;
    } else if (error != tgs::TGSERROR_OK) {
        artsatd::getInstance().log(LOG_WARNING,
            "TGSTransceiverInterface getFrequencySender error [%s]", error.print().c_str());
    }
    if ((error = device->getFrequencyReceiver(&data.frequencyReceiver)) == tgs::TGSERROR_NO_RESULT) {
        data.frequencyReceiver = -1;
    } else if (error != tgs::TGSERROR_OK) {
        artsatd::getInstance().log(LOG_WARNING,
            "TGSTransceiverInterface getFrequencyReceiver error [%s]", error.print().c_str());
    }
    if (data.frequencySender != _data.frequencySender ||
        data.frequencyReceiver != _data.frequencyReceiver) {
        boost::upgrade_to_unique_lock<boost::shared_mutex> wlock(ulock);
        _data = data;
    }
    return;
}
void SocketManager::Close(ConnectionID connectionID) {
    {
        std::lock_guard<std::mutex> ulock(m_eventQueueMutex);
        m_closeQueue.push(connectionID);
    }
    NotifyMe();
}
void semaphore::wait() {
#if 1
    {
        std::unique_lock<std::mutex> ulock(mutex_);
        signal_lock_.wait(ulock, [&] {
            return signal_count_ >= min_signal_count_ || shutdown_;
        });
        if (signal_count_ >= min_signal_count_) {
            signal_count_ -= min_signal_count_;
        }
    }
#else
    while (true) {
        if (signal_count_ >= min_signal_count_) {
            mutex_.lock();
            if (signal_count_ >= min_signal_count_) {
                signal_count_ -= min_signal_count_;
                mutex_.unlock();
                break;
            }
            mutex_.unlock();
        }
        if (shutdown_) {
            break;
        }
    }
#endif
}
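// A minimal usage sketch (not from the original source) pairing this wait()
// with the signal() shown at the end of this listing. It assumes the
// semaphore is default-constructible with a min_signal_count_ of 1; both
// are assumptions about the class, not facts from the snippet.
#include <thread>

void semaphore_example() {
    semaphore sem;                 // assumed default-constructible
    std::thread consumer([&] {
        sem.wait();                // blocks until signalled (or shutdown)
        // ... consume the published work ...
    });
    // ... publish work visible to the consumer ...
    sem.signal(1);                 // wake the waiter
    consumer.join();
}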
/** Removes the elements in the range [first, last) (unique_lock
 *  access). */
void erase(size_type first, size_type last) {
    unique_lock_type ulock(mut_);
    const_iterator it_first = vect_.cbegin();
    const_iterator it_last = it_first + last;
    it_first += first;
    // vector::erase takes iterators, not indices.
    vect_.erase(it_first, it_last);
}
bool LockDirectory(const fs::path& directory, const std::string lockfile_name, bool probe_only) {
    std::lock_guard<std::mutex> ulock(cs_dir_locks);
    fs::path pathLockFile = directory / lockfile_name;

    // If a lock for this directory already exists in the map, don't try to re-lock it
    if (dir_locks.count(pathLockFile.string())) {
        return true;
    }

    // Create empty lock file if it doesn't exist.
    FILE* file = fsbridge::fopen(pathLockFile, "a");
    if (file) fclose(file);

    try {
        auto lock = MakeUnique<boost::interprocess::file_lock>(pathLockFile.string().c_str());
        if (!lock->try_lock()) {
            return false;
        }
        if (!probe_only) {
            // Lock successful and we're not just probing, put it into the map
            dir_locks.emplace(pathLockFile.string(), std::move(lock));
        }
    } catch (const boost::interprocess::interprocess_exception& e) {
        return error("Error while attempting to lock directory %s: %s",
                     directory.string(), e.what());
    }
    return true;
}
// Ensure only one call is sending at a time.
void Client::Connection::sendCall(Call* call) {
    std::unique_lock<std::mutex> ulock(_mutex_send_call);
    if (!call->write()) {
        Log::write(ERROR, "Client::sendCall: Failed\n");
    }
}
void Stats::PrintStats() {
    std::stringstream ss;
    std::unique_lock<std::mutex> ulock(mtx_);
    ss << "======= Total Duration (sum of all thread durations) "
       << total_time_ << " =========\n";
    for (int i = 0; i < num_items_; ++i) {
        int table_id = i / kStatsTypeName.size();
        StatsType type = static_cast<StatsType>(i % kStatsTypeName.size());
        //LOG(INFO) << "table_id = " << table_id << " type = " << type;
        if (CheckSelected(table_id, type)) {
            if (table_id != 0) {
                ss << "Table " << table_id << "\t"
                   << kStatsTypeName[static_cast<int>(type)]
                   << "\tcount: " << call_counts_[i]
                   << "\ttime: " << times_[i]
                   << " (" << times_[i] / total_time_ * 100. << "%)" << std::endl;
            } else {
                // Don't print Table id.
                ss << kStatsTypeName[static_cast<int>(type)]
                   << "\tcount: " << call_counts_[i]
                   << "\ttime: " << times_[i]
                   << " (" << times_[i] / total_time_ * 100. << "%)" << std::endl;
            }
        }
    }
    ss << "==========================================================="
       << "===========";
    LOG(INFO) << "\n" << ss.str();
}
void SocketManager::CreateEvent(EventFunction fun) {
    {
        std::lock_guard<std::mutex> ulock(m_eventQueueMutex);
        m_eventQueue.push(std::move(fun));
    }
    NotifyMe();
}
int lock_init(lock_t *lock) {
    ulock(lock);
    return -1;
}
bool pop(ByteBuffer& ptr) {
    WriteLock ulock(m_mutex);
    if (m_queue.empty())
        return false;
    ptr = m_queue.front();
    m_queue.pop_front();
    return true;
}
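// The matching push side is not shown; a minimal sketch under the same
// members (m_mutex, m_queue) and lock alias, with the function name assumed:
void push(const ByteBuffer& ptr) {
    WriteLock ulock(m_mutex);
    m_queue.push_back(ptr);
}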
void Stats::FinalizeStats() {
    std::unique_lock<std::mutex> ulock(mtx_);
    for (int i = 0; i < num_items_; ++i) {
        call_counts_[i] += (thread_data_->call_counts)[i];
        times_[i] += (thread_data_->times)[i];
    }
    if (thread_data_->add_to_total_time)
        total_time_ += thread_data_->total_time.elapsed();
}
int main(){ printf("进程标识(PID): %d\n",getpid()); int fd = open("lock.txt",O_RDWR | O_CREAT | O_TRUNC,0664); if(fd == -1){ perror("open"); return -1; } const char* text = "ABCDEFGHIJKLMNOPQR"; if(write(fd,text,strlen(text) * sizeof(text[0])) == -1){ perror("write"); return -1; } //对EFGH加读锁 printf("对EFGH加读锁"); //文件头从0开始 if(rlock(fd,4,4,0) == -1){ printf("失败:%m\n"); return -1; } printf("成功!\n"); //对MNOP加写锁 printf("对MNOP加写锁"); if(wlock(fd,12,4,0) == -1){ printf("失败:%m\n"); return -1; } printf("成功!\n"); printf("按<回车>,解锁MN..."); getchar(); //解锁MN ulock(fd,12,2); printf("按<回车>,解锁EFGH..."); getchar(); //解锁EFGH ulock(fd,4,4); /** * 只要文件描述符一关,什么锁就都没有了。系统内核会自动为你解锁,文件表会被删掉 * 你加的锁也会从v节点表的锁的链表中删掉 * 因为只要文件描述符一关,文件描述符和文件指针的对应就会被删掉, * 文件表也就没有了,对应v节点表中的锁的链表也就没有了 */ close(fd); return 0; }
SAM::StreamSession& StreamSessionAdapter::SessionHolder::getSession() {
    boost::upgrade_lock<mutex_type> lock(mtx_);
    if (session_->isSick()) {
        boost::upgrade_to_unique_lock<mutex_type> ulock(lock);
        heal();
    }
    return *session_;
}
void SocketManager::Write(ConnectionID connectionID, const RawBuffer &rawBuffer) {
    WriteBuffer buffer;
    buffer.connectionID = connectionID;
    buffer.rawBuffer = rawBuffer;
    {
        std::lock_guard<std::mutex> ulock(m_eventQueueMutex);
        m_writeBufferQueue.push(buffer);
    }
    NotifyMe();
}
void SocketManager::Write(ConnectionID connectionID, const SendMsgData &sendMsgData) {
    WriteData data;
    data.connectionID = connectionID;
    data.sendMsgData = sendMsgData;
    {
        std::lock_guard<std::mutex> ulock(m_eventQueueMutex);
        m_writeDataQueue.push(data);
    }
    NotifyMe();
}
int MapUpdater::wait() {
    std::unique_lock<std::mutex> ulock(Lock);
    while (pending_requests > 0)
        condition.wait(ulock);
    ulock.unlock();
    return 0;
}
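// The notifying counterpart is not shown; a sketch of what it could look
// like (the method name requestDone() is an assumption; Lock,
// pending_requests, and condition are the members used by wait() above):
void MapUpdater::requestDone() {
    std::unique_lock<std::mutex> ulock(Lock);
    if (--pending_requests == 0)
        condition.notify_all();  // release every thread blocked in wait()
}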
// Connection is closing; erase all calls associated with it.
void Client::Connection::cleanupCalls() {
    std::unique_lock<std::mutex> ulock(_mutex_conn);
    map<int, shared_ptr<Call>>::iterator iter = _calls.begin();
    while (iter != _calls.end()) {
        // erase() invalidates the iterator, so advance via its return value.
        iter = _calls.erase(iter);
    }
}
void threadPool::queue(boost::function<void()> func) {
    boost::unique_lock<boost::mutex> ulock(mQueueReady_);
    while (!isQueueReady()) {
        isQueueReady_.wait(ulock);
    }
    boost::lock_guard<boost::mutex> lock(m_);
    queue_.push_back(func);
}
void consumer(int demand) {
    while (true) {
        std::unique_lock<std::mutex> ulock(mutex);
        condvar.wait(ulock, [] { return msgQueue.size() > 0; });
        // The predicate overload of wait() makes an explicit double-check
        // after waking unnecessary.
        printf("Consume message %d\n", msgQueue.front());
        msgQueue.pop();
        --demand;
        if (!demand)
            break;
    }
}
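// The matching producer is not shown; a minimal sketch under the same
// globals (mutex, condvar, msgQueue), with the function name and message
// values assumed:
void producer(int count) {
    for (int i = 0; i < count; ++i) {
        {
            std::lock_guard<std::mutex> guard(mutex);
            msgQueue.push(i);
        }
        // Notify outside the lock so the woken consumer can acquire it
        // immediately.
        condvar.notify_one();
    }
}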
void DWThreadPool::stop() {
    {
        std::unique_lock<std::mutex> ulock(_queue_mutex);
        _is_running = false;
        _not_empty.notify_all();
    }
    for (auto &iter : _threads) {
        iter.join();
    }
}
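// A sketch of the worker loop this stop() could pair with (assumed, not
// from the source; the _tasks queue and Task type are illustrative):
// workers sleep on _not_empty and re-check _is_running after every wakeup,
// so notify_all() in stop() lets them all drain and exit.
void DWThreadPool::worker() {
    while (true) {
        Task task;  // hypothetical task type
        {
            std::unique_lock<std::mutex> ulock(_queue_mutex);
            _not_empty.wait(ulock, [this] {
                return !_tasks.empty() || !_is_running;
            });
            if (!_is_running && _tasks.empty())
                return;                       // shut down once drained
            task = std::move(_tasks.front());
            _tasks.pop();
        }
        task();                               // run outside the lock
    }
}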
void SocketManager::ProcessQueue() {
    while (1) {
        EventFunction fun;
        {
            std::lock_guard<std::mutex> ulock(m_eventQueueMutex);
            if (m_eventQueue.empty())
                return;
            fun = std::move(m_eventQueue.front());
            m_eventQueue.pop();
        }
        fun();
    }
}
/*protected virtual */void ASDDeviceRotator::update(ptr_type device) {
    DataRec data;
    tgs::TGSError error;
    boost::upgrade_lock<boost::shared_mutex> ulock(_mutex);
    data = _data;
    if ((error = device->getAngle(&data.azimuth, &data.elevation)) != tgs::TGSERROR_OK) {
        artsatd::getInstance().log(LOG_WARNING,
            "TGSRotatorInterface getAngle error [%s]", error.print().c_str());
    }
    if (data.azimuth != _data.azimuth || data.elevation != _data.elevation) {
        boost::upgrade_to_unique_lock<boost::shared_mutex> wlock(ulock);
        _data = data;
    }
    return;
}
void Client::removeConnection(shared_ptr<tcp::endpoint> ep) {
    std::unique_lock<std::mutex> ulock(_mutex_client);
    stringstream ss;
    ss << ep->address().to_string() << ":" << ep->port();
    // map::erase returns the number of elements removed, so a non-zero
    // result means the connection was found and erased.
    if (_connections.erase(ss.str())) {
        Log::write(INFO, "Erased connection to <%s:%d>\n",
                   ep->address().to_string().c_str(), ep->port());
    } else {
        Log::write(ERROR, "Failed to erase connection to <%s:%d>\n",
                   ep->address().to_string().c_str(), ep->port());
    }
}
void LibDNNBlas<MItype, MOtype>::dot(const uint_tp n, vptr<const MItype> x,
                                     vptr<const MItype> y, MOtype* out,
                                     const QuantizerValues* const x_quant,
                                     const QuantizerValues* const y_quant,
                                     const QuantizerValues* const out_quant) {
    string identifier = dot_string_identifier();
    int_tp id = get_id(identifier);
    if (id < 0) {
        id = get_id_or_new(identifier);
    }
    shared_ptr<LibDNNTuner> tuner = program_tuners_[id];
    shared_ptr<DeviceProgram> program = programs_[id];

    boost::shared_lock<boost::shared_mutex> lock(program_mutex_);
    if (!program_ready_[id]) {
        lock.unlock();
        // Compiling a new kernel has to take the program lock exclusively.
        boost::unique_lock<boost::shared_mutex> ulock(program_mutex_);
        if (!program_ready_[id]) {
            stringstream ss;
            ss << generate_dot_source(program, tuner);
            program->set_source(ss.str());
            program->Compile(true, true);
            program_ready_[id] = true;
        }
        ulock.unlock();
        lock.lock();
    }
    lock.unlock();

    int_tp buffer_id = -1;
    vector<int_tp> buffer_shape(1, 1);
    shared_ptr<Blob<MOtype> > buff =
        this->dev_ptr_->template Buffer<MOtype>(buffer_shape, &buffer_id);
    vptr<MOtype> gpu_out = buff->mutable_gpu_data();

    shared_ptr<DeviceKernel> kernel = program->GetKernel("libdnn_dot");
    vector<size_t> group(1, 1);
    vector<size_t> local(1, 1);
    kernel->add_arg(&n);
    kernel->add_arg(&x);
    kernel->add_arg(&y);
    kernel->add_arg(&gpu_out);
    kernel->Execute(group, local);

    this->dev_ptr_->template copy<MOtype>(1, gpu_out, out);
    this->dev_ptr_->unlock_buffer(&buffer_id);
}
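// The snippet above uses a double-checked pattern over a reader/writer
// lock: readers take a shared lock on the fast path and only fall back to
// an exclusive lock, re-checking the ready flag, when compilation is
// needed. A minimal standalone sketch of the same idea with C++17's
// std::shared_mutex (the names here are illustrative, not from the
// library):
#include <shared_mutex>

std::shared_mutex ready_mutex;
bool ready = false;

void ensure_compiled() {
    {
        std::shared_lock<std::shared_mutex> lock(ready_mutex);
        if (ready) return;          // fast path: already compiled
    }
    std::unique_lock<std::shared_mutex> ulock(ready_mutex);
    if (!ready) {                   // re-check: another thread may have won
        // ... compile the kernel ...
        ready = true;
    }
}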
shared_ptr<Client::Connection> Client::getConnection(shared_ptr<tcp::endpoint> ep,
                                                     shared_ptr<Call> call) {
    std::unique_lock<std::mutex> ulock(_mutex_client);
    shared_ptr<Client::Connection> conn;
    try {
        stringstream ss;
        ss << ep->address().to_string() << ":" << ep->port();
        map<string, shared_ptr<Client::Connection>>::iterator iter =
            _connections.find(ss.str());
        if (iter == _connections.end()) {
            conn = make_shared<Client::Connection>(ep, _last_connection_index++);
            _connections.insert(pair<string, shared_ptr<Client::Connection>>(ss.str(), conn));
            Log::write(DEBUG, "Create new connection %s\n", conn->toString().c_str());
            conn->setClient(this);
        } else {
            conn = iter->second;
            Log::write(DEBUG, "Reuse connection %s\n", conn->toString().c_str());
        }
        // only connect if it's not connected
        conn->connect(ep, call);
    } catch (exception& e) {
        Log::write(ERROR, "Failed to retrieve/create connection : %s\n", e.what());
        return NULL;
    }
    if (!conn->addCall(call)) {
        Log::write(ERROR, "FATAL: can not insert call into _calls. is it full !?");
        return NULL;
    }
    call->setConnection(conn);
    return conn;
}
//-------------------------------------------------------------
//
void NetServer::handle_accept(ISessionPtr session, const boost::system::error_code& error) {
    if (!error) {
        WriteLock ulock(m_mutex);
        m_mapSession[session->getId()] = session;
        session->bindReceiveHandle(boost::bind(&NetServer::onReciveHandle, this, _1, _2));
        session->bindErrorHandle(boost::bind(&NetServer::onErrorHandle, this, _1, _2));
        session->start();
    } else {
        throwError(error);
    }
    start_accept();
}
void semaphore::signal(const size_t signal_count) {
#if 1
    {
        std::unique_lock<std::mutex> ulock(mutex_);
        if (signal_count_ + signal_count <= max_signal_count_) {
            signal_count_ += signal_count;
        }
    }
    signal_lock_.notify_all();
#else
    mutex_.lock();
    if (signal_count_ + signal_count <= max_signal_count_) {
        signal_count_ += signal_count;
    }
    mutex_.unlock();
#endif
}