void thread_function_increase()
{
    for (int i = 0; i < 3; i++) {
        if (g_counter_mutex.try_lock()) //g_counter_mutex.lock();
        {
            ++g_counter;
            cout << this_thread::get_id() << ": " << i << endl;
            g_counter_mutex.unlock();
            this_thread::sleep_for(std::chrono::seconds(2));
        }
    }
}
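// Usage sketch for thread_function_increase() above (not from the original source).
// Assumes the file-scope globals the function refers to exist, e.g.
//   std::mutex g_counter_mutex;  int g_counter = 0;
// and that <iostream>, <mutex>, <thread> are included with `using namespace std;`.
int main()
{
    thread t1(thread_function_increase);
    thread t2(thread_function_increase);
    t1.join();
    t2.join();
    // At most 6: iterations whose try_lock() failed were simply skipped.
    cout << "final g_counter = " << g_counter << endl;
    return 0;
}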
// Thread to import data from the USRP.
// Note: the arrays hold complex samples, so their size is 2*buffer_size.
void usrpGetData(uhd::rx_streamer::sptr rx_stream, uhd::usrp::multi_usrp::sptr dev,
                 size_t buffer_size, board_60GHz_RX *my_60GHz_RX)
{
    // Raise the priority of this thread
    int which = PRIO_PROCESS;
    id_t pid;
    int priority = -20;
    int ret;

    pid = getpid();
    ret = setpriority(which, pid, priority);
    if (ret != 0) {
        std::cout << "Main priority went wrong in usrpT: " << ret << std::endl;
    }

    // Create storage for a single buffer from the USRP
    short *buff_short;
    buff_short = new short[2 * buffer_size];

    size_t n_rx_last;
    uhd::rx_metadata_t md;
    //int time = buffer_size / 25 - 100; // microseconds

    while (1) {
        n_rx_last = 0;

        // Fill buff_short
        while (n_rx_last == 0) {
            n_rx_last = rx_stream->recv(&buff_short[0], buffer_size, md, 3.0);
            std::this_thread::yield(); // Avoid active waiting
        }

        // Check for overflow
        if (n_rx_last != buffer_size) {
            std::cerr << "I expect the buffer size to be always the same!\n";
            std::cout << "Read only:" << n_rx_last << "\n";
            std::cout << "Buffer:" << buffer_size << "\n";
            //exit(1);
        } else {
            // Add the just-received buffer to the queue
            mtxUsrp.lock();
            usrpQ.push(buff_short);
            mtxUsrp.unlock();

            // Switch to a fresh memory block for the next receive
            buff_short = new short[2 * buffer_size];

            // Signal the detection part that data is ready
            sem_post(&usrpReady);
        }
    } // end while (1)
}
void send_logs()
{
    static std::mutex mtx;
    mtx.lock();

    Logger::LogPath lp;
    const std::map<string, string> logs = {
        { lp.errorLogFile,   serverLogPath + "/Errors/"   },
        { lp.infoLogFile,    serverLogPath + "/Info/"     },
        { lp.productLogFile, serverLogPath + "/Products/" }
    };

    WebClient::FTP ftp;
    for (auto itr = logs.begin(); itr != logs.end(); ++itr) {
        if (Utils::file_exists(itr->first)) {
            const string logPath = itr->first + ".copy";
            if (!Utils::copy_paste_file(itr->first, logPath))
                break;

            size_t found = itr->first.find_last_of("/\\");
            if (found != string::npos) {
                const string fileName = itr->first.substr(found + 1);
                for (int i = 0; i < 2; i++) {
                    if (!ftp.upload_file(logPath, itr->second, ftpCredentials, fileName)) {
                        cout << "Failed to upload file to ftp server!" << endl;
                        usleep(ONESECOND / 2);
                        continue;
                    }
                    break;
                }
            } else {
                cout << "Invalid filepath: " << itr->first << endl;
            }

            delete_temp_logs();
        }
    }

    mtx.unlock();
}
void operator()()
{
    for (int i = start_; i <= end_; ++i)
        for (int j = 0; j < columns_; ++j) {
            char current = matrix_[i][j];
            if ((current >= 'a') && (current <= 'j')) {
                mtx.lock();
                ++(*result_)[current - 'a'];
                mtx.unlock();
            }
        }
}
inline void _removeToken(const std::string& path)
{
    auto preCheck = m_tokens.find(path);
    if (preCheck != m_tokens.end()) {
        DeviceToken& tok = *preCheck->second;
        std::shared_ptr<DeviceBase> dev = tok.m_connectedDev;
        tok._deviceClose();
        deviceDisconnected(tok, dev.get());
        m_tokensLock.lock();
        m_tokens.erase(preCheck);
        m_tokensLock.unlock();
    }
}
bool FileWriter::write()
{
    // Poll until the load buffer becomes available
    mtx.lock();
    while (loadAvail == false) {
        mtx.unlock();
        usleep(100 * 5);
        mtx.lock();
    }
    loadAvail = false;
    mtx.unlock();

    if (fs.is_open() && fs.good()) {
        fs.write(writeBuffer, toWrite);
    }

    mtx.lock();
    loadAvail = true;
    mtx.unlock();

    if (fs.bad())
        return false;
    return true;
}
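// Alternative sketch (not from the original source): the polling loop above can be
// expressed with a std::condition_variable instead of unlock/usleep/lock cycles.
// Assumes FileWriter gains a member `std::condition_variable cv`, that whoever sets
// loadAvail = true also calls cv.notify_one(), and that the other members
// (mtx, loadAvail, fs, writeBuffer, toWrite) are as in the snippet above.
bool FileWriter::write_with_cv()
{
    std::unique_lock<std::mutex> lock(mtx);
    cv.wait(lock, [this] { return loadAvail; }); // sleep until the buffer is free
    loadAvail = false;
    lock.unlock();

    if (fs.is_open() && fs.good())
        fs.write(writeBuffer, toWrite);

    lock.lock();
    loadAvail = true;
    lock.unlock();
    cv.notify_one();

    return !fs.bad();
}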
void ImageLoader::AddToPending(const char* Filename)
{
    UploadData New;
    if (Textures.find(Filename) == Textures.end()) {
        auto d = GetDataForImage(Filename);
        New.Data = d.Data;
        New.Width = d.Width;
        New.Height = d.Height;

        LoadMutex.lock();
        PendingUploads.insert(std::pair<char*, UploadData>((char*)Filename, New));
        LoadMutex.unlock();
    }
}
bool dequeue(T* data)
{
    head_mutex.lock();
    node* current_head = head;
    node* new_head = current_head->next;
    if (new_head == nullptr) {
        head_mutex.unlock();
        return false;
    }
    *data = new_head->data;
    // The node after the dummy becomes the new dummy, so the tail pointer never
    // needs updating here and the tail mutex is not required.
    head = new_head;
    head_mutex.unlock();
    delete current_head; // deallocate the previous dummy node
    return true;
}
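// Context sketch (not from the original source): dequeue() above is the consumer
// half of a two-lock queue with a dummy head node, in the style of Michael & Scott.
// The class layout and enqueue() below are assumptions that match the names used
// in dequeue(); only the skeleton is shown.
#include <mutex>

template <typename T>
class two_lock_queue {
    struct node {
        T     data;
        node* next = nullptr;
    };

    node*      head;        // always points at the dummy node
    node*      tail;
    std::mutex head_mutex;  // protects head (dequeue side)
    std::mutex tail_mutex;  // protects tail (enqueue side)

public:
    two_lock_queue() : head(new node), tail(head) {}

    void enqueue(const T& value)
    {
        node* n = new node;
        n->data = value;
        tail_mutex.lock();
        tail->next = n;   // link after the current tail
        tail = n;
        tail_mutex.unlock();
    }

    bool dequeue(T* data); // as in the snippet above
};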
void randomCalculate(int size, char ch)
{
    mutex.lock(); // enter critical section
    for (int i = 0; i < size; ++i) {
        int numerator = rand() % 10;
        int denominator = rand() % 10;
        if (denominator == 0) {
            mutex.unlock(); // release the lock before throwing, otherwise it stays held
            throw DivisionByZeroException();
        }
        float quotient = static_cast<float>(numerator) / denominator;
        printf(" %c%i/%i=%.2f%c ", ch, numerator, denominator, quotient, ch);
    }
    printf("\n\n");
    mutex.unlock(); // exit critical section
}
virtual void show_image()
{
    // get the next element inside the CircularBuffer
    cv::Mat frame = input.first.pop();
    if (!frame.empty()) {
        item->setPixmap(QPixmap::fromImage(
            QImage(frame.data, frame.cols, frame.rows, QImage::Format_RGB888).rgbSwapped()));
    }
    img_view_mutex.unlock();
}
void running(void * aArg)
{
    wmra _wmra;
    _wmra.initialize();

    clock_t last_time, current_time;
    last_time = clock() - 1;
    current_time = clock();
    double dt;
    int count = 0;

    vector<double> X_dot;
    for (int i = 0; i < 7; i++) {
        X_dot.push_back(0.0);
        _wmra.Qarm.push_back(0.0);
    }
    _wmra.sendInputValues(); // zero input

    while (true) {
        /**** Calculate dt ****/
        current_time = clock();
        dt = static_cast<double>(current_time - last_time) / CLOCKS_PER_SEC;
        last_time = current_time;
        /********************/

        /**** Data output (cout) ****/
        std::cout.flush();
        std::cout << "\rRunning at " << 1 / dt << " loops per second" << std::endl;
        std::cout << "Omni Input = [" << _wmra.inputDevice[0] << ", "
                  << _wmra.inputDevice[1] << ", " << _wmra.inputDevice[2] << "]" << std::endl;
        /********************/

        m.lock(); // mutex locked since we are using a global variable
        _wmra.sendInputValues(inputValues);
        m.unlock();

        _wmra.Jacobian_Ground2Endeffector();
        _wmra.weighted_pseudoinverse();
        _wmra.control_joint(_wmra.inputDevice[4], _wmra.inputDevice[5]);
        _wmra.sendInputValues(); // zero the input values after they are used

        /********** Updating devices **********/
        _wmra.ARM.updateArmPosition();
        _wmra.WHEELCHAIR.WMRA_Theta_dot2Xphi_dot();
        _wmra.phi = _wmra.phi + _wmra.WHEELCHAIR.DXphi_dot[1];
        /********************/
    }
}
void threadfunction_sleep_mutex_try_lock(int arg)
{
    this_thread::sleep_for(std::chrono::milliseconds(200));
    long long count = 0;
    while (true) {
        if (coutLock.try_lock()) {
            cout << "mutex try_lock: " << arg
                 << ", this_thread::get_id=" << this_thread::get_id()
                 << ", tried " << count << " times before we attained the lock" << "\n";
            coutLock.unlock();
            break;
        } else {
            // lock is busy, do something else. Let's count the number of times we tried.
            count++;
        }
    }
}
void GHtttpService::request(GHttpTask* task)
{
    CURL* handle = this->getHandle(task);

    mutex.lock();
    this->handle_list.push_back(handle);
    mutex.unlock();

    auto success = curl_easy_perform(handle);

    mutex.lock();
    long retcode = 0;
    curl_easy_getinfo(handle, CURLINFO_RESPONSE_CODE, &retcode);
    if (success == CURLE_OK && retcode == 200) {
        task->setStatus(true);
        GLog("http request:%s ok", task->getUrl().c_str());
    } else {
        GLogE("http request:%s failure", task->getUrl().c_str());
    }
    if (task->getType() == GHTTPTYPE::DOWNLOAD) {
        task->closeFile();
        if (task->getAsync()) // only release the task when it is asynchronous
            task->release();
    }
    this->removeHandle(handle);
    mutex.unlock();

    if (task->callback) {
        task->callback(task->getUrl(), task->getStatus());
    }
}
int main()
{
    // This is the vector of threads.
    std::vector<std::thread> threads;

    // Start all our threads.
    for (int i = 0; i < threadsNumber; ++i) {
        threads.push_back(std::thread(threadFunc, i));
    }

    // This block puts tasks into our queue.
    unsigned i = 1;
    while (i++ < 10000) {
        // Generate two random numbers.
        std::random_device rd; // non-deterministic generator
        std::mt19937 gen(rd());
        long long k = static_cast<long long>(abs(gen())) % 102;
        long long k1 = static_cast<long long>(abs(gen())) % 102;

        // Push these two numbers into the blocking queue.
        writeMutex.lock();
        std::cout << "k == " << k << " given " << std::endl;
        std::cout << "k1 == " << k1 << " given " << std::endl;
        writeMutex.unlock();
        ourQueue.enqueue(k);
        ourQueue.enqueue(k1);
    }

    /*
    // Start all our threads.
    for (int i = 0; i < threadsNumber; ++i) {
        threads.push_back(std::thread(threadFunc, i));
    }
    */

    // Drop poison pills to kill the threads.
    for (int i = 0; i < threadsNumber; ++i)
        ourQueue.enqueue(-1);

    // Wait until all the threads finish.
    for (int j = 0; j < threadsNumber; ++j) {
        if (threads[j].joinable())
            threads[j].join();
    }
    return 0;
}
void func(size_t thread_index, tree_mutex *m)
{
    size_t n = 10000;
    //dbg(thread_index, n);
    while (n--) {
        m->lock(thread_index);
        assert(m2.try_lock());
        ++i;
        ++ai;
        ++ai2;
        m2.unlock();
        m->unlock(thread_index);
    }
}
ArrayList<unsigned int> Multitouch::getTouchIDs(Window* window)
{
    ArrayList<unsigned int> touchIDs;
    Multitouch_state_mutex.lock();
    for (unsigned int i = 0; i < Multitouch_currentActiveTouches.size(); i++) {
        MultitouchData& touchData = Multitouch_currentActiveTouches.get(i);
        if (touchData.window == window) {
            touchIDs.add(touchData.touchID);
        }
    }
    Multitouch_state_mutex.unlock();
    return touchIDs;
}
//Returns index into Pool
EffectHandle ParticleEffects::PreLoadEffectResources(ParticleEffectProperties* effectProperties)
{
    if (!s_InitComplete)
        return EFFECTS_ERROR;

    static std::mutex s_TextureMutex;
    s_TextureMutex.lock();
    MaintainTextureList(effectProperties);
    ParticleEffectsPool.emplace_back(new ParticleEffect(effectProperties));
    s_TextureMutex.unlock();

    EffectHandle index = (EffectHandle)ParticleEffectsPool.size() - 1;
    ParticleEffectsPool[index]->LoadDeviceResources(Graphics::g_Device);
    return index;
}
void thread_pool::slave(
    std::mutex &tasksLock,
    std::mutex &coutLock,
    std::queue<struct task> &tasks,
    std::condition_variable &cvTaskCheck,
    std::atomic<int> &ready,
    std::atomic<int> &tasksAmount,
    std::atomic<bool> &turn_off,
    std::atomic<int> &num_of_done_tasks)
{
    Handler handler;
    taskData *arg;

    coutLock.lock();
    // std::cout << "Thread " << std::this_thread::get_id() << " created" << std::endl;
    coutLock.unlock();

    ready++;

    std::unique_lock<std::mutex> locker(tasksLock);
    cvTaskCheck.wait(locker, [&]() {
        bool task_exist = (tasksAmount != 0);
        return (turn_off || task_exist);
    });

    if (turn_off) {
        ready--;
        locker.unlock(); // unlock through the unique_lock, otherwise its destructor unlocks twice
    } else {
        handler = tasks.front()._handler;
        arg = tasks.front()._arg;
        tasks.pop();
        ready--;
        tasksAmount--;
        locker.unlock();

        handler(arg);
        num_of_done_tasks++;
    }
}
inline std::function<Future<Unit>(void)> makeThunk(
    std::queue<std::shared_ptr<Promise<Unit>>>& ps,
    int& interrupt,
    std::mutex& ps_mutex)
{
    return [&]() mutable {
        auto p = std::make_shared<Promise<Unit>>();
        p->setInterruptHandler(
            [&](exception_wrapper const& /* e */) { ++interrupt; });
        ps_mutex.lock();
        ps.push(p);
        ps_mutex.unlock();
        return p->getFuture();
    };
}
int main(int, char**)
{
    m.lock();
    std::thread t(f);
    std::this_thread::sleep_for(ms(250));
    m.unlock();
    t.join();

#ifdef __cpp_deduction_guides
    std::lock_guard lg(m);
    static_assert((std::is_same<decltype(lg), std::lock_guard<decltype(m)>>::value), "");
#endif

    return 0;
}
void pop()
{
    m.lock();
    for (int i = 0; i != 10; ++i) {
        if (vec.size() > 0) {
            int val = vec.back();
            vec.pop_back();
            std::cout << "Pop " << val << std::endl;
        }
        std::this_thread::sleep_for(std::chrono::milliseconds(500));
    }
    m.unlock();
}
void ControlPanel::flatTrim()
{
    if (hasCalledFlatTrim) {
        return;
    }
    std::cout << "called flat trim" << std::endl;
    hasCalledFlatTrim = true;
    std_srvs::Empty flattrim_srv_srvs;
    pubLock.lock();
    flatTrimClient.call(flattrim_srv_srvs);
    pubLock.unlock();
}
void CBaLog::logRoutine(TBaCoreThreadArg *pArg)
{
    while (!sLogdArg.exitTh) {
        // Create and delete take precedence, to avoid deadlocks;
        // deleting a logger flushes it anyway.
        if (sMtx.try_lock()) {
            // Iterate over the loggers
            for (auto &kv : sLoggers) {
                kv.second->Flush();
            }
            sMtx.unlock();
        }
        BaCoreMSleep(50);
    }
}
void subscribeCallback(redisAsyncContext *context, void *replay, void *priv)
{
    g_SubscribeMutex.lock();
    redisReply *reply = static_cast<redisReply*>(replay);
    if (reply == NULL) {
        g_SubscribeMutex.unlock(); // release the lock before the early return
        return;
    }
    if (reply->type == REDIS_REPLY_ARRAY && reply->elements == 3) {
        if (strcmp(reply->element[0]->str, "subscribe") != 0) {
            if (NULL != g_pServer) {
                g_pServer->RedisSubscribeAsync((char*)priv, reply->element[1]->str,
                                               reply->element[2]->str);
            }
        }
    }
    g_SubscribeMutex.unlock();
}
/*****************************************************************
* DebugWPrint(): Prints to the debug logger in a thread-safe manner.
*                Console output is skipped when not in a Debug configuration.
*
* Ins:      sDebugLog
*
* Outs:     void
*
* Returns:  void
*
* Mod. Date:     05/23/2015
* Mod. Initials: MJG
*****************************************************************/
void DebugWPrint(const wchar_t* sDebugLog, ConsoleColor eColor)
{
#ifdef _DEBUG
    static std::mutex s_mPrinterMutex; // keeps the log legible across any possible threads
    s_mPrinterMutex.lock();
    ChangeConsoleColor(eColor);
    wprintf(L"%ls", sDebugLog); // print as data, not as a format string
    ChangeConsoleColor(ConsoleColor::Default);
    s_mPrinterMutex.unlock();
#endif
    outputFile << sDebugLog;
    OutputDebugStringW(sDebugLog);
}
unsigned int Mouse::getTotalMouseInstances(Window* window)
{
    unsigned int counter = 0;
    Mouse_state_mutex.lock();
    for (unsigned int i = 0; i < Mouse_currentStates.size(); i++) {
        MouseData& mouseData = Mouse_currentStates.get(i);
        if (mouseData.window == window) {
            counter++;
        }
    }
    Mouse_state_mutex.unlock();
    return counter;
}
uint32_t access_pool(bool flag = false)
{
    static std::mutex g_access_mutex;
    if (!flag)
        g_access_mutex.lock();

    char old_val = *g_char_ptr;
    if (flag)
        do_bad_thing_with_location(g_char_ptr, old_val + 1);

    if (!flag)
        g_access_mutex.unlock();
    return *g_char_ptr;
}
uint32_t access_pool(bool flag = false)
{
    static std::mutex g_access_mutex;
    if (!flag)
        g_access_mutex.lock();

    uint32_t old_val = g_val;
    if (flag)
        g_val = old_val + 1;

    if (!flag)
        g_access_mutex.unlock();
    return g_val;
}
void ReceiveThread(int s, int recvLength)
{
    bool paskafix = true;
    char* buf = (char*)malloc(recvLength);

    recvThreadMutex.lock();
    while (1) {
        if (recv(s, buf, recvLength, 0) == SOCKET_ERROR) {
            printf("recvfrom(...) failed! Error code : %d\n", WSAGetLastError());
            if (paskafix) // only unlock if we still hold the mutex
                recvThreadMutex.unlock();
            return;
        }
        if (paskafix) {
            // Release the mutex after the first successful receive
            recvThreadMutex.unlock();
            paskafix = false;
        }
        ParseMessage(buf);
    }
}
void capFunc(VideoCapture cap, std::queue<cv::Mat> &frames)
{
    cv::Mat frame;
    while (1) {
        cap >> frame;
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
        if (frame.empty())
            continue;
        mu.lock();
        frames.push(frame);
        //std::cout << "push " << frames.size() << std::endl;
        mu.unlock();
    }
}
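// Consumer sketch to pair with capFunc() above (not from the original source).
// Assumes the same global std::mutex mu protects the frame queue; the function
// name dispFunc and the window title are illustrative only.
void dispFunc(std::queue<cv::Mat> &frames)
{
    while (1) {
        cv::Mat frame;
        mu.lock();
        if (!frames.empty()) {
            frame = frames.front();
            frames.pop();
        }
        mu.unlock();
        if (!frame.empty())
            cv::imshow("frames", frame);
        cv::waitKey(30); // also paces the loop
    }
}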