void cmdVelReceived(const geometry_msgs::Twist::ConstPtr& cmd_vel)
{
    std::cout << "cmdVel Received: speed = " << cmd_vel->linear.x
              << " angular = " << cmd_vel->angular.z << std::endl;

    if (bumper_warning.load())
        roomba->drive(-0.2, 0);
    else if (ir_warning.load())
        roomba->drive(0, cmd_vel->angular.z);
    else
        roomba->drive(cmd_vel->linear.x, cmd_vel->angular.z);
}
void thread_pool::thread_func(std::atomic_bool & stop_request)
{
    std::unique_lock<std::mutex> lk(m_mutex, std::defer_lock);
    for (;;)
    {
        ext::intrusive_ptr<task_base> task_ptr;

        lk.lock();
        if (stop_request.load(std::memory_order_relaxed)) return;
        if (!m_tasks.empty()) goto avail;

    again:
        m_event.wait(lk);
        if (stop_request.load(std::memory_order_relaxed)) return;
        if (m_tasks.empty()) goto again;

    avail:
        task_ptr.reset(&m_tasks.front(), ext::noaddref);
        m_tasks.pop_front();
        lk.unlock();

        task_ptr->execute();
    }
}
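// A minimal sketch of the producer side that could pair with thread_func
// above. The submit() name is an assumption; it presumes m_tasks is an
// intrusive list whose nodes outlive the call and that m_event is the
// condition variable the workers block on.
void thread_pool::submit(task_base & task)
{
    {
        std::lock_guard<std::mutex> lk(m_mutex);
        m_tasks.push_back(task);   // queue keeps an intrusive reference
    }
    m_event.notify_one();          // wake one worker waiting in thread_func
}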
progressIndicatorThreadWrapper( const std::chrono::nanoseconds& updateInterval )
{
    m_stopCondition.store( false );
    // Capture the interval by value: the reference parameter does not outlive
    // this constructor, so capturing it by reference would dangle.
    m_thread = std::thread( [this, updateInterval]
    {
        FormattedPrint::On(std::cout, false).app(" ");
        int slashIndex = 0;

        while( ! m_stopCondition.load() )
        {
            FormattedPrint::On(std::cout, false).app("\b\b\b \b")
                .color( Green )
                .app('[')
                .color( Yellow )
                .app( slashes[ slashIndex++ ] )
                .color( Green )
                .app(']')
                .color();

            slashIndex = slashIndex % 4;
            std::this_thread::sleep_for( updateInterval );
        }

        FormattedPrint::On(std::cout, false).app("\b\b\b");
    });
}
/**
 * This is the actual function that the thread executes.
 * It receives a block from the scheduler, calls its run() method and returns
 * it to the scheduler. The thread runs until it is told to stop by setting
 * the m_stop boolean flag.
 */
void operator()()
{
    if(CPU_COUNT(&m_mask) > 0)
    {
        pthread_t id = pthread_self();
        int ret = pthread_setaffinity_np(id, sizeof(m_mask), &m_mask);
        if(ret != 0)
        {
            perror("setaffinity");
            throw(std::runtime_error("set affinity failed"));
        }
    }

    while(!m_stop.load())
    {
        std::shared_ptr<Block> torun(m_scheduler.next_task(m_id));
        if(torun)
        {
            torun->run();
            m_scheduler.task_done(m_id, std::move(torun));
        }
        else
        {
            std::this_thread::sleep_for(std::chrono::milliseconds(10));
        }
    }
    m_stop.store(false);
}
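// A minimal sketch of how the m_mask consumed above could be built, pinning
// the worker to a single core. make_affinity_mask is a hypothetical helper;
// CPU_ZERO / CPU_SET are the glibc cpu_set_t macros (compile with -D_GNU_SOURCE).
#include <sched.h>

static cpu_set_t make_affinity_mask(int core)
{
    cpu_set_t mask;
    CPU_ZERO(&mask);        // start from an empty CPU set
    CPU_SET(core, &mask);   // allow exactly one core
    return mask;
}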
void thread_connect()
{
    connected.store(false);

    do
    {
        struct sockaddr_in addr;
        int r;
        hostent* h;

        memset((void*)&addr, 0, sizeof(addr));
        addr.sin_addr.s_addr = inet_addr(ip.c_str());
        if(INADDR_NONE == addr.sin_addr.s_addr)
        {
            h = gethostbyname(ip.c_str());
            if(NULL == h)
            {
                perror("Could not get host by name");
                break;
            }
        }
        else
        {
            // gethostbyaddr expects the length of the raw address (in_addr),
            // not of the whole sockaddr_in structure.
            h = gethostbyaddr((const char*)&addr.sin_addr, sizeof(struct in_addr), AF_INET);
            if(NULL == h)
            {
                perror("Could not get host by address");
                break;
            }
        }

        sock = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
        if(INVALID_SOCKET == sock)
        {
            perror("Could not create socket");
            break;
        }

        BOOL bDontLinger = TRUE;
        setsockopt( sock, SOL_SOCKET, SO_DONTLINGER, ( const char* )&bDontLinger, sizeof( BOOL ) );

        addr.sin_family = AF_INET;
        addr.sin_addr = *((in_addr*)*h->h_addr_list);
        addr.sin_port = htons(port);

        printf("Connecting... ");
        r = connect(sock, (sockaddr*)&addr, sizeof(struct sockaddr));
        if(SOCKET_ERROR == r)
        {
            printf("Cannot connect to server: %d\n", get_errno());
            break;
        }
        printf("connected.\n");

        connected.store(true);
        connecting.store(false);

        // std::thread::swap takes an lvalue, so move-assign the new threads instead.
        sender = std::thread(std::bind(&transport_t::thread_send, this));
        recver = std::thread(std::bind(&transport_t::thread_recv, this));
        return;
    } while (0);

    connecting.store(false);
}
void handleDbgBreakInterrupt()
{
    // If we are not initialised, we should ignore DbgBreaks
    if (!decaf::config::debugger::enabled) {
        return;
    }

    std::unique_lock<std::mutex> lock(sMutex);
    auto coreId = cpu::this_core::id();

    // Store our core state before we flip isPaused
    sCorePauseState[coreId] = cpu::this_core::state();

    // Check to see if we were the last core to join on the fun
    auto coreBit = 1 << coreId;
    auto isPausing = sIsPausing.fetch_or(coreBit);
    if (isPausing == 0) {
        // This is the first core to hit a breakpoint
        sPauseInitiatorCoreId = coreId;

        // Signal the rest of the cores to stop
        for (auto i = 0; i < 3; ++i) {
            cpu::interrupt(i, cpu::DBGBREAK_INTERRUPT);
        }
    }

    if ((isPausing | coreBit) == (1 | 2 | 4)) {
        // This was the last core to join.
        sIsPaused.store(true);
        sIsPausing.store(0);
        sIsResuming.store(0);
    }

    // Spin around the release condition while we are paused
    while (sIsPausing.load() || sIsPaused.load()) {
        sPauseReleaseCond.wait(lock);
    }

    // Clear any additional DbgBreaks that occurred
    cpu::this_core::clearInterrupt(cpu::DBGBREAK_INTERRUPT);

    // Everyone needs to leave at once in case new breakpoints occur.
    if ((sIsResuming.fetch_or(coreBit) | coreBit) == (1 | 2 | 4)) {
        sPauseReleaseCond.notify_all();
    } else {
        while ((sIsResuming.load() | coreBit) != (1 | 2 | 4)) {
            sPauseReleaseCond.wait(lock);
        }
    }
}
bool fiber_waiter::wait_ready(std::chrono::steady_clock::duration timeout_duration) noexcept
{
    if (gth_thread_type == thread_type::thread)
    {
        std::unique_lock<std::mutex> lk(m_thread_mutex);
        return m_thread_var.wait_for(lk, timeout_duration,
            [this] { return m_ready.load(std::memory_order_relaxed); });
    }
    else
    {
        std::unique_lock<boost::fibers::mutex> lk(m_fiber_mutex);
        return m_fiber_var.wait_for(lk, timeout_duration,
            [this] { return m_ready.load(std::memory_order_relaxed); });
    }
}
void fiber_waiter::wait_ready() noexcept
{
    if (gth_thread_type == thread_type::thread)
    {
        std::unique_lock<std::mutex> lk(m_thread_mutex);
        return m_thread_var.wait(lk,
            [this] { return m_ready.load(std::memory_order_relaxed); });
    }
    else
    {
        std::unique_lock<boost::fibers::mutex> lk(m_fiber_mutex);
        return m_fiber_var.wait(lk,
            [this] { return m_ready.load(std::memory_order_relaxed); });
    }
}
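// A minimal sketch of the signalling side that could pair with the two
// wait_ready overloads above. set_ready() is a hypothetical name; it locks
// both mutexes so that neither a thread waiter nor a fiber waiter can miss
// the wakeup between its predicate check and its wait.
void fiber_waiter::set_ready() noexcept
{
    {
        std::lock_guard<std::mutex> lk1(m_thread_mutex);
        std::lock_guard<boost::fibers::mutex> lk2(m_fiber_mutex);
        m_ready.store(true, std::memory_order_relaxed);
    }
    m_thread_var.notify_all();
    m_fiber_var.notify_all();
}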
/**
 * Start the IPC thread.
 */
void ipcStart()
{
    std::unique_lock<std::mutex> lock { sIpcMutex };
    sIpcThreadRunning.store(true);
    sIpcThread = std::thread { ipcThreadEntry };
}
LRESULT WINAPI WndProc(HWND hWnd, UINT msg, WPARAM wParam, LPARAM lParam)
{
    WindowEvent params = {};
    params.hWnd = hWnd;
    params.msg = msg;
    params.wParam = wParam;
    params.lParam = lParam;
    s_queue->Enqueue(params);

    switch (msg)
    {
    case WM_SIZE:
        if (g_renderApi && wParam != SIZE_MINIMIZED)
        {
            g_renderApi->Resize((int32_t) LOWORD(lParam), (int32_t) HIWORD(lParam));
            return 0;
        }
        break;
    case WM_SYSCOMMAND:
        if ((wParam & 0xfff0) == SC_KEYMENU) // Disable ALT application menu
            return 0;
        break;
    case WM_DESTROY:
        s_running.store(false);
        PostQuitMessage(0);
        return 0;
    }

    return DefWindowProcW(hWnd, msg, wParam, lParam);
}
void gstate_update_func()
{
    POINT p;
    int mButton;
    if(GetSystemMetrics(SM_SWAPBUTTON))
        mButton = VK_RBUTTON; // if swapped
    else
        mButton = VK_LBUTTON; // not swapped (normal)

    int screenWidth  = GetSystemMetrics( SM_CXSCREEN );
    int screenHeight = GetSystemMetrics( SM_CYSCREEN );
    // default: SM_CX/CYSCREEN gets the size of the primary screen.
    // The commented-out lines below are only needed for multi-display setups.
    //int screenWidth  = GetSystemMetrics( SM_CXVIRTUALSCREEN );
    //int screenHeight = GetSystemMetrics( SM_CYVIRTUALSCREEN );
    float r_screenWidth  = 1.f / (float)(screenWidth - 1);
    float r_screenHeight = 1.f / (float)(screenHeight - 1);

    while ( inputThreadRunning.load( std::memory_order_relaxed ) )
    {
        // "KeyState" is disabled for now, on Windows...
        //GetKey((long*)gstate->keys);
        GetCursorPos(&p);
        gMouseUGenGlobals.mouseX = (float)p.x * r_screenWidth;
        gMouseUGenGlobals.mouseY = 1.f - (float)p.y * r_screenHeight;
        gMouseUGenGlobals.mouseButton = (GetKeyState(mButton) < 0);

        std::this_thread::sleep_for( std::chrono::milliseconds( 17 ) );
    }
}
static void run(std::atomic_bool& running, int fd, felix::netio::DataSinkCallbacks* callbacks)
{
    while(running.load())
    {
        /*
        felix::netio::msgheader header;
        ssize_t count = read(fd, &header, sizeof(header));
        assert(count == sizeof(header));
        char* data = new char[header.len];
        ssize_t bytes_read = 0;
        while(bytes_read < header.len)
        {
            count = read(fd, data+bytes_read, header.len-bytes_read);
            if (count == 0)
            {
                std::vector<felix::netio::DataSinkMessage> messages;
                messages.emplace_back(data, bytes_read);
                callbacks->on_data_received_with_error(messages);
                return;
            }
            bytes_read += count;
        }
        */

        char* data = new char[1024];
        ssize_t count = read(fd, data, 1024);
        std::vector<felix::netio::DataSinkMessage> messages;
        messages.emplace_back(data, count);
        callbacks->on_data_received(messages);
    }
}
bool example1_rx(const std::string& dataaddress, unsigned short dataport, std::atomic_bool& stopFlag)
{
    SuperBlock rxBlock;
    uint8_t rawBlock[sizeof(SuperBlock)];
    int rawBlockSize;
    UDPSocket rxSocket(dataport);
    std::string senderaddress, senderaddress0;
    unsigned short senderport, senderport0 = 0;
    Example1Rx ex1(nbSamplesPerBlock, nbOriginalBlocks, nbRecoveryBlocks);

    std::cerr << "example1_rx: receiving on address: " << dataaddress
              << " port: " << (int) dataport << std::endl;

    while (!stopFlag.load())
    {
        rawBlockSize = 0;

        while (rawBlockSize < (int) sizeof(SuperBlock))
        {
            rawBlockSize += rxSocket.RecvDataGram((void *) &rawBlock[rawBlockSize],
                                                  (int) sizeof(SuperBlock),
                                                  senderaddress, senderport);
            if ((senderaddress != senderaddress0) || (senderport != senderport0))
            {
                std::cerr << "example1_rx: connected to: " << senderaddress
                          << ":" << senderport << std::endl;
                senderaddress0 = senderaddress;
                senderport0 = senderport;
            }

            usleep(10);
        }

        rxBlock = *((SuperBlock *) rawBlock);
        ex1.processBlock(rxBlock);
    }

    return true; // the function is declared bool, so report normal termination
}
void consume(std::atomic_bool& done, int* array, int_queue& q)
{
    while (!done.load())
    {
        int val;
        if(q.pop(val)) {
            array[val] = val;
        } else {
            std::this_thread::yield();
        }
    }

    // drain
    while (!q.empty())
    {
        int val;
        if(q.pop(val)) {
            array[val] = val;
        } else {
            std::this_thread::yield();
        }
    }
}
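// A minimal sketch of a matching producer for the consumer above, assuming
// int_queue exposes a push(int) that returns false when the queue is full
// (the produce() name and that interface are assumptions).
void produce(std::atomic_bool& done, int count, int_queue& q)
{
    for (int i = 0; i < count; ++i)
    {
        while (!q.push(i)) {
            std::this_thread::yield();   // queue full, back off briefly
        }
    }
    done.store(true);                    // consumers drain the queue and exit
}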
bool check_events()
{
    bool true_value = true;
    // Use the strong variant: this is a single, un-looped CAS, and the weak
    // form is allowed to fail spuriously even when the flag is set.
    if (flag_needs_recreate_swapchain.compare_exchange_strong(true_value, false))
    {
        graphics::render3d::resources::create_pipeline();
    }
    return true;
}
void reader_thread()
{
    while (!data_ready.load())
    {
        std::this_thread::sleep_for(std::chrono::milliseconds(1));
    }
    std::cout << "The answer = " << data[0] << "\n";
}
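// A minimal sketch of the writer side that pairs with reader_thread above,
// assuming data is a shared std::vector<int> (the container type is an
// assumption). The payload is written before the flag is raised, so the
// seq_cst store/load pair makes it visible to the reader.
void writer_thread()
{
    data.push_back(42);       // publish the payload first...
    data_ready.store(true);   // ...then raise the flag
}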
void try_connect()
{
    bool f = false;
    if (!connecting.compare_exchange_strong(f, true))
        return;

    // std::thread::swap takes an lvalue, so move-assign the new thread instead.
    connector = std::thread(std::bind(&transport_t::thread_connect, this));
    connector.detach();
    return;
}
void OSLockScheduler()
{
    bool locked = false;

    while (!gSchedulerLock.compare_exchange_weak(locked, true, std::memory_order_acquire))
    {
        locked = false;
    }
}
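// A minimal sketch of the matching unlock for the spin-acquire above: the
// flag is simply cleared with release ordering. The function name here is an
// assumption based on the lock shown.
void OSUnlockScheduler()
{
    gSchedulerLock.store(false, std::memory_order_release);
}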
~ThreadPool()
{
    stoped.exchange(true);
    for (auto& cond : thread_cond) {
        cond.notify_one();
    }
    for (auto& worker : workers) {
        if (worker.joinable())
            worker.join();
    }
}
inline void lock()
{
    while (m_spin.exchange(true))
    {
        if (yield) {
            std::this_thread::yield();
        }
    }
}
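// A minimal sketch of the matching unlock for the exchange-based spinlock
// above, assuming m_spin is the same atomic flag: a release store hands the
// lock back.
inline void unlock()
{
    m_spin.store(false, std::memory_order_release);
}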
bool init(const char* pipeline, core::c_window *window, bool validation)
{
    VERIFY(graphics::render3d::resources::load_pipeline(pipeline));

    VERIFY(create_instance("appname"));
    VERIFY(create_surface(window));
    VERIFY(create_device());
    VERIFY(create_device_queue());

    VERIFY(graphics::render3d::resources::create_pipeline());

    vk_globals::is_init = true;
    flag_needs_recreate_swapchain.store(false);
    flag_needs_shutdown.store(false);

    on_window_resize_listener = window->add_event_listener(core::e_window_event::ON_RESIZE, on_window_resize);

    return true;
}
~progressIndicatorThreadWrapper()
{
    m_stopCondition.store( true );
    if( m_thread.joinable() )
    {
        m_thread.join();
    }
}
void resumeAll()
{
    auto oldState = sIsPaused.exchange(false);
    decaf_check(oldState);

    for (auto i = 0; i < 3; ++i) {
        sCorePauseState[i] = nullptr;
    }

    sPauseReleaseCond.notify_all();
}
void disconnect()
{
    connected.store(false);
    closesocket(sock);
    sender.join();
    recver.join();
    send_queue.clear();
    recv_queue.clear();
}
/**
 * Main thread entry point for the IPC thread.
 *
 * This thread represents the IOS side of the IPC mechanism.
 *
 * Responsible for receiving IPC requests and dispatching them to the
 * correct IOS device.
 */
void ipcThreadEntry()
{
    std::unique_lock<std::mutex> lock { sIpcMutex };

    while (true)
    {
        if (!sIpcRequests.empty())
        {
            auto request = sIpcRequests.front();
            sIpcRequests.pop();

            lock.unlock();
            iosDispatchIpcRequest(request);
            lock.lock();

            switch (request->cpuId)
            {
            case IOSCpuId::PPC0:
                sIpcResponses[0].push(request);
                cpu::interrupt(0, cpu::IPC_INTERRUPT);
                break;
            case IOSCpuId::PPC1:
                sIpcResponses[1].push(request);
                cpu::interrupt(1, cpu::IPC_INTERRUPT);
                break;
            case IOSCpuId::PPC2:
                sIpcResponses[2].push(request);
                cpu::interrupt(2, cpu::IPC_INTERRUPT);
                break;
            default:
                decaf_abort("Unexpected cpu id");
            }
        }

        if (!sIpcThreadRunning.load()) {
            break;
        }

        if (sIpcRequests.empty()) {
            sIpcCond.wait(lock);
        }
    }

    sIpcThreadRunning.store(false);
}
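// A minimal sketch of how a request might be handed to the IPC thread above:
// queue it under sIpcMutex and wake the thread through sIpcCond. The function
// name and the IpcRequest pointer type are assumptions, not part of the
// original snippet.
void ipcSubmitRequest(IpcRequest *request)
{
    std::unique_lock<std::mutex> lock { sIpcMutex };
    sIpcRequests.push(request);
    sIpcCond.notify_all();
}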
static void stepCore(uint32_t coreId, bool stepOver)
{
    decaf_check(sIsPaused.load());

    const cpu::CoreRegs *state = sCorePauseState[coreId];
    uint32_t nextInstr = calculateNextInstr(state, stepOver);
    cpu::addBreakpoint(nextInstr, cpu::SYSTEM_BPFLAG);
    resumeAll();
}
~WorkerThreads()
{
    _shutdownFlag.store(true);
    _cache.setShutdownFlag();
    for (auto worker : _workers)
    {
        worker->join();
        delete worker;
    }
}
/**
 * Stop the IPC thread.
 */
void ipcShutdown()
{
    std::unique_lock<std::mutex> lock { sIpcMutex };
    if (sIpcThreadRunning.exchange(false))
    {
        sIpcCond.notify_all();
        lock.unlock();
        sIpcThread.join();
    }
}
WorkerThreads(unsigned int threadsCount, RJCache& cache)
    : _nextThreadId(0), _cache(cache)
{
    _shutdownFlag.store(false);
    _workers.reserve(threadsCount);
    for (unsigned int i = 0; i < threadsCount; i++)
    {
        std::thread* worker = new std::thread(&WorkerThreads::workerThreadFunc, this, _nextThreadId++, &cache);
        _workers.push_back(worker);
    }
}
void swap_queues()
{
    for(unsigned int i = 0; i < m_full_slots; ++i)
    {
        m_queues[m_consumer_q].vec[i].valid.store(false, std::memory_order_release);
    }

    unsigned int newpoint = m_consumer_q << 30;
    m_consumer_q = exchange_queue(m_consumer_q);
    unsigned int old_pointer = m_pointer.exchange(newpoint, std::memory_order_release);
    m_full.store(false, std::memory_order_release);

    old_pointer &= ~MASK;
    m_full_slots = std::min(old_pointer, Qlen);
    m_read = 0;
}