int main(int argc, const char** argv) {
  // One worker per hardware thread; a mutex keeps std::cout output orderly.
  const unsigned thread_count = std::thread::hardware_concurrency();
  std::cout << "Launching " << thread_count << " threads\n";

  std::mutex cout_mutex;
  std::vector<std::thread> workers;
  workers.reserve(thread_count);
  for (unsigned idx = 0; idx < thread_count; ++idx) {
    workers.emplace_back([&cout_mutex, idx] {
      {
        // Scope the lock to the std::cout statement only.
        std::lock_guard<std::mutex> guard(cout_mutex);
        std::cout << "Thread #" << idx << " is running\n";
      }
      // Simulate a burst of work with a short sleep.
      std::this_thread::sleep_for(std::chrono::milliseconds(200));
    });
  }
  for (auto& worker : workers) {
    worker.join();
  }
  return 0;
}
int main(int argc, const char* argv[]) {
  constexpr unsigned num_threads = 4;
  // Serializes std::cout access across the workers.
  std::mutex iomutex;
  std::vector<std::thread> threads(num_threads);
  for (unsigned tid = 0; tid < num_threads; ++tid) {
    threads[tid] = std::thread([&iomutex, tid] {
      // Brief startup delay (presumably so the affinity call below lands
      // before the first report — confirm with caller intent).
      std::this_thread::sleep_for(std::chrono::milliseconds(20));
      for (;;) {
        {
          std::lock_guard<std::mutex> iolock(iomutex);
          std::cout << "Thread #" << tid << ": on CPU" << sched_getcpu() << "\n";
        }
        std::this_thread::sleep_for(std::chrono::milliseconds(900));
      }
    });
    // Pin worker 'tid' to CPU 'tid' through its native pthread handle.
    cpu_set_t cpuset;
    CPU_ZERO(&cpuset);
    CPU_SET(tid, &cpuset);
    const int rc = pthread_setaffinity_np(threads[tid].native_handle(),
                                          sizeof(cpu_set_t), &cpuset);
    if (rc != 0) {
      std::cerr << "Error calling pthread_setaffinity_np: " << rc << "\n";
    }
  }
  for (auto& t : threads) {
    t.join();
  }
  return 0;
}
int main(int argc, const char** argv) {
  std::mutex iomutex;
  // The worker prints its own identifiers; main() prints what it observes
  // about the same thread. The mutex keeps the two reports from interleaving.
  std::thread t([&iomutex] {
    std::lock_guard<std::mutex> iolock(iomutex);
    std::cout << "Thread: my id = " << std::this_thread::get_id() << "\n"
              << " my pthread id = " << pthread_self() << "\n";
  });
  {
    std::lock_guard<std::mutex> iolock(iomutex);
    std::cout << "Launched t: id = " << t.get_id() << "\n"
              << " native_handle = " << t.native_handle() << "\n";
  }
  t.join();
  return 0;
}
// Dispatcher function // Simulates and pushes a message every 2 seconds void listen() { { std::lock_guard<std::mutex> iolock(gIOMutex); std::cout << "Listener " << mName << " running .... " << std::endl; } mCounter = 0; while (mIsRunning) { bool valid; T message = mpSQ->pop(std::chrono::milliseconds(1000), valid); if (valid) { std::cout << " " << mName << " intercepted a message on CPU " << std::endl; mpfC(message); } } std::cout << "Done .... " << std::endl; }
int main(int argc, const char** argv) {
  constexpr unsigned num_threads = 4;
  // A mutex keeps the threads' std::cout output from interleaving.
  std::mutex iomutex;
  std::vector<std::thread> threads(num_threads);
  for (unsigned cpu = 0; cpu < num_threads; ++cpu) {
    threads[cpu] = std::thread([&iomutex, cpu] {
      // Short startup delay (presumably so the affinity set below takes
      // effect before the first report — confirm intent).
      std::this_thread::sleep_for(std::chrono::milliseconds(20));
      while (true) {
        {
          // Lock only around the std::cout statement.
          std::lock_guard<std::mutex> iolock(iomutex);
          std::cout << "Thread #" << cpu << ": on CPU " << sched_getcpu()
                    << "\n";
        }
        // Simulate real work between reports.
        std::this_thread::sleep_for(std::chrono::milliseconds(900));
      }
    });
    // Restrict thread 'cpu' to run only on CPU 'cpu'.
    cpu_set_t cpuset;
    CPU_ZERO(&cpuset);
    CPU_SET(cpu, &cpuset);
    const int rc = pthread_setaffinity_np(threads[cpu].native_handle(),
                                          sizeof(cpu_set_t), &cpuset);
    if (rc != 0) {
      std::cerr << "Error calling pthread_setaffinity_np: " << rc << "\n";
    }
  }
  for (auto& t : threads) {
    t.join();
  }
  return 0;
}
int main(int argc, const char* argv[]) {
  constexpr unsigned num_threads = 4;
  // Serializes std::cout so the threads' reports do not interleave.
  std::mutex iomutex;
  std::vector<std::thread> threads(num_threads);
  // Fix: the loop index was a signed int compared against the unsigned
  // num_threads (implicit conversion, -Wsign-compare); use unsigned like the
  // sibling variants of this program do.
  for (unsigned i = 0; i < num_threads; ++i) {
    threads[i] = std::thread([&iomutex, i] {
      while (true) {
        {
          // Hold the lock only for the duration of the std::cout statement.
          std::lock_guard<std::mutex> iolock(iomutex);
          std::cout << "Thread #" << i << ": on CPU " << sched_getcpu()
                    << " \n";
        }
        // Report the current CPU roughly once a second.
        std::this_thread::sleep_for(std::chrono::milliseconds(900));
      }
    });
  }
  for (auto& t : threads) {
    t.join();
  }
  return 0;
}