void
MonitorRecMutexTest::run()
{
    Monitor<RecMutex> monitor;
    MonitorRecMutexTestThreadPtr t;
    MonitorRecMutexTestThread2Ptr t2;
    MonitorRecMutexTestThread2Ptr t3;
    ThreadControl control;
    ThreadControl control2;

    {
        Monitor<RecMutex>::Lock lock(monitor);

        // TEST: TryLock. The mutex is recursive, so a TryLock taken in
        // the same thread must succeed even while the Lock is held.
        Monitor<RecMutex>::TryLock lock2(monitor);
        test(lock2.acquired());

        Monitor<RecMutex>::TryLock tlock(monitor);
        test(tlock.acquired());

        // TEST: Start thread, try to acquire the mutex.
        t = new MonitorRecMutexTestThread(monitor);
        control = t->start();

        // TEST: Wait until the tryLock has been tested.
        t->waitTryLock();
    }

    //
    // TEST: Once the mutex has been released, the thread should
    // acquire the mutex and then terminate.
    //
    control.join();

    // TEST: notify() wakes exactly one consumer.
    t2 = new MonitorRecMutexTestThread2(monitor);
    control = t2->start();
    t3 = new MonitorRecMutexTestThread2(monitor);
    control2 = t3->start();

    // Give the threads time to start waiting.
    ThreadControl::sleep(Time::seconds(1));

    {
        Monitor<RecMutex>::Lock lock(monitor);
        monitor.notify();
    }

    // Give one thread time to terminate.
    ThreadControl::sleep(Time::seconds(1));

    test((t2->finished && !t3->finished) || (t3->finished && !t2->finished));

    {
        Monitor<RecMutex>::Lock lock(monitor);
        monitor.notify();
    }
    control.join();
    control2.join();

    // TEST: notifyAll() wakes all consumers.
    t2 = new MonitorRecMutexTestThread2(monitor);
    control = t2->start();
    t3 = new MonitorRecMutexTestThread2(monitor);
    control2 = t3->start();

    // Give the threads time to start waiting.
    ThreadControl::sleep(Time::seconds(1));

    {
        Monitor<RecMutex>::Lock lock(monitor);
        monitor.notifyAll();
    }
    control.join();
    control2.join();

    // TEST: timedWait
    {
        Monitor<RecMutex>::Lock lock(monitor);
        test(!monitor.timedWait(Time::milliSeconds(500)));
    }
}
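//
// The helper runnable above is not shown in this excerpt. Below is a
// minimal sketch of what MonitorRecMutexTestThread could look like,
// assuming the IceUtil Thread/Monitor/Cond API used throughout this
// file; the member names (_monitor, _tryLock, ...) are illustrative,
// not necessarily the actual ones.
//
class MonitorRecMutexTestThread : public Thread
{
public:

    MonitorRecMutexTestThread(Monitor<RecMutex>& m) :
        _monitor(m),
        _tryLock(false)
    {
    }

    virtual void run()
    {
        // The main thread holds the monitor. Recursion only applies to
        // the owning thread, so this TryLock must fail.
        Monitor<RecMutex>::TryLock tlock(_monitor);
        test(!tlock.acquired());

        // Tell the main thread the tryLock has been tested.
        {
            Mutex::Lock lock(_tryLockMutex);
            _tryLock = true;
        }
        _tryLockCond.signal();

        // Block until the main thread releases the monitor, then exit.
        Monitor<RecMutex>::Lock lock(_monitor);
    }

    void waitTryLock()
    {
        Mutex::Lock lock(_tryLockMutex);
        while(!_tryLock)
        {
            _tryLockCond.wait(lock);
        }
    }

private:

    Monitor<RecMutex>& _monitor;
    bool _tryLock;
    Cond _tryLockCond;
    Mutex _tryLockMutex;
};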
int main(int argc, char** argv) {
  int port = 9091;
  string serverType = "thread-pool";
  string protocolType = "binary";
  size_t workerCount = 4;
  size_t clientCount = 20;
  size_t loopCount = 50000;
  TType loopType = T_VOID;
  string callName = "echoVoid";
  bool runServer = true;
  bool logRequests = false;
  string requestLogPath = "./requestlog.tlog";
  bool replayRequests = false;

  ostringstream usage;

  usage << argv[0]
        << " [--port=<port number>] [--server] [--server-type=<server-type>]"
        << " [--protocol-type=<protocol-type>] [--workers=<worker-count>]"
        << " [--clients=<client-count>] [--loop=<loop-count>]" << endl
        << "\tclients        Number of client threads to create - 0 implies no clients, i.e. server only. Default is " << clientCount << endl
        << "\thelp           Prints this help text." << endl
        << "\tcall           Service method to call. Default is " << callName << endl
        << "\tloop           The number of remote thrift calls each client makes. Default is " << loopCount << endl
        << "\tport           The port the server and clients should bind to for thrift network connections. Default is " << port << endl
        << "\tserver         Run the Thrift server in this process. Default is " << runServer << endl
        << "\tserver-type    Type of server, \"simple\" or \"thread-pool\". Default is " << serverType << endl
        << "\tprotocol-type  Type of protocol, \"binary\", \"ascii\", or \"xml\". Default is " << protocolType << endl
        << "\tlog-request    Log all requests to ./requestlog.tlog. Default is " << logRequests << endl
        << "\treplay-request Replay requests from the log file (./requestlog.tlog). Default is " << replayRequests << endl
        << "\tworkers        Number of thread pool workers. Only valid for the thread-pool server type. Default is " << workerCount << endl;

  map<string, string> args;

  // Parse --key=value style arguments into the args map; a bare --key is
  // treated as --key=true.
  for (int ix = 1; ix < argc; ix++) {
    string arg(argv[ix]);
    if (arg.compare(0, 2, "--") == 0) {
      size_t end = arg.find_first_of("=", 2);
      string key = string(arg, 2, end - 2);
      if (end != string::npos) {
        args[key] = string(arg, end + 1);
      } else {
        args[key] = "true";
      }
    } else {
      throw invalid_argument("Unexpected command line token: " + arg);
    }
  }

  try {
    if (!args["clients"].empty()) {
      clientCount = atoi(args["clients"].c_str());
    }
    if (!args["help"].empty()) {
      cerr << usage.str();
      return 0;
    }
    if (!args["loop"].empty()) {
      loopCount = atoi(args["loop"].c_str());
    }
    if (!args["call"].empty()) {
      callName = args["call"];
    }
    if (!args["port"].empty()) {
      port = atoi(args["port"].c_str());
    }
    if (!args["server"].empty()) {
      runServer = args["server"] == "true";
    }
    if (!args["log-request"].empty()) {
      logRequests = args["log-request"] == "true";
    }
    if (!args["replay-request"].empty()) {
      replayRequests = args["replay-request"] == "true";
    }
    if (!args["server-type"].empty()) {
      serverType = args["server-type"];
      // Validate the server type; the empty branches simply accept
      // the known values.
      if (serverType == "simple") {
      } else if (serverType == "thread-pool") {
      } else if (serverType == "threaded") {
      } else {
        throw invalid_argument("Unknown server type " + serverType);
      }
    }
    if (!args["workers"].empty()) {
      workerCount = atoi(args["workers"].c_str());
    }
  } catch (const std::exception& e) {
    cerr << e.what() << endl;
    cerr << usage.str();
  }

  shared_ptr<PosixThreadFactory> threadFactory =
      shared_ptr<PosixThreadFactory>(new PosixThreadFactory());

  // Dispatcher
  shared_ptr<Server> serviceHandler(new Server());

  if (replayRequests) {
    shared_ptr<ServiceProcessor> serviceProcessor(new ServiceProcessor(serviceHandler));

    // Transports
    shared_ptr<TFileTransport> fileTransport(new TFileTransport(requestLogPath));
    fileTransport->setChunkSize(2 * 1024 * 1024);
    fileTransport->setMaxEventSize(1024 * 16);
    fileTransport->seekToEnd();

    // Protocol Factory
    shared_ptr<TProtocolFactory> protocolFactory(new TBinaryProtocolFactory());

    TFileProcessor fileProcessor(serviceProcessor, protocolFactory, fileTransport);
    fileProcessor.process(0, true);
    exit(0);
  }

  if (runServer) {
    shared_ptr<ServiceProcessor> serviceProcessor(new ServiceProcessor(serviceHandler));

    // Transport
    shared_ptr<TServerSocket> serverSocket(new TServerSocket(port));

    // Transport Factory
    shared_ptr<TTransportFactory> transportFactory(new TBufferedTransportFactory());

    // Protocol Factory
    shared_ptr<TProtocolFactory> protocolFactory(new TBinaryProtocolFactory());

    if (logRequests) {
      // Initialize the log file.
      shared_ptr<TFileTransport> fileTransport(new TFileTransport(requestLogPath));
      fileTransport->setChunkSize(2 * 1024 * 1024);
      fileTransport->setMaxEventSize(1024 * 16);

      transportFactory =
          shared_ptr<TTransportFactory>(new TPipedTransportFactory(fileTransport));
    }

    shared_ptr<Thread> serverThread;

    if (serverType == "simple") {
      serverThread = threadFactory->newThread(shared_ptr<TServer>(
          new TSimpleServer(serviceProcessor, serverSocket, transportFactory, protocolFactory)));
    } else if (serverType == "threaded") {
      serverThread = threadFactory->newThread(shared_ptr<TServer>(
          new TThreadedServer(serviceProcessor, serverSocket, transportFactory, protocolFactory)));
    } else if (serverType == "thread-pool") {
      shared_ptr<ThreadManager> threadManager =
          ThreadManager::newSimpleThreadManager(workerCount);
      threadManager->threadFactory(threadFactory);
      threadManager->start();
      serverThread = threadFactory->newThread(shared_ptr<TServer>(
          new TThreadPoolServer(serviceProcessor, serverSocket, transportFactory,
                                protocolFactory, threadManager)));
    }

    cerr << "Starting the server on port " << port << endl;
    serverThread->start();

    // If we aren't running clients, just wait forever for external clients.
    if (clientCount == 0) {
      serverThread->join();
    }
  }

  if (clientCount > 0) {
    Monitor monitor;
    size_t threadCount = 0;

    set<shared_ptr<Thread> > clientThreads;

    if (callName == "echoVoid") {
      loopType = T_VOID;
    } else if (callName == "echoByte") {
      loopType = T_BYTE;
    } else if (callName == "echoI32") {
      loopType = T_I32;
    } else if (callName == "echoI64") {
      loopType = T_I64;
    } else if (callName == "echoString") {
      loopType = T_STRING;
    } else {
      throw invalid_argument("Unknown service call " + callName);
    }

    for (size_t ix = 0; ix < clientCount; ix++) {
      shared_ptr<TSocket> socket(new TSocket("127.0.0.1", port));
      shared_ptr<TBufferedTransport> bufferedSocket(new TBufferedTransport(socket, 2048));
      shared_ptr<TProtocol> protocol(new TBinaryProtocol(bufferedSocket));
      shared_ptr<ServiceClient> serviceClient(new ServiceClient(protocol));

      clientThreads.insert(threadFactory->newThread(shared_ptr<ClientThread>(
          new ClientThread(socket, serviceClient, monitor, threadCount, loopCount, loopType))));
    }

    for (std::set<shared_ptr<Thread> >::const_iterator thread = clientThreads.begin();
         thread != clientThreads.end(); thread++) {
      (*thread)->start();
    }

    int64_t time00;
    int64_t time01;

    {
      Synchronized s(monitor);
      threadCount = clientCount;

      cerr << "Launch " << clientCount << " client threads" << endl;
      time00 = Util::currentTime();
      monitor.notifyAll();

      while (threadCount > 0) {
        monitor.wait();
      }

      time01 = Util::currentTime();
    }

    int64_t firstTime = 9223372036854775807LL;
    int64_t lastTime = 0;

    double averageTime = 0;
    int64_t minTime = 9223372036854775807LL;
    int64_t maxTime = 0;

    for (set<shared_ptr<Thread> >::iterator ix = clientThreads.begin();
         ix != clientThreads.end(); ix++) {
      shared_ptr<ClientThread> client =
          dynamic_pointer_cast<ClientThread>((*ix)->runnable());

      int64_t delta = client->_endTime - client->_startTime;
      assert(delta > 0);

      if (client->_startTime < firstTime) {
        firstTime = client->_startTime;
      }
      if (client->_endTime > lastTime) {
        lastTime = client->_endTime;
      }
      if (delta < minTime) {
        minTime = delta;
      }
      if (delta > maxTime) {
        maxTime = delta;
      }

      averageTime += delta;
    }

    averageTime /= clientCount;

    cout << "workers :" << workerCount << ", client : " << clientCount
         << ", loops : " << loopCount << ", rate : "
         << (clientCount * loopCount * 1000) / ((double)(time01 - time00)) << endl;

    count_map count = serviceHandler->getCount();
    count_map::iterator iter;
    for (iter = count.begin(); iter != count.end(); ++iter) {
      printf("%s => %d\n", iter->first, iter->second);
    }
    cerr << "done." << endl;
  }

  return 0;
}
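//
// ClientThread is referenced above but not defined in this excerpt. A
// minimal sketch follows, assuming the synchronization contract main()
// relies on: each client waits for the start broadcast, runs loopCount
// calls, records its start/end times, and notifies the monitor when the
// shared threadCount reaches zero. The member names and the generated
// echo* client signatures here are assumptions, not the actual code.
//
class ClientThread : public Runnable {
public:
  ClientThread(shared_ptr<TTransport> transport,
               shared_ptr<ServiceClient> client,
               Monitor& monitor,
               size_t& workerCount,
               size_t loopCount,
               TType loopType)
    : _transport(transport),
      _client(client),
      _monitor(monitor),
      _workerCount(workerCount),
      _loopCount(loopCount),
      _loopType(loopType) {}

  void run() {
    // Wait for main() to set threadCount and broadcast the start signal.
    {
      Synchronized s(_monitor);
      while (_workerCount == 0) {
        _monitor.wait();
      }
    }

    _startTime = Util::currentTime();
    _transport->open();

    for (size_t ix = 0; ix < _loopCount; ix++) {
      switch (_loopType) {
      case T_VOID:
        _client->echoVoid();
        break;
      case T_BYTE:
        _client->echoByte(1);
        break;
      case T_I32:
        _client->echoI32(1);
        break;
      case T_I64:
        _client->echoI64(1);
        break;
      case T_STRING: {
        string result;
        _client->echoString(result, "hello");
        break;
      }
      default:
        break;
      }
    }

    _endTime = Util::currentTime();
    _transport->close();

    // Last client out wakes the main thread.
    {
      Synchronized s(_monitor);
      if (--_workerCount == 0) {
        _monitor.notify();
      }
    }
  }

  int64_t _startTime;
  int64_t _endTime;

private:
  shared_ptr<TTransport> _transport;
  shared_ptr<ServiceClient> _client;
  Monitor& _monitor;
  size_t& _workerCount;
  size_t _loopCount;
  TType _loopType;
};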
void
MonitorMutexTest::run()
{
    Monitor<Mutex> monitor;
    MonitorMutexTestThreadPtr t;
    MonitorMutexTestThread2Ptr t2;
    MonitorMutexTestThread2Ptr t3;
    ThreadControl control;
    ThreadControl control2;

    {
        Monitor<Mutex>::Lock lock(monitor);

        // TEST: TryLock. The mutex is non-recursive, so a TryLock taken
        // in the same thread must not acquire it.
        try
        {
            Monitor<Mutex>::TryLock tlock(monitor);
            test(!tlock.acquired());
        }
        catch(const ThreadLockedException&)
        {
            //
            // pthread_mutex_trylock returns EDEADLK in FreeBSD's new
            // threading implementation as well as in Fedora Core 5.
            //
        }

        // TEST: Start thread, try to acquire the mutex.
        t = new MonitorMutexTestThread(monitor);
        control = t->start();

        // TEST: Wait until the tryLock has been tested.
        t->waitTryLock();
    }

    //
    // TEST: Once the mutex has been released, the thread should
    // acquire the mutex and then terminate.
    //
    control.join();

    // TEST: notify() wakes exactly one consumer.
    t2 = new MonitorMutexTestThread2(monitor);
    control = t2->start();
    t3 = new MonitorMutexTestThread2(monitor);
    control2 = t3->start();

    // Give the threads time to start waiting.
    ThreadControl::sleep(Time::seconds(1));

    {
        Monitor<Mutex>::Lock lock(monitor);
        monitor.notify();
    }

    // Give one thread time to terminate.
    ThreadControl::sleep(Time::seconds(1));

    test((t2->finished && !t3->finished) || (t3->finished && !t2->finished));

    {
        Monitor<Mutex>::Lock lock(monitor);
        monitor.notify();
    }
    control.join();
    control2.join();

    // TEST: notifyAll() wakes all consumers.
    t2 = new MonitorMutexTestThread2(monitor);
    control = t2->start();
    t3 = new MonitorMutexTestThread2(monitor);
    control2 = t3->start();

    // Give the threads time to start waiting.
    ThreadControl::sleep(Time::seconds(1));

    {
        Monitor<Mutex>::Lock lock(monitor);
        monitor.notifyAll();
    }
    control.join();
    control2.join();

    // TEST: timedWait
    {
        Monitor<Mutex>::Lock lock(monitor);

        try
        {
            monitor.timedWait(Time::milliSeconds(-1));
            test(false);
        }
        catch(const IceUtil::InvalidTimeoutException&)
        {
        }

        test(!monitor.timedWait(Time::milliSeconds(500)));
    }
}
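//
// Both monitor tests use a second helper runnable as the consumer side of
// the notify()/notifyAll() checks. A minimal sketch of
// MonitorMutexTestThread2, assuming the same IceUtil API as above; the
// 'finished' flag is the only member the tests actually inspect, and the
// details are illustrative.
//
class MonitorMutexTestThread2 : public Thread
{
public:

    MonitorMutexTestThread2(Monitor<Mutex>& m) :
        finished(false),
        _monitor(m)
    {
    }

    virtual void run()
    {
        Monitor<Mutex>::Lock lock(_monitor);
        _monitor.wait();   // Released by notify() or notifyAll().
        finished = true;   // Read by the main thread after a grace period.
    }

    bool finished;

private:

    Monitor<Mutex>& _monitor;
};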
/**
 * Block test.
 * Create pendingTaskCountMax tasks. Verify that adding the
 * (pendingTaskCountMax + 1)th task blocks. Verify that we unblock when a
 * task completes.
 */
static void blockTest(int64_t /*timeout*/, size_t numWorkers) {
  size_t pendingTaskMaxCount = numWorkers;

  auto threadManager =
      ThreadManager::newSimpleThreadManager(numWorkers, pendingTaskMaxCount);
  auto threadFactory = std::make_shared<PosixThreadFactory>();
  threadManager->threadFactory(threadFactory);
  threadManager->start();

  Monitor monitor;
  Monitor bmonitor;

  // Add an initial set of tasks, 1 task per worker.
  bool blocked1 = true;
  size_t tasksCount1 = numWorkers;
  std::set<std::shared_ptr<BlockTask>> tasks;
  for (size_t ix = 0; ix < numWorkers; ix++) {
    auto task = std::make_shared<BlockTask>(
        &monitor, &bmonitor, &blocked1, &tasksCount1);
    tasks.insert(task);
    threadManager->add(task);
  }
  REQUIRE_EQUAL_TIMEOUT(threadManager->totalTaskCount(), numWorkers);

  // Add a second set of tasks.
  // All of these will end up pending since the first set of tasks
  // are using up all of the worker threads and are still blocked.
  bool blocked2 = true;
  size_t tasksCount2 = pendingTaskMaxCount;
  for (size_t ix = 0; ix < pendingTaskMaxCount; ix++) {
    auto task = std::make_shared<BlockTask>(
        &monitor, &bmonitor, &blocked2, &tasksCount2);
    tasks.insert(task);
    threadManager->add(task);
  }
  REQUIRE_EQUAL_TIMEOUT(threadManager->totalTaskCount(),
                        numWorkers + pendingTaskMaxCount);
  REQUIRE_EQUAL_TIMEOUT(threadManager->pendingTaskCountMax(),
                        pendingTaskMaxCount);

  // Attempt to add one more task.
  // Since the pending task count is at its maximum, this should fail.
  bool blocked3 = true;
  size_t tasksCount3 = 1;
  auto extraTask = std::make_shared<BlockTask>(
      &monitor, &bmonitor, &blocked3, &tasksCount3);
  ASSERT_THROW(threadManager->add(extraTask, 1), TimedOutException);
  ASSERT_THROW(threadManager->add(extraTask, -1), TooManyPendingTasksException);

  // Unblock the first set of tasks.
  {
    Synchronized s(bmonitor);
    blocked1 = false;
    bmonitor.notifyAll();
  }
  // Wait for the first set of tasks to all finish.
  {
    Synchronized s(monitor);
    while (tasksCount1 != 0) {
      monitor.wait();
    }
  }

  // We should be able to add the extra task now.
  try {
    threadManager->add(extraTask, 1);
  } catch (const TimedOutException& e) {
    FAIL() << "Unexpected timeout adding task";
  } catch (const TooManyPendingTasksException& e) {
    FAIL() << "Unexpected failure adding task";
  }

  // Unblock the second set of tasks.
  {
    Synchronized s(bmonitor);
    blocked2 = false;
    bmonitor.notifyAll();
  }
  {
    Synchronized s(monitor);
    while (tasksCount2 != 0) {
      monitor.wait();
    }
  }

  // Unblock the extra task.
  {
    Synchronized s(bmonitor);
    blocked3 = false;
    bmonitor.notifyAll();
  }
  {
    Synchronized s(monitor);
    while (tasksCount3 != 0) {
      monitor.wait();
    }
  }

  CHECK_EQUAL_TIMEOUT(threadManager->totalTaskCount(), 0);
}
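//
// BlockTask is the runnable blockTest() drives but is not shown in this
// excerpt. A minimal sketch under the same assumptions the test makes:
// each task parks on bmonitor until its 'blocked' flag is cleared, then
// decrements the shared counter and, when the counter hits zero, notifies
// the completion monitor. Member names are illustrative.
//
class BlockTask : public Runnable {
 public:
  BlockTask(Monitor* monitor, Monitor* bmonitor, bool* blocked, size_t* count)
      : monitor_(monitor),
        bmonitor_(bmonitor),
        blocked_(blocked),
        count_(count) {}

  void run() override {
    // Stay blocked until the test flips the flag and broadcasts.
    {
      Synchronized s(*bmonitor_);
      while (*blocked_) {
        bmonitor_->wait();
      }
    }

    // Report completion; wake the test thread when the batch is done.
    {
      Synchronized s(*monitor_);
      if (--*count_ == 0) {
        monitor_->notify();
      }
    }
  }

 private:
  Monitor* monitor_;
  Monitor* bmonitor_;
  bool* blocked_;
  size_t* count_;
};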