uint32 SuperpixelGetter::queue(uint32 bodyid)
{
    IntVec& planes = m_planes.start(bodyid);
    IntVec& spids  = m_spids.start(bodyid);
    getStack()->getsuperpixelsinbody(bodyid, planes, spids);
    assert(planes.size() == spids.size());
    return planes.size();
}
void QueueTest::putMessages() {
  EventBase eventBase;

  QueueConsumer consumer;
  QueueConsumer consumer2;
  consumer.fn = [&](int msg) {
    // Stop consuming after we receive a message with value 0, and start
    // consumer2
    if (msg == 0) {
      consumer.stopConsuming();
      consumer2.startConsuming(&eventBase, &queue);
    }
  };
  consumer2.fn = [&](int msg) {
    // Stop consuming after we receive a message with value 0
    if (msg == 0) {
      consumer2.stopConsuming();
    }
  };
  consumer.startConsuming(&eventBase, &queue);

  list<int> msgList = { 1, 2, 3, 4 };
  vector<int> msgVector = { 5, 0, 9, 8, 7, 6, 7, 7, 8, 8,
                            2, 9, 6, 6, 10, 2, 0 };
  // Call putMessages() several times to add messages to the queue
  queue.putMessages(msgList.begin(), msgList.end());
  queue.putMessages(msgVector.begin() + 2, msgVector.begin() + 4);
  // Test sending 17 messages, the pipe-based queue calls write in 16 byte
  // chunks
  queue.putMessages(msgVector.begin(), msgVector.end());

  // Loop until the consumer has stopped
  eventBase.loop();

  vector<int> expectedMessages = { 1, 2, 3, 4, 9, 8, 7, 5, 0 };
  vector<int> expectedMessages2 = { 9, 8, 7, 6, 7, 7, 8, 8, 2, 9, 6, 10, 2, 0 };

  EXPECT_EQ(expectedMessages.size(), consumer.messages.size());
  for (unsigned int idx = 0; idx < expectedMessages.size(); ++idx) {
    EXPECT_EQ(expectedMessages[idx], consumer.messages.at(idx));
  }
  EXPECT_EQ(expectedMessages2.size(), consumer2.messages.size());
  for (unsigned int idx = 0; idx < expectedMessages2.size(); ++idx) {
    EXPECT_EQ(expectedMessages2[idx], consumer2.messages.at(idx));
  }
}
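// The NotificationQueue tests in this file rely on two small helpers that are
// not reproduced in this excerpt: an 'IntQueue' alias and a 'QueueConsumer'
// that records everything it receives.  The sketch below shows one plausible
// shape for them, assuming folly's NotificationQueue<int>::Consumer
// interface; the real definitions in the test file may differ in detail.

#include <folly/io/async/NotificationQueue.h>

#include <deque>
#include <functional>

using IntQueue = folly::NotificationQueue<int>;

class QueueConsumer : public IntQueue::Consumer {
 public:
  void messageAvailable(int&& value) noexcept override {
    // Remember the message, then give the owning test a chance to react
    // (stop consuming, start another consumer, terminate the loop, ...).
    messages.push_back(value);
    if (fn) {
      fn(value);
    }
  }

  std::function<void(int)> fn;  // optional per-message hook set by each test
  std::deque<int> messages;     // everything consumed so far, in order
};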
void QueueTest::fillQueue(bool expectFail) {
  try {
    for (uint32_t i = 0; i < 0x000fffff; i++) {
      queue.putMessage(i);
    }
    BOOST_CHECK(!expectFail);
  } catch (const apache::thrift::TLibraryException& ex) {
    BOOST_CHECK(expectFail);
  } catch (...) {
    BOOST_CHECK(false);
  }
}
TEST(NotificationQueueTest, ConsumeUntilDrainedStress) {
  for (size_t i = 0; i < 1 << 8; ++i) {
    // Basic tests: make sure we
    // - drain all the messages
    // - ignore any maxReadAtOnce
    // - can't add messages during draining
    EventBase eventBase;
    IntQueue queue;
    QueueConsumer consumer;
    consumer.fn = [&](int j) {
      EXPECT_THROW(queue.tryPutMessage(j), std::runtime_error);
      EXPECT_FALSE(queue.tryPutMessageNoThrow(j));
      EXPECT_THROW(queue.putMessage(j), std::runtime_error);
      std::vector<int> ints{1, 2, 3};
      EXPECT_THROW(
          queue.putMessages(ints.begin(), ints.end()), std::runtime_error);
    };
    consumer.setMaxReadAtOnce(10); // We should ignore this
    consumer.startConsuming(&eventBase, &queue);
    for (int j = 0; j < 20; j++) {
      queue.putMessage(j);
    }
    EXPECT_TRUE(consumer.consumeUntilDrained());
    EXPECT_EQ(20, consumer.messages.size());

    // Make sure there can only be one drainer at once
    folly::Baton<> callbackBaton, threadStartBaton;
    consumer.fn = [&](int /* i */) { callbackBaton.wait(); };
    QueueConsumer competingConsumer;
    competingConsumer.startConsuming(&eventBase, &queue);
    queue.putMessage(1);
    atomic<bool> raceA {false};
    atomic<bool> raceB {false};
    size_t numConsA = 0;
    size_t numConsB = 0;
    auto thread = std::thread([&] {
      threadStartBaton.post();
      raceB = consumer.consumeUntilDrained(&numConsB) && numConsB;
    });
    threadStartBaton.wait();
    raceA = competingConsumer.consumeUntilDrained(&numConsA) && numConsA;
    callbackBaton.post();
    thread.join();

    EXPECT_FALSE(raceA && raceB);
    EXPECT_TRUE(raceA || raceB);
    EXPECT_TRUE(raceA ^ raceB);
  }
}
void QueueTest::sendOne() {
  // Create a notification queue and a callback in this thread
  TEventBase eventBase;

  QueueConsumer consumer;
  consumer.fn = [&](int) {
    // Stop consuming after we receive 1 message
    consumer.stopConsuming();
  };
  consumer.startConsuming(&eventBase, &queue);

  // Start a new TEventBase thread to put a message on our queue
  ScopedEventBaseThread t1;
  t1.getEventBase()->runInEventBaseThread([&] {
    queue.putMessage(5);
  });

  // Loop until we receive the message
  eventBase.loop();

  const auto& messages = consumer.messages;
  BOOST_CHECK_EQUAL(messages.size(), 1);
  BOOST_CHECK_EQUAL(messages.at(0), 5);
}
int main(int argc, char *argv[])
{
    int test = argc > 1 ? atoi(argv[1]) : 0;
    int verbose = argc > 2;
    int veryVerbose = argc > 3;
    int veryVeryVerbose = argc > 4;

    cout << "TEST " << __FILE__ << " CASE " << test << endl;

    switch (test) { case 0:
      case 7: {
        // --------------------------------------------------------------------
        // TESTING CONCURRENT SEMAPHORE CREATION
        //
        // Concerns:
        //  1. On Darwin the creation of the semaphore object is synchronized
        //     because it's implemented via named semaphores.  Concurrent
        //     creation of multiple semaphores should not lead to invalid
        //     semaphore objects or deadlock.
        //
        // Plan:
        //  1. In multiple threads, create a number of semaphore objects and
        //     verify that they are valid.
        // --------------------------------------------------------------------

        if (verbose) cout << "Testing concurrent creation\n"
                          << "===========================\n";

        vector<bslmt::ThreadUtil::Handle> threads(16);

        for (int i = 0; i != threads.size(); ++i) {
            int rc = bslmt::ThreadUtil::create(&threads[i],
                                               &createSemaphoresWorker,
                                               NULL);
            ASSERT(rc == 0);
        }
        for (int i = 0; i != threads.size(); ++i) {
            bslmt::ThreadUtil::join(threads[i]);
        }
      } break;
      case 6: {
        // --------------------------------------------------------------------
        // TESTING MULTIPLE POST
        //
        // Concern: that 'post(n)' for large n works properly.  Two test
        // modes: when threads are not waiting, and when they are concurrently
        // waiting.
        // --------------------------------------------------------------------

        enum { k_NUM_POST = 1048704, k_NUM_WAIT_THREADS = 8 };

        BSLMF_ASSERT(0 == k_NUM_POST % k_NUM_WAIT_THREADS);

        Obj sem(0);

        bslmt::ThreadUtil::Handle threads[k_NUM_WAIT_THREADS];
        MyBarrier                 barrier(k_NUM_WAIT_THREADS + 1);

        struct ThreadInfo4 info;
        info.d_numIterations = k_NUM_POST / k_NUM_WAIT_THREADS;
        info.d_barrier       = &barrier;
        info.d_sem           = &sem;

        for (int i = 0; i < k_NUM_WAIT_THREADS; ++i) {
            int rc = bslmt::ThreadUtil::create(&threads[i],
                                               thread6wait,
                                               &info);
            ASSERT(0 == rc);
        }

        if (verbose) {
            bsl::cout << "case 6 mode 1: concurrent waiting" << bsl::endl;
        }

        // MODE 1: THREADS WAITING

        barrier.wait();
        bslmt::ThreadUtil::microSleep(10000);    // 10 ms
        sem.post(k_NUM_POST);

        for (int i = 0; i < k_NUM_WAIT_THREADS; ++i) {
            ASSERT(0 == bslmt::ThreadUtil::join(threads[i]));
        }

        // If we reach here, we woke up all the threads the correct number of
        // times.

        for (int i = 0; i < k_NUM_WAIT_THREADS; ++i) {
            int rc = bslmt::ThreadUtil::create(&threads[i],
                                               thread6wait,
                                               &info);
            ASSERT(0 == rc);
        }

        if (verbose) {
            bsl::cout << "case 6 mode 2: no concurrent waiting" << bsl::endl;
        }

        // MODE 2: NO THREADS WAITING

        sem.post(k_NUM_POST);
        barrier.wait();

        for (int i = 0; i < k_NUM_WAIT_THREADS; ++i) {
            ASSERT(0 == bslmt::ThreadUtil::join(threads[i]));
        }

        // If we reach here, we woke up all the threads the correct number of
        // times.
      } break;
      case 5: {
        ///Usage
        ///-----
        // This component is an implementation detail of 'bslmt' and is *not*
        // intended for direct client use.  It is subject to change without
        // notice.  As such, a usage example is not provided.

        // USAGE EXAMPLE

        IntQueue testQueue;
        testQueue.pushInt(1);
        ASSERT(1 == testQueue.getInt());
        testQueue.pushInt(2);
        ASSERT(2 == testQueue.getInt());
      } break;
      case 4: {
        // --------------------------------------------------------------------
        // TESTING 'tryWait'
        //
        // Concerns:
        //  1. 'tryWait' decrements the count if resources are available, or
        //     returns an error otherwise.
        //
        // Plan:
        //  We create two groups of threads.  One will call 'post', the other
        //  'tryWait'.  First, we make sure that 'tryWait' fails if no
        //  resources are available.  Then we will make sure it succeeds if
        //  resources are.  We will also test that 'tryWait' works fine in the
        //  steady state.
        //
        // Testing:
        //   void tryWait();
        // --------------------------------------------------------------------

        if (verbose) cout << endl
                          << "Testing 'tryWait'" << endl
                          << "=================" << endl;

        bslmt::ThreadUtil::Handle threads[10];
        MyBarrier                 barrier(10);
        Obj                       sem(0);

        struct ThreadInfo4 info;
        info.d_numIterations = 5000;  // number of ops per thread / 3
        info.d_barrier       = &barrier;
        info.d_sem           = &sem;

        for (int i = 0; i < 5; ++i) {
            ASSERT(0 == bslmt::ThreadUtil::create(&threads[i * 2],
                                                  thread4Post,
                                                  &info));
            ASSERT(0 == bslmt::ThreadUtil::create(&threads[i * 2 + 1],
                                                  thread4Wait,
                                                  &info));
        }
        for (int i = 0; i < 10; ++i) {
            ASSERT(0 == bslmt::ThreadUtil::join(threads[i]));
        }
      } break;
      case 3: {
        // --------------------------------------------------------------------
        // TESTING 'post(int)'
        //
        // Concerns:
        //  1. post(int) increments the count by the expected number.
        //
        // Plan:
        //  Create a set of threads calling 'wait' and use a thread to post a
        //  number smaller than the set of threads.
        //
        // Testing:
        //   void post(int number);
        // --------------------------------------------------------------------

        if (verbose) cout << endl
                          << "Testing 'post(int number)'" << endl
                          << "==========================" << endl;

        bslmt::ThreadUtil::Handle threads[6];
        MyBarrier                 barrier(6);
        Obj                       sem(0);

        struct ThreadInfo3 info;
        info.d_numIterations  = 10000;  // number of ops per thread
        info.d_numWaitThreads = 5;
        info.d_barrier        = &barrier;
        info.d_sem            = &sem;

        for (int i = 0; i < 5; ++i) {
            ASSERT(0 == bslmt::ThreadUtil::create(&threads[i],
                                                  thread3Wait,
                                                  &info));
        }
        ASSERT(0 == bslmt::ThreadUtil::create(&threads[5],
                                              thread3Post,
                                              &info));
        for (int i = 0; i < 6; ++i) {
            ASSERT(0 == bslmt::ThreadUtil::join(threads[i]));
        }
      } break;
      case 2: {
        // --------------------------------------------------------------------
        // TESTING 'wait' and 'post'
        //
        // Concerns:
        //  1. wait() blocks the thread when no resource is available, and
        //     then decrements the count.
        //  2. post() increments the count.
        //
        // Plan:
        //  Create two groups of threads: one will call 'post' and the other
        //  will call 'wait'.  To address concern 1, we will use a barrier and
        //  a counter to make sure that waiting threads are blocked into wait
        //  state before any calls to 'post'.  After that, we will post a
        //  small limited number of times (between 0 and 5), and check that
        //  the semaphore can indeed satisfy that number of waiters.  Then we
        //  try to reach a steady state by calling the two functions 'post'
        //  and 'wait' in a number of threads each, perturbing the 'post'
        //  operation by adding small delays to exercise different parts of
        //  the code.
        //
        // Testing:
        //   void post();
        //   void wait(int *signalInterrupted = 0);
        // --------------------------------------------------------------------

        if (verbose) cout << endl
                          << "Testing 'wait' and 'post'" << endl
                          << "=========================" << endl;

        enum { k_NUM_POSTERS = 5, k_NUM_WAITERS = 5 };

        for (int n = 0; n < 5; ++n) {
            if (veryVerbose) cout << "\tPosting " << n << " first." << endl;

            bslmt::ThreadUtil::Handle threads[k_NUM_POSTERS + k_NUM_WAITERS];
            MyBarrier                 barrier(k_NUM_POSTERS + 1);
            bsls::AtomicInt           posts(n);
            bsls::AtomicInt           past(0);
            Obj                       sem(0);

            struct ThreadInfo2 info;
            info.d_numIterations   = 1000;  // number of ops per thread
            info.d_barrier         = &barrier;
            info.d_sem             = &sem;
            info.d_past            = &past;
            info.d_numInitialPosts = &posts;
            info.d_verbose         = veryVeryVerbose;

            for (int i = 0; i < k_NUM_POSTERS; ++i) {
                ASSERT(0 == bslmt::ThreadUtil::create(&threads[i],
                                                      thread2Post,
                                                      &info));
            }
            for (int i = 0; i < k_NUM_WAITERS; ++i) {
                ASSERT(0 == bslmt::ThreadUtil::create(
                                                  &threads[i + k_NUM_POSTERS],
                                                  thread2Wait,
                                                  &info));
            }

            bslmt::ThreadUtil::microSleep(1000 * 100);
            ASSERT(0 == past);

            barrier.wait();    // Wait until the initial posters complete.

            if (veryVerbose) cout << "\t\tFirst barrier passed." << endl;

            while (n != past) {
                // Wait for some waiters to grab the posts.

                if (veryVeryVerbose) MTCOUT << "\t\tWaiters still blocking, "
                                            << posts << "." << MTENDL;
                bslmt::ThreadUtil::microSleep(1000 * 100);
            }

            barrier.wait();    // Unleash the remaining posters.

            if (veryVerbose) cout << "\t\tSecond barrier passed." << endl;

            // The testing will complete once all the threads join, meaning
            // that all the waiters were satisfied.

            for (int i = 0; i < 10; ++i) {
                ASSERT(0 == bslmt::ThreadUtil::join(threads[i]));
            }
        }
      } break;
      case 1: {
        // --------------------------------------------------------------------
        // BREATHING TEST:
        //
        // Exercises basic functionality.
        // --------------------------------------------------------------------

        if (verbose) {
            cout << endl
                 << "Breathing Test" << endl
                 << "==============" << endl;
#if defined(BSLS_PLATFORM_OS_AIX) || defined(BSLS_PLATFORM_OS_LINUX)
            cout << "INFO: SEM_VALUE_MAX=" << SEM_VALUE_MAX << endl;
#endif
        }
        {
            Obj X(0);
            X.post();
            X.post(2);
            X.wait();
            X.wait();
            ASSERT(0 == X.tryWait());
            ASSERT(0 != X.tryWait());
        }
      } break;
      case -2: {
        // --------------------------------------------------------------------
        // A SIMPLE BENCHMARK
        //
        // Imitates a producer-consumer system with a fixed size queue using
        // two semaphores.
        // --------------------------------------------------------------------

        int numProducers = atoi(argv[2]);
        int numConsumers = atoi(argv[3]);
        int queueSize    = atoi(argv[4]);
        int seconds      = 5;
        int samples      = 5;

        if (verbose) cout << endl
                          << "Benchmarking....." << endl
                          << "=================" << endl
                          << "producers=" << numProducers << endl
                          << "consumers=" << numConsumers << endl
                          << "queue size=" << queueSize << endl;

        BenchData *producerData = new BenchData[numProducers];
        BenchData *consumerData = new BenchData[numConsumers];

        Obj resource(0);
        Obj queue(0);
        queue.post(queueSize);

        for (int i = 0; i < numConsumers; i++) {
            consumerData[i].resource = &resource;
            consumerData[i].queue    = &queue;
            consumerData[i].count    = 0;
            consumerData[i].stop     = false;
            bslmt::ThreadUtil::create(&consumerData[i].handle,
                                      benchConsumer,
                                      (void *)(consumerData + i));
        }
        for (int i = 0; i < numProducers; i++) {
            producerData[i].resource = &resource;
            producerData[i].queue    = &queue;
            producerData[i].stop     = false;
            bslmt::ThreadUtil::create(&producerData[i].handle,
                                      benchProducer,
                                      (void *)(producerData + i));
        }

        for (int j = 0; j < samples; j++) {
            bsls::Types::Int64 timeStart    = bsls::TimeUtil::getTimer();
            bsls::Types::Int64 timeStartCPU = ::clock();

            int *consumerCount = new int[numConsumers];
            for (int i = 0; i < numConsumers; i++) {
                consumerCount[i] = consumerData[i].count;
            }

            bsls::Types::Int64 throughput;
            bsls::Types::Int64 throughputCPU;

            for (int i = 0; i < seconds; i++) {
                bslmt::ThreadUtil::microSleep(1000000);

                bsls::Types::Int64 totalMessages = 0;
                // Note: the inner index is renamed to 'k' so it no longer
                // shadows the per-second loop variable.
                for (int k = 0; k < numConsumers; k++) {
                    totalMessages += consumerData[k].count - consumerCount[k];
                }

                bsls::Types::Int64 elapsed_us =
                              (bsls::TimeUtil::getTimer() - timeStart) / 1000;
                bsls::Types::Int64 elapsed_usCPU = ::clock() - timeStartCPU;
                throughput    = totalMessages * 1000000 / elapsed_us;
                throughputCPU = totalMessages * 1000000 / elapsed_usCPU;

                cout << "testing: " << elapsed_us / 1000 << " ms, "
                     << elapsed_usCPU * 100 / elapsed_us << " CPU%, "
                     << totalMessages << " msg, "
                     << fmt(throughput) << " msg/s, "
                     << fmt(throughputCPU) << " msg/CPUs"
                     << endl;
            }
            cout << "====== final:" << fmt(throughput) << " msg/s, "
                 << fmt(throughputCPU) << " msg/CPUs\n"
                 << endl;

            delete[] consumerCount;    // was leaked once per sample
        }

        cout << "stopping: " << flush;
        for (int i = 0; i < numProducers; i++) {
            producerData[i].stop = true;
        }
        for (int i = 0; i < numProducers; i++) {
            bslmt::ThreadUtil::join(producerData[i].handle);
            cout << 'p' << flush;
        }
        for (int i = 0; i < numConsumers; i++) {
            consumerData[i].stop = true;
        }
        resource.post(numConsumers);
        for (int i = 0; i < numConsumers; i++) {
            bslmt::ThreadUtil::join(consumerData[i].handle);
            cout << 'c' << flush;
        }
        cout << endl;

        delete[] producerData;
        delete[] consumerData;
      } break;
      default: {
        testStatus = -1;
        break;
      }
    }

    return testStatus;
}
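// The benchmark in case -2 relies on a 'BenchData' record and the
// 'benchProducer'/'benchConsumer' thread functions, which are defined
// elsewhere in the test driver and not reproduced in this excerpt.  The
// sketch below only illustrates the two-semaphore bounded-buffer pattern the
// benchmark describes; the names are hypothetical, 'Obj' is assumed to be the
// semaphore type under test, and the real helpers may differ in detail.

struct BenchDataSketch {
    bslmt::ThreadUtil::Handle handle;
    Obj                      *resource;  // counts items ready to be consumed
    Obj                      *queue;     // counts free slots in the queue
    bsls::Types::Int64        count;     // messages consumed so far
    bool                      stop;      // set by 'main' to end the run
};

extern "C" void *benchProducerSketch(void *arg)
{
    BenchDataSketch *data = static_cast<BenchDataSketch *>(arg);
    while (!data->stop) {
        data->queue->wait();       // claim a free slot
        data->resource->post();    // publish one item
    }
    return 0;
}

extern "C" void *benchConsumerSketch(void *arg)
{
    BenchDataSketch *data = static_cast<BenchDataSketch *>(arg);
    while (!data->stop) {
        data->resource->wait();    // take one item
        data->queue->post();       // release the slot it occupied
        ++data->count;
    }
    return 0;
}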
/*
 * Test code that creates a NotificationQueue, then forks, and incorrectly
 * tries to send a message to the queue from the child process.
 *
 * The child process should crash in this scenario, since the child code has a
 * bug.  (Older versions of NotificationQueue didn't catch this in the child,
 * resulting in a crash in the parent process.)
 */
TEST(NotificationQueueTest, UseAfterFork) {
  IntQueue queue;
  int childStatus = 0;
  QueueConsumer consumer;

  // Boost sets a custom SIGCHLD handler, which fails the test if a child
  // process exits abnormally.  We don't want this.
  signal(SIGCHLD, SIG_DFL);

  // Log some info so users reading the test output aren't confused
  // by the child process' crash log messages.
  LOG(INFO) << "This test makes sure the child process crashes. "
            << "Error log messages and a backtrace are expected.";

  {
    // Start a separate thread consuming from the queue
    ScopedEventBaseThread t1;
    t1.getEventBase()->runInEventBaseThread([&] {
      consumer.startConsuming(t1.getEventBase(), &queue);
    });

    // Send a message to it, just for sanity checking
    queue.putMessage(1234);

    // Fork
    pid_t pid = fork();
    if (pid == 0) {
      // The boost test framework installs signal handlers to catch errors.
      // We only want to catch in the parent.  In the child let SIGABRT crash
      // us normally.
      signal(SIGABRT, SIG_DFL);

      // Child.
      // We're horrible people, so we try to send a message to the queue
      // that is being consumed in the parent process.
      //
      // The putMessage() call should catch this error, and crash our process.
      queue.putMessage(9876);
      // We shouldn't reach here.
      _exit(0);
    }
    PCHECK(pid > 0);

    // Parent.  Wait for the child to exit.
    auto waited = waitpid(pid, &childStatus, 0);
    EXPECT_EQ(pid, waited);

    // Send another message to the queue before we terminate the thread.
    queue.putMessage(5678);
  }

  // The child process should have crashed when it tried to call putMessage()
  // on our NotificationQueue.
  EXPECT_TRUE(WIFSIGNALED(childStatus));
  EXPECT_EQ(SIGABRT, WTERMSIG(childStatus));

  // Make sure the parent saw the expected messages.
  // It should have gotten 1234 and 5678 from the parent process, but not
  // 9876 from the child.
  EXPECT_EQ(2, consumer.messages.size());
  EXPECT_EQ(1234, consumer.messages.front());
  consumer.messages.pop_front();
  EXPECT_EQ(5678, consumer.messages.front());
  consumer.messages.pop_front();
}
void QueueTest::maxReadAtOnce() {
  // Add 100 messages to the queue
  for (int n = 0; n < 100; ++n) {
    queue.putMessage(n);
  }

  EventBase eventBase;

  // Record how many messages were processed each loop iteration.
  uint32_t messagesThisLoop = 0;
  std::vector<uint32_t> messagesPerLoop;
  std::function<void()> loopFinished = [&] {
    // Record the current number of messages read this loop
    messagesPerLoop.push_back(messagesThisLoop);
    // Reset messagesThisLoop to 0 for the next loop
    messagesThisLoop = 0;

    // To prevent use-after-free bugs when eventBase destructs,
    // prevent calling runInLoop any more after the test is finished.
    // 55 == number of times loop should run.
    if (messagesPerLoop.size() != 55) {
      // Reschedule ourselves to run at the end of the next loop
      eventBase.runInLoop(loopFinished);
    }
  };
  // Schedule the first call to loopFinished
  eventBase.runInLoop(loopFinished);

  QueueConsumer consumer;
  // Read the first 50 messages 10 at a time.
  consumer.setMaxReadAtOnce(10);
  consumer.fn = [&](int value) {
    ++messagesThisLoop;
    // After 50 messages, drop to reading only 1 message at a time.
    if (value == 50) {
      consumer.setMaxReadAtOnce(1);
    }
    // Terminate the loop when we reach the end of the messages.
    if (value == 99) {
      eventBase.terminateLoopSoon();
    }
  };
  consumer.startConsuming(&eventBase, &queue);

  // Run the event loop until the consumer terminates it
  eventBase.loop();

  // The consumer should have read all 100 messages in order
  EXPECT_EQ(100, consumer.messages.size());
  for (int n = 0; n < 100; ++n) {
    EXPECT_EQ(n, consumer.messages.at(n));
  }

  // Currently EventBase happens to still run the loop callbacks even after
  // terminateLoopSoon() is called.  However, we don't really want to depend
  // on this behavior.  In case this ever changes in the future, add
  // messagesThisLoop to messagesPerLoop if the loop callback isn't invoked
  // for the last loop iteration.
  if (messagesThisLoop > 0) {
    messagesPerLoop.push_back(messagesThisLoop);
    messagesThisLoop = 0;
  }

  // For the first 5 loops it should have read 10 messages each time.
  // After that it should have read 1 message per loop for the next 50 loops.
  EXPECT_EQ(55, messagesPerLoop.size());
  for (int n = 0; n < 5; ++n) {
    EXPECT_EQ(10, messagesPerLoop.at(n));
  }
  for (int n = 5; n < 55; ++n) {
    EXPECT_EQ(1, messagesPerLoop.at(n));
  }
}
void QueueTest::maxQueueSize() {
  // Create a queue with a maximum size of 5, and fill it up
  for (int n = 0; n < 5; ++n) {
    queue.tryPutMessage(n);
  }

  // Calling tryPutMessage() now should fail
  EXPECT_THROW(queue.tryPutMessage(5), std::overflow_error);
  EXPECT_FALSE(queue.tryPutMessageNoThrow(5));
  int val = 5;
  EXPECT_FALSE(queue.tryPutMessageNoThrow(std::move(val)));

  // Pop a message from the queue
  int result = -1;
  EXPECT_TRUE(queue.tryConsume(result));
  EXPECT_EQ(0, result);

  // We should be able to write another message now that we popped one off.
  queue.tryPutMessage(5);
  // But now we are full again.
  EXPECT_THROW(queue.tryPutMessage(6), std::overflow_error);

  // putMessage() should let us exceed the maximum
  queue.putMessage(6);

  // Pull another message off
  EXPECT_TRUE(queue.tryConsume(result));
  EXPECT_EQ(1, result);

  // tryPutMessage() should still fail since putMessage() actually put us over
  // the max.
  EXPECT_THROW(queue.tryPutMessage(7), std::overflow_error);

  // Pull another message off and try again
  EXPECT_TRUE(queue.tryConsume(result));
  EXPECT_EQ(2, result);
  queue.tryPutMessage(7);

  // Now pull all the remaining messages off
  EXPECT_TRUE(queue.tryConsume(result));
  EXPECT_EQ(3, result);
  EXPECT_TRUE(queue.tryConsume(result));
  EXPECT_EQ(4, result);
  EXPECT_TRUE(queue.tryConsume(result));
  EXPECT_EQ(5, result);
  EXPECT_TRUE(queue.tryConsume(result));
  EXPECT_EQ(6, result);
  EXPECT_TRUE(queue.tryConsume(result));
  EXPECT_EQ(7, result);

  // There should be no messages left
  result = -1;
  EXPECT_TRUE(!queue.tryConsume(result));
  EXPECT_EQ(-1, result);
}
void QueueTest::multiConsumer() {
  uint32_t numConsumers = 8;
  uint32_t numMessages = 10000;

  // Create several consumers each running in their own EventBase thread
  vector<QueueConsumer> consumers(numConsumers);
  vector<ScopedEventBaseThread> threads(numConsumers);

  for (uint32_t consumerIdx = 0; consumerIdx < numConsumers; ++consumerIdx) {
    QueueConsumer* consumer = &consumers[consumerIdx];
    consumer->fn = [consumer, consumerIdx, this](int value) {
      // Treat 0 as a signal to stop.
      if (value == 0) {
        consumer->stopConsuming();
        // Put a message on the terminationQueue to indicate we have stopped
        terminationQueue.putMessage(consumerIdx);
      }
    };

    EventBase* eventBase = threads[consumerIdx].getEventBase();
    eventBase->runInEventBaseThread([eventBase, consumer, this] {
      consumer->startConsuming(eventBase, &queue);
    });
  }

  // Now add a number of messages from this thread
  // Start at 1 rather than 0, since 0 is the signal to stop.
  for (uint32_t n = 1; n < numMessages; ++n) {
    queue.putMessage(n);
  }
  // Now add a 0 for each consumer, to signal them to stop
  for (uint32_t n = 0; n < numConsumers; ++n) {
    queue.putMessage(0);
  }

  // Wait until we get notified that all of the consumers have stopped
  // We use a separate notification queue for this.
  QueueConsumer terminationConsumer;
  vector<uint32_t> consumersStopped(numConsumers, 0);
  uint32_t consumersRemaining = numConsumers;
  terminationConsumer.fn = [&](int consumerIdx) {
    --consumersRemaining;
    if (consumersRemaining == 0) {
      terminationConsumer.stopConsuming();
    }

    EXPECT_GE(consumerIdx, 0);
    EXPECT_LT(consumerIdx, numConsumers);
    ++consumersStopped[consumerIdx];
  };
  EventBase eventBase;
  terminationConsumer.startConsuming(&eventBase, &terminationQueue);
  eventBase.loop();

  // Verify that we saw exactly 1 stop message for each consumer
  for (uint32_t n = 0; n < numConsumers; ++n) {
    EXPECT_EQ(1, consumersStopped[n]);
  }

  // Validate that every message sent to the main queue was received exactly
  // once.
  vector<int> messageCount(numMessages, 0);
  for (uint32_t n = 0; n < numConsumers; ++n) {
    for (int msg : consumers[n].messages) {
      EXPECT_GE(msg, 0);
      EXPECT_LT(msg, numMessages);
      ++messageCount[msg];
    }
  }

  // 0 is the signal to stop, and should have been received once by each
  // consumer
  EXPECT_EQ(numConsumers, messageCount[0]);
  // All other messages should have been received exactly once
  for (uint32_t n = 1; n < numMessages; ++n) {
    EXPECT_EQ(1, messageCount[n]);
  }
}
void SuperpixelGetter::get(uint32  bodyid,
                           uint32  count,
                           uint32 *planes,
                           uint32 *spids)
{
    m_planes.get(count, planes, bodyid);
    m_spids.get(count, spids, bodyid);
}
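// Taken together, SuperpixelGetter::queue() and SuperpixelGetter::get() form
// a two-step API: queue() stages the plane/superpixel lists for a body and
// returns their length, and get() copies them into caller-provided buffers.
// The sketch below shows a hypothetical caller (the function, 'getter', and
// 'bodyid' names are illustrative and not from the original source; it
// assumes <vector> and <iostream> are included).

void printSuperpixelsSketch(SuperpixelGetter& getter, uint32 bodyid)
{
    uint32 count = getter.queue(bodyid);    // stage the lists, get their size

    std::vector<uint32> planes(count);
    std::vector<uint32> spids(count);
    getter.get(bodyid, count, planes.data(), spids.data());

    for (uint32 i = 0; i < count; ++i) {
        std::cout << "plane " << planes[i]
                  << " superpixel " << spids[i] << "\n";
    }
}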
void QueueTest::maxQueueSize() {
  // Create a queue with a maximum size of 5, and fill it up
  for (int n = 0; n < 5; ++n) {
    queue.tryPutMessage(n);
  }

  // Calling tryPutMessage() now should fail
  BOOST_CHECK_THROW(queue.tryPutMessage(5), TQueueFullException);
  BOOST_CHECK_EQUAL(queue.tryPutMessageNoThrow(5), false);
  int val = 5;
  BOOST_CHECK_EQUAL(queue.tryPutMessageNoThrow(std::move(val)), false);

  // Pop a message from the queue
  int result = -1;
  BOOST_CHECK(queue.tryConsume(result));
  BOOST_CHECK_EQUAL(result, 0);

  // We should be able to write another message now that we popped one off.
  queue.tryPutMessage(5);
  // But now we are full again.
  BOOST_CHECK_THROW(queue.tryPutMessage(6), TQueueFullException);

  // putMessage() should let us exceed the maximum
  queue.putMessage(6);

  // Pull another message off
  BOOST_CHECK(queue.tryConsume(result));
  BOOST_CHECK_EQUAL(result, 1);

  // tryPutMessage() should still fail since putMessage() actually put us over
  // the max.
  BOOST_CHECK_THROW(queue.tryPutMessage(7), TQueueFullException);

  // Pull another message off and try again
  BOOST_CHECK(queue.tryConsume(result));
  BOOST_CHECK_EQUAL(result, 2);
  queue.tryPutMessage(7);

  // Now pull all the remaining messages off
  BOOST_CHECK(queue.tryConsume(result));
  BOOST_CHECK_EQUAL(result, 3);
  BOOST_CHECK(queue.tryConsume(result));
  BOOST_CHECK_EQUAL(result, 4);
  BOOST_CHECK(queue.tryConsume(result));
  BOOST_CHECK_EQUAL(result, 5);
  BOOST_CHECK(queue.tryConsume(result));
  BOOST_CHECK_EQUAL(result, 6);
  BOOST_CHECK(queue.tryConsume(result));
  BOOST_CHECK_EQUAL(result, 7);

  // There should be no messages left
  result = -1;
  BOOST_CHECK(!queue.tryConsume(result));
  BOOST_CHECK_EQUAL(result, -1);
}
int main(int argc, char *argv[])
{
    int test = argc > 1 ? atoi(argv[1]) : 0;
    int verbose = argc > 2;

    cout << "TEST " << __FILE__ << " CASE " << test << endl;

    switch (test) { case 0:  // Zero is always the leading case.
      case 6: {
        ///Usage
        ///-----
        // This component is an implementation detail of 'bslmt' and is *not*
        // intended for direct client use.  It is subject to change without
        // notice.  As such, a usage example is not provided.

        // USAGE EXAMPLE

        IntQueue testQueue;
        testQueue.pushInt(1);
        ASSERT(1 == testQueue.getInt());
        testQueue.pushInt(2);
        ASSERT(2 == testQueue.getInt());
      } break;
      case 5: {
        // --------------------------------------------------------------------
        // TESTING 'tryWait'
        //
        // Concerns:
        //  1. 'tryWait' decrements the count if resources are available, or
        //     returns an error otherwise.
        //
        // Plan:
        //  We create two groups of threads.  One will call 'post', the other
        //  'tryWait'.  First, we make sure that 'tryWait' fails if no
        //  resources are available.  Then we will make sure it succeeds if
        //  resources are.  We will also test that 'tryWait' works fine in the
        //  steady state.
        //
        // Testing:
        //   void tryWait();
        // --------------------------------------------------------------------

        if (verbose) cout << endl
                          << "Testing 'tryWait'" << endl
                          << "=================" << endl;

        bslmt::ThreadUtil::Handle threads[10];
        MyBarrier                 barrier(10);
        Obj                       sem;

        struct ThreadInfo5 info;
        info.d_numIterations = 5000;  // number of ops per thread / 3
        info.d_barrier       = &barrier;
        info.d_sem           = &sem;

        for (int i = 0; i < 5; ++i) {
            ASSERT(0 == bslmt::ThreadUtil::create(&threads[i * 2],
                                                  thread5Post,
                                                  &info));
            ASSERT(0 == bslmt::ThreadUtil::create(&threads[i * 2 + 1],
                                                  thread5Wait,
                                                  &info));
        }
        for (int i = 0; i < 10; ++i) {
            ASSERT(0 == bslmt::ThreadUtil::join(threads[i]));
        }
      } break;
      case 4: {
        // --------------------------------------------------------------------
        // TESTING 'post(int)'
        //
        // Concerns:
        //  1. post(int) increments the count by the expected number.
        //
        // Plan:
        //  Create a set of threads calling 'wait' and use a thread to post a
        //  number smaller than the set of threads.
        //
        // Testing:
        //   void post(int number);
        // --------------------------------------------------------------------

        if (verbose) cout << endl
                          << "Testing 'post(int number)'" << endl
                          << "==========================" << endl;

        bslmt::ThreadUtil::Handle threads[6];
        MyBarrier                 barrier(6);
        Obj                       sem;

        struct ThreadInfo4 info;
        info.d_numIterations  = 10000;  // number of ops per thread
        info.d_numWaitThreads = 5;
        info.d_barrier        = &barrier;
        info.d_sem            = &sem;

        for (int i = 0; i < 5; ++i) {
            ASSERT(0 == bslmt::ThreadUtil::create(&threads[i],
                                                  thread4Wait,
                                                  &info));
        }
        ASSERT(0 == bslmt::ThreadUtil::create(&threads[5],
                                              thread4Post,
                                              &info));
        for (int i = 0; i < 6; ++i) {
            ASSERT(0 == bslmt::ThreadUtil::join(threads[i]));
        }
      } break;
      case 3: {
        // --------------------------------------------------------------------
        // TESTING 'timedWait'
        //
        // Concerns:
        //  1. timedWait() blocks the thread until a resource is available or
        //     the timeout expires.
        //
        // Plan:
        //  Create two groups of threads: one will call 'post' and the other
        //  will call 'timedWait'.  First, we will make sure that 'timedWait'
        //  will time out properly if no resource is available, by calling the
        //  function with a reasonable timeout before any calls to 'post'.
        //  Then, both groups of threads will enter a loop to simulate the
        //  steady state.  The 'post' loop will be perturbed to exercise
        //  different portions of code.  The specified timeout will be pretty
        //  important, and we will make sure we do not time out.  At the end
        //  of this first run, we will make a second run with a much lower
        //  timeout, which will force *some* waits to time out.
        //
        // Testing:
        //   void timedWait(bsls::TimeInterval timeout,
        //                  int               *signalInterrupted = 0);
        // --------------------------------------------------------------------

        if (verbose) cout << endl
                          << "Testing 'timedWait'" << endl
                          << "===================" << endl;

        testCase3(bsls::SystemClockType::e_REALTIME);
        testCase3(bsls::SystemClockType::e_MONOTONIC);
      } break;
      case 2: {
        // --------------------------------------------------------------------
        // TESTING 'wait' and 'post'
        //
        // Concerns:
        //  1. wait() blocks the thread when no resource is available, and
        //     then decrements the count.
        //  2. post() increments the count.
        //
        // Plan:
        //  Create two groups of threads: one will call 'post' and the other
        //  will call 'wait'.  To address concern 1, we will use a barrier and
        //  a counter to make sure that waiting threads are blocked into wait
        //  state before any calls to 'post'.  After that, we will try to
        //  reach a steady state by calling the two functions in a loop and
        //  perturb the 'post' operation by adding small delays to exercise
        //  different parts of the code.
        //
        // Testing:
        //   void post();
        //   void wait(int *signalInterrupted = 0);
        // --------------------------------------------------------------------

        if (verbose) cout << endl
                          << "Testing 'wait' and 'post'" << endl
                          << "=========================" << endl;

        bslmt::ThreadUtil::Handle threads[10];
        MyBarrier                 barrier(6);
        bsls::AtomicInt           past(0);
        Obj                       sem;

        struct ThreadInfo2 info;
        info.d_numIterations = 10000;  // number of ops per thread
        info.d_barrier       = &barrier;
        info.d_sem           = &sem;
        info.d_past          = &past;

        for (int i = 0; i < 5; ++i) {
            ASSERT(0 == bslmt::ThreadUtil::create(&threads[i * 2],
                                                  thread2Post,
                                                  &info));
            ASSERT(0 == bslmt::ThreadUtil::create(&threads[i * 2 + 1],
                                                  thread2Wait,
                                                  &info));
        }

        bslmt::ThreadUtil::microSleep(1000 * 100);
        ASSERT(0 == past);

        barrier.wait();

        bslmt::ThreadUtil::microSleep(1000 * 200);
        ASSERT(0 != past);

        for (int i = 0; i < 10; ++i) {
            ASSERT(0 == bslmt::ThreadUtil::join(threads[i]));
        }
      } break;
      case 1: {
        // --------------------------------------------------------------------
        // BREATHING TEST:
        //
        // Exercises basic functionality.
        // --------------------------------------------------------------------

        if (verbose) cout << endl
                          << "Breathing Test" << endl
                          << "==============" << endl;
        {
            Obj X;
            X.post();
            X.post(2);
            X.wait();
            ASSERT(0 == X.timedWait(bsls::SystemTime::nowRealtimeClock()
                                                   + bsls::TimeInterval(60)));
            ASSERT(0 == X.tryWait());
            ASSERT(0 != X.tryWait());
            ASSERT(0 != X.timedWait(bsls::SystemTime::nowRealtimeClock()
                                                    + bsls::TimeInterval(1)));
        }
      } break;
      default: {
        cerr << "WARNING: CASE `" << test << "' NOT FOUND." << endl;
        testStatus = -1;
      }
    }

    if (testStatus > 0) {
        cerr << "Error, non-zero test status = " << testStatus << "." << endl;
    }
    return testStatus;
}
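// The semaphore test cases above hand their work off to helper thread
// functions ('thread2Post', 'thread2Wait', 'thread4Post', 'thread5Wait', ...)
// and 'ThreadInfo' structs that are defined earlier in the test drivers and
// not reproduced in this excerpt.  The sketch below only illustrates the
// general shape of such a post/wait worker pair; the names are hypothetical,
// 'Obj' is assumed to be the semaphore type under test, and the real helpers
// do more (e.g. perturbing 'post' with small sleeps and recording progress
// past the barrier).

struct PostWaitInfoSketch {
    int        d_numIterations;
    MyBarrier *d_barrier;
    Obj       *d_sem;
};

extern "C" void *postWorkerSketch(void *arg)
{
    PostWaitInfoSketch *info = static_cast<PostWaitInfoSketch *>(arg);
    info->d_barrier->wait();                 // start all workers together
    for (int i = 0; i < info->d_numIterations; ++i) {
        info->d_sem->post();                 // release one waiter per iteration
    }
    return 0;
}

extern "C" void *waitWorkerSketch(void *arg)
{
    PostWaitInfoSketch *info = static_cast<PostWaitInfoSketch *>(arg);
    info->d_barrier->wait();                 // start all workers together
    for (int i = 0; i < info->d_numIterations; ++i) {
        info->d_sem->wait();                 // consume one 'post' per iteration
    }
    return 0;
}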