void run() override { setName( "Master" ); co::ConnectionDescriptionPtr desc = new co::ConnectionDescription; co::LocalNodePtr node = new co::LocalNode; node->addConnectionDescription( desc ); TEST( node->listen( )); _port = desc->port; co::Barrier barrier( node, node->getNodeID(), 3 ); TEST( barrier.isAttached( )); TEST( barrier.getVersion() == co::VERSION_FIRST ); TEST( barrier.getHeight() == 3 ); _barrier = &barrier; TEST( barrier.enter( )); barrier.setHeight( 2 ); barrier.commit(); TEST( barrier.getVersion() == co::VERSION_FIRST + 1 ); TEST( barrier.enter( )); _barrier.waitEQ( 0 ); // wait for slave thread finish node->deregisterObject( &barrier ); node->close(); }
virtual void run() { co::ConnectionDescriptionPtr description = new co::ConnectionDescription; description->type = co::CONNECTIONTYPE_TCPIP; description->port = _master ? _port : _port+1; co::LocalNodePtr node = new co::LocalNode; node->addConnectionDescription( description ); TEST( node->listen( )); if( _master ) { co::Barrier barrier( node, 2 ); node->registerObject( &barrier ); TEST( barrier.isAttached( )); _barrier = &barrier; barrier.enter(); _barrier.waitEQ( 0 ); // wait for slave to unmap session node->deregisterObject( &barrier ); } else { _barrier.waitNE( 0 ); co::NodePtr server = new co::Node; co::ConnectionDescriptionPtr serverDesc = new co::ConnectionDescription; serverDesc->port = _port; server->addConnectionDescription( serverDesc ); TEST( node->connect( server )); std::cerr << "Slave enter" << std::endl; _barrier->enter(); std::cerr << "Slave left" << std::endl; _barrier = 0; } node->close(); }
int main( int argc, char **argv ) { TEST(( PACKETSIZE % 8 ) == 0 ); co::init( argc, argv ); for( size_t i = 0; types[i] != co::CONNECTIONTYPE_NONE; ++i ) { co::ConnectionDescriptionPtr desc = new co::ConnectionDescription; desc->type = types[i]; co::ConnectionPtr writer; co::ConnectionPtr listener; if( !_initialize( desc, listener, writer )) continue; Reader readThread( listener ); if( desc->type != co::CONNECTIONTYPE_RSP ) TEST( writer->connect( )); uint64_t out[ PACKETSIZE / 8 ]; lunchbox::Clock clock; uint64_t sequence = 0; while( clock.getTime64() < RUNTIME ) { out[0] = ++sequence; TEST( writer->send( out, PACKETSIZE )); } out[0] = 0xdeadbeef; TEST( writer->send( out, PACKETSIZE )); s_done.waitEQ( true ); writer->close(); readThread.join(); listener->close(); const float bwTime = clock.getTimef(); const uint64_t numBW = sequence; TEST( _initialize( desc, listener, writer )); Latency latency( listener ); if( desc->type != co::CONNECTIONTYPE_RSP ) TEST( writer->connect( )); sequence = 0; clock.reset(); while( clock.getTime64() < RUNTIME ) { ++sequence; TEST( writer->send( &sequence, sizeof( uint64_t ))); } sequence = 0xC0FFEE; TEST( writer->send( &sequence, sizeof( uint64_t ))); s_done.waitEQ( true ); writer->close(); latency.join(); listener->close(); const float latencyTime = clock.getTimef(); const float mFactor = 1024.f / 1024.f * 1000.f; std::cout << desc->type << ": " << (numBW+1) * PACKETSIZE / mFactor / bwTime << " MBps, " << (sequence+1) / mFactor / latencyTime << " Mpps" << std::endl; if( listener == writer ) listener = 0; TESTINFO( !listener || listener->getRefCount() == 1, listener->getRefCount()); TEST( writer->getRefCount() == 1 ); } co::exit(); return EXIT_SUCCESS; }
// Stress test for a thread-safe vector: concurrent readers/writers/pushers,
// then multi-threaded copy, erase and pop_back passes over power-of-two
// thread counts, printing operations-per-ms for each combination.
// NOTE(review): stage_, _clock and the *Time_ globals appear to be defined
// elsewhere in this file; stage_ looks like a monitor the worker threads
// increment to signal per-stage completion — confirm against the full file.
int main( int, char** )
{
#ifdef LUNCHBOX_USE_OPENMP
    const size_t nThreads = lunchbox::OMP::getNThreads() * 3;
#else
    const size_t nThreads = 16;
#endif

    std::cout << " read, write, push, copy, erase, "
              << " flush/ms, rd, other #threads" << std::endl;

    // Single-threaded baselines for std::vector and the tested Vector_t.
    _runSerialTest< std::vector< size_t >, size_t >();
    _runSerialTest< Vector_t, size_t >();

    std::vector< Reader > readers(nThreads);
    std::vector< Writer > writers(nThreads);
    std::vector< Pusher > pushers(nThreads);
    stage_ = 1;
    size_t stage = 0;

    // Start all worker threads; they presumably idle until given a vector
    // and a new stage value — TODO confirm in the thread run() bodies.
    for( size_t l = 0; l < nThreads; ++l )
    {
        readers[l].start();
        writers[l].start();
        pushers[l].start();
    }
    lunchbox::sleep( 10 );

    // i = number of active readers, j = number of active writers/pushers;
    // both sweep 1, 2, 4, ... up to nThreads.
    for( size_t i = 1; i <= nThreads; i = i<<1 )
        for( size_t j = 1; j <= nThreads; j = j<<1 )
        {
            // concurrent read, write, push
            Vector_t vector;
            // Assign the shared vector to the first i readers and first j
            // writers/pushers; the rest get 0 and sit the stage out.
            for( size_t k = 0; k < nThreads; ++k )
            {
                readers[k].vector = k < i ? &vector : 0;
                writers[k].vector = k < j ? &vector : 0;
                pushers[k].vector = k < j ? &vector : 0;
            }

            // Advance to the next stage, then wait until all 3*nThreads
            // workers have bumped stage_ past it (stage completion).
            const size_t nextStage = ++stage * STAGESIZE;
            _clock.reset();
            stage_ = nextStage;
            stage_.waitEQ( nextStage + (3 * nThreads) );
            TEST( vector.size() >= LOOPSIZE );

            // multi-threaded copy
            std::vector< Copier > copiers(j);
            _clock.reset();
            for( size_t k = 0; k < j; ++k )
            {
                copiers[k].vector = &vector;
                copiers[k].start();
            }
            for( size_t k = 0; k < j; ++k )
                copiers[k].join();
            // Elements are either their index or 0 (slot claimed but not
            // yet written by a concurrent pusher).
            for( size_t k = 0; k < vector.size(); ++k )
                TEST( vector[k] == k || vector[k] == 0 );

            // multi-threaded erase
            std::vector< Eraser > erasers(j);
            _clock.reset();
            for( size_t k = 0; k < j; ++k )
            {
                erasers[k].vector = &vector;
                erasers[k].start();
            }
            for( size_t k = 0; k < j; ++k )
                erasers[k].join();
            // After erasing: a leading run of untouched values (== index),
            // then a strictly increasing tail of survivors; 0 ends the scan.
            for( size_t k = 0; k < vector.size(); ++k )
            {
                if( vector[k] == 0 )
                    break;
                if( k > vector.size() / 2 )
                {
                    TEST( vector[k] > vector[k-1] );
                }
                else
                {
                    TEST( vector[k] == k );
                }
            }

            // multi-threaded pop_back
            const size_t fOps = vector.size(); // ops for the flush rate below
            std::vector< Flusher > flushers(j);
            _clock.reset();
            for( size_t k = 0; k < j; ++k )
            {
                flushers[k].vector = &vector;
                flushers[k].start();
            }
            for( size_t k = 0; k < j; ++k )
                flushers[k].join();
            const float fTime = _clock.getTimef();
            TEST( vector.empty( ));

            // Per-stage rates in operations/ms; rTime_/wTime_/pTime_/cTime_/
            // eTime_ are presumably recorded by the worker threads — verify.
            std::cerr << std::setw(11) << float(i*LOOPSIZE)/rTime_ << ", "
                      << std::setw(11) << float(j*LOOPSIZE)/wTime_ << ", "
                      << std::setw(11) << float(LOOPSIZE)/pTime_ << ", "
                      << std::setw(9) << float(j)/cTime_ << ", "
                      << std::setw(9) << float(j)/eTime_ << ", "
                      << std::setw(9) << float(fOps)/fTime << ", "
                      << std::setw(3) << i << ", "
                      << std::setw(3) << j << std::endl;
        }

    // Signal all workers to exit, then reap them.
    stage_ = std::numeric_limits< size_t >::max();
    for( size_t k = 0; k < nThreads; ++k )
    {
        readers[k].join();
        writers[k].join();
        pushers[k].join();
    }
    return EXIT_SUCCESS;
}