template< class T > void _test()
{
    T* lock = new T;
    lock->set();

#ifdef LUNCHBOX_USE_OPENMP
    const size_t nThreads = LB_MIN( lunchbox::OMP::getNThreads() * 3,
                                    MAXTHREADS );
#else
    const size_t nThreads = 16;
#endif

    Thread< T > threads[MAXTHREADS];
    for( size_t i = 1; i <= nThreads; i = i << 1 )
    {
        _running = true;
        for( size_t j = 0; j < i; ++j )
        {
            threads[j].lock = lock;
            TEST( threads[j].start( ));
        }
        lunchbox::sleep( 10 ); // let threads initialize

        _clock.reset();
        lock->unset();           // start the race
        lunchbox::sleep( TIME ); // let threads run
        _running = false;

        for( size_t j = 0; j < i; ++j )
            TEST( threads[j].join( ));
        const float time = _clock.getTimef();

        TEST( !lock->isSet( ));
        lock->set();

        size_t ops = 0;
        for( size_t j = 0; j < i; ++j ) // only the first i threads ran
            ops += threads[j].ops;

        std::cout << std::setw(20) << lunchbox::className( lock ) << ", "
                  << std::setw(12) << /*set, test, unset*/ 3 * ops / time
                  << ", " << std::setw(3) << i << std::endl;
    }

    delete lock;
}
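// For reference, a minimal sketch of the Thread< T > worker that _test()
// drives. The real class is defined elsewhere in this test; the body below is
// an assumption reconstructed from how _test() uses it: each worker spins on
// set/test/unset until _running is cleared, matching the "3 * ops" accounting
// in the output above.
template< class T > class Thread : public lunchbox::Thread
{
public:
    Thread() : lock( 0 ), ops( 0 ) {}

    T* lock;
    size_t ops;

    virtual void run()
    {
        ops = 0;
        while( _running )
        {
            lock->set(); // the very first set() blocks until _test() unsets
            TEST( lock->isSet( ));
            lock->unset();
            ++ops;
        }
    }
};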
int main( int, char** )
{
#ifdef LUNCHBOX_USE_OPENMP
    const size_t nThreads = lunchbox::OMP::getNThreads() * 3;
#else
    const size_t nThreads = 16;
#endif

    std::cout << "       read,      write,       push,      copy,     erase, "
              << " flush/ms,  rd, other #threads" << std::endl;
    _runSerialTest< std::vector< size_t >, size_t >();
    _runSerialTest< Vector_t, size_t >();

    std::vector< Reader > readers(nThreads);
    std::vector< Writer > writers(nThreads);
    std::vector< Pusher > pushers(nThreads);

    stage_ = 1;
    size_t stage = 0;

    for( size_t l = 0; l < nThreads; ++l )
    {
        readers[l].start();
        writers[l].start();
        pushers[l].start();
    }
    lunchbox::sleep( 10 );

    for( size_t i = 1; i <= nThreads; i = i<<1 )
        for( size_t j = 1; j <= nThreads; j = j<<1 )
        {
            // concurrent read, write, push
            Vector_t vector;
            for( size_t k = 0; k < nThreads; ++k )
            {
                readers[k].vector = k < i ? &vector : 0;
                writers[k].vector = k < j ? &vector : 0;
                pushers[k].vector = k < j ? &vector : 0;
            }

            const size_t nextStage = ++stage * STAGESIZE;
            _clock.reset();
            stage_ = nextStage;                           // release all workers
            stage_.waitEQ( nextStage + ( 3 * nThreads )); // wait for completion
            TEST( vector.size() >= LOOPSIZE );

            // multi-threaded copy
            std::vector< Copier > copiers(j);
            _clock.reset();
            for( size_t k = 0; k < j; ++k )
            {
                copiers[k].vector = &vector;
                copiers[k].start();
            }
            for( size_t k = 0; k < j; ++k )
                copiers[k].join();

            for( size_t k = 0; k < vector.size(); ++k )
                TEST( vector[k] == k || vector[k] == 0 );

            // multi-threaded erase
            std::vector< Eraser > erasers(j);
            _clock.reset();
            for( size_t k = 0; k < j; ++k )
            {
                erasers[k].vector = &vector;
                erasers[k].start();
            }
            for( size_t k = 0; k < j; ++k )
                erasers[k].join();

            for( size_t k = 0; k < vector.size(); ++k )
            {
                if( vector[k] == 0 )
                    break;
                if( k > vector.size() / 2 )
                {
                    TEST( vector[k] > vector[k-1] );
                }
                else
                {
                    TEST( vector[k] == k );
                }
            }

            // multi-threaded pop_back
            const size_t fOps = vector.size();
            std::vector< Flusher > flushers(j);
            _clock.reset();
            for( size_t k = 0; k < j; ++k )
            {
                flushers[k].vector = &vector;
                flushers[k].start();
            }
            for( size_t k = 0; k < j; ++k )
                flushers[k].join();
            const float fTime = _clock.getTimef();
            TEST( vector.empty( ));

            std::cerr << std::setw(11) << float(i*LOOPSIZE)/rTime_ << ", "
                      << std::setw(11) << float(j*LOOPSIZE)/wTime_ << ", "
                      << std::setw(11) << float(LOOPSIZE)/pTime_ << ", "
                      << std::setw(9) << float(j)/cTime_ << ", "
                      << std::setw(9) << float(j)/eTime_ << ", "
                      << std::setw(9) << float(fOps)/fTime << ", "
                      << std::setw(3) << i << ", " << std::setw(3) << j
                      << std::endl;
        }

    stage_ = std::numeric_limits< size_t >::max(); // terminate all workers
    for( size_t k = 0; k < nThreads; ++k )
    {
        readers[k].join();
        writers[k].join();
        pushers[k].join();
    }

    return EXIT_SUCCESS;
}
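// The Reader, Writer and Pusher workers are defined elsewhere in this test.
// As a rough sketch of the handshake main() relies on: every worker waits for
// stage_ to reach its next stage, does one pass over the shared vector, then
// increments stage_ so main()'s waitEQ( nextStage + 3 * nThreads ) can fire.
// Everything below except the names used by main() is an assumption:
class Reader : public lunchbox::Thread
{
public:
    Reader() : vector( 0 ) {}

    Vector_t* vector;

    virtual void run()
    {
        size_t stage = 0;
        while( true )
        {
            stage += STAGESIZE;
            stage_.waitGE( stage ); // wait for main() to open this stage
            if( stage_ == std::numeric_limits< size_t >::max( ))
                return;             // termination signal from main()

            if( vector ) // 0 when this worker sits the stage out
            {
                for( size_t i = 0; i < LOOPSIZE; ++i )
                    if( i < vector->size( ))
                    {
                        const size_t value = (*vector)[ i ];
                        TEST( value == i || value == 0 );
                    }
                rTime_ = _clock.getTimef();
            }
            ++stage_; // report completion; Writer and Pusher are analogous
        }
    }
};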
template< class V, class T > void _runSerialTest()
{
    V vector;

    _clock.reset();
    while( vector.size() < LOOPSIZE*10 )
        vector.push_back( T( 0 ));
    pTime_ = _clock.getTimef();

    _clock.reset();
    for( size_t i = 0; i < LOOPSIZE*10; ++i )
    {
        if( i < vector.size( ))
            vector[ i ] = i;
    }
    wTime_ = _clock.getTimef();

    _clock.reset();
    typename V::const_iterator it = vector.begin();
    for( size_t i = 0; i < LOOPSIZE*10; ++i )
    {
        const size_t value = *it;
        TESTINFO( i == value || value == 0, i << ", " << value );
        ++it;
        if( it == vector.end( ))
            it = vector.begin();
    }
    rTime_ = _clock.getTimef();

    _clock.reset();
    for( size_t i = 0; i < 10; ++i )
    {
        V copy;
        copy = vector;
        vector = copy;
        TEST( copy.size() >= vector.size( ));
    }
    cTime_ = _clock.getTimef();

    _clock.reset();
    const size_t pos = vector.size() / 2;
    typename V::iterator i = vector.begin() + pos;
    typename V::iterator j = vector.erase( i );
    TEST( j != vector.end( ));
    TEST( *j == 0 || *j >= pos );
    eTime_ = _clock.getTimef();

    _clock.reset();
    while( !vector.empty( ))
        vector.pop_back();
    const float fTime = _clock.getTimef();

    std::cerr << std::setw(11) << float(LOOPSIZE*10)/rTime_ << ", "
              << std::setw(11) << float(LOOPSIZE*10)/wTime_ << ", "
              << std::setw(11) << float(LOOPSIZE*10)/pTime_ << ", "
              << std::setw(9) << float(10)/cTime_ << ", "
              << std::setw(9) << float(10)/eTime_ << ", "
              << std::setw(9) << float(LOOPSIZE*10)/fTime << ", "
              << std::setw(3) << 0 << ", " << std::setw(3) << 0
              << std::endl;

    vector.push_back( 42 );
    i = vector.begin();
    i = vector.erase( i );
    TEST( i == vector.begin( ));
    TEST( vector.empty( ));

    vector.push_back( 42 );
    vector.push_back( 17 );
    vector.resize( 1 );
    TEST( vector.size() == 1 );
    TEST( vector[0] == 42 );

    vector.resize( 10, 17 );
    TEST( vector.size() == 10 );
    TEST( vector[0] == 42 );
    TEST( vector[1] == 17 );
    TEST( vector[9] == 17 );
}
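// The serial baseline above and the threaded test in main() share a number of
// file-scope definitions. A sketch of what they are assumed to look like --
// the exact types and values are assumptions, not taken from this excerpt:
//
//   typedef lunchbox::LFVector< size_t > Vector_t; // lock-free vector under test
//
//   const size_t LOOPSIZE  = 200000;               // items touched per pass
//   const size_t STAGESIZE = 10000;                // stage_ stride per stage
//
//   lunchbox::Clock             _clock;            // shared timer
//   lunchbox::Monitor< size_t > stage_;            // stage handshake counter
//   float rTime_, wTime_, pTime_, cTime_, eTime_;  // per-operation timings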
template< class T, uint32_t hold > void _test()
{
    T* lock = new T;
    lock->set();

#ifdef LUNCHBOX_USE_OPENMP
    const size_t nThreads = LB_MIN( lunchbox::OMP::getNThreads() * 3,
                                    MAXTHREADS );
#else
    const size_t nThreads = 16;
#endif

    WriteThread< T, hold > writers[MAXTHREADS];
    ReadThread< T, hold > readers[MAXTHREADS];

    std::cout << "               Class, write ops/ms, read ops/ms, w threads, "
              << "r threads" << std::endl;
    for( size_t nWrite = 0; nWrite <= nThreads;
         nWrite = (nWrite == 0) ? 1 : nWrite << 1 )
    {
        for( size_t i = 1; i <= nThreads; i = i << 1 )
        {
            if( i < nWrite )
                continue;
            const size_t nRead = i - nWrite;

            _running = true;
            for( size_t j = 0; j < nWrite; ++j )
            {
                writers[j].lock = lock;
                TEST( writers[j].start( ));
            }
            for( size_t j = 0; j < nRead; ++j )
            {
                readers[j].lock = lock;
                TESTINFO( readers[j].start(), j );
            }
            lunchbox::sleep( 10 ); // let threads initialize

            _clock.reset();
            lock->unset();
            lunchbox::sleep( TIME ); // let threads run
            _running = false;

            for( size_t j = 0; j < nWrite; ++j )
                TEST( writers[j].join( ));
            for( size_t j = 0; j < nRead; ++j )
                TEST( readers[j].join( ));
            const double time = _clock.getTimed();

            TEST( !lock->isSet( ));
            lock->set();

            size_t nWriteOps = 0;
            double wTime = time * double( nWrite );
            for( size_t j = 0; j < nWrite; ++j )
            {
                nWriteOps += writers[j].ops;
                wTime -= writers[j].sTime;
            }
            if( nWrite > 0 )
                wTime /= double( nWrite );
            if( wTime == 0. )
                wTime = std::numeric_limits< double >::epsilon();

            size_t nReadOps = 0;
            double rTime = time * double( nRead );
            for( size_t j = 0; j < nRead; ++j )
            {
                nReadOps += readers[j].ops;
                rTime -= readers[j].sTime;
            }
            if( nRead > 0 )
                rTime /= double( nRead );
            if( rTime == 0. )
                rTime = std::numeric_limits< double >::epsilon();

            std::cout << std::setw(20) << lunchbox::className( lock ) << ", "
                      << std::setw(12) << 3 * nWriteOps / wTime << ", "
                      << std::setw(12) << 3 * nReadOps / rTime << ", "
                      << std::setw(9) << nWrite << ", "
                      << std::setw(9) << nRead << std::endl;
        }
    }

    delete lock;
}
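// WriteThread and ReadThread are defined elsewhere in this test. A
// hypothetical sketch of the write side, reconstructed from the accounting
// above: 'ops' counts completed lock cycles and 'sTime' accumulates time
// spent in simulated work outside the critical section, which _test()
// subtracts from wall time to isolate pure locking cost. The _spin() helper
// and the exact use of 'hold' are assumptions; the read side would look the
// same but use T::setRead()/unsetRead() (as provided, e.g., by
// lunchbox::SpinLock).
template< uint32_t spins > inline void _spin()
{
    for( volatile uint32_t k = 0; k < spins; ++k )
        /* burn a few cycles to simulate work */ ;
}

template< class T, uint32_t hold > class WriteThread : public lunchbox::Thread
{
public:
    WriteThread() : lock( 0 ), ops( 0 ), sTime( 0. ) {}

    T* lock;
    size_t ops;
    double sTime;

    virtual void run()
    {
        ops = 0;
        sTime = 0.;
        lunchbox::Clock clock;
        while( _running )
        {
            lock->set();
            _spin< hold >(); // hold the lock for 'hold' spin iterations
            lock->unset();

            clock.reset();
            _spin< hold >();           // equivalent work outside the lock...
            sTime += clock.getTimed(); // ...accounted for separately
            ++ops;
        }
    }
};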
int main( int argc, char **argv )
{
    co::init( argc, argv );

    co::ObjectDataOCommand out( co::Connections(),
                                co::CMD_NODE_OBJECT_INSTANCE,
                                co::COMMANDTYPE_NODE, co::uint128_t(), 0,
                                co::uint128_t(1), 0, 0, COMMAND_SIZE, true,
                                0 );
    co::LocalNodePtr node = new co::LocalNode;
    co::ObjectDataICommand in = out._getCommand( node );
    TESTINFO( in.isValid(), in );
    TEST( in.isLast( ));

    Reader** readers = static_cast< Reader** >(
        alloca( N_READER * sizeof( Reader* )));

    co::InstanceCache cache;
    lunchbox::RNG rng;

    size_t hits = 0;
    size_t ops = 0;

    for( co::uint128_t key; key.low() < 65536; ++key ) // fill cache
        if( !cache.add( co::ObjectVersion( key, co::uint128_t(1) ), 1, in ))
            break;

    _clock.reset();
    for( size_t i = 0; i < N_READER; ++i )
    {
        readers[ i ] = new Reader( cache );
        readers[ i ]->start();
    }

    while( _clock.getTime64() < RUNTIME )
    {
        const co::uint128_t id( 0, rng.get< uint16_t >( ));
        const co::ObjectVersion key( id, co::uint128_t(1) );

        if( cache[ key.identifier ] != co::InstanceCache::Data::NONE )
        {
            TEST( cache.release( key.identifier, 1 ));
            ++ops;
            if( cache.erase( key.identifier ))
            {
                TEST( cache.add( key, 1, in ));
                ops += 2;
                hits += 2;
            }
        }
        else if( cache.add( key, 1, in ))
            ++hits;
        ++ops;
    }

    const uint64_t time = _clock.getTime64();
    std::cout << hits << " write hits in " << ops << " operations, "
              << ops / time << " ops/ms" << std::endl;

    for( size_t i = 0; i < N_READER; ++i )
    {
        readers[ i ]->join();
        delete readers[ i ];
    }

    std::cout << cache << std::endl;

    for( co::uint128_t key; key.low() < 65536; ++key ) // drain cache
    {
        if( cache[ key ] != co::InstanceCache::Data::NONE )
        {
            TEST( cache.release( key, 1 ));
            TEST( cache.erase( key ));
        }
    }
    for( co::uint128_t key; key.low() < 65536; ++key ) // verify it is empty
    {
        TEST( cache[ key ] == co::InstanceCache::Data::NONE );
    }
    std::cout << cache << std::endl;
    TESTINFO( cache.getSize() == 0, cache.getSize( ));

    TEST( co::exit( ));
    return EXIT_SUCCESS;
}
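// A rough sketch of the Reader used above; the real class is defined earlier
// in this test. It hammers the cache with random lookups on the same key
// space as the writer loop in main(), releasing every entry it pins. The
// exact body and reporting are assumptions:
class Reader : public lunchbox::Thread
{
public:
    explicit Reader( co::InstanceCache& cache ) : _cache( cache ) {}

    virtual void run()
    {
        size_t hits = 0;
        size_t ops = 0;
        lunchbox::RNG rng;

        while( _clock.getTime64() < RUNTIME )
        {
            const co::uint128_t key( 0, rng.get< uint16_t >( ));
            if( _cache[ key ] != co::InstanceCache::Data::NONE )
            {
                ++hits;
                TEST( _cache.release( key, 1 ));
            }
            ++ops;
        }
        std::cout << hits << " read hits in " << ops << " operations"
                  << std::endl;
    }

private:
    co::InstanceCache& _cache;
};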