void run(unsigned thread_id, std::string filename, Address ip, std::vector<Address> routing_addresses) { std::string log_file = "log_user.txt"; std::string logger_name = "user_log"; auto logger = spdlog::basic_logger_mt(logger_name, log_file, true); logger->flush_on(spdlog::level::info); std::hash<std::string> hasher; unsigned seed = time(NULL); seed += hasher(ip); seed += thread_id; logger->info("Random seed is {}.", seed); // mapping from key to a set of worker addresses std::unordered_map<Key, std::unordered_set<Address>> key_address_cache; UserThread ut = UserThread(ip, thread_id); int timeout = 10000; zmq::context_t context(1); SocketCache pushers(&context, ZMQ_PUSH); // responsible for pulling response zmq::socket_t response_puller(context, ZMQ_PULL); response_puller.setsockopt(ZMQ_RCVTIMEO, &timeout, sizeof(timeout)); response_puller.bind(ut.get_request_pulling_bind_addr()); // responsible for receiving key address responses zmq::socket_t key_address_puller(context, ZMQ_PULL); key_address_puller.setsockopt(ZMQ_RCVTIMEO, &timeout, sizeof(timeout)); key_address_puller.bind(ut.get_key_address_bind_addr()); unsigned rid = 0; std::string input; unsigned trial = 1; if (filename == "") { while (true) { std::cout << "kvs> "; getline(std::cin, input); handle_request(input, pushers, routing_addresses, key_address_cache, seed, logger, ut, response_puller, key_address_puller, ip, thread_id, rid, trial); } } else { std::ifstream infile(filename); while (getline(infile, input)) { handle_request(input, pushers, routing_addresses, key_address_cache, seed, logger, ut, response_puller, key_address_puller, ip, thread_id, rid, trial); } } }
int main(int argc, char* argv[]) { if (argc != 2) { std::cerr << "Usage: " << argv[0] << " <benchmark_threads>" << std::endl; return 1; } unsigned thread_num = atoi(argv[1]); // read in the benchmark addresses std::vector<Address> benchmark_address; // read the YAML conf std::vector<Address> ips; YAML::Node conf = YAML::LoadFile("conf/config.yml"); YAML::Node benchmark = conf["benchmark"]; for (const YAML::Node& node : benchmark) { ips.push_back(node.as<Address>()); } zmq::context_t context(1); SocketCache pushers(&context, ZMQ_PUSH); std::string command; while (true) { std::cout << "command> "; getline(std::cin, command); for (const std::string address : benchmark_address) { for (unsigned tid = 0; tid < thread_num; tid++) { BenchmarkThread bt = BenchmarkThread(address, tid); kZmqUtil->send_string(command, &pushers[bt.get_benchmark_command_port_addr()]); } } } }
// Multi-threaded stress/benchmark test for the lock-free Vector_t.
// Worker threads (Reader/Writer/Pusher) rendezvous with this driver through
// the shared counter `stage_`; each loop iteration advances the stage and
// waits for all 3*nThreads workers to acknowledge before measuring
// copy/erase/flush phases. Logic is tightly coupled to globals set by the
// worker threads (rTime_, wTime_, pTime_, cTime_, eTime_, _clock, STAGESIZE,
// LOOPSIZE) — defined outside this view.
int main( int, char** )
{
#ifdef LUNCHBOX_USE_OPENMP
    // Oversubscribe: three worker kinds (read/write/push) per OMP thread.
    const size_t nThreads = lunchbox::OMP::getNThreads() * 3;
#else
    const size_t nThreads = 16;
#endif
    // Column header for the per-configuration timing rows printed below.
    std::cout << " read, write, push, copy, erase, "
              << " flush/ms, rd, other #threads" << std::endl;

    // Single-threaded baselines: std::vector vs the lock-free Vector_t.
    _runSerialTest< std::vector< size_t >, size_t >();
    _runSerialTest< Vector_t, size_t >();

    std::vector< Reader > readers(nThreads);
    std::vector< Writer > writers(nThreads);
    std::vector< Pusher > pushers(nThreads);
    stage_ = 1;
    size_t stage = 0;
    // Start all workers; they block until stage_ reaches their stage value.
    for( size_t l = 0; l < nThreads; ++l )
    {
        readers[l].start();
        writers[l].start();
        pushers[l].start();
    }
    lunchbox::sleep( 10 );

    // Sweep reader count (i) and writer/pusher count (j) in powers of two.
    for( size_t i = 1; i <= nThreads; i = i<<1 )
        for( size_t j = 1; j <= nThreads; j = j<<1 )
        {
            // concurrent read, write, push
            Vector_t vector;
            // Assign the shared vector to the first i readers and first j
            // writers/pushers; the rest get 0 and sit this stage out.
            for( size_t k = 0; k < nThreads; ++k )
            {
                readers[k].vector = k < i ? &vector : 0;
                writers[k].vector = k < j ? &vector : 0;
                pushers[k].vector = k < j ? &vector : 0;
            }

            // Release the workers by bumping stage_, then wait until all
            // 3*nThreads threads have incremented it past the barrier.
            const size_t nextStage = ++stage * STAGESIZE;
            _clock.reset();
            stage_ = nextStage;
            stage_.waitEQ( nextStage + (3 * nThreads) );
            TEST( vector.size() >= LOOPSIZE );

            // multi-threaded copy
            std::vector< Copier > copiers(j);
            _clock.reset();
            for( size_t k = 0; k < j; ++k )
            {
                copiers[k].vector = &vector;
                copiers[k].start();
            }
            for( size_t k = 0; k < j; ++k )
                copiers[k].join();
            // Copiers duplicate elements in place; slots are either the
            // original value or 0 (presumably a placeholder — confirm
            // against Copier's implementation).
            for( size_t k = 0; k < vector.size(); ++k )
                TEST( vector[k] == k || vector[k] == 0 );

            // multi-threaded erase
            std::vector< Eraser > erasers(j);
            _clock.reset();
            for( size_t k = 0; k < j; ++k )
            {
                erasers[k].vector = &vector;
                erasers[k].start();
            }
            for( size_t k = 0; k < j; ++k )
                erasers[k].join();
            // After concurrent erase: a 0 terminates the checked prefix;
            // the first half must be untouched (v[k] == k), the tail need
            // only remain strictly increasing.
            for( size_t k = 0; k < vector.size(); ++k )
            {
                if( vector[k] == 0 )
                    break;
                if( k > vector.size() / 2 )
                {
                    TEST( vector[k] > vector[k-1] );
                }
                else
                {
                    TEST( vector[k] == k );
                }
            }

            // multi-threaded pop_back
            const size_t fOps = vector.size();
            std::vector< Flusher > flushers(j);
            _clock.reset();
            for( size_t k = 0; k < j; ++k )
            {
                flushers[k].vector = &vector;
                flushers[k].start();
            }
            for( size_t k = 0; k < j; ++k )
                flushers[k].join();
            const float fTime = _clock.getTimef();
            TEST( vector.empty( ));

            // One timing row per (i, j) configuration; rates are ops/ms
            // using the times recorded by the worker threads.
            std::cerr << std::setw(11) << float(i*LOOPSIZE)/rTime_ << ", "
                      << std::setw(11) << float(j*LOOPSIZE)/wTime_ << ", "
                      << std::setw(11) << float(LOOPSIZE)/pTime_ << ", "
                      << std::setw(9) << float(j)/cTime_ << ", "
                      << std::setw(9) << float(j)/eTime_ << ", "
                      << std::setw(9) << float(fOps)/fTime << ", "
                      << std::setw(3) << i << ", "
                      << std::setw(3) << j << std::endl;
        }

    // Sentinel stage value tells the workers to exit their loops.
    stage_ = std::numeric_limits< size_t >::max();
    for( size_t k = 0; k < nThreads; ++k )
    {
        readers[k].join();
        writers[k].join();
        pushers[k].join();
    }
    return EXIT_SUCCESS;
}