int main(void) { int i; secp256k1_pubkey pubkey; secp256k1_ecdsa_signature sig; benchmark_verify_t data; data.ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); for (i = 0; i < 32; i++) { data.msg[i] = 1 + i; } for (i = 0; i < 32; i++) { data.key[i] = 33 + i; } data.siglen = 72; CHECK(secp256k1_ecdsa_sign(data.ctx, &sig, data.msg, data.key, NULL, NULL)); CHECK(secp256k1_ecdsa_signature_serialize_der(data.ctx, data.sig, &data.siglen, &sig)); CHECK(secp256k1_ec_pubkey_create(data.ctx, &pubkey, data.key)); data.pubkeylen = 33; CHECK(secp256k1_ec_pubkey_serialize(data.ctx, data.pubkey, &data.pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED) == 1); run_benchmark("ecdsa_verify", benchmark_verify, NULL, NULL, &data, 10, 20000); #ifdef ENABLE_OPENSSL_TESTS data.ec_group = EC_GROUP_new_by_curve_name(NID_secp256k1); run_benchmark("ecdsa_verify_openssl", benchmark_verify_openssl, NULL, NULL, &data, 10, 20000); EC_GROUP_free(data.ec_group); #endif secp256k1_context_destroy(data.ctx); return 0; }
/* Benchmark seL4 fault handling.
 *
 * Creates a faulting thread (priority seL4_MinPrio + 1) and a fault-handler
 * thread (seL4_MinPrio) connected through a freshly allocated fault endpoint,
 * then measures three costs: taking a fault, the handler's reply, and the
 * full fault round trip.  `env` supplies the slab VKA allocator and thread
 * helpers; measurements are written into `results` by the thread functions. */
static void run_fault_benchmark(env_t *env, fault_results_t *results) {
    /* allocate endpoint the faulter faults on */
    vka_object_t fault_endpoint = {0};
    UNUSED int error = vka_alloc_endpoint(&env->slab_vka, &fault_endpoint);
    assert(error == 0);
    /* second endpoint both threads use to signal completion */
    vka_object_t done_ep = {0};
    error = vka_alloc_endpoint(&env->slab_vka, &done_ep);
    assert(error == 0);
    /* create faulter; `start` is the shared start timestamp */
    ccnt_t start = 0;
    benchmark_configure_thread(env, fault_endpoint.cptr, seL4_MinPrio + 1, "faulter", &faulter);
    sel4utils_create_word_args(faulter_args, faulter_argv, N_FAULTER_ARGS, (seL4_Word) &start, (seL4_Word) results, done_ep.cptr);
    /* create fault handler (no fault endpoint of its own, lower priority) */
    benchmark_configure_thread(env, seL4_CapNull, seL4_MinPrio, "fault handler", &fault_handler);
    sel4utils_create_word_args(handler_args, handler_argv, N_HANDLER_ARGS, fault_endpoint.cptr, (seL4_Word) &start, (seL4_Word) results, done_ep.cptr, fault_handler.reply.cptr);
    /* benchmark fault */
    run_benchmark(measure_fault_fn, measure_fault_handler_fn, done_ep.cptr);
    /* benchmark reply */
    run_benchmark(measure_fault_reply_fn, measure_fault_reply_handler_fn, done_ep.cptr);
    /* benchmark round_trip */
    run_benchmark(measure_fault_roundtrip_fn, measure_fault_roundtrip_handler_fn, done_ep.cptr);
}
// Select the L-inf (default) or L2 benchmark variant from the command line
// and run it.  Returns 0 on success, -2 on an unknown flag, -3 if more than
// one argument is given.
int main(int argc, const char *argv[]) {
    SDGL2 sdgl2;
    SDGLinf sdg;
    bool is_linf = true;  // default variant

    if (argc > 2) {
        std::cerr << "Error: Only one allowed optional argument." << std::endl;
        return -3;
    }
    if (argc == 2) {
        if (strcmp(argv[1], "--l2") == 0) {
            is_linf = false;
        } else if (strcmp(argv[1], "--linf") == 0) {
            is_linf = true;
        } else {
            std::cerr << "Error: Only --l2/--linf argument allowed." << std::endl;
            return -2;
        }
    }

    if (is_linf) {
        run_benchmark(sdg);
    } else {
        run_benchmark(sdgl2);
    }
    return 0;
}
int main(int argc, char **argv) { bench_options options; FILE *infile; char input_file_name[100]; // Defaults. options.iterations = 1; options.level = HIGH_COMPRESSION; options.library = LIB_ZLIB; if (argc < 2) { puts("Too few arguments"); usage(); return 1; } get_options(argc, argv, &options, input_file_name); // Open input file. infile = fopen(input_file_name, "r"); if (!infile) { puts("Error: problem with opening input file."); return 1; } switch(options.library) { case LIB_ZLIB: run_benchmark(infile, input_file_name, options); rewind(infile); break; case LIB_BZIP2: run_benchmark(infile, input_file_name, options); rewind(infile); break; case LIB_SNAPPY: run_benchmark(infile, input_file_name, options); break; case LIB_LZO: run_benchmark(infile, input_file_name, options); break; default: break; } fclose(infile); return 0; }
// HPX entry point: extract the benchmark parameters from the parsed command
// line, print the test banner, run the scatter latency benchmark, and shut
// HPX down.
int hpx_main(boost::program_options::variables_map & vm) {
    params const p = process_args(vm);
    print_header("OSU HPX Scatter Latency Test");
    run_benchmark(p);
    return hpx::finalize();
}
/* Configure `data` for a multi-multiplication of `count` scalar/point pairs
 * (optionally including the generator when `includes_g` is set), precompute
 * the negation of the expected result for every benchmark iteration, and run
 * the "ecmult_N[g]" benchmark. */
static void run_test(bench_data* data, size_t count, int includes_g) {
    char str[32];
    static const secp256k1_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0);
    size_t iters = 1 + ITERS / count;
    size_t iter;

    data->count = count;
    data->includes_g = includes_g;

    /* Compute (the negation of) the expected results directly. */
    data->offset1 = (data->count * 0x537b7f6f + 0x8f66a481) % POINTS;
    data->offset2 = (data->count * 0x7f6f537b + 0x6a1a8f49) % POINTS;
    for (iter = 0; iter < iters; ++iter) {
        secp256k1_scalar tmp;
        secp256k1_scalar total = data->scalars[(data->offset1++) % POINTS];
        size_t i = 0;
        for (i = 0; i + 1 < count; ++i) {
            secp256k1_scalar_mul(&tmp, &data->seckeys[(data->offset2++) % POINTS], &data->scalars[(data->offset1++) % POINTS]);
            secp256k1_scalar_add(&total, &total, &tmp);
        }
        secp256k1_scalar_negate(&total, &total);
        secp256k1_ecmult(&data->ctx->ecmult_ctx, &data->expected_output[iter], NULL, &zero, &total);
    }

    /* Run the benchmark.  BUG FIX: use snprintf rather than sprintf so a
     * very large count cannot overflow str[]. */
    snprintf(str, sizeof(str), includes_g ? "ecmult_%ig" : "ecmult_%i", (int)count);
    run_benchmark(str, bench_ecmult, bench_ecmult_setup, bench_ecmult_teardown, data, 10, count * (1 + ITERS / count));
}
/* Worker side of the memory benchmark.
 * Synchronises with the master on the "mem_bench_ready" barrier, runs the
 * benchmark for MAX_REQUESTS requests, then synchronises again on
 * "mem_bench_finished".  Trace events bracket each phase.  Panics (does not
 * return an error) if a barrier fails. */
static int run_worker(coreid_t mycore) {
    errval_t err;
    trace_event(TRACE_SUBSYS_MEMTEST, TRACE_EVENT_MEMTEST_WAIT, 0);
    /* wait until every participant is ready */
    err = ns_barrier_worker((int)mycore, "mem_bench_ready");
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "barrier_worker failed");
    }
    trace_event(TRACE_SUBSYS_MEMTEST, TRACE_EVENT_MEMTEST_RUN, 0);
    run_benchmark(mycore, MAX_REQUESTS);
    trace_event(TRACE_SUBSYS_MEMTEST, TRACE_EVENT_MEMTEST_WAIT, 0);
    /* signal completion to the master */
    err = ns_barrier_worker((int)mycore, "mem_bench_finished");
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "barrier_worker failed");
    }
    trace_event(TRACE_SUBSYS_MEMTEST, TRACE_EVENT_MEMTEST_DONE, 0);
    return EXIT_SUCCESS;
}
void CLI::apply_option(int option) { switch (option) { case HUMAN_VS_CPU: case CPU_VS_HUMAN: case HUMAN_VS_HUMAN: case CPU_VS_CPU: init_game(option); start_game(); end_game(); break; case LOAD: read_load(); break; case SHOW_HELP: print_help(); break; case BENCHMARK: run_benchmark(); break; case WAC: run_wac_test(); break; case SETTINGS: read_settings(); break; case QUIT: cout << "Thanks for playing...!! Have fun..\n"; break; } }
/** * The application entry point under linux and windows. * * @param argc number of arguments including the program name * @param argv program arguments including the program name */ int main(int argc, char *argv[]) { /* rhash_transmit(RMSG_SET_OPENSSL_MASK, 0, RHASH_ALL_HASHES, 0); */ #ifndef USE_RHASH_DLL rhash_library_init(); #endif test_endianness(); if(argc > 1 && strcmp(argv[1], "--speed") == 0) { unsigned hash_id = (argc > 2 ? find_hash(argv[2]) : RHASH_SHA1); if(hash_id == 0) { fprintf(stderr, "error: unknown hash_id: %s\n", argv[2]); return 1; } test_known_strings(hash_id); run_benchmark(hash_id, 0, stdout); } else if(argc > 1 && strcmp(argv[1], "--flags") == 0) { printf("%s", compiler_flags); } else { test_all_known_strings(); test_long_strings(); test_alignment(); if(n_errors == 0) printf("All sums are working properly!\n"); fflush(stdout); } if(n_errors > 0) printf("%s", compiler_flags); return (n_errors == 0 ? 0 : 1); }
int main(void) { int i; secp256k1_pubkey_t pubkey; secp256k1_ecdsa_signature_t sig; benchmark_verify_t data; data.ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); for (i = 0; i < 32; i++) { data.msg[i] = 1 + i; } for (i = 0; i < 32; i++) { data.key[i] = 33 + i; } data.siglen = 72; CHECK(secp256k1_ecdsa_sign(data.ctx, &sig, data.msg, data.key, NULL, NULL)); CHECK(secp256k1_ecdsa_signature_serialize_der(data.ctx, data.sig, &data.siglen, &sig)); CHECK(secp256k1_ec_pubkey_create(data.ctx, &pubkey, data.key)); CHECK(secp256k1_ec_pubkey_serialize(data.ctx, data.pubkey, &data.pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED) == 1); run_benchmark("ecdsa_verify", benchmark_verify, NULL, NULL, &data, 10, 20000); secp256k1_context_destroy(data.ctx); return 0; }
static int run_all_benchmarks(size_t msg_size) { int error = 0; size_t i; for (i = 0; i < GPR_ARRAY_SIZE(test_strategies); ++i) { test_strategy *strategy = &test_strategies[i]; size_t j; for (j = 0; j < GPR_ARRAY_SIZE(socket_types); ++j) { thread_args *client_args = malloc(sizeof(thread_args)); thread_args *server_args = malloc(sizeof(thread_args)); char *socket_type = socket_types[j]; client_args->read_bytes = strategy->read_strategy; client_args->write_bytes = blocking_write_bytes; client_args->setup = strategy->setup; client_args->msg_size = msg_size; client_args->strategy_name = strategy->name; server_args->read_bytes = strategy->read_strategy; server_args->write_bytes = blocking_write_bytes; server_args->setup = strategy->setup; server_args->msg_size = msg_size; server_args->strategy_name = strategy->name; error = run_benchmark(socket_type, client_args, server_args); if (error < 0) { return error; } } } return error; }
/** \fun main \brief main program of the miniapp, run by MPI */ int main(int argc, char* argv[]) { // Build benchmark according to command line arguments benchmark b(argc, argv); replib::statistics s = run_benchmark(b); s.process(); std::cout << s << std::endl; return 0; }
/* Benchmark ECDSA signing using the legacy global-context API
 * (secp256k1_start / secp256k1_stop). */
int main(void) {
    bench_sign_t data;
    secp256k1_start(SECP256K1_START_SIGN);
    run_benchmark("ecdsa_sign", bench_sign, bench_sign_setup, NULL, &data, 10, 20000);
    secp256k1_stop();
    return 0;
}
/* Benchmark ECDSA public-key recovery with a verification-capable context. */
int main(void) {
    bench_recover_t data;
    data.ctx = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY);
    run_benchmark("ecdsa_recover", bench_recover, bench_recover_setup, NULL, &data, 10, 20000);
    secp256k1_context_destroy(data.ctx);
    return 0;
}
int main(void) { bench_sign_t data; data.ctx = secp256k1_context_create(secp256k1_context_sign); run_benchmark("ecdsa_sign", bench_sign, bench_sign_setup, null, &data, 10, 20000); secp256k1_context_destroy(data.ctx); return 0; }
/* Run the benchmark once for each allocation strategy (worst fit, best fit,
 * first fit) with the given mean and jitter applied to a copy of `p`.
 * Returns 0 on completion.
 *
 * BUG FIX: the function is declared to return int but previously ended
 * without a return statement — undefined behavior if a caller uses the
 * value. */
int run_strategies_benchmarks(int mean, int jitter, struct parameters p) {
    p.mean = mean;
    p.jitter = jitter;
    struct parameters p1, p2, p3;
    p1 = p2 = p3 = p;

    add_to_description("WORST FIT STRATEGY", &p1);
    p1.strategy = WORST_FIT;
    run_benchmark(p1);

    add_to_description("BEST FIT STRATEGY", &p2);
    p2.strategy = BEST_FIT;
    run_benchmark(p2);

    add_to_description("FIRST FIT STRATEGY", &p3);
    p3.strategy = FIRST_FIT;
    run_benchmark(p3);

    return 0;
}
/* mem_bench entry point.
 *
 * With an argument (master, must be core 0): argv[1] is the number of worker
 * cores.  Sets up tracing, respawns this binary (with the count argument
 * stripped) on cores 1..N, runs the core-0 benchmark, then waits on the
 * "mem_bench" barrier before stopping tracing.
 *
 * Without arguments (worker): just runs the benchmark on this core. */
int main(int argc, char *argv[]) {
    errval_t err;
    coreid_t mycore = disp_get_core_id();
    debug_printf("This is mem_bench\n");
    if (argc >= 2) {
        /* master path — only valid on core 0 */
        assert(mycore == 0);
        int num_cores = strtol(argv[1], NULL, 10);
        debug_printf("spawning on %d cores\n", num_cores);
        err = init_tracing();
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "initialising tracing");
            return EXIT_FAILURE;
        }
        prepare_dump();
        start_tracing();
        char *path = argv[0];
        /* strip the core-count argument so workers take the else-branch */
        argv[1] = NULL;
        for (int i = 1; i <= num_cores; i++) {
            err = spawn_program(i, path, argv, NULL, 0, NULL);
            if (err_is_fail(err)) {
                DEBUG_ERR(err, "failed spawn %d", i);
                return EXIT_FAILURE;
            }
            debug_printf("spawned on core %d\n", i);
        }
        //start_tracing();
        run_benchmark_0(mycore);
        /* synchronise with the spawned workers on the "mem_bench" barrier */
        ns_barrier_master(1, num_cores, "mem_bench");
        debug_printf("all benchmarks completed\n");
        stop_tracing();
        // dump_trace();
    } else {
        run_benchmark(mycore);
    }
    return EXIT_SUCCESS;
}
// Execute `benchmark` once per configuration, or a single time when the
// benchmark exposes no configurations.  Options are pulled from the
// benchmark into the runner's option set first.
void runner::run_benchmark_configurations(benchmark_ptr benchmark)
{
    assert(benchmark);
    assert(m_impl);

    benchmark->get_options(m_impl->m_options);

    if (!benchmark->has_configurations())
    {
        run_benchmark(benchmark);
        return;
    }

    const uint32_t config_count = benchmark->configuration_count();
    for (uint32_t index = 0; index < config_count; ++index)
    {
        benchmark->set_current_configuration(index);
        run_benchmark(benchmark);
    }
}
/* Benchmark OpenCL command-queue configurations: set up a platform and a
 * 4096*2048-element data set, then time four queue arrangements. */
int main (void)
{
    OclPlatform *ocl;
    Data *data;

    ocl = ocl_new (0, CL_DEVICE_TYPE_ALL);
    if (ocl == NULL)
        return 1;

    data = setup_data (ocl, 4096 * 2048);

    run_benchmark (setup_single_blocking_queue, "Single blocking queue: %fs\n", data);
    run_benchmark (setup_ooo_queue, "Single out-of-order queue: %fs\n", data);
    run_benchmark (setup_two_queues, "Two queues: %fs\n", data);
    run_benchmark (setup_three_queues, "Three queues: %fs\n", data);

    free_data (data);
    ocl_free (ocl);
    return 0;
}
/* Benchmark constant-time AES: key-schedule setup plus per-byte encryption
 * and decryption cost for the 128-, 192- and 256-bit variants. */
int main(void) {
    AES128_ctx ctx128;
    AES192_ctx ctx192;
    AES256_ctx ctx256;

    /* AES-128 */
    run_benchmark("aes128_init", bench_AES128_init, NULL, NULL, &ctx128, 20, 50000);
    run_benchmark("aes128_encrypt_byte", bench_AES128_encrypt, bench_AES128_encrypt_setup, NULL, &ctx128, 20, 4000000);
    run_benchmark("aes128_decrypt_byte", bench_AES128_decrypt, bench_AES128_encrypt_setup, NULL, &ctx128, 20, 4000000);
    /* AES-192 */
    run_benchmark("aes192_init", bench_AES192_init, NULL, NULL, &ctx192, 20, 50000);
    run_benchmark("aes192_encrypt_byte", bench_AES192_encrypt, bench_AES192_encrypt_setup, NULL, &ctx192, 20, 4000000);
    run_benchmark("aes192_decrypt_byte", bench_AES192_decrypt, bench_AES192_encrypt_setup, NULL, &ctx192, 20, 4000000);
    /* AES-256 */
    run_benchmark("aes256_init", bench_AES256_init, NULL, NULL, &ctx256, 20, 50000);
    run_benchmark("aes256_encrypt_byte", bench_AES256_encrypt, bench_AES256_encrypt_setup, NULL, &ctx256, 20, 4000000);
    run_benchmark("aes256_decrypt_byte", bench_AES256_decrypt, bench_AES256_encrypt_setup, NULL, &ctx256, 20, 4000000);
    return 0;
}
int main(int argc, char* argv[]) { // build argument from the command line keyvalue::argument a(argc, argv); // build the bench infunction of the argument if(a.backend() == "map"){ benchmark<keyvalue::map> b(a); //bench keyvalue::statistic s = run_benchmark(b); //compute statistics s.process(); //print the results std::cout << s << std::endl; } else { std::cout << "Backend not supported in this version" << std::endl; } return 0; }
/* Run every benchmark found in the tst/benchmarks/ directory.
 *
 * BUG FIX: the path was previously assembled with two stpncpy() calls whose
 * size limits (50, 250) were unrelated to the 300-byte destination and could
 * leave it unterminated for long directory entries; snprintf() always
 * terminates and lets us detect truncation. */
int main(int argc, char* argv[]) {
    const char* benchmarks = "tst/benchmarks/";
    putchar('\n');

    DIR* d = opendir(benchmarks);
    if (d == NULL) {
        perror(benchmarks);
        exit(errno);
    }

    struct dirent* ent = readdir(d);
    while (ent) {
        char full_path[300];
        char* name = ent->d_name;
        int written = snprintf(full_path, sizeof(full_path), "%s%s", benchmarks, name);
        if (written < 0 || (size_t)written >= sizeof(full_path)) {
            fprintf(stderr, "skipping %s: path too long\n", name);
        } else {
            run_benchmark(full_path, name);
        }
        ent = readdir(d);
    }
    closedir(d);
    return 0;
}
// Program entry: parse arguments, build the simulated system from the
// description file, then either benchmark the simulation or run it in full.
// Any exception is reported on stderr and turns into EXIT_FAILURE.
int main(int argc, char **argv) {
    try {
        process_args(argc, argv);
        SystemDescription sys_desc(param_sys_desc_file);
        Simulation sim(sys_desc);
        if (param_do_benchmark) {
            run_benchmark(sim);
        } else {
            sim.run();
        }
    } catch (exception &e) {
        cerr << e.what() << endl;
        return EXIT_FAILURE;
    }
    return EXIT_SUCCESS;
}
// Run the named benchmarks, or every registered benchmark when `benchmarks`
// is null.  Entries of the form "name:arg" are dispatched to the
// command-line benchmark runner with the user-supplied argument.
void run_benchmarks(const std::vector<std::string>* benchmarks) {
    std::vector<std::string> all_benchmarks;
    if(!benchmarks) {
        // no explicit list: collect every registered benchmark name
        for(BenchmarkMap::const_iterator i = get_benchmark_map().begin(); i != get_benchmark_map().end(); ++i) {
            all_benchmarks.push_back(i->first);
        }
        benchmarks = &all_benchmarks;
    }

    foreach(const std::string& benchmark, *benchmarks) {
        std::string::const_iterator colon = std::find(benchmark.begin(), benchmark.end(), ':');
        if(colon != benchmark.end()) {
            //this benchmark has a user-supplied argument
            const std::string bench_name(benchmark.begin(), colon);
            const std::string arg(colon+1, benchmark.end());
            run_command_line_benchmark(bench_name, arg);
        } else {
            run_benchmark(benchmark, get_benchmark_map()[benchmark]);
        }
    }
}
// Look up the named command-line benchmark and run it with `arg` bound as
// the benchmark's user-supplied argument (the _1 placeholder receives the
// runner's own parameter).
void run_command_line_benchmark(const std::string& benchmark_name, const std::string& arg) {
    run_benchmark(benchmark_name, boost::bind(get_cl_benchmark_map()[benchmark_name], _1, arg));
}
/* Dispatcher for the internal micro-benchmarks.  Each benchmark runs when
 * its category flag (scalar / field / group / ecmult / hash / context) or
 * its specific operation name appears on the command line. */
int main(int argc, char **argv) {
    bench_inv_t data;

    /* scalar operations */
    if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "add")) run_benchmark("scalar_add", bench_scalar_add, bench_setup, NULL, &data, 10, 2000000);
    if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "negate")) run_benchmark("scalar_negate", bench_scalar_negate, bench_setup, NULL, &data, 10, 2000000);
    if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "sqr")) run_benchmark("scalar_sqr", bench_scalar_sqr, bench_setup, NULL, &data, 10, 200000);
    if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "mul")) run_benchmark("scalar_mul", bench_scalar_mul, bench_setup, NULL, &data, 10, 200000);
#ifdef USE_ENDOMORPHISM
    if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "split")) run_benchmark("scalar_split", bench_scalar_split, bench_setup, NULL, &data, 10, 20000);
#endif
    if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "inverse")) run_benchmark("scalar_inverse", bench_scalar_inverse, bench_setup, NULL, &data, 10, 2000);
    if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "inverse")) run_benchmark("scalar_inverse_var", bench_scalar_inverse_var, bench_setup, NULL, &data, 10, 2000);

    /* field operations */
    if (have_flag(argc, argv, "field") || have_flag(argc, argv, "normalize")) run_benchmark("field_normalize", bench_field_normalize, bench_setup, NULL, &data, 10, 2000000);
    if (have_flag(argc, argv, "field") || have_flag(argc, argv, "normalize")) run_benchmark("field_normalize_weak", bench_field_normalize_weak, bench_setup, NULL, &data, 10, 2000000);
    if (have_flag(argc, argv, "field") || have_flag(argc, argv, "sqr")) run_benchmark("field_sqr", bench_field_sqr, bench_setup, NULL, &data, 10, 200000);
    if (have_flag(argc, argv, "field") || have_flag(argc, argv, "mul")) run_benchmark("field_mul", bench_field_mul, bench_setup, NULL, &data, 10, 200000);
    if (have_flag(argc, argv, "field") || have_flag(argc, argv, "inverse")) run_benchmark("field_inverse", bench_field_inverse, bench_setup, NULL, &data, 10, 20000);
    if (have_flag(argc, argv, "field") || have_flag(argc, argv, "inverse")) run_benchmark("field_inverse_var", bench_field_inverse_var, bench_setup, NULL, &data, 10, 20000);
    if (have_flag(argc, argv, "field") || have_flag(argc, argv, "sqrt")) run_benchmark("field_sqrt_var", bench_field_sqrt_var, bench_setup, NULL, &data, 10, 20000);

    /* group operations */
    if (have_flag(argc, argv, "group") || have_flag(argc, argv, "double")) run_benchmark("group_double_var", bench_group_double_var, bench_setup, NULL, &data, 10, 200000);
    if (have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_var", bench_group_add_var, bench_setup, NULL, &data, 10, 200000);
    if (have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_affine", bench_group_add_affine, bench_setup, NULL, &data, 10, 200000);
    if (have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_affine_var", bench_group_add_affine_var, bench_setup, NULL, &data, 10, 200000);

    /* wNAF decomposition */
    if (have_flag(argc, argv, "ecmult") || have_flag(argc, argv, "wnaf")) run_benchmark("wnaf_const", bench_wnaf_const, bench_setup, NULL, &data, 10, 20000);
    if (have_flag(argc, argv, "ecmult") || have_flag(argc, argv, "wnaf")) run_benchmark("ecmult_wnaf", bench_ecmult_wnaf, bench_setup, NULL, &data, 10, 20000);

    /* hashing */
    if (have_flag(argc, argv, "hash") || have_flag(argc, argv, "sha256")) run_benchmark("hash_sha256", bench_sha256, bench_setup, NULL, &data, 10, 20000);
    if (have_flag(argc, argv, "hash") || have_flag(argc, argv, "hmac")) run_benchmark("hash_hmac_sha256", bench_hmac_sha256, bench_setup, NULL, &data, 10, 20000);
    if (have_flag(argc, argv, "hash") || have_flag(argc, argv, "rng6979")) run_benchmark("hash_rfc6979_hmac_sha256", bench_rfc6979_hmac_sha256, bench_setup, NULL, &data, 10, 20000);

    /* context creation (few iterations: these are expensive) */
    if (have_flag(argc, argv, "context") || have_flag(argc, argv, "verify")) run_benchmark("context_verify", bench_context_verify, bench_setup, NULL, &data, 10, 20);
    if (have_flag(argc, argv, "context") || have_flag(argc, argv, "sign")) run_benchmark("context_sign", bench_context_sign, bench_setup, NULL, &data, 10, 200);

    return 0;
}
/* Benchmark ECDH shared-secret computation. */
int main(void) {
    bench_ecdh_t data;
    run_benchmark("ecdh", bench_ecdh, bench_ecdh_setup, NULL, &data, 10, 20000);
    return 0;
}
int main(int argc, char *argv[]) { struct benchmark_config cfg; memset(&cfg, 0, sizeof(struct benchmark_config)); if (config_parse_args(argc, argv, &cfg) < 0) { usage(); } config_init_defaults(&cfg); log_level = cfg.debug; if (cfg.show_config) { fprintf(stderr, "============== Configuration values: ==============\n"); config_print(stdout, &cfg); fprintf(stderr, "===================================================\n"); } struct rlimit rlim; if (getrlimit(RLIMIT_NOFILE, &rlim) != 0) { benchmark_error_log("error: getrlimit failed: %s\n", strerror(errno)); exit(1); } if (cfg.unix_socket != NULL && (cfg.server != NULL || cfg.port > 0)) { benchmark_error_log("error: UNIX domain socket and TCP cannot be used together.\n"); exit(1); } unsigned int fds_needed = (cfg.threads * cfg.clients) + (cfg.threads * 10) + 10; if (fds_needed > rlim.rlim_cur) { if (fds_needed > rlim.rlim_max && getuid() != 0) { benchmark_error_log("error: running the tool with this number of connections requires 'root' privilegs.\n"); exit(1); } rlim.rlim_cur = rlim.rlim_max = fds_needed; if (setrlimit(RLIMIT_NOFILE, &rlim) != 0) { benchmark_error_log("error: setrlimit failed: %s\n", strerror(errno)); exit(1); } } // create and configure object generator object_generator* obj_gen = NULL; imported_keylist* keylist = NULL; if (!cfg.data_import) { if (cfg.data_verify) { fprintf(stderr, "error: use data-verify only with data-import\n"); exit(1); } if (cfg.no_expiry) { fprintf(stderr, "error: use no-expiry only with data-import\n"); exit(1); } obj_gen = new object_generator(); assert(obj_gen != NULL); } else { // check paramters if (cfg.data_size || cfg.data_size_list.is_defined() || cfg.data_size_range.is_defined()) { fprintf(stderr, "error: data size cannot be specified when importing.\n"); exit(1); } if (cfg.random_data) { fprintf(stderr, "error: random-data cannot be specified when importing.\n"); exit(1); } if (!cfg.generate_keys && (cfg.key_maximum || cfg.key_minimum || cfg.key_prefix)) { 
fprintf(stderr, "error: use key-minimum, key-maximum and key-prefix only with generate-keys.\n"); exit(1); } if (!cfg.generate_keys) { // read keys fprintf(stderr, "Reading keys from %s...", cfg.data_import); keylist = new imported_keylist(cfg.data_import); assert(keylist != NULL); if (!keylist->read_keys()) { fprintf(stderr, "\nerror: failed to read keys.\n"); exit(1); } else { fprintf(stderr, " %u keys read.\n", keylist->size()); } } obj_gen = new import_object_generator(cfg.data_import, keylist, cfg.no_expiry); assert(obj_gen != NULL); if (dynamic_cast<import_object_generator*>(obj_gen)->open_file() != true) { fprintf(stderr, "error: %s: failed to open.\n", cfg.data_import); exit(1); } } if (cfg.authenticate) { if (strcmp(cfg.protocol, "redis") != 0 && strcmp(cfg.protocol, "memcache_binary") != 0) { fprintf(stderr, "error: authenticate can only be used with redis or memcache_binary.\n"); usage(); } if (strcmp(cfg.protocol, "memcache_binary") == 0 && strchr(cfg.authenticate, ':') == NULL) { fprintf(stderr, "error: binary_memcache credentials must be in the form of USER:PASSWORD.\n"); usage(); } } if (cfg.select_db > 0 && strcmp(cfg.protocol, "redis")) { fprintf(stderr, "error: select-db can only be used with redis protocol.\n"); usage(); } if (cfg.data_offset > 0) { if (cfg.data_offset > (1<<29)-1) { fprintf(stderr, "error: data-offset too long\n"); usage(); } if (cfg.expiry_range.min || cfg.expiry_range.max || strcmp(cfg.protocol, "redis")) { fprintf(stderr, "error: data-offset can only be used with redis protocol, and cannot be used with expiry\n"); usage(); } } if (cfg.data_size) { if (cfg.data_size_list.is_defined() || cfg.data_size_range.is_defined()) { fprintf(stderr, "error: data-size cannot be used with data-size-list or data-size-range.\n"); usage(); } obj_gen->set_data_size_fixed(cfg.data_size); } else if (cfg.data_size_list.is_defined()) { if (cfg.data_size_range.is_defined()) { fprintf(stderr, "error: data-size-list cannot be used with 
data-size-range.\n"); usage(); } obj_gen->set_data_size_list(&cfg.data_size_list); } else if (cfg.data_size_range.is_defined()) { obj_gen->set_data_size_range(cfg.data_size_range.min, cfg.data_size_range.max); obj_gen->set_data_size_pattern(cfg.data_size_pattern); } else if (!cfg.data_import) { fprintf(stderr, "error: data-size, data-size-list or data-size-range must be specified.\n"); usage(); } if (!cfg.data_import) { obj_gen->set_random_data(cfg.random_data); } if (!cfg.data_import || cfg.generate_keys) { obj_gen->set_key_prefix(cfg.key_prefix); obj_gen->set_key_range(cfg.key_minimum, cfg.key_maximum); } if (cfg.key_stddev>0 || cfg.key_median>0) { if (cfg.key_pattern[0]!='G' && cfg.key_pattern[2]!='G') { fprintf(stderr, "error: key-stddev and key-median are only allowed together with key-pattern set to G.\n"); usage(); } if (cfg.key_median!=0 && (cfg.key_median<cfg.key_minimum || cfg.key_median>cfg.key_maximum)) { fprintf(stderr, "error: key-median must be between key-minimum and key-maximum.\n"); usage(); } obj_gen->set_key_distribution(cfg.key_stddev, cfg.key_median); } obj_gen->set_expiry_range(cfg.expiry_range.min, cfg.expiry_range.max); // Prepare output file FILE *outfile; if (cfg.out_file != NULL) { fprintf(stderr, "Writing results to %s...\n", cfg.out_file); outfile = fopen(cfg.out_file, "w"); if (!outfile) { perror(cfg.out_file); } } else { outfile = stdout; } if (!cfg.verify_only) { std::vector<run_stats> all_stats; for (unsigned int run_id = 1; run_id <= cfg.run_count; run_id++) { if (run_id > 1) sleep(1); // let connections settle run_stats stats = run_benchmark(run_id, &cfg, obj_gen); all_stats.push_back(stats); } // Print some run information fprintf(outfile, "%-9u Threads\n" "%-9u Connections per thread\n" "%-9u %s\n", cfg.threads, cfg.clients, cfg.requests > 0 ? cfg.requests : cfg.test_time, cfg.requests > 0 ? 
"Requests per thread" : "Seconds"); // If more than 1 run was used, compute best, worst and average if (cfg.run_count > 1) { unsigned int min_ops_sec = (unsigned int) -1; unsigned int max_ops_sec = 0; run_stats* worst = NULL; run_stats* best = NULL; for (std::vector<run_stats>::iterator i = all_stats.begin(); i != all_stats.end(); i++) { unsigned long usecs = i->get_duration_usec(); unsigned int ops_sec = (int)(((double)i->get_total_ops() / (usecs > 0 ? usecs : 1)) * 1000000); if (ops_sec < min_ops_sec || worst == NULL) { min_ops_sec = ops_sec; worst = &(*i); } if (ops_sec > max_ops_sec || best == NULL) { max_ops_sec = ops_sec; best = &(*i); } } fprintf(outfile, "\n\n" "BEST RUN RESULTS\n" "========================================================================\n"); best->print(outfile, !cfg.hide_histogram); fprintf(outfile, "\n\n" "WORST RUN RESULTS\n" "========================================================================\n"); worst->print(outfile, !cfg.hide_histogram); fprintf(outfile, "\n\n" "AGGREGATED AVERAGE RESULTS (%u runs)\n" "========================================================================\n", cfg.run_count); run_stats average; average.aggregate_average(all_stats); average.print(outfile, !cfg.hide_histogram); } else { all_stats.begin()->print(outfile, !cfg.hide_histogram); } } // If needed, data verification is done now... if (cfg.data_verify) { struct event_base *verify_event_base = event_base_new(); abstract_protocol *verify_protocol = protocol_factory(cfg.protocol); verify_client *client = new verify_client(verify_event_base, &cfg, verify_protocol, obj_gen); fprintf(outfile, "\n\nPerforming data verification...\n"); // Run client in verification mode client->prepare(); event_base_dispatch(verify_event_base); fprintf(outfile, "Data verification completed:\n" "%-10llu keys verified successfuly.\n" "%-10llu keys failed.\n", client->get_verified_keys(), client->get_errors()); // Clean up... 
delete client; delete verify_protocol; event_base_free(verify_event_base); } if (outfile != stdout) { fclose(outfile); } delete obj_gen; if (keylist != NULL) delete keylist; }
int main(int argc, char **argv) { thread_args *client_args = malloc(sizeof(thread_args)); thread_args *server_args = malloc(sizeof(thread_args)); int msg_size = -1; char *read_strategy = NULL; char *socket_type = NULL; size_t i; const test_strategy *strategy = NULL; int error = 0; gpr_cmdline *cmdline = gpr_cmdline_create("low_level_ping_pong network benchmarking tool"); gpr_cmdline_add_int(cmdline, "msg_size", "Size of sent messages", &msg_size); gpr_cmdline_add_string(cmdline, "read_strategy", read_strategy_usage, &read_strategy); gpr_cmdline_add_string(cmdline, "socket_type", socket_type_usage, &socket_type); gpr_cmdline_parse(cmdline, argc, argv); if (msg_size == -1) { msg_size = 50; } if (read_strategy == NULL) { gpr_log(GPR_INFO, "No strategy specified, running all benchmarks"); return run_all_benchmarks((size_t)msg_size); } if (socket_type == NULL) { socket_type = "tcp"; } if (msg_size <= 0) { fprintf(stderr, "msg_size must be > 0\n"); print_usage(argv[0]); return -1; } for (i = 0; i < GPR_ARRAY_SIZE(test_strategies); ++i) { if (strcmp(test_strategies[i].name, read_strategy) == 0) { strategy = &test_strategies[i]; } } if (strategy == NULL) { fprintf(stderr, "Invalid read strategy %s\n", read_strategy); return -1; } client_args->read_bytes = strategy->read_strategy; client_args->write_bytes = blocking_write_bytes; client_args->setup = strategy->setup; client_args->msg_size = (size_t)msg_size; client_args->strategy_name = read_strategy; server_args->read_bytes = strategy->read_strategy; server_args->write_bytes = blocking_write_bytes; server_args->setup = strategy->setup; server_args->msg_size = (size_t)msg_size; server_args->strategy_name = read_strategy; error = run_benchmark(socket_type, client_args, server_args); gpr_cmdline_destroy(cmdline); return error; }
/*
 * JNI entry point: benchmark the named OpenSSL cipher or digest for the
 * given test index and return {block length, iteration count, elapsed
 * seconds} as a double[3].  Returns NULL when the algorithm is unknown,
 * testnumber is out of range, or the timer thread cannot be started.
 *
 * BUG FIXES: `testnum` was range-checked BEFORE being assigned from
 * `testnumber` (the check tested a stale value); the JNI UTF string was
 * never released; the benchmark buffers and loopargs leaked on every call.
 */
jdoubleArray Java_de_blinkt_openvpn_core_NativeUtils_getOpenSSLSpeed(JNIEnv* env, jclass thiz, jstring algorithm, jint testnumber)
{
    static const unsigned char key16[16] = {
        0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0,
        0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0, 0x12
    };
    const EVP_CIPHER *evp_cipher = NULL;
    /* NOTE(review): declared locally so a stale value from a previous call
     * cannot leak in through file-scope state — confirm no other code reads
     * the old global `evp_md`. */
    const EVP_MD *evp_md = NULL;

    const char *alg = (*env)->GetStringUTFChars(env, algorithm, NULL);
    evp_cipher = EVP_get_cipherbyname(alg);
    if (evp_cipher == NULL)
        evp_md = EVP_get_digestbyname(alg);
    /* BUG FIX: release the UTF string on every path (it was leaked). */
    (*env)->ReleaseStringUTFChars(env, algorithm, alg);
    if (evp_cipher == NULL && evp_md == NULL) {
        // BIO_printf(bio_err, "%s: %s is an unknown cipher or digest\n", prog, opt_arg());
        //jniThrowException(env, "java/security/NoSuchAlgorithmException", "Algorithm not found");
        return NULL;
    }

    /* BUG FIX: assign before checking; the check previously ran on the old
     * value of `testnum`. */
    testnum = testnumber;
    if (testnum < 0 || testnum >= SIZE_NUM)
        return NULL;

    const char *name;
    loopargs_t *loopargs = NULL;
    int loopargs_len = 1;
    int async_jobs = 0;
    loopargs = malloc(loopargs_len * sizeof(loopargs_t));
    if (loopargs == NULL)
        return NULL;
    memset(loopargs, 0, loopargs_len * sizeof(loopargs_t));

    for (int i = 0; i < loopargs_len; i++) {
        int misalign = 0;
        loopargs[i].buf_malloc = malloc((int)BUFSIZE + MAX_MISALIGNMENT + 1);
        loopargs[i].buf2_malloc = malloc((int)BUFSIZE + MAX_MISALIGNMENT + 1);
        /* Align the start of buffers on a 64 byte boundary */
        loopargs[i].buf = loopargs[i].buf_malloc + misalign;
        loopargs[i].buf2 = loopargs[i].buf2_malloc + misalign;
    }

    int count = 0;
    float d = 0;
    if (evp_cipher) {
        name = OBJ_nid2ln(EVP_CIPHER_nid(evp_cipher));
        /*
         * -O3 -fschedule-insns messes up an optimization here!
         * names[D_EVP] somehow becomes NULL
         */
        for (int k = 0; k < loopargs_len; k++) {
            loopargs[k].ctx = EVP_CIPHER_CTX_new();
            if (decrypt)
                EVP_DecryptInit_ex(loopargs[k].ctx, evp_cipher, NULL, key16, iv);
            else
                EVP_EncryptInit_ex(loopargs[k].ctx, evp_cipher, NULL, key16, iv);
            EVP_CIPHER_CTX_set_padding(loopargs[k].ctx, 0);
        }
        Time_F(START);
        pthread_t timer_thread;
        if (pthread_create(&timer_thread, NULL, stop_run, NULL))
            goto fail; /* was: return NULL, leaking every buffer */
        count = run_benchmark(async_jobs, EVP_Update_loop, loopargs);
        d = Time_F(STOP);
        for (int k = 0; k < loopargs_len; k++) {
            EVP_CIPHER_CTX_free(loopargs[k].ctx);
        }
    }
    if (evp_md) {
        name = OBJ_nid2ln(EVP_MD_type(evp_md));
        // print_message(names[D_EVP], save_count, lengths[testnum]);
        pthread_t timer_thread;
        if (pthread_create(&timer_thread, NULL, stop_run, NULL))
            goto fail;
        Time_F(START);
        count = run_benchmark(async_jobs, EVP_Digest_loop, loopargs);
        d = Time_F(STOP);
    }

    // Save results in hacky way
    jdoubleArray ret = (*env)->NewDoubleArray(env, 3);
    double results[] = {(double) lengths[testnum], (double) count, d};
    (*env)->SetDoubleArrayRegion(env, ret, 0, 3, results);
    // print_result(D_EVP, testnum, count, d);

    /* BUG FIX: free the benchmark buffers (previously leaked). */
    for (int i = 0; i < loopargs_len; i++) {
        free(loopargs[i].buf_malloc);
        free(loopargs[i].buf2_malloc);
    }
    free(loopargs);
    return ret;

fail:
    for (int i = 0; i < loopargs_len; i++) {
        free(loopargs[i].buf_malloc);
        free(loopargs[i].buf2_malloc);
    }
    free(loopargs);
    return NULL;
}