void object::test<7>() { set_test_name("Test Limits on setting home URL"); LLMediaEntry entry; U32 status = entry.setHomeURL(URL_OK); ensure(get_test_name() + " ok failed", status == LSL_STATUS_OK); status = entry.setHomeURL(URL_TOO_BIG); ensure(get_test_name() + " ok failed", status == LSL_STATUS_BOUNDS_ERROR); }
void object::test<8>() { set_test_name("Test Limits on setting whitelist"); // Test a valid list LLMediaEntry entry; std::vector<std::string> whitelist; whitelist.push_back(std::string(URL_OK)); S32 status = entry.setWhiteList(whitelist); ensure(get_test_name() + " invalid result", status == LSL_STATUS_OK); ensure(get_test_name() + " failed", whitelist == entry.getWhiteList()); }
void object::test<9>() { set_test_name("Test Limits on setting whitelist too big"); // Test an invalid list LLMediaEntry entry; std::vector<std::string> whitelist, empty; whitelist.push_back(std::string(URL_OK)); whitelist.push_back(std::string(URL_TOO_BIG)); S32 status = entry.setWhiteList(whitelist); ensure(get_test_name() + " invalid result", status == LSL_STATUS_BOUNDS_ERROR); ensure(get_test_name() + " failed", empty == entry.getWhiteList()); }
void object::test<10>() { set_test_name("Test Limits on setting whitelist too many"); // Test an invalid list LLMediaEntry entry; std::vector<std::string> whitelist, empty; for (int i=0; i < LLMediaEntry::MAX_WHITELIST_SIZE+1; i++) { whitelist.push_back("Q"); } S32 status = entry.setWhiteList(whitelist); ensure(get_test_name() + " invalid result", status == LSL_STATUS_BOUNDS_ERROR); ensure(get_test_name() + " failed", empty == entry.getWhiteList()); }
void object::test<11>() { set_test_name("Test to make sure both setWhiteList() functions behave the same"); // Test a valid list std::vector<std::string> whitelist, empty; LLSD whitelist_llsd; whitelist.push_back(std::string(URL_OK)); whitelist_llsd.append(std::string(URL_OK)); LLMediaEntry entry1, entry2; ensure(get_test_name() + " setWhiteList(s) don't match", entry1.setWhiteList(whitelist) == LSL_STATUS_OK && entry2.setWhiteList(whitelist_llsd)== LSL_STATUS_OK ); ensure(get_test_name() + " failed", entry1.getWhiteList() == entry2.getWhiteList()); }
int main(int ac, char **av) { int rc, i; /* simple "do we have ssl" check for run_tests.sh */ if (ac == 2 && !strncmp(av[1], "--ssl", 5)) { #ifdef HAVE_SSL return 0; #else return 1; #endif } #ifdef WIN32 win_init(); #endif for (i = 0; TestCases[i] != NULL; i++) { const struct test_case *current = TestCases[i]; const char *name = get_test_name(current); printf("%-48s", name); fflush(stdout); rc = test_case_run(current); if (rc) { printf(TEST_FAILED " (%s)\n", socket_strerror(-rc)); break; } printf(TEST_OK "\n"); } return rc; }
void object::test<2>() { set_test_name("Test LLMediaEntry Instantiation from LLSD"); LLMediaEntry entry; LLSD sd; entry.fromLLSD(sd); ensure_llsd_equals(get_test_name() + " failed", emptyMediaEntryLLSD, entry.asLLSD()); }
// start the test std::string SelfTest::start(hz::intrusive_ptr<CmdexSync> smartctl_ex) { this->clear(); // clear previous results if (!drive_) return "Invalid drive given."; if (drive_->get_test_is_active()) return "A test is already running on this drive."; if (!this->is_supported()) return get_test_name(type_) + " is unsupported by this drive."; std::string test_param; switch(type_) { case type_ioffline: test_param = "offline"; break; case type_short: test_param = "short"; break; case type_long: test_param = "long"; break; case type_conveyance: test_param = "conveyance"; break; // no default - this way we get warned by compiler if we're not listing all of them. } if (test_param.empty()) return "Invalid test specified"; std::string output; std::string error_msg = drive_->execute_smartctl("-t " + test_param, smartctl_ex, output); // --test= if (!error_msg.empty()) // checks for empty output too return error_msg; if (!app_pcre_match("/^Drive command .* successful\\.\\nTesting has begun\\.$/mi", output)) { return "Sending command failed."; } // update our members // error_msg = this->update(smartctl_ex); // if (!error_msg.empty()) // update can error out too. // return error_msg; // Don't update here - the logs may not be updated this fast. // Better to wait several seconds and then call it manually. // Set up everything so that the caller won't have to. status_ = StorageSelftestEntry::status_in_progress; remaining_percent_ = 100; // set to 90 to avoid the 100->90 timer reset. this way we won't be looking at // "remaining 60sec" on 60sec test twice (5 seconds apart). Since the test starts // at 90% anyway, it's a good thing. last_seen_percent_ = 90; poll_in_seconds_ = 5; // first update() in 5 seconds timer_.start(); drive_->set_test_is_active(true); return std::string(); // everything ok }
void object::test<13>() { set_test_name("Test to make sure both setWhiteList() functions behave the same"); // Test an invalid list, too many std::vector<std::string> whitelist, empty; LLSD whitelist_llsd; for (int i=0; i < LLMediaEntry::MAX_WHITELIST_SIZE+1; i++) { whitelist.push_back("Q"); whitelist_llsd.append("Q"); } LLMediaEntry entry1, entry2; ensure(get_test_name() + " invalid result", entry1.setWhiteList(whitelist) == LSL_STATUS_BOUNDS_ERROR && entry2.setWhiteList(whitelist_llsd) == LSL_STATUS_BOUNDS_ERROR); ensure(get_test_name() + " failed", empty == entry1.getWhiteList() && empty == entry2.getWhiteList()); }
void object::test<3>() { set_test_name("Test LLMediaEntry Partial Instantiation from LLSD"); LLMediaEntry entry; LLSD sd; sd[LLMediaEntry::CURRENT_URL_KEY] = "http://www.example.com"; entry.fromLLSD(sd); LLSD golden; std::istringstream p(PARTIAL_MEDIA_ENTRY("http://www.example.com")); LLSDSerialize::fromXML(golden,p); ensure_llsd_equals(get_test_name() + " failed", golden, entry.asLLSD()); }
void object::test<4>() { set_test_name("Test LLMediaEntry::asLLSD()"); LLMediaEntry entry; LLSD sd; // Put some cruft in the LLSD sd[LLMediaEntry::CURRENT_URL_KEY] = "http://www.example.com"; LLSD whitelist; whitelist.append("*.example.com"); sd[LLMediaEntry::WHITELIST_KEY] = whitelist; entry.asLLSD(sd); ensure_llsd_equals(get_test_name() + " failed", defaultMediaEntryLLSD, sd); }
void object::test<5>() { set_test_name("Test LLMediaEntry::asLLSD() -> LLMediaEntry::fromLLSD()"); LLMediaEntry entry1, entry2; // Add a whitelist to entry2 std::vector<std::string> whitelist; whitelist.push_back("*.example.com"); entry2.setWhiteList(whitelist); // Render entry1 (which has no whitelist) as an LLSD LLSD sd; entry1.asLLSD(sd); // "read" that LLSD into entry 2 entry2.fromLLSD(sd); ensure_llsd_equals(get_test_name() + " failed", defaultMediaEntryLLSD, entry2.asLLSD()); }
int main(int ac, char **av) { int rc, i; #ifdef WIN32 win_init(); #endif for (i = 0; TestCases[i] != NULL; i++) { const struct test_case *current = TestCases[i]; const char *name = get_test_name(current); printf("%-48s", name); fflush(stdout); rc = test_case_run(current); if (rc) { printf(TEST_FAILED " (%s)\n", socket_strerror(-rc)); break; } printf(TEST_OK "\n"); } return rc; }
// Render one benchmark frame: obtain a frame context from the renderer,
// update the camera/scene, accumulate per-frame statistics, and once per
// ~1 second rebuild the on-screen stats text. After a fixed number of
// stat cycles in test mode, dump CSV results and request shutdown.
void benchmark::draw_frame()
{
    base::frame_context *ctx = 0;

    // wait for renderer
    while((ctx = _renderer->pop_frame_context_from_pool()) == 0 && !_shutdown)
        base::sleep_ms(0);

    if (_shutdown) return;

    assert(ctx != 0);

    app::begin_frame();
    app::create_perspective_matrix(ctx);
    app::update_camera(ctx);

    _scene->update(ctx);

    // prepare stats
    // Function-local statics carry the accumulation state across frames:
    // stats/nframes accumulate within the current ~1s window, the
    // *_total values accumulate across test cycles for CSV output.
    static base::stats_data stats;
    static int nframes = 0;
    static __int64 start_time = 0;
    static int nframes_total = 0;
    static float dtime_total = 0.0f;
    static int test_cycles = 0;
    char mesh_size_str[32];

    // Latch the first valid timestamp as the window start.
    if (start_time == 0 && ctx->_time != 0)
        start_time = ctx->_time;

    if (start_time != 0) {
        nframes++;
        stats += ctx->_stats;
        // NOTE(review): _cpu_time/_gpu_time are also added explicitly here —
        // presumably stats_data::operator+= does not cover these two fields,
        // otherwise they would be double-counted. TODO confirm.
        stats._cpu_time += ctx->_stats._cpu_time;
        stats._gpu_time += ctx->_stats._gpu_time;

        const __int64 freq = base::hptimer().freq();
        // Elapsed window time in microseconds.
        const __int64 dt = ((ctx->_time - start_time) * 1000000) / freq;
        // Refresh the stats overlay roughly once per second.
        if (dt > 1000000) {
            const float dtf = float(dt) * 0.000001f;   // window length in seconds
            const float fps = float(nframes) / dtf;
            const float r_nframes = 1.0f / float(nframes);  // per-frame averaging factor
            static uint32 nvert;
            static uint32 nelem;
            get_face_and_vert_count(base::cfg().mesh_size, nelem, nvert);
            get_mesh_size_str(mesh_size_str, nvert, nelem);

            // Scene-type-specific overlay text. NOTE(review): sprintf into
            // _stats_str assumes the buffer is large enough — no bound is
            // visible here; verify at the declaration site.
            if (base::cfg().sceneType == base::config::stGrass) {
                sprintf(
                    &_stats_str[0],
                    "MVtx/s: %.0f\n"
                    "MVtx: %.3f\n"
                    "MTris/s: %.0f\n"
                    "MTris: %.3f\n"
                    "KDraw/s: %.0f\n"
                    "KDraw: %.3f\n"
                    "gpu: %.3f ms\n"
                    "cpu: %.3f ms\n"
                    "fps: %.0f\n\n"
                    "GPU driver: %s\n"
                    "GPU: %s\n\n"
                    "%s",
                    float(stats._nvertices) * 0.000001 / dtf,
                    float(stats._nvertices) * 0.000001 / float(nframes),
                    float(stats._ntriangles) * 0.000001 / dtf,
                    float(stats._ntriangles) * 0.000001 / float(nframes),
                    float(stats._ndrawcalls) * 0.001 / dtf,
                    float(stats._ndrawcalls) * 0.001 / nframes,
                    stats._gpu_time * r_nframes,
                    stats._cpu_time * r_nframes,
                    fps,
                    _renderer->get_gpu_driver_str(),
                    _renderer->get_gpu_str(),
                    get_test_name());
            }
            else if (base::cfg().sceneType == base::config::stCubes) {
                sprintf(
                    &_stats_str[0],
                    "tex: %.0f MB\n"
                    "buf: %.0f MB\n"
                    "MVtx/s: %.0f\n"
                    "MVtx: %.3f\n"
                    "MTris/s: %.0f\n"
                    "MTris: %.3f\n"
                    "KDraw/s: %.0f\n"
                    "KDraw: %.3f\n"
                    "gpu: %.3f ms\n"
                    "cpu: %.3f ms\n"
                    "fps: %.0f\n\n"
                    "one mesh: %s\n"
                    "vertex data: %s\n"
                    "average mesh size: %s\n"
                    "textures: %ux%u BGRA8\n"
                    "tex freq: %u\n"
                    "tex mode: %s\n"
                    "GPU driver: %s\n"
                    "GPU: %s\n\n"
                    "%s",
                    float(stats._texture_mem) / float(1024 * 1024),
                    float(stats._buffer_mem) / float(1024 * 1024),
                    float(stats._nvertices) * 0.000001 / dtf,
                    float(stats._nvertices) * 0.000001 / float(nframes),
                    float(stats._ntriangles) * 0.000001 / dtf,
                    float(stats._ntriangles) * 0.000001 / float(nframes),
                    float(stats._ndrawcalls) * 0.001 / dtf,
                    float(stats._ndrawcalls) * 0.001 / nframes,
                    stats._gpu_time * r_nframes,
                    stats._cpu_time * r_nframes,
                    fps,
                    base::cfg().one_mesh ? "true" : "false",
                    base::cfg().use_vbo ? "VERTEX BUFFER" : "TEXTURE BUFFER",
                    mesh_size_str,
                    base::cfg().tex_size,
                    base::cfg().tex_size,
                    base::cfg().tex_freq,
                    get_texturing_mode_str(base::cfg().tex_mode),
                    _renderer->get_gpu_driver_str(),
                    _renderer->get_gpu_str(),
                    get_test_name());
            }
            else if (base::cfg().sceneType == base::config::stBuildings) {
                sprintf(
                    &_stats_str[0],
                    "NBuildings:%u\n"
                    "Block size:%d\n"
                    "MVtx/s: %.0f\n"
                    "MVtx: %.3f\n"
                    "MTris/s: %.0f\n"
                    "MTris: %.3f\n"
                    "KDraw/s: %.0f\n"
                    "KDraw: %.3f\n"
                    "gpu: %.3f ms\n"
                    "cpu: %.3f ms\n"
                    "fps: %.0f\n\n"
                    "GPU driver: %s\n"
                    "GPU: %s\n\n"
                    "%s",
                    base::cfg().buildings_count,
                    base::cfg().blocks_per_idc,
                    float(stats._nvertices) * 0.000001 / dtf,
                    float(stats._nvertices) * 0.000001 / float(nframes),
                    float(stats._ntriangles) * 0.000001 / dtf,
                    float(stats._ntriangles) * 0.000001 / float(nframes),
                    float(stats._ndrawcalls) * 0.001 / dtf,
                    float(stats._ndrawcalls) * 0.001 / nframes,
                    stats._gpu_time * r_nframes,
                    stats._cpu_time * r_nframes,
                    fps,
                    _renderer->get_gpu_driver_str(),
                    _renderer->get_gpu_str(),
                    get_test_name());
            }

            // Skip the first cycle (warm-up) before folding this window
            // into the whole-test accumulators.
            if (test_cycles >= 1){
                _test_stats += stats;
                // NOTE(review): same explicit cpu/gpu-time accumulation as
                // above — presumably operator+= omits these fields.
                _test_stats._cpu_time += stats._cpu_time;
                _test_stats._gpu_time += stats._gpu_time;
                nframes_total += nframes;
                dtime_total += dtf;
            }

            // In test mode (test != -1), after the 4th window (cycle index 3:
            // one warm-up + three measured) write the CSV and shut down.
            if (base::cfg().test != -1 && test_cycles == 3){
                if (base::cfg().sceneType == base::config::stGrass) {
                    grass_write_test_data_csv(GRASS_TEST_FILE_NAME, _test_stats, dtime_total, nframes_total);
                }
                else if (base::cfg().sceneType == base::config::stCubes){
                    write_test_data_csv(CUBES_TEST_FILE_NAME, _test_stats, dtime_total, nframes_total);
                }
                else if (base::cfg().sceneType == base::config::stBuildings){
                    buildings_write_test_data_csv(BUILDINGS_TEST_FILE_NAME, _test_stats, dtime_total, nframes_total);
                }
                _shutdown = true;
            }

            // Reset the per-window accumulators for the next cycle.
            start_time = ctx->_time;
            nframes = 0;
            stats = base::stats_data();
            test_cycles++;
        }

        // _canvas->fill_rect( ctx, glm::vec2(0), glm::vec2(256, 260), glm::vec4(0.0, 0.0, 0.0, 0.8));
        _canvas->draw_text( ctx, glm::vec2(3), &_stats_str[0], glm::vec4(1, 1, 1, 1), _fnt_mono.get());
    }

    // Hand the frame context back to the renderer for submission.
    _renderer->push_frame_context(ctx);
}
// xv6 scheduler benchmark: forks 3*n children split into three workload
// classes by pid % 3 (CPU-bound, CPU-bound with voluntary yields, and
// IO/sleep-bound), then reaps them with wait2() and prints per-process
// and per-class average timing statistics.
// Usage: <prog> n   (n = number of processes per class)
int main(int argc, char *argv[]){
    int n, pid, i , j, h;
    // Per-class accumulators: sleep time, ready (wait) time, turnaround time.
    int stime_cpu = 0,stime_scpu = 0, stime_IO = 0;
    int retime_cpu = 0, retime_scpu = 0, retime_IO = 0;
    int tatime_cpu = 0, tatime_scpu = 0, tatime_IO = 0;
    // Per-process values filled in by wait2().
    int retime, rutime, stime;
    enum test_type test;

    if (argc != 2)
        return -1;
    n = atoi(argv[1]);

    for(i = 0 ; i < 3 * n ; ++i) {
        pid = fork();
        if(pid == 0) {
            // Child: workload class is derived from our own pid; the parent
            // recomputes the same class from the pid wait2() returns.
            // NOTE(review): assumes pids are assigned sequentially so each
            // class gets n processes — TODO confirm for this kernel.
            test = getpid() % 3;
            if(test == CPU) {
                // Pure busy loop, never yields voluntarily.
                for(j = 0 ; j < 100 ; ++j)
                    for(h = 0 ; h < 1000000 ; ++h){}
            } else if(test == S_CPU) {
                // Busy loop that yields the CPU after each chunk.
                for(j = 0 ; j < 100 ; ++j) {
                    for(h = 0 ; h < 1000000 ; ++h){}
                    yield();
                }
            } else { //IO
                // Sleep-bound workload.
                for(j = 0 ; j < 100 ; ++j)
                    sleep(1);
            }
            exit();
        } else if (pid < 0) {
            printf(1, "fork number %d, failed!!! \n", i);
        } else{}
    }

    // Parent: reap each child and accumulate its timings into its class.
    while((pid =wait2(&retime, &rutime, &stime)) > 0) { // pid != 0 (parent code)
        test = pid % 3;
        printf(1, "process id: %d, type: %s \n", pid, get_test_name(test));
        // NOTE(review): stime is printed as "IO time" — presumably wait2
        // reports (ready, running, sleeping) ticks; verify against the
        // kernel's wait2 implementation.
        printf(1,"wait time: %d, run time: %d, IO time: %d \n", retime, rutime, stime);
        switch(test) {
            case CPU:
                stime_cpu += stime;
                retime_cpu += retime;
                // Turnaround = ready + running + sleeping time.
                tatime_cpu += stime + retime + rutime;
                break;
            case S_CPU:
                stime_scpu += stime;
                retime_scpu += retime;
                tatime_scpu += stime + retime + rutime;
                break;
            case IO:
                stime_IO += stime;
                retime_IO += retime;
                tatime_IO += stime + retime + rutime;
                break;
        }
    }

    // Report the per-class averages (each helper divides by n).
    sleeptime(CPU,n,stime_cpu);
    sleeptime(S_CPU,n,stime_scpu);
    sleeptime(IO,n,stime_IO);
    readytime(CPU,n,retime_cpu);
    readytime(S_CPU,n,retime_scpu);
    readytime(IO,n,retime_IO);
    turnaroundtime(CPU,n,tatime_cpu);
    turnaroundtime(S_CPU,n,tatime_scpu);
    turnaroundtime(IO,n,tatime_IO);
    exit();
}
// Print the per-process average of an accumulated timing metric.
//   test       - process class whose metric was accumulated
//   n          - number of processes the total was summed over
//   sleeptime  - accumulated total of the metric
//   test_group - human-readable metric label for the output line
void statistics(enum test_type test, int n, int sleeptime, const char * test_group){
    const char *type_name = get_test_name(test);
    int average = sleeptime / n;
    printf(1, "Average %s for %s is %d \n", test_group, type_name, average);
}
void object::test<1>() { set_test_name("Test LLMediaEntry Instantiation"); LLMediaEntry entry; ensure_llsd_equals(get_test_name() + " failed", defaultMediaEntryLLSD, entry.asLLSD()); }