void ManagerList::Reset() { RemoveAllManagers(RESET); // Clear the manager list's results. ResetResults(WHOLE_TEST_PERF); ResetResults(LAST_UPDATE_PERF); }
// // Reset all results for all managers. This includes resetting the results for all // managers and any results stored for them. // void ManagerList::ResetAllResults() { ResetResults(WHOLE_TEST_PERF); ResetResults(LAST_UPDATE_PERF); // Have all managers reset their results too. for (int i = 0; i < ManagerCount(); i++) GetManager(i)->ResetAllResults(); }
void StressTest::ResetStressTest() { Q_D( StressTest); ResetLoans(); ResetResults(); if (d->ProgressForm) { d->ProgressForm->deleteLater(); } }
//
// Constructor: sets the default match configuration and clears any
// previously stored results.
//
CGameResults::CGameResults ()
{
	er.setLevel(LOG_INFO);		// default log verbosity
	setKillsToWin(false);		// DEFAULT: win condition is NOT kill-count based
	setValueToWinMatch(3);		// DEFAULT: 3 points needed to win the match
	setTeamPlay(false);		// DEFAULT: free-for-all, no team play
	ResetResults();			// start from an empty result set
}
//
// Constructor.  Labels this aggregate entry and starts both performance
// snapshots out empty.
//
ManagerList::ManagerList()
{
	// Display name for the all-managers aggregate row.
	strcpy(name, "All Managers");

	// Neither snapshot holds data until a test runs.
	ResetResults(WHOLE_TEST_PERF);
	ResetResults(LAST_UPDATE_PERF);
}
//
// Calculation intensive code to determine the combined performance for
// all managers and their workers.  Programmers beware.
//
// which_perf selects which result snapshot to rebuild (e.g. whole-test
// vs. last-update); values outside [0, MAX_PERF) are ignored.  The
// selected entry of results[] is cleared, then rebuilt by summing (or
// max-ing, for latencies) each active manager's results, and finally
// the derived averages are computed from the accumulated raw counts.
//
void ManagerList::UpdateResults(int which_perf)
{
	Manager *manager;
	// Total time each manager used transferring I/Os, accumulated in
	// seconds here (converted to ms when the averages are computed).
	double read_latency_sum = 0;
	double write_latency_sum = 0;
	double transaction_latency_sum = 0;
	double connection_latency_sum = 0;
	int stat;		// loop control

	// Validate the snapshot index; silently ignore bad requests.
	if ((which_perf < 0) || (which_perf >= MAX_PERF))
		return;

	// Start the aggregate from zero before summing manager results.
	ResetResults(which_perf);

	// Loop through all managers to get their results.
	for (int i = 0; i < ManagerCount(); i++) {
		manager = GetManager(i);
		// Skip managers not active in the current test.
		if (!manager->ActiveInCurrentTest()) {
			// Clear the results of idle managers to prevent them from being
			// displayed in the results window.
			manager->ResetAllResults();
			continue;
		}
		// Request an update from this manager, then fold its numbers in.
		manager->UpdateResults(which_perf);

		// Recording error results.
		results[which_perf].total_errors += manager->results[which_perf].total_errors;
		results[which_perf].raw.read_errors += manager->results[which_perf].raw.read_errors;
		results[which_perf].raw.write_errors += manager->results[which_perf].raw.write_errors;

		// Recording results related to the number of I/Os completed.
		results[which_perf].IOps += manager->results[which_perf].IOps;
		results[which_perf].read_IOps += manager->results[which_perf].read_IOps;
		results[which_perf].write_IOps += manager->results[which_perf].write_IOps;
		results[which_perf].raw.read_count += manager->results[which_perf].raw.read_count;
		results[which_perf].raw.write_count += manager->results[which_perf].raw.write_count;

		// Recording throughput results.
		results[which_perf].MBps += manager->results[which_perf].MBps;
		results[which_perf].read_MBps += manager->results[which_perf].read_MBps;
		results[which_perf].write_MBps += manager->results[which_perf].write_MBps;
		results[which_perf].raw.bytes_read += manager->results[which_perf].raw.bytes_read;
		results[which_perf].raw.bytes_written += manager->results[which_perf].raw.bytes_written;

		// Recording results related to the number of transactions completed.
		results[which_perf].transactions_per_second += manager->results[which_perf].transactions_per_second;
		results[which_perf].raw.transaction_count += manager->results[which_perf].raw.transaction_count;

		// Recording results related to the number of connections completed.
		results[which_perf].connections_per_second += manager->results[which_perf].connections_per_second;
		results[which_perf].raw.connection_count += manager->results[which_perf].raw.connection_count;

		// Recording maximum latency information.  Maxima are combined by
		// taking the largest value seen across all active managers.
		if (results[which_perf].max_latency < manager->results[which_perf].max_latency) {
			results[which_perf].max_latency = manager->results[which_perf].max_latency;
		}
		if (results[which_perf].max_read_latency < manager->results[which_perf].max_read_latency) {
			results[which_perf].max_read_latency = manager->results[which_perf].max_read_latency;
		}
		if (results[which_perf].max_write_latency < manager->results[which_perf].max_write_latency) {
			results[which_perf].max_write_latency = manager->results[which_perf].max_write_latency;
		}
		if (results[which_perf].max_transaction_latency < manager->results[which_perf].max_transaction_latency) {
			results[which_perf].max_transaction_latency = manager->results[which_perf].max_transaction_latency;
		}
		if (results[which_perf].max_connection_latency < manager->results[which_perf].max_connection_latency) {
			results[which_perf].max_connection_latency = manager->results[which_perf].max_connection_latency;
		}

		// Raw latency sums are kept in each manager's own timer units;
		// dividing by that manager's processor_speed converts them to a
		// common time base before summing across managers.
		// NOTE(review): presumably processor_speed is ticks-per-second,
		// making these sums seconds — confirm against Manager.
		read_latency_sum += (double)(_int64) manager->results[which_perf].raw.read_latency_sum / (double)
		    manager->processor_speed;
		write_latency_sum += (double)(_int64) manager->results[which_perf].raw.write_latency_sum / (double)
		    manager->processor_speed;
		transaction_latency_sum += (double)(_int64) manager->results[which_perf].raw.transaction_latency_sum / (double)
		    manager->processor_speed;
		connection_latency_sum += (double)(_int64) manager->results[which_perf].raw.connection_latency_sum / (double)
		    manager->processor_speed;

		// Accumulate per-manager CPU, TCP, and network-interface counters.
		for (stat = 0; stat < CPU_RESULTS; stat++) {
			results[which_perf].CPU_utilization[stat] += manager->results[which_perf].CPU_utilization[stat];
		}
		for (stat = 0; stat < TCP_RESULTS; stat++) {
			results[which_perf].tcp_statistics[stat] += manager->results[which_perf].tcp_statistics[stat];
		}
		for (stat = 0; stat < NI_COMBINE_RESULTS; stat++) {
			results[which_perf].ni_statistics[stat] += manager->results[which_perf].ni_statistics[stat];
		}
	}

	// Derive average latencies from the accumulated sums and counts.
	// The "* 1000" converts the summed times to milliseconds.
	if (results[which_perf].raw.read_count || results[which_perf].raw.write_count) {
		results[which_perf].ave_latency = (read_latency_sum + write_latency_sum) * (double) 1000 /
		    (double)(_int64) (results[which_perf].raw.read_count + results[which_perf].raw.write_count);

		if (results[which_perf].raw.read_count)
			results[which_perf].ave_read_latency = read_latency_sum * (double)1000 /
			    (double)(_int64) results[which_perf].raw.read_count;
		else
			results[which_perf].ave_read_latency = (double)0;

		if (results[which_perf].raw.write_count)
			results[which_perf].ave_write_latency = write_latency_sum * (double)1000 /
			    (double)(_int64) results[which_perf].raw.write_count;
		else
			results[which_perf].ave_write_latency = (double)0;

		// NOTE(review): the transaction average is only computed inside
		// this read/write branch; transactions with zero I/O counts
		// would leave it at the reset value — confirm that is intended.
		if (results[which_perf].raw.transaction_count) {
			results[which_perf].ave_transaction_latency = transaction_latency_sum * (double)1000 /
			    (double)(_int64) (results[which_perf].raw.transaction_count);
		} else {
			results[which_perf].ave_transaction_latency = (double)0;
		}
	} else {
		// No I/Os at all: every I/O-derived average is zero.
		results[which_perf].ave_latency = (double)0;
		results[which_perf].ave_read_latency = (double)0;
		results[which_perf].ave_write_latency = (double)0;
		results[which_perf].ave_transaction_latency = (double)0;
	}

	// Connection latency is averaged independently of the I/O counts.
	if (results[which_perf].raw.connection_count) {
		results[which_perf].ave_connection_latency = connection_latency_sum * (double)1000 /
		    (double)(_int64) (results[which_perf].raw.connection_count);
	} else {
		results[which_perf].ave_connection_latency = (double)0;
	}

	// CPU utilization was summed above; convert to a per-manager average.
	// NOTE(review): if ManagerCount(ActiveType) can be 0 here this is a
	// floating-point divide by zero (inf/NaN) — verify callers guarantee
	// at least one active manager when this runs.
	for (stat = 0; stat < CPU_UTILIZATION_RESULTS; stat++) {
		results[which_perf].CPU_utilization[stat] /= ManagerCount(ActiveType);
	}

	// CPU effectiveness = IOps per percent of total CPU used.
	if (results[which_perf].CPU_utilization[CPU_TOTAL_UTILIZATION] != (double)0) {
		results[which_perf].CPU_effectiveness = results[which_perf].IOps /
		    results[which_perf].CPU_utilization[CPU_TOTAL_UTILIZATION];
	} else {
		results[which_perf].CPU_effectiveness = (double)0;
	}
}