Example #1
int main(int argc, char **argv)
{
	TestData::InitTestData();

	int numTotalRuns = (argc >= 2) ? atoi(argv[1]) : 100;
	int numTrialsPerTimedBlock = (argc >= 3) ? atoi(argv[2]) : 100;
#ifdef EMSCRIPTEN
	numTotalRuns = numTrialsPerTimedBlock = 10;
#endif
	// A list of test prefixes to include in the run.
	std::vector<const char *> prefixesArray;
	for(int i = 3; i < argc; ++i)
	{
		if (argv[i][0] != '-' && argv[i][0] != '/')
			prefixesArray.push_back(argv[i]);
	}
	if (prefixesArray.empty())
		prefixesArray.push_back(""); // Empty prefix runs all tests.
	prefixesArray.push_back(0); // Sentinel to terminate prefix string list.
	const char * const *prefixes = &prefixesArray[0];

	if (numTotalRuns == 0 || numTrialsPerTimedBlock == 0)
	{
		LOGI("Usage: %s <numTotalRuns> <numTrialsPerTimedBlock>", argv[0]); 
		LOGI("   Runs all tests.");
		LOGI("       %s <numTotalRuns> <numTrialsPerTimedBlock> prefix1 prefix2 prefix3...", argv[0]); 
		LOGI("   Runs all tests starting with one of the given prefixes, or residing in one of the named code files.");
		return 0;
	}

	JSONReport jsonReport;
	std::string jsonFilename = "test_results.json";
	for(int i = 1; i+1 < argc; ++i)
		if (!strcmp(argv[i], "--json"))
			jsonFilename = argv[i+1]; // Allow overriding the output file name from command line.
	jsonReport.Create(jsonFilename.c_str());

	int numFailures = RunTests(numTotalRuns, numTrialsPerTimedBlock, prefixes, jsonReport);
	LOGI("%d", globalPokedData);

	// When --exit0 is passed, we forcibly return 0 and not the number of failed tests.
	// Used by buildbot in valgrind runs to ignore any failures; the failures are instead
	// detected in a "real" run that carries more randomized trial runs.
	for(int i = 1; i < argc; ++i)
		if (!strcmp(argv[i], "--exit0"))
			return 0;

	return numFailures; // exit code of 0 denotes a successful run.
}
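
The prefix list built above is terminated with a null pointer so the test runner can walk it without a separate count. Below is a minimal, self-contained sketch of how such a sentinel-terminated list can be matched against a test name; MatchesAnyPrefix is a hypothetical helper for illustration, not code from the project above.

// Illustrative sketch only; not part of the project above.
#include <cstdio>
#include <cstring>

static bool MatchesAnyPrefix(const char *testName, const char * const *prefixes)
{
	for(int i = 0; prefixes[i]; ++i) // The list ends with a null pointer sentinel.
		if (!strncmp(testName, prefixes[i], strlen(prefixes[i])))
			return true; // An empty prefix "" matches every test name.
	return false;
}

int main()
{
	const char * const prefixes[] = { "Quat", "float4", 0 };
	printf("%d\n", MatchesAnyPrefix("Quat_Slerp", prefixes)); // prints 1
	printf("%d\n", MatchesAnyPrefix("Matrix_Mul", prefixes)); // prints 0
}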
Example #2
int main(int argc, char **argv)
{
	int numTotalRuns = (argc >= 2) ? atoi(argv[1]) : 100;
	int numTrialsPerTimedBlock = (argc >= 3) ? atoi(argv[2]) : 100;
#ifdef EMSCRIPTEN
	numTotalRuns = numTrialsPerTimedBlock = 10;
#endif
	const char * const noPrefixes[] = { "", 0 };
	const char * const *prefixes = (argc >= 4) ? &argv[3] : noPrefixes;

	if (numTotalRuns == 0 || numTrialsPerTimedBlock == 0)
	{
		LOGI("Usage: %s <numTotalRuns> <numTrialsPerTimedBlock>", argv[0]); 
		LOGI("   Runs all tests.");
		LOGI("       %s <numTotalRuns> <numTrialsPerTimedBlock> prefix1 prefix2 prefix3...", argv[0]); 
		LOGI("   Runs all tests starting with one of the given prefixes, or residing in one of the named code files.");
		return 0;
	}

	JSONReport jsonReport;
	std::string jsonFilename = "test_results.json";
	for(int i = 1; i+1 < argc; ++i)
		if (!strcmp(argv[i], "--json"))
			jsonFilename = argv[i+1]; // Allow overriding the output file name from command line.
	jsonReport.Create(jsonFilename.c_str());

	int numFailures = RunTests(numTotalRuns, numTrialsPerTimedBlock, prefixes, jsonReport);
	LOGI("%d", globalPokedData);

	// When --exit0 is passed, we forcibly return 0 and not the number of failed tests.
	// Used by buildbot in valgrind runs to ignore any failures; the failures are instead
	// detected in a "real" run that carries more randomized trial runs.
	for(int i = 1; i < argc; ++i)
		if (!strcmp(argv[i], "--exit0"))
			return 0;

	return numFailures; // exit code of 0 denotes a successful run.
}
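
Example #2 reaches the same kind of prefix list more directly: the C and C++ standards guarantee that argv[argc] is a null pointer, so &argv[3] is already a null-terminated string array of the expected form. Note that, unlike Example #1, this variant does not filter out option-style arguments, so an option such as --json given after the two numeric arguments also ends up in the prefix list. A standalone sketch of that argv property (illustrative, not project code):

// Illustrative sketch only: argv[argc] is guaranteed to be a null pointer,
// so any tail of argv is itself a null-terminated list of C strings.
#include <cstdio>

int main(int argc, char **argv)
{
	const char * const *tail = (argc >= 4) ? &argv[3] : 0;
	for(const char * const *p = tail; p && *p; ++p) // Stops at the argv[argc] sentinel.
		printf("prefix: %s\n", *p);
	return 0;
}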
Example #3
/// Returns 0: passed, 1: passed with warnings, -1: failed.
int RunTest(Test &t, int numTimesToRun, int numTrialsPerRun, JSONReport &jsonReport)
{
	if (t.runOnlyOnce)
		numTimesToRun = numTrialsPerRun = 1;
	if (!t.isRandomized)
		numTimesToRun = 1;
	if (t.isBenchmark)
	{
		numTimesToRun = numTrialsPerRun = 1;
		LOGI_NL("Benchmark '%s': %s", t.name.c_str(), t.description.c_str());
	}
	else
		LOGI_NL("Testing '%s': ", t.name.c_str());

	std::vector<tick_t> times;
	times.reserve(numTimesToRun);

	t.numFails = 0;
	t.numPasses = 0;
	std::string failReason; // Stores the failure reason of the first failure.
	std::vector<std::string> failReasons;
	globalTestExpectedToFail = 0;
	globalTestFailureDescription = std::string();

	for(int j = 0; j < numTimesToRun; ++j)
	{
		tick_t start = Clock::Tick();
		for(int k = 0; k < numTrialsPerRun; ++k)
//			for(int k = 0; k < (t.isRandomized ? numTrials : 1); ++k)
		{
#ifdef FAIL_USING_EXCEPTIONS
			try
			{
#endif
				t.function(t);
				if (globalTestExpectedToFail)
				{
					globalTestExpectedToFail = 0; // Signal that the following exception reports a failure of this test, and not an expected failure.
					throw std::runtime_error(std::string("This test should have failed due to reason '") + globalTestFailureDescription + "', but it didn't fail!");
				}
#ifdef FAIL_USING_EXCEPTIONS
			}
			catch(const TestSkippedException &e)
			{
				if (failReason.empty())
				{
					failReason = std::string("SKIPPED: ") + e.what();
					LOGW("%s", failReason.c_str());
				}
			}
			catch(const std::exception &e)
			{
				if (globalTestExpectedToFail)
				{
					if (globalTestExpectedToFail == 2)
						LOGE("This test failed as expected. Caught an exception '%s', failure is due to reason '%s'.", e.what(), globalTestFailureDescription.c_str());
					else
						LOGI("This test failed as expected. Caught an exception '%s', failure is due to reason '%s'.", e.what(), globalTestFailureDescription.c_str());
				}
				else
				{
					if (failReason.empty())
						failReason = e.what();
					++t.numFails;
				}
			}
			catch(...)
			{
				++t.numFails;
				LOGE("Error: Received an unknown exception type that is _not_ derived from std::exception! This should not happen!");
			}
#endif
		}
		tick_t end = Clock::Tick();
		times.push_back(end - start);
	}

	t.numPasses = numTimesToRun*numTrialsPerRun - t.numFails;
	std::sort(times.begin(), times.end());

	// Erase outliers. (x% slowest)
	const float rateSlowestToDiscard = 0.05f;
	int numSlowestToDiscard = (int)(times.size() * rateSlowestToDiscard);
	times.erase(times.end() - numSlowestToDiscard, times.end());

	tick_t total = 0;
	for(size_t j = 0; j < times.size(); ++j)
		total += times[j];

	if (!t.isBenchmark)
	{
		if (!times.empty())
		{
			t.fastestTime = (double)times[0] / numTrialsPerRun;
			t.averageTime = (double)total / times.size() / numTrialsPerRun;
			t.worstTime = (double)times.back() / numTrialsPerRun;
			t.numTimesRun = numTimesToRun;
			t.numTrialsPerRun = numTrialsPerRun;
		}
		else
		{
			t.fastestTime = t.averageTime = t.worstTime = -1.0;
			t.numTimesRun = t.numTrialsPerRun = 0;
		}
	}
	float successRate = (t.numPasses + t.numFails > 0) ? (float)t.numPasses * 100.f / (t.numPasses + t.numFails) : 0.f;

	jsonReport.Report(t);

	if (t.isBenchmark && t.numFails == 0) // Benchmarks print themselves.
		return 0; // 0: Success

	int ret = 0; // 0: Success

	if (t.numFails == 0)
	{
		if (t.isRandomized)
			LOGI(" ok (%d passes, 100%%)", t.numPasses);
		else
			LOGI(" ok ");
//		++numTestsPassed;
		t.result = TestPassed;
	}
	else if (successRate >= 95.0f)
	{
		LOGI_NL(" ok ");
		LOGW("Some failures with '%s' (%d passes, %.2f%% of all tries)", failReason.c_str(), t.numPasses, successRate);
//		++numTestsPassed;
//		++numWarnings;
		ret = 1; // Success with warnings
		t.result = TestPassedWithWarnings;
	}
	else
	{
		if (t.isRandomized)
			LOGE("FAILED: '%s' (%d passes, %.2f%% of all tries)", failReason.c_str(), t.numPasses, successRate);
		else
			LOGE("FAILED: '%s'", failReason.c_str());
		ret = -1; // Failed
		t.result = TestFailed;
	}

	if (!times.empty())
	{
		if (t.runOnlyOnce)
			LOGI("   Elapsed: %s", FormatTime((double)times[0]).c_str());
		else
			LOGI("   Fastest: %s, Average: %s, Slowest: %s", FormatTime(t.fastestTime).c_str(), FormatTime(t.averageTime).c_str(), FormatTime(t.worstTime).c_str());
	}

	return ret;
}
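
RunTest() above reads and writes a number of fields on the Test object it receives. The following is a reconstruction of those fields inferred purely from the function body; it is an assumption made for readability, and the project's actual Test definition may differ in names, types, and additional members.

// Reconstruction inferred from RunTest() above, not copied from the project's
// headers; the function pointer type and the enum layout are assumptions.
#include <string>

enum TestResult { TestPassed, TestPassedWithWarnings, TestFailed };

struct Test
{
	std::string name;          // Shown in "Testing '%s':" and the benchmark banner.
	std::string description;   // Shown for benchmarks.
	void (*function)(Test &);  // The test body itself, invoked once per trial.
	bool isRandomized;         // Randomized tests are repeated numTimesToRun times.
	bool isBenchmark;          // Benchmarks run a single trial and print their own results.
	bool runOnlyOnce;          // Forces a single timed run with a single trial.

	// Written by RunTest():
	int numPasses, numFails;
	int numTimesRun, numTrialsPerRun;
	double fastestTime, averageTime, worstTime; // Per-trial durations, in clock ticks.
	TestResult result;         // TestPassed / TestPassedWithWarnings / TestFailed.
};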