TEST_F(EventsDatabaseTests, test_optimize) { auto sub = std::make_shared<DBFakeEventSubscriber>(); for (size_t i = 800; i < 800 + 10; ++i) { sub->testAdd(i); } // Lie about the tool type to enable optimizations. auto default_type = kToolType; kToolType = ToolType::DAEMON; FLAGS_events_optimize = true; // Must also define an executing query. setDatabaseValue(kPersistentSettings, kExecutingQuery, "events_db_test"); auto t = getUnixTime(); auto results = genRows(sub.get()); EXPECT_EQ(10U, results.size()); // Optimization will set the time NOW as the minimum event time. // Thus it is not possible to set an event in the past. EXPECT_GE(sub->optimize_time_ + 100, t); EXPECT_LE(sub->optimize_time_ - 100, t); // The last EID returned will also be stored for duplication checks. EXPECT_EQ(10U, sub->optimize_eid_); for (size_t i = t + 800; i < t + 800 + 10; ++i) { sub->testAdd(i); } results = genRows(sub.get()); EXPECT_EQ(10U, results.size()); // The optimize time should have been written to the database. // It should be the same as the current (relative) optimize time. std::string content; getDatabaseValue("events", "optimize.events_db_test", content); EXPECT_EQ(std::to_string(sub->optimize_time_), content); // Restore the tool type. kToolType = default_type; }
TEST(logcat, logrotate_id) { static const char logcat_cmd[] = "logcat -b all -d -f %s/%s -n 32 -r 1 --id=test"; static const char logcat_short_cmd[] = "logcat -b all -t 10 -f %s/%s -n 32 -r 1 --id=test"; static const char tmp_out_dir_form[] = "/data/local/tmp/logcat.logrotate.XXXXXX"; static const char log_filename[] = "log.txt"; char tmp_out_dir[strlen(tmp_out_dir_form) + 1]; ASSERT_TRUE(NULL != mkdtemp(strcpy(tmp_out_dir, tmp_out_dir_form))); EXPECT_EQ(34, logrotate_count_id(logcat_cmd, tmp_out_dir)); EXPECT_EQ(34, logrotate_count_id(logcat_short_cmd, tmp_out_dir)); char id_file[strlen(tmp_out_dir_form) + strlen(log_filename) + 5]; snprintf(id_file, sizeof(id_file), "%s/%s.id", tmp_out_dir, log_filename); if (getuid() != 0) { chmod(id_file, 0); EXPECT_EQ(34, logrotate_count_id(logcat_short_cmd, tmp_out_dir)); } unlink(id_file); EXPECT_EQ(34, logrotate_count_id(logcat_short_cmd, tmp_out_dir)); FILE *fp = fopen(id_file, "w"); if (fp) { fprintf(fp, "not_a_test"); fclose(fp); } if (getuid() != 0) { chmod(id_file, 0); // API to preserve content even with signature change ASSERT_EQ(34, logrotate_count_id(logcat_short_cmd, tmp_out_dir)); chmod(id_file, 0600); } int new_signature; EXPECT_LE(2, (new_signature = logrotate_count_id(logcat_short_cmd, tmp_out_dir))); EXPECT_GT(34, new_signature); static const char cleanup_cmd[] = "rm -rf %s"; char command[strlen(cleanup_cmd) + strlen(tmp_out_dir_form)]; snprintf(command, sizeof(command), cleanup_cmd, tmp_out_dir); EXPECT_FALSE(IsFalse(system(command), command)); }
TYPED_TEST(NeuronLayerTest, TestTanH) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; TanHLayer<Dtype> layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // Test exact values for (int i = 0; i < this->blob_bottom_->num(); ++i) { for (int j = 0; j < this->blob_bottom_->channels(); ++j) { for (int k = 0; k < this->blob_bottom_->height(); ++k) { for (int l = 0; l < this->blob_bottom_->width(); ++l) { EXPECT_GE(this->blob_top_->data_at(i, j, k, l) + 1e-4, (exp(2*this->blob_bottom_->data_at(i, j, k, l)) - 1) / (exp(2*this->blob_bottom_->data_at(i, j, k, l)) + 1)); EXPECT_LE(this->blob_top_->data_at(i, j, k, l) - 1e-4, (exp(2*this->blob_bottom_->data_at(i, j, k, l)) - 1) / (exp(2*this->blob_bottom_->data_at(i, j, k, l)) + 1)); } } } } }
TYPED_TEST(CuDNNNeuronLayerTest, TestTanHCuDNN) { Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNTanHLayer<TypeParam> layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // Test exact values for (int i = 0; i < this->blob_bottom_->num(); ++i) { for (int j = 0; j < this->blob_bottom_->channels(); ++j) { for (int k = 0; k < this->blob_bottom_->height(); ++k) { for (int l = 0; l < this->blob_bottom_->width(); ++l) { EXPECT_GE(this->blob_top_->data_at(i, j, k, l) + 1e-4, (exp(2*this->blob_bottom_->data_at(i, j, k, l)) - 1) / (exp(2*this->blob_bottom_->data_at(i, j, k, l)) + 1)); EXPECT_LE(this->blob_top_->data_at(i, j, k, l) - 1e-4, (exp(2*this->blob_bottom_->data_at(i, j, k, l)) - 1) / (exp(2*this->blob_bottom_->data_at(i, j, k, l)) + 1)); } } } } }
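// Illustration (not from the Caffe suites above): both TanH tests accept the layer
// output if it is within 1e-4 of (exp(2x) - 1) / (exp(2x) + 1), which is just the
// standard tanh identity written out. A minimal standalone check of that identity:
#include <cassert>
#include <cmath>

int main() {
  for (double x = -5.0; x <= 5.0; x += 0.25) {
    const double expected = (std::exp(2 * x) - 1) / (std::exp(2 * x) + 1);
    assert(std::fabs(std::tanh(x) - expected) < 1e-4);  // same tolerance as the tests
  }
  return 0;
}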
void ProfilerTest::test_petsc_memory() { int ierr, mpi_rank; ierr = MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); EXPECT_EQ( ierr, 0 ); Profiler::initialize(); { PetscLogDouble mem; START_TIMER("A"); PetscInt size = 100*1000; PetscScalar value = 0.1; Vec tmp_vector; VecCreateSeq(PETSC_COMM_SELF, size, &tmp_vector); VecSet(tmp_vector, value); // VecSetRandom(tmp_vector, NULL); END_TIMER("A"); START_TIMER("A"); // allocated memory MUST be greater or equal to size * size of double EXPECT_GE(AN.petsc_memory_difference, size*sizeof(double)); END_TIMER("A"); START_TIMER("B"); PetscScalar sum; VecSum(tmp_vector, &sum); END_TIMER("B"); START_TIMER("C"); VecDestroy(&tmp_vector); END_TIMER("C"); START_TIMER("C"); // since we are destroying vector, we expect to see negative memory difference EXPECT_LE(AN.petsc_memory_difference, 0); END_TIMER("C"); } PI->output(MPI_COMM_WORLD, cout); Profiler::uninitialize(); }
TEST_F(TestObjList, WriteAndRead) { ObjList<Obj> list_to_write; for (int i = 10; i > 0; --i) { Obj obj(i); list_to_write.add_object(std::move(obj)); } list_to_write.sort(); list_to_write.add_object(std::move(Obj(13))); list_to_write.add_object(std::move(Obj(12))); list_to_write.add_object(std::move(Obj(11))); std::vector<Obj>& v = list_to_write.get_data(); Obj* p = &v[0]; Obj* p2 = &v[10]; list_to_write.delete_object(p); // rm 1 list_to_write.delete_object(p2); // rm 13 size_t old_objlist_size = list_to_write.get_size(); EXPECT_TRUE(list_to_write.write_to_disk()); size_t data_capacity_after_write = list_to_write.get_data().capacity(); size_t bitmap_capacity_after_write = list_to_write.get_del_bitmap().capacity(); EXPECT_EQ(data_capacity_after_write, 0); EXPECT_EQ(bitmap_capacity_after_write, 0); std::string list_to_write_path = list_to_write.id2str(); ObjList<Obj> list_to_read; list_to_read.read_from_disk(list_to_write_path); EXPECT_EQ(list_to_read.get_size(), old_objlist_size); EXPECT_EQ(list_to_read.get_size(), list_to_read.get_sorted_size()); EXPECT_EQ(list_to_read.get_size(), list_to_read.get_del_bitmap().size()); EXPECT_EQ(list_to_read.get_hashed_size(), 0); EXPECT_EQ(list_to_read.get_num_del(), 0); for (size_t i = 0; i < list_to_read.get_size() - 1; i++) EXPECT_LE(list_to_read.get(i).id(), list_to_read.get(i + 1).id()); for (size_t i = 0; i < list_to_read.get_size() - 1; i++) EXPECT_EQ(list_to_read.get_del(i), false); }
TEST(DurationTest, Comparison) { EXPECT_EQ(Duration::zero(), Seconds(0)); EXPECT_EQ(Minutes(180), Hours(3)); EXPECT_EQ(Seconds(10800), Hours(3)); EXPECT_EQ(Milliseconds(10800000), Hours(3)); EXPECT_EQ(Milliseconds(1), Microseconds(1000)); EXPECT_EQ(Milliseconds(1000), Seconds(1)); EXPECT_GT(Weeks(1), Days(6)); EXPECT_LT(Hours(23), Days(1)); EXPECT_LE(Hours(24), Days(1)); EXPECT_GE(Hours(24), Days(1)); EXPECT_NE(Minutes(59), Hours(1)); // Maintains precision for a 100 year duration. EXPECT_GT(Weeks(5217) + Nanoseconds(1), Weeks(5217)); EXPECT_LT(Weeks(5217) - Nanoseconds(1), Weeks(5217)); }
TEST(SimpleTest, FirstTest) { // ASSERT_* can be replaced by EXPECT_*. // ASSERT_* yields fatal failure and EXPECT_* yields nonfatal failure. ASSERT_TRUE(true); ASSERT_FALSE(false); ASSERT_EQ(42, 42); ASSERT_NE(false, true); ASSERT_LT(1, 2); ASSERT_LE(1, 1); EXPECT_LE(1, 2); ASSERT_GT(1, 0); ASSERT_GE(1, 0); ASSERT_GE(0, 0); ASSERT_STREQ("foo", "foo"); ASSERT_STRNE("foo", "Foo"); ASSERT_STRCASEEQ("foo", "FOO"); ASSERT_STRCASENE("foo", "bar"); ASSERT_STRCASENE("", nullptr); }
TEST_F(PageIOTestEnv, test_dealloc_reuse_many) { constexpr size_t SIZE_FACTOR = 10; size_t nr_page = m_page_io->page_size() * SIZE_FACTOR, max_alloc = nr_page * sizeof(PageIO::page_id_t) / (m_page_io->page_size() - LinkedStackImpl::header_size()) + 1 + nr_page; for (int test = 0; test < 10; test ++) { std::unordered_set<PageIO::page_id_t> used_id; std::vector<PageIO::Page> pages; for (size_t i = 0; i < nr_page; i ++) { pages.emplace_back(m_page_io->alloc()); used_id.insert(pages.back().id()); } EXPECT_EQ(nr_page, used_id.size()); for (auto &&i: pages) { m_page_io->free(std::move(i)); EXPECT_FALSE(i.valid()); } } EXPECT_LE(m_page_io->file_io().get_meta().nr_page_allocated, max_alloc); }
TEST(BasicThreadPoolTest, StopJoinTest) { ReleaseWaitArg arg_data; ContainedThreadPool<2, 10> thread_pool; ASSERT_TRUE(thread_pool.Start()); EXPECT_FALSE(thread_pool.Join(0)); ASSERT_TRUE(thread_pool.EnqueueRun(ReleaseWaitSemaphoreRoutine, &arg_data)); ASSERT_TRUE(arg_data.release_semaphore.Wait(50)); thread_pool.Stop(0); YPlatform::Timer test_timer; test_timer.Start(); EXPECT_FALSE(thread_pool.Join(100)); test_timer.Pulse(); EXPECT_LE(95, test_timer.GetPulsedTimeMilli()); EXPECT_GE(120, test_timer.GetPulsedTimeMilli()); // Releasing the semaphore should let the join succeed. arg_data.wait_semaphore.Release(); EXPECT_TRUE(thread_pool.Join(50)); }
TEST(Type, Specialized) { auto packed = Type::Array(ArrayData::kPackedKind); EXPECT_LE(packed, TArr); EXPECT_LT(packed, TArr); EXPECT_FALSE(TArr <= packed); EXPECT_LT(packed, TArr | TObj); EXPECT_EQ(packed, packed & (TArr | TCounted)); EXPECT_GE(packed, TBottom); EXPECT_GT(packed, TBottom); EXPECT_TRUE(TInt <= (packed | TInt)); EXPECT_EQ(TBottom, packed & Type::Array(ArrayData::kMixedKind)); EXPECT_EQ(TBottom, packed - TArr); EXPECT_EQ(TPtrToSPropCell, TPtrToSPropGen - TPtrToBoxedCell); auto const array = make_packed_array(1, 2, 3, 4); auto const mixed = make_map_array(1, 1, 2, 2); auto const arrData = ArrayData::GetScalarArray(array.get()); auto const arrDataMixed = ArrayData::GetScalarArray(mixed.get()); auto constArray = Type::cns(arrData); auto constArrayMixed = Type::cns(arrDataMixed); auto const spacked = Type::StaticArray(ArrayData::kPackedKind); EXPECT_EQ(spacked, spacked - constArray); // conservative EXPECT_EQ(TBottom, constArray - spacked); // Implemented conservatively right now, but the following better not return // bottom: EXPECT_EQ(constArrayMixed, constArrayMixed - spacked); // Checking specialization dropping. EXPECT_EQ(TArr | TBoxedInitCell, packed | TBoxedInitCell); auto specializedObj = Type::SubObj(SystemLib::s_IteratorClass); EXPECT_EQ(TArr | TObj, packed | specializedObj); }
TEST(EscapeObstaclesPathPlanner, run) { // The robot is at the origin MotionInstant startInstant({0, 0}, {0, 0}); // EmptyCommand cmd; // "None" command std::unique_ptr<MotionCommand> cmd = std::make_unique<EmptyCommand>(); // "None" command // Add a circle of radius 5 centered at the origin as an obstacle ShapeSet obstacles; const float circleRadius = 5; obstacles.add(std::make_shared<Circle>(Point(0, 0), circleRadius)); SystemState systemState; EscapeObstaclesPathPlanner planner; std::vector<DynamicObstacle> dynamicObstacles; PlanRequest request(systemState, startInstant, std::move(cmd), RobotConstraints(), nullptr, obstacles, dynamicObstacles, 0); auto path = planner.run(request); ASSERT_NE(nullptr, path) << "Planner returned null path"; // Ensure that the path escapes the obstacle RJ::Seconds hitTime; EXPECT_FALSE(path->hit(obstacles, 0s, &hitTime)) << "Returned path hits obstacles"; // Make sure the path's endpoint is close to the original point. It // shouldn't be further than two steps outside of the closest possible // point. const float stepSize = EscapeObstaclesPathPlanner::stepSize(); const float pathLength = (path->end().motion.pos - startInstant.pos).mag(); EXPECT_LE(pathLength, circleRadius + Robot_Radius + stepSize * 2) << "Path is longer than it should be"; }
TEST_F(IGraphicBufferProducerTest, Query_Succeeds) { ASSERT_NO_FATAL_FAILURE(ConnectProducer()); int32_t value = -1; EXPECT_OK(mProducer->query(NATIVE_WINDOW_WIDTH, &value)); EXPECT_EQ(DEFAULT_WIDTH, static_cast<uint32_t>(value)); EXPECT_OK(mProducer->query(NATIVE_WINDOW_HEIGHT, &value)); EXPECT_EQ(DEFAULT_HEIGHT, static_cast<uint32_t>(value)); EXPECT_OK(mProducer->query(NATIVE_WINDOW_FORMAT, &value)); EXPECT_EQ(DEFAULT_FORMAT, value); EXPECT_OK(mProducer->query(NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &value)); EXPECT_LE(0, value); EXPECT_GE(BufferQueue::NUM_BUFFER_SLOTS, value); EXPECT_OK(mProducer->query(NATIVE_WINDOW_CONSUMER_RUNNING_BEHIND, &value)); EXPECT_FALSE(value); // Can't run behind when we haven't touched the queue EXPECT_OK(mProducer->query(NATIVE_WINDOW_CONSUMER_USAGE_BITS, &value)); EXPECT_EQ(DEFAULT_CONSUMER_USAGE_BITS, value); }
/* Tests the rcl_steady_time_point_now() function. */ TEST_F(TestTimeFixture, test_rcl_steady_time_point_now) { assert_no_realloc_begin(); rcl_ret_t ret; // Check for invalid argument error condition (allowed to alloc). ret = rcl_steady_time_point_now(nullptr); EXPECT_EQ(ret, RCL_RET_INVALID_ARGUMENT) << rcl_get_error_string_safe(); rcl_reset_error(); assert_no_malloc_begin(); assert_no_free_begin(); // Check for normal operation (not allowed to alloc). rcl_steady_time_point_t now = {0}; ret = rcl_steady_time_point_now(&now); assert_no_malloc_end(); assert_no_realloc_end(); assert_no_free_end(); stop_memory_checking(); EXPECT_EQ(ret, RCL_RET_OK) << rcl_get_error_string_safe(); EXPECT_NE(now.nanoseconds, 0u); // Compare to std::chrono::steady_clock difference of two times (within a second). now = {0}; ret = rcl_steady_time_point_now(&now); std::chrono::steady_clock::time_point now_sc = std::chrono::steady_clock::now(); EXPECT_EQ(ret, RCL_RET_OK) << rcl_get_error_string_safe(); // Wait for a little while. std::this_thread::sleep_for(std::chrono::milliseconds(100)); // Then take a new timestamp with each and compare. rcl_steady_time_point_t later; ret = rcl_steady_time_point_now(&later); std::chrono::steady_clock::time_point later_sc = std::chrono::steady_clock::now(); EXPECT_EQ(ret, RCL_RET_OK) << rcl_get_error_string_safe(); int64_t steady_diff = later.nanoseconds - now.nanoseconds; int64_t sc_diff = std::chrono::duration_cast<std::chrono::nanoseconds>(later_sc - now_sc).count(); const int k_tolerance_ms = 1; EXPECT_LE(llabs(steady_diff - sc_diff), RCL_MS_TO_NS(k_tolerance_ms)) << "steady_clock differs"; }
TEST(Trims, invertedThrottlePlusthrottleTrimWithZeroWeightOnThrottle) { MODEL_RESET(); modelDefault(0); g_model.throttleReversed = 1; g_model.thrTrim = 1; #if defined(PCBTARANIS) // the input already exists ExpoData *expo = expoAddress(THR_STICK); #else ExpoData *expo = expoAddress(0); expo->mode = 3; expo->chn = THR_STICK; #endif expo->weight = 0; // stick max + trim max anaInValues[THR_STICK] = +1024; setTrimValue(0, THR_STICK, TRIM_MAX); evalMixes(1); EXPECT_EQ(channelOutputs[2], 0); // stick max + trim mid anaInValues[THR_STICK] = +1024; setTrimValue(0, THR_STICK, 0); evalMixes(1); EXPECT_LE(abs(channelOutputs[2] - 125), 1); // stick max + trim min anaInValues[THR_STICK] = +1024; setTrimValue(0, THR_STICK, TRIM_MIN); evalMixes(1); EXPECT_EQ(channelOutputs[2], 250); // stick min + trim max anaInValues[THR_STICK] = -1024; setTrimValue(0, THR_STICK, TRIM_MAX); evalMixes(1); EXPECT_EQ(channelOutputs[2], 0); // stick min + trim mid anaInValues[THR_STICK] = -1024; setTrimValue(0, THR_STICK, 0); evalMixes(1); EXPECT_LE(abs(channelOutputs[2] - 125), 1); // stick min + trim min anaInValues[THR_STICK] = -1024; setTrimValue(0, THR_STICK, TRIM_MIN); evalMixes(1); EXPECT_EQ(channelOutputs[2], 250); // now some tests with extended Trims g_model.extendedTrims = 1; // trim min + various stick positions = should always be same value setTrimValue(0, THR_STICK, TRIM_EXTENDED_MIN); anaInValues[THR_STICK] = -1024; evalMixes(1); EXPECT_EQ(channelOutputs[2], 1000); anaInValues[THR_STICK] = -300; evalMixes(1); EXPECT_EQ(channelOutputs[2], 1000); anaInValues[THR_STICK] = +300; evalMixes(1); EXPECT_EQ(channelOutputs[2], 1000); anaInValues[THR_STICK] = +1024; evalMixes(1); EXPECT_EQ(channelOutputs[2], 1000); // trim max + various stick positions = should always be same value setTrimValue(0, THR_STICK, TRIM_EXTENDED_MAX); anaInValues[THR_STICK] = -1024; evalMixes(1); EXPECT_EQ(channelOutputs[2], 0); anaInValues[THR_STICK] = -300; evalMixes(1); EXPECT_EQ(channelOutputs[2], 0); anaInValues[THR_STICK] = +300; evalMixes(1); EXPECT_EQ(channelOutputs[2], 0); anaInValues[THR_STICK] = +1024; evalMixes(1); EXPECT_EQ(channelOutputs[2], 0); }
TEST(logcat, blocking_tail) { FILE *fp; unsigned long long v = 0xA55FDEADBEEF0000ULL; pid_t pid = getpid(); v += pid & 0xFFFF; LOG_FAILURE_RETRY(__android_log_btwrite(0, EVENT_TYPE_LONG, &v, sizeof(v))); v &= 0xFFFAFFFFFFFFFFFFULL; ASSERT_TRUE(NULL != (fp = popen( "( trap exit HUP QUIT INT PIPE KILL ; sleep 6; echo DONE )&" " logcat -v brief -b events -T 5 2>&1", "r"))); char buffer[5120]; int count = 0; int signals = 0; signal(SIGALRM, caught_blocking_tail); alarm(2); while (fgets(buffer, sizeof(buffer), fp)) { if (!strncmp(buffer, "DONE", 4)) { break; } ++count; int p; unsigned long long l; if ((2 != sscanf(buffer, "I/[0] ( %u): %lld", &p, &l)) || (p != pid)) { continue; } if (l == v) { if (count >= 5) { ++signals; } break; } } alarm(0); signal(SIGALRM, SIG_DFL); // Generate SIGPIPE fclose(fp); caught_blocking_tail(0); pclose(fp); EXPECT_LE(2, count); EXPECT_EQ(1, signals); }
TEST(logcat, blocking_clear) { FILE *fp; unsigned long long v = 0xDEADBEEFA55C0000ULL; pid_t pid = getpid(); v += pid & 0xFFFF; // This test is racey; an event occurs between clear and dump. // We accept that we will get a false positive, but never a false negative. ASSERT_TRUE(NULL != (fp = popen( "( trap exit HUP QUIT INT PIPE KILL ; sleep 6; echo DONE )&" " logcat -b events -c 2>&1 ;" " logcat -v brief -b events 2>&1", "r"))); char buffer[5120]; int count = 0; int signals = 0; signal(SIGALRM, caught_blocking_clear); alarm(2); while (fgets(buffer, sizeof(buffer), fp)) { if (!strncmp(buffer, "clearLog: ", 10)) { fprintf(stderr, "WARNING: Test lacks permission to run :-(\n"); count = signals = 1; break; } if (!strncmp(buffer, "DONE", 4)) { break; } ++count; int p; unsigned long long l; if ((2 != sscanf(buffer, "I/[0] ( %u): %lld", &p, &l)) || (p != pid)) { continue; } if (l == v) { if (count > 1) { fprintf(stderr, "WARNING: Possible false positive\n"); } ++signals; break; } } alarm(0); signal(SIGALRM, SIG_DFL); // Generate SIGPIPE fclose(fp); caught_blocking_clear(0); pclose(fp); EXPECT_LE(1, count); EXPECT_EQ(1, signals); }
TEST_F(trajectoryGeneratorFixtureTest, testTrajectoryRequest_Path_With_Nine_KnotPoints){ // A path needed for trajectory request ramp_msgs::Path _path; // Seed knot points and push them into path { // Scope the variables & Seed the first knot point ramp_msgs::KnotPoint knotPoint; knotPoint.motionState.positions.push_back(1.5f); knotPoint.motionState.positions.push_back(2.f); knotPoint.motionState.positions.push_back((-3.f*PI/4.f)); knotPoint.motionState.velocities.push_back(-0.23f); knotPoint.motionState.velocities.push_back(-0.23f); knotPoint.motionState.velocities.push_back(0.22f); knotPoint.motionState.time = 0; _path.points.push_back(knotPoint); } // ----------------------------------------------------- { // Scope the variables & Seed the second knot point ramp_msgs::KnotPoint knotPoint; knotPoint.motionState.positions.push_back(1.75f); knotPoint.motionState.positions.push_back(0.f); knotPoint.motionState.positions.push_back((-1.f*PI/2.f)); knotPoint.motionState.velocities.push_back(-0.20f); knotPoint.motionState.velocities.push_back(-0.15f); knotPoint.motionState.velocities.push_back(-0.13f); knotPoint.motionState.time = 0; _path.points.push_back(knotPoint); } // ----------------------------------------------------- { // Scope the variables & Seed the third knot point ramp_msgs::KnotPoint knotPoint; knotPoint.motionState.positions.push_back(3.25f); knotPoint.motionState.positions.push_back(0.f); knotPoint.motionState.positions.push_back((3.f*PI/4.f)); knotPoint.motionState.velocities.push_back(0.f); knotPoint.motionState.velocities.push_back(0.f); knotPoint.motionState.velocities.push_back(0.f); knotPoint.motionState.time = 0; _path.points.push_back(knotPoint); } // ----------------------------------------------------- { // Scope the variables & Seed the fourth knot point ramp_msgs::KnotPoint knotPoint; knotPoint.motionState.positions.push_back(3.25f); knotPoint.motionState.positions.push_back(0.f); knotPoint.motionState.positions.push_back((3.f*PI/4.f)); knotPoint.motionState.velocities.push_back(0.f); knotPoint.motionState.velocities.push_back(0.f); knotPoint.motionState.velocities.push_back(0.f); knotPoint.motionState.time = 0; _path.points.push_back(knotPoint); } // ----------------------------------------------------- { // Scope the variables & Seed the fifth knot point ramp_msgs::KnotPoint knotPoint; knotPoint.motionState.positions.push_back(3.25f); knotPoint.motionState.positions.push_back(0.f); knotPoint.motionState.positions.push_back((3.f*PI/4.f)); knotPoint.motionState.velocities.push_back(0.f); knotPoint.motionState.velocities.push_back(0.f); knotPoint.motionState.velocities.push_back(0.f); knotPoint.motionState.time = 0; _path.points.push_back(knotPoint); } // ----------------------------------------------------- { // Scope the variables & Seed the sixth knot point ramp_msgs::KnotPoint knotPoint; knotPoint.motionState.positions.push_back(3.25f); knotPoint.motionState.positions.push_back(0.f); knotPoint.motionState.positions.push_back((3.f*PI/4.f)); knotPoint.motionState.velocities.push_back(0.f); knotPoint.motionState.velocities.push_back(0.f); knotPoint.motionState.velocities.push_back(0.f); knotPoint.motionState.time = 0; _path.points.push_back(knotPoint); } // ----------------------------------------------------- { // Scope the variables & Seed the seventh knot point ramp_msgs::KnotPoint knotPoint; knotPoint.motionState.positions.push_back(3.25f); knotPoint.motionState.positions.push_back(0.f); knotPoint.motionState.positions.push_back((3.f*PI/4.f)); 
knotPoint.motionState.velocities.push_back(0.f); knotPoint.motionState.velocities.push_back(0.f); knotPoint.motionState.velocities.push_back(0.f); knotPoint.motionState.time = 0; _path.points.push_back(knotPoint); } // ----------------------------------------------------- { // Scope the variables & Seed the eighth knot point ramp_msgs::KnotPoint knotPoint; knotPoint.motionState.positions.push_back(3.25f); knotPoint.motionState.positions.push_back(0.f); knotPoint.motionState.positions.push_back((3.f*PI/4.f)); knotPoint.motionState.velocities.push_back(0.f); knotPoint.motionState.velocities.push_back(0.f); knotPoint.motionState.velocities.push_back(0.f); knotPoint.motionState.time = 0; _path.points.push_back(knotPoint); } // ----------------------------------------------------- { // Scope the variables & Seed the ninth knot point ramp_msgs::KnotPoint knotPoint; knotPoint.motionState.positions.push_back(3.25f); knotPoint.motionState.positions.push_back(0.f); knotPoint.motionState.positions.push_back((3.f*PI/4.f)); knotPoint.motionState.velocities.push_back(0.f); knotPoint.motionState.velocities.push_back(0.f); knotPoint.motionState.velocities.push_back(0.f); knotPoint.motionState.time = 0; _path.points.push_back(knotPoint); } // ----------------------------------------------------- // Initialize the bezier curve ------------------------ ramp_msgs::BezierCurve _temp; _temp.segmentPoints.push_back(_path.points.at(0).motionState); _temp.segmentPoints.push_back(_path.points.at(1).motionState); _temp.segmentPoints.push_back(_path.points.at(2).motionState); // ----------------------------------------------------- ramp_msgs::TrajectoryRequest tr; tr.path = _path; tr.type = HYBRID; tr.bezierCurves.push_back(_temp); // Initialize the trajectory request ------------------- _trajectorySrv.request.reqs.push_back(tr); //_trajectorySrv.request.path = _path; //_trajectorySrv.request.type = HYBRID; //_trajectorySrv.request.bezierCurves.push_back(_temp); // ----------------------------------------------------- try{ // Request a trajectory _client.call(_trajectorySrv); // Expectations EXPECT_LE(50, (_trajectorySrv.response.trajectory.trajectory.points.size())) <<"Size of the trajectory is less than 50 points"; EXPECT_GE(500, (_trajectorySrv.response.trajectory.trajectory.points.size())) <<"Size of the trajectory is greater than 500 points"; }catch(...){ FAIL() << "Failed to call trajectory generator service."; } }
TEST(logcat, tail_time) { FILE *fp; ASSERT_TRUE(NULL != (fp = popen("logcat -v long -b all -t 10 2>&1", "r"))); char buffer[5120]; char *last_timestamp = NULL; char *first_timestamp = NULL; int count = 0; const unsigned int time_length = 18; const unsigned int time_offset = 2; while (fgets(buffer, sizeof(buffer), fp)) { if ((buffer[0] == '[') && (buffer[1] == ' ') && isdigit(buffer[time_offset]) && isdigit(buffer[time_offset + 1]) && (buffer[time_offset + 2] == '-')) { ++count; buffer[time_length + time_offset] = '\0'; if (!first_timestamp) { first_timestamp = strdup(buffer + time_offset); } free(last_timestamp); last_timestamp = strdup(buffer + time_offset); } } pclose(fp); EXPECT_EQ(10, count); EXPECT_TRUE(last_timestamp != NULL); EXPECT_TRUE(first_timestamp != NULL); snprintf(buffer, sizeof(buffer), "logcat -v long -b all -t '%s' 2>&1", first_timestamp); ASSERT_TRUE(NULL != (fp = popen(buffer, "r"))); int second_count = 0; int last_timestamp_count = -1; while (fgets(buffer, sizeof(buffer), fp)) { if ((buffer[0] == '[') && (buffer[1] == ' ') && isdigit(buffer[time_offset]) && isdigit(buffer[time_offset + 1]) && (buffer[time_offset + 2] == '-')) { ++second_count; buffer[time_length + time_offset] = '\0'; if (first_timestamp) { // we can get a transitory *extremely* rare failure if hidden // underneath the time is *exactly* XX-XX XX:XX:XX.XXX000000 EXPECT_STREQ(buffer + time_offset, first_timestamp); free(first_timestamp); first_timestamp = NULL; } if (!strcmp(buffer + time_offset, last_timestamp)) { last_timestamp_count = second_count; } } } pclose(fp); free(last_timestamp); last_timestamp = NULL; EXPECT_TRUE(first_timestamp == NULL); EXPECT_LE(count, second_count); EXPECT_LE(count, last_timestamp_count); }
TEST_F(QuotaTest, multipleQuotas) { MojObject obj; // put quota (from testUsage) MojAssertNoErr( obj.fromJson(_T("{\"owner\":\"com.foo.bar\",\"size\":1000}")) ); MojAssertNoErr( db.putQuotas(&obj, &obj + 1) ); // quota for com.foo.baz MojAssertNoErr( obj.fromJson(_T("{\"owner\":\"com.foo.baz\",\"size\":1000}")) ); MojAssertNoErr( db.putQuotas(&obj, &obj + 1) ); // register kinds MojAssertNoErr( obj.fromJson(MojTestKind1Str1) ); MojExpectNoErr( db.putKind(obj) ); MojAssertNoErr( obj.fromJson(MojTestKind2Str1) ); MojExpectNoErr( db.putKind(obj) ); // put object of kind1 and kind2 EXPECT_NO_FATAL_FAILURE( put(db, MojTestKind1Objects[0]) ); EXPECT_NO_FATAL_FAILURE( put(db, MojTestKind2Objects[0]) ); MojInt64 quotaUsage1 = -1; EXPECT_NO_FATAL_FAILURE( getQuotaUsage(db, _T("com.foo.bar"), quotaUsage1) ); EXPECT_LE( 0, quotaUsage1 ); MojInt64 quotaUsage2 = -1; EXPECT_NO_FATAL_FAILURE( getQuotaUsage(db, _T("com.foo.baz"), quotaUsage2) ); EXPECT_LE( 0, quotaUsage2 ); EXPECT_LT( 0, quotaUsage1 ); EXPECT_EQ( 0, quotaUsage2 ); // change owner of kind2 to com.foo.baz MojAssertNoErr( obj.fromJson(MojTestKind2Str2) ); MojExpectNoErr( db.putKind(obj) ); MojInt64 quotaUsage3 = -1; EXPECT_NO_FATAL_FAILURE( getQuotaUsage(db, _T("com.foo.bar"), quotaUsage3) ); EXPECT_LE( 0, quotaUsage3 ); MojInt64 quotaUsage4 = -1; EXPECT_NO_FATAL_FAILURE( getQuotaUsage(db, _T("com.foo.baz"), quotaUsage4) ); EXPECT_LE( 0, quotaUsage4 ); EXPECT_LT( 0, quotaUsage3 ); EXPECT_EQ( quotaUsage3, quotaUsage4); // make kind2 inherit from kind1 MojAssertNoErr( obj.fromJson(MojTestKind2Str3) ); MojExpectNoErr( db.putKind(obj) ); MojInt64 quotaUsage5 = -1; EXPECT_NO_FATAL_FAILURE( getQuotaUsage(db, _T("com.foo.bar"), quotaUsage5) ); MojInt64 quotaUsage6 = -1; EXPECT_NO_FATAL_FAILURE( getQuotaUsage(db, _T("com.foo.baz"), quotaUsage6) ); EXPECT_GT( quotaUsage5, quotaUsage1 ); EXPECT_EQ( 0, quotaUsage6 ); // kind3 and object MojAssertNoErr( obj.fromJson(MojTestKind3Str1) ); MojExpectNoErr( db.putKind(obj) ); EXPECT_NO_FATAL_FAILURE( put(db, MojTestKind3Objects[0]) ); MojInt64 quotaUsage7 = -1; EXPECT_NO_FATAL_FAILURE( getQuotaUsage(db, _T("com.foo.bar"), quotaUsage7) ); EXPECT_EQ( quotaUsage7, quotaUsage5 ); // wildcard MojAssertNoErr( obj.fromJson(_T("{\"owner\":\"com.foo.*\",\"size\":1000}")) ); MojExpectNoErr( db.putQuotas(&obj, &obj + 1) ); MojInt64 quotaUsage8 = -1; EXPECT_NO_FATAL_FAILURE( getQuotaUsage(db, _T("com.foo.bar"), quotaUsage8) ); MojInt64 quotaUsage9 = -1; EXPECT_NO_FATAL_FAILURE( getQuotaUsage(db, _T("com.foo.*"), quotaUsage9) ); EXPECT_EQ( quotaUsage5, quotaUsage8 ); EXPECT_LT( 0, quotaUsage9 ); }
TEST_F(QuotaTest, error) { MojErr err; MojObject obj; // put quota (from testUsage) err = obj.fromJson(_T("{\"owner\":\"com.foo.bar\",\"size\":1000}")); MojAssertNoErr(err); err = db.putQuotas(&obj, &obj + 1); MojAssertNoErr(err); // quota for com.foo.baz (from multipleQuotas) err = obj.fromJson(_T("{\"owner\":\"com.foo.baz\",\"size\":1000}")); MojAssertNoErr(err); err = db.putQuotas(&obj, &obj + 1); MojAssertNoErr(err); // make kind2 inherit from kind1 (from multipleQuotas) err = obj.fromJson(MojTestKind2Str3); MojAssertNoErr(err); err = db.putKind(obj); MojExpectNoErr(err); // kind3 (from multipleQuotas) err = obj.fromJson(MojTestKind3Str1); MojAssertNoErr(err); err = db.putKind(obj); MojExpectNoErr(err); // wildcard (from multipleQuotas) err = obj.fromJson(_T("{\"owner\":\"com.foo.*\",\"size\":1000}")); MojAssertNoErr(err); err = db.putQuotas(&obj, &obj + 1); MojExpectNoErr(err); err = db.close(); MojAssertNoErr(err); MojRefCountedPtr<MojDbEnv> testEnv(new MojDbTestStorageEnv(env)); err = db.open(path.c_str(), testEnv); MojAssertNoErr(err); MojDbTestStorageEngine* testEngine = dynamic_cast<MojDbTestStorageEngine*> (db.storageEngine()); ASSERT_TRUE(testEngine); // test that failed put does not affect quota MojInt64 quotaUsage1 = -1; EXPECT_NO_FATAL_FAILURE( getQuotaUsage(db, _T("com.foo.*"), quotaUsage1) ); EXPECT_LE( 0, quotaUsage1 ); err = testEngine->setNextError(_T("txn.commit"), MojErrDbDeadlock); MojAssertNoErr(err); err = obj.fromJson(MojTestKind3Objects[1]); MojAssertNoErr(err); EXPECT_EQ( MojErrDbDeadlock, db.put(obj) ); MojInt64 quotaUsage2 = -1; EXPECT_NO_FATAL_FAILURE( getQuotaUsage(db, _T("com.foo.*"), quotaUsage2) ); EXPECT_LE( 0, quotaUsage2 ); EXPECT_EQ( quotaUsage1, quotaUsage2 ); // test that failed putQuota has no effect err = testEngine->setNextError(_T("txn.commit"), MojErrDbDeadlock); MojAssertNoErr(err); err = obj.fromJson(_T("{\"owner\":\"com.foo.boo\",\"size\":1000}")); MojAssertNoErr(err); EXPECT_EQ( MojErrDbDeadlock, db.putQuotas(&obj, &obj + 1) ); MojInt64 quotaUsage3 = -1; EXPECT_NO_FATAL_FAILURE( getQuotaUsage(db, _T("com.foo.*"), quotaUsage3) ); EXPECT_LE( 0, quotaUsage3 ); EXPECT_EQ( quotaUsage1, quotaUsage3 ); // test that failed putKind has no effect err = testEngine->setNextError(_T("txn.commit"), MojErrDbDeadlock); MojAssertNoErr(err); err = obj.fromJson(MojTestKind3Str2); MojAssertNoErr(err); EXPECT_EQ( MojErrDbDeadlock, db.putKind(obj) ); MojInt64 quotaUsage4 = -1; EXPECT_NO_FATAL_FAILURE( getQuotaUsage(db, _T("com.foo.*"), quotaUsage4) ); EXPECT_LE( 0, quotaUsage4 ); EXPECT_EQ( quotaUsage1, quotaUsage4 ); }
std::pair<double, double> run_test(barrier_inserter& insert_barrier, bool prefill, uint64_t tasks_per_queue, unsigned num_queues, unsigned num_threads, uint64_t delay_us, unsigned idle_queues) { EXPECT_LT(0U, tasks_per_queue); EXPECT_LT(0U, num_queues); EXPECT_LE(0U, idle_queues); boost::property_tree::ptree pt; PARAMETER_TYPE(ip::perf_threadpool_test_threads)(num_threads).persist(pt); pt.put("version", 1); std::unique_ptr<threadpool_type> tp(new threadpool_type(pt)); BOOST_SCOPE_EXIT_TPL((&tp)) { EXPECT_NO_THROW(tp->stop()) << "Failed to stop threadpool"; } BOOST_SCOPE_EXIT_END; { blocker_ptr_vec blockers(idle_queues); for (size_t i = 0; i < idle_queues; ++i) { blockers[i] = blocker_ptr(new Blocker(*tp, num_queues + i)); } } callback_ptr_vec callbacks(num_queues); for (size_t i = 0; i < callbacks.size(); ++i) { callbacks[i] = callback_ptr(new Callback(tasks_per_queue, delay_us)); } youtils::wall_timer t; double post_time; if (prefill) { blocker_ptr_vec blockers(num_queues); for (size_t i = 0; i < blockers.size(); ++i) { blockers[i] = blocker_ptr(new Blocker(*tp, i)); } post_time = post_tasks_(insert_barrier, *tp, callbacks, tasks_per_queue); t.restart(); } else { post_time = post_tasks_(insert_barrier, *tp, callbacks, tasks_per_queue); } for (size_t i = 0; i < callbacks.size(); ++i) { callback_ptr cb = callbacks[i]; std::unique_lock<Callback::lock_type> u(cb->lock_); while (cb->count_ > 0) { ASSERT(cb->count_ <= tasks_per_queue); cb->cond_.wait(u); } } const double proc_time = t.elapsed(); std::cout << "# queues: " << num_queues << ", tasks per queue: " << tasks_per_queue << ", # idle queues: " << idle_queues << ", threads in pool: " << tp->getNumThreads() << ", delay per task (us): " << delay_us << ", processing duration (s): " << proc_time << std::endl; return std::make_pair(post_time, proc_time); }
void pthread_once_routine() { static AtomicInt32 count = 0; AtomicInt32 res = __sync_fetch_and_add(&count, 1); EXPECT_LE(res, 1); }
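// Illustration only -- the harness that invokes pthread_once_routine() is not shown
// above. A typical (hypothetical) driver registers it with pthread_once from several
// threads; pthread_once guarantees the routine runs only once, which is what the
// EXPECT_LE(res, 1) above is probing. The control variable and TEST name here are
// assumptions, not part of the original suite.
#include <pthread.h>
#include <gtest/gtest.h>

static pthread_once_t g_once_control = PTHREAD_ONCE_INIT;  // hypothetical control

static void* once_worker(void*) {
  // Every thread races to run the init routine; only the first caller executes it.
  pthread_once(&g_once_control, pthread_once_routine);
  return nullptr;
}

TEST(PthreadOnceSketch, RoutineRunsOnce) {
  pthread_t threads[8];
  for (pthread_t& t : threads) {
    ASSERT_EQ(0, pthread_create(&t, nullptr, once_worker, nullptr));
  }
  for (pthread_t& t : threads) {
    ASSERT_EQ(0, pthread_join(t, nullptr));
  }
}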
TEST(AliasClass, SpecializedUnions) { IRUnit unit{test_context}; auto const marker = BCMarker::Dummy(); auto const FP = unit.gen(DefFP, marker)->dst(); AliasClass const stk = AStack { FP, -10, 3 }; AliasClass const unrelated_stk = AStack { FP, -14, 1 }; AliasClass const related_stk = AStack { FP, -11, 2 }; auto const stk_and_frame = stk | AFrameAny; EXPECT_TRUE(!stk_and_frame.is_stack()); EXPECT_TRUE(AFrameAny <= stk_and_frame); EXPECT_TRUE(stk <= stk_and_frame); EXPECT_TRUE(AStackAny.maybe(stk_and_frame)); EXPECT_TRUE(AFrameAny.maybe(stk_and_frame)); EXPECT_FALSE(unrelated_stk <= stk_and_frame); EXPECT_FALSE(stk_and_frame.maybe(unrelated_stk)); auto const stk_and_prop = stk | APropAny; EXPECT_TRUE(stk_and_prop.maybe(stk_and_frame)); EXPECT_TRUE(stk_and_frame.maybe(stk_and_prop)); EXPECT_FALSE(stk_and_prop <= stk_and_frame); EXPECT_FALSE(stk_and_frame <= stk_and_prop); EXPECT_TRUE(APropAny.maybe(stk_and_prop)); EXPECT_TRUE(AStackAny.maybe(stk_and_prop)); auto const unrelated_stk_and_prop = unrelated_stk | APropAny; EXPECT_FALSE(stk_and_frame.maybe(unrelated_stk_and_prop)); EXPECT_FALSE(unrelated_stk_and_prop.maybe(stk_and_frame)); EXPECT_TRUE(unrelated_stk_and_prop.maybe(stk_and_prop)); // because of prop EXPECT_FALSE(unrelated_stk_and_prop <= stk_and_prop); EXPECT_FALSE(stk_and_prop <= unrelated_stk_and_prop); EXPECT_FALSE(unrelated_stk_and_prop <= stk_and_frame); EXPECT_FALSE(stk_and_frame <= unrelated_stk_and_prop); EXPECT_FALSE(stk_and_prop <= AHeapAny); EXPECT_TRUE(stk_and_prop.maybe(AHeapAny)); EXPECT_FALSE(stk_and_frame <= AHeapAny); EXPECT_FALSE(stk_and_frame.maybe(AHeapAny)); auto const rel_stk_and_frame = related_stk | AFrameAny; EXPECT_TRUE(stk_and_frame.maybe(rel_stk_and_frame)); EXPECT_TRUE(rel_stk_and_frame.maybe(stk_and_frame)); EXPECT_TRUE(related_stk <= stk); EXPECT_TRUE(rel_stk_and_frame <= stk_and_frame); EXPECT_FALSE(stk_and_frame <= rel_stk_and_frame); EXPECT_TRUE(rel_stk_and_frame.maybe(stk_and_prop)); EXPECT_TRUE(stk_and_prop.maybe(rel_stk_and_frame)); EXPECT_FALSE(rel_stk_and_frame <= stk_and_prop); auto const some_mis = AMIStateTvRef; { auto const some_heap = AElemIAny; auto const u1 = some_heap | some_mis; auto const u2 = AFrameAny | u1; EXPECT_TRUE((AHeapAny | some_heap) == AHeapAny); EXPECT_TRUE(AHeapAny <= (AHeapAny | u1)); EXPECT_TRUE(AHeapAny <= (AHeapAny | u2)); } auto const mis_stk = some_mis | stk; auto const mis_stk_any = AStackAny | mis_stk; EXPECT_EQ(some_mis, AliasClass{*mis_stk_any.mis()}); EXPECT_NE(mis_stk_any, AStackAny | AMIStateAny); auto const other_mis = AMIStateBase; EXPECT_LE(some_mis, some_mis | other_mis); EXPECT_LE(other_mis, some_mis | other_mis); EXPECT_NE(some_mis, some_mis | other_mis); EXPECT_NE(other_mis, some_mis | other_mis); }
TEST_F(MemoryPressureMesosTest, CGROUPS_ROOT_Statistics) { Try<Owned<cluster::Master>> master = StartMaster(); ASSERT_SOME(master); slave::Flags flags = CreateSlaveFlags(); // We only care about memory cgroup for this test. flags.isolation = "cgroups/mem"; flags.agent_subsystems = None(); Fetcher fetcher; Try<MesosContainerizer*> _containerizer = MesosContainerizer::create(flags, true, &fetcher); ASSERT_SOME(_containerizer); Owned<MesosContainerizer> containerizer(_containerizer.get()); Owned<MasterDetector> detector = master.get()->createDetector(); Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), containerizer.get(), flags); ASSERT_SOME(slave); MockScheduler sched; MesosSchedulerDriver driver( &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL); EXPECT_CALL(sched, registered(_, _, _)); Future<vector<Offer>> offers; EXPECT_CALL(sched, resourceOffers(_, _)) .WillOnce(FutureArg<1>(&offers)) .WillRepeatedly(Return()); // Ignore subsequent offers. driver.start(); AWAIT_READY(offers); EXPECT_NE(0u, offers.get().size()); Offer offer = offers.get()[0]; // Run a task that triggers memory pressure event. We request 1G // disk because we are going to write a 512 MB file repeatedly. TaskInfo task = createTask( offer.slave_id(), Resources::parse("cpus:1;mem:256;disk:1024").get(), "while true; do dd count=512 bs=1M if=/dev/zero of=./temp; done"); Future<TaskStatus> running; Future<TaskStatus> killed; EXPECT_CALL(sched, statusUpdate(&driver, _)) .WillOnce(FutureArg<1>(&running)) .WillOnce(FutureArg<1>(&killed)) .WillRepeatedly(Return()); // Ignore subsequent updates. driver.launchTasks(offer.id(), {task}); AWAIT_READY(running); EXPECT_EQ(task.task_id(), running.get().task_id()); EXPECT_EQ(TASK_RUNNING, running.get().state()); Future<hashset<ContainerID>> containers = containerizer->containers(); AWAIT_READY(containers); ASSERT_EQ(1u, containers.get().size()); ContainerID containerId = *(containers.get().begin()); // Wait a while for some memory pressure events to occur. Duration waited = Duration::zero(); do { Future<ResourceStatistics> usage = containerizer->usage(containerId); AWAIT_READY(usage); if (usage.get().mem_low_pressure_counter() > 0) { // We will check the correctness of the memory pressure counters // later, because the memory-hammering task is still active // and potentially incrementing these counters. break; } os::sleep(Milliseconds(100)); waited += Milliseconds(100); } while (waited < Seconds(5)); EXPECT_LE(waited, Seconds(5)); // Pause the clock to ensure that the reaper doesn't reap the exited // command executor and inform the containerizer/slave. Clock::pause(); Clock::settle(); // Stop the memory-hammering task. driver.killTask(task.task_id()); AWAIT_READY_FOR(killed, Seconds(120)); EXPECT_EQ(task.task_id(), killed->task_id()); EXPECT_EQ(TASK_KILLED, killed->state()); // Now check the correctness of the memory pressure counters. Future<ResourceStatistics> usage = containerizer->usage(containerId); AWAIT_READY(usage); EXPECT_GE(usage.get().mem_low_pressure_counter(), usage.get().mem_medium_pressure_counter()); EXPECT_GE(usage.get().mem_medium_pressure_counter(), usage.get().mem_critical_pressure_counter()); Clock::resume(); driver.stop(); driver.join(); }
TEST_F(PhotonMapTest, Consistency) { std::vector<std::string> photonMapTypes; photonMapTypes.emplace_back("naive"); photonMapTypes.emplace_back("kdtree"); // Create photon map with random photons std::vector<std::unique_ptr<PhotonMap>> photonMaps; Photons photons; std::mt19937 gen(42); std::uniform_real_distribution<double> dist; const int Samples = 1<<7; for (int i = 0; i < Samples; i++) { Photon photon; photon.p = Math::Vec3(Math::Float(dist(gen)), Math::Float(dist(gen)), Math::Float(dist(gen))); photons.push_back(photon); } for (auto& type : photonMapTypes) { photonMaps.emplace_back(ComponentFactory::Create<PhotonMap>(type)); photonMaps.back()->Build(photons); } // Compare results for sample queries const int Queries = 1<<7; for (int query = 0; query < Queries; query++) { // Generate query point Math::Vec3 p(Math::Float(dist(gen)), Math::Float(dist(gen)), Math::Float(dist(gen))); for (size_t i = 0; i < photonMapTypes.size(); i++) { for (size_t j = i+1; j < photonMapTypes.size(); j++) { const int N = 10; for (int n = 1; n < N; n++) { const int Steps = 5; const Math::Float Delta = Math::Float(1) / Steps; for (int step = 0; step <= Steps; step++) { auto maxDist = Delta * step; auto maxDist2 = maxDist * maxDist; typedef std::pair<const Photon*, Math::Float> CollectedPhotonInfo; const auto comp = [](const CollectedPhotonInfo& p1, const CollectedPhotonInfo& p2){ return p1.second < p2.second; }; std::vector<CollectedPhotonInfo> psi; std::vector<CollectedPhotonInfo> psj; const auto collectFunc = [&n, &comp](std::vector<CollectedPhotonInfo>& collectedPhotons, const Math::Vec3& p, const Photon& photon, Math::Float& maxDist2) { auto dist2 = Math::Length2(photon.p - p); if (collectedPhotons.size() < (size_t)n) { collectedPhotons.emplace_back(&photon, dist2); if (collectedPhotons.size() == (size_t)n) { // Create heap std::make_heap(collectedPhotons.begin(), collectedPhotons.end(), comp); maxDist2 = collectedPhotons.front().second; } } else { // Update heap std::pop_heap(collectedPhotons.begin(), collectedPhotons.end(), comp); collectedPhotons.back() = std::make_pair(&photon, dist2); std::push_heap(collectedPhotons.begin(), collectedPhotons.end(), comp); maxDist2 = collectedPhotons.front().second; } }; auto maxDist2_i = maxDist2; photonMaps[i]->CollectPhotons(p, maxDist2_i, std::bind(collectFunc, std::ref(psi), std::placeholders::_1, std::placeholders::_2, std::placeholders::_3)); auto maxDist2_j = maxDist2; photonMaps[j]->CollectPhotons(p, maxDist2_j, std::bind(collectFunc, std::ref(psj), std::placeholders::_1, std::placeholders::_2, std::placeholders::_3)); // Check distances for (const auto& info : psi) { EXPECT_LE(info.second, maxDist2_i); } for (const auto& info : psj) { EXPECT_LE(info.second, maxDist2_j); } // Compare two results auto result = ExpectNear(maxDist2_i, maxDist2_j); EXPECT_TRUE(result); if (!result) { LM_LOG_DEBUG("i : " + photonMapTypes[i]); LM_LOG_DEBUG("j : " + photonMapTypes[j]); LM_LOG_DEBUG("maxDist2_i : " + std::to_string(maxDist2_i)); LM_LOG_DEBUG("maxDist2_j : " + std::to_string(maxDist2_j)); } EXPECT_EQ(psi.size(), psj.size()); if (psi.size() == psj.size()) { // Sort two arrays according to distance to #p std::sort(psi.begin(), psi.end(), comp); std::sort(psj.begin(), psj.end(), comp); // Compare elements bool failed = false; for (size_t k = 0; k < psi.size(); k++) { const auto* pi = psi[k].first; const auto* pj = psj[k].first; auto result = ExpectVec3Near(pi->p, pj->p); EXPECT_TRUE(result); if (!result) { failed = true; } } if (failed) { // Show a few elements 
LM_LOG_DEBUG("i : " + photonMapTypes[i]); LM_LOG_DEBUG("j : " + photonMapTypes[j]); for (size_t k = 0; k < psi.size(); k++) { const auto* pi = psi[k].first; const auto* pj = psj[k].first; LM_LOG_DEBUG("k = " + std::to_string(k) + ":"); LM_LOG_INDENTER(); LM_LOG_DEBUG("ps_i : " + std::to_string(pi->p.x) + ", " + std::to_string(pi->p.y) + ", " + std::to_string(pi->p.z)); LM_LOG_DEBUG("dist_i : " + std::to_string(Math::Length2(pi->p - p))); LM_LOG_DEBUG("ps_j : " + std::to_string(pj->p.x) + ", " + std::to_string(pj->p.y) + ", " + std::to_string(pj->p.z)); LM_LOG_DEBUG("dist_j : " + std::to_string(Math::Length2(pj->p - p))); } } } } } } } } }
// Test queue with concurrency. TEST(ThreadSafePriorityQueue, Concurrent) { #define MIN 0 #define MAX 9 #define LEN 11 typedef ThreadSafePriorityQueue<U32, F32, true> MinQueue; typedef ThreadSafePriorityQueue<U32, F32, false> MaxQueue; struct ProducerThread : public Thread { MinQueue& minQueue; MaxQueue& maxQueue; ProducerThread(MinQueue& min, MaxQueue& max) : minQueue(min), maxQueue(max) {} virtual void run(void*) { U32 indices[LEN] = { 2, 7, 4, 6, 1, 5, 3, 8, 6, 9, 0}; F32 priorities[LEN] = {0.2, 0.7, 0.4, 0.6, 0.1, 0.5, 0.3, 0.8, 0.6, 0.9, 0}; for(U32 i = 0; i < LEN; i++) { minQueue.insert(priorities[i], indices[i]); maxQueue.insert(priorities[i], indices[i]); } } }; MinQueue minQueue; MaxQueue maxQueue; ProducerThread producers[] = { ProducerThread(minQueue, maxQueue), ProducerThread(minQueue, maxQueue), ProducerThread(minQueue, maxQueue) }; const U32 len = sizeof(producers) / sizeof(ProducerThread); for(U32 i = 0; i < len; i++) producers[i].start(); for(U32 i = 0; i < len; i++) producers[i].join(); U32 index = MIN; for(U32 i = 0; i < LEN * len; i++) { U32 popped; EXPECT_TRUE(minQueue.takeNext(popped)) << "Failed to pop element from minQueue"; EXPECT_LE(index, popped) << "Element from minQueue was not in sort order"; index = popped; } index = MAX; for(U32 i = 0; i < LEN * len; i++) { U32 popped; EXPECT_TRUE(maxQueue.takeNext(popped)) << "Failed to pop element from maxQueue"; EXPECT_GE(index, popped) << "Element from maxQueue was not in sort order"; index = popped; } #undef MIN #undef MAX #undef LEN }
TEST(audio_utils_primitives, clamp_to_int) { static const float testArray[] = { -NAN, -INFINITY, -1.e20, -32768., 63.9, -3.5, -3.4, -2.5, 2.4, -1.5, -1.4, -0.5, -0.2, 0., 0.2, 0.5, 0.8, 1.4, 1.5, 1.8, 2.4, 2.5, 2.6, 3.4, 3.5, 32767., 32768., 1.e20, INFINITY, NAN }; for (size_t i = 0; i < ARRAY_SIZE(testArray); ++i) { testClamp16(testArray[i]); } for (size_t i = 0; i < ARRAY_SIZE(testArray); ++i) { testClamp24(testArray[i]); } // used for ULP testing (tweaking the lsb of the float) union { int32_t i; float f; } val; int32_t res; // check clampq4_27_from_float() val.f = 16.; res = clampq4_27_from_float(val.f); EXPECT_EQ(res, 0x7fffffff); val.i--; res = clampq4_27_from_float(val.f); EXPECT_LE(res, 0x7fffffff); EXPECT_GE(res, 0x7fff0000); val.f = -16.; res = clampq4_27_from_float(val.f); EXPECT_EQ(res, (int32_t)0x80000000); // negative val.i++; res = clampq4_27_from_float(val.f); EXPECT_GE(res, (int32_t)0x80000000); // negative EXPECT_LE(res, (int32_t)0x80008000); // negative // check u4_28_from_float and u4_12_from_float uint32_t ures; uint16_t ures16; val.f = 16.; ures = u4_28_from_float(val.f); EXPECT_EQ(ures, 0xffffffff); ures16 = u4_12_from_float(val.f); EXPECT_EQ(ures16, 0xffff); val.f = -1.; ures = u4_28_from_float(val.f); EXPECT_EQ(ures, 0); ures16 = u4_12_from_float(val.f); EXPECT_EQ(ures16, 0); // check float_from_u4_28 and float_from_u4_12 (roundtrip) for (uint32_t v = 0x100000; v <= 0xff000000; v += 0x100000) { ures = u4_28_from_float(float_from_u4_28(v)); EXPECT_EQ(ures, v); } for (uint32_t v = 0; v <= 0xffff; ++v) { // uint32_t prevents overflow ures16 = u4_12_from_float(float_from_u4_12(v)); EXPECT_EQ(ures16, v); } }
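// Sketch of the one-ULP tweak used above (val.i-- / val.i++): for a positive, finite
// IEEE-754 binary32 value, decrementing the integer bit pattern yields the next
// representable float toward zero, i.e. the value std::nextafter returns. memcpy is
// used here instead of the union to stay within defined behavior; assumes IEEE-754
// floats. Illustration only, not part of the audio_utils tests.
#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

int main() {
  float f = 16.0f;
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));
  --bits;  // step one ULP toward zero
  float just_below;
  std::memcpy(&just_below, &bits, sizeof(just_below));
  assert(just_below < 16.0f);
  assert(just_below == std::nextafter(16.0f, 0.0f));
  return 0;
}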
TEST_F(TestSystemInfo, GetKernelBitness) { EXPECT_TRUE(g_sysinfo.GetKernelBitness() == 32 || g_sysinfo.GetKernelBitness() == 64) << "'GetKernelBitness()' must return '32' or '64', but not '" << g_sysinfo.GetKernelBitness() << "'"; EXPECT_LE(g_sysinfo.GetXbmcBitness(), g_sysinfo.GetKernelBitness()) << "'GetKernelBitness()' must be greater than or equal to 'GetXbmcBitness()'"; }
TEST_F(QuotaTest, usage) { // put quota MojObject obj; MojAssertNoErr( obj.fromJson(_T("{\"owner\":\"com.foo.bar\",\"size\":1000}")) ); MojAssertNoErr( db.putQuotas(&obj, &obj + 1) ); // empty MojInt64 kindUsage = -1; EXPECT_NO_FATAL_FAILURE( getKindUsage(db, _T("Test:1"), kindUsage) ); EXPECT_EQ( 0, kindUsage ) << "Kind without objects should have zero usage"; MojInt64 quotaUsage = -1; EXPECT_NO_FATAL_FAILURE( getQuotaUsage(db, _T("com.foo.bar"), quotaUsage) ); EXPECT_EQ( 0, quotaUsage ) << "Quota without matching objects should have zero usage"; // new obj EXPECT_NO_FATAL_FAILURE( put(db, MojTestKind1Objects[0]) ); MojInt64 kindUsage1 = -1; EXPECT_NO_FATAL_FAILURE( getKindUsage(db, _T("Test:1"), kindUsage1) ); EXPECT_LT( 0, kindUsage1 ) << "Adding new object into kind should increase kind usage"; MojInt64 quotaUsage1 = -1; EXPECT_NO_FATAL_FAILURE( getQuotaUsage(db, _T("com.foo.bar"), quotaUsage1) ); EXPECT_LT( 0, quotaUsage1 ) << "Adding new object matching quota should increase quota usage"; // add prop to existing obj MojAssertNoErr( obj.fromJson(MojTestKind1Objects[0]) ); MojAssertNoErr( obj.put(_T("bar"), 2) ); MojAssertNoErr( db.put(obj, MojDb::FlagForce) ); MojInt64 kindUsage2 = -1; EXPECT_NO_FATAL_FAILURE( getKindUsage(db, _T("Test:1"), kindUsage2) ); EXPECT_LE( 0, kindUsage2 ); EXPECT_LT( kindUsage1, kindUsage2 ) << "Adding property to existing object should increase kind usage"; MojInt64 quotaUsage2 = -1; EXPECT_NO_FATAL_FAILURE( getQuotaUsage(db, _T("com.foo.bar"), quotaUsage2) ); EXPECT_LE( 0, quotaUsage2 ); EXPECT_LT( quotaUsage1, quotaUsage2 ) << "Adding property to existing object that matches quota should increase usage"; // add 2nd obj EXPECT_NO_FATAL_FAILURE( put(db, MojTestKind1Objects[1]) ); MojInt64 kindUsage3 = -1; EXPECT_NO_FATAL_FAILURE( getKindUsage(db, _T("Test:1"), kindUsage3) ); EXPECT_LE( 0, kindUsage3 ); EXPECT_LT( kindUsage2, kindUsage3 ) << "Adding another object should increase kind usage"; MojInt64 quotaUsage3 = -1; EXPECT_NO_FATAL_FAILURE( getQuotaUsage(db, _T("com.foo.bar"), quotaUsage3) ); EXPECT_LE( 0, quotaUsage3 ); EXPECT_LT( quotaUsage2, quotaUsage3 ) << "Adding another object matching to quota should increase usage"; // del first obj bool found = false; MojExpectNoErr( db.del(1, found, MojDb::FlagPurge) ); EXPECT_TRUE( found ) << "Object should be deleted"; MojInt64 kindUsage4 = -1; EXPECT_NO_FATAL_FAILURE( getKindUsage(db, _T("Test:1"), kindUsage4) ); EXPECT_LE( 0, kindUsage4 ); EXPECT_EQ( kindUsage3 - kindUsage2, kindUsage4 ) << "Deletion of object should bring kind usage to expected value"; MojInt64 quotaUsage4 = -1; EXPECT_NO_FATAL_FAILURE( getQuotaUsage(db, _T("com.foo.bar"), quotaUsage4) ); EXPECT_LE( 0, quotaUsage4 ); EXPECT_EQ( quotaUsage3 - quotaUsage2, quotaUsage4 ) << "Deletion of object should bring quota usage to expected value"; // add index MojAssertNoErr( obj.fromJson(MojTestKind1Str2) ); MojExpectNoErr( db.putKind(obj) ); MojInt64 kindUsage5 = -1; EXPECT_NO_FATAL_FAILURE( getKindUsage(db, _T("Test:1"), kindUsage5) ); EXPECT_LE( 0, kindUsage5 ); EXPECT_LT( kindUsage4, kindUsage5 ) << "Adding new index should increase kind usage"; MojInt64 quotaUsage5 = -1; EXPECT_NO_FATAL_FAILURE( getQuotaUsage(db, _T("com.foo.bar"), quotaUsage5) ); EXPECT_LE( 0, quotaUsage5 ); EXPECT_LT( quotaUsage4, quotaUsage5 ) << "Adding new index should increase quota usage"; // update locale MojExpectNoErr( db.updateLocale(_T("FR_fr")) ); MojExpectNoErr( db.updateLocale(_T("EN_us")) ); MojInt64 kindUsage6 = -1; EXPECT_NO_FATAL_FAILURE( getKindUsage(db, 
_T("Test:1"), kindUsage6) ); EXPECT_LE( 0, kindUsage6 ); EXPECT_EQ( kindUsage5, kindUsage6 ) << "Switching locale forth and back shouldn't affect kind usage"; MojInt64 quotaUsage6 = -1; EXPECT_NO_FATAL_FAILURE( getQuotaUsage(db, _T("com.foo.bar"), quotaUsage6) ); EXPECT_LE( 0, kindUsage6 ); EXPECT_EQ( quotaUsage5, quotaUsage6 ) << "Switching locale forth and back shouldn't affect quota usage"; // drop index MojAssertNoErr( obj.fromJson(MojTestKind1Str1) ); MojExpectNoErr( db.putKind(obj) ); MojInt64 kindUsage7 = -1; EXPECT_NO_FATAL_FAILURE( getKindUsage(db, _T("Test:1"), kindUsage7) ); EXPECT_LE( 0, kindUsage7 ); EXPECT_EQ( kindUsage4, kindUsage7 ) << "Dropping of index should bring kind usage to expected value"; MojInt64 quotaUsage7 = -1; EXPECT_NO_FATAL_FAILURE( getQuotaUsage(db, _T("com.foo.bar"), quotaUsage7) ); EXPECT_LE( 0, quotaUsage7 ); EXPECT_EQ( quotaUsage4, quotaUsage7 ) << "Dropping of index should bring quota usage to expected value"; // drop kind MojString kindStr; MojAssertNoErr( kindStr.assign(_T("Test:1")) ); MojExpectNoErr( db.delKind(kindStr, found) ); EXPECT_TRUE( found ) << "Kind should be deleted"; MojInt64 kindUsage8 = -1; EXPECT_NO_FATAL_FAILURE( getKindUsage(db, _T("Test:1"), kindUsage8) ); EXPECT_EQ( 0, kindUsage8 ) << "Dropping of kind should bring its usage to zero"; MojInt64 quotaUsage8 = -1; EXPECT_NO_FATAL_FAILURE( getQuotaUsage(db, _T("com.foo.bar"), quotaUsage8) ); EXPECT_EQ( 0, quotaUsage8 ) << "Dropping of kind that matches quota should bring quota usage to zero"; MojExpectNoErr( db.quotaStats(obj) ); MojString statStr; MojExpectNoErr( obj.toJson(statStr) ); std::cerr << "quotaStats: " << statStr.data() << std::endl; }