// The purpose of this test is to ensure that when slaves are removed
// from the master, and then attempt to re-register, we deny the
// re-registration by sending a ShutdownMessage to the slave.
// Why? Because during a network partition, the master will remove a
// partitioned slave, thus sending its tasks to LOST. At this point,
// when the partition is removed, the slave will attempt to
// re-register with its running tasks. We've already notified
// frameworks that these tasks were LOST, so we have to have the
// slave shut down.
TEST_F(PartitionTest, PartitionedSlaveReregistration)
{
  master::Flags masterFlags = CreateMasterFlags();

  Try<PID<Master>> master = StartMaster(masterFlags);
  ASSERT_SOME(master);

  // Allow the master to PING the slave, but drop all PONG messages
  // from the slave. Note that we don't match on the master / slave
  // PIDs because it's actually the SlaveObserver Process that sends
  // the pings.
  Future<Message> ping = FUTURE_MESSAGE(Eq("PING"), _, _);
  DROP_MESSAGES(Eq("PONG"), _, _);

  MockExecutor exec(DEFAULT_EXECUTOR_ID);

  // A standalone detector lets us later simulate the slave's side of
  // the partition by appointing None() and then the master again.
  StandaloneMasterDetector detector(master.get());

  Try<PID<Slave>> slave = StartSlave(&exec, &detector);
  ASSERT_SOME(slave);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get(), DEFAULT_CREDENTIAL);

  EXPECT_CALL(sched, registered(&driver, _, _));

  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return());

  driver.start();

  AWAIT_READY(offers);
  ASSERT_NE(0u, offers.get().size());

  // Launch a task. This is to ensure the task is killed by the slave,
  // during shutdown.
  TaskID taskId;
  taskId.set_value("1");

  TaskInfo task;
  task.set_name("");
  task.mutable_task_id()->MergeFrom(taskId);
  task.mutable_slave_id()->MergeFrom(offers.get()[0].slave_id());
  task.mutable_resources()->MergeFrom(offers.get()[0].resources());
  task.mutable_executor()->MergeFrom(DEFAULT_EXECUTOR_INFO);
  task.mutable_executor()->mutable_command()->set_value("sleep 60");

  // Set up the expectations for launching the task.
  EXPECT_CALL(exec, registered(_, _, _, _));
  EXPECT_CALL(exec, launchTask(_, _))
    .WillOnce(SendStatusUpdateFromTask(TASK_RUNNING));

  Future<TaskStatus> runningStatus;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&runningStatus));

  Future<Nothing> statusUpdateAck = FUTURE_DISPATCH(
      slave.get(), &Slave::_statusUpdateAcknowledgement);

  driver.launchTasks(offers.get()[0].id(), {task});

  AWAIT_READY(runningStatus);
  EXPECT_EQ(TASK_RUNNING, runningStatus.get().state());

  // Wait for the slave to have handled the acknowledgment prior
  // to pausing the clock.
  AWAIT_READY(statusUpdateAck);

  // Drop the first shutdown message from the master (simulated
  // partition), allow the second shutdown message to pass when
  // the slave re-registers.
  Future<ShutdownMessage> shutdownMessage =
    DROP_PROTOBUF(ShutdownMessage(), _, slave.get());

  Future<TaskStatus> lostStatus;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&lostStatus));

  Future<Nothing> slaveLost;
  EXPECT_CALL(sched, slaveLost(&driver, _))
    .WillOnce(FutureSatisfy(&slaveLost));

  Clock::pause();

  // Now, induce a partition of the slave by having the master
  // timeout the slave.
  size_t pings = 0;
  while (true) {
    AWAIT_READY(ping);
    pings++;
    if (pings == masterFlags.max_slave_ping_timeouts) {
      break;
    }
    ping = FUTURE_MESSAGE(Eq("PING"), _, _);
    Clock::advance(masterFlags.slave_ping_timeout);
    Clock::settle();
  }

  Clock::advance(masterFlags.slave_ping_timeout);
  Clock::settle();

  // The master will have notified the framework of the lost task.
  AWAIT_READY(lostStatus);
  EXPECT_EQ(TASK_LOST, lostStatus.get().state());

  // Wait for the master to attempt to shut down the slave.
  AWAIT_READY(shutdownMessage);

  // The master will notify the framework that the slave was lost.
  AWAIT_READY(slaveLost);

  Clock::resume();

  // We now complete the partition on the slave side as well. This
  // is done by simulating a master loss event which would normally
  // occur during a network partition.
  detector.appoint(None());

  Future<Nothing> shutdown;
  EXPECT_CALL(exec, shutdown(_))
    .WillOnce(FutureSatisfy(&shutdown));

  shutdownMessage = FUTURE_PROTOBUF(ShutdownMessage(), _, slave.get());

  // Have the slave re-register with the master.
  detector.appoint(master.get());

  // Upon re-registration, the master will shutdown the slave.
  // The slave will then shut down the executor.
  AWAIT_READY(shutdownMessage);
  AWAIT_READY(shutdown);

  driver.stop();
  driver.join();

  Shutdown();
}
// Returns true when the two TypedValues compare equal, delegating to
// the shared relational-operator dispatcher with an Eq predicate.
HOT_FUNC bool tvEqual(TypedValue tv1, TypedValue tv2) {
  const bool equal = tvRelOp(Eq(), tv1, tv2);
  return equal;
}
// Equality of a Cell against a raw boolean, via the generic Cell
// relational-operator helper.
bool cellEqual(Cell cell, bool val) {
  const bool equal = cellRelOp(Eq(), cell, val);
  return equal;
}
// Equality of a Cell against a string value, via the generic Cell
// relational-operator helper.
bool cellEqual(Cell cell, const StringData* val) {
  const bool equal = cellRelOp(Eq(), cell, val);
  return equal;
}
// Equality of a Cell against an object value, via the generic Cell
// relational-operator helper.
bool cellEqual(Cell cell, const ObjectData* val) {
  const bool equal = cellRelOp(Eq(), cell, val);
  return equal;
}
// Assembling "OpLine" with a file id and literal line/column numbers
// should produce the matching binary instruction words.
TEST_F(TextToBinaryTest, OpLine) {
  const auto expected = MakeInstruction(SpvOpLine, {1, 42, 99});
  EXPECT_THAT(CompiledInstructions("OpLine %srcfile 42 99"), Eq(expected));
}
// Any parameterized string should round-trip through OpName: the
// assembled instruction carries the target id plus the encoded string.
TEST_P(OpNameTest, AnyString) {
  std::string assembly = "OpName %target \"";
  assembly += GetParam();
  assembly += "\"";
  EXPECT_THAT(CompiledInstructions(assembly),
              Eq(MakeInstruction(SpvOpName, {1}, MakeVector(GetParam()))));
}
// Two box style definitions compare equal when their common (base
// class) attributes compare equal via Eq().
bool wxRichTextBoxStyleDefinition::operator ==(const wxRichTextBoxStyleDefinition& def) const
{
    const bool equal = Eq(def);
    return equal;
}
// A sample test case (group index 0) gets the "_sample" suffix in its
// base id.
TEST_F(TestCaseIdCreatorTests, Creation_Sample_Base) {
  const auto baseId = TestCaseIdCreator::createBaseId("foo", 0);
  EXPECT_THAT(baseId, Eq("foo_sample"));
}
// Lua binding for equality: compares this angle with the SOPAngle at
// stack slot 1 and pushes the boolean result (one return value).
int SOPAngle::__eq(lua_State *L)
{
    const bool equal = Eq(Lunar<SOPAngle>::check(L, 1));
    lua_pushboolean(L, equal);
    return 1;
}
// Paragraph style definitions are equal when the base attributes match
// and the next-style name matches; the base comparison is checked
// first, preserving the original short-circuit order.
bool wxRichTextParagraphStyleDefinition::operator ==(const wxRichTextParagraphStyleDefinition& def) const
{
    if (!Eq(def))
        return false;
    return m_nextStyle == def.m_nextStyle;
}
// This test checks that a scheduler gets a slave lost // message for a partitioned slave. TEST_F(PartitionTest, PartitionedSlave) { master::Flags masterFlags = CreateMasterFlags(); Try<PID<Master>> master = StartMaster(masterFlags); ASSERT_SOME(master); // Set these expectations up before we spawn the slave so that we // don't miss the first PING. Future<Message> ping = FUTURE_MESSAGE(Eq("PING"), _, _); // Drop all the PONGs to simulate slave partition. DROP_MESSAGES(Eq("PONG"), _, _); Try<PID<Slave>> slave = StartSlave(); ASSERT_SOME(slave); MockScheduler sched; MesosSchedulerDriver driver( &sched, DEFAULT_FRAMEWORK_INFO, master.get(), DEFAULT_CREDENTIAL); EXPECT_CALL(sched, registered(&driver, _, _)); Future<Nothing> resourceOffers; EXPECT_CALL(sched, resourceOffers(&driver, _)) .WillOnce(FutureSatisfy(&resourceOffers)) .WillRepeatedly(Return()); // Ignore subsequent offers. driver.start(); // Need to make sure the framework AND slave have registered with // master. Waiting for resource offers should accomplish both. AWAIT_READY(resourceOffers); Clock::pause(); EXPECT_CALL(sched, offerRescinded(&driver, _)) .Times(AtMost(1)); Future<Nothing> slaveLost; EXPECT_CALL(sched, slaveLost(&driver, _)) .WillOnce(FutureSatisfy(&slaveLost)); // Now advance through the PINGs. size_t pings = 0; while (true) { AWAIT_READY(ping); pings++; if (pings == masterFlags.max_slave_ping_timeouts) { break; } ping = FUTURE_MESSAGE(Eq("PING"), _, _); Clock::advance(masterFlags.slave_ping_timeout); } Clock::advance(masterFlags.slave_ping_timeout); AWAIT_READY(slaveLost); this->Stop(slave.get()); JSON::Object stats = Metrics(); EXPECT_EQ(1, stats.values["master/slave_removals"]); EXPECT_EQ(1, stats.values["master/slave_removals/reason_unhealthy"]); driver.stop(); driver.join(); Shutdown(); Clock::resume(); }
// The purpose of this test is to ensure that when slaves are removed // from the master, and then attempt to send exited executor messages, // we send a ShutdownMessage to the slave. Why? Because during a // network partition, the master will remove a partitioned slave, thus // sending its tasks to LOST. At this point, when the partition is // removed, the slave may attempt to send exited executor messages if // it was unaware that the master removed it. We've already // notified frameworks that the tasks under the executors were LOST, // so we have to have the slave shut down. TEST_F(PartitionTest, PartitionedSlaveExitedExecutor) { master::Flags masterFlags = CreateMasterFlags(); Try<PID<Master>> master = StartMaster(masterFlags); ASSERT_SOME(master); // Allow the master to PING the slave, but drop all PONG messages // from the slave. Note that we don't match on the master / slave // PIDs because it's actually the SlaveObserver Process that sends // the pings. Future<Message> ping = FUTURE_MESSAGE(Eq("PING"), _, _); DROP_MESSAGES(Eq("PONG"), _, _); MockExecutor exec(DEFAULT_EXECUTOR_ID); TestContainerizer containerizer(&exec); Try<PID<Slave>> slave = StartSlave(&containerizer); ASSERT_SOME(slave); MockScheduler sched; MesosSchedulerDriver driver( &sched, DEFAULT_FRAMEWORK_INFO, master.get(), DEFAULT_CREDENTIAL); Future<FrameworkID> frameworkId; EXPECT_CALL(sched, registered(&driver, _, _)) .WillOnce(FutureArg<1>(&frameworkId));\ Future<vector<Offer>> offers; EXPECT_CALL(sched, resourceOffers(&driver, _)) .WillOnce(FutureArg<1>(&offers)) .WillRepeatedly(Return()); driver.start(); AWAIT_READY(frameworkId); AWAIT_READY(offers); ASSERT_NE(0u, offers.get().size()); // Launch a task. This allows us to have the slave send an // ExitedExecutorMessage. 
TaskID taskId; taskId.set_value("1"); TaskInfo task; task.set_name(""); task.mutable_task_id()->MergeFrom(taskId); task.mutable_slave_id()->MergeFrom(offers.get()[0].slave_id()); task.mutable_resources()->MergeFrom(offers.get()[0].resources()); task.mutable_executor()->MergeFrom(DEFAULT_EXECUTOR_INFO); task.mutable_executor()->mutable_command()->set_value("sleep 60"); // Set up the expectations for launching the task. EXPECT_CALL(exec, registered(_, _, _, _)); EXPECT_CALL(exec, launchTask(_, _)) .WillOnce(SendStatusUpdateFromTask(TASK_RUNNING)); // Drop all the status updates from the slave, so that we can // ensure the ExitedExecutorMessage is what triggers the slave // shutdown. DROP_PROTOBUFS(StatusUpdateMessage(), _, master.get()); driver.launchTasks(offers.get()[0].id(), {task}); // Drop the first shutdown message from the master (simulated // partition) and allow the second shutdown message to pass when // triggered by the ExitedExecutorMessage. Future<ShutdownMessage> shutdownMessage = DROP_PROTOBUF(ShutdownMessage(), _, slave.get()); Future<TaskStatus> lostStatus; EXPECT_CALL(sched, statusUpdate(&driver, _)) .WillOnce(FutureArg<1>(&lostStatus)); Future<Nothing> slaveLost; EXPECT_CALL(sched, slaveLost(&driver, _)) .WillOnce(FutureSatisfy(&slaveLost)); Clock::pause(); // Now, induce a partition of the slave by having the master // timeout the slave. size_t pings = 0; while (true) { AWAIT_READY(ping); pings++; if (pings == masterFlags.max_slave_ping_timeouts) { break; } ping = FUTURE_MESSAGE(Eq("PING"), _, _); Clock::advance(masterFlags.slave_ping_timeout); Clock::settle(); } Clock::advance(masterFlags.slave_ping_timeout); Clock::settle(); // The master will have notified the framework of the lost task. AWAIT_READY(lostStatus); EXPECT_EQ(TASK_LOST, lostStatus.get().state()); // Wait for the master to attempt to shut down the slave. AWAIT_READY(shutdownMessage); // The master will notify the framework that the slave was lost. 
AWAIT_READY(slaveLost); shutdownMessage = FUTURE_PROTOBUF(ShutdownMessage(), _, slave.get()); // Induce an ExitedExecutorMessage from the slave. containerizer.destroy( frameworkId.get(), DEFAULT_EXECUTOR_INFO.executor_id()); // Upon receiving the message, the master will shutdown the slave. AWAIT_READY(shutdownMessage); Clock::resume(); driver.stop(); driver.join(); Shutdown(); }
// The purpose of this test is to ensure that when slaves are removed
// from the master, and then attempt to send status updates, we send
// a ShutdownMessage to the slave. Why? Because during a network
// partition, the master will remove a partitioned slave, thus sending
// its tasks to LOST. At this point, when the partition is removed,
// the slave may attempt to send updates if it was unaware that the
// master removed it. We've already notified frameworks that these
// tasks were LOST, so we have to have the slave shut down.
TEST_F(PartitionTest, PartitionedSlaveStatusUpdates)
{
  master::Flags masterFlags = CreateMasterFlags();

  Try<PID<Master>> master = StartMaster(masterFlags);
  ASSERT_SOME(master);

  // Allow the master to PING the slave, but drop all PONG messages
  // from the slave. Note that we don't match on the master / slave
  // PIDs because it's actually the SlaveObserver Process that sends
  // the pings.
  Future<Message> ping = FUTURE_MESSAGE(Eq("PING"), _, _);
  DROP_MESSAGES(Eq("PONG"), _, _);

  Future<SlaveRegisteredMessage> slaveRegisteredMessage =
    FUTURE_PROTOBUF(SlaveRegisteredMessage(), _, _);

  MockExecutor exec(DEFAULT_EXECUTOR_ID);

  Try<PID<Slave>> slave = StartSlave(&exec);
  ASSERT_SOME(slave);

  // Capture the slave's id so we can forge a status update from it
  // after the master has removed it.
  AWAIT_READY(slaveRegisteredMessage);
  SlaveID slaveId = slaveRegisteredMessage.get().slave_id();

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get(), DEFAULT_CREDENTIAL);

  Future<FrameworkID> frameworkId;
  EXPECT_CALL(sched, registered(&driver, _, _))
    .WillOnce(FutureArg<1>(&frameworkId));

  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillRepeatedly(Return());

  driver.start();

  AWAIT_READY(frameworkId);

  // Drop the first shutdown message from the master (simulated
  // partition), allow the second shutdown message to pass when
  // the slave sends an update.
  Future<ShutdownMessage> shutdownMessage =
    DROP_PROTOBUF(ShutdownMessage(), _, slave.get());

  EXPECT_CALL(sched, offerRescinded(&driver, _))
    .WillRepeatedly(Return());

  Future<Nothing> slaveLost;
  EXPECT_CALL(sched, slaveLost(&driver, _))
    .WillOnce(FutureSatisfy(&slaveLost));

  Clock::pause();

  // Now, induce a partition of the slave by having the master
  // timeout the slave.
  size_t pings = 0;
  while (true) {
    AWAIT_READY(ping);
    pings++;
    if (pings == masterFlags.max_slave_ping_timeouts) {
      break;
    }
    ping = FUTURE_MESSAGE(Eq("PING"), _, _);
    Clock::advance(masterFlags.slave_ping_timeout);
    Clock::settle();
  }

  Clock::advance(masterFlags.slave_ping_timeout);
  Clock::settle();

  // Wait for the master to attempt to shut down the slave.
  AWAIT_READY(shutdownMessage);

  // The master will notify the framework that the slave was lost.
  AWAIT_READY(slaveLost);

  shutdownMessage = FUTURE_PROTOBUF(ShutdownMessage(), _, slave.get());

  // At this point, the slave still thinks it's registered, so we
  // simulate a status update coming from the slave.
  TaskID taskId;
  taskId.set_value("task_id");

  const StatusUpdate& update = protobuf::createStatusUpdate(
      frameworkId.get(),
      slaveId,
      taskId,
      TASK_RUNNING,
      TaskStatus::SOURCE_SLAVE,
      UUID::random());

  StatusUpdateMessage message;
  message.mutable_update()->CopyFrom(update);
  message.set_pid(stringify(slave.get()));

  process::post(master.get(), message);

  // The master should shutdown the slave upon receiving the update.
  AWAIT_READY(shutdownMessage);

  Clock::resume();

  driver.stop();
  driver.join();

  Shutdown();
}
// Checks the interaction between the integration step Δt and the
// tmax_is_exact flag: with an inexact tmax the integration stops at
// the last full step strictly before tmax (nonzero time error), while
// an exact tmax forces a final, possibly shortened, step landing
// exactly on tmax with zero time error.
TEST_P(SRKNTest, ExactInexactTMax) {
  parameters_.initial.positions.emplace_back(SIUnit<Length>());
  parameters_.initial.momenta.emplace_back(Speed());
  parameters_.initial.time = Time();
  parameters_.tmax = 10.0 * SIUnit<Time>();
  parameters_.sampling_period = 1;

  // Δt slightly smaller than tmax / 30: 30 full steps fit before tmax.
  parameters_.Δt = (1.0 / 3.000001) * SIUnit<Time>();
  parameters_.tmax_is_exact = false;
  integrator_->SolveTrivialKineticEnergyIncrement<Length>(
      &ComputeHarmonicOscillatorAcceleration,
      parameters_,
      &solution_);
  EXPECT_EQ(30, solution_.size());
  EXPECT_THAT(solution_.back().time.value, Lt(parameters_.tmax));
  EXPECT_THAT(solution_.back().time.error, Ne(0.0 * SIUnit<Time>()));
  parameters_.tmax_is_exact = true;
  integrator_->SolveTrivialKineticEnergyIncrement<Length>(
      &ComputeHarmonicOscillatorAcceleration,
      parameters_,
      &solution_);
  EXPECT_EQ(30, solution_.size());
  EXPECT_THAT(solution_.back().time.value, Eq(parameters_.tmax));
  EXPECT_THAT(solution_.back().time.error, Eq(0.0 * SIUnit<Time>()));

  // Δt slightly larger than tmax / 30: only 29 full steps fit before
  // tmax; the exact-tmax run adds a 30th, shortened, final step.
  parameters_.Δt = (1.0 / 2.999999) * SIUnit<Time>();
  parameters_.tmax_is_exact = false;
  integrator_->SolveTrivialKineticEnergyIncrement<Length>(
      &ComputeHarmonicOscillatorAcceleration,
      parameters_,
      &solution_);
  EXPECT_EQ(29, solution_.size());
  EXPECT_THAT(solution_.back().time.value, Lt(parameters_.tmax));
  EXPECT_THAT(solution_.back().time.error, Ne(0.0 * SIUnit<Time>()));
  parameters_.tmax_is_exact = true;
  integrator_->SolveTrivialKineticEnergyIncrement<Length>(
      &ComputeHarmonicOscillatorAcceleration,
      parameters_,
      &solution_);
  EXPECT_EQ(30, solution_.size());
  EXPECT_THAT(solution_.back().time.value, Eq(parameters_.tmax));
  EXPECT_THAT(solution_.back().time.error, Eq(0.0 * SIUnit<Time>()));

  // Δt larger than tmax: no full step fits, so an inexact run yields
  // an empty solution and an exact run yields the single final step.
  parameters_.Δt = 11.0 * SIUnit<Time>();
  parameters_.tmax_is_exact = false;
  integrator_->SolveTrivialKineticEnergyIncrement<Length>(
      &ComputeHarmonicOscillatorAcceleration,
      parameters_,
      &solution_);
  EXPECT_EQ(0, solution_.size());
  parameters_.tmax_is_exact = true;
  integrator_->SolveTrivialKineticEnergyIncrement<Length>(
      &ComputeHarmonicOscillatorAcceleration,
      parameters_,
      &solution_);
  EXPECT_EQ(1, solution_.size());
  EXPECT_THAT(solution_.back().time.value, Eq(parameters_.tmax));
  EXPECT_THAT(solution_.back().time.error, Eq(0.0 * SIUnit<Time>()));

  // Same behaviour for a much larger Δt.
  parameters_.Δt = 100.0 * SIUnit<Time>();
  parameters_.tmax_is_exact = false;
  integrator_->SolveTrivialKineticEnergyIncrement<Length>(
      &ComputeHarmonicOscillatorAcceleration,
      parameters_,
      &solution_);
  EXPECT_EQ(0, solution_.size());
  parameters_.tmax_is_exact = true;
  integrator_->SolveTrivialKineticEnergyIncrement<Length>(
      &ComputeHarmonicOscillatorAcceleration,
      parameters_,
      &solution_);
  EXPECT_EQ(1, solution_.size());
  EXPECT_THAT(solution_.back().time.value, Eq(parameters_.tmax));
  EXPECT_THAT(solution_.back().time.error, Eq(0.0 * SIUnit<Time>()));
}
// An official test case (group index -1) gets no suffix in its base id.
TEST_F(TestCaseIdCreatorTests, Creation_Official_Base) {
  const auto baseId = TestCaseIdCreator::createBaseId("foo", -1);
  EXPECT_THAT(baseId, Eq("foo"));
}
// Equality of a Cell against a resource handle, via the generic Cell
// relational-operator helper.
bool cellEqual(Cell cell, const ResourceHdr* val) {
  const bool equal = cellRelOp(Eq(), cell, val);
  return equal;
}
// An official test case in group 7 gets the group index appended to
// its base id.
TEST_F(TestCaseIdCreatorTests, Creation_Official_WithGroups_Base) {
  const auto baseId = TestCaseIdCreator::createBaseId("foo", 7);
  EXPECT_THAT(baseId, Eq("foo_7"));
}
// "OpNoLine" takes no operands and should assemble to the bare
// instruction.
TEST_F(TextToBinaryTest, OpNoLine) {
  const auto expected = MakeInstruction(SpvOpNoLine, {});
  EXPECT_THAT(CompiledInstructions("OpNoLine"), Eq(expected));
}
// Parsing "42" from a stream should store the integer into the bound
// scalar.
TEST_F(ScalarTests, Parsing) {
  istringstream input("42");
  A->parseFrom(&input);
  EXPECT_THAT(a, Eq(42));
}
// Equality of a Cell against a raw double, via the generic Cell
// relational-operator helper.
bool cellEqual(Cell cell, double val) {
  const bool equal = cellRelOp(Eq(), cell, val);
  return equal;
}
// Printing the bound scalar holding 42 should write "42" to the
// stream.
TEST_F(ScalarTests, Printing) {
  a = 42;
  ostringstream sink;
  A->printTo(&sink);
  EXPECT_THAT(sink.str(), Eq("42"));
}
// Equality of a Cell against an array value, via the generic Cell
// relational-operator helper.
bool cellEqual(Cell cell, const ArrayData* val) {
  const bool equal = cellRelOp(Eq(), cell, val);
  return equal;
}
// Full id of a sample test case combines the base name, the "_sample"
// marker, and the test case number.
TEST_F(TestCaseIdCreatorTests, SampleTestCaseIdCreation) {
  const auto id = TestCaseIdCreator::create("foo", 0, 42);
  EXPECT_THAT(id, Eq("foo_sample_42"));
}
// Equality of two Cells, via the generic Cell relational-operator
// helper.
bool cellEqual(Cell c1, Cell c2) {
  const bool equal = cellRelOp(Eq(), c1, c2);
  return equal;
}
// Full id of an official (ungrouped) test case is just the base name
// plus the test case number.
TEST_F(TestCaseIdCreatorTests, OfficialTestCaseIdCreation) {
  const auto id = TestCaseIdCreator::create("foo", -1, 42);
  EXPECT_THAT(id, Eq("foo_42"));
}
// A freshly constructed list should contain no elements.
TEST_F(ToDoTest, constructor_createsEmptyList) {
  const size_t expectedSize = 0;
  EXPECT_THAT(list.size(), Eq(expectedSize));
}
// Full id of an official test case in group 7 includes the group
// index between the base name and the test case number.
TEST_F(TestCaseIdCreatorTests, OfficialTestCaseIdCreation_WithGroups) {
  const auto id = TestCaseIdCreator::create("foo", 7, 42);
  EXPECT_THAT(id, Eq("foo_7_42"));
}
// Equality of a Cell against a raw 64-bit integer, via the generic
// Cell relational-operator helper.
HOT_FUNC bool cellEqual(Cell cell, int64_t val) {
  const bool equal = cellRelOp(Eq(), cell, val);
  return equal;
}
// Reads a taql table from standard input ("-") and prints it in the
// taql-0.1 textual format: a header describing each field and param,
// the table comment, then one line per row. Fields named "mer<digit>"
// of type uint64 are printed as ASCII k-mers (via mer_to_ascii)
// instead of raw integers.
// NOTE(review): argc/argv are accepted but unused here — presumably
// required by the taql program entry-point convention; confirm.
void begin (int argc, const char * argv[])
{
  size_t infile;
  size_t n_fields;
  size_t field;
  size_t n_params;
  size_t param;
  const char * comment;
  // Flags per field index: 1 when the field should be rendered as an
  // ASCII mer. Assumes at most 256 fields — TODO confirm that bound.
  int mer_field[256] = {0, };

  infile = Infile ("-");
  File_fix (infile, 1, 0);

  puts ("#: taql-0.1/text");

  // Print the "# field <name> <type>" header lines, and mark fields
  // whose type is uint64 and whose name matches "mer" followed by a
  // digit for special mer rendering below.
  n_fields = N_fields (infile);
  for (field = 0; field < n_fields; ++field)
    {
      Taql name;
      Taql type;

      fputs ("# field ", stdout);
      name = Field_name (infile, field);
      Fprint (stdout, name);
      fputs (" ", stdout);
      type = Field_type (infile, field);
      Fprint (stdout, type);
      fputc ('\n', stdout);

      if (   Eq (type, Sym ("uint64"))
          && ('m' == Sym_ref(name, 0))
          && ('e' == Sym_ref(name, 1))
          && ('r' == Sym_ref(name, 2))
          && isdigit (Sym_ref(name, 3)))
        {
          mer_field[field] = 1;
        }
    }

  // Print the "# param <name> <value>" header lines.
  n_params = N_params (infile);
  for (param = 0; param < n_params; ++param)
    {
      fputs ("# param ", stdout);
      Fprint (stdout, Param_name (infile, param));
      fputs (" ", stdout);
      Fprint (stdout, Param_value (infile, param));
      fputc ('\n', stdout);
    }

  // Print the table comment: "#." alone when empty, otherwise the
  // comment bracketed by "#-" / "#." with each line prefixed by "# ".
  comment = Comment (infile);
  if (!comment || !comment[0])
    {
      fputs ("#.\n", stdout);
    }
  else
    {
      const char * c;

      fputs ("#-\n# ", stdout);
      for (c = comment; *c; ++c)
        {
          if (*c == '\n')
            fputs ("\n# ", stdout);
          else
            fputc (*c, stdout);
        }
      fputs ("\n#.\n", stdout);
    }

  // Stream the rows: space-separated values, mer fields rendered as
  // ASCII (16 characters + NUL in in_ascii).
  while (N_ahead (infile))
    {
      for (field = 0; field < n_fields; ++field)
        {
          Taql value;

          if (field)
            fputc (' ', stdout);
          value = Peek (infile, 0, field);
          if (!mer_field [field])
            {
              Fprint (stdout, value);
            }
          else
            {
              t_taql_uint64 mer;
              char in_ascii[17];

              mer = as_uInt64 (value);
              mer_to_ascii (in_ascii, mer);
              fputs (in_ascii, stdout);
            }
        }
      fputc ('\n', stdout);
      Advance (infile, 1);
    }
}