Example #1
// Test to make sure that the tmaster forms the right pplan
// and sends it to all stmgrs
TEST(StMgr, test_pplan_distribute) {
  CommonResources common;
  SetUpCommonResources(common);
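  // Expected number of instances per stmgr: the total instance count
  // (spouts * spout parallelism + bolts * bolt parallelism) divided evenly
  // across the stmgrs. With, say, 2 spouts x 2 instances and 2 bolts x 2
  // instances over 2 stmgrs, that is 8 / 2 = 4 per stmgr (the actual counts
  // are whatever SetUpCommonResources configures).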
  sp_int8 num_workers_per_stmgr_ = (((common.num_spouts_ * common.num_spout_instances_) +
                                     (common.num_bolts_ * common.num_bolt_instances_)) /
                                    common.num_stmgrs_);
  // Start the tmaster etc.
  StartTMaster(common);

  // Distribute workers across stmgrs
  DistributeWorkersAcrossStmgrs(common);

  // Start the stream managers
  StartStMgrs(common);

  // Wait until the physical plan is populated on at least one of the stmgrs;
  // we just pick the first one
  while (!common.stmgrs_list_[0]->GetPhysicalPlan()) sleep(1);

  // Stop the schedulers
  for (size_t i = 0; i < common.ss_list_.size(); ++i) {
    common.ss_list_[i]->loopExit();
  }

  // Wait for the threads to terminate
  common.tmaster_thread_->join();
  for (size_t i = 0; i < common.stmgrs_threads_list_.size(); ++i) {
    common.stmgrs_threads_list_[i]->join();
  }

  // Verify that the pplan was made properly
  const heron::proto::system::PhysicalPlan* pplan0 = common.stmgrs_list_[0]->GetPhysicalPlan();
  EXPECT_EQ(pplan0->stmgrs_size(), common.num_stmgrs_);
  EXPECT_EQ(pplan0->instances_size(), common.num_stmgrs_ * num_workers_per_stmgr_);
  std::map<sp_string, heron::config::PhysicalPlanHelper::TaskData> tasks;
  heron::config::PhysicalPlanHelper::GetLocalTasks(*pplan0, common.stmgrs_id_list_[0], tasks);
  EXPECT_EQ((int)tasks.size(), (common.num_spouts_ * common.num_spout_instances_ +
                                common.num_bolts_ * common.num_bolt_instances_) /
                                   common.num_stmgrs_);

  // Delete the common resources
  TearCommonResources(common);
}
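A note on the wait loop above: `while (!common.stmgrs_list_[0]->GetPhysicalPlan()) sleep(1);` spins forever if the pplan never arrives, so a distribution bug hangs the test instead of failing it. Below is a minimal sketch of a bounded wait using only the standard library; the helper name `WaitForCondition` and the timeout value are our assumptions, not part of the Heron test harness.

#include <chrono>
#include <functional>
#include <thread>

// Hypothetical helper: poll a predicate until it holds or a deadline passes.
static bool WaitForCondition(const std::function<bool()>& pred,
                             std::chrono::seconds timeout) {
  const auto deadline = std::chrono::steady_clock::now() + timeout;
  while (std::chrono::steady_clock::now() < deadline) {
    if (pred()) return true;  // condition met
    std::this_thread::sleep_for(std::chrono::milliseconds(100));  // back off
  }
  return false;  // timed out
}

// Possible usage, replacing the sleep loop in the test:
//   ASSERT_TRUE(WaitForCondition(
//       [&] { return common.stmgrs_list_[0]->GetPhysicalPlan() != nullptr; },
//       std::chrono::seconds(30)));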
Example #2
// Test that activate/deactivate works
// and that the state change is distributed to all stmgrs
TEST(StMgr, test_activate_deactivate) {
  CommonResources common;
  SetUpCommonResources(common);

  sp_int8 num_workers_per_stmgr_ = (((common.num_spouts_ * common.num_spout_instances_) +
                                     (common.num_bolts_ * common.num_bolt_instances_)) /
                                    common.num_stmgrs_);
  // Start the tmaster etc.
  StartTMaster(common);

  // Distribute workers across stmgrs
  DistributeWorkersAcrossStmgrs(common);

  // Start the stream managers
  StartStMgrs(common);

  // Wait until the physical plan is populated on all the stmgrs
  // and check that the topology is in the RUNNING state
  for (size_t i = 0; i < common.stmgrs_list_.size(); ++i) {
    while (!common.stmgrs_list_[i]->GetPhysicalPlan()) sleep(1);
    EXPECT_EQ(common.stmgrs_list_[i]->GetPhysicalPlan()->topology().state(),
              heron::proto::api::RUNNING);
  }

  std::thread deactivate_thread(ControlTopology, common.topology_id_,
                                common.tmaster_controller_port_, false);
  deactivate_thread.join();

  // Wait for some time and check that the topology is in the deactivated (PAUSED) state
  sleep(1);
  for (size_t i = 0; i < common.stmgrs_list_.size(); ++i) {
    EXPECT_EQ(common.stmgrs_list_[i]->GetPhysicalPlan()->topology().state(),
              heron::proto::api::PAUSED);
  }

  std::thread activate_thread(ControlTopology, common.topology_id_,
                              common.tmaster_controller_port_, true);
  activate_thread.join();

  // Wait for some time and check that the topology is back in the RUNNING state
  sleep(1);
  for (size_t i = 0; i < common.stmgrs_list_.size(); ++i) {
    EXPECT_EQ(common.stmgrs_list_[i]->GetPhysicalPlan()->topology().state(),
              heron::proto::api::RUNNING);
  }

  // Stop the schedulers
  for (size_t i = 0; i < common.ss_list_.size(); ++i) {
    common.ss_list_[i]->loopExit();
  }

  // Wait for the threads to terminate
  common.tmaster_thread_->join();
  for (size_t i = 0; i < common.stmgrs_threads_list_.size(); ++i) {
    common.stmgrs_threads_list_[i]->join();
  }

  // Verify that the pplan was made properly
  const heron::proto::system::PhysicalPlan* pplan0 = common.stmgrs_list_[0]->GetPhysicalPlan();
  EXPECT_EQ(pplan0->stmgrs_size(), common.num_stmgrs_);
  EXPECT_EQ(pplan0->instances_size(), common.num_stmgrs_ * num_workers_per_stmgr_);
  std::map<sp_string, heron::config::PhysicalPlanHelper::TaskData> tasks;
  heron::config::PhysicalPlanHelper::GetLocalTasks(*pplan0, common.stmgrs_id_list_[0], tasks);
  EXPECT_EQ((int)tasks.size(), (common.num_spouts_ * common.num_spout_instances_ +
                                common.num_bolts_ * common.num_bolt_instances_) /
                                   common.num_stmgrs_);

  // Delete the common resources
  TearCommonResources(common);
}
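The fixed `sleep(1)` between sending a control message and asserting the new state is timing-dependent and can flake on a slow machine. Here is a sketch of a state poller in the same spirit as the bounded wait above; the helper name and timeout are assumptions, and we assume `heron::proto::api::TopologyState` is the generated enum type behind `topology().state()`.

#include <chrono>
#include <thread>

// Hypothetical helper: wait until every stmgr's pplan reports the expected
// topology state, or give up after the timeout.
static bool WaitForTopologyState(CommonResources& common,
                                 heron::proto::api::TopologyState expected,
                                 std::chrono::seconds timeout) {
  const auto deadline = std::chrono::steady_clock::now() + timeout;
  while (std::chrono::steady_clock::now() < deadline) {
    bool all_match = true;
    for (auto stmgr : common.stmgrs_list_) {
      auto pplan = stmgr->GetPhysicalPlan();
      if (!pplan || pplan->topology().state() != expected) {
        all_match = false;
        break;
      }
    }
    if (all_match) return true;
    std::this_thread::sleep_for(std::chrono::milliseconds(100));
  }
  return false;
}

// Possible usage after the deactivate request:
//   EXPECT_TRUE(WaitForTopologyState(common, heron::proto::api::PAUSED,
//                                    std::chrono::seconds(10)));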
Example #3
// Test to make sure that the restorer sends the restore request
// to all stmgrs
TEST(StatefulRestorer, test_restore_send) {
  CommonResources common;
  SetUpCommonResources(common);

  // Start the tmaster etc.
  StartDummyTMaster(common);

  // Distribute workers across stmgrs
  DistributeWorkersAcrossStmgrs(common);

  // Start the stmgrs
  StartStMgrs(common);

  // Wait until all stmgrs have registered
  while (common.tmaster_->stmgrs().size() !=
         static_cast<size_t>(common.num_stmgrs_)) {
    sleep(1);
  }

  // Make sure that stmgrs have not gotten any restore message
  for (auto stmgr : common.stmgrs_list_) {
    EXPECT_FALSE(stmgr->GotRestoreMessage());
  }
  // Start Restorer
  auto restorer = new heron::tmaster::StatefulRestorer();
  EXPECT_FALSE(restorer->IsInProgress());
  common.tmaster_piper_->ExecuteInEventLoop(
        std::bind(&heron::tmaster::StatefulRestorer::StartRestore,
                  restorer, "ckpt-1", common.tmaster_->stmgrs()));

  sleep(1);
  // All stmgrs should have received the restore message
  for (auto stmgr : common.stmgrs_list_) {
    EXPECT_TRUE(stmgr->GotRestoreMessage());
  }
  EXPECT_TRUE(restorer->IsInProgress());
  sp_int64 txid = restorer->GetRestoreTxid();

  // Simulate restored message
  for (auto stmgr : common.stmgrs_list_) {
    EXPECT_FALSE(restorer->GotResponse(stmgr->stmgrid()));
    EXPECT_FALSE(stmgr->GotStartProcessingMessage());
    common.tmaster_piper_->ExecuteInEventLoop(
        std::bind(&heron::tmaster::StatefulRestorer::HandleStMgrRestored,
                  restorer, stmgr->stmgrid(), "ckpt-1", txid, common.tmaster_->stmgrs()));
    sleep(1);
    EXPECT_TRUE(restorer->GotResponse(stmgr->stmgrid()));
  }

  // The two-phase commit should have finished
  EXPECT_FALSE(restorer->IsInProgress());

  sleep(1);

  // All stmgrs should have received the start-processing message
  for (auto stmgr : common.stmgrs_list_) {
    EXPECT_TRUE(stmgr->GotStartProcessingMessage());
  }

  // Stop the schedulers
  for (size_t i = 0; i < common.ss_list_.size(); ++i) {
    common.ss_list_[i]->loopExit();
  }

  // Wait for the threads to terminate
  common.tmaster_thread_->join();
  for (size_t i = 0; i < common.stmgrs_threads_list_.size(); ++i) {
    common.stmgrs_threads_list_[i]->join();
  }

  // Delete the common resources
  TearCommonResources(common);
  delete restorer;
}
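One small cleanup worth noting: the restorer is created with a bare `new` and only freed by the `delete` on the last line, so an early fatal assertion would leak it. A sketch of the same setup with RAII ownership, assuming C++14 is available for `std::make_unique`:

#include <memory>

// The restorer is destroyed automatically when it goes out of scope;
// std::bind still needs a raw pointer, so pass restorer.get().
auto restorer = std::make_unique<heron::tmaster::StatefulRestorer>();
EXPECT_FALSE(restorer->IsInProgress());
common.tmaster_piper_->ExecuteInEventLoop(
    std::bind(&heron::tmaster::StatefulRestorer::StartRestore,
              restorer.get(), "ckpt-1", common.tmaster_->stmgrs()));
// ... the rest of the test is unchanged, and the trailing
// `delete restorer;` goes away.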