Code example #1
File: mutate_test.cpp Project: seckcoder/peloton
TEST_F(MutateTests, UpdateTest) {
  // We are going to insert a tile group into a table in this test
  storage::DataTable *table = ExecutorTestsUtil::CreateTable();
  auto testing_pool = TestingHarness::GetInstance().GetTestingPool();

  LaunchParallelTest(1, InsertTuple, table, testing_pool);
  LaunchParallelTest(1, UpdateTuple, table);

  // Seq scan to check the tuple count
  std::vector<oid_t> column_ids = {0};
  auto tuple_cnt = SeqScanCount(table, column_ids, nullptr);
  EXPECT_EQ(tuple_cnt, 10);

  expression::TupleValueExpression *tup_val_exp =
      new expression::TupleValueExpression(0, 2);
  expression::ConstantValueExpression *const_val_exp =
      new expression::ConstantValueExpression(
          ValueFactory::GetDoubleValue(23.5));

  auto predicate = new expression::ComparisonExpression<expression::CmpEq>(
      EXPRESSION_TYPE_COMPARE_EQUAL, tup_val_exp, const_val_exp);

  tuple_cnt = SeqScanCount(table, column_ids, predicate);
  EXPECT_EQ(tuple_cnt, 6);

  delete table;
  tuple_id = 0;
}
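
The SeqScanCount helper used above is not shown in this snippet. Example #2 below performs the same scan-and-count inline, so a plausible sketch of the helper, assuming the executor API from examples #2 and #12 rather than the actual definition in mutate_test.cpp, is:

// Sketch: count the tuples returned by a sequential scan over the given
// columns, optionally filtered by a predicate. Mirrors the inline pattern in
// examples #2 and #12; the real helper may manage the transaction differently.
oid_t SeqScanCount(storage::DataTable *table,
                   const std::vector<oid_t> &column_ids,
                   expression::AbstractExpression *predicate) {
  auto &txn_manager = concurrency::OptimisticTxnManager::GetInstance();
  auto txn = txn_manager.BeginTransaction();
  std::unique_ptr<executor::ExecutorContext> context(
      new executor::ExecutorContext(txn));

  planner::SeqScanPlan seq_scan_node(table, predicate, column_ids);
  executor::SeqScanExecutor seq_scan_executor(&seq_scan_node, context.get());
  EXPECT_TRUE(seq_scan_executor.Init());

  oid_t tuple_cnt = 0;
  while (seq_scan_executor.Execute()) {
    std::unique_ptr<executor::LogicalTile> result_logical_tile(
        seq_scan_executor.GetOutput());
    tuple_cnt += result_logical_tile->GetTupleCount();
  }
  txn_manager.CommitTransaction();
  return tuple_cnt;
}
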
Code example #2
File: mutate_test.cpp Project: seckcoder/peloton
TEST_F(MutateTests, DeleteTest) {
  // We are going to insert a tile group into a table in this test

  storage::DataTable *table = ExecutorTestsUtil::CreateTable();
  auto testing_pool = TestingHarness::GetInstance().GetTestingPool();

  LaunchParallelTest(1, InsertTuple, table, testing_pool);
  LaunchParallelTest(1, DeleteTuple, table);

  auto &txn_manager = concurrency::OptimisticTxnManager::GetInstance();
  auto txn = txn_manager.BeginTransaction();
  std::unique_ptr<executor::ExecutorContext> context(
      new executor::ExecutorContext(txn));
  // Seq scan
  std::vector<oid_t> column_ids = {0};
  planner::SeqScanPlan seq_scan_node(table, nullptr, column_ids);
  executor::SeqScanExecutor seq_scan_executor(&seq_scan_node, context.get());
  EXPECT_TRUE(seq_scan_executor.Init());

  auto tuple_cnt = 0;
  while (seq_scan_executor.Execute()) {
    std::unique_ptr<executor::LogicalTile> result_logical_tile(
        seq_scan_executor.GetOutput());
    tuple_cnt += result_logical_tile->GetTupleCount();
  }
  txn_manager.CommitTransaction();
  EXPECT_EQ(tuple_cnt, 6);
  delete table;
  tuple_id = 0;
}
Code example #3
File: loader_test.cpp Project: abpoms/peloton-opt
TEST_F(LoaderTests, LoadingTest) {
  // We are going to simply load tile groups concurrently in this test
  oid_t tuples_per_tilegroup = DEFAULT_TUPLES_PER_TILEGROUP;
  bool build_indexes = false;

  // Control the scale
  oid_t loader_threads_count = 2;
  oid_t tilegroup_count_per_loader = 10;

  // Each tuple size ~40 B.
  oid_t tuple_size = 41;

  std::unique_ptr<storage::DataTable> data_table(
      ExecutorTestsUtil::CreateTable(tuples_per_tilegroup, build_indexes));

  auto testing_pool = TestingHarness::GetInstance().GetTestingPool();

  LaunchParallelTest(loader_threads_count, InsertTuple, data_table.get(),
                     testing_pool, tilegroup_count_per_loader);

  auto expected_tile_group_count =
      loader_threads_count * tilegroup_count_per_loader;
  auto bytes_to_megabytes_converter = (1024 * 1024);

  EXPECT_EQ(data_table->GetTileGroupCount(), expected_tile_group_count);

  LOG_INFO("Dataset size : %lu MB \n",
           (expected_tile_group_count * tuples_per_tilegroup * tuple_size) /
           bytes_to_megabytes_converter);
}
Code example #4
// Test multithreaded functionality
TEST_F(SkipListMapTest, MultithreadedTest) {

  std::vector<catalog::Column> columns;

  catalog::Column column1(VALUE_TYPE_INTEGER, GetTypeSize(VALUE_TYPE_INTEGER), "A", true);
  columns.push_back(column1);
  catalog::Schema *schema = new catalog::Schema(columns);
  std::vector<storage::Tuple *> tuples;

  // Parallel Test
  size_t num_threads = 4;
  size_t scale_factor = 100;

  LaunchParallelTest(num_threads, InsertTest, scale_factor, schema);

  size_t num_entries = 0;
  for (auto iterator = test_skip_list_map.begin();
      iterator != test_skip_list_map.end();
      ++iterator) {
    num_entries++;
  }

  LOG_INFO("Num Entries : %lu", num_entries);

  EXPECT_EQ(num_entries, num_threads * scale_factor * base_scale);

}
Code example #5
TEST(TransactionTests, TransactionTest) {
  auto &txn_manager = concurrency::TransactionManager::GetInstance();

  LaunchParallelTest(8, TransactionTest, &txn_manager);

  std::cout << "Last Commit Id :: " << txn_manager.GetLastCommitId() << "\n";
}
Code example #6
TEST_F(SerializableTransactionTests, TransactionTest) {
  for (auto protocol_type : PROTOCOL_TYPES) {
    concurrency::TransactionManagerFactory::Configure(protocol_type);
    auto &txn_manager = concurrency::TransactionManagerFactory::GetInstance();

    LaunchParallelTest(8, TransactionTest, &txn_manager);
  }
}
Code example #7
File: manager_test.cpp Project: HanumathRao/peloton
TEST_F(ManagerTests, TransactionTest) {
  LaunchParallelTest(8, AddTileGroup);

  std::cout << "Catalog allocations :: "
            << catalog::Manager::GetInstance().GetCurrentOid() << "\n";

  EXPECT_EQ(catalog::Manager::GetInstance().GetCurrentOid(), 800);
}
Code example #8
File: manager_test.cpp Project: camellyx/peloton
TEST_F(ManagerTests, TransactionTest) {
  LaunchParallelTest(8, AddTileGroup);

  LOG_INFO("Catalog allocations :: %u",
           catalog::Manager::GetInstance().GetCurrentTileGroupId());

  // EXPECT_EQ(catalog::Manager::GetInstance().GetCurrentTileGroupId(), 800);
}
Code example #9
File: index_test.cpp Project: yudun/mypeloton
TEST(IndexTests, DeleteTest) {
  auto pool = TestingHarness::GetInstance().GetTestingPool();
  std::vector<ItemPointer> locations;

  // INDEX
  std::unique_ptr<index::Index> index(BuildIndex());

  // Single threaded test
  size_t scale_factor = 1000;
  LaunchParallelTest(1, InsertTest, index.get(), pool, scale_factor);
  LaunchParallelTest(1, DeleteTest, index.get(), pool, scale_factor);

  // Checks
  std::unique_ptr<storage::Tuple> key0(new storage::Tuple(key_schema, true));
  std::unique_ptr<storage::Tuple> key1(new storage::Tuple(key_schema, true));
  std::unique_ptr<storage::Tuple> key2(new storage::Tuple(key_schema, true));

  key0->SetValue(0, ValueFactory::GetIntegerValue(100), pool);
  key0->SetValue(1, ValueFactory::GetStringValue("a"), pool);
  key1->SetValue(0, ValueFactory::GetIntegerValue(100), pool);
  key1->SetValue(1, ValueFactory::GetStringValue("b"), pool);
  key2->SetValue(0, ValueFactory::GetIntegerValue(100), pool);
  key2->SetValue(1, ValueFactory::GetStringValue("c"), pool);

  locations = index->ScanKey(key0.get());
  EXPECT_EQ(locations.size(), 0);

  locations = index->ScanKey(key1.get());
  if (index->HasUniqueKeys())
    EXPECT_EQ(locations.size(), 0);
  else
    EXPECT_EQ(locations.size(), 2);

  locations = index->ScanKey(key2.get());
  EXPECT_EQ(locations.size(), 1);
  EXPECT_EQ(locations[0].block, item1.block);

  locations = index->ScanAllKeys();
  if (index->HasUniqueKeys())
    EXPECT_EQ(locations.size(), scale_factor);
  else
    EXPECT_EQ(locations.size(), 3 * scale_factor);

  delete tuple_schema;
}
Code example #10
TEST_F(TransactionTests, TransactionTest) {
  for (auto test_type : TEST_TYPES) {
    concurrency::TransactionManagerFactory::Configure(test_type);
    auto &txn_manager = concurrency::TransactionManagerFactory::GetInstance();

    LaunchParallelTest(8, TransactionTest, &txn_manager);

    LOG_INFO("next Commit Id :: %lu", txn_manager.GetNextCommitId());
  }
}
Code example #11
File: index_test.cpp Project: yudun/mypeloton
TEST(IndexTests, MyMultiThreadedTest) {
  auto pool = TestingHarness::GetInstance().GetTestingPool();
  std::vector<ItemPointer> locations;

  // INDEX
  std::unique_ptr<index::Index> index(BuildIndex());

  // Parallel Test
  size_t num_threads = 15;
  size_t scale_factor = 30;
  LaunchParallelTest(num_threads, InsertTest, index.get(), pool, scale_factor);
  LaunchParallelTest(num_threads, DeleteTest, index.get(), pool, scale_factor);

  locations = index->ScanAllKeys();
  if (index->HasUniqueKeys())
    EXPECT_EQ(locations.size(), scale_factor);
  else
    EXPECT_EQ(locations.size(), 3 * num_threads * scale_factor);

  std::unique_ptr<storage::Tuple> key1(new storage::Tuple(key_schema, true));
  std::unique_ptr<storage::Tuple> key2(new storage::Tuple(key_schema, true));

  key1->SetValue(0, ValueFactory::GetIntegerValue(100), pool);
  key1->SetValue(1, ValueFactory::GetStringValue("b"), pool);
  key2->SetValue(0, ValueFactory::GetIntegerValue(100), pool);
  key2->SetValue(1, ValueFactory::GetStringValue("c"), pool);

  locations = index->ScanKey(key1.get());
  if (index->HasUniqueKeys()) {
    EXPECT_EQ(locations.size(), 0);
  } else {
    EXPECT_EQ(locations.size(), 2 * num_threads);
  }

  locations = index->ScanKey(key2.get());
  if (index->HasUniqueKeys()) {
    EXPECT_EQ(locations.size(), num_threads);
  } else {
    EXPECT_EQ(locations.size(), num_threads);
  }

  delete tuple_schema;
}
Code example #12
TEST_F(GCDeleteTestVacuum, DeleteTest) {

  peloton::gc::GCManagerFactory::Configure(type);
  peloton::gc::GCManagerFactory::GetInstance().StartGC();

  auto *table = ExecutorTestsUtil::CreateTable(1024);
  auto &manager = catalog::Manager::GetInstance();
  storage::Database db(DEFAULT_DB_ID);
  manager.AddDatabase(&db);
  db.AddTable(table);
  // We are going to insert a tile group into a table in this test

  auto testing_pool = TestingHarness::GetInstance().GetTestingPool();

  auto before_insert = catalog::Manager::GetInstance().GetMemoryFootprint();
  LaunchParallelTest(1, InsertTuple, table, testing_pool);
  auto after_insert = catalog::Manager::GetInstance().GetMemoryFootprint();
  EXPECT_GT(after_insert, before_insert);
  LaunchParallelTest(1, DeleteTuple, table);
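  // Give the vacuum-style GC time to reclaim the versions created by the
  // deletes before sampling the memory footprint again.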
  std::this_thread::sleep_for(std::chrono::seconds(10));
  auto after_delete = catalog::Manager::GetInstance().GetMemoryFootprint();
  EXPECT_EQ(after_insert, after_delete);
  auto &txn_manager = concurrency::TransactionManagerFactory::GetInstance();
  auto txn = txn_manager.BeginTransaction();
  std::unique_ptr<executor::ExecutorContext> context(
      new executor::ExecutorContext(txn));
  // Seq scan
  std::vector<oid_t> column_ids = {0};
  planner::SeqScanPlan seq_scan_node(table, nullptr, column_ids);
  executor::SeqScanExecutor seq_scan_executor(&seq_scan_node, context.get());
  EXPECT_TRUE(seq_scan_executor.Init());

  auto tuple_cnt = 0;
  while (seq_scan_executor.Execute()) {
    std::unique_ptr<executor::LogicalTile> result_logical_tile(
        seq_scan_executor.GetOutput());
    tuple_cnt += result_logical_tile->GetTupleCount();
  }
  txn_manager.CommitTransaction();
  EXPECT_EQ(tuple_cnt, 6);

  tuple_id = 0;
}
Code example #13
File: loader_test.cpp Project: ranxian/peloton
TEST_F(LoaderTests, LoadingTest) {
  // We are going to simply load tile groups concurrently in this test
  // WARNING: This test may potentially run for a long time if
  // TEST_TUPLES_PER_TILEGROUP is large; consider rewriting the test or
  // hard-coding the number of tuples per tile group here
  oid_t tuples_per_tilegroup = TEST_TUPLES_PER_TILEGROUP;
  bool build_indexes = false;

  // Control the scale
  oid_t loader_threads_count = 1;
  oid_t tilegroup_count_per_loader = 1002;

  // Each tuple size ~40 B.
  UNUSED_ATTRIBUTE oid_t tuple_size = 41;

  std::unique_ptr<storage::DataTable> data_table(
      ExecutorTestsUtil::CreateTable(tuples_per_tilegroup, build_indexes));

  auto testing_pool = TestingHarness::GetInstance().GetTestingPool();

  LaunchParallelTest(loader_threads_count, InsertTuple, data_table.get(),
                     testing_pool, tilegroup_count_per_loader);

  auto expected_tile_group_count = 0;

  int total_tuple_count = loader_threads_count * tilegroup_count_per_loader *
                          TEST_TUPLES_PER_TILEGROUP;
  int max_cached_tuple_count =
      TEST_TUPLES_PER_TILEGROUP * storage::DataTable::active_tilegroup_count_;
  int max_unfill_cached_tuple_count =
      (TEST_TUPLES_PER_TILEGROUP - 1) * storage::DataTable::active_tilegroup_count_;

  if (total_tuple_count - max_cached_tuple_count <= 0) {
    if (total_tuple_count <= max_unfill_cached_tuple_count) {
      expected_tile_group_count = storage::DataTable::active_tilegroup_count_;
    } else {
      expected_tile_group_count = storage::DataTable::active_tilegroup_count_ +
                                  total_tuple_count -
                                  max_unfill_cached_tuple_count;
    }
  } else {
    int filled_tile_group_count = total_tuple_count / max_cached_tuple_count *
                                  storage::DataTable::active_tilegroup_count_;

    if (total_tuple_count - filled_tile_group_count * TEST_TUPLES_PER_TILEGROUP -
            max_unfill_cached_tuple_count <= 0) {
      expected_tile_group_count =
          filled_tile_group_count + storage::DataTable::active_tilegroup_count_;
    } else {
      expected_tile_group_count =
          filled_tile_group_count + storage::DataTable::active_tilegroup_count_ +
          (total_tuple_count - filled_tile_group_count -
           max_unfill_cached_tuple_count);
    }
  }

  UNUSED_ATTRIBUTE auto bytes_to_megabytes_converter = (1024 * 1024);

  EXPECT_EQ(data_table->GetTileGroupCount(), expected_tile_group_count);

  LOG_INFO("Dataset size : %u MB \n",
           (expected_tile_group_count * tuples_per_tilegroup * tuple_size) /
               bytes_to_megabytes_converter);
}
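
The branching above is easier to follow with concrete numbers. Here is a worked instance under assumed values for the two constants this snippet does not define (the values of TEST_TUPLES_PER_TILEGROUP and active_tilegroup_count_ below are assumptions):

// Assume TEST_TUPLES_PER_TILEGROUP = 5 and active_tilegroup_count_ = 1.
//   total_tuple_count             = 1 * 1002 * 5 = 5010
//   max_cached_tuple_count        = 5 * 1        = 5
//   max_unfill_cached_tuple_count = (5 - 1) * 1  = 4
// 5010 - 5 > 0, so the outer else branch runs:
//   filled_tile_group_count = 5010 / 5 * 1 = 1002
//   5010 - 1002 * 5 - 4 = -4 <= 0, so:
//   expected_tile_group_count = 1002 + 1 = 1003
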
Code example #14
File: index_test.cpp Project: yudun/mypeloton
TEST(IndexTests, DeleteTest2) {
  auto pool = TestingHarness::GetInstance().GetTestingPool();
  std::vector<ItemPointer> locations;

  // INDEX
  std::unique_ptr<index::Index> index(BuildIndex());

  // Single threaded test
  size_t scale_factor = 1;
  LaunchParallelTest(1, DeleteTest2, index.get(), pool, scale_factor);

  locations = index->ScanAllKeys();
  EXPECT_EQ(locations.size(), 0);

  delete tuple_schema;
}
Code example #15
TEST_F(InsertTests, LoadingTest) {
  // We are going to simply load tile groups concurrently in this test
  // WARNING: This test may potentially run for a long time if
  // TEST_TUPLES_PER_TILEGROUP is large; consider rewriting the test or
  // hard-coding the number of tuples per tile group here
  oid_t tuples_per_tilegroup = TEST_TUPLES_PER_TILEGROUP;
  bool build_indexes = false;

  // Control the scale
  oid_t loader_threads_count = 1;
  oid_t tilegroup_count_per_loader = 1;

  // Each tuple size ~40 B.
  oid_t tuple_size = 41;

  std::unique_ptr<storage::DataTable> data_table(
      ExecutorTestsUtil::CreateTable(tuples_per_tilegroup, build_indexes));

  auto testing_pool = TestingHarness::GetInstance().GetTestingPool();

  Timer<> timer;

  timer.Start();

  LaunchParallelTest(loader_threads_count, InsertTuple, data_table.get(),
                     testing_pool, tilegroup_count_per_loader);

  timer.Stop();
  auto duration = timer.GetDuration();

  LOG_INFO("Duration: %.2lf", duration);

  //EXPECT_LE(duration, 0.2);

  auto expected_tile_group_count =
      loader_threads_count * tilegroup_count_per_loader + 1;
  auto bytes_to_megabytes_converter = (1024 * 1024);

  EXPECT_EQ(data_table->GetTileGroupCount(), expected_tile_group_count);

  LOG_INFO("Dataset size : %u MB \n",
           (expected_tile_group_count * tuples_per_tilegroup * tuple_size) /
               bytes_to_megabytes_converter);
}
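
The Timer<> used above is Peloton's timing helper. Below is a minimal std::chrono-based sketch of the interface this test relies on (Start, Stop, and GetDuration, with the resolution as a template parameter); it is an assumption, not the actual helper.

#include <chrono>
#include <ratio>

// Sketch of a Timer with the Start/Stop/GetDuration interface used above.
// GetDuration() returns the accumulated time in units of ResolutionRatio
// (seconds by default); the real Peloton helper may differ in detail.
template <typename ResolutionRatio = std::ratio<1>>
class Timer {
 public:
  void Start() { begin_ = Clock::now(); }

  void Stop() {
    // Accumulate the elapsed time since the matching Start() call.
    duration_ +=
        std::chrono::duration<double, ResolutionRatio>(Clock::now() - begin_)
            .count();
  }

  double GetDuration() const { return duration_; }

 private:
  using Clock = std::chrono::high_resolution_clock;
  std::chrono::time_point<Clock> begin_;
  double duration_ = 0.0;
};
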
Code example #16
File: hash_table_test.cpp Project: cmu-db/peloton
TEST_F(HashTableTest, CanInsertDuplicateKeys) {
  codegen::util::HashTable table{GetMemPool(), sizeof(Key), sizeof(Value)};

  constexpr uint32_t to_insert = 50000;
  constexpr uint32_t c1 = 4444;
  constexpr uint32_t max_dups = 4;

  std::vector<Key> keys;

  // Insert keys
  uint32_t num_inserts = 0;
  for (uint32_t i = 0; i < to_insert; i++) {
    // Choose a random number of duplicates to insert and store that count in k1.
    uint32_t num_dups = 1 + (rand() % max_dups);
    Key k{num_dups, i};

    // Duplicate insertion
    for (uint32_t dup = 0; dup < num_dups; dup++) {
      Value v = {.v1 = k.k2, .v2 = 2, .v3 = 3, .v4 = c1};
      table.TypedInsert(k.Hash(), k, v);
      num_inserts++;
    }

    keys.emplace_back(k);
  }

  EXPECT_EQ(num_inserts, table.NumElements());

  // Lookup
  for (const auto &key : keys) {
    uint32_t count = 0;
    std::function<void(const Value &v)> f = [&key, &count, &c1](const Value &v) {
      EXPECT_EQ(key.k2, v.v1)
          << "Value's [v1] found in table doesn't match insert key";
      EXPECT_EQ(c1, v.v4) << "Value's [v4] doesn't match constant";
      count++;
    };
    table.TypedProbe(key.Hash(), key, f);
    EXPECT_EQ(key.k1, count) << key << " found " << count << " dups ...";
  }
}
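
These hash table tests assume Key and Value types defined elsewhere in hash_table_test.cpp. The following is a hypothetical reconstruction consistent with the members used above (k1/k2, Hash(), v1 through v4, and a stream operator for the failure messages); the hash function in particular is an assumption.

#include <cstdint>
#include <functional>
#include <ostream>

// Hypothetical reconstruction of the Key/Value PODs these tests rely on;
// only the members referenced above are included, and Hash() is an assumed
// implementation rather than the one in hash_table_test.cpp.
struct Key {
  uint32_t k1, k2;
  uint64_t Hash() const {
    // Pack both fields into one word and hash it (assumption).
    return std::hash<uint64_t>{}((static_cast<uint64_t>(k1) << 32) | k2);
  }
};

struct Value {
  uint32_t v1, v2, v3, v4;
};

// Needed by the streaming EXPECT_* messages above (e.g. "<< key << ...").
inline std::ostream &operator<<(std::ostream &os, const Key &k) {
  return os << "Key{" << k.k1 << ", " << k.k2 << "}";
}
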

TEST_F(HashTableTest, CanInsertLazilyWithDups) {
  codegen::util::HashTable table{GetMemPool(), sizeof(Key), sizeof(Value)};

  constexpr uint32_t to_insert = 50000;
  constexpr uint32_t c1 = 4444;
  constexpr uint32_t max_dups = 4;

  std::vector<Key> keys;

  // Insert keys
  uint32_t num_inserts = 0;
  for (uint32_t i = 0; i < to_insert; i++) {
    // Choose a random number of duplicates to insert and store that count in k1.
    uint32_t num_dups = 1 + (rand() % max_dups);
    Key k{num_dups, i};

    // Duplicate insertion
    for (uint32_t dup = 0; dup < num_dups; dup++) {
      Value v = {.v1 = k.k2, .v2 = 2, .v3 = 3, .v4 = c1};
      table.TypedInsertLazy(k.Hash(), k, v);
      num_inserts++;
    }

    keys.emplace_back(k);
  }

  // The element count reflects the lazy insertions, but the table is not
  // resized until BuildLazy(), so its capacity lags the element count
  EXPECT_EQ(num_inserts, table.NumElements());
  EXPECT_LT(table.Capacity(), table.NumElements());

  // Build lazy
  table.BuildLazy();

  // Lookups should succeed
  for (const auto &key : keys) {
    uint32_t count = 0;
    std::function<void(const Value &v)> f = [&key, &count, &c1](const Value &v) {
      EXPECT_EQ(key.k2, v.v1)
          << "Value's [v1] found in table doesn't match insert key";
      EXPECT_EQ(c1, v.v4) << "Value's [v4] doesn't match constant";
      count++;
    };
    table.TypedProbe(key.Hash(), key, f);
    EXPECT_EQ(key.k1, count) << key << " found " << count << " dups ...";
  }
}

TEST_F(HashTableTest, ParallelMerge) {
  constexpr uint32_t num_threads = 4;
  constexpr uint32_t to_insert = 20000;

  // Allocate hash tables for each thread
  executor::ExecutorContext exec_ctx{nullptr};

  auto &thread_states = exec_ctx.GetThreadStates();
  thread_states.Reset(sizeof(codegen::util::HashTable));
  thread_states.Allocate(num_threads);

  // The keys we insert
  std::mutex keys_mutex;
  std::vector<Key> keys;

  // The global hash table
  codegen::util::HashTable global_table{*exec_ctx.GetPool(), sizeof(Key),
                                        sizeof(Value)};

  auto add_key = [&keys_mutex, &keys](const Key &k) {
    std::lock_guard<std::mutex> lock{keys_mutex};
    keys.emplace_back(k);
  };

  // Insert function
  auto insert_fn = [&add_key, &exec_ctx](uint64_t tid) {
    // Get the local table for this thread
    auto *table = reinterpret_cast<codegen::util::HashTable *>(
        exec_ctx.GetThreadStates().AccessThreadState(tid));

    // Initialize it
    codegen::util::HashTable::Init(*table, exec_ctx, sizeof(Key),
                                   sizeof(Value));

    // Insert keys disjoint from other threads
    for (uint32_t i = tid * to_insert, end = i + to_insert; i != end; i++) {
      Key k{static_cast<uint32_t>(tid), i};
      Value v = {.v1 = k.k2, .v2 = k.k1, .v3 = 3, .v4 = 4444};
      table->TypedInsertLazy(k.Hash(), k, v);

      add_key(k);
    }
  };

  auto merge_fn = [&global_table, &thread_states](uint64_t tid) {
    // Get the local table for this thread
    auto *table = reinterpret_cast<codegen::util::HashTable *>(
        thread_states.AccessThreadState(tid));

    // Merge it into the global table
    global_table.MergeLazyUnfinished(*table);
  };

  // First insert into thread local tables in parallel
  LaunchParallelTest(num_threads, insert_fn);
  for (uint32_t tid = 0; tid < num_threads; tid++) {
    auto *ht = reinterpret_cast<codegen::util::HashTable *>(
        thread_states.AccessThreadState(tid));
    EXPECT_EQ(to_insert, ht->NumElements());
  }

  // Now resize global table
  global_table.ReserveLazy(thread_states, 0);
  EXPECT_EQ(NextPowerOf2(keys.size()), global_table.Capacity());

  // Now merge thread-local tables into global table in parallel
  LaunchParallelTest(num_threads, merge_fn);

  // Clean up local tables
  for (uint32_t tid = 0; tid < num_threads; tid++) {
    auto *table = reinterpret_cast<codegen::util::HashTable *>(
        thread_states.AccessThreadState(tid));
    codegen::util::HashTable::Destroy(*table);
  }

  // Now probe global
  EXPECT_EQ(to_insert * num_threads, global_table.NumElements());
  EXPECT_LE(global_table.NumElements(), global_table.Capacity());
  for (const auto &key : keys) {
    uint32_t count = 0;
    std::function<void(const Value &v)> f = [&key, &count](const Value &v) {
      EXPECT_EQ(key.k2, v.v1)
          << "Value's [v1] found in table doesn't match insert key";
      EXPECT_EQ(key.k1, v.v2) << "Key " << key << " inserted by thread "
                              << key.k1 << " but value was inserted by thread "
                              << v.v2;
      count++;
    };
    global_table.TypedProbe(key.Hash(), key, f);
    EXPECT_EQ(1, count) << "Found duplicate keys in unique key test";
  }
}

Code example #17
/*
 * TestIndexPerformance() - Test driver for indices of a given type
 *
 * This function tests Insert and Delete performance together with
 * key scan
 */
static void TestIndexPerformance(const IndexType &index_type) {
  // This is where we read all values in and verify them
  std::vector<ItemPointer *> location_ptrs;

  // INDEX
  std::unique_ptr<index::Index> index(BuildIndex(false, index_type));

  // Parallel test: by default 4 threads x 256K keys = ~1 million keys

  // Number of threads doing insert or delete
  size_t num_thread = 4;

  // Number of keys inserted by each thread
  size_t num_key = 1024 * 256;

  Timer<> timer;

  ///////////////////////////////////////////////////////////////////
  // Start InsertTest1
  ///////////////////////////////////////////////////////////////////

  timer.Start();

  // The first two arguments are used for launching tasks; all remaining
  // arguments are passed to the thread body (see the harness sketch after
  // this function)
  LaunchParallelTest(num_thread, InsertTest1, index.get(), num_thread, num_key);

  // Perform garbage collection
  if (index->NeedGC() == true) {
    index->PerformGC();
  }

  index->ScanAllKeys(location_ptrs);
  EXPECT_EQ(num_thread * num_key, location_ptrs.size());
  location_ptrs.clear();

  timer.Stop();
  LOG_INFO("InsertTest1 :: Type=%s; Duration=%.2lf",
           IndexTypeToString(index_type).c_str(), timer.GetDuration());

  ///////////////////////////////////////////////////////////////////
  // Start DeleteTest1
  ///////////////////////////////////////////////////////////////////

  timer.Start();

  LaunchParallelTest(num_thread, DeleteTest1, index.get(), num_thread, num_key);

  // Perform garbage collection
  if (index->NeedGC() == true) {
    index->PerformGC();
  }

  index->ScanAllKeys(location_ptrs);
  EXPECT_EQ(0, location_ptrs.size());
  location_ptrs.clear();

  timer.Stop();
  LOG_INFO("DeleteTest1 :: Type=%s; Duration=%.2lf",
           IndexTypeToString(index_type).c_str(), timer.GetDuration());

  ///////////////////////////////////////////////////////////////////
  // Start InsertTest2
  ///////////////////////////////////////////////////////////////////

  timer.Start();

  LaunchParallelTest(num_thread, InsertTest2, index.get(), num_thread, num_key);

  // Perform garbage collection
  if (index->NeedGC() == true) {
    index->PerformGC();
  }

  index->ScanAllKeys(location_ptrs);
  EXPECT_EQ(num_thread * num_key, location_ptrs.size());
  location_ptrs.clear();

  timer.Stop();
  LOG_INFO("InsertTest2 :: Type=%s; Duration=%.2lf",
           IndexTypeToString(index_type).c_str(), timer.GetDuration());

  ///////////////////////////////////////////////////////////////////
  // Start DeleteTest2
  ///////////////////////////////////////////////////////////////////

  timer.Start();

  LaunchParallelTest(num_thread, DeleteTest2, index.get(), num_thread, num_key);

  // Perform garbage collection
  if (index->NeedGC() == true) {
    index->PerformGC();
  }

  index->ScanAllKeys(location_ptrs);
  EXPECT_EQ(0, location_ptrs.size());
  location_ptrs.clear();

  timer.Stop();
  LOG_INFO("DeleteTest :: Type=%s; Duration=%.2lf",
           IndexTypeToString(index_type).c_str(), timer.GetDuration());

  ///////////////////////////////////////////////////////////////////
  // End of all tests
  ///////////////////////////////////////////////////////////////////

  delete tuple_schema;

  return;
}
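
Every example above drives its threads through LaunchParallelTest. The real harness lives in Peloton's test utilities; below is a minimal sketch consistent with the call sites above: thread count first, then the thread body and its arguments, with a thread index handed to the body. Whether the index is appended after the caller's arguments (as here) or passed first is an assumption; the single-parameter lambdas in example #16 are consistent with either.

#include <cstdint>
#include <thread>
#include <vector>

// Sketch of the LaunchParallelTest harness inferred from its call sites.
// Spawns num_threads threads, each running f(args..., thread_index), and
// joins them all before returning so tests can check results immediately.
template <typename F, typename... Args>
void LaunchParallelTest(uint64_t num_threads, F &&f, Args &&... args) {
  std::vector<std::thread> thread_group;

  // Launch a group of threads, passing each its thread index last.
  for (uint64_t thread_itr = 0; thread_itr < num_threads; thread_itr++) {
    thread_group.emplace_back(f, args..., thread_itr);
  }

  // Wait for all workers to finish.
  for (auto &thread : thread_group) {
    thread.join();
  }
}
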
Code example #18
File: mutate_test.cpp Project: seckcoder/peloton
TEST_F(MutateTests, StressTests) {
  auto &txn_manager = concurrency::OptimisticTxnManager::GetInstance();
  auto txn = txn_manager.BeginTransaction();

  std::unique_ptr<executor::ExecutorContext> context(
      new executor::ExecutorContext(txn));

  auto testing_pool = TestingHarness::GetInstance().GetTestingPool();

  // Create insert node for this test.
  storage::DataTable *table = ExecutorTestsUtil::CreateTable();

  // Pass through insert executor.
  storage::Tuple *tuple =
      ExecutorTestsUtil::GetNullTuple(table, testing_pool);

  auto project_info = MakeProjectInfoFromTuple(tuple);

  planner::InsertPlan node(table, project_info);
  executor::InsertExecutor executor(&node, context.get());

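  // Inserting an all-NULL tuple is expected to trip a constraint violation,
  // which is caught and logged rather than failing the test.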
  try {
    executor.Execute();
  } catch (ConstraintException &ce) {
    LOG_ERROR("%s", ce.what());
  }

  delete tuple;

  tuple = ExecutorTestsUtil::GetTuple(table, ++tuple_id, testing_pool);
  project_info = MakeProjectInfoFromTuple(tuple);
  planner::InsertPlan node2(table, project_info);
  executor::InsertExecutor executor2(&node2, context.get());
  executor2.Execute();

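  // Executing the same insert a second time is expected to violate the
  // table's constraints; the exception is caught and logged below.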
  try {
    executor2.Execute();
  } catch (ConstraintException &ce) {
    LOG_ERROR("%s", ce.what());
  }

  delete tuple;

  txn_manager.CommitTransaction();

  LaunchParallelTest(1, InsertTuple, table, testing_pool);
  LOG_TRACE("%s", table->GetInfo().c_str());

  LOG_INFO("---------------------------------------------");

  // LaunchParallelTest(1, UpdateTuple, table);
  // LOG_TRACE("%s", table->GetInfo().c_str());

  LOG_INFO("---------------------------------------------");

  LaunchParallelTest(1, DeleteTuple, table);
  LOG_TRACE("%s", table->GetInfo().c_str());

  // PRIMARY KEY
  std::vector<catalog::Column> columns;

  columns.push_back(ExecutorTestsUtil::GetColumnInfo(0));
  catalog::Schema *key_schema = new catalog::Schema(columns);
  storage::Tuple *key1 = new storage::Tuple(key_schema, true);
  storage::Tuple *key2 = new storage::Tuple(key_schema, true);

  key1->SetValue(0, ValueFactory::GetIntegerValue(10), nullptr);
  key2->SetValue(0, ValueFactory::GetIntegerValue(100), nullptr);

  delete key1;
  delete key2;
  delete key_schema;

  // SEC KEY
  columns.clear();
  columns.push_back(ExecutorTestsUtil::GetColumnInfo(0));
  columns.push_back(ExecutorTestsUtil::GetColumnInfo(1));
  key_schema = new catalog::Schema(columns);

  storage::Tuple *key3 = new storage::Tuple(key_schema, true);
  storage::Tuple *key4 = new storage::Tuple(key_schema, true);

  key3->SetValue(0, ValueFactory::GetIntegerValue(10), nullptr);
  key3->SetValue(1, ValueFactory::GetIntegerValue(11), nullptr);
  key4->SetValue(0, ValueFactory::GetIntegerValue(100), nullptr);
  key4->SetValue(1, ValueFactory::GetIntegerValue(101), nullptr);

  delete key3;
  delete key4;
  delete key_schema;

  delete table;
  tuple_id = 0;
}