Example #1
// Perform inserts and create insert-tuple log records
std::vector<ItemPointer> InsertTuples(
    storage::DataTable* table, const std::vector<storage::Tuple*>& tuples,
    bool committed) {
  std::vector<ItemPointer> locations;

  auto& txn_manager = concurrency::TransactionManagerFactory::GetInstance();

  for (auto tuple : tuples) {
    auto txn = txn_manager.BeginTransaction();
    ItemPointer location = table->InsertTuple(tuple);
    if (location.block == INVALID_OID) {
      txn->SetResult(Result::RESULT_FAILURE);
      LOG_ERROR("Insert failed");
      exit(EXIT_FAILURE);
    }

    txn->RecordInsert(location);

    locations.push_back(location);

    // Logging
    {
      auto& log_manager = logging::LogManager::GetInstance();

      if (log_manager.IsInLoggingMode()) {
        auto logger = log_manager.GetBackendLogger();
        auto record = logger->GetTupleRecord(
            LOGRECORD_TYPE_TUPLE_INSERT, txn->GetTransactionId(),
            table->GetOid(), location, INVALID_ITEMPOINTER, tuple,
            LOGGING_TESTS_DATABASE_OID);
        logger->Log(record);
      }
    }

    // commit or abort as required
    if (committed) {
      txn_manager.CommitTransaction();
    } else {
      txn_manager.AbortTransaction();
    }
  }

  return locations;
}
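A hypothetical usage sketch for the helper above: half the inserts commit and half abort, so a recovery test can verify that only the committed half is replayed. The table and tuple vectors are assumed fixtures, not names from the original tests.

// Illustrative only -- assumes a populated `table` and two tuple vectors.
// auto committed_locs = InsertTuples(table, committed_tuples, /*committed=*/true);
// auto aborted_locs   = InsertTuples(table, aborted_tuples, /*committed=*/false);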
void PessimisticTxnManager::PerformDelete(const oid_t &tile_group_id,
                                          const oid_t &tuple_id) {
  auto &manager = catalog::Manager::GetInstance();
  auto tile_group_header = manager.GetTileGroup(tile_group_id)->GetHeader();

  assert(tile_group_header->GetTransactionId(tuple_id) ==
         current_txn->GetTransactionId());
  assert(tile_group_header->GetBeginCommitId(tuple_id) == MAX_CID);
  assert(tile_group_header->GetEndCommitId(tuple_id) == MAX_CID);

  tile_group_header->SetEndCommitId(tuple_id, INVALID_CID);

  // Add the old tuple into the delete set
  auto old_location = tile_group_header->GetPrevItemPointer(tuple_id);
  if (old_location.IsNull() == false) {
    // this version is not newly inserted: record the delete against the
    // older, globally visible version.
    current_txn->RecordDelete(old_location.block, old_location.offset);
  } else {
    // this version was newly inserted by this transaction.
    current_txn->RecordDelete(tile_group_id, tuple_id);
  }
}
// count number of expired versions.
int GarbageNum(storage::DataTable *table) {
  auto table_tile_group_count_ = table->GetTileGroupCount();
  auto current_tile_group_offset_ = START_OID;

  int old_num = 0;

  while (current_tile_group_offset_ < table_tile_group_count_) {
    auto tile_group = table->GetTileGroup(current_tile_group_offset_++);
    auto tile_group_header = tile_group->GetHeader();
    oid_t active_tuple_count = tile_group->GetNextTupleSlot();

    for (oid_t tuple_id = 0; tuple_id < active_tuple_count; tuple_id++) {
      auto tuple_txn_id = tile_group_header->GetTransactionId(tuple_id);
      auto tuple_end_cid = tile_group_header->GetEndCommitId(tuple_id);
      if (tuple_txn_id == INITIAL_TXN_ID && tuple_end_cid != MAX_CID) {
        old_num++;
      }
    }
  }

  LOG_INFO("old version num = %d", old_num);
  return old_num;
}
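The expiry predicate in the loop above (committed owner, finite end commit id) can be restated on toy types. A minimal self-contained sketch; the type aliases and constant values are assumptions for illustration, not Peloton's definitions:

#include <cstdint>
#include <limits>
#include <vector>

using toy_txn_id_t = uint64_t;
using toy_cid_t = uint64_t;
constexpr toy_txn_id_t TOY_INITIAL_TXN_ID = 1;  // committed, unowned slot
constexpr toy_cid_t TOY_MAX_CID = std::numeric_limits<toy_cid_t>::max();

struct ToySlotHeader {
  toy_txn_id_t txn_id;
  toy_cid_t end_cid;
};

// A version is garbage iff it is committed (owned by no live transaction)
// and a newer version has stamped its end commit id.
int CountExpired(const std::vector<ToySlotHeader> &slots) {
  int old_num = 0;
  for (const auto &s : slots) {
    if (s.txn_id == TOY_INITIAL_TXN_ID && s.end_cid != TOY_MAX_CID) old_num++;
  }
  return old_num;
}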
void TimestampOrderingTransactionManager::PerformInsert(
    TransactionContext *const current_txn, const ItemPointer &location,
    ItemPointer *index_entry_ptr) {
  PL_ASSERT(current_txn->GetIsolationLevel() != IsolationLevelType::READ_ONLY);

  oid_t tile_group_id = location.block;
  oid_t tuple_id = location.offset;

  auto &manager = catalog::Manager::GetInstance();
  auto tile_group_header = manager.GetTileGroup(tile_group_id)->GetHeader();
  auto transaction_id = current_txn->GetTransactionId();

  // check MVCC info
  // the tuple slot must be empty.
  PL_ASSERT(tile_group_header->GetTransactionId(tuple_id) == INVALID_TXN_ID);
  PL_ASSERT(tile_group_header->GetBeginCommitId(tuple_id) == MAX_CID);
  PL_ASSERT(tile_group_header->GetEndCommitId(tuple_id) == MAX_CID);

  tile_group_header->SetTransactionId(tuple_id, transaction_id);

  // no need to set next item pointer.

  // Add the new tuple into the insert set
  current_txn->RecordInsert(location);

  InitTupleReserved(tile_group_header, tuple_id);

  // Write down the head pointer's address in tile group header
  tile_group_header->SetIndirection(tuple_id, index_entry_ptr);

  // Increment table insert op stats
  if (static_cast<StatsType>(settings::SettingsManager::GetInt(settings::SettingId::stats_mode)) !=
      StatsType::INVALID) {
    stats::BackendStatsContext::GetInstance()->IncrementTableInserts(
        location.block);
  }
}
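The three PL_ASSERTs encode the slot life-cycle: an empty slot reads (INVALID_TXN_ID, MAX_CID, MAX_CID), and PerformInsert only stamps ownership, leaving both commit ids to be installed at commit time. A toy sketch of that transition, with illustrative types and constants:

#include <cassert>
#include <cstdint>
#include <limits>

using toy_txn_id_t = uint64_t;
using toy_cid_t = uint64_t;
constexpr toy_txn_id_t TOY_INVALID_TXN_ID = 0;  // illustrative value
constexpr toy_cid_t TOY_MAX_CID = std::numeric_limits<toy_cid_t>::max();

struct ToyHeaderSlot {
  toy_txn_id_t txn_id = TOY_INVALID_TXN_ID;  // empty slot
  toy_cid_t begin_cid = TOY_MAX_CID;
  toy_cid_t end_cid = TOY_MAX_CID;
};

// Mirrors the PerformInsert transition: take ownership of an empty slot and
// leave begin/end commit ids untouched until commit.
void StampInsert(ToyHeaderSlot &slot, toy_txn_id_t inserter) {
  assert(slot.txn_id == TOY_INVALID_TXN_ID);  // the tuple slot must be empty
  assert(slot.begin_cid == TOY_MAX_CID && slot.end_cid == TOY_MAX_CID);
  slot.txn_id = inserter;
}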
/**
 * @brief Populates the tiles in the given tile-group with deterministic
 * values derived from PopulatedValue, so tests can assert exact contents.
 * @param tile_group Tile-group to populate with values.
 * @param num_rows Number of tuples to insert.
 */
void ExecutorTestsUtil::PopulateTiles(
    std::shared_ptr<storage::TileGroup> tile_group, int num_rows) {
  // Create tuple schema from tile schemas.
  std::vector<catalog::Schema> &tile_schemas = tile_group->GetTileSchemas();
  std::unique_ptr<catalog::Schema> schema(
      catalog::Schema::AppendSchemaList(tile_schemas));

  // Ensure that the tile group is as expected.
  assert(schema->GetColumnCount() == 4);

  // Insert tuples into tile_group.
  auto &txn_manager = concurrency::TransactionManager::GetInstance();
  const bool allocate = true;
  auto txn = txn_manager.BeginTransaction();
  const txn_id_t txn_id = txn->GetTransactionId();
  const cid_t commit_id = txn->GetCommitId();
  auto testing_pool = TestingHarness::GetInstance().GetTestingPool();

  for (int row_itr = 0; row_itr < num_rows; row_itr++) {
    storage::Tuple tuple(schema.get(), allocate);
    tuple.SetValue(0, ValueFactory::GetIntegerValue(PopulatedValue(row_itr, 0)),
                   testing_pool);
    tuple.SetValue(1, ValueFactory::GetIntegerValue(PopulatedValue(row_itr, 1)),
                   testing_pool);
    tuple.SetValue(2, ValueFactory::GetDoubleValue(PopulatedValue(row_itr, 2)),
                   testing_pool);
    Value string_value = ValueFactory::GetStringValue(
        std::to_string(PopulatedValue(row_itr, 3)));
    tuple.SetValue(3, string_value, testing_pool);

    oid_t tuple_slot_id = tile_group->InsertTuple(txn_id, &tuple);
    tile_group->CommitInsertedTuple(tuple_slot_id, txn_id, commit_id);
  }

  txn_manager.CommitTransaction();
}
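PopulatedValue is what makes these cells predictable in later assertions. To the best of my recollection it is the row/column encoding below; treat the exact formula as an assumption rather than Peloton's verbatim definition:

// Assumed definition (ExecutorTestsUtil): the cell value encodes its row and
// column, e.g. row 7, column 2 stores 72.
inline int PopulatedValue(const int row_id, const int column_id) {
  return 10 * row_id + column_id;
}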
void DeleteTuple(storage::DataTable *table, oid_t id) {
  auto txn = peloton::concurrency::current_txn;
  std::unique_ptr<executor::ExecutorContext> context(
      new executor::ExecutorContext(txn));

  LOG_INFO("Removing tuple with id %d from table %s", (int)id,
           table->GetName().c_str());
  LOG_INFO("Transaction ID: %d", (int)txn->GetTransactionId());

  // Delete
  planner::DeletePlan delete_node(table, false);
  executor::DeleteExecutor delete_executor(&delete_node, context.get());

  // Predicate
  // WHERE id_in_table = id
  expression::TupleValueExpression *tup_val_exp =
      new expression::TupleValueExpression(VALUE_TYPE_INTEGER, 0, 0);
  expression::ConstantValueExpression *const_val_exp =
      new expression::ConstantValueExpression(
          ValueFactory::GetIntegerValue(id));
  auto predicate = new expression::ComparisonExpression<expression::CmpEq>(
      EXPRESSION_TYPE_COMPARE_EQUAL, tup_val_exp, const_val_exp);

  // Seq scan
  std::vector<oid_t> column_ids = {0, 1};
  std::unique_ptr<planner::SeqScanPlan> seq_scan_node(
      new planner::SeqScanPlan(table, predicate, column_ids));
  executor::SeqScanExecutor seq_scan_executor(seq_scan_node.get(),
                                              context.get());

  // Parent-Child relationship
  delete_node.AddChild(std::move(seq_scan_node));
  delete_executor.AddChild(&seq_scan_executor);
  delete_executor.Init();
  delete_executor.Execute();
}
bool IndexScanExecutor::ExecPrimaryIndexLookup() {
  PL_ASSERT(!done_);

  std::vector<ItemPointer *> tuple_location_ptrs;

  PL_ASSERT(index_->GetIndexType() == INDEX_CONSTRAINT_TYPE_PRIMARY_KEY);

  if (0 == key_column_ids_.size()) {
    index_->ScanAllKeys(tuple_location_ptrs);
  } else {
    index_->Scan(values_, key_column_ids_, expr_types_,
                 SCAN_DIRECTION_TYPE_FORWARD, tuple_location_ptrs);
  }

  if (tuple_location_ptrs.size() == 0) return false;

  auto &transaction_manager =
      concurrency::TransactionManagerFactory::GetInstance();

  std::map<oid_t, std::vector<oid_t>> visible_tuples;
  std::vector<ItemPointer> garbage_tuples;
  // for every tuple that is found in the index.
  for (auto tuple_location_ptr : tuple_location_ptrs) {
    
    ItemPointer tuple_location = *tuple_location_ptr;
    
    auto &manager = catalog::Manager::GetInstance();
    auto tile_group = manager.GetTileGroup(tuple_location.block);
    auto tile_group_header = tile_group.get()->GetHeader();

    size_t chain_length = 0;
    while (true) {

      ++chain_length;

      // if the tuple is visible.
      if (transaction_manager.IsVisible(tile_group_header,
                                        tuple_location.offset)) {

        LOG_TRACE("traverse chain length : %lu", chain_length);
        LOG_TRACE("perform read: %u, %u", tuple_location.block,
                 tuple_location.offset);

        // perform predicate evaluation.
        if (predicate_ == nullptr) {
          visible_tuples[tuple_location.block].push_back(tuple_location.offset);

          auto res = transaction_manager.PerformRead(tuple_location);
          if (!res) {
            transaction_manager.SetTransactionResult(RESULT_FAILURE);
            return res;
          }
        } else {
          expression::ContainerTuple<storage::TileGroup> tuple(
              tile_group.get(), tuple_location.offset);
          auto eval =
              predicate_->Evaluate(&tuple, nullptr, executor_context_).IsTrue();
          if (eval == true) {
            visible_tuples[tuple_location.block]
                .push_back(tuple_location.offset);

            auto res = transaction_manager.PerformRead(tuple_location);
            if (!res) {
              transaction_manager.SetTransactionResult(RESULT_FAILURE);
              return res;
            }
          }
        }
        break;
      }
      // if the tuple is not visible.
      else {
        ItemPointer old_item = tuple_location;
        cid_t old_end_cid = tile_group_header->GetEndCommitId(old_item.offset);

        tuple_location = tile_group_header->GetNextItemPointer(old_item.offset);
        // there must exist a visible version.

        // FIXME: currently, only the speculative read transaction manager
        // **may** see a null version; it's a potential bug
        if (tuple_location.IsNull()) {
          transaction_manager.SetTransactionResult(RESULT_FAILURE);
          // FIXME: this causes an unnecessary abort when we have delete
          // operations
          return false;
        }

        cid_t max_committed_cid = transaction_manager.GetMaxCommittedCid();

        // check whether older version is garbage.

        if (old_end_cid <= max_committed_cid) {
          PL_ASSERT(tile_group_header->GetTransactionId(old_item.offset) == INITIAL_TXN_ID ||
                      tile_group_header->GetTransactionId(old_item.offset) == INVALID_TXN_ID);

          if (tile_group_header->SetAtomicTransactionId(old_item.offset, INVALID_TXN_ID) == true) {

            // atomically swap item pointer held in the index bucket.
            AtomicUpdateItemPointer(tuple_location_ptr, tuple_location);

            // currently, let's assume only primary index exists.
            // gc::GCManagerFactory::GetInstance().RecycleTupleSlot(
            //     table_->GetOid(), old_item.block, old_item.offset,
            //     transaction_manager.GetNextCommitId());
            garbage_tuples.push_back(old_item);

            tile_group = manager.GetTileGroup(tuple_location.block);
            tile_group_header = tile_group.get()->GetHeader();
            tile_group_header->SetPrevItemPointer(tuple_location.offset, INVALID_ITEMPOINTER);

          } else {

            tile_group = manager.GetTileGroup(tuple_location.block);
            tile_group_header = tile_group.get()->GetHeader();
          }

        } else {
          tile_group = manager.GetTileGroup(tuple_location.block);
          tile_group_header = tile_group.get()->GetHeader();
        }
      }
    }
  }

  // Add all garbage tuples to GC manager
  if (garbage_tuples.size() != 0) {
    cid_t garbage_timestamp = transaction_manager.GetNextCommitId();
    for (auto garbage : garbage_tuples) {
      gc::GCManagerFactory::GetInstance().RecycleTupleSlot(
        table_->GetOid(), garbage.block, garbage.offset, garbage_timestamp);
    }
  }

  // Construct a logical tile for each block
  for (auto tuples : visible_tuples) {
    auto &manager = catalog::Manager::GetInstance();
    auto tile_group = manager.GetTileGroup(tuples.first);

    std::unique_ptr<LogicalTile> logical_tile(LogicalTileFactory::GetTile());
    // Add relevant columns to logical tile
    logical_tile->AddColumns(tile_group, full_column_ids_);
    logical_tile->AddPositionList(std::move(tuples.second));
    if (column_ids_.size() != 0) {
      logical_tile->ProjectColumns(full_column_ids_, column_ids_);
    }

    result_.push_back(logical_tile.release());
  }

  done_ = true;

  LOG_TRACE("Result tiles : %lu", result_.size());

  return true;
}
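AtomicUpdateItemPointer, used above to swing an index bucket from a garbage version to its successor, is at heart a single 64-bit compare-and-swap. A self-contained sketch of the idea; the packing scheme and signature are assumptions, not Peloton's actual implementation:

#include <atomic>
#include <cstdint>
#include <cstring>

struct ToyItemPointer {
  uint32_t block;
  uint32_t offset;
};
static_assert(sizeof(ToyItemPointer) == sizeof(uint64_t),
              "must pack into one CAS-able word");

// Pack both fields into one word so the bucket can be replaced atomically.
inline uint64_t Pack(ToyItemPointer p) {
  uint64_t bits;
  std::memcpy(&bits, &p, sizeof(bits));
  return bits;
}

// Swing the bucket from `expected` to `desired`; returns false if another
// thread won the race and changed the bucket first.
inline bool CasItemPointer(std::atomic<uint64_t> &bucket,
                           ToyItemPointer expected, ToyItemPointer desired) {
  uint64_t e = Pack(expected);
  return bucket.compare_exchange_strong(e, Pack(desired));
}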
Example #8
/**
 * @brief Inserts tuples into the target table, either by materializing the
 * tuples produced by a child logical tile or by evaluating the plan node's
 * projection in bulk-insert mode.
 * @return true on success, false otherwise.
 */
bool InsertExecutor::DExecute() {
  if (done_) return false;

  assert(!done_);
  assert(executor_context_ != nullptr);

  const planner::InsertPlan &node = GetPlanNode<planner::InsertPlan>();
  storage::DataTable *target_table_ = node.GetTable();
  oid_t bulk_insert_count = node.GetBulkInsertCount();
  assert(target_table_);

  auto transaction_ = executor_context_->GetTransaction();
  auto executor_pool = executor_context_->GetExecutorContextPool();

  // Inserting a logical tile.
  if (children_.size() == 1) {
    LOG_INFO("Insert executor :: 1 child \n");

    if (!children_[0]->Execute()) {
      return false;
    }

    std::unique_ptr<LogicalTile> logical_tile(children_[0]->GetOutput());
    assert(logical_tile.get() != nullptr);
    auto target_table_schema = target_table_->GetSchema();
    auto column_count = target_table_schema->GetColumnCount();

    std::unique_ptr<storage::Tuple> tuple(
        new storage::Tuple(target_table_schema, true));

    // Go over the logical tile
    for (oid_t tuple_id : *logical_tile) {
      expression::ContainerTuple<LogicalTile> cur_tuple(logical_tile.get(),
                                                        tuple_id);

      // Materialize the logical tile tuple
      for (oid_t column_itr = 0; column_itr < column_count; column_itr++)
        tuple->SetValue(column_itr, cur_tuple.GetValue(column_itr),
                        executor_pool);

      peloton::ItemPointer location =
          target_table_->InsertTuple(transaction_, tuple.get());
      if (location.block == INVALID_OID) {
        transaction_->SetResult(peloton::Result::RESULT_FAILURE);
        return false;
      }
      transaction_->RecordInsert(location);

      executor_context_->num_processed += 1;  // insert one
    }

    return true;
  }
  // Inserting a collection of tuples from plan node
  else if (children_.size() == 0) {
    LOG_INFO("Insert executor :: 0 child \n");

    // Extract expressions from plan node and construct the tuple.
    // For now we just handle a single tuple
    auto schema = target_table_->GetSchema();
    std::unique_ptr<storage::Tuple> tuple(new storage::Tuple(schema, true));
    auto project_info = node.GetProjectInfo();

    // There should be no direct maps
    assert(project_info);
    assert(project_info->GetDirectMapList().size() == 0);

    for (auto target : project_info->GetTargetList()) {
      peloton::Value value =
          target.second->Evaluate(nullptr, nullptr, executor_context_);
      tuple->SetValue(target.first, value, executor_pool);
    }

    // Bulk Insert Mode
    for (oid_t insert_itr = 0; insert_itr < bulk_insert_count; insert_itr++) {
      // Carry out insertion
      ItemPointer location =
          target_table_->InsertTuple(transaction_, tuple.get());
      LOG_INFO("Inserted into location: %lu, %lu", location.block,
               location.offset);

      if (location.block == INVALID_OID) {
        LOG_INFO("Failed to Insert. Set txn failure.");
        transaction_->SetResult(peloton::Result::RESULT_FAILURE);
        return false;
      }
      transaction_->RecordInsert(location);

      // Logging
      {
        auto &log_manager = logging::LogManager::GetInstance();

        if (log_manager.IsInLoggingMode()) {
          auto logger = log_manager.GetBackendLogger();
          auto record = logger->GetTupleRecord(
              LOGRECORD_TYPE_TUPLE_INSERT, transaction_->GetTransactionId(),
              target_table_->GetOid(), location, INVALID_ITEMPOINTER,
              tuple.get());

          logger->Log(record);
        }
      }
    }

    executor_context_->num_processed += 1;  // insert one
    done_ = true;
    return true;
  }

  return true;
}
TEST(AggregateTests, PlainSumCountDistinctTest) {
  /*
   * SELECT SUM(a), COUNT(b), COUNT(DISTINCT b) from table
   */
  const int tuple_count = TESTS_TUPLES_PER_TILEGROUP;

  // Create a table and wrap it in logical tiles
  auto &txn_manager = concurrency::TransactionManager::GetInstance();
  auto txn = txn_manager.BeginTransaction();
  auto txn_id = txn->GetTransactionId();

  std::unique_ptr<storage::DataTable> data_table(
      ExecutorTestsUtil::CreateTable(tuple_count, false));
  ExecutorTestsUtil::PopulateTable(txn, data_table.get(),
                                   2 * tuple_count, false,
                                   true, true);
  txn_manager.CommitTransaction();

  std::unique_ptr<executor::LogicalTile> source_logical_tile1(
      executor::LogicalTileFactory::WrapTileGroup(data_table->GetTileGroup(0),
                                                  txn_id));

  std::unique_ptr<executor::LogicalTile> source_logical_tile2(
      executor::LogicalTileFactory::WrapTileGroup(data_table->GetTileGroup(1),
                                                  txn_id));

  // (1-5) Setup plan node

  // 1) Set up group-by columns
  std::vector<oid_t> group_by_columns;

  // 2) Set up project info
  planner::ProjectInfo::DirectMapList direct_map_list = {
      {0, {1, 0}}, {1, {1, 1}}, {2, {1, 2}}};

  auto proj_info = new planner::ProjectInfo(planner::ProjectInfo::TargetList(),
                                            std::move(direct_map_list));

  // 3) Set up unique aggregates
  std::vector<planner::AggregatePlan::AggTerm> agg_terms;
  planner::AggregatePlan::AggTerm sumA(EXPRESSION_TYPE_AGGREGATE_SUM,
                                       expression::TupleValueFactory(0, 0),
                                       false);
  planner::AggregatePlan::AggTerm countB(EXPRESSION_TYPE_AGGREGATE_COUNT,
                                         expression::TupleValueFactory(0, 1),
                                         false);  // Do not flag distinct
  planner::AggregatePlan::AggTerm countDistinctB(
      EXPRESSION_TYPE_AGGREGATE_COUNT, expression::TupleValueFactory(0, 1),
      true);  // Flag distinct
  agg_terms.push_back(sumA);
  agg_terms.push_back(countB);
  agg_terms.push_back(countDistinctB);

  // 4) Set up predicate (empty)
  expression::AbstractExpression* predicate = nullptr;

  // 5) Create output table schema
  auto data_table_schema = data_table.get()->GetSchema();
  std::vector<oid_t> set = {0, 1, 1};
  std::vector<catalog::Column> columns;
  for (auto column_index : set) {
    columns.push_back(data_table_schema->GetColumn(column_index));
  }
  auto output_table_schema = new catalog::Schema(columns);

  // OK) Create the plan node
  planner::AggregatePlan node(proj_info, predicate, std::move(agg_terms),
                              std::move(group_by_columns), output_table_schema,
                              AGGREGATE_TYPE_PLAIN);

  // Create and set up executor
  auto txn2 = txn_manager.BeginTransaction();
  std::unique_ptr<executor::ExecutorContext> context(
      new executor::ExecutorContext(txn2));

  executor::AggregateExecutor executor(&node, context.get());
  MockExecutor child_executor;
  executor.AddChild(&child_executor);

  EXPECT_CALL(child_executor, DInit()).WillOnce(Return(true));

  EXPECT_CALL(child_executor, DExecute())
      .WillOnce(Return(true))
      .WillOnce(Return(true))
      .WillOnce(Return(false));

  EXPECT_CALL(child_executor, GetOutput())
      .WillOnce(Return(source_logical_tile1.release()))
      .WillOnce(Return(source_logical_tile2.release()));

  EXPECT_TRUE(executor.Init());

  EXPECT_TRUE(executor.Execute());

  txn_manager.CommitTransaction();

  /* Verify result */
  std::unique_ptr<executor::LogicalTile> result_tile(executor.GetOutput());
  EXPECT_TRUE(result_tile.get() != nullptr);
  EXPECT_TRUE(result_tile->GetValue(0, 0)
                  .OpEquals(ValueFactory::GetIntegerValue(50))
                  .IsTrue());
  EXPECT_TRUE(result_tile->GetValue(0, 1)
                  .OpEquals(ValueFactory::GetIntegerValue(10))
                  .IsTrue());

  EXPECT_TRUE(result_tile->GetValue(0, 2)
                  .OpLessThanOrEqual(ValueFactory::GetIntegerValue(3))
                  .IsTrue());
}
void TimestampOrderingTransactionManager::PerformUpdate(
    TransactionContext *const current_txn, const ItemPointer &location,
    const ItemPointer &new_location) {
  PL_ASSERT(current_txn->GetIsolationLevel() != IsolationLevelType::READ_ONLY);

  ItemPointer old_location = location;

  LOG_TRACE("Performing Update old tuple %u %u", old_location.block,
            old_location.offset);
  LOG_TRACE("Performing Update new tuple %u %u", new_location.block,
            new_location.offset);

  auto &manager = catalog::Manager::GetInstance();

  auto tile_group_header =
      manager.GetTileGroup(old_location.block)->GetHeader();
  auto new_tile_group_header =
      manager.GetTileGroup(new_location.block)->GetHeader();

  auto transaction_id = current_txn->GetTransactionId();
  // if we can perform update, then we must have already locked the older
  // version.
  PL_ASSERT(tile_group_header->GetTransactionId(old_location.offset) ==
            transaction_id);
  PL_ASSERT(tile_group_header->GetPrevItemPointer(old_location.offset)
                .IsNull() == true);

  // check whether the new version is empty.
  PL_ASSERT(new_tile_group_header->GetTransactionId(new_location.offset) ==
            INVALID_TXN_ID);
  PL_ASSERT(new_tile_group_header->GetBeginCommitId(new_location.offset) ==
            MAX_CID);
  PL_ASSERT(new_tile_group_header->GetEndCommitId(new_location.offset) ==
            MAX_CID);

  // if the executor doesn't call PerformUpdate after AcquireOwnership,
  // no one will possibly release the write lock acquired by this txn.

  // Set double linked list
  tile_group_header->SetPrevItemPointer(old_location.offset, new_location);

  new_tile_group_header->SetNextItemPointer(new_location.offset, old_location);

  new_tile_group_header->SetTransactionId(new_location.offset, transaction_id);

  // we should guarantee that the newer version is all set before linking the
  // newer version to older version.
  COMPILER_MEMORY_FENCE;

  InitTupleReserved(new_tile_group_header, new_location.offset);

  // we must be updating the latest version.
  // Set the header information for the new version
  ItemPointer *index_entry_ptr =
      tile_group_header->GetIndirection(old_location.offset);

  // if there's no primary index on a table, then index_entry_ptr == nullptr.
  if (index_entry_ptr != nullptr) {
    new_tile_group_header->SetIndirection(new_location.offset, index_entry_ptr);

    // Set the index header in an atomic way.
    // We do it atomically because we don't want anyone to see a half-done
    // pointer. In case of contention, no one else can update this pointer
    // while we are updating it, because we are holding the write lock.
    // This update should succeed on its first try.
    UNUSED_ATTRIBUTE auto res =
        AtomicUpdateItemPointer(index_entry_ptr, new_location);
    PL_ASSERT(res == true);
  }

  // Add the old tuple into the update set
  current_txn->RecordUpdate(old_location);

  // Increment table update op stats
  if (static_cast<StatsType>(settings::SettingsManager::GetInt(settings::SettingId::stats_mode)) !=
      StatsType::INVALID) {
    stats::BackendStatsContext::GetInstance()->IncrementTableUpdates(
        new_location.block);
  }
}
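The COMPILER_MEMORY_FENCE calls implement a publish pattern: fully initialize the new version, fence, then link it, so a concurrent reader can never follow a pointer to a half-initialized version. A stripped-down analogue in standard C++ atomics (Peloton's macro is a compiler barrier; release/acquire is the portable equivalent):

#include <atomic>

struct ToyVersion {
  long begin_cid = -1;                      // illustrative header fields
  long end_cid = -1;
  std::atomic<ToyVersion *> next{nullptr};  // link to the older version
};

// Initialize every field of the new version *before* making it reachable;
// the release store orders those writes for readers that load the chain
// head with std::memory_order_acquire.
void PublishNewVersion(std::atomic<ToyVersion *> &chain_head,
                       ToyVersion *fresh, ToyVersion *older) {
  fresh->begin_cid = 0;
  fresh->end_cid = 0;
  fresh->next.store(older, std::memory_order_relaxed);
  chain_head.store(fresh, std::memory_order_release);  // publish last
}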
void TileGroupHeader::PrintVisibility(txn_id_t txn_id, cid_t at_cid) {
  oid_t active_tuple_slots = GetNextTupleSlot();
  std::stringstream os;

  os << "\t-----------------------------------------------------------\n";

  for (oid_t header_itr = 0; header_itr < active_tuple_slots; header_itr++) {
    bool own = (txn_id == GetTransactionId(header_itr));
    bool activated = (at_cid >= GetBeginCommitId(header_itr));
    bool invalidated = (at_cid >= GetEndCommitId(header_itr));

    txn_id_t tuple_txn_id = GetTransactionId(header_itr);
    cid_t beg_commit_id = GetBeginCommitId(header_itr);
    cid_t end_commit_id = GetEndCommitId(header_itr);
    bool insert_commit = GetInsertCommit(header_itr);
    bool delete_commit = GetDeleteCommit(header_itr);

    int width = 10;

    os << "\tslot :: " << std::setw(width) << header_itr;

    os << " txn id : ";
    if (tuple_txn_id == MAX_TXN_ID)
      os << std::setw(width) << "MAX_TXN_ID";
    else
      os << std::setw(width) << tuple_txn_id;

    os << " beg cid : ";
    if (beg_commit_id == MAX_CID)
      os << std::setw(width) << "MAX_CID";
    else
      os << std::setw(width) << beg_commit_id;

    os << " end cid : ";
    if (end_commit_id == MAX_CID)
      os << std::setw(width) << "MAX_CID";
    else
      os << std::setw(width) << end_commit_id;

    os << " insert commit : ";
    if (insert_commit == true)
      os << "O";
    else
      os << "X";

    os << " delete commit : ";
    if (delete_commit == true)
      os << "O";
    else
      os << "X";

    peloton::ItemPointer location = GetPrevItemPointer(header_itr);
    os << " prev : "
       << "[ " << location.block << " , " << location.offset << " ]";  //<<

    os << " own : " << own;
    os << " activated : " << activated;
    os << " invalidated : " << invalidated << " ";

    // Visible iff past Insert || Own Insert
    if ((!own && activated && !invalidated) ||
        (own && !activated && !invalidated))
      os << "\t\t[ true  ]\n";
    else
      os << "\t\t[ false ]\n";
  }

  os << "\t-----------------------------------------------------------\n";

  std::cout << os.str();
}
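The visibility rule printed for each slot condenses into one predicate over the three booleans computed above:

// Visible iff some earlier transaction inserted it (activated and not yet
// invalidated), or the current transaction inserted it itself (an own
// insert is visible to its writer even though it is not yet activated).
inline bool SlotVisible(bool own, bool activated, bool invalidated) {
  return (!own && activated && !invalidated) ||
         (own && !activated && !invalidated);
}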
bool HybridScanExecutor::ExecPrimaryIndexLookup() {
  assert(!index_done_);

  std::vector<ItemPointer *> tuple_location_ptrs;

  assert(index_->GetIndexType() == INDEX_CONSTRAINT_TYPE_PRIMARY_KEY);

  if (0 == key_column_ids_.size()) {
    index_->ScanAllKeys(tuple_location_ptrs);
  } else {
    index_->Scan(values_, key_column_ids_, expr_types_,
                 SCAN_DIRECTION_TYPE_FORWARD, tuple_location_ptrs);
  }

  LOG_INFO("Tuple_locations.size(): %lu", tuple_location_ptrs.size());

  auto &transaction_manager =
      concurrency::TransactionManagerFactory::GetInstance();

  if (tuple_location_ptrs.size() == 0) {
    index_done_ = true;
    return false;
  }

  //std::set<oid_t> oid_ts;

  std::map<oid_t, std::vector<oid_t>> visible_tuples;
  // for every tuple that is found in the index.
  for (auto tuple_location_ptr : tuple_location_ptrs) {
    ItemPointer tuple_location = *tuple_location_ptr;

    if (type_ == planner::HYBRID &&
        tuple_location.block >= (block_threshold)) {
      item_pointers_.insert(tuple_location);
      //  oid_ts.insert(tuple_location.block);
    }

    auto &manager = catalog::Manager::GetInstance();
    auto tile_group = manager.GetTileGroup(tuple_location.block);
    auto tile_group_header = tile_group.get()->GetHeader();

    // perform transaction read
    size_t chain_length = 0;
    while (true) {

      ++chain_length;

      if (transaction_manager.IsVisible(tile_group_header,
                                        tuple_location.offset)) {
        visible_tuples[tuple_location.block].push_back(tuple_location.offset);
        auto res = transaction_manager.PerformRead(tuple_location);
        if (!res) {
          transaction_manager.SetTransactionResult(RESULT_FAILURE);
          return res;
        }
        break;
      } else {
        ItemPointer old_item = tuple_location;
        cid_t old_end_cid = tile_group_header->GetEndCommitId(old_item.offset);

        tuple_location = tile_group_header->GetNextItemPointer(old_item.offset);
        // there must exist a visible version.
        assert(tuple_location.IsNull() == false);

        cid_t max_committed_cid = transaction_manager.GetMaxCommittedCid();

        // check whether older version is garbage.
        if (old_end_cid < max_committed_cid) {
          assert(tile_group_header->GetTransactionId(old_item.offset) == INITIAL_TXN_ID ||
                 tile_group_header->GetTransactionId(old_item.offset) == INVALID_TXN_ID);

          if (tile_group_header->SetAtomicTransactionId(old_item.offset, INVALID_TXN_ID) == true) {
            // atomically swap item pointer held in the index bucket.
            AtomicUpdateItemPointer(tuple_location_ptr, tuple_location);

            // currently, let's assume only primary index exists.
            //gc::GCManagerFactory::GetInstance().RecycleTupleSlot(
            //  table_->GetOid(), old_item.block, old_item.offset,
            //  max_committed_cid);
          }
        }

        tile_group = manager.GetTileGroup(tuple_location.block);
        tile_group_header = tile_group.get()->GetHeader();
      }
    }
  }

  // Construct a logical tile for each block
  for (auto tuples : visible_tuples) {
    auto &manager = catalog::Manager::GetInstance();
    auto tile_group = manager.GetTileGroup(tuples.first);

    std::unique_ptr<LogicalTile> logical_tile(LogicalTileFactory::GetTile());
    // Add relevant columns to logical tile
    logical_tile->AddColumns(tile_group, full_column_ids_);
    logical_tile->AddPositionList(std::move(tuples.second));
    if (column_ids_.size() != 0) {
      logical_tile->ProjectColumns(full_column_ids_, column_ids_);
    }

    result_.push_back(logical_tile.release());
  }

  index_done_ = true;

  LOG_TRACE("Result tiles : %lu", result_.size());

  return true;
}
bool HybridScanExecutor::ExecPrimaryIndexLookup() {
  PL_ASSERT(index_done_ == false);

  const planner::HybridScanPlan &node = GetPlanNode<planner::HybridScanPlan>();
  bool acquire_owner = GetPlanNode<planner::AbstractScan>().IsForUpdate();

  auto key_column_ids_ = node.GetKeyColumnIds();
  auto expr_type_ = node.GetExprTypes();

  std::vector<ItemPointer *> tuple_location_ptrs;

  PL_ASSERT(index_->GetIndexType() == INDEX_CONSTRAINT_TYPE_PRIMARY_KEY);

  if (0 == key_column_ids_.size()) {
    LOG_TRACE("Scan all keys");
    index_->ScanAllKeys(tuple_location_ptrs);
  } else {
    LOG_TRACE("Scan");
    index_->Scan(values_,
                 key_column_ids_,
                 expr_type_,
                 SCAN_DIRECTION_TYPE_FORWARD,
                 tuple_location_ptrs,
                 &node.GetIndexPredicate().GetConjunctionList()[0]);
  }

  LOG_TRACE("Result tuple count: %lu", tuple_location_ptrs.size());

  auto &transaction_manager =
      concurrency::TransactionManagerFactory::GetInstance();

  auto current_txn = executor_context_->GetTransaction();

  if (tuple_location_ptrs.size() == 0) {
    index_done_ = true;
    return false;
  }

  std::map<oid_t, std::vector<oid_t>> visible_tuples;

  // for every tuple that is found in the index.
  for (auto tuple_location_ptr : tuple_location_ptrs) {
    ItemPointer tuple_location = *tuple_location_ptr;

    if (type_ == HYBRID_SCAN_TYPE_HYBRID &&
        tuple_location.block >= (block_threshold)) {
      item_pointers_.insert(tuple_location);
    }

    auto &manager = catalog::Manager::GetInstance();
    auto tile_group = manager.GetTileGroup(tuple_location.block);
    auto tile_group_header = tile_group.get()->GetHeader();

    // perform transaction read
    size_t chain_length = 0;
    while (true) {
      ++chain_length;

      auto visibility = transaction_manager.IsVisible(current_txn, tile_group_header, tuple_location.offset);

      if (visibility == VISIBILITY_OK) {

        visible_tuples[tuple_location.block].push_back(tuple_location.offset);
        auto res = transaction_manager.PerformRead(current_txn, tuple_location, acquire_owner);
        if (!res) {
          transaction_manager.SetTransactionResult(current_txn, RESULT_FAILURE);
          return res;
        }
        break;
      } else {
        ItemPointer old_item = tuple_location;
        cid_t old_end_cid = tile_group_header->GetEndCommitId(old_item.offset);

        tuple_location = tile_group_header->GetNextItemPointer(old_item.offset);
        // there must exist a visible version.
        assert(tuple_location.IsNull() == false);

        cid_t max_committed_cid = transaction_manager.GetMaxCommittedCid();

        // check whether older version is garbage.
        if (old_end_cid < max_committed_cid) {
          assert(tile_group_header->GetTransactionId(old_item.offset) ==
              INITIAL_TXN_ID ||
              tile_group_header->GetTransactionId(old_item.offset) ==
                  INVALID_TXN_ID);

          if (tile_group_header->SetAtomicTransactionId(
              old_item.offset, INVALID_TXN_ID) == true) {
            // atomically swap item pointer held in the index bucket.
            AtomicUpdateItemPointer(tuple_location_ptr, tuple_location);
          }
        }

        tile_group = manager.GetTileGroup(tuple_location.block);
        tile_group_header = tile_group.get()->GetHeader();
      }
    }
  }

  // Construct a logical tile for each block
  for (auto tuples : visible_tuples) {
    auto &manager = catalog::Manager::GetInstance();
    auto tile_group = manager.GetTileGroup(tuples.first);

    std::unique_ptr<LogicalTile> logical_tile(LogicalTileFactory::GetTile());

    // Add relevant columns to logical tile
    logical_tile->AddColumns(tile_group, full_column_ids_);
    logical_tile->AddPositionList(std::move(tuples.second));

    if (column_ids_.size() != 0) {
      logical_tile->ProjectColumns(full_column_ids_, column_ids_);
    }

    result_.push_back(logical_tile.release());
  }

  index_done_ = true;

  LOG_TRACE("Result tiles : %lu", result_.size());

  return true;
}
TEST(AggregateTests, HashDistinctTest) {
  /*
   * SELECT d, a, b, c FROM table GROUP BY a, b, c, d;
   */

  const int tuple_count = TESTS_TUPLES_PER_TILEGROUP;

  // Create a table and wrap it in logical tiles
  auto &txn_manager = concurrency::TransactionManager::GetInstance();
  auto txn = txn_manager.BeginTransaction();
  auto txn_id = txn->GetTransactionId();
  std::unique_ptr<storage::DataTable> data_table(
      ExecutorTestsUtil::CreateTable(tuple_count, false));
  ExecutorTestsUtil::PopulateTable(txn, data_table.get(),
                                   2 * tuple_count, false,
                                   true,
                                   true);  // let it be random
  txn_manager.CommitTransaction();

  std::unique_ptr<executor::LogicalTile> source_logical_tile1(
      executor::LogicalTileFactory::WrapTileGroup(data_table->GetTileGroup(0),
                                                  txn_id));

  std::unique_ptr<executor::LogicalTile> source_logical_tile2(
      executor::LogicalTileFactory::WrapTileGroup(data_table->GetTileGroup(1),
                                                  txn_id));

  // (1-5) Setup plan node

  // 1) Set up group-by columns
  std::vector<oid_t> group_by_columns = {0, 1, 2, 3};

  // 2) Set up project info
  planner::ProjectInfo::DirectMapList direct_map_list = {
      {0, {0, 3}}, {1, {0, 0}}, {2, {0, 1}}, {3, {0, 2}}};
  auto proj_info = new planner::ProjectInfo(planner::ProjectInfo::TargetList(),
                                            std::move(direct_map_list));

  // 3) Set up unique aggregates (empty)
  std::vector<planner::AggregatePlan::AggTerm> agg_terms;

  // 4) Set up predicate (empty)
  expression::AbstractExpression* predicate = nullptr;

  // 5) Create output table schema
  auto data_table_schema = data_table.get()->GetSchema();
  std::vector<oid_t> set = {3, 0, 1, 2};
  std::vector<catalog::Column> columns;
  for (auto column_index : set) {
    columns.push_back(data_table_schema->GetColumn(column_index));
  }

  auto output_table_schema = new catalog::Schema(columns);

  // OK) Create the plan node
  planner::AggregatePlan node(proj_info, predicate, std::move(agg_terms),
                              std::move(group_by_columns), output_table_schema,
                              AGGREGATE_TYPE_HASH);

  // Create and set up executor
  auto txn2 = txn_manager.BeginTransaction();
  std::unique_ptr<executor::ExecutorContext> context(
      new executor::ExecutorContext(txn2));

  executor::AggregateExecutor executor(&node, context.get());
  MockExecutor child_executor;
  executor.AddChild(&child_executor);

  EXPECT_CALL(child_executor, DInit()).WillOnce(Return(true));

  EXPECT_CALL(child_executor, DExecute())
      .WillOnce(Return(true))
      .WillOnce(Return(true))
      .WillOnce(Return(false));

  EXPECT_CALL(child_executor, GetOutput())
      .WillOnce(Return(source_logical_tile1.release()))
      .WillOnce(Return(source_logical_tile2.release()));

  EXPECT_TRUE(executor.Init());

  EXPECT_TRUE(executor.Execute());
  txn_manager.CommitTransaction();

  /* Verify result */
  std::unique_ptr<executor::LogicalTile> result_tile(executor.GetOutput());
  EXPECT_TRUE(result_tile.get() != nullptr);

  for (auto tuple_id : *result_tile) {
    int colA = ValuePeeker::PeekAsInteger(result_tile->GetValue(tuple_id, 1));
    (void)colA;
  }
}
Result PessimisticTxnManager::CommitTransaction() {
  LOG_TRACE("Committing peloton txn : %lu ", current_txn->GetTransactionId());

  auto &manager = catalog::Manager::GetInstance();

  auto &rw_set = current_txn->GetRWSet();

  //*****************************************************
  // we can optimize read-only transaction.
  if (current_txn->IsReadOnly() == true) {
    // validate read set.
    for (auto &tile_group_entry : rw_set) {
      oid_t tile_group_id = tile_group_entry.first;
      auto tile_group = manager.GetTileGroup(tile_group_id);
      auto tile_group_header = tile_group->GetHeader();
      for (auto &tuple_entry : tile_group_entry.second) {
        auto tuple_slot = tuple_entry.first;
        // if this tuple is not newly inserted.
        if (tuple_entry.second == RW_TYPE_READ) {
          // Release read locks
          if (pessimistic_released_rdlock.find(tile_group_id) ==
                  pessimistic_released_rdlock.end() ||
              pessimistic_released_rdlock[tile_group_id].find(tuple_slot) ==
                  pessimistic_released_rdlock[tile_group_id].end()) {
            ReleaseReadLock(tile_group_header, tuple_slot);
            pessimistic_released_rdlock[tile_group_id].insert(tuple_slot);
          }
        } else {
          assert(tuple_entry.second == RW_TYPE_INS_DEL);
        }
      }
    }
    // is it always true???
    Result ret = current_txn->GetResult();
    EndTransaction();
    return ret;
  }
  //*****************************************************

  // generate transaction id.
  cid_t end_commit_id = GetNextCommitId();

  auto &log_manager = logging::LogManager::GetInstance();
  log_manager.LogBeginTransaction(end_commit_id);

  // install everything.
  for (auto &tile_group_entry : rw_set) {
    oid_t tile_group_id = tile_group_entry.first;
    auto tile_group = manager.GetTileGroup(tile_group_id);
    auto tile_group_header = tile_group->GetHeader();
    for (auto &tuple_entry : tile_group_entry.second) {
      auto tuple_slot = tuple_entry.first;
      if (tuple_entry.second == RW_TYPE_READ) {
        // Release read locks
        if (pessimistic_released_rdlock.find(tile_group_id) ==
                pessimistic_released_rdlock.end() ||
            pessimistic_released_rdlock[tile_group_id].find(tuple_slot) ==
                pessimistic_released_rdlock[tile_group_id].end()) {
          ReleaseReadLock(tile_group_header, tuple_slot);
          pessimistic_released_rdlock[tile_group_id].insert(tuple_slot);
        }
      } else if (tuple_entry.second == RW_TYPE_UPDATE) {
        // we must guarantee that, at any time point, only one version is
        // visible.
        ItemPointer new_version =
            tile_group_header->GetNextItemPointer(tuple_slot);
        ItemPointer old_version(tile_group_id, tuple_slot);

        // logging.
        log_manager.LogUpdate(current_txn, end_commit_id, old_version,
                              new_version);

        auto new_tile_group_header =
            manager.GetTileGroup(new_version.block)->GetHeader();
        
        new_tile_group_header->SetEndCommitId(new_version.offset, MAX_CID);
        new_tile_group_header->SetBeginCommitId(new_version.offset,
                                                end_commit_id);

        COMPILER_MEMORY_FENCE;

        tile_group_header->SetEndCommitId(tuple_slot, end_commit_id);
        
        COMPILER_MEMORY_FENCE;

        new_tile_group_header->SetTransactionId(new_version.offset,
                                                INITIAL_TXN_ID);
        tile_group_header->SetTransactionId(tuple_slot, INITIAL_TXN_ID);

      } else if (tuple_entry.second == RW_TYPE_DELETE) {
        ItemPointer new_version =
            tile_group_header->GetNextItemPointer(tuple_slot);
        ItemPointer delete_location(tile_group_id, tuple_slot);
        
        // logging.
        log_manager.LogDelete(end_commit_id, delete_location);

        // we do not change begin cid for old tuple.
        auto new_tile_group_header =
            manager.GetTileGroup(new_version.block)->GetHeader();

        new_tile_group_header->SetEndCommitId(new_version.offset, MAX_CID);
        new_tile_group_header->SetBeginCommitId(new_version.offset,
                                                end_commit_id);
        
        COMPILER_MEMORY_FENCE;

        tile_group_header->SetEndCommitId(tuple_slot, end_commit_id);
        
        COMPILER_MEMORY_FENCE;

        new_tile_group_header->SetTransactionId(new_version.offset,
                                                INVALID_TXN_ID);
        tile_group_header->SetTransactionId(tuple_slot, INITIAL_TXN_ID);

      } else if (tuple_entry.second == RW_TYPE_INSERT) {
        assert(tile_group_header->GetTransactionId(tuple_slot) ==
               current_txn->GetTransactionId());
        // set the begin commit id to persist insert
        ItemPointer insert_location(tile_group_id, tuple_slot);
        log_manager.LogInsert(current_txn, end_commit_id, insert_location);

        tile_group_header->SetEndCommitId(tuple_slot, MAX_CID);
        tile_group_header->SetBeginCommitId(tuple_slot, end_commit_id);

        COMPILER_MEMORY_FENCE;

        tile_group_header->SetTransactionId(tuple_slot, INITIAL_TXN_ID);

      } else if (tuple_entry.second == RW_TYPE_INS_DEL) {
        assert(tile_group_header->GetTransactionId(tuple_slot) ==
               current_txn->GetTransactionId());

        tile_group_header->SetEndCommitId(tuple_slot, MAX_CID);
        tile_group_header->SetBeginCommitId(tuple_slot, MAX_CID);

        COMPILER_MEMORY_FENCE;

        // set the begin commit id to persist insert
        tile_group_header->SetTransactionId(tuple_slot, INVALID_TXN_ID);
      }
    }
  }
  log_manager.LogCommitTransaction(end_commit_id);

  EndTransaction();

  pessimistic_released_rdlock.clear();

  return Result::RESULT_SUCCESS;
}
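Both loops in the commit path guard ReleaseReadLock with the same two-level find on pessimistic_released_rdlock so each read lock is released at most once. Since std::set::insert already reports whether the element was new, the idiom collapses into a single call; a hypothetical helper:

#include <cstdint>
#include <map>
#include <set>

// Returns true exactly once per (tile_group, tuple_slot); the caller should
// release the read lock only on that first call.
bool MarkReadLockReleased(std::map<uint32_t, std::set<uint32_t>> &released,
                          uint32_t tile_group_id, uint32_t tuple_slot) {
  return released[tile_group_id].insert(tuple_slot).second;
}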
Example #16
void ExecuteJoinTest(PlanNodeType join_algorithm, PelotonJoinType join_type, oid_t join_test_type) {
  //===--------------------------------------------------------------------===//
  // Mock table scan executors
  //===--------------------------------------------------------------------===//

  MockExecutor left_table_scan_executor, right_table_scan_executor;

  // Create a table and wrap it in logical tile
  size_t tile_group_size = TESTS_TUPLES_PER_TILEGROUP;
  size_t left_table_tile_group_count = 3;
  size_t right_table_tile_group_count = 2;

  auto &txn_manager = concurrency::TransactionManager::GetInstance();
  auto txn = txn_manager.BeginTransaction();
  auto txn_id = txn->GetTransactionId();

  // Left table has 3 tile groups
  std::unique_ptr<storage::DataTable> left_table(
      ExecutorTestsUtil::CreateTable(tile_group_size));
  ExecutorTestsUtil::PopulateTable(
      txn, left_table.get(),
      tile_group_size * left_table_tile_group_count, false,
      false, false);

  // Right table has 2 tile groups
  std::unique_ptr<storage::DataTable> right_table(
      ExecutorTestsUtil::CreateTable(tile_group_size));
  ExecutorTestsUtil::PopulateTable(
      txn, right_table.get(),
      tile_group_size * right_table_tile_group_count, false,
      false, false);

  txn_manager.CommitTransaction();

  //std::cout << (*left_table);

  //std::cout << (*right_table);

  // Wrap the input tables with logical tiles
  std::unique_ptr<executor::LogicalTile> left_table_logical_tile1(
      executor::LogicalTileFactory::WrapTileGroup(left_table->GetTileGroup(0),
                                                  txn_id));
  std::unique_ptr<executor::LogicalTile> left_table_logical_tile2(
      executor::LogicalTileFactory::WrapTileGroup(left_table->GetTileGroup(1),
                                                  txn_id));
  std::unique_ptr<executor::LogicalTile> left_table_logical_tile3(
      executor::LogicalTileFactory::WrapTileGroup(left_table->GetTileGroup(2),
                                                  txn_id));

  std::unique_ptr<executor::LogicalTile> right_table_logical_tile1(
      executor::LogicalTileFactory::WrapTileGroup(
          right_table->GetTileGroup(0),
          txn_id));

  std::unique_ptr<executor::LogicalTile> right_table_logical_tile2(
      executor::LogicalTileFactory::WrapTileGroup(
          right_table->GetTileGroup(1),
          txn_id));

  // Left scan executor returns logical tiles from the left table

  EXPECT_CALL(left_table_scan_executor, DInit()).WillOnce(Return(true));

  //===--------------------------------------------------------------------===//
  // Setup left table
  //===--------------------------------------------------------------------===//

  if (join_test_type == BASIC_TEST) {

    EXPECT_CALL(left_table_scan_executor, DExecute())
        .WillOnce(Return(true))
        .WillOnce(Return(true))
        .WillOnce(Return(true))
        .WillOnce(Return(false));

    EXPECT_CALL(left_table_scan_executor, GetOutput())
        .WillOnce(Return(left_table_logical_tile1.release()))
        .WillOnce(Return(left_table_logical_tile2.release()))
        .WillOnce(Return(left_table_logical_tile3.release()));

  }

  // Right scan executor returns logical tiles from the right table

  EXPECT_CALL(right_table_scan_executor, DInit()).WillOnce(Return(true));

  //===--------------------------------------------------------------------===//
  // Setup right table
  //===--------------------------------------------------------------------===//

  if (join_test_type == BASIC_TEST) {

    EXPECT_CALL(right_table_scan_executor, DExecute())
        .WillOnce(Return(true))
        .WillOnce(Return(true))
        .WillOnce(Return(false));

    EXPECT_CALL(right_table_scan_executor, GetOutput())
        .WillOnce(Return(right_table_logical_tile1.release()))
        .WillOnce(Return(right_table_logical_tile2.release()));

  }

  //===--------------------------------------------------------------------===//
  // Setup join plan nodes and executors and run them
  //===--------------------------------------------------------------------===//

  oid_t result_tuple_count = 0;
  oid_t tuples_with_null = 0;
  auto projection = JoinTestsUtil::CreateProjection();

  // Construct predicate
  expression::AbstractExpression *predicate =
      JoinTestsUtil::CreateJoinPredicate();

  // Differ based on join algorithm
  switch (join_algorithm) {
    case PLAN_NODE_TYPE_NESTLOOP: {
      // Create nested loop join plan node.
      planner::NestedLoopJoinPlan nested_loop_join_node(join_type, predicate,
                                                        projection);

      // Run the nested loop join executor
      executor::NestedLoopJoinExecutor nested_loop_join_executor(
          &nested_loop_join_node, nullptr);

      // Construct the executor tree
      nested_loop_join_executor.AddChild(&left_table_scan_executor);
      nested_loop_join_executor.AddChild(&right_table_scan_executor);

      // Run the nested loop join executor
      EXPECT_TRUE(nested_loop_join_executor.Init());
      while (nested_loop_join_executor.Execute() == true) {
        std::unique_ptr<executor::LogicalTile> result_logical_tile(
            nested_loop_join_executor.GetOutput());

        if (result_logical_tile != nullptr) {
          result_tuple_count += result_logical_tile->GetTupleCount();
          tuples_with_null +=
              CountTuplesWithNullFields(result_logical_tile.get());
          //std::cout << (*result_logical_tile);
        }
      }

    } break;

    case PLAN_NODE_TYPE_MERGEJOIN: {
      // Create join clauses
      std::vector<planner::MergeJoinPlan::JoinClause> join_clauses;
      join_clauses = CreateJoinClauses();

      // Create merge join plan node
      planner::MergeJoinPlan merge_join_node(join_type, predicate, projection,
                                             join_clauses);

      // Construct the merge join executor
      executor::MergeJoinExecutor merge_join_executor(&merge_join_node,
                                                      nullptr);

      // Construct the executor tree
      merge_join_executor.AddChild(&left_table_scan_executor);
      merge_join_executor.AddChild(&right_table_scan_executor);

      // Run the merge join executor
      EXPECT_TRUE(merge_join_executor.Init());
      while (merge_join_executor.Execute() == true) {
        std::unique_ptr<executor::LogicalTile> result_logical_tile(
            merge_join_executor.GetOutput());

        if (result_logical_tile != nullptr) {
          result_tuple_count += result_logical_tile->GetTupleCount();
          tuples_with_null +=
              CountTuplesWithNullFields(result_logical_tile.get());
          //std::cout << (*result_logical_tile);
        }
      }

    } break;

    case PLAN_NODE_TYPE_HASHJOIN: {
      // Create hash plan node
      expression::AbstractExpression *right_table_attr_1 =
          new expression::TupleValueExpression(1, 1);

      std::vector<std::unique_ptr<const expression::AbstractExpression> >
          hash_keys;
      hash_keys.emplace_back(right_table_attr_1);

      // Create hash plan node
      planner::HashPlan hash_plan_node(hash_keys);

      // Construct the hash executor
      executor::HashExecutor hash_executor(&hash_plan_node, nullptr);

      // Create hash join plan node.
      planner::HashJoinPlan hash_join_plan_node(join_type, predicate,
                                                projection);

      // Construct the hash join executor
      executor::HashJoinExecutor hash_join_executor(&hash_join_plan_node,
                                                    nullptr);

      // Construct the executor tree
      hash_join_executor.AddChild(&left_table_scan_executor);
      hash_join_executor.AddChild(&hash_executor);

      hash_executor.AddChild(&right_table_scan_executor);

      // Run the hash_join_executor
      EXPECT_TRUE(hash_join_executor.Init());
      while (hash_join_executor.Execute() == true) {
        std::unique_ptr<executor::LogicalTile> result_logical_tile(
            hash_join_executor.GetOutput());

        if (result_logical_tile != nullptr) {
          result_tuple_count += result_logical_tile->GetTupleCount();
          tuples_with_null +=
              CountTuplesWithNullFields(result_logical_tile.get());
          // std::cout << (*result_logical_tile);
        }
      }

    } break;

    default:
      throw Exception("Unsupported join algorithm : " +
                      std::to_string(join_algorithm));
      break;
  }

  //===--------------------------------------------------------------------===//
  // Execute test
  //===--------------------------------------------------------------------===//

  if (join_test_type == BASIC_TEST) {

    // Check output
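    // Assuming TESTS_TUPLES_PER_TILEGROUP is 5 (Peloton's test default), the
    // left table holds 3 * 5 = 15 rows and the right 2 * 5 = 10; the join
    // predicate matches 10 pairs, so the LEFT/OUTER cases add the 5
    // unmatched left rows padded with NULLs.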
    switch (join_type) {
      case JOIN_TYPE_INNER:
        EXPECT_EQ(result_tuple_count, 10);
        EXPECT_EQ(tuples_with_null, 0);
        break;

      case JOIN_TYPE_LEFT:
        EXPECT_EQ(result_tuple_count, 15);
        EXPECT_EQ(tuples_with_null, 5);
        break;

      case JOIN_TYPE_RIGHT:
        EXPECT_EQ(result_tuple_count, 10);
        EXPECT_EQ(tuples_with_null, 0);
        break;

      case JOIN_TYPE_OUTER:
        EXPECT_EQ(result_tuple_count, 15);
        EXPECT_EQ(tuples_with_null, 5);
        break;

      default:
        throw Exception("Unsupported join type : " + std::to_string(join_type));
        break;
    }

  }

}
Example #17
/**
 * @brief Build an executor tree and execute it.
 * Uses std::vector<Value> as params to make it more elegant for networking.
 * Before ExecutePlan, a node first receives a value list, so we pass the
 * value list directly rather than passing Postgres's ParamListInfo.
 * @return status of execution.
 */
peloton_status PlanExecutor::ExecutePlan(const planner::AbstractPlan *plan,
                                         const std::vector<Value> &params,
                                         TupleDesc tuple_desc) {
  peloton_status p_status;

  if (plan == nullptr) return p_status;

  LOG_TRACE("PlanExecutor Start ");

  bool status;
  bool init_failure = false;
  bool single_statement_txn = false;
  List *slots = NULL;

  auto &txn_manager = concurrency::TransactionManagerFactory::GetInstance();
  auto txn = peloton::concurrency::current_txn;
  // This happens for single statement queries in PG
  if (txn == nullptr) {
    single_statement_txn = true;
    txn = txn_manager.BeginTransaction();
  }
  PL_ASSERT(txn);

  LOG_TRACE("Txn ID = %lu ", txn->GetTransactionId());
  LOG_TRACE("Building the executor tree");

  // Use const std::vector<Value> &params to make it more elegant for network
  std::unique_ptr<executor::ExecutorContext> executor_context(
      BuildExecutorContext(params, txn));
  //auto executor_context = BuildExecutorContext(param_list, txn);

  // Build the executor tree
  std::unique_ptr<executor::AbstractExecutor> executor_tree(
      BuildExecutorTree(nullptr, plan, executor_context.get()));

  LOG_TRACE("Initializing the executor tree");

  // Initialize the executor tree
  status = executor_tree->Init();

  // Abort and cleanup
  if (status == false) {
    init_failure = true;
    txn->SetResult(Result::RESULT_FAILURE);
    goto cleanup;
  }

  LOG_TRACE("Running the executor tree");

  // Execute the tree until we get result tiles from root node
  for (;;) {
    status = executor_tree->Execute();

    // Stop
    if (status == false) {
      break;
    }

    std::unique_ptr<executor::LogicalTile> logical_tile(
        executor_tree->GetOutput());

    // Some executors don't return logical tiles (e.g., Update).
    if (logical_tile.get() == nullptr) {
      continue;
    }

    // Go over the logical tile
    for (oid_t tuple_id : *logical_tile) {
      expression::ContainerTuple<executor::LogicalTile> cur_tuple(
          logical_tile.get(), tuple_id);

      auto slot = TupleTransformer::GetPostgresTuple(&cur_tuple, tuple_desc);

      if (slot != nullptr) {
        slots = lappend(slots, slot);
      }
    }
  }

  // Set the result
  p_status.m_processed = executor_context->num_processed;
  p_status.m_result_slots = slots;

// final cleanup
cleanup:

  LOG_TRACE("About to commit: single stmt: %d, init_failure: %d, status: %d",
            single_statement_txn, init_failure, txn->GetResult());

  // should we commit or abort ?
  if (single_statement_txn == true || init_failure == true) {
    auto status = txn->GetResult();
    switch (status) {
      case Result::RESULT_SUCCESS:
        // Commit
        p_status.m_result = txn_manager.CommitTransaction();

        break;

      case Result::RESULT_FAILURE:
      default:
        // Abort
        p_status.m_result = txn_manager.AbortTransaction();
    }
  }

  // clean up executor tree
  CleanExecutorTree(executor_tree.get());

  return p_status;
}
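For context, a minimal sketch of how this overload might be driven from the bridge layer; the surrounding function, the empty parameter vector, and the cast of m_processed are illustrative assumptions, not part of the example above.

// Hypothetical caller: runs a prepared plan with no bound parameters.
// `plan` and `tuple_desc` are assumed to come from the Postgres bridge.
void RunPlanSketch(const planner::AbstractPlan *plan, TupleDesc tuple_desc) {
  std::vector<Value> params;  // no bound parameters for this statement
  peloton_status status = PlanExecutor::ExecutePlan(plan, params, tuple_desc);
  if (status.m_result == Result::RESULT_SUCCESS) {
    LOG_INFO("Plan processed %d tuples", (int)status.m_processed);
  }
}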
Example #18
/**
 * @brief Build an executor tree and execute it.
 * Uses std::vector<Value> as params to keep the networking path simple:
 * before ExecutePlan is called, the node has already received the value
 * list, so we pass it directly rather than Postgres's ParamListInfo.
 * @return number of executed tuples; result tiles are appended to
 * logical_tile_list.
 */
int PlanExecutor::ExecutePlan(const planner::AbstractPlan *plan,
    const std::vector<Value> &params,
    std::vector<std::unique_ptr<executor::LogicalTile>>& logical_tile_list) {

  if (plan == nullptr) return -1;

  LOG_TRACE("PlanExecutor Start ");

  bool status;
  bool init_failure = false;
  bool single_statement_txn = false;

  auto &txn_manager = concurrency::TransactionManagerFactory::GetInstance();
  auto txn = peloton::concurrency::current_txn;

  // This happens for single statement queries in PG
  if (txn == nullptr) {
    single_statement_txn = true;
    txn = txn_manager.BeginTransaction();
  }
  PL_ASSERT(txn);

  LOG_TRACE("Txn ID = %lu ", txn->GetTransactionId());
  LOG_TRACE("Building the executor tree");

  std::unique_ptr<executor::ExecutorContext> executor_context(
      BuildExecutorContext(params, txn));

  // Build the executor tree
  std::unique_ptr<executor::AbstractExecutor> executor_tree(
      BuildExecutorTree(nullptr, plan, executor_context.get()));

  LOG_TRACE("Initializing the executor tree");

  // Initialize the executor tree
  status = executor_tree->Init();

  // Abort and cleanup
  if (status == false) {
    init_failure = true;
    txn->SetResult(Result::RESULT_FAILURE);
    goto cleanup;
  }

  LOG_TRACE("Running the executor tree");

  // Execute the tree until we get result tiles from root node
  for (;;) {
    status = executor_tree->Execute();

    // Stop
    if (status == false) {
      break;
    }

    std::unique_ptr<executor::LogicalTile> logical_tile(
        executor_tree->GetOutput());

    // Some executors don't return logical tiles (e.g., Update).
    if (logical_tile.get() == nullptr) {
      continue;
    }

    logical_tile_list.push_back(std::move(logical_tile));
  }

// final cleanup
cleanup:

  LOG_TRACE("About to commit: single stmt: %d, init_failure: %d, status: %d",
            single_statement_txn, init_failure, txn->GetResult());

  // clean up executor tree
  CleanExecutorTree(executor_tree.get());

  // should we commit or abort ?
  if (single_statement_txn == true || init_failure == true) {
    auto status = txn->GetResult();
    switch (status) {
      case Result::RESULT_SUCCESS:
        // Commit
        return executor_context->num_processed;

      case Result::RESULT_FAILURE:
      default:
        // Abort
        return -1;
    }
  }
  }
  return executor_context->num_processed;
}
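A sketch of how the tile-returning overload above could be consumed; the wrapper function and variable names are assumptions for illustration.

// Hypothetical caller: collects all result tiles, then inspects them.
void CollectTilesSketch(const planner::AbstractPlan *plan) {
  std::vector<Value> params;
  std::vector<std::unique_ptr<executor::LogicalTile>> tiles;
  int processed = PlanExecutor::ExecutePlan(plan, params, tiles);
  if (processed < 0) {
    LOG_ERROR("Plan execution aborted");
    return;
  }
  for (auto &tile : tiles) {
    LOG_INFO("Result tile: %s", tile->GetInfo().c_str());
  }
}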
Example #19
/**
 * @brief Delete the table tuples using the position list in the logical tile.
 *
 * If truncate is on, then it will truncate the table itself.
 * @return true on success, false otherwise.
 */
bool DeleteExecutor::DExecute() {
  assert(target_table_);

  // Retrieve next tile.
  const bool success = children_[0]->Execute();
  if (!success) {
    return false;
  }

  std::unique_ptr<LogicalTile> source_tile(children_[0]->GetOutput());

  storage::Tile *tile = source_tile->GetBaseTile(0);
  storage::TileGroup *tile_group = tile->GetTileGroup();

  auto &pos_lists = source_tile->GetPositionLists();
  auto tile_group_id = tile_group->GetTileGroupId();
  auto transaction_ = executor_context_->GetTransaction();

  LOG_INFO("Source tile : %p Tuples : %lu \n", source_tile.get(),
           source_tile->GetTupleCount());

  LOG_INFO("Transaction ID: %lu\n", transaction_->GetTransactionId());

  // Delete each tuple
  for (oid_t visible_tuple_id : *source_tile) {
    oid_t physical_tuple_id = pos_lists[0][visible_tuple_id];

    LOG_INFO("Visible Tuple id : %lu, Physical Tuple id : %lu \n",
             visible_tuple_id, physical_tuple_id);

    peloton::ItemPointer delete_location(tile_group_id, physical_tuple_id);

    // Logging
    {
      auto &log_manager = logging::LogManager::GetInstance();

      if (log_manager.IsInLoggingMode()) {
        auto logger = log_manager.GetBackendLogger();
        auto record = logger->GetTupleRecord(
            LOGRECORD_TYPE_TUPLE_DELETE, transaction_->GetTransactionId(),
            target_table_->GetOid(), INVALID_ITEMPOINTER, delete_location);

        logger->Log(record);
      }
    }

    // try to delete the tuple
    // this might fail due to a concurrent operation that has latched the tuple
    bool status = target_table_->DeleteTuple(transaction_, delete_location);

    if (status == false) {
      LOG_INFO("Fail to delete. Set txn failure");
      transaction_->SetResult(peloton::Result::RESULT_FAILURE);
      return false;
    }

    executor_context_->num_processed += 1;  // deleted one
    transaction_->RecordDelete(delete_location);
  }

  return true;
}
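To place DExecute in context: a delete executor is normally given a scan child that produces the tuples to delete. A minimal wiring sketch follows; `delete_node`, `scan_node`, and `executor_context` are assumed to exist, built like the plan nodes and contexts elsewhere in these examples.

// Hypothetical wiring: the scan feeds tiles to the delete executor, and
// each DExecute() call above consumes one tile from the child.
executor::DeleteExecutor delete_executor(&delete_node, executor_context.get());
executor::SeqScanExecutor scan_executor(&scan_node, executor_context.get());
delete_executor.AddChild(&scan_executor);
if (delete_executor.Init()) {
  while (delete_executor.Execute()) {
    // tuples from the current tile have been deleted and logged
  }
}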
Example #20
/**
 * @brief Build an executor tree and execute it.
 * Uses std::vector<common::Value> as params to keep the networking path
 * simple: before ExecutePlan is called, the node has already received the
 * value list, so we pass it directly rather than Postgres's ParamListInfo.
 * @return status of execution.
 */
peloton_status PlanExecutor::ExecutePlan(
    const planner::AbstractPlan *plan,
    const std::vector<common::Value> &params, std::vector<ResultType> &result,
    const std::vector<int> &result_format) {
  peloton_status p_status;

  if (plan == nullptr) return p_status;

  LOG_TRACE("PlanExecutor Start ");

  bool status;
  bool init_failure = false;
  bool single_statement_txn = false;

  auto &txn_manager = concurrency::TransactionManagerFactory::GetInstance();
  // Unlike the earlier overloads, this entry point does not consult
  // peloton::concurrency::current_txn; it always starts its own
  // single-statement transaction.
  single_statement_txn = true;
  auto txn = txn_manager.BeginTransaction();
  PL_ASSERT(txn);

  LOG_TRACE("Txn ID = %lu ", txn->GetTransactionId());
  LOG_TRACE("Building the executor tree");

  // Use const std::vector<common::Value> &params to make it more elegant for
  // network
  std::unique_ptr<executor::ExecutorContext> executor_context(
      BuildExecutorContext(params, txn));

  // Build the executor tree
  std::unique_ptr<executor::AbstractExecutor> executor_tree(
      BuildExecutorTree(nullptr, plan, executor_context.get()));

  LOG_TRACE("Initializing the executor tree");

  // Initialize the executor tree
  status = executor_tree->Init();

  // Abort and cleanup
  if (status == false) {
    init_failure = true;
    txn->SetResult(Result::RESULT_FAILURE);
    goto cleanup;
  }

  LOG_TRACE("Running the executor tree");
  result.clear();

  // Execute the tree until we get result tiles from root node
  while (status == true) {
    status = executor_tree->Execute();

    std::unique_ptr<executor::LogicalTile> logical_tile(
        executor_tree->GetOutput());
    // Some executors don't return logical tiles (e.g., Update).
    if (logical_tile.get() != nullptr) {
      LOG_TRACE("Final Answer: %s",
                logical_tile->GetInfo().c_str());  // Printing the answers
      std::unique_ptr<catalog::Schema> output_schema(
          logical_tile->GetPhysicalSchema());  // Physical schema of the tile
      std::vector<std::vector<std::string>> answer_tuples;
      answer_tuples =
          std::move(logical_tile->GetAllValuesAsStrings(result_format));

      // Construct the returned results
      for (auto &tuple : answer_tuples) {
        unsigned int col_index = 0;
        auto &schema_columns = output_schema->GetColumns();
        for (auto &column : schema_columns) {
          auto column_name = column.GetName();
          auto res = ResultType();
          PlanExecutor::copyFromTo(column_name, res.first);
          LOG_TRACE("column name: %s", column_name.c_str());
          PlanExecutor::copyFromTo(tuple[col_index++], res.second);
          if (!tuple[col_index - 1].empty()) {
            LOG_TRACE("column content: %s", tuple[col_index - 1].c_str());
          }
          result.push_back(res);
        }
      }
    }
  }

  // Set the result
  p_status.m_processed = executor_context->num_processed;
  p_status.m_result_slots = nullptr;

// final cleanup
cleanup:

  LOG_TRACE("About to commit: single stmt: %d, init_failure: %d, status: %d",
            single_statement_txn, init_failure, txn->GetResult());

  // should we commit or abort ?
  if (single_statement_txn == true || init_failure == true) {
    auto status = txn->GetResult();
    switch (status) {
      case Result::RESULT_SUCCESS:
        // Commit
        LOG_TRACE("Commit Transaction");
        p_status.m_result = txn_manager.CommitTransaction(txn);
        break;

      case Result::RESULT_FAILURE:
      default:
        // Abort
        LOG_TRACE("Abort Transaction");
        p_status.m_result = txn_manager.AbortTransaction(txn);
    }
  }

  // clean up executor tree
  CleanExecutorTree(executor_tree.get());

  return p_status;
}
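A sketch of invoking this result-materializing overload; `plan`, `params`, and `num_columns` are assumed to be in scope, and the 0/text result format follows the Postgres wire-protocol convention the result_format vector appears to model.

// Hypothetical caller: request every column in text format (0), then
// read back (column name, value) pairs from `result`.
std::vector<ResultType> result;
std::vector<int> result_format(num_columns, 0);  // 0 = text format
peloton_status status =
    PlanExecutor::ExecutePlan(plan, params, result, result_format);
if (status.m_result == Result::RESULT_SUCCESS) {
  LOG_INFO("Rows processed: %d", (int)status.m_processed);
}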
Example #21
// Validate that MVCC storage is correct; assumes an oldest-to-newest chain.
// Invariants:
// 1. Transaction id should be either INVALID_TXN_ID or INITIAL_TXN_ID
// 2. Begin commit id should be <= end commit id
// 3. Timestamp consistency
// 4. Version doubly linked list consistency
static void ValidateMVCC_OldToNew(storage::DataTable *table) {
  auto &catalog_manager = catalog::Manager::GetInstance();
  LOG_INFO("Validating MVCC storage");
  int tile_group_count = table->GetTileGroupCount();
  LOG_INFO("The table has %d tile groups in the table", tile_group_count);

  for (int tile_group_offset = 0; tile_group_offset < tile_group_count;
       tile_group_offset++) {
    LOG_INFO("Validate tile group #%d", tile_group_offset);
    auto tile_group = table->GetTileGroup(tile_group_offset);
    auto tile_group_header = tile_group->GetHeader();
    size_t tuple_count = tile_group->GetAllocatedTupleCount();
    LOG_INFO("Tile group #%d has allocated %lu tuples", tile_group_offset,
             tuple_count);

    // 1. Transaction id should be either INVALID_TXN_ID or INITIAL_TXN_ID
    for (oid_t tuple_slot = 0; tuple_slot < tuple_count; tuple_slot++) {
      txn_id_t txn_id = tile_group_header->GetTransactionId(tuple_slot);
      EXPECT_TRUE(txn_id == INVALID_TXN_ID || txn_id == INITIAL_TXN_ID)
          << "Transaction id is neither INVALID_TXN_ID nor INITIAL_TXN_ID";
    }

    LOG_INFO("[OK] All tuples have valid txn id");

    // double avg_version_chain_length = 0.0;
    for (oid_t tuple_slot = 0; tuple_slot < tuple_count; tuple_slot++) {
      txn_id_t txn_id = tile_group_header->GetTransactionId(tuple_slot);
      cid_t begin_cid = tile_group_header->GetBeginCommitId(tuple_slot);
      cid_t end_cid = tile_group_header->GetEndCommitId(tuple_slot);
      ItemPointer next_location =
          tile_group_header->GetNextItemPointer(tuple_slot);
      ItemPointer prev_location =
          tile_group_header->GetPrevItemPointer(tuple_slot);

      // 2. Begin commit id should be <= end commit id
      EXPECT_TRUE(begin_cid <= end_cid)
          << "Tuple begin commit id is greater than its end commit id";

      // This test assumes an oldest-to-newest version chain
      if (txn_id != INVALID_TXN_ID) {
        EXPECT_TRUE(begin_cid != MAX_CID)
            << "A valid txn shouldn't have a MAX_CID begin commit id";

        // The version is an oldest version
        if (prev_location.IsNull()) {
          if (next_location.IsNull()) {
            EXPECT_EQ(end_cid, MAX_CID)
                << "Single version has a non MAX_CID end commit time";
          } else {
            cid_t prev_end_cid = end_cid;
            ItemPointer prev_location(tile_group->GetTileGroupId(), tuple_slot);
            while (!next_location.IsNull()) {
              auto next_tile_group =
                  catalog_manager.GetTileGroup(next_location.block);
              auto next_tile_group_header = next_tile_group->GetHeader();

              txn_id_t next_txn_id = next_tile_group_header->GetTransactionId(
                  next_location.offset);

              if (next_txn_id == INVALID_TXN_ID) {
                // If a version in the chain has an INVALID_TXN_ID, it must be
                // at the tail: either the tuple was deleted (an invalid
                // version was appended) or this new version was aborted.
                EXPECT_TRUE(
                    next_tile_group_header->GetNextItemPointer(
                                                next_location.offset).IsNull())
                    << "Invalid version in a version chain is not at the tail";
              }

              cid_t next_begin_cid = next_tile_group_header->GetBeginCommitId(
                  next_location.offset);
              cid_t next_end_cid =
                  next_tile_group_header->GetEndCommitId(next_location.offset);

              // 3. Timestamp consistency
              if (next_begin_cid == MAX_CID) {
                // It must be an aborted version, so it should be at the end
                // of the chain
                EXPECT_TRUE(
                    next_tile_group_header->GetNextItemPointer(
                                                next_location.offset).IsNull())
                    << "Version with MAX_CID begin cid is not the version tail";
              } else {
                EXPECT_EQ(prev_end_cid, next_begin_cid)
                    << "Prev end commit id should equal next begin commit id";
                ItemPointer next_prev_location =
                    next_tile_group_header->GetPrevItemPointer(
                        next_location.offset);

                // 4. Version doubly linked list consistency
                EXPECT_TRUE(next_prev_location.offset == prev_location.offset &&
                            next_prev_location.block == prev_location.block)
                    << "Next version's prev version does not match";
              }

              prev_location = next_location;
              prev_end_cid = next_end_cid;
              next_location = next_tile_group_header->GetNextItemPointer(
                  next_location.offset);
            }

            // Now prev_location is at the tail of the version chain
            ItemPointer last_location = prev_location;
            auto last_tile_group =
                catalog_manager.GetTileGroup(last_location.block);
            auto last_tile_group_header = last_tile_group->GetHeader();
            // txn_id_t last_txn_id =
            // last_tile_group_header->GetTransactionId(last_location.offset);
            cid_t last_end_cid =
                last_tile_group_header->GetEndCommitId(last_location.offset);
            EXPECT_TRUE(
                last_tile_group_header->GetNextItemPointer(last_location.offset)
                    .IsNull())
                << "Last version has a next pointer";

            EXPECT_EQ(last_end_cid, MAX_CID)
                << "Last version doesn't end with MAX_CID";
          }
        }
      } else {
        EXPECT_TRUE(tile_group_header->GetNextItemPointer(tuple_slot).IsNull())
            << "Invalid tuple must not have next item pointer";
      }
    }
    LOG_INFO("[OK] oldest-to-newest version chain validated");
  }
}
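The timestamp invariants are easiest to see on a concrete two-version chain. A minimal, self-contained illustration in plain C++ follows; it uses no Peloton types, and the commit ids are invented for the example.

#include <cassert>
#include <cstdint>
#include <limits>

int main() {
  const uint64_t MAX_CID = std::numeric_limits<uint64_t>::max();
  // One tuple, two committed versions, oldest to newest: A was superseded
  // by B at commit id 20.
  uint64_t begin_a = 10, end_a = 20;       // old version, no longer visible
  uint64_t begin_b = 20, end_b = MAX_CID;  // current version, still visible
  assert(begin_a <= end_a);  // invariant 2: begin <= end
  assert(end_a == begin_b);  // invariant 3: timestamps stitch across versions
  assert(end_b == MAX_CID);  // the chain tail has no successor yet
  return 0;
}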
Example #22
/**
 * @brief Creates logical tile from tile group and applies scan predicate.
 * @return true on success, false otherwise.
 */
bool SeqScanExecutor::DExecute() {
  // Scanning over a logical tile.
  if (children_.size() == 1) {
    // FIXME Check all requirements for children_.size() == 0 case.
    LOG_TRACE("Seq Scan executor :: 1 child \n");

    assert(target_table_ == nullptr);
    assert(column_ids_.size() == 0);

    while (children_[0]->Execute()) {
      std::unique_ptr<LogicalTile> tile(children_[0]->GetOutput());

      if (predicate_ != nullptr) {
        // Invalidate tuples that don't satisfy the predicate.
        for (oid_t tuple_id : *tile) {
          expression::ContainerTuple<LogicalTile> tuple(tile.get(), tuple_id);
          if (predicate_->Evaluate(&tuple, nullptr, executor_context_)
                  .IsFalse()) {
            tile->RemoveVisibility(tuple_id);
          }
        }
      }

      if (0 == tile->GetTupleCount()) {  // Avoid returning empty tiles
        continue;
      }

      /* Hopefully we needn't do projections here */
      SetOutput(tile.release());
      return true;
    }

    return false;
  }
  // Scanning a table
  else if (children_.size() == 0) {
    LOG_TRACE("Seq Scan executor :: 0 child \n");

    assert(target_table_ != nullptr);
    assert(column_ids_.size() > 0);

    // Retrieve next tile group.
    while (current_tile_group_offset_ < table_tile_group_count_) {
      auto tile_group =
          target_table_->GetTileGroup(current_tile_group_offset_++);

      storage::TileGroupHeader *tile_group_header = tile_group->GetHeader();

      auto transaction_ = executor_context_->GetTransaction();
      txn_id_t txn_id = transaction_->GetTransactionId();
      cid_t commit_id = transaction_->GetLastCommitId();
      oid_t active_tuple_count = tile_group->GetNextTupleSlot();

      // Print tile group visibility
      // tile_group_header->PrintVisibility(txn_id, commit_id);

      // Construct logical tile.
      std::unique_ptr<LogicalTile> logical_tile(LogicalTileFactory::GetTile());
      logical_tile->AddColumns(tile_group, column_ids_);

      // Construct position list by looping through tile group
      // and applying the predicate.
      std::vector<oid_t> position_list;
      for (oid_t tuple_id = 0; tuple_id < active_tuple_count; tuple_id++) {
        if (tile_group_header->IsVisible(tuple_id, txn_id, commit_id) ==
            false) {
          continue;
        }

        expression::ContainerTuple<storage::TileGroup> tuple(tile_group.get(),
                                                             tuple_id);
        if (predicate_ == nullptr) {
          position_list.push_back(tuple_id);
        } else {
          auto eval =
              predicate_->Evaluate(&tuple, nullptr, executor_context_).IsTrue();
          if (eval == true) position_list.push_back(tuple_id);
        }
      }

      logical_tile->AddPositionList(std::move(position_list));

      // Don't return empty tiles
      if (0 == logical_tile->GetTupleCount()) {
        continue;
      }

      SetOutput(logical_tile.release());
      return true;
    }
  }

  return false;
}
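A usage sketch for the table-scanning path of DExecute; the plan-node construction and the in-scope `target_table`, `predicate`, `column_ids`, and `executor_context` are assumptions based on how the other executors in these examples are wired.

// Hypothetical driver: drain the scan one logical tile at a time.
planner::SeqScanPlan scan_node(target_table, predicate, column_ids);
executor::SeqScanExecutor scan_executor(&scan_node, executor_context.get());
if (scan_executor.Init()) {
  while (scan_executor.Execute()) {
    std::unique_ptr<executor::LogicalTile> tile(scan_executor.GetOutput());
    // each tile holds only visible tuples that satisfied the predicate
  }
}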