Example #1
TEST_F(LogicalTileTests, TempTableTest) {
  const int tuple_count = TESTS_TUPLES_PER_TILEGROUP;
  auto pool = TestingHarness::GetInstance().GetTestingPool();

  catalog::Schema *schema = new catalog::Schema(
      {TestingExecutorUtil::GetColumnInfo(0), TestingExecutorUtil::GetColumnInfo(1),
       TestingExecutorUtil::GetColumnInfo(2)});

  // Create our TempTable
  storage::TempTable table(INVALID_OID, schema, true);
  EXPECT_EQ(0, table.GetTupleCount());

  // Then shove some tuples in it
  for (int i = 0; i < tuple_count; i++) {
    std::unique_ptr<storage::Tuple> tuple(
        new storage::Tuple(table.GetSchema(), true));
    auto val1 = type::ValueFactory::GetIntegerValue(
        TestingExecutorUtil::PopulatedValue(i, 0));
    auto val2 = type::ValueFactory::GetIntegerValue(
        TestingExecutorUtil::PopulatedValue(i, 1));
    auto val3 = type::ValueFactory::GetDecimalValue(
        TestingExecutorUtil::PopulatedValue(i, 2));
    tuple->SetValue(0, val1, pool);
    tuple->SetValue(1, val2, pool);
    tuple->SetValue(2, val3, pool);
    table.InsertTuple(tuple.get());
  }
  LOG_INFO("%s", table.GetInfo().c_str());
  LOG_INFO("%s", GETINFO_SINGLE_LINE.c_str());

  // Check to see whether we can wrap a LogicalTile around it
  auto tile_group_count = table.GetTileGroupCount();
  for (oid_t tile_group_itr = 0; tile_group_itr < tile_group_count;
       tile_group_itr++) {
    auto tile_group = table.GetTileGroup(tile_group_itr);
    EXPECT_NE(nullptr, tile_group);
    std::unique_ptr<executor::LogicalTile> logical_tile(
        executor::LogicalTileFactory::WrapTileGroup(tile_group));
    EXPECT_NE(nullptr, logical_tile);

    // Make sure that we can iterate over the LogicalTile and get
    // at our TempTable tuples
    EXPECT_NE(0, logical_tile->GetTupleCount());

    LOG_INFO("GetActiveTupleCount() = %d",
             (int)tile_group->GetActiveTupleCount());
    LOG_INFO("\n%s", tile_group->GetInfo().c_str());
    LOG_INFO("%s", peloton::GETINFO_THICK_LINE.c_str());
    LOG_INFO("%s", logical_tile->GetInfo().c_str());
  }
}
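
The pattern in Example #1 reduces to a simple indirection: a logical tile is a list of positions over one or more base tiles, and iterating it visits base tuples through that list. The sketch below models the idea in isolation; all types are hypothetical stand-ins, not Peloton's API.

#include <cstdio>
#include <vector>

struct BaseTile {                 // stand-in for a physical tile group
  std::vector<int> rows;          // one value per tuple slot, for brevity
};

struct LogicalTileSketch {        // stand-in for executor::LogicalTile
  const BaseTile *base;
  std::vector<size_t> positions;  // visible tuple offsets into the base tile
  size_t TupleCount() const { return positions.size(); }
};

int main() {
  BaseTile base{{10, 20, 30, 40}};
  LogicalTileSketch tile{&base, {0, 1, 3}};  // "wrap" a subset of the base
  for (size_t pos : tile.positions) {
    std::printf("tuple @%zu = %d\n", pos, base.rows[pos]);
  }
  return tile.TupleCount() == 3 ? 0 : 1;
}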
Example #2
/**
 * @brief Creates logical tile from tile group and applies scan predicate.
 * @return true on success, false otherwise.
 */
bool SeqScanExecutor::DExecute() {
  // Scanning over a logical tile.
  if (children_.size() == 1) {
    // FIXME Check all requirements for children_.size() == 0 case.
    LOG_TRACE("Seq Scan executor :: 1 child ");

    PL_ASSERT(target_table_ == nullptr);
    PL_ASSERT(column_ids_.size() == 0);

    while (children_[0]->Execute()) {
      std::unique_ptr<LogicalTile> tile(children_[0]->GetOutput());

      if (predicate_ != nullptr) {
        // Invalidate tuples that don't satisfy the predicate.
        for (oid_t tuple_id : *tile) {
          expression::ContainerTuple<LogicalTile> tuple(tile.get(), tuple_id);
          if (predicate_->Evaluate(&tuple, nullptr, executor_context_)
                  .IsFalse()) {
            tile->RemoveVisibility(tuple_id);
          }
        }
      }

      if (0 == tile->GetTupleCount()) {  // Avoid returning empty tiles
        continue;
      }

      /* Hopefully we needn't do projections here */
      SetOutput(tile.release());
      return true;
    }

    return false;
  }
  // Scanning a table
  else if (children_.size() == 0) {
    LOG_TRACE("Seq Scan executor :: 0 child ");

    PL_ASSERT(target_table_ != nullptr);
    PL_ASSERT(column_ids_.size() > 0);

    // Force use of the OCC transaction manager if dirty reads are forbidden
    concurrency::TransactionManager &transaction_manager =
        concurrency::TransactionManagerFactory::GetInstance();

    // LOG_TRACE("Number of tuples: %f",
    // target_table_->GetIndex(0)->GetNumberOfTuples());

    // Retrieve next tile group.
    while (current_tile_group_offset_ < table_tile_group_count_) {
      auto tile_group =
          target_table_->GetTileGroup(current_tile_group_offset_++);
      auto tile_group_header = tile_group->GetHeader();

      oid_t active_tuple_count = tile_group->GetNextTupleSlot();


      // Construct position list by looping through tile group
      // and applying the predicate.
      std::vector<oid_t> position_list;
      for (oid_t tuple_id = 0; tuple_id < active_tuple_count; tuple_id++) {

        ItemPointer location(tile_group->GetTileGroupId(), tuple_id);

        // check transaction visibility
        if (transaction_manager.IsVisible(tile_group_header, tuple_id)) {
          // if the tuple is visible, then perform predicate evaluation.
          if (predicate_ == nullptr) {
            position_list.push_back(tuple_id);
            auto res = transaction_manager.PerformRead(location);
            if (!res) {
              transaction_manager.SetTransactionResult(RESULT_FAILURE);
              return res;
            }
          } else {
            expression::ContainerTuple<storage::TileGroup> tuple(
                tile_group.get(), tuple_id);
            auto eval = predicate_->Evaluate(&tuple, nullptr, executor_context_)
                            .IsTrue();
            if (eval == true) {
              position_list.push_back(tuple_id);
              auto res = transaction_manager.PerformRead(location);
              if (!res) {
                transaction_manager.SetTransactionResult(RESULT_FAILURE);
                return res;
              }
            }
          }
        }
      }

      // Don't return empty tiles
      if (position_list.size() == 0) {
        continue;
      }

      // Construct logical tile.
      std::unique_ptr<LogicalTile> logical_tile(LogicalTileFactory::GetTile());
      logical_tile->AddColumns(tile_group, column_ids_);
      logical_tile->AddPositionList(std::move(position_list));

      SetOutput(logical_tile.release());
      return true;
    }
  }

  return false;
}
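
The core of the table-scan branch is position-list construction: visit every active slot and keep the ones that pass both the visibility check and the (optional) predicate. A minimal standalone sketch, with visibility and predicate supplied as hypothetical callbacks:

#include <cstdint>
#include <cstdio>
#include <functional>
#include <vector>

// Hypothetical mirror of the scan loop: keep only slots that are both visible
// to the transaction and accepted by the (optional) predicate.
std::vector<uint32_t> BuildPositionList(
    uint32_t active_tuple_count,
    const std::function<bool(uint32_t)> &is_visible,
    const std::function<bool(uint32_t)> &predicate) {
  std::vector<uint32_t> position_list;
  for (uint32_t tuple_id = 0; tuple_id < active_tuple_count; tuple_id++) {
    if (!is_visible(tuple_id)) continue;
    if (predicate == nullptr || predicate(tuple_id)) {
      position_list.push_back(tuple_id);
    }
  }
  return position_list;
}

int main() {
  auto visible = [](uint32_t id) { return id % 2 == 0; };  // fake visibility
  auto pred = [](uint32_t id) { return id < 6; };          // fake predicate
  for (uint32_t id : BuildPositionList(10, visible, pred)) {
    std::printf("%u ", id);  // prints: 0 2 4
  }
  std::printf("\n");
}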
Example #3
bool IndexScanExecutor::ExecSecondaryIndexLookup() {
  PL_ASSERT(!done_);

  std::vector<ItemPointer> tuple_locations;

  PL_ASSERT(index_->GetIndexType() != INDEX_CONSTRAINT_TYPE_PRIMARY_KEY);

  if (0 == key_column_ids_.size()) {
    index_->ScanAllKeys(tuple_locations);
  } else {
    index_->Scan(values_, key_column_ids_, expr_types_,
                 SCAN_DIRECTION_TYPE_FORWARD, tuple_locations);
  }

  LOG_TRACE("Tuple_locations.size(): %lu", tuple_locations.size());

  if (tuple_locations.size() == 0) return false;

  auto &transaction_manager =
      concurrency::TransactionManagerFactory::GetInstance();

  std::map<oid_t, std::vector<oid_t>> visible_tuples;
  // for every tuple that is found in the index.
  for (auto tuple_location : tuple_locations) {
    auto &manager = catalog::Manager::GetInstance();
    auto tile_group = manager.GetTileGroup(tuple_location.block);
    auto tile_group_header = tile_group.get()->GetHeader();
    auto tile_group_id = tuple_location.block;
    auto tuple_id = tuple_location.offset;

    // if the tuple is visible.
    if (transaction_manager.IsVisible(tile_group_header, tuple_id)) {
      // perform predicate evaluation.
      if (predicate_ == nullptr) {
        visible_tuples[tile_group_id].push_back(tuple_id);
        auto res = transaction_manager.PerformRead(tuple_location);
        if (!res) {
          transaction_manager.SetTransactionResult(RESULT_FAILURE);
          return res;
        }
      } else {
        expression::ContainerTuple<storage::TileGroup> tuple(tile_group.get(),
                                                             tuple_id);
        auto eval =
            predicate_->Evaluate(&tuple, nullptr, executor_context_).IsTrue();
        if (eval == true) {
          visible_tuples[tile_group_id].push_back(tuple_id);
          auto res = transaction_manager.PerformRead(tuple_location);
          if (!res) {
            transaction_manager.SetTransactionResult(RESULT_FAILURE);
            return res;
          }
        }
      }
    }
  }
  // Construct a logical tile for each block
  for (auto tuples : visible_tuples) {
    auto &manager = catalog::Manager::GetInstance();
    auto tile_group = manager.GetTileGroup(tuples.first);

    std::unique_ptr<LogicalTile> logical_tile(LogicalTileFactory::GetTile());
    // Add relevant columns to logical tile
    logical_tile->AddColumns(tile_group, full_column_ids_);
    logical_tile->AddPositionList(std::move(tuples.second));
    if (column_ids_.size() != 0) {
      logical_tile->ProjectColumns(full_column_ids_, column_ids_);
    }

    result_.push_back(logical_tile.release());
  }

  done_ = true;

  LOG_TRACE("Result tiles : %lu", result_.size());

  return true;
}
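
One detail worth calling out: visible tuples are bucketed by block in a std::map<oid_t, std::vector<oid_t>> so the executor can emit one logical tile per tile group. A self-contained sketch of that grouping step (ItemPtr is a hypothetical stand-in for ItemPointer):

#include <cstdint>
#include <cstdio>
#include <map>
#include <vector>

struct ItemPtr {
  uint32_t block;
  uint32_t offset;
};

int main() {
  std::vector<ItemPtr> visible_locations = {
      {1, 0}, {1, 3}, {2, 1}, {2, 2}, {1, 5}};

  // Bucket each visible (block, offset) pair by block.
  std::map<uint32_t, std::vector<uint32_t>> visible_tuples;
  for (const auto &loc : visible_locations) {
    visible_tuples[loc.block].push_back(loc.offset);
  }

  // One "logical tile" per block, as in the executor above.
  for (const auto &entry : visible_tuples) {
    std::printf("block %u -> %zu tuples\n", entry.first, entry.second.size());
  }
}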
Example #4
bool IndexScanExecutor::ExecPrimaryIndexLookup() {
  PL_ASSERT(!done_);

  std::vector<ItemPointer *> tuple_location_ptrs;

  PL_ASSERT(index_->GetIndexType() == INDEX_CONSTRAINT_TYPE_PRIMARY_KEY);

  if (0 == key_column_ids_.size()) {
    index_->ScanAllKeys(tuple_location_ptrs);
  } else {
    index_->Scan(values_, key_column_ids_, expr_types_,
                 SCAN_DIRECTION_TYPE_FORWARD, tuple_location_ptrs);
  }


  if (tuple_location_ptrs.size() == 0) return false;

  auto &transaction_manager =
      concurrency::TransactionManagerFactory::GetInstance();

  std::map<oid_t, std::vector<oid_t>> visible_tuples;
  std::vector<ItemPointer> garbage_tuples;
  // for every tuple that is found in the index.
  for (auto tuple_location_ptr : tuple_location_ptrs) {
    
    ItemPointer tuple_location = *tuple_location_ptr;
    
    auto &manager = catalog::Manager::GetInstance();
    auto tile_group = manager.GetTileGroup(tuple_location.block);
    auto tile_group_header = tile_group.get()->GetHeader();

    size_t chain_length = 0;
    while (true) {

      ++chain_length;

      // if the tuple is visible.
      if (transaction_manager.IsVisible(tile_group_header,
                                        tuple_location.offset)) {

        LOG_TRACE("traverse chain length : %lu", chain_length);
        LOG_TRACE("perform read: %u, %u", tuple_location.block,
                 tuple_location.offset);

        // perform predicate evaluation.
        if (predicate_ == nullptr) {
          visible_tuples[tuple_location.block].push_back(tuple_location.offset);

          auto res = transaction_manager.PerformRead(tuple_location);
          if (!res) {
            transaction_manager.SetTransactionResult(RESULT_FAILURE);
            return res;
          }
        } else {
          expression::ContainerTuple<storage::TileGroup> tuple(
              tile_group.get(), tuple_location.offset);
          auto eval =
              predicate_->Evaluate(&tuple, nullptr, executor_context_).IsTrue();
          if (eval == true) {
            visible_tuples[tuple_location.block]
                .push_back(tuple_location.offset);

            auto res = transaction_manager.PerformRead(tuple_location);
            if (!res) {
              transaction_manager.SetTransactionResult(RESULT_FAILURE);
              return res;
            }
          }
        }
        break;
      }
      // if the tuple is not visible.
      else {
        ItemPointer old_item = tuple_location;
        cid_t old_end_cid = tile_group_header->GetEndCommitId(old_item.offset);

        tuple_location = tile_group_header->GetNextItemPointer(old_item.offset);
        // there must exist a visible version.

        // FIXME: currently, only the speculative-read transaction manager
        // **may** see a null version; this is a potential bug.
        if (tuple_location.IsNull()) {
          transaction_manager.SetTransactionResult(RESULT_FAILURE);
          // FIXME: this causes unnecessary aborts when we have delete
          // operations
          return false;
        }

        // FIXME: Is this always true? what if we have a deleted tuple? --jiexi
        PL_ASSERT(tuple_location.IsNull() == false);

        cid_t max_committed_cid = transaction_manager.GetMaxCommittedCid();

        // check whether older version is garbage.

        if (old_end_cid <= max_committed_cid) {
          PL_ASSERT(tile_group_header->GetTransactionId(old_item.offset) ==
                        INITIAL_TXN_ID ||
                    tile_group_header->GetTransactionId(old_item.offset) ==
                        INVALID_TXN_ID);

          if (tile_group_header->SetAtomicTransactionId(
                  old_item.offset, INVALID_TXN_ID) == true) {
            // atomically swap item pointer held in the index bucket.
            AtomicUpdateItemPointer(tuple_location_ptr, tuple_location);

            // currently, let's assume only primary index exists.
            // gc::GCManagerFactory::GetInstance().RecycleTupleSlot(
            //     table_->GetOid(), old_item.block, old_item.offset,
            //     transaction_manager.GetNextCommitId());
            garbage_tuples.push_back(old_item);

            tile_group = manager.GetTileGroup(tuple_location.block);
            tile_group_header = tile_group.get()->GetHeader();
            tile_group_header->SetPrevItemPointer(tuple_location.offset,
                                                  INVALID_ITEMPOINTER);
          } else {
            tile_group = manager.GetTileGroup(tuple_location.block);
            tile_group_header = tile_group.get()->GetHeader();
          }
        } else {
          tile_group = manager.GetTileGroup(tuple_location.block);
          tile_group_header = tile_group.get()->GetHeader();
        }
      }
    }
  }

  // Add all garbage tuples to GC manager
  if(garbage_tuples.size() != 0) {
    cid_t garbage_timestamp = transaction_manager.GetNextCommitId();
    for (auto garbage : garbage_tuples) {
      gc::GCManagerFactory::GetInstance().RecycleTupleSlot(
        table_->GetOid(), garbage.block, garbage.offset, garbage_timestamp);
    }
  }

  // Construct a logical tile for each block
  for (auto tuples : visible_tuples) {
    auto &manager = catalog::Manager::GetInstance();
    auto tile_group = manager.GetTileGroup(tuples.first);

    std::unique_ptr<LogicalTile> logical_tile(LogicalTileFactory::GetTile());
    // Add relevant columns to logical tile
    logical_tile->AddColumns(tile_group, full_column_ids_);
    logical_tile->AddPositionList(std::move(tuples.second));
    if (column_ids_.size() != 0) {
      logical_tile->ProjectColumns(full_column_ids_, column_ids_);
    }

    result_.push_back(logical_tile.release());
  }

  done_ = true;

  LOG_TRACE("Result tiles : %lu", result_.size());

  return true;
}
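
The while (true) loop in Example #4 walks a version chain: when the version at hand is not visible, it follows GetNextItemPointer until a visible version turns up, and aborts if the chain runs out. A toy model of that walk, with hypothetical Version records carrying begin/end commit ids:

#include <cstdint>
#include <cstdio>
#include <vector>

// Toy version record: visible to a snapshot at read_cid when
// begin_cid <= read_cid < end_cid.
struct Version {
  uint64_t begin_cid;
  uint64_t end_cid;
  int next;  // index of the next version in the chain, -1 at the end
  int payload;
};

bool IsVisible(const Version &v, uint64_t read_cid) {
  return v.begin_cid <= read_cid && read_cid < v.end_cid;
}

// Walk the chain until a visible version turns up; -1 means the caller must
// abort, just as the executor above fails the transaction.
int FindVisible(const std::vector<Version> &versions, int head,
                uint64_t read_cid) {
  for (int cur = head; cur != -1; cur = versions[cur].next) {
    if (IsVisible(versions[cur], read_cid)) return cur;
  }
  return -1;
}

int main() {
  // Chain: v0 lives for cids [1,5), v1 for [5,9), v2 from 9 onwards.
  std::vector<Version> versions = {
      {1, 5, 1, 100}, {5, 9, 2, 200}, {9, UINT64_MAX, -1, 300}};
  int hit = FindVisible(versions, 0, 7);
  std::printf("read@7 sees payload %d\n",
              hit >= 0 ? versions[hit].payload : -1);
}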
Example #5
TEST_F(LogicalTileTests, TileMaterializationTest) {
  const int tuple_count = 4;
  std::shared_ptr<storage::TileGroup> tile_group(
      TestingExecutorUtil::CreateTileGroup(tuple_count));

  std::vector<const catalog::Schema *> tile_schemas;
  for (unsigned int i = 0; i < tile_group->NumTiles(); i++) {
    tile_schemas.push_back(tile_group->GetTile(i)->GetSchema());
  }

  // Create tuple schema from tile schemas.
  std::unique_ptr<catalog::Schema> schema(
      catalog::Schema::AppendSchemaPtrList(tile_schemas));

  // Create tuples and insert them into tile group.
  const bool allocate = true;
  storage::Tuple tuple1(schema.get(), allocate);
  storage::Tuple tuple2(schema.get(), allocate);
  auto pool = tile_group->GetTilePool(1);

  tuple1.SetValue(0, type::ValueFactory::GetIntegerValue(1), pool);
  tuple1.SetValue(1, type::ValueFactory::GetIntegerValue(1), pool);
  tuple1.SetValue(2, type::ValueFactory::GetTinyIntValue(1), pool);
  tuple1.SetValue(3, type::ValueFactory::GetVarcharValue("tuple 1"), pool);

  tuple2.SetValue(0, type::ValueFactory::GetIntegerValue(2), pool);
  tuple2.SetValue(1, type::ValueFactory::GetIntegerValue(2), pool);
  tuple2.SetValue(2, type::ValueFactory::GetTinyIntValue(2), pool);
  tuple2.SetValue(3, type::ValueFactory::GetVarcharValue("tuple 2"), pool);

  auto &txn_manager = concurrency::TransactionManagerFactory::GetInstance();
  auto txn = txn_manager.BeginTransaction();
  // txn_id_t txn_id = txn->GetTransactionId();

  auto tuple_id1 = tile_group->InsertTuple(&tuple1);
  auto tuple_id2 = tile_group->InsertTuple(&tuple2);
  auto tuple_id3 = tile_group->InsertTuple(&tuple1);

  ItemPointer *index_entry_ptr = nullptr;
  txn_manager.PerformInsert(
      txn, ItemPointer(tile_group->GetTileGroupId(), tuple_id1),
      index_entry_ptr);
  txn_manager.PerformInsert(
      txn, ItemPointer(tile_group->GetTileGroupId(), tuple_id2),
      index_entry_ptr);
  txn_manager.PerformInsert(
      txn, ItemPointer(tile_group->GetTileGroupId(), tuple_id3),
      index_entry_ptr);

  txn_manager.CommitTransaction(txn);

  ////////////////////////////////////////////////////////////////
  // LOGICAL TILE (1 BASE TILE)
  ////////////////////////////////////////////////////////////////

  // Don't transfer ownership of any base tile to logical tile.
  auto base_tile_ref = tile_group->GetTileReference(1);

  std::vector<oid_t> position_list1 = {0, 1};
  std::vector<oid_t> position_list2 = {0, 1};

  std::unique_ptr<executor::LogicalTile> logical_tile(
      executor::LogicalTileFactory::GetTile());

  logical_tile->AddPositionList(std::move(position_list1));
  logical_tile->AddPositionList(std::move(position_list2));

  oid_t column_count = tile_schemas[1]->GetColumnCount();
  for (oid_t column_itr = 0; column_itr < column_count; column_itr++) {
    logical_tile->AddColumn(base_tile_ref, column_itr, column_itr);
  }

  LOG_TRACE("%s", logical_tile->GetInfo().c_str());

  ////////////////////////////////////////////////////////////////
  // LOGICAL TILE (2 BASE TILE)
  ////////////////////////////////////////////////////////////////

  logical_tile.reset(executor::LogicalTileFactory::GetTile());

  auto base_tile_ref1 = tile_group->GetTileReference(0);
  auto base_tile_ref2 = tile_group->GetTileReference(1);

  position_list1 = {0, 1};
  position_list2 = {0, 1};
  std::vector<oid_t> position_list3 = {0, 1};
  std::vector<oid_t> position_list4 = {0, 1};

  logical_tile->AddPositionList(std::move(position_list1));
  logical_tile->AddPositionList(std::move(position_list2));
  logical_tile->AddPositionList(std::move(position_list3));
  logical_tile->AddPositionList(std::move(position_list4));

  oid_t column_count1 = tile_schemas[0]->GetColumnCount();
  for (oid_t column_itr = 0; column_itr < column_count1; column_itr++) {
    logical_tile->AddColumn(base_tile_ref1, column_itr, column_itr);
  }

  oid_t column_count2 = tile_schemas[1]->GetColumnCount();
  for (oid_t column_itr = 0; column_itr < column_count2; column_itr++) {
    logical_tile->AddColumn(base_tile_ref2, column_itr,
                            column_count1 + column_itr);
  }

  LOG_TRACE("%s", logical_tile->GetInfo().c_str());
}
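
In the two-base-tile case, each logical column records which base tile it reads from and which position list indexes that tile, so one logical row can span several physical tiles. A hypothetical sketch of that mapping, simplified to one position list per base tile:

#include <cstdio>
#include <vector>

struct ColumnRef {
  int base_tile;      // which physical tile holds the column
  int position_list;  // which position list indexes that tile's rows
};

int main() {
  // One position list per base tile (the example above allocates two per
  // tile; one is enough to show the indirection).
  std::vector<std::vector<int>> position_lists = {{0, 1}, {0, 1}};
  std::vector<ColumnRef> columns = {
      {0, 0}, {0, 0},   // two columns served by base tile 0
      {1, 1}, {1, 1}};  // two columns served by base tile 1

  // One logical row gathers slots from every referenced base tile.
  for (size_t row = 0; row < position_lists[0].size(); row++) {
    for (const auto &col : columns) {
      std::printf("(tile %d, slot %d) ", col.base_tile,
                  position_lists[col.position_list][row]);
    }
    std::printf("\n");
  }
}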
Example #6
/**
 * @brief Creates logical tile from tile group and applies scan predicate.
 * @return true on success, false otherwise.
 */
bool SeqScanExecutor::DExecute() {

  // Scanning over a logical tile.
  if (children_.size() == 1 &&
      // There will be a child node on the create index scenario,
      // but we don't want to use this execution flow
      !(GetRawNode()->GetChildren().size() > 0 &&
        GetRawNode()->GetChildren()[0].get()->GetPlanNodeType() ==
            PlanNodeType::CREATE &&
        ((planner::CreatePlan *)GetRawNode()->GetChildren()[0].get())
                ->GetCreateType() == CreateType::INDEX)) {
    // FIXME Check all requirements for children_.size() == 0 case.
    LOG_TRACE("Seq Scan executor :: 1 child ");

    PELOTON_ASSERT(target_table_ == nullptr);
    PELOTON_ASSERT(column_ids_.size() == 0);

    while (children_[0]->Execute()) {
      std::unique_ptr<LogicalTile> tile(children_[0]->GetOutput());

      if (predicate_ != nullptr) {
        // Invalidate tuples that don't satisfy the predicate.
        for (oid_t tuple_id : *tile) {
          ContainerTuple<LogicalTile> tuple(tile.get(), tuple_id);
          auto eval = predicate_->Evaluate(&tuple, nullptr, executor_context_);
          if (eval.IsFalse()) {
            // if (predicate_->Evaluate(&tuple, nullptr, executor_context_)
            //        .IsFalse()) {
            tile->RemoveVisibility(tuple_id);
          }
        }
      }

      if (0 == tile->GetTupleCount()) {  // Avoid returning empty tiles
        continue;
      }

      /* Hopefully we needn't do projections here */
      SetOutput(tile.release());
      return true;
    }
    return false;
  }
  // Scanning a table
  else if (children_.size() == 0 ||
           // If we are creating an index, there will be a child
           (children_.size() == 1 &&
            // This check is only needed to pass seq_scan_test
            // unless it is possible to add a executor child
            // without a corresponding plan.
            GetRawNode()->GetChildren().size() > 0 &&
            // Check if the plan is what we actually expect.
            GetRawNode()->GetChildren()[0].get()->GetPlanNodeType() ==
                PlanNodeType::CREATE &&
            // If it is, confirm it is for indexes
            ((planner::CreatePlan *)GetRawNode()->GetChildren()[0].get())
                    ->GetCreateType() == CreateType::INDEX)) {
    LOG_TRACE("Seq Scan executor :: 0 child ");

    PELOTON_ASSERT(target_table_ != nullptr);
    PELOTON_ASSERT(column_ids_.size() > 0);
    if (children_.size() > 0 && !index_done_) {
      children_[0]->Execute();
      // This stops continuous executions due to
      // a parent and avoids multiple creations
      // of the same index.
      index_done_ = true;
    }
    
    concurrency::TransactionManager &transaction_manager =
        concurrency::TransactionManagerFactory::GetInstance();

    bool acquire_owner = GetPlanNode<planner::AbstractScan>().IsForUpdate();
    auto current_txn = executor_context_->GetTransaction();

    // Retrieve next tile group.
    while (current_tile_group_offset_ < table_tile_group_count_) {
      auto tile_group =
          target_table_->GetTileGroup(current_tile_group_offset_++);
      auto tile_group_header = tile_group->GetHeader();

      oid_t active_tuple_count = tile_group->GetNextTupleSlot();

      // Construct position list by looping through tile group
      // and applying the predicate.
      std::vector<oid_t> position_list;
      for (oid_t tuple_id = 0; tuple_id < active_tuple_count; tuple_id++) {
        ItemPointer location(tile_group->GetTileGroupId(), tuple_id);

        auto visibility = transaction_manager.IsVisible(
            current_txn, tile_group_header, tuple_id);

        // check transaction visibility
        if (visibility == VisibilityType::OK) {
          // if the tuple is visible, then perform predicate evaluation.
          if (predicate_ == nullptr) {
            position_list.push_back(tuple_id);
            auto res = transaction_manager.PerformRead(current_txn, location,
                                                       acquire_owner);
            if (!res) {
              transaction_manager.SetTransactionResult(current_txn,
                                                       ResultType::FAILURE);
              return res;
            }
          } else {
            ContainerTuple<storage::TileGroup> tuple(tile_group.get(),
                                                     tuple_id);
            LOG_TRACE("Evaluate predicate for a tuple");
            auto eval =
                predicate_->Evaluate(&tuple, nullptr, executor_context_);
            LOG_TRACE("Evaluation result: %s", eval.GetInfo().c_str());
            if (eval.IsTrue()) {
              position_list.push_back(tuple_id);
              auto res = transaction_manager.PerformRead(current_txn, location,
                                                         acquire_owner);
              if (!res) {
                transaction_manager.SetTransactionResult(current_txn,
                                                         ResultType::FAILURE);
                return res;
              } else {
                LOG_TRACE("Sequential Scan Predicate Satisfied");
              }
            }
          }
        }
      }

      // Don't return empty tiles
      if (position_list.size() == 0) {
        continue;
      }

      // Construct logical tile.
      std::unique_ptr<LogicalTile> logical_tile(LogicalTileFactory::GetTile());
      logical_tile->AddColumns(tile_group, column_ids_);
      logical_tile->AddPositionList(std::move(position_list));

      LOG_TRACE("Information %s", logical_tile->GetInfo().c_str());
      SetOutput(logical_tile.release());
      return true;
    }
  }

  return false;
}
Example #7
/**
 * @brief Inserts tuples into the target table, either from a child logical
 * tile or from the tuple specification in the plan node.
 * @return true on success, false otherwise.
 */
bool InsertExecutor::DExecute() {
  if (done_) return false;

  assert(!done_);
  assert(executor_context_ != nullptr);

  const planner::InsertPlan &node = GetPlanNode<planner::InsertPlan>();
  storage::DataTable *target_table_ = node.GetTable();
  oid_t bulk_insert_count = node.GetBulkInsertCount();
  assert(target_table_);

  auto transaction_ = executor_context_->GetTransaction();
  auto executor_pool = executor_context_->GetExecutorContextPool();

  // Inserting a logical tile.
  if (children_.size() == 1) {
    LOG_INFO("Insert executor :: 1 child \n");

    if (!children_[0]->Execute()) {
      return false;
    }

    std::unique_ptr<LogicalTile> logical_tile(children_[0]->GetOutput());
    assert(logical_tile.get() != nullptr);
    auto target_table_schema = target_table_->GetSchema();
    auto column_count = target_table_schema->GetColumnCount();

    std::unique_ptr<storage::Tuple> tuple(
        new storage::Tuple(target_table_schema, true));

    // Go over the logical tile
    for (oid_t tuple_id : *logical_tile) {
      expression::ContainerTuple<LogicalTile> cur_tuple(logical_tile.get(),
                                                        tuple_id);

      // Materialize the logical tile tuple
      for (oid_t column_itr = 0; column_itr < column_count; column_itr++)
        tuple->SetValue(column_itr, cur_tuple.GetValue(column_itr),
                        executor_pool);

      peloton::ItemPointer location =
          target_table_->InsertTuple(transaction_, tuple.get());
      if (location.block == INVALID_OID) {
        transaction_->SetResult(peloton::Result::RESULT_FAILURE);
        return false;
      }
      transaction_->RecordInsert(location);

      executor_context_->num_processed += 1;  // insert one
    }

    return true;
  }
  // Inserting a collection of tuples from plan node
  else if (children_.size() == 0) {
    LOG_INFO("Insert executor :: 0 child \n");

    // Extract expressions from plan node and construct the tuple.
    // For now we just handle a single tuple
    auto schema = target_table_->GetSchema();
    std::unique_ptr<storage::Tuple> tuple(new storage::Tuple(schema, true));
    auto project_info = node.GetProjectInfo();

    // There should be no direct maps
    assert(project_info);
    assert(project_info->GetDirectMapList().size() == 0);

    for (auto target : project_info->GetTargetList()) {
      peloton::Value value =
          target.second->Evaluate(nullptr, nullptr, executor_context_);
      tuple->SetValue(target.first, value, executor_pool);
    }

    // Bulk Insert Mode
    for (oid_t insert_itr = 0; insert_itr < bulk_insert_count; insert_itr++) {
      // Carry out insertion
      ItemPointer location =
          target_table_->InsertTuple(transaction_, tuple.get());
      LOG_INFO("Inserted into location: %lu, %lu", location.block,
               location.offset);

      if (location.block == INVALID_OID) {
        LOG_INFO("Failed to Insert. Set txn failure.");
        transaction_->SetResult(peloton::Result::RESULT_FAILURE);
        return false;
      }
      transaction_->RecordInsert(location);

      // Logging
      {
        auto &log_manager = logging::LogManager::GetInstance();

        if (log_manager.IsInLoggingMode()) {
          auto logger = log_manager.GetBackendLogger();
          auto record = logger->GetTupleRecord(
              LOGRECORD_TYPE_TUPLE_INSERT, transaction_->GetTransactionId(),
              target_table_->GetOid(), location, INVALID_ITEMPOINTER,
              tuple.get());

          logger->Log(record);
        }
      }
    }

    executor_context_->num_processed += 1;  // insert one
    done_ = true;
    return true;
  }

  return true;
}
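
The bulk-insert branch boils down to inserting the same materialized tuple bulk_insert_count times and failing the whole statement on the first invalid location. A standalone sketch of that loop; Table, Location, and kInvalidOid are hypothetical stand-ins:

#include <cstdint>
#include <cstdio>

constexpr uint32_t kInvalidOid = UINT32_MAX;  // stands in for INVALID_OID

struct Location {
  uint32_t block;
  uint32_t offset;
};

// Fake table: insertion fails once capacity is exhausted.
struct Table {
  uint32_t used = 0;
  uint32_t capacity = 3;
  Location Insert() {
    if (used >= capacity) return {kInvalidOid, 0};
    return {0, used++};
  }
};

bool BulkInsert(Table &table, uint32_t bulk_insert_count,
                uint32_t &num_processed) {
  for (uint32_t i = 0; i < bulk_insert_count; i++) {
    Location loc = table.Insert();
    if (loc.block == kInvalidOid) {
      // Mirrors SetResult(RESULT_FAILURE) above: the whole statement fails.
      std::printf("Failed to insert. Set txn failure.\n");
      return false;
    }
    num_processed += 1;
  }
  return true;
}

int main() {
  Table table;
  uint32_t processed = 0;
  bool ok = BulkInsert(table, 5, processed);
  std::printf("ok=%d processed=%u\n", ok, processed);
}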
Example #8
bool HybridScanExecutor::SeqScanUtil() {
  assert(children_.size() == 0);
  // LOG_INFO("Hybrid executor, Seq Scan :: 0 child");

  assert(table_ != nullptr);
  assert(column_ids_.size() > 0);

  auto &transaction_manager =
    concurrency::TransactionManagerFactory::GetInstance();

  // Retrieve next tile group.
  while (current_tile_group_offset_ < table_tile_group_count_) {
    auto tile_group =
      table_->GetTileGroup(current_tile_group_offset_++);
    auto tile_group_header = tile_group->GetHeader();

    oid_t active_tuple_count = tile_group->GetNextTupleSlot();


    // Construct position list by looping through tile group
    // and applying the predicate.
    oid_t upper_bound_block = 0;
    if (item_pointers_.size() > 0) {
      auto reverse_iter = item_pointers_.rbegin();
      upper_bound_block = reverse_iter->block;
    }

    std::vector<oid_t> position_list;
    for (oid_t tuple_id = 0; tuple_id < active_tuple_count; tuple_id++) {

      ItemPointer location(tile_group->GetTileGroupId(), tuple_id);
      if (type_ == planner::HYBRID &&
          item_pointers_.size() > 0 &&
          location.block <= upper_bound_block) {
        if (item_pointers_.find(location) != item_pointers_.end()) {
          continue;
        }
      }

      // check transaction visibility
      if (transaction_manager.IsVisible(tile_group_header, tuple_id)) {
        // if the tuple is visible, then perform predicate evaluation.
        if (predicate_ == nullptr) {
          position_list.push_back(tuple_id);
        } else {
          expression::ContainerTuple<storage::TileGroup> tuple(
            tile_group.get(), tuple_id);
          auto eval = predicate_->Evaluate(&tuple, nullptr, executor_context_)
            .IsTrue();
          if (eval == true) {
            position_list.push_back(tuple_id);
          }
        }
      } else {
        // Guard: predicate_ may be null when no scan predicate was supplied.
        if (predicate_ != nullptr) {
          expression::ContainerTuple<storage::TileGroup> tuple(
              tile_group.get(), tuple_id);
          auto eval = predicate_->Evaluate(&tuple, nullptr, executor_context_)
                          .IsTrue();
          if (eval == true) {
            position_list.push_back(tuple_id);
            auto res = transaction_manager.PerformRead(location);
            if (!res) {
              transaction_manager.SetTransactionResult(RESULT_FAILURE);
              return res;
            }
          }
        }
      }
    }
    // Don't return empty tiles
    if (position_list.size() == 0) {
      continue;
    }

    // Construct logical tile.
    std::unique_ptr<LogicalTile> logical_tile(LogicalTileFactory::GetTile());
    logical_tile->AddColumns(tile_group, column_ids_);
    logical_tile->AddPositionList(std::move(position_list));
    LOG_INFO("Hybrid executor, Seq Scan :: Got a logical tile");
    SetOutput(logical_tile.release());
    return true;
  }

  return false;
}
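
The hybrid part of SeqScanUtil deduplicates against the index half of the scan: any location already collected in item_pointers_ is skipped, and the set lookup is only attempted for blocks at or below the highest block the index returned. A self-contained sketch of that filter:

#include <cstdint>
#include <cstdio>
#include <set>

struct ItemPtr {
  uint32_t block, offset;
  bool operator<(const ItemPtr &o) const {
    return block != o.block ? block < o.block : offset < o.offset;
  }
};

int main() {
  // Locations already produced by the index portion of the hybrid scan.
  std::set<ItemPtr> item_pointers = {{1, 0}, {1, 2}, {2, 1}};
  uint32_t upper_bound_block = item_pointers.rbegin()->block;

  ItemPtr candidates[] = {{1, 0}, {1, 1}, {2, 1}, {3, 0}};
  for (const auto &loc : candidates) {
    // Only blocks at or below the upper bound can possibly collide.
    if (!item_pointers.empty() && loc.block <= upper_bound_block &&
        item_pointers.count(loc)) {
      continue;  // already returned by the index scan
    }
    std::printf("seq scan keeps (%u, %u)\n", loc.block, loc.offset);
  }
}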
Example #9
bool HybridScanExecutor::ExecPrimaryIndexLookup() {
  assert(!index_done_);

  std::vector<ItemPointer *> tuple_location_ptrs;

  assert(index_->GetIndexType() == INDEX_CONSTRAINT_TYPE_PRIMARY_KEY);

  if (0 == key_column_ids_.size()) {
    index_->ScanAllKeys(tuple_location_ptrs);
  } else {
    index_->Scan(values_, key_column_ids_, expr_types_,
                 SCAN_DIRECTION_TYPE_FORWARD, tuple_location_ptrs);
  }

  LOG_INFO("Tuple_locations.size(): %lu", tuple_location_ptrs.size());

  auto &transaction_manager =
    concurrency::TransactionManagerFactory::GetInstance();

  if (tuple_location_ptrs.size() == 0) {
    index_done_ = true;
    return false;
  }


  std::map<oid_t, std::vector<oid_t>> visible_tuples;
  // for every tuple that is found in the index.
  for (auto tuple_location_ptr : tuple_location_ptrs) {
    ItemPointer tuple_location = *tuple_location_ptr;

    if (type_ == planner::HYBRID &&
        tuple_location.block >= block_threshold) {
      item_pointers_.insert(tuple_location);
    }

    auto &manager = catalog::Manager::GetInstance();
    auto tile_group = manager.GetTileGroup(tuple_location.block);
    auto tile_group_header = tile_group.get()->GetHeader();

    // perform transaction read
    size_t chain_length = 0;
    while (true) {

      ++chain_length;

      if (transaction_manager.IsVisible(tile_group_header,
                                        tuple_location.offset)) {
        visible_tuples[tuple_location.block].push_back(tuple_location.offset);
        auto res = transaction_manager.PerformRead(tuple_location);
        if (!res) {
          transaction_manager.SetTransactionResult(RESULT_FAILURE);
          return res;
        }
        break;
      } else {
        ItemPointer old_item = tuple_location;
        cid_t old_end_cid = tile_group_header->GetEndCommitId(old_item.offset);

        tuple_location = tile_group_header->GetNextItemPointer(old_item.offset);
        // there must exist a visible version.
        assert(tuple_location.IsNull() == false);

        cid_t max_committed_cid = transaction_manager.GetMaxCommittedCid();

        // check whether older version is garbage.
        if (old_end_cid < max_committed_cid) {
          assert(tile_group_header->GetTransactionId(old_item.offset) ==
                     INITIAL_TXN_ID ||
                 tile_group_header->GetTransactionId(old_item.offset) ==
                     INVALID_TXN_ID);

          if (tile_group_header->SetAtomicTransactionId(
                  old_item.offset, INVALID_TXN_ID) == true) {
            // atomically swap item pointer held in the index bucket.
            AtomicUpdateItemPointer(tuple_location_ptr, tuple_location);

            // currently, let's assume only primary index exists.
            // gc::GCManagerFactory::GetInstance().RecycleTupleSlot(
            //     table_->GetOid(), old_item.block, old_item.offset,
            //     max_committed_cid);
          }
        }

        tile_group = manager.GetTileGroup(tuple_location.block);
        tile_group_header = tile_group.get()->GetHeader();
      }
    }
  }

  // Construct a logical tile for each block
  for (auto tuples : visible_tuples) {
    auto &manager = catalog::Manager::GetInstance();
    auto tile_group = manager.GetTileGroup(tuples.first);

    std::unique_ptr<LogicalTile> logical_tile(LogicalTileFactory::GetTile());
    // Add relevant columns to logical tile
    logical_tile->AddColumns(tile_group, full_column_ids_);
    logical_tile->AddPositionList(std::move(tuples.second));
    if (column_ids_.size() != 0) {
      logical_tile->ProjectColumns(full_column_ids_, column_ids_);
    }

    result_.push_back(logical_tile.release());
  }

  index_done_ = true;

  LOG_TRACE("Result tiles : %lu", result_.size());

  return true;
}
Example #10
/**
 * @brief Build an executor tree and execute it.
 * std::vector<Value> is used for the params to keep the networking layer
 * simple: a node receives the value list before ExecutePlan, so we pass the
 * values directly rather than Postgres's ParamListInfo.
 * @return number of executed tuples and logical_tile_list
 */
int PlanExecutor::ExecutePlan(const planner::AbstractPlan *plan,
    const std::vector<Value> &params,
    std::vector<std::unique_ptr<executor::LogicalTile>>& logical_tile_list) {

  if (plan == nullptr) return -1;

  LOG_TRACE("PlanExecutor Start ");

  bool status;
  bool init_failure = false;
  bool single_statement_txn = false;

  auto &txn_manager = concurrency::TransactionManagerFactory::GetInstance();
  auto txn = peloton::concurrency::current_txn;

  // This happens for single statement queries in PG
  if (txn == nullptr) {
    single_statement_txn = true;
    txn = txn_manager.BeginTransaction();
  }
  PL_ASSERT(txn);

  LOG_TRACE("Txn ID = %lu ", txn->GetTransactionId());
  LOG_TRACE("Building the executor tree");

  // Use const std::vector<Value> &params to keep the networking layer simple
  std::unique_ptr<executor::ExecutorContext> executor_context(
      BuildExecutorContext(params, txn));

  // Build the executor tree
  std::unique_ptr<executor::AbstractExecutor> executor_tree(
      BuildExecutorTree(nullptr, plan, executor_context.get()));

  LOG_TRACE("Initializing the executor tree");

  // Initialize the executor tree
  status = executor_tree->Init();

  // Abort and cleanup
  if (status == false) {
    init_failure = true;
    txn->SetResult(Result::RESULT_FAILURE);
    goto cleanup;
  }

  LOG_TRACE("Running the executor tree");

  // Execute the tree until we get result tiles from root node
  for (;;) {
    status = executor_tree->Execute();

    // Stop
    if (status == false) {
      break;
    }

    std::unique_ptr<executor::LogicalTile> logical_tile(
        executor_tree->GetOutput());

    // Some executors don't return logical tiles (e.g., Update).
    if (logical_tile.get() == nullptr) {
      continue;
    }

    logical_tile_list.push_back(std::move(logical_tile));
  }

// final cleanup
cleanup:

  LOG_TRACE("About to commit: single stmt: %d, init_failure: %d, status: %d",
            single_statement_txn, init_failure, txn->GetResult());

  // clean up executor tree
  CleanExecutorTree(executor_tree.get());

  // should we commit or abort ?
  if (single_statement_txn == true || init_failure == true) {
    auto status = txn->GetResult();
    switch (status) {
      case Result::RESULT_SUCCESS:
        // Commit
        return executor_context->num_processed;

      case Result::RESULT_FAILURE:
      default:
        // Abort
        return -1;
    }
  }
  return executor_context->num_processed;
}
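
The execution loop above is the standard pull-based executor protocol: Init() once, then alternate Execute() and GetOutput() until Execute() returns false, skipping iterations that produce no tile. A minimal sketch with hypothetical Executor and Tile types:

#include <cstdio>
#include <memory>
#include <vector>

struct Tile {
  int id;
};

struct Executor {  // stand-in for executor::AbstractExecutor
  int remaining = 3;
  bool Execute() { return remaining-- > 0; }
  std::unique_ptr<Tile> GetOutput() {
    if (remaining == 1) return nullptr;  // e.g., an Update returns no tile
    return std::unique_ptr<Tile>(new Tile{remaining});
  }
};

int main() {
  Executor root;
  std::vector<std::unique_ptr<Tile>> tiles;
  for (;;) {
    if (!root.Execute()) break;          // stop when the tree is exhausted
    auto tile = root.GetOutput();
    if (tile == nullptr) continue;       // some executors don't return tiles
    tiles.push_back(std::move(tile));
  }
  std::printf("collected %zu tiles\n", tiles.size());
}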
Example #11
/**
 * @brief Build an executor tree and execute it.
 * std::vector<Value> is used for the params to keep the networking layer
 * simple: a node receives the value list before ExecutePlan, so we pass the
 * values directly rather than Postgres's ParamListInfo.
 * @return status of execution.
 */
peloton_status PlanExecutor::ExecutePlan(const planner::AbstractPlan *plan,
                                         const std::vector<Value> &params,
                                         TupleDesc tuple_desc) {
  peloton_status p_status;

  if (plan == nullptr) return p_status;

  LOG_TRACE("PlanExecutor Start ");

  bool status;
  bool init_failure = false;
  bool single_statement_txn = false;
  List *slots = NULL;

  auto &txn_manager = concurrency::TransactionManagerFactory::GetInstance();
  auto txn = peloton::concurrency::current_txn;
  // This happens for single statement queries in PG
  if (txn == nullptr) {
    single_statement_txn = true;
    txn = txn_manager.BeginTransaction();
  }
  PL_ASSERT(txn);

  LOG_TRACE("Txn ID = %lu ", txn->GetTransactionId());
  LOG_TRACE("Building the executor tree");

  // Use const std::vector<Value> &params to keep the networking layer simple
  std::unique_ptr<executor::ExecutorContext> executor_context(
      BuildExecutorContext(params, txn));

  // Build the executor tree
  std::unique_ptr<executor::AbstractExecutor> executor_tree(
      BuildExecutorTree(nullptr, plan, executor_context.get()));

  LOG_TRACE("Initializing the executor tree");

  // Initialize the executor tree
  status = executor_tree->Init();

  // Abort and cleanup
  if (status == false) {
    init_failure = true;
    txn->SetResult(Result::RESULT_FAILURE);
    goto cleanup;
  }

  LOG_TRACE("Running the executor tree");

  // Execute the tree until we get result tiles from root node
  for (;;) {
    status = executor_tree->Execute();

    // Stop
    if (status == false) {
      break;
    }

    std::unique_ptr<executor::LogicalTile> logical_tile(
        executor_tree->GetOutput());

    // Some executors don't return logical tiles (e.g., Update).
    if (logical_tile.get() == nullptr) {
      continue;
    }

    // Go over the logical tile
    for (oid_t tuple_id : *logical_tile) {
      expression::ContainerTuple<executor::LogicalTile> cur_tuple(
          logical_tile.get(), tuple_id);

      auto slot = TupleTransformer::GetPostgresTuple(&cur_tuple, tuple_desc);

      if (slot != nullptr) {
        slots = lappend(slots, slot);
      }
    }
  }

  // Set the result
  p_status.m_processed = executor_context->num_processed;
  p_status.m_result_slots = slots;

// final cleanup
cleanup:

  LOG_TRACE("About to commit: single stmt: %d, init_failure: %d, status: %d",
            single_statement_txn, init_failure, txn->GetResult());

  // should we commit or abort ?
  if (single_statement_txn == true || init_failure == true) {
    auto status = txn->GetResult();
    switch (status) {
      case Result::RESULT_SUCCESS:
        // Commit
        p_status.m_result = txn_manager.CommitTransaction();

        break;

      case Result::RESULT_FAILURE:
      default:
        // Abort
        p_status.m_result = txn_manager.AbortTransaction();
    }
  }

  // clean up executor tree
  CleanExecutorTree(executor_tree.get());

  return p_status;
}
Example #12
/**
 * @brief Build an executor tree and execute it.
 * std::vector<common::Value> is used for the params to keep the networking
 * layer simple: a node receives the value list before ExecutePlan, so we pass
 * the values directly rather than Postgres's ParamListInfo.
 * @return status of execution.
 */
peloton_status PlanExecutor::ExecutePlan(
    const planner::AbstractPlan *plan,
    const std::vector<common::Value> &params, std::vector<ResultType> &result,
    const std::vector<int> &result_format) {
  peloton_status p_status;

  if (plan == nullptr) return p_status;

  LOG_TRACE("PlanExecutor Start ");

  bool status;
  bool init_failure = false;
  bool single_statement_txn = false;

  auto &txn_manager = concurrency::TransactionManagerFactory::GetInstance();
  // auto txn = peloton::concurrency::current_txn;
  // This happens for single statement queries in PG
  // if (txn == nullptr) {
  single_statement_txn = true;
  auto txn = txn_manager.BeginTransaction();
  // }
  PL_ASSERT(txn);

  LOG_TRACE("Txn ID = %lu ", txn->GetTransactionId());
  LOG_TRACE("Building the executor tree");

  // Use const std::vector<common::Value> &params to keep the networking layer
  // simple
  std::unique_ptr<executor::ExecutorContext> executor_context(
      BuildExecutorContext(params, txn));

  // Build the executor tree
  std::unique_ptr<executor::AbstractExecutor> executor_tree(
      BuildExecutorTree(nullptr, plan, executor_context.get()));

  LOG_TRACE("Initializing the executor tree");

  // Initialize the executor tree
  status = executor_tree->Init();

  // Abort and cleanup
  if (status == false) {
    init_failure = true;
    txn->SetResult(Result::RESULT_FAILURE);
    goto cleanup;
  }

  LOG_TRACE("Running the executor tree");
  result.clear();

  // Execute the tree until we get result tiles from root node
  while (status == true) {
    status = executor_tree->Execute();

    std::unique_ptr<executor::LogicalTile> logical_tile(
        executor_tree->GetOutput());
    // Some executors don't return logical tiles (e.g., Update).
    if (logical_tile.get() != nullptr) {
      LOG_TRACE("Final Answer: %s",
                logical_tile->GetInfo().c_str());  // Printing the answers
      std::unique_ptr<catalog::Schema> output_schema(
          logical_tile->GetPhysicalSchema());  // Physical schema of the tile
      std::vector<std::vector<std::string>> answer_tuples =
          logical_tile->GetAllValuesAsStrings(result_format);

      // Construct the returned results
      for (auto &tuple : answer_tuples) {
        unsigned int col_index = 0;
        auto &schema_columns = output_schema->GetColumns();
        for (auto &column : schema_columns) {
          auto column_name = column.GetName();
          auto res = ResultType();
          PlanExecutor::copyFromTo(column_name, res.first);
          LOG_TRACE("column name: %s", column_name.c_str());
          PlanExecutor::copyFromTo(tuple[col_index++], res.second);
          if (!tuple[col_index - 1].empty()) {
            LOG_TRACE("column content: %s", tuple[col_index - 1].c_str());
          }
          result.push_back(res);
        }
      }
    }
  }

  // Set the result
  p_status.m_processed = executor_context->num_processed;
  p_status.m_result_slots = nullptr;

// final cleanup
cleanup:

  LOG_TRACE("About to commit: single stmt: %d, init_failure: %d, status: %d",
            single_statement_txn, init_failure, txn->GetResult());

  // should we commit or abort ?
  if (single_statement_txn == true || init_failure == true) {
    auto status = txn->GetResult();
    switch (status) {
      case Result::RESULT_SUCCESS:
        // Commit
        LOG_TRACE("Commit Transaction");
        p_status.m_result = txn_manager.CommitTransaction(txn);
        break;

      case Result::RESULT_FAILURE:
      default:
        // Abort
        LOG_TRACE("Abort Transaction");
        p_status.m_result = txn_manager.AbortTransaction(txn);
    }
  }

  // clean up executor tree
  CleanExecutorTree(executor_tree.get());

  return p_status;
}
Example #13
void SimpleCheckpoint::Scan(storage::DataTable *target_table,
                            oid_t database_oid) {
  auto schema = target_table->GetSchema();
  PL_ASSERT(schema);
  std::vector<oid_t> column_ids;
  column_ids.resize(schema->GetColumnCount());
  std::iota(column_ids.begin(), column_ids.end(), 0);

  oid_t current_tile_group_offset = START_OID;
  auto table_tile_group_count = target_table->GetTileGroupCount();
  CheckpointTileScanner scanner;

  // TODO: scan assigned tile groups in a multi-threaded checkpoint
  while (current_tile_group_offset < table_tile_group_count) {
    // Retrieve a tile group
    auto tile_group = target_table->GetTileGroup(current_tile_group_offset);

    // Retrieve a logical tile
    std::unique_ptr<executor::LogicalTile> logical_tile(
        scanner.Scan(tile_group, column_ids, start_commit_id_));

    // Empty result
    if (!logical_tile) {
      current_tile_group_offset++;
      continue;
    }

    auto tile_group_id = logical_tile->GetColumnInfo(0)
                             .base_tile->GetTileGroup()
                             ->GetTileGroupId();

    // Go over the logical tile
    for (oid_t tuple_id : *logical_tile) {
      expression::ContainerTuple<executor::LogicalTile> cur_tuple(
          logical_tile.get(), tuple_id);

      // Logging
      {
        // construct a physical tuple from the logical tuple
        std::unique_ptr<storage::Tuple> tuple(new storage::Tuple(schema, true));
        for (auto column_id : column_ids) {
          tuple->SetValue(column_id, cur_tuple.GetValue(column_id),
                          this->pool.get());
        }
        ItemPointer location(tile_group_id, tuple_id);
        // TODO is it possible to avoid `new` for checkpoint?
        std::shared_ptr<LogRecord> record(logger_->GetTupleRecord(
            LOGRECORD_TYPE_TUPLE_INSERT, INITIAL_TXN_ID, target_table->GetOid(),
            database_oid, location, INVALID_ITEMPOINTER, tuple.get()));
        PL_ASSERT(record);
        CopySerializeOutput output_buffer;
        record->Serialize(output_buffer);
        LOG_TRACE("Insert a new record for checkpoint (%u, %u)", tile_group_id,
                  tuple_id);
        records_.push_back(record);
      }
    }
    // persist to file once per tile
    Persist();
    current_tile_group_offset++;
  }
}
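
SimpleCheckpoint::Scan keeps memory bounded by buffering one tile group's worth of insert records and persisting after each group. A toy sketch of that rhythm, with records modeled as strings:

#include <cstdio>
#include <string>
#include <vector>

int main() {
  // Each inner vector stands in for the visible tuples of one tile group.
  std::vector<std::vector<int>> tile_groups = {{1, 2}, {}, {3}};
  std::vector<std::string> records;

  for (size_t tg = 0; tg < tile_groups.size(); tg++) {
    if (tile_groups[tg].empty()) continue;  // empty result: skip the group
    for (int tuple : tile_groups[tg]) {
      records.push_back("INSERT " + std::to_string(tuple));
    }
    std::printf("persisting %zu records after tile group %zu\n",
                records.size(), tg);
    records.clear();  // Persist() flushes once per tile group
  }
}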
Example #14
bool HybridScanExecutor::ExecPrimaryIndexLookup() {
  PL_ASSERT(index_done_ == false);

  const planner::HybridScanPlan &node = GetPlanNode<planner::HybridScanPlan>();
  bool acquire_owner = GetPlanNode<planner::AbstractScan>().IsForUpdate();

  auto key_column_ids_ = node.GetKeyColumnIds();
  auto expr_type_ = node.GetExprTypes();

  std::vector<ItemPointer *> tuple_location_ptrs;

  PL_ASSERT(index_->GetIndexType() == INDEX_CONSTRAINT_TYPE_PRIMARY_KEY);

  if (0 == key_column_ids_.size()) {
    LOG_TRACE("Scan all keys");
    index_->ScanAllKeys(tuple_location_ptrs);
  } else {
    LOG_TRACE("Scan");
    index_->Scan(values_,
                 key_column_ids_,
                 expr_type_,
                 SCAN_DIRECTION_TYPE_FORWARD,
                 tuple_location_ptrs,
                 &node.GetIndexPredicate().GetConjunctionList()[0]);
  }

  LOG_TRACE("Result tuple count: %lu", tuple_location_ptrs.size());

  auto &transaction_manager =
      concurrency::TransactionManagerFactory::GetInstance();

  auto current_txn = executor_context_->GetTransaction();

  if (tuple_location_ptrs.size() == 0) {
    index_done_ = true;
    return false;
  }

  std::map<oid_t, std::vector<oid_t>> visible_tuples;

  // for every tuple that is found in the index.
  for (auto tuple_location_ptr : tuple_location_ptrs) {
    ItemPointer tuple_location = *tuple_location_ptr;

    if (type_ == HYBRID_SCAN_TYPE_HYBRID &&
        tuple_location.block >= (block_threshold)) {
      item_pointers_.insert(tuple_location);
    }

    auto &manager = catalog::Manager::GetInstance();
    auto tile_group = manager.GetTileGroup(tuple_location.block);
    auto tile_group_header = tile_group.get()->GetHeader();

    // perform transaction read
    size_t chain_length = 0;
    while (true) {
      ++chain_length;

      auto visibility = transaction_manager.IsVisible(
          current_txn, tile_group_header, tuple_location.offset);

      if (visibility == VISIBILITY_OK) {
        visible_tuples[tuple_location.block].push_back(tuple_location.offset);
        auto res = transaction_manager.PerformRead(current_txn, tuple_location,
                                                   acquire_owner);
        if (!res) {
          transaction_manager.SetTransactionResult(current_txn,
                                                   RESULT_FAILURE);
          return res;
        }
        break;
      } else {
        ItemPointer old_item = tuple_location;
        cid_t old_end_cid = tile_group_header->GetEndCommitId(old_item.offset);

        tuple_location = tile_group_header->GetNextItemPointer(old_item.offset);
        // there must exist a visible version.
        assert(tuple_location.IsNull() == false);

        cid_t max_committed_cid = transaction_manager.GetMaxCommittedCid();

        // check whether older version is garbage.
        if (old_end_cid < max_committed_cid) {
          assert(tile_group_header->GetTransactionId(old_item.offset) ==
              INITIAL_TXN_ID ||
              tile_group_header->GetTransactionId(old_item.offset) ==
                  INVALID_TXN_ID);

          if (tile_group_header->SetAtomicTransactionId(
              old_item.offset, INVALID_TXN_ID) == true) {
            // atomically swap item pointer held in the index bucket.
            AtomicUpdateItemPointer(tuple_location_ptr, tuple_location);
          }
        }

        tile_group = manager.GetTileGroup(tuple_location.block);
        tile_group_header = tile_group.get()->GetHeader();
      }
    }
  }

  // Construct a logical tile for each block
  for (auto tuples : visible_tuples) {
    auto &manager = catalog::Manager::GetInstance();
    auto tile_group = manager.GetTileGroup(tuples.first);

    std::unique_ptr<LogicalTile> logical_tile(LogicalTileFactory::GetTile());

    // Add relevant columns to logical tile
    logical_tile->AddColumns(tile_group, full_column_ids_);
    logical_tile->AddPositionList(std::move(tuples.second));

    if (column_ids_.size() != 0) {
      logical_tile->ProjectColumns(full_column_ids_, column_ids_);
    }

    result_.push_back(logical_tile.release());
  }

  index_done_ = true;

  LOG_TRACE("Result tiles : %lu", result_.size());

  return true;
}
Example #15
/**
 * @return true on success, false otherwise.
 */
bool CopyExecutor::DExecute() {
  // skip if we're done
  if (done) {
    return false;
  }

  while (children_[0]->Execute() == true) {
    // Get the input tile
    std::unique_ptr<LogicalTile> logical_tile(children_[0]->GetOutput());
    LOG_DEBUG("Looping over the output tile..");

    // Get physical schema of the tile
    std::unique_ptr<catalog::Schema> output_schema(
        logical_tile->GetPhysicalSchema());

    // vectors for prepared statement parameters
    int num_params = 0;
    std::vector<std::pair<type::TypeId, std::string>> bind_parameters;
    std::vector<type::Value> param_values;
    std::vector<int16_t> formats;
    std::vector<int32_t> types;

    // Construct result format as varchar
    auto col_count = output_schema->GetColumnCount();
    std::vector<std::vector<std::string>> answer_tuples;
    std::vector<int> result_format(col_count, 0);
    answer_tuples =
        logical_tile->GetAllValuesAsStrings(result_format, true);

    // Loop over the returned results
    for (auto &tuple : answer_tuples) {
      // Loop over the columns
      for (unsigned int col_index = 0; col_index < col_count; col_index++) {
        auto val = tuple[col_index];
        auto origin_col_id =
            logical_tile->GetColumnInfo(col_index).origin_column_id;
        int len = val.length();

        if (origin_col_id == num_param_col_id) {
          // num_param column
          num_params = std::stoi(val);
          Copy(val.c_str(), val.length(), false);

        } else if (origin_col_id == param_type_col_id) {
          // param_types column
          PELOTON_ASSERT(output_schema->GetColumn(col_index).GetType() ==
                    type::TypeId::VARBINARY);

          network::InputPacket packet(len, val);

          // Read param types
          types.resize(num_params);
          // TODO: Instead of passing a packet to the executor, a more generic
          // data structure is needed
          network::PostgresProtocolHandler::ReadParamType(&packet, num_params, types);

          // Write all the types to output file
          for (int i = 0; i < num_params; i++) {
            std::string type_str = std::to_string(types[i]);
            Copy(type_str.c_str(), type_str.length(), false);
          }
        } else if (origin_col_id == param_format_col_id) {
          // param_formats column
          PELOTON_ASSERT(output_schema->GetColumn(col_index).GetType() ==
                    type::TypeId::VARBINARY);

          network::InputPacket packet(len, val);

          // Read param formats
          formats.resize(num_params);
          // TODO: Instead of passing a packet to the executor, a more generic
          // data structure is needed
          network::PostgresProtocolHandler::ReadParamFormat(&packet, num_params, formats);

        } else if (origin_col_id == param_val_col_id) {
          // param_values column
          PELOTON_ASSERT(output_schema->GetColumn(col_index).GetType() ==
                    type::TypeId::VARBINARY);

          network::InputPacket packet(len, val);
          bind_parameters.resize(num_params);
          param_values.resize(num_params);
          // TODO: Instead of passing a packet to the executor, a more generic
          // data structure is needed
          network::PostgresProtocolHandler::ReadParamValue(&packet, num_params, types,
                                              bind_parameters, param_values,
                                              formats);

          // Write all the values to output file
          for (int i = 0; i < num_params; i++) {
            auto param_value = param_values[i];
            LOG_TRACE("param_value.GetTypeId(): %s",
                      TypeIdToString(param_value.GetTypeId()).c_str());
            // Avoid extra copy for varlen types
            if (param_value.GetTypeId() == type::TypeId::VARBINARY) {
              const char *data = param_value.GetData();
              Copy(data, param_value.GetLength(), false);
            } else if (param_value.GetTypeId() == type::TypeId::VARCHAR) {
              const char *data = param_value.GetData();
              // Don't write the NULL character for varchar
              Copy(data, param_value.GetLength() - 1, false);
            } else {
              // Convert integer / double types to string before copying
              auto param_str = param_value.ToString();
              Copy(param_str.c_str(), param_str.length(), false);
            }
          }
        } else {
          // For other columns, just copy the content to local buffer
          bool end_of_line = col_index == col_count - 1;
          Copy(val.c_str(), val.length(), end_of_line);
        }
      }
    }
    LOG_DEBUG("Done writing to csv file for this tile");
  }
  LOG_INFO("Done copying all logical tiles");
  FlushBuffer();
  FFlushFsync();
  // Sync and close
  fclose(file_handle_.file);

  done = true;
  return true;
}
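
CopyExecutor funnels every field through Copy(data, len, end_of_line), so delimiter and newline handling can live in one place. A hypothetical implementation of that convention, accumulating a CSV line in a buffer:

#include <cstdio>
#include <string>

std::string g_buffer;
bool g_line_started = false;

// Append one field; emit the delimiter between fields and a newline when the
// caller marks the end of the row.
void Copy(const char *data, size_t len, bool end_of_line) {
  if (g_line_started) g_buffer += ',';
  g_buffer.append(data, len);
  g_line_started = true;
  if (end_of_line) {
    g_buffer += '\n';
    g_line_started = false;
  }
}

int main() {
  Copy("1", 1, false);
  Copy("hello", 5, false);
  Copy("3.14", 4, true);                // last column in the row
  std::printf("%s", g_buffer.c_str());  // prints: 1,hello,3.14
}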
Example #16
/**
 * @brief Inserts tuples into the target table, either from a child logical
 * tile or from the tuple specification in the plan node.
 * @return true on success, false otherwise.
 */
bool InsertExecutor::DExecute() {
  if (done_) return false;

  PELOTON_ASSERT(!done_);
  PELOTON_ASSERT(executor_context_ != nullptr);

  const planner::InsertPlan &node = GetPlanNode<planner::InsertPlan>();
  storage::DataTable *target_table = node.GetTable();
  oid_t bulk_insert_count = node.GetBulkInsertCount();

  auto &transaction_manager =
      concurrency::TransactionManagerFactory::GetInstance();

  auto current_txn = executor_context_->GetTransaction();

  if (!target_table) {
    transaction_manager.SetTransactionResult(current_txn,
                                             peloton::ResultType::FAILURE);
    return false;
  }

  LOG_TRACE("Number of tuples in table before insert: %lu",
            target_table->GetTupleCount());
  auto executor_pool = executor_context_->GetPool();

  trigger::TriggerList *trigger_list = target_table->GetTriggerList();
  if (trigger_list != nullptr) {
    LOG_TRACE("size of trigger list in target table: %d",
              trigger_list->GetTriggerListSize());
    if (trigger_list->HasTriggerType(TriggerType::BEFORE_INSERT_STATEMENT)) {
      LOG_TRACE("target table has per-statement-before-insert triggers!");
      trigger_list->ExecTriggers(TriggerType::BEFORE_INSERT_STATEMENT,
                                 current_txn);
    }
  }

  // Inserting a logical tile.
  if (children_.size() == 1) {
    if (!children_[0]->Execute()) {
      return false;
    }

    std::unique_ptr<LogicalTile> logical_tile(children_[0]->GetOutput());

    // FIXME: If the child's result is empty this assert fires; an empty
    // SELECT should arguably be handled gracefully instead.
    PELOTON_ASSERT(logical_tile.get() != nullptr);

    auto target_table_schema = target_table->GetSchema();
    auto column_count = target_table_schema->GetColumnCount();

    std::unique_ptr<storage::Tuple> tuple(
        new storage::Tuple(target_table_schema, true));

    // Go over the logical tile
    for (oid_t tuple_id : *logical_tile) {
      ContainerTuple<LogicalTile> cur_tuple(logical_tile.get(), tuple_id);

      // Materialize the logical tile tuple
      for (oid_t column_itr = 0; column_itr < column_count; column_itr++) {
        type::Value val = (cur_tuple.GetValue(column_itr));
        tuple->SetValue(column_itr, val, executor_pool);
      }

      // insert tuple into the table.
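      // InsertTuple is expected to also install the tuple into the table's
      // indexes and hand back, via index_entry_ptr, the entry that
      // PerformInsert records below (inferred from this call sequence).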
      ItemPointer *index_entry_ptr = nullptr;
      peloton::ItemPointer location =
          target_table->InsertTuple(tuple.get(), current_txn, &index_entry_ptr);

      // It is possible that a concurrent transaction has already inserted
      // the same tuple; in that case, abort this transaction.
      if (location.block == INVALID_OID) {
        transaction_manager.SetTransactionResult(current_txn,
                                                 peloton::ResultType::FAILURE);
        return false;
      }

      transaction_manager.PerformInsert(current_txn, location, index_entry_ptr);

      executor_context_->num_processed += 1;  // insert one
    }

    // execute after-insert-statement triggers and
    // record on-commit-insert-statement triggers into current transaction
    if (trigger_list != nullptr) {
      LOG_TRACE("size of trigger list in target table: %d",
                trigger_list->GetTriggerListSize());
      if (trigger_list->HasTriggerType(TriggerType::AFTER_INSERT_STATEMENT)) {
        LOG_TRACE("target table has per-statement-after-insert triggers!");
        trigger_list->ExecTriggers(TriggerType::AFTER_INSERT_STATEMENT,
                                   current_txn);
      }
      if (trigger_list->HasTriggerType(
              TriggerType::ON_COMMIT_INSERT_STATEMENT)) {
        LOG_TRACE("target table has per-statement-on-commit-insert triggers!");
        trigger_list->ExecTriggers(TriggerType::ON_COMMIT_INSERT_STATEMENT,
                                   current_txn);
      }
    }
    return true;
  }
  // Inserting a collection of tuples from plan node
  else if (children_.size() == 0) {
    // Extract expressions or raw values from the plan node and construct
    // the tuple(s); bulk inserts are handled by the loop below.
    auto schema = target_table->GetSchema();
    auto project_info = node.GetProjectInfo();
    auto tuple = node.GetTuple(0);
    std::unique_ptr<storage::Tuple> storage_tuple;

    // If the plan carries a projection instead of a raw tuple, materialize
    // the tuple by evaluating the projection's target expressions.
    if (project_info) {
      // There should be no direct maps
      PELOTON_ASSERT(project_info->GetDirectMapList().size() == 0);

      storage_tuple.reset(new storage::Tuple(schema, true));

      for (auto target : project_info->GetTargetList()) {
        auto value =
            target.second.expr->Evaluate(nullptr, nullptr, executor_context_);
        storage_tuple->SetValue(target.first, value, executor_pool);
      }

      // Set tuple to point to temporary project tuple
      tuple = storage_tuple.get();
    }

    // Bulk Insert Mode
    for (oid_t insert_itr = 0; insert_itr < bulk_insert_count; insert_itr++) {
      // If we are doing a bulk insert from raw values rather than a
      // projection, fetch the tuple for this iteration.
      if (!project_info) {
        tuple = node.GetTuple(insert_itr);

        if (tuple == nullptr) {
          storage_tuple.reset(new storage::Tuple(schema, true));

          // read from values
          uint32_t num_columns = schema->GetColumnCount();
          for (uint32_t col_id = 0; col_id < num_columns; col_id++) {
            auto value = node.GetValue(col_id + insert_itr * num_columns);
            storage_tuple->SetValue(col_id, value, executor_pool);
          }

          // Set tuple to point to temporary project tuple
          tuple = storage_tuple.get();
        }
      }

      trigger::TriggerList *trigger_list = target_table->GetTriggerList();

      auto new_tuple = tuple;
      if (trigger_list != nullptr) {
        LOG_TRACE("size of trigger list in target table: %d",
                  trigger_list->GetTriggerListSize());
        if (trigger_list->HasTriggerType(TriggerType::BEFORE_INSERT_ROW)) {
          LOG_TRACE("target table has per-row-before-insert triggers!");
          LOG_TRACE("address of the origin tuple before firing triggers: 0x%lx",
                    long(tuple));
          trigger_list->ExecTriggers(TriggerType::BEFORE_INSERT_ROW,
                                     current_txn,
                                     const_cast<storage::Tuple *>(tuple),
                                     executor_context_, nullptr, &new_tuple);
          LOG_TRACE("address of the new tuple after firing triggers: 0x%lx",
                    long(new_tuple));
        }
      }

      if (new_tuple == nullptr) {
        // trigger doesn't allow this tuple to be inserted
        LOG_TRACE("this tuple is rejected by trigger");
        continue;
      }

      // Carry out insertion
      ItemPointer *index_entry_ptr = nullptr;
      ItemPointer location =
          target_table->InsertTuple(new_tuple, current_txn, &index_entry_ptr);
      if (new_tuple->GetColumnCount() > 2) {
        type::Value val = (new_tuple->GetValue(2));
        LOG_TRACE("value: %s", val.GetInfo().c_str());
      }

      if (location.block == INVALID_OID) {
        LOG_TRACE("Failed to Insert. Set txn failure.");
        transaction_manager.SetTransactionResult(current_txn,
                                                 ResultType::FAILURE);
        return false;
      }

      transaction_manager.PerformInsert(current_txn, location, index_entry_ptr);

      LOG_TRACE("Number of tuples in table after insert: %lu",
                target_table->GetTupleCount());

      executor_context_->num_processed += 1;  // insert one

      // execute after-insert-row triggers and
      // record on-commit-insert-row triggers into current transaction
      new_tuple = tuple;
      if (trigger_list != nullptr) {
        LOG_TRACE("size of trigger list in target table: %d",
                  trigger_list->GetTriggerListSize());
        if (trigger_list->HasTriggerType(TriggerType::AFTER_INSERT_ROW)) {
          LOG_TRACE("target table has per-row-after-insert triggers!");
          LOG_TRACE("address of the origin tuple before firing triggers: 0x%lx",
                    long(tuple));
          trigger_list->ExecTriggers(TriggerType::AFTER_INSERT_ROW, current_txn,
                                     const_cast<storage::Tuple *>(tuple),
                                     executor_context_, nullptr, &new_tuple);
          LOG_TRACE("address of the new tuple after firing triggers: 0x%lx",
                    long(new_tuple));
        }
        if (trigger_list->HasTriggerType(TriggerType::ON_COMMIT_INSERT_ROW)) {
          LOG_TRACE("target table has per-row-on-commit-insert triggers!");
          LOG_TRACE("address of the origin tuple before firing triggers: 0x%lx",
                    long(tuple));
          trigger_list->ExecTriggers(TriggerType::ON_COMMIT_INSERT_ROW,
                                     current_txn,
                                     const_cast<storage::Tuple *>(tuple),
                                     executor_context_, nullptr, &new_tuple);
          LOG_TRACE("address of the new tuple after firing triggers: 0x%lx",
                    long(new_tuple));
        }
      }
    }
    // execute after-insert-statement triggers and
    // record on-commit-insert-statement triggers into current transaction
    trigger_list = target_table->GetTriggerList();
    if (trigger_list != nullptr) {
      LOG_TRACE("size of trigger list in target table: %d",
                trigger_list->GetTriggerListSize());
      if (trigger_list->HasTriggerType(TriggerType::AFTER_INSERT_STATEMENT)) {
        LOG_TRACE("target table has per-statement-after-insert triggers!");
        trigger_list->ExecTriggers(TriggerType::AFTER_INSERT_STATEMENT,
                                   current_txn);
      }
      if (trigger_list->HasTriggerType(
              TriggerType::ON_COMMIT_INSERT_STATEMENT)) {
        LOG_TRACE("target table has per-statement-on-commit-insert triggers!");
        trigger_list->ExecTriggers(TriggerType::ON_COMMIT_INSERT_STATEMENT,
                                   current_txn);
      }
    }
    done_ = true;
    return true;
  }
  return true;
}
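(For context, a hedged sketch of how an executor such as the one above is
driven from the outside. The Init()/Execute()/GetOutput() protocol is
inferred from the children_[0]->Execute() and GetOutput() calls in these
examples; the helper name is hypothetical.)

// Drain an executor: each successful Execute() publishes one logical tile
// through GetOutput(), and ownership of the tile passes to the caller.
void DrainExecutor(executor::AbstractExecutor &exec) {
  if (!exec.Init()) return;  // assumed to report failure via a bool
  while (exec.Execute()) {
    std::unique_ptr<executor::LogicalTile> tile(exec.GetOutput());
    LOG_INFO("%s", tile->GetInfo().c_str());
  }
}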
Example #17
0
TEST_F(LogicalTileTests, TileMaterializationTest) {
    const int tuple_count = 4;
    std::shared_ptr<storage::TileGroup> tile_group(
        ExecutorTestsUtil::CreateTileGroup(tuple_count));

    // Create tuple schema from tile schemas.
    std::vector<catalog::Schema> &tile_schemas = tile_group->GetTileSchemas();
    std::unique_ptr<catalog::Schema> schema(
        catalog::Schema::AppendSchemaList(tile_schemas));

    // Create tuples and insert them into tile group.
    const bool allocate = true;
    storage::Tuple tuple1(schema.get(), allocate);
    storage::Tuple tuple2(schema.get(), allocate);
    auto pool = tile_group->GetTilePool(1);

    tuple1.SetValue(0, ValueFactory::GetIntegerValue(1), pool);
    tuple1.SetValue(1, ValueFactory::GetIntegerValue(1), pool);
    tuple1.SetValue(2, ValueFactory::GetTinyIntValue(1), pool);
    tuple1.SetValue(3, ValueFactory::GetStringValue("tuple 1"), pool);

    tuple2.SetValue(0, ValueFactory::GetIntegerValue(2), pool);
    tuple2.SetValue(1, ValueFactory::GetIntegerValue(2), pool);
    tuple2.SetValue(2, ValueFactory::GetTinyIntValue(2), pool);
    tuple2.SetValue(3, ValueFactory::GetStringValue("tuple 2"), pool);

    auto &txn_manager = concurrency::TransactionManagerFactory::GetInstance();
    txn_manager.BeginTransaction();
    // txn_id_t txn_id = txn->GetTransactionId();

    auto tuple_id1 = tile_group->InsertTuple(&tuple1);
    auto tuple_id2 = tile_group->InsertTuple(&tuple2);
    auto tuple_id3 = tile_group->InsertTuple(&tuple1);

    txn_manager.PerformInsert(ItemPointer(tile_group->GetTileGroupId(), tuple_id1));
    txn_manager.PerformInsert(ItemPointer(tile_group->GetTileGroupId(), tuple_id2));
    txn_manager.PerformInsert(ItemPointer(tile_group->GetTileGroupId(), tuple_id3));

    txn_manager.CommitTransaction();

    ////////////////////////////////////////////////////////////////
    // LOGICAL TILE (1 BASE TILE)
    ////////////////////////////////////////////////////////////////

    // Don't transfer ownership of any base tile to logical tile.
    auto base_tile_ref = tile_group->GetTileReference(1);

    std::vector<oid_t> position_list1 = {0, 1};
    std::vector<oid_t> position_list2 = {0, 1};

    std::unique_ptr<executor::LogicalTile> logical_tile(
        executor::LogicalTileFactory::GetTile());

    logical_tile->AddPositionList(std::move(position_list1));
    logical_tile->AddPositionList(std::move(position_list2));

    assert(tile_schemas.size() == 2);
    catalog::Schema *schema1 = &tile_schemas[0];
    catalog::Schema *schema2 = &tile_schemas[1];
    oid_t column_count = schema2->GetColumnCount();
    for (oid_t column_itr = 0; column_itr < column_count; column_itr++) {
        logical_tile->AddColumn(base_tile_ref, column_itr, column_itr);
    }

    LOG_INFO("%s", logical_tile->GetInfo().c_str());

    ////////////////////////////////////////////////////////////////
    // LOGICAL TILE (2 BASE TILE)
    ////////////////////////////////////////////////////////////////

    logical_tile.reset(executor::LogicalTileFactory::GetTile());

    auto base_tile_ref1 = tile_group->GetTileReference(0);
    auto base_tile_ref2 = tile_group->GetTileReference(1);

    position_list1 = {0, 1};
    position_list2 = {0, 1};
    std::vector<oid_t> position_list3 = {0, 1};
    std::vector<oid_t> position_list4 = {0, 1};

    logical_tile->AddPositionList(std::move(position_list1));
    logical_tile->AddPositionList(std::move(position_list2));
    logical_tile->AddPositionList(std::move(position_list3));
    logical_tile->AddPositionList(std::move(position_list4));

    oid_t column_count1 = schema1->GetColumnCount();
    for (oid_t column_itr = 0; column_itr < column_count1; column_itr++) {
        logical_tile->AddColumn(base_tile_ref1, column_itr, column_itr);
    }

    oid_t column_count2 = schema2->GetColumnCount();
    for (oid_t column_itr = 0; column_itr < column_count2; column_itr++) {
        logical_tile->AddColumn(base_tile_ref2, column_itr,
                                column_count1 + column_itr);
    }

    LOG_INFO("%s", logical_tile->GetInfo().c_str());
}
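(A usage sketch: the logical tiles assembled above can be read back with the
same iteration pattern the executors use. GetColumnCount() on the logical
tile is assumed here, and the ContainerTuple spelling follows the other
examples in this collection.)

// Walk every visible tuple and column through the position-list indirection.
for (oid_t tuple_id : *logical_tile) {
  expression::ContainerTuple<executor::LogicalTile> row(logical_tile.get(),
                                                        tuple_id);
  for (oid_t col = 0; col < logical_tile->GetColumnCount(); col++) {
    LOG_INFO("(%u, %u) = %s", tuple_id, col,
             row.GetValue(col).GetInfo().c_str());
  }
}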
Example #18
0
/**
 * @brief Creates logical tile from tile group and applies scan predicate.
 * @return true on success, false otherwise.
 */
bool SeqScanExecutor::DExecute() {
  // Scanning over a logical tile.
  if (children_.size() == 1) {
    // FIXME Check all requirements for children_.size() == 0 case.
    LOG_TRACE("Seq Scan executor :: 1 child \n");

    assert(target_table_ == nullptr);
    assert(column_ids_.size() == 0);

    while (children_[0]->Execute()) {
      std::unique_ptr<LogicalTile> tile(children_[0]->GetOutput());

      if (predicate_ != nullptr) {
        // Invalidate tuples that don't satisfy the predicate.
        for (oid_t tuple_id : *tile) {
          expression::ContainerTuple<LogicalTile> tuple(tile.get(), tuple_id);
          if (predicate_->Evaluate(&tuple, nullptr, executor_context_)
                  .IsFalse()) {
            tile->RemoveVisibility(tuple_id);
          }
        }
      }

      if (0 == tile->GetTupleCount()) {  // Avoid returning empty tiles
        continue;
      }

      /* Hopefully we needn't do projections here */
      SetOutput(tile.release());
      return true;
    }

    return false;
  }
  // Scanning a table
  else if (children_.size() == 0) {
    LOG_TRACE("Seq Scan executor :: 0 child \n");

    assert(target_table_ != nullptr);
    assert(column_ids_.size() > 0);

    // Retrieve next tile group.
    while (current_tile_group_offset_ < table_tile_group_count_) {
      auto tile_group =
          target_table_->GetTileGroup(current_tile_group_offset_++);

      storage::TileGroupHeader *tile_group_header = tile_group->GetHeader();

      auto transaction = executor_context_->GetTransaction();
      txn_id_t txn_id = transaction->GetTransactionId();
      cid_t commit_id = transaction->GetLastCommitId();
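      // GetNextTupleSlot() is the tile group's high-water mark of allocated
      // slots; the visibility check in the loop below filters out versions
      // this transaction must not see.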
      oid_t active_tuple_count = tile_group->GetNextTupleSlot();

      // Print tile group visibility
      // tile_group_header->PrintVisibility(txn_id, commit_id);

      // Construct logical tile.
      std::unique_ptr<LogicalTile> logical_tile(LogicalTileFactory::GetTile());
      logical_tile->AddColumns(tile_group, column_ids_);

      // Construct position list by looping through tile group
      // and applying the predicate.
      std::vector<oid_t> position_list;
      for (oid_t tuple_id = 0; tuple_id < active_tuple_count; tuple_id++) {
        if (tile_group_header->IsVisible(tuple_id, txn_id, commit_id) ==
            false) {
          continue;
        }

        expression::ContainerTuple<storage::TileGroup> tuple(tile_group.get(),
                                                             tuple_id);
        if (predicate_ == nullptr) {
          position_list.push_back(tuple_id);
        } else {
          auto eval =
              predicate_->Evaluate(&tuple, nullptr, executor_context_).IsTrue();
          if (eval == true) position_list.push_back(tuple_id);
        }
      }

      logical_tile->AddPositionList(std::move(position_list));

      // Don't return empty tiles
      if (0 == logical_tile->GetTupleCount()) {
        continue;
      }

      SetOutput(logical_tile.release());
      return true;
    }
  }

  return false;
}
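(A conceptual sketch of the visibility test invoked above. The real
TileGroupHeader::IsVisible consults per-slot transaction ids and begin/end
commit ids; the simplified predicate below is an assumption meant only to
convey the MVCC idea, not the actual implementation.)

// Visible if the slot is our own write, or a committed version whose
// [begin_cid, end_cid) lifetime covers our reading commit id.
bool IsVisibleSketch(txn_id_t slot_owner, cid_t begin_cid, cid_t end_cid,
                     txn_id_t txn_id, cid_t at_cid) {
  if (slot_owner == txn_id) {
    // Our own uncommitted write: visible unless we deleted it ourselves.
    return at_cid < end_cid;
  }
  return begin_cid <= at_cid && at_cid < end_cid;
}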