Example #1
void Transaction::RecordUpdate(const ItemPointer &location) {
  oid_t tile_group_id = location.block;
  oid_t tuple_id = location.offset;

  if (rw_set_.find(tile_group_id) != rw_set_.end() &&
      rw_set_.at(tile_group_id).find(tuple_id) !=
          rw_set_.at(tile_group_id).end()) {
    RWType &type = rw_set_.at(tile_group_id).at(tuple_id);
    if (type == RW_TYPE_READ || type == RW_TYPE_READ_OWN) {
      type = RW_TYPE_UPDATE;
      // record write.
      is_written_ = true;

      return;
    }
    if (type == RW_TYPE_UPDATE) {
      return;
    }
    if (type == RW_TYPE_INSERT) {
      return;
    }
    if (type == RW_TYPE_DELETE) {
      PL_ASSERT(false);
      return;
    }
    PL_ASSERT(false);
  }
}
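
RecordUpdate walks a two-level read/write set keyed by tile group and tuple offset and promotes a READ entry to UPDATE. Below is a minimal standalone sketch of that bookkeeping; the RWType enum, the oid_t alias, and the map layout are simplified stand-ins, not Peloton's actual definitions.

#include <cassert>
#include <cstdint>
#include <unordered_map>

// Simplified stand-ins for the types used above (illustrative only).
enum class RWType { READ, READ_OWN, UPDATE, INSERT, DELETE, INS_DEL };
using oid_t = uint32_t;
using ReadWriteSet =
    std::unordered_map<oid_t, std::unordered_map<oid_t, RWType>>;

// Promote a previously read tuple to UPDATE, mirroring the transitions above.
// Returns true if the entry now represents a new write.
bool PromoteToUpdate(ReadWriteSet &rw_set, oid_t tile_group_id,
                     oid_t tuple_id) {
  auto group_it = rw_set.find(tile_group_id);
  if (group_it == rw_set.end()) return false;
  auto tuple_it = group_it->second.find(tuple_id);
  if (tuple_it == group_it->second.end()) return false;

  RWType &type = tuple_it->second;
  if (type == RWType::READ || type == RWType::READ_OWN) {
    type = RWType::UPDATE;  // the transaction now owns a write on this tuple
    return true;
  }
  // UPDATE and INSERT already imply a pending write; DELETE must not happen.
  assert(type != RWType::DELETE);
  return false;
}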
Example #2
void LoadTable(std::unique_ptr<storage::DataTable> &hyadapt_table) {
  auto table_schema = hyadapt_table->GetSchema();

  /////////////////////////////////////////////////////////
  // Load in the data
  /////////////////////////////////////////////////////////

  // Insert tuples into tile_group.
  auto &txn_manager = concurrency::TransactionManagerFactory::GetInstance();
  const bool allocate = true;
  auto txn = txn_manager.BeginTransaction();

  for (size_t tuple_itr = 0; tuple_itr < tuple_count; tuple_itr++) {
    storage::Tuple tuple(table_schema, allocate);
    for (oid_t col_itr = 0; col_itr < column_count; col_itr++) {
      auto value = type::ValueFactory::GetIntegerValue(tuple_itr);
      tuple.SetValue(col_itr, value, nullptr);
    }

    ItemPointer *index_entry_ptr = nullptr;
    ItemPointer tuple_slot_id =
        hyadapt_table->InsertTuple(&tuple, txn, &index_entry_ptr);
    PL_ASSERT(tuple_slot_id.block != INVALID_OID);
    PL_ASSERT(tuple_slot_id.offset != INVALID_OID);

    txn_manager.PerformInsert(txn, tuple_slot_id, index_entry_ptr);
  }

  txn_manager.CommitTransaction(txn);
}
Example #3
/**
 * Grab next slot (thread-safe) and fill in the tuple if tuple != nullptr
 *
 * Returns slot where inserted (INVALID_OID if not inserted)
 */
oid_t TileGroup::InsertTuple(const Tuple *tuple) {
  oid_t tuple_slot_id = tile_group_header->GetNextEmptyTupleSlot();

  LOG_TRACE("Tile Group Id :: %u status :: %u out of %u slots ", tile_group_id,
            tuple_slot_id, num_tuple_slots);

  // No more slots
  if (tuple_slot_id == INVALID_OID) {
    LOG_TRACE("Failed to get next empty tuple slot within tile group.");
    return INVALID_OID;
  }

  // If the input tuple is nullptr, the tuple will be filled in outside this
  // function, so directly return the empty slot.
  if (tuple == nullptr) {
    return tuple_slot_id;
  }

  // copy tuple.
  CopyTuple(tuple, tuple_slot_id);

  // Set MVCC info
  PL_ASSERT(tile_group_header->GetTransactionId(tuple_slot_id) ==
            INVALID_TXN_ID);
  PL_ASSERT(tile_group_header->GetBeginCommitId(tuple_slot_id) == MAX_CID);
  PL_ASSERT(tile_group_header->GetEndCommitId(tuple_slot_id) == MAX_CID);
  return tuple_slot_id;
}
Example #4
Value DecimalType::Modulo(const Value& left, const Value &right) const {
  PL_ASSERT(GetTypeId() == Type::DECIMAL);
  PL_ASSERT(left.CheckComparable(right));
  if (left.IsNull() || right.IsNull())
    return left.OperateNull(right);
  
  if (right.IsZero()) {
    throw Exception(EXCEPTION_TYPE_DIVIDE_BY_ZERO,
                    "Division by zero.");
  }
  switch(right.GetTypeId()) {
    case Type::TINYINT:
      return ValueFactory::GetDecimalValue(ValMod(left.value_.decimal, right.GetAs<int8_t>()));
    case Type::SMALLINT:
      return ValueFactory::GetDecimalValue(ValMod(left.value_.decimal, right.GetAs<int16_t>()));
    case Type::INTEGER:
      return ValueFactory::GetDecimalValue(ValMod(left.value_.decimal, right.GetAs<int32_t>()));
    case Type::BIGINT:
      return ValueFactory::GetDecimalValue(ValMod(left.value_.decimal, right.GetAs<int64_t>()));
    case Type::DECIMAL:
      return ValueFactory::GetDecimalValue(ValMod(left.value_.decimal, right.GetAs<double>()));
    default:
      throw Exception("type error");
  }
}
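
The ValMod helper is not shown in this excerpt. A plausible minimal version for mixed integer/decimal operands just computes the result in double via std::fmod; the name and signature below are assumptions, not the actual Peloton helper.

#include <cmath>

// Hypothetical sketch of a ValMod-style helper: modulo for any mix of
// integral and floating-point operands, computed in double precision.
template <typename LeftT, typename RightT>
double ValMod(LeftT left, RightT right) {
  return std::fmod(static_cast<double>(left), static_cast<double>(right));
}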
Example #5
/**
 * @brief Basic checks.
 * @return true on success, false otherwise.
 */
bool AppendExecutor::DInit() {
  // should have >= 2 children, otherwise pointless.
  PL_ASSERT(children_.size() >= 2);
  PL_ASSERT(cur_child_id_ == 0);

  return true;
}
Example #6
/**
 * @brief Build the joined tile with a schema derived from the children's tiles
 */
std::unique_ptr<LogicalTile> AbstractJoinExecutor::BuildOutputLogicalTile(
    LogicalTile *left_tile, LogicalTile *right_tile) {
  // Check the input logical tiles.
  PL_ASSERT(left_tile != nullptr);
  PL_ASSERT(right_tile != nullptr);

  // Construct output logical tile.
  std::unique_ptr<LogicalTile> output_tile(LogicalTileFactory::GetTile());

  auto left_tile_schema = left_tile->GetSchema();
  auto right_tile_schema = right_tile->GetSchema();

  // advance the position list index of right tile schema
  for (auto &col : right_tile_schema) {
    col.position_list_idx += left_tile->GetPositionLists().size();
  }

  /* build the schema given the projection */
  auto output_tile_schema = BuildSchema(left_tile_schema, right_tile_schema);

  // Set the output logical tile schema
  output_tile->SetSchema(std::move(output_tile_schema));

  return output_tile;
}
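
Right-side columns keep reading from their own position lists, so their indices are shifted past the left tile's lists before the schemas are concatenated. A standalone sketch of just that adjustment, with a simplified stand-in for LogicalTile::ColumnInfo, follows.

#include <cstddef>
#include <vector>

// Simplified stand-in for LogicalTile::ColumnInfo (illustrative only).
struct ColumnInfo {
  std::size_t position_list_idx;  // which position list this column reads from
  std::size_t origin_column_id;
};

// Concatenate left and right schemas for a join output: right-side columns
// are shifted past the left tile's position lists, as in the method above.
std::vector<ColumnInfo> ConcatJoinSchema(std::vector<ColumnInfo> left,
                                         std::vector<ColumnInfo> right,
                                         std::size_t left_position_list_count) {
  for (auto &col : right) {
    col.position_list_idx += left_position_list_count;
  }
  left.insert(left.end(), right.begin(), right.end());
  return left;
}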
Example #7
void LoadTable() {
  const oid_t col_count = state.column_count + 1;
  const int tuple_count = state.scale_factor * state.tuples_per_tilegroup;

  auto table_schema = sdbench_table->GetSchema();

  /////////////////////////////////////////////////////////
  // Load in the data
  /////////////////////////////////////////////////////////

  // Insert tuples into tile_group.
  auto &txn_manager = concurrency::TransactionManagerFactory::GetInstance();
  const bool allocate = true;
  auto txn = txn_manager.BeginTransaction();
  std::unique_ptr<VarlenPool> pool(new VarlenPool(BACKEND_TYPE_MM));

  int rowid;
  for (rowid = 0; rowid < tuple_count; rowid++) {
    int populate_value = rowid;

    storage::Tuple tuple(table_schema, allocate);

    for (oid_t col_itr = 0; col_itr < col_count; col_itr++) {
      auto value = ValueFactory::GetIntegerValue(populate_value);
      tuple.SetValue(col_itr, value, pool.get());
    }

    ItemPointer tuple_slot_id = sdbench_table->InsertTuple(&tuple);
    PL_ASSERT(tuple_slot_id.block != INVALID_OID);
    PL_ASSERT(tuple_slot_id.offset != INVALID_OID);
    txn->RecordInsert(tuple_slot_id);
  }

  txn_manager.CommitTransaction();
}
Example #8
// Set the value of the column at the given offset in this tuple.
void Tuple::SetValue(oid_t column_offset, const common::Value &value) {
  PL_ASSERT(tuple_schema);
  PL_ASSERT(tuple_data);

  const common::Type::TypeId type = tuple_schema->GetType(column_offset);

  const bool is_inlined = tuple_schema->IsInlined(column_offset);
  char *value_location = GetDataPtr(column_offset);
  UNUSED_ATTRIBUTE int32_t column_length =
      tuple_schema->GetLength(column_offset);
  if (is_inlined == false)
    column_length = tuple_schema->GetVariableLength(column_offset);

  // const bool is_in_bytes = false;
  // Allocate in heap or given data pool depending on whether a pool is provided
  // Skip casting if type is same
  if (type == value.GetTypeId()) {
    value.SerializeTo(value_location, is_inlined, nullptr);
  } else {
    common::Value *casted_value = value.CastAs(type);
    casted_value->SerializeTo(value_location, is_inlined, nullptr);
    // Do not clean up immediately
    // casted_value.SetCleanUp(false);
  }
}
Example #9
// For an insert, the copy should allocate for all uninlined columns.
// This does not do any schema checks; the schemas must match.
void Tuple::Copy(const void *source, common::VarlenPool *pool) {
  PL_ASSERT(tuple_schema);
  PL_ASSERT(tuple_data);

  const bool is_inlined = tuple_schema->IsInlined();
  const oid_t uninlineable_column_count =
      tuple_schema->GetUninlinedColumnCount();

  if (is_inlined) {
    // copy the data
    PL_MEMCPY(tuple_data, source, tuple_schema->GetLength());
  } else {
    // copy the data
    PL_MEMCPY(tuple_data, source, tuple_schema->GetLength());

    // Copy each uninlined column, doing an allocation for the copy.
    for (oid_t column_itr = 0; column_itr < uninlineable_column_count;
         column_itr++) {
      const oid_t uninlined_column_id =
          tuple_schema->GetUninlinedColumn(column_itr);

      // Get the original value from the uninlined pool
      common::Value *value = GetValue(uninlined_column_id);

      // Make a copy of the value at a new location in the uninlined pool
      SetValue(uninlined_column_id, *value, pool);
    }
  }
}
Example #10
/**
 * @brief Build the schema of the joined tile based on the projection info
 */
std::vector<LogicalTile::ColumnInfo> AbstractJoinExecutor::BuildSchema(
    std::vector<LogicalTile::ColumnInfo> &left,
    std::vector<LogicalTile::ColumnInfo> &right) {
  std::vector<LogicalTile::ColumnInfo> schema;
  if (proj_info_ == nullptr) {
    // no projection
    schema.assign(left.begin(), left.end());
    schema.insert(schema.end(), right.begin(), right.end());
  } else {
    PL_ASSERT(!proj_info_->isNonTrivial());
    auto &direct_map_list = proj_info_->GetDirectMapList();
    schema.resize(direct_map_list.size());

    LOG_TRACE("left size: %lu, right size: %lu", left.size(), right.size());
    LOG_TRACE("Projection: %s", proj_info_->Debug().c_str());
    for (auto &entry : direct_map_list) {
      if (entry.second.first == 0) {
        PL_ASSERT(entry.second.second < left.size());
        schema[entry.first] = left[entry.second.second];
      } else {
        PL_ASSERT(entry.second.second < right.size());
        schema[entry.first] = right[entry.second.second];
      }
    }
  }
  return schema;
}
Example #11
bool Transaction::RecordDelete(const ItemPointer &location) {
  oid_t tile_group_id = location.block;
  oid_t tuple_id = location.offset;

  if (rw_set_.find(tile_group_id) != rw_set_.end() &&
      rw_set_.at(tile_group_id).find(tuple_id) !=
          rw_set_.at(tile_group_id).end()) {
    RWType &type = rw_set_.at(tile_group_id).at(tuple_id);
    if (type == RW_TYPE_READ) {
      type = RW_TYPE_DELETE;
      // record write.
      is_written_ = true;
      return false;
    }
    if (type == RW_TYPE_UPDATE) {
      type = RW_TYPE_DELETE;
      return false;
    }
    if (type == RW_TYPE_INSERT) {
      type = RW_TYPE_INS_DEL;
      --insert_count_;
      return true;
    }
    if (type == RW_TYPE_DELETE) {
      PL_ASSERT(false);
      return false;
    }
    PL_ASSERT(false);
  } else {
    PL_ASSERT(false);
  }
  return false;
}
Example #12
/* @brief   Return all the columns indexed by this index
 * @param   index_oid
 * @param   txn  Transaction
 * @return  a vector of column oids (logical positions)
 */
std::vector<oid_t> IndexCatalog::GetIndexedAttributes(
    oid_t index_oid, concurrency::Transaction *txn) {
  std::vector<oid_t> column_ids({6});  // Indexed attributes
  oid_t index_offset = 0;              // Index of index_oid
  std::vector<type::Value> values;
  values.push_back(type::ValueFactory::GetIntegerValue(index_oid).Copy());

  std::vector<oid_t> key_attrs;
  std::string temp;
  auto result_tiles =
      GetResultWithIndexScan(column_ids, index_offset, values, txn);

  PL_ASSERT(result_tiles->size() <= 1);  // index_oid is unique
  if (result_tiles->size() != 0) {
    PL_ASSERT((*result_tiles)[0]->GetTupleCount() <= 1);
    if ((*result_tiles)[0]->GetTupleCount() != 0) {
      temp = (*result_tiles)[0]->GetValue(0, 0).ToString();
    }
  }
  LOG_TRACE("the string value for index keys is %s", temp.c_str());
  // using " " as delimiter to split up string and turn into vector of oid_t
  std::stringstream os(temp.c_str());  // Turn the string into a stream.
  std::string tok;

  while (std::getline(os, tok, ' ')) {
    key_attrs.push_back(std::stoi(tok));
  }
  LOG_TRACE("the size for indexed key is %lu", key_attrs.size());
  return key_attrs;
}
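
The indexed key attributes are stored as a single space-delimited string and split back into column oids with a stringstream. A self-contained version of just that parsing step might look like this (the helper name is illustrative):

#include <cstdint>
#include <sstream>
#include <string>
#include <vector>

using oid_t = uint32_t;

// Split a space-delimited list such as "1 4 7" into column oids.
std::vector<oid_t> ParseKeyAttributes(const std::string &encoded) {
  std::vector<oid_t> key_attrs;
  std::stringstream stream(encoded);
  std::string token;
  while (std::getline(stream, token, ' ')) {
    key_attrs.push_back(static_cast<oid_t>(std::stoi(token)));
  }
  return key_attrs;
}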
Example #13
Value TinyintType::Modulo(const Value& left, const Value &right) const {
  PL_ASSERT(left.CheckInteger());
  PL_ASSERT(left.CheckComparable(right));
  if (left.IsNull() || right.IsNull())
    return left.OperateNull(right);

  if (right.IsZero()) {
    throw Exception(EXCEPTION_TYPE_DIVIDE_BY_ZERO,
                    "Division by zero on right-hand side");
  }

  switch (right.GetTypeId()) {
  case TypeId::TINYINT:
    return ModuloValue<int8_t, int8_t>(left, right);
  case TypeId::SMALLINT:
    return ModuloValue<int8_t, int16_t>(left, right);
  case TypeId::INTEGER:
  case TypeId::PARAMETER_OFFSET:
    return ModuloValue<int8_t, int32_t>(left, right);
  case TypeId::BIGINT:
    return ModuloValue<int8_t, int64_t>(left, right);
  case TypeId::DECIMAL:
    return ValueFactory::GetDecimalValue(
        ValMod(left.value_.tinyint, right.GetAs<double>()));
  case TypeId::VARCHAR: {
    auto r_value = right.CastAs(TypeId::TINYINT);
    return ModuloValue<int8_t, int8_t>(left, r_value);
  }
  default:
    break;
  }

  throw Exception("type error");
}
Example #14
void FrontendLoggingThread::ExecuteNext() {
  // Prepare data for operation
  logging_op_type op = schedule->operations[cur_seq].op;

  cur_seq++;

  // Execute the operation
  switch (op) {
    case LOGGING_OP_COLLECT: {
      LOG_INFO("Execute Collect");
      PL_ASSERT(frontend_logger);
      frontend_logger->CollectLogRecordsFromBackendLoggers();
      break;
    }
    case LOGGING_OP_FLUSH: {
      LOG_INFO("Execute Flush");
      PL_ASSERT(frontend_logger);
      frontend_logger->FlushLogRecords();
      results.push_back(frontend_logger->GetMaxFlushedCommitId());
      break;
    }
    default: {
      LOG_ERROR("Unsupported operation type!");
      PL_ASSERT(false);
      break;
    }
  }
}
Example #15
template <typename L, typename R>
AbstractExpression *GetMoreSpecialized(ExpressionType c, L *l, R *r) {
  PL_ASSERT(l);
  PL_ASSERT(r);
  switch (c) {
    case (EXPRESSION_TYPE_COMPARE_EQUAL):
      return new InlinedComparisonExpression<CmpEq, L, R>(c, l, r);
    case (EXPRESSION_TYPE_COMPARE_NOTEQUAL):
      return new InlinedComparisonExpression<CmpNe, L, R>(c, l, r);
    case (EXPRESSION_TYPE_COMPARE_LESSTHAN):
      return new InlinedComparisonExpression<CmpLt, L, R>(c, l, r);
    case (EXPRESSION_TYPE_COMPARE_GREATERTHAN):
      return new InlinedComparisonExpression<CmpGt, L, R>(c, l, r);
    case (EXPRESSION_TYPE_COMPARE_LESSTHANOREQUALTO):
      return new InlinedComparisonExpression<CmpLte, L, R>(c, l, r);
    case (EXPRESSION_TYPE_COMPARE_GREATERTHANOREQUALTO):
      return new InlinedComparisonExpression<CmpGte, L, R>(c, l, r);
    case (EXPRESSION_TYPE_COMPARE_LIKE):
      return new InlinedComparisonExpression<CmpLike, L, R>(c, l, r);
    case (EXPRESSION_TYPE_COMPARE_NOTLIKE):
      return new InlinedComparisonExpression<CmpNotLike, L, R>(c, l, r);
    case (EXPRESSION_TYPE_COMPARE_IN):
      return new InlinedComparisonExpression<CmpIn, L, R>(c, l, r);
    default:
      char message[256];
      snprintf(message, 256,
               "Invalid ExpressionType '%s' called for"
               " ComparisonExpression",
               ExpressionTypeToString(c).c_str());
      throw Exception(message);
  }
}
Example #16
expression::AbstractExpression *ExprTransformer::TransformScalarArrayOp(
    const ExprState *es) {
  LOG_TRACE("Transform ScalarArrayOp ");

  auto op_expr = reinterpret_cast<const ScalarArrayOpExpr *>(es->expr);
  // auto sa_state = reinterpret_cast<const ScalarArrayOpExprState*>(es);
  // Hopefully it has been filled in by the PG planner
  PL_ASSERT(op_expr->opfuncid != 0);
  const List *list = op_expr->args;
  PL_ASSERT(list_length(list) <= 2);  // Hopefully it has at most two parameters

  // Extract function arguments (at most two)
  expression::AbstractExpression *lc = nullptr;
  expression::AbstractExpression *rc = nullptr;
  int ic = 0;
  ListCell *arg;
  foreach (arg, list) {
    Expr *ex = (Expr *)lfirst(arg);

    if (ic >= list_length(list)) break;
    if (ic == 0)
      lc = TransformExpr(ex);
    else if (ic == 1)
      rc = TransformExpr(ex);
    else
      break;

    ic++;
  }
Example #17
AbstractExpression *GetGeneral(ExpressionType c, AbstractExpression *l,
                               AbstractExpression *r) {
  PL_ASSERT(l);
  PL_ASSERT(r);
  switch (c) {
    case (EXPRESSION_TYPE_COMPARE_EQUAL):
      return new ComparisonExpression<CmpEq>(c, l, r);
    case (EXPRESSION_TYPE_COMPARE_NOTEQUAL):
      return new ComparisonExpression<CmpNe>(c, l, r);
    case (EXPRESSION_TYPE_COMPARE_LESSTHAN):
      return new ComparisonExpression<CmpLt>(c, l, r);
    case (EXPRESSION_TYPE_COMPARE_GREATERTHAN):
      return new ComparisonExpression<CmpGt>(c, l, r);
    case (EXPRESSION_TYPE_COMPARE_LESSTHANOREQUALTO):
      return new ComparisonExpression<CmpLte>(c, l, r);
    case (EXPRESSION_TYPE_COMPARE_GREATERTHANOREQUALTO):
      return new ComparisonExpression<CmpGte>(c, l, r);
    case (EXPRESSION_TYPE_COMPARE_LIKE):
      return new ComparisonExpression<CmpLike>(c, l, r);
    case (EXPRESSION_TYPE_COMPARE_NOTLIKE):
      return new ComparisonExpression<CmpNotLike>(c, l, r);
    case (EXPRESSION_TYPE_COMPARE_IN):
      return new ComparisonExpression<CmpIn>(c, l, r);
    default:
      char message[256];
      snprintf(message, 256,
               "Invalid ExpressionType '%s' called"
               " for ComparisonExpression",
               ExpressionTypeToString(c).c_str());
      throw Exception(message);
  }
}
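
Examples #15 and #17 map a runtime ExpressionType onto a compile-time comparator template parameter. A toy standalone version of the same dispatch pattern, using illustrative comparator structs rather than Peloton's Cmp* functors, is sketched below.

#include <memory>
#include <stdexcept>

// Toy comparators standing in for CmpEq / CmpNe / ... (illustrative only).
struct Eq { static bool Apply(int a, int b) { return a == b; } };
struct Ne { static bool Apply(int a, int b) { return a != b; } };

struct Predicate {
  virtual ~Predicate() = default;
  virtual bool Evaluate(int a, int b) const = 0;
};

// The comparator is a template parameter, so Evaluate inlines Op::Apply.
template <typename Op>
struct ComparisonPredicate : Predicate {
  bool Evaluate(int a, int b) const override { return Op::Apply(a, b); }
};

enum class CompareType { EQUAL, NOT_EQUAL };

// Runtime enum -> compile-time comparator, the same shape as GetGeneral above.
std::unique_ptr<Predicate> MakePredicate(CompareType type) {
  switch (type) {
    case CompareType::EQUAL:
      return std::make_unique<ComparisonPredicate<Eq>>();
    case CompareType::NOT_EQUAL:
      return std::make_unique<ComparisonPredicate<Ne>>();
    default:
      throw std::invalid_argument("unsupported comparison type");
  }
}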
Example #18
/**
 * Copy the given tuple into the specified tuple slot, column by column,
 * across all tiles in this tile group.
 */
void TileGroup::CopyTuple(const Tuple *tuple, const oid_t &tuple_slot_id) {
    LOG_TRACE("Tile Group Id :: %u status :: %u out of %u slots ", tile_group_id,
              tuple_slot_id, num_tuple_slots);

    oid_t tile_column_count;
    oid_t column_itr = 0;

    for (oid_t tile_itr = 0; tile_itr < tile_count; tile_itr++) {
        const catalog::Schema &schema = tile_schemas[tile_itr];
        tile_column_count = schema.GetColumnCount();

        storage::Tile *tile = GetTile(tile_itr);
        PL_ASSERT(tile);
        char *tile_tuple_location = tile->GetTupleLocation(tuple_slot_id);
        PL_ASSERT(tile_tuple_location);

        // NOTE:: Only a tuple wrapper
        storage::Tuple tile_tuple(&schema, tile_tuple_location);

        for (oid_t tile_column_itr = 0; tile_column_itr < tile_column_count;
                tile_column_itr++) {
            tile_tuple.SetValue(tile_column_itr, tuple->GetValue(column_itr),
                                tile->GetPool());
            column_itr++;
        }
    }
}
Example #19
/**
 * @brief Basic initialization.
 * @return true on success, false otherwise.
 */
bool AggregateExecutor::DInit() {
  PL_ASSERT(children_.size() == 1);

  LOG_TRACE("Aggregate executor :: 1 child ");

  // Grab info from plan node and check it
  const planner::AggregatePlan &node = GetPlanNode<planner::AggregatePlan>();

  // Construct the output table
  auto output_table_schema =
      const_cast<catalog::Schema *>(node.GetOutputSchema());

  PL_ASSERT(output_table_schema->GetColumnCount() >= 1);

  // clean up result
  result_itr = START_OID;
  result.clear();

  // reset done
  done = false;

  // clean up temporary aggregation table
  delete output_table;

  bool own_schema = false;
  bool adapt_table = false;
  output_table = storage::TableFactory::GetDataTable(
      INVALID_OID, INVALID_OID, output_table_schema, "aggregate_temp_table",
      DEFAULT_TUPLES_PER_TILEGROUP, own_schema, adapt_table);

  return true;
}
Example #20
/**
 * Apply the column delta on the rollback segment to the given tuple
 */
void TileGroup::ApplyRollbackSegment(char *rb_seg, const oid_t &tuple_slot_id) {
    auto seg_col_count = storage::RollbackSegmentPool::GetColCount(rb_seg);
    auto table_schema = GetAbstractTable()->GetSchema();

    for (size_t idx = 0; idx < seg_col_count; ++idx) {
        auto col_id =
            storage::RollbackSegmentPool::GetIdOffsetPair(rb_seg, idx)->col_id;
        Value col_value =
            storage::RollbackSegmentPool::GetValue(rb_seg, table_schema, idx);

        // Get target tile
        auto tile_id = GetTileIdFromColumnId(col_id);
        PL_ASSERT(tile_id < GetTileCount());
        storage::Tile *tile = GetTile(tile_id);
        PL_ASSERT(tile);

        // Get tile schema
        auto &tile_schema = tile_schemas[tile_id];

        // Get a tuple wrapper
        char *tile_tuple_location = tile->GetTupleLocation(tuple_slot_id);
        PL_ASSERT(tile_tuple_location);
        storage::Tuple tile_tuple(&tile_schema, tile_tuple_location);

        // Write the value to tuple
        auto tile_col_idx = GetTileColumnId(col_id);
        tile_tuple.SetValue(tile_col_idx, col_value, tile->GetPool());
    }
}
Example #21
bool Tile::SerializeHeaderTo(SerializeOutput &output) {
  std::size_t start;

  // Use the cache if possible
  if (column_header != NULL) {
    PL_ASSERT(column_header_size != INVALID_OID);
    output.WriteBytes(column_header, column_header_size);
    return true;
  }

  PL_ASSERT(column_header_size == INVALID_OID);

  // Skip header position
  start = output.Position();
  output.WriteInt(-1);

  // Status code
  output.WriteByte(-128);

  // Column counts as a short
  output.WriteShort(static_cast<int16_t>(column_count));

  // Write an array of column types as bytes
  for (oid_t column_itr = 0; column_itr < column_count; ++column_itr) {
    type::Type::TypeId type = schema.GetType(column_itr);
    output.WriteByte(static_cast<int8_t>(type));
  }

  // Write the array of column names as strings
  // NOTE: strings are ASCII only in metadata (UTF-8 in table storage)
  for (oid_t column_itr = 0; column_itr < column_count; ++column_itr) {
    // Column name: Write (offset, length) for column definition, and string to
    // string table
    const std::string &name = GetColumnName(column_itr);

    // Column names can't be null, so length must be >= 0
    int32_t length = static_cast<int32_t>(name.size());
    PL_ASSERT(length >= 0);

    // This is the standard string serialization used by VoltDB
    output.WriteInt(length);
    output.WriteBytes(name.data(), length);
  }

  // Write the header size as an int that excludes the size field itself
  size_t Position = output.Position();
  column_header_size = static_cast<int32_t>(Position - start);

  int32_t non_inclusive_header_size =
      static_cast<int32_t>(column_header_size - sizeof(int32_t));
  output.WriteIntAt(start, non_inclusive_header_size);

  // Cache the column header
  column_header = new char[column_header_size];
  PL_MEMCPY(column_header, static_cast<const char *>(output.Data()) + start,
            column_header_size);

  return true;
}
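
SerializeHeaderTo reserves space for the header size, writes the header, and then back-patches the measured length at the saved position. The same pattern in isolation, using a tiny illustrative buffer class rather than Peloton's SerializeOutput:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <string>
#include <vector>

// Minimal append-only buffer with the operations the pattern needs.
class ByteBuffer {
 public:
  std::size_t Position() const { return data_.size(); }
  void WriteInt(int32_t v) { Append(&v, sizeof(v)); }
  void WriteBytes(const void *src, std::size_t len) { Append(src, len); }
  void WriteIntAt(std::size_t pos, int32_t v) {
    std::memcpy(data_.data() + pos, &v, sizeof(v));
  }

 private:
  void Append(const void *src, std::size_t len) {
    const char *bytes = static_cast<const char *>(src);
    data_.insert(data_.end(), bytes, bytes + len);
  }
  std::vector<char> data_;
};

// Serialize a header whose length is only known after it has been written.
void WriteHeader(ByteBuffer &out,
                 const std::vector<std::string> &column_names) {
  std::size_t start = out.Position();
  out.WriteInt(-1);  // placeholder for the (non-inclusive) header size

  for (const auto &name : column_names) {
    out.WriteInt(static_cast<int32_t>(name.size()));
    out.WriteBytes(name.data(), name.size());
  }

  // Back-patch: total bytes written minus the size field itself.
  int32_t header_size =
      static_cast<int32_t>(out.Position() - start - sizeof(int32_t));
  out.WriteIntAt(start, header_size);
}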
Example #22
void EventHandler(UNUSED_ATTRIBUTE evutil_socket_t connfd, short ev_flags,
                  void *arg) {
  LOG_TRACE("Event callback fired for connfd: %d", connfd);
  LibeventSocket *conn = static_cast<LibeventSocket *>(arg);
  PL_ASSERT(conn != nullptr);
  conn->event_flags = ev_flags;
  PL_ASSERT(connfd == conn->sock_fd);
  StateMachine(conn);
}
Example #23
Value SmallintType::Multiply(const Value& left, const Value &right) const {
  PL_ASSERT(left.CheckInteger());
  PL_ASSERT(left.CheckComparable(right));
  if (left.IsNull() || right.IsNull())
    return left.OperateNull(right);

  SMALLINT_MODIFY_FUNC(MultiplyValue, *);

  throw Exception("type error");
}
Example #24
storage::DataTable *StatsAggregator::GetMetricTable(std::string table_name) {
  auto catalog = catalog::Catalog::GetInstance();
  PL_ASSERT(catalog->GetDatabaseCount() > 0);
  storage::Database *catalog_database =
      catalog->GetDatabaseWithName(CATALOG_DATABASE_NAME);
  PL_ASSERT(catalog_database != nullptr);
  auto metrics_table = catalog_database->GetTableWithName(table_name);
  PL_ASSERT(metrics_table != nullptr);
  return metrics_table;
}
Example #25
Value DecimalType::Max(const Value& left, const Value &right) const {
  PL_ASSERT(GetTypeId() == Type::DECIMAL);
  PL_ASSERT(left.CheckComparable(right));
  if (left.IsNull() || right.IsNull())
    return left.OperateNull(right);

  if (left.CompareGreaterThanEquals(right) == CMP_TRUE)
    return left.Copy();
  return right.Copy();
}
Example #26
File: tile.cpp  Project: ranxian/peloton
// column offset is the actual offset of the column within the tuple slot
common::Value *Tile::GetValueFast(const oid_t tuple_offset, const size_t column_offset,
                                  const common::Type::TypeId column_type, const bool is_inlined) {
    PL_ASSERT(tuple_offset < GetAllocatedTupleCount());
    PL_ASSERT(column_offset < schema.GetLength());

    const char *tuple_location = GetTupleLocation(tuple_offset);
    const char *field_location = tuple_location + column_offset;

    return common::Value::DeserializeFrom(field_location, column_type, is_inlined);
}
Example #27
void Tuple::SetAllNulls() {
  PL_ASSERT(tuple_schema);
  PL_ASSERT(tuple_data);
  const int column_count = tuple_schema->GetColumnCount();

  for (int column_itr = 0; column_itr < column_count; column_itr++) {
    Value value = Value::GetNullValue(tuple_schema->GetType(column_itr));
    SetValue(column_itr, value, nullptr);
  }
}
Example #28
void Tuple::SetAllZeros() {
  PL_ASSERT(tuple_schema_);
  PL_ASSERT(tuple_data_);
  const int column_count = tuple_schema_->GetColumnCount();

  for (int column_itr = 0; column_itr < column_count; column_itr++) {
    type::Value value(type::ValueFactory::GetZeroValueByType(
        tuple_schema_->GetType(column_itr)));
    SetValue(column_itr, value, nullptr);
  }
}
Example #29
/**
 * @brief Create a rollback segment by selecting columns from a tuple
 * @param target_list The columns to be selected
 * @param tuple The tuple used to construct the rollback segment
 *
 * TODO: Optimization is possible. We could avoid copying columns that are
 * already in a rollback segment created by the same transaction. What we need
 * to do is add a bitmap to the rollback segment indicating which columns it
 * contains; new rollback segments could then bypass those columns.
 */
RBSegType RollbackSegmentPool::CreateSegmentFromTuple(
    const catalog::Schema *schema, const TargetList &target_list,
    const AbstractTuple *tuple) {
  PL_ASSERT(schema);
  PL_ASSERT(target_list.size() != 0);

  size_t col_count = target_list.size();
  size_t header_size = pairs_start_offset + col_count * sizeof(ColIdOffsetPair);
  size_t data_size = 0;
  RBSegType rb_seg = nullptr;

  // First figure out the total size of the rollback segment data area
  for (auto &target : target_list) {
    auto col_id = target.first;
    data_size += schema->GetLength(col_id);
  }

  // Allocate the RBSeg
  rb_seg = (RBSegType)pool_.AllocateZeroes(header_size + data_size);
  PL_ASSERT(rb_seg);

  // Fill in the header
  SetNextPtr(rb_seg, nullptr);
  SetTimeStamp(rb_seg, MAX_CID);
  SetColCount(rb_seg, col_count);

  // Fill in the col_id & offset pair and set the data field
  size_t offset = 0;
  for (size_t idx = 0; idx < target_list.size(); ++idx) {
    auto &target = target_list[idx];
    auto col_id = target.first;

    const bool is_inlined = schema->IsInlined(col_id);
    const bool is_inbytes = false;

    size_t inline_col_size = schema->GetLength(col_id);
    size_t allocate_col_size =
        (is_inlined) ? inline_col_size : schema->GetVariableLength(col_id);

    SetColIdOffsetPair(rb_seg, idx, target.first, offset);

    // Set the value
    char *value_location = GetColDataLocation(rb_seg, idx);
    Value value = tuple->GetValue(col_id);
    PL_ASSERT(schema->GetType(col_id) == value.GetValueType());
    value.SerializeToTupleStorageAllocateForObjects(
        value_location, is_inlined, allocate_col_size, is_inbytes, &pool_);

    // Update the offset
    offset += inline_col_size;
  }

  return rb_seg;
}
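
The rollback segment built above is laid out as a fixed header, then one (col_id, offset) pair per saved column, then the packed column data. A standalone sketch of just the size and offset computation, with simplified stand-in types, follows.

#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

using oid_t = uint32_t;

// Simplified stand-in: one entry per saved column (illustrative layout).
struct ColIdOffsetPair {
  oid_t col_id;
  std::size_t offset;  // offset of this column's bytes within the data area
};

// Given the inline sizes of the selected columns, compute per-column offsets
// and the total allocation size, mirroring the two passes in the code above.
std::pair<std::vector<ColIdOffsetPair>, std::size_t> PlanSegmentLayout(
    std::size_t pairs_start_offset,
    const std::vector<std::pair<oid_t, std::size_t>> &columns) {
  std::size_t header_size =
      pairs_start_offset + columns.size() * sizeof(ColIdOffsetPair);

  std::vector<ColIdOffsetPair> pairs;
  std::size_t data_size = 0;
  for (const auto &col : columns) {
    pairs.push_back({col.first, data_size});  // offsets are relative to data
    data_size += col.second;                  // next column follows directly
  }
  return {std::move(pairs), header_size + data_size};
}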
Example #30
/**
 * Grab specific slot and fill in the tuple
 * Used by recovery
 * Returns slot where inserted (INVALID_OID if not inserted)
 */
oid_t TileGroup::InsertTupleFromRecovery(cid_t commit_id, oid_t tuple_slot_id,
        const Tuple *tuple) {
    auto status = tile_group_header->GetEmptyTupleSlot(tuple_slot_id);

    // No more slots
    if (status == false) return INVALID_OID;

    tile_group_header->GetHeaderLock().Lock();

    cid_t current_begin_cid = tile_group_header->GetBeginCommitId(tuple_slot_id);
    if (current_begin_cid != MAX_CID && current_begin_cid > commit_id) {
        tile_group_header->GetHeaderLock().Unlock();
        return tuple_slot_id;
    }

    LOG_TRACE("Tile Group Id :: %u status :: %u out of %u slots ", tile_group_id,
              tuple_slot_id, num_tuple_slots);

    oid_t tile_column_count;
    oid_t column_itr = 0;

    for (oid_t tile_itr = 0; tile_itr < tile_count; tile_itr++) {
        const catalog::Schema &schema = tile_schemas[tile_itr];
        tile_column_count = schema.GetColumnCount();

        storage::Tile *tile = GetTile(tile_itr);
        PL_ASSERT(tile);
        char *tile_tuple_location = tile->GetTupleLocation(tuple_slot_id);
        PL_ASSERT(tile_tuple_location);

        // NOTE:: Only a tuple wrapper
        storage::Tuple tile_tuple(&schema, tile_tuple_location);

        for (oid_t tile_column_itr = 0; tile_column_itr < tile_column_count;
                tile_column_itr++) {
            tile_tuple.SetValue(tile_column_itr, tuple->GetValue(column_itr),
                                tile->GetPool());
            column_itr++;
        }
    }

    // Set MVCC info
    tile_group_header->SetTransactionId(tuple_slot_id, INITIAL_TXN_ID);
    tile_group_header->SetBeginCommitId(tuple_slot_id, commit_id);
    tile_group_header->SetEndCommitId(tuple_slot_id, MAX_CID);
    tile_group_header->SetInsertCommit(tuple_slot_id, false);
    tile_group_header->SetDeleteCommit(tuple_slot_id, false);
    tile_group_header->SetNextItemPointer(tuple_slot_id, INVALID_ITEMPOINTER);

    tile_group_header->GetHeaderLock().Unlock();

    return tuple_slot_id;
}