// Returns true iff 'proto' is a fully-formed, deserializable Predicate: all
// extension fields required for its predicate_type are present and are
// themselves valid against 'database'. Recursive for compound predicates.
bool PredicateFactory::ProtoIsValid(const serialization::Predicate &proto, const CatalogDatabase &database) {
  // Check that proto is fully initialized.
  if (!proto.IsInitialized()) {
    return false;
  }
  // Check that the predicate_type is valid, and extensions if any.
  switch (proto.predicate_type()) {
    case serialization::Predicate::TRUE:  // Fall through.
    case serialization::Predicate::FALSE:
      // Constant predicates carry no extensions; nothing more to check.
      return true;
    case serialization::Predicate::COMPARISON: {
      if (proto.HasExtension(serialization::ComparisonPredicate::comparison) &&
          proto.HasExtension(serialization::ComparisonPredicate::left_operand) &&
          proto.HasExtension(serialization::ComparisonPredicate::right_operand)) {
        // Validate the comparison operation and both scalar operands.
        return ComparisonFactory::ProtoIsValid(proto.GetExtension(serialization::ComparisonPredicate::comparison)) &&
               ScalarFactory::ProtoIsValid(proto.GetExtension(serialization::ComparisonPredicate::left_operand), database) &&
               ScalarFactory::ProtoIsValid(proto.GetExtension(serialization::ComparisonPredicate::right_operand), database);
      }
      break;
    }
    case serialization::Predicate::NEGATION: {
      if (proto.HasExtension(serialization::NegationPredicate::operand)) {
        // A negation is valid iff its single operand is.
        return ProtoIsValid(proto.GetExtension(serialization::NegationPredicate::operand), database);
      }
      break;
    }
    case serialization::Predicate::CONJUNCTION:  // Fall through.
    case serialization::Predicate::DISJUNCTION: {
      // Every operand in the repeated list must itself be a valid predicate.
      // Note that an empty operand list is accepted as valid here.
      for (int i = 0; i < proto.ExtensionSize(serialization::PredicateWithList::operands); ++i) {
        if (!ProtoIsValid(proto.GetExtension(serialization::PredicateWithList::operands, i), database)) {
          return false;
        }
      }
      return true;
    }
    default: {
      break;
    }
  }
  // Either a required extension was missing or predicate_type is unknown.
  return false;
}
// Merges the relation schemas in 'proto' into this cache. New schemas are
// inserted; already-cached schemas are currently left untouched (schema
// changes are a TODO, see below).
void CatalogDatabaseCache::update(const serialization::CatalogDatabase &proto) {
  DCHECK(ProtoIsValid(proto))
      << "Attempted to create CatalogDatabaseCache from an invalid proto description:\n"
      << proto.DebugString();

  // Phase one: under a shared (read) lock, record the indices in
  // 'proto.relations' whose relation_id is not yet cached. Collecting
  // indices first keeps the exclusive lock below as short as possible.
  vector<int> new_relation_schema_proto_indices;
  {
    SpinSharedMutexSharedLock<false> read_lock(relations_mutex_);
    for (int i = 0; i < proto.relations_size(); ++i) {
      const auto it = rel_map_.find(proto.relations(i).relation_id());
      if (it == rel_map_.end()) {
        new_relation_schema_proto_indices.push_back(i);
      } else {
        // TODO(quickstep-team): Support schema changes by adding the index of
        // changed schema proto in 'changed_relation_schema_proto_indices'.
      }
    }
  }

  // Phase two: under an exclusive (write) lock, construct and insert the
  // new relation schemas identified above.
  // NOTE(review): the read lock is released before the write lock is taken,
  // so a concurrent update could insert the same id in between; emplace
  // would then simply be a no-op for that id.
  SpinSharedMutexExclusiveLock<false> write_lock(relations_mutex_);
  for (const int i : new_relation_schema_proto_indices) {
    const serialization::CatalogRelationSchema &proto_relation = proto.relations(i);
    auto relation_schema = make_unique<const CatalogRelationSchema>(proto_relation);
    rel_map_.emplace(proto_relation.relation_id(), move(relation_schema));
  }

  // TODO(quickstep-team): Reset the schema for the changes in the following
  // steps for each index in 'changed_relation_schema_proto_indices':
  // 1. Drop the blocks belonged to 'proto.relations(i).relation_id()' in the
  //    buffer pool.
  // 2. Reset the changed schema, while the scheduler ensures no queries will
  //    load back the related blocks.
  // 3. Signal the scheduler to accept new queries for the changed schema.
}
// Builds the cache from a serialized CatalogDatabase: one immutable
// CatalogRelationSchema entry per relation in 'proto', keyed by relation id.
CatalogDatabaseCache::CatalogDatabaseCache(const serialization::CatalogDatabase &proto) {
  DCHECK(ProtoIsValid(proto))
      << "Attempted to create CatalogDatabaseCache from an invalid proto description:\n"
      << proto.DebugString();

  const int num_relations = proto.relations_size();
  for (int rel_idx = 0; rel_idx < num_relations; ++rel_idx) {
    auto schema = make_unique<const CatalogRelationSchema>(proto.relations(rel_idx));
    const auto rid = schema->getID();
    rel_map_.emplace(rid, move(schema));
  }
}
/**
 * @brief Generate the tuple from the serialized Protocol Buffer
 *        representation.
 *
 * @param proto A serialized Protocol Buffer representation of a Tuple,
 *        originally generated by the optimizer.
 *
 * @return The generated Tuple (caller takes ownership).
 **/
static Tuple* ReconstructFromProto(const serialization::Tuple &proto) {
  DCHECK(ProtoIsValid(proto))
      << "Attempted to create Tuple from an invalid proto description:\n"
      << proto.DebugString();

  std::vector<TypedValue> attribute_values;
  // The number of values is known up front; reserve to avoid reallocations.
  attribute_values.reserve(proto.attribute_values_size());
  for (int i = 0; i < proto.attribute_values_size(); ++i) {
    attribute_values.emplace_back(TypedValue::ReconstructFromProto(proto.attribute_values(i)));
  }

  return new Tuple(std::move(attribute_values));
}
// Reconstructs a CatalogDatabase from its serialized form. 'rel_vec_' slot
// positions are significant (they encode relation ids), so NULL slots listed
// in 'proto.null_relations' (presumably previously dropped relations — see
// the serialization side to confirm) must be re-inserted at their recorded
// positions, interleaved with the surviving relations from 'proto.relations'.
CatalogDatabase::CatalogDatabase(const serialization::CatalogDatabase &proto)
    : name_(proto.name()) {
  DCHECK(ProtoIsValid(proto))
      << "Attempted to create CatalogDatabase from an invalid proto description:\n"
      << proto.DebugString();

  // Single pass over all slot positions: 'index_relations' is the absolute
  // slot index, 'index_null_relations' counts NULL slots consumed so far.
  for (int index_relations = 0, index_null_relations = 0;
       index_relations < proto.null_relations_size() + proto.relations_size();
       ++index_relations) {
    if (index_null_relations < proto.null_relations_size() &&
        index_relations == proto.null_relations(index_null_relations)) {
      // This slot position is recorded as NULL; keep the placeholder.
      rel_vec_.push_back(NULL);
      ++index_null_relations;
    } else {
      // Offset by the NULL slots consumed so far to index 'proto.relations'.
      addRelation(new CatalogRelation(proto.relations(index_relations - index_null_relations)));
    }
  }
}
// Reconstructs a concrete Predicate from its serialized representation.
// 'proto' must already satisfy ProtoIsValid(). Returns a caller-owned
// Predicate. Compound predicates are reconstructed recursively.
Predicate* PredicateFactory::ReconstructFromProto(const serialization::Predicate &proto,
                                                  const CatalogDatabase &database) {
  DCHECK(ProtoIsValid(proto, database))
      << "Attempted to create Predicate from an invalid proto description:\n"
      << proto.DebugString();

  // Deserializes every entry of the repeated 'operands' extension into
  // '*predicate_with_list', which must provide addPredicate() (shared by
  // ConjunctionPredicate and DisjunctionPredicate). Factored out to avoid
  // duplicating the loop in the two compound cases below.
  const auto add_operands = [&proto, &database](auto *predicate_with_list) {
    for (int i = 0; i < proto.ExtensionSize(serialization::PredicateWithList::operands); ++i) {
      predicate_with_list->addPredicate(
          ReconstructFromProto(proto.GetExtension(serialization::PredicateWithList::operands, i),
                               database));
    }
  };

  switch (proto.predicate_type()) {
    case serialization::Predicate::TRUE:
      return new TruePredicate();
    case serialization::Predicate::FALSE:
      return new FalsePredicate();
    case serialization::Predicate::COMPARISON:
      return new ComparisonPredicate(
          ComparisonFactory::ReconstructFromProto(
              proto.GetExtension(serialization::ComparisonPredicate::comparison)),
          ScalarFactory::ReconstructFromProto(
              proto.GetExtension(serialization::ComparisonPredicate::left_operand), database),
          ScalarFactory::ReconstructFromProto(
              proto.GetExtension(serialization::ComparisonPredicate::right_operand), database));
    case serialization::Predicate::NEGATION:
      // NegatePredicate may simplify (e.g. fold double negation) rather than
      // always wrapping — hence the static factory call instead of 'new'.
      return NegationPredicate::NegatePredicate(
          ReconstructFromProto(proto.GetExtension(serialization::NegationPredicate::operand),
                               database));
    case serialization::Predicate::CONJUNCTION: {
      ConjunctionPredicate *predicate = new ConjunctionPredicate();
      add_operands(predicate);
      return predicate;
    }
    case serialization::Predicate::DISJUNCTION: {
      DisjunctionPredicate *predicate = new DisjunctionPredicate();
      add_operands(predicate);
      return predicate;
    }
    default:
      FATAL_ERROR("Unknown Predicate Type in PredicateFactory::ReconstructFromProto");
  }
}
// Reconstructs a SortConfiguration from its serialized representation:
// one (expression, ascending?, nulls-first?) triple per order-by entry.
// Returns a caller-owned SortConfiguration.
SortConfiguration* SortConfiguration::ReconstructFromProto(const serialization::SortConfiguration &proto,
                                                           const CatalogDatabaseLite &database) {
  // Emit the same diagnostic message as the other Reconstruct factories
  // instead of a bare DCHECK.
  DCHECK(ProtoIsValid(proto, database))
      << "Attempted to create SortConfiguration from an invalid proto description:\n"
      << proto.DebugString();

  PtrVector<Scalar> order_by;
  vector<bool> ordering;
  vector<bool> null_ordering;

  // Sizes are known up front; reserve to avoid reallocations.
  const int num_order_by = proto.order_by_list_size();
  ordering.reserve(num_order_by);
  null_ordering.reserve(num_order_by);

  for (int i = 0; i < num_order_by; ++i) {
    const serialization::SortConfiguration::OrderBy &order_by_proto = proto.order_by_list(i);

    order_by.push_back(ScalarFactory::ReconstructFromProto(order_by_proto.expression(), database));
    ordering.push_back(order_by_proto.is_ascending());
    null_ordering.push_back(order_by_proto.null_first());
  }

  return new SortConfiguration(order_by, move(ordering), move(null_ordering));
}
// Maps a serialized BinaryOperation onto the singleton operation instance
// for the corresponding BinaryOperationID.
const BinaryOperation& BinaryOperationFactory::ReconstructFromProto(
    const serialization::BinaryOperation &proto) {
  DCHECK(ProtoIsValid(proto))
      << "Attempted to create BinaryOperation from an invalid proto description:\n"
      << proto.DebugString();

  // Translate the wire enum into the internal operation id, then resolve
  // the shared operation instance with a single lookup.
  BinaryOperationID operation_id;
  switch (proto.operation_id()) {
    case serialization::BinaryOperation::ADD:
      operation_id = BinaryOperationID::kAdd;
      break;
    case serialization::BinaryOperation::SUBTRACT:
      operation_id = BinaryOperationID::kSubtract;
      break;
    case serialization::BinaryOperation::MULTIPLY:
      operation_id = BinaryOperationID::kMultiply;
      break;
    case serialization::BinaryOperation::DIVIDE:
      operation_id = BinaryOperationID::kDivide;
      break;
    case serialization::BinaryOperation::MODULO:
      operation_id = BinaryOperationID::kModulo;
      break;
    default:
      FATAL_ERROR("Unrecognized BinaryOperationID in "
                  "BinaryOperationFactory::ReconstructFromProto");
  }
  return GetBinaryOperation(operation_id);
}
// Maps a serialized Comparison onto the singleton comparison instance for
// the corresponding ComparisonID.
const Comparison& ComparisonFactory::ReconstructFromProto(const serialization::Comparison &proto) {
  DCHECK(ProtoIsValid(proto))
      << "Attempted to create Comparison from an invalid proto description:\n"
      << proto.DebugString();

  // Translate the wire enum into the internal comparison id, then resolve
  // the shared comparison instance with a single lookup.
  ComparisonID comparison_id;
  switch (proto.comparison_id()) {
    case serialization::Comparison::EQUAL:
      comparison_id = ComparisonID::kEqual;
      break;
    case serialization::Comparison::NOT_EQUAL:
      comparison_id = ComparisonID::kNotEqual;
      break;
    case serialization::Comparison::LESS:
      comparison_id = ComparisonID::kLess;
      break;
    case serialization::Comparison::LESS_OR_EQUAL:
      comparison_id = ComparisonID::kLessOrEqual;
      break;
    case serialization::Comparison::GREATER:
      comparison_id = ComparisonID::kGreater;
      break;
    case serialization::Comparison::GREATER_OR_EQUAL:
      comparison_id = ComparisonID::kGreaterOrEqual;
      break;
    default:
      FATAL_ERROR("Unrecognized ComparisonID in ComparisonFactory::ReconstructFromProto");
  }
  return GetComparison(comparison_id);
}
// Reconstructs a TypedValue from its serialized representation. For every
// TypeID: if the proto carries a value, a literal TypedValue is built from
// it; otherwise a NULL TypedValue of that type is returned.
TypedValue TypedValue::ReconstructFromProto(const serialization::TypedValue &proto) {
  DCHECK(ProtoIsValid(proto))
      << "Attempted to create TypedValue from an invalid proto description:\n"
      << proto.DebugString();

  switch (proto.type_id()) {
    case serialization::Type::INT:
      return proto.has_int_value() ?
          TypedValue(static_cast<int>(proto.int_value())) :
          TypedValue(kInt);
    case serialization::Type::LONG:
      return proto.has_long_value() ?
          TypedValue(static_cast<std::int64_t>(proto.long_value())) :
          TypedValue(kLong);
    case serialization::Type::FLOAT:
      return proto.has_float_value() ?
          TypedValue(static_cast<float>(proto.float_value())) :
          TypedValue(kFloat);
    case serialization::Type::DOUBLE:
      return proto.has_double_value() ?
          TypedValue(static_cast<double>(proto.double_value())) :
          TypedValue(kDouble);
    case serialization::Type::DATETIME:
      if (proto.has_datetime_value()) {
        DatetimeLit datetime;
        datetime.ticks = proto.datetime_value();
        return TypedValue(datetime);
      } else {
        return TypedValue(kDatetime);
      }
    case serialization::Type::DATETIME_INTERVAL:
      if (proto.has_datetime_interval_value()) {
        DatetimeIntervalLit interval;
        interval.interval_ticks = proto.datetime_interval_value();
        return TypedValue(interval);
      } else {
        return TypedValue(kDatetimeInterval);
      }
    case serialization::Type::YEAR_MONTH_INTERVAL:
      if (proto.has_year_month_interval_value()) {
        YearMonthIntervalLit interval;
        interval.months = proto.year_month_interval_value();
        return TypedValue(interval);
      } else {
        return TypedValue(kYearMonthInterval);
      }
    case serialization::Type::CHAR:
      // Out-of-line string data initially references the proto's buffer;
      // ensureNotReference() copies it so the TypedValue stays valid after
      // 'proto' is destroyed.
      return proto.has_out_of_line_data() ?
          TypedValue(kChar,
                     static_cast<const void*>(proto.out_of_line_data().c_str()),
                     proto.out_of_line_data().size()).ensureNotReference() :
          TypedValue(kChar);
    case serialization::Type::VAR_CHAR:
      // Same out-of-line copy discipline as CHAR above.
      return proto.has_out_of_line_data() ?
          TypedValue(kVarChar,
                     static_cast<const void*>(proto.out_of_line_data().c_str()),
                     proto.out_of_line_data().size()).ensureNotReference() :
          TypedValue(kVarChar);
    case serialization::Type::NULL_TYPE:
      return TypedValue(kNullType);
    default:
      FATAL_ERROR("Unrecognized TypeID in TypedValue::ReconstructFromProto");
  }
}
// Reconstructs a concrete WorkOrder from its serialized representation,
// resolving catalog relations through '*catalog_database' and shared query
// state (predicates, hash tables, insert destinations, ...) through
// '*query_context'. Returns a caller-owned WorkOrder.
//
// Fix: the TABLE_GENERATOR case previously logged
// "Creating SortRunGenerationWorkOrder" (copy-paste from the case above);
// it now logs the work order actually created.
WorkOrder* WorkOrderFactory::ReconstructFromProto(const serialization::WorkOrder &proto,
                                                  CatalogDatabaseLite *catalog_database,
                                                  QueryContext *query_context,
                                                  StorageManager *storage_manager,
                                                  const tmb::client_id shiftboss_client_id,
                                                  tmb::MessageBus *bus) {
  DCHECK(query_context != nullptr);
  DCHECK(ProtoIsValid(proto, *catalog_database, *query_context))
      << "Attempted to create WorkOrder from an invalid proto description:\n"
      << proto.DebugString();
  switch (proto.work_order_type()) {
    case serialization::AGGREGATION: {
      LOG(INFO) << "Creating AggregationWorkOrder";
      return new AggregationWorkOrder(
          proto.GetExtension(serialization::AggregationWorkOrder::block_id),
          query_context->getAggregationState(
              proto.GetExtension(serialization::AggregationWorkOrder::aggr_state_index)));
    }
    case serialization::BUILD_HASH: {
      LOG(INFO) << "Creating BuildHashWorkOrder";
      vector<attribute_id> join_key_attributes;
      for (int i = 0; i < proto.ExtensionSize(serialization::BuildHashWorkOrder::join_key_attributes); ++i) {
        join_key_attributes.push_back(
            proto.GetExtension(serialization::BuildHashWorkOrder::join_key_attributes, i));
      }
      return new BuildHashWorkOrder(
          catalog_database->getRelationSchemaById(
              proto.GetExtension(serialization::BuildHashWorkOrder::relation_id)),
          move(join_key_attributes),
          proto.GetExtension(serialization::BuildHashWorkOrder::any_join_key_attributes_nullable),
          proto.GetExtension(serialization::BuildHashWorkOrder::block_id),
          query_context->getJoinHashTable(
              proto.GetExtension(serialization::BuildHashWorkOrder::join_hash_table_index)),
          storage_manager);
    }
    case serialization::DELETE: {
      LOG(INFO) << "Creating DeleteWorkOrder";
      return new DeleteWorkOrder(
          catalog_database->getRelationSchemaById(
              proto.GetExtension(serialization::DeleteWorkOrder::relation_id)),
          proto.GetExtension(serialization::DeleteWorkOrder::block_id),
          query_context->getPredicate(
              proto.GetExtension(serialization::DeleteWorkOrder::predicate_index)),
          storage_manager,
          proto.GetExtension(serialization::DeleteWorkOrder::operator_index),
          shiftboss_client_id,
          bus);
    }
    case serialization::DESTROY_HASH: {
      LOG(INFO) << "Creating DestroyHashWorkOrder";
      return new DestroyHashWorkOrder(
          proto.GetExtension(serialization::DestroyHashWorkOrder::join_hash_table_index),
          query_context);
    }
    case serialization::DROP_TABLE: {
      LOG(INFO) << "Creating DropTableWorkOrder";
      vector<block_id> blocks;
      for (int i = 0; i < proto.ExtensionSize(serialization::DropTableWorkOrder::block_ids); ++i) {
        blocks.push_back(
            proto.GetExtension(serialization::DropTableWorkOrder::block_ids, i));
      }
      return new DropTableWorkOrder(
          move(blocks),
          storage_manager,
          // relation_id is optional: absent means "blocks only", no catalog
          // entry to remove.
          proto.HasExtension(serialization::DropTableWorkOrder::relation_id)
              ? proto.GetExtension(serialization::DropTableWorkOrder::relation_id)
              : kInvalidCatalogId,
          catalog_database);
    }
    case serialization::FINALIZE_AGGREGATION: {
      LOG(INFO) << "Creating FinalizeAggregationWorkOrder";
      return new FinalizeAggregationWorkOrder(
          query_context->releaseAggregationState(
              proto.GetExtension(serialization::FinalizeAggregationWorkOrder::aggr_state_index)),
          query_context->getInsertDestination(
              proto.GetExtension(serialization::FinalizeAggregationWorkOrder::insert_destination_index)));
    }
    case serialization::HASH_JOIN: {
      const auto hash_join_work_order_type =
          proto.GetExtension(serialization::HashJoinWorkOrder::hash_join_work_order_type);

      const CatalogRelationSchema &build_relation =
          catalog_database->getRelationSchemaById(
              proto.GetExtension(serialization::HashJoinWorkOrder::build_relation_id));
      const CatalogRelationSchema &probe_relation =
          catalog_database->getRelationSchemaById(
              proto.GetExtension(serialization::HashJoinWorkOrder::probe_relation_id));

      vector<attribute_id> join_key_attributes;
      const int join_key_attributes_size =
          proto.ExtensionSize(serialization::HashJoinWorkOrder::join_key_attributes);
      for (int i = 0; i < join_key_attributes_size; ++i) {
        join_key_attributes.push_back(
            proto.GetExtension(serialization::HashJoinWorkOrder::join_key_attributes, i));
      }

      const bool any_join_key_attributes_nullable =
          proto.GetExtension(serialization::HashJoinWorkOrder::any_join_key_attributes_nullable);
      const block_id lookup_block_id =
          proto.GetExtension(serialization::HashJoinWorkOrder::block_id);

      // Outer joins carry no residual predicate; all other variants do.
      const Predicate *residual_predicate = nullptr;
      if (hash_join_work_order_type != serialization::HashJoinWorkOrder::HASH_OUTER_JOIN) {
        residual_predicate = query_context->getPredicate(
            proto.GetExtension(serialization::HashJoinWorkOrder::residual_predicate_index));
      }

      const std::vector<std::unique_ptr<const Scalar>> &selection =
          query_context->getScalarGroup(
              proto.GetExtension(serialization::HashJoinWorkOrder::selection_index));
      const JoinHashTable &hash_table =
          *query_context->getJoinHashTable(
              proto.GetExtension(serialization::HashJoinWorkOrder::join_hash_table_index));
      InsertDestination *output_destination =
          query_context->getInsertDestination(
              proto.GetExtension(serialization::HashJoinWorkOrder::insert_destination_index));

      switch (hash_join_work_order_type) {
        case serialization::HashJoinWorkOrder::HASH_ANTI_JOIN: {
          LOG(INFO) << "Creating HashAntiJoinWorkOrder";
          return new HashAntiJoinWorkOrder(
              build_relation,
              probe_relation,
              move(join_key_attributes),
              any_join_key_attributes_nullable,
              lookup_block_id,
              residual_predicate,
              selection,
              hash_table,
              output_destination,
              storage_manager);
        }
        case serialization::HashJoinWorkOrder::HASH_INNER_JOIN: {
          LOG(INFO) << "Creating HashInnerJoinWorkOrder";
          return new HashInnerJoinWorkOrder(
              build_relation,
              probe_relation,
              move(join_key_attributes),
              any_join_key_attributes_nullable,
              lookup_block_id,
              residual_predicate,
              selection,
              hash_table,
              output_destination,
              storage_manager);
        }
        case serialization::HashJoinWorkOrder::HASH_OUTER_JOIN: {
          vector<bool> is_selection_on_build;
          const int is_selection_on_build_size =
              proto.ExtensionSize(serialization::HashJoinWorkOrder::is_selection_on_build);
          for (int i = 0; i < is_selection_on_build_size; ++i) {
            is_selection_on_build.push_back(
                proto.GetExtension(serialization::HashJoinWorkOrder::is_selection_on_build, i));
          }
          LOG(INFO) << "Creating HashOuterJoinWorkOrder";
          return new HashOuterJoinWorkOrder(
              build_relation,
              probe_relation,
              move(join_key_attributes),
              any_join_key_attributes_nullable,
              lookup_block_id,
              selection,
              move(is_selection_on_build),
              hash_table,
              output_destination,
              storage_manager);
        }
        case serialization::HashJoinWorkOrder::HASH_SEMI_JOIN: {
          LOG(INFO) << "Creating HashSemiJoinWorkOrder";
          return new HashSemiJoinWorkOrder(
              build_relation,
              probe_relation,
              move(join_key_attributes),
              any_join_key_attributes_nullable,
              lookup_block_id,
              residual_predicate,
              selection,
              hash_table,
              output_destination,
              storage_manager);
        }
        default:
          LOG(FATAL) << "Unknown HashJoinWorkOrder Type in WorkOrderFactory::ReconstructFromProto";
      }
    }
    case serialization::INSERT: {
      LOG(INFO) << "Creating InsertWorkOrder";
      return new InsertWorkOrder(
          query_context->getInsertDestination(
              proto.GetExtension(serialization::InsertWorkOrder::insert_destination_index)),
          query_context->releaseTuple(
              proto.GetExtension(serialization::InsertWorkOrder::tuple_index)));
    }
    case serialization::NESTED_LOOP_JOIN: {
      LOG(INFO) << "Creating NestedLoopsJoinWorkOrder";
      return new NestedLoopsJoinWorkOrder(
          catalog_database->getRelationSchemaById(
              proto.GetExtension(serialization::NestedLoopsJoinWorkOrder::left_relation_id)),
          catalog_database->getRelationSchemaById(
              proto.GetExtension(serialization::NestedLoopsJoinWorkOrder::right_relation_id)),
          proto.GetExtension(serialization::NestedLoopsJoinWorkOrder::left_block_id),
          proto.GetExtension(serialization::NestedLoopsJoinWorkOrder::right_block_id),
          query_context->getPredicate(
              proto.GetExtension(serialization::NestedLoopsJoinWorkOrder::join_predicate_index)),
          query_context->getScalarGroup(
              proto.GetExtension(serialization::NestedLoopsJoinWorkOrder::selection_index)),
          query_context->getInsertDestination(
              proto.GetExtension(serialization::NestedLoopsJoinWorkOrder::insert_destination_index)),
          storage_manager);
    }
    case serialization::SAMPLE: {
      LOG(INFO) << "Creating SampleWorkOrder";
      return new SampleWorkOrder(
          catalog_database->getRelationSchemaById(
              proto.GetExtension(serialization::SampleWorkOrder::relation_id)),
          proto.GetExtension(serialization::SampleWorkOrder::block_id),
          proto.GetExtension(serialization::SampleWorkOrder::is_block_sample),
          proto.GetExtension(serialization::SampleWorkOrder::percentage),
          query_context->getInsertDestination(
              proto.GetExtension(serialization::SampleWorkOrder::insert_destination_index)),
          storage_manager);
    }
    case serialization::SAVE_BLOCKS: {
      LOG(INFO) << "Creating SaveBlocksWorkOrder";
      return new SaveBlocksWorkOrder(
          proto.GetExtension(serialization::SaveBlocksWorkOrder::block_id),
          proto.GetExtension(serialization::SaveBlocksWorkOrder::force),
          storage_manager);
    }
    case serialization::SELECT: {
      LOG(INFO) << "Creating SelectWorkOrder";
      const bool simple_projection =
          proto.GetExtension(serialization::SelectWorkOrder::simple_projection);
      vector<attribute_id> simple_selection;
      for (int i = 0; i < proto.ExtensionSize(serialization::SelectWorkOrder::simple_selection); ++i) {
        simple_selection.push_back(
            proto.GetExtension(serialization::SelectWorkOrder::simple_selection, i));
      }
      return new SelectWorkOrder(
          catalog_database->getRelationSchemaById(
              proto.GetExtension(serialization::SelectWorkOrder::relation_id)),
          proto.GetExtension(serialization::SelectWorkOrder::block_id),
          query_context->getPredicate(
              proto.GetExtension(serialization::SelectWorkOrder::predicate_index)),
          simple_projection,
          move(simple_selection),
          // A simple (attribute-only) projection needs no scalar group.
          simple_projection
              ? nullptr
              : &query_context->getScalarGroup(
                    proto.GetExtension(serialization::SelectWorkOrder::selection_index)),
          query_context->getInsertDestination(
              proto.GetExtension(serialization::SelectWorkOrder::insert_destination_index)),
          storage_manager);
    }
    case serialization::SORT_MERGE_RUN: {
      LOG(INFO) << "Creating SortMergeRunWorkOrder";
      vector<merge_run_operator::Run> runs;
      for (int i = 0; i < proto.ExtensionSize(serialization::SortMergeRunWorkOrder::runs); ++i) {
        merge_run_operator::Run run;
        const serialization::Run &run_proto =
            proto.GetExtension(serialization::SortMergeRunWorkOrder::runs, i);
        for (int j = 0; j < run_proto.blocks_size(); ++j) {
          run.push_back(run_proto.blocks(j));
        }
        runs.push_back(move(run));
      }
      return new SortMergeRunWorkOrder(
          query_context->getSortConfig(
              proto.GetExtension(serialization::SortMergeRunWorkOrder::sort_config_index)),
          catalog_database->getRelationSchemaById(
              proto.GetExtension(serialization::SortMergeRunWorkOrder::relation_id)),
          move(runs),
          proto.GetExtension(serialization::SortMergeRunWorkOrder::top_k),
          proto.GetExtension(serialization::SortMergeRunWorkOrder::merge_level),
          query_context->getInsertDestination(
              proto.GetExtension(serialization::SortMergeRunWorkOrder::insert_destination_index)),
          storage_manager,
          proto.GetExtension(serialization::SortMergeRunWorkOrder::operator_index),
          shiftboss_client_id,
          bus);
    }
    case serialization::SORT_RUN_GENERATION: {
      LOG(INFO) << "Creating SortRunGenerationWorkOrder";
      return new SortRunGenerationWorkOrder(
          catalog_database->getRelationSchemaById(
              proto.GetExtension(serialization::SortRunGenerationWorkOrder::relation_id)),
          proto.GetExtension(serialization::SortRunGenerationWorkOrder::block_id),
          query_context->getSortConfig(
              proto.GetExtension(serialization::SortRunGenerationWorkOrder::sort_config_index)),
          query_context->getInsertDestination(
              proto.GetExtension(serialization::SortRunGenerationWorkOrder::insert_destination_index)),
          storage_manager);
    }
    case serialization::TABLE_GENERATOR: {
      // Fixed: previously logged "Creating SortRunGenerationWorkOrder".
      LOG(INFO) << "Creating TableGeneratorWorkOrder";
      return new TableGeneratorWorkOrder(
          query_context->getGeneratorFunctionHandle(
              proto.GetExtension(serialization::TableGeneratorWorkOrder::generator_function_index)),
          query_context->getInsertDestination(
              proto.GetExtension(serialization::TableGeneratorWorkOrder::insert_destination_index)));
    }
    case serialization::TEXT_SCAN: {
      LOG(INFO) << "Creating TextScanWorkOrder";
      // Two input flavors: scan a named file, or scan an in-memory text blob.
      if (proto.HasExtension(serialization::TextScanWorkOrder::filename)) {
        return new TextScanWorkOrder(
            proto.GetExtension(serialization::TextScanWorkOrder::filename),
            proto.GetExtension(serialization::TextScanWorkOrder::field_terminator),
            proto.GetExtension(serialization::TextScanWorkOrder::process_escape_sequences),
            query_context->getInsertDestination(
                proto.GetExtension(serialization::TextScanWorkOrder::insert_destination_index)),
            storage_manager);
      } else {
        const serialization::TextBlob &text_blob_proto =
            proto.GetExtension(serialization::TextScanWorkOrder::text_blob);
        return new TextScanWorkOrder(
            text_blob_proto.blob_id(),
            text_blob_proto.size(),
            proto.GetExtension(serialization::TextScanWorkOrder::field_terminator),
            proto.GetExtension(serialization::TextScanWorkOrder::process_escape_sequences),
            query_context->getInsertDestination(
                proto.GetExtension(serialization::TextScanWorkOrder::insert_destination_index)),
            storage_manager);
      }
    }
    case serialization::TEXT_SPLIT: {
      LOG(INFO) << "Creating TextSplitWorkOrder";
      return new TextSplitWorkOrder(
          proto.GetExtension(serialization::TextSplitWorkOrder::filename),
          proto.GetExtension(serialization::TextSplitWorkOrder::process_escape_sequences),
          storage_manager,
          proto.GetExtension(serialization::TextSplitWorkOrder::operator_index),
          shiftboss_client_id,
          bus);
    }
    case serialization::UPDATE: {
      LOG(INFO) << "Creating UpdateWorkOrder";
      return new UpdateWorkOrder(
          catalog_database->getRelationSchemaById(
              proto.GetExtension(serialization::UpdateWorkOrder::relation_id)),
          proto.GetExtension(serialization::UpdateWorkOrder::block_id),
          query_context->getPredicate(
              proto.GetExtension(serialization::UpdateWorkOrder::predicate_index)),
          query_context->getUpdateGroup(
              proto.GetExtension(serialization::UpdateWorkOrder::update_group_index)),
          query_context->getInsertDestination(
              proto.GetExtension(serialization::UpdateWorkOrder::insert_destination_index)),
          storage_manager,
          proto.GetExtension(serialization::UpdateWorkOrder::operator_index),
          shiftboss_client_id,
          bus);
    }
    default:
      LOG(FATAL) << "Unknown WorkOrder Type in WorkOrderFactory::ReconstructFromProto";
  }
}
// Returns true iff 'proto' is a fully-formed, deserializable Scalar: all
// extension fields required for its data_source are present and themselves
// valid against 'database'. Recursive for composite expressions.
bool ScalarFactory::ProtoIsValid(const serialization::Scalar &proto, const CatalogDatabase &database) {
  // Check that proto is fully initialized.
  if (!proto.IsInitialized()) {
    return false;
  }
  // Check that the data_source is valid, and extensions if any.
  switch (proto.data_source()) {
    case serialization::Scalar::LITERAL: {
      // Both the literal value and its declared type must be valid.
      return proto.HasExtension(serialization::ScalarLiteral::literal) &&
             proto.HasExtension(serialization::ScalarLiteral::literal_type) &&
             TypedValue::ProtoIsValid(proto.GetExtension(serialization::ScalarLiteral::literal)) &&
             TypeFactory::ProtoIsValid(proto.GetExtension(serialization::ScalarLiteral::literal_type));
    }
    case serialization::Scalar::ATTRIBUTE: {
      if (proto.HasExtension(serialization::ScalarAttribute::relation_id) &&
          proto.HasExtension(serialization::ScalarAttribute::attribute_id)) {
        const relation_id rel_id = proto.GetExtension(serialization::ScalarAttribute::relation_id);
        const attribute_id attr_id = proto.GetExtension(serialization::ScalarAttribute::attribute_id);
        // The referenced relation and attribute must exist in 'database'.
        return database.hasRelationWithId(rel_id) &&
               database.getRelationById(rel_id)->hasAttributeWithId(attr_id);
      }
      break;
    }
    case serialization::Scalar::UNARY_EXPRESSION: {
      if (proto.HasExtension(serialization::ScalarUnaryExpression::operation) &&
          proto.HasExtension(serialization::ScalarUnaryExpression::operand)) {
        return UnaryOperationFactory::ProtoIsValid(proto.GetExtension(serialization::ScalarUnaryExpression::operation)) &&
               ProtoIsValid(proto.GetExtension(serialization::ScalarUnaryExpression::operand), database);
      }
      break;
    }
    case serialization::Scalar::BINARY_EXPRESSION: {
      if (proto.HasExtension(serialization::ScalarBinaryExpression::operation) &&
          proto.HasExtension(serialization::ScalarBinaryExpression::left_operand) &&
          proto.HasExtension(serialization::ScalarBinaryExpression::right_operand)) {
        return BinaryOperationFactory::ProtoIsValid(
                   proto.GetExtension(serialization::ScalarBinaryExpression::operation)) &&
               ProtoIsValid(proto.GetExtension(serialization::ScalarBinaryExpression::left_operand), database) &&
               ProtoIsValid(proto.GetExtension(serialization::ScalarBinaryExpression::right_operand), database);
      }
      break;
    }
    case serialization::Scalar::CASE_EXPRESSION: {
      // Check result type.
      if (!(proto.HasExtension(serialization::ScalarCaseExpression::result_type) &&
            TypeFactory::ProtoIsValid(proto.GetExtension(
                serialization::ScalarCaseExpression::result_type)))) {
        return false;
      }
      // Check when-predicates and result expressions. The two repeated
      // extensions must pair up one-to-one (same count), and each entry
      // must itself be valid.
      if (proto.ExtensionSize(serialization::ScalarCaseExpression::when_predicate) ==
          proto.ExtensionSize(serialization::ScalarCaseExpression::result_expression)) {
        for (int case_num = 0;
             case_num < proto.ExtensionSize(serialization::ScalarCaseExpression::when_predicate);
             ++case_num) {
          if (!PredicateFactory::ProtoIsValid(
                  proto.GetExtension(
                      serialization::ScalarCaseExpression::when_predicate, case_num),
                  database)) {
            return false;
          }
          if (!ProtoIsValid(
                  proto.GetExtension(serialization::ScalarCaseExpression::result_expression, case_num),
                  database)) {
            return false;
          }
        }
      } else {
        return false;
      }
      // Check else-result expression (required).
      if (!(proto.HasExtension(serialization::ScalarCaseExpression::else_result_expression) &&
            ProtoIsValid(proto.GetExtension(serialization::ScalarCaseExpression::else_result_expression), database))) {
        return false;
      }
      // Everything checks out.
      return true;
    }
    default: {
      break;
    }
  }
  // Missing extensions or an unrecognized data_source.
  return false;
}
// Reconstructs a concrete Scalar from its serialized representation,
// resolving attribute references through 'database'. 'proto' must already
// satisfy ProtoIsValid(). Returns a caller-owned Scalar. Composite
// expressions (unary/binary/case) are reconstructed recursively.
Scalar* ScalarFactory::ReconstructFromProto(const serialization::Scalar &proto,
                                            const CatalogDatabase &database) {
  DCHECK(ProtoIsValid(proto, database))
      << "Attempted to create Scalar from an invalid proto description:\n"
      << proto.DebugString();

  switch (proto.data_source()) {
    case serialization::Scalar::LITERAL:
      return new ScalarLiteral(
          TypedValue::ReconstructFromProto(proto.GetExtension(serialization::ScalarLiteral::literal)),
          TypeFactory::ReconstructFromProto(proto.GetExtension(serialization::ScalarLiteral::literal_type)));
    case serialization::Scalar::ATTRIBUTE: {
      const relation_id rel_id = proto.GetExtension(serialization::ScalarAttribute::relation_id);
      DCHECK(database.hasRelationWithId(rel_id));

      return new ScalarAttribute(*database.getRelationById(rel_id)->getAttributeById(
          proto.GetExtension(serialization::ScalarAttribute::attribute_id)));
    }
    case serialization::Scalar::UNARY_EXPRESSION: {
      return new ScalarUnaryExpression(
          UnaryOperationFactory::ReconstructFromProto(
              proto.GetExtension(serialization::ScalarUnaryExpression::operation)),
          ReconstructFromProto(proto.GetExtension(serialization::ScalarUnaryExpression::operand),
                               database));
    }
    case serialization::Scalar::BINARY_EXPRESSION: {
      return new ScalarBinaryExpression(
          BinaryOperationFactory::ReconstructFromProto(
              proto.GetExtension(serialization::ScalarBinaryExpression::operation)),
          ReconstructFromProto(proto.GetExtension(serialization::ScalarBinaryExpression::left_operand),
                               database),
          ReconstructFromProto(proto.GetExtension(serialization::ScalarBinaryExpression::right_operand),
                               database));
    }
    case serialization::Scalar::CASE_EXPRESSION: {
      const Type &result_type = TypeFactory::ReconstructFromProto(
          proto.GetExtension(serialization::ScalarCaseExpression::result_type));

      // Counts are known up front; reserve to avoid reallocations.
      const int num_when_predicates =
          proto.ExtensionSize(serialization::ScalarCaseExpression::when_predicate);
      std::vector<std::unique_ptr<Predicate>> when_predicates;
      when_predicates.reserve(num_when_predicates);
      for (int when_pred_num = 0; when_pred_num < num_when_predicates; ++when_pred_num) {
        when_predicates.emplace_back(PredicateFactory::ReconstructFromProto(
            proto.GetExtension(serialization::ScalarCaseExpression::when_predicate, when_pred_num),
            database));
      }

      const int num_result_expressions =
          proto.ExtensionSize(serialization::ScalarCaseExpression::result_expression);
      std::vector<std::unique_ptr<Scalar>> result_expressions;
      result_expressions.reserve(num_result_expressions);
      for (int result_expr_num = 0; result_expr_num < num_result_expressions; ++result_expr_num) {
        result_expressions.emplace_back(ReconstructFromProto(
            proto.GetExtension(serialization::ScalarCaseExpression::result_expression, result_expr_num),
            database));
      }

      std::unique_ptr<Scalar> else_result_expression(ReconstructFromProto(
          proto.GetExtension(serialization::ScalarCaseExpression::else_result_expression),
          database));
      return new ScalarCaseExpression(result_type,
                                      std::move(when_predicates),
                                      std::move(result_expressions),
                                      else_result_expression.release());
    }
    default:
      FATAL_ERROR("Unknown Scalar data source in ScalarFactory::ReconstructFromProto");
  }
}
// Reconstructs all per-query shared state (aggregation states, hash tables,
// insert destinations, predicates, scalar groups, sort configs, tuples,
// update groups, window aggregation states, LIP filters) from the serialized
// QueryContext. Each group below is indexed later by WorkOrders via the
// getter whose index matches the proto field position.
QueryContext::QueryContext(const serialization::QueryContext &proto,
                           const CatalogDatabaseLite &database,
                           StorageManager *storage_manager,
                           const tmb::client_id scheduler_client_id,
                           tmb::MessageBus *bus) {
  DCHECK(ProtoIsValid(proto, database))
      << "Attempted to create QueryContext from an invalid proto description:\n"
      << proto.DebugString();

  // Aggregation states: one reconstructed state per partition per context.
  for (int i = 0; i < proto.aggregation_states_size(); ++i) {
    PartitionedAggregationOperationStates partitioned_aggregation_states;
    const serialization::QueryContext::AggregationOperationStateContext &aggr_state_context_proto =
        proto.aggregation_states(i);
    for (std::uint64_t j = 0; j < aggr_state_context_proto.num_partitions(); ++j) {
      partitioned_aggregation_states.emplace_back(
          AggregationOperationState::ReconstructFromProto(aggr_state_context_proto.aggregation_state(),
                                                          database,
                                                          storage_manager));
    }
    aggregation_states_.push_back(move(partitioned_aggregation_states));
  }

  // Generator function handles (table-generator sources).
  for (int i = 0; i < proto.generator_functions_size(); ++i) {
    const GeneratorFunctionHandle *func_handle =
        GeneratorFunctionFactory::Instance().reconstructFromProto(proto.generator_functions(i));
    DCHECK(func_handle != nullptr);
    generator_functions_.emplace_back(
        std::unique_ptr<const GeneratorFunctionHandle>(func_handle));
  }

  // Join hash tables: one resizable table per partition per context.
  for (int i = 0; i < proto.join_hash_tables_size(); ++i) {
    PartitionedJoinHashTables partitioned_join_hash_tables;
    const serialization::QueryContext::HashTableContext &hash_table_context_proto =
        proto.join_hash_tables(i);
    for (std::uint64_t j = 0; j < hash_table_context_proto.num_partitions(); ++j) {
      partitioned_join_hash_tables.emplace_back(
          JoinHashTableFactory::CreateResizableFromProto(hash_table_context_proto.join_hash_table(),
                                                         storage_manager));
    }
    join_hash_tables_.push_back(move(partitioned_join_hash_tables));
  }

  // Insert destinations (output targets); these communicate with the
  // scheduler via 'scheduler_client_id' / 'bus'.
  for (int i = 0; i < proto.insert_destinations_size(); ++i) {
    const serialization::InsertDestination &insert_destination_proto = proto.insert_destinations(i);
    insert_destinations_.emplace_back(InsertDestination::ReconstructFromProto(
        proto.query_id(),
        insert_destination_proto,
        database.getRelationSchemaById(insert_destination_proto.relation_id()),
        storage_manager,
        scheduler_client_id,
        bus));
  }

  // LIP (lookahead information passing) filters, reconstructed before the
  // deployments below, which reference them.
  for (int i = 0; i < proto.lip_filters_size(); ++i) {
    lip_filters_.emplace_back(
        std::unique_ptr<LIPFilter>(
            LIPFilterFactory::ReconstructFromProto(proto.lip_filters(i))));
  }

  for (int i = 0; i < proto.lip_filter_deployments_size(); ++i) {
    lip_deployments_.emplace_back(
        std::make_unique<LIPFilterDeployment>(
            proto.lip_filter_deployments(i), lip_filters_));
  }

  // Predicates.
  for (int i = 0; i < proto.predicates_size(); ++i) {
    predicates_.emplace_back(
        PredicateFactory::ReconstructFromProto(proto.predicates(i), database));
  }

  // Scalar groups (projection / selection expression lists).
  for (int i = 0; i < proto.scalar_groups_size(); ++i) {
    vector<unique_ptr<const Scalar>> scalar_group;
    const serialization::QueryContext::ScalarGroup &scalar_group_proto = proto.scalar_groups(i);
    for (int j = 0; j < scalar_group_proto.scalars_size(); ++j) {
      scalar_group.emplace_back(
          ScalarFactory::ReconstructFromProto(scalar_group_proto.scalars(j), database));
    }
    scalar_groups_.push_back(move(scalar_group));
  }

  // Sort configurations.
  for (int i = 0; i < proto.sort_configs_size(); ++i) {
    sort_configs_.emplace_back(
        SortConfiguration::ReconstructFromProto(proto.sort_configs(i), database));
  }

  // Literal tuples (e.g. for INSERT work orders).
  for (int i = 0; i < proto.tuples_size(); ++i) {
    tuples_.emplace_back(Tuple::ReconstructFromProto(proto.tuples(i)));
  }

  // Update groups: per-group map of attribute id -> assignment expression.
  for (int i = 0; i < proto.update_groups_size(); ++i) {
    const serialization::QueryContext::UpdateGroup &update_group_proto = proto.update_groups(i);

    std::unordered_map<attribute_id, std::unique_ptr<const Scalar>> update_group;
    for (int j = 0; j < update_group_proto.update_assignments_size(); ++j) {
      const serialization::QueryContext::UpdateGroup::UpdateAssignment &update_assignment_proto =
          update_group_proto.update_assignments(j);

      unique_ptr<const Scalar> scalar(
          ScalarFactory::ReconstructFromProto(update_assignment_proto.scalar(), database));

      update_group.emplace(update_assignment_proto.attribute_id(), move(scalar));
    }
    update_groups_.push_back(move(update_group));
  }

  // Window aggregation states.
  for (int i = 0; i < proto.window_aggregation_states_size(); ++i) {
    window_aggregation_states_.emplace_back(
        WindowAggregationOperationState::ReconstructFromProto(proto.window_aggregation_states(i),
                                                              database,
                                                              storage_manager));
  }
}