ColumnsDescription InterpreterCreateQuery::setColumns(
    ASTCreateQuery & create, const Block & as_select_sample, const StoragePtr & as_storage) const
{
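    /// The resulting column set comes from an explicit column list, from the structure of the table referenced by AS, or from the sample block of the SELECT.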
    ColumnsDescription res;

    if (create.columns)
    {
        res = getColumnsDescription(*create.columns, context);
    }
    else if (!create.as_table.empty())
    {
        res = as_storage->getColumns();
    }
    else if (create.select)
    {
        for (size_t i = 0; i < as_select_sample.columns(); ++i)
            res.ordinary.emplace_back(as_select_sample.safeGetByPosition(i).name, as_select_sample.safeGetByPosition(i).type);
    }
    else
        throw Exception("Incorrect CREATE query: required list of column descriptions or AS section or SELECT.", ErrorCodes::INCORRECT_QUERY);

    /// Even if the query has an explicit list of columns, canonicalize it (unfold Nested columns).
    ASTPtr new_columns = formatColumns(res);
    if (create.columns)
        create.replace(create.columns, new_columns);
    else
        create.set(create.columns, new_columns);

    /// Check for duplicates
    std::set<String> all_columns;
    auto check_column_already_exists = [&all_columns](const NameAndTypePair & column_name_and_type)
    {
        if (!all_columns.emplace(column_name_and_type.name).second)
            throw Exception("Column " + backQuoteIfNeed(column_name_and_type.name) + " already exists", ErrorCodes::DUPLICATE_COLUMN);
    };

    for (const auto & elem : res.ordinary)
        check_column_already_exists(elem);
    for (const auto & elem : res.materialized)
        check_column_already_exists(elem);
    for (const auto & elem : res.aliases)
        check_column_already_exists(elem);

    return res;
}
Example #2
BlockInputStreams StorageMerge::createSourceStreams(const SelectQueryInfo & query_info, const QueryProcessingStage::Enum & processed_stage,
                                                    const size_t max_block_size, const Block & header, const StoragePtr & storage,
                                                    const TableStructureReadLockPtr & struct_lock, Names & real_column_names,
                                                    Context & modified_context, size_t streams_num, bool has_table_virtual_column,
                                                    bool concat_streams)
{
    SelectQueryInfo modified_query_info = query_info;
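    /// Clone the query so that per-table modifications (such as the _table rewrite below) do not affect the original query.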
    modified_query_info.query = query_info.query->clone();

    VirtualColumnUtils::rewriteEntityInAst(modified_query_info.query, "_table", storage ? storage->getTableName() : "");

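    /// No underlying table: execute the query over a single block with the header structure.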
    if (!storage)
        return BlockInputStreams{
            InterpreterSelectQuery(modified_query_info.query, modified_context, std::make_shared<OneBlockInputStream>(header),
                                   processed_stage, true).execute().in};

    BlockInputStreams source_streams;

    if (processed_stage <= storage->getQueryProcessingStage(modified_context))
    {
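        /// The table itself can process the query up to the required stage, so read from it directly.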
        /// If the query selects only virtual columns, we still have to request at least one physical column.
        if (real_column_names.empty())
            real_column_names.push_back(ExpressionActions::getSmallestColumn(storage->getColumns().getAllPhysical()));

        source_streams = storage->read(real_column_names, modified_query_info, modified_context, processed_stage, max_block_size,
                                       UInt32(streams_num));
    }
    else if (processed_stage > storage->getQueryProcessingStage(modified_context))
    {
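        /// The required stage is beyond what the underlying table provides by itself, so run a full InterpreterSelectQuery over it.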
        typeid_cast<ASTSelectQuery *>(modified_query_info.query.get())->replaceDatabaseAndTable(source_database, storage->getTableName());

        /// Maximum permissible parallelism is streams_num
        modified_context.getSettingsRef().max_threads = UInt64(streams_num);
        modified_context.getSettingsRef().max_streams_to_max_threads_ratio = 1;

        InterpreterSelectQuery interpreter{modified_query_info.query, modified_context, Names{}, processed_stage};
        BlockInputStreamPtr interpreter_stream = interpreter.execute().in;

        /** Materialization is needed, since constants come materialized from distributed storage.
          * If we did not do this, columns of different kinds (Const and non-Const) would be produced in different threads,
          * and this is not allowed, since all code is based on the assumption that all types in a block stream are the same.
          */
        source_streams.emplace_back(std::make_shared<MaterializingBlockInputStream>(interpreter_stream));
    }

    if (!source_streams.empty())
    {
        if (concat_streams)
        {
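            /// Merge the streams of this table into a single stream, so the table is read as one sequential source.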
            BlockInputStreamPtr stream =
                source_streams.size() > 1 ? std::make_shared<ConcatBlockInputStream>(source_streams) : source_streams[0];

            source_streams.resize(1);
            source_streams[0] = stream;
        }

        for (BlockInputStreamPtr & source_stream : source_streams)
        {
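            /// If the query refers to the _table virtual column, add it as a constant column filled with the source table name.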
            if (has_table_virtual_column)
                source_stream = std::make_shared<AddingConstColumnBlockInputStream<String>>(
                    source_stream, std::make_shared<DataTypeString>(), storage->getTableName(), "_table");

            /// Subordinate tables could have different but convertible types, like numeric types of different width.
            /// We must return streams with a structure equal to the structure of the Merge table.
            convertingSourceStream(header, modified_context, modified_query_info.query, source_stream, processed_stage);

            source_stream->addTableLock(struct_lock);
        }
    }

    return source_streams;
}