void DescribeStreamFactory::createForShard(
        const Cluster::ShardInfo & shard_info,
        const String & query, const ASTPtr & query_ast,
        const Context & context, const ThrottlerPtr & throttler,
        BlockInputStreams & res)
{
    /// Build one input stream per replica that lives on this server:
    /// execute DESCRIBE locally via the interpreter instead of going over the network.
    for (const Cluster::Address & address : shard_info.local_addresses)
    {
        InterpreterDescribeQuery local_interpreter{query_ast, context};
        BlockInputStreamPtr local_stream = local_interpreter.execute().in;

        /** Materialization is needed, since from remote servers the constants come materialized.
         * If you do not do this, different types (Const and non-Const) columns will be produced in different threads,
         * And this is not allowed, since all code is based on the assumption that in the block stream all types are the same.
         */
        auto materialized = std::make_shared<MaterializingBlockInputStream>(local_stream);

        /// Tag each block with the address of the replica that produced it.
        res.emplace_back(std::make_shared<BlockExtraInfoInputStream>(materialized, toBlockExtraInfo(address)));
    }

    /// Also query every remote replica of this shard; GET_ALL asks all of them, not just one.
    /// nullptr here is presumably the settings argument — TODO confirm against RemoteBlockInputStream's ctor.
    auto remote = std::make_shared<RemoteBlockInputStream>(
            shard_info.pool, query, context, nullptr, throttler);
    remote->setPoolMode(PoolMode::GET_ALL);
    remote->appendExtraInfo();
    res.emplace_back(std::move(remote));
}
// ---- Esempio n. 2 (second example; marker kept from the original source listing) ----
/// Build the stream that runs DESCRIBE on the remote replicas reachable through `pools`.
/// Takes ownership of the pool list; the returned stream is tagged with per-source extra info.
BlockInputStreamPtr DescribeQueryConstructor::createRemote(
        ConnectionPoolWithFailoverPtrs && pools, const std::string & query,
        const Settings & settings, ThrottlerPtr throttler, const Context & context)
{
    auto remote = std::make_shared<RemoteBlockInputStream>(
            std::move(pools), query, &settings, context, throttler);
    // pool_mode is presumably a data member of DescribeQueryConstructor — not visible in this chunk.
    remote->setPoolMode(pool_mode);
    remote->appendExtraInfo();
    return remote;
}