void ExecuteScalarSubqueriesMatcher::visit(ASTPtr & ast, Data & data)
{
    /// Dispatch to the type-specific overload. Both casts are checked in sequence
    /// against the current value of 'ast', exactly as in the original flow.
    if (auto * subquery = typeid_cast<ASTSubquery *>(ast.get()))
        visit(*subquery, ast, data);

    if (auto * function = typeid_cast<ASTFunction *>(ast.get()))
        visit(*function, ast, data);
}
/** Extract the list of lambda parameter names from the AST node on the left of '->'
  * (or the first argument of the 'lambda' function).
  * Throws BAD_LAMBDA with a syntax hint on any malformed parameter list.
  */
AnalyzeLambdas::LambdaParameters AnalyzeLambdas::extractLambdaParameters(ASTPtr & ast)
{
    /// Lambda parameters could be specified in AST in two forms:
    /// - just as single parameter: x -> x + 1
    /// - parameters in tuple: (x, y) -> x + 1

/// Appended to every error message below so the user always sees the valid syntax.
#define LAMBDA_ERROR_MESSAGE " There are two valid forms of lambda expressions: x -> ... and (x, y...) -> ..."

    if (!ast->tryGetAlias().empty())
        throw Exception("Lambda parameters cannot have aliases."
            LAMBDA_ERROR_MESSAGE, ErrorCodes::BAD_LAMBDA);

    if (const ASTIdentifier * identifier = typeid_cast<const ASTIdentifier *>(ast.get()))
    {
        /// Single-parameter form: x -> ...
        return { identifier->name };
    }
    else if (const ASTFunction * function = typeid_cast<const ASTFunction *>(ast.get()))
    {
        /// Tuple form: (x, y) -> ...  The parser represents the parenthesized list as the 'tuple' function.
        if (function->name != "tuple")
            throw Exception("Left hand side of '->' or first argument of 'lambda' is a function, but this function is not tuple."
                LAMBDA_ERROR_MESSAGE " Found function '" + function->name + "' instead.", ErrorCodes::BAD_LAMBDA);

        if (!function->arguments || function->arguments->children.empty())
            throw Exception("Left hand side of '->' or first argument of 'lambda' is empty tuple."
                LAMBDA_ERROR_MESSAGE, ErrorCodes::BAD_LAMBDA);

        LambdaParameters res;
        res.reserve(function->arguments->children.size());

        /// Each tuple element must be a plain identifier: no nesting, no qualification, no alias.
        for (const ASTPtr & arg : function->arguments->children)
        {
            const ASTIdentifier * arg_identifier = typeid_cast<const ASTIdentifier *>(arg.get());

            if (!arg_identifier)
                throw Exception("Left hand side of '->' or first argument of 'lambda' contains something that is not just identifier."
                    LAMBDA_ERROR_MESSAGE, ErrorCodes::BAD_LAMBDA);

            if (!arg_identifier->children.empty())
                throw Exception("Left hand side of '->' or first argument of 'lambda' contains compound identifier."
                    LAMBDA_ERROR_MESSAGE, ErrorCodes::BAD_LAMBDA);

            if (!arg_identifier->alias.empty())
                throw Exception("Lambda parameters cannot have aliases."
                    LAMBDA_ERROR_MESSAGE, ErrorCodes::BAD_LAMBDA);

            res.emplace_back(arg_identifier->name);
        }

        return res;
    }
    else
        throw Exception("Unexpected left hand side of '->' or first argument of 'lambda'."
            LAMBDA_ERROR_MESSAGE, ErrorCodes::BAD_LAMBDA);

#undef LAMBDA_ERROR_MESSAGE
}
/// Convert a set element AST node into a Field of the requested type.
/// Accepts either a literal or a constant expression; anything else is an error.
static Field extractValueFromNode(ASTPtr & node, const IDataType & type, const Context & context)
{
    if (auto * literal = typeid_cast<ASTLiteral *>(node.get()))
        return convertFieldToType(literal->value, type);

    if (typeid_cast<ASTFunction *>(node.get()))
        return convertFieldToType(evaluateConstantExpression(node, context), type);

    throw Exception("Incorrect element of set. Must be literal or constant expression.", ErrorCodes::INCORRECT_ELEMENT_OF_SET);
}
/// Parse an element followed by an optional alias ("expr AS alias" or "expr alias").
/// On success, stores the alias into the parsed node if it supports one.
bool ParserWithOptionalAliasImpl<ParserAlias>::parseImpl(Pos & pos, Pos end, ASTPtr & node, Pos & max_parsed_pos, Expected & expected)
{
    ParserWhiteSpaceOrComments ws;

    if (!elem_parser->parse(pos, end, node, max_parsed_pos, expected))
        return false;

    /** A small hack.
      *
      * In the SELECT section we allow parsing aliases without the AS keyword.
      * These aliases cannot coincide with query keywords.
      * The expression itself, however, may be an identifier coinciding with a keyword.
      * For example, a column may be named `where`, and a query may read
      * SELECT where AS x FROM table, or even SELECT where x FROM table.
      * Even SELECT where AS from FROM table can be written, but not SELECT where from FROM table.
      * See the ParserAlias implementation for details.
      *
      * But a small problem arises: an inconvenient error message when the SELECT section ends with
      * an extra trailing comma - a very common mistake. Example: SELECT x, y, z, FROM tbl
      * If nothing is done, this parses as selecting a column named FROM with alias tbl.
      * To avoid that, we do not allow parsing an alias without the AS keyword
      * for an identifier named FROM.
      *
      * Note: this also filters out the case when the identifier is quoted.
      * Example: SELECT x, y, z, `FROM` tbl. Such a case could have been allowed.
      *
      * Going forward, it would be simpler to forbid unquoted identifiers that coincide with keywords.
      */
    bool allow_alias_without_as_keyword_now = allow_alias_without_as_keyword;
    if (allow_alias_without_as_keyword)
        if (const ASTIdentifier * id = typeid_cast<const ASTIdentifier *>(node.get()))
            if (0 == strcasecmp(id->name.data(), "FROM"))
                allow_alias_without_as_keyword_now = false;

    ws.ignore(pos, end);

    ASTPtr alias_node;
    if (ParserAlias(allow_alias_without_as_keyword_now).parse(pos, end, alias_node, max_parsed_pos, expected))
    {
        String alias_name = typeid_cast<ASTIdentifier &>(*alias_node).name;

        /// Only nodes derived from ASTWithAlias can carry an alias; anything else is a parse error.
        if (ASTWithAlias * ast_with_alias = dynamic_cast<ASTWithAlias *>(node.get()))
            ast_with_alias->alias = alias_name;
        else
        {
            expected = "alias cannot be here";
            return false;
        }
    }

    return true;
}
/// Parse an alias: either "AS name", or (when allowed) a bare "name" that is not a restricted keyword.
bool ParserAliasImpl<ParserIdentifier>::parseImpl(Pos & pos, Pos end, ASTPtr & node, Pos & max_parsed_pos, Expected & expected)
{
    ParserWhiteSpaceOrComments ws;
    ParserString s_as("AS", true, true);
    ParserIdentifier id_p;

    bool has_as_word = s_as.parse(pos, end, node, max_parsed_pos, expected);

    /// A bare alias (no AS) is only permitted when explicitly allowed.
    if (!allow_alias_without_as_keyword && !has_as_word)
        return false;

    ws.ignore(pos, end);

    if (!id_p.parse(pos, end, node, max_parsed_pos, expected))
        return false;

    if (!has_as_word)
    {
        /** In this case the alias must not coincide with a keyword - so that
          * in the query "SELECT x FROM t" the word FROM is not treated as an alias,
          * while in the query "SELECT x FRO FROM t" the word FRO is treated as an alias.
          */
        const String & name = static_cast<const ASTIdentifier &>(*node.get()).name;

        for (const char ** keyword = restricted_keywords; *keyword != nullptr; ++keyword)
            if (0 == strcasecmp(name.data(), *keyword))
                return false;
    }

    return true;
}
bool ExecuteScalarSubqueriesMatcher::needChildVisit(ASTPtr & node, const ASTPtr & child)
{
    /// Subqueries and functions are terminal here: visit() handles them itself.
    if (typeid_cast<ASTSubquery *>(node.get()))
        return false;
    if (typeid_cast<ASTFunction *>(node.get()))
        return false;

    /// Don't descend into subqueries in FROM section.
    if (typeid_cast<ASTTableExpression *>(node.get()))
        return false;

    /// For a SELECT query, do not go to FROM, JOIN, UNION branches.
    if (typeid_cast<ASTSelectQuery *>(node.get()))
    {
        bool child_is_table_expr = typeid_cast<ASTTableExpression *>(child.get()) != nullptr;
        bool child_is_select = typeid_cast<ASTSelectQuery *>(child.get()) != nullptr;
        return !(child_is_table_expr || child_is_select);
    }

    return true;
}
/// Extract all subfunctions of the main conjunction, but depending only on the specified columns static void extractFunctions(const ASTPtr & expression, const NameSet & columns, std::vector<ASTPtr> & result) { const ASTFunction * function = typeid_cast<const ASTFunction *>(expression.get()); if (function && function->name == "and") { for (size_t i = 0; i < function->arguments->children.size(); ++i) extractFunctions(function->arguments->children[i], columns, result); } else if (isValidFunction(expression, columns)) { result.push_back(expression->clone()); } }
void AnalyzeColumns::process(ASTPtr & ast, const CollectAliases & aliases, const CollectTables & tables)
{
    /// If this is a SELECT query, skip the FORMAT and SETTINGS clauses:
    /// they contain identifiers that are not columns.
    const auto * select_query = typeid_cast<const ASTSelectQuery *>(ast.get());

    for (auto & child : ast->children)
    {
        bool is_format_or_settings = select_query
            && (child.get() == select_query->format.get() || child.get() == select_query->settings.get());

        if (is_format_or_settings)
            continue;

        processImpl(child, columns, aliases, tables);
    }
}
bool MergeTreeMinMaxIndex::mayBenefitFromIndexForIn(const ASTPtr & node) const
{
    const String column_name = node->getColumnName();

    /// Direct hit: the expression is one of the indexed columns.
    for (const auto & indexed_column : columns)
        if (indexed_column == column_name)
            return true;

    /// A single-argument function wrapping an expression: check its argument instead.
    if (const auto * func = typeid_cast<const ASTFunction *>(node.get()))
        if (func->arguments->children.size() == 1)
            return mayBenefitFromIndexForIn(func->arguments->children.front());

    return false;
}
/// Create the storage for the table function numbers(length) / numbers(offset, length).
/// @throws Exception(NUMBER_OF_ARGUMENTS_DOESNT_MATCH) when the argument list is malformed.
StoragePtr TableFunctionNumbers::executeImpl(const ASTPtr & ast_function, const Context & context) const
{
    if (const ASTFunction * function = typeid_cast<ASTFunction *>(ast_function.get()))
    {
        /// NOTE(review): the copy of the children vector looks intentional - evaluateArgument
        /// presumably may replace the argument nodes; confirm before turning this into a reference.
        auto arguments = function->arguments->children;

        if (arguments.size() != 1 && arguments.size() != 2)
            throw Exception("Table function 'numbers' requires 'length' or 'offset, length'.", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);

        /// One argument: length only (offset defaults to 0). Two arguments: offset, length.
        UInt64 offset = arguments.size() == 2 ? evaluateArgument(context, arguments[0]) : 0;
        UInt64 length = arguments.size() == 2 ? evaluateArgument(context, arguments[1]) : evaluateArgument(context, arguments[0]);

        auto res = StorageSystemNumbers::create(getName(), false, length, offset);
        res->startup();
        return res;
    }

    /// Wording unified with the argument-count message above (was: "'limit' or 'offset, limit'").
    throw Exception("Table function 'numbers' requires 'length' or 'offset, length'.", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
}
/// Collect the underlying tables of this Merge storage whose names match table_name_regexp,
/// optionally locking their structure and optionally pre-filtering by the '_table' virtual column.
StorageMerge::StorageListWithLocks StorageMerge::getSelectedTables(const ASTPtr & query, bool has_virtual_column, bool get_lock) const
{
    StorageListWithLocks selected_tables;
    DatabasePtr database = global_context.getDatabase(source_database);
    DatabaseIteratorPtr iterator = database->getIterator(global_context);

    /// Accumulates table names to build the '_table' virtual column for filtering below.
    auto virtual_column = ColumnString::create();

    while (iterator->isValid())
    {
        if (table_name_regexp.match(iterator->name()))
        {
            StoragePtr storage = iterator->table();

            /// NOTE(review): assumes a non-null 'query' is always an ASTSelectQuery -
            /// the typeid_cast result is dereferenced without a null check; verify at call sites.
            if (query && typeid_cast<ASTSelectQuery *>(query.get())->prewhere_expression && !storage->supportsPrewhere())
                throw Exception("Storage " + storage->getName() + " doesn't support PREWHERE.", ErrorCodes::ILLEGAL_PREWHERE);

            /// Skip the Merge table itself to avoid self-recursion.
            if (storage.get() != this)
            {
                virtual_column->insert(storage->getTableName());
                selected_tables.emplace_back(storage, get_lock ? storage->lockStructure(false) : TableStructureReadLockPtr{});
            }
        }

        iterator->next();
    }

    if (has_virtual_column)
    {
        /// Evaluate the query's conditions against the '_table' column to discard tables
        /// the query provably does not touch.
        Block virtual_columns_block = Block{ColumnWithTypeAndName(std::move(virtual_column), std::make_shared<DataTypeString>(), "_table")};
        VirtualColumnUtils::filterBlockWithQuery(query, virtual_columns_block, global_context);
        auto values = VirtualColumnUtils::extractSingleValueFromBlock<String>(virtual_columns_block, "_table");

        /// Remove unused tables from the list
        selected_tables.remove_if([&] (const auto & elem) { return values.find(elem.first->getTableName()) == values.end(); });
    }

    return selected_tables;
}
/// Wrap 'source_stream' in a converting stream so its blocks match 'header',
/// and reject the query if a type conversion would invalidate the WHERE expression.
void StorageMerge::convertingSourceStream(const Block & header, const Context & context, ASTPtr & query,
    BlockInputStreamPtr & source_stream, QueryProcessingStage::Enum processed_stage)
{
    Block before_block_header = source_stream->getHeader();
    source_stream = std::make_shared<ConvertingBlockInputStream>(context, source_stream, header, ConvertingBlockInputStream::MatchColumnsMode::Name);

    /// NOTE(review): assumes 'query' is an ASTSelectQuery - the cast result is dereferenced unchecked.
    ASTPtr where_expression = typeid_cast<ASTSelectQuery *>(query.get())->where_expression;

    if (!where_expression)
        return;

    for (size_t column_index : ext::range(0, header.columns()))
    {
        ColumnWithTypeAndName header_column = header.getByPosition(column_index);
        ColumnWithTypeAndName before_column = before_block_header.getByName(header_column.name);

        /// If processed_stage is greater than FetchColumns and the block structure differs between
        /// streams, the WHERE expression may be invalid because of ConvertingBlockInputStream,
        /// so we must throw an exception when WHERE actually uses the converted column.
        if (!header_column.type->equals(*before_column.type.get()) && processed_stage > QueryProcessingStage::FetchColumns)
        {
            /// Re-analyze WHERE against the source columns (plus the '_table' virtual column)
            /// to find out which columns it requires.
            NamesAndTypesList source_columns = getSampleBlock().getNamesAndTypesList();
            NameAndTypePair virtual_column = getColumn("_table");
            source_columns.insert(source_columns.end(), virtual_column);

            auto syntax_result = SyntaxAnalyzer(context).analyze(where_expression, source_columns);
            ExpressionActionsPtr actions = ExpressionAnalyzer{where_expression, syntax_result, context}.getActions(false, false);
            Names required_columns = actions->getRequiredColumns();

            for (const auto & required_column : required_columns)
            {
                if (required_column == header_column.name)
                    throw Exception("Block structure mismatch in Merge Storage: different types:\n" + before_block_header.dumpStructure()
                        + "\n" + header.dumpStructure(), ErrorCodes::BLOCKS_HAVE_DIFFERENT_STRUCTURE);
            }
        }
    }
}
/// Optimize the GROUP BY / LIMIT BY / ORDER BY expression lists of a SELECT query,
/// removing the ORDER BY clause entirely when it becomes empty.
/// @throws Exception(UNEXPECTED_AST_STRUCTURE) if 'ast' is not a well-formed SELECT query.
void OptimizeGroupOrderLimitBy::process(ASTPtr & ast, TypeAndConstantInference & expression_info)
{
    ASTSelectQuery * select = typeid_cast<ASTSelectQuery *>(ast.get());
    if (!select)
        /// Fixed copy-paste: the message previously named AnalyzeResultOfQuery::process.
        throw Exception("OptimizeGroupOrderLimitBy::process was called for not a SELECT query",
            ErrorCodes::UNEXPECTED_AST_STRUCTURE);
    if (!select->select_expression_list)
        throw Exception("SELECT query doesn't have select_expression_list",
            ErrorCodes::UNEXPECTED_AST_STRUCTURE);

    processGroupByLikeList(select->group_expression_list, expression_info);
    processGroupByLikeList(select->limit_by_expression_list, expression_info);

    if (select->order_expression_list)
    {
        processOrderByList(select->order_expression_list, expression_info);

        /// ORDER BY could be completely eliminated: erase the child node too, so the
        /// AST children list stays consistent with the dedicated member pointer.
        if (select->order_expression_list->children.empty())
        {
            select->children.erase(std::remove(
                select->children.begin(), select->children.end(), select->order_expression_list), select->children.end());
            select->order_expression_list.reset();
        }
    }
}
/// Execute a query read from 'istr', writing the result to 'ostr'.
/// Handles both directions: INSERT queries consume the remaining data from 'istr',
/// other queries format their result stream into 'ostr'.
/// NOTE(review): 'query_plan' appears unused in this body - confirm whether it is vestigial.
void executeQuery(
    ReadBuffer & istr,
    WriteBuffer & ostr,
    Context & context,
    BlockInputStreamPtr & query_plan,
    std::function<void(const String &)> set_content_type)
{
    PODArray<char> parse_buf;
    const char * begin;
    const char * end;

    /// If 'istr' is empty now, fetch next data into buffer.
    if (istr.buffer().size() == 0)
        istr.next();

    size_t max_query_size = context.getSettingsRef().max_query_size;

    if (istr.buffer().end() - istr.position() >= static_cast<ssize_t>(max_query_size))
    {
        /// If remaining buffer space in 'istr' is enough to parse query up to 'max_query_size' bytes, then parse inplace.
        begin = istr.position();
        end = istr.buffer().end();
        istr.position() += end - begin;
    }
    else
    {
        /// If not - copy enough data into 'parse_buf'.
        parse_buf.resize(max_query_size);
        parse_buf.resize(istr.read(&parse_buf[0], max_query_size));
        begin = &parse_buf[0];
        end = begin + parse_buf.size();
    }

    ASTPtr ast;
    BlockIO streams;

    std::tie(ast, streams) = executeQueryImpl(begin, end, context, false, QueryProcessingStage::Complete);

    try
    {
        if (streams.out)
        {
            /// A query with an output stream must be an INSERT expecting data.
            const ASTInsertQuery * ast_insert_query = dynamic_cast<const ASTInsertQuery *>(ast.get());

            if (!ast_insert_query)
                throw Exception("Logical error: query requires data to insert, but it is not INSERT query", ErrorCodes::LOGICAL_ERROR);

            String format = ast_insert_query->format;
            if (format.empty())
                format = "Values";

            /// Data could be in parsed (ast_insert_query.data) and in not parsed yet (istr) part of query.

            ConcatReadBuffer::ReadBuffers buffers;
            ReadBuffer buf1(const_cast<char *>(ast_insert_query->data), ast_insert_query->data ? ast_insert_query->end - ast_insert_query->data : 0, 0);

            if (ast_insert_query->data)
                buffers.push_back(&buf1);
            buffers.push_back(&istr);

            /** NOTE Must not read from 'istr' before read all between 'ast_insert_query.data' and 'ast_insert_query.end'.
              * - because 'query.data' could refer to memory piece, used as buffer for 'istr'.
              */

            ConcatReadBuffer data_istr(buffers);

            BlockInputStreamPtr in{
                context.getInputFormat(
                    format, data_istr, streams.out_sample, context.getSettings().max_insert_block_size)};

            copyData(*in, *streams.out);
        }

        if (streams.in)
        {
            /// Pick the output format: explicit FORMAT clause, otherwise the context default.
            const ASTQueryWithOutput * ast_query_with_output = dynamic_cast<const ASTQueryWithOutput *>(ast.get());

            String format_name = ast_query_with_output && (ast_query_with_output->getFormat() != nullptr)
                ? typeid_cast<const ASTIdentifier &>(*ast_query_with_output->getFormat()).name
                : context.getDefaultFormat();

            BlockOutputStreamPtr out = context.getOutputFormat(format_name, ostr, streams.in_sample);

            if (IProfilingBlockInputStream * stream = dynamic_cast<IProfilingBlockInputStream *>(streams.in.get()))
            {
                /// NOTE Progress callback takes shared ownership of 'out'.
                stream->setProgressCallback([out] (const Progress & progress) { out->onProgress(progress); });
            }

            if (set_content_type)
                set_content_type(out->getContentType());

            copyData(*streams.in, *out);
        }
    }
    catch (...)
    {
        streams.onException();
        throw;
    }

    streams.onFinish();
}
/// Execute a query read from 'istr', writing the result to 'ostr' or, when the query
/// specifies INTO OUTFILE (and it is allowed), to a newly created file.
void executeQuery(
    ReadBuffer & istr,
    WriteBuffer & ostr,
    bool allow_into_outfile,
    Context & context,
    std::function<void(const String &)> set_content_type)
{
    PODArray<char> parse_buf;
    const char * begin;
    const char * end;

    /// If 'istr' is empty now, fetch next data into buffer.
    if (istr.buffer().size() == 0)
        istr.next();

    size_t max_query_size = context.getSettingsRef().max_query_size;

    if (istr.buffer().end() - istr.position() >= static_cast<ssize_t>(max_query_size))
    {
        /// If remaining buffer space in 'istr' is enough to parse query up to 'max_query_size' bytes, then parse inplace.
        begin = istr.position();
        end = istr.buffer().end();
        istr.position() += end - begin;
    }
    else
    {
        /// If not - copy enough data into 'parse_buf'.
        parse_buf.resize(max_query_size);
        parse_buf.resize(istr.read(&parse_buf[0], max_query_size));
        begin = &parse_buf[0];
        end = begin + parse_buf.size();
    }

    ASTPtr ast;
    BlockIO streams;

    std::tie(ast, streams) = executeQueryImpl(begin, end, context, false, QueryProcessingStage::Complete);

    try
    {
        if (streams.out)
        {
            /// INSERT: feed the remaining input data into the insert stream.
            InputStreamFromASTInsertQuery in(ast, istr, streams, context);
            copyData(in, *streams.out);
        }

        if (streams.in)
        {
            const ASTQueryWithOutput * ast_query_with_output = dynamic_cast<const ASTQueryWithOutput *>(ast.get());

            WriteBuffer * out_buf = &ostr;
            std::experimental::optional<WriteBufferFromFile> out_file_buf;
            if (ast_query_with_output && ast_query_with_output->out_file)
            {
                if (!allow_into_outfile)
                    throw Exception("INTO OUTFILE is not allowed", ErrorCodes::INTO_OUTFILE_NOT_ALLOWED);

                /// O_EXCL | O_CREAT: refuse to overwrite an existing file.
                const auto & out_file = typeid_cast<const ASTLiteral &>(*ast_query_with_output->out_file).value.safeGet<std::string>();
                out_file_buf.emplace(out_file, DBMS_DEFAULT_BUFFER_SIZE, O_WRONLY | O_EXCL | O_CREAT);
                out_buf = &out_file_buf.value();
            }

            /// Pick the output format: explicit FORMAT clause, otherwise the context default.
            String format_name = ast_query_with_output && (ast_query_with_output->format != nullptr)
                ? typeid_cast<const ASTIdentifier &>(*ast_query_with_output->format).name
                : context.getDefaultFormat();

            BlockOutputStreamPtr out = context.getOutputFormat(format_name, *out_buf, streams.in_sample);

            if (auto stream = dynamic_cast<IProfilingBlockInputStream *>(streams.in.get()))
            {
                /// Save previous progress callback if any. TODO Do it more conveniently.
                auto previous_progress_callback = context.getProgressCallback();

                /// NOTE Progress callback takes shared ownership of 'out'.
                stream->setProgressCallback([out, previous_progress_callback] (const Progress & progress)
                {
                    if (previous_progress_callback)
                        previous_progress_callback(progress);
                    out->onProgress(progress);
                });
            }

            if (set_content_type)
                set_content_type(out->getContentType());

            copyData(*streams.in, *out);
        }
    }
    catch (...)
    {
        streams.onException();
        throw;
    }

    streams.onFinish();
}
/// Parse and begin execution of a query: parses [begin, end), checks limits and quota,
/// registers the query in the process list, builds an interpreter, and wires up
/// query-log callbacks. Returns the parsed AST together with the query's BlockIO streams.
static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
    IParser::Pos begin,
    IParser::Pos end,
    Context & context,
    bool internal,
    QueryProcessingStage::Enum stage)
{
    ProfileEvents::increment(ProfileEvents::Query);
    time_t current_time = time(0);

    const Settings & settings = context.getSettingsRef();

    ParserQuery parser;
    ASTPtr ast;
    size_t query_size;
    size_t max_query_size = settings.max_query_size;

    try
    {
        ast = parseQuery(parser, begin, end, "");

        /// Copy query into string. It will be written to log and presented in processlist. If an INSERT query, string will not include data to insertion.
        query_size = ast->range.second - ast->range.first;

        if (max_query_size && query_size > max_query_size)
            throw Exception("Query is too large (" + toString(query_size) + ")."
                " max_query_size = " + toString(max_query_size), ErrorCodes::QUERY_IS_TOO_LARGE);
    }
    catch (...)
    {
        /// Anyway log query.
        if (!internal)
        {
            String query = String(begin, begin + std::min(end - begin, static_cast<ptrdiff_t>(max_query_size)));
            logQuery(query.substr(0, settings.log_queries_cut_to_length), context);
            onExceptionBeforeStart(query, context, current_time);
        }

        throw;
    }

    String query(begin, query_size);
    BlockIO res;

    try
    {
        if (!internal)
            logQuery(query.substr(0, settings.log_queries_cut_to_length), context);

        /// Check the limits.
        checkLimits(*ast, settings.limits);

        QuotaForIntervals & quota = context.getQuota();

        quota.addQuery();    /// NOTE Seems that when new time interval has come, first query is not accounted in number of queries.
        quota.checkExceeded(current_time);

        /// Put query to process list. But don't put SHOW PROCESSLIST query itself.
        ProcessList::EntryPtr process_list_entry;
        if (!internal && nullptr == typeid_cast<const ASTShowProcesslistQuery *>(&*ast))
        {
            process_list_entry = context.getProcessList().insert(
                query,
                ast.get(),
                context.getClientInfo(),
                settings);

            context.setProcessListElement(&process_list_entry->get());
        }

        auto interpreter = InterpreterFactory::get(ast, context, stage);
        res = interpreter->execute();

        /// Delayed initialization of query streams (required for KILL QUERY purposes)
        if (process_list_entry)
            (*process_list_entry)->setQueryStreams(res);

        /// Hold element of process list till end of query execution.
        res.process_list_entry = process_list_entry;

        /// Wire progress and process-list tracking into the result streams.
        if (res.in)
        {
            if (auto stream = dynamic_cast<IProfilingBlockInputStream *>(res.in.get()))
            {
                stream->setProgressCallback(context.getProgressCallback());
                stream->setProcessListElement(context.getProcessListElement());
            }
        }

        if (res.out)
        {
            if (auto stream = dynamic_cast<CountingBlockOutputStream *>(res.out.get()))
            {
                stream->setProcessListElement(context.getProcessListElement());
            }
        }

        /// Everything related to query log.
        {
            QueryLogElement elem;

            elem.type = QueryLogElement::QUERY_START;

            elem.event_time = current_time;
            elem.query_start_time = current_time;

            elem.query = query.substr(0, settings.log_queries_cut_to_length);

            elem.client_info = context.getClientInfo();

            bool log_queries = settings.log_queries && !internal;

            /// Log into system table start of query execution, if need.
            if (log_queries)
                context.getQueryLog().add(elem);

            /// Also make possible for caller to log successful query finish and exception during execution.
            /// Note: 'elem' is captured by value and mutated inside (hence 'mutable').
            res.finish_callback = [elem, &context, log_queries] (IBlockInputStream * stream_in, IBlockOutputStream * stream_out) mutable
            {
                ProcessListElement * process_list_elem = context.getProcessListElement();

                if (!process_list_elem)
                    return;

                double elapsed_seconds = process_list_elem->watch.elapsedSeconds();

                elem.type = QueryLogElement::QUERY_FINISH;

                elem.event_time = time(0);
                elem.query_duration_ms = elapsed_seconds * 1000;

                elem.read_rows = process_list_elem->progress_in.rows;
                elem.read_bytes = process_list_elem->progress_in.bytes;

                elem.written_rows = process_list_elem->progress_out.rows;
                elem.written_bytes = process_list_elem->progress_out.bytes;

                auto memory_usage = process_list_elem->memory_tracker.getPeak();
                elem.memory_usage = memory_usage > 0 ? memory_usage : 0;

                if (stream_in)
                {
                    if (auto profiling_stream = dynamic_cast<const IProfilingBlockInputStream *>(stream_in))
                    {
                        const BlockStreamProfileInfo & info = profiling_stream->getProfileInfo();

                        /// NOTE: INSERT SELECT query contains zero metrics
                        elem.result_rows = info.rows;
                        elem.result_bytes = info.bytes;
                    }
                }
                else if (stream_out)    /// will be used only for ordinary INSERT queries
                {
                    if (auto counting_stream = dynamic_cast<const CountingBlockOutputStream *>(stream_out))
                    {
                        /// NOTE: Redundancy. The same values could be extracted from process_list_elem->progress_out.
                        elem.result_rows = counting_stream->getProgress().rows;
                        elem.result_bytes = counting_stream->getProgress().bytes;
                    }
                }

                if (elem.read_rows != 0)
                {
                    LOG_INFO(&Logger::get("executeQuery"), std::fixed << std::setprecision(3)
                        << "Read " << elem.read_rows << " rows, "
                        << formatReadableSizeWithBinarySuffix(elem.read_bytes) << " in " << elapsed_seconds << " sec., "
                        << static_cast<size_t>(elem.read_rows / elapsed_seconds) << " rows/sec., "
                        << formatReadableSizeWithBinarySuffix(elem.read_bytes / elapsed_seconds) << "/sec.");
                }

                if (log_queries)
                    context.getQueryLog().add(elem);
            };

            res.exception_callback = [elem, &context, log_queries] () mutable
            {
                context.getQuota().addError();

                elem.type = QueryLogElement::EXCEPTION_WHILE_PROCESSING;

                elem.event_time = time(0);
                elem.query_duration_ms = 1000 * (elem.event_time - elem.query_start_time);
                elem.exception = getCurrentExceptionMessage(false);

                ProcessListElement * process_list_elem = context.getProcessListElement();

                /// Refine the duration and usage figures when a process list element is available.
                if (process_list_elem)
                {
                    double elapsed_seconds = process_list_elem->watch.elapsedSeconds();

                    elem.query_duration_ms = elapsed_seconds * 1000;

                    elem.read_rows = process_list_elem->progress_in.rows;
                    elem.read_bytes = process_list_elem->progress_in.bytes;

                    auto memory_usage = process_list_elem->memory_tracker.getPeak();
                    elem.memory_usage = memory_usage > 0 ? memory_usage : 0;
                }

                setExceptionStackTrace(elem);
                logException(context, elem);

                if (log_queries)
                    context.getQueryLog().add(elem);
            };

            if (!internal && res.in)
            {
                std::stringstream log_str;
                log_str << "Query pipeline:\n";
                res.in->dumpTree(log_str);
                LOG_DEBUG(&Logger::get("executeQuery"), log_str.str());
            }
        }
    }
    catch (...)
    {
        if (!internal)
            onExceptionBeforeStart(query, context, current_time);

        throw;
    }

    return std::make_tuple(ast, res);
}