Names NamesAndTypesList::getNames() const
{
    Names res;
    res.reserve(size());
    for (const NameAndTypePair & column : *this)
        res.push_back(column.name);
    return res;
}
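
/// Ensures every requested column can be produced from `part`: for a requested column whose data
/// files are missing but that has a DEFAULT expression, the columns read by that expression are
/// appended to `columns` (the vector may grow while it is scanned, so injected columns are examined
/// in turn). If none of the requested columns has files, the column with the smallest compressed
/// size is added so that at least the number of rows can be read. Returns the set of injected names.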
NameSet MergeTreeReadPool::injectRequiredColumns(const MergeTreeData::DataPartPtr & part, Names & columns) const
{
	NameSet required_columns{std::begin(columns), std::end(columns)};
	NameSet injected_columns;

	auto all_column_files_missing = true;

	for (size_t i = 0; i < columns.size(); ++i)
	{
		const auto & column_name = columns[i];

		/// column has files and hence does not require evaluation
		if (part->hasColumnFiles(column_name))
		{
			all_column_files_missing = false;
			continue;
		}

		const auto default_it = data.column_defaults.find(column_name);
		/// column has no explicit default expression
		if (default_it == std::end(data.column_defaults))
			continue;

		/// collect identifiers required for evaluation
		IdentifierNameSet identifiers;
		default_it->second.expression->collectIdentifierNames(identifiers);

		for (const auto & identifier : identifiers)
		{
			if (data.hasColumn(identifier))
			{
				/// ensure each column is added only once
				if (required_columns.count(identifier) == 0)
				{
					columns.emplace_back(identifier);
					required_columns.emplace(identifier);
					injected_columns.emplace(identifier);
				}
			}
		}
	}

	/** Add the column with the minimum size.
	  * Used when no column is needed or the files are missing, but at least the number of rows has to be known.
	  * Appends to `columns`.
	  */
	if (all_column_files_missing)
	{
		const auto minimum_size_column_name = part->getColumnNameWithMinumumCompressedSize();
		columns.push_back(minimum_size_column_name);
		/// correctly report added column
		injected_columns.insert(columns.back());
	}

	return injected_columns;
}
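
/* A minimal standalone sketch of the injection idea above, using plain-STL stand-ins instead of the
 * ClickHouse types (Name, ColumnDeps, inject_default_dependencies and columns_with_files are
 * illustrative names, not part of the original API). For brevity it assumes every dependency of a
 * DEFAULT expression names a real column and omits the minimum-size fallback. */
#include <map>
#include <set>
#include <string>
#include <vector>

using Name = std::string;
using ColumnDeps = std::map<Name, std::vector<Name>>;    /// column -> columns its DEFAULT expression reads

std::set<Name> inject_default_dependencies(const std::set<Name> & columns_with_files,
                                           const ColumnDeps & defaults,
                                           std::vector<Name> & columns)
{
    std::set<Name> required(columns.begin(), columns.end());
    std::set<Name> injected;

    /// `columns` may grow while we iterate, so injected columns are themselves examined
    for (size_t i = 0; i < columns.size(); ++i)
    {
        const Name column = columns[i];
        if (columns_with_files.count(column))
            continue;                                    /// data is on disk, no evaluation needed

        const auto it = defaults.find(column);
        if (it == defaults.end())
            continue;                                    /// no DEFAULT expression for this column

        for (const Name & dep : it->second)
        {
            if (required.insert(dep).second)             /// add each dependency only once
            {
                columns.push_back(dep);
                injected.insert(dep);
            }
        }
    }
    return injected;
}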
/**
 * @brief Reloads the ui scripts and reinitializes the ui
 */
static void UI_Restart_f (void)
{
	typedef std::vector<std::string> Names;
	Names names;
	for (int i = 0; i < ui_global.windowStackPos; i++) {
		names.push_back(std::string(ui_global.windowStack[i]->name));
	}

	UI_Shutdown();
	CLMN_Shutdown();
	R_FontShutdown();
	UI_Init();
	R_FontInit();
	Com_Printf("%i ui script files\n", FS_BuildFileList("ufos/ui/*.ufo"));
	FS_NextScriptHeader(nullptr, nullptr, nullptr);
	const char* type, *name, *text;
	text = nullptr;
	while ((type = FS_NextScriptHeader("ufos/*.ufo", &name, &text)) != nullptr) {
		if (Q_streq(type, "font"))
			UI_ParseFont(name, &text);
		else if (Q_streq(type, "menu_model"))
			UI_ParseUIModel(name, &text);
		else if (Q_streq(type, "sprite"))
			UI_ParseSprite(name, &text);
	}
	UI_Reinit();
	FS_NextScriptHeader(nullptr, nullptr, nullptr);
	text = nullptr;
	while ((type = FS_NextScriptHeader("ufos/ui/*.ufo", &name, &text)) != nullptr) {
		if (Q_streq(type, "window"))
			UI_ParseWindow(type, name, &text);
		else if (Q_streq(type, "component"))
			UI_ParseComponent(type, name, &text);
		else if (Q_streq(type, "menu_model"))
			UI_ParseUIModel(name, &text);
		else if (Q_streq(type, "sprite"))
			UI_ParseSprite(name, &text);
		else if (Q_streq(type, "lua"))
			UI_ParseAndLoadLuaScript(name, &text);
	}

	CLMN_Init();

	for (Names::iterator i = names.begin(); i != names.end(); ++i) {
		UI_PushWindow(i->c_str());
	}
}
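
/// Builds the input streams for one underlying table of a Merge table: the query is cloned and the
/// `_table` virtual column is rewritten; depending on the processing stage the storage supports, the
/// data is read either directly from the storage or through InterpreterSelectQuery. The resulting
/// streams are optionally concatenated into one, optionally annotated with the constant `_table`
/// column, and converted to the structure of the Merge table header.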
BlockInputStreams StorageMerge::createSourceStreams(const SelectQueryInfo & query_info, const QueryProcessingStage::Enum & processed_stage,
                                                    const size_t max_block_size, const Block & header, const StoragePtr & storage,
                                                    const TableStructureReadLockPtr & struct_lock, Names & real_column_names,
                                                    Context & modified_context, size_t streams_num, bool has_table_virtual_column,
                                                    bool concat_streams)
{
    SelectQueryInfo modified_query_info = query_info;
    modified_query_info.query = query_info.query->clone();

    VirtualColumnUtils::rewriteEntityInAst(modified_query_info.query, "_table", storage ? storage->getTableName() : "");

    if (!storage)
        return BlockInputStreams{
            InterpreterSelectQuery(modified_query_info.query, modified_context, std::make_shared<OneBlockInputStream>(header),
                                   processed_stage, true).execute().in};

    BlockInputStreams source_streams;

    if (processed_stage <= storage->getQueryProcessingStage(modified_context))
    {
        /// If there are only virtual columns in the query, at least one other column must be requested.
        if (real_column_names.empty())
            real_column_names.push_back(ExpressionActions::getSmallestColumn(storage->getColumns().getAllPhysical()));

        source_streams = storage->read(real_column_names, modified_query_info, modified_context, processed_stage, max_block_size,
                                       UInt32(streams_num));
    }
    else if (processed_stage > storage->getQueryProcessingStage(modified_context))
    {
        typeid_cast<ASTSelectQuery *>(modified_query_info.query.get())->replaceDatabaseAndTable(source_database, storage->getTableName());

        /// Maximum permissible parallelism is streams_num
        modified_context.getSettingsRef().max_threads = UInt64(streams_num);
        modified_context.getSettingsRef().max_streams_to_max_threads_ratio = 1;

        InterpreterSelectQuery interpreter{modified_query_info.query, modified_context, Names{}, processed_stage};
        BlockInputStreamPtr interpreter_stream = interpreter.execute().in;

        /** Materialization is needed, since constants coming from distributed storage arrive materialized.
          * Without it, columns of different kinds (Const and non-Const) would be produced in different threads,
          * which is not allowed, since all code assumes that within a block stream all types are the same.
          */
        source_streams.emplace_back(std::make_shared<MaterializingBlockInputStream>(interpreter_stream));
    }

    if (!source_streams.empty())
    {
        if (concat_streams)
        {
            BlockInputStreamPtr stream =
                source_streams.size() > 1 ? std::make_shared<ConcatBlockInputStream>(source_streams) : source_streams[0];

            source_streams.resize(1);
            source_streams[0] = stream;
        }

        for (BlockInputStreamPtr & source_stream : source_streams)
        {
            if (has_table_virtual_column)
                source_stream = std::make_shared<AddingConstColumnBlockInputStream<String>>(
                    source_stream, std::make_shared<DataTypeString>(), storage->getTableName(), "_table");

            /// Subordinate tables could have different but convertible types, like numeric types of different width.
            /// We must return streams whose structure equals the structure of the Merge table.
            convertingSourceStream(header, modified_context, modified_query_info.query, source_stream, processed_stage);

            source_stream->addTableLock(struct_lock);
        }
    }

    return source_streams;
}
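
/* A minimal standalone sketch of the post-processing step at the end of createSourceStreams, with
 * hypothetical generic types (Source, wrap_sources, concat, decorate are illustrative, not the
 * ClickHouse API): optionally collapse many sources into a single concatenating source, then wrap
 * every remaining source with a decorating step such as adding a constant column or converting types. */
#include <functional>
#include <memory>
#include <utility>
#include <vector>

struct Source {};                                       /// stands in for IBlockInputStream
using SourcePtr = std::shared_ptr<Source>;
using Sources = std::vector<SourcePtr>;

Sources wrap_sources(Sources sources, bool concat_streams,
                     const std::function<SourcePtr(Sources)> & concat,
                     const std::function<SourcePtr(SourcePtr)> & decorate)
{
    if (sources.empty())
        return sources;

    if (concat_streams && sources.size() > 1)
        sources = Sources{concat(std::move(sources))};  /// one source that reads its inputs in order

    for (SourcePtr & source : sources)
        source = decorate(source);                      /// e.g. add the `_table` column, convert structure

    return sources;
}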