void IndexForNativeFormat::read(ReadBuffer & istr, const NameSet & required_columns)
{
	while (!istr.eof())
	{
		blocks.emplace_back();
		IndexOfBlockForNativeFormat & block = blocks.back();

		readVarUInt(block.num_columns, istr);
		readVarUInt(block.num_rows, istr);

		if (block.num_columns < required_columns.size())
			throw Exception("Index contain less than required columns", ErrorCodes::INCORRECT_INDEX);

		/// Read the index entry for every column; keep only the required ones.
		for (size_t i = 0; i < block.num_columns; ++i)
		{
			IndexOfOneColumnForNativeFormat column_index;

			readBinary(column_index.name, istr);
			readBinary(column_index.type, istr);
			readBinary(column_index.location.offset_in_compressed_file, istr);
			readBinary(column_index.location.offset_in_decompressed_block, istr);

			if (required_columns.count(column_index.name))
				block.columns.push_back(std::move(column_index));
		}

		if (block.columns.size() < required_columns.size())
			throw Exception("Index contain less than required columns", ErrorCodes::INCORRECT_INDEX);
		if (block.columns.size() > required_columns.size())
			throw Exception("Index contain duplicate columns", ErrorCodes::INCORRECT_INDEX);

		block.num_columns = block.columns.size();
	}
}
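
The num_columns and num_rows counters above are stored as variable-length integers. As a point of reference, a standalone sketch of the usual 7-bits-per-byte decoding, where the high bit of each byte acts as a continuation flag, could look like the following; it illustrates the encoding scheme and is not the library's own readVarUInt (the helper name is made up for the sketch).

#include <cstdint>
#include <istream>
#include <stdexcept>

// Sketch of unsigned varint (LEB128-style) decoding: 7 payload bits per byte,
// with the high bit set on every byte except the last. Illustration only.
inline uint64_t readVarUIntSketch(std::istream & in)
{
    uint64_t value = 0;
    for (unsigned shift = 0; shift < 64; shift += 7)
    {
        const int byte = in.get();
        if (byte < 0)   // end of stream
            throw std::runtime_error("Unexpected end of stream while reading varint");
        value |= static_cast<uint64_t>(byte & 0x7F) << shift;
        if (!(byte & 0x80))   // continuation flag clear: this was the last byte
            return value;
    }
    throw std::runtime_error("Varint is longer than 64 bits");
}
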
void BlockStreamProfileInfo::read(ReadBuffer & in)
{
	readVarUInt(rows, in);
	readVarUInt(blocks, in);
	readVarUInt(bytes, in);
	readBinary(applied_limit, in);
	readVarUInt(rows_before_limit, in);
	readBinary(calculated_rows_before_limit, in);
}
Example #3
void DataTypeArray::deserializeBinary(IColumn & column, ReadBuffer & istr) const
{
    ColumnArray & column_array = static_cast<ColumnArray &>(column);
    ColumnArray::Offsets & offsets = column_array.getOffsets();

    size_t size;
    readVarUInt(size, istr);

    IColumn & nested_column = column_array.getData();

    size_t i = 0;
    try
    {
        for (; i < size; ++i)
            nested->deserializeBinary(nested_column, istr);
    }
    catch (...)
    {
        if (i)
            nested_column.popBack(i);
        throw;
    }

    offsets.push_back((offsets.empty() ? 0 : offsets.back()) + size);
}
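
The try/catch above gives the element loop a strong exception guarantee: if element i fails to deserialize, the i elements already appended to the nested column are popped again, so the column is left exactly as it was. A minimal standalone sketch of the same rollback pattern over a plain std::vector, assuming only that the element parser may throw partway through (the helper name is invented for the sketch):

#include <cstddef>
#include <vector>

// Sketch of all-or-nothing appending: on failure, drop the elements that were
// appended before the exception and rethrow, leaving `out` unchanged.
template <typename T, typename Parse>
void appendAllOrNothing(std::vector<T> & out, std::size_t count, Parse parse)
{
    std::size_t appended = 0;
    try
    {
        for (; appended < count; ++appended)
            out.push_back(parse(appended));   // parse() may throw mid-way
    }
    catch (...)
    {
        out.resize(out.size() - appended);    // roll back the partial append
        throw;
    }
}
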
void DataTypeAggregateFunction::deserializeBinary(Field & field, ReadBuffer & istr) const
{
	UInt64 size;
	readVarUInt(size, istr);
	field = String();
	String & s = get<String &>(field);
	s.resize(size);
	istr.readStrict(&s[0], size);
}
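
The aggregate-function state above is read as a length-prefixed blob: a varint byte count followed by that many raw bytes copied into the string. A standalone sketch of the same pattern, reusing the varint decoder sketched earlier (the encoding and the helper name are assumptions made for illustration, not the library's exact readBinary/readStringBinary):

#include <cstdint>
#include <istream>
#include <stdexcept>
#include <string>

// Sketch: read a length-prefixed blob -- a varint size followed by the bytes.
inline std::string readLengthPrefixedBlobSketch(std::istream & in)
{
    const uint64_t size = readVarUIntSketch(in);   // varint decoder sketched earlier
    std::string blob(size, '\0');
    if (size != 0 && !in.read(&blob[0], static_cast<std::streamsize>(size)))
        throw std::runtime_error("Unexpected end of stream while reading blob");
    return blob;
}
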
Example #5
void DataTypeArray::deserializeBinary(Field & field, ReadBuffer & istr) const
{
    size_t size;
    readVarUInt(size, istr);
    field = Array(size);
    Array & arr = get<Array &>(field);
    for (size_t i = 0; i < size; ++i)
        nested->deserializeBinary(arr[i], istr);
}
Example #6
ReshardingJob::ReshardingJob(const std::string & serialized_job)
{
    ReadBufferFromString buf{serialized_job};

    readBinary(database_name, buf);
    readBinary(table_name, buf);
    readBinary(partition, buf);

    std::string expr;
    readBinary(expr, buf);

    IParser::Pos pos = expr.data();
    IParser::Pos max_parsed_pos = pos;
    const char * end = pos + expr.size();

    ParserExpressionWithOptionalAlias parser(false);
    Expected expected = "";
    if (!parser.parse(pos, end, sharding_key_expr, max_parsed_pos, expected))
        throw Exception{"ReshardingJob: Internal error", ErrorCodes::LOGICAL_ERROR};

    readBinary(coordinator_id, buf);
    readVarUInt(block_number, buf);
    readBinary(do_copy, buf);

    size_t s;
    readVarUInt(s, buf);

    for (size_t i = 0; i < s; ++i)
    {
        std::string path;
        readBinary(path, buf);

        UInt64 weight;
        readVarUInt(weight, buf);

        paths.emplace_back(path, weight);
    }
}
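
The tail of the constructor reads a varint count followed by that many (path, weight) pairs. A standalone sketch of the same shape, built on the varint and length-prefixed-string decoders sketched above (again assumptions about the encoding made for illustration, not the library's readBinary/readVarUInt themselves):

#include <cstdint>
#include <istream>
#include <string>
#include <utility>
#include <vector>

// Sketch: read a varint-counted list of (path, weight) pairs.
inline std::vector<std::pair<std::string, uint64_t>> readWeightedPathsSketch(std::istream & in)
{
    const uint64_t count = readVarUIntSketch(in);              // sketched earlier
    std::vector<std::pair<std::string, uint64_t>> paths;
    paths.reserve(count);
    for (uint64_t i = 0; i < count; ++i)
    {
        std::string path = readLengthPrefixedBlobSketch(in);   // sketched earlier
        const uint64_t weight = readVarUIntSketch(in);
        paths.emplace_back(std::move(path), weight);
    }
    return paths;
}
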
Block NativeBlockInputStream::readImpl()
{
	Block res;

	const DataTypeFactory & data_type_factory = DataTypeFactory::instance();

	if (use_index && index_block_it == index_block_end)
		return res;

	if (istr.eof())
	{
		if (use_index)
			throw Exception("Input doesn't contain all data for index.", ErrorCodes::CANNOT_READ_ALL_DATA);

		return res;
	}

	/// Additional information about the block.
	if (server_revision >= DBMS_MIN_REVISION_WITH_BLOCK_INFO)
		res.info.read(istr);

	/// Sizes
	size_t columns = 0;
	size_t rows = 0;

	if (!use_index)
	{
		readVarUInt(columns, istr);
		readVarUInt(rows, istr);
	}
	else
	{
		columns = index_block_it->num_columns;
		rows = index_block_it->num_rows;
	}

	for (size_t i = 0; i < columns; ++i)
	{
		if (use_index)
		{
			/// If the current position is already the required one, no actual seek takes place.
			istr_concrete->seek(index_column_it->location.offset_in_compressed_file, index_column_it->location.offset_in_decompressed_block);
		}

		ColumnWithTypeAndName column;

		/// Name
		readBinary(column.name, istr);

		/// Type
		String type_name;
		readBinary(type_name, istr);
		column.type = data_type_factory.get(type_name);

		if (use_index)
		{
			/// Index allows to do more checks.
			if (index_column_it->name != column.name)
				throw Exception("Index points to column with wrong name: corrupted index or data", ErrorCodes::INCORRECT_INDEX);
			if (index_column_it->type != type_name)
				throw Exception("Index points to column with wrong type: corrupted index or data", ErrorCodes::INCORRECT_INDEX);
		}

		/// Data
		column.column = column.type->createColumn();

		if (rows)	/// If no rows, nothing to read.
			readData(*column.type, *column.column, istr, rows);

		res.insert(column);

		if (use_index)
			++index_column_it;
	}

	if (use_index)
	{
		if (index_column_it != index_block_it->columns.end())
			throw Exception("Inconsistent index: not all columns were read", ErrorCodes::INCORRECT_INDEX);

		++index_block_it;
		if (index_block_it != index_block_end)
			index_column_it = index_block_it->columns.begin();
	}

	return res;
}
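
Putting the pieces together, each block in the native stream starts with optional block info (only for new enough server revisions), then a varint column count and row count, and then, per column, a name, a type name, and the type-specific column data; when an index is used, the reader additionally seeks to each column's recorded offset and cross-checks its name and type. A standalone sketch that walks just this framing, delegating the type-specific payload to a caller-supplied callback and reusing the decoders sketched earlier (the string encoding and all helper names are assumptions for illustration, not the library's exact format):

#include <cstdint>
#include <istream>
#include <string>
#include <vector>

// Sketch: read one block's framing -- column/row counts plus each column's
// name and type name. The serialized values of a column follow its header,
// so the caller must supply skip_column_data(in, type_name, num_rows) to
// consume (or decode) that type-specific payload.
struct ColumnHeaderSketch
{
    std::string name;
    std::string type;
};

template <typename SkipColumnData>
std::vector<ColumnHeaderSketch> readBlockFramingSketch(std::istream & in, SkipColumnData && skip_column_data)
{
    const uint64_t num_columns = readVarUIntSketch(in);        // sketched earlier
    const uint64_t num_rows = readVarUIntSketch(in);

    std::vector<ColumnHeaderSketch> headers(num_columns);
    for (auto & header : headers)
    {
        header.name = readLengthPrefixedBlobSketch(in);        // assumed string encoding
        header.type = readLengthPrefixedBlobSketch(in);
        skip_column_data(in, header.type, num_rows);           // type-specific column data
    }
    return headers;
}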