void TinyLogBlockOutputStream::writeData(const String & name, const IDataType & type,
    const IColumn & column, WrittenStreams & written_streams)
{
    IDataType::SerializeBinaryBulkSettings settings;
    settings.getter = createStreamGetter(name, written_streams);

    /// The serialization state prefix is written only once per column, on its first block.
    if (serialize_states.count(name) == 0)
        type.serializeBinaryBulkStatePrefix(settings, serialize_states[name]);

    /// offset = 0, limit = 0 means: serialize the whole column.
    type.serializeBinaryBulkWithMultipleStreams(column, 0, 0, settings, serialize_states[name]);
}
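For context, the getter assigned above is what routes each substream to its on-disk file. A minimal sketch of the idea behind createStreamGetter, assuming a `streams` map of lazily created per-file Stream objects exposing a `compressed` WriteBuffer, and a hypothetical `dataFilePath()` helper for the file path (the real member names in StorageTinyLog may differ):

IDataType::OutputStreamGetter TinyLogBlockOutputStream::createStreamGetter(
    const String & name, WrittenStreams & written_streams)
{
    return [&] (const IDataType::SubstreamPath & path) -> WriteBuffer *
    {
        /// One file per substream; e.g. an Array column "arr" yields "arr.size0" and "arr".
        String stream_name = IDataType::getFileNameForStream(name, path);

        /// A shared offsets stream of a Nested structure must be written only once per block.
        if (!written_streams.insert(stream_name).second)
            return nullptr;

        /// Lazily open the compressed output file for this substream.
        /// `dataFilePath` and `max_compress_block_size` are illustrative assumptions.
        if (!streams.count(stream_name))
            streams[stream_name] = std::make_unique<Stream>(
                dataFilePath(stream_name), max_compress_block_size);

        return &streams[stream_name]->compressed;
    };
}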
void NativeBlockOutputStream::writeData(const IDataType & type, const ColumnPtr & column,
    WriteBuffer & ostr, UInt64 offset, UInt64 limit)
{
    /** If the column is a constant, materialize it,
      * since the data type does not know how to serialize/deserialize constants.
      */
    ColumnPtr full_column = column->convertToFullColumnIfConst();

    IDataType::SerializeBinaryBulkSettings settings;
    /// All substreams are interleaved into the single output buffer.
    settings.getter = [&ostr](IDataType::SubstreamPath) -> WriteBuffer * { return &ostr; };
    settings.position_independent_encoding = false;
    settings.low_cardinality_max_dictionary_size = 0;

    /// Unlike the log and MergeTree streams, the state lives only for this one call:
    /// prefix, body and suffix are written together.
    IDataType::SerializeBinaryBulkStatePtr state;
    type.serializeBinaryBulkStatePrefix(settings, state);
    type.serializeBinaryBulkWithMultipleStreams(*full_column, offset, limit, settings, state);
    type.serializeBinaryBulkStateSuffix(settings, state);
}
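On the caller's side, the Native format writes a small per-column header (name and type) and then hands the whole column body to writeData. A simplified sketch of that driving loop; the real NativeBlockOutputStream::write also handles BlockInfo fields and protocol-revision differences, which are omitted here:

void NativeBlockOutputStream::write(const Block & block)
{
    size_t columns = block.columns();
    size_t rows = block.rows();

    writeVarUInt(columns, ostr);
    writeVarUInt(rows, ostr);

    for (size_t i = 0; i < columns; ++i)
    {
        const ColumnWithTypeAndName & column = block.safeGetByPosition(i);

        writeStringBinary(column.name, ostr);
        writeStringBinary(column.type->getName(), ostr);

        /// offset = 0, limit = 0: serialize all rows of the column.
        writeData(*column.type, column.column, ostr, 0, 0);
    }
}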
void IMergedBlockOutputStream::writeData(
    const String & name,
    const IDataType & type,
    const IColumn & column,
    OffsetColumns & offset_columns,
    bool skip_offsets)
{
    size_t size = column.size();
    size_t prev_mark = 0;
    while (prev_mark < size)
    {
        size_t limit = 0;

        /// If there is `index_offset`, the first mark goes not immediately, but after this number of rows.
        if (prev_mark == 0 && index_offset != 0)
            limit = index_offset;
        else
        {
            limit = storage.index_granularity;

            /// Write marks.
            type.enumerateStreams([&] (const IDataType::SubstreamPath & substream_path)
            {
                bool is_offsets = !substream_path.empty()
                    && substream_path.back().type == IDataType::Substream::ArraySizes;
                if (is_offsets && skip_offsets)
                    return;

                String stream_name = IDataType::getFileNameForStream(name, substream_path);

                /// Don't write offsets more than once for a Nested type.
                if (is_offsets && offset_columns.count(stream_name))
                    return;

                ColumnStream & stream = *column_streams[stream_name];

                /// There could already be enough data to compress into the new block.
                if (stream.compressed.offset() >= min_compress_block_size)
                    stream.compressed.next();

                writeIntBinary(stream.plain_hashing.count(), stream.marks);
                writeIntBinary(stream.compressed.offset(), stream.marks);
            }, {});
        }

        IDataType::OutputStreamGetter stream_getter
            = [&] (const IDataType::SubstreamPath & substream_path) -> WriteBuffer *
        {
            bool is_offsets = !substream_path.empty()
                && substream_path.back().type == IDataType::Substream::ArraySizes;
            if (is_offsets && skip_offsets)
                return nullptr;

            String stream_name = IDataType::getFileNameForStream(name, substream_path);

            /// Don't write offsets more than once for a Nested type.
            if (is_offsets && offset_columns.count(stream_name))
                return nullptr;

            return &column_streams[stream_name]->compressed;
        };

        type.serializeBinaryBulkWithMultipleStreams(column, stream_getter, prev_mark, limit, true, {});

        /// Make the marks point to the beginning of the next compressed block
        /// instead of the end of the current one.
        type.enumerateStreams([&] (const IDataType::SubstreamPath & substream_path)
        {
            bool is_offsets = !substream_path.empty()
                && substream_path.back().type == IDataType::Substream::ArraySizes;
            if (is_offsets && skip_offsets)
                return;

            String stream_name = IDataType::getFileNameForStream(name, substream_path);

            /// Don't write offsets more than once for a Nested type.
            if (is_offsets && offset_columns.count(stream_name))
                return;

            column_streams[stream_name]->compressed.nextIfAtEnd();
        }, {});

        prev_mark += limit;
    }

    /// Memoize the offset streams that have already been written, so they are not
    /// written again for the next columns of the same Nested structure.
    type.enumerateStreams([&] (const IDataType::SubstreamPath & substream_path)
    {
        bool is_offsets = !substream_path.empty()
            && substream_path.back().type == IDataType::Substream::ArraySizes;
        if (is_offsets)
        {
            String stream_name = IDataType::getFileNameForStream(name, substream_path);
            offset_columns.insert(stream_name);
        }
    }, {});
}
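Each mark written above is a pair: the byte position in the compressed .bin file (plain_hashing.count()) and the offset inside the decompressed block (compressed.offset()). Together they let a reader seek to any index_granularity boundary without decompressing everything before it. A hedged sketch of the reader's side, using the MarkInCompressedFile layout and a CompressedReadBufferFromFile-style seek; the surrounding wiring is illustrative:

struct MarkInCompressedFile
{
    size_t offset_in_compressed_file;    /// Where the compressed block starts in the .bin file.
    size_t offset_in_decompressed_block; /// Where the first value starts inside that block once decompressed.
};

/// Position a compressed reader at the rows covered by `mark`.
/// CompressedReadBufferFromFile::seek takes exactly this pair of offsets;
/// treat the helper itself as an illustrative assumption.
void seekToMark(CompressedReadBufferFromFile & buf, const MarkInCompressedFile & mark)
{
    buf.seek(mark.offset_in_compressed_file, mark.offset_in_decompressed_block);
}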