/// Serialize the profile counters to `out`.
/// Field order is the wire format and must match the peer's read() exactly:
/// rows, blocks, bytes, applied-limit flag, rows-before-limit, calculated flag.
void BlockStreamProfileInfo::write(WriteBuffer & out) const
{
    writeVarUInt(rows, out);
    writeVarUInt(blocks, out);
    writeVarUInt(bytes, out);
    writeBinary(hasAppliedLimit(), out);
    writeVarUInt(getRowsBeforeLimit(), out);
    writeBinary(calculated_rows_before_limit, out);
}
/// Serialize one block in the Native format: optional BlockInfo header,
/// then column/row counts, then for each column its name, type name and data.
/// When `index_ostr` is set, a parallel index stream is written that mirrors
/// the header structure but records (compressed offset, decompressed offset)
/// marks instead of data, so readers can seek to individual columns.
void NativeBlockOutputStream::write(const Block & block)
{
    /// Additional information about the block (sent only to clients that understand it).
    if (client_revision >= DBMS_MIN_REVISION_WITH_BLOCK_INFO)
        block.info.write(ostr);

    /// Dimensions
    size_t columns = block.columns();
    size_t rows = block.rows();
    writeVarUInt(columns, ostr);
    writeVarUInt(rows, ostr);

    /** The index has the same structure as the data stream.
      * But instead of column values, it contains a mark that points to the location in the data file where this part of the column is located.
      */
    if (index_ostr)
    {
        writeVarUInt(columns, *index_ostr);
        writeVarUInt(rows, *index_ostr);
    }

    for (size_t i = 0; i < columns; ++i)
    {
        /// For the index.
        MarkInCompressedFile mark;

        if (index_ostr)
        {
            /// The mark must point at a compressed-block boundary, so the current
            /// compressed block is flushed before the offsets are sampled.
            ostr_concrete->next();    /// Finish compressed block.
            mark.offset_in_compressed_file = initial_size_of_file + ostr_concrete->getCompressedBytes();
            mark.offset_in_decompressed_block = ostr_concrete->getRemainingBytes();
        }

        const ColumnWithTypeAndName & column = block.safeGetByPosition(i);

        /// Name
        writeStringBinary(column.name, ostr);

        /// Type
        writeStringBinary(column.type->getName(), ostr);

        /// Data
        if (rows)    /// Zero items of data is always represented as zero number of bytes.
            writeData(*column.type, column.column, ostr, 0, 0);

        /// The index repeats name and type, then stores the mark captured above.
        if (index_ostr)
        {
            writeStringBinary(column.name, *index_ostr);
            writeStringBinary(column.type->getName(), *index_ostr);
            writeBinary(mark.offset_in_compressed_file, *index_ostr);
            writeBinary(mark.offset_in_decompressed_block, *index_ostr);
        }
    }
}
void BinaryRowOutputStream::writePrefix() { size_t columns = sample.columns(); if (with_names || with_types) { writeVarUInt(columns, ostr); } if (with_names) { for (size_t i = 0; i < columns; ++i) { writeStringBinary(sample.safeGetByPosition(i).name, ostr); } } if (with_types) { for (size_t i = 0; i < columns; ++i) { writeStringBinary(sample.safeGetByPosition(i).type->getName(), ostr); } } }
void DataTypeArray::serializeBinary(const Field & field, WriteBuffer & ostr) const { const Array & a = get<const Array &>(field); writeVarUInt(a.size(), ostr); for (size_t i = 0; i < a.size(); ++i) { nested->serializeBinary(a[i], ostr); } }
/// Pack every job field into one binary string, in the fixed order the
/// corresponding parser expects; `paths` is length-prefixed.
std::string ReshardingJob::toString() const
{
    std::string serialized_job;
    WriteBufferFromString buf{serialized_job};

    /// Scalar fields first.
    writeBinary(database_name, buf);
    writeBinary(table_name, buf);
    writeBinary(partition, buf);
    writeBinary(queryToString(sharding_key_expr), buf);
    writeBinary(coordinator_id, buf);
    writeVarUInt(block_number, buf);
    writeBinary(do_copy, buf);

    /// Then the (path, weight) list, preceded by its size.
    writeVarUInt(paths.size(), buf);
    for (const auto & path_entry : paths)
    {
        writeBinary(path_entry.first, buf);
        writeVarUInt(path_entry.second, buf);
    }

    buf.next();    /// Flush buffered bytes into serialized_job before returning.
    return serialized_job;
}
void DataTypeArray::serializeBinary(const IColumn & column, size_t row_num, WriteBuffer & ostr) const { const ColumnArray & column_array = static_cast<const ColumnArray &>(column); const ColumnArray::Offsets & offsets = column_array.getOffsets(); size_t offset = row_num == 0 ? 0 : offsets[row_num - 1]; size_t next_offset = offsets[row_num]; size_t size = next_offset - offset; writeVarUInt(size, ostr); const IColumn & nested_column = column_array.getData(); for (size_t i = offset; i < next_offset; ++i) nested->serializeBinary(nested_column, i, ostr); }
void DataTypeAggregateFunction::serializeBinary(const Field & field, WriteBuffer & ostr) const { const String & s = get<const String &>(field); writeVarUInt(s.size(), ostr); writeString(s, ostr); }
/// Serialize one block in the Native format: optional BlockInfo header,
/// then column/row counts, then for each column its name, type name and data.
/// Adds two backward-compatibility conversions for old clients: stripping
/// LowCardinality wrappers, and hiding the DateTime timezone parameter.
/// When `index_ostr` is set, a parallel index stream is written that mirrors
/// the header structure but records (compressed offset, decompressed offset)
/// marks instead of data, so readers can seek to individual columns.
void NativeBlockOutputStream::write(const Block & block)
{
    /// Additional information about the block.
    if (client_revision > 0)
        block.info.write(ostr);

    /// All columns must have the same number of rows before serializing.
    block.checkNumberOfRows();

    /// Dimensions
    size_t columns = block.columns();
    size_t rows = block.rows();

    writeVarUInt(columns, ostr);
    writeVarUInt(rows, ostr);

    /** The index has the same structure as the data stream.
      * But instead of column values, it contains a mark that points to the location in the data file where this part of the column is located.
      */
    if (index_ostr)
    {
        writeVarUInt(columns, *index_ostr);
        writeVarUInt(rows, *index_ostr);
    }

    for (size_t i = 0; i < columns; ++i)
    {
        /// For the index.
        MarkInCompressedFile mark;

        if (index_ostr)
        {
            /// The mark must point at a compressed-block boundary, so the current
            /// compressed block is flushed before the offsets are sampled.
            ostr_concrete->next();    /// Finish compressed block.
            mark.offset_in_compressed_file = initial_size_of_file + ostr_concrete->getCompressedBytes();
            mark.offset_in_decompressed_block = ostr_concrete->getRemainingBytes();
        }

        /// Taken by value (not reference): the column/type may be replaced below
        /// by their low-cardinality-stripped equivalents.
        ColumnWithTypeAndName column = block.safeGetByPosition(i);

        /// Send data to old clients without low cardinality type.
        if (remove_low_cardinality || (client_revision && client_revision < DBMS_MIN_REVISION_WITH_LOW_CARDINALITY_TYPE))
        {
            column.column = recursiveRemoveLowCardinality(column.column);
            column.type = recursiveRemoveLowCardinality(column.type);
        }

        /// Name
        writeStringBinary(column.name, ostr);

        /// Type
        String type_name = column.type->getName();

        /// For compatibility, we will not send explicit timezone parameter in DateTime data type
        /// to older clients, that cannot understand it.
        if (client_revision < DBMS_MIN_REVISION_WITH_TIME_ZONE_PARAMETER_IN_DATETIME_DATA_TYPE
            && startsWith(type_name, "DateTime("))
            type_name = "DateTime";

        writeStringBinary(type_name, ostr);

        /// Data
        if (rows)    /// Zero items of data is always represented as zero number of bytes.
            writeData(*column.type, column.column, ostr, 0, 0);

        /// The index repeats name and type, then stores the mark captured above.
        if (index_ostr)
        {
            writeStringBinary(column.name, *index_ostr);
            writeStringBinary(column.type->getName(), *index_ostr);
            writeBinary(mark.offset_in_compressed_file, *index_ostr);
            writeBinary(mark.offset_in_decompressed_block, *index_ostr);
        }
    }
}