Code example #1
void MergedColumnOnlyOutputStream::write(const Block & block)
{
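    /// Lazily create one output stream per column when the first block arrives.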
    if (!initialized)
    {
        column_streams.clear();
        for (size_t i = 0; i < block.columns(); ++i)
        {
            addStreams(part_path, block.safeGetByPosition(i).name,
                *block.safeGetByPosition(i).type, 0, skip_offsets);
        }
        initialized = true;
    }

    size_t rows = block.rows();

    OffsetColumns offset_columns;
    for (size_t i = 0; i < block.columns(); ++i)
    {
        const ColumnWithTypeAndName & column = block.safeGetByPosition(i);
        writeData(column.name, *column.type, *column.column, offset_columns, skip_offsets);
    }

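    /// written_for_last_mark: rows written since the last mark; index_offset: rows still needed before the next mark.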
    size_t written_for_last_mark = (storage.index_granularity - index_offset + rows) % storage.index_granularity;
    index_offset = (storage.index_granularity - written_for_last_mark) % storage.index_granularity;
}
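To make the mark bookkeeping at the end of write() concrete, here is a small worked example (a sketch; the values are illustrative, and 8192 is only the classic default for index_granularity):

size_t index_granularity = 8192; // illustrative default granule size
size_t index_offset = 0;         // no partially filled granule yet
size_t rows = 20000;             // rows in the incoming block
// 20000 = 2 * 8192 + 3616, so 3616 rows land after the last mark:
size_t written_for_last_mark = (index_granularity - index_offset + rows) % index_granularity; // 3616
// 4576 more rows must arrive before the next mark is due:
index_offset = (index_granularity - written_for_last_mark) % index_granularity; // 4576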
Code example #2
MergedBlockOutputStream::MergedBlockOutputStream(
    MergeTreeData & storage_,
    String part_path_,
    const NamesAndTypesList & columns_list_,
    CompressionSettings compression_settings,
    const MergeTreeData::DataPart::ColumnToSize & merged_column_to_size_,
    size_t aio_threshold_)
    : IMergedBlockOutputStream(
        storage_, storage_.context.getSettings().min_compress_block_size,
        storage_.context.getSettings().max_compress_block_size, compression_settings,
        aio_threshold_),
    columns_list(columns_list_), part_path(part_path_)
{
    init();
    for (const auto & it : columns_list)
    {
        size_t estimated_size = 0;
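        /// When direct I/O may be used, look up this column's merged size so addStreams can compare it against the AIO threshold.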
        if (aio_threshold > 0)
        {
            auto it2 = merged_column_to_size_.find(it.name);
            if (it2 != merged_column_to_size_.end())
                estimated_size = it2->second;
        }
        addStreams(part_path, it.name, *it.type, estimated_size, false);
    }
}
Code example #3
File: MergeTreeReader.cpp Project: bamx23/ClickHouse
MergeTreeReader::MergeTreeReader(const String & path,
    const MergeTreeData::DataPartPtr & data_part, const NamesAndTypesList & columns,
    UncompressedCache * uncompressed_cache, MarkCache * mark_cache, bool save_marks_in_cache,
    MergeTreeData & storage, const MarkRanges & all_mark_ranges,
    size_t aio_threshold, size_t max_read_buffer_size, const ValueSizeMap & avg_value_size_hints,
    const ReadBufferFromFileBase::ProfileCallback & profile_callback,
    clockid_t clock_type)
    : avg_value_size_hints(avg_value_size_hints), path(path), data_part(data_part), columns(columns)
    , uncompressed_cache(uncompressed_cache), mark_cache(mark_cache), save_marks_in_cache(save_marks_in_cache), storage(storage)
    , all_mark_ranges(all_mark_ranges), aio_threshold(aio_threshold), max_read_buffer_size(max_read_buffer_size)
{
    try
    {
        if (!Poco::File(path).exists())
            throw Exception("Part " + path + " is missing", ErrorCodes::NOT_FOUND_EXPECTED_DATA_PART);

        for (const NameAndTypePair & column : columns)
            addStreams(column.name, *column.type, all_mark_ranges, profile_callback, clock_type);
    }
    catch (...)
    {
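        /// If the streams cannot be opened, the part is likely damaged: report it as broken, then rethrow.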
        storage.reportBrokenPart(data_part->name);
        throw;
    }
}
Code example #4
MergedBlockOutputStream::MergedBlockOutputStream(
    MergeTreeData & storage_,
    String part_path_,
    const NamesAndTypesList & columns_list_,
    CompressionSettings compression_settings)
    : IMergedBlockOutputStream(
        storage_, storage_.context.getSettings().min_compress_block_size,
        storage_.context.getSettings().max_compress_block_size, compression_settings,
        storage_.context.getSettings().min_bytes_to_use_direct_io),
    columns_list(columns_list_), part_path(part_path_)
{
    init();
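    /// Unlike the overload in code example #2, no merged size estimates are available here, so pass 0 for each column; the AIO threshold comes from the min_bytes_to_use_direct_io setting.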
    for (const auto & it : columns_list)
        addStreams(part_path, it.name, *it.type, 0, false);
}
Code example #5
File: StreamController.hpp Project: ALaDyn/picongpu
 /** Enable the StreamController and add one stream.
  *
  * If the StreamController is not activated, getNextStream() will crash on its first call.
  */
 void activate()
 {
     addStreams(1);
     isActivated = true;
 }
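A minimal call-site sketch for the contract described above (hypothetical: how the StreamController instance is obtained varies, so the accessor below is an assumption, not PIConGPU's actual API):

// Hypothetical accessor; in PIConGPU the controller is reached through the framework, not like this.
StreamController & streamController = getStreamController();
streamController.activate();                      // adds one stream and sets isActivated
auto nextStream = streamController.getNextStream(); // safe only after activate(); crashes without it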