void write(const Block & block)
{
    if (!block)
        return;

    size_t rows = block.rowsInFirstColumn();
    if (!rows)
        return;

    StoragePtr destination;
    if (!storage.no_destination)
    {
        destination = storage.context.tryGetTable(storage.destination_database, storage.destination_table);

        if (destination)
        {
            if (destination.get() == &storage)
                throw Exception("Destination table is myself. Write will cause infinite loop.",
                    ErrorCodes::INFINITE_LOOP);

            /// Check the table structure.
            try
            {
                destination->check(block, true);
            }
            catch (Exception & e)
            {
                e.addMessage("(when looking at destination table "
                    + storage.destination_database + "." + storage.destination_table + ")");
                throw;
            }
        }
    }

    size_t bytes = block.bytes();

    /// If the block already exceeds the maximum limits, write it directly, bypassing the buffer.
    if (rows > storage.max_thresholds.rows || bytes > storage.max_thresholds.bytes)
    {
        if (!storage.no_destination)
        {
            LOG_TRACE(storage.log, "Writing block with " << rows << " rows, " << bytes << " bytes directly.");
            storage.writeBlockToDestination(block, destination);
        }
        return;
    }

    /// Distribute the load across the shards by the thread number.
    const auto start_shard_num = Poco::ThreadNumber::get() % storage.num_shards;

    /// Walk the buffers in a circle, trying to lock each mutex. No more than one full pass.
    auto shard_num = start_shard_num;
    size_t try_no = 0;
    for (; try_no != storage.num_shards; ++try_no)
    {
        std::unique_lock<std::mutex> lock(storage.buffers[shard_num].mutex, std::try_to_lock_t());
        if (lock.owns_lock())
        {
            insertIntoBuffer(block, storage.buffers[shard_num], std::move(lock));
            break;
        }

        ++shard_num;
        if (shard_num == storage.num_shards)
            shard_num = 0;
    }

    /// If nothing could be locked right away, wait on the starting shard's mutex.
    if (try_no == storage.num_shards)
        insertIntoBuffer(block, storage.buffers[start_shard_num],
            std::unique_lock<std::mutex>(storage.buffers[start_shard_num].mutex));
}
void write(const Block & block) override
{
    if (!block)
        return;

    size_t rows = block.rows();
    if (!rows)
        return;

    StoragePtr destination;
    if (!storage.no_destination)
    {
        destination = storage.context.tryGetTable(storage.destination_database, storage.destination_table);

        if (destination)
        {
            if (destination.get() == &storage)
                throw Exception("Destination table is myself. Write will cause infinite loop.",
                    ErrorCodes::INFINITE_LOOP);

            /// Check the table structure.
            try
            {
                destination->check(block, true);
            }
            catch (Exception & e)
            {
                e.addMessage("(when looking at destination table "
                    + storage.destination_database + "." + storage.destination_table + ")");
                throw;
            }
        }
    }

    size_t bytes = block.bytes();

    /// If the block already exceeds the maximum limits, write it directly, bypassing the buffer.
    if (rows > storage.max_thresholds.rows || bytes > storage.max_thresholds.bytes)
    {
        if (!storage.no_destination)
        {
            LOG_TRACE(storage.log, "Writing block with " << rows << " rows, " << bytes << " bytes directly.");
            storage.writeBlockToDestination(block, destination);
        }
        return;
    }

    /// Distribute the load across the shards by the thread number.
    const auto start_shard_num = Poco::ThreadNumber::get() % storage.num_shards;

    /// Walk the buffers in a circle, trying to lock each mutex. No more than one full pass.
    auto shard_num = start_shard_num;
    StorageBuffer::Buffer * least_busy_buffer = nullptr;
    std::unique_lock<std::mutex> least_busy_lock;
    size_t least_busy_shard_rows = 0;

    for (size_t try_no = 0; try_no < storage.num_shards; ++try_no)
    {
        std::unique_lock<std::mutex> lock(storage.buffers[shard_num].mutex, std::try_to_lock_t());

        if (lock.owns_lock())
        {
            size_t num_rows = storage.buffers[shard_num].data.rows();
            if (!least_busy_buffer || num_rows < least_busy_shard_rows)
            {
                least_busy_buffer = &storage.buffers[shard_num];
                least_busy_lock = std::move(lock);
                least_busy_shard_rows = num_rows;
            }
        }

        shard_num = (shard_num + 1) % storage.num_shards;
    }

    /// If no lock could be acquired right away, wait on the starting shard's mutex.
    if (!least_busy_buffer)
        insertIntoBuffer(block, storage.buffers[start_shard_num],
            std::unique_lock<std::mutex>(storage.buffers[start_shard_num].mutex));
    else
        insertIntoBuffer(block, *least_busy_buffer, std::move(least_busy_lock));
}
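The second version changes the shard-selection policy: instead of inserting into the first buffer whose mutex can be taken without waiting, it scans the whole ring once and inserts into the least-loaded buffer among those that could be locked. Below is a minimal standalone sketch of that selection pattern, not the ClickHouse implementation: Buffer, insert_rows_least_busy and the int-vector payload are illustrative stand-ins, and a hash of std::this_thread::get_id() replaces Poco::ThreadNumber::get().

#include <functional>
#include <mutex>
#include <thread>
#include <vector>

struct Buffer
{
    std::mutex mutex;
    std::vector<int> rows;   /// stand-in for the buffered block
};

void insert_rows_least_busy(std::vector<Buffer> & buffers, const std::vector<int> & rows)
{
    const size_t num_shards = buffers.size();
    const size_t start_shard_num =
        std::hash<std::thread::id>{}(std::this_thread::get_id()) % num_shards;

    Buffer * least_busy_buffer = nullptr;
    std::unique_lock<std::mutex> least_busy_lock;
    size_t least_busy_rows = 0;

    /// One pass around the ring: try_lock every shard, keep the lock of the
    /// lockable shard that currently holds the fewest rows.
    size_t shard_num = start_shard_num;
    for (size_t try_no = 0; try_no < num_shards; ++try_no)
    {
        std::unique_lock<std::mutex> lock(buffers[shard_num].mutex, std::try_to_lock);

        if (lock.owns_lock())
        {
            size_t num_rows = buffers[shard_num].rows.size();
            if (!least_busy_buffer || num_rows < least_busy_rows)
            {
                least_busy_buffer = &buffers[shard_num];
                least_busy_lock = std::move(lock);   /// releases the previously kept lock, if any
                least_busy_rows = num_rows;
            }
        }

        shard_num = (shard_num + 1) % num_shards;
    }

    if (!least_busy_buffer)
    {
        /// Every shard was contended: fall back to a blocking lock on the starting shard.
        std::lock_guard<std::mutex> lock(buffers[start_shard_num].mutex);
        buffers[start_shard_num].rows.insert(buffers[start_shard_num].rows.end(), rows.begin(), rows.end());
    }
    else
    {
        least_busy_buffer->rows.insert(least_busy_buffer->rows.end(), rows.begin(), rows.end());
    }
}

Compared with the first version, this trades one full pass of try_lock attempts for better balancing: writers no longer pile onto whichever shard happens to be free first, so buffer sizes stay more even across shards under concurrent load.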