// Validate this DateRange parameter block.
// First defers to the base LLInitParam::Block validation, then enforces the
// range invariant that min_date does not exceed max_date.
// When emit_errors is true, a warning is logged on invariant violation.
// Returns true only if both checks pass.
bool LLInventoryFilter::FilterOps::DateRange::validateBlock( bool emit_errors /*= true*/ ) const
{
	// Base-class validation must succeed before the range check is meaningful.
	if (!LLInitParam::Block<DateRange>::validateBlock(emit_errors))
	{
		return false;
	}

	// Enforce min_date <= max_date.
	if (max_date() < min_date())
	{
		if (emit_errors)
		{
			LL_WARNS() << "max_date should be greater or equal to min_date" << LL_ENDL;
		}
		return false;
	}

	return true;
}
void BalanceSheetReport::refresh_map() { // TODO MEDIUM PRIORITY Can we just ignore equity Accounts here? m_balance_map.clear(); JEWEL_ASSERT (m_balance_map.empty()); optional<gregorian::date> const maybe_max_d = maybe_max_date(); gregorian::date const min_d = min_date(); // Special case: use the opening balance and current // balance of each Account, to optimize for the special but probably common // case where the min and max date are both blank. gregorian::date const earliest_possible_date = database_connection().opening_balance_journal_date() + gregorian::date_duration(1); JEWEL_ASSERT (min_d >= earliest_possible_date); if ((min_d == earliest_possible_date) && (!maybe_max_d)) { AccountTableIterator atit(database_connection()); AccountTableIterator const atend; for ( ; atit != atend; ++atit) { Handle<Account> const& account = *atit; if ( account->account_super_type() == AccountSuperType::balance_sheet ) { BalanceDatum datum; datum.opening_balance = account->friendly_opening_balance(); datum.closing_balance = account->friendly_balance(); m_balance_map[account->id()] = datum; } } return; } // General case EntryTableIterator it = make_date_ordered_actual_ordinary_entry_table_iterator ( database_connection() ); EntryTableIterator const end; for ( ; it != end; ++it) { Handle<Account> const account = (*it)->account(); AccountSuperType const s_type = account->account_super_type(); if (s_type != AccountSuperType::balance_sheet) { continue; } sqloxx::Id const account_id = account->id(); BalanceMap::iterator jt = m_balance_map.find(account_id); if (jt == m_balance_map.end()) { BalanceDatum const balance_datum(account); m_balance_map[account_id] = balance_datum; jt = m_balance_map.find(account_id); } JEWEL_ASSERT (jt != m_balance_map.end()); gregorian::date const date = (*it)->date(); if (maybe_max_d && (date > value(maybe_max_d))) { break; } Decimal const amount = (*it)->amount(); jt->second.closing_balance += amount; if (date < min_d) { jt->second.opening_balance += 
amount; } } return; }
/// Write one block (with its partition value) out to disk as a new temporary
/// data part under a "tmp_insert_" relative path, and return it marked is_temp.
/// The block is sorted by the table's sorting key (if needed) before writing.
MergeTreeData::MutableDataPartPtr MergeTreeDataWriter::writeTempPart(BlockWithPartition & block_with_partition)
{
    Block & block = block_with_partition.block;

    static const String TMP_PREFIX = "tmp_insert_";

    /// This will generate unique name in scope of current server process.
    Int64 temp_index = data.insert_increment.get();

    /// Compute min/max values of the minmax-index columns for this block.
    MergeTreeDataPart::MinMaxIndex minmax_idx;
    minmax_idx.update(block, data.minmax_idx_columns);

    MergeTreePartition partition(std::move(block_with_partition.partition));

    /// temp_index is used as both min and max block number of this single-block part.
    MergeTreePartInfo new_part_info(partition.getID(data), temp_index, temp_index, 0);
    String part_name;
    if (data.format_version < MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING)
    {
        /// Old part-name format encodes a date range, taken from the minmax
        /// index of the date column.
        DayNum min_date(minmax_idx.parallelogram[data.minmax_idx_date_column_pos].left.get<UInt64>());
        DayNum max_date(minmax_idx.parallelogram[data.minmax_idx_date_column_pos].right.get<UInt64>());

        const auto & date_lut = DateLUT::instance();

        DayNum min_month = date_lut.toFirstDayNumOfMonth(DayNum(min_date));
        DayNum max_month = date_lut.toFirstDayNumOfMonth(DayNum(max_date));

        /// In the old format a single part must lie within one calendar month.
        if (min_month != max_month)
            throw Exception("Logical error: part spans more than one month.", ErrorCodes::LOGICAL_ERROR);

        part_name = new_part_info.getPartNameV0(min_date, max_date);
    }
    else
        part_name = new_part_info.getPartName();

    MergeTreeData::MutableDataPartPtr new_data_part = std::make_shared<MergeTreeData::DataPart>(data, part_name, new_part_info);
    new_data_part->partition = std::move(partition);
    new_data_part->minmax_idx = std::move(minmax_idx);
    new_data_part->relative_path = TMP_PREFIX + part_name;
    new_data_part->is_temp = true;

    /// The name could be non-unique in case of stale files from previous runs.
    String full_path = new_data_part->getFullPath();
    Poco::File dir(full_path);

    if (dir.exists())
    {
        LOG_WARNING(log, "Removing old temporary directory " + full_path);
        dir.remove(true);
    }

    dir.createDirectories();

    /// If we need to calculate some columns to sort.
    if (data.hasSortingKey())
        data.sorting_key_expr->execute(block);

    Names sort_columns = data.sorting_key_columns;
    SortDescription sort_description;
    size_t sort_columns_size = sort_columns.size();
    sort_description.reserve(sort_columns_size);

    for (size_t i = 0; i < sort_columns_size; ++i)
        sort_description.emplace_back(block.getPositionByName(sort_columns[i]), 1, 1);

    ProfileEvents::increment(ProfileEvents::MergeTreeDataWriterBlocks);

    /// Sort. A permutation is computed only when the block is not already
    /// sorted; otherwise perm_ptr stays null and the block is written as-is.
    IColumn::Permutation * perm_ptr = nullptr;
    IColumn::Permutation perm;
    if (!sort_description.empty())
    {
        if (!isAlreadySorted(block, sort_description))
        {
            stableGetPermutation(block, sort_description, perm);
            perm_ptr = &perm;
        }
        else
            ProfileEvents::increment(ProfileEvents::MergeTreeDataWriterBlocksAlreadySorted);
    }

    /// This effectively chooses minimal compression method:
    /// either default lz4 or compression method with zero thresholds on absolute and relative part size.
    auto compression_settings = data.context.chooseCompressionSettings(0, 0);

    /// Write only the columns actually present in the block.
    NamesAndTypesList columns = data.getColumns().getAllPhysical().filter(block.getNames());
    MergedBlockOutputStream out(data, new_data_part->getFullPath(), columns, compression_settings);

    out.writePrefix();
    out.writeWithPermutation(block, perm_ptr);
    out.writeSuffixAndFinalizePart(new_data_part);

    /// Account for the written rows/bytes in profile counters.
    ProfileEvents::increment(ProfileEvents::MergeTreeDataWriterRows, block.rows());
    ProfileEvents::increment(ProfileEvents::MergeTreeDataWriterUncompressedBytes, block.bytes());
    ProfileEvents::increment(ProfileEvents::MergeTreeDataWriterCompressedBytes, new_data_part->bytes_on_disk);

    return new_data_part;
}