void send_history_result(const std::error_code& ec,
    const blockchain::history_list& history,
    const incoming_message& request, queue_send_callback queue_send)
{
    // Each row serializes to 88 bytes: output point (32 byte hash +
    // 4 byte index), output height (4), value (8), spend point
    // (32 byte hash + 4 byte index) and spend height (4).
    constexpr size_t row_size = 36 + 4 + 8 + 36 + 4;
    // 4 leading bytes hold the error code.
    data_chunk result(4 + row_size * history.size());
    auto serial = make_serializer(result.begin());
    write_error_code(serial, ec);
    for (const blockchain::history_row& row: history)
    {
        serial.write_hash(row.output.hash);
        serial.write_4_bytes(row.output.index);
        serial.write_4_bytes(row.output_height);
        serial.write_8_bytes(row.value);
        serial.write_hash(row.spend.hash);
        serial.write_4_bytes(row.spend.index);
        serial.write_4_bytes(row.spend_height);
    }
    BITCOIN_ASSERT(serial.iterator() == result.end());
    // TODO: Slows down queries!
    //log_debug(LOG_WORKER)
    //    << "*.fetch_history() finished. Sending response.";
    outgoing_message response(request, result);
    queue_send(response);
}
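// For reference, a receiver can decode the payload written above by
// reversing the writes. A minimal sketch, assuming integers are
// serialized little-endian; hash_digest, history_row_sketch,
// read_little_endian and decode_history are hypothetical names
// introduced for illustration, not part of the library API.
#include <array>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <stdexcept>
#include <vector>

using hash_digest = std::array<uint8_t, 32>;

// Mirrors the fields written per row by send_history_result.
struct history_row_sketch
{
    hash_digest output_hash;
    uint32_t output_index;
    uint32_t output_height;
    uint64_t value;
    hash_digest spend_hash;
    uint32_t spend_index;
    uint32_t spend_height;
};

// Assemble an integer from little-endian bytes.
template <typename T>
T read_little_endian(const uint8_t* data)
{
    T value = 0;
    for (size_t i = 0; i < sizeof(T); ++i)
        value |= static_cast<T>(data[i]) << (8 * i);
    return value;
}

// Decode: 4 byte error code, then 88 bytes per history row.
std::vector<history_row_sketch> decode_history(
    const std::vector<uint8_t>& payload, uint32_t& ec_out)
{
    constexpr size_t row_size = 36 + 4 + 8 + 36 + 4;
    if (payload.size() < 4 || (payload.size() - 4) % row_size != 0)
        throw std::runtime_error("malformed history payload");
    ec_out = read_little_endian<uint32_t>(payload.data());
    std::vector<history_row_sketch> rows;
    for (size_t offset = 4; offset < payload.size(); offset += row_size)
    {
        const uint8_t* p = payload.data() + offset;
        history_row_sketch row;
        std::memcpy(row.output_hash.data(), p, 32);
        row.output_index = read_little_endian<uint32_t>(p + 32);
        row.output_height = read_little_endian<uint32_t>(p + 36);
        row.value = read_little_endian<uint64_t>(p + 40);
        std::memcpy(row.spend_hash.data(), p + 48, 32);
        row.spend_index = read_little_endian<uint32_t>(p + 80);
        row.spend_height = read_little_endian<uint32_t>(p + 84);
        rows.push_back(row);
    }
    return rows;
}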
#define LOG_RESULT "result"

void history_fetched(const std::error_code& ec,
    const blockchain::history_list& history)
{
    if (ec)
    {
        log_error() << "Failed to fetch history: " << ec.message();
        return;
    }
    uint64_t total_recv = 0, balance = 0;
    for (const auto& row: history)
    {
        total_recv += row.value;
        // An output counts towards the balance only while unspent,
        // i.e. while its spend hash is still null.
        if (row.spend.hash == null_hash)
            balance += row.value;
    }
    log_debug(LOG_RESULT) << "Queried " << history.size()
        << " outpoints with their values and spends.";
    log_debug(LOG_RESULT) << "Total received: " << total_recv;
    log_debug(LOG_RESULT) << "Balance: " << balance;
    log_info(LOG_RESULT) << "History fetched";
}
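// The balance rule above can be exercised in isolation: everything
// received sums into total_recv, but only outputs whose spend hash is
// still null count towards the balance. A self-contained sketch with
// hypothetical stand-ins (balance_row, demo_balance_logic) for the
// real history types:
#include <array>
#include <cassert>
#include <cstdint>
#include <vector>

namespace balance_demo {

using hash_digest = std::array<uint8_t, 32>;
constexpr hash_digest null_hash{}; // all-zero hash marks "unspent"

// Only the fields history_fetched reads.
struct balance_row
{
    uint64_t value;
    hash_digest spend_hash;
};

void demo_balance_logic()
{
    hash_digest some_spend{};
    some_spend[0] = 1; // any non-null hash marks a spent output

    // Two outputs received (60 and 40 satoshis), the 40 already spent.
    std::vector<balance_row> history{{60, null_hash}, {40, some_spend}};

    uint64_t total_recv = 0, balance = 0;
    for (const auto& row: history)
    {
        total_recv += row.value;
        if (row.spend_hash == null_hash)
            balance += row.value;
    }
    assert(total_recv == 100); // everything ever received
    assert(balance == 60);     // only the unspent output remains
}

} // namespace balance_demo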
void indexer_history_fetched(const std::error_code& ec,
    const output_info_list& outputs, const spend_info_list& spends,
    blockchain::history_list history,
    blockchain::fetch_handler_history handle_fetch)
{
    constexpr uint32_t max_height = std::numeric_limits<uint32_t>::max();
    constexpr uint32_t max_index = std::numeric_limits<uint32_t>::max();
    if (ec)
    {
        handle_fetch(ec, blockchain::history_list());
        return;
    }
    // Just add in outputs.
    for (const output_info_type& output_info: outputs)
    {
        // There is always a chance of inconsistency, so we resolve
        // these and move on. This can happen when new blocks arrive
        // and indexer.query() is called midway through a batch of
        // txpool.try_delete() operations. If do_query() is queued
        // before the last do_doindex() and our query contains a
        // transaction in that block, then we will have a conflict.
        bool is_conflict = false;
        for (const blockchain::history_row& row: history)
        {
            // Usually the indexer and memory pool don't hold any
            // transactions that are already confirmed in the
            // blockchain. This is a rare corner case.
            if (row.output == output_info.point)
            {
                is_conflict = true;
                break;
            }
        }
        if (is_conflict)
            continue;
        // Everything OK. Insert outpoint: height 0 means unconfirmed,
        // a null spend with max_height means unspent.
        history.emplace_back(blockchain::history_row{
            output_info.point, 0, output_info.value,
            {null_hash, max_index}, max_height});
    }
    // Now mark spends.
    for (const spend_info_type& spend_info: spends)
    {
        // Iterate history looking for the output we need.
        bool found = false;
        for (blockchain::history_row& row: history)
        {
            if (row.output != spend_info.previous_output)
                continue;
            // Another consistency check, this time for spends.
            // Skip this spend and assume the blockchain's version
            // is the correct one.
            if (row.spend_height != max_height)
            {
                // Don't insert.
                found = true;
                break;
            }
            BITCOIN_ASSERT((row.spend == input_point{null_hash, max_index}));
            // Everything OK. Insert spend.
            row.spend = spend_info.point;
            row.spend_height = 0;
            found = true;
            break;
        }
        // This assert can be triggered if the pool fills and starts
        // dropping transactions. In practice this should not happen
        // often and isn't a problem.
        //BITCOIN_ASSERT_MSG(found, "Couldn't find output for adding spend");
    }
    handle_fetch(std::error_code(), history);
}
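// The merge rules above can be checked with a small self-contained
// sketch: a duplicate outpoint from the indexer is skipped rather than
// appended, and a pool spend of a confirmed-but-unspent output is
// recorded with spend height 0 (unconfirmed). merge_point, merge_row
// and demo_merge_logic are hypothetical, illustration-only names.
#include <array>
#include <cassert>
#include <cstdint>
#include <limits>
#include <vector>

namespace merge_demo {

using hash_digest = std::array<uint8_t, 32>;
constexpr hash_digest null_hash{};
constexpr uint32_t max_u32 = std::numeric_limits<uint32_t>::max();

struct merge_point
{
    hash_digest hash;
    uint32_t index;
    bool operator==(const merge_point& other) const
    {
        return hash == other.hash && index == other.index;
    }
};

struct merge_row
{
    merge_point output;
    uint32_t output_height;
    uint64_t value;
    merge_point spend;
    uint32_t spend_height;
};

void demo_merge_logic()
{
    hash_digest tx{};
    tx[0] = 7;
    // One confirmed, unspent output at height 100.
    std::vector<merge_row> history{
        {{tx, 0}, 100, 50, {null_hash, max_u32}, max_u32}};

    // 1. The indexer reports the same outpoint: detected as a
    //    conflict, so no duplicate row is appended.
    merge_point indexed{tx, 0};
    bool is_conflict = false;
    for (const auto& row: history)
        if (row.output == indexed)
            is_conflict = true;
    assert(is_conflict);
    assert(history.size() == 1);

    // 2. A pool spend of that output: mark it with spend height 0,
    //    meaning the spend is known but not yet confirmed.
    merge_point spend_point{tx, 1};
    for (auto& row: history)
        if (row.output == indexed && row.spend_height == max_u32)
        {
            row.spend = spend_point;
            row.spend_height = 0;
        }
    assert(history.front().spend_height == 0);
}

} // namespace merge_demo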