// Fold the entries written this session into the running total and
// persist it in the file header (4-byte count at byte offset 8).
void stealth_database::modify_entries_count()
{
    entries_count_ += entries_written_count_;
    auto serial = make_serializer(file_.data() + 8);
    serial.write_4_bytes(entries_count_);
}
// Append a new index record mapping the next block height to the
// given file position (stored as 8 bytes).
void block_database::write_position(const position_type position)
{
    const auto new_record = index_.allocate();
    auto serial = make_serializer(index_.get(new_record));
    serial.write_8_bytes(position);
}
void block_database::write_position(const position_type position) { const index_type height = index_.allocate(); record_type record = index_.get(height); auto serial = make_serializer(record); serial.write_8_bytes(position); }
// Build the fetch_transaction request payload: just the tx hash.
void wrap_fetch_transaction_args(data_chunk& data, const hash_digest& tx_hash)
{
    data.resize(hash_digest_size);
    auto sink = make_serializer(data.begin());
    sink.write_hash(tx_hash);
    BITCOIN_ASSERT(sink.iterator() == data.end());
}
// Service handler: deserialize a raw transaction from the request payload,
// broadcast it to the network, and reply to the caller with an error code.
// The response payload is a single 4-byte error code in both paths.
void protocol_broadcast_transaction(server_node& node,
    const incoming_message& request, queue_send_callback queue_send)
{
    const data_chunk& raw_tx = request.data();
    chain::transaction tx;
    data_chunk result(4);
    auto serial = make_serializer(result.begin());
    // Reject payloads that do not parse as a transaction.
    if (!tx.from_data(raw_tx))
    {
        // error
        write_error_code(serial, error::bad_stream);
        outgoing_message response(request, result);
        queue_send(response);
        return;
    }
    // Broadcast completion is not reported back to the requester.
    auto ignore_send = [](const std::error_code&, size_t) {};
    // Send and hope for the best!
    node.protocol().broadcast(tx, ignore_send);
    // Response back to user saying everything is fine.
    write_error_code(serial, std::error_code());
    log_debug(LOG_SERVICE)
        << "protocol.broadcast_transaction() finished. Sending response.";
    outgoing_message response(request, result);
    queue_send(response);
}
void hashtable_database_writer::write_records_size()
{
    // The total records size lives at byte offset 16 of the file header,
    // which spans 24 bytes plus 8 bytes per bucket.
    const size_t metadata_size = 24 + buckets_ * 8;
    BITCOIN_ASSERT(file_.size() >= metadata_size + total_records_size_);
    auto serial = make_serializer(file_.data() + 16);
    serial.write_8_bytes(total_records_size_);
}
void block_database::store(const block_type& block) { const uint32_t height = index_.count(); const auto number_txs = block.transactions.size(); const uint32_t number_txs32 = static_cast<uint32_t>(number_txs); // Write block data. const auto write = [&](uint8_t* data) { satoshi_save(block.header, data); auto serial = make_serializer(data + 80); serial.write_4_bytes(height); serial.write_4_bytes(number_txs32); for (const auto& tx: block.transactions) { const auto tx_hash = hash_transaction(tx); serial.write_hash(tx_hash); } }; const auto key = hash_block_header(block.header); const auto value_size = 80 + 4 + 4 + number_txs * hash_size; const auto position = map_.store(key, write, value_size); // Write height -> position mapping. write_position(position); }
// Build the spend-index key: [ point hash:32 ][ point index:4 ].
data_chunk create_spent_key(const Point& point)
{
    data_chunk spent_key(hash_digest_size + 4);
    auto key_sink = make_serializer(spent_key.begin());
    key_sink.write_hash(point.hash);
    key_sink.write_4_bytes(point.index);
    return spent_key;
}
void hashtable_database_writer::link_record(
    uint64_t bucket_index, uint64_t record_begin)
{
    BITCOIN_ASSERT(file_.size() > 24 + buckets_ * 8);
    BITCOIN_ASSERT(bucket_index < buckets_);
    // Point the bucket's head slot at the newly written record.
    auto serial = make_serializer(file_.data() + bucket_offset(bucket_index));
    serial.write_8_bytes(record_begin);
}
// Record the current entry count in the header row for this block height.
void stealth_database::add_header_entry_index(uint32_t block_height)
{
    // NOTE(review): 'interval' is simply the raw block height here —
    // presumably one header row per block; confirm against the readers
    // of this sector before changing.
    const uint32_t interval = block_height;
    BITCOIN_ASSERT(interval < max_header_rows_);
    // Each header row is a 4-byte entry count.
    uint64_t offset = header_sector_ + interval * 4;
    uint8_t* iter = file_.data() + offset;
    auto serial = make_serializer(iter);
    serial.write_4_bytes(entries_count_);
}
// Serialize fetch_history arguments:
// [ version:1 ][ address short hash:20 ][ from_height:4 ].
void wrap_fetch_history_args(data_chunk& data,
    const payment_address& address, size_t from_height)
{
    data.resize(1 + short_hash_size + 4);
    auto sink = make_serializer(data.begin());
    sink.write_byte(address.version());
    sink.write_short_hash(address.hash());
    sink.write_4_bytes(from_height);
    BITCOIN_ASSERT(sink.iterator() == data.end());
}
// Write the count value to the first 32 bits of the file after the header. void record_manager::write_count() { BITCOIN_ASSERT(header_size_ + sizeof(array_index) <= file_.size()); // The accessor must remain in scope until the end of the block. auto memory = file_.access(); auto payload_size_address = REMAP_ADDRESS(memory) + header_size_; auto serial = make_serializer(payload_size_address); serial.write_little_endian(record_count_); }
uint64_t addr_key_checksum(const output_point& outpoint) { data_chunk checksum_data(hash_digest_size + 4); auto serial = make_serializer(checksum_data.begin()); serial.write_hash(outpoint.hash); serial.write_4_bytes(outpoint.index); BITCOIN_ASSERT(serial.iterator() == checksum_data.end()); hash_digest hash = generate_sha256_hash(checksum_data); data_chunk raw_checksum(hash.begin(), hash.begin() + 8); return cast_chunk<uint64_t>(raw_checksum); }
// Address index key: [ version:1 ][ short hash:20 ][ outpoint checksum:8 ].
data_chunk create_address_key(
    const payment_address& address, const output_point& outpoint)
{
    data_chunk result(1 + short_hash_size + 8);
    auto sink = make_serializer(result.begin());
    sink.write_byte(address.version());
    sink.write_short_hash(address.hash());
    sink.write_8_bytes(addr_key_checksum(outpoint));
    BITCOIN_ASSERT(sink.iterator() == result.end());
    return result;
}
// 4-byte checksum over the serialized outpoint (hash + index).
uint32_t addr_key_checksum(const output_point& outpoint)
{
    data_chunk chksum_data(hash_digest_size + 4);
    auto sink = make_serializer(chksum_data.begin());
    sink.write_hash(outpoint.hash);
    sink.write_4_bytes(outpoint.index);
    BITCOIN_ASSERT(std::distance(chksum_data.begin(), sink.iterator()) ==
        hash_digest_size + 4);
    return generate_sha256_checksum(chksum_data);
}
// Record that 'outpoint' is spent by 'spend'.
// Value layout: [ spend hash ][ spend index:4 ].
void spend_database::store(const output_point& outpoint,
    const input_point& spend)
{
    const auto key = output_to_hash(outpoint);
    const auto writer = [&spend](uint8_t* data)
    {
        auto serial = make_serializer(data);
        serial.write_data(spend.hash);
        serial.write_4_bytes(spend.index);
    };
    map_.store(key, writer);
}
index_type linked_records::insert(index_type next)
{
    static_assert(sizeof(index_type) == sizeof(uint32_t),
        "index_type incorrect size");

    // Allocate a fresh record and chain it to 'next' by writing the
    // link value into its first 4 bytes.
    const index_type record = allocator_.allocate();
    auto serial = make_serializer(allocator_.get(record));
    serial.write_4_bytes(next);
    return record;
}
// Store a transaction record keyed by its hash.
// Record layout: [ height:4 ][ index:4 ][ serialized tx ], little endian.
void transaction_database::store(size_t height, size_t index,
    const chain::transaction& tx)
{
    // Guard the size_t -> uint32_t narrowing explicitly before it is
    // serialized into 4-byte fields (matches the convention used by
    // the other store() implementations in this file).
    BITCOIN_ASSERT(height <= max_uint32);
    BITCOIN_ASSERT(index <= max_uint32);
    const auto height32 = static_cast<uint32_t>(height);
    const auto index32 = static_cast<uint32_t>(index);

    // Write block data.
    const hash_digest key = tx.hash();
    const size_t value_size = 4 + 4 + tx.serialized_size();
    auto write = [&](uint8_t* data)
    {
        auto serial = make_serializer(data);
        serial.write_4_bytes_little_endian(height32);
        serial.write_4_bytes_little_endian(index32);
        data_chunk tx_data = tx.to_data();
        serial.write_data(tx_data);
    };
    map_.store(key, write, value_size);
}
// Serialize a confirmed transaction into the leveldb write batch and
// index its spends, debits and credits. Returns false on any failure.
bool leveldb_common::save_transaction(leveldb_transaction_batch& batch,
    uint32_t block_height, uint32_t tx_index,
    const hash_digest& tx_hash, const transaction_type& block_tx)
{
    // Skip if this (hash, height, index) is a known duplicate.
    if (duplicate_exists(tx_hash, block_height, tx_index))
        return true;
    // Record layout: [ height:4 ][ index:4 ][ raw tx ].
    data_chunk tx_data(8 + satoshi_raw_size(block_tx));
    // Serialize tx.
    auto serial = make_serializer(tx_data.begin());
    serial.write_4_bytes(block_height);
    serial.write_4_bytes(tx_index);
    // Actual tx data.
    auto end_iter = satoshi_save(block_tx, serial.iterator());
    BITCOIN_ASSERT(
        std::distance(tx_data.begin(), end_iter) ==
            8 + satoshi_raw_size(block_tx));
    // Save tx to leveldb
    batch.tx.Put(slice(tx_hash), slice(tx_data));
    // Add inputs to spends database.
    // Coinbase inputs do not spend anything.
    if (!is_coinbase(block_tx))
        for (uint32_t input_index = 0;
            input_index < block_tx.inputs.size(); ++input_index)
        {
            const transaction_input_type& input =
                block_tx.inputs[input_index];
            const input_point inpoint{tx_hash, input_index};
            // Mark previous outputs spent and record the debit row.
            if (!mark_spent_outputs(batch.spend,
                input.previous_output, inpoint))
                return false;
            if (!add_debit(batch.debit, input,
                {tx_hash, input_index}, block_height))
                return false;
        }
    // Save address -> output mappings.
    for (uint32_t output_index = 0;
        output_index < block_tx.outputs.size(); ++output_index)
    {
        const transaction_output_type& output =
            block_tx.outputs[output_index];
        if (!add_credit(batch.credit, output,
            {tx_hash, output_index}, block_height))
            return false;
    }
    return true;
}
// Append a record for key_hash and prepend it to its bucket's linked list.
// Record layout: [ key hash:32 ][ varuint value size ][ value ][ next:8 ].
void hashtable_database_writer::store(const hash_digest& key_hash,
    size_t value_size, write_value_function write)
{
    // Calculate the end of the last record.
    const uint64_t header_size = 24 + buckets_ * 8;
    const uint64_t records_end_offset = header_size + total_records_size_;
    // [ tx hash ] 32
    // [ varuint value size ]
    // [ ... value data ... ]
    // [ next tx in bucket ] 8
    const size_t record_size =
        32 + variable_uint_size(value_size) + value_size + 8;
    // If a record crosses a page boundary then we align it with
    // the beginning of the next page.
    const size_t record_begin =
        align_if_crossing_page(page_size_, records_end_offset, record_size);
    BITCOIN_ASSERT(file_.size() >= record_begin + record_size);
    // We will insert new transactions at the beginning of the bucket's list.
    // I assume that more recent transactions in the blockchain are used
    // more often than older ones.
    // We lookup the existing value in the bucket first.
    const uint64_t bucket_index = remainder(key_hash.data(), buckets_);
    BITCOIN_ASSERT(bucket_index < buckets_);
    const uint64_t previous_bucket_value = read_bucket_value(bucket_index);
    // Now begin writing the record itself.
    uint8_t* entry = file_.data() + record_begin;
    auto serial = make_serializer(entry);
    serial.write_hash(key_hash);
    serial.write_variable_uint(value_size);
    // Call the supplied callback to serialize the data.
    write(serial.iterator());
    // Skip over the value region the callback just filled.
    serial.set_iterator(serial.iterator() + value_size);
    serial.write_8_bytes(previous_bucket_value);
    BITCOIN_ASSERT(serial.iterator() == entry + record_size);
    // Change file size value at file start.
    // This must be done first so any subsequent writes don't
    // overwrite this record in case of a crash or interruption.
    BITCOIN_ASSERT(record_begin >= header_size);
    const uint64_t alignment_padding =
        record_begin - header_size - total_records_size_;
    BITCOIN_ASSERT(alignment_padding <= page_size_);
    total_records_size_ += record_size + alignment_padding;
    // Now add record to bucket.
    const uint64_t record_begin_offset = record_begin - header_size;
    link_record(bucket_index, record_begin_offset);
}
void add_stealth_info(const data_chunk& stealth_data, const payment_address& address, const hash_digest& tx_hash, stealth_database& db) { const stealth_bitfield bitfield = calculate_bitfield(stealth_data); const data_chunk ephemkey = read_ephemkey(stealth_data); auto write_func = [&](uint8_t *it) { auto serial = make_serializer(it); serial.write_uint_auto(bitfield); serial.write_data(ephemkey); serial.write_byte(address.version()); serial.write_short_hash(address.hash()); serial.write_hash(tx_hash); BITCOIN_ASSERT(serial.iterator() == it + bitfield_size + 33 + 21 + 32); }; db.store(write_func); }
// Service handler: reply with [ error code:4 ][ total connections:4 ].
void protocol_total_connections(server_node& node,
    const incoming_message& request, queue_send_callback queue_send)
{
    BITCOIN_ASSERT(node.protocol().total_connections() <= max_uint32);
    const auto connection_count = static_cast<uint32_t>(
        node.protocol().total_connections());
    data_chunk result(8);
    auto serial = make_serializer(result.begin());
    write_error_code(serial, std::error_code());
    serial.write_4_bytes_little_endian(connection_count);
    BITCOIN_ASSERT(serial.iterator() == result.end());
    log_debug(LOG_REQUEST)
        << "protocol.total_connections() finished. Sending response.";
    outgoing_message response(request, result);
    queue_send(response);
}
bool leveldb_common::save_block( uint32_t height, const block_type& serial_block) { leveldb_transaction_batch batch; // Write block header + tx hashes data_chunk raw_block_data( 80 + 4 + serial_block.transactions.size() * hash_digest_size); // Downcast to base header type so serializer selects that. auto header_end = satoshi_save( serial_block.header, raw_block_data.begin()); BITCOIN_ASSERT(std::distance(raw_block_data.begin(), header_end) == 80); auto serial_hashes = make_serializer(header_end); // Write the number of transactions... serial_hashes.write_4_bytes(serial_block.transactions.size()); // ... And now the tx themselves. for (uint32_t tx_index = 0; tx_index < serial_block.transactions.size(); ++tx_index) { const transaction_type& block_tx = serial_block.transactions[tx_index]; const hash_digest& tx_hash = hash_transaction(block_tx); if (!save_transaction(batch, height, tx_index, tx_hash, block_tx)) { log_fatal(LOG_BLOCKCHAIN) << "Could not save transaction"; return false; } serial_hashes.write_hash(tx_hash); } BITCOIN_ASSERT(serial_hashes.iterator() == raw_block_data.begin() + 80 + 4 + serial_block.transactions.size() * hash_digest_size); data_chunk raw_height = uncast_type(height); hash_digest block_hash = hash_block_header(serial_block.header); // Write block header batch.block.Put(slice(raw_height), slice(raw_block_data)); batch.block_hash.Put(slice_block_hash(block_hash), slice(raw_height)); // Execute batches. db_.write(batch); // Sync stealth database. db_stealth_->sync(height); return true; }
// Record a credit row under the address key.
// Row layout: [ outpoint:36 ][ value:8 ][ block_height:4 ].
bool add_credit(leveldb::WriteBatch& batch, const payment_address& address,
    uint64_t output_value, const output_point& outpoint,
    uint32_t block_height)
{
    const data_chunk addr_key = create_address_key(address, outpoint);

    // outpoint, value, block_height
    data_chunk row_info(36 + 8 + 4);
    auto sink = make_serializer(row_info.begin());
    // outpoint
    sink.write_hash(outpoint.hash);
    sink.write_4_bytes(outpoint.index);
    // value
    sink.write_8_bytes(output_value);
    // block_height
    sink.write_4_bytes(block_height);
    BITCOIN_ASSERT(
        std::distance(row_info.begin(), sink.iterator()) == 36 + 8 + 4);

    batch.Put(slice(addr_key), slice(row_info));
    return true;
}
// Record a debit row under the address key of the spent output.
// Row layout: [ inpoint:36 ][ block_height:4 ].
bool add_debit(leveldb::WriteBatch& batch,
    const transaction_input_type& input, const input_point& inpoint,
    uint32_t block_height)
{
    payment_address address;
    // Not a Bitcoin address so skip this output.
    if (!extract(address, input.script))
        return true;

    const data_chunk addr_key =
        create_address_key(address, input.previous_output);

    // inpoint
    data_chunk row_info(36 + 4);
    auto sink = make_serializer(row_info.begin());
    // inpoint
    sink.write_hash(inpoint.hash);
    sink.write_4_bytes(inpoint.index);
    // block_height
    sink.write_4_bytes(block_height);
    BITCOIN_ASSERT(
        std::distance(row_info.begin(), sink.iterator()) == 36 + 4);

    batch.Put(slice(addr_key), slice(row_info));
    return true;
}
// Reduce a list of hashes to its merkle root. The list is consumed
// (mutated) in the process. An empty list yields null_hash; a single
// hash is its own root.
hash_digest build_merkle_tree(hash_list& merkle)
{
    if (merkle.empty())
        return null_hash;

    // Collapse the list one level at a time until only the root remains.
    while (merkle.size() > 1)
    {
        // Odd-length levels duplicate their final hash.
        if (merkle.size() % 2 != 0)
            merkle.push_back(merkle.back());
        BITCOIN_ASSERT(merkle.size() % 2 == 0);

        // Pair up adjacent hashes and hash each concatenation.
        hash_list parents;
        for (auto it = merkle.begin(); it != merkle.end(); it += 2)
        {
            data_chunk concat_data(hash_size * 2);
            auto concat = make_serializer(concat_data.begin());
            concat.write_hash(*it);
            concat.write_hash(*(it + 1));
            BITCOIN_ASSERT(concat.iterator() == concat_data.end());
            parents.push_back(bitcoin_hash(concat_data));
        }
        merkle = parents;
    }
    return merkle[0];
}
void history_scan_database::add(const address_bitset& key, const uint8_t marker, const point_type& point, uint32_t block_height, uint64_t value) { BITCOIN_ASSERT(key.size() >= settings_.sharded_bitsize); // Both add() and sync() must have identical lookup of shards. hsdb_shard& shard = lookup(key); address_bitset sub_key = drop_prefix(key); BITCOIN_ASSERT(sub_key.size() == settings_.scan_bitsize()); #ifdef HSDB_DEBUG log_debug(LOG_HSDB) << "Sub key = " << sub_key; #endif data_chunk row_data(settings_.row_value_size); auto serial = make_serializer(row_data.begin()); serial.write_byte(marker); serial.write_hash(point.hash); serial.write_4_bytes(point.index); serial.write_4_bytes(block_height); serial.write_8_bytes(value); BITCOIN_ASSERT(serial.iterator() == row_data.begin() + settings_.row_value_size); shard.add(sub_key, row_data); }
void block_database::store(const block_type& block) { const size_t height = index_.size(); // Write block data. const hash_digest key = hash_block_header(block.header); const size_t number_txs = block.transactions.size(); const size_t value_size = 80 + 4 + 4 + number_txs * hash_size; auto write = [&](uint8_t* data) { satoshi_save(block.header, data); auto serial = make_serializer(data + 80); serial.write_4_bytes(height); serial.write_4_bytes(number_txs); for (const transaction_type& tx: block.transactions) { const hash_digest tx_hash = hash_transaction(tx); serial.write_hash(tx_hash); } }; const position_type position = map_.store(key, value_size, write); // Write height -> position mapping. write_position(position); }
// Serialize object 'o' into a fresh buffer and return it.
// NOTE(review): make_serializer(buffer) is invoked here as a callable on
// 'o' — a functor-style overload distinct from the iterator-based
// make_serializer used elsewhere in this file; confirm it appends to
// (rather than overwrites) 'buffer'.
binary_data serialize_c(obj o)
{
    binary_data buffer;
    make_serializer(buffer)(o);
    return buffer;
}
// Serialize a confirmed transaction into the leveldb write batch and index
// its spends, debits, credits, and any stealth outputs.
// Returns false on any index failure.
bool leveldb_common::save_transaction(leveldb_transaction_batch& batch,
    uint32_t block_height, uint32_t tx_index,
    const hash_digest& tx_hash, const transaction_type& block_tx)
{
    // Skip known-special duplicate transactions.
    if (is_special_duplicate(block_height, tx_index))
        return true;
    // Record layout: [ height:4 ][ index:4 ][ raw tx ].
    data_chunk tx_data(8 + satoshi_raw_size(block_tx));
    // Serialize tx.
    auto serial = make_serializer(tx_data.begin());
    serial.write_4_bytes(block_height);
    serial.write_4_bytes(tx_index);
    // Actual tx data.
    auto end_iter = satoshi_save(block_tx, serial.iterator());
    BITCOIN_ASSERT(
        tx_data.begin() + 8 + satoshi_raw_size(block_tx) == end_iter);
    // Save tx to leveldb
    batch.tx.Put(slice(tx_hash), slice(tx_data));
    // Add inputs to spends database.
    // Coinbase inputs do not spend anything.
    if (!is_coinbase(block_tx))
        for (uint32_t input_index = 0;
            input_index < block_tx.inputs.size(); ++input_index)
        {
            const transaction_input_type& input =
                block_tx.inputs[input_index];
            const input_point inpoint{tx_hash, input_index};
            // Mark previous outputs spent and record the debit row.
            if (!mark_spent_outputs(batch.spend,
                input.previous_output, inpoint))
                return false;
            if (!add_debit(batch.debit, input,
                {tx_hash, input_index}, block_height))
                return false;
        }
    // A stack of size 1. Keep the stealth_data from
    // one iteration to the next.
    data_chunk stealth_data_store;
    auto unload_stealth_store = [&]()
    {
        return std::move(stealth_data_store);
    };
    // Save address -> output mappings.
    for (uint32_t output_index = 0;
        output_index < block_tx.outputs.size(); ++output_index)
    {
        const transaction_output_type& output =
            block_tx.outputs[output_index];
        // If a stealth output then skip processing.
        if (process_stealth_output_info(output, stealth_data_store))
            continue;
        // Any stealth data captured by the previous iteration is
        // attributed to this (the following) output.
        data_chunk stealth_data = unload_stealth_store();
        // Try to extract an address.
        payment_address address;
        if (!extract(address, output.script))
            continue;
        // Process this output.
        if (!stealth_data.empty())
            add_stealth_info(stealth_data, address, tx_hash, *db_stealth_);
        if (!add_credit(batch.credit, address, output.value,
            {tx_hash, output_index}, block_height))
            return false;
    }
    return true;
}