bool inventory_vector::already_have(
    db_tx & tx_db, const inventory_vector & inv
    )
{
    /**
     * Answers whether this node already knows about the object referenced
     * by the given inventory_vector, so it need not be requested again.
     */
    const auto & h = inv.hash();

    if (inv.type() == type_msg_tx)
    {
        /**
         * A transaction is known if it is in the memory pool, held as an
         * orphan, or persisted in the transaction database.
         */
        if (transaction_pool::instance().exists(h))
        {
            return true;
        }

        return
            globals::instance().orphan_transactions().count(h) > 0 ||
            tx_db.contains_transaction(h)
        ;
    }
    else if (inv.type() == type_msg_block)
    {
        /**
         * A block is known if it is in the block index or held as an
         * orphan.
         */
        return
            globals::instance().block_indexes().count(h) > 0 ||
            globals::instance().orphan_blocks().count(h) > 0
        ;
    }

    /**
     * Unknown (or error) inventory types are reported as already known so
     * that they are never requested.
     */
    return true;
}
std::pair<bool, std::string> transaction_wallet::accept_wallet_transaction(
    db_tx & tx_db
    )
{
    /**
     * Submit any supporting (previous) transactions to the memory pool
     * first, otherwise this transaction could be rejected as an orphan.
     */
    for (auto & tx_prev : m_previous_transactions)
    {
        /**
         * Coinbase and coinstake transactions never go through the memory
         * pool.
         */
        if (tx_prev.is_coin_base() || tx_prev.is_coin_stake())
        {
            continue;
        }

        const auto hash_tx = tx_prev.get_hash();

        /**
         * Only submit a supporting transaction that is neither in the
         * memory pool nor in the transaction database already. The result
         * is deliberately ignored (best-effort).
         */
        if (
            transaction_pool::instance().exists(hash_tx) == false &&
            tx_db.contains_transaction(hash_tx) == false
            )
        {
            tx_prev.accept_to_transaction_pool(tx_db);
        }
    }

    /**
     * Finally submit this transaction itself.
     */
    return transaction::accept_to_transaction_pool(tx_db);
}
bool transaction::read_from_disk(
    db_tx & tx_db, const point_out & previous_out,
    transaction_index & tx_index
    )
{
    /**
     * Loads the transaction referenced by previous_out from disk, filling
     * in its index entry (tx_index). On any failure this transaction is
     * left null and false is returned.
     */
    set_null();

    /**
     * Locate the transaction's index entry in the database.
     */
    bool got_index = tx_db.read_transaction_index(
        previous_out.get_hash(), tx_index
    );

    if (got_index == false)
    {
        log_debug(
            "Transaction failed to read from disk, read transaction index "
            "failed, previous out = " <<
            previous_out.get_hash().to_string().substr(0, 20) << "."
        );

        return false;
    }

    /**
     * Read the raw transaction at the position recorded in the index.
     */
    if (read_from_disk(tx_index.get_transaction_position()) == false)
    {
        log_debug(
            "Transaction failed to read from disk, read from disk failed."
        );

        return false;
    }

    /**
     * The referenced output index must exist in the loaded transaction.
     */
    if (previous_out.n() >= m_transactions_out.size())
    {
        log_debug(
            "Transaction failed to read from disk, n is greater than outputs."
        );

        set_null();

        return false;
    }

    return true;
}
/**
 * Gathers the previous transactions (and their index entries) that this
 * transaction spends, into the inputs map.
 * @param dbtx The transaction database to read index entries from.
 * @param test_pool Proposed index changes (consulted only when best_block
 * or create_block is set), taking precedence over the database.
 * @param best_block True when validating against the best block.
 * @param create_block True when assembling a new block.
 * @param inputs Out: previous-out hash -> (transaction_index, transaction).
 * @param invalid Out: set to true only when an input's n index is out of
 * range (i.e. the transaction itself is malformed, not merely unresolvable).
 * @return true if every input was resolved and range-checked.
 */
bool transaction::fetch_inputs(
    db_tx & dbtx, const std::map<sha256, transaction_index> & test_pool,
    const bool & best_block, const bool & create_block,
    transaction::previous_t & inputs, bool & invalid
    )
{
    /**
     * If the transaction is invalid this will be set to true.
     */
    invalid = false;

    /**
     * Coinbase transactions have no inputs to fetch.
     */
    if (is_coin_base())
    {
        return true;
    }

    for (auto i = 0; i < m_transactions_in.size(); i++)
    {
        auto previous_out = m_transactions_in[i].previous_out();

        /**
         * Several inputs may spend different outputs of the same previous
         * transaction; fetch each previous transaction only once.
         */
        if (inputs.count(previous_out.get_hash()) > 0)
        {
            continue;
        }

        /**
         * Read the transaction index.
         * Note: operator[] inserts the (index, transaction) pair here; both
         * references below are filled in-place.
         */
        auto & tx_index = inputs[previous_out.get_hash()].first;

        bool found = true;

        if (
            (best_block || create_block) &&
            test_pool.count(previous_out.get_hash()) > 0
            )
        {
            /**
             * Get the transaction index from the current proposed changes.
             */
            tx_index = test_pool.find(previous_out.get_hash())->second;
        }
        else
        {
            /**
             * Read transaction index from transaction database.
             */
            found = dbtx.read_transaction_index(
                previous_out.get_hash(), tx_index
            );
        }

        if (found == false && (best_block || create_block))
        {
            if (create_block)
            {
                /**
                 * While assembling a block an unresolved input is expected
                 * (the dependency may still be in the pool); fail silently.
                 */
                return false;
            }
            else
            {
                log_error(
                    "Transaction " <<
                    get_hash().to_string().substr(0, 10) <<
                    " previous transaction " <<
                    previous_out.get_hash().to_string().substr(0, 10) <<
                    " index entry not found."
                );

                return false;
            }
        }

        /**
         * Read previous transaction.
         */
        auto & tx_prev = inputs[previous_out.get_hash()].second;

        if (
            found == false ||
            tx_index.get_transaction_position() == transaction_position(1, 1, 1)
            )
        {
            /**
             * Not on disk (transaction_position(1, 1, 1) is the in-memory
             * sentinel); the previous transaction must be in the pool.
             */
            if (
                transaction_pool::instance().exists(
                previous_out.get_hash()) == false
                )
            {
                log_debug(
                    "Transaction failed to fetch inputs, " <<
                    get_hash().to_string().substr(0, 10) <<
                    " pool previous transaction not found " <<
                    previous_out.get_hash().to_string().substr(0, 10) << "."
                );

                return false;
            }

            tx_prev = transaction_pool::instance().lookup(
                previous_out.get_hash()
            );

            if (found == false)
            {
                /**
                 * No index entry existed; size the spent vector to match
                 * the pooled transaction's outputs.
                 */
                tx_index.spent().resize(tx_prev.transactions_out().size());
            }
        }
        else
        {
            /**
             * Read previous transaction from disk.
             */
            if (
                tx_prev.read_from_disk(
                tx_index.get_transaction_position()) == false
                )
            {
                log_error(
                    "Transaction " <<
                    get_hash().to_string().substr(0, 10) <<
                    " failed to read previous transaction " <<
                    previous_out.get_hash().to_string().substr(0, 10) <<
                    " from disk."
                );

                return false;
            }
        }
    }

    /**
     * Check that all previous out's n indexes are valid.
     */
    for (auto i = 0; i < m_transactions_in.size(); i++)
    {
        const auto & previous_out = m_transactions_in[i].previous_out();

        assert(inputs.count(previous_out.get_hash()) != 0);

        auto & tx_index = inputs[previous_out.get_hash()].first;
        auto & tx_prev = inputs[previous_out.get_hash()].second;

        if (
            previous_out.n() >= tx_prev.transactions_out().size() ||
            previous_out.n() >= tx_index.spent().size()
            )
        {
            /**
             * Revisit this if/when transaction replacement is implemented
             * and allows adding inputs.
             */
            invalid = true;

            log_error(
                "Transaction " << get_hash().to_string().substr(0, 10) <<
                " previous out n out of range " << previous_out.n() << ":" <<
                tx_prev.transactions_out().size() << ":" <<
                tx_index.spent().size() << " previous transaction " <<
                previous_out.get_hash().to_string().substr(0, 10) << "\n" <<
                tx_prev.to_string() << "."
            );

            return false;
        }
    }

    return true;
}
bool transaction::disconnect_inputs(db_tx & tx_db)
{
    /**
     * Undoes the spending effects of this transaction by clearing the
     * spent marker of every previous output it consumed, then removes the
     * transaction itself from the index.
     */
    if (is_coin_base() == false)
    {
        for (auto & tx_in : m_transactions_in)
        {
            const auto outpoint = tx_in.previous_out();

            /**
             * Fetch the previous transaction's index entry from disk.
             */
            transaction_index tx_index;

            bool read_ok = tx_db.read_transaction_index(
                outpoint.get_hash(), tx_index
            );

            if (read_ok == false)
            {
                log_error(
                    "Transaction disconnect_inputs failed, "
                    "read_transaction_index failed."
                );

                return false;
            }

            if (outpoint.n() >= tx_index.spent().size())
            {
                log_error(
                    "Transaction disconnect_inputs failed, previous"
                    " out n is out of range"
                );

                return false;
            }

            /**
             * Mark the consumed output as unspent again.
             */
            tx_index.spent()[outpoint.n()].set_null();

            /**
             * Persist the modified index entry.
             */
            bool write_ok = tx_db.update_transaction_index(
                outpoint.get_hash(), tx_index
            );

            if (write_ok == false)
            {
                log_error(
                    "Transaction disconnect_inputs failed, "
                    "update_transaction_index failed."
                );

                return false;
            }
        }
    }

    /**
     * Remove transaction from the index. This can fail if a duplicate of this
     * transaction was in a chain that got reorganized away. This is only
     * possible if this transaction was completely spent, so erasing it would
     * be a no-op anyway.
     */
    tx_db.erase_transaction_index(*this);

    return true;
}
bool db_tx::reorganize( db_tx & tx_db, std::shared_ptr<block_index> & index_new ) { log_debug("Db Tx reorganize started."); /** * Find the fork. */ auto fork = stack_impl::get_block_index_best(); auto longer = index_new; while (fork != longer) { while (longer->height() > fork->height()) { if (!(longer = longer->block_index_previous())) { log_error( "Db Tx reorganize failed, (longer) previous block " "index is null." ); return false; } } if (fork == longer) { break; } if (!(fork = fork->block_index_previous())) { log_error( "Db Tx reorganize failed, (fork) previous block " "index is null." ); return false; } } /** * List of what to disconnect. */ std::vector< std::shared_ptr<block_index> > to_disconnect; for ( auto index = stack_impl::get_block_index_best(); index != fork; index = index->block_index_previous() ) { to_disconnect.push_back(index); } /** * List of what to connect. */ std::vector< std::shared_ptr<block_index> > to_connect; for ( auto index = index_new; index != fork; index = index->block_index_previous() ) { to_connect.push_back(index); } std::reverse(to_connect.begin(), to_connect.end()); log_debug( "Db Tx reorganize is disconnecting " << to_disconnect.size() << " blocks; " << fork->get_block_hash().to_string().substr(0, 20) << ".." << stack_impl::get_block_index_best()->get_block_hash().to_string( ).substr() << "."); log_debug( "Db Tx reorganize is connecting " << to_connect.size() << " blocks; " << fork->get_block_hash().to_string().substr(0, 20) << ".." << index_new->get_block_hash().to_string().substr() << "." ); /** * Disconnect shorter branch. */ std::vector<transaction> to_resurrect; for (auto & i : to_disconnect) { block blk; if (blk.read_from_disk(i) == false) { log_error("Db Tx reorganize failed, read from disk failed."); return false; } if (blk.disconnect_block(tx_db, i) == false) { log_error( "Db Tx reorganize failed, disconnect_block failed " << i->get_block_hash().to_string().substr(0, 20) << "." 
); return false; } /** * Queue memory transactions to resurrect. */ for (auto & j : blk.transactions()) { if ((j.is_coin_base() || j.is_coin_stake()) == false) { to_resurrect.push_back(j); } } } /** * Connect longer branch. */ std::vector<transaction> to_delete; for (auto i = 0; i < to_connect.size(); i++) { auto & pindex = to_connect[i]; block blk; if (blk.read_from_disk(pindex) == false) { log_error( "Db Tx reorganize failed, read_from_disk for connect failed." ); return false; } if (blk.connect_block(tx_db, pindex) == false) { /** * Invalid block. */ log_error( "Db Tx reorganize failed, connect block " << pindex->get_block_hash().to_string().substr(0, 20) << " failed." ); return false; } /** * Queue memory transactions to delete. */ for (auto & i : blk.transactions()) { to_delete.push_back(i); } } /** * Write the hash of the best chain. */ if (tx_db.write_hash_best_chain(index_new->get_block_hash()) == false) { log_error("Db Tx reorganize failed, write best hash chain failed."); return false; } /** * Make sure it's successfully written to disk before changing memory * structure. */ if (tx_db.txn_commit() == false) { log_error("Db Tx reorganize failed, txn_commit failed."); return false; } /** * Disconnect shorter branch. */ for (auto & i : to_disconnect) { if (i->block_index_previous()) { i->block_index_previous()->block_index_next() = 0; } } /** * Connect longer branch. */ for (auto & i : to_connect) { if (i->block_index_previous()) { i->block_index_previous()->block_index_next() = i; } } /** * Resurrect memory transactions that were in the disconnected branch. */ for (auto & i : to_resurrect) { i.accept_to_transaction_pool(tx_db); } /** * Delete redundant memory transactions that are in the connected branch. */ for (auto & i : to_delete) { transaction_pool::instance().remove(i); } log_debug("Db Tx reorganize finished."); return true; }
/**
 * Relays this wallet transaction (and any supporting previous transactions
 * not yet in the transaction database) to connected peers over TCP, and
 * optionally broadcasts this transaction over UDP.
 * @param tx_db The transaction database used to skip already-known txs.
 * @param connection_manager Provides the active TCP connections.
 * @param use_udp If true, also broadcast via the database (UDP) stack.
 */
void transaction_wallet::relay_wallet_transaction(
    db_tx & tx_db,
    const std::shared_ptr<tcp_connection_manager> & connection_manager,
    const bool & use_udp
    )
{
    /**
     * Relay supporting transactions first so peers can resolve this
     * transaction's inputs.
     */
    for (auto & i : m_previous_transactions)
    {
        if ((i.is_coin_base() || i.is_coin_stake()) == false)
        {
            auto hash_tx = i.get_hash();

            if (tx_db.contains_transaction(hash_tx) == false)
            {
                inventory_vector inv(
                    inventory_vector::type_msg_tx, hash_tx
                );

                data_buffer buffer;

                i.encode(buffer);

                for (auto & j : connection_manager->tcp_connections())
                {
                    if (auto t = j.second.lock())
                    {
                        t->send_relayed_inv_message(inv, buffer);
                    }
                }
            }
        }
    }

    if ((is_coin_base() || is_coin_stake()) == false)
    {
        auto hash_tx = get_hash();

        if (tx_db.contains_transaction(hash_tx) == false)
        {
            log_debug(
                "Transaction wallet is relaying " <<
                hash_tx.to_string().substr(0, 10) << "."
            );

            /**
             * Allocate the inventory_vector.
             */
            inventory_vector inv(inventory_vector::type_msg_tx, hash_tx);

            /**
             * Allocate the data_buffer.
             */
            data_buffer buffer;

            /**
             * Encode the base transaction only (no merkle information).
             * static_cast (not reinterpret_cast) so the derived-to-base
             * pointer conversion is performed correctly.
             */
            static_cast<transaction *> (this)->encode(buffer);

            /**
             * Relay the transaction over TCP.
             */
            for (auto & i : connection_manager->tcp_connections())
            {
                if (auto t = i.second.lock())
                {
                    t->send_relayed_inv_message(inv, buffer);
                }
            }

            if (use_udp)
            {
                if (wallet_)
                {
                    /**
                     * Allocate the message.
                     */
                    message msg(inv.command(), buffer);

                    /**
                     * Encode the message.
                     */
                    msg.encode();

                    /**
                     * Allocate the UDP packet.
                     */
                    std::vector<std::uint8_t> udp_packet(msg.size());

                    /**
                     * Copy the message to the UDP packet.
                     */
                    std::memcpy(udp_packet.data(), msg.data(), msg.size());

                    /**
                     * Broadcast the message over UDP.
                     */
                    const_cast<stack_impl *> (wallet_->get_stack_impl()
                        )->get_database_stack()->broadcast(udp_packet
                    );
                }
            }
        }
    }
}
/**
 * Rebuilds m_previous_transactions with the ancestor transactions needed
 * to support this one, walking inputs breadth-first up to copy_depth
 * confirmations deep.
 * @param tx_db The transaction database used as a fallback source when a
 * previous transaction is not found in the wallet (non-SPV only).
 */
void transaction_wallet::add_supporting_transactions(db_tx & tx_db)
{
    /**
     * Clear the previous transactions.
     */
    m_previous_transactions.clear();

    const int copy_depth = 3;

    /**
     * Only shallowly-confirmed transactions need supporting ancestors.
     */
    if (set_merkle_branch() < copy_depth)
    {
        /**
         * Seed the work queue with the hashes of our direct inputs. The
         * queue grows while it is being iterated (index-based loop), so
         * newly discovered ancestors are processed too.
         */
        std::vector<sha256> work_queue;

        for (auto & tx_in : transactions_in())
        {
            work_queue.push_back(tx_in.previous_out().get_hash());
        }

        /**
         * Pointers into the wallet's stored previous-transaction lists,
         * keyed by hash; valid while the wallet map is not modified.
         */
        std::map<sha256, const transaction_merkle *> wallet_previous;
        std::set<sha256> already_done;

        for (auto i = 0; i < work_queue.size(); i++)
        {
            sha256 hash = work_queue[i];

            /**
             * Skip hashes we have already resolved.
             */
            if (already_done.count(hash) > 0)
            {
                continue;
            }

            already_done.insert(hash);

            transaction_merkle tx;

            auto it = wallet_->transactions().find(hash);

            if (it != wallet_->transactions().end())
            {
                /**
                 * Found in the wallet; also remember its own previous
                 * transactions for later lookups.
                 */
                tx = it->second;

                for (auto & tx_previous : it->second.previous_transactions())
                {
                    wallet_previous[tx_previous.get_hash()] = &tx_previous;
                }
            }
            else if (wallet_previous.count(hash) > 0)
            {
                tx = *wallet_previous[hash];
            }
            else if (
                globals::instance().is_client_spv() == false &&
                tx_db.read_disk_transaction(hash, tx)
                )
            {
                // ...
            }
            else
            {
                /**
                 * Not resolvable from the wallet or the database; skip it.
                 */
                log_error(
                    "Transaction wallet, add supporting transactions failed, "
                    "unsupported transaction."
                );

                continue;
            }

            auto depth = tx.set_merkle_branch();

            m_previous_transactions.push_back(tx);

            /**
             * Recurse (via the queue) into this ancestor's inputs if it is
             * itself shallowly confirmed.
             */
            if (depth < copy_depth)
            {
                for (auto & txin : tx.transactions_in())
                {
                    work_queue.push_back(txin.previous_out().get_hash());
                }
            }
        }
    }

    /**
     * Reverse the previous transactions (oldest first).
     */
    std::reverse(
        m_previous_transactions.begin(), m_previous_transactions.end()
    );
}