void session_batch::start_connect(const code& ec, const authority& host,
    connector::ptr connect, channel_handler handler)
{
    if (stopped() || ec == error::service_stopped)
    {
        LOG_DEBUG(LOG_NETWORK)
            << "Batch session stopped while starting.";
        handler(error::service_stopped, nullptr);
        return;
    }

    // This termination prevents a tight loop in the empty address pool case.
    if (ec)
    {
        LOG_WARNING(LOG_NETWORK)
            << "Failure fetching new address: " << ec.message();
        handler(ec, nullptr);
        return;
    }

    // This creates a tight loop in the case of a small address pool.
    if (blacklisted(host))
    {
        LOG_DEBUG(LOG_NETWORK)
            << "Fetched blacklisted address [" << host << "] ";
        handler(error::address_blocked, nullptr);
        return;
    }

    LOG_DEBUG(LOG_NETWORK)
        << "Connecting to [" << host << "]";

    // CONNECT
    connect->connect(host, handler);
}
void session_manual::handle_connect(const code& ec, channel::ptr channel,
    const std::string& hostname, uint16_t port, uint32_t remaining,
    connector::ptr connector, channel_handler handler)
{
    unpend(connector);

    if (ec)
    {
        LOG_WARNING(LOG_NETWORK)
            << "Failure connecting [" << config::endpoint(hostname, port)
            << "] manually: " << ec.message();

        // Retry logic.
        // The handler invoke is the failure end of the connect sequence.
        if (settings_.manual_attempt_limit == 0)
            start_connect(hostname, port, 0, handler);
        else if (remaining > 0)
            start_connect(hostname, port, remaining - 1, handler);
        else
            handler(ec, nullptr);

        return;
    }

    LOG_INFO(LOG_NETWORK)
        << "Connected manual channel [" << config::endpoint(hostname, port)
        << "] as [" << channel->authority() << "]";

    register_channel(channel,
        BIND5(handle_channel_start, _1, hostname, port, channel, handler),
        BIND3(handle_channel_stop, _1, hostname, port));
}
// This is fired by the base timer and stop handler.
void protocol_header_sync::handle_event(const code& ec, event_handler complete)
{
    if (ec == error::channel_stopped)
    {
        complete(ec);
        return;
    }

    if (ec && ec != error::channel_timeout)
    {
        log::warning(LOG_PROTOCOL)
            << "Failure in header sync timer for [" << authority() << "] "
            << ec.message();
        complete(ec);
        return;
    }

    // It was a timeout, so another expiry interval has passed.
    current_second_ += expiry_interval.total_seconds();

    // Drop the channel if it falls below the min sync rate averaged over all.
    if (sync_rate() < minimum_rate_)
    {
        log::debug(LOG_PROTOCOL)
            << "Header sync rate (" << sync_rate() << "/sec) from ["
            << authority() << "]";
        complete(error::channel_timeout);
        return;
    }
}
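// The rate check above presumably averages headers received per second over the
// whole sync (current_second_ accumulates one expiry interval per timeout). A
// minimal, self-contained sketch of that averaging idea, using hypothetical names
// rather than the library's actual sync_rate() implementation:
#include <cstddef>

// Average headers per second since the sync began, guarding the first interval
// against division by zero.
inline size_t average_sync_rate(size_t headers_received, size_t elapsed_seconds)
{
    return elapsed_seconds == 0 ? headers_received :
        headers_received / elapsed_seconds;
}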
void session_batch::start_connect(const code& ec, const authority& host,
    connector::ptr connect, atomic_counter_ptr counter, channel_handler handler)
{
    if (counter->load() == batch_size_)
        return;

    // This termination prevents a tight loop in the empty address pool case.
    if (ec)
    {
        log::error(LOG_NETWORK)
            << "Failure fetching new address: " << ec.message();
        handler(ec, nullptr);
        return;
    }

    // This creates a tight loop in the case of a small address pool.
    if (blacklisted(host))
    {
        log::debug(LOG_NETWORK)
            << "Fetched blacklisted address [" << host << "] ";
        handler(error::address_blocked, nullptr);
        return;
    }

    log::debug(LOG_NETWORK)
        << "Connecting to [" << host << "]";

    // CONNECT
    connect->connect(host,
        BIND6(handle_connect, _1, _2, host, connect, counter, handler));
}
void protocol::handle_connect(const code& ec, channel::ptr node,
    const config::authority& peer)
{
    if (ec)
    {
        log_debug(LOG_PROTOCOL)
            << "Failure connecting [" << peer << "] " << ec.message();

        // Restart connection attempt.
        new_connection();
        return;
    }

    // Save the connection as we are now assured of getting a stop event.
    outbound_connections_.push_back(node);

    // Connected!
    log_info(LOG_PROTOCOL)
        << "Connected to peer [" << peer.to_string() << "] ("
        << outbound_connections_.size() << " total)";

    const auto stop_handler = dispatch_.ordered_delegate(
        &protocol::outbound_channel_stopped, this, _1, node, peer.to_string());

    start_talking(node, stop_handler, relay_);
}
void protocol::handle_manual_connect(const code& ec, channel::ptr node,
    const std::string& hostname, uint16_t port, bool relay, size_t retries)
{
    const config::endpoint peer(hostname, port);

    if (ec)
    {
        // Warn because we are supposed to maintain this connection.
        log_warning(LOG_PROTOCOL)
            << "Failure connecting [" << peer << "] manually: "
            << ec.message();

        // Retry connection.
        const config::endpoint address(hostname, port);
        retry_manual_connection(address, relay, retries);
        return;
    }

    // Save the connection as we are now assured of getting a stop event.
    manual_connections_.push_back(node);

    // Connected!
    log_info(LOG_PROTOCOL)
        << "Connected to peer [" << peer << "] manually ("
        << manual_connections_.size() << " total)";

    const auto stop_handler = dispatch_.ordered_delegate(
        &protocol::manual_channel_stopped, this, _1, node, peer.to_string(),
        relay, retries);

    start_talking(node, stop_handler, relay);
}
// This originates from send_header->announcements and get_headers requests.
bool protocol_block_in::handle_receive_headers(const code& ec,
    message::headers::ptr message)
{
    if (stopped())
        return false;

    if (ec)
    {
        log::debug(LOG_NODE)
            << "Failure getting headers from [" << authority() << "] "
            << ec.message();
        stop(ec);
        return false;
    }

    ///////////////////////////////////////////////////////////////////////////
    // There is no benefit to this use of headers, in fact it is suboptimal.
    // In v3 headers will be used to build block tree before getting blocks.
    ///////////////////////////////////////////////////////////////////////////

    hash_list block_hashes;
    message->to_hashes(block_hashes);

    // TODO: implement orphan_pool_.fetch_missing_block_hashes(...)
    handle_fetch_missing_orphans(error::success, block_hashes);
    return true;
}
bool protocol_block_in::handle_receive_not_found(const code& ec,
    message::not_found::ptr message)
{
    if (stopped())
        return false;

    if (ec)
    {
        log::debug(LOG_NODE)
            << "Failure getting block not_found from [" << authority() << "] "
            << ec.message();
        stop(ec);
        return false;
    }

    hash_list hashes;
    message->to_hashes(hashes, inventory_type_id::block);

    // The peer cannot locate a block that it told us it had.
    // This only results from reorganization, assuming the peer is proper.
    for (const auto& hash: hashes)
    {
        log::debug(LOG_NODE)
            << "Block not_found [" << encode_hash(hash) << "] from ["
            << authority() << "]";
    }

    return true;
}
void protocol_block_in::handle_fetch_block_locator(const code& ec,
    const hash_list& locator)
{
    if (stopped() || ec == error::service_stopped)
        return;

    if (ec)
    {
        log::error(LOG_NODE)
            << "Internal failure generating block locator for ["
            << authority() << "] " << ec.message();
        stop(ec);
        return;
    }

    ///////////////////////////////////////////////////////////////////////////
    // TODO: manage the stop_hash_ (see v2).
    ///////////////////////////////////////////////////////////////////////////

    if (headers_from_peer_)
    {
        const get_headers request{ std::move(locator), stop_hash_ };
        SEND2(request, handle_send, _1, request.command);
    }
    else
    {
        const get_blocks request{ std::move(locator), stop_hash_ };
        SEND2(request, handle_send, _1, request.command);
    }
}
void session_inbound::handle_accept(const code& ec, channel::ptr channel,
    acceptor::ptr accept)
{
    if (stopped())
        return;

    start_accept(error::success, accept);

    if (ec)
    {
        log::debug(LOG_NETWORK)
            << "Failure accepting connection: " << ec.message();
        return;
    }

    if (blacklisted(channel->authority()))
    {
        log::debug(LOG_NETWORK)
            << "Rejected inbound connection from [" << channel->authority()
            << "] due to blacklisted address.";
        return;
    }

    connection_count(
        dispatch_.ordered_delegate(&session_inbound::handle_connection_count,
            shared_from_base<session_inbound>(), _1, channel));
}
void protocol_address::handle_receive_get_address(const code& ec,
    const get_address& message)
{
    if (stopped())
        return;

    if (ec)
    {
        log_debug(LOG_PROTOCOL)
            << "Failure receiving get_address message from [" << authority()
            << "] " << ec.message();
        stop(ec);
        return;
    }

    // TODO: allowing repeated queries can allow a peer to map our history.

    // Resubscribe to get_address messages.
    subscribe<get_address>(
        &protocol_address::handle_receive_get_address, _1, _2);

    // TODO: pull active hosts from host cache (currently just resending self).
    // TODO: need to distort for privacy, don't send currently-connected peers.
    address active({ { self_.to_network_address() } });

    if (active.addresses.empty())
        return;

    log_debug(LOG_PROTOCOL)
        << "Sending addresses to [" << authority() << "] ("
        << active.addresses.size() << ")";

    send(active, &protocol_address::handle_send_address, _1);
}
bool protocol_ping::handle_receive_pong(const code& ec,
    message::pong::ptr message, uint64_t nonce)
{
    if (stopped())
        return false;

    if (ec)
    {
        log::debug(LOG_NETWORK)
            << "Failure getting pong from [" << authority() << "] "
            << ec.message();
        stop(ec);
        return false;
    }

    if (message->nonce != nonce)
    {
        log::warning(LOG_NETWORK)
            << "Invalid pong nonce from [" << authority() << "]";

        // This could result from message overlap due to a short period,
        // but we assume the response is not as expected and terminate.
        stop(error::bad_stream);
    }

    return false;
}
void session_manual::handle_connect(const code& ec, channel::ptr channel,
    const std::string& hostname, uint16_t port, channel_handler handler,
    uint16_t retries)
{
    if (ec)
    {
        log::warning(LOG_NETWORK)
            << "Failure connecting [" << config::endpoint(hostname, port)
            << "] manually: " << ec.message();

        // Retry logic.
        if (settings_.connect_attempts == 0)
            start_connect(hostname, port, handler, 0);
        else if (retries > 0)
            start_connect(hostname, port, handler, retries - 1);
        else
            handler(ec, nullptr);

        return;
    }

    log::info(LOG_NETWORK)
        << "Connected manual channel [" << config::endpoint(hostname, port)
        << "] as [" << channel->authority() << "]";

    register_channel(channel,
        std::bind(&session_manual::handle_channel_start,
            shared_from_base<session_manual>(), _1, hostname, port, channel,
            handler),
        std::bind(&session_manual::handle_channel_stop,
            shared_from_base<session_manual>(), _1, hostname, port));
}
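// The retry branches above encode the manual-connect policy: a configured
// attempt limit of zero means retry indefinitely, otherwise retry while
// attempts remain and then fail over to the handler. A minimal, self-contained
// sketch of that decision (hypothetical names, not part of the library):
#include <cstdint>

enum class manual_retry { unlimited, counted, give_up };

// Mirrors the branch order used by handle_connect above.
inline manual_retry next_manual_retry(uint32_t attempt_limit, uint32_t remaining)
{
    if (attempt_limit == 0)
        return manual_retry::unlimited;

    return remaining > 0 ? manual_retry::counted : manual_retry::give_up;
}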
// We don't seem to be getting getdata requests.
void responder::receive_get_data(const code& ec, const get_data& packet,
    channel::ptr node)
{
    if (ec == error::channel_stopped)
        return;

    const auto peer = node->authority();

    if (ec)
    {
        log::debug(LOG_RESPONDER)
            << "Failure in receive get data [" << peer << "] " << ec.message();
        node->stop(ec);
        return;
    }

    // Resubscribe to serve tx and blocks.
    node->subscribe<message::get_data>(
        std::bind(&responder::receive_get_data,
            this, _1, _2, node));

    log::debug(LOG_RESPONDER)
        << "Getdata BEGIN [" << peer << "] "
        << "txs (" << packet.count(inventory_type_id::transaction) << ") "
        << "blocks (" << packet.count(inventory_type_id::block) << ") "
        << "bloom (" << packet.count(inventory_type_id::filtered_block) << ")";

    for (const auto& inventory: packet.inventories)
    {
        switch (inventory.type)
        {
            case inventory_type_id::transaction:
                log::debug(LOG_RESPONDER)
                    << "Transaction getdata for [" << peer << "] "
                    << encode_hash(inventory.hash);
                tx_pool_.fetch(inventory.hash,
                    std::bind(&responder::send_pool_tx,
                        this, _1, _2, inventory.hash, node));
                break;

            case inventory_type_id::block:
                log::debug(LOG_RESPONDER)
                    << "Block getdata for [" << peer << "] "
                    << encode_hash(inventory.hash);
                block_fetcher::fetch(blockchain_, inventory.hash,
                    std::bind(&responder::send_block,
                        this, _1, _2, inventory.hash, node));
                break;

            case inventory_type_id::error:
            case inventory_type_id::none:
            default:
                log::debug(LOG_RESPONDER)
                    << "Ignoring invalid getdata type for [" << peer << "]";
        }
    }

    log::debug(LOG_RESPONDER)
        << "Getdata END [" << peer << "]";
}
void responder::send_pool_tx(const code& ec, const transaction& tx,
    const hash_digest& tx_hash, channel::ptr node)
{
    if (ec == error::service_stopped)
        return;

    if (ec == error::not_found)
    {
        log::debug(LOG_RESPONDER)
            << "Transaction for [" << node->authority()
            << "] not in mempool [" << encode_hash(tx_hash) << "]";

        // It wasn't in the mempool, so relay the request to the blockchain.
        blockchain_.fetch_transaction(tx_hash,
            std::bind(&responder::send_chain_tx,
                this, _1, _2, tx_hash, node));
        return;
    }

    if (ec)
    {
        log::error(LOG_RESPONDER)
            << "Failure fetching mempool tx data for [" << node->authority()
            << "] " << ec.message();
        node->stop(ec);
        return;
    }

    send_tx(tx, tx_hash, node);
}
// en.bitcoin.it/wiki/Protocol_documentation#getdata
// getdata can be used to retrieve transactions, but only if they are
// in the memory pool or relay set - arbitrary access to transactions
// in the chain is not allowed to avoid having clients start to depend
// on nodes having full transaction indexes (which modern nodes do not).
void responder::send_chain_tx(const code& ec, const transaction& tx,
    const hash_digest& tx_hash, channel::ptr node)
{
    if (ec == error::service_stopped)
        return;

    if (ec == error::not_found)
    {
        log::debug(LOG_RESPONDER)
            << "Transaction for [" << node->authority()
            << "] not in blockchain [" << encode_hash(tx_hash) << "]";

        // It wasn't in the blockchain, so send notfound.
        send_tx_not_found(tx_hash, node);
        return;
    }

    if (ec)
    {
        log::error(LOG_RESPONDER)
            << "Failure fetching blockchain tx data for ["
            << node->authority() << "] " << ec.message();
        node->stop(ec);
        return;
    }

    send_tx(tx, tx_hash, node);
}
void session_inbound::handle_accept(const code& ec, channel::ptr channel,
    acceptor::ptr accept)
{
    if (stopped())
    {
        log::debug(LOG_NETWORK)
            << "Suspended inbound connection.";
        return;
    }

    start_accept(error::success, accept);

    if (ec)
    {
        log::debug(LOG_NETWORK)
            << "Failure accepting connection: " << ec.message();
        return;
    }

    if (blacklisted(channel->authority()))
    {
        log::debug(LOG_NETWORK)
            << "Rejected inbound connection from [" << channel->authority()
            << "] due to blacklisted address.";
        return;
    }

    connection_count(BIND2(handle_connection_count, _1, channel));
}
void protocol_address::handle_receive_address(const code& ec,
    const address& message)
{
    if (stopped())
        return;

    if (ec)
    {
        log_debug(LOG_PROTOCOL)
            << "Failure receiving address message from [" << authority()
            << "] " << ec.message();
        stop(ec);
        return;
    }

    // Resubscribe to address messages.
    subscribe<address>(&protocol_address::handle_receive_address, _1, _2);

    log_debug(LOG_PROTOCOL)
        << "Storing addresses from [" << authority() << "] ("
        << message.addresses.size() << ")";

    // TODO: manage timestamps (active peers are connected < 3 hours ago).
    hosts_.store(message.addresses,
        bind(&protocol_address::handle_store_addresses, _1));
}
void session::handle_remove(const code& ec, channel::ptr channel)
{
    if (ec)
        LOG_DEBUG(LOG_NETWORK)
            << "Failed to remove channel [" << channel->authority() << "] "
            << ec.message();
}
void session_outbound::handle_channel_stop(const code& ec,
    connector::ptr connect, channel::ptr channel)
{
    log::debug(LOG_NETWORK)
        << "Outbound channel stopped [" << channel->authority() << "] "
        << ec.message();

    new_connection(connect);
}
void protocol::inbound_channel_stopped(const code& ec, channel::ptr node,
    const std::string& address)
{
    log_debug(LOG_PROTOCOL)
        << "Channel stopped (inbound) [" << address << "] " << ec.message();

    // We never attempt to reconnect inbound connections.
    remove_connection(inbound_connections_, node);
}
// new blocks come in - remove txs in new
// old blocks taken out - resubmit txs in old
bool transaction_pool::handle_reorganized(const code& ec, size_t fork_point,
    const block::ptr_list& new_blocks, const block::ptr_list& replaced_blocks)
{
    if (ec == error::service_stopped)
    {
        log::debug(LOG_BLOCKCHAIN)
            << "Stopping transaction pool: " << ec.message();
        return false;
    }

    if (ec)
    {
        log::debug(LOG_BLOCKCHAIN)
            << "Failure in tx pool reorganize handler: " << ec.message();
        return false;
    }

    log::debug(LOG_BLOCKCHAIN)
        << "Reorganize: tx pool size (" << buffer_.size()
        << ") forked at (" << fork_point
        << ") new blocks (" << new_blocks.size()
        << ") replace blocks (" << replaced_blocks.size() << ")";

    if (replaced_blocks.empty())
    {
        // Remove memory pool transactions that also exist in new blocks.
        dispatch_.ordered(
            std::bind(&transaction_pool::remove,
                this, new_blocks));
    }
    else
    {
        // See http://www.jwz.org/doc/worse-is-better.html
        // for why we take this approach. We return with an error_code.
        // An alternative would be to resubmit all tx from the cleared blocks.
        dispatch_.ordered(
            std::bind(&transaction_pool::clear,
                this, error::blockchain_reorganized));
    }

    return true;
}
void session_manual::handle_channel_stop(const code& ec,
    const std::string& hostname, uint16_t port)
{
    LOG_DEBUG(LOG_NETWORK)
        << "Manual channel stopped: " << ec.message();

    // Special case for already connected, do not keep trying.
    // After a stop we don't use the caller's start handler, but keep connecting.
    if (ec != error::address_in_use)
        connect(hostname, port);
}
void protocol_base_base::handle_timer(const code& ec)
{
    if (stopped() || deadline::canceled(ec))
        return;

    log_debug(LOG_PROTOCOL)
        << "Fired " << name_ << " protocol timer on [" << authority() << "] "
        << ec.message();

    callback(error::channel_timeout);
}
bool callback_state::succeeded(const code& ec, const std::string& format)
{
    if (ec)
    {
        // May want to change the behavior to decrement vs. zeroizing refs.
        error(boost::format(format) % ec.message());
        stop(console_result::failure);
        return false;
    }

    return true;
}
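// A standalone illustration of the Boost.Format pattern used above (assumes only
// Boost; the format string and message text here are examples, not from the
// source): the caller supplies a format with a %1% placeholder and succeeded()
// substitutes the error message into it.
#include <boost/format.hpp>
#include <iostream>
#include <string>

int main()
{
    const std::string format = "Failure fetching history: %1%";
    const auto message = (boost::format(format) % "operation timed out").str();

    // Prints: Failure fetching history: operation timed out
    std::cout << message << std::endl;
    return 0;
}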
void p2p::handle_running(const code& ec, result_handler handler)
{
    if (ec)
    {
        LOG_ERROR(LOG_NETWORK)
            << "Error starting outbound session: " << ec.message();
        handler(ec);
        return;
    }

    // This is the end of the run sequence.
    handler(error::success);
}
void session_inbound::handle_channel_start(const code& ec, channel::ptr channel)
{
    if (ec)
    {
        LOG_INFO(LOG_NETWORK)
            << "Inbound channel failed to start [" << channel->authority()
            << "] " << ec.message();
        return;
    }

    attach_protocols(channel);
}
void protocol::outbound_channel_stopped(const code& ec, channel::ptr node,
    const std::string& address)
{
    log_debug(LOG_PROTOCOL)
        << "Channel stopped (outbound) [" << address << "] " << ec.message();

    remove_connection(outbound_connections_, node);

    // If not shutdown we always create a replacement outbound connection.
    if (ec != error::service_stopped)
        new_connection();
}
void protocol::manual_channel_stopped(const code& ec, channel::ptr node,
    const std::string& address, bool relay, size_t retries)
{
    log_debug(LOG_PROTOCOL)
        << "Channel stopped (manual) [" << address << "] " << ec.message();

    remove_connection(manual_connections_, node);

    // If not shutdown we always attempt to reconnect manual connections.
    if (ec != error::service_stopped)
        retry_manual_connection(address, relay, retries);
}
void protocol_ping::handle_send_pong(const code& ec)
{
    if (stopped())
        return;

    if (ec)
    {
        log::debug(LOG_NETWORK)
            << "Failure sending pong to [" << authority() << "] "
            << ec.message();
        stop(ec);
    }
}