// Handles a reply to a message sent as part of a traversal. Parses the
// "r" reply dictionary, feeds any compact "nodes" entries back into the
// traversal algorithm, and records the responder's node-ID on this
// observer.
void traversal_observer::reply(msg const& m)
{
	lazy_entry const* r = m.message.dict_find_dict("r");
	if (!r)
	{
#ifdef TORRENT_DHT_VERBOSE_LOGGING
		TORRENT_LOG(traversal) << "[" << m_algorithm.get()
			<< "] missing response dict";
#endif
		return;
	}

	// look for nodes
	lazy_entry const* n = r->dict_find_string("nodes");
	if (n)
	{
		char const* nodes = n->string_ptr();
		char const* end = nodes + n->string_length();

		// each compact node entry is 26 bytes: a 20-byte node-ID
		// followed by a 6-byte IPv4 endpoint. Any trailing partial
		// entry is ignored.
		while (end - nodes >= 26)
		{
			node_id id;
			std::copy(nodes, nodes + 20, id.begin());
			nodes += 20;
			// NOTE(review): read_v4_endpoint presumably advances
			// 'nodes' past the 6 endpoint bytes — the loop relies on
			// that; confirm against its definition
			m_algorithm->traverse(id, read_v4_endpoint<udp::endpoint>(nodes));
		}
	}

	lazy_entry const* id = r->dict_find_string("id");
	if (!id || id->string_length() != 20)
	{
#ifdef TORRENT_DHT_VERBOSE_LOGGING
		TORRENT_LOG(traversal) << "[" << m_algorithm.get()
			<< "] invalid id in response";
#endif
		return;
	}

	// in case we didn't know the id of this peer when we sent the message to
	// it. For instance if it's a bootstrap node.
	set_id(node_id(id->string_ptr()));
}
void traversal_algorithm::add_router_entries() { #ifdef TORRENT_DHT_VERBOSE_LOGGING TORRENT_LOG(traversal) << "[" << this << "] using router nodes to initiate traversal algorithm. " << std::distance(m_node.m_table.router_begin(), m_node.m_table.router_end()) << " routers"; #endif for (routing_table::router_iterator i = m_node.m_table.router_begin() , end(m_node.m_table.router_end()); i != end; ++i) { add_entry(node_id(0), *i, observer::flag_initial); } }
// Called when an ICMP port-unreachable arrives for 'ep'. Finds the
// first outstanding transaction addressed to that endpoint, removes it
// and times it out immediately rather than waiting for the regular
// timeout.
void rpc_manager::unreachable(udp::endpoint const& ep)
{
#ifdef TORRENT_DHT_VERBOSE_LOGGING
	TORRENT_LOG(rpc) << time_now_string() << " PORT_UNREACHABLE [ ip: " << ep << " ]";
#endif

	for (transactions_t::iterator i = m_transactions.begin(); i != m_transactions.end();)
	{
		TORRENT_ASSERT(*i);
		observer_ptr const& o = *i;
		if (o->target_ep() != ep) { ++i; continue; }
		// take a strong reference before erasing, so the observer
		// stays alive for the timeout() call below
		observer_ptr ptr = *i;
		m_transactions.erase(i++);
#ifdef TORRENT_DHT_VERBOSE_LOGGING
		TORRENT_LOG(rpc) << " found transaction [ tid: " << ptr->transaction_id() << " ]";
#endif
		// timeout() may re-enter this manager and mutate
		// m_transactions, which is why we erase first and stop
		// iterating after the first match
		ptr->timeout();
		break;
	}
}
// Constructs the rpc_manager (callback flavor): outgoing messages are
// handed to 'sf' together with 'userdata'; 'ext_ip' is invoked when a
// peer reports our external IP. Observers are allocated from a fixed
// size pool (observer_size bytes each, 10 per pool block).
rpc_manager::rpc_manager(node_id const& our_id
	, routing_table& table, send_fun const& sf
	, void* userdata
	, external_ip_fun ext_ip)
	: m_pool_allocator(observer_size, 10)
	, m_send(sf)
	, m_userdata(userdata)
	, m_our_id(our_id)
	, m_table(table)
	, m_timer(time_now())
	, m_random_number(generate_random_id())
	, m_allocated_observers(0)
	, m_destructing(false)
	, m_ext_ip(ext_ip)
{
	// seed the global C PRNG; rand() is used for transaction IDs in
	// invoke(). NOTE(review): time(0) is a weak second-granularity
	// seed and srand() affects the entire process — confirm this is
	// intentional
	std::srand(time(0));

#ifdef TORRENT_DHT_VERBOSE_LOGGING
	TORRENT_LOG(rpc) << "Constructing";

	// dump the layout of the observer struct, to help keep
	// observer_size in sync with the actual type
#define PRINT_OFFSETOF(x, y) TORRENT_LOG(rpc) << " +" << offsetof(x, y) << ": " #y

	TORRENT_LOG(rpc) << " observer: " << sizeof(observer);
	PRINT_OFFSETOF(observer, m_sent);
	PRINT_OFFSETOF(observer, m_refs);
	PRINT_OFFSETOF(observer, m_algorithm);
	PRINT_OFFSETOF(observer, m_id);
	PRINT_OFFSETOF(observer, m_addr);
	PRINT_OFFSETOF(observer, m_port);
	PRINT_OFFSETOF(observer, m_transaction_id);
	PRINT_OFFSETOF(observer, flags);

	TORRENT_LOG(rpc) << " announce_observer: " << sizeof(announce_observer);
	TORRENT_LOG(rpc) << " null_observer: " << sizeof(null_observer);
	TORRENT_LOG(rpc) << " find_data_observer: " << sizeof(find_data_observer);
#undef PRINT_OFFSETOF
#endif
}
rpc_manager::rpc_manager(node_id const& our_id , routing_table& table, udp_socket_interface* sock , dht_observer* observer) : m_pool_allocator(observer_size, 10) , m_sock(sock) , m_our_id(our_id) , m_table(table) , m_timer(time_now()) , m_random_number(generate_random_id()) , m_allocated_observers(0) , m_destructing(false) , m_observer(observer) { std::srand(time(0)); #ifdef TORRENT_DHT_VERBOSE_LOGGING TORRENT_LOG(rpc) << "Constructing"; #define PRINT_OFFSETOF(x, y) TORRENT_LOG(rpc) << " +" << offsetof(x, y) << ": " #y TORRENT_LOG(rpc) << " observer: " << sizeof(observer); PRINT_OFFSETOF(dht::observer, m_sent); PRINT_OFFSETOF(dht::observer, m_refs); PRINT_OFFSETOF(dht::observer, m_algorithm); PRINT_OFFSETOF(dht::observer, m_id); PRINT_OFFSETOF(dht::observer, m_addr); PRINT_OFFSETOF(dht::observer, m_port); PRINT_OFFSETOF(dht::observer, m_transaction_id); PRINT_OFFSETOF(dht::observer, flags); TORRENT_LOG(rpc) << " announce_observer: " << sizeof(announce_observer); TORRENT_LOG(rpc) << " null_observer: " << sizeof(null_observer); TORRENT_LOG(rpc) << " find_data_observer: " << sizeof(find_data_observer); #undef PRINT_OFFSETOF #endif }
// A node was included in a reply's node list: tell the routing table
// it may exist, then add it to this search's result set.
void traversal_algorithm::traverse(node_id const& id, udp::endpoint addr)
{
#ifdef TORRENT_DHT_VERBOSE_LOGGING
	if (id.is_all_zeros())
		TORRENT_LOG(traversal) << time_now_string() << "[" << this << "] WARNING node returned a list which included a node with id 0";
#endif

	// let the routing table know this node may exist
	m_node.m_table.heard_about(id, addr);

	// no extra flags: this is a regular (non-initial) entry
	add_entry(id, addr, 0);
}
// Tears down the RPC manager. Every transaction still in flight is
// aborted so its observer is released. m_destructing is set first so
// that re-entrant calls into this manager (e.g. invoke/incoming) bail
// out early.
rpc_manager::~rpc_manager()
{
	TORRENT_ASSERT(!m_destructing);
	m_destructing = true;
#ifdef TORRENT_DHT_VERBOSE_LOGGING
	TORRENT_LOG(rpc) << "Destructing";
#endif

	for (transactions_t::iterator i = m_transactions.begin()
		, end(m_transactions.end()); i != end; ++i)
	{
		(*i)->abort();
	}
}
void bootstrap::done() { #ifdef TORRENT_DHT_VERBOSE_LOGGING TORRENT_LOG(traversal) << " [" << this << "]" << " bootstrap done, pinging remaining nodes"; #endif for (std::vector<observer_ptr>::iterator i = m_results.begin() , end(m_results.end()); i != end; ++i) { if ((*i)->flags & observer::flag_queried) continue; // this will send a ping m_node.add_node((*i)->target_ep()); } refresh::done(); }
// Creates a traversal toward 'target'. The branch factor (number of
// parallel outstanding requests) starts at 3, and the number of
// results to collect is the routing table's bucket size (i.e. k).
traversal_algorithm::traversal_algorithm(
	node_impl& node
	, node_id target)
	: m_ref_count(0)
	, m_node(node)
	, m_target(target)
	, m_invoke_count(0)
	, m_branch_factor(3)
	, m_responses(0)
	, m_timeouts(0)
	, m_num_target_nodes(m_node.m_table.bucket_size())
{
#ifdef TORRENT_DHT_VERBOSE_LOGGING
	TORRENT_LOG(traversal) << "[" << this << "] NEW"
		" target: " << target
		<< " k: " << m_node.m_table.bucket_size()
		;
#endif
}
bool rpc_manager::invoke(entry& e, udp::endpoint target_addr , observer_ptr o) { INVARIANT_CHECK; if (m_destructing) return false; e["y"] = "q"; entry& a = e["a"]; add_our_id(a); std::string transaction_id; transaction_id.resize(2); char* out = &transaction_id[0]; int tid = rand() ^ (rand() << 5); io::write_uint16(tid, out); e["t"] = transaction_id; o->set_target(target_addr); o->set_transaction_id(tid); #ifdef TORRENT_DHT_VERBOSE_LOGGING TORRENT_LOG(rpc) << "[" << o->m_algorithm.get() << "] invoking " << e["q"].string() << " -> " << target_addr; #endif if (m_send(m_userdata, e, target_addr, 1)) { m_transactions.push_back(o); #if defined TORRENT_DEBUG || TORRENT_RELEASE_ASSERTS o->m_was_sent = true; #endif return true; } return false; }
bool rpc_manager::invoke(entry& e, udp::endpoint target_addr , observer_ptr o) { INVARIANT_CHECK; if (m_destructing) return false; e["y"] = "q"; entry& a = e["a"]; add_our_id(a); std::string transaction_id; transaction_id.resize(2); char* out = &transaction_id[0]; int tid = (random() ^ (random() << 5)) & 0xffff; io::write_uint16(tid, out); e["t"] = transaction_id; o->set_target(target_addr); o->set_transaction_id(tid); #ifdef TORRENT_DHT_VERBOSE_LOGGING TORRENT_LOG(rpc) << "[" << o->m_algorithm.get() << "] invoking " << e["q"].string() << " -> " << target_addr; #endif if (m_sock->send_packet(e, target_addr, 1)) { m_transactions.insert(std::make_pair(tid,o)); #if TORRENT_USE_ASSERTS o->m_was_sent = true; #endif return true; } return false; }
// Issues new requests to the closest not-yet-queried nodes in
// m_results, keeping up to m_branch_factor requests in flight (at the
// top of the list when aggressive lookups are on). Returns true when
// the traversal is complete.
bool traversal_algorithm::add_requests()
{
	int results_target = m_num_target_nodes;

	// this only counts outstanding requests at the top of the
	// target list. This is <= m_invoke count. m_invoke_count
	// is the total number of outstanding requests, including
	// old ones that may be waiting on nodes much farther behind
	// the current point we've reached in the search.
	int outstanding = 0;

	// if we're doing aggressive lookups, we keep branch-factor
	// outstanding requests _at the tops_ of the result list. Otherwise
	// we just keep any branch-factor outstanding requests
	bool agg = m_node.settings().aggressive_lookups;

	// Find the first node that hasn't already been queried.
	// and make sure that the 'm_branch_factor' top nodes
	// stay queried at all times (obviously ignoring failed nodes)
	// and without surpassing the 'result_target' nodes (i.e. k=8)
	// this is a slight variation of the original paper which instead
	// limits the number of outstanding requests, this limits the
	// number of good outstanding requests. It will use more traffic,
	// but is intended to speed up lookups
	for (std::vector<observer_ptr>::iterator i = m_results.begin()
		, end(m_results.end()); i != end
		&& results_target > 0
		&& (agg ? outstanding < m_branch_factor
			: m_invoke_count < m_branch_factor);
		++i)
	{
		observer* o = i->get();
		if (o->flags & observer::flag_alive)
		{
			// a completed result; counts towards the k target
			TORRENT_ASSERT(o->flags & observer::flag_queried);
			--results_target;
			continue;
		}
		if (o->flags & observer::flag_queried)
		{
			// if it's queried, not alive and not failed, it
			// must be currently in flight
			if ((o->flags & observer::flag_failed) == 0) ++outstanding;
			continue;
		}

#ifdef TORRENT_DHT_VERBOSE_LOGGING
		TORRENT_LOG(traversal) << "[" << this << "] INVOKE "
			<< " nodes-left: " << (m_results.end() - i)
			<< " top-invoke-count: " << outstanding
			<< " invoke-count: " << m_invoke_count
			<< " branch-factor: " << m_branch_factor
			<< " distance: " << distance_exp(m_target, (*i)->id())
			<< " type: " << name()
			;
#endif

		// mark as queried before invoking, so a re-entrant call
		// doesn't pick this node again
		o->flags |= observer::flag_queried;
		if (invoke(*i))
		{
			TORRENT_ASSERT(m_invoke_count >= 0);
			++m_invoke_count;
			++outstanding;
		}
		else
		{
			o->flags |= observer::flag_failed;
		}
	}

	// this is the completion condition. If we found m_num_target_nodes
	// (i.e. k=8) completed results, without finding any still
	// outstanding requests, we're done.
	// also, if invoke count is 0, it means we didn't even find 'k'
	// working nodes, we still have to terminate though.
	return (results_target == 0 && outstanding == 0) || m_invoke_count == 0;
}
// Handles a failed request to observer 'o'. A short timeout bumps the
// branch factor (keeping the handler around for a late reply); a full
// timeout marks the node failed and releases its invoke slot. Finally
// more requests are issued, and the traversal completes if none remain.
//
// prevent request means that the total number of requests has
// overflown. This query failed because it was the oldest one.
// So, if this is true, don't make another request
void traversal_algorithm::failed(observer_ptr o, int flags)
{
	TORRENT_ASSERT(m_invoke_count >= 0);

	// don't tell the routing table about
	// node ids that we just generated ourself
	if ((o->flags & observer::flag_no_id) == 0)
		m_node.m_table.node_failed(o->id(), o->target_ep());

	if (m_results.empty()) return;

	TORRENT_ASSERT(o->flags & observer::flag_queried);
	if (flags & short_timeout)
	{
		// short timeout means that it has been more than
		// two seconds since we sent the request, and that
		// we'll most likely not get a response. But, in case
		// we do get a late response, keep the handler
		// around for some more, but open up the slot
		// by increasing the branch factor
		if ((o->flags & observer::flag_short_timeout) == 0)
			++m_branch_factor;
		o->flags |= observer::flag_short_timeout;
#ifdef TORRENT_DHT_VERBOSE_LOGGING
		TORRENT_LOG(traversal) << "[" << this << "] 1ST_TIMEOUT "
			<< " id: " << o->id()
			<< " distance: " << distance_exp(m_target, o->id())
			<< " addr: " << o->target_ep()
			<< " branch-factor: " << m_branch_factor
			<< " invoke-count: " << m_invoke_count
			<< " type: " << name()
			;
#endif
	}
	else
	{
		o->flags |= observer::flag_failed;
		// if this flag is set, it means we increased the
		// branch factor for it, and we should restore it
		if (o->flags & observer::flag_short_timeout)
			--m_branch_factor;

#ifdef TORRENT_DHT_VERBOSE_LOGGING
		TORRENT_LOG(traversal) << "[" << this << "] TIMEOUT "
			<< " id: " << o->id()
			<< " distance: " << distance_exp(m_target, o->id())
			<< " addr: " << o->target_ep()
			<< " branch-factor: " << m_branch_factor
			<< " invoke-count: " << m_invoke_count
			<< " type: " << name()
			;
#endif
		++m_timeouts;
		// the request slot is free again
		--m_invoke_count;
		TORRENT_ASSERT(m_invoke_count >= 0);
	}

	if (flags & prevent_request)
	{
		// shrink the window, but never below one outstanding request
		--m_branch_factor;
		if (m_branch_factor <= 0) m_branch_factor = 1;
	}
	bool is_done = add_requests();
	if (is_done) done();
}
// Adds a candidate (id, addr) to this traversal's result set, unless
// an entry with the same ID — or, when restrict_search_ips is on, an
// IP in the same CIDR neighborhood — already exists. 'flags' is OR'ed
// into the new observer. The result list is kept sorted by
// XOR-distance to m_target and capped at 100 entries.
void traversal_algorithm::add_entry(node_id const& id, udp::endpoint addr, unsigned char flags)
{
	TORRENT_ASSERT(m_node.m_rpc.allocation_size() >= sizeof(find_data_observer));
	void* ptr = m_node.m_rpc.allocate_observer();
	if (ptr == 0)
	{
		// out of observer pool memory: give up on the whole traversal
#ifdef TORRENT_DHT_VERBOSE_LOGGING
		TORRENT_LOG(traversal) << "[" << this << "] failed to allocate memory for observer. aborting!";
#endif
		done();
		return;
	}
	observer_ptr o = new_observer(ptr, addr, id);
	if (id.is_all_zeros())
	{
		// we don't know this node's ID yet (e.g. a router node).
		// give it a random one so it sorts somewhere in the result
		// set, and remember that the ID is made up
		o->set_id(generate_random_id());
		o->flags |= observer::flag_no_id;
	}

	o->flags |= flags;

	TORRENT_ASSERT(libtorrent::dht::is_sorted(m_results.begin(), m_results.end()
		, boost::bind(
			compare_ref
			, boost::bind(&observer::id, _1)
			, boost::bind(&observer::id, _2)
			, m_target)
		));

	// find the insertion point that keeps m_results ordered by
	// XOR-distance to the target
	std::vector<observer_ptr>::iterator i = std::lower_bound(
		m_results.begin()
		, m_results.end()
		, o
		, boost::bind(
			compare_ref
			, boost::bind(&observer::id, _1)
			, boost::bind(&observer::id, _2)
			, m_target
		)
	);

	if (i == m_results.end() || (*i)->id() != id)
	{
		if (m_node.settings().restrict_search_ips
			&& !(flags & observer::flag_initial))
		{
			// don't allow multiple entries from IPs very close to each other
			std::vector<observer_ptr>::iterator j = std::find_if(
				m_results.begin(), m_results.end(), boost::bind(&compare_ip_cidr, _1, o));

			if (j != m_results.end())
			{
				// we already have a node in this search with an IP very
				// close to this one. We know that it's not the same, because
				// it claims a different node-ID. Ignore this to avoid attacks
#ifdef TORRENT_DHT_VERBOSE_LOGGING
				TORRENT_LOG(traversal) << "[" << this << "] IGNORING result "
					<< "id: " << o->id()
					<< " address: " << o->target_addr()
					<< " existing node: "
					<< (*j)->id() << " " << (*j)->target_addr()
					<< " distance: " << distance_exp(m_target, o->id())
					<< " type: " << name()
					;
#endif
				return;
			}
		}

		TORRENT_ASSERT((o->flags & observer::flag_no_id)
			|| std::find_if(m_results.begin(), m_results.end()
				, boost::bind(&observer::id, _1) == id) == m_results.end());

#ifdef TORRENT_DHT_VERBOSE_LOGGING
		TORRENT_LOG(traversal) << "[" << this << "] ADD id: " << id
			<< " address: " << addr
			<< " distance: " << distance_exp(m_target, id)
			<< " invoke-count: " << m_invoke_count
			<< " type: " << name()
			;
#endif
		i = m_results.insert(i, o);

		TORRENT_ASSERT(libtorrent::dht::is_sorted(m_results.begin(), m_results.end()
			, boost::bind(
				compare_ref
				, boost::bind(&observer::id, _1)
				, boost::bind(&observer::id, _2)
				, m_target)
			));
	}

	if (m_results.size() > 100)
	{
		// cap the result set; entries beyond the 100 closest are
		// dropped (marked abandoned in debug builds)
#if TORRENT_USE_ASSERTS
		for (int i = 100; i < int(m_results.size()); ++i)
			m_results[i]->m_was_abandoned = true;
#endif
		m_results.resize(100);
	}
}
// Dispatches an incoming reply to the observer that issued the
// matching request (matched by transaction ID and sender address).
// Also processes an optional "ip" field reporting our external
// address. Returns true if the responding node was added to the
// routing table; sends an error message back on malformed replies.
bool rpc_manager::incoming(msg const& m, node_id* id)
{
	INVARIANT_CHECK;

	if (m_destructing) return false;

	// we only deal with replies, not queries
	TORRENT_ASSERT(m.message.dict_find_string_value("y") == "r");

	// if we don't have the transaction id in our
	// request list, ignore the packet
	std::string transaction_id = m.message.dict_find_string_value("t");

	std::string::const_iterator i = transaction_id.begin();
	// -1 never matches a real transaction ID, so a malformed "t"
	// falls through to the error path below
	int tid = transaction_id.size() != 2 ? -1 : io::read_uint16(i);

	observer_ptr o;

	// linear scan over pending transactions; the reply must also come
	// from the exact address the request was sent to
	for (transactions_t::iterator i = m_transactions.begin()
		, end(m_transactions.end()); i != end; ++i)
	{
		TORRENT_ASSERT(*i);
		if ((*i)->transaction_id() != tid) continue;
		if (m.addr.address() != (*i)->target_addr()) continue;
		o = *i;
		m_transactions.erase(i);
		break;
	}

	if (!o)
	{
#ifdef TORRENT_DHT_VERBOSE_LOGGING
		TORRENT_LOG(rpc) << "Reply with invalid transaction id size: "
			<< transaction_id.size() << " from " << m.addr;
#endif
		entry e;
		incoming_error(e, "invalid transaction id");
		m_send(m_userdata, e, m.addr, 0);
		return false;
	}

#ifdef TORRENT_DHT_VERBOSE_LOGGING
	// per-reply round-trip time, for offline analysis
	std::ofstream reply_stats("round_trip_ms.log", std::ios::app);
	reply_stats << m.addr << "\t" << total_milliseconds(time_now_hires() - o->sent())
		<< std::endl;
#endif

	lazy_entry const* ret_ent = m.message.dict_find_dict("r");
	if (ret_ent == 0)
	{
		entry e;
		incoming_error(e, "missing 'r' key");
		m_send(m_userdata, e, m.addr, 0);
		return false;
	}

	lazy_entry const* node_id_ent = ret_ent->dict_find_string("id");
	if (node_id_ent == 0 || node_id_ent->string_length() != 20)
	{
		entry e;
		incoming_error(e, "missing 'id' key");
		m_send(m_userdata, e, m.addr, 0);
		return false;
	}

	// the reply may tell us our external IP (4 bytes v4, 16 bytes v6)
	lazy_entry const* ext_ip = ret_ent->dict_find_string("ip");
	if (ext_ip && ext_ip->string_length() == 4)
	{
		// this node claims we use the wrong node-ID!
		address_v4::bytes_type b;
		memcpy(&b[0], ext_ip->string_ptr(), 4);
		m_ext_ip(address_v4(b), aux::session_impl::source_dht, m.addr.address());
	}
#if TORRENT_USE_IPV6
	else if (ext_ip && ext_ip->string_length() == 16)
	{
		// this node claims we use the wrong node-ID!
		address_v6::bytes_type b;
		memcpy(&b[0], ext_ip->string_ptr(), 16);
		m_ext_ip(address_v6(b), aux::session_impl::source_dht, m.addr.address());
	}
#endif

#ifdef TORRENT_DHT_VERBOSE_LOGGING
	TORRENT_LOG(rpc) << "[" << o->m_algorithm.get()
		<< "] Reply with transaction id: "
		<< tid << " from " << m.addr;
#endif
	o->reply(m);
	*id = node_id(node_id_ent->string_ptr());

	// we found an observer for this reply, hence the node is not spoofing
	// add it to the routing table
	return m_table.node_seen(*id, m.addr);
}
// Periodic maintenance: hard-times-out transactions older than
// 'timeout' seconds, and short-times-out (one-time notification) those
// older than 'short_timeout' seconds. Returns how long to wait before
// the next tick.
time_duration rpc_manager::tick()
{
	INVARIANT_CHECK;

	const static int short_timeout = 1;
	const static int timeout = 8;

	// look for observers that have timed out
	if (m_transactions.empty()) return seconds(short_timeout);

	std::list<observer_ptr> timeouts;

	time_duration ret = seconds(short_timeout);
	ptime now = time_now();

#if defined TORRENT_DEBUG || TORRENT_RELEASE_ASSERTS
	// m_transactions is expected to be ordered by send time (oldest
	// first); the early-break loops below rely on that
	ptime last = min_time();
	for (transactions_t::iterator i = m_transactions.begin();
		i != m_transactions.end(); ++i)
	{
		TORRENT_ASSERT((*i)->sent() >= last);
		last = (*i)->sent();
	}
#endif

	for (transactions_t::iterator i = m_transactions.begin();
		i != m_transactions.end();)
	{
		observer_ptr o = *i;

		// if we reach an observer that hasn't timed out
		// break, because every observer after this one will
		// also not have timed out yet
		time_duration diff = now - o->sent();
		if (diff < seconds(timeout))
		{
			ret = seconds(timeout) - diff;
			break;
		}

#ifdef TORRENT_DHT_VERBOSE_LOGGING
		TORRENT_LOG(rpc) << "[" << o->m_algorithm.get()
			<< "] Timing out transaction id: " << (*i)->transaction_id()
			<< " from " << o->target_ep();
#endif
		// erase before the timeout callbacks below run, since they
		// may re-enter this manager
		m_transactions.erase(i++);
		timeouts.push_back(o);
	}

	std::for_each(timeouts.begin(), timeouts.end(), boost::bind(&observer::timeout, _1));
	timeouts.clear();

	for (transactions_t::iterator i = m_transactions.begin();
		i != m_transactions.end(); ++i)
	{
		observer_ptr o = *i;

		// if we reach an observer that hasn't timed out
		// break, because every observer after this one will
		// also not have timed out yet
		time_duration diff = now - o->sent();
		if (diff < seconds(short_timeout))
		{
			ret = seconds(short_timeout) - diff;
			break;
		}

		// only short-timeout each observer once
		if (o->has_short_timeout()) continue;

		// TODO: don't call short_timeout() again if we've
		// already called it once
		timeouts.push_back(o);
	}

	std::for_each(timeouts.begin(), timeouts.end(), boost::bind(&observer::short_timeout, _1));

	return ret;
}
// Dispatches an incoming reply or error message to the observer that
// issued the matching request. Pending transactions live in a multimap
// keyed on the 16-bit transaction ID; the sender's address must also
// match. Returns true if the node was added to the routing table.
bool rpc_manager::incoming(msg const& m, node_id* id
	, libtorrent::dht_settings const& settings)
{
	INVARIANT_CHECK;

	if (m_destructing) return false;

	// we only deal with replies and errors, not queries
	TORRENT_ASSERT(m.message.dict_find_string_value("y") == "r"
		|| m.message.dict_find_string_value("y") == "e");

	// if we don't have the transaction id in our
	// request list, ignore the packet
	std::string transaction_id = m.message.dict_find_string_value("t");
	if (transaction_id.empty()) return false;

	std::string::const_iterator i = transaction_id.begin();
	// -1 can never match a stored 16-bit transaction ID
	int tid = transaction_id.size() != 2 ? -1 : io::read_uint16(i);

	observer_ptr o;
	std::pair<transactions_t::iterator, transactions_t::iterator> range = m_transactions.equal_range(tid);
	for (transactions_t::iterator i = range.first; i != range.second; ++i)
	{
		// the reply must come from the exact address the request
		// was sent to
		if (m.addr.address() != i->second->target_addr()) continue;
		o = i->second;
		i = m_transactions.erase(i);
		break;
	}

	if (!o)
	{
#ifdef TORRENT_DHT_VERBOSE_LOGGING
		TORRENT_LOG(rpc) << "Reply with unknown transaction id size: "
			<< transaction_id.size() << " from " << m.addr;
#endif
		// this isn't necessarily because the other end is doing
		// something wrong. This can also happen when we restart
		// the node, and we prematurely abort all outstanding
		// requests. Also, this opens up a potential magnification
		// attack.
//		entry e;
//		incoming_error(e, "invalid transaction id");
//		m_sock->send_packet(e, m.addr, 0);
		return false;
	}

	ptime now = time_now_hires();

#ifdef TORRENT_DHT_VERBOSE_LOGGING
	// per-reply round-trip time, for offline analysis
	std::ofstream reply_stats("round_trip_ms.log", std::ios::app);
	reply_stats << m.addr << "\t" << total_milliseconds(now - o->sent())
		<< std::endl;
#endif

	lazy_entry const* ret_ent = m.message.dict_find_dict("r");
	if (ret_ent == 0)
	{
		// it may be an error
		ret_ent = m.message.dict_find_dict("e");
		// note: error replies only time out the observer; its reply()
		// handler is never invoked
		o->timeout();
		if (ret_ent == NULL)
		{
			entry e;
			incoming_error(e, "missing 'r' key");
			m_sock->send_packet(e, m.addr, 0);
		}
		return false;
	}

	lazy_entry const* node_id_ent = ret_ent->dict_find_string("id");
	if (node_id_ent == 0 || node_id_ent->string_length() != 20)
	{
		o->timeout();
		entry e;
		incoming_error(e, "missing 'id' key");
		m_sock->send_packet(e, m.addr, 0);
		return false;
	}

	node_id nid = node_id(node_id_ent->string_ptr());
	// optionally reject nodes whose ID doesn't verify against their
	// external IP (presumably BEP 42 style hardening — confirm)
	if (settings.enforce_node_id && !verify_id(nid, m.addr.address()))
	{
		o->timeout();
		entry e;
		incoming_error(e, "invalid node ID");
		m_sock->send_packet(e, m.addr, 0);
		return false;
	}

#ifdef TORRENT_DHT_VERBOSE_LOGGING
	TORRENT_LOG(rpc) << "[" << o->m_algorithm.get()
		<< "] Reply with transaction id: "
		<< tid << " from " << m.addr;
#endif
	o->reply(m);
	*id = nid;

	// measured round-trip time, fed to the routing table
	int rtt = int(total_milliseconds(now - o->sent()));

	// we found an observer for this reply, hence the node is not spoofing
	// add it to the routing table
	return m_table.node_seen(*id, m.addr, rtt);
}