Example #1
// prevent_request means that the total number of requests has
// overflowed. This query failed because it was the oldest one,
// so if this flag is set, don't issue another request
void traversal_algorithm::failed(observer_ptr o, int flags)
{
	TORRENT_ASSERT(m_invoke_count >= 0);

	if (m_results.empty()) return;

	TORRENT_ASSERT(o->flags & observer::flag_queried);
	if (flags & short_timeout)
	{
		// short timeout means that it has been more than
		// two seconds since we sent the request, and that
		// we'll most likely not get a response. But, in case
		// we do get a late response, keep the handler
		// around for some more, but open up the slot
		// by increasing the branch factor
		if ((o->flags & observer::flag_short_timeout) == 0)
			++m_branch_factor;
		o->flags |= observer::flag_short_timeout;
#ifdef TORRENT_DHT_VERBOSE_LOGGING
		TORRENT_LOG(traversal) << "[" << this << "] 1ST_TIMEOUT "
			<< " id: " << o->id()
			<< " distance: " << distance_exp(m_target, o->id())
			<< " addr: " << o->target_ep()
			<< " branch-factor: " << m_branch_factor
			<< " invoke-count: " << m_invoke_count;
#endif
	}
	else
	{
		o->flags |= observer::flag_failed;
		// if this flag is set, it means we increased the
		// branch factor for it, and we should restore it
		if (o->flags & observer::flag_short_timeout)
			--m_branch_factor;

#ifdef TORRENT_DHT_VERBOSE_LOGGING
		TORRENT_LOG(traversal) << "[" << this << "] TIMEOUT "
			<< " id: " << o->id()
			<< " distance: " << distance_exp(m_target, o->id())
			<< " addr: " << o->target_ep()
			<< " branch-factor: " << m_branch_factor
			<< " invoke-count: " << m_invoke_count;
#endif
		// don't tell the routing table about
		// node ids that we just generated ourselves
		if ((o->flags & observer::flag_no_id) == 0)
			m_node.m_table.node_failed(o->id(), o->target_ep());
		++m_timeouts;
		--m_invoke_count;
		TORRENT_ASSERT(m_invoke_count >= 0);
	}

	if (flags & prevent_request)
	{
		--m_branch_factor;
		if (m_branch_factor <= 0) m_branch_factor = 1;
	}
	bool is_done = add_requests();
	if (is_done) done();
}
Example #2
bool obfuscated_get_peers::invoke(observer_ptr o)
{
	if (!m_obfuscated) return get_peers::invoke(o);

	const node_id id = o->id();
	const int shared_prefix = 160 - distance_exp(id, m_target);

	// when we get close to the target zone in the DHT
	// start using the correct info-hash, in order to
	// start receiving peers
	if (shared_prefix > m_node.m_table.depth() - 4)
	{
		m_obfuscated = false;
		// clear the queried bits on all successful nodes in
		// our node-list for this traversal algorithm, to
		// allow the get_peers traversal to regress in case
		// nodes further down end up being dead
		for (std::vector<observer_ptr>::iterator i = m_results.begin()
			, end(m_results.end()); i != end; ++i)
		{
			observer* const node = i->get();
			// don't re-request from nodes that didn't respond
			if (node->flags & observer::flag_failed) continue;
			// don't interrupt with queries that are already in-flight
			if ((node->flags & observer::flag_alive) == 0) continue;
			node->flags &= ~(observer::flag_queried | observer::flag_alive);
		}
		return get_peers::invoke(o);
	}

	entry e;
	e["y"] = "q";
	e["q"] = "get_peers";
	entry& a = e["a"];

	// This logic will obfuscate the target info-hash
	// we're looking up, in order to preserve more privacy
	// on the DHT. This is done by only including enough
	// bits in the info-hash for the node we're querying to
	// give a good answer, but not more.

	// now, obfuscate the bits past shared_prefix + 3
	node_id mask = generate_prefix_mask(shared_prefix + 3);
	node_id obfuscated_target = generate_random_id() & ~mask;
	obfuscated_target |= m_target & mask;
	a["info_hash"] = obfuscated_target.to_string();

	if (m_node.observer())
	{
		m_node.observer()->outgoing_get_peers(m_target, obfuscated_target
			, o->target_ep());
	}

	m_node.stats_counters().inc_stats_counter(counters::dht_get_peers_out);

	return m_node.m_rpc.invoke(e, o->target_ep(), o);
}
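
The prefix-mask step above can be illustrated in isolation. The sketch below is a hypothetical, self-contained model (node_id is represented as a plain std::array<std::uint8_t, 20>, and make_prefix_mask/obfuscate are illustrative names, not libtorrent's actual generate_prefix_mask and generate_random_id): the first shared_prefix + 3 bits of the target are kept, and every remaining bit is replaced with a random one.

#include <array>
#include <cstdint>
#include <random>

using node_id = std::array<std::uint8_t, 20>; // 160-bit DHT node id

// build a mask whose first `bits` bits are 1, the rest 0
node_id make_prefix_mask(int const bits)
{
	node_id mask{};
	for (int i = 0; i < bits && i < 160; ++i)
		mask[i / 8] |= std::uint8_t(0x80 >> (i % 8));
	return mask;
}

// keep the first `bits` bits of `target` and randomize the remainder,
// mirroring: obfuscated_target = (random & ~mask) | (target & mask)
node_id obfuscate(node_id const& target, int const bits)
{
	static std::mt19937 rng{std::random_device{}()};
	std::uniform_int_distribution<int> byte(0, 255);

	node_id const mask = make_prefix_mask(bits);
	node_id out{};
	for (std::size_t i = 0; i < out.size(); ++i)
	{
		auto const random_byte = std::uint8_t(byte(rng));
		out[i] = std::uint8_t((target[i] & mask[i]) | (random_byte & ~mask[i]));
	}
	return out;
}

The idea, as the comment in invoke() explains, is that the queried node only needs the shared prefix bits to return useful (closer) nodes, so the low bits can be randomized without breaking the lookup's convergence.
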
Example #3
bool find_data::invoke(observer_ptr o)
{
	if (m_done)
	{
		m_invoke_count = -1;
		return false;
	}

	// send a request to find closer nodes
	kademlia2_req req;
	req.kid_receiver = o->id();
	req.kid_target = m_target;
	req.search_type = m_search_type;
	o->m_packet_id = m_target; // bind the observer to the corresponding packet
	return m_node.m_rpc.invoke(req, o->target_ep(), o);
}
Example #4
// prevent_request means that the total number of requests has
// overflowed. This query failed because it was the oldest one,
// so if this flag is set, don't issue another request
void traversal_algorithm::failed(observer_ptr o, int const flags)
{
	// don't tell the routing table about
	// node ids that we just generated ourselves
	if ((o->flags & observer::flag_no_id) == 0)
		m_node.m_table.node_failed(o->id(), o->target_ep());

	if (m_results.empty()) return;

	bool decrement_branch_factor = false;

	TORRENT_ASSERT(o->flags & observer::flag_queried);
	if (flags & short_timeout)
	{
		// short timeout means that it has been more than
		// two seconds since we sent the request, and that
		// we'll most likely not get a response. But, in case
		// we do get a late response, keep the handler
		// around for some more, but open up the slot
		// by increasing the branch factor
		if ((o->flags & observer::flag_short_timeout) == 0)
		{
			TORRENT_ASSERT(m_branch_factor < (std::numeric_limits<std::int16_t>::max)());
			++m_branch_factor;
		}
		o->flags |= observer::flag_short_timeout;
#ifndef TORRENT_DISABLE_LOGGING
		dht_observer* logger = get_node().observer();
		if (logger != nullptr && logger->should_log(dht_logger::traversal))
		{
			char hex_id[41];
			aux::to_hex(o->id(), hex_id);
			logger->log(dht_logger::traversal
				, "[%p] 1ST_TIMEOUT id: %s distance: %d addr: %s branch-factor: %d "
				"invoke-count: %d type: %s"
				, static_cast<void*>(this), hex_id, distance_exp(m_target, o->id())
				, print_address(o->target_addr()).c_str(), m_branch_factor
				, m_invoke_count, name());
		}
#endif
	}
	else
	{
		o->flags |= observer::flag_failed;
		// if this flag is set, it means we increased the
		// branch factor for it, and we should restore it
		decrement_branch_factor = (o->flags & observer::flag_short_timeout) != 0;

#ifndef TORRENT_DISABLE_LOGGING
		dht_observer* logger = get_node().observer();
		if (logger != nullptr && logger->should_log(dht_logger::traversal))
		{
			char hex_id[41];
			aux::to_hex(o->id(), hex_id);
			logger->log(dht_logger::traversal
				, "[%p] TIMEOUT id: %s distance: %d addr: %s branch-factor: %d "
				"invoke-count: %d type: %s"
				, static_cast<void*>(this), hex_id, distance_exp(m_target, o->id())
				, print_address(o->target_addr()).c_str(), m_branch_factor
				, m_invoke_count, name());
		}
#endif

		++m_timeouts;
		TORRENT_ASSERT(m_invoke_count > 0);
		--m_invoke_count;
	}

	// this is another reason to decrement the branch factor, to prevent another
	// request from filling this slot. Only ever decrement once per response though
	decrement_branch_factor |= (flags & prevent_request) != 0;

	if (decrement_branch_factor)
	{
		TORRENT_ASSERT(m_branch_factor > 0);
		--m_branch_factor;
		if (m_branch_factor <= 0) m_branch_factor = 1;
	}

	bool const is_done = add_requests();
	if (is_done) done();
}
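
To make the branch-factor bookkeeping easier to follow, here is a minimal, hypothetical model of a single observer slot that mirrors the newer version (example #4), where the factor is given back at most once per failed response. slot_model, its flag values, and the starting factor of 3 are illustrative only, not libtorrent code.

#include <cassert>

// illustrative stand-ins for the observer flags and failure flags above
enum obs_flags : unsigned { flag_short_timeout = 1, flag_failed = 2 };
enum fail_flags : int { short_timeout = 1, prevent_request = 2 };

struct slot_model
{
	int branch_factor = 3; // illustrative starting value
	unsigned flags = 0;

	void failed(int const f)
	{
		if (f & short_timeout)
		{
			// only the first short timeout opens up an extra slot
			if ((flags & flag_short_timeout) == 0) ++branch_factor;
			flags |= flag_short_timeout;
			return;
		}

		flags |= flag_failed;
		// restore the slot at most once: either because we grew the
		// factor earlier, or because prevent_request reclaims it
		bool decrement = (flags & flag_short_timeout) != 0;
		decrement |= (f & prevent_request) != 0;
		if (decrement && branch_factor > 1) --branch_factor;
	}
};

int main()
{
	slot_model s;
	s.failed(short_timeout);  // first short timeout: 3 -> 4
	s.failed(short_timeout);  // a repeat does not grow it again
	assert(s.branch_factor == 4);
	s.failed(0);              // the hard failure restores it: 4 -> 3
	assert(s.branch_factor == 3);
	return 0;
}
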