Example #1
bool put_data::invoke(observer_ptr o)
{
	if (m_done) return false;

	// TODO: what if o is not an instance of put_data_observer? This needs to
	// be redesigned for better type safety.
	auto* po = static_cast<put_data_observer*>(o.get());

	entry e;
	e["y"] = "q";
	e["q"] = "put";
	entry& a = e["a"];
	a["v"] = m_data.value();
	a["token"] = po->m_token;
	if (m_data.is_mutable())
	{
		a["k"] = m_data.pk().bytes;
		a["seq"] = m_data.seq().value;
		a["sig"] = m_data.sig().bytes;
		if (!m_data.salt().empty())
		{
			a["salt"] = m_data.salt();
		}
	}

	m_node.stats_counters().inc_stats_counter(counters::dht_put_out);

	return m_node.m_rpc.invoke(e, o->target_ep(), o);
}
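The TODO above flags an unchecked downcast. Below is a minimal sketch of a checked alternative, assuming observer is polymorphic (dynamic_cast needs at least one virtual member); the helper name and the assertion are illustrative, not libtorrent API.

#include <cassert>
#include <string>

struct observer { virtual ~observer() = default; };
struct put_data_observer : observer { std::string m_token; };

// dynamic_cast returns nullptr when the runtime type doesn't match,
// turning a silent type confusion into a detectable failure
put_data_observer* as_put_data_observer(observer* o)
{
	auto* po = dynamic_cast<put_data_observer*>(o);
	assert(po != nullptr && "put_data::invoke expects a put_data_observer");
	return po;
}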
Example #2
bool get_peers::invoke(observer_ptr o)
{
	if (m_done)
	{
		m_invoke_count = -1;
		return false;
	}

	entry e;
	e["y"] = "q";
	entry& a = e["a"];

	e["q"] = "get_peers";
	a["info_hash"] = m_target.to_string();
	if (m_noseeds) a["noseed"] = 1;

	if (m_node.observer())
	{
		m_node.observer()->outgoing_get_peers(m_target, m_target, o->target_ep());
	}

	m_node.stats_counters().inc_stats_counter(counters::dht_get_peers_out);

	return m_node.m_rpc.invoke(e, o->target_ep(), o);
}
Example #3
bool put_data::invoke(observer_ptr o)
{
	if (m_done)
	{
		m_invoke_count = -1;
		return false;
	}

	// TODO: what if o is not an instance of put_data_observer? This needs to
	// be redesigned for better type safety.
	put_data_observer* po = static_cast<put_data_observer*>(o.get());

	entry e;
	e["y"] = "q";
	e["q"] = "put";
	entry& a = e["a"];
	a["v"] = m_data.value();
	a["token"] = po->m_token;
	if (m_data.is_mutable())
	{
		a["k"] = std::string(m_data.pk().data(), item_pk_len);
		a["seq"] = m_data.seq();
		a["sig"] = std::string(m_data.sig().data(), item_sig_len);
		if (!m_data.salt().empty())
		{
			a["salt"] = m_data.salt();
		}
	}

	return m_node.m_rpc.invoke(e, o->target_ep(), o);
}
Example #4
bool obfuscated_get_peers::invoke(observer_ptr o)
{
	if (!m_obfuscated) return get_peers::invoke(o);

	const node_id id = o->id();
	const int shared_prefix = 160 - distance_exp(id, m_target);

	// once we get close to the target zone in the DHT,
	// start using the correct info-hash in order to
	// start receiving peers
	if (shared_prefix > m_node.m_table.depth() - 4)
	{
		m_obfuscated = false;
		// clear the queried bits on all successful nodes in
		// our node-list for this traversal algorithm, to
		// allow the get_peers traversal to regress in case
		// nodes further down end up being dead
		for (std::vector<observer_ptr>::iterator i = m_results.begin()
			, end(m_results.end()); i != end; ++i)
		{
			observer* const node = i->get();
			// don't re-request from nodes that didn't respond
			if (node->flags & observer::flag_failed) continue;
			// don't interrupt with queries that are already in-flight
			if ((node->flags & observer::flag_alive) == 0) continue;
			node->flags &= ~(observer::flag_queried | observer::flag_alive);
		}
		return get_peers::invoke(o);
	}

	entry e;
	e["y"] = "q";
	e["q"] = "get_peers";
	entry& a = e["a"];

	// This logic will obfuscate the target info-hash
	// we're looking up, in order to preserve more privacy
	// on the DHT. This is done by only including enough
	// bits in the info-hash for the node we're querying to
	// give a good answer, but not more.

	// now, obfuscate the bits past shared_prefix + 3
	node_id mask = generate_prefix_mask(shared_prefix + 3);
	node_id obfuscated_target = generate_random_id() & ~mask;
	obfuscated_target |= m_target & mask;
	a["info_hash"] = obfuscated_target.to_string();

	if (m_node.observer())
	{
		m_node.observer()->outgoing_get_peers(m_target, obfuscated_target
			, o->target_ep());
	}

	m_node.stats_counters().inc_stats_counter(counters::dht_get_peers_out);

	return m_node.m_rpc.invoke(e, o->target_ep(), o);
}
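The masking step above keeps only the leading shared_prefix + 3 bits of the real info-hash and randomizes the rest. Here is a minimal 32-bit sketch of that construction; it assumes generate_prefix_mask sets the top n bits and generate_random_id is uniformly random, and the real node_id is of course 160 bits wide.

#include <cstdint>
#include <random>

std::uint32_t obfuscate(std::uint32_t target, int shared_prefix)
{
	// mask with the top (shared_prefix + 3) bits set, clamped to [0, 32]
	int const keep = shared_prefix + 3;
	std::uint32_t const mask =
		keep >= 32 ? ~0u : keep <= 0 ? 0u : ~0u << (32 - keep);

	// keep the shared prefix from the real target, randomize the rest
	std::mt19937 rng(std::random_device{}());
	return (target & mask) | (rng() & ~mask);
}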
Example #5
void rpc_manager::invoke(int message_id, udp::endpoint target_addr
	, observer_ptr o)
{
	INVARIANT_CHECK;

	if (m_destructing)
	{
		o->abort();
		return;
	}

	msg m;
	m.message_id = message_id;
	m.reply = false;
	m.id = m_our_id;
	m.addr = target_addr;
	TORRENT_ASSERT(!m_transactions[m_next_transaction_id]);
#ifdef TORRENT_DEBUG
	int potential_new_id = m_next_transaction_id;
#endif
#ifndef BOOST_NO_EXCEPTIONS
	try
	{
#endif
		m.transaction_id.clear();
		std::back_insert_iterator<std::string> out(m.transaction_id);
		io::write_uint16(m_next_transaction_id, out);
		
		o->send(m);

		o->sent = time_now();
#if TORRENT_USE_IPV6
		o->target_addr = target_addr.address();
#else
		o->target_addr = target_addr.address().to_v4();
#endif
		o->port = target_addr.port();

#ifdef TORRENT_DHT_VERBOSE_LOGGING
		TORRENT_LOG(rpc) << "Invoking " << messages::ids[message_id] 
			<< " -> " << target_addr;
#endif	
		m_send(m);
		new_transaction_id(o);
#ifndef BOOST_NO_EXCEPTIONS
	}
	catch (std::exception& e)
	{
		// m_send may fail with "no route to host"
		TORRENT_ASSERT(potential_new_id == m_next_transaction_id);
		o->abort();
	}
#endif
}
// returns true if lhs and rhs are too close to each other to appear
// in the same DHT search under different node IDs
bool compare_ip_cidr(observer_ptr const& lhs, observer_ptr const& rhs)
{
	if (lhs->target_addr().is_v4() != rhs->target_addr().is_v4())
		return false;
	// the maximum number of trailing bits in which two IPs may
	// differ and still count as the same machine. If they differ
	// in no more bits than this, something suspicious is going on
	// and we shouldn't add the second one to our routing table
	int cutoff = rhs->target_addr().is_v4() ? 4 : 64;
	int dist = cidr_distance(lhs->target_addr(), rhs->target_addr());
	return dist <= cutoff;
}
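cidr_distance isn't shown here; the comparison above reads consistently if it returns the number of trailing bits in which the two addresses differ, i.e. the address width minus the common leading prefix. A self-contained IPv4 sketch under that assumption:

#include <cstdint>

int cidr_distance_v4(std::uint32_t a, std::uint32_t b)
{
	std::uint32_t x = a ^ b;
	// count the common leading bits
	int common = 0;
	while (common < 32 && (x & 0x80000000u) == 0)
	{
		++common;
		x <<= 1;
	}
	return 32 - common;
}

// e.g. 10.0.0.1 and 10.0.0.14 differ only in their low four bits, so
// the distance is 4 and compare_ip_cidr flags them as too close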
// prevent_request means that the total number of requests has
// overflowed. This query failed because it was the oldest one,
// so if this flag is set, don't make another request
void traversal_algorithm::failed(observer_ptr o, int flags)
{
	TORRENT_ASSERT(m_invoke_count >= 0);

	if (m_results.empty()) return;

	TORRENT_ASSERT(o->flags & observer::flag_queried);
	if (flags & short_timeout)
	{
		// a short timeout means more than two seconds have passed
		// since we sent the request, so we'll most likely not get
		// a response. But in case a late response does arrive, keep
		// the handler around a while longer, and open up the slot
		// by increasing the branch factor
		if ((o->flags & observer::flag_short_timeout) == 0)
			++m_branch_factor;
		o->flags |= observer::flag_short_timeout;
#ifdef TORRENT_DHT_VERBOSE_LOGGING
		TORRENT_LOG(traversal) << "[" << this << "] 1ST_TIMEOUT "
			<< " id: " << o->id()
			<< " distance: " << distance_exp(m_target, o->id())
			<< " addr: " << o->target_ep()
			<< " branch-factor: " << m_branch_factor
			<< " invoke-count: " << m_invoke_count;
#endif
	}
	else
	{
		o->flags |= observer::flag_failed;
		// if this flag is set, it means we increased the
		// branch factor for it, and we should restore it
		if (o->flags & observer::flag_short_timeout)
			--m_branch_factor;

#ifdef TORRENT_DHT_VERBOSE_LOGGING
		TORRENT_LOG(traversal) << "[" << this << "] TIMEOUT "
			<< " id: " << o->id()
			<< " distance: " << distance_exp(m_target, o->id())
			<< " addr: " << o->target_ep()
			<< " branch-factor: " << m_branch_factor
			<< " invoke-count: " << m_invoke_count;
#endif
		// don't tell the routing table about
		// node ids that we just generated ourself
		if ((o->flags & observer::flag_no_id) == 0)
			m_node.m_table.node_failed(o->id(), o->target_ep());
		++m_timeouts;
		--m_invoke_count;
		TORRENT_ASSERT(m_invoke_count >= 0);
	}

	if (flags & prevent_request)
	{
		--m_branch_factor;
		if (m_branch_factor <= 0) m_branch_factor = 1;
	}
	bool is_done = add_requests();
	if (is_done) done();
}
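The branch-factor accounting above is subtle, so here is a toy model of just that bookkeeping, with illustrative names: the first short timeout opens an extra slot for a possible late reply, and a later hard failure of the same observer gives the slot back.

#include <cassert>

struct traversal_model
{
	int branch_factor = 3;
	bool short_timed_out = false;

	void on_short_timeout()
	{
		// only the first short timeout widens the branch factor
		if (!short_timed_out) ++branch_factor;
		short_timed_out = true;
	}

	void on_failure()
	{
		// restore the slot the short timeout opened
		if (short_timed_out) --branch_factor;
	}
};

int main()
{
	traversal_model t;
	t.on_short_timeout();
	assert(t.branch_factor == 4);
	t.on_failure();
	assert(t.branch_factor == 3);
}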
Example #8
unsigned int rpc_manager::new_transaction_id(observer_ptr o)
{
	INVARIANT_CHECK;

	unsigned int tid = m_next_transaction_id;
	m_next_transaction_id = (m_next_transaction_id + 1) % max_transactions;
	if (m_transactions[m_next_transaction_id])
	{
		// move the observer into the set of aborted transactions.
		// this prevents it from spawning new requests right now,
		// since that would break the invariant
		observer_ptr aborted = m_transactions[m_next_transaction_id];
		m_aborted_transactions.push_back(aborted);
#ifdef TORRENT_DHT_VERBOSE_LOGGING
		TORRENT_LOG(rpc) << "[new_transaction_id] Aborting message with transaction id: "
			<< m_next_transaction_id << " sent to " << aborted->target_ep()
			<< " " << total_seconds(time_now() - aborted->sent) << " seconds ago";
#endif
		m_transactions[m_next_transaction_id] = 0;
		TORRENT_ASSERT(m_oldest_transaction_id == m_next_transaction_id);
	}
	TORRENT_ASSERT(!m_transactions[tid]);
	m_transactions[tid] = o;
	if (m_oldest_transaction_id == m_next_transaction_id)
	{
		m_oldest_transaction_id = (m_oldest_transaction_id + 1) % max_transactions;
#ifdef TORRENT_DHT_VERBOSE_LOGGING
		TORRENT_LOG(rpc) << "WARNING: transaction limit reached! Too many concurrent"
			" messages! limit: " << (int)max_transactions;
#endif
		update_oldest_transaction_id();
	}

	return tid;
}
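A toy model of the fixed-size transaction table above, assuming the same eviction rule: advancing the cursor into a still-occupied slot evicts (aborts) the oldest outstanding request, which is what keeps the invariant that the next slot is always free.

#include <array>
#include <cstddef>

constexpr std::size_t max_transactions = 4;

struct tx_table
{
	std::array<int, max_transactions> slots{}; // 0 means the slot is free
	std::size_t next = 0;

	std::size_t issue(int handle)
	{
		std::size_t const tid = next;
		next = (next + 1) % max_transactions;
		// evict the oldest request if the next slot is still taken
		if (slots[next] != 0) slots[next] = 0;
		slots[tid] = handle;
		return tid;
	}
};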
Example #9
bool find_data::invoke(observer_ptr o)
{
	if (m_done)
	{
		m_invoke_count = -1;
		return false;
	}

	// send a request asking for nodes closer to the target
	kademlia2_req req;
	req.kid_receiver = o->id();
	req.kid_target = m_target;
	req.search_type = m_search_type;
	o->m_packet_id = m_target; // bind the observer to its corresponding packet
	return m_node.m_rpc.invoke(req, o->target_ep(), o);
}
Example #10
bool rpc_manager::invoke(entry& e, udp::endpoint target_addr
	, observer_ptr o)
{
	INVARIANT_CHECK;

	if (m_destructing) return false;

	e["y"] = "q";
	entry& a = e["a"];
	add_our_id(a);

	std::string transaction_id;
	transaction_id.resize(2);
	char* out = &transaction_id[0];
	int tid = (random() ^ (random() << 5)) & 0xffff;
	io::write_uint16(tid, out);
	e["t"] = transaction_id;

	// when a DHT node enters the read-only state, it places a 'ro' key
	// in the top-level dictionary of each outgoing query and sets it to 1
	if (m_settings.read_only) e["ro"] = 1;

	node& n = o->algorithm()->get_node();
	if (!n.native_address(o->target_addr()))
	{
		a["want"].list().push_back(entry(n.protocol_family_name()));
	}

	o->set_target(target_addr);
	o->set_transaction_id(tid);

#ifndef TORRENT_DISABLE_LOGGING
	m_log->log(dht_logger::rpc_manager, "[%p] invoking %s -> %s"
		, static_cast<void*>(o->algorithm()), e["q"].string().c_str()
		, print_endpoint(target_addr).c_str());
#endif

	if (m_sock->send_packet(e, target_addr))
	{
		m_transactions.insert(std::make_pair(tid, o));
#if TORRENT_USE_ASSERTS
		o->m_was_sent = true;
#endif
		return true;
	}
	return false;
}
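The transaction id above is a random 16-bit value serialized into a two-byte string under the "t" key. A sketch of that encoding, assuming io::write_uint16 writes big-endian, as libtorrent's io helpers generally do:

#include <cstdint>
#include <string>

std::string encode_tid(std::uint16_t tid)
{
	std::string t(2, '\0');
	t[0] = static_cast<char>((tid >> 8) & 0xff); // high byte first
	t[1] = static_cast<char>(tid & 0xff);
	return t;
}

// e.g. tid 0x1a2b becomes the bytes 0x1a 0x2b; masking with 0xffff in
// the caller keeps the random value within what two bytes can hold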
Example #11
bool refresh::invoke(observer_ptr o)
{
	entry e;
	e["z"] = "q";
	e["q"] = "findNode";
	entry& a = e["x"];
	a["target"] = target().to_string();
	return m_node.m_rpc.invoke(e, o->target_ep(), o);
}
bool rpc_manager::invoke(entry& e, udp::endpoint target_addr
	, observer_ptr o)
{
	INVARIANT_CHECK;

	if (m_destructing) return false;

	e["y"] = "q";
	entry& a = e["a"];
	add_our_id(a);

	std::string transaction_id;
	transaction_id.resize(2);
	char* out = &transaction_id[0];
	int tid = (random() ^ (random() << 5)) & 0xffff;
	io::write_uint16(tid, out);
	e["t"] = transaction_id;

	o->set_target(target_addr);
	o->set_transaction_id(tid);

#ifndef TORRENT_DISABLE_LOGGING
	m_log->log(dht_logger::rpc_manager, "[%p] invoking %s -> %s"
		, static_cast<void*>(o->algorithm()), e["q"].string().c_str()
		, print_endpoint(target_addr).c_str());
#endif

	if (m_sock->send_packet(e, target_addr, 1))
	{
		m_transactions.insert(std::make_pair(tid,o));
#if TORRENT_USE_ASSERTS
		o->m_was_sent = true;
#endif
		return true;
	}
	return false;
}
Example #13
bool get_item::invoke(observer_ptr o)
{
	if (m_done) return false;

	entry e;
	e["y"] = "q";
	entry& a = e["a"];

	e["q"] = "get";
	a["target"] = target().to_string();

	m_node.stats_counters().inc_stats_counter(counters::dht_get_out);

	return m_node.m_rpc.invoke(e, o->target_ep(), o);
}
Example #14
bool rpc_manager::invoke(entry& e, udp::endpoint target_addr
	, observer_ptr o)
{
	INVARIANT_CHECK;

	if (m_destructing) return false;

	e["y"] = "q";
	entry& a = e["a"];
	add_our_id(a);

	std::string transaction_id;
	transaction_id.resize(2);
	char* out = &transaction_id[0];
	int tid = (random() ^ (random() << 5)) & 0xffff;
	io::write_uint16(tid, out);
	e["t"] = transaction_id;
		
	o->set_target(target_addr);
	o->set_transaction_id(tid);

#ifdef TORRENT_DHT_VERBOSE_LOGGING
	TORRENT_LOG(rpc) << "[" << o->m_algorithm.get() << "] invoking "
		<< e["q"].string() << " -> " << target_addr;
#endif

	if (m_sock->send_packet(e, target_addr, 1))
	{
		m_transactions.insert(std::make_pair(tid,o));
#if TORRENT_USE_ASSERTS
		o->m_was_sent = true;
#endif
		return true;
	}
	return false;
}
Example #15
bool rpc_manager::invoke(entry& e, udp::endpoint target_addr
	, observer_ptr o)
{
	INVARIANT_CHECK;

	if (m_destructing) return false;

	e["y"] = "q";
	entry& a = e["a"];
	add_our_id(a);

	std::string transaction_id;
	transaction_id.resize(2);
	char* out = &transaction_id[0];
	int tid = rand() ^ (rand() << 5);
	io::write_uint16(tid, out);
	e["t"] = transaction_id;
		
	o->set_target(target_addr);
	o->set_transaction_id(tid);

#ifdef TORRENT_DHT_VERBOSE_LOGGING
	TORRENT_LOG(rpc) << "[" << o->m_algorithm.get() << "] invoking "
		<< e["q"].string() << " -> " << target_addr;
#endif

	if (m_send(m_userdata, e, target_addr, 1))
	{
		m_transactions.push_back(o);
#if defined TORRENT_DEBUG || TORRENT_RELEASE_ASSERTS
		o->m_was_sent = true;
#endif
		return true;
	}
	return false;
}
Example #16
bool find_data::invoke(observer_ptr o)
{
    if (m_done)
    {
        m_invoke_count = -1;
        return false;
    }

    entry e;
    e["y"] = "q";
    e["q"] = "get_peers";
    entry& a = e["a"];
    a["info_hash"] = m_target.to_string();
    return m_node.m_rpc.invoke(e, o->target_ep(), o);
}
Example #17
bool find_data::invoke(observer_ptr o)
{
	if (m_done)
	{
		m_invoke_count = -1;
		return false;
	}

	entry e;
	e["h"] = "q";
	e["q"] = "getData"; // "getPeers"
	entry& a = e["g"];
	entry& target = a["target"];
	target["n"] = m_trackerName;
	target["r"] = "tracker";
	target["t"] = "m";
	a["infoHash"] = m_target.to_string();
	if (m_noseeds) a["noseed"] = 1;
	return m_node.m_rpc.invoke(e, o->target_ep(), o);
}
// prevent_request means that the total number of requests has
// overflowed. This query failed because it was the oldest one,
// so if this flag is set, don't make another request
void traversal_algorithm::failed(observer_ptr o, int const flags)
{
	// don't tell the routing table about
	// node ids that we just generated ourself
	if ((o->flags & observer::flag_no_id) == 0)
		m_node.m_table.node_failed(o->id(), o->target_ep());

	if (m_results.empty()) return;

	bool decrement_branch_factor = false;

	TORRENT_ASSERT(o->flags & observer::flag_queried);
	if (flags & short_timeout)
	{
		// a short timeout means more than two seconds have passed
		// since we sent the request, so we'll most likely not get
		// a response. But in case a late response does arrive, keep
		// the handler around a while longer, and open up the slot
		// by increasing the branch factor
		if ((o->flags & observer::flag_short_timeout) == 0)
		{
			TORRENT_ASSERT(m_branch_factor < (std::numeric_limits<std::int16_t>::max)());
			++m_branch_factor;
		}
		o->flags |= observer::flag_short_timeout;
#ifndef TORRENT_DISABLE_LOGGING
		dht_observer* logger = get_node().observer();
		if (logger != nullptr && logger->should_log(dht_logger::traversal))
		{
			char hex_id[41];
			aux::to_hex(o->id(), hex_id);
			logger->log(dht_logger::traversal
				, "[%p] 1ST_TIMEOUT id: %s distance: %d addr: %s branch-factor: %d "
				"invoke-count: %d type: %s"
				, static_cast<void*>(this), hex_id, distance_exp(m_target, o->id())
				, print_address(o->target_addr()).c_str(), m_branch_factor
				, m_invoke_count, name());
		}
#endif
	}
	else
	{
		o->flags |= observer::flag_failed;
		// if this flag is set, it means we increased the
		// branch factor for it, and we should restore it
		decrement_branch_factor = (o->flags & observer::flag_short_timeout) != 0;

#ifndef TORRENT_DISABLE_LOGGING
		dht_observer* logger = get_node().observer();
		if (logger != nullptr && logger->should_log(dht_logger::traversal))
		{
			char hex_id[41];
			aux::to_hex(o->id(), hex_id);
			logger->log(dht_logger::traversal
				, "[%p] TIMEOUT id: %s distance: %d addr: %s branch-factor: %d "
				"invoke-count: %d type: %s"
				, static_cast<void*>(this), hex_id, distance_exp(m_target, o->id())
				, print_address(o->target_addr()).c_str(), m_branch_factor
				, m_invoke_count, name());
		}
#endif

		++m_timeouts;
		TORRENT_ASSERT(m_invoke_count > 0);
		--m_invoke_count;
	}

	// this is another reason to decrement the branch factor, to prevent another
	// request from filling this slot. Only ever decrement once per response though
	decrement_branch_factor |= (flags & prevent_request);

	if (decrement_branch_factor)
	{
		TORRENT_ASSERT(m_branch_factor > 0);
		--m_branch_factor;
		if (m_branch_factor <= 0) m_branch_factor = 1;
	}

	bool const is_done = add_requests();
	if (is_done) done();
}