// returns the number of microseconds since the first call, formatted as a string
std::string log_time()
{
	static const ptime start = time_now_hires();
	char ret[200];
	snprintf(ret, sizeof(ret), "%" PRId64
		, total_microseconds(time_now_hires() - start));
	return ret;
}
alert const* alert_manager::wait_for_alert(time_duration max_wait)
{
	mutex::scoped_lock lock(m_mutex);

	if (!m_alerts.empty()) return m_alerts.front();

//	system_time end = get_system_time()
//		+ boost::posix_time::microseconds(total_microseconds(max_wait));

	// apparently this call can be interrupted
	// prematurely if there are other signals
//	while (m_condition.timed_wait(lock, end))
//		if (!m_alerts.empty()) return m_alerts.front();

	ptime start = time_now_hires();

	// TODO: change this to use an asio timer instead
	while (m_alerts.empty())
	{
		lock.unlock();
		sleep(50);
		lock.lock();
		if (time_now_hires() - start >= max_wait) return 0;
	}
	return m_alerts.front();
}
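// Hedged sketch (an assumption, not the library's code): the premature-wakeup
// problem noted in the comment above is conventionally handled by re-checking
// the predicate against a fixed absolute deadline in a loop, e.g. with a Boost
// condition variable:
//
//   boost::system_time const deadline = get_system_time()
//     + boost::posix_time::microseconds(total_microseconds(max_wait));
//   while (m_alerts.empty())
//   {
//     // timed_wait() returns false once the deadline has passed
//     if (!m_condition.timed_wait(lock, deadline)) return 0;
//   }
//   return m_alerts.front();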
// returns the time since the first call, formatted as HH:MM:SS.mmm
TORRENT_EXTRA_EXPORT char const* time_now_string()
{
	static const ptime start = time_now_hires();
	static char ret[200];
	int t = total_milliseconds(time_now_hires() - start);
	int h = t / 1000 / 60 / 60;
	t -= h * 60 * 60 * 1000;
	int m = t / 1000 / 60;
	t -= m * 60 * 1000;
	int s = t / 1000;
	t -= s * 1000;
	int ms = t;
	snprintf(ret, sizeof(ret), "%02d:%02d:%02d.%03d", h, m, s, ms);
	return ret;
}
void timeout_handler::timeout_callback(error_code const& error)
{
	if (m_abort) return;

	ptime now = time_now_hires();
	time_duration receive_timeout = now - m_read_time;
	time_duration completion_timeout = now - m_start_time;

	if ((m_read_timeout
			&& m_read_timeout <= total_seconds(receive_timeout))
		|| (m_completion_timeout
			&& m_completion_timeout <= total_seconds(completion_timeout))
		|| error)
	{
		on_timeout(error);
		return;
	}

	int timeout = 0;
	if (m_read_timeout > 0) timeout = m_read_timeout;
	if (m_completion_timeout > 0)
	{
		timeout = timeout == 0
			? m_completion_timeout - total_seconds(m_read_time - m_start_time)
			: (std::min)(m_completion_timeout - total_seconds(m_read_time - m_start_time), timeout);
	}

	error_code ec;
	m_timeout.expires_at(m_read_time + seconds(timeout), ec);
	m_timeout.async_wait(
		boost::bind(&timeout_handler::timeout_callback, self(), _1));
}
void timeout_handler::set_timeout(int completion_timeout, int read_timeout)
{
	m_completion_timeout = completion_timeout;
	m_read_timeout = read_timeout;
	m_start_time = m_read_time = time_now_hires();
	TORRENT_ASSERT(completion_timeout > 0 || read_timeout > 0);

	if (m_abort) return;

	int timeout = 0;
	if (m_read_timeout > 0) timeout = m_read_timeout;
	if (m_completion_timeout > 0)
	{
		timeout = timeout == 0
			? m_completion_timeout
			: (std::min)(m_completion_timeout, timeout);
	}

#if defined TORRENT_ASIO_DEBUGGING
	add_outstanding_async("timeout_handler::timeout_callback");
#endif
	error_code ec;
	m_timeout.expires_at(m_read_time + seconds(timeout), ec);
	m_timeout.async_wait(boost::bind(
		&timeout_handler::timeout_callback, self(), _1));
}
timeout_handler::timeout_handler(io_service& ios)
	: m_start_time(time_now_hires())
	, m_read_time(m_start_time)
	, m_timeout(ios)
	, m_completion_timeout(0)
	, m_read_timeout(0)
	, m_abort(false)
{}
void connection_queue::on_timeout(error_code const& e)
{
	mutex_t::scoped_lock l(m_mutex);

	INVARIANT_CHECK;
#ifdef TORRENT_DEBUG
	function_guard guard_(m_in_timeout_function);
#endif

	TORRENT_ASSERT(!e || e == asio::error::operation_aborted);
	if (e) return;

	ptime next_expire = max_time();
	ptime now = time_now_hires() + milliseconds(100);
	std::list<entry> timed_out;
	for (std::list<entry>::iterator i = m_queue.begin();
		!m_queue.empty() && i != m_queue.end();)
	{
		if (i->connecting && i->expires < now)
		{
			std::list<entry>::iterator j = i;
			++i;
			timed_out.splice(timed_out.end(), m_queue, j, i);
			--m_num_connecting;
			continue;
		}
		if (i->expires < next_expire) next_expire = i->expires;
		++i;
	}

	// we don't want to call the timeout callback while we're locked
	// since that is a recipe for dead-locks
	l.unlock();

	for (std::list<entry>::iterator i = timed_out.begin()
		, end(timed_out.end()); i != end; ++i)
	{
#ifndef BOOST_NO_EXCEPTIONS
		try {
#endif
			i->on_timeout();
#ifndef BOOST_NO_EXCEPTIONS
		} catch (std::exception&) {}
#endif
	}

	l.lock();

	if (next_expire < max_time())
	{
		error_code ec;
		m_timer.expires_at(next_expire, ec);
		m_timer.async_wait(boost::bind(&connection_queue::on_timeout, this, _1));
	}
	try_connect(l);
}
void observer::set_target(udp::endpoint const& ep)
{
#ifdef TORRENT_DHT_VERBOSE_LOGGING
	// use high resolution timers for logging
	m_sent = time_now_hires();
#else
	m_sent = time_now();
#endif

	m_port = ep.port();
#if TORRENT_USE_IPV6
	if (ep.address().is_v6())
	{
		flags |= flag_ipv6_address;
		m_addr.v6 = ep.address().to_v6().to_bytes();
	}
	else
#endif
	{
		flags &= ~flag_ipv6_address;
		m_addr.v4 = ep.address().to_v4().to_bytes();
	}
}
void timeout_handler::timeout_callback(error_code const& error)
{
#if defined TORRENT_ASIO_DEBUGGING
	complete_async("timeout_handler::timeout_callback");
#endif
	if (m_abort) return;

	ptime now = time_now_hires();
	time_duration receive_timeout = now - m_read_time;
	time_duration completion_timeout = now - m_start_time;

	if ((m_read_timeout
			&& m_read_timeout <= total_seconds(receive_timeout))
		|| (m_completion_timeout
			&& m_completion_timeout <= total_seconds(completion_timeout))
		|| error)
	{
		on_timeout(error);
		return;
	}

	int timeout = 0;
	if (m_read_timeout > 0) timeout = m_read_timeout;
	if (m_completion_timeout > 0)
	{
		timeout = timeout == 0
			? int(m_completion_timeout - total_seconds(m_read_time - m_start_time))
			: (std::min)(int(m_completion_timeout
				- total_seconds(m_read_time - m_start_time)), timeout);
	}

#if defined TORRENT_ASIO_DEBUGGING
	add_outstanding_async("timeout_handler::timeout_callback");
#endif
	error_code ec;
	m_timeout.expires_at(m_read_time + seconds(timeout), ec);
	m_timeout.async_wait(
		boost::bind(&timeout_handler::timeout_callback, self(), _1));
}
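// Illustrative numbers (not from the source): with m_read_timeout = 20 s and
// m_completion_timeout = 60 s, if the last read happened 45 s after
// m_start_time, the remaining completion budget is 60 - 45 = 15 s, so the
// timer above is re-armed at m_read_time + min(20, 15) = m_read_time + 15 s.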
void timeout_handler::restart_read_timeout() { m_read_time = time_now_hires(); }
void connection_queue::on_timeout(error_code const& e)
{
#if defined TORRENT_ASIO_DEBUGGING
	complete_async("connection_queue::on_timeout");
#endif

	mutex_t::scoped_lock l(m_mutex);
	--m_num_timers;

	INVARIANT_CHECK;
#ifdef TORRENT_DEBUG
	function_guard guard_(m_in_timeout_function);
#endif

	TORRENT_ASSERT(!e || e == error::operation_aborted);

	// if there was an error, it's most likely operation aborted,
	// we should just quit. However, in case there are still connections
	// in connecting state, and there are no other timer invocations
	// we need to stick around still.
	if (e && (m_num_connecting == 0 || m_num_timers > 0)) return;

	ptime next_expire = max_time();
	ptime now = time_now_hires() + milliseconds(100);
	std::list<entry> timed_out;
	for (std::list<entry>::iterator i = m_queue.begin();
		!m_queue.empty() && i != m_queue.end();)
	{
		if (i->connecting && i->expires < now)
		{
			std::list<entry>::iterator j = i;
			++i;
			timed_out.splice(timed_out.end(), m_queue, j, i);
			--m_num_connecting;
			continue;
		}
		if (i->connecting && i->expires < next_expire) next_expire = i->expires;
		++i;
	}

	// we don't want to call the timeout callback while we're locked
	// since that is a recipe for dead-locks
	l.unlock();

	for (std::list<entry>::iterator i = timed_out.begin()
		, end(timed_out.end()); i != end; ++i)
	{
		TORRENT_ASSERT(i->connecting);
		TORRENT_ASSERT(i->ticket != -1);
		TORRENT_TRY {
			i->on_timeout();
		} TORRENT_CATCH(std::exception&) {}
	}

	l.lock();

	if (next_expire < max_time())
	{
#if defined TORRENT_ASIO_DEBUGGING
		add_outstanding_async("connection_queue::on_timeout");
#endif
		error_code ec;
		m_timer.expires_at(next_expire, ec);
		m_timer.async_wait(boost::bind(&connection_queue::on_timeout, this, _1));
		++m_num_timers;
	}
	try_connect(l);
}
void connection_queue::try_connect(connection_queue::mutex_t::scoped_lock& l)
{
	INVARIANT_CHECK;

#ifdef TORRENT_CONNECTION_LOGGING
	m_log << log_time() << " " << free_slots() << std::endl;
#endif
	// if this is enabled, UPnP connections will be blocked when shutting down
//	if (m_abort) return;

	if (m_num_connecting >= m_half_open_limit
		&& m_half_open_limit > 0) return;

	if (m_queue.empty())
	{
		error_code ec;
		m_timer.cancel(ec);
		return;
	}

	// all entries are connecting, no need to look for new ones
	if (m_queue.size() == m_num_connecting) return;

	std::list<entry>::iterator i = std::find_if(m_queue.begin()
		, m_queue.end(), boost::bind(&entry::connecting, _1) == false);

	std::list<entry> to_connect;

	while (i != m_queue.end())
	{
		TORRENT_ASSERT(i->connecting == false);
		ptime expire = time_now_hires() + i->timeout;
		if (m_num_connecting == 0)
		{
#if defined TORRENT_ASIO_DEBUGGING
			add_outstanding_async("connection_queue::on_timeout");
#endif
			error_code ec;
			m_timer.expires_at(expire, ec);
			m_timer.async_wait(boost::bind(&connection_queue::on_timeout, this, _1));
			++m_num_timers;
		}
		i->connecting = true;
		++m_num_connecting;
		i->expires = expire;

		INVARIANT_CHECK;

		to_connect.push_back(*i);

#ifdef TORRENT_CONNECTION_LOGGING
		m_log << log_time() << " " << free_slots() << std::endl;
#endif

		if (m_num_connecting >= m_half_open_limit
			&& m_half_open_limit > 0) break;
		if (m_num_connecting == m_queue.size()) break;
		i = std::find_if(i, m_queue.end(), boost::bind(&entry::connecting, _1) == false);
	}

	l.unlock();

	while (!to_connect.empty())
	{
		entry& ent = to_connect.front();
		TORRENT_TRY {
			ent.on_connect(ent.ticket);
		} TORRENT_CATCH(std::exception&) {}
		to_connect.pop_front();
	}
}
bool rpc_manager::incoming(msg const& m, node_id* id)
{
	INVARIANT_CHECK;

	if (m_destructing) return false;

	// we only deal with replies, not queries
	TORRENT_ASSERT(m.message.dict_find_string_value("y") == "r");

	// if we don't have the transaction id in our
	// request list, ignore the packet

	std::string transaction_id = m.message.dict_find_string_value("t");

	std::string::const_iterator i = transaction_id.begin();
	int tid = transaction_id.size() != 2 ? -1 : io::read_uint16(i);

	observer_ptr o;

	for (transactions_t::iterator i = m_transactions.begin()
		, end(m_transactions.end()); i != end; ++i)
	{
		TORRENT_ASSERT(*i);
		if ((*i)->transaction_id() != tid) continue;
		if (m.addr.address() != (*i)->target_addr()) continue;
		o = *i;
		m_transactions.erase(i);
		break;
	}

	if (!o)
	{
#ifdef TORRENT_DHT_VERBOSE_LOGGING
		TORRENT_LOG(rpc) << "Reply with invalid transaction id size: "
			<< transaction_id.size() << " from " << m.addr;
#endif
		entry e;
		incoming_error(e, "invalid transaction id");
		m_send(m_userdata, e, m.addr, 0);
		return false;
	}

#ifdef TORRENT_DHT_VERBOSE_LOGGING
	std::ofstream reply_stats("round_trip_ms.log", std::ios::app);
	reply_stats << m.addr << "\t" << total_milliseconds(time_now_hires() - o->sent())
		<< std::endl;
#endif

	lazy_entry const* ret_ent = m.message.dict_find_dict("r");
	if (ret_ent == 0)
	{
		entry e;
		incoming_error(e, "missing 'r' key");
		m_send(m_userdata, e, m.addr, 0);
		return false;
	}

	lazy_entry const* node_id_ent = ret_ent->dict_find_string("id");
	if (node_id_ent == 0 || node_id_ent->string_length() != 20)
	{
		entry e;
		incoming_error(e, "missing 'id' key");
		m_send(m_userdata, e, m.addr, 0);
		return false;
	}

	lazy_entry const* ext_ip = ret_ent->dict_find_string("ip");
	if (ext_ip && ext_ip->string_length() == 4)
	{
		// this node claims we use the wrong node-ID!
		address_v4::bytes_type b;
		memcpy(&b[0], ext_ip->string_ptr(), 4);
		m_ext_ip(address_v4(b), aux::session_impl::source_dht, m.addr.address());
	}
#if TORRENT_USE_IPV6
	else if (ext_ip && ext_ip->string_length() == 16)
	{
		// this node claims we use the wrong node-ID!
		address_v6::bytes_type b;
		memcpy(&b[0], ext_ip->string_ptr(), 16);
		m_ext_ip(address_v6(b), aux::session_impl::source_dht, m.addr.address());
	}
#endif

#ifdef TORRENT_DHT_VERBOSE_LOGGING
	TORRENT_LOG(rpc) << "[" << o->m_algorithm.get() << "] Reply with transaction id: "
		<< tid << " from " << m.addr;
#endif
	o->reply(m);
	*id = node_id(node_id_ent->string_ptr());

	// we found an observer for this reply, hence the node is not spoofing
	// add it to the routing table
	return m_table.node_seen(*id, m.addr);
}
bool utp_socket_manager::incoming_packet(error_code const& ec, udp::endpoint const& ep
	, char const* p, int size)
{
//	UTP_LOGV("incoming packet size:%d\n", size);

	if (size < int(sizeof(utp_header))) return false;

	utp_header const* ph = (utp_header*)p;

//	UTP_LOGV("incoming packet version:%d\n", int(ph->get_version()));

	if (ph->get_version() != 1) return false;

	const ptime receive_time = time_now_hires();

	// parse out connection ID and look for existing
	// connections. If found, forward to the utp_stream.
	boost::uint16_t id = ph->connection_id;

	// first test to see if it's the same socket as last time
	// in most cases it is
	if (m_last_socket
		&& utp_match(m_last_socket, ep, id))
	{
		return utp_incoming_packet(m_last_socket, p, size, ep, receive_time);
	}

	std::pair<socket_map_t::iterator, socket_map_t::iterator> r =
		m_utp_sockets.equal_range(id);

	for (; r.first != r.second; ++r.first)
	{
		if (!utp_match(r.first->second, ep, id)) continue;
		bool ret = utp_incoming_packet(r.first->second, p, size, ep, receive_time);
		if (ret) m_last_socket = r.first->second;
		return ret;
	}

//	UTP_LOGV("incoming packet id:%d source:%s\n", id, print_endpoint(ep).c_str());

	if (!m_sett.enable_incoming_utp)
		return false;

	// if not found, see if it's a SYN packet, if it is,
	// create a new utp_stream
	if (ph->get_type() == ST_SYN)
	{
		// possible SYN flood. Just ignore
		if (m_utp_sockets.size() > m_sett.connections_limit * 2) return false;

//		UTP_LOGV("not found, new connection id:%d\n", m_new_connection);

		boost::shared_ptr<socket_type> c(new (std::nothrow) socket_type(m_sock.get_io_service()));
		if (!c) return false;
		TORRENT_ASSERT(m_new_connection == -1);
		// create the new socket with this ID
		m_new_connection = id;

		instantiate_connection(m_sock.get_io_service(), proxy_settings(), *c, 0, this);
		utp_stream* str = c->get<utp_stream>();
		TORRENT_ASSERT(str);
		int link_mtu, utp_mtu;
		mtu_for_dest(ep.address(), link_mtu, utp_mtu);
		utp_init_mtu(str->get_impl(), link_mtu, utp_mtu);
		bool ret = utp_incoming_packet(str->get_impl(), p, size, ep, receive_time);
		if (!ret) return false;
		m_cb(c);
		// the connection most likely changed its connection ID here
		// we need to move it to the correct ID
		return true;
	}

//	#error send reset

	return false;
}
bool rpc_manager::incoming(msg const& m, node_id* id
	, libtorrent::dht_settings const& settings)
{
	INVARIANT_CHECK;

	if (m_destructing) return false;

	// we only deal with replies and errors, not queries
	TORRENT_ASSERT(m.message.dict_find_string_value("y") == "r"
		|| m.message.dict_find_string_value("y") == "e");

	// if we don't have the transaction id in our
	// request list, ignore the packet

	std::string transaction_id = m.message.dict_find_string_value("t");
	if (transaction_id.empty()) return false;

	std::string::const_iterator i = transaction_id.begin();
	int tid = transaction_id.size() != 2 ? -1 : io::read_uint16(i);

	observer_ptr o;
	std::pair<transactions_t::iterator, transactions_t::iterator> range =
		m_transactions.equal_range(tid);
	for (transactions_t::iterator i = range.first; i != range.second; ++i)
	{
		if (m.addr.address() != i->second->target_addr()) continue;
		o = i->second;
		i = m_transactions.erase(i);
		break;
	}

	if (!o)
	{
#ifdef TORRENT_DHT_VERBOSE_LOGGING
		TORRENT_LOG(rpc) << "Reply with unknown transaction id size: "
			<< transaction_id.size() << " from " << m.addr;
#endif
		// this isn't necessarily because the other end is doing
		// something wrong. This can also happen when we restart
		// the node, and we prematurely abort all outstanding
		// requests. Also, this opens up a potential magnification
		// attack.
//		entry e;
//		incoming_error(e, "invalid transaction id");
//		m_sock->send_packet(e, m.addr, 0);
		return false;
	}

	ptime now = time_now_hires();

#ifdef TORRENT_DHT_VERBOSE_LOGGING
	std::ofstream reply_stats("round_trip_ms.log", std::ios::app);
	reply_stats << m.addr << "\t" << total_milliseconds(now - o->sent())
		<< std::endl;
#endif

	lazy_entry const* ret_ent = m.message.dict_find_dict("r");
	if (ret_ent == 0)
	{
		// it may be an error
		ret_ent = m.message.dict_find_dict("e");
		o->timeout();
		if (ret_ent == NULL)
		{
			entry e;
			incoming_error(e, "missing 'r' key");
			m_sock->send_packet(e, m.addr, 0);
		}
		return false;
	}

	lazy_entry const* node_id_ent = ret_ent->dict_find_string("id");
	if (node_id_ent == 0 || node_id_ent->string_length() != 20)
	{
		o->timeout();
		entry e;
		incoming_error(e, "missing 'id' key");
		m_sock->send_packet(e, m.addr, 0);
		return false;
	}

	node_id nid = node_id(node_id_ent->string_ptr());
	if (settings.enforce_node_id && !verify_id(nid, m.addr.address()))
	{
		o->timeout();
		entry e;
		incoming_error(e, "invalid node ID");
		m_sock->send_packet(e, m.addr, 0);
		return false;
	}

#ifdef TORRENT_DHT_VERBOSE_LOGGING
	TORRENT_LOG(rpc) << "[" << o->m_algorithm.get() << "] Reply with transaction id: "
		<< tid << " from " << m.addr;
#endif
	o->reply(m);
	*id = nid;

	int rtt = int(total_milliseconds(now - o->sent()));

	// we found an observer for this reply, hence the node is not spoofing
	// add it to the routing table
	return m_table.node_seen(*id, m.addr, rtt);
}
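// For context (a hedged sketch, not part of the source): the 2-byte "t" value
// matched above is the transaction id the querying side generated, encoded
// with the same io helpers used for decoding here. Roughly:
//
//   char buf[2];
//   char* out = buf;
//   io::write_uint16(tid, out);                          // big-endian encode
//   std::string t(buf, 2);                               // sent as the "t" key
//   std::string::const_iterator in = t.begin();
//   TORRENT_ASSERT(io::read_uint16(in) == tid);          // round-trips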
void connection_queue::try_connect(connection_queue::mutex_t::scoped_lock& l)
{
	INVARIANT_CHECK;

#if BOOST_VERSION >= 103700
	TORRENT_ASSERT(l.owns_lock());
#endif

#ifdef TORRENT_CONNECTION_LOGGING
	m_log << log_time() << " " << free_slots() << std::endl;
#endif
	// if this is enabled, UPnP connections will be blocked when shutting down
//	if (m_abort) return;

	if (m_num_connecting >= m_half_open_limit
		&& m_half_open_limit > 0) return;

	if (m_queue.empty())
	{
		error_code ec;
		m_timer.cancel(ec);
		return;
	}

	std::list<entry>::iterator i = std::find_if(m_queue.begin()
		, m_queue.end(), boost::bind(&entry::connecting, _1) == false);

	std::list<entry> to_connect;

	while (i != m_queue.end())
	{
		TORRENT_ASSERT(i->connecting == false);
		ptime expire = time_now_hires() + i->timeout;
		if (m_num_connecting == 0)
		{
			error_code ec;
			m_timer.expires_at(expire, ec);
			m_timer.async_wait(boost::bind(&connection_queue::on_timeout, this, _1));
		}
		i->connecting = true;
		++m_num_connecting;
		i->expires = expire;

		INVARIANT_CHECK;

		to_connect.push_back(*i);

#ifdef TORRENT_CONNECTION_LOGGING
		m_log << log_time() << " " << free_slots() << std::endl;
#endif

		if (m_num_connecting >= m_half_open_limit
			&& m_half_open_limit > 0) break;
		i = std::find_if(i, m_queue.end(), boost::bind(&entry::connecting, _1) == false);
	}

	l.unlock();

	while (!to_connect.empty())
	{
		entry& ent = to_connect.front();
#ifndef BOOST_NO_EXCEPTIONS
		try {
#endif
			ent.on_connect(ent.ticket);
#ifndef BOOST_NO_EXCEPTIONS
		} catch (std::exception&) {}
#endif
		to_connect.pop_front();
	}
}