// rate-limits incoming DHT messages per source address. Returns false if
// the message from addr should be dropped (the sender is currently banned),
// true if it should be processed.
bool dos_blocker::incoming(address const& addr, time_point const now, dht_logger* logger)
{
	TORRENT_UNUSED(logger); // only referenced when logging is compiled in

	node_ban_entry* match = nullptr;
	// candidate slot to recycle if addr is not tracked yet: the entry with
	// the fewest hits, ties broken by the earliest deadline
	node_ban_entry* min = m_ban_nodes;
	for (node_ban_entry* i = m_ban_nodes; i < m_ban_nodes + num_ban_nodes; ++i)
	{
		if (i->src == addr)
		{
			match = i;
			break;
		}
		if (i->count < min->count) min = i;
		else if (i->count == min->count
			&& i->limit < min->limit) min = i;
	}

	if (match)
	{
		++match->count;

		// the threshold is m_message_rate_limit messages per second, sampled
		// over a 10 second window
		if (match->count >= m_message_rate_limit * 10)
		{
			if (now < match->limit)
			{
				// log and set the ban deadline only on the exact message that
				// crosses the threshold, not on every subsequent one
				if (match->count == m_message_rate_limit * 10)
				{
#ifndef TORRENT_DISABLE_LOGGING
					if (logger != nullptr && logger->should_log(dht_logger::tracker))
					{
						logger->log(dht_logger::tracker, "BANNING PEER [ ip: %s time: %d ms count: %d ]"
							, print_address(addr).c_str()
							, int(total_milliseconds((now - match->limit) + seconds(10)))
							, match->count);
					}
#else
					TORRENT_UNUSED(logger);
#endif // TORRENT_DISABLE_LOGGING
					// we've received too many messages in less than 10 seconds
					// from this node. Ignore it until it's silent for 5 minutes
					match->limit = now + seconds(m_block_timeout);
				}
				return false;
			}

			// the messages we received from this peer took more than 10
			// seconds. Reset the counter and the timer
			match->count = 0;
			match->limit = now + seconds(10);
		}
	}
	else
	{
		// unseen address: take over the least significant slot and start a
		// fresh 10 second sampling window
		min->count = 1;
		min->limit = now + seconds(10);
		min->src = addr;
	}
	return true;
}
void rc4_handler::decrypt(std::vector<boost::asio::mutable_buffer>& buf , int& consume , int& produce , int& packet_size) { // these are out-parameters that are not set TORRENT_UNUSED(consume); TORRENT_UNUSED(packet_size); if (!m_decrypt) return; int bytes_processed = 0; for (std::vector<boost::asio::mutable_buffer>::iterator i = buf.begin(); i != buf.end(); ++i) { unsigned char* pos = boost::asio::buffer_cast<unsigned char*>(*i); int len = boost::asio::buffer_size(*i); TORRENT_ASSERT(len >= 0); TORRENT_ASSERT(pos); bytes_processed += len; rc4_encrypt(pos, len, &m_rc4_incoming); } buf.clear(); produce = bytes_processed; }
// fallback overload: unconditionally reports failure. Presumably selected
// when no real UTF-8 -> wide-string conversion is available on this
// platform -- confirm against the surrounding preprocessor conditions.
static utf8_errors::error_code_enum convert(UTF8 const** src_start
	, UTF8 const* src_end
	, std::wstring& wide)
{
	TORRENT_UNUSED(src_start);
	TORRENT_UNUSED(src_end);
	TORRENT_UNUSED(wide);
	return utf8_errors::error_code_enum::source_illegal;
}
// fallback overload: unconditionally reports failure. Presumably selected
// when no real wide-string -> UTF-8 conversion is available on this
// platform -- confirm against the surrounding preprocessor conditions.
static utf8_errors::error_code_enum convert(wchar_t const** src_start
	, wchar_t const* src_end
	, std::string& utf8)
{
	TORRENT_UNUSED(src_start);
	TORRENT_UNUSED(src_end);
	TORRENT_UNUSED(utf8);
	return utf8_errors::error_code_enum::source_illegal;
}
// asynchronously installs a torrent extension. ext is a factory invoked
// with the torrent handle and userdata to produce the plugin instance.
// No-op when extensions are compiled out.
void torrent_handle::add_extension(
	boost::function<boost::shared_ptr<torrent_plugin>(torrent_handle const&, void*)> const& ext
	, void* userdata)
{
#ifndef TORRENT_DISABLE_EXTENSIONS
	async_call(&torrent::add_extension_fun, ext, userdata);
#else
	TORRENT_UNUSED(ext);
	TORRENT_UNUSED(userdata);
#endif
}
// asynchronously sets the SSL certificate, private key and DH parameters
// for this torrent from in-memory buffers. No-op when libtorrent is built
// without OpenSSL support.
void torrent_handle::set_ssl_certificate_buffer(
	std::string const& certificate
	, std::string const& private_key
	, std::string const& dh_params)
{
#ifdef TORRENT_USE_OPENSSL
	async_call(&torrent::set_ssl_cert_buffer, certificate, private_key, dh_params);
#else
	TORRENT_UNUSED(certificate);
	TORRENT_UNUSED(private_key);
	TORRENT_UNUSED(dh_params);
#endif
}
// returns true if buffer was allocated by this pool. The caller must hold
// the pool mutex; the lock argument exists only to document and assert
// that fact.
bool disk_buffer_pool::is_disk_buffer(char* buffer
	, mutex::scoped_lock& l) const
{
	TORRENT_ASSERT(m_magic == 0x1337);
	TORRENT_ASSERT(l.locked());
	TORRENT_UNUSED(l);

#if TORRENT_HAVE_MMAP
	// the mmapped cache pool is one contiguous region of m_max_use blocks
	// of 0x4000 bytes each; a pool buffer must point inside that range
	if (m_cache_pool)
	{
		return buffer >= m_cache_pool && buffer < m_cache_pool
			+ boost::uint64_t(m_max_use) * 0x4000;
	}
#endif

#if defined TORRENT_DEBUG
	return m_buffers_in_use.count(buffer) == 1;
#elif defined TORRENT_DEBUG_BUFFERS
	return page_aligned_allocator::in_use(buffer);
#elif defined TORRENT_DISABLE_POOL_ALLOCATOR
	// no bookkeeping exists to check against in this configuration
	return true;
#else
	if (m_using_pool_allocator)
		return m_pool.is_from(buffer);
	else
		return true;
#endif
}
// returns true if buffer was allocated by this pool. The caller must hold
// the pool mutex; the lock argument exists only to document and assert
// that fact. Without any debug bookkeeping compiled in, every buffer is
// assumed valid.
bool disk_buffer_pool::is_disk_buffer(char* buffer
	, std::unique_lock<std::mutex>& l) const
{
	TORRENT_ASSERT(m_magic == 0x1337);
	TORRENT_ASSERT(l.owns_lock());
	TORRENT_UNUSED(l);

#if TORRENT_USE_INVARIANT_CHECKS
	return m_buffers_in_use.count(buffer) == 1;
#elif defined TORRENT_DEBUG_BUFFERS
	return page_in_use(buffer);
#else
	TORRENT_UNUSED(buffer);
	return true;
#endif
}
// endpoints is an in-out parameter void http_tracker_connection::on_filter(http_connection& c , std::vector<tcp::endpoint>& endpoints) { TORRENT_UNUSED(c); if (!tracker_req().filter) return; // remove endpoints that are filtered by the IP filter for (std::vector<tcp::endpoint>::iterator i = endpoints.begin(); i != endpoints.end();) { if (tracker_req().filter->access(i->address()) == ip_filter::blocked) i = endpoints.erase(i); else ++i; } #ifndef TORRENT_DISABLE_LOGGING boost::shared_ptr<request_callback> cb = requester(); if (cb) { cb->debug_log("*** TRACKER_FILTER"); } #endif if (endpoints.empty()) fail(error_code(errors::banned_by_ip_filter)); }
void session::start(int flags, settings_pack const& pack, io_service* ios) { bool const internal_executor = ios == nullptr; if (internal_executor) { // the user did not provide an executor, we have to use our own m_io_service = boost::make_shared<io_service>(); ios = m_io_service.get(); } m_impl = boost::make_shared<session_impl>(boost::ref(*ios)); *static_cast<session_handle*>(this) = session_handle(m_impl.get()); #ifndef TORRENT_DISABLE_EXTENSIONS if (flags & add_default_plugins) { add_extension(create_ut_pex_plugin); add_extension(create_ut_metadata_plugin); add_extension(create_smart_ban_plugin); } #else TORRENT_UNUSED(flags); #endif m_impl->start_session(pack); if (internal_executor) { // start a thread for the message pump m_thread = std::make_shared<std::thread>( [&]() { m_io_service->run(); }); } }
// called after alert a has been appended to the current generation's
// queue. Wakes up any alert waiters (and the user notify callback) when a
// was the first alert posted to an empty queue, then dispatches a to all
// session extensions. The caller passes in the held queue lock, which is
// released here before any callbacks run.
void alert_manager::maybe_notify(alert* a, std::unique_lock<std::mutex>& lock)
{
	// sample the queue size while the lock is still held; only the very
	// first alert in an otherwise empty queue triggers a wake-up
	bool const first_in_queue = m_alerts[m_generation].size() == 1;
	lock.unlock();

	if (first_in_queue)
	{
		// we just posted to an empty queue. If anyone is waiting for
		// alerts, we need to notify them. Also (potentially) call the
		// user supplied m_notify callback to let the client wake up its
		// message loop to poll for alerts.
		if (m_notify) m_notify();

		// TODO: 2 keep a count of the number of threads waiting. Only if it's
		// > 0 notify them
		m_condition.notify_all();
	}

#ifndef TORRENT_DISABLE_EXTENSIONS
	for (auto& e : m_ses_extensions)
		e->on_alert(a);
#else
	TORRENT_UNUSED(a);
#endif
}
// sends a uTP packet of len bytes at p to ep. When flags contains
// dont_fragment and the platform supports it, the DF socket option is set
// for this send and cleared again afterwards.
void utp_socket_manager::send_packet(udp::endpoint const& ep, char const* p
	, int len, error_code& ec, int flags)
{
#if !defined TORRENT_HAS_DONT_FRAGMENT && !defined TORRENT_DEBUG_MTU
	TORRENT_UNUSED(flags);
#endif

	if (!m_sock.is_open())
	{
		ec = boost::asio::error::operation_aborted;
		return;
	}

#ifdef TORRENT_DEBUG_MTU
	// drop packets that exceed the debug MTU
	if ((flags & dont_fragment) && len > TORRENT_DEBUG_MTU) return;
#endif

#ifdef TORRENT_HAS_DONT_FRAGMENT
	error_code tmp;
	if (flags & utp_socket_manager::dont_fragment)
	{
		m_sock.set_option(libtorrent::dont_fragment(true), tmp);
		TORRENT_ASSERT_VAL(!tmp, tmp.message());
	}
#endif
	m_sock.send(ep, p, len, ec);
#ifdef TORRENT_HAS_DONT_FRAGMENT
	// restore the socket to its default, fragmenting state so subsequent
	// sends are unaffected
	if (flags & utp_socket_manager::dont_fragment)
	{
		m_sock.set_option(libtorrent::dont_fragment(false), tmp);
		TORRENT_ASSERT_VAL(!tmp, tmp.message());
	}
#endif
}
// runs the destructor of every object in the alloca-allocated span.
// TORRENT_UNUSED silences the unused-variable warning some compilers emit
// when T is trivially destructible and the explicit destructor call
// compiles away to nothing.
~alloca_destructor()
{
	for (auto& o : objects)
	{
		TORRENT_UNUSED(o);
		o.~T();
	}
}
// removes buf from the set of outstanding buffers tracked for invariant
// checking. Compiles to a no-op when invariant checks are disabled, which
// is why buf is marked unused up front.
void disk_buffer_pool::remove_buffer_in_use(char* buf)
{
	TORRENT_UNUSED(buf);
#if TORRENT_USE_INVARIANT_CHECKS
	std::set<char*>::iterator i = m_buffers_in_use.find(buf);
	// freeing a buffer that was never allocated (or double-freeing) is a bug
	TORRENT_ASSERT(i != m_buffers_in_use.end());
	m_buffers_in_use.erase(i);
#endif
}
void peer_connection_handle::add_extension(std::shared_ptr<peer_plugin> ext) { #ifndef TORRENT_DISABLE_EXTENSIONS std::shared_ptr<peer_connection> pc = native_handle(); TORRENT_ASSERT(pc); pc->add_extension(ext); #else TORRENT_UNUSED(ext); #endif }
// number of bytes in an address of protocol family p: 4 for IPv4, 16 for
// IPv6 (when IPv6 support is compiled in).
std::size_t address_size(Proto p)
{
	TORRENT_UNUSED(p); // unused when IPv6 support is compiled out
#if TORRENT_USE_IPV6
	if (p == Proto::v6())
		return std::tuple_size<address_v6::bytes_type>::value;
#endif
	return std::tuple_size<address_v4::bytes_type>::value;
}
// number of bytes in an address of protocol family p: 4 for IPv4, 16 for
// IPv6 (when IPv6 support is compiled in).
size_t address_size(Proto p)
{
	TORRENT_UNUSED(p); // unused when IPv6 support is compiled out
#if TORRENT_USE_IPV6
	if (p == Proto::v6())
		return address_v6::bytes_type().size();
#endif
	return address_v4::bytes_type().size();
}
void bt_peer_connection_handle::switch_recv_crypto(std::shared_ptr<crypto_plugin> crypto) { #if !defined(TORRENT_DISABLE_ENCRYPTION) && !defined(TORRENT_DISABLE_EXTENSIONS) std::shared_ptr<bt_peer_connection> pc = native_handle(); TORRENT_ASSERT(pc); pc->switch_recv_crypto(crypto); #else TORRENT_UNUSED(crypto); #endif }
// looks up an attached peer plugin by its type identifier string. Returns
// nullptr when extensions are compiled out (and whatever the underlying
// connection returns otherwise).
peer_plugin const* peer_connection_handle::find_plugin(char const* type)
{
#ifndef TORRENT_DISABLE_EXTENSIONS
	auto const pc = native_handle();
	TORRENT_ASSERT(pc);
	return pc->find_plugin(type);
#else
	TORRENT_UNUSED(type);
	return nullptr;
#endif
}
// forwards the should-log query to the underlying peer connection.
// Always false when logging is compiled out.
bool peer_connection_handle::should_log(peer_log_alert::direction_t direction) const
{
#ifndef TORRENT_DISABLE_LOGGING
	auto const pc = native_handle();
	TORRENT_ASSERT(pc);
	return pc->should_log(direction);
#else
	TORRENT_UNUSED(direction);
	return false;
#endif
}
// returns one disk buffer to the page allocator and updates the in-use
// count. The caller must hold the pool mutex; the lock argument exists
// only to document and assert that fact.
void disk_buffer_pool::free_buffer_impl(char* buf, std::unique_lock<std::mutex>& l)
{
	TORRENT_ASSERT(buf);
	TORRENT_ASSERT(m_magic == 0x1337);
	TORRENT_ASSERT(m_settings_set);
	TORRENT_ASSERT(l.owns_lock());
	TORRENT_UNUSED(l);

	page_free(buf);

	--m_in_use;
}
// this has to be thread safe and atomic. i.e. on posix systems it has to be // turned into a series of pread() calls std::int64_t file::readv(std::int64_t file_offset, span<iovec_t const> bufs , error_code& ec, open_mode_t flags) { if (m_file_handle == INVALID_HANDLE_VALUE) { #ifdef TORRENT_WINDOWS ec = error_code(ERROR_INVALID_HANDLE, system_category()); #else ec = error_code(boost::system::errc::bad_file_descriptor, generic_category()); #endif return -1; } TORRENT_ASSERT((m_open_mode & open_mode::rw_mask) == open_mode::read_only || (m_open_mode & open_mode::rw_mask) == open_mode::read_write); TORRENT_ASSERT(!bufs.empty()); TORRENT_ASSERT(is_open()); #if TORRENT_USE_PREADV TORRENT_UNUSED(flags); std::int64_t ret = iov(&::preadv, native_handle(), file_offset, bufs, ec); #else // there's no point in coalescing single buffer writes if (bufs.size() == 1) { flags &= ~open_mode::coalesce_buffers; } iovec_t tmp; span<iovec_t const> tmp_bufs = bufs; if (flags & open_mode::coalesce_buffers) { if (!coalesce_read_buffers(tmp_bufs, tmp)) // ok, that failed, don't coalesce this read flags &= ~open_mode::coalesce_buffers; } #if TORRENT_USE_PREAD std::int64_t ret = iov(&::pread, native_handle(), file_offset, tmp_bufs, ec); #else std::int64_t ret = iov(&::read, native_handle(), file_offset, tmp_bufs, ec); #endif if (flags & open_mode::coalesce_buffers) coalesce_read_buffers_end(bufs , tmp.data(), !ec); #endif return ret; }
// printf-style log entry for this peer connection, forwarded to the
// underlying peer_connection as a va_list. fmt is a printf format string;
// the clang pragmas silence the non-literal-format and varargs-on-class
// warnings that the forwarding necessarily triggers.
void peer_connection_handle::peer_log(peer_log_alert::direction_t direction
	, char const* event, char const* fmt, ...) const
{
#ifndef TORRENT_DISABLE_LOGGING
	std::shared_ptr<peer_connection> pc = native_handle();
	TORRENT_ASSERT(pc);
	va_list v;
	va_start(v, fmt);
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
#pragma clang diagnostic ignored "-Wclass-varargs"
#endif
	pc->peer_log(direction, event, fmt, v);
#ifdef __clang__
#pragma clang diagnostic pop
#endif
	va_end(v);
#else // TORRENT_DISABLE_LOGGING
	TORRENT_UNUSED(direction);
	TORRENT_UNUSED(event);
	TORRENT_UNUSED(fmt);
#endif
}
// allocates one disk block of default_block_size bytes. Returns nullptr
// on failure, in which case a cache trim is triggered. The caller must
// hold the pool mutex; the lock argument documents/asserts that and is
// forwarded to the cleanup path.
char* disk_buffer_pool::allocate_buffer_impl(std::unique_lock<std::mutex>& l
	, char const*)
{
	TORRENT_ASSERT(m_settings_set);
	TORRENT_ASSERT(m_magic == 0x1337);
	TORRENT_ASSERT(l.owns_lock());
	TORRENT_UNUSED(l);

	char* ret = page_malloc(default_block_size);

	if (ret == nullptr)
	{
		// out of memory: flag the pool as over budget and ask the cache
		// to shrink
		m_exceeded_max_size = true;
		m_trigger_cache_trim();
		return nullptr;
	}

	++m_in_use;

#if TORRENT_USE_INVARIANT_CHECKS
	try
	{
		TORRENT_ASSERT(m_buffers_in_use.count(ret) == 0);
		m_buffers_in_use.insert(ret);
	}
	catch (...)
	{
		// the bookkeeping insert itself failed; release the buffer rather
		// than leaking an untracked allocation
		free_buffer_impl(ret, l);
		return nullptr;
	}
#endif

	// start trimming the cache before it is actually full: once usage
	// passes the midpoint between the low watermark and the maximum
	if (m_in_use >= m_low_watermark + (m_max_use - m_low_watermark) / 2
		&& !m_exceeded_max_size)
	{
		m_exceeded_max_size = true;
		m_trigger_cache_trim();
	}

	TORRENT_ASSERT(is_disk_buffer(ret, l));
	return ret;
}
// initializes the session: constructs an internal io_service when the
// caller did not supply one, creates the session implementation, installs
// the default plugins (unless compiled out or not requested via flags),
// and -- for an internally owned executor -- starts the message-pump
// thread.
void session::start(int flags, settings_pack const& pack, io_service* ios)
{
#if defined _MSC_VER && defined TORRENT_DEBUG
	// workaround for microsofts
	// hardware exceptions that makes
	// it hard to debug stuff
	::_set_se_translator(straight_to_debugger);
#endif

	bool internal_executor = ios == NULL;

	if (internal_executor)
	{
		// the user did not provide an executor, we have to use our own
		m_io_service = boost::make_shared<io_service>();
		ios = m_io_service.get();
	}

	m_impl = boost::make_shared<session_impl>(boost::ref(*ios));
	*static_cast<session_handle*>(this) = session_handle(m_impl.get());

#ifndef TORRENT_DISABLE_EXTENSIONS
	if (flags & add_default_plugins)
	{
		add_extension(create_ut_pex_plugin);
		add_extension(create_ut_metadata_plugin);
		add_extension(create_smart_ban_plugin);
	}
#else
	TORRENT_UNUSED(flags);
#endif

	m_impl->start_session(pack);

	if (internal_executor)
	{
		// start a thread for the message pump. The bound raw io_service
		// pointer stays valid as long as m_io_service (set above) does --
		// the thread must be joined before this session is torn down
		m_thread = boost::make_shared<thread>(boost::bind(&io_service::run
			, m_io_service.get()));
	}
}
// constructs the concrete socket type S inside this socket_type wrapper.
// ios must be the same io_service this object was created with; it is
// only referenced by the assert (hence TORRENT_UNUSED for release builds).
void instantiate(io_service& ios, void* userdata = 0)
{
	TORRENT_UNUSED(ios);
	TORRENT_ASSERT(&ios == &m_io_service);
	construct(socket_type_int_impl<S>::value, userdata);
}
// picks blocks from torrent t to request from peer connection c and issues
// the requests, including endgame-mode handling of "busy" blocks that other
// peers are already downloading.
// the case where ignore_peer is motivated is if two peers
// have only one piece that we don't have, and it's the
// same piece for both peers. Then they might get into an
// infinite loop, fighting to request the same blocks.
// returns false if the function is aborted by an early-exit
// condition.
bool request_a_block(torrent& t, peer_connection& c)
{
	// states in which no request can or should be made
	if (t.is_seed()) return false;
	if (c.no_download()) return false;
	if (t.upload_mode()) return false;
	if (c.is_disconnecting()) return false;

	// don't request pieces before we have the metadata
	if (!t.valid_metadata()) return false;

	// don't request pieces before the peer is properly
	// initialized after we have the metadata
	if (!t.are_files_checked()) return false;

	TORRENT_ASSERT(c.peer_info_struct() != 0
		|| c.type() != peer_connection::bittorrent_connection);

	bool time_critical_mode = t.num_time_critical_pieces() > 0;

	int desired_queue_size = c.desired_queue_size();

	// in time critical mode, only have 1 outstanding request at a time
	// via normal requests
	if (time_critical_mode)
		desired_queue_size = (std::min)(1, desired_queue_size);

	// how many more requests are needed to fill the queue up to the target
	int num_requests = desired_queue_size
		- int(c.download_queue().size())
		- int(c.request_queue().size());

#ifndef TORRENT_DISABLE_LOGGING
	c.peer_log(peer_log_alert::info, "PIECE_PICKER"
		, "dlq: %d rqq: %d target: %d req: %d engame: %d"
		, int(c.download_queue().size()), int(c.request_queue().size())
		, c.desired_queue_size(), num_requests, c.endgame());
#endif

	TORRENT_ASSERT(c.desired_queue_size() > 0);
	// if our request queue is already full, we
	// don't have to make any new requests yet
	if (num_requests <= 0) return false;

	t.need_picker();

	piece_picker& p = t.picker();
	std::vector<piece_block> interesting_pieces;
	interesting_pieces.reserve(100);

	int prefer_contiguous_blocks = c.prefer_contiguous_blocks();

	// fast peers get whole pieces: if this peer downloads more than a
	// whole piece within the configured threshold, prefer contiguous
	// (whole-piece) picking
	if (prefer_contiguous_blocks == 0 && !time_critical_mode)
	{
		int blocks_per_piece = t.torrent_file().piece_length() / t.block_size();
		prefer_contiguous_blocks = c.statistics().download_payload_rate()
			* t.settings().get_int(settings_pack::whole_pieces_threshold)
			> t.torrent_file().piece_length() ? blocks_per_piece : 0;
	}

	// if we prefer whole pieces, the piece picker will pick at least
	// the number of blocks we want, but it will try to make the picked
	// blocks be from whole pieces, possibly by returning more blocks
	// than we requested.
#ifdef TORRENT_DEBUG
	error_code ec;
	TORRENT_ASSERT(c.remote() == c.get_socket()->remote_endpoint(ec) || ec);
#endif

	aux::session_interface& ses = t.session();

	std::vector<pending_block> const& dq = c.download_queue();
	std::vector<pending_block> const& rq = c.request_queue();

	std::vector<int> const& suggested = c.suggested_pieces();
	bitfield const* bits = &c.get_bitfield();
	bitfield fast_mask;

	if (c.has_peer_choked())
	{
		// if we are choked we can only pick pieces from the
		// allowed fast set. The allowed fast set is sorted
		// in ascending priority order
		std::vector<int> const& allowed_fast = c.allowed_fast();

		// build a bitmask with only the allowed pieces in it
		fast_mask.resize(c.get_bitfield().size(), false);
		for (std::vector<int>::const_iterator i = allowed_fast.begin()
			, end(allowed_fast.end()); i != end; ++i)
			if ((*bits)[*i]) fast_mask.set_bit(*i);
		bits = &fast_mask;
	}

	// picks the interesting pieces from this peer
	// the integer is the number of pieces that
	// should be guaranteed to be available for download
	// (if num_requests is too big, too many pieces are
	// picked and cpu-time is wasted)
	// the last argument is if we should prefer whole pieces
	// for this peer. If we're downloading one piece in 20 seconds
	// then use this mode.
	boost::uint32_t flags = p.pick_pieces(*bits, interesting_pieces
		, num_requests, prefer_contiguous_blocks, c.peer_info_struct()
		, c.picker_options(), suggested, t.num_peers()
		, ses.stats_counters());

#ifndef TORRENT_DISABLE_LOGGING
	if (t.alerts().should_post<picker_log_alert>()
		&& !interesting_pieces.empty())
	{
		t.alerts().emplace_alert<picker_log_alert>(t.get_handle(), c.remote()
			, c.pid(), flags, &interesting_pieces[0], int(interesting_pieces.size()));
	}
	c.peer_log(peer_log_alert::info, "PIECE_PICKER"
		, "prefer_contiguous: %d picked: %d"
		, prefer_contiguous_blocks, int(interesting_pieces.size()));
#else
	TORRENT_UNUSED(flags);
#endif

	// if the number of pieces we have + the number of pieces
	// we're requesting from is less than the number of pieces
	// in the torrent, there are still some unrequested pieces
	// and we're not strictly speaking in end-game mode yet
	// also, if we already have at least one outstanding
	// request, we shouldn't pick any busy pieces either
	// in time critical mode, it's OK to request busy blocks
	bool dont_pick_busy_blocks =
		((ses.settings().get_bool(settings_pack::strict_end_game_mode)
			&& p.get_download_queue_size() < p.num_want_left())
			|| dq.size() + rq.size() > 0)
		&& !time_critical_mode;

	// this is filled with an interesting piece
	// that some other peer is currently downloading
	piece_block busy_block = piece_block::invalid;

	for (std::vector<piece_block>::iterator i = interesting_pieces.begin();
		i != interesting_pieces.end(); ++i)
	{
		if (prefer_contiguous_blocks == 0 && num_requests <= 0) break;

		if (time_critical_mode && p.piece_priority(i->piece_index) != 7)
		{
			// assume the subsequent pieces are not prio 7 and
			// be done
			break;
		}

		int num_block_requests = p.num_peers(*i);
		if (num_block_requests > 0)
		{
			// have we picked enough pieces?
			if (num_requests <= 0) break;

			// this block is busy. This means all the following blocks
			// in the interesting_pieces list are busy as well, we might
			// as well just exit the loop
			if (dont_pick_busy_blocks) break;

			TORRENT_ASSERT(p.num_peers(*i) > 0);
			busy_block = *i;
			continue;
		}

		TORRENT_ASSERT(p.num_peers(*i) == 0);

		// don't request pieces we already have in our request queue
		// This happens when pieces time out or the peer sends us
		// pieces we didn't request. Those aren't marked in the
		// piece picker, but we still keep track of them in the
		// download queue
		if (std::find_if(dq.begin(), dq.end(), has_block(*i)) != dq.end()
			|| std::find_if(rq.begin(), rq.end(), has_block(*i)) != rq.end())
		{
#ifdef TORRENT_DEBUG
			std::vector<pending_block>::const_iterator j
				= std::find_if(dq.begin(), dq.end(), has_block(*i));
			if (j != dq.end()) TORRENT_ASSERT(j->timed_out || j->not_wanted);
#endif
#ifndef TORRENT_DISABLE_LOGGING
			c.peer_log(peer_log_alert::info, "PIECE_PICKER"
				, "not_picking: %d,%d already in queue"
				, i->piece_index, i->block_index);
#endif
			continue;
		}

		// ok, we found a piece that's not being downloaded
		// by somebody else. request it from this peer
		// and return
		if (!c.add_request(*i, 0)) continue;
		TORRENT_ASSERT(p.num_peers(*i) == 1);
		TORRENT_ASSERT(p.is_requested(*i));
		num_requests--;
	}

	// we have picked as many blocks as we should
	// we're done!
	if (num_requests <= 0)
	{
		// since we could pick as many blocks as we
		// requested without having to resort to picking
		// busy ones, we're not in end-game mode
		c.set_endgame(false);
		return true;
	}

	// we did not pick as many pieces as we wanted, because
	// there aren't enough. This means we're in end-game mode
	// as long as we have at least one request outstanding,
	// we shouldn't pick another piece
	// if we are attempting to download 'allowed' pieces
	// and can't find any, that doesn't count as end-game
	if (!c.has_peer_choked()) c.set_endgame(true);

	// if we don't have any potential busy blocks to request
	// or if we already have outstanding requests, don't
	// pick a busy piece
	if (busy_block == piece_block::invalid
		|| dq.size() + rq.size() > 0)
	{
		return true;
	}

#ifdef TORRENT_DEBUG
	piece_picker::downloading_piece st;
	p.piece_info(busy_block.piece_index, st);
	TORRENT_ASSERT(st.requested + st.finished + st.writing
		== p.blocks_in_piece(busy_block.piece_index));
#endif

	TORRENT_ASSERT(p.is_requested(busy_block));
	TORRENT_ASSERT(!p.is_downloaded(busy_block));
	TORRENT_ASSERT(!p.is_finished(busy_block));
	TORRENT_ASSERT(p.num_peers(busy_block) > 0);

	// endgame: request a block that another peer is also downloading
	c.add_request(busy_block, peer_connection::req_busy);
	return true;
}
// constructs the concrete stream type inside s according to the proxy
// settings (and optionally wraps it in SSL when an ssl_context is given).
// Returns false only for an unrecognized proxy type.
// TODO: 2 peer_connection and tracker_connection should probably be flags
// TODO: 2 move this function into libtorrent::aux namespace
bool instantiate_connection(io_service& ios
	, aux::proxy_settings const& ps, socket_type& s
	, void* ssl_context
	, utp_socket_manager* sm
	, bool peer_connection
	, bool tracker_connection)
{
#ifndef TORRENT_USE_OPENSSL
	TORRENT_UNUSED(ssl_context);
#endif

	if (sm)
	{
		// a uTP socket manager was provided: build a uTP stream and hand it
		// a fresh uTP socket implementation
		utp_stream* str;
#ifdef TORRENT_USE_OPENSSL
		if (ssl_context)
		{
			s.instantiate<ssl_stream<utp_stream> >(ios, ssl_context);
			str = &s.get<ssl_stream<utp_stream> >()->next_layer();
		}
		else
#endif
		{
			s.instantiate<utp_stream>(ios);
			str = s.get<utp_stream>();
		}
		str->set_impl(sm->new_utp_socket(str));
	}
#if TORRENT_USE_I2P
	else if (ps.type == settings_pack::i2p_proxy)
	{
		// it doesn't make any sense to try ssl over i2p
		TORRENT_ASSERT(ssl_context == 0);
		s.instantiate<i2p_stream>(ios);
		s.get<i2p_stream>()->set_proxy(ps.hostname, ps.port);
	}
#endif
	else if (ps.type == settings_pack::none
		|| (peer_connection && !ps.proxy_peer_connections)
		|| (tracker_connection && !ps.proxy_tracker_connections))
	{
		// no proxying for this connection kind: plain TCP (optionally SSL)
#ifdef TORRENT_USE_OPENSSL
		if (ssl_context)
		{
			s.instantiate<ssl_stream<tcp::socket> >(ios, ssl_context);
		}
		else
#endif
		{
			s.instantiate<tcp::socket>(ios);
		}
	}
	else if (ps.type == settings_pack::http
		|| ps.type == settings_pack::http_pw)
	{
		http_stream* str;
#ifdef TORRENT_USE_OPENSSL
		if (ssl_context)
		{
			s.instantiate<ssl_stream<http_stream> >(ios, ssl_context);
			str = &s.get<ssl_stream<http_stream> >()->next_layer();
		}
		else
#endif
		{
			s.instantiate<http_stream>(ios);
			str = s.get<http_stream>();
		}
		str->set_proxy(ps.hostname, ps.port);
		if (ps.type == settings_pack::http_pw)
			str->set_username(ps.username, ps.password);
	}
	else if (ps.type == settings_pack::socks5
		|| ps.type == settings_pack::socks5_pw
		|| ps.type == settings_pack::socks4)
	{
		socks5_stream* str;
#ifdef TORRENT_USE_OPENSSL
		if (ssl_context)
		{
			s.instantiate<ssl_stream<socks5_stream> >(ios, ssl_context);
			str = &s.get<ssl_stream<socks5_stream> >()->next_layer();
		}
		else
#endif
		{
			s.instantiate<socks5_stream>(ios);
			str = s.get<socks5_stream>();
		}
		str->set_proxy(ps.hostname, ps.port);
		if (ps.type == settings_pack::socks5_pw)
			str->set_username(ps.username, ps.password);
		if (ps.type == settings_pack::socks4)
			str->set_version(4);
	}
	else
	{
		TORRENT_ASSERT_FAIL_VAL(ps.type);
		return false;
	}
	return true;
}
// Enumerates this machine's network interfaces, filling in for each one its
// address, netmask (where obtainable), name and MTU. Exactly one platform
// branch is compiled in:
//   - TORRENT_BUILD_SIMULATOR: synthesize one "eth0" per simulated IP
//   - TORRENT_USE_IFADDRS:     getifaddrs() + SIOCGIFMTU ioctl (Linux et al.)
//   - TORRENT_USE_IFCONF:      SIOCGIFCONF ioctl (MacOS X, BSD, solaris, OS/2)
//   - TORRENT_USE_GETADAPTERSADDRESSES: Windows iphlpapi (dynamically loaded),
//     falling back to WSAIoctl(SIO_GET_INTERFACE_LIST)
//   - otherwise: best-effort guess via a resolver lookup of the host name
// On failure, ec is set and the (possibly partial) result is returned.
std::vector<ip_interface> enum_net_interfaces(io_service& ios, error_code& ec)
{
	TORRENT_UNUSED(ios); // this may be unused depending on configuration
	std::vector<ip_interface> ret;
#if defined TORRENT_BUILD_SIMULATOR
	TORRENT_UNUSED(ec);
	// the simulator knows all the IPs; report each as its own /32 "eth0"
	std::vector<address> ips = ios.get_ips();
	for (int i = 0; i < int(ips.size()); ++i)
	{
		ip_interface wan;
		wan.interface_address = ips[i];
		wan.netmask = address_v4::from_string("255.255.255.255");
		strcpy(wan.name, "eth0");
		wan.mtu = ios.sim().config().path_mtu(ips[i], ips[i]);
		ret.push_back(wan);
	}
#elif TORRENT_USE_IFADDRS
	// a datagram socket is needed only as a handle for the MTU ioctl below
	int s = socket(AF_INET, SOCK_DGRAM, 0);
	if (s < 0)
	{
		ec = error_code(errno, boost::asio::error::system_category);
		return ret;
	}
	ifaddrs *ifaddr;
	if (getifaddrs(&ifaddr) == -1)
	{
		ec = error_code(errno, boost::asio::error::system_category);
		close(s);
		return ret;
	}

	for (ifaddrs* ifa = ifaddr; ifa; ifa = ifa->ifa_next)
	{
		// entries without an address (e.g. some tunnel devices) are skipped,
		// as are interfaces that are administratively down
		if (ifa->ifa_addr == 0) continue;
		if ((ifa->ifa_flags & IFF_UP) == 0) continue;

		int family = ifa->ifa_addr->sa_family;
		if (family == AF_INET
#if TORRENT_USE_IPV6
			|| family == AF_INET6
#endif
			)
		{
			ip_interface iface;
			if (iface_from_ifaddrs(ifa, iface))
			{
				ifreq req;
				memset(&req, 0, sizeof(req));
				// -1 to leave a null terminator
				strncpy(req.ifr_name, iface.name, IF_NAMESIZE - 1);
				if (ioctl(s, siocgifmtu, &req) < 0)
				{
					// an interface we can't query the MTU of is dropped
					// from the result rather than failing the whole call
					continue;
				}
				iface.mtu = req.ifr_mtu;
				ret.push_back(iface);
			}
		}
	}
	close(s);
	freeifaddrs(ifaddr);
// MacOS X, BSD and solaris
#elif TORRENT_USE_IFCONF
	int s = socket(AF_INET, SOCK_DGRAM, 0);
	if (s < 0)
	{
		ec = error_code(errno, boost::asio::error::system_category);
		return ret;
	}
	ifconf ifc;
	// make sure the buffer is aligned to hold ifreq structs
	ifreq buf[40];
	ifc.ifc_len = sizeof(buf);
	ifc.ifc_buf = (char*)buf;
	if (ioctl(s, SIOCGIFCONF, &ifc) < 0)
	{
		ec = error_code(errno, boost::asio::error::system_category);
		close(s);
		return ret;
	}

	char *ifr = (char*)ifc.ifc_req;
	int remaining = ifc.ifc_len;

	// entries in the ifconf buffer are variable-length on BSD (sa_len),
	// fixed sizeof(ifreq) elsewhere; walk them one by one
	while (remaining > 0)
	{
		ifreq const& item = *reinterpret_cast<ifreq*>(ifr);
#ifdef _SIZEOF_ADDR_IFREQ
		int current_size = _SIZEOF_ADDR_IFREQ(item);
#elif defined TORRENT_BSD
		int current_size = item.ifr_addr.sa_len + IFNAMSIZ;
#else
		int current_size = sizeof(ifreq);
#endif
		if (remaining < current_size) break;
		if (item.ifr_addr.sa_family == AF_INET
#if TORRENT_USE_IPV6
			|| item.ifr_addr.sa_family == AF_INET6
#endif
			)
		{
			ip_interface iface;
			iface.interface_address = sockaddr_to_address(&item.ifr_addr);
			// NOTE(review): ifr_name is not guaranteed null-terminated when
			// the name fills the field; presumably iface.name is at least
			// IFNAMSIZ bytes -- confirm against the ip_interface declaration
			strcpy(iface.name, item.ifr_name);

			ifreq req;
			memset(&req, 0, sizeof(req));
			// -1 to leave a null terminator
			strncpy(req.ifr_name, item.ifr_name, IF_NAMESIZE - 1);
			if (ioctl(s, siocgifmtu, &req) < 0)
			{
				ec = error_code(errno, boost::asio::error::system_category);
				close(s);
				return ret;
			}
#ifndef TORRENT_OS2
			iface.mtu = req.ifr_mtu;
#else
			iface.mtu = req.ifr_metric; // according to tcp/ip reference
#endif
			memset(&req, 0, sizeof(req));
			strncpy(req.ifr_name, item.ifr_name, IF_NAMESIZE - 1);
			if (ioctl(s, SIOCGIFNETMASK, &req) < 0)
			{
#if TORRENT_USE_IPV6
				if (iface.interface_address.is_v6())
				{
					// this is expected to fail (at least on MacOS X)
					iface.netmask = address_v6::any();
				}
				else
#endif
				{
					ec = error_code(errno, boost::asio::error::system_category);
					close(s);
					return ret;
				}
			}
			else
			{
				iface.netmask = sockaddr_to_address(&req.ifr_addr
					, item.ifr_addr.sa_family);
			}
			ret.push_back(iface);
		}

		ifr += current_size;
		remaining -= current_size;
	}
	close(s);

#elif TORRENT_USE_GETADAPTERSADDRESSES

#if _WIN32_WINNT >= 0x0501
	// Load Iphlpapi library
	// (loaded dynamically so the binary still runs on systems without it)
	HMODULE iphlp = LoadLibraryA("Iphlpapi.dll");
	if (iphlp)
	{
		// Get GetAdaptersAddresses() pointer
		typedef ULONG (WINAPI *GetAdaptersAddresses_t)(ULONG,ULONG,PVOID,PIP_ADAPTER_ADDRESSES,PULONG);
		GetAdaptersAddresses_t GetAdaptersAddresses = (GetAdaptersAddresses_t)GetProcAddress(
			iphlp, "GetAdaptersAddresses");

		if (GetAdaptersAddresses)
		{
			PIP_ADAPTER_ADDRESSES adapter_addresses = 0;
			ULONG out_buf_size = 0;
			// first call with a null buffer to learn the required size;
			// ERROR_BUFFER_OVERFLOW is the expected outcome here
			if (GetAdaptersAddresses(AF_UNSPEC, GAA_FLAG_SKIP_MULTICAST | GAA_FLAG_SKIP_DNS_SERVER
				| GAA_FLAG_SKIP_ANYCAST, NULL, adapter_addresses, &out_buf_size)
				!= ERROR_BUFFER_OVERFLOW)
			{
				FreeLibrary(iphlp);
				ec = boost::asio::error::operation_not_supported;
				return std::vector<ip_interface>();
			}

			adapter_addresses = (IP_ADAPTER_ADDRESSES*)malloc(out_buf_size);
			if (!adapter_addresses)
			{
				FreeLibrary(iphlp);
				ec = boost::asio::error::no_memory;
				return std::vector<ip_interface>();
			}

			if (GetAdaptersAddresses(AF_UNSPEC, GAA_FLAG_SKIP_MULTICAST | GAA_FLAG_SKIP_DNS_SERVER
				| GAA_FLAG_SKIP_ANYCAST, NULL, adapter_addresses, &out_buf_size)
				== NO_ERROR)
			{
				for (PIP_ADAPTER_ADDRESSES adapter = adapter_addresses;
					adapter != 0; adapter = adapter->Next)
				{
					ip_interface r;
					strncpy(r.name, adapter->AdapterName, sizeof(r.name));
					r.name[sizeof(r.name)-1] = 0;
					r.mtu = adapter->Mtu;
					// one ip_interface entry is emitted per unicast address,
					// all sharing the adapter's name and MTU
					IP_ADAPTER_UNICAST_ADDRESS* unicast = adapter->FirstUnicastAddress;
					while (unicast)
					{
						r.interface_address = sockaddr_to_address(unicast->Address.lpSockaddr);
						ret.push_back(r);
						unicast = unicast->Next;
					}
				}
			}

			// Free memory
			free(adapter_addresses);
			FreeLibrary(iphlp);
			return ret;
		}
		FreeLibrary(iphlp);
	}
#endif

	// fallback for when iphlpapi / GetAdaptersAddresses is unavailable
	SOCKET s = socket(AF_INET, SOCK_DGRAM, 0);
	if (s == SOCKET_ERROR)
	{
		ec = error_code(WSAGetLastError(), boost::asio::error::system_category);
		return ret;
	}

	INTERFACE_INFO buffer[30];
	DWORD size;

	if (WSAIoctl(s, SIO_GET_INTERFACE_LIST, 0, 0, buffer,
		sizeof(buffer), &size, 0, 0) != 0)
	{
		ec = error_code(WSAGetLastError(), boost::asio::error::system_category);
		closesocket(s);
		return ret;
	}
	closesocket(s);

	int n = size / sizeof(INTERFACE_INFO);

	ip_interface iface;
	for (int i = 0; i < n; ++i)
	{
		iface.interface_address = sockaddr_to_address(&buffer[i].iiAddress.Address);
		if (iface.interface_address == address_v4::any()) continue;
		iface.netmask = sockaddr_to_address(&buffer[i].iiNetmask.Address
			, iface.interface_address.is_v4() ? AF_INET : AF_INET6);
		iface.name[0] = 0;
		iface.mtu = 1500; // how to get the MTU?
		ret.push_back(iface);
	}

#else
#ifdef _MSC_VER
#pragma message ( "THIS OS IS NOT RECOGNIZED, enum_net_interfaces WILL PROBABLY NOT WORK" )
#else
#warning "THIS OS IS NOT RECOGNIZED, enum_net_interfaces WILL PROBABLY NOT WORK"
#endif

	// make a best guess of the interface we're using and its IP
	udp::resolver r(ios);
	udp::resolver::iterator i = r.resolve(udp::resolver::query(
		boost::asio::ip::host_name(ec), "0"), ec);
	if (ec) return ret;
	ip_interface iface;
	for (;i != udp::resolver_iterator(); ++i)
	{
		iface.interface_address = i->endpoint().address();
		iface.mtu = 1500;
		if (iface.interface_address.is_v4())
			iface.netmask = address_v4::netmask(iface.interface_address.to_v4());
		ret.push_back(iface);
	}
#endif
	return ret;
}
std::vector<ip_route> enum_routes(io_service& ios, error_code& ec) { std::vector<ip_route> ret; TORRENT_UNUSED(ios); #ifdef TORRENT_BUILD_SIMULATOR TORRENT_UNUSED(ec); std::vector<address> ips = ios.get_ips(); for (int i = 0; i < int(ips.size()); ++i) { ip_route r; if (ips[i].is_v4()) { r.destination = address_v4(); r.netmask = address_v4::from_string("255.255.255.0"); address_v4::bytes_type b = ips[i].to_v4().to_bytes(); b[3] = 1; r.gateway = address_v4(b); } else { r.destination = address_v6(); r.netmask = address_v6::from_string("FFFF:FFFF:FFFF:FFFF::0"); address_v6::bytes_type b = ips[i].to_v6().to_bytes(); b[14] = 1; r.gateway = address_v6(b); } strcpy(r.name, "eth0"); r.mtu = ios.sim().config().path_mtu(ips[i], ips[i]); ret.push_back(r); } #elif TORRENT_USE_SYSCTL /* struct rt_msg { rt_msghdr m_rtm; char buf[512]; }; rt_msg m; int len = sizeof(rt_msg); bzero(&m, len); m.m_rtm.rtm_type = RTM_GET; m.m_rtm.rtm_flags = RTF_UP | RTF_GATEWAY; m.m_rtm.rtm_version = RTM_VERSION; m.m_rtm.rtm_addrs = RTA_DST | RTA_GATEWAY | RTA_NETMASK; m.m_rtm.rtm_seq = 0; m.m_rtm.rtm_msglen = len; int s = socket(PF_ROUTE, SOCK_RAW, AF_UNSPEC); if (s == -1) { ec = error_code(errno, boost::asio::error::system_category); return std::vector<ip_route>(); } int n = write(s, &m, len); if (n == -1) { ec = error_code(errno, boost::asio::error::system_category); close(s); return std::vector<ip_route>(); } else if (n != len) { ec = boost::asio::error::operation_not_supported; close(s); return std::vector<ip_route>(); } bzero(&m, len); n = read(s, &m, len); if (n == -1) { ec = error_code(errno, boost::asio::error::system_category); close(s); return std::vector<ip_route>(); } for (rt_msghdr* ptr = &m.m_rtm; (char*)ptr < ((char*)&m.m_rtm) + n; ptr = (rt_msghdr*)(((char*)ptr) + ptr->rtm_msglen)) { std::cout << " rtm_msglen: " << ptr->rtm_msglen << std::endl; std::cout << " rtm_type: " << ptr->rtm_type << std::endl; if (ptr->rtm_errno) { ec = error_code(ptr->rtm_errno, 
boost::asio::error::system_category); return std::vector<ip_route>(); } if (m.m_rtm.rtm_flags & RTF_UP == 0 || m.m_rtm.rtm_flags & RTF_GATEWAY == 0) { ec = boost::asio::error::operation_not_supported; return address_v4::any(); } if (ptr->rtm_addrs & RTA_DST == 0 || ptr->rtm_addrs & RTA_GATEWAY == 0 || ptr->rtm_addrs & RTA_NETMASK == 0) { ec = boost::asio::error::operation_not_supported; return std::vector<ip_route>(); } if (ptr->rtm_msglen > len - ((char*)ptr - ((char*)&m.m_rtm))) { ec = boost::asio::error::operation_not_supported; return std::vector<ip_route>(); } int min_len = sizeof(rt_msghdr) + 2 * sizeof(sockaddr_in); if (m.m_rtm.rtm_msglen < min_len) { ec = boost::asio::error::operation_not_supported; return std::vector<ip_route>(); } ip_route r; // destination char* p = m.buf; sockaddr_in* sin = (sockaddr_in*)p; r.destination = sockaddr_to_address((sockaddr*)p); // gateway p += sin->sin_len; sin = (sockaddr_in*)p; r.gateway = sockaddr_to_address((sockaddr*)p); // netmask p += sin->sin_len; sin = (sockaddr_in*)p; r.netmask = sockaddr_to_address((sockaddr*)p); ret.push_back(r); } close(s); */ int mib[6] = { CTL_NET, PF_ROUTE, 0, AF_UNSPEC, NET_RT_DUMP, 0}; size_t needed = 0; #ifdef TORRENT_OS2 if (__libsocket_sysctl(mib, 6, 0, &needed, 0, 0) < 0) #else if (sysctl(mib, 6, 0, &needed, 0, 0) < 0) #endif { ec = error_code(errno, boost::asio::error::system_category); return std::vector<ip_route>(); } if (needed <= 0) { return std::vector<ip_route>(); } boost::scoped_array<char> buf(new (std::nothrow) char[needed]); if (buf.get() == 0) { ec = boost::asio::error::no_memory; return std::vector<ip_route>(); } #ifdef TORRENT_OS2 if (__libsocket_sysctl(mib, 6, buf.get(), &needed, 0, 0) < 0) #else if (sysctl(mib, 6, buf.get(), &needed, 0, 0) < 0) #endif { ec = error_code(errno, boost::asio::error::system_category); return std::vector<ip_route>(); } char* end = buf.get() + needed; int s = socket(AF_INET, SOCK_DGRAM, 0); if (s < 0) { ec = error_code(errno, 
boost::asio::error::system_category); return std::vector<ip_route>(); } rt_msghdr* rtm; for (char* next = buf.get(); next < end; next += rtm->rtm_msglen) { rtm = reinterpret_cast<rt_msghdr*>(next); if (rtm->rtm_version != RTM_VERSION) continue; ip_route r; if (parse_route(s, rtm, &r)) ret.push_back(r); } close(s); #elif TORRENT_USE_GETIPFORWARDTABLE /* move this to enum_net_interfaces // Load Iphlpapi library HMODULE iphlp = LoadLibraryA("Iphlpapi.dll"); if (!iphlp) { ec = boost::asio::error::operation_not_supported; return std::vector<ip_route>(); } // Get GetAdaptersInfo() pointer typedef DWORD (WINAPI *GetAdaptersInfo_t)(PIP_ADAPTER_INFO, PULONG); GetAdaptersInfo_t GetAdaptersInfo = (GetAdaptersInfo_t)GetProcAddress(iphlp, "GetAdaptersInfo"); if (!GetAdaptersInfo) { FreeLibrary(iphlp); ec = boost::asio::error::operation_not_supported; return std::vector<ip_route>(); } PIP_ADAPTER_INFO adapter_info = 0; ULONG out_buf_size = 0; if (GetAdaptersInfo(adapter_info, &out_buf_size) != ERROR_BUFFER_OVERFLOW) { FreeLibrary(iphlp); ec = boost::asio::error::operation_not_supported; return std::vector<ip_route>(); } adapter_info = (IP_ADAPTER_INFO*)malloc(out_buf_size); if (!adapter_info) { FreeLibrary(iphlp); ec = boost::asio::error::no_memory; return std::vector<ip_route>(); } if (GetAdaptersInfo(adapter_info, &out_buf_size) == NO_ERROR) { for (PIP_ADAPTER_INFO adapter = adapter_info; adapter != 0; adapter = adapter->Next) { ip_route r; r.destination = address::from_string(adapter->IpAddressList.IpAddress.String, ec); r.gateway = address::from_string(adapter->GatewayList.IpAddress.String, ec); r.netmask = address::from_string(adapter->IpAddressList.IpMask.String, ec); strncpy(r.name, adapter->AdapterName, sizeof(r.name)); if (ec) { ec = error_code(); continue; } ret.push_back(r); } } // Free memory free(adapter_info); FreeLibrary(iphlp); */ // Load Iphlpapi library HMODULE iphlp = LoadLibraryA("Iphlpapi.dll"); if (!iphlp) { ec = boost::asio::error::operation_not_supported; 
return std::vector<ip_route>(); } typedef DWORD (WINAPI *GetIfEntry_t)(PMIB_IFROW pIfRow); GetIfEntry_t GetIfEntry = (GetIfEntry_t)GetProcAddress(iphlp, "GetIfEntry"); if (!GetIfEntry) { ec = boost::asio::error::operation_not_supported; return std::vector<ip_route>(); } #if _WIN32_WINNT >= 0x0600 typedef DWORD (WINAPI *GetIpForwardTable2_t)( ADDRESS_FAMILY, PMIB_IPFORWARD_TABLE2*); typedef void (WINAPI *FreeMibTable_t)(PVOID Memory); GetIpForwardTable2_t GetIpForwardTable2 = (GetIpForwardTable2_t)GetProcAddress( iphlp, "GetIpForwardTable2"); FreeMibTable_t FreeMibTable = (FreeMibTable_t)GetProcAddress( iphlp, "FreeMibTable"); if (GetIpForwardTable2 && FreeMibTable) { MIB_IPFORWARD_TABLE2* routes = NULL; int res = GetIpForwardTable2(AF_UNSPEC, &routes); if (res == NO_ERROR) { for (int i = 0; i < routes->NumEntries; ++i) { ip_route r; r.gateway = sockaddr_to_address((const sockaddr*)&routes->Table[i].NextHop); r.destination = sockaddr_to_address( (const sockaddr*)&routes->Table[i].DestinationPrefix.Prefix); r.netmask = build_netmask(routes->Table[i].SitePrefixLength , routes->Table[i].DestinationPrefix.Prefix.si_family); MIB_IFROW ifentry; ifentry.dwIndex = routes->Table[i].InterfaceIndex; if (GetIfEntry(&ifentry) == NO_ERROR) { wcstombs(r.name, ifentry.wszName, sizeof(r.name)); r.mtu = ifentry.dwMtu; ret.push_back(r); } } } if (routes) FreeMibTable(routes); FreeLibrary(iphlp); return ret; } #endif // Get GetIpForwardTable() pointer typedef DWORD (WINAPI *GetIpForwardTable_t)(PMIB_IPFORWARDTABLE pIpForwardTable,PULONG pdwSize,BOOL bOrder); GetIpForwardTable_t GetIpForwardTable = (GetIpForwardTable_t)GetProcAddress( iphlp, "GetIpForwardTable"); if (!GetIpForwardTable) { FreeLibrary(iphlp); ec = boost::asio::error::operation_not_supported; return std::vector<ip_route>(); } MIB_IPFORWARDTABLE* routes = NULL; ULONG out_buf_size = 0; if (GetIpForwardTable(routes, &out_buf_size, FALSE) != ERROR_INSUFFICIENT_BUFFER) { FreeLibrary(iphlp); ec = 
boost::asio::error::operation_not_supported; return std::vector<ip_route>(); } routes = (MIB_IPFORWARDTABLE*)malloc(out_buf_size); if (!routes) { FreeLibrary(iphlp); ec = boost::asio::error::no_memory; return std::vector<ip_route>(); } if (GetIpForwardTable(routes, &out_buf_size, FALSE) == NO_ERROR) { for (int i = 0; i < routes->dwNumEntries; ++i) { ip_route r; r.destination = inaddr_to_address((in_addr const*)&routes->table[i].dwForwardDest); r.netmask = inaddr_to_address((in_addr const*)&routes->table[i].dwForwardMask); r.gateway = inaddr_to_address((in_addr const*)&routes->table[i].dwForwardNextHop); MIB_IFROW ifentry; ifentry.dwIndex = routes->table[i].dwForwardIfIndex; if (GetIfEntry(&ifentry) == NO_ERROR) { wcstombs(r.name, ifentry.wszName, sizeof(r.name)); r.name[sizeof(r.name)-1] = 0; r.mtu = ifentry.dwMtu; ret.push_back(r); } } } // Free memory free(routes); FreeLibrary(iphlp); #elif TORRENT_USE_NETLINK enum { BUFSIZE = 8192 }; int sock = socket(PF_ROUTE, SOCK_DGRAM, NETLINK_ROUTE); if (sock < 0) { ec = error_code(errno, boost::asio::error::system_category); return std::vector<ip_route>(); } int seq = 0; char msg[BUFSIZE]; memset(msg, 0, BUFSIZE); nlmsghdr* nl_msg = (nlmsghdr*)msg; nl_msg->nlmsg_len = NLMSG_LENGTH(sizeof(rtmsg)); nl_msg->nlmsg_type = RTM_GETROUTE; nl_msg->nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST; nl_msg->nlmsg_seq = seq++; nl_msg->nlmsg_pid = getpid(); if (send(sock, nl_msg, nl_msg->nlmsg_len, 0) < 0) { ec = error_code(errno, boost::asio::error::system_category); close(sock); return std::vector<ip_route>(); } int len = read_nl_sock(sock, msg, BUFSIZE, seq, getpid()); if (len < 0) { ec = error_code(errno, boost::asio::error::system_category); close(sock); return std::vector<ip_route>(); } int s = socket(AF_INET, SOCK_DGRAM, 0); if (s < 0) { ec = error_code(errno, boost::asio::error::system_category); return std::vector<ip_route>(); } for (; NLMSG_OK(nl_msg, len); nl_msg = NLMSG_NEXT(nl_msg, len)) { ip_route r; if (parse_route(s, nl_msg, 
&r)) ret.push_back(r); } close(s); close(sock); #endif return ret; }