// Counts the number of consecutive one-bits at the trailing (least
// significant) end of the buffer, treating each 32-bit word as being in
// network byte order. Uses a hardware bit-scan where available.
int count_trailing_ones_hw(span<std::uint32_t const> buf)
{
	std::uint32_t const* words = buf.data();
	auto const num_words = int(buf.size());
	TORRENT_ASSERT(num_words >= 0);
	TORRENT_ASSERT(words != nullptr);

	// walk from the last word towards the first; every all-ones word
	// contributes 32 trailing one-bits
	for (int idx = num_words; idx > 0;)
	{
		--idx;
		if (words[idx] == 0xffffffff) continue;
		// this word has at least one zero bit, so the inverted value is
		// non-zero and the bit-scan below is well-defined
#if TORRENT_HAS_BUILTIN_CTZ
		std::uint32_t const inverted = ~aux::network_to_host(words[idx]);
		return (num_words - idx - 1) * 32 + __builtin_ctz(inverted);
#elif defined _MSC_VER
		std::uint32_t const inverted = ~aux::network_to_host(words[idx]);
		DWORD bit;
		_BitScanForward(&bit, inverted);
		return (num_words - idx - 1) * 32 + bit;
#else
		// no hardware bit-scan available on this toolchain
		TORRENT_ASSERT_FAIL();
		return -1;
#endif
	}
	// every word was all ones
	return num_words * 32;
}
// Adds peer class `c` to this set, unless it is already a member or the
// fixed-size storage is exhausted. On success, bumps the class' reference
// count in the pool.
void peer_class_set::add_class(peer_class_pool& pool, peer_class_t c)
{
	auto const first = m_class.begin();
	auto const last = first + m_size;
	// already a member? nothing to do
	if (std::find(first, last, c) != last) return;
	if (m_size >= m_class.size() - 1)
	{
		// out of space in the fixed-size array
		TORRENT_ASSERT_FAIL();
		return;
	}
	m_class[m_size] = c;
	pool.incref(c);
	++m_size;
}
// Adds a filter rule covering the address range [first, last], dispatching
// to the v4 or v6 filter depending on the address family. Both endpoints
// must belong to the same family.
void ip_filter::add_rule(address first, address last, std::uint32_t flags)
{
	if (first.is_v4())
	{
		TORRENT_ASSERT(last.is_v4());
		m_filter4.add_rule(first.to_v4().to_bytes(), last.to_v4().to_bytes(), flags);
		return;
	}
#if TORRENT_USE_IPV6
	if (first.is_v6())
	{
		TORRENT_ASSERT(last.is_v6());
		m_filter6.add_rule(first.to_v6().to_bytes(), last.to_v6().to_bytes(), flags);
		return;
	}
#endif
	// neither v4 nor a supported v6 address
	TORRENT_ASSERT_FAIL();
}
bool settings_pack::has_val(int name) const { switch (name & type_mask) { case string_type_base: { // this is an optimization. If the settings pack is complete, // i.e. has every key, we don't need to search, it's just a lookup if (m_strings.size() == settings_pack::num_string_settings) return true; std::pair<std::uint16_t, std::string> v(name, std::string()); std::vector<std::pair<std::uint16_t, std::string> >::const_iterator i = std::lower_bound(m_strings.begin(), m_strings.end(), v , &compare_first<std::string>); return i != m_strings.end() && i->first == name; } case int_type_base: { // this is an optimization. If the settings pack is complete, // i.e. has every key, we don't need to search, it's just a lookup if (m_ints.size() == settings_pack::num_int_settings) return true; std::pair<std::uint16_t, int> v(name, 0); std::vector<std::pair<std::uint16_t, int> >::const_iterator i = std::lower_bound(m_ints.begin(), m_ints.end(), v , &compare_first<int>); return i != m_ints.end() && i->first == name; } case bool_type_base: { // this is an optimization. If the settings pack is complete, // i.e. has every key, we don't need to search, it's just a lookup if (m_bools.size() == settings_pack::num_bool_settings) return true; std::pair<std::uint16_t, bool> v(name, false); std::vector<std::pair<std::uint16_t, bool> >::const_iterator i = std::lower_bound(m_bools.begin(), m_bools.end(), v , &compare_first<bool>); return i != m_bools.end() && i->first == name; } } TORRENT_ASSERT_FAIL(); return false; }
// Exchanges the contents of this entry with `e`. Only entries of the same
// type, or where one side is uninitialized (undefined_t), can be swapped;
// any other combination triggers an assertion failure and leaves both
// entries unmodified.
void entry::swap(entry& e)
{
	bool clear_this = false;
	bool clear_that = false;
	// both uninitialized: nothing to swap
	if (m_type == undefined_t && e.m_type == undefined_t) return;
	if (m_type == undefined_t)
	{
		// default-construct a value of e's type in *this so the typed
		// swap below is well-defined; after swapping, e will hold that
		// default value and must be reset back to undefined
		construct(data_type(e.m_type));
		clear_that = true;
	}
	if (e.m_type == undefined_t)
	{
		// symmetric case: *this is initialized but e is not
		e.construct(data_type(m_type));
		clear_this = true;
	}
	if (m_type == e.m_type)
	{
		// both data buffers now hold objects of the same type; swap them
		// through correctly-typed references into the raw storage
		switch (m_type)
		{
			case int_t:
				std::swap(*reinterpret_cast<integer_type*>(&data)
					, *reinterpret_cast<integer_type*>(&e.data));
				break;
			case string_t:
				std::swap(*reinterpret_cast<string_type*>(&data)
					, *reinterpret_cast<string_type*>(&e.data));
				break;
			case list_t:
				std::swap(*reinterpret_cast<list_type*>(&data)
					, *reinterpret_cast<list_type*>(&e.data));
				break;
			case dictionary_t:
				std::swap(*reinterpret_cast<dictionary_type*>(&data)
					, *reinterpret_cast<dictionary_type*>(&e.data));
				break;
			case preformatted_t:
				std::swap(*reinterpret_cast<preformatted_type*>(&data)
					, *reinterpret_cast<preformatted_type*>(&e.data));
				break;
			default: break;
		}
		// reset whichever side started out uninitialized
		if (clear_this) destruct();
		if (clear_that) e.destruct();
	}
	else
	{
		// currently, only swapping entries of the same type or where one
		// of the entries is uninitialized is supported.
		TORRENT_ASSERT_FAIL();
	}
}
// Computes the number of upload slots (unchoked peers) to use according to
// the configured choking algorithm, and reorders `peers` in place so that
// the first entries are the ones that should be unchoked.
//
// peers            - peers eligible for unchoking; sorted in place
// max_upload_rate  - upload capacity estimate, only used by the BitTyrant
//                    choker
// unchoke_interval - duration of the last choke round, used to turn
//                    per-round byte counts into rates
// sett             - session settings selecting algorithm and limits
//
// Returns the number of upload slots to use.
int unchoke_sort(std::vector<peer_connection*>& peers
	, int max_upload_rate
	, time_duration unchoke_interval
	, aux::session_settings const& sett)
{
#if TORRENT_USE_ASSERTS
	// sanity check: every peer must be alive and attached to a torrent
	for (std::vector<peer_connection*>::iterator i = peers.begin()
		, end(peers.end()); i != end; ++i)
	{
		TORRENT_ASSERT((*i)->self());
		TORRENT_ASSERT((*i)->associated_torrent().lock());
	}
#endif

	int upload_slots = sett.get_int(settings_pack::unchoke_slots_limit);
	// a negative limit means unlimited upload slots
	if (upload_slots < 0)
		upload_slots = (std::numeric_limits<int>::max)();

	// ==== BitTyrant ====
	//
	// if we're using the bittyrant unchoker, go through all peers that
	// we have unchoked already, and adjust our estimated reciprocation
	// rate. If the peer has reciprocated, lower the estimate, if it hasn't,
	// increase the estimate (this attempts to optimize "ROI" of upload
	// capacity, by sending just enough to be reciprocated).
	// For more information, see: http://bittyrant.cs.washington.edu/
	if (sett.get_int(settings_pack::choking_algorithm)
		== settings_pack::bittyrant_choker)
	{
		for (std::vector<peer_connection*>::const_iterator i = peers.begin()
			, end(peers.end()); i != end; ++i)
		{
			peer_connection* p = *i;
			if (p->is_choked() || !p->is_interesting()) continue;

			if (!p->has_peer_choked())
			{
				// we're unchoked, we may want to lower our estimated
				// reciprocation rate
				p->decrease_est_reciprocation_rate();
			}
			else
			{
				// we've unchoked this peer, and it hasn't reciprocated
				// we may want to increase our estimated reciprocation rate
				p->increase_est_reciprocation_rate();
			}
		}

		// if we're using the bittyrant choker, sort peers by their return
		// on investment. i.e. download rate / upload rate
		std::sort(peers.begin(), peers.end()
			, std::bind(&bittyrant_unchoke_compare, _1, _2));

		int upload_capacity_left = max_upload_rate;

		// now, figure out how many peers should be unchoked. We deduct the
		// estimated reciprocation rate from our upload capacity estimate
		// until there is none left
		upload_slots = 0;
		for (std::vector<peer_connection*>::iterator i = peers.begin()
			, end(peers.end()); i != end; ++i)
		{
			peer_connection* p = *i;
			TORRENT_ASSERT(p);

			if (p->est_reciprocation_rate() > upload_capacity_left) break;

			++upload_slots;
			upload_capacity_left -= p->est_reciprocation_rate();
		}

		return upload_slots;
	}

	// ==== rate-based ====
	//
	// The rate based unchoker looks at our upload rate to peers, and find
	// a balance between number of upload slots and the rate we achieve. The
	// intention is to not spread upload bandwidth too thin, but also to not
	// unchoke few enough peers to not be able to saturate the up-link.
	// this is done by traversing the peers sorted by our upload rate to
	// them in decreasing rates. For each peer we increase our threshold
	// by 1 kB/s. The first peer we get to whom we upload slower than
	// the threshold, we stop and that's the number of unchoke slots we have.
	if (sett.get_int(settings_pack::choking_algorithm)
		== settings_pack::rate_based_choker)
	{
		// first reset the number of unchoke slots, because we'll calculate
		// it purely based on the current state of our peers.
		upload_slots = 0;

		// TODO: optimize this using partial_sort or something. We don't need
		// to sort the entire list

		// TODO: make the comparison function a free function and move it
		// into this cpp file
		std::sort(peers.begin(), peers.end()
			, std::bind(&upload_rate_compare, _1, _2));

		// TODO: make configurable
		int rate_threshold = 1024;

		for (std::vector<peer_connection*>::const_iterator i = peers.begin()
			, end(peers.end()); i != end; ++i)
		{
			peer_connection const& p = **i;

			// convert the bytes uploaded during the last round into a
			// rate in bytes per second
			int const rate = int(p.uploaded_in_last_round()
				* 1000 / total_milliseconds(unchoke_interval));

			if (rate < rate_threshold) break;

			++upload_slots;

			// TODO: make configurable
			rate_threshold += 1024;
		}
		++upload_slots;
	}

	// sorts the peers that are eligible for unchoke by download rate and
	// secondary by total upload. The reason for this is, if all torrents are
	// being seeded, the download rate will be 0, and the peers we have sent
	// the least to should be unchoked

	// we use partial sort here, because we only care about the top
	// upload_slots peers.

	if (sett.get_int(settings_pack::seed_choking_algorithm)
		== settings_pack::round_robin)
	{
		int const pieces = sett.get_int(settings_pack::seeding_piece_quota);

		std::partial_sort(peers.begin(), peers.begin()
			+ (std::min)(upload_slots, int(peers.size())), peers.end()
			, std::bind(&unchoke_compare_rr, _1, _2, pieces));
	}
	else if (sett.get_int(settings_pack::seed_choking_algorithm)
		== settings_pack::fastest_upload)
	{
		std::partial_sort(peers.begin(), peers.begin()
			+ (std::min)(upload_slots, int(peers.size())), peers.end()
			, std::bind(&unchoke_compare_fastest_upload, _1, _2));
	}
	else if (sett.get_int(settings_pack::seed_choking_algorithm)
		== settings_pack::anti_leech)
	{
		std::partial_sort(peers.begin(), peers.begin()
			+ (std::min)(upload_slots, int(peers.size())), peers.end()
			, std::bind(&unchoke_compare_anti_leech, _1, _2));
	}
	else
	{
		// unrecognized seed choking algorithm; fall back to round-robin
		// and flag the configuration error in debug builds
		int const pieces = sett.get_int(settings_pack::seeding_piece_quota);
		std::partial_sort(peers.begin(), peers.begin()
			+ (std::min)(upload_slots, int(peers.size())), peers.end()
			, std::bind(&unchoke_compare_rr, _1, _2, pieces));

		TORRENT_ASSERT_FAIL();
	}

	return upload_slots;
}