Example #1
	void web_connection_base::add_headers(std::string& request
		, aux::session_settings const& sett, bool using_proxy) const
	{
		request += "Host: ";
		request += m_host;
		if (m_first_request || m_settings.get_bool(settings_pack::always_send_user_agent)) {
			request += "\r\nUser-Agent: ";
			request += m_settings.get_str(settings_pack::user_agent);
		}
		if (!m_external_auth.empty()) {
			request += "\r\nAuthorization: ";
			request += m_external_auth;
		} else if (!m_basic_auth.empty()) {
			request += "\r\nAuthorization: Basic ";
			request += m_basic_auth;
		}
		if (sett.get_int(settings_pack::proxy_type) == settings_pack::http_pw) {
			request += "\r\nProxy-Authorization: Basic ";
			request += base64encode(sett.get_str(settings_pack::proxy_username)
				+ ":" + sett.get_str(settings_pack::proxy_password));
		}
		for (web_seed_entry::headers_t::const_iterator it = m_extra_headers.begin();
			it != m_extra_headers.end(); ++it) {
			request += "\r\n";
			request += it->first;
			request += ": ";
			request += it->second;
		}
		if (using_proxy) {
			request += "\r\nProxy-Connection: keep-alive";
		}
		if (m_first_request || using_proxy) {
			request += "\r\nConnection: keep-alive";
		}
	}
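For illustration, here is a minimal standalone sketch of the request fragment this function assembles, assuming a hypothetical host, a pre-encoded basic-auth token, and two extra headers (none of these values come from libtorrent itself):

#include <iostream>
#include <string>
#include <utility>
#include <vector>

int main()
{
	std::string request = "GET /file.iso HTTP/1.1";
	request += "\r\nHost: example.com";                 // m_host
	request += "\r\nUser-Agent: demo-client/1.0";       // only on the first request
	request += "\r\nAuthorization: Basic dXNlcjpwYXNz"; // base64 of "user:pass"

	// stand-in for m_extra_headers
	std::vector<std::pair<std::string, std::string>> extra = {
		{"Referer", "http://example.com/"}, {"X-Custom", "1"}};
	for (auto const& h : extra)
		request += "\r\n" + h.first + ": " + h.second;

	request += "\r\nConnection: keep-alive"; // first request, or when using a proxy
	request += "\r\n\r\n";
	std::cout << request;
}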
Example #2
	void load_struct_from_settings(aux::session_settings const& current, session_settings& ret)
	{
		for (int i = 0; i < settings_pack::num_string_settings; ++i)
		{
			if (str_settings[i].offset == 0) continue;
			std::string& val = *(std::string*)(((char*)&ret) + str_settings[i].offset);
			val = current.get_str(settings_pack::string_type_base + i);
		}

		for (int i = 0; i < settings_pack::num_int_settings; ++i)
		{
			if (int_settings[i].offset == 0) continue;
			int& val = *(int*)(((char*)&ret) + int_settings[i].offset);
			val = current.get_int(settings_pack::int_type_base + i);
		}

		for (int i = 0; i < settings_pack::num_bool_settings; ++i)
		{
			if (bool_settings[i].offset == 0) continue;
			bool& val = *(bool*)(((char*)&ret) + bool_settings[i].offset);
			val = current.get_bool(settings_pack::bool_type_base + i);
		}

		// special case for deprecated float values
		ret.share_ratio_limit = float(current.get_int(settings_pack::share_ratio_limit)) / 100.f;
		ret.seed_time_ratio_limit = float(current.get_int(settings_pack::seed_time_ratio_limit)) / 100.f;
		ret.peer_turnover = float(current.get_int(settings_pack::peer_turnover)) / 100.f;
		ret.peer_turnover_cutoff = float(current.get_int(settings_pack::peer_turnover_cutoff)) / 100.f;
	}
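The loops above write through byte offsets recorded in per-type tables. A minimal sketch of that technique follows, using a hypothetical two-field struct and an offsetof()-built table (the real str_settings/int_settings/bool_settings tables are defined alongside these functions in libtorrent):

#include <cassert>
#include <cstddef>

struct demo_settings
{
	int connections_limit;
	int upload_rate_limit;
};

struct int_entry { std::size_t offset; int value; };

int main()
{
	// table mapping each setting to the byte offset of its struct field
	int_entry const int_table[] = {
		{ offsetof(demo_settings, connections_limit), 200 },
		{ offsetof(demo_settings, upload_rate_limit), 0 }
	};

	demo_settings ret{};
	for (auto const& e : int_table)
	{
		// form a reference to the field at that offset and assign through it
		int& val = *reinterpret_cast<int*>(reinterpret_cast<char*>(&ret) + e.offset);
		val = e.value;
	}
	assert(ret.connections_limit == 200);
	assert(ret.upload_rate_limit == 0);
}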
Example #3
	boost::shared_ptr<settings_pack> load_pack_from_struct(
		aux::session_settings const& current, session_settings const& s)
	{
		boost::shared_ptr<settings_pack> p = boost::make_shared<settings_pack>();

		for (int i = 0; i < settings_pack::num_string_settings; ++i)
		{
			if (str_settings[i].offset == 0) continue;
			std::string& val = *(std::string*)(((char*)&s) + str_settings[i].offset);
			int setting_name = settings_pack::string_type_base + i;
			if (val == current.get_str(setting_name)) continue;
			p->set_str(setting_name, val);
		}

		for (int i = 0; i < settings_pack::num_int_settings; ++i)
		{
			if (int_settings[i].offset == 0) continue;
			int& val = *(int*)(((char*)&s) + int_settings[i].offset);
			int setting_name = settings_pack::int_type_base + i;
			if (val == current.get_int(setting_name)) continue;
			p->set_int(setting_name, val);
		}

		for (int i = 0; i < settings_pack::num_bool_settings; ++i)
		{
			if (bool_settings[i].offset == 0) continue;
			bool& val = *(bool*)(((char*)&s) + bool_settings[i].offset);
			int setting_name = settings_pack::bool_type_base + i;
			if (val == current.get_bool(setting_name)) continue;
			p->set_bool(setting_name, val);
		}

		// special case for deprecated float values
		int val = current.get_int(settings_pack::share_ratio_limit);
		if (fabs(s.share_ratio_limit - float(val) / 100.f) > 0.001f)
			p->set_int(settings_pack::share_ratio_limit, s.share_ratio_limit * 100);

		val = current.get_int(settings_pack::seed_time_ratio_limit);
		if (fabs(s.seed_time_ratio_limit - float(val) / 100.f) > 0.001f)
			p->set_int(settings_pack::seed_time_ratio_limit, s.seed_time_ratio_limit * 100);

		val = current.get_int(settings_pack::peer_turnover);
		if (fabs(s.peer_turnover - float(val) / 100.f) > 0.001)
			p->set_int(settings_pack::peer_turnover, s.peer_turnover * 100);

		val = current.get_int(settings_pack::peer_turnover_cutoff);
		if (fabs(s.peer_turnover_cutoff - float(val) / 100.f) > 0.001)
			p->set_int(settings_pack::peer_turnover_cutoff, s.peer_turnover_cutoff * 100);

		return p;
	}
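The tail of this function round-trips the deprecated ratio fields between their float representation in the struct and the scaled int stored in the session. A small worked sketch of that conversion and the tolerance check, with a hypothetical stored value:

#include <cassert>
#include <cmath>

int main()
{
	int const stored = 150;                      // what get_int() would return
	float const exposed = float(stored) / 100.f; // 1.5 in the struct

	// unchanged value: the difference stays well inside the 0.001 tolerance
	assert(std::fabs(exposed - float(stored) / 100.f) <= 0.001f);

	// user changed the struct field to 2.0 -> detected and scaled back to an int
	float const changed = 2.0f;
	if (std::fabs(changed - float(stored) / 100.f) > 0.001f)
	{
		int const new_stored = int(changed * 100); // 200, what set_int() receives
		assert(new_stored == 200);
	}
}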
Example #4
void initialize_default_settings(aux::session_settings& s)
{
    for (int i = 0; i < settings_pack::num_string_settings; ++i)
    {
        if (str_settings[i].default_value == nullptr) continue;
        s.set_str(settings_pack::string_type_base + i, str_settings[i].default_value);
        TORRENT_ASSERT(s.get_str(settings_pack::string_type_base + i) == str_settings[i].default_value);
    }

    for (int i = 0; i < settings_pack::num_int_settings; ++i)
    {
        s.set_int(settings_pack::int_type_base + i, int_settings[i].default_value);
        TORRENT_ASSERT(s.get_int(settings_pack::int_type_base + i) == int_settings[i].default_value);
    }

    for (int i = 0; i < settings_pack::num_bool_settings; ++i)
    {
        s.set_bool(settings_pack::bool_type_base + i, bool_settings[i].default_value);
        TORRENT_ASSERT(s.get_bool(settings_pack::bool_type_base + i) == bool_settings[i].default_value);
    }
}
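A sketch of the same table-driven initialization with a tiny hypothetical defaults table (the entry names and default values below are made up for illustration; the real tables carry the actual libtorrent defaults):

#include <cassert>
#include <map>
#include <string>

struct str_entry { char const* name; char const* default_value; };

int main()
{
	str_entry const defaults[] = {
		{ "user_agent", "demo-client/1.0" },
		{ "handshake_client_version", nullptr } // nullptr: leave unset
	};

	std::map<std::string, std::string> settings;
	for (auto const& e : defaults)
	{
		if (e.default_value == nullptr) continue; // same skip as above
		settings[e.name] = e.default_value;
	}
	assert(settings.count("user_agent") == 1);
	assert(settings.count("handshake_client_version") == 0);
}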
Example #5
	void disk_buffer_pool::set_settings(aux::session_settings const& sett)
	{
		std::unique_lock<std::mutex> l(m_pool_mutex);

		int const cache_size = sett.get_int(settings_pack::cache_size);
		if (cache_size < 0)
		{
			std::int64_t phys_ram = total_physical_ram();
			if (phys_ram == 0) m_max_use = 1024;
			else
			{
				// this is the logic to calculate the automatic disk cache size
				// based on the amount of physical RAM.
				// The more physical RAM, the smaller portion of it is allocated
				// for the cache.

				// we take a 40th of everything exceeding 4 GiB
				// a 30th of everything exceeding 1 GiB
				// and a 20th of everything below a GiB

				constexpr std::int64_t gb = 1024 * 1024 * 1024;

				std::int64_t result = 0;
				if (phys_ram > 4 * gb)
				{
					result += (phys_ram - 4 * gb) / 40;
					phys_ram = 4 * gb;
				}
				if (phys_ram > 1 * gb)
				{
					result += (phys_ram - 1 * gb) / 30;
					phys_ram = 1 * gb;
				}
				result += phys_ram / 20;
				m_max_use = int(result / default_block_size);
			}

#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4127 ) /* warning C4127: conditional expression is constant */
#endif // _MSC_VER
			if (sizeof(void*) == 4)
#ifdef _MSC_VER
#pragma warning(pop)
#endif // _MSC_VER
			{
				// 32 bit builds should be capped below 2 GB of memory, even
				// when more actual RAM is available, because we're still
				// constrained by the 32 bit virtual address space.
				m_max_use = std::min(2 * 1024 * 1024 * 3 / 4 * 1024
					/ default_block_size, m_max_use);
			}
		}
		else
		{
			m_max_use = cache_size;
		}
		m_low_watermark = m_max_use - std::max(16, sett.get_int(settings_pack::max_queued_disk_bytes) / 0x4000);
		if (m_low_watermark < 0) m_low_watermark = 0;
		if (m_in_use >= m_max_use && !m_exceeded_max_size)
		{
			m_exceeded_max_size = true;
			m_trigger_cache_trim();
		}

#if TORRENT_USE_ASSERTS
		m_settings_set = true;
#endif
	}
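As a worked example of the automatic sizing formula described in the comments above, here is a sketch assuming a machine with 8 GiB of physical RAM and libtorrent's 16 KiB block size:

#include <cstdint>
#include <iostream>

int main()
{
	constexpr std::int64_t gb = 1024 * 1024 * 1024;
	constexpr int default_block_size = 0x4000; // 16 KiB

	std::int64_t phys_ram = 8 * gb;
	std::int64_t result = 0;
	if (phys_ram > 4 * gb) { result += (phys_ram - 4 * gb) / 40; phys_ram = 4 * gb; }
	if (phys_ram > 1 * gb) { result += (phys_ram - 1 * gb) / 30; phys_ram = 1 * gb; }
	result += phys_ram / 20;

	// 4 GiB/40 + 3 GiB/30 + 1 GiB/20 ≈ 256 MiB, i.e. roughly 16000 blocks
	std::cout << result / default_block_size << " blocks ("
		<< result / (1024 * 1024) << " MiB)\n";
}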
Example #6
	int unchoke_sort(std::vector<peer_connection*>& peers
		, int max_upload_rate
		, time_duration unchoke_interval
		, aux::session_settings const& sett)
	{
		int upload_slots = sett.get_int(settings_pack::unchoke_slots_limit);

		// ==== BitTyrant ====
		//
		// if we're using the bittyrant unchoker, go through all peers that
		// we have unchoked already, and adjust our estimated reciprocation
		// rate. If the peer has reciprocated, lower the estimate, if it hasn't,
		// increase the estimate (this attempts to optimize "ROI" of upload
		// capacity, by sending just enough to be reciprocated).
		// For more information, see: http://bittyrant.cs.washington.edu/
		if (sett.get_int(settings_pack::choking_algorithm)
			== settings_pack::bittyrant_choker)
		{
			for (std::vector<peer_connection*>::const_iterator i = peers.begin()
				, end(peers.end()); i != end; ++i)
			{
				peer_connection* p = *i;
				if (p->is_choked() || !p->is_interesting()) continue;

				if (!p->has_peer_choked())
				{
					// we're unchoked, we may want to lower our estimated
					// reciprocation rate
					p->decrease_est_reciprocation_rate();
				}
				else
				{
					// we've unchoked this peer, and it hasn't reciprocated
					// we may want to increase our estimated reciprocation rate
					p->increase_est_reciprocation_rate();
				}
			}

			// if we're using the bittyrant choker, sort peers by their return
			// on investment. i.e. download rate / upload rate
			std::sort(peers.begin(), peers.end()
				, boost::bind(&bittyrant_unchoke_compare, _1, _2));

			int upload_capacity_left = max_upload_rate;

			// now, figure out how many peers should be unchoked. We deduct the
			// estimated reciprocation rate from our upload_capacity estimate
			// until there is none left
			upload_slots = 0;

			for (std::vector<peer_connection*>::iterator i = peers.begin()
				, end(peers.end()); i != end; ++i)
			{
				peer_connection* p = *i;
				TORRENT_ASSERT(p);

				if (p->est_reciprocation_rate() > upload_capacity_left) break;

				++upload_slots;
				upload_capacity_left -= p->est_reciprocation_rate();
			}

			return upload_slots;
		}

		// ==== rate-based ====
		//
		// The rate based unchoker looks at our upload rate to peers, and finds
		// a balance between the number of upload slots and the rate we achieve. The
		// intention is to not spread upload bandwidth too thin, but also not to
		// unchoke so few peers that we cannot saturate the up-link.
		// This is done by traversing the peers sorted by our upload rate to
		// them, in decreasing order. For each peer we increase our threshold
		// by 1 kB/s. The first peer we reach to whom we upload slower than
		// the threshold is where we stop, and that's the number of unchoke slots we have.
		if (sett.get_int(settings_pack::choking_algorithm)
			== settings_pack::rate_based_choker)
		{
			// first reset the number of unchoke slots, because we'll calculate
			// it purely based on the current state of our peers.
			upload_slots = 0;

			// TODO: optimize this using partial_sort or something. We don't need
			// to sort the entire list
			
			// TODO: make the comparison function a free function and move it
			// into this cpp file
			std::sort(peers.begin(), peers.end()
				, boost::bind(&upload_rate_compare, _1, _2));

			// TODO: make configurable
			int rate_threshold = 1024;

			for (std::vector<peer_connection*>::const_iterator i = peers.begin()
				, end(peers.end()); i != end; ++i)
			{
				peer_connection const& p = **i;
				int rate = int(p.uploaded_in_last_round()
					* 1000 / total_milliseconds(unchoke_interval));

				if (rate < rate_threshold) break;

				++upload_slots;

				// TODO: make configurable
				rate_threshold += 1024;
			}
			++upload_slots;
		}

		// sorts the peers that are eligible for unchoke by download rate and
		// secondarily by total upload. The reason for this is: if all torrents are
		// being seeded, the download rate will be 0, and the peers we have sent
		// the least to should be unchoked
		
		// we use partial sort here, because we only care about the top
		// upload_slots peers.

		if (sett.get_int(settings_pack::seed_choking_algorithm)
			== settings_pack::round_robin)
		{
			int pieces = sett.get_int(settings_pack::seeding_piece_quota);

			std::partial_sort(peers.begin(), peers.begin()
				+ (std::min)(upload_slots, int(peers.size())), peers.end()
				, boost::bind(&unchoke_compare_rr, _1, _2, pieces));
		}
		else if (sett.get_int(settings_pack::seed_choking_algorithm)
			== settings_pack::fastest_upload)
		{
			std::partial_sort(peers.begin(), peers.begin()
				+ (std::min)(upload_slots, int(peers.size())), peers.end()
				, boost::bind(&unchoke_compare_fastest_upload, _1, _2));
		}
		else if (sett.get_int(settings_pack::seed_choking_algorithm)
			== settings_pack::anti_leech)
		{
			std::partial_sort(peers.begin(), peers.begin()
				+ (std::min)(upload_slots, int(peers.size())), peers.end()
				, boost::bind(&unchoke_compare_anti_leech, _1, _2));
		}
		else
		{
			TORRENT_ASSERT(false && "unknown seed choking algorithm");

			int pieces = sett.get_int(settings_pack::seeding_piece_quota);
			std::partial_sort(peers.begin(), peers.begin()
				+ (std::min)(upload_slots, int(peers.size())), peers.end()
				, boost::bind(&unchoke_compare_rr, _1, _2, pieces));
		}

		return upload_slots;
	}
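To make the rate-based branch concrete, here is a small sketch with hypothetical per-peer upload rates (bytes per second), already sorted in decreasing order as the code above requires:

#include <iostream>
#include <vector>

int main()
{
	std::vector<int> rates = { 9000, 6000, 3500, 2500, 900, 400 };

	int upload_slots = 0;
	int rate_threshold = 1024; // 1 kB/s, raised by 1 kB/s per accepted peer
	for (int rate : rates)
	{
		if (rate < rate_threshold) break;
		++upload_slots;
		rate_threshold += 1024;
	}
	++upload_slots; // one extra slot, as in the code above

	// thresholds seen: 1024, 2048, 3072, 4096 -> 2500 < 4096 stops the loop
	std::cout << "unchoke slots: " << upload_slots << "\n"; // prints 4
}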
Example #7
void apply_pack(settings_pack const* pack, aux::session_settings& sett
                , aux::session_impl* ses)
{
    typedef void (aux::session_impl::*fun_t)();
    std::vector<fun_t> callbacks;

    for (auto const& p : pack->m_strings)
    {
        // disregard setting indices that are not string types
        if ((p.first & settings_pack::type_mask) != settings_pack::string_type_base)
            continue;

        // ignore settings that are out of bounds
        int const index = p.first & settings_pack::index_mask;
        TORRENT_ASSERT_PRECOND(index >= 0 && index < settings_pack::num_string_settings);
        if (index < 0 || index >= settings_pack::num_string_settings)
            continue;

        // if the value did not change, don't call the update callback
        if (sett.get_str(p.first) == p.second) continue;

        sett.set_str(p.first, p.second);
        str_setting_entry_t const& sa = str_settings[index];

        if (sa.fun && ses
                && std::find(callbacks.begin(), callbacks.end(), sa.fun) == callbacks.end())
            callbacks.push_back(sa.fun);
    }

    for (auto const& p : pack->m_ints)
    {
        // disregard setting indices that are not int types
        if ((p.first & settings_pack::type_mask) != settings_pack::int_type_base)
            continue;

        // ignore settings that are out of bounds
        int const index = p.first & settings_pack::index_mask;
        TORRENT_ASSERT_PRECOND(index >= 0 && index < settings_pack::num_int_settings);
        if (index < 0 || index >= settings_pack::num_int_settings)
            continue;

        // if the value did not change, don't call the update callback
        if (sett.get_int(p.first) == p.second) continue;

        sett.set_int(p.first, p.second);
        int_setting_entry_t const& sa = int_settings[index];
        if (sa.fun && ses
                && std::find(callbacks.begin(), callbacks.end(), sa.fun) == callbacks.end())
            callbacks.push_back(sa.fun);
    }

    for (auto const& p : pack->m_bools)
    {
        // disregard setting indices that are not bool types
        if ((p.first & settings_pack::type_mask) != settings_pack::bool_type_base)
            continue;

        // ignore settings that are out of bounds
        int const index = p.first & settings_pack::index_mask;
        TORRENT_ASSERT_PRECOND(index >= 0 && index < settings_pack::num_bool_settings);
        if (index < 0 || index >= settings_pack::num_bool_settings)
            continue;

        // if the value did not change, don't call the update callback
        if (sett.get_bool(p.first) == p.second) continue;

        sett.set_bool(p.first, p.second);
        bool_setting_entry_t const& sa = bool_settings[index];
        if (sa.fun && ses
                && std::find(callbacks.begin(), callbacks.end(), sa.fun) == callbacks.end())
            callbacks.push_back(sa.fun);
    }

    // call the callbacks once all the settings have been applied, and
    // only once per callback
    for (auto const& f : callbacks)
    {
        (ses->*f)();
    }
}
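The callback collection above relies on comparing member-function pointers so that each update routine runs at most once, no matter how many changed settings map to it. A minimal sketch of that pattern, with a hypothetical session_impl stand-in and made-up update functions:

#include <algorithm>
#include <iostream>
#include <vector>

struct session_impl
{
	void update_connections_limit() { std::cout << "connections updated\n"; }
	void update_rate_limits()       { std::cout << "rates updated\n"; }
};

int main()
{
	using fun_t = void (session_impl::*)();
	session_impl ses;
	std::vector<fun_t> callbacks;

	// pretend three changed settings resolved to these callbacks
	fun_t const changed[] = {
		&session_impl::update_rate_limits,
		&session_impl::update_connections_limit,
		&session_impl::update_rate_limits // duplicate, must not run twice
	};
	for (fun_t f : changed)
		if (std::find(callbacks.begin(), callbacks.end(), f) == callbacks.end())
			callbacks.push_back(f);

	for (fun_t f : callbacks)
		(ses.*f)(); // each callback fires exactly once
}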
Example #8
	void disk_buffer_pool::set_settings(aux::session_settings const& sett)
	{
		mutex::scoped_lock l(m_pool_mutex);

		// 0 cache_buffer_chunk_size means 'automatic' (i.e.
		// proportional to the total disk cache size)
		m_cache_buffer_chunk_size = sett.get_int(settings_pack::cache_buffer_chunk_size);
		m_lock_disk_cache = sett.get_bool(settings_pack::lock_disk_cache);
#ifndef TORRENT_DISABLE_POOL_ALLOCATOR
		m_want_pool_allocator = sett.get_bool(settings_pack::use_disk_cache_pool);
		// if there are no allocated blocks, it's OK to switch allocator
		if (m_in_use == 0)
			m_using_pool_allocator = m_want_pool_allocator;
#endif

#if TORRENT_HAVE_MMAP
		// if we've already allocated an mmap, we can't change
		// anything unless there are no allocations in use
		if (m_cache_pool && m_in_use > 0) return;
#endif

		// only allow changing size if we're not using mmapped
		// cache, or if we're just about to turn it off
		if (
#if TORRENT_HAVE_MMAP
			m_cache_pool == 0 ||
#endif
			sett.get_str(settings_pack::mmap_cache).empty())
		{
			int cache_size = sett.get_int(settings_pack::cache_size);
			if (cache_size < 0)
			{
				boost::uint64_t phys_ram = physical_ram();
				if (phys_ram == 0) m_max_use = 1024;
				else m_max_use = phys_ram / 8 / m_block_size;
			}
			else
			{
				m_max_use = cache_size;
			}
			m_low_watermark = m_max_use - (std::max)(16, sett.get_int(settings_pack::max_queued_disk_bytes) / 0x4000);
			if (m_low_watermark < 0) m_low_watermark = 0;
			if (m_in_use >= m_max_use && !m_exceeded_max_size)
			{
				m_exceeded_max_size = true;
				m_trigger_cache_trim();
			}
		}

#if TORRENT_USE_ASSERTS
		m_settings_set = true;
#endif

#if TORRENT_HAVE_MMAP
		// #error support resizing the map
		if (m_cache_pool && sett.get_str(settings_pack::mmap_cache).empty())
		{
			TORRENT_ASSERT(m_in_use == 0);
			munmap(m_cache_pool, boost::uint64_t(m_max_use) * 0x4000);
			m_cache_pool = 0;
			// attempt to make MacOS not flush this to disk, making close()
			// block for a long time
			ftruncate(m_cache_fd, 0);
			close(m_cache_fd);
			m_cache_fd = -1;
			std::vector<int>().swap(m_free_list);
		}
		else if (m_cache_pool == 0 && !sett.get_str(settings_pack::mmap_cache).empty())
		{
			// O_TRUNC here is because we don't actually care about what's
			// in the file now, there's no need to ever read that into RAM
#ifndef O_EXLOCK
#define O_EXLOCK 0
#endif
			m_cache_fd = open(sett.get_str(settings_pack::mmap_cache).c_str(), O_RDWR | O_CREAT | O_EXLOCK | O_TRUNC, 0700);
			if (m_cache_fd < 0)
			{
				if (m_post_alert)
				{
					error_code ec(errno, boost::system::generic_category());
					m_ios.post(boost::bind(alert_callback, m_post_alert, new mmap_cache_alert(ec)));
				}
			}
			else
			{
#ifndef MAP_NOCACHE
#define MAP_NOCACHE 0
#endif
				ftruncate(m_cache_fd, boost::uint64_t(m_max_use) * 0x4000);
				m_cache_pool = (char*)mmap(0, boost::uint64_t(m_max_use) * 0x4000, PROT_READ | PROT_WRITE
					, MAP_SHARED | MAP_NOCACHE, m_cache_fd, 0);
				if (intptr_t(m_cache_pool) == -1)
				{
					if (m_post_alert)
					{
						error_code ec(errno, boost::system::generic_category());
						m_ios.post(boost::bind(alert_callback, m_post_alert, new mmap_cache_alert(ec)));
					}
					m_cache_pool = 0;
					// attempt to make MacOS not flush this to disk, making close()
					// block for a long time
					ftruncate(m_cache_fd, 0);
					close(m_cache_fd);
					m_cache_fd = -1;
				}
				else
				{
					TORRENT_ASSERT((size_t(m_cache_pool) & 0xfff) == 0);
					m_free_list.reserve(m_max_use);
					for (int i = 0; i < m_max_use; ++i)
						m_free_list.push_back(i);
				}
			}
		}
#endif
	}
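A stripped-down sketch of the mmap-backed cache file technique used above: create the file, size it with ftruncate(), map it, and treat the mapping as an array of 16 KiB blocks. POSIX only; the path and block count are made up for illustration:

#include <cstdio>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>

int main()
{
	int const block_size = 0x4000; // 16 KiB, as in the code above
	int const num_blocks = 4;

	int fd = open("/tmp/demo_cache", O_RDWR | O_CREAT | O_TRUNC, 0700);
	if (fd < 0) { perror("open"); return 1; }
	if (ftruncate(fd, off_t(block_size) * num_blocks) != 0)
	{ perror("ftruncate"); close(fd); return 1; }

	char* pool = static_cast<char*>(mmap(nullptr, size_t(block_size) * num_blocks
		, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0));
	if (pool == MAP_FAILED) { perror("mmap"); close(fd); return 1; }

	pool[0] = 'x'; // block 0 is now backed by the file rather than anonymous RAM

	munmap(pool, size_t(block_size) * num_blocks);
	close(fd);
	unlink("/tmp/demo_cache");
}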
Example #9
	void apply_pack(settings_pack const* pack, aux::session_settings& sett
		, aux::session_impl* ses)
	{
		typedef void (aux::session_impl::*fun_t)();
		std::vector<fun_t> callbacks;

		for (std::vector<std::pair<std::uint16_t, std::string> >::const_iterator i = pack->m_strings.begin()
			, end(pack->m_strings.end()); i != end; ++i)
		{
			// disregard setting indices that are not string types
			if ((i->first & settings_pack::type_mask) != settings_pack::string_type_base)
				continue;

			// ignore settings that are out of bounds
			int const index = i->first & settings_pack::index_mask;
			TORRENT_ASSERT_PRECOND(index >= 0 && index < settings_pack::num_string_settings);
			if (index < 0 || index >= settings_pack::num_string_settings)
				continue;

			sett.set_str(i->first, i->second);
			str_setting_entry_t const& sa = str_settings[index];
			if (sa.fun && ses
				&& std::find(callbacks.begin(), callbacks.end(), sa.fun) == callbacks.end())
				callbacks.push_back(sa.fun);
		}

		for (std::vector<std::pair<std::uint16_t, int> >::const_iterator i = pack->m_ints.begin()
			, end(pack->m_ints.end()); i != end; ++i)
		{
			// disregard setting indices that are not int types
			if ((i->first & settings_pack::type_mask) != settings_pack::int_type_base)
				continue;

			// ignore settings that are out of bounds
			int const index = i->first & settings_pack::index_mask;
			TORRENT_ASSERT_PRECOND(index >= 0 && index < settings_pack::num_int_settings);
			if (index < 0 || index >= settings_pack::num_int_settings)
				continue;

			sett.set_int(i->first, i->second);
			int_setting_entry_t const& sa = int_settings[index];
			if (sa.fun && ses
				&& std::find(callbacks.begin(), callbacks.end(), sa.fun) == callbacks.end())
				callbacks.push_back(sa.fun);
		}

		for (std::vector<std::pair<std::uint16_t, bool> >::const_iterator i = pack->m_bools.begin()
			, end(pack->m_bools.end()); i != end; ++i)
		{
			// disregard setting indices that are not bool types
			if ((i->first & settings_pack::type_mask) != settings_pack::bool_type_base)
				continue;

			// ignore settings that are out of bounds
			int const index = i->first & settings_pack::index_mask;
			TORRENT_ASSERT_PRECOND(index >= 0 && index < settings_pack::num_bool_settings);
			if (index < 0 || index >= settings_pack::num_bool_settings)
				continue;

			sett.set_bool(i->first, i->second);
			bool_setting_entry_t const& sa = bool_settings[index];
			if (sa.fun && ses
				&& std::find(callbacks.begin(), callbacks.end(), sa.fun) == callbacks.end())
				callbacks.push_back(sa.fun);
		}

		// call the callbacks once all the settings have been applied, and
		// only once per callback
		for (std::vector<fun_t>::iterator i = callbacks.begin(), end(callbacks.end());
			i != end; ++i)
		{
			fun_t const& f = *i;
			(ses->*f)();
		}
	}