char* disk_buffer_pool::allocate_buffer_impl(std::unique_lock<std::mutex>& l
		, char const*)
	{
		TORRENT_ASSERT(m_settings_set);
		TORRENT_ASSERT(m_magic == 0x1337);
		TORRENT_ASSERT(l.owns_lock());
		TORRENT_UNUSED(l);

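		// allocate one page-aligned block of default_block_size (0x4000 bytes / 16 kiB)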
		char* ret = page_malloc(default_block_size);

		if (ret == nullptr)
		{
			m_exceeded_max_size = true;
			m_trigger_cache_trim();
			return nullptr;
		}

		++m_in_use;

#if TORRENT_USE_INVARIANT_CHECKS
		try
		{
			TORRENT_ASSERT(m_buffers_in_use.count(ret) == 0);
			m_buffers_in_use.insert(ret);
		}
		catch (...)
		{
			free_buffer_impl(ret, l);
			return nullptr;
		}
#endif

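		// once usage passes the half-way point between the low watermark and
		// the hard limit, flag the cache as exceeded and ask for a trim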
		if (m_in_use >= m_low_watermark + (m_max_use - m_low_watermark)
			/ 2 && !m_exceeded_max_size)
		{
			m_exceeded_max_size = true;
			m_trigger_cache_trim();
		}

		TORRENT_ASSERT(is_disk_buffer(ret, l));
		return ret;
	}

	void disk_buffer_pool::set_settings(aux::session_settings const& sett)
	{
		std::unique_lock<std::mutex> l(m_pool_mutex);

		int const cache_size = sett.get_int(settings_pack::cache_size);
		if (cache_size < 0)
		{
			std::int64_t phys_ram = total_physical_ram();
			if (phys_ram == 0) m_max_use = 1024;
			else
			{
				// this is the logic to calculate the automatic disk cache size
				// based on the amount of physical RAM.
				// The more physical RAM there is, the smaller the portion of it
				// that is allocated for the cache.

				// we take a 40th of everything exceeding 4 GiB
				// a 30th of everything exceeding 1 GiB
				// and a 10th of everything below a GiB
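				// e.g. with 8 GiB of physical RAM (an illustrative figure):
				// (8-4)/40 + (4-1)/30 + 1/10 GiB = 0.3 GiB of cache, i.e.
				// roughly 19,660 blocks of 16 kiB (0x4000 bytes) each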

				constexpr std::int64_t gb = 1024 * 1024 * 1024;

				std::int64_t result = 0;
				if (phys_ram > 4 * gb)
				{
					result += (phys_ram - 4 * gb) / 40;
					phys_ram = 4 * gb;
				}
				if (phys_ram > 1 * gb)
				{
					result += (phys_ram - 1 * gb) / 30;
					phys_ram = 1 * gb;
				}
				result += phys_ram / 10;
				m_max_use = int(result / default_block_size);
			}

#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4127 ) /* warning C4127: conditional expression is constant */
#endif // _MSC_VER
			if (sizeof(void*) == 4)
#ifdef _MSC_VER
#pragma warning(pop)
#endif // _MSC_VER
			{
				// 32 bit builds should be capped below 2 GB of memory, even
				// when more actual ram is available, because we're still
				// constrained by the 32 bit virtual address space.
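				// 2 * 1024 * 1024 * 3 / 4 * 1024 bytes is 1.5 GiB, converted
				// into 16 kiB (default_block_size) blocks below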
				m_max_use = std::min(2 * 1024 * 1024 * 3 / 4 * 1024
					/ default_block_size, m_max_use);
			}
		}
		else
		{
			m_max_use = cache_size;
		}
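		// keep the low watermark far enough below the hard limit to leave
		// head-room for max_queued_disk_bytes worth of 16 kiB (0x4000 byte) blocks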
		m_low_watermark = m_max_use - std::max(16, sett.get_int(settings_pack::max_queued_disk_bytes) / 0x4000);
		if (m_low_watermark < 0) m_low_watermark = 0;
		if (m_in_use >= m_max_use && !m_exceeded_max_size)
		{
			m_exceeded_max_size = true;
			m_trigger_cache_trim();
		}

#if TORRENT_USE_ASSERTS
		m_settings_set = true;
#endif
	}
Example #3
	void disk_buffer_pool::set_settings(aux::session_settings const& sett)
	{
		mutex::scoped_lock l(m_pool_mutex);

		// 0 cache_buffer_chunk_size means 'automatic' (i.e.
		// proportional to the total disk cache size)
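		// when 0, the effective chunk size is resolved at allocation time
		// (see allocate_buffer_impl below)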
		m_cache_buffer_chunk_size = sett.get_int(settings_pack::cache_buffer_chunk_size);
		m_lock_disk_cache = sett.get_bool(settings_pack::lock_disk_cache);
#ifndef TORRENT_DISABLE_POOL_ALLOCATOR
		m_want_pool_allocator = sett.get_bool(settings_pack::use_disk_cache_pool);
		// if there are no allocated blocks, it's OK to switch allocator
		if (m_in_use == 0)
			m_using_pool_allocator = m_want_pool_allocator;
#endif

#if TORRENT_HAVE_MMAP
		// if we've already allocated an mmap, we can't change
		// anything unless there are no allocations in use
		if (m_cache_pool && m_in_use > 0) return;
#endif

		// only allow changing size if we're not using mmapped
		// cache, or if we're just about to turn it off
		if (
#if TORRENT_HAVE_MMAP
			m_cache_pool == 0 ||
#endif
			sett.get_str(settings_pack::mmap_cache).empty())
		{
			int cache_size = sett.get_int(settings_pack::cache_size);
			if (cache_size < 0)
			{
				boost::uint64_t phys_ram = physical_ram();
				if (phys_ram == 0) m_max_use = 1024;
				else m_max_use = phys_ram / 8 / m_block_size;
			}
			else
			{
				m_max_use = cache_size;
			}
			m_low_watermark = m_max_use - (std::max)(16, sett.get_int(settings_pack::max_queued_disk_bytes) / 0x4000);
			if (m_low_watermark < 0) m_low_watermark = 0;
			if (m_in_use >= m_max_use && !m_exceeded_max_size)
			{
				m_exceeded_max_size = true;
				m_trigger_cache_trim();
			}
		}

#if TORRENT_USE_ASSERTS
		m_settings_set = true;
#endif

#if TORRENT_HAVE_MMAP
		// TODO: support resizing the map
		if (m_cache_pool && sett.get_str(settings_pack::mmap_cache).empty())
		{
			TORRENT_ASSERT(m_in_use == 0);
			munmap(m_cache_pool, boost::uint64_t(m_max_use) * 0x4000);
			m_cache_pool = 0;
			// attempt to stop macOS from flushing this to disk, which would
			// make close() block for a long time
			ftruncate(m_cache_fd, 0);
			close(m_cache_fd);
			m_cache_fd = -1;
			std::vector<int>().swap(m_free_list);
		}
		else if (m_cache_pool == 0 && !sett.get_str(settings_pack::mmap_cache).empty())
		{
			// O_TRUNC here is because we don't actually care about what's
			// in the file now; there's no need to ever read it into RAM
#ifndef O_EXLOCK
#define O_EXLOCK 0
#endif
			m_cache_fd = open(sett.get_str(settings_pack::mmap_cache).c_str(), O_RDWR | O_CREAT | O_EXLOCK | O_TRUNC, 0700);
			if (m_cache_fd < 0)
			{
				if (m_post_alert)
				{
					error_code ec(errno, boost::system::generic_category());
					m_ios.post(boost::bind(alert_callback, m_post_alert, new mmap_cache_alert(ec)));
				}
			}
			else
			{
#ifndef MAP_NOCACHE
#define MAP_NOCACHE 0
#endif
				ftruncate(m_cache_fd, boost::uint64_t(m_max_use) * 0x4000);
				m_cache_pool = (char*)mmap(0, boost::uint64_t(m_max_use) * 0x4000, PROT_READ | PROT_WRITE
					, MAP_SHARED | MAP_NOCACHE, m_cache_fd, 0);
				if (intptr_t(m_cache_pool) == -1)
				{
					if (m_post_alert)
					{
						error_code ec(errno, boost::system::generic_category());
						m_ios.post(boost::bind(alert_callback, m_post_alert, new mmap_cache_alert(ec)));
					}
					m_cache_pool = 0;
					// attempt to stop macOS from flushing this to disk, which would
					// make close() block for a long time
					ftruncate(m_cache_fd, 0);
					close(m_cache_fd);
					m_cache_fd = -1;
				}
				else
				{
					TORRENT_ASSERT((size_t(m_cache_pool) & 0xfff) == 0);
					m_free_list.reserve(m_max_use);
					for (int i = 0; i < m_max_use; ++i)
						m_free_list.push_back(i);
				}
			}
		}
#endif
	}
Example #4
	char* disk_buffer_pool::allocate_buffer_impl(mutex::scoped_lock& l, char const* category)
	{
		TORRENT_ASSERT(m_settings_set);
		TORRENT_ASSERT(m_magic == 0x1337);

		char* ret;
#if TORRENT_HAVE_MMAP
		if (m_cache_pool)
		{
			if (int(m_free_list.size()) <= (m_max_use - m_low_watermark) / 2 && !m_exceeded_max_size)
			{
				m_exceeded_max_size = true;
				m_trigger_cache_trim();
			}
			if (m_free_list.empty()) return 0;
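			// pop a free slot off the free-list; each slot maps to one
			// 16 kiB (0x4000 byte) block within the mmapped cache region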
			boost::uint64_t slot_index = m_free_list.back();
			m_free_list.pop_back();
			ret = m_cache_pool + (slot_index * 0x4000);
			TORRENT_ASSERT(is_disk_buffer(ret, l));
		}
		else
#endif
		{
#if defined TORRENT_DISABLE_POOL_ALLOCATOR

#if TORRENT_USE_PURGABLE_CONTROL
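			// on Darwin, allocate the block as purgeable memory so the kernel
			// can reclaim it under memory pressure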
			kern_return_t res = vm_allocate(
				mach_task_self(),
				reinterpret_cast<vm_address_t*>(&ret),
				0x4000,
				VM_FLAGS_PURGABLE |
				VM_FLAGS_ANYWHERE);
			if (res != KERN_SUCCESS)
				ret = NULL;
#else
			ret = page_aligned_allocator::malloc(m_block_size);
#endif // TORRENT_USE_PURGABLE_CONTROL

#else
			if (m_using_pool_allocator)
			{
				ret = (char*)m_pool.malloc();
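				// with cache_buffer_chunk_size set to 0 ('automatic'), grow the
				// pool in chunks proportional to the total cache size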
				int effective_block_size = m_cache_buffer_chunk_size
					? m_cache_buffer_chunk_size
					: (std::max)(m_max_use / 10, 1);
				m_pool.set_next_size(effective_block_size);
			}
			else
			{
				ret = page_aligned_allocator::malloc(m_block_size);
			}
#endif
			if (ret == NULL)
			{
				m_exceeded_max_size = true;
				m_trigger_cache_trim();
				return 0;
			}
		}

#if defined TORRENT_DEBUG
		TORRENT_ASSERT(m_buffers_in_use.count(ret) == 0);
		m_buffers_in_use.insert(ret);
#endif

		++m_in_use;
		if (m_in_use >= m_low_watermark + (m_max_use - m_low_watermark) / 2 && !m_exceeded_max_size)
		{
			m_exceeded_max_size = true;
			m_trigger_cache_trim();
		}
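		// optionally pin the buffer in physical memory (mlock / VirtualLock)
		// so the OS won't page the disk cache out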
#if TORRENT_USE_MLOCK
		if (m_lock_disk_cache)
		{
#ifdef TORRENT_WINDOWS
			VirtualLock(ret, m_block_size);
#else
			mlock(ret, m_block_size);
#endif
		}
#endif // TORRENT_USE_MLOCK

		TORRENT_ASSERT(is_disk_buffer(ret, l));
		return ret;
	}