Example #1
	char* disk_buffer_pool::allocate_buffer(char const* category)
	{
		mutex::scoped_lock l(m_pool_mutex);
		TORRENT_ASSERT(m_magic == 0x1337);
#ifdef TORRENT_DISABLE_POOL_ALLOCATOR
		char* ret = page_aligned_allocator::malloc(m_block_size);
#else
		char* ret = (char*)m_pool.ordered_malloc();
		m_pool.set_next_size(m_settings.cache_buffer_chunk_size);
#endif
		// both allocators return 0 on failure (as the assert at the bottom
		// acknowledges); don't count or lock a buffer we never received
		if (ret == 0) return 0;
		++m_in_use;
#if TORRENT_USE_MLOCK
		if (m_settings.lock_disk_cache)
		{
#ifdef TORRENT_WINDOWS
			VirtualLock(ret, m_block_size);
#else
			mlock(ret, m_block_size);
#endif
		}
#endif

#if defined TORRENT_DISK_STATS || defined TORRENT_STATS
		++m_allocations;
#endif
#ifdef TORRENT_DISK_STATS
		++m_categories[category];
		m_buf_to_category[ret] = category;
		m_log << log_time() << " " << category << ": " << m_categories[category] << "\n";
#endif
		TORRENT_ASSERT(ret == 0 || is_disk_buffer(ret, l));
		return ret;
	}
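
The path above either carves blocks out of a boost::pool or asks for page-aligned memory, then optionally pins the buffer so the OS cannot swap the disk cache out. Below is a minimal self-contained sketch of that pin/unpin pairing using POSIX calls directly; block_size and lock_disk_cache are made-up stand-ins for m_block_size and settings.lock_disk_cache, not names from the library.

	#include <sys/mman.h> // mlock, munlock
	#include <unistd.h>   // sysconf
	#include <cstdlib>    // posix_memalign, free

	std::size_t const block_size = 16 * 1024; // stand-in for m_block_size
	bool const lock_disk_cache = true;        // stand-in for the setting

	char* allocate_pinned_block()
	{
		void* p = nullptr;
		// page-aligned allocation, comparable to page_aligned_allocator::malloc
		if (posix_memalign(&p, std::size_t(sysconf(_SC_PAGESIZE)), block_size) != 0)
			return nullptr;
		// pin the pages so the cached data cannot be swapped out
		if (lock_disk_cache) mlock(p, block_size);
		return static_cast<char*>(p);
	}

	void free_pinned_block(char* buf)
	{
		if (buf == nullptr) return;
		if (lock_disk_cache) munlock(buf, block_size); // mirror of example #2
		std::free(buf);
	}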
Example #2
	void disk_buffer_pool::free_buffer_impl(char* buf, mutex::scoped_lock& l)
	{
		TORRENT_ASSERT(buf);
		TORRENT_ASSERT(m_magic == 0x1337);
		TORRENT_ASSERT(is_disk_buffer(buf, l));
#if defined TORRENT_DISK_STATS || defined TORRENT_STATS
		--m_allocations;
#endif
#ifdef TORRENT_DISK_STATS
		TORRENT_ASSERT(m_categories.find(m_buf_to_category[buf])
			!= m_categories.end());
		std::string const& category = m_buf_to_category[buf];
		--m_categories[category];
		m_log << log_time() << " " << category << ": " << m_categories[category] << "\n";
		m_buf_to_category.erase(buf);
#endif
#if TORRENT_USE_MLOCK
		if (m_settings.lock_disk_cache)
		{
#ifdef TORRENT_WINDOWS
			VirtualUnlock(buf, m_block_size);
#else
			munlock(buf, m_block_size);
#endif
		}
#endif
#ifdef TORRENT_DISABLE_POOL_ALLOCATOR
		page_aligned_allocator::free(buf);
#else
		m_pool.free(buf);
#endif
		--m_in_use;
	}
Example #3
	void disk_buffer_pool::free_buffer(char* buf)
	{
		std::unique_lock<std::mutex> l(m_pool_mutex);
		TORRENT_ASSERT(is_disk_buffer(buf, l));
		free_buffer_impl(buf, l);
		remove_buffer_in_use(buf);
		check_buffer_level(l);
	}
Example #4
	void disk_buffer_pool::free_iovec(span<iovec_t const> iov)
	{
		// TODO: perhaps we should sort the buffers here?
		std::unique_lock<std::mutex> l(m_pool_mutex);
		for (auto i : iov)
		{
			char* buf = i.data();
			TORRENT_ASSERT(is_disk_buffer(buf, l));
			free_buffer_impl(buf, l);
			remove_buffer_in_use(buf);
		}
		check_buffer_level(l);
	}
Example #5
	void disk_buffer_pool::free_multiple_buffers(span<char*> bufvec)
	{
		// sort the pointers in order to maximize cache hits
		std::sort(bufvec.begin(), bufvec.end());

		std::unique_lock<std::mutex> l(m_pool_mutex);
		for (char* buf : bufvec)
		{
			TORRENT_ASSERT(is_disk_buffer(buf, l));
			free_buffer_impl(buf, l);
			remove_buffer_in_use(buf);
		}

		check_buffer_level(l);
	}
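
A hypothetical call site, assuming an already-constructed disk_buffer_pool named pool and that libtorrent's span converts from a std::vector (an assumption, not something the snippet above shows). Sorting the pointers first means the frees walk the pool's free list mostly in one direction, which is the cache benefit the comment refers to.

	// hypothetical usage; "pool" is an already-initialized disk_buffer_pool
	std::vector<char*> bufs;
	for (int i = 0; i < 8; ++i)
		bufs.push_back(pool.allocate_buffer("read cache"));

	// ... fill the blocks, hand them to the write path ...

	// one lock acquisition and one sort for all eight frees
	pool.free_multiple_buffers(bufs);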
Example #6
	void disk_buffer_pool::rename_buffer(char* buf, char const* category)
	{
		mutex::scoped_lock l(m_pool_mutex);
		TORRENT_ASSERT(is_disk_buffer(buf, l));
		TORRENT_ASSERT(m_categories.find(m_buf_to_category[buf])
			!= m_categories.end());
		std::string const& prev_category = m_buf_to_category[buf];
		--m_categories[prev_category];
		m_log << log_time() << " " << prev_category << ": " << m_categories[prev_category] << "\n";

		++m_categories[category];
		m_buf_to_category[buf] = category;
		m_log << log_time() << " " << category << ": " << m_categories[category] << "\n";
		TORRENT_ASSERT(m_categories.find(m_buf_to_category[buf])
			!= m_categories.end());
	}
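
The statistics build keeps two maps: a live-buffer count per category and a reverse map from buffer pointer to its current category, which is why rename_buffer decrements one counter and increments another. A stripped-down sketch of that bookkeeping, with hypothetical names:

	#include <map>
	#include <string>

	std::map<std::string, int> categories;        // category -> live buffer count
	std::map<char*, std::string> buf_to_category; // buffer   -> current category

	void rename(char* buf, char const* new_category)
	{
		// move one unit of "live buffer" from the old bucket to the new one
		--categories[buf_to_category[buf]];
		++categories[new_category];
		buf_to_category[buf] = new_category;
	}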
Example #7
	char* disk_buffer_pool::allocate_buffer_impl(std::unique_lock<std::mutex>& l
		, char const*)
	{
		TORRENT_ASSERT(m_settings_set);
		TORRENT_ASSERT(m_magic == 0x1337);
		TORRENT_ASSERT(l.owns_lock());
		TORRENT_UNUSED(l);

		char* ret = page_malloc(default_block_size);

		if (ret == nullptr)
		{
			m_exceeded_max_size = true;
			m_trigger_cache_trim();
			return nullptr;
		}

		++m_in_use;

#if TORRENT_USE_INVARIANT_CHECKS
		try
		{
			TORRENT_ASSERT(m_buffers_in_use.count(ret) == 0);
			m_buffers_in_use.insert(ret);
		}
		catch (...)
		{
			free_buffer_impl(ret, l);
			return nullptr;
		}
#endif

		if (m_in_use >= m_low_watermark + (m_max_use - m_low_watermark) / 2
			&& !m_exceeded_max_size)
		{
			m_exceeded_max_size = true;
			m_trigger_cache_trim();
		}

		TORRENT_ASSERT(is_disk_buffer(ret, l));
		return ret;
	}
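
The trim trigger fires once usage passes the midpoint between the low watermark and the cap: with a low watermark of 100 blocks and a maximum of 400, m_low_watermark + (m_max_use - m_low_watermark) / 2 evaluates to 100 + 150 = 250. A self-contained check of that arithmetic:

	#include <cassert>

	// same condition as above, with the members passed in explicitly
	bool should_trim(int in_use, int low_watermark, int max_use)
	{
		return in_use >= low_watermark + (max_use - low_watermark) / 2;
	}

	int main()
	{
		assert(!should_trim(249, 100, 400)); // still below the midpoint
		assert(should_trim(250, 100, 400));  // at the midpoint: request a trim
	}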
Example #8
	// this function allocates buffers and
	// fills in the iovec array with the buffers
	int disk_buffer_pool::allocate_iovec(span<iovec_t> iov)
	{
		std::unique_lock<std::mutex> l(m_pool_mutex);
		for (auto& i : iov)
		{
			i = { allocate_buffer_impl(l, "pending read"), std::size_t(default_block_size)};
			if (i.data() == nullptr)
			{
				// uh oh. We failed to allocate the buffer!
				// we need to roll back and free all the buffers
				// we've already allocated
				for (auto j : iov)
				{
					if (j.data() == nullptr) break;
					char* buf = j.data();
					TORRENT_ASSERT(is_disk_buffer(buf, l));
					free_buffer_impl(buf, l);
					remove_buffer_in_use(buf);
				}
				return -1;
			}
		}
		return 0;
	}
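
A hypothetical caller of this all-or-nothing batch allocation, again assuming an initialized pool; iovec_t is libtorrent's span-of-char alias, and the snippet assumes a default-constructed entry is empty:

	void read_into_fresh_buffers(disk_buffer_pool& pool)
	{
		std::vector<iovec_t> iov(4); // request four default_block_size blocks
		if (pool.allocate_iovec(iov) != 0)
			return; // nothing to clean up: allocate_iovec rolled back already

		// ... scatter a disk read across iov ...

		pool.free_iovec(iov);
	}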
Example #9
	bool disk_buffer_pool::is_disk_buffer(char* buffer) const
	{
		std::unique_lock<std::mutex> l(m_pool_mutex);
		return is_disk_buffer(buffer, l);
	}
Example #10
	void disk_buffer_pool::free_buffer_impl(char* buf, mutex::scoped_lock& l)
	{
		TORRENT_ASSERT(buf);
		TORRENT_ASSERT(m_magic == 0x1337);
		TORRENT_ASSERT(m_settings_set);
		TORRENT_ASSERT(is_disk_buffer(buf, l));

#if TORRENT_USE_MLOCK
		if (m_lock_disk_cache)
		{
#ifdef TORRENT_WINDOWS
			VirtualUnlock(buf, m_block_size);
#else
			munlock(buf, m_block_size);
#endif
		}
#endif

#if TORRENT_HAVE_MMAP
		if (m_cache_pool)
		{
			TORRENT_ASSERT(buf >= m_cache_pool);
			TORRENT_ASSERT(buf <  m_cache_pool + boost::uint64_t(m_max_use) * 0x4000);
			int slot_index = (buf - m_cache_pool) / 0x4000;
			m_free_list.push_back(slot_index);
#if defined MADV_FREE
			// tell the virtual memory system that we don't actually care
			// about the data in these pages anymore. If this block was
			// swapped out to the SSD, it (hopefully) means it won't have
			// to be read back in once we start writing our new data to it
			madvise(buf, 0x4000, MADV_FREE);
#elif defined MADV_DONTNEED && defined TORRENT_LINUX
			// rumor has it that MADV_DONTNEED is in fact destructive
			// on linux (i.e. it won't flush it to disk or re-read from disk)
			// http://kerneltrap.org/mailarchive/linux-kernel/2007/5/1/84410
			madvise(buf, 0x4000, MADV_DONTNEED);
#endif
		}
		else
#endif
		{
#if defined TORRENT_DISABLE_POOL_ALLOCATOR

#if TORRENT_USE_PURGABLE_CONTROL
		vm_deallocate(
			mach_task_self(),
			reinterpret_cast<vm_address_t>(buf),
			0x4000
			);
#else
		page_aligned_allocator::free(buf);
#endif // TORRENT_USE_PURGABLE_CONTROL

#else
		if (m_using_pool_allocator)
			m_pool.free(buf);
		else
			page_aligned_allocator::free(buf);
#endif // TORRENT_DISABLE_POOL_ALLOCATOR
		}

#if defined TORRENT_DEBUG
		std::set<char*>::iterator i = m_buffers_in_use.find(buf);
		TORRENT_ASSERT(i != m_buffers_in_use.end());
		m_buffers_in_use.erase(i);
#endif

		--m_in_use;

#ifndef TORRENT_DISABLE_POOL_ALLOCATOR
		// should we switch which allocator to use?
		if (m_in_use == 0 && m_want_pool_allocator != m_using_pool_allocator)
		{
			m_pool.release_memory();
			m_using_pool_allocator = m_want_pool_allocator;
		}
#endif
	}
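
When the cache lives in one large mmap()ed region, freeing a block means pushing its slot index onto a free list and telling the kernel the pages are disposable, as above. A standalone POSIX sketch of that pattern; the 0x4000 (16 KiB) block size is taken from the code, everything else is a placeholder:

	#include <sys/mman.h>
	#include <cstddef>

	std::size_t const block = 0x4000; // 16 KiB blocks, as above

	char* reserve_pool(std::size_t num_blocks)
	{
		void* p = mmap(nullptr, block * num_blocks, PROT_READ | PROT_WRITE
			, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		return p == MAP_FAILED ? nullptr : static_cast<char*>(p);
	}

	void discard_block(char* buf)
	{
		// the address range stays reserved, but the kernel may reclaim
		// the physical pages behind it until the slot is reused
#if defined MADV_FREE
		madvise(buf, block, MADV_FREE);
#elif defined MADV_DONTNEED
		madvise(buf, block, MADV_DONTNEED);
#endif
	}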
Example #11
	char* disk_buffer_pool::allocate_buffer_impl(mutex::scoped_lock& l, char const* category)
	{
		TORRENT_ASSERT(m_settings_set);
		TORRENT_ASSERT(m_magic == 0x1337);

		char* ret;
#if TORRENT_HAVE_MMAP
		if (m_cache_pool)
		{
			if (m_free_list.size() <= (m_max_use - m_low_watermark) / 2 && !m_exceeded_max_size)
			{
				m_exceeded_max_size = true;
				m_trigger_cache_trim();
			}
			if (m_free_list.empty()) return 0;
			boost::uint64_t slot_index = m_free_list.back();
			m_free_list.pop_back();
			ret = m_cache_pool + (slot_index * 0x4000);
			TORRENT_ASSERT(is_disk_buffer(ret, l));
		}
		else
#endif
		{
#if defined TORRENT_DISABLE_POOL_ALLOCATOR

#if TORRENT_USE_PURGABLE_CONTROL
			kern_return_t res = vm_allocate(
				mach_task_self(),
				reinterpret_cast<vm_address_t*>(&ret),
				0x4000,
				VM_FLAGS_PURGABLE |
				VM_FLAGS_ANYWHERE);
			if (res != KERN_SUCCESS)
				ret = NULL;
#else
			ret = page_aligned_allocator::malloc(m_block_size);
#endif // TORRENT_USE_PURGABLE_CONTROL

#else
			if (m_using_pool_allocator)
			{
				ret = (char*)m_pool.malloc();
				int effective_block_size = m_cache_buffer_chunk_size
					? m_cache_buffer_chunk_size
					: (std::max)(m_max_use / 10, 1);
				m_pool.set_next_size(effective_block_size);
			}
			else
			{
				ret = page_aligned_allocator::malloc(m_block_size);
			}
#endif
			if (ret == NULL)
			{
				m_exceeded_max_size = true;
				m_trigger_cache_trim();
				return 0;
			}
		}

#if defined TORRENT_DEBUG
		TORRENT_ASSERT(m_buffers_in_use.count(ret) == 0);
		m_buffers_in_use.insert(ret);
#endif

		++m_in_use;
		if (m_in_use >= m_low_watermark + (m_max_use - m_low_watermark) / 2 && !m_exceeded_max_size)
		{
			m_exceeded_max_size = true;
			m_trigger_cache_trim();
		}
#if TORRENT_USE_MLOCK
		if (m_lock_disk_cache)
		{
#ifdef TORRENT_WINDOWS
			VirtualLock(ret, m_block_size);
#else
			mlock(ret, m_block_size);
#endif
		}
#endif // TORRENT_USE_MLOCK

		TORRENT_ASSERT(is_disk_buffer(ret, l));
		return ret;
	}
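
The non-mmap path leans on boost::pool: ordered_malloc hands out one block, set_next_size controls how many blocks the next internal chunk grows by, and release_memory gives fully-unused chunks back when the allocator is being switched off. A minimal sketch of that boost.pool usage (block and chunk sizes are made up):

	#include <boost/pool/pool.hpp>

	boost::pool<> pool(16 * 1024); // one element per 16 KiB block

	char* grab_block(int chunk_size)
	{
		char* ret = static_cast<char*>(pool.ordered_malloc());
		// grow by a fixed number of blocks next time, like
		// settings.cache_buffer_chunk_size in example #1
		pool.set_next_size(chunk_size);
		return ret;
	}

	void drop_block(char* buf)
	{
		pool.free(buf);
	}

	void release_unused_chunks()
	{
		// hands chunks with no live allocations back to the system, as the
		// code above does when m_in_use drops to zero during a switch
		pool.release_memory();
	}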
Example #12
	bool disk_buffer_pool::is_disk_buffer(char* buffer) const
	{
		mutex::scoped_lock l(m_pool_mutex);
		return is_disk_buffer(buffer, l);
	}