bool disk_buffer_pool::is_disk_buffer(char* buffer
		, mutex::scoped_lock& l) const
	{
		TORRENT_ASSERT(m_magic == 0x1337);
		TORRENT_ASSERT(l.locked());
		TORRENT_UNUSED(l);

#if TORRENT_HAVE_MMAP
		if (m_cache_pool)
		{
			return buffer >= m_cache_pool && buffer < m_cache_pool
				+ boost::uint64_t(m_max_use) * 0x4000;
		}
#endif

#if defined TORRENT_DEBUG
		return m_buffers_in_use.count(buffer) == 1;
#elif defined TORRENT_DEBUG_BUFFERS
		return page_aligned_allocator::in_use(buffer);
#elif defined TORRENT_DISABLE_POOL_ALLOCATOR
		return true;
#else
		if (m_using_pool_allocator)
			return m_pool.is_from(buffer);
		else
			return true;
#endif
	}
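	// A minimal standalone sketch (illustrative, not part of libtorrent) of
	// the pointer-range test above: a buffer belongs to the mmap cache pool
	// iff it lies in [pool, pool + max_use * 0x4000), where 0x4000 (16 kiB)
	// is the block size the pool is carved into.
	inline bool in_mmap_pool(char const* buf, char const* pool
		, boost::uint64_t max_use)
	{
		return buf >= pool && buf < pool + max_use * 0x4000;
	}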
Example #2
	void condition_variable::wait_for(mutex::scoped_lock& l, time_duration rel_time)
	{
		TORRENT_ASSERT(l.locked());
		++m_num_waiters;
		l.unlock();
		acquire_sem_etc(m_sem, 1, B_RELATIVE_TIMEOUT, total_microseconds(rel_time));
		l.lock();
		--m_num_waiters;
	}
Example #3
	void condition_variable::wait(mutex::scoped_lock& l)
	{
		TORRENT_ASSERT(l.locked());
		++m_num_waiters;
		l.unlock();
		acquire_sem(m_sem);
		l.lock();
		--m_num_waiters;
	}
Example #4
	void condition_variable::wait_for(mutex::scoped_lock& l, time_duration rel_time)
	{
		TORRENT_ASSERT(l.locked());
		++m_num_waiters;
		l.unlock();
		WaitForSingleObject(m_sem, total_milliseconds(rel_time));
		l.lock();
		--m_num_waiters;
	}
Example #5
	void condition_variable::wait(mutex::scoped_lock& l)
	{
		TORRENT_ASSERT(l.locked());
		++m_num_waiters;
		l.unlock();
		WaitForSingleObject(m_sem, INFINITE);
		l.lock();
		--m_num_waiters;
	}
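	// Hedged usage sketch (the names below are illustrative, not from
	// libtorrent): every wait() variant above releases the lock while
	// blocked, and a waiter may wake spuriously or after the state has
	// changed again, so callers re-check their predicate in a loop:
	//
	//	mutex::scoped_lock l(m_job_mutex);
	//	while (m_queue.empty())
	//		m_job_cond.wait(l);
	//	// the lock is held again here; it is safe to pop from m_queue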
	// checks to see if we're no longer exceeding the high watermark,
	// and if we're in fact below the low watermark. If so, we need to
	// post the notification messages to the peers that are waiting for
	// more buffers to receive data into
	void disk_buffer_pool::check_buffer_level(mutex::scoped_lock& l)
	{
		TORRENT_ASSERT(l.locked());
		if (!m_exceeded_max_size || m_in_use > m_low_watermark) return;

		m_exceeded_max_size = false;

		// if slice is non-NULL, only some of the handlers got a buffer
		// back, and the slice should be posted back to the network thread
		std::vector<handler_t>* slice = NULL;

		for (std::vector<handler_t>::iterator i = m_handlers.begin()
			, end(m_handlers.end()); i != end; ++i)
		{
			i->buffer = allocate_buffer_impl(l, i->category);
			if (!m_exceeded_max_size || i == end - 1) continue;

			// only some of the handlers got buffers. We need to slice the vector
			slice = new std::vector<handler_t>();
			slice->insert(slice->end(), m_handlers.begin(), i + 1);
			m_handlers.erase(m_handlers.begin(), i + 1);
			break;
		}

		if (slice != NULL)
		{
			l.unlock();
			m_ios.post(boost::bind(&watermark_callback
				, static_cast<std::vector<boost::shared_ptr<disk_observer> >*>(NULL)
				, slice));
			return;
		}

		std::vector<handler_t>* handlers = new std::vector<handler_t>();
		handlers->swap(m_handlers);

		if (m_exceeded_max_size)
		{
			l.unlock();
			m_ios.post(boost::bind(&watermark_callback
				, static_cast<std::vector<boost::shared_ptr<disk_observer> >*>(NULL)
				, handlers));
			return;
		}

		std::vector<boost::shared_ptr<disk_observer> >* cbs
			= new std::vector<boost::shared_ptr<disk_observer> >();
		m_observers.swap(*cbs);
		l.unlock();
		m_ios.post(boost::bind(&watermark_callback, cbs, handlers));
	}
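	// Minimal standalone sketch (illustrative; Allocator is a hypothetical
	// stand-in for allocate_buffer_impl() plus the m_exceeded_max_size
	// flag) of the slicing logic above: grant buffers front to back, and
	// if the pool runs dry part-way through, peel off the satisfied prefix
	// (including the request that tripped the limit) and leave the rest
	// queued.
#include <vector>

	template <typename Handler, typename Allocator>
	std::vector<Handler>* take_satisfied_prefix(std::vector<Handler>& queue
		, Allocator& alloc)
	{
		for (typename std::vector<Handler>::iterator i = queue.begin()
			, end(queue.end()); i != end; ++i)
		{
			i->buffer = alloc.allocate();
			if (!alloc.exceeded() || i == end - 1) continue;
			std::vector<Handler>* prefix
				= new std::vector<Handler>(queue.begin(), i + 1);
			queue.erase(queue.begin(), i + 1);
			return prefix;
		}
		// no slice needed; every queued handler was processed
		return NULL;
	}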
Example #7
	void condition_variable::wait_for(mutex::scoped_lock& l, time_duration rel_time)
	{
		TORRENT_ASSERT(l.locked());

		struct timeval tv;
		struct timespec ts;
		gettimeofday(&tv, NULL);
		boost::uint64_t microseconds = tv.tv_usec + total_microseconds(rel_time) % 1000000;
		ts.tv_nsec = (microseconds % 1000000) * 1000;
		ts.tv_sec = tv.tv_sec + total_seconds(rel_time) + microseconds / 1000000;
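		// worked example: rel_time = 2.5s and tv.tv_usec = 800000 gives
		// microseconds = 800000 + 500000 = 1300000, so ts.tv_nsec becomes
		// 300000 * 1000 and ts.tv_sec gains total_seconds(rel_time) = 2
		// plus the carried second from microseconds / 1000000 = 1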
		
		// wow, this is quite a hack
		pthread_cond_timedwait(&m_cond, (::pthread_mutex_t*)&l.mutex(), &ts);
	}
Example #8
	void condition_variable::wait(mutex::scoped_lock& l)
	{
		TORRENT_ASSERT(l.locked());
		// wow, this is quite a hack
		pthread_cond_wait(&m_cond, (::pthread_mutex_t*)&l.mutex());
	}
Example #9
	void condition::signal_all(mutex::scoped_lock& l)
	{
		TORRENT_ASSERT(l.locked());
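		// add one semaphore count per blocked waiter so all of them are
		// released; the last argument is an optional out-parameter for the
		// previous count, which we don't need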
		ReleaseSemaphore(m_sem, m_num_waiters, 0);
	}
Example #10
	void condition::signal_all(mutex::scoped_lock& l)
	{
		TORRENT_ASSERT(l.locked());
		pthread_cond_broadcast(&m_cond);
	}
Example #11
	void condition_variable::wait(mutex::scoped_lock& l)
	{
		TORRENT_ASSERT(l.locked());
		// wow, this is quite a hack
		pthread_cond_wait(&m_cond, reinterpret_cast<pthread_mutex_t*>(&l.mutex()));
	}
Example #12
	void condition::signal_all(mutex::scoped_lock& l)
	{
		TORRENT_ASSERT(l.locked());
		release_sem_etc(m_sem, m_num_waiters, 0);
	}
	void disk_buffer_pool::free_buffer_impl(char* buf, mutex::scoped_lock& l)
	{
		TORRENT_ASSERT(buf);
		TORRENT_ASSERT(m_magic == 0x1337);
		TORRENT_ASSERT(m_settings_set);
		TORRENT_ASSERT(is_disk_buffer(buf, l));
		TORRENT_ASSERT(l.locked());
		TORRENT_UNUSED(l);

#if TORRENT_USE_MLOCK
		if (m_lock_disk_cache)
		{
#ifdef TORRENT_WINDOWS
			VirtualUnlock(buf, m_block_size);
#else
			munlock(buf, m_block_size);
#endif
		}
#endif

#if TORRENT_HAVE_MMAP
		if (m_cache_pool)
		{
			TORRENT_ASSERT(buf >= m_cache_pool);
			TORRENT_ASSERT(buf <  m_cache_pool + boost::uint64_t(m_max_use) * 0x4000);
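			// each slot in the pool is one 16 kiB (0x4000 byte) block;
			// recover the slot index from the buffer's byte offset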
			int slot_index = (buf - m_cache_pool) / 0x4000;
			m_free_list.push_back(slot_index);
#if defined MADV_FREE
			// tell the virtual memory system that we don't actually care
			// about the data in these pages anymore. If this block was
			// swapped out to the SSD, it (hopefully) means it won't have
			// to be read back in once we start writing our new data to it
			madvise(buf, 0x4000, MADV_FREE);
#elif defined MADV_DONTNEED && defined TORRENT_LINUX
			// rumor has it that MADV_DONTNEED is in fact destructive
			// on linux (i.e. it won't flush it to disk or re-read from disk)
			// http://kerneltrap.org/mailarchive/linux-kernel/2007/5/1/84410
			madvise(buf, 0x4000, MADV_DONTNEED);
#endif
		}
		else
#endif
		{
#if defined TORRENT_DISABLE_POOL_ALLOCATOR

#if TORRENT_USE_PURGABLE_CONTROL
		vm_deallocate(
			mach_task_self(),
			reinterpret_cast<vm_address_t>(buf),
			0x4000
			);
#else
		page_aligned_allocator::free(buf);
#endif // TORRENT_USE_PURGABLE_CONTROL

#else
		if (m_using_pool_allocator)
			m_pool.free(buf);
		else
			page_aligned_allocator::free(buf);
#endif // TORRENT_DISABLE_POOL_ALLOCATOR
		}

#if defined TORRENT_DEBUG
		std::set<char*>::iterator i = m_buffers_in_use.find(buf);
		TORRENT_ASSERT(i != m_buffers_in_use.end());
		m_buffers_in_use.erase(i);
#endif

		--m_in_use;

#ifndef TORRENT_DISABLE_POOL_ALLOCATOR
		// should we switch which allocator to use?
		if (m_in_use == 0 && m_want_pool_allocator != m_using_pool_allocator)
		{
			m_pool.release_memory();
			m_using_pool_allocator = m_want_pool_allocator;
		}
#endif
	}
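	// Standalone POSIX sketch (not libtorrent code) of the MADV_FREE
	// pattern used in free_buffer_impl() above: once a block is returned
	// to the free list, tell the kernel its pages may be discarded rather
	// than swapped out or written back.
#include <sys/mman.h>

	void discard_block(char* pool, int slot)
	{
#if defined MADV_FREE
		madvise(pool + boost::uint64_t(slot) * 0x4000, 0x4000, MADV_FREE);
#elif defined MADV_DONTNEED
		// note: destructive on Linux; the old contents are gone immediately
		madvise(pool + boost::uint64_t(slot) * 0x4000, 0x4000, MADV_DONTNEED);
#endif
	}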