void alert_manager::maybe_notify(alert* a, mutex::scoped_lock& lock)
	{
		if (a->type() == save_resume_data_failed_alert::alert_type
			|| a->type() == save_resume_data_alert::alert_type)
			++m_num_queued_resume;

		if (m_alerts[m_generation].size() == 1)
		{
			lock.unlock();

			// we just posted to an empty queue. If anyone is waiting for
			// alerts, we need to notify them. Also (potentially) call the
			// user-supplied m_notify callback to let the client wake up its
			// message loop to poll for alerts.
			if (m_notify) m_notify();

			// TODO: 2 keep a count of the number of threads waiting. Only if it's
			// > 0 notify them
			m_condition.notify_all();
		}
		else
		{
			lock.unlock();
		}

#ifndef TORRENT_DISABLE_EXTENSIONS
		for (ses_extension_list_t::iterator i = m_ses_extensions.begin()
			, end(m_ses_extensions.end()); i != end; ++i)
		{
			(*i)->on_alert(a);
		}
#endif
	}
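The first-element check above means the condition variable and the user callback are only invoked when the queue transitions from empty to non-empty; later alerts are assumed to be drained by whoever was already woken. A minimal sketch of the consuming side, assuming a mutex member named m_mutex and a wait_for_alert() entry point (neither appears in the snippet above):

	// hypothetical consumer sketch: block until an alert is posted or the
	// timeout expires; maybe_notify() above wakes this thread via
	// m_condition.notify_all() when the queue goes from empty to non-empty
	bool alert_manager::wait_for_alert(time_duration max_wait)
	{
		mutex::scoped_lock lock(m_mutex);
		if (m_alerts[m_generation].size() == 0)
			m_condition.wait_for(lock, max_wait);
		return m_alerts[m_generation].size() > 0;
	}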
Example 2
	void condition_variable::wait_for(mutex::scoped_lock& l, time_duration rel_time)
	{
		TORRENT_ASSERT(l.locked());
		++m_num_waiters;
		l.unlock();
		acquire_sem_etc(m_sem, 1, B_RELATIVE_TIMEOUT, total_microseconds(rel_time));
		l.lock();
		--m_num_waiters;
	}
Example 3
	void condition_variable::wait(mutex::scoped_lock& l)
	{
		TORRENT_ASSERT(l.locked());
		++m_num_waiters;
		l.unlock();
		acquire_sem(m_sem);
		l.lock();
		--m_num_waiters;
	}
Example 4
	void condition_variable::wait_for(mutex::scoped_lock& l, time_duration rel_time)
	{
		TORRENT_ASSERT(l.locked());
		++m_num_waiters;
		l.unlock();
		WaitForSingleObject(m_sem, total_milliseconds(rel_time));
		l.lock();
		--m_num_waiters;
	}
Example 5
	void condition_variable::wait(mutex::scoped_lock& l)
	{
		TORRENT_ASSERT(l.locked());
		++m_num_waiters;
		l.unlock();
		WaitForSingleObject(m_sem, INFINITE);
		l.lock();
		--m_num_waiters;
	}
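Each of these wait()/wait_for() variants expects to be called with the lock held: the lock is released around the blocking call and re-acquired before returning, which also means spurious wakeups are possible. A minimal usage sketch under those assumptions (thread_pool, job and the member names are invented for illustration):

	// hypothetical consumer of a condition_variable guarding a job queue;
	// the predicate is re-checked in a loop because wait() can return
	// before the condition actually holds
	job thread_pool::pop_job()
	{
		mutex::scoped_lock l(m_mutex);
		while (m_queue.empty())
			m_cond.wait(l);          // lock is released while blocked
		job j = m_queue.front();     // lock is held again here
		m_queue.pop_front();
		return j;
	}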
Example 6
	// checks to see if we're no longer exceeding the high watermark,
	// and if we're in fact below the low watermark. If so, we need to
	// post the notification messages to the peers that are waiting for
	// more buffers to receive data into
	void disk_buffer_pool::check_buffer_level(mutex::scoped_lock& l)
	{
		TORRENT_ASSERT(l.locked());
		if (!m_exceeded_max_size || m_in_use > m_low_watermark) return;

		m_exceeded_max_size = false;

		// if slice is non-NULL, only some of the handlers got a buffer
		// back, and the slice should be posted back to the network thread
		std::vector<handler_t>* slice = NULL;

		for (std::vector<handler_t>::iterator i = m_handlers.begin()
			, end(m_handlers.end()); i != end; ++i)
		{
			i->buffer = allocate_buffer_impl(l, i->category);
			if (!m_exceeded_max_size || i == end - 1) continue;

			// only some of the handlers got buffers. We need to slice the vector
			slice = new std::vector<handler_t>();
			slice->insert(slice->end(), m_handlers.begin(), i + 1);
			m_handlers.erase(m_handlers.begin(), i + 1);
			break;
		}

		if (slice != NULL)
		{
			l.unlock();
			m_ios.post(boost::bind(&watermark_callback
				, static_cast<std::vector<boost::shared_ptr<disk_observer> >*>(NULL)
				, slice));
			return;
		}

		std::vector<handler_t>* handlers = new std::vector<handler_t>();
		handlers->swap(m_handlers);

		if (m_exceeded_max_size)
		{
			l.unlock();
			m_ios.post(boost::bind(&watermark_callback
				, static_cast<std::vector<boost::shared_ptr<disk_observer> >*>(NULL)
				, handlers));
			return;
		}

		std::vector<boost::shared_ptr<disk_observer> >* cbs
			= new std::vector<boost::shared_ptr<disk_observer> >();
		m_observers.swap(*cbs);
		l.unlock();
		m_ios.post(boost::bind(&watermark_callback, cbs, handlers));
	}
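The actual delivery happens on the network thread via watermark_callback, which is not shown here. A plausible sketch of it, assuming handler_t pairs a callback with the buffer assigned above and that disk_observer exposes an on_disk() notification (both details are assumptions, not confirmed by this snippet):

	// hypothetical sketch: hand each waiting handler its newly allocated
	// buffer, then tell the observers that buffer space is available again
	void watermark_callback(std::vector<boost::shared_ptr<disk_observer> >* cbs
		, std::vector<handler_t>* handlers)
	{
		if (handlers != NULL)
		{
			for (std::vector<handler_t>::iterator i = handlers->begin()
				, end(handlers->end()); i != end; ++i)
				i->callback(i->buffer);
			delete handlers;
		}

		if (cbs != NULL)
		{
			for (std::vector<boost::shared_ptr<disk_observer> >::iterator i = cbs->begin()
				, end(cbs->end()); i != end; ++i)
				(*i)->on_disk();
			delete cbs;
		}
	}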
Example 7
	void condition_variable::wait_for(mutex::scoped_lock& l, time_duration rel_time)
	{
		TORRENT_ASSERT(l.locked());

		struct timeval tv;
		struct timespec ts;
		gettimeofday(&tv, NULL);
		boost::uint64_t microseconds = tv.tv_usec + total_microseconds(rel_time) % 1000000;
		ts.tv_nsec = (microseconds % 1000000) * 1000;
		ts.tv_sec = tv.tv_sec + total_seconds(rel_time) + microseconds / 1000000;
		
		// wow, this is quite a hack
		pthread_cond_timedwait(&m_cond, (::pthread_mutex_t*)&l.mutex(), &ts);
	}
Example 8
	void file_pool::remove_oldest(mutex::scoped_lock& l)
	{
		file_set::iterator i = std::min_element(m_files.begin(), m_files.end()
			, boost::bind(&lru_file_entry::last_use, boost::bind(&file_set::value_type::second, _1))
				< boost::bind(&lru_file_entry::last_use, boost::bind(&file_set::value_type::second, _2)));
		if (i == m_files.end()) return;

		file_handle file_ptr = i->second.file_ptr;
		m_files.erase(i);

		// closing a file may be a long-running operation (Mac OS X)
		l.unlock();
		file_ptr.reset();
		l.lock();
	}
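The nested boost::bind expression above simply compares the last_use timestamps of two entries in the file map. With C++11 available, the same least-recently-used lookup could be written with a lambda (a readability sketch, not how the library itself spells it):

	// equivalent LRU lookup with a lambda instead of nested boost::bind
	file_set::iterator i = std::min_element(m_files.begin(), m_files.end()
		, [](file_set::value_type const& lhs, file_set::value_type const& rhs)
		{ return lhs.second.last_use < rhs.second.last_use; });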
Example 9
void natpmp::disable(error_code const& ec, mutex::scoped_lock& l)
{
    m_disabled = true;

    for (std::vector<mapping_t>::iterator i = m_mappings.begin(), end(m_mappings.end()); i != end; ++i)
    {
        if (i->protocol == none) continue;
        i->protocol = none;
        int index = i - m_mappings.begin();
        l.unlock();
        m_callback(index, address(), 0, ec);
        l.lock();
    }
    close_impl(l);
}
Example 10
	bool disk_buffer_pool::is_disk_buffer(char* buffer
		, mutex::scoped_lock& l) const
	{
		TORRENT_ASSERT(m_magic == 0x1337);
		TORRENT_ASSERT(l.locked());
		TORRENT_UNUSED(l);

#if TORRENT_HAVE_MMAP
		if (m_cache_pool)
		{
			return buffer >= m_cache_pool && buffer < m_cache_pool
				+ boost::uint64_t(m_max_use) * 0x4000;
		}
#endif

#if defined TORRENT_DEBUG
		return m_buffers_in_use.count(buffer) == 1;
#elif defined TORRENT_DEBUG_BUFFERS
		return page_aligned_allocator::in_use(buffer);
#elif defined TORRENT_DISABLE_POOL_ALLOCATOR
		return true;
#else
		if (m_using_pool_allocator)
			return m_pool.is_from(buffer);
		else
			return true;
#endif
	}
Example 11
	void condition_variable::wait(mutex::scoped_lock& l)
	{
		TORRENT_ASSERT(l.locked());
		// wow, this is quite a hack
		pthread_cond_wait(&m_cond, (::pthread_mutex_t*)&l.mutex());
	}
Example 12
	void condition::signal_all(mutex::scoped_lock& l)
	{
		TORRENT_ASSERT(l.locked());
		ReleaseSemaphore(m_sem, m_num_waiters, 0);
	}
Example 13
	void condition_variable::wait(mutex::scoped_lock& l)
	{
		TORRENT_ASSERT(l.locked());
		// wow, this is quite a hack
		pthread_cond_wait(&m_cond, reinterpret_cast<pthread_mutex_t*>(&l.mutex()));
	}
Example 14
	void condition::signal_all(mutex::scoped_lock& l)
	{
		TORRENT_ASSERT(l.locked());
		release_sem_etc(m_sem, m_num_waiters, 0);
	}
Example 15
	void disk_buffer_pool::free_buffer_impl(char* buf, mutex::scoped_lock& l)
	{
		TORRENT_ASSERT(buf);
		TORRENT_ASSERT(m_magic == 0x1337);
		TORRENT_ASSERT(m_settings_set);
		TORRENT_ASSERT(is_disk_buffer(buf, l));
		TORRENT_ASSERT(l.locked());
		TORRENT_UNUSED(l);

#if TORRENT_USE_MLOCK
		if (m_lock_disk_cache)
		{
#ifdef TORRENT_WINDOWS
			VirtualUnlock(buf, m_block_size);
#else
			munlock(buf, m_block_size);
#endif
		}
#endif

#if TORRENT_HAVE_MMAP
		if (m_cache_pool)
		{
			TORRENT_ASSERT(buf >= m_cache_pool);
			TORRENT_ASSERT(buf <  m_cache_pool + boost::uint64_t(m_max_use) * 0x4000);
			int slot_index = (buf - m_cache_pool) / 0x4000;
			m_free_list.push_back(slot_index);
#if defined MADV_FREE
			// tell the virtual memory system that we don't actually care
			// about the data in these pages anymore. If this block was
			// swapped out to the SSD, it (hopefully) means it won't have
			// to be read back in once we start writing our new data to it
			madvise(buf, 0x4000, MADV_FREE);
#elif defined MADV_DONTNEED && defined TORRENT_LINUX
			// rumor has it that MADV_DONTNEED is in fact destructive
			// on linux (i.e. it won't flush it to disk or re-read from disk)
			// http://kerneltrap.org/mailarchive/linux-kernel/2007/5/1/84410
			madvise(buf, 0x4000, MADV_DONTNEED);
#endif
		}
		else
#endif
		{
#if defined TORRENT_DISABLE_POOL_ALLOCATOR

#if TORRENT_USE_PURGABLE_CONTROL
		vm_deallocate(
			mach_task_self(),
			reinterpret_cast<vm_address_t>(buf),
			0x4000
			);
#else
		page_aligned_allocator::free(buf);
#endif // TORRENT_USE_PURGABLE_CONTROL

#else
		if (m_using_pool_allocator)
			m_pool.free(buf);
		else
			page_aligned_allocator::free(buf);
#endif // TORRENT_DISABLE_POOL_ALLOCATOR
		}

#if defined TORRENT_DEBUG
		std::set<char*>::iterator i = m_buffers_in_use.find(buf);
		TORRENT_ASSERT(i != m_buffers_in_use.end());
		m_buffers_in_use.erase(i);
#endif

		--m_in_use;

#ifndef TORRENT_DISABLE_POOL_ALLOCATOR
		// should we switch which allocator to use?
		if (m_in_use == 0 && m_want_pool_allocator != m_using_pool_allocator)
		{
			m_pool.release_memory();
			m_using_pool_allocator = m_want_pool_allocator;
		}
#endif
	}
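Like most of the _impl functions in these examples, free_buffer_impl() requires the pool mutex to already be held; the public entry point is presumably a thin wrapper that takes the lock first. A sketch of such a wrapper, assuming the mutex member is called m_pool_mutex (a name not shown above):

	// hypothetical public wrapper: acquire the pool mutex, then delegate
	// to the _impl function that expects the lock to be held
	void disk_buffer_pool::free_buffer(char* buf)
	{
		mutex::scoped_lock l(m_pool_mutex);
		free_buffer_impl(buf, l);
	}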
Example 16
void natpmp::log(char const* msg, mutex::scoped_lock& l)
{
	l.unlock();
	m_log_callback(msg);
	l.lock();
}
Example 17
	void condition::signal_all(mutex::scoped_lock& l)
	{
		TORRENT_ASSERT(l.locked());
		pthread_cond_broadcast(&m_cond);
	}
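signal_all() is likewise meant to be called with the lock held, typically right after the producer has updated the shared state that the waiters check. A minimal producer sketch under that assumption, matching the hypothetical consumer shown after Example 5 (thread_pool, job and the member names are invented):

	// hypothetical producer: modify the shared queue under the lock, then
	// wake every thread blocked in wait()/wait_for() on the same condition
	void thread_pool::push_job(job const& j)
	{
		mutex::scoped_lock l(m_mutex);
		m_queue.push_back(j);
		m_cond.signal_all(l);
	}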