void alert_manager::maybe_notify(alert* a, mutex::scoped_lock& lock)
{
	if (a->type() == save_resume_data_failed_alert::alert_type
		|| a->type() == save_resume_data_alert::alert_type)
		++m_num_queued_resume;

	if (m_alerts[m_generation].size() == 1)
	{
		lock.unlock();

		// we just posted to an empty queue. If anyone is waiting for
		// alerts, we need to notify them. Also (potentially) call the
		// user supplied m_notify callback to let the client wake up its
		// message loop to poll for alerts.
		if (m_notify) m_notify();

		// TODO: 2 keep a count of the number of threads waiting. Only if it's
		// > 0 notify them
		m_condition.notify_all();
	}
	else
	{
		lock.unlock();
	}

#ifndef TORRENT_DISABLE_EXTENSIONS
	for (ses_extension_list_t::iterator i = m_ses_extensions.begin()
		, end(m_ses_extensions.end()); i != end; ++i)
	{
		(*i)->on_alert(a);
	}
#endif
}
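// the notification above pairs with a blocking consumer on the client
// side. A minimal sketch of such a consumer, assuming the queue exposes
// empty() and front() (this is not necessarily the exact libtorrent
// signature):
alert* alert_manager::wait_for_alert(time_duration max_wait)
{
	mutex::scoped_lock lock(m_mutex);

	// an alert may already be queued; only block on an empty queue
	if (!m_alerts[m_generation].empty())
		return m_alerts[m_generation].front();

	m_condition.wait_for(lock, max_wait);
	if (!m_alerts[m_generation].empty())
		return m_alerts[m_generation].front();
	return NULL;
}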
// checks to see if we're no longer exceeding the high watermark,
// and if we're in fact below the low watermark. If so, we need to
// post the notification messages to the peers that are waiting for
// more buffers to receive data into
void disk_buffer_pool::check_buffer_level(mutex::scoped_lock& l)
{
	TORRENT_ASSERT(l.locked());
	if (!m_exceeded_max_size || m_in_use > m_low_watermark) return;

	m_exceeded_max_size = false;

	// if slice is non-NULL, only some of the handlers got a buffer
	// back, and the slice should be posted back to the network thread
	std::vector<handler_t>* slice = NULL;

	for (std::vector<handler_t>::iterator i = m_handlers.begin()
		, end(m_handlers.end()); i != end; ++i)
	{
		i->buffer = allocate_buffer_impl(l, i->category);
		if (!m_exceeded_max_size || i == end - 1) continue;

		// only some of the handlers got buffers. We need to slice the vector
		slice = new std::vector<handler_t>();
		slice->insert(slice->end(), m_handlers.begin(), i + 1);
		m_handlers.erase(m_handlers.begin(), i + 1);
		break;
	}

	if (slice != NULL)
	{
		l.unlock();
		m_ios.post(boost::bind(&watermark_callback
			, static_cast<std::vector<boost::shared_ptr<disk_observer> >*>(NULL)
			, slice));
		return;
	}

	std::vector<handler_t>* handlers = new std::vector<handler_t>();
	handlers->swap(m_handlers);

	if (m_exceeded_max_size)
	{
		l.unlock();
		m_ios.post(boost::bind(&watermark_callback
			, static_cast<std::vector<boost::shared_ptr<disk_observer> >*>(NULL)
			, handlers));
		return;
	}

	std::vector<boost::shared_ptr<disk_observer> >* cbs
		= new std::vector<boost::shared_ptr<disk_observer> >();
	m_observers.swap(*cbs);
	l.unlock();
	m_ios.post(boost::bind(&watermark_callback, cbs, handlers));
}
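// a plausible shape for watermark_callback, which runs on the network
// thread after the post() calls above. The handler_t fields used here
// (callback, buffer) are assumptions for illustration:
void watermark_callback(std::vector<boost::shared_ptr<disk_observer> >* cbs
	, std::vector<handler_t>* handlers)
{
	// hand each queued handler the buffer it was waiting for
	if (handlers != NULL)
	{
		for (std::vector<handler_t>::iterator i = handlers->begin()
			, end(handlers->end()); i != end; ++i)
			i->callback(i->buffer);
		delete handlers;
	}

	// tell every observer that disk buffers are available again
	if (cbs != NULL)
	{
		for (std::vector<boost::shared_ptr<disk_observer> >::iterator i
			= cbs->begin(), end(cbs->end()); i != end; ++i)
			(*i)->on_disk();
		delete cbs;
	}
}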
void condition_variable::wait_for(mutex::scoped_lock& l, time_duration rel_time)
{
	TORRENT_ASSERT(l.locked());
	++m_num_waiters;
	l.unlock();
	acquire_sem_etc(m_sem, 1, B_RELATIVE_TIMEOUT
		, total_microseconds(rel_time));
	l.lock();
	--m_num_waiters;
}
void condition_variable::wait(mutex::scoped_lock& l)
{
	TORRENT_ASSERT(l.locked());
	++m_num_waiters;
	l.unlock();
	acquire_sem(m_sem);
	l.lock();
	--m_num_waiters;
}
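// m_num_waiters is tracked so that notify_all() can release exactly as
// many semaphore slots as there are blocked threads. A minimal sketch
// of the matching notify_all() on BeOS/Haiku (an assumption for
// illustration, mirroring the wait functions above):
void condition_variable::notify_all()
{
	// release one slot per waiter; a count of zero would be invalid
	if (m_num_waiters > 0)
		release_sem_etc(m_sem, m_num_waiters, 0);
}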
void condition_variable::wait_for(mutex::scoped_lock& l, time_duration rel_time)
{
	TORRENT_ASSERT(l.locked());
	++m_num_waiters;
	l.unlock();
	WaitForSingleObject(m_sem, total_milliseconds(rel_time));
	l.lock();
	--m_num_waiters;
}
void condition_variable::wait(mutex::scoped_lock& l)
{
	TORRENT_ASSERT(l.locked());
	++m_num_waiters;
	l.unlock();
	WaitForSingleObject(m_sem, INFINITE);
	l.lock();
	--m_num_waiters;
}
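// the Win32 flavor needs the waiter count for the same reason:
// ReleaseSemaphore() takes an explicit release count. A minimal sketch
// (an assumption, mirroring the wait functions above):
void condition_variable::notify_all()
{
	// wake every blocked thread; ReleaseSemaphore rejects a zero count
	if (m_num_waiters > 0)
		ReleaseSemaphore(m_sem, m_num_waiters, NULL);
}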
void file_pool::remove_oldest(mutex::scoped_lock& l)
{
	file_set::iterator i = std::min_element(m_files.begin(), m_files.end()
		, boost::bind(&lru_file_entry::last_use
			, boost::bind(&file_set::value_type::second, _1))
		< boost::bind(&lru_file_entry::last_use
			, boost::bind(&file_set::value_type::second, _2)));
	if (i == m_files.end()) return;

	file_handle file_ptr = i->second.file_ptr;
	m_files.erase(i);

	// closing a file may be a long running operation (mac os x)
	l.unlock();
	file_ptr.reset();
	l.lock();
}
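// remove_oldest() is the eviction half of an LRU file pool. A sketch
// of how a call site might enforce the pool's size limit before
// opening another file (enforce_limit and m_size are hypothetical
// names, not the real file_pool interface):
void file_pool::enforce_limit(mutex::scoped_lock& l)
{
	// remove_oldest() drops and re-acquires the lock around the
	// actual close, so re-check the size on every iteration
	while (!m_files.empty() && int(m_files.size()) >= m_size)
		remove_oldest(l);
}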
void natpmp::disable(error_code const& ec, mutex::scoped_lock& l)
{
	m_disabled = true;

	for (std::vector<mapping_t>::iterator i = m_mappings.begin()
		, end(m_mappings.end()); i != end; ++i)
	{
		if (i->protocol == none) continue;
		i->protocol = none;
		int index = i - m_mappings.begin();
		l.unlock();
		m_callback(index, address(), 0, ec);
		l.lock();
	}
	close_impl(l);
}
void natpmp::log(char const* msg, mutex::scoped_lock& l)
{
	l.unlock();
	m_log_callback(msg);
	l.lock();
}
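// both natpmp functions above drop the lock around the user-supplied
// callback. The callback may re-enter natpmp on the same thread; a
// sketch of client code that would deadlock if m_callback were invoked
// with the mutex still held (my_natpmp is a hypothetical client-side
// pointer):
void on_port_mapping(int index, address const& external_ip, int port
	, error_code const& ec)
{
	if (ec)
	{
		// this call takes the same natpmp mutex the notifying thread
		// would still hold, had disable() not unlocked it first
		my_natpmp->delete_mapping(index);
	}
}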