void disk_buffer_pool::free_buffer(char* buf) { mutex::scoped_lock l(m_pool_mutex); free_buffer_impl(buf, l); check_buffer_level(l); }
// return a single block to the pool. Validates the pointer in debug
// builds, drops it from the in-use bookkeeping and re-evaluates the
// pool fill level
void disk_buffer_pool::free_buffer(char* buf)
{
	std::unique_lock<std::mutex> lock(m_pool_mutex);
	TORRENT_ASSERT(is_disk_buffer(buf, lock));
	free_buffer_impl(buf, lock);
	remove_buffer_in_use(buf);
	check_buffer_level(lock);
}
// return every block referenced by the iovec array to the pool
void disk_buffer_pool::free_iovec(file::iovec_t* iov, int iov_len)
{
	// TODO: perhaps we should sort the buffers here?
	mutex::scoped_lock lock(m_pool_mutex);
	for (int idx = 0; idx < iov_len; ++idx)
		free_buffer_impl(static_cast<char*>(iov[idx].iov_base), lock);
	check_buffer_level(lock);
}
// return every block referenced by the iovec span to the pool,
// updating the in-use bookkeeping for each one
void disk_buffer_pool::free_iovec(span<iovec_t const> iov)
{
	// TODO: perhaps we should sort the buffers here?
	std::unique_lock<std::mutex> lock(m_pool_mutex);
	for (auto const& entry : iov)
	{
		char* const b = entry.data();
		TORRENT_ASSERT(is_disk_buffer(b, lock));
		free_buffer_impl(b, lock);
		remove_buffer_in_use(b);
	}
	check_buffer_level(lock);
}
void disk_buffer_pool::free_multiple_buffers(char** bufvec, int numbufs) { char** end = bufvec + numbufs; // sort the pointers in order to maximize cache hits std::sort(bufvec, end); mutex::scoped_lock l(m_pool_mutex); for (; bufvec != end; ++bufvec) { char* buf = *bufvec; TORRENT_ASSERT(buf); free_buffer_impl(buf, l);; } }
// return a batch of blocks to the pool, then re-evaluate the fill level
void disk_buffer_pool::free_multiple_buffers(span<char*> bufvec)
{
	// sorting the pointers maximizes cache locality while freeing
	std::sort(bufvec.begin(), bufvec.end());
	std::unique_lock<std::mutex> lock(m_pool_mutex);
	for (char* const b : bufvec)
	{
		TORRENT_ASSERT(is_disk_buffer(b, lock));
		free_buffer_impl(b, lock);
		remove_buffer_in_use(b);
	}
	check_buffer_level(lock);
}
// this function allocates buffers and // fills in the iovec array with the buffers int disk_buffer_pool::allocate_iovec(file::iovec_t* iov, int iov_len) { mutex::scoped_lock l(m_pool_mutex); for (int i = 0; i < iov_len; ++i) { iov[i].iov_base = allocate_buffer_impl(l, "pending read"); iov[i].iov_len = block_size(); if (iov[i].iov_base == NULL) { // uh oh. We failed to allocate the buffer! // we need to roll back and free all the buffers // we've already allocated for (int j = 0; j < i; ++j) free_buffer_impl(static_cast<char*>(iov[j].iov_base), l); return -1; } } return 0; }
// allocate one disk block. The caller must already hold m_pool_mutex
// (witnessed by `l`); the second (unnamed) argument is an allocation
// category string that this implementation ignores.
// Returns nullptr on allocation failure.
char* disk_buffer_pool::allocate_buffer_impl(std::unique_lock<std::mutex>& l
	, char const*)
{
	TORRENT_ASSERT(m_settings_set);
	TORRENT_ASSERT(m_magic == 0x1337);
	TORRENT_ASSERT(l.owns_lock());
	TORRENT_UNUSED(l);

	char* ret = page_malloc(default_block_size);
	if (ret == nullptr)
	{
		// out of memory: flag the pool as over budget and ask the
		// cache to trim itself
		m_exceeded_max_size = true;
		m_trigger_cache_trim();
		return nullptr;
	}
	++m_in_use;
#if TORRENT_USE_INVARIANT_CHECKS
	try
	{
		// record the pointer for debug-time is_disk_buffer() checks;
		// the set insert may throw, in which case the allocation is
		// rolled back
		TORRENT_ASSERT(m_buffers_in_use.count(ret) == 0);
		m_buffers_in_use.insert(ret);
	}
	catch (...)
	{
		free_buffer_impl(ret, l);
		return nullptr;
	}
#endif
	// once usage passes the midpoint between the low watermark and the
	// maximum, proactively signal back-pressure so the cache starts
	// trimming before we actually run out
	if (m_in_use >= m_low_watermark + (m_max_use - m_low_watermark) / 2
		&& !m_exceeded_max_size)
	{
		m_exceeded_max_size = true;
		m_trigger_cache_trim();
	}
	TORRENT_ASSERT(is_disk_buffer(ret, l));
	return ret;
}
// this function allocates buffers and // fills in the iovec array with the buffers int disk_buffer_pool::allocate_iovec(span<iovec_t> iov) { std::unique_lock<std::mutex> l(m_pool_mutex); for (auto& i : iov) { i = { allocate_buffer_impl(l, "pending read"), std::size_t(default_block_size)}; if (i.data() == nullptr) { // uh oh. We failed to allocate the buffer! // we need to roll back and free all the buffers // we've already allocated for (auto j : iov) { if (j.data() == nullptr) break; char* buf = j.data(); TORRENT_ASSERT(is_disk_buffer(buf, l)); free_buffer_impl(buf, l); remove_buffer_in_use(buf); } return -1; } } return 0; }