// Allocates a bit buffer from pooled memory // Updates size PkPooledRawBitSetArray::buffer_type PkPooledRawBitSetArray::allocate_bit_buffer() { // Allocate a new buffer from our pool allocator byte_type* const p_buffer = (byte_type*) get_pool_alloc().malloc(); PkAssert( NULL != p_buffer ); // Determine if it's contiguous with our current chunk if ( is_contiguous_byte_buffer( p_buffer ) ) { // Assert that this is a pooled chunk PkAssert( get_pool_alloc().is_from( get_chunks().back().first ) ); // Assert that parallel arrays are same size PkAssert( m_owned_chunks_mask.size() == num_chunks() ); // Assert that we don't own this chunk PkAssert( !is_owned_chunk( num_chunks()-1 ) ); // Update current contiguous chunk get_chunks().back().second += num_bytes(); } else { // Start a new contiguous chunk get_chunks().push_back( PkPooledRawBitSetChunkInfo( p_buffer, num_bytes() ) ); // This chunk is owned by the pool; therefore, we don't have to free it explicitly m_owned_chunks_mask.push_back( false ); // Assert that parallel arrays are the same size PkAssert( m_owned_chunks_mask.size() == num_chunks() ); } // Keep track of how many bit buffers are in this collection ++m_size; // Return allocated buffer return (buffer_type) p_buffer; }
// Debug sanity check: asserts that none of the cached header fields still
// hold the -1 "uninitialized/invalid" sentinel value. No-op in release
// builds where HPX_ASSERT compiles away.
void assert_valid() const
{
    HPX_ASSERT(tag() != -1);
    HPX_ASSERT(size() != -1);
    HPX_ASSERT(numbytes() != -1);
    // num_chunks() returns a pair; both components must be initialized
    HPX_ASSERT(num_chunks().first != -1);
    HPX_ASSERT(num_chunks().second != -1);
}
// Grows the pool until it can hold at least `capacity` elements.
// Allocates whole chunks of `capacity_per_chunk` elements each; never shrinks.
void concurrent_growable_pool::grow(size_type capacity)
{
    // Ceil-divide to find how many chunks are needed to cover the request
    const size_type needed_chunks =
        (capacity + capacity_per_chunk - 1) / capacity_per_chunk;
    // Add chunks one at a time until we reach the target count
    for (size_type have = num_chunks(); have < needed_chunks; ++have)
    {
        chunk_t* const fresh = allocate_chunk();
        // Racy publish of the newest pool, but it is only an optimization hint
        last_allocate = &fresh->pool;
        chunks.push(fresh);
    }
}
// Removes last element from pooled array void PkPooledRawBitSetArray::pop_back() { // Assert we have elements to pop PkAssert( !empty() ); // Assert that our byte offset indicates we have elements as well PkAssert( get_chunks().back().second >= num_bytes() ); // Assert that byte offset is proper multiple of number of blocks to represent a bit set PkAssert( byte_offset_is_proper_multiple( get_chunks().back().second ) ); // If last element is from pooled chunk, then release it back to pool if ( !is_owned_chunk( num_chunks()-1 ) ) { get_pool_alloc().free( get_back_bit_buffer() ); } // If chunk now has zero elements, remove the chunk if ( 0 == ( get_chunks().back().second -= num_bytes() ) ) { remove_back_chunk(); } // Update our size --m_size; // Assert that we are empty or new back chunk has elements PkAssert( empty() || (get_chunks().back().second >= num_bytes()) ); }