// Rounds a request up to the next bucket boundary (min, min+step, min+2*step,
// ...) and forwards it to the parent allocator.
//
// Returns an empty block {nullptr, 0} for requests larger than `max`.
// Fix: requests with n < min made (n - min) wrap around (size_t is
// unsigned), producing a garbage bucket size; such requests are now
// clamped to the smallest bucket `min`.
AllocatorBlock allocate(size_t n) {
  if (n > max) {
    return {nullptr, 0};
  }
  if (n <= min) {
    // Smallest bucket also covers n == min (original behavior unchanged).
    return parent_.allocate(min);
  }
  if ((n - min) % step == 0) {
    // Already exactly on a bucket boundary.
    return parent_.allocate(n);
  }
  // Round up to the next boundary.
  auto delta = (n - min) / step + 1;
  return parent_.allocate(min + delta * step);
}
// Emulates the version-2 allocation_command interface on top of a plain
// version-1 allocator.
//
// Returns {pointer, false}: the bool is always false because a version-1
// allocator can never reuse or expand an existing buffer. On success
// received_size is set to preferred_size (a v1 allocator cannot report a
// larger block). A request without allocate_new is a logic error unless
// nothrow_allocation is set, in which case a null pointer is returned; with
// nothrow_allocation set, allocation failures are likewise reported as a
// null pointer instead of propagating the exception.
static std::pair<pointer, bool> allocation_command(Allocator &a, allocation_type command,
                                                   size_type, size_type preferred_size,
                                                   size_type &received_size, const pointer &)
{
   std::pair<pointer, bool> ret(pointer(), false);
   if(!(command & allocate_new)){
      if(!(command & nothrow_allocation)){
         throw_logic_error("version 1 allocator without allocate_new flag");
      }
   }
   else{
      received_size = preferred_size;
      BOOST_TRY{
         ret.first = a.allocate(received_size);
      }
      BOOST_CATCH(...){
         if(!(command & nothrow_allocation)){
            BOOST_RETHROW
         }
      }
      BOOST_CATCH_END
   }
   return ret;
}
int main (int argc, char **argv) { bool check = true; Allocator allocator; for ( int n = 0; n < TIMES; n++ ) { for ( unsigned int i = 0; i < (sizeof( sizes )/sizeof(int)); i++ ) { int *ptr = (int *) allocator.allocate( sizes[i] * sizeof(int) ); if ( ptr == NULL ) check = false; for ( int j = 0; j < sizes[i]; j++ ) ptr[j] = CHECK_VALUE; // INI for ( int j = 0; j < sizes[i]; j++ ) ptr[j]++; // INC for ( int j = 0; j < sizes[i]; j++ ) ptr[j]--; // DEC // Check result for ( int j = 0; j < sizes[i]; j++ ) { if ( ptr[j] != CHECK_VALUE ) exit(-1); } Allocator::deallocate( ptr ); } } if (check) { return 0; } else { return -1; } }
// Doubles the table's capacity and rehashes every stored entry.
// Capacity is kept a power of two so `mask` works as a fast modulo.
void expand()
{
    size_t size = (this->mask + 1) << 1;
    size_t mask = size - 1;
    Table table = allocator.allocate(size);

    // Slots must start out "invalid" unless the allocator already hands back
    // memory in that state (Allocator::null_references).
    if(!Allocator::null_references)
        for(size_t i = 0; i < size; ++i)
            table[i] = T::invalid_value();

    V *end = this->table + (this->mask + 1);
    for(V *slot = this->table; slot != end; ++slot)
    {
        V entry = *slot;
        // Each slot anchors an intrusive chain of entries. `store` may
        // rewrite an entry's next-link, so the successor is captured before
        // the entry is moved into the new table.
        while(T::valid_value(entry))
        {
            T::verify_value(entry);
            V next = T::get_value_next(entry);
            store(table, mask, T::get_key(entry), entry);
            entry = next;
        }
    }

    // Release the old slot array only after every entry has been migrated.
    allocator.free(this->table);
    this->mask = mask;
    this->table = table;
}
/// construct a stack with 0 elements and an initial capacity of 128 Stack() : mData( NULL ), mSize( 0 ), mCapacity( kInitialSize ), mAlloc() { mData = mAlloc.allocate( kInitialSize ); }
// Carves `num_blocks` fixed-size slots out of one slab obtained from
// `backing` and threads them into an intrusive singly-linked freelist.
PoolAllocator::PoolAllocator(Allocator& backing, uint32_t num_blocks, uint32_t block_size, uint32_t block_align)
    : _backing(backing)
    , _start(NULL)
    , _freelist(NULL)
    , _block_size(block_size)
    , _block_align(block_align)
    , _num_allocations(0)
    , _allocated_size(0)
{
    CE_ASSERT(num_blocks > 0, "Unsupported number of blocks");
    CE_ASSERT(block_size > 0, "Unsupported block size");
    CE_ASSERT(block_align > 0, "Unsupported block alignment");

    // Each slot is padded by block_align so an aligned block fits inside it.
    // NOTE(review): slots are not individually re-aligned here — presumably
    // allocate() applies the alignment when handing a block out; confirm.
    uint32_t actual_block_size = block_size + block_align;
    uint32_t pool_size = num_blocks * actual_block_size;

    char* mem = (char*) backing.allocate(pool_size, block_align);

    // Initialize intrusive freelist: the first bytes of every free slot hold
    // the address of the next free slot; the last slot stores NULL.
    char* cur = mem;
    for (uint32_t bb = 0; bb < num_blocks - 1; bb++)
    {
        uintptr_t* next = (uintptr_t*) cur;
        *next = (uintptr_t) cur + actual_block_size;
        cur += actual_block_size;
    }

    uintptr_t* end = (uintptr_t*) cur;
    *end = (uintptr_t) NULL;

    _start = mem;
    _freelist = mem;
}
explicit Stack(int n) : capacity_(n), index_(0) { Allocator alloc; data_ = alloc.allocate(capacity_); for (int i = 0; i < capacity_; ++i) { alloc.construct(data_ + i); } }
// Allocate n single-element nodes, pushing each onto chain m.
// The rollback guard returns any already-chained nodes to the allocator if an
// allocation throws; release() disarms it once all n nodes succeed.
static void allocate_individual(Allocator &a, size_type n, multiallocation_chain &m)
{
   allocate_individual_rollback rollback(a, m);
   for(size_type remaining = n; remaining != 0; --remaining){
      m.push_front(a.allocate(1));
   }
   rollback.release();
}
//----------------------------------------------------------------------------- LinearAllocator::LinearAllocator(Allocator& backing, size_t size) : m_backing(&backing) , m_physical_start(NULL) , m_total_size(size) , m_offset(0) { m_physical_start = backing.allocate(size); }
// Reserve the whole arena from the backing allocator at construction time;
// individual allocations later just advance _offset inside it.
LinearAllocator::LinearAllocator(Allocator& backing, uint32_t size)
    : _backing(&backing)
    , _physical_start(NULL)
    , _total_size(size)
    , _offset(0)
{
    _physical_start = _backing->allocate(_total_size);
}
/// copy constructor creates a copy of the right hand side's data Stack( const Stack& other ) : mData( NULL ), mSize( other.mSize ), mCapacity( other.mCapacity ), mAlloc( other.mAlloc ) { mData = mAlloc.allocate( other.mCapacity ); std::uninitialized_copy( other.mData, other.mData + other.mSize, mData ); }
// Allocate the bucket storage, set up the end sentinel and end iterator,
// then put every bucket into its initial (empty) state.
void init()
{
    // One contiguous slab of m_buckets * ItemSize slots.
    // NOTE(review): the end marker sits at m_table + m_buckets, not at the
    // end of the slab — presumably ItemSize > 1 reserves per-bucket overflow
    // room; confirm against the class definition.
    m_table = m_allocator.allocate( m_buckets * ItemSize );
    m_end_marker = m_table + m_buckets;
    m_end_it = iterator( this, m_end_marker );
    initialize_memory();
}
/*!
 * \brief Primary constructor.
 *
 * Constructs an empty cache object and sets a maximum size for it. This is the
 * only way to set the size of a cache; it cannot be changed later. You may
 * also pass an optional comparator object, compatible with Compare.
 *
 * \param <size> Maximum number of entries allowed in the cache.
 * \param <comp> Comparator object, compatible with the Compare type. Defaults to Compare().
 */
explicit cache(const size_type size, const Compare& comp = Compare())
{
    this->_storage = storageType(comp, Allocator<pair<const Key, Data> >());
    this->_maxEntries = size;
    this->_currEntries = 0;

    // The eviction policy lives on the heap (via policyAlloc) so the copy
    // constructor / assignment operator can replace it; it is built from a
    // stack-local instance and copy-constructed into the allocated slot.
    policy_type localPolicy(size);
    this->_policy = policyAlloc.allocate(1);
    policyAlloc.construct(this->_policy, localPolicy);
}
void Simple() { Allocator<DummyClass> allocator; DummyClass* p = allocator.allocate(1); allocator.construct(p, DummyClass()); allocator.destroy(p); allocator.deallocate(p, 1); }
/// Set value. void unmanaged_string::set(UnmanagedString& s, StringRef value, Allocator& a) { unmanaged_string::clear(s, a); if (value.empty()) { return; } s.size = value.size; s.data = static_cast<char*>(a.allocate(s.size + 1)); string::copy(s.data, s.size + 1, value); }
/// Reserve enough space for n elements void reserve( size_type n ) { if ( mCapacity < n ) { size_type oldCapacity = mCapacity; pointer oldData = mData; mCapacity = n; mData = mAlloc.allocate( mCapacity ); std::uninitialized_copy( oldData, oldData + mSize, mData ); mAlloc.deallocate( oldData, oldCapacity ); } }
// Creates the VkImage described by imageCreateInfo and binds it to device
// memory freshly obtained from `allocator`. The allocation is sized from
// vkGetImageMemoryRequirements and filtered by `memoryRequirement`; binding
// must follow the allocation, so the statement order here is significant.
Image::Image (const DeviceInterface& vk, const VkDevice device, Allocator& allocator, const VkImageCreateInfo& imageCreateInfo, const MemoryRequirement memoryRequirement)
{
    m_image = createImage(vk, device, &imageCreateInfo);
    m_allocation = allocator.allocate(getImageMemoryRequirements(vk, device, *m_image), memoryRequirement);
    VK_CHECK(vk.bindImageMemory(device, *m_image, m_allocation->getMemory(), m_allocation->getOffset()));
}
void first_allocated_last_deallocated_batch() { uint32* p[N]; for (size_t i = 0; i < N; ++i) p[i] = m_allocator.allocate(1); for (size_t i = N; i; --i) m_allocator.deallocate(p[i - 1], 1); }
// Creates the VkBuffer described by bufferCreateInfo and binds it to device
// memory freshly obtained from `allocator`. The allocation is sized from
// vkGetBufferMemoryRequirements and filtered by `memoryRequirement`; binding
// must follow the allocation, so the statement order here is significant.
Buffer::Buffer (const DeviceInterface& vk, const VkDevice device, Allocator& allocator, const VkBufferCreateInfo& bufferCreateInfo, const MemoryRequirement memoryRequirement)
{
    m_buffer = createBuffer(vk, device, &bufferCreateInfo);
    m_allocation = allocator.allocate(getBufferMemoryRequirements(vk, device, *m_buffer), memoryRequirement);
    VK_CHECK(vk.bindBufferMemory(device, *m_buffer, m_allocation->getMemory(), m_allocation->getOffset()));
}
/*!
 * \brief Copy cache content
 *
 * Assigns a copy of the elements in x as the new content for the cache.
 * Usage counts for entries are copied too. The elements contained in the
 * object before the call are dropped and replaced by copies of those in x.
 *
 * \param <x> a cache object with the same template parameters
 *
 * \return *this
 *
 * \see swap
 */
cache<Key,Data,Policy,Compare,Allocator>& operator= ( const cache<Key,Data,Policy,Compare,Allocator>& x)
{
    // Self-assignment guard: copying *x._policy below would otherwise read
    // from the very policy object we are about to destroy.
    if (this == &x) {
        return *this;
    }

    this->_storage = x._storage;
    this->_maxEntries = x._maxEntries;
    this->_currEntries = this->_storage.size();

    // Build the replacement policy first so *this stays consistent if the
    // allocation throws.
    policy_type localPolicy(*x._policy);
    policy_type* newPolicy = policyAlloc.allocate(1);
    policyAlloc.construct(newPolicy, localPolicy);

    // Fix: the original simply overwrote _policy, leaking the policy object
    // created by the constructor (or by a previous assignment).
    if (this->_policy) {
        policyAlloc.destroy(this->_policy);
        policyAlloc.deallocate(this->_policy, 1);
    }
    this->_policy = newPolicy;

    return *this;
}
void * mynew( size_t size ) { #ifdef _ANALYZEMEMORY return analyzealloc( size ); #else // return malloc( size ); // return g_MemoryPool.GetMem( size ); // return g_MemPool.GetMem( size ); size_t newsz = size + sizeof(size_t); void* ptr = g_Allocator.allocate(newsz); *(size_t*)ptr = newsz; return (char*)ptr + sizeof(size_t); #endif }
// Allocate raw storage for n values and wrap it in a view.
// A zero-length request performs no allocation and yields a null-backed view.
view_type allocate(size_type n) {
    Allocator allocator;

    pointer ptr = nullptr;
    if (n > 0) {
        ptr = allocator.allocate(n);
    }

#ifdef VERBOSE
    std::cerr << util::type_printer<DeviceCoordinator>::print()
              << util::blue("::allocate") << "(" << n << ")"
              << (ptr==nullptr && n>0 ? " failure" : " success")
              << std::endl;
#endif

    return view_type(ptr, n);
}
// Common fast paths shared by reallocate-style operations. Returns true when
// the request was fully handled here and `b` already holds its final value;
// false means the caller must perform a real reallocation.
static bool isHandledDefault(Allocator &allocator, block &b, size_t n) {
  // Size unchanged: nothing to do.
  if (n == b.length) {
    return true;
  }
  // Shrinking to zero is just a deallocation.
  if (n == 0) {
    allocator.deallocate(b);
    return true;
  }
  // No existing block: this is a plain allocation.
  if (!b) {
    b = allocator.allocate(n);
    return true;
  }
  return false;
}
// Resize the bucket array to round_to_power2(size) buckets.
//
// Shrinking keeps the fitting prefix of elements in place (raw memcpy) and
// destroys the tail; growing rebuilds the table by re-inserting every
// element into a fresh, larger instance via swap.
void resize( size_type size )
{
    size_t new_size = round_to_power2( size );
    size_t old_size = m_buckets;

    if ( new_size == old_size ) {
        // Do nothing
        return;
    }
    else if ( new_size < old_size ) {
        // The new table will be smaller, so there's no need to rehash
        // all the items.
        value_type* new_table;
        new_table = m_allocator.allocate( new_size * ItemSize );

        // Copy the elements that fit into the new table and destroy
        // those that doesn't fit. Plain old memcpy seems to have much
        // less problems with types than std::copy..
        // NOTE(review): memcpy is only safe for trivially-copyable
        // value_types — presumably what this table stores; confirm.
        std::memcpy( new_table, m_table, new_size * ItemSize );
        _Destroy( iterator( this, m_table + new_size, true ), m_end_it );
        m_allocator.deallocate( m_table, old_size );

        m_table = new_table;
        m_end_marker = m_table + new_size;
        m_end_it = iterator( this, m_end_marker );
        m_buckets = new_size;
        m_mask = m_buckets - 1;

        // Re-count the number of elements: part of them was destroyed with
        // the truncated tail, so the cached count is stale.
        m_num_elements = 0;
        for ( const_iterator it = begin(); it != m_end_it; ++it )
            ++m_num_elements;
    }
    else // new_size > old_size
    {
        // Creates a new table and re-insert all the items, with
        // new buckets.
        cache_table other( new_size, m_hasher, m_key_equal );
        other.set_empty_value( m_empty_value );
        swap( other );
        insert( other.begin(), other.end() );
    }
}
/**
 * Allocates a Block with n usable bytes, reserving extra room before and/or
 * after it for the Prefix and Sufix affix objects, which are
 * default-constructed in place. A zero-byte request performs no allocation
 * and yields an empty Block, as does a failure of the underlying allocator.
 * \param n Number of usable bytes requested; the underlying Allocator may
 * round the block up depending on its alignment.
 */
block allocate(size_t n) noexcept {
  if (n == 0) {
    return {};
  }

  auto innerMem = _allocator.allocate(prefix_size + n + sufix_size);
  if (!innerMem) {
    return {};
  }

  // Construct the affix objects inside the enlarged block.
  if (prefix_size > 0) {
    new (innerToPrefix(innerMem)) Prefix{};
  }
  if (sufix_size > 0) {
    new (innerToSufix(innerMem)) Sufix{};
  }
  // Hide the affixes: the caller sees only the n usable bytes.
  return toOuterBlock(innerMem);
}
// Micro-benchmark: time `attempts` single-element allocations followed by the
// matching deallocations, then print the allocator's type name and the two
// elapsed figures to stdout.
void test_al(Allocator& al)
{
    clock_type::time_point start = clock_type::now();

    for (int i = 0; i < attempts; ++i) {
        g_ptrs[i] = al.allocate(1);
        *(g_ptrs[i]) = i;  // touch the memory so the allocation is not optimized away
    }

    clock_type::time_point afterAlloc = clock_type::now();

    for (int i = 0; i < attempts; ++i) {
        al.deallocate(g_ptrs[i], 1);
    }

    clock_type::time_point afterFree = clock_type::now();

    // NOTE(review): the second figure is total elapsed time (afterFree -
    // start), not the deallocation phase alone — presumably intentional.
    std::cout << typeid(Allocator).name() << std::endl
              << afterAlloc - start << std::endl
              << afterFree - start << std::endl;
}
// Common fast paths shared by reallocate-style operations. Returns true when
// the request was fully handled here and `b` already holds its final value;
// false means the caller must fall back to allocate-copy-free.
static bool is_handled_default(Allocator &allocator, block &b, size_t n) noexcept {
  // Size unchanged: nothing to do.
  if (n == b.length) {
    return true;
  }
  // Shrinking to zero is just a deallocation.
  if (n == 0) {
    allocator.deallocate(b);
    return true;
  }
  // No existing block: this is a plain allocation.
  if (!b) {
    b = allocator.allocate(n);
    return true;
  }
  // Growing: try to extend the block in place before giving up.
  if (n > b.length && allocator.expand(b, n - b.length)) {
    return true;
  }
  return false;
}
/**
 * Allocates a Block of n bytes. Actually a Block of n + sizeof(Prefix) +
 * sizeof(Sufix) bytes is allocated. Depending on the Prefix and Sufix type
 * parameters, objects of those types get instantiated before and/or beyond
 * the returned Block. If zero bytes are requested then no allocation takes
 * place and an empty Block is returned.
 * \param n Specifies the number of requested bytes. n or more bytes are
 * returned, depending on the alignment of the underlying Allocator.
 */
block allocate(size_t n) noexcept {
    block result;

    if (n == 0) {
        return result;
    }

    // Request room for the affixes in addition to the caller's n bytes.
    auto innerMem = allocator_.allocate(prefix_size + n + sufix_size);
    if (innerMem) {
        // Construct the affix objects in place inside the enlarged block.
        if (prefix_size > 0) {
            affix_helper::create_affix_in_place<Prefix>(inner_to_prefix(innerMem), *this);
        }
        if (sufix_size > 0) {
            affix_helper::create_affix_in_place<Sufix>(inner_to_sufix(innerMem), *this);
        }
        // Hide the affixes: the caller sees only the n usable bytes.
        result = to_outer_block(innerMem);
        return result;
    }
    // Underlying allocation failed: hand back an empty block.
    return result;
}
// Version-1 allocator adapter for allocation_command. `reuse` is always
// cleared because a v1 allocator cannot expand or reuse an existing buffer,
// and prefer_in_recvd_out_size is left unchanged (requested == received).
// A request without allocate_new is a logic error unless nothrow_allocation
// is set; with nothrow_allocation set, allocation failures are reported as a
// null pointer instead of propagating the exception.
static pointer allocation_command(Allocator &a, allocation_type command,
                                  size_type, size_type &prefer_in_recvd_out_size, pointer &reuse)
{
   pointer ret = pointer();
   if(BOOST_UNLIKELY(!(command & allocate_new) && !(command & nothrow_allocation))){
      throw_logic_error("version 1 allocator without allocate_new flag");
   }
   else{
      BOOST_TRY{
         ret = a.allocate(prefer_in_recvd_out_size);
      }
      BOOST_CATCH(...){
         if(!(command & nothrow_allocation)){
            BOOST_RETHROW
         }
      }
      BOOST_CATCH_END
      reuse = pointer();
   }
   return ret;
}
// Carve out raw, uninitialized storage for one Rep object from the
// document's allocator; the caller is responsible for constructing into it.
inline void* Document::allocate() { return alloc_->allocate(sizeof(Rep)); }