// Return the (exact-size) pool serving requests of the given object size.
// The requested size is first rounded up to one of the allocator's fixed
// size classes by getAllocationSizeForObject; a result of 0 signals that
// the request exceeded the largest supported class (the 1MB limit), which
// is treated as a fatal error here.
boost::shared_ptr<boost::pool<voltdb_pool_allocator_new_delete> > ThreadLocalPool::get(std::size_t size)
{
    size_t alloc_size = getAllocationSizeForObject(size);
    if (alloc_size == 0) {
        // Fixed message wording ("larger than", not "then") and use the
        // standard C99 %zu conversion for size_t -- the original "%Zu" is
        // a non-portable glibc-only spelling.
        throwFatalException("Attempted to allocate an object larger than the 1 meg limit. Requested size was %zu", size);
    }
    return getExact(alloc_size);
}
// Look up the exact-size pool that serves requests of the given size.
// getAllocationSizeForObject rounds the request up to one of the fixed
// size classes; it reports 0 when the request is over the 1MB ceiling,
// which this variant surfaces as a (recoverable) SQL exception.
boost::shared_ptr<boost::pool<voltdb_pool_allocator_new_delete> > ThreadLocalPool::get(std::size_t size)
{
    const size_t rounded = getAllocationSizeForObject(size);
    if (rounded == 0) {
        throwDynamicSQLException(
                "Attempted to allocate an object > than the 1 meg limit. Requested size was %du",
                static_cast<int32_t>(size));
    }
    return getExact(rounded);
}
// Return a relocatable allocation to the size-class pool it came from.
// The size header stamped into the allocation (sized->m_size) identifies
// which pool owns it.
void ThreadLocalPool::freeRelocatable(Sized* sized)
{
    // Round the cached size the same way the allocator did, then locate
    // the matching pool.
    int32_t bucket = getAllocationSizeForObject(sized->m_size);
    CompactingStringStorage& pools = getStringPoolMap();
    CompactingStringStorage::iterator found = pools.find(bucket);
    if (found == pools.end()) {
        // No pool of this size class was ever created, so no object of
        // this size was ever handed out: either the caller passed a bogus
        // pointer that was never allocated here, or the allocation's size
        // header has been corrupted.
        throwFatalException("Attempted to free an object of an unrecognized size. Requested size was %d", bucket);
    }
    // Hand the raw allocation back to its pool.
    found->second->free(sized);
}
// Allocate a relocatable object of (approximately) the requested size from
// a size-class-specific compacting pool, creating the pool on first use.
//
// The rounded size class both selects the pool and is recorded in the
// Sized header prefixed onto every allocation. NOTE: that header is
// somewhat redundant with the 1-3 byte "object length" NValue later
// encodes at the front of the returned buffer; in theory the allocator
// could decode (and round up) that length in getAllocationSizeForRelocatable
// and freeRelocatable instead of paying for the header, but for now the
// allocator stays simple and abstract and keeps its own accounting.
ThreadLocalPool::Sized* ThreadLocalPool::allocateRelocatable(char** referrer, int32_t sz)
{
    const int32_t alloc_size = getAllocationSizeForObject(sz);
    CompactingStringStorage& poolMap = getStringPoolMap();
    CompactingStringStorage::iterator found = poolMap.find(alloc_size);
    void* raw;
    if (found != poolMap.end()) {
        raw = found->second->malloc(referrer);
    }
    else {
        // First allocation of this size class: create its pool, with a
        // per-buffer element count covering roughly 2MB of storage.
        const int32_t num_elements = ((2 * 1024 * 1024 - 1) / alloc_size) + 1;
        boost::shared_ptr<CompactingPool> fresh(new CompactingPool(alloc_size, num_elements));
        poolMap.insert(std::pair<int32_t, boost::shared_ptr<CompactingPool> >(alloc_size, fresh));
        raw = fresh->malloc(referrer);
    }
    // Stamp the size header over the start of the raw allocation and hand
    // it back.
    return new (raw) Sized(sz);
}
// Report the pool size class backing the given relocatable allocation.
// The m_size value captured in the allocation's size-prefix header is
// rounded the same way the allocator rounded it, recovering the size
// class it was served from.
int32_t ThreadLocalPool::getAllocationSizeForRelocatable(Sized* sized)
{
    // Convert from the caller data to the size-prefixed allocation to
    // extract its size field.
    return getAllocationSizeForObject(sized->m_size);
}
// Test-only shim that exposes the allocator's size-class rounding
// (getAllocationSizeForObject) to unit tests.
int TestOnlyAllocationSizeForObject(int length)
{
    const int rounded = getAllocationSizeForObject(length);
    return rounded;
}