// Bind this table to the given tuple schema and column metadata, and
// (re)initialize all schema-derived state: tuple length, block sizing,
// per-column name/nullability caches, the scratch "temp tuple", and the
// live tuple count. Safe to call on an already-initialized table: the
// previous schema is released first when this table owned it.
//
// Parameters:
//   schema              - the new tuple schema; ownership transfers to this
//                         table iff ownsTupleSchema is true.
//   columnNames         - names for the first schema->columnCount() columns;
//                         assumed to have at least that many entries
//                         (TODO confirm callers guarantee this).
//   ownsTupleSchema     - whether this table becomes responsible for freeing
//                         the schema.
//   compactionThreshold - stored verbatim in m_compactionThreshold;
//                         presumably a fill-percentage trigger for block
//                         compaction — verify against callers.
void Table::initializeWithColumns(TupleSchema *schema, const std::vector<string> &columnNames, bool ownsTupleSchema, int32_t compactionThreshold) {

    // copy the tuple schema
    // Free the old schema BEFORE overwriting m_schema, but only if this
    // table owned it; ordering matters here.
    if (m_ownsTupleSchema) {
        TupleSchema::freeTupleSchema(m_schema);
    }
    m_ownsTupleSchema = ownsTupleSchema;
    m_schema = schema;

    m_columnCount = schema->columnCount();

    // Physical tuple size = schema payload plus the per-tuple header bytes.
    m_tupleLength = m_schema->tupleLength() + TUPLE_HEADER_SIZE;
#ifdef MEMCHECK
    // Memory-checking builds: one tuple per block so tools like valgrind
    // can catch per-tuple overruns.
    m_tuplesPerBlock = 1;
    m_tableAllocationSize = m_tupleLength;
#else
    // How many whole tuples fit in the target block size (integer division;
    // may be 0 for very wide tuples, handled below).
    m_tuplesPerBlock = m_tableAllocationTargetSize / m_tupleLength;
#ifdef USE_MMAP
    if (m_tuplesPerBlock < 1) {
        // Tuple wider than the target block: fall back to one tuple per
        // block, rounded up to a power of two for mmap-friendly sizing.
        m_tuplesPerBlock = 1;
        m_tableAllocationSize = nexthigher(m_tupleLength);
    } else {
        m_tableAllocationSize = nexthigher(m_tableAllocationTargetSize);
    }
#else
    if (m_tuplesPerBlock < 1) {
        // Tuple wider than the target block: allocate exactly one tuple
        // per block.
        m_tuplesPerBlock = 1;
        m_tableAllocationSize = m_tupleLength;
    } else {
        m_tableAllocationSize = m_tableAllocationTargetSize;
    }
#endif
#endif

    // initialize column names
    m_columnNames.resize(m_columnCount);
    for (int i = 0; i < m_columnCount; ++i)
        m_columnNames[i] = columnNames[i];

    // Cache per-column nullability so hot paths need not consult the schema.
    m_allowNulls.resize(m_columnCount);
    for (int i = m_columnCount - 1; i >= 0; --i) {
        TupleSchema::ColumnInfo const* columnInfo = m_schema->getColumnInfo(i);
        m_allowNulls[i] = columnInfo->allowNull;
    }

    // initialize the temp tuple
    // Scratch tuple backed by zeroed heap memory; reused for intermediate
    // tuple construction.
    m_tempTupleMemory.reset(new char[m_schema->tupleLength() + TUPLE_HEADER_SIZE]);
    m_tempTuple = TableTuple(m_tempTupleMemory.get(), m_schema);
    ::memset(m_tempTupleMemory.get(), 0, m_tempTuple.tupleLength());

    // default value of hidden dr timestamp is null
    // NOTE(review): only hidden column 0 is initialized, and always as a
    // BIGINT null — assumes the first hidden column is the DR timestamp;
    // confirm if additional hidden columns are ever added.
    if (m_schema->hiddenColumnCount() > 0) {
        m_tempTuple.setHiddenNValue(0, NValue::getNullValue(VALUE_TYPE_BIGINT));
    }
    m_tempTuple.setActiveTrue();

    // set the data to be empty
    m_tupleCount = 0;

    m_compactionThreshold = compactionThreshold;
}
// Allocate a continous block of memory of the specified size. void *VarlenPool::Allocate(std::size_t size) { void *retval = nullptr; // Protect using pool lock // TODO: Can make locking more fine-grained { std::lock_guard<std::mutex> pool_lock(pool_mutex); // See if there is space in the current chunk Chunk *current_chunk = &chunks[current_chunk_index]; if (size > current_chunk->size - current_chunk->offset) { // Not enough space. Check if it is greater than our allocation size. if (size > allocation_size) { // Allocate an oversize chunk that will not be reused. auto &storage_manager = storage::StorageManager::GetInstance(); char *storage = reinterpret_cast<char *>( storage_manager.Allocate(backend_type, size)); oversize_chunks.push_back(Chunk(nexthigher(size), storage)); Chunk &newChunk = oversize_chunks.back(); newChunk.offset = size; return newChunk.chunk_data; } // Check if there is an already allocated chunk we can use. current_chunk_index++; if (current_chunk_index < chunks.size()) { current_chunk = &chunks[current_chunk_index]; current_chunk->offset = size; return current_chunk->chunk_data; } else { // Need to allocate a new chunk auto &storage_manager = storage::StorageManager::GetInstance(); char *storage = reinterpret_cast<char *>( storage_manager.Allocate(backend_type, allocation_size)); chunks.push_back(Chunk(allocation_size, storage)); Chunk &new_chunk = chunks.back(); new_chunk.offset = size; return new_chunk.chunk_data; } } // Get the offset into the current chunk. Then increment the // offset counter by the amount being allocated. retval = current_chunk->chunk_data + current_chunk->offset; current_chunk->offset += size; // Ensure 8 byte alignment of future allocations current_chunk->offset += (8 - (current_chunk->offset % 8)); if (current_chunk->offset > current_chunk->size) { current_chunk->offset = current_chunk->size; } } return retval; }