/**
 * Build a persistent table (or an export-only streamed table), initialize its
 * common metadata, and register its statistics source.
 *
 * @param databaseId          catalog id of the owning database
 * @param name                table name
 * @param schema              tuple schema; ownership passes to the table
 * @param columnNames         column names, one per schema column
 * @param partitionColumn     index of the partitioning column
 * @param exportEnabled       whether export is enabled for this table
 * @param exportOnly          true -> build a StreamedTable instead of a
 *                            PersistentTable
 * @param tableAllocationTargetSize  target size for tuple storage blocks
 * @param tupleLimit          maximum tuple count for the table
 * @param compactionThreshold compaction trigger threshold
 * @return the newly constructed table (caller owns it)
 */
Table* TableFactory::getPersistentTable(
            voltdb::CatalogId databaseId,
            const std::string &name,
            TupleSchema* schema,
            const std::vector<std::string> &columnNames,
            int partitionColumn,
            bool exportEnabled,
            bool exportOnly,
            int tableAllocationTargetSize,
            int tupleLimit,
            int32_t compactionThreshold)
{
    Table *table = NULL;
    if (exportOnly) {
        table = new StreamedTable(exportEnabled);
    }
    else {
        table = new PersistentTable(partitionColumn, tableAllocationTargetSize, tupleLimit);
    }

    initCommon(databaseId,
               table,
               name,
               schema,
               columnNames,
               true,  // table takes ownership of the TupleSchema object
               compactionThreshold);

    // initialize stats for the table
    configureStats(databaseId, name, table);

    // Fix: 'table' is already declared as Table*, so the previous
    // dynamic_cast<Table*>(table) on return was a no-op paying RTTI cost.
    return table;
}
/**
 * Build a persistent table (or an export-only streamed table) bound to the
 * given executor context, initialize its common metadata, and register its
 * statistics source.
 *
 * @param databaseId      catalog id of the owning database
 * @param ctx             executor context the table is bound to
 * @param name            table name
 * @param schema          tuple schema; ownership passes to the table
 * @param columnNames     array of column names, one per schema column
 * @param partitionColumn index of the partitioning column (persistent only)
 * @param exportEnabled   whether export is enabled for this table
 * @param exportOnly      true -> build a StreamedTable instead
 * @return the newly constructed table (caller owns it)
 */
Table* TableFactory::getPersistentTable(
            voltdb::CatalogId databaseId,
            ExecutorContext *ctx,
            const std::string &name,
            TupleSchema* schema,
            const std::string* columnNames,
            int partitionColumn,
            bool exportEnabled,
            bool exportOnly)
{
    Table *table = NULL;
    if (exportOnly) {
        table = new StreamedTable(ctx, exportEnabled);
        TableFactory::initCommon(databaseId, table, name, schema, columnNames, true);
    }
    else {
        // Fix: the object is constructed right here as a PersistentTable, so a
        // typed pointer is correct by construction; the previous
        // dynamic_cast<PersistentTable*>(table) paid RTTI cost and its result
        // was dereferenced without a NULL check anyway.
        PersistentTable *pTable = new PersistentTable(ctx, exportEnabled);
        table = pTable;
        TableFactory::initCommon(databaseId, pTable, name, schema, columnNames, true);
        pTable->m_partitionColumn = partitionColumn;
    }

    // initialize stats for the table
    configureStats(databaseId, ctx, name, table);

    // Fix: 'table' is already a Table*; the dynamic_cast on return was a no-op.
    return table;
}
/**
 * Build a persistent table (or an export-only streamed table), initialize its
 * common metadata, pre-allocate the first tuple block for persistent tables,
 * and register its statistics source.
 */
Table* TableFactory::getPersistentTable(
            voltdb::CatalogId databaseId,
            const std::string &name,
            TupleSchema* schema,
            const std::vector<std::string> &columnNames,
            char *signature,
            bool tableIsMaterialized,
            int partitionColumn,
            bool exportEnabled,
            bool exportOnly,
            int tableAllocationTargetSize,
            int tupleLimit,
            int32_t compactionThreshold,
            bool drEnabled)
{
    StreamedTable *streamed = NULL;
    PersistentTable *persistent = NULL;
    Table *table = NULL;
    if (exportOnly) {
        streamed = new StreamedTable(partitionColumn);
        table = streamed;
    }
    else {
        persistent = new PersistentTable(partitionColumn,
                                         signature,
                                         tableIsMaterialized,
                                         tableAllocationTargetSize,
                                         tupleLimit,
                                         drEnabled);
        table = persistent;
    }

    initCommon(databaseId,
               table,
               name,
               schema,
               columnNames,
               true,  // table will take ownership of TupleSchema object
               compactionThreshold);

    TableStats *stats = NULL;
    if (exportOnly) {
        stats = streamed->getTableStats();
    }
    else {
        stats = persistent->getTableStats();
        // Pre-allocate the first tuple storage block here rather than at the
        // time of first tuple insertion, so that insertion does not pay the
        // allocation cost.
        TBPtr block = persistent->allocateNextBlock();
        assert(block->hasFreeTuples());
        persistent->m_blocksWithSpace.insert(block);
    }

    // initialize stats for the table
    configureStats(name, stats);
    return table;
}
/**
 * Build a persistent table with its primary-key and secondary indexes (or an
 * export-only streamed table), choosing between the regular and MMAP-backed
 * persistent table implementations based on the executor context.
 *
 * @param databaseId      catalog id of the owning database
 * @param ctx             executor context the table is bound to
 * @param name            table name
 * @param schema          tuple schema; ownership passes to the table
 * @param columnNames     array of column names, one per schema column
 * @param pkeyIndex       scheme for the primary-key index
 * @param indexes         schemes for the secondary indexes
 * @param partitionColumn index of the partitioning column (persistent only)
 * @param exportEnabled   whether export is enabled for this table
 * @param exportOnly      true -> build a StreamedTable instead
 * @return the newly constructed table (caller owns it)
 */
Table* TableFactory::getPersistentTable(
            voltdb::CatalogId databaseId,
            ExecutorContext *ctx,
            const std::string &name,
            TupleSchema* schema,
            const std::string* columnNames,
            const TableIndexScheme &pkeyIndex,
            const std::vector<TableIndexScheme> &indexes,
            int partitionColumn,
            bool exportEnabled,
            bool exportOnly)
{
    Table *table = NULL;
    if (exportOnly) {
        table = new StreamedTable(ctx, exportEnabled);
        TableFactory::initCommon(databaseId, table, name, schema, columnNames, true);
    }
    else {
        // Choose whether to use the MMAP-backed persistent table.
        // Fix: hold the new object in a typed pointer from construction
        // instead of the previous unchecked dynamic_cast (RTTI cost, and the
        // result was dereferenced without a NULL check).
        PersistentTable *pTable = NULL;
        if (!ctx->isMMAPEnabled()) {
            pTable = new PersistentTable(ctx, name, exportEnabled);
        }
        else {
            pTable = new MMAP_PersistentTable(ctx, name, exportEnabled);
        }
        table = pTable;
        VOLT_DEBUG("MMAP Enabled : %d \n", static_cast<int>(ctx->isMMAPEnabled()));

        pTable->m_pkeyIndex = TableIndexFactory::getInstance(pkeyIndex);
        TableFactory::initCommon(databaseId, pTable, name, schema, columnNames, true);
        pTable->m_partitionColumn = partitionColumn;

        // one slot for the pkey index plus one per secondary index
        // Fix: compute the count once as a signed int; the previous loop
        // compared a signed 'int i' against the unsigned indexes.size().
        const int secondaryCount = static_cast<int>(indexes.size());
        pTable->m_indexCount = 1 + secondaryCount;
        pTable->m_indexes = new TableIndex*[1 + secondaryCount];
        pTable->m_indexes[0] = pTable->m_pkeyIndex;
        for (int i = 0; i < secondaryCount; ++i) {
            pTable->m_indexes[i + 1] = TableIndexFactory::getInstance(indexes[i]);
        }
        initConstraints(pTable);
    }

    // initialize stats for the table
    configureStats(databaseId, ctx, name, table);

    // Fix: 'table' is already a Table*; the dynamic_cast on return was a no-op.
    return table;
}
/**
 * Build a persistent table (or an export-only streamed table), initialize its
 * common metadata and statistics, and pre-allocate the first tuple storage
 * block for persistent tables.
 */
Table* TableFactory::getPersistentTable(
            voltdb::CatalogId databaseId,
            const std::string &name,
            TupleSchema* schema,
            const std::vector<std::string> &columnNames,
            char *signature,
            bool tableIsMaterialized,
            int partitionColumn,
            bool exportEnabled,
            bool exportOnly,
            int tableAllocationTargetSize,
            int tupleLimit,
            int32_t compactionThreshold,
            bool drEnabled)
{
    PersistentTable *persistent = NULL;
    Table *table = NULL;
    if (exportOnly) {
        table = new StreamedTable(exportEnabled);
    }
    else {
        persistent = new PersistentTable(partitionColumn,
                                         signature,
                                         tableIsMaterialized,
                                         tableAllocationTargetSize,
                                         tupleLimit,
                                         drEnabled);
        table = persistent;
    }

    initCommon(databaseId,
               table,
               name,
               schema,
               columnNames,
               true,  // table will take ownership of TupleSchema object
               compactionThreshold);

    // initialize stats for the table
    configureStats(databaseId, name, table);

    if (persistent != NULL) {
        // Allocate the tuple storage block ahead of time instead of waiting
        // for the first tuple insertion, to improve first-insert performance.
        TBPtr firstBlock = persistent->allocateNextBlock();
        assert(firstBlock->hasFreeTuples());
        persistent->m_blocksWithSpace.insert(firstBlock);
    }
    return table;
}
// This is a convenient wrapper for test only. Table* TableFactory::getStreamedTableForTest( voltdb::CatalogId databaseId, const std::string &name, TupleSchema* schema, const std::vector<std::string> &columnNames, ExportTupleStream* wrapper, bool exportEnabled, int32_t compactionThreshold) { Table *table = new StreamedTable(exportEnabled, wrapper); initCommon(databaseId, table, name, schema, columnNames, true, // table will take ownership of TupleSchema object compactionThreshold); // initialize stats for the table configureStats(databaseId, name, table); return table; }