Example #1
Table* TableFactory::getPersistentTable(
            voltdb::CatalogId databaseId,
            const std::string &name,
            TupleSchema* schema,
            const std::vector<std::string> &columnNames,
            char *signature,
            bool tableIsMaterialized,
            int partitionColumn,
            bool exportEnabled,
            bool exportOnly,
            int tableAllocationTargetSize,
            int tupleLimit,
            int32_t compactionThreshold,
            bool drEnabled)
{
    Table *table = NULL;
    StreamedTable *streamedTable = NULL;
    PersistentTable *persistentTable = NULL;

    if (exportOnly) {
        table = streamedTable = new StreamedTable(partitionColumn);
    }
    else {
        table = persistentTable = new PersistentTable(partitionColumn,
                                                      signature,
                                                      tableIsMaterialized,
                                                      tableAllocationTargetSize,
                                                      tupleLimit,
                                                      drEnabled);
    }

    initCommon(databaseId,
               table,
               name,
               schema,
               columnNames,
               true,  // table will take ownership of TupleSchema object
               compactionThreshold);

    TableStats *stats;
    if (exportOnly) {
        stats = streamedTable->getTableStats();
    }
    else {
        stats = persistentTable->getTableStats();
        // Allocate and assign the first tuple storage block to the persistent
        // table ahead of time, rather than at first tuple insertion. Allocating
        // the block up front avoids paying the allocation cost on the insert path.
        TBPtr block = persistentTable->allocateNextBlock();
        assert(block->hasFreeTuples());
        persistentTable->m_blocksWithSpace.insert(block);
    }

    // initialize stats for the table
    configureStats(name, stats);

    return table;
}
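For orientation, a call site for this factory might look like the following minimal sketch. The include paths, the catalog id, the schema construction, and every literal argument are hypothetical placeholders chosen to exercise the PersistentTable branch; none of them are values taken from the examples above.

#include <string>
#include <vector>

#include "common/TupleSchema.h"    // assumed header locations
#include "storage/tablefactory.h"

voltdb::Table *makeExampleTable(voltdb::TupleSchema *schema) {
    // Hypothetical two-column table. The schema is assumed to be built
    // elsewhere, and ownership passes to the table via the `true` flag
    // that getPersistentTable forwards to initCommon.
    std::vector<std::string> columnNames;
    columnNames.push_back("id");
    columnNames.push_back("value");
    char signature[20] = {};

    return voltdb::TableFactory::getPersistentTable(
            0,              // databaseId (placeholder)
            "example",      // name
            schema,
            columnNames,
            signature,
            false,          // tableIsMaterialized
            0,              // partitionColumn
            false,          // exportEnabled
            false,          // exportOnly -> take the PersistentTable branch
            1024 * 1024,    // tableAllocationTargetSize (placeholder)
            -1,             // tupleLimit: assumed "no limit" sentinel
            95,             // compactionThreshold (placeholder)
            false);         // drEnabled
}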
Example #2
Table* TableFactory::getPersistentTable(
            voltdb::CatalogId databaseId,
            const std::string &name,
            TupleSchema* schema,
            const std::vector<std::string> &columnNames,
            char *signature,
            bool tableIsMaterialized,
            int partitionColumn,
            bool exportEnabled,
            bool exportOnly,
            int tableAllocationTargetSize,
            int tupleLimit,
            int32_t compactionThreshold,
            bool drEnabled)
{
    Table *table = NULL;

    if (exportOnly) {
        table = new StreamedTable(exportEnabled);
    }
    else {
        table = new PersistentTable(partitionColumn, signature, tableIsMaterialized, tableAllocationTargetSize, tupleLimit, drEnabled);
    }

    initCommon(databaseId,
               table,
               name,
               schema,
               columnNames,
               true,  // table will take ownership of TupleSchema object
               compactionThreshold);

    // initialize stats for the table
    configureStats(databaseId, name, table);

    if (!exportOnly) {
        // Allocate a tuple storage block for the persistent table ahead of time
        // instead of waiting for the first tuple insertion. The intent of
        // allocating the block up front is to improve the performance of the
        // first tuple insertion.
        PersistentTable *persistentTable = static_cast<PersistentTable*>(table);
        TBPtr block = persistentTable->allocateNextBlock();
        assert(block->hasFreeTuples());
        persistentTable->m_blocksWithSpace.insert(block);
    }
    return table;
}
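Both versions of the factory share the same design choice: pay for the first block allocation at table-construction time so the first insert never does. The following self-contained sketch illustrates that eager pre-allocation pattern in isolation; the class and all of its names are invented for illustration and are not VoltDB types.

#include <cassert>
#include <cstddef>
#include <vector>

// Illustrative only: a store that allocates its first storage block in the
// constructor, analogous to calling allocateNextBlock() inside the factory,
// so that nextFreeSlot() never pays an allocation cost on first use.
class PreallocatedStore {
public:
    explicit PreallocatedStore(std::size_t blockSize)
            : m_blockSize(blockSize), m_used(0) {
        m_block.resize(m_blockSize);  // eager allocation, up front
    }

    char *nextFreeSlot(std::size_t slotSize) {
        assert(m_used + slotSize <= m_blockSize);  // sketch: one block only
        char *slot = &m_block[m_used];
        m_used += slotSize;
        return slot;
    }

private:
    std::size_t m_blockSize;
    std::size_t m_used;
    std::vector<char> m_block;
};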
Example #3
// ------------------------------------------------------------------
// OPERATIONS
// ------------------------------------------------------------------
void PersistentTable::nextFreeTuple(TableTuple *tuple) {
    // First check whether we have any blocks with free tuples in our list.
    // The memcheck build uses the heap instead of a free list to help Valgrind.
    if (!m_blocksWithSpace.empty()) {
        VOLT_TRACE("GRABBED FREE TUPLE!\n");
        stx::btree_set<TBPtr >::iterator begin = m_blocksWithSpace.begin();
        TBPtr block = (*begin);
        std::pair<char*, int> retval = block->nextFreeTuple();

        /**
         * Check to see if the block needs to move to a new bucket
         */
        if (retval.second != -1) {
            // Check whether the block is in the not-pending-snapshot set of buckets
            if (m_blocksNotPendingSnapshot.find(block) != m_blocksNotPendingSnapshot.end()) {
                block->swapToBucket(m_blocksNotPendingSnapshotLoad[retval.second]);
            // Check whether the block is in the pending-snapshot set of buckets
            } else if (m_blocksPendingSnapshot.find(block) != m_blocksPendingSnapshot.end()) {
                block->swapToBucket(m_blocksPendingSnapshotLoad[retval.second]);
            } else {
                // The block is actively being snapshotted and isn't eligible for
                // merge operations at all. Do nothing; once the iterator is
                // finished with the block, it will return it.
            }
        }

        tuple->move(retval.first);
        if (!block->hasFreeTuples()) {
            m_blocksWithSpace.erase(block);
        }
        assert (m_columnCount == tuple->sizeInValues());
        return;
    }

    // if there are no tuples free, we need to grab another chunk of memory
    // Allocate a new set of tuples
    TBPtr block = allocateNextBlock();

    // get free tuple
    assert (m_columnCount == tuple->sizeInValues());

    std::pair<char*, int> retval = block->nextFreeTuple();

    /**
     * Check to see if the block needs to move to a new bucket
     */
    if (retval.second != -1) {
        // Check whether the block is in the pending-snapshot set of buckets
        if (m_blocksPendingSnapshot.find(block) != m_blocksPendingSnapshot.end()) {
            block->swapToBucket(m_blocksPendingSnapshotLoad[retval.second]);
        // Otherwise check whether it is in the not-pending-snapshot set
        } else if (m_blocksNotPendingSnapshot.find(block) != m_blocksNotPendingSnapshot.end()) {
            block->swapToBucket(m_blocksNotPendingSnapshotLoad[retval.second]);
        } else {
            // The block is actively being snapshotted and isn't eligible for
            // merge operations at all. Do nothing; once the iterator is
            // finished with the block, it will return it.
        }
    }

    tuple->move(retval.first);

    if (block->hasFreeTuples()) {
        m_blocksWithSpace.insert(block);
    }
}
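The bookkeeping in nextFreeTuple() maintains one invariant: m_blocksWithSpace holds exactly the blocks that still have free tuple slots. The following self-contained sketch shows that invariant in isolation, with all names invented for illustration; it omits the snapshot-bucket accounting entirely.

#include <cassert>
#include <deque>
#include <set>

// Illustrative only: hand out slots from the first non-full block, allocate
// a new block when none has room, and keep the "blocks with space" set in
// sync, mirroring the insert/erase calls in nextFreeTuple().
struct Block {
    explicit Block(int capacity) : used(0), capacity(capacity) {}
    int nextFreeSlot() { return used++; }
    bool hasFreeSlots() const { return used < capacity; }
    int used;
    int capacity;
};

int grabSlot(std::deque<Block> &storage, std::set<Block*> &blocksWithSpace) {
    if (blocksWithSpace.empty()) {
        // No block has room: allocate a fresh one (cf. allocateNextBlock()).
        storage.push_back(Block(4));          // deque keeps pointers stable
        Block *fresh = &storage.back();
        int slot = fresh->nextFreeSlot();
        if (fresh->hasFreeSlots()) {
            blocksWithSpace.insert(fresh);
        }
        return slot;
    }
    Block *block = *blocksWithSpace.begin();
    int slot = block->nextFreeSlot();
    if (!block->hasFreeSlots()) {
        blocksWithSpace.erase(block);         // full blocks leave the set
    }
    return slot;
}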