Code example #1
File: tableindex.cpp Project: akhanzode/voltdb
bool TableIndex::replaceEntryNoKeyChange(const TableTuple &destinationTuple, const TableTuple &originalTuple)
{
    assert(originalTuple.address() != destinationTuple.address());

    if (isPartialIndex()) {
        const AbstractExpression* predicate = getPredicate();
        if (!predicate->eval(&destinationTuple, NULL).isTrue() && !predicate->eval(&originalTuple, NULL).isTrue()) {
            // both tuples fail the predicate. Nothing to do. Return TRUE
            return true;
        } else if (predicate->eval(&destinationTuple, NULL).isTrue() && !predicate->eval(&originalTuple, NULL).isTrue()) {
            // The original tuple fails the predicate, meaning the tuple is not indexed.
            // Simply add the new tuple
            TableTuple conflict(destinationTuple.getSchema());
            addEntryDo(&destinationTuple, &conflict);
            return conflict.isNullTuple();
        } else if (!predicate->eval(&destinationTuple, NULL).isTrue() && predicate->eval(&originalTuple, NULL).isTrue()) {
            // The destination tuple fails the predicate. Simply delete the original tuple
            return deleteEntryDo(&originalTuple);
        } else {
            // both tuples pass the predicate.
            assert(predicate->eval(&destinationTuple, NULL).isTrue() && predicate->eval(&originalTuple, NULL).isTrue());
            return replaceEntryNoKeyChangeDo(destinationTuple, originalTuple);
        }
    } else {
        return replaceEntryNoKeyChangeDo(destinationTuple, originalTuple);
    }
}
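
The four branches above enumerate every combination of the partial-index predicate evaluated on the new (destination) and old (original) tuple. A minimal standalone sketch of the same dispatch, with hypothetical add/del/replace callbacks standing in for addEntryDo, deleteEntryDo, and replaceEntryNoKeyChangeDo (illustration only, not the VoltDB API):

#include <functional>

// Sketch only: newPasses/oldPasses are the predicate results for the destination
// and original tuples; the callbacks stand in for the real index operations.
bool replaceDispatchSketch(bool newPasses, bool oldPasses,
                           const std::function<bool()> &add,
                           const std::function<bool()> &del,
                           const std::function<bool()> &replace) {
    if (!newPasses && !oldPasses) return true;  // neither tuple indexed: nothing to do
    if (newPasses && !oldPasses) return add();  // old tuple was never indexed: just insert
    if (!newPasses && oldPasses) return del();  // new tuple leaves the index: just delete
    return replace();                           // both indexed: swap the stored address
}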
Code example #2
bool CopyOnWriteContext::notifyTupleDelete(TableTuple &tuple) {
    assert(m_iterator != NULL);

    if (tuple.isDirty() || m_finishedTableScan) {
        return true;
    }

    /**
     * Find out which block contains the address. lower_bound returns the first entry
     * in the index >= the address, so unless the address matches a block start exactly,
     * the block we want is the previous entry. Check whether the address fits in that
     * previous block; if it doesn't, the address belongs to a block allocated after the
     * snapshot started.
     */
    TBPtr block = PersistentTable::findBlock(tuple.address(), m_blocks, getTable().getTableAllocationSize());
    if (block.get() == NULL) {
        // tuple not in snapshot region, don't care about this tuple
        return true;
    }

    /**
     * Now check where this is relative to the COWIterator.
     */
    CopyOnWriteIterator *iter = reinterpret_cast<CopyOnWriteIterator*>(m_iterator.get());
    return !iter->needToDirtyTuple(block->address(), tuple.address());
}
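
The block search that PersistentTable::findBlock performs here is the lower_bound-with-fallback pattern the comment describes. A hedged sketch of that pattern on a plain std::map (illustration only, not the VoltDB implementation; a uniform blockSize mirrors getTable().getTableAllocationSize() above):

#include <cstddef>
#include <map>

// Given a map keyed by block start address, return the start of the block that
// contains addr, or nullptr if addr lies outside every known block.
const char *findContainingBlock(const std::map<const char *, int> &blocks,
                                const char *addr, std::size_t blockSize) {
    auto it = blocks.lower_bound(addr);            // first entry with key >= addr
    if (it == blocks.end() || it->first != addr) { // not an exact block start
        if (it == blocks.begin()) return nullptr;  // no earlier block to fall back to
        --it;                                      // candidate: the previous block
    }
    return (addr < it->first + blockSize) ? it->first : nullptr;
}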
Code example #3
TEST_F(AntiCacheEvictionManagerTest, InsertMultipleTuples)
{
    int num_tuples = 10; 

    initTable(true); 
    
    TableTuple tuple = m_table->tempTuple();
    
    uint32_t oldest_tuple_id, newest_tuple_id; 
    
    for(int i = 0; i < num_tuples; i++) // insert 10 tuples
    {
        tuple.setNValue(0, ValueFactory::getIntegerValue(m_tuplesInserted++));
        tuple.setNValue(1, ValueFactory::getIntegerValue(rand()));
        m_table->insertTuple(tuple);
        
        tuple = m_table->lookupTuple(tuple);

        if(i == 0)
        {
            oldest_tuple_id = m_table->getTupleID(tuple.address()); 
        }
        else if(i == num_tuples-1)
        {
            newest_tuple_id = m_table->getTupleID(tuple.address()); 
        }
    }
        
    ASSERT_EQ(num_tuples, m_table->getNumTuplesInEvictionChain()); 
    ASSERT_EQ(oldest_tuple_id, m_table->getOldestTupleID());
    ASSERT_EQ(newest_tuple_id, m_table->getNewestTupleID());
    
    cleanupTable();
}
Code example #4
void CopyOnWriteContext::markTupleDirty(TableTuple tuple, bool newTuple) {
    assert(m_iterator != NULL);

    if (newTuple) {
        m_inserts++;
    }
    else {
        m_updates++;
    }

    /**
     * If this is an update or a delete of a tuple that is already dirty, then no further action is
     * required.
     */
    if (!newTuple && tuple.isDirty()) {
        return;
    }

    /**
     * If the table has been scanned already, there is no need to continue marking tuples dirty.
     * If the tuple is dirty, then it has already been backed up.
     */
    if (m_finishedTableScan) {
        tuple.setDirtyFalse();
        return;
    }

    /**
     * Find out which block the address is contained in.
     */
    TBPtr block = PersistentTable::findBlock(tuple.address(), m_blocks, getTable().getTableAllocationSize());
    if (block.get() == NULL) {
        // tuple not in snapshot region, don't care about this tuple, no need to dirty it
        tuple.setDirtyFalse();
        return;
    }

    /**
     * Now check where this is relative to the COWIterator.
     */
    CopyOnWriteIterator *iter = reinterpret_cast<CopyOnWriteIterator*>(m_iterator.get());
    if (iter->needToDirtyTuple(block->address(), tuple.address())) {
        tuple.setDirtyTrue();
        /**
         * Don't back up a newly introduced tuple, just mark it as dirty.
         */
        if (!newTuple) {
            m_backedUpTuples->insertTupleNonVirtualWithDeepCopy(tuple, &m_pool);
        }
    } else {
        tuple.setDirtyFalse();
        return;
    }
}
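
Note the asymmetry in the dirty branch: a tuple inserted after the snapshot began has no pre-image the snapshot needs, so it is only flagged dirty, while an updated pre-existing tuple is deep-copied into m_backedUpTuples before its storage can be overwritten.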
Code example #5
void NVMEvictedTable::deleteNVMEvictedTuple(TableTuple source) {
    if(source.address() == NULL)
        return; 
    
    source.freeObjectColumns();
    deleteTupleStorage(source);
}
Code example #6
void addRandomUniqueTuples(Table *table, int numTuples) {
    TableTuple tuple = table->tempTuple();
    // Zero everything after the first byte; the leading byte holds the tuple's
    // status flags (header) and must be preserved.
    ::memset(tuple.address() + 1, 0, tuple.tupleLength() - 1);
    for (int ii = 0; ii < numTuples; ii++) {
        tuple.setNValue(0, ValueFactory::getIntegerValue(m_primaryKeyIndex++));
        tuple.setNValue(1, ValueFactory::getIntegerValue(rand()));
        bool success = table->insertTuple(tuple);
        if (!success) {
            std::cout << "Failed to add random unique tuple" << std::endl;
            return;
        }
    }
}
Code example #7
TEST_F(AntiCacheEvictionManagerTest, TestSetEntryToNewAddress)
{
    int num_tuples = 20;

    initTable(true); 

    TableTuple tuple = m_table->tempTuple();

    int iterations = 0;

    for(int i = 0; i < num_tuples / 2; i++) // insert tuples
    {
        tuple.setNValue(0, ValueFactory::getIntegerValue(m_tuplesInserted++));
        tuple.setNValue(1, ValueFactory::getIntegerValue(0));
        m_table->insertTuple(tuple);
        tuple.setNValue(0, ValueFactory::getIntegerValue(m_tuplesInserted++));
        tuple.setNValue(1, ValueFactory::getIntegerValue(1));
        m_table->insertTuple(tuple);
    }

    TableIterator itr1(m_table);
    iterations = 0; 

    itr1.next(tuple);

    void* oldAddress = tuple.address();
    
    m_table->setEntryToNewAddressForAllIndexes(&tuple, (void*)0xdeadbeaf, oldAddress);

    std::vector <TableIndex*> allIndexes = m_table->allIndexes();
    for (int i = 0; i < allIndexes.size(); ++i) {
        int cnt = 0;

        allIndexes[i]->moveToTuple(&tuple);
        const void* address;

        // check to see whether we set the tuple and only that tuple to new address
        // for both primaryKey and secondary indexes
        while ((address = (allIndexes[i]->nextValueAtKey()).address())) {
            ASSERT_NE(address, oldAddress);
            if (address == (void*)0xdeadbeaf)
                cnt++;
        }
        ASSERT_EQ(cnt, 1);
    }

    cleanupTable();
}
Code example #8
bool CopyOnWriteContext::notifyTupleDelete(TableTuple &tuple) {
    assert(m_iterator != NULL);

    if (tuple.isDirty() || m_finishedTableScan) {
        return true;
    }
    // This is a 'loose' count of the number of deletes because COWIterator could be past this
    // point in the block.
    m_deletes++;

    /**
     * Now check where this is relative to the COWIterator.
     */
    CopyOnWriteIterator *iter = reinterpret_cast<CopyOnWriteIterator*>(m_iterator.get());
    return !iter->needToDirtyTuple(tuple.address());
}
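
Here the return value inverts needToDirtyTuple: returning true presumably signals the caller that the COW iterator has already passed this tuple, so its storage can be reclaimed without first preserving it for the snapshot.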
Code example #9
bool CopyOnWriteContext::canSafelyFreeTuple(TableTuple tuple) {
    assert(m_iterator != NULL);

    if (tuple.isDirty() || m_finishedTableScan) {
        return true;
    }

    /**
     * Find out which block contains the address. lower_bound returns the first entry
     * in the index >= the address, so unless the address matches a block start exactly,
     * the block we want is the previous entry. Check whether the address fits in that
     * previous block; if it doesn't, the address belongs to a block allocated after the
     * snapshot started.
     */
    char *address = tuple.address();
    TBMapI i = m_blocks.lower_bound(address);
    if (i == m_blocks.end() && m_blocks.empty()) {
        return true;
    }
    if (i == m_blocks.end()) {
        i--;
        if (i.key() + getTable().m_tableAllocationSize < address) {
            return true;
        }
        //OK it is in the very last block
    } else {
        if (i.key() != address) {
            i--;
            if (i.key() + getTable().m_tableAllocationSize < address) {
                return true;
            }
            //OK... this is in this particular block
        }
    }

    const char *blockStartAddress = i.key();

    /**
     * Now check where this is relative to the COWIterator.
     */
    CopyOnWriteIterator *iter = reinterpret_cast<CopyOnWriteIterator*>(m_iterator.get());
    return !iter->needToDirtyTuple(blockStartAddress, address);
}
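
This variant inlines the same lower_bound block search that examples #2 and #4 delegate to PersistentTable::findBlock; the extra m_blocks.empty() check covers the case where lower_bound returns end() on an empty map before the i-- fallback can be taken.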
Code example #10
void CopyOnWriteContext::markTupleDirty(TableTuple tuple, bool newTuple) {
    assert(m_iterator != NULL);

    /**
     * If this is an update or a delete of a tuple that is already dirty, then no further action is
     * required.
     */
    if (!newTuple && tuple.isDirty()) {
        return;
    }

    /**
     * If the table has been scanned already, there is no need to continue marking tuples dirty.
     * If the tuple is dirty, then it has already been backed up.
     */
    if (m_finishedTableScan) {
        tuple.setDirtyFalse();
        return;
    }

    /**
     * Now check where this is relative to the COWIterator.
     */
    CopyOnWriteIterator *iter = reinterpret_cast<CopyOnWriteIterator*>(m_iterator.get());
    if (iter->needToDirtyTuple(tuple.address())) {
        tuple.setDirtyTrue();

        if (newTuple) {
            /**
             * Don't back up a newly introduced tuple, just mark it as dirty.
             */
            m_inserts++;
        }
        else {
            m_updates++;
            m_backedUpTuples->insertTempTupleDeepCopy(tuple, &m_pool);
        }
    } else {
        tuple.setDirtyFalse();
        return;
    }
}
Code example #11
TEST_F(AntiCacheEvictionManagerTest, GetTupleID)
{
    initTable(true); 
    
    TableTuple tuple = m_table->tempTuple();
    
    tuple.setNValue(0, ValueFactory::getIntegerValue(m_tuplesInserted++));
    tuple.setNValue(1, ValueFactory::getIntegerValue(rand()));
    m_table->insertTuple(tuple);
    
    // get the tuple that was just inserted
    tuple = m_table->lookupTuple(tuple); 
    
    int tuple_id = m_table->getTupleID(tuple.address()); 
    
    //printf("tuple_id = %d\n", tuple_id); 

    //ASSERT_NE(tuple_id, -1); 
    ASSERT_EQ(tuple_id, 0); 
}
Code example #12
TEST_F(AntiCacheEvictionManagerTest, OldestTupleID)
{
    int inserted_tuple_id, oldest_tuple_id; 
    
    initTable(true); 
    
    TableTuple tuple = m_table->tempTuple();
    
    tuple.setNValue(0, ValueFactory::getIntegerValue(m_tuplesInserted++));
    tuple.setNValue(1, ValueFactory::getIntegerValue(rand()));
    m_table->insertTuple(tuple);
    
    // get the tuple that was just inserted
    tuple = m_table->lookupTuple(tuple); 
    
    inserted_tuple_id = m_table->getTupleID(tuple.address()); 
    oldest_tuple_id = m_table->getOldestTupleID(); 
    
    ASSERT_EQ(inserted_tuple_id, oldest_tuple_id);
    
    cleanupTable();
}
Code example #13
TEST_F(AntiCacheEvictionManagerTest, NewestTupleIDTest)
{
    int inserted_tuple_id, newest_tuple_id; 
    
    initTable(true); 
    
    TableTuple tuple = m_table->tempTuple();
    
    tuple.setNValue(0, ValueFactory::getIntegerValue(m_tuplesInserted++));
    tuple.setNValue(1, ValueFactory::getIntegerValue(rand()));
    m_table->insertTuple(tuple);
    
    // get the tuple that was just inserted
    tuple = m_table->lookupTuple(tuple); 
    
    inserted_tuple_id = m_table->getTupleID(tuple.address()); 
    newest_tuple_id = m_table->getNewestTupleID(); 
    
    printf("inserted_tuple_id = %d\n", inserted_tuple_id);
    printf("newest_tuple_id = %d\n", newest_tuple_id); 
    
    ASSERT_EQ(inserted_tuple_id, newest_tuple_id); 
}
Code example #14
File: EvictedTable.cpp Project: duranHe/h-store
void EvictedTable::deleteEvictedTuple(TableTuple source) {
    if(source.address() == NULL)
        return; 
    
    deleteTupleStorage(source);
}
Code example #15
File: insertexecutor.cpp Project: DarkDare/voltdb
bool InsertExecutor::p_execute(const NValueArray &params) {
    assert(m_node == dynamic_cast<InsertPlanNode*>(m_abstractNode));
    assert(m_node);
    assert(m_inputTable == dynamic_cast<TempTable*>(m_node->getInputTable()));
    assert(m_inputTable);

    // Target table can be StreamedTable or PersistentTable and must not be NULL
    // Update target table reference from table delegate
    Table* targetTable = m_node->getTargetTable();
    assert(targetTable);
    assert((targetTable == dynamic_cast<PersistentTable*>(targetTable)) ||
            (targetTable == dynamic_cast<StreamedTable*>(targetTable)));

    PersistentTable* persistentTable = m_isStreamed ?
        NULL : static_cast<PersistentTable*>(targetTable);
    TableTuple upsertTuple = TableTuple(targetTable->schema());

    VOLT_TRACE("INPUT TABLE: %s\n", m_inputTable->debug().c_str());

    // count the number of successful inserts
    int modifiedTuples = 0;

    Table* outputTable = m_node->getOutputTable();
    assert(outputTable);

    TableTuple templateTuple = m_templateTuple.tuple();

    std::vector<int>::iterator it;
    for (it = m_nowFields.begin(); it != m_nowFields.end(); ++it) {
        templateTuple.setNValue(*it, NValue::callConstant<FUNC_CURRENT_TIMESTAMP>());
    }

    VOLT_DEBUG("This is a %s-row insert on partition with id %d",
               m_node->getChildren()[0]->getPlanNodeType() == PLAN_NODE_TYPE_MATERIALIZE ?
               "single" : "multi", m_engine->getPartitionId());
    VOLT_DEBUG("Offset of partition column is %d", m_partitionColumn);

    //
    // An insert is quite simple really. We just loop through our m_inputTable
    // and insert any tuple that we find into our targetTable. It doesn't get any easier than that!
    //
    TableTuple inputTuple(m_inputTable->schema());
    assert (inputTuple.sizeInValues() == m_inputTable->columnCount());
    TableIterator iterator = m_inputTable->iterator();
    while (iterator.next(inputTuple)) {

        for (int i = 0; i < m_node->getFieldMap().size(); ++i) {
            // Most executors will just call setNValue instead of
            // setNValueAllocateForObjectCopies.
            //
            // However, we need to call
            // setNValueAllocateForObjectCopies here.  Sometimes the
            // input table's schema has an inlined string field, and
            // it's being assigned to the target table's outlined
            // string field.  In this case we need to tell the NValue
            // where to allocate the string data.
            templateTuple.setNValueAllocateForObjectCopies(m_node->getFieldMap()[i],
                                                           inputTuple.getNValue(i),
                                                           ExecutorContext::getTempStringPool());
        }

        VOLT_TRACE("Inserting tuple '%s' into target table '%s' with table schema: %s",
                   templateTuple.debug(targetTable->name()).c_str(), targetTable->name().c_str(),
                   targetTable->schema()->debug().c_str());

        // if there is a partition column for the target table
        if (m_partitionColumn != -1) {

            // get the value for the partition column
            NValue value = templateTuple.getNValue(m_partitionColumn);
            bool isLocal = m_engine->isLocalSite(value);

            // if it doesn't map to this site
            if (!isLocal) {
                if (!m_multiPartition) {
                    throw ConstraintFailureException(
                            dynamic_cast<PersistentTable*>(targetTable),
                            templateTuple,
                            "Mispartitioned tuple in single-partition insert statement.");
                }

                // don't insert
                continue;
            }
        }

        // for multi partition export tables, only insert into one
        // place (the partition with hash(0)), if the data is from a
        // replicated source.  If the data is coming from a subquery
        // with partitioned tables, we need to perform the insert on
        // every partition.
        if (m_isStreamed && m_multiPartition && !m_sourceIsPartitioned) {
            bool isLocal = m_engine->isLocalSite(ValueFactory::getBigIntValue(0));
            if (!isLocal) continue;
        }


        if (! m_isUpsert) {
            // try to put the tuple into the target table

            if (m_hasPurgeFragment) {
                if (!executePurgeFragmentIfNeeded(&persistentTable))
                    return false;
                // purge fragment might have truncated the table, and
                // refreshed the persistent table pointer.  Make sure to
                // use it when doing the insert below.
                targetTable = persistentTable;
            }

            if (!targetTable->insertTuple(templateTuple)) {
                VOLT_ERROR("Failed to insert tuple from input table '%s' into"
                           " target table '%s'",
                           m_inputTable->name().c_str(),
                           targetTable->name().c_str());
                return false;
            }

        } else {
            // upsert execution logic
            assert(persistentTable->primaryKeyIndex() != NULL);
            TableTuple existsTuple = persistentTable->lookupTupleByValues(templateTuple);

            if (existsTuple.isNullTuple()) {
                // try to put the tuple into the target table

                if (m_hasPurgeFragment) {
                    if (!executePurgeFragmentIfNeeded(&persistentTable))
                        return false;
                }

                if (!persistentTable->insertTuple(templateTuple)) {
                    VOLT_ERROR("Failed to insert tuple from input table '%s' into"
                               " target table '%s'",
                               m_inputTable->name().c_str(),
                               persistentTable->name().c_str());
                    return false;
                }
            } else {
                // tuple exists already, try to update the tuple instead
                upsertTuple.move(templateTuple.address());
                TableTuple &tempTuple = persistentTable->getTempTupleInlined(upsertTuple);

                if (!persistentTable->updateTupleWithSpecificIndexes(existsTuple, tempTuple,
                        persistentTable->allIndexes())) {
                    VOLT_INFO("Failed to update existsTuple from table '%s'",
                            persistentTable->name().c_str());
                    return false;
                }
            }
        }

        // successfully inserted or updated
        modifiedTuples++;
    }

    TableTuple& count_tuple = outputTable->tempTuple();
    count_tuple.setNValue(0, ValueFactory::getBigIntValue(modifiedTuples));
    // try to put the tuple into the output table
    if (!outputTable->insertTuple(count_tuple)) {
        VOLT_ERROR("Failed to insert tuple count (%d) into"
                   " output table '%s'",
                   modifiedTuples,
                   outputTable->name().c_str());
        return false;
    }

    // add to the planfragments count of modified tuples
    m_engine->addToTuplesModified(modifiedTuples);
    VOLT_DEBUG("Finished inserting %d tuples", modifiedTuples);
    return true;
}
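
The comment about setNValueAllocateForObjectCopies above turns on the distinction between inlined and non-inlined (outlined) string columns. A rough standalone analogy, with a hypothetical SketchPool type standing in for the real NValue/Pool machinery (not the VoltDB API):

#include <cstddef>
#include <cstring>
#include <string>

// Hypothetical bump allocator standing in for ExecutorContext::getTempStringPool().
// No overflow handling; sketch only.
struct SketchPool {
    char buf[1024];
    std::size_t used = 0;
    char *allocate(std::size_t n) { char *p = buf + used; used += n; return p; }
};

// An inlined string lives inside the tuple's own bytes; an outlined column stores
// a pointer into pool-managed memory. Copying inlined -> outlined therefore has to
// allocate pool storage first, which is the job setNValueAllocateForObjectCopies
// performs for real NValues.
const char *copyToOutlined(const std::string &inlined, SketchPool &pool) {
    char *dst = pool.allocate(inlined.size() + 1);
    std::memcpy(dst, inlined.c_str(), inlined.size() + 1);
    return dst; // the target tuple would store this pointer, not the bytes themselves
}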
Code example #16
void CopyOnWriteContext::markTupleDirty(TableTuple tuple, bool newTuple) {
    if (newTuple) {
        m_inserts++;
    }
    else {
        m_updates++;
    }

    /**
     * If this is an update or a delete of a tuple that is already dirty, then no further action is
     * required.
     */
    if (!newTuple && tuple.isDirty()) {
        return;
    }

    /**
     * If the table has been scanned already, there is no need to continue marking tuples dirty.
     * If the tuple is dirty, then it has already been backed up.
     */
    if (m_finishedTableScan) {
        tuple.setDirtyFalse();
        return;
    }

    /**
     * Find out which block the address is contained in.
     */
    char *address = tuple.address();
    TBMapI i = m_blocks.lower_bound(address);
    if (i == m_blocks.end() && m_blocks.empty()) {
        tuple.setDirtyFalse();
        return;
    }
    if (i == m_blocks.end()) {
        i--;
        if (i.key() + m_table.m_tableAllocationSize < address) {
            tuple.setDirtyFalse();
            return;
        }
        //OK it is in the very last block
    } else {
        if (i.key() != address) {
            i--;
            if (i.key() + m_table.m_tableAllocationSize < address) {
                tuple.setDirtyFalse();
                return;
            }
            //OK... this is in this particular block
        }
    }

    const char *blockStartAddress = i.key();

    /**
     * Now check where this is relative to the COWIterator.
     */
    CopyOnWriteIterator *iter = reinterpret_cast<CopyOnWriteIterator*>(m_iterator.get());
    if (iter->needToDirtyTuple(blockStartAddress, address)) {
        tuple.setDirtyTrue();
        /**
         * Don't back up a newly introduced tuple, just mark it as dirty.
         */
        if (!newTuple) {
            m_backedUpTuples->insertTupleNonVirtualWithDeepCopy(tuple, &m_pool);
        }
    } else {
        tuple.setDirtyFalse();
        return;
    }
}
Code example #17
void CopyOnWriteContext::markTupleDirty(TableTuple tuple, bool newTuple) {
    /**
     * If this is an update or a delete of a tuple that is already dirty, then no further action is
     * required.
     */
    if (!newTuple && tuple.isDirty()) {
        return;
    }

    /**
     * If the table has been scanned already, there is no need to continue marking tuples dirty.
     * If the tuple is dirty, then it has already been backed up.
     */
    if (m_finishedTableScan) {
        tuple.setDirtyFalse();
        return;
    }

    /**
     * Find out which block the address is contained in.
     */
    char *address = tuple.address();
#ifdef MEMCHECK
    BlockPair compP;
    compP.pair = std::pair<char*, int>(address, 0);
    compP.tupleLength = tuple.tupleLength();
#else
    const BlockPair compP(address, 0);
#endif
    BlockPairVectorI i = std::lower_bound(m_blocks.begin(), m_blocks.end(), compP, pairAddressToPairAddressComparator);
    if (i == m_blocks.end()) {
        tuple.setDirtyFalse();
        return;
    }
#ifdef MEMCHECK
    const char *blockStartAddress = (*i).pair.first;
    const int blockIndex = (*i).pair.second;
    const char *blockEndAddress = blockStartAddress + tuple.tupleLength();
#else
    const char *blockStartAddress = (*i).first;
    const int blockIndex = (*i).second;
    const char *blockEndAddress = blockStartAddress + TABLE_BLOCKSIZE;
#endif

    if (address >= blockEndAddress || address < blockStartAddress) {
        /**
         * Tuple is in a block allocated after the start of COW
         */
        tuple.setDirtyFalse();
        return;
    }

    /**
     * Now check where this is relative to the COWIterator.
     */
    CopyOnWriteIterator *iter = reinterpret_cast<CopyOnWriteIterator*>(m_iterator.get());
    if (iter->needToDirtyTuple(blockIndex, address, newTuple)) {
        tuple.setDirtyTrue();
        /**
         * Don't back up a newly introduced tuple, just mark it as dirty.
         */
        if (!newTuple) {
            m_backedUpTuples->insertTupleNonVirtualWithDeepCopy(tuple, &m_pool);
        }
    } else {
        tuple.setDirtyFalse();
        return;
    }
}
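
This appears to be an older variant of the same routine: m_blocks here is a sorted vector of BlockPair entries searched with std::lower_bound (with MEMCHECK builds treating each tuple as its own block, hence the per-entry tupleLength), rather than the map keyed by block start address used in the later versions above.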
Code example #18
File: CompactionTest.cpp Project: Sciumo/voltdb
TEST_F(CompactionTest, CompactionWithCopyOnWrite) {
    initTable(true);
#ifdef MEMCHECK
    int tupleCount = 1000;
#else
    int tupleCount = 645260;
#endif
    addRandomUniqueTuples( m_table, tupleCount);

#ifdef MEMCHECK
    ASSERT_EQ( tupleCount, m_table->m_data.size());
#else
    ASSERT_EQ(20, m_table->m_data.size());
#endif

    stx::btree_set<int32_t> pkeysNotDeleted[3];
    std::vector<int32_t> pkeysToDelete[3];
    for (int ii = 0; ii < tupleCount; ii ++) {
        int foo = ii % 3;
        pkeysToDelete[foo].push_back(ii);
        if (ii % 3 == 0) {
            //All keys deleted
        } else if (ii % 3 == 1) {
            pkeysNotDeleted[0].insert(ii);
        } else {
            pkeysNotDeleted[0].insert(ii);
            pkeysNotDeleted[1].insert(ii);
        }
    }
    //std::cout << pkeysToDelete[0].size() << "," << pkeysToDelete[1].size() << "," << pkeysToDelete[2].size() << std::endl;

    stx::btree_set<int32_t> COWTuples;
    int totalInsertedCOWTuples = 0;
    DefaultTupleSerializer serializer;
    m_table->activateCopyOnWrite(&serializer, 0);
    for (int qq = 0; qq < 3; qq++) {
#ifdef MEMCHECK
        int serializationBufferSize = 22700;
#else
        int serializationBufferSize = 131072;
#endif
        char serializationBuffer[serializationBufferSize];
        while (true) {
            ReferenceSerializeOutput out( serializationBuffer, serializationBufferSize);
            m_table->serializeMore(&out);
            const int serialized = static_cast<int>(out.position());
            if (out.position() == 0) {
                break;
            }
            int ii = 16; // skip partition id, row count, and first tuple length
            while (ii < (serialized - 4)) {
                int32_t value = ntohl(*reinterpret_cast<int32_t*>(&serializationBuffer[ii]));
                const bool inserted = COWTuples.insert(value).second;
                if (!inserted) {
                    printf("Failed in iteration %d, total inserted %d, with pkey %d\n", qq, totalInsertedCOWTuples, value);
                }
                ASSERT_TRUE(inserted);
                totalInsertedCOWTuples++;
                ii += 68;
            }
            if (qq == 0) {
                if (totalInsertedCOWTuples > (tupleCount / 3)) {
                    break;
                }
            } else if (qq == 1) {
                if (totalInsertedCOWTuples > ((tupleCount / 3) * 2)) {
                    break;
                }
            }
        }

        voltdb::TableIndex *pkeyIndex = m_table->primaryKeyIndex();
        TableTuple key(pkeyIndex->getKeySchema());
        boost::scoped_array<char> backingStore(new char[pkeyIndex->getKeySchema()->tupleLength()]);
        key.moveNoHeader(backingStore.get());
        for (std::vector<int32_t>::iterator ii = pkeysToDelete[qq].begin(); ii != pkeysToDelete[qq].end(); ii++) {
            key.setNValue(0, ValueFactory::getIntegerValue(*ii));
            ASSERT_TRUE(pkeyIndex->moveToKey(&key));
            TableTuple tuple = pkeyIndex->nextValueAtKey();
            m_table->deleteTuple(tuple, true);
        }

        //std::cout << "Allocated tuple count before idle compactions " << m_table->allocatedTupleCount() << std::endl;
        m_table->doIdleCompaction();
        m_table->doIdleCompaction();
        //std::cout << "Allocated tuple count after idle compactions " << m_table->allocatedTupleCount() << std::endl;
        m_table->doForcedCompaction();

        stx::btree_set<int32_t> pkeysFoundAfterDelete;
        TableIterator& iter = m_table->iterator();
        TableTuple tuple(m_table->schema());
        while (iter.next(tuple)) {
            int32_t pkey = ValuePeeker::peekAsInteger(tuple.getNValue(0));
            key.setNValue(0, ValueFactory::getIntegerValue(pkey));
            for (int ii = 0; ii < 4; ii++) {
                ASSERT_TRUE(m_table->m_indexes[ii]->moveToKey(&key));
                TableTuple indexTuple = m_table->m_indexes[ii]->nextValueAtKey();
                ASSERT_EQ(indexTuple.address(), tuple.address());
            }
            pkeysFoundAfterDelete.insert(pkey);
        }

        std::vector<int32_t> diff;
        std::insert_iterator<std::vector<int32_t> > ii( diff, diff.begin());
        std::set_difference(pkeysNotDeleted[qq].begin(), pkeysNotDeleted[qq].end(), pkeysFoundAfterDelete.begin(), pkeysFoundAfterDelete.end(), ii);
        for (int ii = 0; ii < diff.size(); ii++) {
            printf("Key that was not deleted, but wasn't found is %d\n", diff[ii]);
        }

        diff.clear();
        ii = std::insert_iterator<std::vector<int32_t> >(diff, diff.begin());
        std::set_difference( pkeysFoundAfterDelete.begin(), pkeysFoundAfterDelete.end(), pkeysNotDeleted[qq].begin(), pkeysNotDeleted[qq].end(), ii);
        for (int ii = 0; ii < diff.size(); ii++) {
            printf("Key that was found after deletes, but shouldn't have been there was %d\n", diff[ii]);
        }

//        ASSERT_EQ(pkeysFoundAfterDelete.size(), pkeysNotDeleted.size());
//        ASSERT_TRUE(pkeysFoundAfterDelete == pkeysNotDeleted);
    //    std::cout << "Have " << m_table->m_data.size() << " blocks left " << m_table->allocatedTupleCount() << ", " << m_table->activeTupleCount() << std::endl;
//        ASSERT_EQ( m_table->m_data.size(), 13);
//
//        for (stx::btree_set<int32_t>::iterator ii = pkeysNotDeleted.begin(); ii != pkeysNotDeleted.end(); ii++) {
//            key.setNValue(0, ValueFactory::getIntegerValue(*ii));
//            ASSERT_TRUE(pkeyIndex->moveToKey(&key));
//            TableTuple tuple = pkeyIndex->nextValueAtKey();
//            m_table->deleteTuple(tuple, true);
//        }

    }
    m_table->doForcedCompaction();
    ASSERT_EQ( m_table->m_data.size(), 0);
    ASSERT_EQ( m_table->activeTupleCount(), 0);
    for (int ii = 0; ii < tupleCount; ii++) {
        ASSERT_TRUE(COWTuples.find(ii) != COWTuples.end());
    }
}
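
The scan of the serialization buffer relies on a fixed row layout: the first 16 bytes hold the partition id, row count, and leading tuple-length word (per the inline comment), each primary key is read as a big-endian int32 via ntohl, and ii += 68 steps over one serialized row at a time.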
Code example #19
File: CompactionTest.cpp Project: Sciumo/voltdb
TEST_F(CompactionTest, BasicCompaction) {
    initTable(true);
#ifdef MEMCHECK
    int tupleCount = 1000;
#else
    int tupleCount = 645260;
#endif
    addRandomUniqueTuples( m_table, tupleCount);

#ifdef MEMCHECK
    ASSERT_EQ( tupleCount, m_table->m_data.size());
#else
    ASSERT_EQ(20, m_table->m_data.size());
#endif

    stx::btree_set<int32_t> pkeysNotDeleted;
    std::vector<int32_t> pkeysToDelete;
    for (int ii = 0; ii < tupleCount; ii ++) {
        if (ii % 2 == 0) {
            pkeysToDelete.push_back(ii);
        } else {
            pkeysNotDeleted.insert(ii);
        }
    }

    voltdb::TableIndex *pkeyIndex = m_table->primaryKeyIndex();
    TableTuple key(pkeyIndex->getKeySchema());
    boost::scoped_array<char> backingStore(new char[pkeyIndex->getKeySchema()->tupleLength()]);
    key.moveNoHeader(backingStore.get());
    for (std::vector<int32_t>::iterator ii = pkeysToDelete.begin(); ii != pkeysToDelete.end(); ii++) {
        key.setNValue(0, ValueFactory::getIntegerValue(*ii));
        ASSERT_TRUE(pkeyIndex->moveToKey(&key));
        TableTuple tuple = pkeyIndex->nextValueAtKey();
        m_table->deleteTuple(tuple, true);
    }

    m_table->doForcedCompaction();

    stx::btree_set<int32_t> pkeysFoundAfterDelete;
    TableIterator& iter = m_table->iterator();
    TableTuple tuple(m_table->schema());
    while (iter.next(tuple)) {
        int32_t pkey = ValuePeeker::peekAsInteger(tuple.getNValue(0));
        key.setNValue(0, ValueFactory::getIntegerValue(pkey));
        for (int ii = 0; ii < 4; ii++) {
            ASSERT_TRUE(m_table->m_indexes[ii]->moveToKey(&key));
            TableTuple indexTuple = m_table->m_indexes[ii]->nextValueAtKey();
            ASSERT_EQ(indexTuple.address(), tuple.address());
        }
        pkeysFoundAfterDelete.insert(pkey);
    }

    std::vector<int32_t> diff;
    std::insert_iterator<std::vector<int32_t> > ii( diff, diff.begin());
    std::set_difference(pkeysNotDeleted.begin(), pkeysNotDeleted.end(), pkeysFoundAfterDelete.begin(), pkeysFoundAfterDelete.end(), ii);
    for (int ii = 0; ii < diff.size(); ii++) {
        printf("Key that was not deleted, but wasn't found is %d\n", diff[ii]);
    }

    diff.clear();
    ii = std::insert_iterator<std::vector<int32_t> >(diff, diff.begin());
    std::set_difference( pkeysFoundAfterDelete.begin(), pkeysFoundAfterDelete.end(), pkeysNotDeleted.begin(), pkeysNotDeleted.end(), ii);
    for (int ii = 0; ii < diff.size(); ii++) {
        printf("Key that was found after deletes, but shouldn't have been there was %d\n", diff[ii]);
    }

    ASSERT_EQ(pkeysFoundAfterDelete.size(), pkeysNotDeleted.size());
    ASSERT_TRUE(pkeysFoundAfterDelete == pkeysNotDeleted);
//    std::cout << "Have " << m_table->m_data.size() << " blocks left " << m_table->allocatedTupleCount() << ", " << m_table->activeTupleCount() << std::endl;
#ifdef MEMCHECK
    ASSERT_EQ( m_table->m_data.size(), 500);
#else
    ASSERT_EQ( m_table->m_data.size(), 13);
#endif

    for (stx::btree_set<int32_t>::iterator ii = pkeysNotDeleted.begin(); ii != pkeysNotDeleted.end(); ii++) {
        key.setNValue(0, ValueFactory::getIntegerValue(*ii));
        ASSERT_TRUE(pkeyIndex->moveToKey(&key));
        TableTuple tuple = pkeyIndex->nextValueAtKey();
        m_table->deleteTuple(tuple, true);
    }
    m_table->doForcedCompaction();
    ASSERT_EQ( m_table->m_data.size(), 0);
    ASSERT_EQ( m_table->activeTupleCount(), 0);
}