Example #1
size_t DRTupleStream::computeOffsets(DRRecordType &type,
        const std::pair<const TableIndex*, uint32_t> &indexPair,
        TableTuple &tuple,
        size_t &rowHeaderSz,
        size_t &rowMetadataSz,
        const std::vector<int> *&interestingColumns) {
    interestingColumns = NULL;
    rowMetadataSz = sizeof(int32_t);
    int columnCount;
    switch (type) {
    case DR_RECORD_DELETE:
    case DR_RECORD_UPDATE:
        if (indexPair.first) {
            // The index-optimized versions of these types have values exactly
            // 5 larger than the unoptimized versions (asserted in test)
            // DR_RECORD_DELETE => DR_RECORD_DELETE_BY_INDEX
            // DR_RECORD_UPDATE => DR_RECORD_UPDATE_BY_INDEX
            type = static_cast<DRRecordType>((int)type + 5);
            interestingColumns = &(indexPair.first->getColumnIndices());
            rowMetadataSz += sizeof(int32_t);
            columnCount = static_cast<int>(interestingColumns->size());
        } else {
            columnCount = tuple.sizeInValues();
        }
        break;
    default:
        columnCount = tuple.sizeInValues();
        break;
    }
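    // One null-mask bit per column, rounded up to a whole number of bytes.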
    int nullMaskLength = ((columnCount + 7) & -8) >> 3;
    rowHeaderSz = rowMetadataSz + nullMaskLength;
    return rowHeaderSz + tuple.maxDRSerializationSize(interestingColumns);
}
TEST_F(AntiCacheEvictionManagerTest, TestEvictionOrder)
{
    int num_tuples = 100; 

    initTable(true); 
      
    TableTuple tuple = m_table->tempTuple();
    int tuple_size = m_tableSchema->tupleLength() + TUPLE_HEADER_SIZE;

        
    for(int i = 0; i < num_tuples; i++) // insert num_tuples tuples
    {
        tuple.setNValue(0, ValueFactory::getIntegerValue(m_tuplesInserted++));
        tuple.setNValue(1, ValueFactory::getIntegerValue(rand()));
        m_table->insertTuple(tuple);
    }
    
    EvictionIterator itr(m_table); 

    itr.reserve(20 * tuple_size);

    ASSERT_TRUE(itr.hasNext());

    uint32_t oldTimeStamp = 0;

    while(itr.hasNext()) {
        itr.next(tuple); 
        
        uint32_t newTimeStamp = tuple.getTimeStamp();
        ASSERT_LE(oldTimeStamp, newTimeStamp);
        oldTimeStamp = newTimeStamp;
    }

    cleanupTable();
}
void AntiCacheEvictionManager::printLRUChain(PersistentTable* table, int max, bool forward)
{
    VOLT_INFO("num tuples in chain: %d", table->getNumTuplesInEvictionChain());
    VOLT_INFO("oldest tuple id: %u", table->getOldestTupleID());
    VOLT_INFO("newest tuple id: %u", table->getNewestTupleID());
    
    std::ostringstream chain;
    int tuple_id;
    TableTuple tuple = table->tempTuple();
    
    if(forward)
        tuple_id = table->getOldestTupleID();
    else
        tuple_id = table->getNewestTupleID();
    
    int iterations = 0;
    while(iterations < table->getNumTuplesInEvictionChain() && iterations < max)
    {
        chain << tuple_id << " ";
        
        tuple.move(table->dataPtrForTuple(tuple_id));
        
        if(forward)
            tuple_id = tuple.getNextTupleInChain();
        else
            tuple_id = tuple.getPreviousTupleInChain();
        
        iterations++;
    }
    
    VOLT_INFO("LRU CHAIN: %s", chain);
}
Table* AntiCacheEvictionManager::evictBlock(PersistentTable *table, long blockSize, int numBlocks) {
    int32_t lastTuplesEvicted = table->getTuplesEvicted();
    int32_t lastBlocksEvicted = table->getBlocksEvicted();
    int64_t lastBytesEvicted  = table->getBytesEvicted();
    
    if (table->evictBlockToDisk(blockSize, numBlocks) == false) {
        throwFatalException("Failed to evict tuples from table '%s'", table->name().c_str());
    }
    
    int32_t tuplesEvicted = table->getTuplesEvicted() - lastTuplesEvicted;
    int32_t blocksEvicted = table->getBlocksEvicted() - lastBlocksEvicted; 
    int64_t bytesEvicted = table->getBytesEvicted() - lastBytesEvicted;
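    // Publish this call's deltas through the single-row eviction result table.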
    
    m_evictResultTable->deleteAllTuples(false);
    TableTuple tuple = m_evictResultTable->tempTuple();
    
    int idx = 0;
    tuple.setNValue(idx++, ValueFactory::getStringValue(table->name()));
    tuple.setNValue(idx++, ValueFactory::getIntegerValue(static_cast<int32_t>(tuplesEvicted)));
    tuple.setNValue(idx++, ValueFactory::getIntegerValue(static_cast<int32_t>(blocksEvicted)));
    tuple.setNValue(idx++, ValueFactory::getBigIntValue(static_cast<int64_t>(bytesEvicted)));
    m_evictResultTable->insertTuple(tuple);
    
    return (m_evictResultTable);
}
Example #5
bool TableIndex::replaceEntryNoKeyChange(const TableTuple &destinationTuple, const TableTuple &originalTuple)
{
    assert(originalTuple.address() != destinationTuple.address());

    if (isPartialIndex()) {
        const AbstractExpression* predicate = getPredicate();
        const bool destinationPasses = predicate->eval(&destinationTuple, NULL).isTrue();
        const bool originalPasses = predicate->eval(&originalTuple, NULL).isTrue();
        if (!destinationPasses && !originalPasses) {
            // Neither tuple satisfies the predicate, so neither is indexed. Nothing to do.
            return true;
        } else if (destinationPasses && !originalPasses) {
            // The original tuple fails the predicate, meaning it is not indexed.
            // Simply add the new tuple.
            TableTuple conflict(destinationTuple.getSchema());
            addEntryDo(&destinationTuple, &conflict);
            return conflict.isNullTuple();
        } else if (!destinationPasses && originalPasses) {
            // The destination tuple fails the predicate. Simply delete the original tuple.
            return deleteEntryDo(&originalTuple);
        } else {
            // Both tuples satisfy the predicate.
            assert(destinationPasses && originalPasses);
            return replaceEntryNoKeyChangeDo(destinationTuple, originalTuple);
        }
    } else {
        return replaceEntryNoKeyChangeDo(destinationTuple, originalTuple);
    }
}
Example #6
bool CopyOnWriteContext::notifyTupleDelete(TableTuple &tuple) {
    assert(m_iterator != NULL);
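    // Already-dirty tuples are already tracked for the snapshot, and once the
    // full table scan has finished nothing more needs to be copied, so in
    // either case the delete needs no special handling here.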

    if (tuple.isDirty() || m_finishedTableScan) {
        return true;
    }

    /**
     * Find out which block the address is contained in. Lower bound returns the first entry
     * in the index >= the address. Unless the address happens to be equal then the block
     * we are looking for is probably the previous entry. Then check if the address fits
     * in the previous entry. If it doesn't then the block is something new.
     */
    TBPtr block = PersistentTable::findBlock(tuple.address(), m_blocks, getTable().getTableAllocationSize());
    if (block.get() == NULL) {
        // tuple not in snapshot region, don't care about this tuple
        return true;
    }

    /**
     * Now check where this is relative to the COWIterator.
     */
    CopyOnWriteIterator *iter = reinterpret_cast<CopyOnWriteIterator*>(m_iterator.get());
    return !iter->needToDirtyTuple(block->address(), tuple.address());
}
TEST_F(AntiCacheEvictionManagerTest, DeleteMultipleTuples)
{
    int num_tuples = 100; 

    initTable(true); 
      
    TableTuple tuple = m_table->tempTuple();
        
    for(int i = 0; i < num_tuples; i++) // insert num_tuples tuples
    {
        tuple.setNValue(0, ValueFactory::getIntegerValue(m_tuplesInserted++));
        tuple.setNValue(1, ValueFactory::getIntegerValue(rand()));
        m_table->insertTuple(tuple);
    }
    
    VOLT_INFO("%d == %d", num_tuples, m_table->getNumTuplesInEvictionChain());
    ASSERT_EQ(num_tuples, m_table->getNumTuplesInEvictionChain()); 
        
    int num_tuples_deleted = 0; 
    TableIterator itr(m_table); 
    while(itr.hasNext())  {
        itr.next(tuple); 
                
        if(rand() % 2 == 0) { // delete each tuple with probability .5
            m_table->deleteTuple(tuple, true); 
            ++num_tuples_deleted; 
        }
    }
        

    ASSERT_EQ((num_tuples - num_tuples_deleted), m_table->getNumTuplesInEvictionChain());
    
    cleanupTable();
}
Example #8
    void updateWithNullsTest() {
        beginTxn(99, 99, 98, 70);
        TableTuple first_tuple = insertTuple(m_table, prepareTempTuple(m_table, 42, 31241, "349508345.34583", "a thing", "a totally different thing altogether", 5433));
        TableTuple second_tuple = insertTuple(m_table, prepareTempTuple(m_table, 24, 2321, "23455.5554", "and another", "this is starting to get even sillier", 2222));
        endTxn(true);

        flushAndApply(99);

        EXPECT_EQ(2, m_tableReplica->activeTupleCount());

        beginTxn(100, 100, 99, 71);
        TableTuple tuple_to_update = m_table->lookupTupleByValues(first_tuple);
        ASSERT_FALSE(tuple_to_update.isNullTuple());
        TableTuple updated_tuple = secondTupleWithNulls(m_table);
        m_table->updateTuple(tuple_to_update, updated_tuple);
        endTxn(true);

        flushAndApply(100);

        EXPECT_EQ(2, m_tableReplica->activeTupleCount());
        TableTuple expected_tuple = secondTupleWithNulls(m_table);
        TableTuple tuple = m_tableReplica->lookupTupleByValues(expected_tuple);
        ASSERT_FALSE(tuple.isNullTuple());
        tuple = m_table->lookupTupleByValues(second_tuple);
        ASSERT_FALSE(tuple.isNullTuple());
    }
Example #9
TEST_F(DRBinaryLogTest, PartialTxnRollback) {
    beginTxn(98, 98, 97, 69);
    TableTuple first_tuple = insertTuple(m_table, prepareTempTuple(m_table, 99, 29058, "92384598.2342", "what", "really, why am I writing anything in these?", 3455));
    endTxn(true);

    beginTxn(99, 99, 98, 70);

    TableTuple second_tuple = insertTuple(m_table, prepareTempTuple(m_table, 42, 55555, "349508345.34583", "a thing", "a totally different thing altogether", 5433));

    // Simulate a second batch within the same txn
    UndoQuantum* uq = m_undoLog.generateUndoQuantum(m_undoToken + 1);
    m_context->setupForPlanFragments(uq, addPartitionId(99), addPartitionId(99),
                                     addPartitionId(98), addPartitionId(70));

    insertTuple(m_table, prepareTempTuple(m_table, 24, 2321, "23455.5554", "and another", "this is starting to get even sillier", 2222));

    m_undoLog.undo(m_undoToken + 1);

    endTxn(true);

    flushAndApply(100);

    EXPECT_EQ(2, m_tableReplica->activeTupleCount());
    TableTuple tuple = m_tableReplica->lookupTupleByValues(first_tuple);
    ASSERT_FALSE(tuple.isNullTuple());
    tuple = m_tableReplica->lookupTupleByValues(second_tuple);
    ASSERT_FALSE(tuple.isNullTuple());
}
Example #10
TEST_F(TableTupleTest, ComputeNonInlinedMemory)
{
    UniqueEngine engine = UniqueEngineBuilder().build();
    Pool *pool = ExecutorContext::getTempStringPool();

    // Make sure that inlined strings are actually inlined
    int32_t maxInlinableLength = UNINLINEABLE_OBJECT_LENGTH/MAX_BYTES_PER_UTF8_CHARACTER - 1;
    ScopedTupleSchema allInlineSchema{Tools::buildSchema(VALUE_TYPE_BIGINT,
                                                         std::make_pair(VALUE_TYPE_VARCHAR, maxInlinableLength))};
    PoolBackedTupleStorage tupleStorage;
    tupleStorage.init(allInlineSchema.get(), pool);
    tupleStorage.allocateActiveTuple();
    TableTuple inlineTuple = tupleStorage;

    Tools::setTupleValues(&inlineTuple, int64_t(0), "dude");
    EXPECT_EQ(0, inlineTuple.getNonInlinedMemorySizeForPersistentTable());

    // Now check that a non-inlined schema returns the right thing.
    int32_t nonInlinableLength = UNINLINEABLE_OBJECT_LENGTH + 10000;
    ScopedTupleSchema nonInlinedSchema{Tools::buildSchema(VALUE_TYPE_BIGINT,
                                                         std::make_pair(VALUE_TYPE_VARCHAR, nonInlinableLength))};
    tupleStorage.init(nonInlinedSchema.get(), pool);
    tupleStorage.allocateActiveTuple();
    TableTuple nonInlinedTuple = tupleStorage;
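    // Even a short value is stored out of line here: inlining is decided by the
    // declared column length, not by the length of the stored value.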

    NValue nonInlinedString = Tools::nvalueFromNative("123456");
    Tools::setTupleValues(&nonInlinedTuple, int64_t(0), nonInlinedString);
    EXPECT_EQ(nonInlinedString.getAllocationSizeForObjectInPersistentStorage(),
              nonInlinedTuple.getNonInlinedMemorySizeForPersistentTable());
}
Example #11
void NVMEvictedTable::deleteNVMEvictedTuple(TableTuple source) {
    if(source.address() == NULL)
        return; 
    
    source.freeObjectColumns();
    deleteTupleStorage(source);
}
Example #12
TEST_F(DRBinaryLogTest, PartitionedTableRollbacks) {
    m_singleColumnTable->setDR(false);

    beginTxn(99, 99, 98, 70);
    TableTuple source_tuple = insertTuple(m_table, prepareTempTuple(m_table, 42, 55555, "349508345.34583", "a thing", "a totally different thing altogether", 5433));
    endTxn(false);

    // Intentionally ignore the fact that a rollback wouldn't have actually advanced the
    // lastCommittedSpHandle. Our goal is to tick such that, if data had been produced,
    // it would flush itself out now
    ASSERT_FALSE(flush(99));

    EXPECT_EQ(-1, m_drStream.getLastCommittedSequenceNumberAndUniqueId().first);
    EXPECT_EQ(0, m_tableReplica->activeTupleCount());

    beginTxn(100, 100, 99, 71);
    source_tuple = insertTuple(m_table, prepareTempTuple(m_table, 99, 29058, "92384598.2342", "what", "really, why am I writing anything in these?", 3455));
    endTxn(true);

    // Roll back a txn that hasn't applied any binary log data
    beginTxn(101, 101, 100, 72);
    TableTuple temp_tuple = m_singleColumnTable->tempTuple();
    temp_tuple.setNValue(0, ValueFactory::getTinyIntValue(1));
    insertTuple(m_singleColumnTable, temp_tuple);
    endTxn(false);

    flushAndApply(101);

    EXPECT_EQ(1, m_tableReplica->activeTupleCount());
    TableTuple tuple = m_tableReplica->lookupTupleByValues(source_tuple);
    ASSERT_FALSE(tuple.isNullTuple());

    EXPECT_EQ(0, m_drStream.getLastCommittedSequenceNumberAndUniqueId().first);
}
Example #13
TEST_F(DRBinaryLogTest, SerializeNulls) {
    beginTxn(109, 99, 98, 70);
    TableTuple temp_tuple = m_replicatedTable->tempTuple();
    temp_tuple.setNValue(0, NValue::getNullValue(VALUE_TYPE_TINYINT));
    temp_tuple.setNValue(1, ValueFactory::getBigIntValue(489735));
    temp_tuple.setNValue(2, NValue::getNullValue(VALUE_TYPE_DECIMAL));
    m_cachedStringValues.push_back(ValueFactory::getStringValue("whatever"));
    temp_tuple.setNValue(3, m_cachedStringValues.back());
    temp_tuple.setNValue(4, ValueFactory::getNullStringValue());
    temp_tuple.setNValue(5, ValueFactory::getTimestampValue(3495));
    TableTuple first_tuple = insertTuple(m_replicatedTable, temp_tuple);

    temp_tuple = m_replicatedTable->tempTuple();
    temp_tuple.setNValue(0, ValueFactory::getTinyIntValue(42));
    temp_tuple.setNValue(1, NValue::getNullValue(VALUE_TYPE_BIGINT));
    temp_tuple.setNValue(2, ValueFactory::getDecimalValueFromString("234234.243"));
    temp_tuple.setNValue(3, ValueFactory::getNullStringValue());
    m_cachedStringValues.push_back(ValueFactory::getStringValue("whatever and ever and ever and ever"));
    temp_tuple.setNValue(4, m_cachedStringValues.back());
    temp_tuple.setNValue(5, NValue::getNullValue(VALUE_TYPE_TIMESTAMP));
    TableTuple second_tuple = insertTuple(m_replicatedTable, temp_tuple);
    endTxn(true);

    flushAndApply(99);

    EXPECT_EQ(2, m_replicatedTableReplica->activeTupleCount());
    TableTuple tuple = m_replicatedTableReplica->lookupTupleByValues(first_tuple);
    ASSERT_FALSE(tuple.isNullTuple());
    tuple = m_replicatedTableReplica->lookupTupleByValues(second_tuple);
    ASSERT_FALSE(tuple.isNullTuple());
}
TEST_F(AntiCacheEvictionManagerTest, InsertMultipleTuples)
{
    int num_tuples = 10; 

    initTable(true); 
    
    TableTuple tuple = m_table->tempTuple();
    
    uint32_t oldest_tuple_id, newest_tuple_id; 
    
    for(int i = 0; i < num_tuples; i++) // insert 10 tuples
    {
        tuple.setNValue(0, ValueFactory::getIntegerValue(m_tuplesInserted++));
        tuple.setNValue(1, ValueFactory::getIntegerValue(rand()));
        m_table->insertTuple(tuple);
        
        tuple = m_table->lookupTuple(tuple);

        if(i == 0)
        {
            oldest_tuple_id = m_table->getTupleID(tuple.address()); 
        }
        else if(i == num_tuples-1)
        {
            newest_tuple_id = m_table->getTupleID(tuple.address()); 
        }
    }
        
    ASSERT_EQ(num_tuples, m_table->getNumTuplesInEvictionChain()); 
    ASSERT_EQ(oldest_tuple_id, m_table->getOldestTupleID());
    ASSERT_EQ(newest_tuple_id, m_table->getNewestTupleID());
    
    cleanupTable();
}
Example #15
    // Delete some records from the table, forcing update of the geospatial index.
    static int deleteSomeRecords(PersistentTable* table, int totalTuples, int numTuplesToDelete) {
        std::cout << "            Deleting " << numTuplesToDelete << " tuples...\n";
        int numDeleted = 0;

        StandAloneTupleStorage tableTuple(table->schema());
        tableTuple.tuple().setNValue(GEOG_COL_INDEX, NValue::getNullValue(VALUE_TYPE_GEOGRAPHY));

        auto start = std::chrono::high_resolution_clock::now();
        std::chrono::microseconds usSpentDeleting = std::chrono::duration_cast<microseconds>(start - start);

        // Choose a random row, and delete it, and do this until we've
        // deleted as many rows as the caller has requested.
        // Sometimes the random number generator will select a
        // previously deleted row, and we just try again when this
        // happens.  This might seem like it would take a long time,
        // but practically it happens instantaneously.
        while (numDeleted < numTuplesToDelete) {
            int idOfTupleToDelete = std::rand() % totalTuples;
            tableTuple.tuple().setNValue(PK_COL_INDEX, ValueFactory::getIntegerValue(idOfTupleToDelete));
            TableTuple tupleToDelete = table->lookupTupleByValues(tableTuple.tuple());
            if (! tupleToDelete.isNullTuple()) {
                start = std::chrono::high_resolution_clock::now();
                table->deleteTuple(tupleToDelete);
                auto end = std::chrono::high_resolution_clock::now();
                usSpentDeleting += std::chrono::duration_cast<microseconds>(end - start);
                ++numDeleted;
            }
        }

        std::cout << "              Average duration of delete: " << (usSpentDeleting.count() / numDeleted) << " us\n";

        return numDeleted;
    }
Example #16
TEST_F(DRBinaryLogTest, DeleteWithUniqueIndexWhenAAEnabled) {
    m_engine->setIsActiveActiveDREnabled(true);
    createIndexes();
    std::pair<const TableIndex*, uint32_t> indexPair = m_table->getUniqueIndexForDR();
    std::pair<const TableIndex*, uint32_t> indexPairReplica = m_tableReplica->getUniqueIndexForDR();
    ASSERT_TRUE(indexPair.first == NULL);
    ASSERT_TRUE(indexPairReplica.first == NULL);
    EXPECT_EQ(indexPair.second, 0);
    EXPECT_EQ(indexPairReplica.second, 0);

    beginTxn(99, 99, 98, 70);
    TableTuple first_tuple = insertTuple(m_table, prepareTempTuple(m_table, 42, 55555, "349508345.34583", "a thing", "a totally different thing altogether", 5433));
    TableTuple second_tuple = insertTuple(m_table, prepareTempTuple(m_table, 24, 2321, "23455.5554", "and another", "this is starting to get even sillier", 2222));
    TableTuple third_tuple = insertTuple(m_table, prepareTempTuple(m_table, 72, 345, "4256.345", "something", "more tuple data, really not the same", 1812));
    endTxn(true);

    flushAndApply(99);

    EXPECT_EQ(3, m_tableReplica->activeTupleCount());

    beginTxn(100, 100, 99, 71);
    deleteTuple(m_table, first_tuple);
    deleteTuple(m_table, second_tuple);
    endTxn(true);

    flushAndApply(100);

    EXPECT_EQ(1, m_tableReplica->activeTupleCount());
    TableTuple tuple = m_tableReplica->lookupTupleByValues(third_tuple);
    ASSERT_FALSE(tuple.isNullTuple());
}
Example #17
 void addRandomUniqueTuples(Table *table, int numTuples) {
     TableTuple tuple = table->tempTuple();
     for (int ii = 0; ii < numTuples; ii++) {
         tuple.setNValue(0, ValueFactory::getIntegerValue(m_primaryKeyIndex++));
         tuple.setNValue(1, ValueFactory::getIntegerValue(rand()));
         table->insertTuple(tuple);
     }
 }
Example #18
void MaterializedViewMetadata::processTupleDelete(TableTuple &oldTuple) {
    // don't change the view if this tuple doesn't match the predicate
    if (m_filterPredicate && (m_filterPredicate->eval(&oldTuple, NULL).isFalse()))
        return;

    // this will assert if the tuple isn't there as param expected is true
    findExistingTuple(oldTuple, true);

    // clear the tuple that will be built to insert or overwrite
    memset(m_updatedTupleBackingStore, 0, m_target->schema()->tupleLength() + 1);

    //printf("  Existing tuple: %s.\n", m_existingTuple.debugNoHeader().c_str());
    //fflush(stdout);

    // set up the first column, which is a count
    NValue count = m_existingTuple.getNValue(m_groupByColumnCount).op_decrement();

    //printf("  Count is: %d.\n", (int)(m_existingTuple.getSlimValue(m_groupByColumnCount).getBigInt()));
    //fflush(stdout);

    // check if we should remove the tuple
    if (count.isZero()) {
        m_target->deleteTuple(m_existingTuple, true);
        return;
    }
    // assume from here that we're just updating the existing row

    int colindex = 0;
    // set up the first n columns, based on group-by columns
    for (colindex = 0; colindex < m_groupByColumnCount; colindex++) {
        m_updatedTuple.setNValue(colindex, oldTuple.getNValue(m_groupByColumns[colindex]));
    }

    m_updatedTuple.setNValue(colindex, count);
    colindex++;

    // set values for the other columns
    for (int i = colindex; i < m_outputColumnCount; i++) {
        NValue oldValue = oldTuple.getNValue(m_outputColumnSrcTableIndexes[i]);
        NValue existingValue = m_existingTuple.getNValue(i);

        if (m_outputColumnAggTypes[i] == EXPRESSION_TYPE_AGGREGATE_SUM) {
            m_updatedTuple.setNValue(i, existingValue.op_subtract(oldValue));
        }
        else if (m_outputColumnAggTypes[i] == EXPRESSION_TYPE_AGGREGATE_COUNT) {
            m_updatedTuple.setNValue(i, existingValue.op_decrement());
        }
        else {
            throw SerializableEEException(VOLT_EE_EXCEPTION_TYPE_EEEXCEPTION,
                                          "Error in materialized view table"
                                          " update.");
        }
    }

    // update the row
    // shouldn't need to update indexes as this shouldn't ever change the key
    m_target->updateTuple(m_updatedTuple, m_existingTuple, false);
}
Example #19
void MaterializedViewMetadata::processTupleInsert(TableTuple &newTuple) {
    // don't change the view if this tuple doesn't match the predicate
    if (m_filterPredicate
        && (m_filterPredicate->eval(&newTuple, NULL).isFalse()))
        return;

    bool exists = findExistingTuple(newTuple);
    if (!exists) {
        // create a blank tuple
        m_existingTuple.move(m_emptyTupleBackingStore);
    }

    // clear the tuple that will be built to insert or overwrite
    memset(m_updatedTupleBackingStore, 0, m_target->schema()->tupleLength() + 1);

    int colindex = 0;
    // set up the first n columns, based on group-by columns
    for (colindex = 0; colindex < m_groupByColumnCount; colindex++) {
        m_updatedTuple.setNValue(colindex,
                                 newTuple.getNValue(m_groupByColumns[colindex]));
    }

    // set up the next column, which is a count
    m_updatedTuple.setNValue(colindex,
                             m_existingTuple.getNValue(colindex).op_increment());
    colindex++;

    // set values for the other columns
    for (int i = colindex; i < m_outputColumnCount; i++) {
        NValue newValue = newTuple.getNValue(m_outputColumnSrcTableIndexes[i]);
        NValue existingValue = m_existingTuple.getNValue(i);

        if (m_outputColumnAggTypes[i] == EXPRESSION_TYPE_AGGREGATE_SUM) {
            m_updatedTuple.setNValue(i, newValue.op_add(existingValue));
        }
        else if (m_outputColumnAggTypes[i] == EXPRESSION_TYPE_AGGREGATE_COUNT) {
            m_updatedTuple.setNValue(i, existingValue.op_increment());
        }
        else {
            char message[128];
            sprintf(message, "Error in materialized view table update for"
                    " col %d. Expression type %d", i, m_outputColumnAggTypes[i]);
            throw SerializableEEException(VOLT_EE_EXCEPTION_TYPE_EEEXCEPTION,
                                          message);
        }
    }

    // update or insert the row
    if (exists) {
        // shouldn't need to update indexes as this shouldn't ever change the
        // key
        m_target->updateTuple(m_updatedTuple, m_existingTuple, false);
    }
    else {
        m_target->insertTuple(m_updatedTuple);
    }
}
Example #20
// for debugging - may be unused
void SetOperator::printTupleMap(const char* nonce, TupleMap &tuples) {
    printf("Printing TupleMap (%s): ", nonce);
    for (TupleMap::const_iterator mapIt = tuples.begin(); mapIt != tuples.end(); ++mapIt) {
        TableTuple tuple = mapIt->first;
        printf("%s, ", tuple.debugNoHeader().c_str());
    }
    printf("\n");
    fflush(stdout);
}
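// Allocates both the TableTuple wrapper and its backing storage; the caller
// owns, and must eventually free, both allocations.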
TableTuple *newTuple(TupleSchema *schema, int idx, long value) {
    TableTuple *tuple = new TableTuple(schema);
    char *data = new char[tuple->tupleLength()];
    memset(data, 0, tuple->tupleLength());
    tuple->move(data);

    tuple->setNValue(idx, ValueFactory::getBigIntValue(value));
    return tuple;
}
Example #22
bool Table::checkNulls(TableTuple& tuple) const {
    assert (m_columnCount == tuple.columnCount());
    for (int i = m_columnCount - 1; i >= 0; --i) {
        if (( ! m_allowNulls[i]) && tuple.isNull(i)) {
            VOLT_TRACE("Attribute %d was NULL, but it is non-nullable.", i);
            return false;
        }
    }
    return true;
}
TEST_F(AntiCacheEvictionManagerTest, InsertTuple)
{
    initTable(true); 
    
    TableTuple tuple = m_table->tempTuple();
    
    tuple.setNValue(0, ValueFactory::getIntegerValue(m_tuplesInserted++));
    tuple.setNValue(1, ValueFactory::getIntegerValue(rand()));
    m_table->insertTuple(tuple);

}
Example #24
    // Load table from the polygons in the string POLYGONS, defined in
    // polygons.hpp.  Also print out some stats about how long it
    // took.
    //
    // The workload is 1000 generated polygons created by
    // PolygonFactory in Java.  They are all bounded to an area
    // approximately in the continental US, and so may overlap:
    //   o 25% regular convex
    //   o 25% regular convex with a hole in the center
    //   o 25% star-shaped
    //   o 25% star-shaped with a hole in the center
    // Also, add a null polygon.
    //
    // In memcheck mode, only loads 50 rows.
    void loadTable(PersistentTable* table) {
#ifndef MEMCHECK
        int rowLimit = -1;
#else
        int rowLimit = 50;
#endif

        std::cout << "\n            Loading polygons...\n";
        std::istringstream instream(POLYGONS); // defined in polygons.hpp

        TableTuple tempTuple = table->tempTuple();
        auto start = std::chrono::high_resolution_clock::now();
        std::chrono::microseconds usSpentInserting = std::chrono::duration_cast<microseconds>(start - start);

        int pk = 0;
        std::string line;
        while (std::getline(instream, line)) {
            tempTuple.setNValue(PK_COL_INDEX, ValueFactory::getIntegerValue(pk));
            tempTuple.setNValue(GEOG_COL_INDEX, polygonWktToNval(line));

            start = std::chrono::high_resolution_clock::now();
            table->insertTuple(tempTuple);
            auto end = std::chrono::high_resolution_clock::now();
            usSpentInserting += std::chrono::duration_cast<microseconds>(end - start);

            ++pk;
            if (rowLimit > 0 && pk > rowLimit) {
                break;
            }
        }

        std::cout << "              Average duration of insert: " << (usSpentInserting.count() / pk) << " us\n";

        // Add a null value
        tempTuple.setNValue(PK_COL_INDEX, ValueFactory::getIntegerValue(pk));
        tempTuple.setNValue(GEOG_COL_INDEX, NValue::getNullValue(VALUE_TYPE_GEOGRAPHY));
        table->insertTuple(tempTuple);

        // Dump some stats about the index.
        CoveringCellIndex* ccIndex = static_cast<CoveringCellIndex*>(table->index("poly_idx"));
        CoveringCellIndex::StatsForTest stats = ccIndex->getStatsForTest(table);

        double cellsPerPoly = double(stats.numCells) / stats.numPolygons;
        std::cout << "              Cells per polygon: " << cellsPerPoly << "\n";

        // Use km^2, since the areas are large.
        double areaPerPoly = (stats.polygonsArea / stats.numPolygons) / 1000000.0;
        double areaPerCellCovering = (stats.cellsArea / stats.numPolygons) / 1000000.0;
        std::cout << "              Average area per polygon: " << areaPerPoly << " km^2\n";
        std::cout << "              Average area per cell covering: " << areaPerCellCovering << " km^2\n";
        std::cout << "              Cell area divided by polygon area (lower is better): "
                  <<  (areaPerCellCovering / areaPerPoly) << "\n";
    }
Example #25
TEST_F(DRBinaryLogTest, ReplicatedTableWrites) {
    // write to only the replicated table
    beginTxn(109, 99, 98, 70);
    TableTuple first_tuple = insertTuple(m_replicatedTable, prepareTempTuple(m_replicatedTable, 42, 55555, "349508345.34583", "a thing", "a totally different thing altogether", 5433));
    endTxn(true);

    flushAndApply(99);

    EXPECT_EQ(0, m_tableReplica->activeTupleCount());
    EXPECT_EQ(1, m_replicatedTableReplica->activeTupleCount());
    TableTuple tuple = m_replicatedTableReplica->lookupTupleByValues(first_tuple);
    ASSERT_FALSE(tuple.isNullTuple());

    // write to both the partitioned and replicated table
    beginTxn(110, 100, 99, 71);
    first_tuple = insertTuple(m_table, prepareTempTuple(m_table, 72, 345, "4256.345", "something", "more tuple data, really not the same", 1812));
    TableTuple second_tuple = insertTuple(m_replicatedTable, prepareTempTuple(m_replicatedTable, 7, 234, "23452436.54", "what", "this is starting to get silly", 2342));
    endTxn(true);

    flushAndApply(100);

    EXPECT_EQ(1, m_tableReplica->activeTupleCount());
    EXPECT_EQ(2, m_replicatedTableReplica->activeTupleCount());
    tuple = m_tableReplica->lookupTupleByValues(first_tuple);
    ASSERT_FALSE(tuple.isNullTuple());
    tuple = m_replicatedTableReplica->lookupTupleByValues(second_tuple);
    ASSERT_FALSE(tuple.isNullTuple());

    // write to the partitioned and replicated table and roll it back
    beginTxn(111, 101, 100, 72);
    first_tuple = insertTuple(m_table, prepareTempTuple(m_table, 11, 34534, "3453.4545", "another", "blah blah blah blah blah blah", 2344));
    second_tuple = insertTuple(m_replicatedTable, prepareTempTuple(m_replicatedTable, 24, 2321, "23455.5554", "and another", "this is starting to get even sillier", 2222));
    endTxn(false);

    ASSERT_FALSE(flush(101));

    // one more write to the replicated table for good measure
    beginTxn(112, 102, 101, 73);
    second_tuple = insertTuple(m_replicatedTable, prepareTempTuple(m_replicatedTable, 99, 29058, "92384598.2342", "what", "really, why am I writing anything in these?", 3455));
    endTxn(true);

    flushAndApply(102);
    EXPECT_EQ(1, m_tableReplica->activeTupleCount());
    EXPECT_EQ(3, m_replicatedTableReplica->activeTupleCount());
    tuple = m_replicatedTableReplica->lookupTupleByValues(second_tuple);
    ASSERT_FALSE(tuple.isNullTuple());

    DRCommittedInfo committed = m_drStream.getLastCommittedSequenceNumberAndUniqueIds();
    EXPECT_EQ(0, committed.seqNum);
    committed = m_drReplicatedStream.getLastCommittedSequenceNumberAndUniqueIds();
    EXPECT_EQ(2, committed.seqNum);
}
Example #26
 void addRandomUniqueTuples(Table *table, int numTuples) {
     TableTuple tuple = table->tempTuple();
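     // Skip the tuple header byte and zero the rest of the storage so the
     // non-key columns start from a known state.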
     ::memset(tuple.address() + 1, 0, tuple.tupleLength() - 1);
     for (int ii = 0; ii < numTuples; ii++) {
         tuple.setNValue(0, ValueFactory::getIntegerValue(m_primaryKeyIndex++));
         tuple.setNValue(1, ValueFactory::getIntegerValue(rand()));
         bool success = table->insertTuple(tuple);
         if (!success) {
             std::cout << "Failed to add random unique tuple" << std::endl;
             return;
         }
     }
 }
/**
 * Clean up after consuming indexed tuples.
 */
void ElasticIndexReadContext::deleteStreamedTuples()
{
    // Delete the indexed tuples that were streamed.
    // Undo token release will cause the index to delete the corresponding items
    // via notifications.
    m_iter->reset();
    TableTuple tuple;
    while (m_iter->next(tuple)) {
        if (!tuple.isPendingDelete()) {
            m_surgeon.deleteTuple(tuple);
        }
    }
}
// NOTE: this test still does not pass
TEST_F(AntiCacheEvictionManagerTest, UpdateIndexPerformance)
{
    int num_tuples = 100000;
    int num_index_updates = 8;

    struct timeval start, end;

    long  seconds, useconds;
    double mtime; 

    initTable(true); 

    TableTuple tuple = m_table->tempTuple();

    int iterations = 0;

    for(int i = 0; i < num_tuples; i++) // insert tuples
    {
        tuple.setNValue(0, ValueFactory::getIntegerValue(m_tuplesInserted++));
        tuple.setNValue(1, ValueFactory::getIntegerValue(rand()));
        m_table->insertTuple(tuple);
    }

    for(int i = 0; i < num_index_updates; i++)
    {
        TableIterator itr1(m_table);
        iterations = 0; 
        gettimeofday(&start, NULL);
        while(itr1.hasNext())
        {
            itr1.next(tuple);
            for(int j = 0; j < i+1; j++)
            {
                m_table->setEntryToNewAddressForAllIndexes(&tuple, NULL);
            }

            if(++iterations == 1000)
                break; 
        }
        gettimeofday(&end, NULL);

        seconds  = end.tv_sec  - start.tv_sec;
        useconds = end.tv_usec - start.tv_usec;
        mtime = (double)((seconds) * 1000 + useconds/1000);

        VOLT_INFO("total time for 1000 index updates: %f milliseconds", mtime);
    }

    cleanupTable();
}
Example #29
TEST_F(DRBinaryLogTest, SerializeNulls) {
    beginTxn(109, 99, 98, 70);
    TableTuple first_tuple = insertTuple(m_replicatedTable, firstTupleWithNulls(m_replicatedTable));
    TableTuple second_tuple = insertTuple(m_replicatedTable, secondTupleWithNulls(m_replicatedTable));
    endTxn(true);

    flushAndApply(99);

    EXPECT_EQ(2, m_replicatedTableReplica->activeTupleCount());
    TableTuple tuple = m_replicatedTableReplica->lookupTupleByValues(first_tuple);
    ASSERT_FALSE(tuple.isNullTuple());
    tuple = m_replicatedTableReplica->lookupTupleByValues(second_tuple);
    ASSERT_FALSE(tuple.isNullTuple());
}
TEST_F(PersistentTableMemStatsTest, UpdateAndUndoTest) {
    initTable(true);
    tableutil::addRandomTuples(m_table, 10);
    int64_t orig_size = m_table->nonInlinedMemorySize();
    //cout << "Original non-inline size: " << orig_size << endl;

    TableTuple tuple(m_tableSchema);
    tableutil::getRandomTuple(m_table, tuple);
    //cout << "Retrieved random tuple " << endl << tuple.debugNoHeader() << endl;

    size_t removed_bytes =
        StringRef::computeStringMemoryUsed(ValuePeeker::peekObjectLength(tuple.getNValue(1))) +
        StringRef::computeStringMemoryUsed(ValuePeeker::peekObjectLength(tuple.getNValue(2)));
    //cout << "Removing bytes from table: " << removed_bytes << endl;

    /*
     * A copy of the tuple to modify and use as a source tuple when
     * updating the new tuple.
     */
    TableTuple tempTuple = m_table->tempTuple();
    tempTuple.copy(tuple);
    string strval = "123456";
    NValue new_string = ValueFactory::getStringValue(strval);
    tempTuple.setNValue(1, new_string);
    //cout << "Created random tuple " << endl << tempTuple.debugNoHeader() << endl;
    size_t added_bytes =
        StringRef::computeStringMemoryUsed(ValuePeeker::peekObjectLength(tempTuple.getNValue(1))) +
        StringRef::computeStringMemoryUsed(ValuePeeker::peekObjectLength(tempTuple.getNValue(2)));
    //cout << "Adding bytes to table: " << added_bytes << endl;

    m_engine->setUndoToken(INT64_MIN + 2);
    // this next line is a testing hack until engine data is
    // de-duplicated with executorcontext data
    m_engine->getExecutorContext();

    m_table->updateTuple(tempTuple, tuple, true);

    ASSERT_EQ(orig_size + added_bytes - removed_bytes, m_table->nonInlinedMemorySize());

    m_engine->undoUndoToken(INT64_MIN + 2);

    ASSERT_EQ(orig_size, m_table->nonInlinedMemorySize());

    //tuple.freeObjectColumns();
    //tempTuple.freeObjectColumns();
    //delete [] tuple.address();
    //delete[] tempTuple.address();
    new_string.free();
}