// UpdateExecutor::p_execute -- prologue only; the remainder of this method
// lies outside the visible portion of this file chunk, so only the
// index-selection preamble is documented here.
//
// Resolves the target PersistentTable and pre-computes the subset of its
// indexes whose key columns are mutated by this UPDATE, so that per-tuple
// index maintenance later on can be restricted to just those indexes.
//
// NOTE(review): "&params" appeared mojibake'd ("¶ms") in this capture of
// the source; the original token is restored below.
bool UpdateExecutor::p_execute(const NValueArray &params) {
    assert(m_inputTable);

    // target table should be persistenttable
    PersistentTable* targetTable = dynamic_cast<PersistentTable*>(m_node->getTargetTable());
    assert(targetTable);

    TableTuple targetTuple = TableTuple(targetTable->schema());

    VOLT_TRACE("INPUT TABLE: %s\n", m_inputTable->debug().c_str());
    VOLT_TRACE("TARGET TABLE - BEFORE: %s\n", targetTable->debug().c_str());

    // determine which indices are updated by this executor
    // iterate through all target table indices and see if they contain
    // columns mutated by this executor
    std::vector<TableIndex*> indexesToUpdate;
    const std::vector<TableIndex*>& allIndexes = targetTable->allIndexes();
    BOOST_FOREACH(TableIndex *index, allIndexes) {
        bool indexKeyUpdated = false;
        BOOST_FOREACH(int colIndex, index->getColumnIndices()) {
            // needs to be here because of macro failure (BOOST_FOREACH cannot
            // declare a std::pair loop variable inline — the comma confuses
            // the macro expansion)
            std::pair<int, int> updateColInfo;
            // NOTE(review): presumably updateColInfo maps input-tuple column
            // (first) to target-table column (second) — confirm against this
            // executor's p_init, which is outside the visible chunk.
            BOOST_FOREACH(updateColInfo, m_inputTargetMap) {
                if (updateColInfo.second == colIndex) {
                    indexKeyUpdated = true;
                    break;
                }
            }
            // one mutated key column is enough to mark the whole index
            if (indexKeyUpdated) break;
        }
        if (indexKeyUpdated) {
            indexesToUpdate.push_back(index);
        }
    }
// Executes an INSERT (or UPSERT) plan node: projects every row of the input
// temp table through the template tuple into the target table, routing rows
// by partition where applicable, and publishes the number of modified rows
// through both the output table and the engine's modified-tuple counter.
//
// @param params  parameterized-statement arguments (not read directly here;
//                the template tuple was materialized upstream).
// @return true on success; false if any insert/update fails.
// @throws ConstraintFailureException on a mispartitioned tuple in a
//         single-partition insert.
//
// NOTE(review): "&params" appeared mojibake'd ("¶ms") in this capture of
// the source; the original token is restored below.
bool InsertExecutor::p_execute(const NValueArray &params) {
    assert(m_node == dynamic_cast<InsertPlanNode*>(m_abstractNode));
    assert(m_node);
    assert(m_inputTable == dynamic_cast<TempTable*>(m_node->getInputTable()));
    assert(m_inputTable);

    // Target table can be StreamedTable or PersistentTable and must not be NULL
    // Update target table reference from table delegate
    Table* targetTable = m_node->getTargetTable();
    assert(targetTable);
    assert((targetTable == dynamic_cast<PersistentTable*>(targetTable)) ||
           (targetTable == dynamic_cast<StreamedTable*>(targetTable)));

    // Only a non-streamed target is a PersistentTable; the purge-fragment and
    // upsert paths below are the only users of this pointer.
    PersistentTable* persistentTable = m_isStreamed ?
        NULL : static_cast<PersistentTable*>(targetTable);
    TableTuple upsertTuple = TableTuple(targetTable->schema());

    VOLT_TRACE("INPUT TABLE: %s\n", m_inputTable->debug().c_str());

    // count the number of successful inserts
    int modifiedTuples = 0;

    Table* outputTable = m_node->getOutputTable();
    assert(outputTable);

    TableTuple templateTuple = m_templateTuple.tuple();

    // Stamp NOW()/CURRENT_TIMESTAMP fields once, up front, so every row
    // inserted by this execution shares the same timestamp value.
    std::vector<int>::iterator it;
    for (it = m_nowFields.begin(); it != m_nowFields.end(); ++it) {
        templateTuple.setNValue(*it, NValue::callConstant<FUNC_CURRENT_TIMESTAMP>());
    }

    VOLT_DEBUG("This is a %s-row insert on partition with id %d",
               m_node->getChildren()[0]->getPlanNodeType() == PLAN_NODE_TYPE_MATERIALIZE ?
               "single" : "multi", m_engine->getPartitionId());
    VOLT_DEBUG("Offset of partition column is %d", m_partitionColumn);

    //
    // An insert is quite simple really. We just loop through our m_inputTable
    // and insert any tuple that we find into our targetTable. It doesn't get
    // any easier than that!
    //
    TableTuple inputTuple(m_inputTable->schema());
    assert (inputTuple.sizeInValues() == m_inputTable->columnCount());
    TableIterator iterator = m_inputTable->iterator();
    while (iterator.next(inputTuple)) {
        // Project the input row into the template tuple via the field map.
        // NOTE(review): getFieldMap() is re-fetched every iteration and its
        // unsigned size() is compared against a signed int — harmless but
        // could be hoisted; left untouched here.
        for (int i = 0; i < m_node->getFieldMap().size(); ++i) {
            // Most executors will just call setNValue instead of
            // setNValueAllocateForObjectCopies.
            //
            // However, we need to call setNValueAllocateForObjectCopies
            // here. Sometimes the input table's schema has an inlined string
            // field, and it's being assigned to the target table's outlined
            // string field. In this case we need to tell the NValue where to
            // allocate the string data.
            templateTuple.setNValueAllocateForObjectCopies(m_node->getFieldMap()[i],
                                                           inputTuple.getNValue(i),
                                                           ExecutorContext::getTempStringPool());
        }

        VOLT_TRACE("Inserting tuple '%s' into target table '%s' with table schema: %s",
                   templateTuple.debug(targetTable->name()).c_str(), targetTable->name().c_str(),
                   targetTable->schema()->debug().c_str());

        // if there is a partition column for the target table
        if (m_partitionColumn != -1) {
            // get the value for the partition column
            NValue value = templateTuple.getNValue(m_partitionColumn);
            bool isLocal = m_engine->isLocalSite(value);

            // if it doesn't map to this site
            if (!isLocal) {
                // A row that hashes elsewhere is a constraint violation in a
                // single-partition statement, but expected in multi-partition
                // execution (some other site will insert it).
                if (!m_multiPartition) {
                    throw ConstraintFailureException(
                            dynamic_cast<PersistentTable*>(targetTable),
                            templateTuple,
                            "Mispartitioned tuple in single-partition insert statement.");
                }

                // don't insert
                continue;
            }
        }

        // for multi partition export tables, only insert into one
        // place (the partition with hash(0)), if the data is from a
        // replicated source. If the data is coming from a subquery
        // with partitioned tables, we need to perform the insert on
        // every partition.
        if (m_isStreamed && m_multiPartition && !m_sourceIsPartitioned) {
            bool isLocal = m_engine->isLocalSite(ValueFactory::getBigIntValue(0));
            if (!isLocal) continue;
        }

        if (! m_isUpsert) {
            // try to put the tuple into the target table
            if (m_hasPurgeFragment) {
                if (!executePurgeFragmentIfNeeded(&persistentTable)) return false;
                // purge fragment might have truncated the table, and
                // refreshed the persistent table pointer. Make sure to
                // use it when doing the insert below.
                targetTable = persistentTable;
            }
            if (!targetTable->insertTuple(templateTuple)) {
                VOLT_ERROR("Failed to insert tuple from input table '%s' into"
                           " target table '%s'",
                           m_inputTable->name().c_str(),
                           targetTable->name().c_str());
                return false;
            }
        } else {
            // upsert execution logic
            // NOTE(review): this branch dereferences persistentTable, so
            // upsert presumably implies a non-streamed target with a primary
            // key — confirm that the planner enforces this upstream.
            assert(persistentTable->primaryKeyIndex() != NULL);
            TableTuple existsTuple = persistentTable->lookupTupleByValues(templateTuple);

            if (existsTuple.isNullTuple()) {
                // no matching primary key: behave like a plain insert
                // try to put the tuple into the target table
                if (m_hasPurgeFragment) {
                    if (!executePurgeFragmentIfNeeded(&persistentTable)) return false;
                }
                if (!persistentTable->insertTuple(templateTuple)) {
                    VOLT_ERROR("Failed to insert tuple from input table '%s' into"
                               " target table '%s'",
                               m_inputTable->name().c_str(),
                               persistentTable->name().c_str());
                    return false;
                }
            } else {
                // tuple exists already, try to update the tuple instead
                upsertTuple.move(templateTuple.address());
                TableTuple &tempTuple = persistentTable->getTempTupleInlined(upsertTuple);

                if (!persistentTable->updateTupleWithSpecificIndexes(existsTuple, tempTuple,
                                                                     persistentTable->allIndexes())) {
                    VOLT_INFO("Failed to update existsTuple from table '%s'",
                              persistentTable->name().c_str());
                    return false;
                }
            }
        }

        // successfully inserted or updated
        modifiedTuples++;
    }

    // Publish the modified-row count as the executor's single output row.
    TableTuple& count_tuple = outputTable->tempTuple();
    count_tuple.setNValue(0, ValueFactory::getBigIntValue(modifiedTuples));
    // try to put the tuple into the output table
    if (!outputTable->insertTuple(count_tuple)) {
        VOLT_ERROR("Failed to insert tuple count (%d) into"
                   " output table '%s'",
                   modifiedTuples,
                   outputTable->name().c_str());
        return false;
    }

    // add to the planfragments count of modified tuples
    m_engine->addToTuplesModified(modifiedTuples);
    VOLT_DEBUG("Finished inserting %d tuples", modifiedTuples);
    return true;
}
TEST_F(PersistentTableTest, DRTimestampColumn) {
    // Load a catalog with active/active DR enabled; table "T" is DRed and
    // therefore carries a hidden DR-timestamp column at hidden index 0.
    getEngine()->loadCatalog(0, catalogPayload());
    PersistentTable *drTable =
        dynamic_cast<PersistentTable*>(getEngine()->getTable("T"));
    ASSERT_NE(NULL, drTable);
    ASSERT_EQ(true, drTable->hasDRTimestampColumn());
    ASSERT_EQ(0, drTable->getDRTimestampColumnIndex());

    const voltdb::TupleSchema* tupleSchema = drTable->schema();
    ASSERT_EQ(1, tupleSchema->hiddenColumnCount());

    voltdb::StandAloneTupleStorage stagingStorage(tupleSchema);
    TableTuple &stagingTuple = const_cast<TableTuple&>(stagingStorage.tuple());

    NValue idValues[] = {
        ValueFactory::getBigIntValue(1900),
        ValueFactory::getBigIntValue(1901),
        ValueFactory::getBigIntValue(1902)
    };
    NValue mottoValues[] = {
        ValueFactory::getTempStringValue("Je me souviens"),
        ValueFactory::getTempStringValue("Ut Incepit Fidelis Sic Permanet"),
        ValueFactory::getTempStringValue("Splendor sine occasu")
    };

    // Insert three rows inside a single transaction.
    beginWork();
    for (int row = 0; row < 3; ++row) {
        stagingTuple.setNValue(0, idValues[row]);
        stagingTuple.setNValue(1, mottoValues[row]);
        drTable->insertTuple(stagingTuple);
    }
    commit();

    // Every inserted row should now carry the current DR timestamp in its
    // hidden column, alongside its visible values.
    int64_t tsBefore = ExecutorContext::getExecutorContext()->currentDRTimestamp();
    NValue tsBeforeValue = ValueFactory::getBigIntValue(tsBefore);

    TableTuple scanned(tupleSchema);
    TableIterator scan = drTable->iteratorDeletingAsWeGo();
    const int hiddenTsCol = drTable->getDRTimestampColumnIndex();
    int rowIndex = 0;
    while (scan.next(scanned)) {
        // DR timestamp is set for each row.
        EXPECT_EQ(0, scanned.getHiddenNValue(hiddenTsCol).compare(tsBeforeValue));
        EXPECT_EQ(0, scanned.getNValue(0).compare(idValues[rowIndex]));
        EXPECT_EQ(0, scanned.getNValue(1).compare(mottoValues[rowIndex]));
        ++rowIndex;
    }

    // Update the middle row in a fresh transaction: its DR timestamp must
    // advance while the untouched rows keep the old one.
    beginWork();
    NValue updatedMotto = ValueFactory::getTempStringValue("Nunavut Sannginivut");
    scan = drTable->iteratorDeletingAsWeGo();
    for (int skip = 0; skip < 2; ++skip) {
        ASSERT_TRUE(scan.next(scanned));
    }
    TableTuple& updTuple = drTable->copyIntoTempTuple(scanned);
    updTuple.setNValue(1, updatedMotto);
    drTable->updateTupleWithSpecificIndexes(scanned, updTuple, drTable->allIndexes());

    // verify the updated tuple has the new timestamp.
    int64_t tsAfter = ExecutorContext::getExecutorContext()->currentDRTimestamp();
    ASSERT_NE(tsAfter, tsBefore);
    NValue tsAfterValue = ValueFactory::getBigIntValue(tsAfter);

    scan = drTable->iteratorDeletingAsWeGo();
    rowIndex = 0;
    while (scan.next(scanned)) {
        if (rowIndex == 1) {
            EXPECT_EQ(0, scanned.getHiddenNValue(hiddenTsCol).compare(tsAfterValue));
            EXPECT_EQ(0, scanned.getNValue(0).compare(idValues[rowIndex]));
            EXPECT_EQ(0, scanned.getNValue(1).compare(updatedMotto));
        } else {
            EXPECT_EQ(0, scanned.getHiddenNValue(hiddenTsCol).compare(tsBeforeValue));
            EXPECT_EQ(0, scanned.getNValue(0).compare(idValues[rowIndex]));
            EXPECT_EQ(0, scanned.getNValue(1).compare(mottoValues[rowIndex]));
        }
        ++rowIndex;
    }

    // Rolling back restores every original value, including the hidden DR
    // timestamp.
    rollback();
    scan = drTable->iteratorDeletingAsWeGo();
    rowIndex = 0;
    while (scan.next(scanned)) {
        EXPECT_EQ(0, scanned.getHiddenNValue(hiddenTsCol).compare(tsBeforeValue));
        EXPECT_EQ(0, scanned.getNValue(0).compare(idValues[rowIndex]));
        EXPECT_EQ(0, scanned.getNValue(1).compare(mottoValues[rowIndex]));
        ++rowIndex;
    }
}