// Evict up to numBlocks blocks of roughly blockSize bytes from the given
// table, and return a single-row result table describing what was evicted:
// (table name, #tuples evicted, #blocks evicted, #bytes evicted).
// Throws a fatal exception if the eviction itself fails.
Table* AntiCacheEvictionManager::evictBlock(PersistentTable *table, long blockSize, int numBlocks) {
    // Snapshot the table's lifetime eviction counters so we can report
    // only the deltas produced by this call.
    int32_t lastTuplesEvicted = table->getTuplesEvicted();
    int32_t lastBlocksEvicted = table->getBlocksEvicted();
    int64_t lastBytesEvicted  = table->getBytesEvicted();

    if (table->evictBlockToDisk(blockSize, numBlocks) == false) {
        throwFatalException("Failed to evict tuples from table '%s'", table->name().c_str());
    }

    int32_t tuplesEvicted = table->getTuplesEvicted() - lastTuplesEvicted;
    int32_t blocksEvicted = table->getBlocksEvicted() - lastBlocksEvicted;
    int64_t bytesEvicted  = table->getBytesEvicted()  - lastBytesEvicted;

    // Re-populate the single-row eviction result table.
    m_evictResultTable->deleteAllTuples(false);
    TableTuple tuple = m_evictResultTable->tempTuple();
    int idx = 0;
    tuple.setNValue(idx++, ValueFactory::getStringValue(table->name()));
    tuple.setNValue(idx++, ValueFactory::getIntegerValue(static_cast<int32_t>(tuplesEvicted)));
    tuple.setNValue(idx++, ValueFactory::getIntegerValue(static_cast<int32_t>(blocksEvicted)));
    // BUG FIX: bytesEvicted is 64-bit and the column is BIGINT, but the old
    // code cast it through int32_t first, silently truncating evictions
    // larger than 2 GiB before widening back to 64 bits.
    tuple.setNValue(idx++, ValueFactory::getBigIntValue(static_cast<int64_t>(bytesEvicted)));
    m_evictResultTable->insertTuple(tuple);
    return (m_evictResultTable);
}
// Verifies that deleting tuples also unlinks them from the eviction chain:
// insert 100 tuples, randomly delete about half, and check that the chain
// count equals the number of surviving tuples.
TEST_F(AntiCacheEvictionManagerTest, DeleteMultipleTuples) {
    int num_tuples = 100;
    initTable(true);
    TableTuple tuple = m_table->tempTuple();
    for(int i = 0; i < num_tuples; i++) // insert num_tuples tuples
    {
        tuple.setNValue(0, ValueFactory::getIntegerValue(m_tuplesInserted++));
        tuple.setNValue(1, ValueFactory::getIntegerValue(rand()));
        m_table->insertTuple(tuple);
    }
    VOLT_INFO("%d == %d", num_tuples, m_table->getNumTuplesInEvictionChain());
    // Every insert must have been linked into the eviction chain.
    ASSERT_EQ(num_tuples, m_table->getNumTuplesInEvictionChain());
    int num_tuples_deleted = 0;
    TableIterator itr(m_table);
    while(itr.hasNext()) {
        itr.next(tuple);
        if(rand() % 2 == 0) { // delete each tuple with probability .5
            m_table->deleteTuple(tuple, true);
            ++num_tuples_deleted;
        }
    }
    // The chain must shrink by exactly the number of deletions.
    ASSERT_EQ((num_tuples - num_tuples_deleted), m_table->getNumTuplesInEvictionChain());
    cleanupTable();
}
// Verifies that an EvictionIterator visits tuples in non-decreasing
// access-timestamp order (i.e. coldest/oldest tuples come first).
TEST_F(AntiCacheEvictionManagerTest, TestEvictionOrder) {
    int num_tuples = 100;
    initTable(true);
    TableTuple tuple = m_table->tempTuple();
    // Physical tuple footprint, used to size the iterator's reservation.
    int tuple_size = m_tableSchema->tupleLength() + TUPLE_HEADER_SIZE;
    for(int i = 0; i < num_tuples; i++) // insert num_tuples tuples
    {
        tuple.setNValue(0, ValueFactory::getIntegerValue(m_tuplesInserted++));
        tuple.setNValue(1, ValueFactory::getIntegerValue(rand()));
        m_table->insertTuple(tuple);
    }
    EvictionIterator itr(m_table);
    // Reserve roughly 20 tuples' worth of eviction candidates.
    itr.reserve(20 * tuple_size);
    ASSERT_TRUE(itr.hasNext());
    uint32_t oldTimeStamp = 0;
    while(itr.hasNext()) {
        itr.next(tuple);
        uint32_t newTimeStamp = tuple.getTimeStamp();
        // Each tuple must be at least as recently touched as its predecessor.
        ASSERT_LE(oldTimeStamp, newTimeStamp);
        oldTimeStamp = newTimeStamp;
    }
    cleanupTable();
}
// Insert several tuples and confirm that the eviction chain tracks both the
// oldest (first-inserted) and newest (last-inserted) tuple ids.
TEST_F(AntiCacheEvictionManagerTest, InsertMultipleTuples) {
    const int kTupleCount = 10;
    initTable(true);
    TableTuple tuple = m_table->tempTuple();
    uint32_t firstTupleId = 0;
    uint32_t lastTupleId = 0;
    for (int i = 0; i < kTupleCount; ++i) {
        tuple.setNValue(0, ValueFactory::getIntegerValue(m_tuplesInserted++));
        tuple.setNValue(1, ValueFactory::getIntegerValue(rand()));
        m_table->insertTuple(tuple);
        // Re-read the stored copy so we can record its physical tuple id.
        tuple = m_table->lookupTuple(tuple);
        if (i == 0) {
            firstTupleId = m_table->getTupleID(tuple.address());
        } else if (i == kTupleCount - 1) {
            lastTupleId = m_table->getTupleID(tuple.address());
        }
    }
    ASSERT_EQ(kTupleCount, m_table->getNumTuplesInEvictionChain());
    ASSERT_EQ(firstTupleId, m_table->getOldestTupleID());
    ASSERT_EQ(lastTupleId, m_table->getNewestTupleID());
    cleanupTable();
}
// Bulk-load helper: appends numTuples rows to the given table, each keyed by
// the next value of m_primaryKeyIndex with a random payload in column 1.
void addRandomUniqueTuples(Table *table, int numTuples) {
    TableTuple scratch = table->tempTuple();
    for (int n = 0; n < numTuples; ++n) {
        scratch.setNValue(0, ValueFactory::getIntegerValue(m_primaryKeyIndex++));
        scratch.setNValue(1, ValueFactory::getIntegerValue(rand()));
        table->insertTuple(scratch);
    }
}
// Sanity check: a single insert into an evictable table must succeed.
TEST_F(AntiCacheEvictionManagerTest, InsertTuple) {
    initTable(true);
    TableTuple tuple = m_table->tempTuple();
    tuple.setNValue(0, ValueFactory::getIntegerValue(m_tuplesInserted++));
    tuple.setNValue(1, ValueFactory::getIntegerValue(rand()));
    m_table->insertTuple(tuple);
    // FIX: tear the fixture's table down like every sibling test does;
    // the missing call leaked table state into subsequent tests.
    cleanupTable();
}
// Load table from the polygons in the string POLYGONS, defined in
// polygons.hpp.  Also print out some stats about how long it
// took.
//
// The workload is 1000 generated polygons created by
// PolygonFactory in Java.  They are all bounded to an area
// approximately in the continental US, and so may overlap:
//   o 25% regular convex
//   o 25% regular convex with a hole in the center
//   o 25% star-shaped
//   o 25% star-shaped with a hole in the center
// Also, add a null polygon.
//
// In memcheck mode, only loads 50 rows.
void loadTable(PersistentTable* table) {
#ifndef MEMCHECK
    int rowLimit = -1;   // no limit
#else
    int rowLimit = 50;   // keep memcheck runs short
#endif
    std::cout << "\n Loading polygons...\n";
    std::istringstream instream(POLYGONS); // defined in polygons.hpp
    TableTuple tempTuple = table->tempTuple();
    auto start = std::chrono::high_resolution_clock::now();
    // Zero-length duration used as an accumulator for insert time only.
    std::chrono::microseconds usSpentInserting = std::chrono::duration_cast<microseconds>(start - start);
    int pk = 0;
    std::string line;
    while (std::getline(instream, line)) {
        tempTuple.setNValue(PK_COL_INDEX, ValueFactory::getIntegerValue(pk));
        tempTuple.setNValue(GEOG_COL_INDEX, polygonWktToNval(line));
        // Time only the insert itself, not the WKT parsing above.
        start = std::chrono::high_resolution_clock::now();
        table->insertTuple(tempTuple);
        auto end = std::chrono::high_resolution_clock::now();
        usSpentInserting += std::chrono::duration_cast<microseconds>(end - start);
        ++pk;
        if (rowLimit > 0 && pk > rowLimit) {
            break;
        }
    }
    // NOTE(review): assumes at least one line in POLYGONS (pk > 0) — otherwise
    // this divides by zero; confirm POLYGONS is never empty.
    std::cout << "  Average duration of insert: " << (usSpentInserting.count() / pk) << " us\n";

    // Add a null value
    tempTuple.setNValue(PK_COL_INDEX, ValueFactory::getIntegerValue(pk));
    tempTuple.setNValue(GEOG_COL_INDEX, NValue::getNullValue(VALUE_TYPE_GEOGRAPHY));
    table->insertTuple(tempTuple);

    // Dump some stats about the index.
    CoveringCellIndex* ccIndex = static_cast<CoveringCellIndex*>(table->index("poly_idx"));
    CoveringCellIndex::StatsForTest stats = ccIndex->getStatsForTest(table);
    double cellsPerPoly = double(stats.numCells) / stats.numPolygons;
    std::cout << "  Cells per polygon: " << cellsPerPoly << "\n";

    // Use km^2, since the areas are large.
    double areaPerPoly = (stats.polygonsArea / stats.numPolygons) / 1000000.0;
    double areaPerCellCovering = (stats.cellsArea / stats.numPolygons) / 1000000.0;
    std::cout << "  Average area per polygon: " << areaPerPoly << " km^2\n";
    std::cout << "  Average area per cell covering: " << areaPerCellCovering << " km^2\n";
    std::cout << "  Cell area divided by polygon area (lower is better): " << (areaPerCellCovering / areaPerPoly) << "\n";
}
// Bulk-load helper: zero-fills the scratch tuple's body once, then appends
// numTuples rows with unique ascending keys and random payloads.  Stops and
// reports on the first failed insert.
void addRandomUniqueTuples(Table *table, int numTuples) {
    TableTuple scratch = table->tempTuple();
    // Clear everything past the header byte so unset columns are deterministic.
    ::memset(scratch.address() + 1, 0, scratch.tupleLength() - 1);
    for (int n = 0; n < numTuples; ++n) {
        scratch.setNValue(0, ValueFactory::getIntegerValue(m_primaryKeyIndex++));
        scratch.setNValue(1, ValueFactory::getIntegerValue(rand()));
        if (!table->insertTuple(scratch)) {
            std::cout << "Failed to add random unique tuple" << std::endl;
            return;
        }
    }
}
// still couldn't pass TEST_F(AntiCacheEvictionManagerTest, UpdateIndexPerformance) { int num_tuples = 100000; int num_index_updates = 8; struct timeval start, end; long seconds, useconds; double mtime; initTable(true); TableTuple tuple = m_table->tempTuple(); int iterations = 0; for(int i = 0; i < num_tuples; i++) // insert tuples { tuple.setNValue(0, ValueFactory::getIntegerValue(m_tuplesInserted++)); tuple.setNValue(1, ValueFactory::getIntegerValue(rand())); m_table->insertTuple(tuple); } for(int i = 0; i < num_index_updates; i++) { TableIterator itr1(m_table); iterations = 0; gettimeofday(&start, NULL); while(itr1.hasNext()) { itr1.next(tuple); for(int j = 0; j < i+1; j++) { m_table->setEntryToNewAddressForAllIndexes(&tuple, NULL); } if(++iterations == 1000) break; } gettimeofday(&end, NULL); seconds = end.tv_sec - start.tv_sec; useconds = end.tv_usec - start.tv_usec; mtime = (double)((seconds) * 1000 + useconds/1000); VOLT_INFO("total time for 1000 index updates: %f milliseconds", mtime); } cleanupTable(); }
// Redirect one tuple's entries in every index to a sentinel address and
// verify that exactly that entry — and no other — was moved, for both the
// primary-key index and any secondary indexes.
TEST_F(AntiCacheEvictionManagerTest, TestSetEntryToNewAddress) {
    int num_tuples = 20;
    initTable(true);
    TableTuple tuple = m_table->tempTuple();
    int iterations = 0;
    for(int i = 0; i < num_tuples / 2; i++) // insert tuples
    {
        // Two inserts per iteration: one with payload 0, one with payload 1.
        tuple.setNValue(0, ValueFactory::getIntegerValue(m_tuplesInserted++));
        tuple.setNValue(1, ValueFactory::getIntegerValue(0));
        m_table->insertTuple(tuple);
        tuple.setNValue(0, ValueFactory::getIntegerValue(m_tuplesInserted++));
        tuple.setNValue(1, ValueFactory::getIntegerValue(1));
        m_table->insertTuple(tuple);
    }
    TableIterator itr1(m_table);
    iterations = 0;
    // Relocate the first tuple the iterator yields.
    itr1.next(tuple);
    void* oldAddress = tuple.address();
    m_table->setEntryToNewAddressForAllIndexes(&tuple, (void*)0xdeadbeaf, oldAddress);
    std::vector <TableIndex*> allIndexes = m_table->allIndexes();
    for (int i = 0; i < allIndexes.size(); ++i) {
        int cnt = 0;
        allIndexes[i]->moveToTuple(&tuple);
        const void* address;
        // check to see whether we set the tuple and only that tuple to new address
        // for both primaryKey and secondary indexes
        while ((address = (allIndexes[i]->nextValueAtKey()).address())) {
            // No index entry may still point at the old location.
            ASSERT_NE(address, oldAddress);
            if (address == (void*)0xdeadbeaf)
                cnt++;
        }
        // Exactly one entry per index should carry the sentinel address.
        ASSERT_EQ(cnt, 1);
    }
    cleanupTable();
}
// Heap-allocates a tuple over the given schema, backed by zero-initialized
// storage, with column idx set to value.  The caller owns both the returned
// TableTuple and its backing buffer.
TableTuple *newTuple(TupleSchema *schema, int idx, long value) {
    TableTuple *result = new TableTuple(schema);
    const int byteCount = result->tupleLength();
    char *storage = new char[byteCount];
    memset(storage, 0, byteCount);
    result->move(storage);
    result->setNValue(idx, ValueFactory::getBigIntValue(value));
    return result;
}
// A freshly inserted tuple must carry a non-zero access timestamp.
TEST_F(AntiCacheEvictionManagerTest, GetTupleTimeStamp) {
    initTable(true);
    TableTuple inserted = m_table->tempTuple();
    inserted.setNValue(0, ValueFactory::getIntegerValue(m_tuplesInserted++));
    inserted.setNValue(1, ValueFactory::getIntegerValue(rand()));
    m_table->insertTuple(inserted);
    // Re-read the stored copy of the tuple we just wrote.
    inserted = m_table->lookupTuple(inserted);
    uint32_t stamp = inserted.getTimeStamp();
    ASSERT_NE(stamp, 0);
    cleanupTable();
}
// Insert a single tuple, delete it, and verify the eviction chain count
// transitions 1 -> 0.
TEST_F(AntiCacheEvictionManagerTest, DeleteSingleTuple) {
    initTable(true);
    TableTuple victim = m_table->tempTuple();
    victim.setNValue(0, ValueFactory::getIntegerValue(m_tuplesInserted++));
    victim.setNValue(1, ValueFactory::getIntegerValue(rand()));
    m_table->insertTuple(victim);
    ASSERT_EQ(1, m_table->getNumTuplesInEvictionChain());
    // Locate the stored copy and remove it.
    victim = m_table->lookupTuple(victim);
    m_table->deleteTuple(victim, true);
    ASSERT_EQ(0, m_table->getNumTuplesInEvictionChain());
    cleanupTable();
}
// The first tuple inserted into a fresh table must map to tuple id 0.
TEST_F(AntiCacheEvictionManagerTest, GetTupleID) {
    initTable(true);
    TableTuple tuple = m_table->tempTuple();
    tuple.setNValue(0, ValueFactory::getIntegerValue(m_tuplesInserted++));
    tuple.setNValue(1, ValueFactory::getIntegerValue(rand()));
    m_table->insertTuple(tuple);
    // get the tuple that was just inserted
    tuple = m_table->lookupTuple(tuple);
    int tuple_id = m_table->getTupleID(tuple.address());
    //printf("tuple_id = %d\n", tuple_id);
    //ASSERT_NE(tuple_id, -1);
    ASSERT_EQ(tuple_id, 0);
    // FIX: release the fixture's table like the sibling tests do; the missing
    // teardown leaked state into subsequent tests.
    cleanupTable();
}
// Populate an ARRAY-backed unique primary-key index and make sure it
// reports a non-zero memory estimate.
TEST_F(IndexTrackerAllocatorTest, ArrayUniqueIndexMemoryEstimate) {
    initTable(true, voltdb::ARRAY_INDEX, true);
    TableTuple tuple = m_table->tempTuple();
    // 2 warm-up inserts plus 1024 bulk inserts — the same call sequence as
    // before, folded into one loop.
    for (int i = 0; i < 1026; ++i) {
        tuple.setNValue(0, ValueFactory::getIntegerValue(m_tuplesInserted++));
        tuple.setNValue(1, ValueFactory::getIntegerValue(rand()));
        m_table->insertTuple(tuple);
    }
    // get the tuple that was just inserted
    tuple = m_table->lookupTuple(tuple);
    VOLT_INFO("%s memory estimate: %ld\n",
              m_table->index("primaryKeyIndex")->getTypeName().c_str(),
              m_table->index("primaryKeyIndex")->getMemoryEstimate());
    ASSERT_NE(m_table->index("primaryKeyIndex")->getMemoryEstimate(), 0);
    cleanupTable();
}
// After a single insert, the oldest entry in the eviction chain must be the
// tuple we just inserted.
TEST_F(AntiCacheEvictionManagerTest, OldestTupleID) {
    int inserted_tuple_id, oldest_tuple_id;
    initTable(true);
    TableTuple fresh = m_table->tempTuple();
    fresh.setNValue(0, ValueFactory::getIntegerValue(m_tuplesInserted++));
    fresh.setNValue(1, ValueFactory::getIntegerValue(rand()));
    m_table->insertTuple(fresh);
    // Re-read the stored copy to learn its physical id.
    fresh = m_table->lookupTuple(fresh);
    inserted_tuple_id = m_table->getTupleID(fresh.address());
    oldest_tuple_id = m_table->getOldestTupleID();
    ASSERT_EQ(inserted_tuple_id, oldest_tuple_id);
    cleanupTable();
}
// Inserts three rows whose VARCHAR column ranges from a wide string to a
// narrow one to NULL, then verifies each can be found again via
// lookupTupleForUndo() using a temp tuple with the same values.
TEST_F(PersistentTableLogTest, LookupTupleUsingTempTupleTest) {
    initNarrowTable();
    // Create three tuple with a variable length VARCHAR column, then call
    // lookupTupleForUndo() to look each tuple up from wide to narrower column.
    // It will use the memcmp() code path for the comparison, which should all
    // succeed because there is no uninlined stuff.
    NValue wideStr = ValueFactory::getStringValue("a long string");
    NValue narrowStr = ValueFactory::getStringValue("a");
    NValue nullStr = ValueFactory::getNullStringValue();

    // Row 1: wide string.  Each tuple gets temporary heap-backed storage that
    // is freed right after insertTuple() copies the data into the table.
    TableTuple wideTuple(m_tableSchema);
    wideTuple.move(new char[wideTuple.tupleLength()]);
    ::memset(wideTuple.address(), 0, wideTuple.tupleLength());
    wideTuple.setNValue(0, ValueFactory::getBigIntValue(1));
    wideTuple.setNValue(1, wideStr);
    m_table->insertTuple(wideTuple);
    delete[] wideTuple.address();

    // Row 2: narrow string.
    TableTuple narrowTuple(m_tableSchema);
    narrowTuple.move(new char[narrowTuple.tupleLength()]);
    ::memset(narrowTuple.address(), 0, narrowTuple.tupleLength());
    narrowTuple.setNValue(0, ValueFactory::getBigIntValue(2));
    narrowTuple.setNValue(1, narrowStr);
    m_table->insertTuple(narrowTuple);
    delete[] narrowTuple.address();

    // Row 3: NULL string.
    TableTuple nullTuple(m_tableSchema);
    nullTuple.move(new char[nullTuple.tupleLength()]);
    ::memset(nullTuple.address(), 0, nullTuple.tupleLength());
    nullTuple.setNValue(0, ValueFactory::getBigIntValue(3));
    nullTuple.setNValue(1, nullStr);
    m_table->insertTuple(nullTuple);
    delete[] nullTuple.address();

    // Each row must be findable via a temp tuple built from the same values.
    TableTuple tempTuple = m_table->tempTuple();
    tempTuple.setNValue(0, ValueFactory::getBigIntValue(1));
    tempTuple.setNValue(1, wideStr);
    TableTuple result = m_table->lookupTupleForUndo(tempTuple);
    ASSERT_FALSE(result.isNullTuple());

    tempTuple = m_table->tempTuple();
    tempTuple.setNValue(0, ValueFactory::getBigIntValue(2));
    tempTuple.setNValue(1, narrowStr);
    result = m_table->lookupTupleForUndo(tempTuple);
    ASSERT_FALSE(result.isNullTuple());

    tempTuple = m_table->tempTuple();
    tempTuple.setNValue(0, ValueFactory::getBigIntValue(3));
    tempTuple.setNValue(1, nullStr);
    result = m_table->lookupTupleForUndo(tempTuple);
    ASSERT_FALSE(result.isNullTuple());

    // Release the uninlined string values created above.
    wideStr.free();
    narrowStr.free();
    nullStr.free();
}
// After a single insert, the newest entry in the eviction chain must be the
// tuple we just inserted.
TEST_F(AntiCacheEvictionManagerTest, NewestTupleIDTest) {
    int inserted_tuple_id, newest_tuple_id;
    initTable(true);
    TableTuple tuple = m_table->tempTuple();
    tuple.setNValue(0, ValueFactory::getIntegerValue(m_tuplesInserted++));
    tuple.setNValue(1, ValueFactory::getIntegerValue(rand()));
    m_table->insertTuple(tuple);
    // get the tuple that was just inserted
    tuple = m_table->lookupTuple(tuple);
    inserted_tuple_id = m_table->getTupleID(tuple.address());
    newest_tuple_id = m_table->getNewestTupleID();
    // FIX: removed the leftover printf() debug output and added the
    // cleanupTable() teardown every sibling test performs.
    ASSERT_EQ(inserted_tuple_id, newest_tuple_id);
    cleanupTable();
}
// Updates one VARCHAR column of a random tuple and verifies that the table's
// non-inlined memory accounting reflects the change — and returns to the
// original figure after the update is undone.
TEST_F(PersistentTableMemStatsTest, UpdateAndUndoTest) {
    initTable(true);
    tableutil::addRandomTuples(m_table, 10);
    int64_t orig_size = m_table->nonInlinedMemorySize();
    //cout << "Original non-inline size: " << orig_size << endl;

    TableTuple tuple(m_tableSchema);
    tableutil::getRandomTuple(m_table, tuple);
    //cout << "Retrieved random tuple " << endl << tuple.debugNoHeader() << endl;

    // Bytes currently charged for the victim tuple's two string columns.
    size_t removed_bytes = StringRef::computeStringMemoryUsed(ValuePeeker::peekObjectLength(tuple.getNValue(1))) + StringRef::computeStringMemoryUsed(ValuePeeker::peekObjectLength(tuple.getNValue(2)));
    //cout << "Removing bytes from table: " << removed_bytes << endl;

    /*
     * A copy of the tuple to modify and use as a source tuple when
     * updating the new tuple.
     */
    TableTuple tempTuple = m_table->tempTuple();
    tempTuple.copy(tuple);
    string strval = "123456";
    NValue new_string = ValueFactory::getStringValue(strval);
    tempTuple.setNValue(1, new_string);
    //cout << "Created random tuple " << endl << tempTuple.debugNoHeader() << endl;

    // Bytes the string columns will occupy after the update.
    size_t added_bytes = StringRef::computeStringMemoryUsed(ValuePeeker::peekObjectLength(tempTuple.getNValue(1))) + StringRef::computeStringMemoryUsed(ValuePeeker::peekObjectLength(tempTuple.getNValue(2)));
    //cout << "Adding bytes to table: " << added_bytes << endl;

    m_engine->setUndoToken(INT64_MIN + 2);
    // this next line is a testing hack until engine data is
    // de-duplicated with executorcontext data
    m_engine->getExecutorContext();

    m_table->updateTuple(tempTuple, tuple, true);

    // The accounting must show the swap of old strings for new ones...
    ASSERT_EQ(orig_size + added_bytes - removed_bytes, m_table->nonInlinedMemorySize());

    m_engine->undoUndoToken(INT64_MIN + 2);

    // ...and revert exactly after the undo.
    ASSERT_EQ(orig_size, m_table->nonInlinedMemorySize());
    //tuple.freeObjectColumns();
    //tempTuple.freeObjectColumns();
    //delete [] tuple.address();
    //delete[] tempTuple.address();
    new_string.free();
}
// Flattens the (table name -> row offsets) map into the member result table,
// emitting one (TABLE_NAME, TUPLE_ID) row per tracked offset.  Any previous
// contents of the result table are discarded first.
void TupleTrackerManager::getTuples(boost::unordered_map<std::string, RowOffsets*> *map) const {
    this->resultTable->deleteAllTuples(false);
    TableTuple tuple = this->resultTable->tempTuple();
    for (boost::unordered_map<std::string, RowOffsets*>::const_iterator iter = map->begin();
         iter != map->end(); ++iter) {
        for (RowOffsets::const_iterator tupleIter = iter->second->begin();
             tupleIter != iter->second->end(); ++tupleIter) {
            int idx = 0;
            tuple.setNValue(idx++, ValueFactory::getStringValue(iter->first));  // TABLE_NAME
            tuple.setNValue(idx++, ValueFactory::getIntegerValue(*tupleIter));  // TUPLE_ID
            this->resultTable->insertTuple(tuple);
        }
    }
}
// Fills ten consecutive BIGINT columns starting at `offset`: the high 32 bits
// carry the row id, the low bits the column ordinal (0..9).
void makeColsForRow_00(int offset, int64_t row, TableTuple &tuple) {
    for (int col = 0; col < 10; ++col) {
        int64_t encoded = (row << 32) + col;
        tuple.setNValue(offset + col, ValueFactory::getBigIntValue(static_cast<int64_t>(encoded)));
    }
}
// Executes a DELETE plan node.  Two paths:
//   1. m_truncate: remove every tuple in the target table at once.
//   2. Row-by-row: the input table's first column carries the raw address of
//      each target tuple to remove (single-site plan optimization).
// When compiled with ARIES, a log record is serialized and written to the
// ARIES logger before each destructive step.
bool DeleteExecutor::p_execute(const NValueArray &params, ReadWriteTracker *tracker) {
    assert(m_targetTable);
    if (m_truncate) {
        VOLT_TRACE("truncating table %s...", m_targetTable->name().c_str());
        // count the truncated tuples as deleted
        m_engine->m_tuplesModified += m_inputTable->activeTupleCount();

#ifdef ARIES
        if(m_engine->isARIESEnabled()){
            // no need of persistency check, m_targetTable is
            // always persistent for deletes
            LogRecord *logrecord = new LogRecord(computeTimeStamp(),
                    LogRecord::T_TRUNCATE,// this is a truncate record
                    LogRecord::T_FORWARD,// the system is running normally
                    -1,// XXX: prevLSN must be fetched from table!
                    m_engine->getExecutorContext()->currentTxnId() ,// txn id
                    m_engine->getSiteId(),// which execution site
                    m_targetTable->name(),// the table affected
                    NULL,// primary key irrelevant
                    -1,// irrelevant numCols
                    NULL,// list of modified cols irrelevant
                    NULL,// before image irrelevant
                    NULL// after image irrelevant
            );

            // Serialize the record into a scratch buffer and emit it via the
            // ARIES logger, then release everything.
            size_t logrecordLength = logrecord->getEstimatedLength();
            char *logrecordBuffer = new char[logrecordLength];

            FallbackSerializeOutput output;
            output.initializeWithPosition(logrecordBuffer, logrecordLength, 0);

            logrecord->serializeTo(output);

            LogManager* m_logManager = this->m_engine->getLogManager();
            Logger m_ariesLogger = m_logManager->getAriesLogger();
            //VOLT_WARN("m_logManager : %p AriesLogger : %p",&m_logManager, &m_ariesLogger);
            const Logger *logger = m_logManager->getThreadLogger(LOGGERID_MM_ARIES);

            logger->log(LOGLEVEL_INFO, output.data(), output.position());

            delete[] logrecordBuffer;
            logrecordBuffer = NULL;

            delete logrecord;
            logrecord = NULL;
        }
#endif

        //m_engine->context().incrementTuples(m_targetTable->activeTupleCount());
        // actually delete all the tuples
        m_targetTable->deleteAllTuples(true);
        return true;
    }
    // XXX : ARIES : Not sure if else is needed ?
    assert(m_inputTable);
    assert(m_inputTuple.sizeInValues() == m_inputTable->columnCount());
    assert(m_targetTuple.sizeInValues() == m_targetTable->columnCount());
    TableIterator inputIterator(m_inputTable);
    while (inputIterator.next(m_inputTuple)) {
        //
        // OPTIMIZATION: Single-Sited Query Plans
        // If our beloved DeletePlanNode is apart of a single-site query plan,
        // then the first column in the input table will be the address of a
        // tuple on the target table that we will want to blow away. This saves
        // us the trouble of having to do an index lookup
        //
        void *targetAddress = m_inputTuple.getNValue(0).castAsAddress();
        m_targetTuple.move(targetAddress);

        // Read/Write Set Tracking
        if (tracker != NULL) {
            tracker->markTupleWritten(m_targetTable, &m_targetTuple);
        }

#ifdef ARIES
        if(m_engine->isARIESEnabled()){
            // no need of persistency check, m_targetTable is
            // always persistent for deletes

            // before image -- target is tuple to be deleted.
            TableTuple *beforeImage = &m_targetTuple;

            TableTuple *keyTuple = NULL;
            char *keydata = NULL;

            // See if we use an index instead
            TableIndex *index = m_targetTable->primaryKeyIndex();

            if (index != NULL) {
                // First construct tuple for primary key
                keydata = new char[index->getKeySchema()->tupleLength()];
                keyTuple = new TableTuple(keydata, index->getKeySchema());

                for (int i = 0; i < index->getKeySchema()->columnCount(); i++) {
                    keyTuple->setNValue(i, beforeImage->getNValue(index->getColumnIndices()[i]));
                }

                // no before image need be recorded, just the primary key
                beforeImage = NULL;
            }

            LogRecord *logrecord = new LogRecord(computeTimeStamp(),
                    LogRecord::T_DELETE,// this is a delete record
                    LogRecord::T_FORWARD,// the system is running normally
                    -1,// XXX: prevLSN must be fetched from table!
                    m_engine->getExecutorContext()->currentTxnId() ,// txn id
                    m_engine->getSiteId(),// which execution site
                    m_targetTable->name(),// the table affected
                    keyTuple,// primary key
                    -1,// must delete all columns
                    NULL,// no list of modified cols
                    beforeImage,
                    NULL// no after image
            );

            // Serialize and emit the per-row delete record.
            size_t logrecordLength = logrecord->getEstimatedLength();
            char *logrecordBuffer = new char[logrecordLength];

            FallbackSerializeOutput output;
            output.initializeWithPosition(logrecordBuffer, logrecordLength, 0);

            logrecord->serializeTo(output);

            LogManager* m_logManager = this->m_engine->getLogManager();
            Logger m_ariesLogger = m_logManager->getAriesLogger();
            //VOLT_WARN("m_logManager : %p AriesLogger : %p",&m_logManager, &m_ariesLogger);
            const Logger *logger = m_logManager->getThreadLogger(LOGGERID_MM_ARIES);

            logger->log(LOGLEVEL_INFO, output.data(), output.position());

            delete[] logrecordBuffer;
            logrecordBuffer = NULL;

            delete logrecord;
            logrecord = NULL;

            if (keydata != NULL) {
                delete[] keydata;
                keydata = NULL;
            }

            if (keyTuple != NULL) {
                delete keyTuple;
                keyTuple = NULL;
            }
        }
#endif

        // Delete from target table
        if (!m_targetTable->deleteTuple(m_targetTuple, true)) {
            VOLT_ERROR("Failed to delete tuple from table '%s'",
                       m_targetTable->name().c_str());
            return false;
        }
    }
    // add to the planfragments count of modified tuples
    m_engine->m_tuplesModified += m_inputTable->activeTupleCount();
    //m_engine->context().incrementTuples(m_inputTable->activeTupleCount());
    return true;
}
// Executes an INSERT (or UPSERT) plan node: copies each input row into the
// template tuple, applies partitioning checks, then inserts — or, for
// upserts with an existing primary key, updates the existing row.  Finishes
// by emitting a single-row count table with the number of modified tuples.
bool InsertExecutor::p_execute(const NValueArray &params) {
    assert(m_node == dynamic_cast<InsertPlanNode*>(m_abstractNode));
    assert(m_node);
    assert(m_inputTable == dynamic_cast<TempTable*>(m_node->getInputTable()));
    assert(m_inputTable);

    // Target table can be StreamedTable or PersistentTable and must not be NULL
    // Update target table reference from table delegate
    Table* targetTable = m_node->getTargetTable();
    assert(targetTable);
    assert((targetTable == dynamic_cast<PersistentTable*>(targetTable)) ||
            (targetTable == dynamic_cast<StreamedTable*>(targetTable)));

    PersistentTable* persistentTable = m_isStreamed ?
            NULL : static_cast<PersistentTable*>(targetTable);
    TableTuple upsertTuple = TableTuple(targetTable->schema());

    VOLT_TRACE("INPUT TABLE: %s\n", m_inputTable->debug().c_str());

    // count the number of successful inserts
    int modifiedTuples = 0;

    Table* outputTable = m_node->getOutputTable();
    assert(outputTable);

    TableTuple templateTuple = m_templateTuple.tuple();

    // Fill any CURRENT_TIMESTAMP default columns once, up front.
    std::vector<int>::iterator it;
    for (it = m_nowFields.begin(); it != m_nowFields.end(); ++it) {
        templateTuple.setNValue(*it, NValue::callConstant<FUNC_CURRENT_TIMESTAMP>());
    }

    VOLT_DEBUG("This is a %s-row insert on partition with id %d",
               m_node->getChildren()[0]->getPlanNodeType() == PLAN_NODE_TYPE_MATERIALIZE ?
               "single" : "multi", m_engine->getPartitionId());
    VOLT_DEBUG("Offset of partition column is %d", m_partitionColumn);

    //
    // An insert is quite simple really. We just loop through our m_inputTable
    // and insert any tuple that we find into our targetTable. It doesn't get any easier than that!
    //
    TableTuple inputTuple(m_inputTable->schema());
    assert (inputTuple.sizeInValues() == m_inputTable->columnCount());
    TableIterator iterator = m_inputTable->iterator();
    while (iterator.next(inputTuple)) {

        for (int i = 0; i < m_node->getFieldMap().size(); ++i) {
            // Most executors will just call setNValue instead of
            // setNValueAllocateForObjectCopies.
            //
            // However, We need to call
            // setNValueAlocateForObjectCopies here.  Sometimes the
            // input table's schema has an inlined string field, and
            // it's being assigned to the target table's outlined
            // string field.  In this case we need to tell the NValue
            // where to allocate the string data.
            templateTuple.setNValueAllocateForObjectCopies(m_node->getFieldMap()[i],
                                                           inputTuple.getNValue(i),
                                                           ExecutorContext::getTempStringPool());
        }

        VOLT_TRACE("Inserting tuple '%s' into target table '%s' with table schema: %s",
                   templateTuple.debug(targetTable->name()).c_str(), targetTable->name().c_str(),
                   targetTable->schema()->debug().c_str());

        // if there is a partition column for the target table
        if (m_partitionColumn != -1) {

            // get the value for the partition column
            NValue value = templateTuple.getNValue(m_partitionColumn);
            bool isLocal = m_engine->isLocalSite(value);

            // if it doesn't map to this site
            if (!isLocal) {
                if (!m_multiPartition) {
                    throw ConstraintFailureException(
                            dynamic_cast<PersistentTable*>(targetTable),
                            templateTuple,
                            "Mispartitioned tuple in single-partition insert statement.");
                }

                // don't insert
                continue;
            }
        }

        // for multi partition export tables, only insert into one
        // place (the partition with hash(0)), if the data is from a
        // replicated source.  If the data is coming from a subquery
        // with partitioned tables, we need to perform the insert on
        // every partition.
        if (m_isStreamed && m_multiPartition && !m_sourceIsPartitioned) {
            bool isLocal = m_engine->isLocalSite(ValueFactory::getBigIntValue(0));
            if (!isLocal) continue;
        }

        if (! m_isUpsert) {
            // try to put the tuple into the target table
            if (m_hasPurgeFragment) {
                if (!executePurgeFragmentIfNeeded(&persistentTable))
                    return false;
                // purge fragment might have truncated the table, and
                // refreshed the persistent table pointer.  Make sure to
                // use it when doing the insert below.
                targetTable = persistentTable;
            }
            if (!targetTable->insertTuple(templateTuple)) {
                VOLT_ERROR("Failed to insert tuple from input table '%s' into"
                           " target table '%s'",
                           m_inputTable->name().c_str(), targetTable->name().c_str());
                return false;
            }
        } else {
            // upsert execution logic
            assert(persistentTable->primaryKeyIndex() != NULL);
            TableTuple existsTuple = persistentTable->lookupTupleByValues(templateTuple);

            if (existsTuple.isNullTuple()) {
                // try to put the tuple into the target table
                if (m_hasPurgeFragment) {
                    if (!executePurgeFragmentIfNeeded(&persistentTable))
                        return false;
                }
                if (!persistentTable->insertTuple(templateTuple)) {
                    VOLT_ERROR("Failed to insert tuple from input table '%s' into"
                               " target table '%s'",
                               m_inputTable->name().c_str(), persistentTable->name().c_str());
                    return false;
                }
            } else {
                // tuple exists already, try to update the tuple instead
                upsertTuple.move(templateTuple.address());
                TableTuple &tempTuple = persistentTable->getTempTupleInlined(upsertTuple);

                if (!persistentTable->updateTupleWithSpecificIndexes(existsTuple, tempTuple,
                                                                     persistentTable->allIndexes())) {
                    VOLT_INFO("Failed to update existsTuple from table '%s'",
                              persistentTable->name().c_str());
                    return false;
                }
            }
        }

        // successfully inserted or updated
        modifiedTuples++;
    }

    // Report the number of modified rows through the one-column output table.
    TableTuple& count_tuple = outputTable->tempTuple();
    count_tuple.setNValue(0, ValueFactory::getBigIntValue(modifiedTuples));
    // try to put the tuple into the output table
    if (!outputTable->insertTuple(count_tuple)) {
        VOLT_ERROR("Failed to insert tuple count (%d) into"
                   " output table '%s'",
                   modifiedTuples, outputTable->name().c_str());
        return false;
    }

    // add to the planfragments count of modified tuples
    m_engine->addToTuplesModified(modifiedTuples);
    VOLT_DEBUG("Finished inserting %d tuples", modifiedTuples);
    return true;
}
// Scan some records in the table, verifying that points that are
// supposed to be inside are, and those that are not aren't.
// Print out some stats about how long things took.
void scanSomeRecords(PersistentTable *table, int numTuples, int numScans) {
    std::cout << "  Scanning for containing polygons on " << numScans << " points...\n";
    auto start = std::chrono::high_resolution_clock::now();
    // Zero-length durations used as accumulators for the two timed phases.
    std::chrono::microseconds usSpentScanning = std::chrono::duration_cast<microseconds>(start - start);
    std::chrono::microseconds usSpentContainsing = std::chrono::duration_cast<microseconds>(start - start);
    CoveringCellIndex* ccIndex = static_cast<CoveringCellIndex*>(table->index("poly_idx"));
    TableTuple tempTuple = table->tempTuple();
    StandAloneTupleStorage searchKey(ccIndex->getKeySchema());
    int numContainingCells = 0;
    int numContainingPolygons = 0;
    for (int i = 0; i < numScans; ++i) {
        // Pick a tuple at random.
        int pk = std::rand() % numTuples;
        tempTuple.setNValue(PK_COL_INDEX, ValueFactory::getIntegerValue(pk));
        TableTuple sampleTuple = table->lookupTupleByValues(tempTuple);
        ASSERT_FALSE(sampleTuple.isNullTuple());
        NValue geog = sampleTuple.getNValue(GEOG_COL_INDEX);
        if (geog.isNull()) {
            // There is one null row in the table.
            continue;
        }
        // The centroid will be inside polygons with one ring, and
        // not inside polygons with two rings (because the second
        // ring is a hole in the center).
        NValue centroid = geog.callUnary<FUNC_VOLT_POLYGON_CENTROID>();
        int32_t numInteriorRings = ValuePeeker::peekAsBigInt(geog.callUnary<FUNC_VOLT_POLYGON_NUM_INTERIOR_RINGS>());
        bool isValid = ValuePeeker::peekBoolean(geog.callUnary<FUNC_VOLT_VALIDATE_POLYGON>());
        if (! isValid) {
            // Build a descriptive failure message including the invalid reason.
            std::ostringstream oss;
            int32_t len;
            const char* reasonChars = ValuePeeker::peekObject_withoutNull(geog.callUnary<FUNC_VOLT_POLYGON_INVALID_REASON>(), &len);
            std::string reason = std::string(reasonChars, len);
            oss << "At " << i << "th scan, expected a valid polygon at pk " << pk
                << " but isValid says its not because \"" << reason << "\". WKT:\n"
                << nvalToWkt(geog);
            ASSERT_TRUE_WITH_MESSAGE(isValid, oss.str().c_str());
        }
        start = std::chrono::high_resolution_clock::now();
        searchKey.tuple().setNValue(0, centroid);
        IndexCursor cursor(ccIndex->getTupleSchema());
        bool foundSamplePoly = false;
        bool b = ccIndex->moveToCoveringCell(&searchKey.tuple(), cursor);
        if (b) {
            TableTuple foundTuple = ccIndex->nextValueAtKey(cursor);
            while (! foundTuple.isNullTuple()) {
                ++numContainingCells;
                // Time the exact CONTAINS check separately from the index
                // traversal so the two costs can be reported independently.
                auto startContains = std::chrono::high_resolution_clock::now();
                bool polygonContains = ValuePeeker::peekBoolean(NValue::call<FUNC_VOLT_CONTAINS>({geog, centroid}));
                auto endContains = std::chrono::high_resolution_clock::now();
                usSpentContainsing += std::chrono::duration_cast<microseconds>(endContains - startContains);
                if (polygonContains)
                    ++numContainingPolygons;
                int foundPk = ValuePeeker::peekAsInteger(foundTuple.getNValue(PK_COL_INDEX));
                if (foundPk == pk && polygonContains) {
                    foundSamplePoly = true;
                }
                foundTuple = ccIndex->nextValueAtKey(cursor);
            }
        }
        auto end = std::chrono::high_resolution_clock::now();
        usSpentScanning += std::chrono::duration_cast<microseconds>(end - start);
        ASSERT_TRUE(numInteriorRings == 0 || numInteriorRings == 1);
        if (numInteriorRings == 0 && !foundSamplePoly) {
            std::ostringstream oss;
            oss << "At " << i << "th scan, expected to find centroid in polygon with primary key "
                << pk << ", centroid WKT:\n" << nvalToWkt(centroid)
                << "\npolygon WKT:\n" << nvalToWkt(geog);
            ASSERT_TRUE_WITH_MESSAGE(foundSamplePoly, oss.str().c_str());
        } else if (numInteriorRings == 1) {
            // There was a hole in the center so the centroid is not in the polygon
            ASSERT_TRUE_WITH_MESSAGE(!foundSamplePoly, "Expected to not find centroid contained by polygon with hole in the center");
        }
    }
    // Per-scan averages; B-tree traversal time is the lookup total minus the
    // time spent inside the exact CONTAINS checks.
    auto avgTotalUsSpentScanning = usSpentScanning.count() / numScans;
    auto avgUsSpentContainsing = usSpentContainsing.count() / numScans;
    auto avgUsSpentScanning = avgTotalUsSpentScanning - avgUsSpentContainsing;
    std::cout << "    Average duration of each index lookup total: " << avgTotalUsSpentScanning << " us\n";
    std::cout << "    Average duration spent on CONTAINS: " << avgUsSpentContainsing << " us\n";
    std::cout << "    Average duration spent on B-tree traversal: " << avgUsSpentScanning << " us\n";
    double pctFalsePositives = (double(numContainingCells - numContainingPolygons) / numContainingCells) * 100.0;
    std::cout << "    Percent false positives (point in cell but not polygon): " << pctFalsePositives << "%\n";
    double avgCellsContainingPoint = numContainingCells / double(numScans);
    double avgPolygonsContainingPoint = numContainingPolygons / double(numScans);
    std::cout << "    On average, each point was in " << avgCellsContainingPoint << " cells\n";
    std::cout << "    On average, each point was in " << avgPolygonsContainingPoint << " polygons\n";
}
// Build the two-column index search key from a table tuple: key columns
// 0 and 1 are filled from source columns 1 and 2 respectively.
void setSearchKeyFromTuple(TableTuple &source) {
    for (int keyCol = 0; keyCol < 2; ++keyCol) {
        keyTuple.setNValue(keyCol, source.getNValue(keyCol + 1));
    }
}
// Fill ten BIGINT columns of `tuple` starting at `offset` with the values
// 90..99, each biased by a row-dependent amount in [0, 9].
void makeColsForRow_90(int offset, int64_t row, TableTuple &tuple) {
    const int64_t rowBias = row % 10;  // loop-invariant bias for this row
    for (int col = 90; col < 100; ++col) {
        tuple.setNValue(offset++,
                        ValueFactory::getBigIntValue(static_cast<int64_t>(col) + rowBias));
    }
}
bool UpdateExecutor::p_execute(const NValueArray ¶ms, ReadWriteTracker *tracker) { assert(m_inputTable); assert(m_targetTable); VOLT_TRACE("INPUT TABLE: %s\n", m_inputTable->debug().c_str()); VOLT_TRACE("TARGET TABLE - BEFORE: %s\n", m_targetTable->debug().c_str()); assert(m_inputTuple.sizeInValues() == m_inputTable->columnCount()); assert(m_targetTuple.sizeInValues() == m_targetTable->columnCount()); TableIterator input_iterator(m_inputTable); while (input_iterator.next(m_inputTuple)) { // // OPTIMIZATION: Single-Sited Query Plans // If our beloved UpdatePlanNode is apart of a single-site query plan, // then the first column in the input table will be the address of a // tuple on the target table that we will want to update. This saves us // the trouble of having to do an index lookup // void *target_address = m_inputTuple.getNValue(0).castAsAddress(); m_targetTuple.move(target_address); // Read/Write Set Tracking if (tracker != NULL) { tracker->markTupleWritten(m_targetTable, &m_targetTuple); } // Loop through INPUT_COL_IDX->TARGET_COL_IDX mapping and only update // the values that we need to. The key thing to note here is that we // grab a temp tuple that is a copy of the target tuple (i.e., the tuple // we want to update). This insures that if the input tuple is somehow // bringing garbage with it, we're only going to copy what we really // need to into the target tuple. 
// TableTuple &tempTuple = m_targetTable->getTempTupleInlined(m_targetTuple); for (int map_ctr = 0; map_ctr < m_inputTargetMapSize; map_ctr++) { tempTuple.setNValue(m_inputTargetMap[map_ctr].second, m_inputTuple.getNValue(m_inputTargetMap[map_ctr].first)); } // if there is a partition column for the target table if (m_partitionColumn != -1) { // check for partition problems // get the value for the partition column NValue value = tempTuple.getNValue(m_partitionColumn); bool isLocal = m_engine->isLocalSite(value); // if it doesn't map to this site if (!isLocal) { VOLT_ERROR("Mispartitioned tuple in single-partition plan for" " table '%s'", m_targetTable->name().c_str()); return false; } } #ifdef ARIES if(m_engine->isARIESEnabled()){ // add persistency check: PersistentTable* table = dynamic_cast<PersistentTable*>(m_targetTable); // only log if we are writing to a persistent table. if (table != NULL) { // before image -- target is old val with no updates // XXX: what about uninlined fields? // should we not be doing // m_targetTable->getTempTupleInlined(m_targetTuple); instead? 
TableTuple *beforeImage = &m_targetTuple; // after image -- temp is NEW, created using target and input TableTuple *afterImage = &tempTuple; TableTuple *keyTuple = NULL; char *keydata = NULL; std::vector<int32_t> modifiedCols; int32_t numCols = -1; // See if we can do better by using an index instead TableIndex *index = table->primaryKeyIndex(); if (index != NULL) { // First construct tuple for primary key keydata = new char[index->getKeySchema()->tupleLength()]; keyTuple = new TableTuple(keydata, index->getKeySchema()); for (int i = 0; i < index->getKeySchema()->columnCount(); i++) { keyTuple->setNValue(i, beforeImage->getNValue(index->getColumnIndices()[i])); } // no before image need be recorded, just the primary key beforeImage = NULL; } // Set the modified column list numCols = m_inputTargetMapSize; modifiedCols.resize(m_inputTargetMapSize, -1); for (int map_ctr = 0; map_ctr < m_inputTargetMapSize; map_ctr++) { // can't use column-id directly, otherwise we would go over vector bounds int pos = m_inputTargetMap[map_ctr].first - 1; modifiedCols.at(pos) = m_inputTargetMap[map_ctr].second; } // Next, let the input tuple be the diff after image afterImage = &m_inputTuple; LogRecord *logrecord = new LogRecord(computeTimeStamp(), LogRecord::T_UPDATE,// this is an update record LogRecord::T_FORWARD,// the system is running normally -1,// XXX: prevLSN must be fetched from table! m_engine->getExecutorContext()->currentTxnId() ,// txn id m_engine->getSiteId(),// which execution site m_targetTable->name(),// the table affected keyTuple,// primary key numCols, (numCols > 0) ? 
&modifiedCols : NULL, beforeImage, afterImage ); size_t logrecordLength = logrecord->getEstimatedLength(); char *logrecordBuffer = new char[logrecordLength]; FallbackSerializeOutput output; output.initializeWithPosition(logrecordBuffer, logrecordLength, 0); logrecord->serializeTo(output); LogManager* m_logManager = this->m_engine->getLogManager(); Logger m_ariesLogger = m_logManager->getAriesLogger(); //VOLT_WARN("m_logManager : %p AriesLogger : %p",&m_logManager, &m_ariesLogger); const Logger *logger = m_logManager->getThreadLogger(LOGGERID_MM_ARIES); logger->log(LOGLEVEL_INFO, output.data(), output.position()); delete[] logrecordBuffer; logrecordBuffer = NULL; delete logrecord; logrecord = NULL; if (keydata != NULL) { delete[] keydata; keydata = NULL; } if (keyTuple != NULL) { delete keyTuple; keyTuple = NULL; } } } #endif if (!m_targetTable->updateTuple(tempTuple, m_targetTuple, m_updatesIndexes)) { VOLT_INFO("Failed to update tuple from table '%s'", m_targetTable->name().c_str()); return false; } } VOLT_TRACE("TARGET TABLE - AFTER: %s\n", m_targetTable->debug().c_str()); // TODO lets output result table here, not in result executor. same thing in // delete/insert // add to the planfragments count of modified tuples m_engine->m_tuplesModified += m_inputTable->activeTupleCount(); return true; }