/**
 * Iterate through the table blocks until all the active tuples have been found.
 * Skip dirty tuples and mark them as clean so that they can be copied during the
 * next snapshot.
 *
 * @param out tuple handle repositioned onto each candidate tuple; on a true
 *            return it points at the next active, non-dirty tuple.
 * @return true if an active, clean tuple was produced; false when the scan is
 *         exhausted.
 */
bool CopyOnWriteIterator::next(TableTuple &out) {
    assert(m_currentBlock != NULL);
    while (true) {
        // Finished with the current block? Advance to the next one (or stop).
        if (m_blockOffset >= m_currentBlock->unusedTupleBoundry()) {
            if (m_blockIterator == m_end) {
                // No more blocks: report the last block as fully scanned.
                m_table->snapshotFinishedScanningBlock(m_currentBlock, TBPtr());
                break;
            }
            m_table->snapshotFinishedScanningBlock(m_currentBlock, m_blockIterator.data());
            m_location = m_blockIterator.key();
            m_currentBlock = m_blockIterator.data();
            assert(m_currentBlock->address() == m_location);
            // Release the iterator's reference to the block we now hold.
            m_blockIterator.data() = TBPtr();
            m_blockOffset = 0;
            m_blockIterator++;
        }
        // m_location must still lie inside the current block's allocation.
        assert(m_location < m_currentBlock.get()->address() + m_table->m_tableAllocationSize);
        assert(m_location < m_currentBlock.get()->address() + (m_table->m_tupleLength * m_table->m_tuplesPerBlock));
        assert(out.sizeInValues() == m_table->columnCount());
        m_blockOffset++;
        out.move(m_location);
        const bool active = out.isActive();
        const bool dirty = out.isDirty();
        // Whether or not the tuple is returned, clear its dirty flag and advance
        // past it (these statements were duplicated in both branches before).
        out.setDirtyFalse();
        m_location += m_tupleLength;
        // Return this tuple only when it is not marked as deleted and isn't dirty.
        if (active && !dirty) {
            return true;
        }
    }
    return false;
}
/**
 * Iterate through the table blocks until all the active tuples have been found.
 * Skip dirty tuples and mark them as clean so that they can be copied during the
 * next snapshot.
 *
 * @param out tuple handle repositioned onto each candidate tuple; on a true
 *            return it points at the next active, non-dirty tuple.
 * @return true if an active, clean tuple was produced; false when the scan is
 *         exhausted (or there was no block to scan).
 */
bool CopyOnWriteIterator::next(TableTuple &out) {
    if (m_currentBlock == NULL) {
        return false;
    }
    while (true) {
        // Finished with the current block? Advance to the next one (or stop).
        if (m_blockOffset >= m_currentBlock->unusedTupleBoundary()) {
            if (m_blockIterator == m_end) {
                // No more blocks: report the last block as fully scanned.
                m_surgeon->snapshotFinishedScanningBlock(m_currentBlock, TBPtr());
                break;
            }
            m_surgeon->snapshotFinishedScanningBlock(m_currentBlock, m_blockIterator.data());
            char *finishedBlock = m_currentBlock->address();
            m_location = m_blockIterator.key();
            m_currentBlock = m_blockIterator.data();
            assert(m_currentBlock->address() == m_location);
            m_blockOffset = 0;
            // Remove the finished block from the map so that it can be released
            // back to the OS if all tuples in the block is deleted.
            //
            // This invalidates the iterators, so we have to get new iterators
            // using the current block's start address. m_blockIterator has to
            // point to the next block, hence the upper_bound() call.
            m_blocks.erase(finishedBlock);
            m_blockIterator = m_blocks.upper_bound(m_currentBlock->address());
            m_end = m_blocks.end();
        }
        // m_location must still lie inside the current block's allocation.
        assert(m_location < m_currentBlock.get()->address() + m_table->getTableAllocationSize());
        assert(m_location < m_currentBlock.get()->address() + (m_table->getTupleLength() * m_table->getTuplesPerBlock()));
        assert(out.columnCount() == m_table->columnCount());
        m_blockOffset++;
        out.move(m_location);
        const bool active = out.isActive();
        const bool dirty = out.isDirty();
        // Track why tuples are being skipped, for snapshot statistics.
        if (dirty) m_skippedDirtyRows++;
        if (!active) m_skippedInactiveRows++;
        // Whether or not the tuple is returned, clear its dirty flag and advance
        // past it (these statements were duplicated in both branches before).
        out.setDirtyFalse();
        m_location += m_tupleLength;
        // Return this tuple only when it is not marked as deleted and isn't dirty.
        if (active && !dirty) {
            return true;
        }
    }
    return false;
}
TEST_F(TableTupleFilterTest, tableTupleFilterTest) {
    static const int MARKER = 33;

    TempTable* table = getTempTable();
    TableTupleFilter tableFilter;
    tableFilter.init(table);

    int tuplePerBlock = table->getTuplesPerBlock();
    // make sure table spans more than one block
    ASSERT_TRUE(NUM_OF_TUPLES / tuplePerBlock > 1);

    TableTuple tuple = table->tempTuple();
    TableIterator iterator = table->iterator();

    // Walk the table, marking every 5th tuple and remembering its value so the
    // filter's output can be checked against it afterwards.
    std::multiset<int64_t> expected;
    int seen = 0;
    while (iterator.next(tuple)) {
        ++seen;
        if (seen % 5 != 0) {
            continue;
        }
        NValue marked = tuple.getNValue(1);
        expected.insert(ValuePeeker::peekBigInt(marked));
        tableFilter.updateTuple(tuple, MARKER);
    }

    // Every tuple the filter yields must be active and account for exactly one
    // of the values we marked above.
    TableTupleFilter_iter<MARKER> last = tableFilter.end<MARKER>();
    for (TableTupleFilter_iter<MARKER> cur = tableFilter.begin<MARKER>(); cur != last; ++cur) {
        uint64_t addr = tableFilter.getTupleAddress(*cur);
        tuple.move((char *)addr);
        ASSERT_TRUE(tuple.isActive());

        NValue found = tuple.getNValue(1);
        int64_t value = ValuePeeker::peekBigInt(found);
        ASSERT_FALSE(expected.empty());
        auto match = expected.find(value);
        ASSERT_NE(match, expected.end());
        expected.erase(match);
    }
    // The filter must have yielded every marked tuple, no more and no fewer.
    ASSERT_TRUE(expected.empty());
}
/**
 * Get the next tuple or return false if none is available.
 */
bool ElasticScanner::next(TableTuple &out) {
    while (continueScan()) {
        assert(m_currentBlockPtr != NULL);
        // Sanity checks: the cursor must still be inside the current block.
        assert(m_tuplePtr < m_currentBlockPtr.get()->address() + m_table.getTableAllocationSize());
        assert(m_tuplePtr < m_currentBlockPtr.get()->address() + (m_tupleSize * m_table.getTuplesPerBlock()));
        assert(out.sizeInValues() == m_table.columnCount());

        // Point the output tuple at the current slot, then step the cursor
        // forward; continueScan() detects when the block is exhausted.
        out.move(m_tuplePtr);
        m_tupleIndex++;
        m_tuplePtr += m_tupleSize;

        // Only active, non-dirty tuples are worth returning.
        if (out.isActive() && !out.isDirty()) {
            return true;
        }
    }
    return false;
}