Example #1
void CopyOnWriteContext::notifyBlockWasCompactedAway(TBPtr block) {
    assert(!m_finishedTableScan);
    m_blocksCompacted++;
    CopyOnWriteIterator *iter = static_cast<CopyOnWriteIterator*>(m_iterator.get());
    if (iter->m_blockIterator != m_blocks.end()) {
        TBPtr nextBlock = iter->m_blockIterator.data();
        //The next block is the one that was compacted away
        //Need to move the iterator forward to skip it
        if (nextBlock == block) {
            iter->m_blockIterator++;

            //There is another block after the one that was compacted away
            if (iter->m_blockIterator != m_blocks.end()) {
                TBPtr newNextBlock = iter->m_blockIterator.data();
                m_blocks.erase(block->address());
                iter->m_blockIterator = m_blocks.find(newNextBlock->address());
                iter->m_end = m_blocks.end();
                assert(iter->m_blockIterator != m_blocks.end());
            } else {
                //No block after the one that was compacted away,
                //so set everything to the end
                m_blocks.erase(block->address());
                iter->m_blockIterator = m_blocks.end();
                iter->m_end = m_blocks.end();
            }
        } else {
            //Some random block was compacted away. Remove it and regenerate the iterator
            m_blocks.erase(block->address());
            iter->m_blockIterator = m_blocks.find(nextBlock->address());
            iter->m_end = m_blocks.end();
            assert(iter->m_blockIterator != m_blocks.end());
        }
    }
}
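
Examples #1 and #2 share the same erase-then-find discipline: rather than trusting that a map iterator survives an erase, the code re-looks-up the block it wants to resume from. A minimal standalone sketch of that pattern, using std::map as a stand-in for the real block map (whose erase may well invalidate outstanding iterators):

#include <cassert>
#include <map>

// Stand-in block map keyed by block base address; the mapped value is
// irrelevant to the pattern.
typedef std::map<const char*, int> BlockMap;

// Erase one key, then re-anchor the scan iterator by looking up the key it
// should resume from, mirroring the erase/find/assert sequence above.
BlockMap::iterator eraseAndReanchor(BlockMap &blocks,
                                    const char *erasedKey,
                                    const char *resumeKey) {
    blocks.erase(erasedKey);
    BlockMap::iterator iter = blocks.find(resumeKey);
    assert(iter != blocks.end());
    return iter;
}

int main() {
    char a[8], b[8], c[8];
    BlockMap blocks;
    blocks[a] = 1; blocks[b] = 2; blocks[c] = 3;
    // Block b was compacted away just as the scan was about to visit it;
    // skip ahead and resume from c.
    BlockMap::iterator iter = eraseAndReanchor(blocks, b, c);
    assert(iter->first == c);
    return 0;
}

With std::map the old iterator would in fact survive the erase, but re-anchoring via find keeps the scan correct for any ordered-map implementation.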
Example #2
/**
 * Block compaction hook.
 */
void ElasticScanner::notifyBlockWasCompactedAway(TBPtr block) {
    if (!m_scanComplete && m_blockIterator != m_blockEnd) {
        TBPtr nextBlock = m_blockIterator.data();
        if (nextBlock == block) {
            // The next block was compacted away.
            m_blockIterator++;
            if (m_blockIterator != m_blockEnd) {
                // There is a block to skip to.
                TBPtr newNextBlock = m_blockIterator.data();
                m_blockMap.erase(block->address());
                m_blockIterator = m_blockMap.find(newNextBlock->address());
                m_blockEnd = m_blockMap.end();
                assert(m_blockIterator != m_blockMap.end());
            }
            else {
                // There isn't a block to skip to, so we're done.
                m_blockMap.erase(block->address());
                m_blockIterator = m_blockMap.end();
                m_blockEnd = m_blockMap.end();
            }
        } else {
            // Some random block was compacted away.
            // Remove it and regenerate the iterator.
            m_blockMap.erase(block->address());
            m_blockIterator = m_blockMap.find(nextBlock->address());
            m_blockEnd = m_blockMap.end();
            assert(m_blockIterator != m_blockMap.end());
        }
    }
}
Example #3
bool CopyOnWriteContext::notifyTupleDelete(TableTuple &tuple) {
    assert(m_iterator != NULL);

    if (tuple.isDirty() || m_finishedTableScan) {
        return true;
    }

    /**
     * Find out which block contains the address. lower_bound returns the first entry
     * in the index >= the address. Unless the address is exactly equal to a block's base
     * address, the block we are looking for is the previous entry. Check whether the
     * address fits in that previous entry; if it doesn't, the block is something new.
     */
    TBPtr block = PersistentTable::findBlock(tuple.address(), m_blocks, getTable().getTableAllocationSize());
    if (block.get() == NULL) {
        // tuple not in snapshot region, don't care about this tuple
        return true;
    }

    /**
     * Now check where this is relative to the COWIterator.
     */
    CopyOnWriteIterator *iter = static_cast<CopyOnWriteIterator*>(m_iterator.get());
    return !iter->needToDirtyTuple(block->address(), tuple.address());
}
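
The findBlock lookup the comment describes maps naturally onto std::map::lower_bound. Below is an illustrative sketch of that lookup, not the actual PersistentTable::findBlock source; TBMap and the stubbed TupleBlock are assumptions standing in for the real types:

#include <map>
#include <memory>

struct TupleBlock {};                        // stub; the real block holds tuple storage
typedef std::shared_ptr<TupleBlock> TBPtr;   // assumed: ref-counted block handle
typedef std::map<char*, TBPtr> TBMap;        // assumed: ordered map keyed by base address

// Illustrative lookup matching the comment: lower_bound finds the first
// block whose base address is >= the tuple address; unless they are equal,
// the containing block (if any) is the previous entry, and the address must
// fall within that block's allocation.
TBPtr findBlockSketch(char *address, TBMap &blocks, long blockSize) {
    TBMap::iterator it = blocks.lower_bound(address);
    if (it != blocks.end() && it->first == address) {
        return it->second;                   // address is exactly a block base
    }
    if (it == blocks.begin()) {
        return TBPtr();                      // nothing precedes it; not tracked
    }
    --it;                                    // candidate containing block
    if (address < it->first + blockSize) {
        return it->second;                   // inside the previous block
    }
    return TBPtr();                          // between blocks; not tracked
}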
Example #4
void CopyOnWriteContext::markTupleDirty(TableTuple tuple, bool newTuple) {
    assert(m_iterator != NULL);

    if (newTuple) {
        m_inserts++;
    }
    else {
        m_updates++;
    }

    /**
     * If this is an update or delete of a tuple that is already dirty then no further action is
     * required.
     */
    if (!newTuple && tuple.isDirty()) {
        return;
    }

    /**
     * If the table has already been scanned there is no need to continue marking tuples dirty.
     * If the tuple is dirty then it has already been backed up.
     */
    if (m_finishedTableScan) {
        tuple.setDirtyFalse();
        return;
    }

    /**
     * Find out which block the address is contained in.
     */
    TBPtr block = PersistentTable::findBlock(tuple.address(), m_blocks, getTable().getTableAllocationSize());
    if (block.get() == NULL) {
        // tuple not in snapshot region, don't care about this tuple, no need to dirty it
        tuple.setDirtyFalse();
        return;
    }

    /**
     * Now check where this is relative to the COWIterator.
     */
    CopyOnWriteIterator *iter = static_cast<CopyOnWriteIterator*>(m_iterator.get());
    if (iter->needToDirtyTuple(block->address(), tuple.address())) {
        tuple.setDirtyTrue();
        /**
         * Don't back up a newly introduced tuple, just mark it as dirty.
         */
        if (!newTuple) {
            m_backedUpTuples->insertTupleNonVirtualWithDeepCopy(tuple, &m_pool);
        }
    } else {
        tuple.setDirtyFalse();
        return;
    }
}
Example #5
/**
 * When a tuple is "dirty" it is still active, but it will never be a "found" tuple
 * since it is skipped. The tuple may be dirty because it was deleted (which is why it is always
 * skipped). In that case the CopyOnWriteContext calls this to ensure that the iteration finds the
 * correct number of tuples in the used portion of the table blocks and doesn't overrun into the
 * uninitialized block memory because it skipped a dirty tuple and didn't end up with the right
 * found-tuple count upon reaching the end.
 */
bool CopyOnWriteIterator::needToDirtyTuple(char *tupleAddress) {
    if (m_tableEmpty) {
        // The snapshot was activated when the table was empty.
        // The tuple is not in the snapshot region; don't care about this tuple.
        assert(m_currentBlock == NULL);
        return false;
    }
    /**
     * Find out which block contains the address. lower_bound returns the first entry
     * in the index >= the address. Unless the address is exactly equal to a block's base
     * address, the block we are looking for is the previous entry. Check whether the
     * address fits in that previous entry; if it doesn't, the block is something new.
     */
    TBPtr block = PersistentTable::findBlock(tupleAddress, m_blocks, m_table->getTableAllocationSize());
    if (block.get() == NULL) {
        // tuple not in snapshot region, don't care about this tuple
        return false;
    }

    assert(m_currentBlock != NULL);

    /**
     * Now check where this is relative to the COWIterator.
     */
    const char *blockAddress = block->address();
    if (blockAddress > m_currentBlock->address()) {
        return true;
    }

    assert(blockAddress == m_currentBlock->address());
    return tupleAddress >= m_location;
}
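
To make the two-level comparison concrete, here is a standalone illustration (not VoltDB code) using made-up addresses: a tuple still needs to be dirtied when its block lies after the iterator's current block, or when it sits at or beyond the iterator's position inside the current block.

#include <cassert>

// Standalone restatement of the ordering test above: "ahead of the scan"
// means the iterator has not yet visited this tuple.
bool aheadOfScan(const char *blockAddress, const char *tupleAddress,
                 const char *currentBlockAddress, const char *location) {
    if (blockAddress > currentBlockAddress) {
        return true;                       // block not yet reached
    }
    assert(blockAddress == currentBlockAddress);
    return tupleAddress >= location;       // not yet passed within the block
}

int main() {
    char storage[64];
    // The iterator is at offset 16 within the only block.
    assert(aheadOfScan(storage, storage + 32, storage, storage + 16));  // ahead
    assert(!aheadOfScan(storage, storage + 8, storage, storage + 16));  // behind
    return 0;
}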
Example #6
std::pair<int, int> TupleBlock::merge(Table *table, TBPtr source) {
    assert(source != this);
    /*
      std::cout << "Attempting to merge " << static_cast<void*> (this)
                << "(" << m_activeTuples << ") with " << static_cast<void*>(source.get())
                << "(" << source->m_activeTuples << ")";
      std::cout << " source last compaction offset is " << source->lastCompactionOffset()
                << " and active tuple count is " << source->m_activeTuples << std::endl;
    */

    uint32_t nextTupleInSourceOffset = source->lastCompactionOffset();
    int sourceTuplesPendingDeleteOnUndoRelease = 0;
    while (hasFreeTuples() && !source->isEmpty()) {
        TableTuple sourceTuple(table->schema());
        TableTuple destinationTuple(table->schema());

        bool foundSourceTuple = false;
        //Iterate further into the block looking for active tuples
        //Stop when running into the unused tuple boundary
        while (nextTupleInSourceOffset < source->unusedTupleBoundry()) {
            sourceTuple.move(&source->address()[m_tupleLength * nextTupleInSourceOffset]);
            nextTupleInSourceOffset++;
            if (sourceTuple.isActive()) {
                foundSourceTuple = true;
                break;
            }
        }

        if (!foundSourceTuple) {
            //The block isn't empty, but there are no more active tuples.
            //Some of the tuples that make it register as not empty must have been
            //pending delete and those aren't mergeable
            assert(sourceTuplesPendingDeleteOnUndoRelease);
            break;
        }

        //Can't move a tuple with a pending undo action, it would invalidate the pointer
        //Keep a count so that the block can be notified of the number
        //of tuples pending delete on undo release when calculating the correct bucket
        //index. If all the active tuples are pending delete on undo release the block
        //is effectively empty and shouldn't be considered for merge ops.
        //It will be completely discarded once the undo log releases the block.
        if (sourceTuple.isPendingDeleteOnUndoRelease()) {
            sourceTuplesPendingDeleteOnUndoRelease++;
            continue;
        }

        destinationTuple.move(nextFreeTuple().first);
        table->swapTuples(sourceTuple, destinationTuple);
        source->freeTuple(sourceTuple.address());
    }
    source->lastCompactionOffset(nextTupleInSourceOffset);

    int newBucketIndex = calculateBucketIndex();
    if (newBucketIndex != m_bucketIndex) {
        m_bucketIndex = newBucketIndex;
        //std::cout << "Merged " << static_cast<void*> (this) << "(" << m_activeTuples << ") with " << static_cast<void*>(source.get())  << "(" << source->m_activeTuples << ")";
        //std::cout << " found " << sourceTuplesPendingDeleteOnUndoRelease << " tuples pending delete on undo release "<< std::endl;
        return std::pair<int, int>(newBucketIndex, source->calculateBucketIndex(sourceTuplesPendingDeleteOnUndoRelease));
    } else {
        //std::cout << "Merged " << static_cast<void*> (this) << "(" << m_activeTuples << ") with " << static_cast<void*>(source.get()) << "(" << source->m_activeTuples << ")";
        //std::cout << " found " << sourceTuplesPendingDeleteOnUndoRelease << " tuples pending delete on undo release "<< std::endl;
        return std::pair<int, int>(-1, source->calculateBucketIndex(sourceTuplesPendingDeleteOnUndoRelease));
    }
}
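
The pair returned by merge encodes two rebucketing decisions: first is the destination block's new bucket index, or -1 if it did not change; second is the source block's index recalculated as if its tuples pending delete on undo release were already gone. A hypothetical caller might consume it as sketched below; the stub types, the placeholder merge body, and the rebucket helper are illustrative, not VoltDB API:

#include <iostream>
#include <memory>
#include <utility>

// Stubs so the calling pattern compiles; the real types live in VoltDB's
// storage layer.
class Table {};
struct TupleBlock;
typedef std::shared_ptr<TupleBlock> TBPtr;
struct TupleBlock {
    // Same return contract as the real merge: {destination bucket or -1,
    // source bucket}. The body is a placeholder.
    std::pair<int, int> merge(Table *, TBPtr) { return std::make_pair(-1, 3); }
};

// Hypothetical helper, not VoltDB API: move a block into a size bucket.
static void rebucket(const TBPtr &, int newIndex) {
    std::cout << "rebucket to " << newIndex << std::endl;
}

int main() {
    Table table;
    TBPtr destination(new TupleBlock());
    TBPtr source(new TupleBlock());

    std::pair<int, int> indices = destination->merge(&table, source);
    if (indices.first != -1) {
        rebucket(destination, indices.first);  // destination changed buckets
    }
    rebucket(source, indices.second);          // source is always recalculated
    return 0;
}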