Example #1
bool CopyOnWriteContext::notifyTupleDelete(TableTuple &tuple) {
    assert(m_iterator != NULL);

    if (tuple.isDirty() || m_finishedTableScan) {
        return true;
    }

    /**
     * Find out which block the address is contained in. Lower bound returns the first entry
     * in the index >= the address. Unless the address is exactly equal to a block's start,
     * the block we are looking for is the previous entry. Check whether the address fits
     * in that previous entry; if it doesn't, the block is something new.
     */
    TBPtr block = PersistentTable::findBlock(tuple.address(), m_blocks, getTable().getTableAllocationSize());
    if (block.get() == NULL) {
        // tuple not in snapshot region, don't care about this tuple
        return true;
    }

    /**
     * Now check where this is relative to the COWIterator.
     */
    CopyOnWriteIterator *iter = reinterpret_cast<CopyOnWriteIterator*>(m_iterator.get());
    return !iter->needToDirtyTuple(block->address(), tuple.address());
}
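Examples #1 and #3 delegate the block lookup to PersistentTable::findBlock, while Examples #7 and #9 below inline the same lower_bound search by hand. A minimal sketch of what such a helper could look like, assuming a TBMap keyed by block start address; the body is a reconstruction from the inline versions, not the actual implementation:

// Sketch only (assumption): locate the block containing `address`, or return an
// empty TBPtr if the address falls outside every block tracked in `blocks`.
TBPtr PersistentTable::findBlock(char *address, TBMap &blocks, int blockSize) {
    // lower_bound returns the first entry whose key (block start) is >= address.
    TBMapI i = blocks.lower_bound(address);
    if (i != blocks.end() && i.key() == address) {
        // Exact hit: the address is the start of a block.
        return i.data();
    }
    if (i == blocks.begin()) {
        // No earlier block exists (also covers an empty map).
        return TBPtr();
    }
    // Otherwise the candidate is the previous entry; check that the address
    // actually falls inside it.
    i--;
    if (address >= i.key() + blockSize) {
        return TBPtr();
    }
    return i.data();
}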
/**
 * Iterate through the table blocks until all the active tuples have been found. Skip dirty tuples
 * and mark them as clean so that they can be copied during the next snapshot.
 */
bool CopyOnWriteIterator::next(TableTuple &out) {
    assert(m_currentBlock != NULL);
    while (true) {
        if (m_blockOffset >= m_currentBlock->unusedTupleBoundry()) {
            if (m_blockIterator == m_end) {
                m_table->snapshotFinishedScanningBlock(m_currentBlock, TBPtr());
                break;
            }
            m_table->snapshotFinishedScanningBlock(m_currentBlock, m_blockIterator.data());
            m_location = m_blockIterator.key();
            m_currentBlock = m_blockIterator.data();
            assert(m_currentBlock->address() == m_location);
            m_blockIterator.data() = TBPtr();
            m_blockOffset = 0;
            m_blockIterator++;
        }
        assert(m_location < m_currentBlock.get()->address() + m_table->m_tableAllocationSize);
        assert(m_location < m_currentBlock.get()->address() + (m_table->m_tupleLength * m_table->m_tuplesPerBlock));
        assert (out.sizeInValues() == m_table->columnCount());
        m_blockOffset++;
        out.move(m_location);
        const bool active = out.isActive();
        const bool dirty = out.isDirty();
        // Return this tuple only if it is active (not marked as deleted) and not dirty
        if (active && !dirty) {
            out.setDirtyFalse();
            m_location += m_tupleLength;
            return true;
        } else {
            out.setDirtyFalse();
            m_location += m_tupleLength;
        }
    }
    return false;
}
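For orientation, a hypothetical driver loop for this iterator might look as follows; the table, iterator and serializer names below are illustrative placeholders, not part of the source:

// Hypothetical caller (illustrative only): pull clean, active tuples out of the
// COW iterator until the scan is exhausted, serializing each one.
TableTuple tuple(m_table->schema());
while (m_iterator->next(tuple)) {
    // next() has already cleared the dirty flag and advanced past this tuple,
    // so the snapshot only has to write it out.
    output.writeTuple(tuple);   // placeholder for the real serialization call
}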
Example #3
void CopyOnWriteContext::markTupleDirty(TableTuple tuple, bool newTuple) {
    assert(m_iterator != NULL);

    if (newTuple) {
        m_inserts++;
    }
    else {
        m_updates++;
    }

    /**
     * If this is an update or a delete of a tuple that is already dirty then no further action is
     * required.
     */
    if (!newTuple && tuple.isDirty()) {
        return;
    }

    /**
     * If the table has been scanned already there is no need to continue marking tuples dirty.
     * If the tuple is dirty then it has already been backed up.
     */
    if (m_finishedTableScan) {
        tuple.setDirtyFalse();
        return;
    }

    /**
     * Find out which block the address is contained in.
     */
    TBPtr block = PersistentTable::findBlock(tuple.address(), m_blocks, getTable().getTableAllocationSize());
    if (block.get() == NULL) {
        // tuple not in snapshot region, don't care about this tuple, no need to dirty it
        tuple.setDirtyFalse();
        return;
    }

    /**
     * Now check where this is relative to the COWIterator.
     */
    CopyOnWriteIterator *iter = reinterpret_cast<CopyOnWriteIterator*>(m_iterator.get());
    if (iter->needToDirtyTuple(block->address(), tuple.address())) {
        tuple.setDirtyTrue();
        /**
         * Don't back up a newly introduced tuple, just mark it as dirty.
         */
        if (!newTuple) {
            m_backedUpTuples->insertTupleNonVirtualWithDeepCopy(tuple, &m_pool);
        }
    } else {
        tuple.setDirtyFalse();
        return;
    }
}
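Both notifyTupleDelete (Example #1) and markTupleDirty above hinge on CopyOnWriteIterator::needToDirtyTuple, whose body is not shown in these snippets. A minimal sketch of the two-argument form, under the assumptions that blocks are scanned in ascending address order (the TBMap is keyed by block address) and that m_location is the address of the next tuple the scan will visit; this is a reconstruction, not the source implementation:

// Sketch only (assumption): a tuple needs to be dirtied and backed up exactly
// when the COW scan has not serialized it yet.
bool CopyOnWriteIterator::needToDirtyTuple(char *blockAddress, char *tupleAddress) {
    // The callers have already verified that the tuple's block belongs to the
    // snapshot, so only the position relative to the scan matters here.
    if (blockAddress > m_currentBlock->address()) {
        // Block has not been reached yet: dirty and back up the tuple.
        return true;
    }
    if (blockAddress < m_currentBlock->address()) {
        // Block was already scanned in full: the old value is in the snapshot.
        return false;
    }
    // Same block as the scan: dirty only the tuples the scan has not passed yet.
    return tupleAddress >= m_location;
}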
Example #4
/**
 * Iterate through the table blocks until all the active tuples have been found. Skip dirty tuples
 * and mark them as clean so that they can be copied during the next snapshot.
 */
bool CopyOnWriteIterator::next(TableTuple &out) {
    if (m_currentBlock == NULL) {
        return false;
    }
    while (true) {
        if (m_blockOffset >= m_currentBlock->unusedTupleBoundary()) {
            if (m_blockIterator == m_end) {
                m_surgeon->snapshotFinishedScanningBlock(m_currentBlock, TBPtr());
                break;
            }
            m_surgeon->snapshotFinishedScanningBlock(m_currentBlock, m_blockIterator.data());

            char *finishedBlock = m_currentBlock->address();

            m_location = m_blockIterator.key();
            m_currentBlock = m_blockIterator.data();
            assert(m_currentBlock->address() == m_location);
            m_blockOffset = 0;

            // Remove the finished block from the map so that it can be released
            // back to the OS if all tuples in the block are deleted.
            //
            // This invalidates the iterators, so we have to get new iterators
            // using the current block's start address. m_blockIterator has to
            // point to the next block, hence the upper_bound() call.
            m_blocks.erase(finishedBlock);
            m_blockIterator = m_blocks.upper_bound(m_currentBlock->address());
            m_end = m_blocks.end();
        }
        assert(m_location < m_currentBlock.get()->address() + m_table->getTableAllocationSize());
        assert(m_location < m_currentBlock.get()->address() + (m_table->getTupleLength() * m_table->getTuplesPerBlock()));
        assert (out.columnCount() == m_table->columnCount());
        m_blockOffset++;
        out.move(m_location);
        const bool active = out.isActive();
        const bool dirty = out.isDirty();

        if (dirty) m_skippedDirtyRows++;
        if (!active) m_skippedInactiveRows++;

        // Return this tuple only if it is active (not marked as deleted) and not dirty
        if (active && !dirty) {
            out.setDirtyFalse();
            m_location += m_tupleLength;
            return true;
        } else {
            out.setDirtyFalse();
            m_location += m_tupleLength;
        }
    }
    return false;
}
Example #5
bool CopyOnWriteContext::notifyTupleDelete(TableTuple &tuple) {
    assert(m_iterator != NULL);

    if (tuple.isDirty() || m_finishedTableScan) {
        return true;
    }
    // This is a 'loose' count of the number of deletes because COWIterator could be past this
    // point in the block.
    m_deletes++;

    /**
     * Now check where this is relative to the COWIterator.
     */
    CopyOnWriteIterator *iter = reinterpret_cast<CopyOnWriteIterator*>(m_iterator.get());
    return !iter->needToDirtyTuple(tuple.address());
}
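In Example #5 above and Example #6 below, the context no longer looks up the block before calling needToDirtyTuple; the single-argument overload takes just the tuple address. A sketch of how it could work when paired with the iterator of Example #4, which erases fully scanned blocks from its m_blocks map; again this is an assumption, not the source implementation:

// Sketch only (assumption): single-argument variant that locates the block itself.
bool CopyOnWriteIterator::needToDirtyTuple(char *tupleAddress) {
    char *blockStart = m_currentBlock->address();
    if (tupleAddress >= blockStart &&
        tupleAddress < blockStart + m_table->getTableAllocationSize()) {
        // Tuple lives in the block being scanned right now: it only needs to
        // be dirtied if the scan has not passed it yet.
        return tupleAddress >= m_location;
    }
    // Every other block is either already scanned (erased from m_blocks, see
    // Example #4), allocated after the snapshot started (never in m_blocks),
    // or still pending. Only the pending case requires dirtying the tuple.
    TBPtr block = PersistentTable::findBlock(tupleAddress, m_blocks,
                                             m_table->getTableAllocationSize());
    return block.get() != NULL;
}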
Example #6
void CopyOnWriteContext::markTupleDirty(TableTuple tuple, bool newTuple) {
    assert(m_iterator != NULL);

    /**
     * If this is an update or a delete of a tuple that is already dirty then no further action is
     * required.
     */
    if (!newTuple && tuple.isDirty()) {
        return;
    }

    /**
     * If the table has been scanned already there is no need to continue marking tuples dirty.
     * If the tuple is dirty then it has already been backed up.
     */
    if (m_finishedTableScan) {
        tuple.setDirtyFalse();
        return;
    }

    /**
     * Now check where this is relative to the COWIterator.
     */
    CopyOnWriteIterator *iter = reinterpret_cast<CopyOnWriteIterator*>(m_iterator.get());
    if (iter->needToDirtyTuple(tuple.address())) {
        tuple.setDirtyTrue();

        if (newTuple) {
            /**
             * Don't back up a newly introduced tuple, just mark it as dirty.
             */
            m_inserts++;
        }
        else {
            m_updates++;
            m_backedUpTuples->insertTempTupleDeepCopy(tuple, &m_pool);
        }
    } else {
        tuple.setDirtyFalse();
        return;
    }
}
Example #7
bool CopyOnWriteContext::canSafelyFreeTuple(TableTuple tuple) {
    assert(m_iterator != NULL);

    if (tuple.isDirty() || m_finishedTableScan) {
        return true;
    }

    /**
     * Find out which block the address is contained in. Lower bound returns the first entry
     * in the index >= the address. Unless the address is exactly equal to a block's start,
     * the block we are looking for is the previous entry. Check whether the address fits
     * in that previous entry; if it doesn't, the block is something new.
     */
    char *address = tuple.address();
    TBMapI i = m_blocks.lower_bound(address);
    if (i == m_blocks.end() && m_blocks.empty()) {
        return true;
    }
    if (i == m_blocks.end()) {
        i--;
        if (i.key() + getTable().m_tableAllocationSize < address) {
            return true;
        }
        //OK it is in the very last block
    } else {
        if (i.key() != address) {
            i--;
            if (i.key() + getTable().m_tableAllocationSize < address) {
                return true;
            }
            //OK... this is in this particular block
        }
    }

    const char *blockStartAddress = i.key();

    /**
     * Now check where this is relative to the COWIterator.
     */
    CopyOnWriteIterator *iter = reinterpret_cast<CopyOnWriteIterator*>(m_iterator.get());
    return !iter->needToDirtyTuple(blockStartAddress, address);
}
Example #8
/**
 * Get the next tuple or return false if none is available.
 */
bool ElasticScanner::next(TableTuple &out)
{
    bool found = false;
    while (!found && continueScan()) {
        assert(m_currentBlockPtr != NULL);
        // Sanity checks.
        assert(m_tuplePtr < m_currentBlockPtr.get()->address() + m_table.getTableAllocationSize());
        assert(m_tuplePtr < m_currentBlockPtr.get()->address() + (m_tupleSize * m_table.getTuplesPerBlock()));
        assert (out.sizeInValues() == m_table.columnCount());
        // Grab the tuple pointer.
        out.move(m_tuplePtr);
        // Shift to the next tuple in block.
        // continueScan() will check if it's the last one in the block.
        m_tupleIndex++;
        m_tuplePtr += m_tupleSize;
        // The next active/non-dirty tuple is return-worthy.
        found = out.isActive() && !out.isDirty();
    }
    return found;
}
Example #9
void CopyOnWriteContext::markTupleDirty(TableTuple tuple, bool newTuple) {
    if (newTuple) {
        m_inserts++;
    }
    else {
        m_updates++;
    }

    /**
     * If this is an update or a delete of a tuple that is already dirty then no further action is
     * required.
     */
    if (!newTuple && tuple.isDirty()) {
        return;
    }

    /**
     * If the table has been scanned already there is no need to continue marking tuples dirty.
     * If the tuple is dirty then it has already been backed up.
     */
    if (m_finishedTableScan) {
        tuple.setDirtyFalse();
        return;
    }

    /**
     * Find out which block the address is contained in.
     */
    char *address = tuple.address();
    TBMapI i = m_blocks.lower_bound(address);
    if (i == m_blocks.end() && m_blocks.empty()) {
        tuple.setDirtyFalse();
        return;
    }
    if (i == m_blocks.end()) {
        i--;
        if (i.key() + m_table.m_tableAllocationSize < address) {
            tuple.setDirtyFalse();
            return;
        }
        //OK it is in the very last block
    } else {
        if (i.key() != address) {
            i--;
            if (i.key() + m_table.m_tableAllocationSize < address) {
                tuple.setDirtyFalse();
                return;
            }
            //OK... this is in this particular block
        }
    }

    const char *blockStartAddress = i.key();

    /**
     * Now check where this is relative to the COWIterator.
     */
    CopyOnWriteIterator *iter = reinterpret_cast<CopyOnWriteIterator*>(m_iterator.get());
    if (iter->needToDirtyTuple(blockStartAddress, address)) {
        tuple.setDirtyTrue();
        /**
         * Don't back up a newly introduced tuple, just mark it as dirty.
         */
        if (!newTuple) {
            m_backedUpTuples->insertTupleNonVirtualWithDeepCopy(tuple, &m_pool);
        }
    } else {
        tuple.setDirtyFalse();
        return;
    }
}
Example #10
void CopyOnWriteContext::markTupleDirty(TableTuple tuple, bool newTuple) {
    /**
     * If this is an update or a delete of a tuple that is already dirty then no further action is
     * required.
     */
    if (!newTuple && tuple.isDirty()) {
        return;
    }

    /**
     * If the table has been scanned already there is no need to continue marking tuples dirty.
     * If the tuple is dirty then it has already been backed up.
     */
    if (m_finishedTableScan) {
        tuple.setDirtyFalse();
        return;
    }

    /**
     * Find out which block the address is contained in.
     */
    char *address = tuple.address();
#ifdef MEMCHECK
    BlockPair compP;
    compP.pair = std::pair<char*, int>(address, 0);
    compP.tupleLength = tuple.tupleLength();
#else
    const BlockPair compP(address, 0);
#endif
    BlockPairVectorI i =
            std::lower_bound(m_blocks.begin(), m_blocks.end(), compP, pairAddressToPairAddressComparator);
    if (i == m_blocks.end()) {
        tuple.setDirtyFalse();
        return;
    }
#ifdef MEMCHECK
    const char *blockStartAddress = (*i).pair.first;
    const int blockIndex = (*i).pair.second;
    const char *blockEndAddress = blockStartAddress + tuple.tupleLength();
#else
    const char *blockStartAddress = (*i).first;
    const int blockIndex = (*i).second;
    const char *blockEndAddress = blockStartAddress + TABLE_BLOCKSIZE;
#endif

    if (address >= blockEndAddress || address < blockStartAddress) {
        /**
         * Tuple is in a block allocated after the start of COW
         */
        tuple.setDirtyFalse();
        return;
    }

    /**
     * Now check where this is relative to the COWIterator.
     */
    CopyOnWriteIterator *iter = reinterpret_cast<CopyOnWriteIterator*>(m_iterator.get());
    if (iter->needToDirtyTuple(blockIndex, address, newTuple)) {
        tuple.setDirtyTrue();
        /**
         * Don't back up a newly introduced tuple, just mark it as dirty.
         */
        if (!newTuple) {
            m_backedUpTuples->insertTupleNonVirtualWithDeepCopy(tuple, &m_pool);
        }
    } else {
        tuple.setDirtyFalse();
        return;
    }
}