Code example #1
File: DRTupleStream.cpp Project: yf8848/voltdb
size_t DRTupleStream::computeOffsets(DRRecordType &type,
        const std::pair<const TableIndex*, uint32_t> &indexPair,
        TableTuple &tuple,
        size_t &rowHeaderSz,
        size_t &rowMetadataSz,
        const std::vector<int> *&interestingColumns) {
    interestingColumns = NULL;
    rowMetadataSz = sizeof(int32_t);
    int columnCount;
    switch (type) {
    case DR_RECORD_DELETE:
    case DR_RECORD_UPDATE:
        if (indexPair.first) {
            // The index-optimized versions of these types have values exactly
            // 5 larger than the unoptimized versions (asserted in test)
            // DR_RECORD_DELETE => DR_RECORD_DELETE_BY_INDEX
            // DR_RECORD_UPDATE => DR_RECORD_UPDATE_BY_INDEX
            type = static_cast<DRRecordType>((int)type + 5);
            interestingColumns = &(indexPair.first->getColumnIndices());
            rowMetadataSz += sizeof(int32_t);
            columnCount = static_cast<int>(interestingColumns->size());
        } else {
            columnCount = tuple.sizeInValues();
        }
        break;
    default:
        columnCount = tuple.sizeInValues();
        break;
    }
    // Round the column count up to the next multiple of 8, then divide by 8 to get the null-mask byte count.
    int nullMaskLength = ((columnCount + 7) & -8) >> 3;
    rowHeaderSz = rowMetadataSz + nullMaskLength;
    return rowHeaderSz + tuple.maxDRSerializationSize(interestingColumns);
}
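A minimal standalone sketch (not part of the VoltDB sources) of the null-mask arithmetic used in computeOffsets above: (columnCount + 7) & -8 rounds the column count up to the next multiple of 8, and >> 3 converts that bit count into bytes for the per-row null mask.

#include <cassert>

// Illustration only: ((columnCount + 7) & -8) >> 3 from computeOffsets().
static int nullMaskBytes(int columnCount) {
    return ((columnCount + 7) & -8) >> 3;
}

int main() {
    assert(nullMaskBytes(1)  == 1);   // 1..8 columns fit in a single null-mask byte
    assert(nullMaskBytes(8)  == 1);
    assert(nullMaskBytes(9)  == 2);   // 9..16 columns need two bytes
    assert(nullMaskBytes(13) == 2);   // (13 + 7) & -8 == 16; 16 >> 3 == 2
    return 0;
}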
Code example #2
/**
 * Iterate through the table blocks until all the active tuples have been found. Skip dirty tuples
 * and mark them as clean so that they can be copied during the next snapshot.
 */
bool CopyOnWriteIterator::next(TableTuple &out) {
    assert(m_currentBlock != NULL);
    while (true) {
        if (m_blockOffset >= m_currentBlock->unusedTupleBoundry()) {
            if (m_blockIterator == m_end) {
                m_table->snapshotFinishedScanningBlock(m_currentBlock, TBPtr());
                break;
            }
            m_table->snapshotFinishedScanningBlock(m_currentBlock, m_blockIterator.data());
            m_location = m_blockIterator.key();
            m_currentBlock = m_blockIterator.data();
            assert(m_currentBlock->address() == m_location);
            m_blockIterator.data() = TBPtr();
            m_blockOffset = 0;
            m_blockIterator++;
        }
        assert(m_location < m_currentBlock.get()->address() + m_table->m_tableAllocationSize);
        assert(m_location < m_currentBlock.get()->address() + (m_table->m_tupleLength * m_table->m_tuplesPerBlock));
        assert (out.sizeInValues() == m_table->columnCount());
        m_blockOffset++;
        out.move(m_location);
        const bool active = out.isActive();
        const bool dirty = out.isDirty();
        // Return this tuple only if it is active (not marked deleted) and not dirty
        if (active && !dirty) {
            out.setDirtyFalse();
            m_location += m_tupleLength;
            return true;
        } else {
            out.setDirtyFalse();
            m_location += m_tupleLength;
        }
    }
    return false;
}
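For context, a hedged sketch of how a caller might drive next() during a snapshot scan. Only the bool next(TableTuple &) contract is taken from the code above; the schema accessor and serializeTo() call are assumptions for illustration, not a verified VoltDB API.

// Sketch only: consuming CopyOnWriteIterator::next() in a snapshot scan loop.
// table->schema() and tuple.serializeTo(out) are assumed helpers.
void scanForSnapshot(PersistentTable *table, CopyOnWriteIterator &iter,
                     ReferenceSerializeOutput &out) {
    TableTuple tuple(table->schema());
    while (iter.next(tuple)) {
        // next() only hands back active, non-dirty tuples and has already
        // cleared the dirty flag, so the tuple can be serialized directly.
        tuple.serializeTo(out);
    }
    // next() returned false: every block has been scanned and handed back
    // through snapshotFinishedScanningBlock().
}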
Code example #3
size_t CompatibleDRTupleStream::computeOffsets(DRRecordType &type,
        TableTuple &tuple,
        size_t &rowHeaderSz,
        size_t &rowMetadataSz) {
    rowMetadataSz = sizeof(int32_t);
    int columnCount;
    switch (type) {
    case DR_RECORD_DELETE:
    case DR_RECORD_UPDATE:
        columnCount = tuple.sizeInValues();
        break;
    default:
        columnCount = tuple.sizeInValues();
        break;
    }
    // Round the column count up to the next multiple of 8, then divide by 8 to get the null-mask byte count.
    int nullMaskLength = ((columnCount + 7) & -8) >> 3;
    rowHeaderSz = rowMetadataSz + nullMaskLength;
    return rowHeaderSz + tuple.maxDRSerializationSize();
}
Code example #4
/**
 * Iterate through the table blocks until all the active tuples have been found. Skip dirty tuples
 * and mark them as clean so that they can be copied during the next snapshot.
 */
bool CopyOnWriteIterator::next(TableTuple &out) {
    if (m_currentBlock == NULL) {
        return false;
    }
    while (true) {
        if (m_blockOffset >= m_currentBlock->unusedTupleBoundry()) {
            if (m_blockIterator == m_end) {
                m_surgeon->snapshotFinishedScanningBlock(m_currentBlock, TBPtr());
                break;
            }
            m_surgeon->snapshotFinishedScanningBlock(m_currentBlock, m_blockIterator.data());

            char *finishedBlock = m_currentBlock->address();

            m_location = m_blockIterator.key();
            m_currentBlock = m_blockIterator.data();
            assert(m_currentBlock->address() == m_location);
            m_blockOffset = 0;

            // Remove the finished block from the map so that it can be released
            // back to the OS once all tuples in the block have been deleted.
            //
            // This invalidates the iterators, so we have to get new iterators
            // using the current block's start address. m_blockIterator has to
            // point to the next block, hence the upper_bound() call.
            m_blocks.erase(finishedBlock);
            m_blockIterator = m_blocks.upper_bound(m_currentBlock->address());
            m_end = m_blocks.end();
        }
        assert(m_location < m_currentBlock.get()->address() + m_table->getTableAllocationSize());
        assert(m_location < m_currentBlock.get()->address() + (m_table->getTupleLength() * m_table->getTuplesPerBlock()));
        assert (out.sizeInValues() == m_table->columnCount());
        m_blockOffset++;
        out.move(m_location);
        const bool active = out.isActive();
        const bool dirty = out.isDirty();
        // Return this tuple only if it is active (not marked deleted) and not dirty
        if (active && !dirty) {
            out.setDirtyFalse();
            m_location += m_tupleLength;
            return true;
        } else {
            out.setDirtyFalse();
            m_location += m_tupleLength;
        }
    }
    return false;
}
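The erase-and-reacquire step in the block-advance branch above can be shown with a plain std::map; this is a standalone illustration of the iterator-invalidation handling, not the actual block-map type used by the table.

#include <map>

// Illustration only: erasing the finished block invalidates map iterators,
// so the scan position is re-derived from the current block's address.
typedef std::map<char*, int> BlockMap;   // int stands in for TBPtr here

void advancePastFinishedBlock(BlockMap &blocks, char *finishedBlock,
                              char *currentBlockAddress,
                              BlockMap::iterator &blockIterator,
                              BlockMap::iterator &end) {
    blocks.erase(finishedBlock);                             // block can now be released
    blockIterator = blocks.upper_bound(currentBlockAddress); // first block after the current one
    end = blocks.end();                                      // end() must be refreshed as well
}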
Code example #5
size_t
DRTupleStream::computeOffsets(TableTuple &tuple,
                                   size_t *rowHeaderSz)
{
    // Round the column count up to the next multiple of 8, then divide by 8 (bits to bytes)
    const int columnCount = tuple.sizeInValues();
    int nullMaskLength = ((columnCount + 7) & -8) >> 3;

    // row header is 32-bit length of row plus null mask
    *rowHeaderSz = sizeof(int32_t) + nullMaskLength;

    // Can return 0 for a single-column varchar with a null value
    size_t dataSz = tuple.maxExportSerializationSize();

    return *rowHeaderSz + dataSz;
}
Code example #6
File: ElasticScanner.cpp Project: migue/voltdb
/**
 * Get the next tuple or return false if none is available.
 */
bool ElasticScanner::next(TableTuple &out)
{
    bool found = false;
    while (!found && continueScan()) {
        assert(m_currentBlockPtr != NULL);
        // Sanity checks.
        assert(m_tuplePtr < m_currentBlockPtr.get()->address() + m_table.getTableAllocationSize());
        assert(m_tuplePtr < m_currentBlockPtr.get()->address() + (m_tupleSize * m_table.getTuplesPerBlock()));
        assert (out.sizeInValues() == m_table.columnCount());
        // Grab the tuple pointer.
        out.move(m_tuplePtr);
        // Shift to the next tuple in block.
        // continueScan() will check if it's the last one in the block.
        m_tupleIndex++;
        m_tuplePtr += m_tupleSize;
        // The next active/non-dirty tuple is return-worthy.
        found = out.isActive() && !out.isDirty();
    }
    return found;
}
Code example #7
size_t
TupleStreamWrapper::computeOffsets(TableTuple &tuple,
                                   size_t *rowHeaderSz)
{
    // Round the column count up to the next multiple of 8, then divide by 8 (bits to bytes)
    int columnCount = tuple.sizeInValues() + METADATA_COL_CNT;
    int nullMaskLength = ((columnCount + 7) & -8) >> 3;

    // row header is 32-bit length of row plus null mask
    *rowHeaderSz = sizeof (int32_t) + nullMaskLength;

    // metadata column width: 5 int64_ts plus CHAR(1).
    size_t metadataSz = (sizeof (int64_t) * 5) + 1;

    // Returns 0 if a corrupt tuple is detected
    size_t dataSz = tuple.maxExportSerializationSize();
    if (dataSz == 0) {
        throwFatalException("Invalid tuple passed to computeTupleMaxLength. Crashing System.");
    }

    return *rowHeaderSz + metadataSz + dataSz;
}
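A worked instance of the arithmetic above for a hypothetical table with 10 user columns, assuming METADATA_COL_CNT is 6 (the five int64_t metadata columns plus the CHAR(1) accounted for in metadataSz); the concrete numbers are for illustration only.

#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
    const int userColumns    = 10;                               // hypothetical table
    const int columnCount    = userColumns + 6;                  // METADATA_COL_CNT assumed to be 6
    const int nullMaskBytes  = ((columnCount + 7) & -8) >> 3;    // 16 columns -> 2 bytes
    const size_t rowHeaderSz = sizeof(int32_t) + nullMaskBytes;  // 4 + 2 = 6 bytes
    const size_t metadataSz  = sizeof(int64_t) * 5 + 1;          // 41 bytes
    assert(rowHeaderSz == 6);
    assert(metadataSz == 41);
    // Total budget = rowHeaderSz + metadataSz + tuple.maxExportSerializationSize()
    return 0;
}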