Example 1
bool Table::checkNulls(TableTuple& tuple) const {
    assert (m_columnCount == tuple.columnCount());
    for (int i = m_columnCount - 1; i >= 0; --i) {
        if (( ! m_allowNulls[i]) && tuple.isNull(i)) {
            VOLT_TRACE("Attribute %d was NULL, but it is declared non-nullable.", i);
            return false;
        }
    }
    return true;
}
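
For reference, a minimal self-contained sketch of the same check, using hypothetical stand-ins (a std::vector<bool> of per-column nullability flags and std::optional values) in place of VoltDB's Table and TableTuple types:

#include <cassert>
#include <optional>
#include <string>
#include <vector>

// Hypothetical row: one optional value per column, where std::nullopt
// stands in for SQL NULL.
using Row = std::vector<std::optional<std::string>>;

// Mirrors the loop in Table::checkNulls: scan the columns and fail as
// soon as a NULL appears in a column that is declared non-nullable.
bool checkNulls(const std::vector<bool>& allowNulls, const Row& row) {
    assert(allowNulls.size() == row.size());
    for (int i = static_cast<int>(row.size()) - 1; i >= 0; --i) {
        if (!allowNulls[i] && !row[i].has_value()) {
            return false;  // column i is NULL but non-nullable
        }
    }
    return true;
}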
Example 2
/**
 * Iterate through the table blocks until all the active tuples have been found. Skip dirty tuples
 * and mark them as clean so that they can be copied during the next snapshot.
 */
bool CopyOnWriteIterator::next(TableTuple &out) {
    if (m_currentBlock == NULL) {
        return false;
    }
    while (true) {
        if (m_blockOffset >= m_currentBlock->unusedTupleBoundary()) {
            if (m_blockIterator == m_end) {
                m_surgeon->snapshotFinishedScanningBlock(m_currentBlock, TBPtr());
                break;
            }
            m_surgeon->snapshotFinishedScanningBlock(m_currentBlock, m_blockIterator.data());

            char *finishedBlock = m_currentBlock->address();

            m_location = m_blockIterator.key();
            m_currentBlock = m_blockIterator.data();
            assert(m_currentBlock->address() == m_location);
            m_blockOffset = 0;

            // Remove the finished block from the map so that it can be released
            // back to the OS once all tuples in the block are deleted.
            //
            // This invalidates the iterators, so we have to get new iterators
            // using the current block's start address. m_blockIterator has to
            // point to the next block, hence the upper_bound() call.
            m_blocks.erase(finishedBlock);
            m_blockIterator = m_blocks.upper_bound(m_currentBlock->address());
            m_end = m_blocks.end();
        }
        assert(m_location < m_currentBlock.get()->address() + m_table->getTableAllocationSize());
        assert(m_location < m_currentBlock.get()->address() + (m_table->getTupleLength() * m_table->getTuplesPerBlock()));
        assert (out.columnCount() == m_table->columnCount());
        m_blockOffset++;
        out.move(m_location);
        const bool active = out.isActive();
        const bool dirty = out.isDirty();

        if (dirty) m_skippedDirtyRows++;
        if (!active) m_skippedInactiveRows++;

        // Clear the dirty flag and advance the cursor in every case; only
        // tuples that are active (not deleted) and not dirty are returned.
        out.setDirtyFalse();
        m_location += m_tupleLength;
        if (active && !dirty) {
            return true;
        }
    }
    return false;
}
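
The subtle step above is the erase-and-reseek: removing the finished block from the map can invalidate live iterators, so the code re-seeks with upper_bound() on the current block's start address to land on the block after it. A minimal sketch of that pattern over a plain std::map (integer keys standing in for block addresses; not VoltDB's actual block map type):

#include <cassert>
#include <map>

int main() {
    // Keyed by block start address; char values stand in for block handles.
    std::map<int, char> blocks = {{100, 'A'}, {200, 'B'}, {300, 'C'}};

    int finishedBlock = 100;  // block that has just been fully scanned
    int currentBlock  = 200;  // block being scanned now

    // Erasing may invalidate iterators held into the map, so rather than
    // keep one across the erase, re-seek from the current block's key:
    // upper_bound() yields the first block strictly after it.
    blocks.erase(finishedBlock);
    auto next = blocks.upper_bound(currentBlock);
    assert(next != blocks.end() && next->first == 300);
    return 0;
}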
Example 3
size_t
ExportTupleStream::computeOffsets(TableTuple &tuple,
                                   size_t *rowHeaderSz)
{
    int columnCount = tuple.columnCount() + METADATA_COL_CNT;
    // null mask needs one bit per column: round the column count up to
    // the next multiple of 8, then divide by 8 to get a byte count
    int nullMaskLength = ((columnCount + 7) & -8) >> 3;

    // row header is 32-bit length of row plus null mask
    *rowHeaderSz = sizeof (int32_t) + nullMaskLength;

    // metadata column width: 5 int64_ts plus CHAR(1).
    size_t metadataSz = (sizeof (int64_t) * 5) + 1;

    // returns 0 if corrupt tuple detected
    size_t dataSz = tuple.maxExportSerializationSize();
    if (dataSz == 0) {
        throwFatalException("Invalid tuple passed to computeOffsets. Crashing system.");
    }

    return *rowHeaderSz + metadataSz + dataSz;
}
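
The only non-obvious line above is the null-mask arithmetic: (columnCount + 7) & -8 rounds the bit count up to the next multiple of 8 (since -8 in two's complement clears the low three bits), and >> 3 then converts bits to bytes. A quick self-check that it agrees with the plainer (n + 7) / 8:

#include <cassert>

// Bytes needed for an n-bit null mask: round n up to the next multiple
// of 8, then divide by 8. Equivalent to (n + 7) / 8 for n >= 0.
int nullMaskBytes(int n) { return ((n + 7) & -8) >> 3; }

int main() {
    for (int n = 0; n <= 256; ++n) {
        assert(nullMaskBytes(n) == (n + 7) / 8);
    }
    assert(nullMaskBytes(9) == 2);  // e.g. 9 columns need 2 mask bytes
    return 0;
}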