/*
 Insert a tuple into the evicted table but don't create any UNDO action. Return the address
 of the newly inserted tuple. 
 */
/*
 Insert a tuple into the evicted table without registering any UNDO action.
 Returns the address of the newly inserted tuple.
 */
const void* NVMEvictedTable::insertNVMEvictedTuple(TableTuple &source) {
    // Reject tuples that violate NOT NULL constraints before touching storage.
    if (!checkNulls(source)) {
        throwFatalException("Failed to insert tuple into table %s for undo:"
                            " null constraint violation\n%s\n", m_name.c_str(),
                            source.debugNoHeader().c_str());
    }

    // Acquire the next free slot: either one recycled from the free slot
    // list, or fresh storage at the end of the current memory chunk.
    nextFreeTuple(&m_tmpTarget1);
    m_tupleCount++;

    // Deep-copy the source tuple into the acquired slot, allocating any
    // out-of-line data from this table's pool, and mark it live.
    m_tmpTarget1.copyForPersistentInsert(source, m_pool);
    m_tmpTarget1.setDeletedFalse();

    // Flag the tuple as evicted so that index iteration can recognize it
    // as an evicted tuple.
    m_tmpTarget1.setNVMEvictedTrue();
    assert(m_tmpTarget1.isNVMEvicted());

    return m_tmpTarget1.address();
}
Example #2
0
/**
 * Deserialize a run of tuples (preceded only by a 4-byte row count, no
 * table header) from serialInput into this table.
 *
 * If uniqueViolationOutput is non-null, rows that violate unique
 * constraints are serialized into it behind a reserved 4-byte length
 * prefix; otherwise constraint handling will just throw.
 */
void Table::loadTuplesFromNoHeader(SerializeInputBE &serialInput,
                                   Pool *stringPool,
                                   ReferenceSerializeOutput *uniqueViolationOutput,
                                   bool shouldDRStreamRow) {
    const int tupleCount = serialInput.readInt();
    assert(tupleCount >= 0);

    // Reserve space for a length prefix covering any rows that violate
    // unique constraints. If no output is supplied, violations just throw.
    size_t lengthPosition = 0;
    if (uniqueViolationOutput != NULL) {
        lengthPosition = uniqueViolationOutput->reserveBytes(4);
    }

    // Both of these are filled in by processLoadedTuple as violations occur.
    int32_t serializedTupleCount = 0;
    size_t tupleCountPosition = 0;

    TableTuple target(m_schema);
    for (int row = 0; row < tupleCount; ++row) {
        nextFreeTuple(&target);
        // Freshly acquired storage: reset every status bit before use.
        target.setActiveTrue();
        target.setDirtyFalse();
        target.setPendingDeleteFalse();
        target.setPendingDeleteOnUndoReleaseFalse();

        target.deserializeFrom(serialInput, stringPool);

        processLoadedTuple(target, uniqueViolationOutput, serializedTupleCount, tupleCountPosition, shouldDRStreamRow);
    }

    // Back-patch the reserved prefix with the byte length and count of the
    // constraint-violating rows that were recorded (if any occurred).
    if (uniqueViolationOutput != NULL) {
        if (serializedTupleCount == 0) {
            uniqueViolationOutput->writeIntAt(lengthPosition, 0);
        } else {
            uniqueViolationOutput->writeIntAt(lengthPosition,
                                              static_cast<int32_t>(uniqueViolationOutput->position() - lengthPosition - sizeof(int32_t)));
            uniqueViolationOutput->writeIntAt(tupleCountPosition,
                                              serializedTupleCount);
        }
    }
}
Example #3
0
/**
 * Deserialize a run of tuples (preceded only by a 4-byte row count, no
 * table header) from serialInput into this table. Unique-constraint
 * violations are not collected — processLoadedTuple receives NULL for
 * the violation output.
 */
void Table::loadTuplesFromNoHeader(SerializeInputBE &serialInput,
                                   Pool *stringPool) {
    const int tupleCount = serialInput.readInt();
    assert(tupleCount >= 0);

    // Required by the processLoadedTuple signature; unused in this overload.
    int32_t serializedTupleCount = 0;
    size_t tupleCountPosition = 0;

    TableTuple target(m_schema);
    for (int row = 0; row < tupleCount; ++row) {
        nextFreeTuple(&target);
        // Freshly acquired storage: reset every status bit before use.
        target.setActiveTrue();
        target.setDirtyFalse();
        target.setPendingDeleteFalse();
        target.setPendingDeleteOnUndoReleaseFalse();

        target.deserializeFrom(serialInput, stringPool);

        processLoadedTuple(target, NULL, serializedTupleCount, tupleCountPosition);
    }
}
/**
 * Compact active tuples out of |source| into this block's free slots,
 * continuing from wherever the previous compaction pass on |source| left
 * off. Tuples pending delete on undo release cannot be moved (moving
 * would invalidate the pointer held by the undo action) and are counted
 * instead.
 *
 * @param table  owning table, used for schema access and tuple swapping
 * @param source block to drain tuples from (must not be this block)
 * @return pair of bucket indices: first is this block's new bucket index,
 *         or -1 if it did not change; second is |source|'s bucket index
 *         computed with the pending-delete-on-undo-release count.
 */
std::pair<int, int> TupleBlock::merge(Table *table, TBPtr source) {
    assert(source != this);

    // Fix: this scan cursor was named m_nextTupleInSourceOffset, but it is
    // a function local, not a member — the m_ prefix was misleading.
    uint32_t nextTupleInSourceOffset = source->lastCompactionOffset();
    int sourceTuplesPendingDeleteOnUndoRelease = 0;
    while (hasFreeTuples() && !source->isEmpty()) {
        TableTuple sourceTuple(table->schema());
        TableTuple destinationTuple(table->schema());

        bool foundSourceTuple = false;
        // Iterate further into the source block looking for active tuples.
        // Stop when running into the unused tuple boundary.
        while (nextTupleInSourceOffset < source->unusedTupleBoundry()) {
            sourceTuple.move(&source->address()[m_tupleLength * nextTupleInSourceOffset]);
            nextTupleInSourceOffset++;
            if (sourceTuple.isActive()) {
                foundSourceTuple = true;
                break;
            }
        }

        if (!foundSourceTuple) {
            // The block isn't empty, but there are no more active tuples.
            // Some of the tuples that make it register as not empty must have
            // been pending delete, and those aren't mergable.
            assert(sourceTuplesPendingDeleteOnUndoRelease);
            break;
        }

        // Can't move a tuple with a pending undo action, it would invalidate
        // the pointer. Keep a count so that the block can be notified of the
        // number of tuples pending delete on undo release when calculating
        // the correct bucket index. If all the active tuples are pending
        // delete on undo release the block is effectively empty and shouldn't
        // be considered for merge ops. It will be completely discarded once
        // the undo log releases the block.
        if (sourceTuple.isPendingDeleteOnUndoRelease()) {
            sourceTuplesPendingDeleteOnUndoRelease++;
            continue;
        }

        destinationTuple.move(nextFreeTuple().first);
        table->swapTuples(sourceTuple, destinationTuple);
        source->freeTuple(sourceTuple.address());
    }
    // Persist the cursor so the next compaction pass resumes here.
    source->lastCompactionOffset(nextTupleInSourceOffset);

    int newBucketIndex = calculateBucketIndex();
    if (newBucketIndex != m_bucketIndex) {
        m_bucketIndex = newBucketIndex;
        return std::pair<int, int>(newBucketIndex, source->calculateBucketIndex(sourceTuplesPendingDeleteOnUndoRelease));
    } else {
        // Signal "unchanged" with -1 so the caller skips rebucketing this block.
        return std::pair<int, int>(-1, source->calculateBucketIndex(sourceTuplesPendingDeleteOnUndoRelease));
    }
}