Example 1
void Table::loadTuplesFromNoHeader(bool allowExport,
                                   SerializeInput &serialize_io,
                                   Pool *stringPool) {
    int tupleCount = serialize_io.readInt();
    assert(tupleCount >= 0);

    // allocate the required data blocks up front so they are well aligned
    while (tupleCount + m_usedTuples > m_allocatedTuples) {
        allocateNextBlock();
    }

    for (int i = 0; i < tupleCount; ++i) {
        // point the scratch tuple at the next unused slot in the table's storage
        m_tmpTarget1.move(dataPtrForTuple((int) m_usedTuples + i));
        // reset the tuple's status flags before filling in its data
        m_tmpTarget1.setDeletedFalse();
        m_tmpTarget1.setDirtyFalse();
        m_tmpTarget1.setEvictedFalse();
        m_tmpTarget1.deserializeFrom(serialize_io, stringPool);

        processLoadedTuple(allowExport, m_tmpTarget1);
        VOLT_TRACE("Loaded new tuple #%02d\n%s", i, m_tmpTarget1.debug(name()).c_str());
    }

    populateIndexes(tupleCount);

    m_tupleCount += tupleCount;
    m_usedTuples += tupleCount;
    
}
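The block pre-allocation loop in Example 1 (grow the table until tupleCount + m_usedTuples fits, then write tuples into consecutive slots) is a general bulk-load pattern: reserve all storage up front so the copy loop never triggers an allocation. The sketch below illustrates the same shape with a hypothetical fixed-size block pool; BlockPool, kTupleSize, kTuplesPerBlock and slotFor are illustrative names, not VoltDB APIs.

#include <cassert>
#include <cstddef>
#include <vector>

// Hypothetical block pool illustrating the pre-allocation pattern above:
// grow capacity in whole blocks before the copy loop, so every slot the
// loop touches is already backed by storage.
struct BlockPool {
    static const std::size_t kTupleSize = 64;        // bytes per tuple slot (illustrative)
    static const std::size_t kTuplesPerBlock = 1024; // slots per block (illustrative)

    std::vector<char*> blocks;
    std::size_t allocatedTuples = 0; // total slots across all blocks
    std::size_t usedTuples = 0;      // slots already occupied

    ~BlockPool() { for (char *b : blocks) delete[] b; }

    void allocateNextBlock() {
        blocks.push_back(new char[kTupleSize * kTuplesPerBlock]);
        allocatedTuples += kTuplesPerBlock;
    }

    // Address of the slot with the given global tuple index.
    char *slotFor(std::size_t index) {
        return blocks[index / kTuplesPerBlock] + (index % kTuplesPerBlock) * kTupleSize;
    }

    // Mirrors the shape of loadTuplesFromNoHeader: reserve first, then fill.
    void bulkLoad(std::size_t tupleCount) {
        while (tupleCount + usedTuples > allocatedTuples) {
            allocateNextBlock();
        }
        for (std::size_t i = 0; i < tupleCount; ++i) {
            char *slot = slotFor(usedTuples + i);
            // ... a real loader would deserialize one tuple into `slot` here ...
            (void) slot;
        }
        usedTuples += tupleCount;
    }
};

int main() {
    BlockPool pool;
    pool.bulkLoad(5000);
    assert(pool.usedTuples == 5000 && pool.allocatedTuples >= pool.usedTuples);
    return 0;
}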
Example 2
void Table::loadTuplesFromNoHeader(SerializeInputBE &serialInput,
                                   Pool *stringPool,
                                   ReferenceSerializeOutput *uniqueViolationOutput,
                                   bool shouldDRStreamRow) {
    int tupleCount = serialInput.readInt();
    assert(tupleCount >= 0);

    TableTuple target(m_schema);

    // Reserve space for a length prefix describing the rows that violate unique constraints.
    // If no output is supplied, a constraint violation will simply throw instead of being recorded.
    size_t lengthPosition = 0;
    int32_t serializedTupleCount = 0;
    size_t tupleCountPosition = 0;
    if (uniqueViolationOutput != NULL) {
        lengthPosition = uniqueViolationOutput->reserveBytes(4);
    }

    for (int i = 0; i < tupleCount; ++i) {
        nextFreeTuple(&target);
        target.setActiveTrue();
        target.setDirtyFalse();
        target.setPendingDeleteFalse();
        target.setPendingDeleteOnUndoReleaseFalse();

        target.deserializeFrom(serialInput, stringPool);

        processLoadedTuple(target, uniqueViolationOutput, serializedTupleCount, tupleCountPosition, shouldDRStreamRow);
    }

    // If unique constraint violations are being collected, back-patch the byte length
    // and the count of the violating rows that occurred
    if (uniqueViolationOutput != NULL) {
        if (serializedTupleCount == 0) {
            uniqueViolationOutput->writeIntAt(lengthPosition, 0);
        } else {
            uniqueViolationOutput->writeIntAt(lengthPosition,
                                              static_cast<int32_t>(uniqueViolationOutput->position() - lengthPosition - sizeof(int32_t)));
            uniqueViolationOutput->writeIntAt(tupleCountPosition,
                                              serializedTupleCount);
        }
    }
}
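Example 2 adds the reserve-then-back-patch idiom: four bytes are reserved up front with reserveBytes, and only after the per-tuple loop knows how many violating rows were serialized are the byte length and the row count written back with writeIntAt. The sketch below shows the same idiom against a hypothetical byte-buffer writer; ByteWriter and its methods are illustrative stand-ins for the pattern, not the ReferenceSerializeOutput API.

#include <cstdint>
#include <cstddef>
#include <vector>

// Hypothetical byte-buffer writer used only to demonstrate the
// reserve/back-patch idiom from Example 2.
class ByteWriter {
public:
    // Reserve n bytes and return their offset so they can be patched later.
    std::size_t reserveBytes(std::size_t n) {
        std::size_t pos = m_buf.size();
        m_buf.resize(m_buf.size() + n, 0);
        return pos;
    }
    // Append a big-endian int32 at the end of the buffer.
    void writeInt(int32_t value) {
        for (int shift = 24; shift >= 0; shift -= 8) {
            m_buf.push_back(static_cast<char>((value >> shift) & 0xff));
        }
    }
    // Patch a big-endian int32 into previously reserved space.
    void writeIntAt(std::size_t position, int32_t value) {
        for (int shift = 24; shift >= 0; shift -= 8) {
            m_buf[position++] = static_cast<char>((value >> shift) & 0xff);
        }
    }
    std::size_t position() const { return m_buf.size(); }

private:
    std::vector<char> m_buf;
};

// Serialize rows as [int32 byte length][int32 row count][rows...], where both
// prefixes are only known once the loop has finished.
void writeLengthPrefixed(ByteWriter &out, const std::vector<int32_t> &rows) {
    std::size_t lengthPosition = out.reserveBytes(sizeof(int32_t)); // patched below
    std::size_t countPosition  = out.reserveBytes(sizeof(int32_t)); // patched below

    int32_t serializedCount = 0;
    for (int32_t row : rows) {
        out.writeInt(row);
        ++serializedCount;
    }

    // Back-patch: everything written after the length prefix counts toward the length.
    out.writeIntAt(lengthPosition,
                   static_cast<int32_t>(out.position() - lengthPosition - sizeof(int32_t)));
    out.writeIntAt(countPosition, serializedCount);
}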
Example 3
void Table::loadTuplesFromNoHeader(SerializeInputBE &serialInput,
                                   Pool *stringPool) {
    int tupleCount = serialInput.readInt();
    assert(tupleCount >= 0);

    int32_t serializedTupleCount = 0;
    size_t tupleCountPosition = 0;
    TableTuple target(m_schema);
    for (int i = 0; i < tupleCount; ++i) {
        nextFreeTuple(&target);
        target.setActiveTrue();
        target.setDirtyFalse();
        target.setPendingDeleteFalse();
        target.setPendingDeleteOnUndoReleaseFalse();

        target.deserializeFrom(serialInput, stringPool);

        processLoadedTuple(target, NULL, serializedTupleCount, tupleCountPosition);
    }
}
Example 4
void Table::loadTuplesFrom(bool allowELT,
                           SerializeInput &serialize_io,
                           Pool *stringPool) {
    /*
     * directly receives a VoltTable buffer.
     * [00 .. 03]   [04]     [05 06]    [07 .. 0x]
     * headersize   status   colcount   colcount * 1 byte (column types)
     *
     * [0x+1 .. 0y]
     * colcount * strings (column names)
     *
     * [0y+1 0y+2 0y+3 0y+4]
     * rowcount
     *
     * [0y+5 .. end]
     * rowdata
     */

    // todo: just skip ahead to this position
    serialize_io.readInt(); // rowstart
    serialize_io.readByte(); // status

    int16_t colcount = serialize_io.readShort();
    assert(colcount >= 0);

    // Store the column types and names so that we can report them back to the
    // user on failure
    ValueType types[colcount];
    std::string names[colcount];

    // read the column types (used only for the error message below)
    for (int i = 0; i < colcount; ++i) {
        types[i] = (ValueType) serialize_io.readEnumInSingleByte();
    }

    // read the column names (used only for the error message below)
    for (int i = 0; i < colcount; ++i) {
        names[i] = serialize_io.readTextString();
    }

    // Check if the column count matches what the table's schema expects
    if (colcount != m_schema->columnCount()) {
        std::stringstream message(std::stringstream::in
                                  | std::stringstream::out);
        message << "Column count mismatch. Expecting "
                << m_schema->columnCount()
                << ", but " << colcount << " given" << std::endl;
        message << "Expecting the following columns:" << std::endl;
        message << debug() << std::endl;
        message << "The following columns are given:" << std::endl;
        for (int i = 0; i < colcount; i++) {
            message << "column " << i << ": " << names[i]
                    << ", type = " << getTypeName(types[i]) << std::endl;
        }
        throw SerializableEEException(VOLT_EE_EXCEPTION_TYPE_EEEXCEPTION,
                                      message.str().c_str());
    }

    int tupleCount = serialize_io.readInt();
    assert(tupleCount >= 0);

    // allocate the required data blocks up front so they are well aligned
    while (tupleCount + m_usedTuples > m_allocatedTuples) {
        allocateNextBlock();
    }

    for (int i = 0; i < tupleCount; ++i) {
        m_tmpTarget1.move(dataPtrForTuple((int) m_usedTuples + i));
        m_tmpTarget1.setDeletedFalse();
        m_tmpTarget1.setDirtyFalse();
        m_tmpTarget1.deserializeFrom(serialize_io, stringPool);

        processLoadedTuple(allowELT, m_tmpTarget1);
    }

    populateIndexes(tupleCount);

    m_tupleCount += tupleCount;
    m_usedTuples += tupleCount;
}
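Reading Example 4 together with its header comment, the buffer the function consumes is a fixed header (header size, status byte, column count, types, names) followed by a row count and the row data. Purely as an illustration, the sketch below assembles such a buffer on the sending side; the assumption that the leading int counts the header bytes after itself (so a reader could "skip ahead", as the TODO suggests) is mine, and the helper names are hypothetical, not VoltDB APIs.

#include <cstdint>
#include <cstddef>
#include <string>
#include <vector>

// Illustrative big-endian append helpers over a raw byte buffer.
static void putByte(std::vector<char> &buf, uint8_t v) { buf.push_back(static_cast<char>(v)); }
static void putShort(std::vector<char> &buf, int16_t v) {
    buf.push_back(static_cast<char>((v >> 8) & 0xff));
    buf.push_back(static_cast<char>(v & 0xff));
}
static void putInt(std::vector<char> &buf, int32_t v) {
    for (int shift = 24; shift >= 0; shift -= 8) {
        buf.push_back(static_cast<char>((v >> shift) & 0xff));
    }
}
static void putIntAt(std::vector<char> &buf, std::size_t pos, int32_t v) {
    for (int shift = 24; shift >= 0; shift -= 8) {
        buf[pos++] = static_cast<char>((v >> shift) & 0xff);
    }
}
static void putTextString(std::vector<char> &buf, const std::string &s) {
    putInt(buf, static_cast<int32_t>(s.size())); // length-prefixed string (assumed encoding)
    buf.insert(buf.end(), s.begin(), s.end());
}

// Assemble a buffer in the order loadTuplesFrom reads it:
// [int32 header size][byte status][int16 colcount][types][names][int32 rowcount][rows...]
std::vector<char> buildVoltTableHeader(const std::vector<uint8_t> &columnTypes,
                                       const std::vector<std::string> &columnNames,
                                       int32_t rowCount) {
    std::vector<char> buf;
    std::size_t headerSizePos = buf.size();
    putInt(buf, 0);                                  // placeholder, patched below
    putByte(buf, 0);                                 // status byte
    putShort(buf, static_cast<int16_t>(columnTypes.size()));
    for (uint8_t t : columnTypes) putByte(buf, t);   // one byte per column type
    for (const std::string &n : columnNames) putTextString(buf, n);

    // Assumption: the leading int covers everything after itself up to here,
    // so a reader can jump from it straight to the row count.
    putIntAt(buf, headerSizePos,
             static_cast<int32_t>(buf.size() - headerSizePos - sizeof(int32_t)));

    putInt(buf, rowCount);                           // serialized row data would follow
    return buf;
}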