TableIndex *TableIndexFactory::getInstance(const TableIndexScheme &scheme) {
    int colCount = (int)scheme.columnIndices.size();
    TupleSchema *tupleSchema = scheme.tupleSchema;
    assert(tupleSchema);
    std::vector<ValueType> keyColumnTypes;
    std::vector<int32_t> keyColumnLengths;
    std::vector<bool> keyColumnAllowNull(colCount, true);
    for (int i = 0; i < colCount; ++i) {
        keyColumnTypes.push_back(tupleSchema->columnType(scheme.columnIndices[i]));
        keyColumnLengths.push_back(tupleSchema->columnLength(scheme.columnIndices[i]));
    }
    TupleSchema *keySchema = TupleSchema::createTupleSchema(keyColumnTypes, keyColumnLengths, keyColumnAllowNull, true);
    assert(keySchema);
    VOLT_TRACE("Creating index for %s.\n%s", scheme.name.c_str(), keySchema->debug().c_str());
    TableIndexPicker picker(keySchema, scheme);
    TableIndex *retval = picker.getInstance();
    return retval;
}
// GWW: escrow column
TupleSchema* TupleSchema::createTupleSchema(const std::vector<ValueType> columnTypes,
                                            const std::vector<int32_t> columnSizes,
                                            const std::vector<bool> allowNull,
                                            const std::vector<bool> isEscrowColumn,
                                            bool allowInlinedObjects)
{
    const uint16_t uninlineableObjectColumnCount =
            TupleSchema::countUninlineableObjectColumns(columnTypes, columnSizes, allowInlinedObjects);
    const uint16_t columnCount = static_cast<uint16_t>(columnTypes.size());

    // Big enough for all data members plus columnCount + 1 "ColumnInfo" fields.
    // We need columnCount + 1 because we get the length of a column by offset subtraction.
    // Also allocate space for an int16_t for each uninlineable object column so that
    // the indices of uninlineable columns can be stored at the front and aid in iteration.
    int memSize = (int)(sizeof(TupleSchema) +
                        (sizeof(ColumnInfo) * (columnCount + 1)) +
                        (uninlineableObjectColumnCount * sizeof(int16_t)));

    // allocate the set amount of memory and cast it to a tuple schema pointer
    TupleSchema *retval = reinterpret_cast<TupleSchema*>(new char[memSize]);

    // clear all the offset values
    memset(retval, 0, memSize);
    retval->m_allowInlinedObjects = allowInlinedObjects;
    retval->m_columnCount = columnCount;
    retval->m_uninlinedObjectColumnCount = uninlineableObjectColumnCount;

    uint16_t uninlinedObjectColumnIndex = 0;
    for (uint16_t ii = 0; ii < columnCount; ii++) {
        const ValueType type = columnTypes[ii];
        const uint32_t length = columnSizes[ii];
        const bool columnAllowNull = allowNull[ii];
        const bool escrowColumn = isEscrowColumn[ii];
        retval->setColumnMetaData(ii, type, length, columnAllowNull, escrowColumn, uninlinedObjectColumnIndex);
    }

    VOLT_TRACE("WGWG - %s", retval->debug().c_str());
    return retval;
}
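The over-allocation of one extra ColumnInfo entry is what makes the offset-subtraction trick work: each entry only records where a column starts, and a column's length falls out as the difference between neighboring offsets, with the extra sentinel entry marking the end of the last column. A minimal standalone sketch of the idea follows; it uses its own illustrative struct and field names, not the VoltDB types.

#include <cstdint>
#include <cstdio>

// Illustrative stand-in for a ColumnInfo entry: only the starting offset is stored.
struct OffsetEntry {
    uint32_t offset;   // byte offset of the column within the tuple
};

int main() {
    // Three fixed-size columns of 8, 4, and 16 bytes.
    const uint32_t widths[] = {8, 4, 16};
    const int columnCount = 3;

    // columnCount + 1 entries: the extra sentinel marks the end of the last column.
    OffsetEntry entries[columnCount + 1];
    uint32_t running = 0;
    for (int i = 0; i < columnCount; ++i) {
        entries[i].offset = running;
        running += widths[i];
    }
    entries[columnCount].offset = running;   // sentinel = total tuple size

    // A column's length is recovered by subtracting adjacent offsets.
    for (int i = 0; i < columnCount; ++i) {
        uint32_t length = entries[i + 1].offset - entries[i].offset;
        printf("column %d: offset %u, length %u\n", i, entries[i].offset, length);
    }
    return 0;
}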
TableIndex *TableIndexFactory::getInstance(const TableIndexScheme &scheme) {
    const TupleSchema *tupleSchema = scheme.tupleSchema;
    assert(tupleSchema);
    bool isIntsOnly = true;
    bool isInlinesOrColumnsOnly = true;
    std::vector<ValueType> keyColumnTypes;
    std::vector<int32_t> keyColumnLengths;
    size_t valueCount = 0;
    size_t exprCount = scheme.indexedExpressions.size();
    if (exprCount != 0) {
        valueCount = exprCount;
        // TODO: This is where we could gain some extra runtime and space efficiency by
        // somehow marking which indexed expressions happen to be non-inlined column expressions.
        // This case is significant because it presents an opportunity for the GenericPersistentKey
        // index keys to avoid a persistent allocation and copy of an already persistent value.
        // This could be implemented as a bool attribute of TupleSchema::ColumnInfo that is only
        // set to true in this special case. It would universally disable deep copying of that
        // particular "tuple column"'s referenced object.
        for (size_t ii = 0; ii < valueCount; ++ii) {
            ValueType exprType = scheme.indexedExpressions[ii]->getValueType();
            if ( ! isIntegralType(exprType)) {
                isIntsOnly = false;
            }
            uint32_t declaredLength;
            if (exprType == VALUE_TYPE_VARCHAR || exprType == VALUE_TYPE_VARBINARY) {
                // Setting the column length to TupleSchema::COLUMN_MAX_VALUE_LENGTH constrains the
                // maximum length of expression values that can be indexed with the same limit
                // that gets applied to column values.
                // In theory, indexed expression values could have an independent limit
                // up to any length that can be allocated via ThreadLocalPool.
                // Currently, all of these cases are constrained with the same limit,
                // which is also the default/maximum size for variable columns defined in schema,
                // as controlled in java by VoltType.MAX_VALUE_LENGTH.
                // It's not clear whether scheme.indexedExpressions[ii]->getValueSize()
                // can or should be called for a more useful answer.
                // There's probably little to gain, since expressions usually do not contain enough
                // information to reliably determine that the result value is always small enough to "inline".
                declaredLength = TupleSchema::COLUMN_MAX_VALUE_LENGTH;
                isInlinesOrColumnsOnly = false;
            } else {
                declaredLength = NValue::getTupleStorageSize(exprType);
            }
            keyColumnTypes.push_back(exprType);
            keyColumnLengths.push_back(declaredLength);
        }
    } else {
        valueCount = scheme.columnIndices.size();
        for (size_t ii = 0; ii < valueCount; ++ii) {
            ValueType exprType = tupleSchema->columnType(scheme.columnIndices[ii]);
            if ( ! isIntegralType(exprType)) {
                isIntsOnly = false;
            }
            keyColumnTypes.push_back(exprType);
            keyColumnLengths.push_back(tupleSchema->columnLength(scheme.columnIndices[ii]));
        }
    }
    std::vector<bool> keyColumnAllowNull(valueCount, true);
    TupleSchema *keySchema = TupleSchema::createTupleSchema(keyColumnTypes, keyColumnLengths, keyColumnAllowNull, true);
    assert(keySchema);
    VOLT_TRACE("Creating index for '%s' with key schema '%s'", scheme.name.c_str(), keySchema->debug().c_str());
    TableIndexPicker picker(keySchema, isIntsOnly, isInlinesOrColumnsOnly, scheme);
    TableIndex *retval = picker.getInstance();
    return retval;
}
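The two flags passed to TableIndexPicker summarize the key-classification work done above: isIntsOnly records that every key component is integral, and isInlinesOrColumnsOnly is cleared only when an expression-based key contains a variable-length (VARCHAR/VARBINARY) result, since plain column keys can always reference the column's own storage. The following self-contained sketch restates just that classification logic with an illustrative enum and helper of its own, not the VoltDB ValueType or isIntegralType.

#include <cstdio>
#include <vector>

// Illustrative stand-in for the value-type enum.
enum KeyType { KT_TINYINT, KT_INTEGER, KT_BIGINT, KT_DOUBLE, KT_VARCHAR, KT_VARBINARY };

static bool isIntegral(KeyType t) {
    return t == KT_TINYINT || t == KT_INTEGER || t == KT_BIGINT;
}

// Mirrors the two flags computed in TableIndexFactory::getInstance.
struct KeyClassification {
    bool intsOnly = true;              // every key component is an integral type
    bool inlinesOrColumnsOnly = true;  // no out-of-line expression values in the key
};

KeyClassification classifyKey(const std::vector<KeyType> &keyTypes, bool expressionBased) {
    KeyClassification result;
    for (KeyType t : keyTypes) {
        if (!isIntegral(t)) {
            result.intsOnly = false;
        }
        // Only expression-based keys lose the "inlines or columns only" property:
        // variable-length expression results need their own out-of-line storage,
        // whereas column keys can reference the (possibly non-inlined) column value.
        if (expressionBased && (t == KT_VARCHAR || t == KT_VARBINARY)) {
            result.inlinesOrColumnsOnly = false;
        }
    }
    return result;
}

int main() {
    KeyClassification c = classifyKey({KT_INTEGER, KT_VARCHAR}, /*expressionBased=*/true);
    printf("intsOnly=%d inlinesOrColumnsOnly=%d\n", c.intsOnly, c.inlinesOrColumnsOnly);
    return 0;
}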