/**
 * When this function is called, the AbstractExecutor's init function
 * will have set the input tables in the plan node, but nothing else.
 */
bool WindowFunctionExecutor::p_init(AbstractPlanNode *init_node,
                                    TempTableLimits *limits)
{
    VOLT_TRACE("WindowFunctionExecutor::p_init(start)");
    WindowFunctionPlanNode* node = dynamic_cast<WindowFunctionPlanNode*>(m_abstractNode);
    assert(node);

    if (!node->isInline()) {
        setTempOutputTable(limits);
    }
    /*
     * Initialize the memory pool early, so that we can
     * use it for constructing temp. tuples.
     */
    m_memoryPool.purge();

    assert(getInProgressPartitionByKeyTuple().isNullTuple());
    assert(getInProgressOrderByKeyTuple().isNullTuple());
    assert(getLastPartitionByKeyTuple().isNullTuple());
    assert(getLastOrderByKeyTuple().isNullTuple());

    m_partitionByKeySchema = TupleSchema::createTupleSchema(m_partitionByExpressions);
    m_orderByKeySchema = TupleSchema::createTupleSchema(m_orderByExpressions);

    /*
     * Initialize all the data for partition by and
     * order by storage once and for all.
     */
    VOLT_TRACE("WindowFunctionExecutor::p_init(end)\n");
    return true;
}
bool MaterializeExecutor::p_execute(const NValueArray &params) {
    assert(node == dynamic_cast<MaterializePlanNode*>(m_abstractNode));
    assert(node);
    assert(!node->isInline()); // inline projection's execute() should not be called
    assert(output_table == dynamic_cast<TempTable*>(node->getOutputTable()));
    assert(output_table);
    assert(m_columnCount == (int)node->getOutputColumnNames().size());

    // batched insertion
    if (batched) {
        int paramcnt = engine->getUsedParamcnt();
        VOLT_TRACE("batched insertion with %d params. %d for each tuple.", paramcnt, m_columnCount);
        TableTuple &temp_tuple = output_table->tempTuple();
        for (int i = 0, tuples = paramcnt / m_columnCount; i < tuples; ++i) {
            for (int j = m_columnCount - 1; j >= 0; --j) {
                temp_tuple.setNValue(j, params[i * m_columnCount + j]);
            }
            output_table->insertTupleNonVirtual(temp_tuple);
        }
        VOLT_TRACE("Materialized :\n %s", this->output_table->debug().c_str());
        return true;
    }

    // substitute parameterized values in expression trees.
    if (all_param_array == NULL) {
        for (int ctr = m_columnCount - 1; ctr >= 0; --ctr) {
            assert(expression_array[ctr]);
            expression_array[ctr]->substitute(params);
            VOLT_TRACE("predicate[%d]: %s", ctr, expression_array[ctr]->debug(true).c_str());
        }
    }

    // For now a MaterializePlanNode can make at most one new tuple. We
    // should think about whether we would ever want to materialize
    // more than one tuple and whether such a thing is possible with
    // the AbstractExpression scheme.
    TableTuple &temp_tuple = output_table->tempTuple();
    if (all_param_array != NULL) {
        VOLT_TRACE("sweet, all params\n");
        for (int ctr = m_columnCount - 1; ctr >= 0; --ctr) {
            temp_tuple.setNValue(ctr, params[all_param_array[ctr]]);
        }
    } else {
        TableTuple dummy;
        // Add the generated value to the temp tuple. It must have the
        // same value type as the output column.
        for (int ctr = m_columnCount - 1; ctr >= 0; --ctr) {
            temp_tuple.setNValue(ctr, expression_array[ctr]->eval(&dummy, NULL));
        }
    }

    // Add tuple to the output
    output_table->insertTupleNonVirtual(temp_tuple);
    return true;
}
bool ProjectionExecutor::p_execute(const NValueArray &params) {
#ifndef NDEBUG
    ProjectionPlanNode* node = dynamic_cast<ProjectionPlanNode*>(m_abstractNode);
#endif
    assert(node);
    assert(!node->isInline()); // inline projection's execute() should not be called
    assert(m_outputTable == dynamic_cast<AbstractTempTable*>(node->getOutputTable()));
    assert(m_outputTable);
    Table* input_table = m_abstractNode->getInputTable();
    assert(input_table);

    VOLT_TRACE("INPUT TABLE: %s\n", input_table->debug().c_str());

    assert(m_columnCount == (int)node->getOutputColumnNames().size());
    if (m_allTupleArray == NULL && m_allParamArray == NULL) {
        for (int ctr = m_columnCount - 1; ctr >= 0; --ctr) {
            assert(expression_array[ctr]);
            VOLT_TRACE("predicate[%d]: %s", ctr, expression_array[ctr]->debug(true).c_str());
        }
    }

    //
    // Now loop through all the tuples and push them through our output
    // expression. This will generate new tuple values that we will insert into
    // our output table.
    //
    TableIterator iterator = input_table->iteratorDeletingAsWeGo();
    assert(m_tuple.columnCount() == input_table->columnCount());
    while (iterator.next(m_tuple)) {
        //
        // Project (or replace) values from input tuple
        //
        TableTuple &temp_tuple = m_outputTable->tempTuple();
        if (m_allTupleArray != NULL) {
            VOLT_TRACE("sweet, all tuples");
            for (int ctr = m_columnCount - 1; ctr >= 0; --ctr) {
                temp_tuple.setNValue(ctr, m_tuple.getNValue(m_allTupleArray[ctr]));
            }
        } else if (m_allParamArray != NULL) {
            VOLT_TRACE("sweet, all params");
            for (int ctr = m_columnCount - 1; ctr >= 0; --ctr) {
                temp_tuple.setNValue(ctr, params[m_allParamArray[ctr]]);
            }
        } else {
            for (int ctr = m_columnCount - 1; ctr >= 0; --ctr) {
                temp_tuple.setNValue(ctr, expression_array[ctr]->eval(&m_tuple, NULL));
            }
        }
        m_outputTable->insertTempTuple(temp_tuple);
        VOLT_TRACE("OUTPUT TABLE: %s\n", m_outputTable->debug().c_str());
    }

    return true;
}
virtual void substitute(const NValueArray &params) {
    if (!m_hasParameter)
        return;

    VOLT_TRACE("Substituting parameters for expression \n%s ...", debug(true).c_str());
    for (size_t i = 0; i < m_args.size(); i++) {
        assert(m_args[i]);
        VOLT_TRACE("Substituting parameters for arg at index %d...", static_cast<int>(i));
        m_args[i]->substitute(params);
    }
}
bool DeleteExecutor::p_init(AbstractPlanNode *abstract_node,
                            const catalog::Database* catalog_db,
                            int* tempTableMemoryInBytes) {
    VOLT_TRACE("init Delete Executor");

    DeletePlanNode* node = dynamic_cast<DeletePlanNode*>(abstract_node);
    assert(node);
    assert(node->getTargetTable());
    m_targetTable = dynamic_cast<PersistentTable*>(node->getTargetTable()); // target table should be a PersistentTable
    assert(m_targetTable);

    m_truncate = node->getTruncate();
    if (m_truncate) {
        assert(node->getInputTables().size() == 0);
        // TODO: we can't use the target table here because it would report
        // "0 tuples deleted", since it has already been truncated by the
        // time the Result node runs.
        node->setOutputTable(TableFactory::getCopiedTempTable(m_targetTable->databaseId(),
                                                              "result_table",
                                                              m_targetTable,
                                                              tempTableMemoryInBytes));
        return true;
    }

    assert(node->getInputTables().size() == 1);
    m_inputTable = dynamic_cast<TempTable*>(node->getInputTables()[0]); // input table should be a TempTable
    assert(m_inputTable);

    // Our output is just our input table (regardless of whether the plan is single-sited or not)
    node->setOutputTable(node->getInputTables()[0]);

    m_inputTuple = TableTuple(m_inputTable->schema());
    m_targetTuple = TableTuple(m_targetTable->schema());

    return true;
}
bool ReceiveExecutor::p_init(AbstractPlanNode *abstract_node,
                             const catalog::Database* catalog_db,
                             int* tempTableMemoryInBytes) {
    VOLT_TRACE("init Receive Executor");
    assert(tempTableMemoryInBytes);

    ReceivePlanNode* node = dynamic_cast<ReceivePlanNode*>(abstract_node);
    assert(node);

    //
    // Construct the output table
    //
    int num_of_columns = (int)node->getOutputColumnNames().size();
    assert(num_of_columns >= 0);
    assert(num_of_columns == node->getOutputColumnTypes().size());
    assert(num_of_columns == node->getOutputColumnSizes().size());
    const std::vector<std::string> outputColumnNames = node->getOutputColumnNames();
    const std::vector<voltdb::ValueType> outputColumnTypes = node->getOutputColumnTypes();
    const std::vector<int32_t> outputColumnSizes = node->getOutputColumnSizes();
    const std::vector<bool> outputColumnAllowNull(num_of_columns, true);
    TupleSchema *schema = TupleSchema::createTupleSchema(outputColumnTypes,
                                                         outputColumnSizes,
                                                         outputColumnAllowNull,
                                                         true);
    std::string *columnNames = new std::string[num_of_columns];
    for (int ctr = 0; ctr < num_of_columns; ctr++) {
        columnNames[ctr] = node->getOutputColumnNames()[ctr];
    }
    node->setOutputTable(TableFactory::getTempTable(node->databaseId(),
                                                    "temp",
                                                    schema,
                                                    columnNames,
                                                    tempTableMemoryInBytes));
    delete[] columnNames;
    return true;
}
void Table::nextFreeTuple(TableTuple *tuple) {
    // First check whether we have any tuples in our free list.
    // The memcheck build uses the heap instead of a free list so that Valgrind can help.
#ifndef MEMCHECK_NOFREELIST
    if (!m_holeFreeTuples.empty()) {
        VOLT_TRACE("GRABBED FREE TUPLE!\n");
        char* ret = m_holeFreeTuples.back();
        m_holeFreeTuples.pop_back();
        assert(m_columnCount == tuple->sizeInValues());
        tuple->move(ret);
        return;
    }
#endif

    // If there are no free tuples, we need to grab another chunk of memory.
    // Allocate a new set of tuples.
    if (m_usedTuples >= m_allocatedTuples) {
        allocateNextBlock();
    }

    // get free tuple
    assert(m_usedTuples < m_allocatedTuples);
    assert(m_columnCount == tuple->sizeInValues());
    tuple->move(dataPtrForTuple((int) m_usedTuples));
    ++m_usedTuples;
    //cout << "table::nextFreeTuple(" << reinterpret_cast<const void *>(this) << ") m_usedTuples == " << m_usedTuples << endl;
}
bool ReceiveExecutor::p_init(AbstractPlanNode* abstract_node,
                             TempTableLimits* limits) {
    VOLT_TRACE("init Receive Executor");
    assert(limits);

    ReceivePlanNode* node = dynamic_cast<ReceivePlanNode*>(abstract_node);
    assert(node);

    //
    // Construct the output table
    //
    TupleSchema* schema = node->generateTupleSchema(true);
    int num_of_columns = static_cast<int>(node->getOutputSchema().size());
    std::string* column_names = new std::string[num_of_columns];
    for (int ctr = 0; ctr < num_of_columns; ctr++) {
        column_names[ctr] = node->getOutputSchema()[ctr]->getColumnName();
    }
    node->setOutputTable(TableFactory::getTempTable(node->databaseId(),
                                                    "temp",
                                                    schema,
                                                    column_names,
                                                    limits));
    delete[] column_names;
    return true;
}
void Table::loadTuplesFromNoHeader(bool allowExport,
                                   SerializeInput &serialize_io,
                                   Pool *stringPool) {
    int tupleCount = serialize_io.readInt();
    assert(tupleCount >= 0);

    // Allocate the required data blocks up front so that they are well aligned.
    while (tupleCount + m_usedTuples > m_allocatedTuples) {
        allocateNextBlock();
    }

    for (int i = 0; i < tupleCount; ++i) {
        m_tmpTarget1.move(dataPtrForTuple((int) m_usedTuples + i));
        m_tmpTarget1.setDeletedFalse();
        m_tmpTarget1.setDirtyFalse();
        m_tmpTarget1.setEvictedFalse();
        m_tmpTarget1.deserializeFrom(serialize_io, stringPool);

        processLoadedTuple(allowExport, m_tmpTarget1);

        VOLT_TRACE("Loaded new tuple #%02d\n%s", i, m_tmpTarget1.debug(name()).c_str());
    }

    populateIndexes(tupleCount);

    m_tupleCount += tupleCount;
    m_usedTuples += tupleCount;
}
std::string Table::debug() {
    VOLT_TRACE("tabledebug start");
    std::ostringstream buffer;

    buffer << tableType() << "(" << name() << "):\n";
    buffer << "\tAllocated Tuples: " << m_allocatedTuples << "\n";
#ifdef MEMCHECK_NOFREELIST
    buffer << "\tDeleted Tuples: " << m_deletedTupleCount << "\n";
#else
    buffer << "\tDeleted Tuples: " << m_holeFreeTuples.size() << "\n";
#endif
    buffer << "\tNumber of Columns: " << columnCount() << "\n";

    //
    // Columns
    //
    buffer << "===========================================================\n";
    buffer << "\tCOLUMNS\n";
    buffer << m_schema->debug();
    //buffer << " - TupleSchema needs a \"debug\" method. Add one for output here.\n";

    //
    // Tuples
    //
    buffer << "===========================================================\n";
    buffer << "\tDATA\n";
    TableIterator iter(this);
    TableTuple tuple(m_schema);
    if (this->activeTupleCount() == 0) {
        buffer << "\t<NONE>\n";
    } else {
        std::string lastTuple = "";
        while (iter.next(tuple)) {
            if (tuple.isActive()) {
                buffer << "\t" << tuple.debug(this->name().c_str()) << "\n";
            }
        }
    }
    buffer << "===========================================================\n";

    std::string ret(buffer.str());
    VOLT_TRACE("tabledebug end");

    return ret;
}
bool SendExecutor::p_init(AbstractPlanNode* abstractNode,
                          const ExecutorVector&) {
    VOLT_TRACE("init Send Executor");
    assert(dynamic_cast<SendPlanNode*>(m_abstractNode));
    assert(m_abstractNode->getInputTableCount() == 1);
    return true;
}
void AbstractExpression::substitute(const NValueArray &params) {
    if (!m_hasParameter)
        return;

    // Descend. Nodes with parameters overload substitute().
    VOLT_TRACE("Substituting parameters for expression \n%s ...", debug(true).c_str());
    if (m_left) {
        VOLT_TRACE("Substitute processing left child...");
        m_left->substitute(params);
    }
    if (m_right) {
        VOLT_TRACE("Substitute processing right child...");
        m_right->substitute(params);
    }
}
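// A minimal sketch, not taken from the VoltDB source, of how a leaf
// expression holding a parameter might overload substitute() and so
// terminate the recursion above. The class name SketchParameterExpression
// and its members are illustrative assumptions; a real subclass of
// AbstractExpression would also need to provide whatever other virtual
// methods the base class requires and to set its has-parameter flag.
class SketchParameterExpression : public AbstractExpression {
public:
    explicit SketchParameterExpression(int valueIdx)
        : AbstractExpression(EXPRESSION_TYPE_VALUE_PARAMETER),
          m_valueIdx(valueIdx) { }

    // Leaf node: copy the bound parameter value instead of descending further.
    virtual void substitute(const NValueArray &params) {
        m_value = params[m_valueIdx];
    }

    // Evaluation simply returns the most recently substituted value.
    virtual NValue eval(const TableTuple*, const TableTuple*) const {
        return m_value;
    }

private:
    int m_valueIdx;
    NValue m_value;
};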
void InsertExecutor::p_execute_finish() {
    if (m_replicatedTableOperation) {
        // Use the static value assigned above to propagate the result to the
        // other engines that skipped the replicated table work.
        assert(s_modifiedTuples != -1);
        m_modifiedTuples = s_modifiedTuples;
    }
    m_count_tuple.setNValue(0, ValueFactory::getBigIntValue(m_modifiedTuples));
    // put the tuple into the output table
    m_tmpOutputTable->insertTuple(m_count_tuple);

    // add to the plan fragment's count of modified tuples
    m_engine->addToTuplesModified(m_modifiedTuples);
    VOLT_DEBUG("Finished inserting %" PRId64 " tuples", m_modifiedTuples);
    VOLT_TRACE("InsertExecutor output table:\n%s\n", m_tmpOutputTable->debug().c_str());
    VOLT_TRACE("InsertExecutor target table:\n%s\n", m_targetTable->debug().c_str());
}
void NVMAntiCacheDB::writeBlock(const std::string tableName,
                                uint32_t blockId,
                                const int tupleCount,
                                const char* data,
                                const long size,
                                const int evictedTupleCount) {
    VOLT_TRACE("free blocks: %d", getFreeBlocks());
    if (getFreeBlocks() == 0) {
        VOLT_WARN("No free space in ACID %d for blockid %u with blocksize %ld",
                  m_ACID, blockId, size);
        throw FullBackingStoreException(((int32_t)m_ACID << 16) & blockId, 0);
    }
    uint32_t index = getFreeNVMBlockIndex();
    VOLT_TRACE("block index: %u", index);
    char* block = getNVMBlock(index);
    long bufsize;
    char* buffer = new char[tableName.size() + 1 + size];
    memset(buffer, 0, tableName.size() + 1 + size);
    // Pack the table name (including its NUL terminator) followed by the raw block data.
    bufsize = tableName.size() + 1;
    memcpy(buffer, tableName.c_str(), bufsize);
    memcpy(buffer + bufsize, data, size);
    bufsize += size;
    memcpy(block, buffer, bufsize);
    delete[] buffer;

    VOLT_DEBUG("Writing NVM Block: ID = %u, index = %u, tupleCount = %d, size = %ld, tableName = %s",
               blockId, index, tupleCount, bufsize, tableName.c_str());

    m_blocksEvicted++;
    if (!isBlockMerge()) {
        tupleInBlock[blockId] = tupleCount;
        evictedTupleInBlock[blockId] = evictedTupleCount;
        blockSize[blockId] = bufsize;
        m_bytesEvicted += static_cast<int32_t>((int64_t)bufsize * evictedTupleCount / tupleCount);
    } else {
        m_bytesEvicted += static_cast<int32_t>(bufsize);
    }

    m_blockMap.insert(std::pair<uint32_t, std::pair<int, int32_t> >(
        blockId, std::pair<uint32_t, int32_t>(index, static_cast<int32_t>(bufsize))));
    m_monoBlockID++; // FIXME: I'm hacking!!!!!!!!!!!!!!!!!!!!!!!!!

    pushBlockLRU(blockId);
}
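// A minimal sketch (an assumption, not part of NVMAntiCacheDB) showing how
// the buffer packed above -- the table name, its NUL terminator, then the
// raw block data -- could be split apart again by a reader. Only standard
// library calls are used; parsePackedBlock is a hypothetical helper name.
#include <cstring>
#include <string>

static void parsePackedBlock(const char* block, long bufsize,
                             std::string& tableNameOut,
                             const char*& dataOut, long& dataSizeOut) {
    // The table name occupies the leading bytes up to the NUL written by
    // memcpy(buffer, tableName.c_str(), tableName.size() + 1) above.
    tableNameOut = std::string(block);
    long headerSize = static_cast<long>(tableNameOut.size()) + 1;
    dataOut = block + headerSize;
    dataSizeOut = bufsize - headerSize;
}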
virtual void substitute(const NValueArray &params) {
    assert(m_child);
    if (!m_hasParameter)
        return;

    VOLT_TRACE("Substituting parameters for expression \n%s ...", debug(true).c_str());
    m_child->substitute(params);
}
bool ProjectionExecutor::p_init(AbstractPlanNode *abstractNode,
                                TempTableLimits* limits) {
    VOLT_TRACE("init Projection Executor");
    assert(limits);

    ProjectionPlanNode* node = dynamic_cast<ProjectionPlanNode*>(abstractNode);
    assert(node);

    // Create output table based on output schema from the plan
    setTempOutputTable(limits);

    m_columnCount = static_cast<int>(node->getOutputSchema().size());

    // initialize local variables
    all_tuple_array_ptr = ExpressionUtil::convertIfAllTupleValues(node->getOutputColumnExpressions());
    all_tuple_array = all_tuple_array_ptr.get();
    all_param_array_ptr = ExpressionUtil::convertIfAllParameterValues(node->getOutputColumnExpressions());
    all_param_array = all_param_array_ptr.get();

    needs_substitute_ptr = boost::shared_array<bool>(new bool[m_columnCount]);
    needs_substitute = needs_substitute_ptr.get();

    typedef AbstractExpression* ExpRawPtr;
    expression_array_ptr = boost::shared_array<ExpRawPtr>(new ExpRawPtr[m_columnCount]);
    expression_array = expression_array_ptr.get();

    for (int ctr = 0; ctr < m_columnCount; ctr++) {
        assert(node->getOutputColumnExpressions()[ctr] != NULL);
        VOLT_TRACE("OutputColumnExpressions [%d]: %s", ctr,
                   node->getOutputColumnExpressions()[ctr]->debug(true).c_str());
        expression_array_ptr[ctr] = node->getOutputColumnExpressions()[ctr];
        needs_substitute_ptr[ctr] = node->getOutputColumnExpressions()[ctr]->hasParameter();
    }

    output_table = dynamic_cast<TempTable*>(node->getOutputTable()); // output table should be a TempTable

    if (!node->isInline()) {
        Table* input_table = node->getInputTable();
        tuple = TableTuple(input_table->schema());
    }
    return true;
}
bool Table::checkNulls(TableTuple& tuple) const {
    assert(m_columnCount == tuple.columnCount());
    for (int i = m_columnCount - 1; i >= 0; --i) {
        if ((!m_allowNulls[i]) && tuple.isNull(i)) {
            VOLT_TRACE("%d th attribute was NULL. It is a non-nullable attribute.", i);
            return false;
        }
    }
    return true;
}
bool ArrayUniqueIndex::exists(const TableTuple* values) {
    int32_t key = ValuePeeker::peekAsInteger(values->getNValue(column_indices_[0]));
    //VOLT_DEBUG("Exists?: %lld", key);
    assert(key < ARRAY_INDEX_INITIAL_SIZE);
    assert(key >= 0);
    if (key >= allocated_entries_) return false;
    VOLT_TRACE("Checking entry b: %d", (int)key);
    ++m_lookups;
    return entries_[key] != NULL;
}
void DataConflictException::p_serialize(ReferenceSerializeOutput *output) {
    SQLException::p_serialize(output);
    output->writeLong(m_blockerTxnId);
    output->writeLong(m_waitingTxnId);
    output->writeLong(m_executedBatch);
    output->writeLong(m_executedPlanNodes);
    output->writeTextString(m_table->name());
    VOLT_TRACE("WWWG: blocker %jd waiter %jd %jd %jd %s \n",
               (intmax_t)m_blockerTxnId,
               (intmax_t)m_waitingTxnId,
               (intmax_t)m_executedBatch,
               (intmax_t)m_executedPlanNodes,
               m_table->name().c_str());
}
bool ReceiveExecutor::p_init(AbstractPlanNode* abstract_node,
                             TempTableLimits* limits) {
    VOLT_TRACE("init Receive Executor");
    assert(dynamic_cast<ReceivePlanNode*>(abstract_node));

    // Create output table based on output schema from the plan
    setTempOutputTable(limits);
    return true;
}
ParameterValueExpression::ParameterValueExpression(int value_idx)
    : AbstractExpression(EXPRESSION_TYPE_VALUE_PARAMETER),
      m_valueIdx(value_idx), m_paramValue()
{
    VOLT_TRACE("ParameterValueExpression %d", value_idx);
    ExecutorContext* context = ExecutorContext::getExecutorContext();
    VoltDBEngine* engine = context->getEngine();
    assert(engine != NULL);
    NValueArray& params = engine->getParameterContainer();
    assert(value_idx < params.size());
    // Cache a pointer to this parameter's slot in the engine's parameter container.
    m_paramValue = &params[value_idx];
}
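// A minimal sketch (an assumption, not necessarily the class's actual
// implementation) of the eval() that pairs with the constructor above:
// because m_paramValue points into the engine's parameter container,
// evaluation can simply dereference the cached pointer once the current
// fragment's parameters have been filled in.
NValue ParameterValueExpression::eval(const TableTuple*, const TableTuple*) const {
    return *m_paramValue;
}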
bool ProjectionExecutor::p_init(AbstractPlanNode *abstractNode,
                                TempTableLimits* limits) {
    VOLT_TRACE("init Projection Executor");
    assert(limits);

    ProjectionPlanNode* node = dynamic_cast<ProjectionPlanNode*>(abstractNode);
    assert(node);

    //
    // Construct the output table
    //
    TupleSchema* schema = node->generateTupleSchema(true);
    m_columnCount = static_cast<int>(node->getOutputSchema().size());
    std::string* column_names = new std::string[m_columnCount];
    for (int ctr = 0; ctr < m_columnCount; ctr++) {
        column_names[ctr] = node->getOutputSchema()[ctr]->getColumnName();
    }
    node->setOutputTable(TableFactory::getTempTable(node->databaseId(),
                                                    "temp",
                                                    schema,
                                                    column_names,
                                                    limits));
    delete[] column_names;

    // initialize local variables
    all_tuple_array_ptr = ExpressionUtil::convertIfAllTupleValues(node->getOutputColumnExpressions());
    all_tuple_array = all_tuple_array_ptr.get();
    all_param_array_ptr = ExpressionUtil::convertIfAllParameterValues(node->getOutputColumnExpressions());
    all_param_array = all_param_array_ptr.get();

    needs_substitute_ptr = boost::shared_array<bool>(new bool[m_columnCount]);
    needs_substitute = needs_substitute_ptr.get();

    typedef AbstractExpression* ExpRawPtr;
    expression_array_ptr = boost::shared_array<ExpRawPtr>(new ExpRawPtr[m_columnCount]);
    expression_array = expression_array_ptr.get();

    for (int ctr = 0; ctr < m_columnCount; ctr++) {
        assert(node->getOutputColumnExpressions()[ctr] != NULL);
        expression_array_ptr[ctr] = node->getOutputColumnExpressions()[ctr];
        needs_substitute_ptr[ctr] = node->getOutputColumnExpressions()[ctr]->hasParameter();
    }

    output_table = dynamic_cast<TempTable*>(node->getOutputTable()); // output table should be a TempTable

    if (!node->isInline()) {
        input_table = node->getInputTables()[0];
        tuple = TableTuple(input_table->schema());
    }
    return true;
}
/**
 * Helper method responsible for inserting the results of the
 * aggregation into a new tuple in the output table as well as passing
 * through any additional columns from the input table.
 */
inline void WindowFunctionExecutor::insertOutputTuple() {
    TableTuple& tempTuple = m_tmpOutputTable->tempTuple();

    // We copy the aggregate values into the output tuple,
    // then the passthrough columns.
    WindowAggregate** aggs = m_aggregateRow->getAggregates();
    for (int ii = 0; ii < getAggregateCount(); ii++) {
        NValue result = aggs[ii]->finalize(tempTuple.getSchema()->columnType(ii));
        tempTuple.setNValue(ii, result);
    }

    VOLT_TRACE("Setting passthrough columns");
    size_t tupleSize = tempTuple.sizeInValues();
    for (int ii = getAggregateCount(); ii < tupleSize; ii += 1) {
        AbstractExpression *expr = m_outputColumnExpressions[ii];
        tempTuple.setNValue(ii, expr->eval(&(m_aggregateRow->getPassThroughTuple())));
    }

    m_tmpOutputTable->insertTempTuple(tempTuple);
    VOLT_TRACE("output_table:\n%s", m_tmpOutputTable->debug().c_str());
}
WindowFunctionExecutor::EdgeType
WindowFunctionExecutor::findNextEdge(EdgeType edgeType, TableWindow &tableWindow) {
    // This is just an alias for the buffered input tuple.
    TableTuple &nextTuple = getBufferedInputTuple();

    VOLT_TRACE("findNextEdge(start): %s", tableWindow.debug().c_str());
    /*
     * At the start of the input we need to prime the
     * tuple pairs.
     */
    if (edgeType == START_OF_INPUT) {
        if (tableWindow.m_leadingEdge.next(nextTuple)) {
            initPartitionByKeyTuple(nextTuple);
            initOrderByKeyTuple(nextTuple);
            /* First row. Nothing to compare it with. */
            tableWindow.m_orderByGroupSize = 1;
            lookaheadOneRowForAggs(nextTuple, tableWindow);
        } else {
            /*
             * If there is no first row, just return END_OF_INPUT.
             * The leading edge iterator will never have a next row,
             * so asking it again will always fail.
             * We return a zero length group here.
             */
            tableWindow.m_orderByGroupSize = 0;
            return END_OF_INPUT;
        }
    } else {
        /*
         * We've already got a row, so
         * count it.
         */
        tableWindow.m_orderByGroupSize = 1;
        lookaheadOneRowForAggs(nextTuple, tableWindow);
    }
    do {
        VOLT_TRACE("findNextEdge(loopStart): %s", m_tableWindow->debug().c_str());
        if (tableWindow.m_leadingEdge.next(nextTuple)) {
            initPartitionByKeyTuple(nextTuple);
            initOrderByKeyTuple(nextTuple);
            if (compareTuples(getInProgressPartitionByKeyTuple(),
                              getLastPartitionByKeyTuple()) != 0) {
                VOLT_TRACE("findNextEdge(Partition): %s", m_tableWindow->debug().c_str());
                return START_OF_PARTITION_GROUP;
            }
            if (compareTuples(getInProgressOrderByKeyTuple(),
                              getLastOrderByKeyTuple()) != 0) {
                VOLT_TRACE("findNextEdge(Group): %s", m_tableWindow->debug().c_str());
                return START_OF_PARTITION_BY_GROUP;
            }
            tableWindow.m_orderByGroupSize += 1;
            lookaheadOneRowForAggs(nextTuple, tableWindow);
            VOLT_TRACE("findNextEdge(loop): %s", tableWindow.debug().c_str());
        } else {
            VOLT_TRACE("findNextEdge(EOI): %s", tableWindow.debug().c_str());
            return END_OF_INPUT;
        }
    } while (true);
}
AbstractPlanNode* AbstractPlanNode::getInlinePlanNode(PlanNodeType type) const {
    map<PlanNodeType, AbstractPlanNode*>::const_iterator lookup = m_inlineNodes.find(type);
    AbstractPlanNode* ret = NULL;
    if (lookup != m_inlineNodes.end()) {
        ret = lookup->second;
    } else {
        VOLT_TRACE("No internal PlanNode with type '%s' is available for '%s'",
                   planNodeToString(type).c_str(),
                   debug().c_str());
    }
    return ret;
}
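// Usage sketch: an executor asking its plan node for an inline projection.
// The scanNode pointer is assumed to be in scope, and PLAN_NODE_TYPE_PROJECTION
// is assumed to be the PlanNodeType value for projection nodes; a NULL return
// simply means no such inline node was attached.
AbstractPlanNode* inlineProjection = scanNode->getInlinePlanNode(PLAN_NODE_TYPE_PROJECTION);
if (inlineProjection != NULL) {
    // Apply the inline projection to each scanned tuple here.
}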
bool EvictionIterator::hasNext() {
    VOLT_TRACE("Size: %lu\n", (long unsigned int)m_size);

    PersistentTable* ptable = static_cast<PersistentTable*>(table);
    VOLT_TRACE("Count: %lu %lu\n", ptable->usedTupleCount(), ptable->activeTupleCount());

    if (ptable->usedTupleCount() == 0)
        return false;

#ifndef ANTICACHE_TIMESTAMPS
    if (current_tuple_id == ptable->getNewestTupleID())
        return false;
    if (ptable->getNumTuplesInEvictionChain() == 0) { // there are no tuples in the chain
        VOLT_DEBUG("There are no tuples in the eviction chain.");
        return false;
    }
#else
    if (current_tuple_id == m_size)
        return false;
#endif

    return true;
}
bool MaterializeExecutor::p_init(AbstractPlanNode* abstractNode,
                                 TempTableLimits* limits) {
    VOLT_TRACE("init Materialize Executor");

    node = dynamic_cast<MaterializePlanNode*>(abstractNode);
    assert(node);
    batched = node->isBatched();

    // Construct the output table
    m_columnCount = static_cast<int>(node->getOutputSchema().size());
    assert(m_columnCount >= 0);
    TupleSchema* schema = node->generateTupleSchema(true);
    std::string* column_names = new std::string[m_columnCount];
    for (int ctr = 0; ctr < m_columnCount; ctr++) {
        column_names[ctr] = node->getOutputSchema()[ctr]->getColumnName();
    }
    node->setOutputTable(TableFactory::getTempTable(node->databaseId(),
                                                    "temp",
                                                    schema,
                                                    column_names,
                                                    limits));
    delete[] column_names;

    // initialize local variables
    all_param_array_ptr = expressionutil::convertIfAllParameterValues(node->getOutputColumnExpressions());
    all_param_array = all_param_array_ptr.get();

    needs_substitute_ptr = boost::shared_array<bool>(new bool[m_columnCount]);
    needs_substitute = needs_substitute_ptr.get();

    expression_array_ptr = boost::shared_array<AbstractExpression*>(new AbstractExpression*[m_columnCount]);
    expression_array = expression_array_ptr.get();

    for (int ctr = 0; ctr < m_columnCount; ctr++) {
        assert(node->getOutputColumnExpressions()[ctr] != NULL);
        expression_array_ptr[ctr] = node->getOutputColumnExpressions()[ctr];
        needs_substitute_ptr[ctr] = node->getOutputColumnExpressions()[ctr]->hasParameter();
    }

    // output table should be a TempTable
    output_table = dynamic_cast<TempTable*>(node->getOutputTable());
    return true;
}
SubqueryExpression::SubqueryExpression(ExpressionType subqueryType,
                                       int subqueryId,
                                       const std::vector<int>& paramIdxs,
                                       const std::vector<int>& otherParamIdxs,
                                       const std::vector<AbstractExpression*>* tveParams)
    : AbstractExpression(subqueryType),
      m_subqueryId(subqueryId),
      m_paramIdxs(paramIdxs),
      m_otherParamIdxs(otherParamIdxs),
      m_tveParams(tveParams)
{
    VOLT_TRACE("SubqueryExpression %d", subqueryId);
    assert((m_tveParams.get() == NULL && m_paramIdxs.empty()) ||
           (m_tveParams.get() != NULL && m_paramIdxs.size() == m_tveParams->size()));
}
boost::shared_ptr<ExecutorVector> ExecutorVector::fromJsonPlan(VoltDBEngine* engine,
                                                               const std::string& jsonPlan,
                                                               int64_t fragId) {
    PlanNodeFragment *pnf = NULL;
    try {
        pnf = PlanNodeFragment::createFromCatalog(jsonPlan);
    }
    catch (SerializableEEException &seee) {
        throw;
    }
    catch (...) {
        char msg[1024 * 100];
        snprintf(msg, 1024 * 100,
                 "Unable to initialize PlanNodeFragment for PlanFragment '%jd' with plan:\n%s",
                 (intmax_t)fragId, jsonPlan.c_str());
        VOLT_ERROR("%s", msg);
        throw SerializableEEException(VOLT_EE_EXCEPTION_TYPE_EEEXCEPTION, msg);
    }
    VOLT_TRACE("\n%s\n", pnf->debug().c_str());
    assert(pnf->getRootNode());

    if (!pnf->getRootNode()) {
        char msg[1024];
        snprintf(msg, 1024,
                 "Deserialized PlanNodeFragment for PlanFragment '%jd' does not have a root PlanNode",
                 (intmax_t)fragId);
        VOLT_ERROR("%s", msg);
        throw SerializableEEException(VOLT_EE_EXCEPTION_TYPE_EEEXCEPTION, msg);
    }

    int64_t tempTableLogLimit = engine->tempTableLogLimit();
    int64_t tempTableMemoryLimit = engine->tempTableMemoryLimit();

    // ENG-1333 HACK. If the plan node fragment has a delete node,
    // then turn off the governors.
    if (pnf->hasDelete()) {
        tempTableLogLimit = DEFAULT_TEMP_TABLE_MEMORY;
        tempTableMemoryLimit = -1;
    }

    // Note: the executor vector takes ownership of the plan node
    // fragment here.
    boost::shared_ptr<ExecutorVector> ev(new ExecutorVector(fragId,
                                                            tempTableLogLimit,
                                                            tempTableMemoryLimit,
                                                            pnf));
    ev->init(engine);
    return ev;
}
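// Usage sketch: obtaining the executor vector for a plan fragment. The
// engine pointer, the JSON plan string, and the fragment id are assumed to
// be in scope (for example, inside the engine's fragment-loading path);
// any failure surfaces as the SerializableEEException thrown above.
boost::shared_ptr<ExecutorVector> execVector =
    ExecutorVector::fromJsonPlan(engine, jsonPlan, fragId);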
TableIndex *TableIndexFactory::getInstance(const TableIndexScheme &scheme) {
    int colCount = (int)scheme.columnIndices.size();
    TupleSchema *tupleSchema = scheme.tupleSchema;
    assert(tupleSchema);

    std::vector<ValueType> keyColumnTypes;
    std::vector<int32_t> keyColumnLengths;
    std::vector<bool> keyColumnAllowNull(colCount, true);
    for (int i = 0; i < colCount; ++i) {
        keyColumnTypes.push_back(tupleSchema->columnType(scheme.columnIndices[i]));
        keyColumnLengths.push_back(tupleSchema->columnLength(scheme.columnIndices[i]));
    }
    TupleSchema *keySchema = TupleSchema::createTupleSchema(keyColumnTypes,
                                                            keyColumnLengths,
                                                            keyColumnAllowNull,
                                                            true);
    assert(keySchema);
    VOLT_TRACE("Creating index for %s.\n%s", scheme.name.c_str(), keySchema->debug().c_str());

    TableIndexPicker picker(keySchema, scheme);
    TableIndex *retval = picker.getInstance();
    return retval;
}