// Generate the executor task definition block (ComTdbUnion) for a merge /
// ordered / conditional union.  Generates both children, builds the move
// expressions that copy each child's output into the unioned tuple, and
// (for sort-merge / conditional unions) the merge, condition and
// triggered-action-exception expressions.  Returns 0; the resulting TDB is
// handed back through generator->setGenObj().
short MergeUnion::codeGen(Generator * generator)
{
  ExpGenerator * exp_gen = generator->getExpGenerator();
  Space * space = generator->getSpace();
  // New map table for this node; everything appended below it (including the
  // children's entries) is removed again before returning.
  MapTable * my_map_table = generator->appendAtEnd();

  ////////////////////////////////////////////////////////////////////////////
  //
  // Layout at this node:
  //
  // |------------------------------------------------------------------------|
  // | input data  |  Unioned data   |  left child's data | right child's data |
  // | ( I tupps ) |  ( 1 tupp )     |  ( L tupps )       | ( R tupp )         |
  // |------------------------------------------------------------------------|
  // <-- returned row to parent --->
  // <------------ returned row from left child ------->
  // <-------------------- returned row from right child --------------------->
  //
  // input data:        the atp input to this node by its parent.
  // unioned data:      tupp where the unioned result is moved
  // left child data:   tupps appended by the left child
  // right child data:  tupps appended by right child
  //
  // Input to left child:  I + 1 tupps
  // Input to right child: I + 1 + L tupps
  //
  // Tupps returned from left and right child are only used to create the
  // unioned data. They are not returned to parent.
  //
  ////////////////////////////////////////////////////////////////////////////

  ex_cri_desc * given_desc = generator->getCriDesc(Generator::DOWN);

  ex_cri_desc * returned_desc = NULL;
  // One extra tupp (the unioned row) is appended only when at least one child
  // exists; a childless TDB (index maintenance) returns the parent's desc.
  if (child(0) || child(1))
    returned_desc = new(space) ex_cri_desc(given_desc->noTuples() + 1, space);
  else
    returned_desc = given_desc;

  // expressions to move the left and right child's output to the
  // unioned row.
  ex_expr * left_expr = 0;
  ex_expr * right_expr = 0;

  // expression to compare left and right child's output to
  // evaluate merge union.
  ex_expr * merge_expr = 0;

  // Expression to conditionally execute the left or right child.
  ex_expr *cond_expr = NULL;

  // Expression to handle triggered action exception.
  ex_expr *trig_expr = NULL;

  // It is OK for neither child to exist when generating a merge union TDB
  // for index maintenance. The children are filled in at build time.
  //
  // GenAssert((child(0) AND child(1)) OR (NOT child(0) AND NOT (child(1))),
  //           "MergeUnion -- missing one child");
  ComTdb * left_child_tdb = NULL;
  ComTdb * right_child_tdb = NULL;
  ExplainTuple *leftExplainTuple = NULL;
  ExplainTuple *rightExplainTuple = NULL;
  NABoolean afterUpdate = FALSE;
  NABoolean rowsFromLeft = TRUE;
  NABoolean rowsFromRight = TRUE;

  if (child(0) && child(1))
  {
    // if an update operation is found before the execution of the
    // IF statement, set afterUpdate to 1 indicating that an update operation
    // was performed before the execution of the IF statement. Which
    // is used at runtime to decide whether to set rollbackTransaction in the
    // diagsArea.
    if (generator->updateWithinCS() && getUnionForIF())
    {
      afterUpdate = TRUE;
    }

    // generate the left child
    generator->setCriDesc(returned_desc, Generator::DOWN);
    child(0)->codeGen(generator);
    left_child_tdb = (ComTdb *)(generator->getGenObj());
    leftExplainTuple = generator->getExplainTuple();

    // MVs --
    // If the left child does not have any outputs, don't expect any rows.
    if (child(0)->getGroupAttr()->getCharacteristicOutputs().isEmpty())
      rowsFromLeft = FALSE;

    // if an update operation is found in the left subtree of this Union then
    // set rowsFromLeft to 0 which is passed on to execution tree indicating
    // that this Union node is not expecting rows from the left child, then
    // foundAnUpdate_ is reset so it can be reused while doing codeGen() on
    // the right sub tree.
    if (getUnionForIF())
    {
      if (! getCondEmptyIfThen())
      {
        if (generator->foundAnUpdate())
        {
          rowsFromLeft = FALSE;
          generator->setFoundAnUpdate(FALSE);
        }
      }
      else
      {
        rowsFromLeft = FALSE;
      }
    }

    // descriptor returned by left child is given to right child as input.
    generator->setCriDesc(generator->getCriDesc(Generator::UP), Generator::DOWN);
    child(1)->codeGen(generator);
    right_child_tdb = (ComTdb *)(generator->getGenObj());
    rightExplainTuple = generator->getExplainTuple();

    // MVs
    // If the right child does not have any outputs, don't expect any rows.
    if (child(1)->getGroupAttr()->getCharacteristicOutputs().isEmpty())
      rowsFromRight = FALSE;

    // if an update operation is found in the right subtree of this CS then
    // set rowsFromRight to 0 which is passed on to execution tree indicating
    // that this CS node is not expecting rows from the right child, then
    // foundAnUpdate_ is reset so it can be reused while doing codeGen() on
    // the left or right child of another CS node.
    if (getUnionForIF())
    {
      if (! getCondEmptyIfElse())
      {
        if (generator->foundAnUpdate())
        {
          rowsFromRight = FALSE;
        }
      }
      else
      {
        rowsFromRight = FALSE;
      }

      // we cannot always expect a row from a conditional operator. If it is an
      // IF statement without an ELSE and the condition fails then we do not get
      // any rows back. So we allow a conditional union operator to handle all
      // errors below it and for the purposes of 8015 error / 8014 warning
      // treat it as an update node. In this way the nodes above it do not expect
      // any row from this child and do not raise an error if no row is returned.
      // 8014/8015 type errors within this IF statement are handled as in any
      // regular CS.
      generator->setFoundAnUpdate(TRUE);
    }
  }

  // Create the unioned row.
  // colMapTable() is a list of ValueIdUnion nodes where each node points to
  // the corresponding left and the right output entries.
  // Generate expressions to move the left and right child's output to
  // the unioned row.
  ValueIdList left_val_id_list;
  ValueIdList right_val_id_list;
  CollIndex i;
  for (i = 0; i < colMapTable().entries(); i++)
  {
    ValueIdUnion * vidu_node =
      (ValueIdUnion *)(((colMapTable()[i]).getValueDesc())->getItemExpr());

    Cast * cnode;
    if (vidu_node->getResult().getType().getTypeQualifier() != NA_ROWSET_TYPE)
    {
      // move left child's output to result. The 'type' of Cast result is same
      // as that of the vidu_node.
      cnode = new(generator->wHeap())
        Cast(((vidu_node->getLeftSource()).getValueDesc())->getItemExpr(),
             &(vidu_node->getResult().getType()));
    }
    else
    {
      // We indicate that the whole array is to be copied.
      SQLRowset *rowsetInfo = (SQLRowset *) &(vidu_node->getResult().getType());
      SQLRowset *newRowset = new (generator->wHeap())
        SQLRowset(generator->wHeap(),
                  rowsetInfo->getElementType(),
                  rowsetInfo->getMaxNumElements(),
                  rowsetInfo->getNumElements());
      newRowset->useTotalSize() = TRUE;
      cnode = new(generator->wHeap())
        Cast(((vidu_node->getLeftSource()).getValueDesc())->getItemExpr(),
             newRowset);
    }
    cnode->bindNode(generator->getBindWA());
    left_val_id_list.insert(cnode->getValueId());

    if (vidu_node->getResult().getType().getTypeQualifier() != NA_ROWSET_TYPE)
    {
      // move right child's output to result. The 'type' of Cast result is same
      // as that of the vidu_node.
      cnode = new(generator->wHeap())
        Cast(((vidu_node->getRightSource()).getValueDesc())->getItemExpr(),
             &(vidu_node->getResult().getType()));
    }
    else
    {
      // We indicate that the whole array is to be copied.
      SQLRowset *rowsetInfo = (SQLRowset *) &(vidu_node->getResult().getType());
      SQLRowset *newRowset = new (generator->wHeap())
        SQLRowset(generator->wHeap(),
                  rowsetInfo->getElementType(),
                  rowsetInfo->getMaxNumElements(),
                  rowsetInfo->getNumElements());
      newRowset->useTotalSize() = TRUE;
      cnode = new(generator->wHeap())
        Cast(((vidu_node->getRightSource()).getValueDesc())->getItemExpr(),
             newRowset);
    }
    cnode->bindNode(generator->getBindWA());
    right_val_id_list.insert(cnode->getValueId());
  }

  ExpTupleDesc * tuple_desc = 0;
  ULng32 tuple_length = 0;
  if (child(0) && child(1))
  {
    // Both moves target the unioned tupp (last entry of returned_desc);
    // tuple_length is set by each call (left and right rows have the same
    // layout, so the second call overwrites it with the same value).
    exp_gen->generateContiguousMoveExpr(left_val_id_list,
                                        0, // don't add convert nodes
                                        1,
                                        returned_desc->noTuples() - 1,
                                        ExpTupleDesc::SQLARK_EXPLODED_FORMAT,
                                        tuple_length,
                                        &left_expr,
                                        &tuple_desc,
                                        ExpTupleDesc::SHORT_FORMAT);
    exp_gen->generateContiguousMoveExpr(right_val_id_list,
                                        0, // don't add convert nodes
                                        1,
                                        returned_desc->noTuples() - 1,
                                        ExpTupleDesc::SQLARK_EXPLODED_FORMAT,
                                        tuple_length,
                                        &right_expr);
  }

  // add value ids for all vidu_nodes to my map table. This is the
  // map table that will be returned. The attributes of the value ids
  // are same as that of left (or right) expression outputs.
  for (i = 0; i < colMapTable().entries(); i++)
  {
    ValueIdUnion * vidu_node =
      (ValueIdUnion *)(((colMapTable()[i]).getValueDesc())->getItemExpr());

    Attributes * attr =
      generator->addMapInfoToThis(my_map_table,
                                  vidu_node->getValueId(),
                                  generator->getMapInfo(left_val_id_list[i])->getAttr())->getAttr();
    attr->setAtp(0);
  }

  // describe the returned unioned row
  returned_desc->setTupleDescriptor(returned_desc->noTuples() - 1, tuple_desc);

  // if sort-merge union is being done, generate expression to
  // compare the left and the right values.
  // This predicate should return TRUE if the left value is
  // less than the right value.
  merge_expr = 0;
  if (getMergeExpr())
  {
    // generate the merge predicate.
    ItemExpr * mergeExpr = new(generator->wHeap()) BoolResult(getMergeExpr());
    mergeExpr->bindNode(generator->getBindWA());
    exp_gen->generateExpr(mergeExpr->getValueId(), ex_expr::exp_SCAN_PRED,
                          &merge_expr);
  }

  // If conditional union, generate conditional expression, and ignore
  // right child if it was just being used as a no-op.
  cond_expr = 0;
  if (NOT condExpr().isEmpty())
  {
    ItemExpr *condExp = condExpr().rebuildExprTree(ITM_AND, TRUE, TRUE);
    exp_gen->generateExpr(condExp->getValueId(), ex_expr::exp_SCAN_PRED,
                          &cond_expr);
  }

  // If conditional union, generate triggered action exception error.
  if (NOT trigExceptExpr().isEmpty())
  {
    ItemExpr *trigExp = trigExceptExpr().rebuildExprTree(ITM_AND, TRUE, TRUE);
    exp_gen->generateExpr(trigExp->getValueId(), ex_expr::exp_SCAN_PRED,
                          &trig_expr);
  }

  // remove both children's map table. Nothing from child's context
  // should be visible from here on upwards.
  generator->removeAll(my_map_table);

  // Ensure the default buffer size is at least as large as the unioned output
  // row.
  UInt32 outputBuffSize = MAXOF( getDefault(GEN_UN_BUFFER_SIZE),
                                 tuple_length );
  outputBuffSize = SqlBufferNeededSize( 1, // # of tuples
                                        outputBuffSize,
                                        SqlBuffer::NORMAL_ );

  ComTdbUnion * union_tdb = new(space)
    ComTdbUnion(left_child_tdb,
                right_child_tdb,
                left_expr,
                right_expr,
                merge_expr,
                cond_expr,
                trig_expr,
                tuple_length,                 // unioned rowlen
                returned_desc->noTuples()-1,  // tupp index for
                                              // unioned buffer
                given_desc,
                returned_desc,
                (queue_index)getDefault(GEN_UN_SIZE_DOWN),
                (queue_index)getDefault(GEN_UN_SIZE_UP),
                (Cardinality) (getInputCardinality() * getEstRowsUsed()).getValue(),
                getDefault(GEN_UN_NUM_BUFFERS),
                outputBuffSize,
                getOrderedUnion(),
                getBlockedUnion(),  //++ Triggers -
                hasNoOutputs(),     //++ Triggers -
                rowsFromLeft,
                rowsFromRight,
                afterUpdate,
                getInNotAtomicStatement());

  generator->initTdbFields(union_tdb);

  // If it does not have two children, this is index maintenance code and
  // should not be Explained.
  if (!generator->explainDisabled())
  {
    generator->setExplainTuple(addExplainInfo(union_tdb,
                                              leftExplainTuple,
                                              rightExplainTuple,
                                              generator));
  }

  // restore the original down cri desc since this node changed it.
  generator->setCriDesc(given_desc, Generator::DOWN);

  // set the new up cri desc.
  generator->setCriDesc(returned_desc, Generator::UP);
  generator->setGenObj(this, union_tdb);

  return 0;
}
// Generate the executor task definition block (ComTdbStoredProc) for an
// internal stored procedure call.  Builds the expression that assembles the
// SP's input row, the expression that moves the SP's output row back to the
// parent, and the ExSPInputOutput extract/move descriptors.  Returns 0; the
// TDB is handed back through generator->setGenObj().
short RelInternalSP::codeGen(Generator * generator)
{
  Space * space = generator->getSpace();
  ExpGenerator * exp_gen = generator->getExpGenerator();
  MapTable * last_map_table = generator->getLastMapTable();
  ex_expr * input_expr = NULL;
  ex_expr * output_expr = NULL;

  ////////////////////////////////////////////////////////////////////////////
  //
  // Returned atp layout:
  //
  // |--------------------------------|
  // | input data  |  stored proc row |
  // | ( I tupps ) |  ( 1 tupp )      |
  // |--------------------------------|
  // <-- returned row to parent ---->
  //
  // input data:      the atp input to this node by its parent.
  // stored proc row: tupp where the row read from SP is moved.
  //
  ////////////////////////////////////////////////////////////////////////////

  ex_cri_desc * given_desc = generator->getCriDesc(Generator::DOWN);

  ex_cri_desc * returned_desc =
    new(space) ex_cri_desc(given_desc->noTuples() + 1, space);

  // cri descriptor for work atp has 3 entries:
  // -- the first two entries for consts and temps.
  // -- Entry 3 (index #2) is where the input and output rows will be created.
  ex_cri_desc * work_cri_desc = new(space) ex_cri_desc(3, space);
  const Int32 work_atp = 1;
  const Int32 work_atp_index = 2;

  ExpTupleDesc * input_tuple_desc = NULL;
  ExpTupleDesc * output_tuple_desc = NULL;

  // Generate expression to create the input row that will be
  // given to the stored proc.
  // The input value is in sp->getProcAllParams()
  // and has to be converted to sp->procType().
  // Generate Cast node to convert procParam to ProcType.
  // If procType is a varchar, explode it. This is done
  // so that values could be extracted correctly.
  ValueIdList procVIDList;
  for (CollIndex i = 0; i < procTypes().entries(); i++)
  {
    Cast * cn;

    if ((procTypes())[i].getType().getVarLenHdrSize() > 0)
    {
      // 5/9/98: add support for VARNCHAR
      const CharType& char_type = (CharType&)((procTypes())[i].getType());

      // Explode varchars by moving them to a fixed field
      // whose length is equal to the max length of varchar.
      cn = new(generator->wHeap())
        Cast ((getProcAllParamsVids())[i].getItemExpr(),
              (new(generator->wHeap())
               SQLChar(generator->wHeap(),
                       CharLenInfo(char_type.getStrCharLimit(),
                                   char_type.getDataStorageSize()),
                       char_type.supportsSQLnull(),
                       FALSE, FALSE, FALSE,
                       char_type.getCharSet(),
                       char_type.getCollation(),
                       char_type.getCoercibility()
                       /*
                         (procTypes())[i].getType().getNominalSize(),
                         (procTypes())[i].getType().supportsSQLnull()
                       */
                       )
               )
              );

      // Move the exploded field to a varchar field since
      // procType is varchar.
      // Can optimize by adding an option to convert node to
      // blankpad. TBD.
      //
      // cn = new(generator->wHeap()) Cast(cn, &((procTypes())[i].getType()));
    }
    else
      cn = new(generator->wHeap())
        Cast((getProcAllParamsVids())[i].getItemExpr(),
             &((procTypes())[i].getType()));

    cn->bindNode(generator->getBindWA());
    procVIDList.insert(cn->getValueId());
  }

  ULng32 inputRowlen_ = 0;
  exp_gen->generateContiguousMoveExpr(procVIDList,
                                      -1, /*add conv nodes*/
                                      work_atp, work_atp_index,
                                      ExpTupleDesc::SQLARK_EXPLODED_FORMAT,
                                      inputRowlen_,
                                      &input_expr,
                                      &input_tuple_desc,
                                      ExpTupleDesc::LONG_FORMAT);

  // add all columns from this SP to the map table.
  ULng32 tupleLength;
  exp_gen->processValIdList(getTableDesc()->getColumnList(),
                            ExpTupleDesc::SQLARK_EXPLODED_FORMAT,
                            tupleLength,
                            work_atp,
                            work_atp_index);

  // Generate expression to move the output row returned by the
  // stored proc back to parent.
  ULng32 outputRowlen_ = 0;
  MapTable * returnedMapTable = 0;
  exp_gen->generateContiguousMoveExpr(getTableDesc()->getColumnList(),
                                      -1 /*add conv nodes*/,
                                      0,
                                      returned_desc->noTuples() - 1,
                                      ExpTupleDesc::SQLARK_EXPLODED_FORMAT,
                                      outputRowlen_,
                                      &output_expr,
                                      &output_tuple_desc,
                                      ExpTupleDesc::LONG_FORMAT,
                                      &returnedMapTable);

  // Now generate expressions used to extract or move input or
  // output values. See class ExSPInputOutput.
  ExSPInputOutput * extractInputExpr = NULL;
  ExSPInputOutput * moveOutputExpr = NULL;
  generateSPIOExpr(this, generator,
                   extractInputExpr,
                   moveOutputExpr);

  // done with expressions at this operator. Remove the appended map tables.
  generator->removeAll(last_map_table);

  // append the map table containing the returned columns
  generator->appendAtEnd(returnedMapTable);

  NAString procNameAsNAString(procName_);
  char * sp_name =
    space->allocateAndCopyToAlignedSpace(procNameAsNAString,
                                         procNameAsNAString.length(), 0);

  ExpGenerator *expGen = generator->getExpGenerator();

  // expression to conditionally return 0 or more rows.
  ex_expr *predExpr = NULL;

  // generate tuple selection expression, if present
  if (NOT selectionPred().isEmpty())
  {
    ItemExpr* pred = selectionPred().rebuildExprTree(ITM_AND,TRUE,TRUE);
    expGen->generateExpr(pred->getValueId(),ex_expr::exp_SCAN_PRED,&predExpr);
  }

  ComTdbStoredProc * sp_tdb = new(space)
    ComTdbStoredProc(sp_name,
                     input_expr,
                     inputRowlen_,
                     output_expr,
                     outputRowlen_,
                     work_cri_desc,
                     work_atp_index,
                     given_desc,
                     returned_desc,
                     extractInputExpr,
                     moveOutputExpr,
                     2,
                     1024,
                     (Cardinality) getGroupAttr()->
                       getOutputLogPropList()[0]->
                       getResultCardinality().value(),
                     5,
                     64000, //10240
                     predExpr,
                     (UInt16) arkcmpInfo_);

  generator->initTdbFields(sp_tdb);

  if (!generator->explainDisabled())
  {
    generator->setExplainTuple(
         addExplainInfo(sp_tdb, 0, 0, generator));
  }

  // Do not infer that any transaction started can
  // be in READ ONLY mode if ISPs are present.
  generator->setNeedsReadWriteTransaction(TRUE);

  generator->setCriDesc(given_desc, Generator::DOWN);
  generator->setCriDesc(returned_desc, Generator::UP);
  generator->setGenObj(this, sp_tdb);

  // Some built-in functions require a TMF transaction
  // because they get their information from catman.
  generator->setTransactionFlag(getRequiresTMFTransaction());

  return 0;
}
// Generate the executor task definition block (ComTdbFastExtract) for a fast
// extract (bulk unload to file/socket/Hive).  Generates the child, collects
// target/format strings into the plan Space, delegates TDB construction to
// ft_codegen(), then sets the target-type / header / compression flags on the
// new TDB.  Returns ft_codegen()'s result code.
//
// Fixes vs. previous version:
//  - childTdb was initialized with `new (space) ComTdb()`, a throw-away
//    object allocated in the plan Space (never freed, lives as long as the
//    plan) that was immediately overwritten by the child's real TDB below.
//    Initialize to NULL instead.
//  - The else-branch duplicate of `if (includeHeader()) setIncludeHeader(1)`
//    was removed: the identical call already executes unconditionally before
//    the isHiveInsert() check, so the branch was redundant.
short PhysicalFastExtract::codeGen(Generator *generator)
{
  short result = 0;
  Space *space = generator->getSpace();
  CmpContext *cmpContext = generator->currentCmpContext();

  const ULng32 downQueueMaxSize = getDefault(GEN_FE_SIZE_DOWN);
  const ULng32 upQueueMaxSize = getDefault(GEN_FE_SIZE_UP);
  const ULng32 defaultBufferSize = getDefault(GEN_FE_BUFFER_SIZE);
  const ULng32 outputBufferSize = defaultBufferSize;
  const ULng32 requestBufferSize = defaultBufferSize;
  const ULng32 replyBufferSize = defaultBufferSize;
  const ULng32 numOutputBuffers = getDefault(GEN_FE_NUM_BUFFERS);

  // used in runtime stats
  Cardinality estimatedRowCount =
    (Cardinality) (getInputCardinality() * getEstRowsUsed()).getValue();

  Int32 numChildren = getArity();
  ex_cri_desc * givenDesc = generator->getCriDesc(Generator::DOWN);
  // Filled in by the child's codeGen() below; no placeholder allocation.
  ComTdb * childTdb = NULL;
  ExplainTuple *firstExplainTuple = 0;

  // Allocate a new map table for this child.
  //
  // MapTable *localMapTable = generator->appendAtEnd();

  generator->setCriDesc(givenDesc, Generator::DOWN);
  child(0)->codeGen(generator);
  childTdb = (ComTdb *)(generator->getGenObj());
  firstExplainTuple = generator->getExplainTuple();

  ComTdbFastExtract *newTdb = NULL;
  char * targetName = NULL;
  char * hiveTableName = NULL;
  char * delimiter = NULL;
  char * header = NULL;
  char * nullString = NULL;
  char * recordSeparator = NULL;
  char * hdfsHostName = NULL;
  Int32 hdfsPortNum = getHdfsPort();

  // Delimiter / record separator may be given as an escape ("special")
  // character; if so, materialize the resolved single character in the
  // statement heap.  Hive inserts use the strings verbatim.
  char * newDelimiter = (char *)getDelimiter().data();
  char specChar = '0';
  if (!isHiveInsert() && isSpecialChar(newDelimiter, specChar))
  {
    newDelimiter = new (cmpContext->statementHeap()) char[2];
    newDelimiter[0] = specChar;
    newDelimiter[1] = '\0';
  }

  char * newRecordSep = (char *)getRecordSeparator().data();
  specChar = '0';
  if (!isHiveInsert() && isSpecialChar(newRecordSep, specChar))
  {
    newRecordSep = new (cmpContext->statementHeap()) char[2];
    newRecordSep[0] = specChar;
    newRecordSep[1] = '\0';
  }

  // Copy all name/format strings into the plan Space so they outlive codegen.
  targetName = AllocStringInSpace(*space, (char *)getTargetName().data());
  hdfsHostName = AllocStringInSpace(*space, (char *)getHdfsHostName().data());
  hiveTableName = AllocStringInSpace(*space, (char *)getHiveTableName().data());
  delimiter = AllocStringInSpace(*space, newDelimiter);
  header = AllocStringInSpace(*space, (char *)getHeader().data());
  nullString = AllocStringInSpace(*space, (char *)getNullString().data());
  recordSeparator = AllocStringInSpace(*space, newRecordSep);

  result = ft_codegen(generator,
                      *this,               // RelExpr &relExpr
                      newTdb,              // ComTdbUdr *&newTdb
                      estimatedRowCount,
                      targetName,
                      hdfsHostName,
                      hdfsPortNum,
                      hiveTableName,
                      delimiter,
                      header,
                      nullString,
                      recordSeparator,
                      downQueueMaxSize,
                      upQueueMaxSize,
                      outputBufferSize,
                      requestBufferSize,
                      replyBufferSize,
                      numOutputBuffers,
                      childTdb,
                      isSequenceFile());

  if (!generator->explainDisabled())
  {
    generator->setExplainTuple(addExplainInfo(newTdb, firstExplainTuple, 0, generator));
  }

  if (getTargetType() == FILE)
    newTdb->setTargetFile(1);
  else if (getTargetType() == SOCKET)
    newTdb->setTargetSocket(1);
  else
    GenAssert(0, "Unexpected Fast Extract target type")

  if (isAppend())
    newTdb->setIsAppend(1);
  if (this->includeHeader())
    newTdb->setIncludeHeader(1);

  if (isHiveInsert())
  {
    // Hive inserts never emit a header row, even if one was requested.
    newTdb->setIsHiveInsert(1);
    newTdb->setIncludeHeader(0);
    setOverwriteHiveTable( getOverwriteHiveTable());
  }

  if (getCompressionType() != NONE)
  {
    if (getCompressionType() == LZO)
      newTdb->setCompressLZO(1);
    else
      GenAssert(0, "Unexpected Fast Extract compression type")
  }

  if ((ActiveSchemaDB()->getDefaults()).getToken(FAST_EXTRACT_DIAGS) == DF_ON)
    newTdb->setPrintDiags(1);

  return result;
}
// Generate the executor task definition block (ComTdbUnPackRows) for the
// UnPackRows operator, which expands a packed row (or a rowwise rowset
// buffer) into individual rows.  Returns 0; the TDB is handed back through
// generator->setGenObj().
short PhysUnPackRows::codeGen(Generator *generator)
{
  // Get handles on expression generator, map table, and heap allocator
  //
  ExpGenerator *expGen = generator->getExpGenerator();
  Space *space = generator->getSpace();

  // Allocate a new map table for this operation
  //
  MapTable *localMapTable = generator->appendAtEnd();

  // Generate the child and capture the task definition block and a description
  // of the reply composite row layout and the explain information.
  //
  child(0)->codeGen(generator);

  ComTdb *childTdb = (ComTdb*)(generator->getGenObj());

  ex_cri_desc *childCriDesc = generator->getCriDesc(Generator::UP);

  ExplainTuple *childExplainTuple = generator->getExplainTuple();

  // Make all of my child's outputs map to ATP 1. Since they are
  // not needed above, they will not be in the work ATP (0).
  // (Later, they will be removed from the map table)
  //
  localMapTable->setAllAtp(1);

  // Generate the given and returned composite row descriptors.
  // unPackRows adds a tupp (for the generated outputs) to the
  // row given by the parent. The workAtp will have the 2 more
  // tupps (1 for the generated outputs and another for the
  // indexValue) than the given.
  //
  ex_cri_desc *givenCriDesc = generator->getCriDesc(Generator::DOWN);
  ex_cri_desc *returnedCriDesc =
#pragma nowarn(1506)   // warning elimination
    new(space) ex_cri_desc(givenCriDesc->noTuples() + 1, space);
#pragma warn(1506)  // warning elimination
  ex_cri_desc *workCriDesc =
#pragma nowarn(1506)   // warning elimination
    new(space) ex_cri_desc(givenCriDesc->noTuples() + 2, space);
#pragma warn(1506)  // warning elimination

  // unPackCols is the next to the last Tp in Atp 0, the work ATP.
  // and the last Tp in the returned ATP.
  //
  const Int32 unPackColsAtpIndex = workCriDesc->noTuples() - 2;
  const Int32 unPackColsAtp = 0;

  // The length of the new tuple which will contain the columns
  // generated by unPackRows
  //
  ULng32 unPackColsTupleLen;

  // The Tuple Desc describing the tuple containing the new unPacked columns
  // It is generated when the expression is generated.
  //
  ExpTupleDesc *unPackColsTupleDesc = 0;

  // indexValue is the last Tp in Atp 0, the work ATP.
  //
  const Int32 indexValueAtpIndex = workCriDesc->noTuples() - 1;
  const Int32 indexValueAtp = 0;

  // The length of the work tuple which will contain the value
  // of the index. This should always be sizeof(int).
  //
  ULng32 indexValueTupleLen = 0;

  // The Tuple Desc describing the tuple containing the new unPacked columns
  // It is generated when the expression is generated.
  //
  ExpTupleDesc *indexValueTupleDesc = 0;

  ValueIdList indexValueList;

  if (indexValue() != NULL_VALUE_ID)
  {
    indexValueList.insert(indexValue());

    expGen->processValIdList(indexValueList,
                             ExpTupleDesc::SQLARK_EXPLODED_FORMAT,
                             indexValueTupleLen,
                             indexValueAtp,
                             indexValueAtpIndex,
                             &indexValueTupleDesc,
                             ExpTupleDesc::SHORT_FORMAT);

    GenAssert(indexValueTupleLen == sizeof(Int32),
              "UnPackRows::codeGen: Internal Error");
  }

  // If a packingFactor exists, generate a move expression for this.
  // It is assumed that the packingFactor expression evaluates to a
  // 4 byte integer.
  //
  ex_expr *packingFactorExpr = 0;
  ULng32 packingFactorTupleLen;

  if (packingFactor().entries() > 0)
  {
    expGen->generateContiguousMoveExpr(packingFactor(),
                                       -1,
                                       unPackColsAtp,
                                       unPackColsAtpIndex,
                                       ExpTupleDesc::SQLARK_EXPLODED_FORMAT,
                                       packingFactorTupleLen,
                                       &packingFactorExpr);

    GenAssert(packingFactorTupleLen == sizeof(Int32),
              "UnPackRows::codeGen: Internal Error");
  }

  // Generate the UnPack expressions.
  //
  // characteristicOutputs() - refers to the list of expressions
  // to be moved to another tuple.
  //
  // 0 - Do not add conv. nodes.
  //
  // unPackColsAtp - this expression will move data to the
  // unPackColsAtp (0) ATP.
  //
  // unPackColsAtpIndex - within the unPackColsAtp (0) ATP, the destination
  // for this move expression will be the unPackColsAtpIndex TP. This should
  // be the next to the last TP of the work ATP. (The indexValue will be in
  // the last position)
  //
  // SQLARK_EXPLODED_FORMAT - generate the move expression to construct
  // the destination tuple in EXPLODED FORMAT.
  //
  // unPackColsTupleLen - This is an output which will contain the length
  // of the destination Tuple.
  //
  // &unPackColsExpr - The address of the pointer to the expression
  // which will be generated.
  //
  // &unPackColsTupleDesc - The address of the tuple descriptor which is
  // generated. This describes the destination tuple of the move expression.
  //
  // SHORT_FORMAT - generate the unPackColsTupleDesc in the SHORT FORMAT.
  //
  ex_expr *unPackColsExpr = 0;

  expGen->
    genGuardedContigMoveExpr(selectionPred(),
                             getGroupAttr()->getCharacteristicOutputs(),
                             0, // No Convert Nodes added
                             unPackColsAtp,
                             unPackColsAtpIndex,
                             ExpTupleDesc::SQLARK_EXPLODED_FORMAT,
                             unPackColsTupleLen,
                             &unPackColsExpr,
                             &unPackColsTupleDesc,
                             ExpTupleDesc::SHORT_FORMAT);

  // The unpacked-columns tuple descriptor is registered in both the work
  // and the returned CRI descs (same atp index in both).
#pragma nowarn(1506)   // warning elimination
  workCriDesc->setTupleDescriptor(unPackColsAtpIndex,
#pragma warn(1506)  // warning elimination
                                  unPackColsTupleDesc);

#pragma nowarn(1506)   // warning elimination
  returnedCriDesc->setTupleDescriptor(unPackColsAtpIndex,
#pragma warn(1506)  // warning elimination
                                      unPackColsTupleDesc);

  // expressions for rowwise rowset implementation.
  ex_expr * rwrsInputSizeExpr = 0;
  ex_expr * rwrsMaxInputRowlenExpr = 0;
  ex_expr * rwrsBufferAddrExpr = 0;
  ULng32 rwrsInputSizeExprLen = 0;
  ULng32 rwrsMaxInputRowlenExprLen = 0;
  ULng32 rwrsBufferAddrExprLen = 0;
  const Int32 rwrsAtp = 1;
  const Int32 rwrsAtpIndex = workCriDesc->noTuples() - 2;
  ExpTupleDesc * rwrsTupleDesc = 0;
  ValueIdList rwrsVidList;

  if (rowwiseRowset())
  {
    // Three separate single-value move expressions: rowset input size,
    // max input row length, and the rowset buffer address.
    rwrsVidList.insert(this->rwrsInputSizeExpr()->getValueId());
    expGen->generateContiguousMoveExpr(rwrsVidList,
                                       0 /*don't add conv nodes*/,
                                       rwrsAtp, rwrsAtpIndex,
                                       ExpTupleDesc::SQLARK_EXPLODED_FORMAT,
                                       rwrsInputSizeExprLen,
                                       &rwrsInputSizeExpr,
                                       &rwrsTupleDesc,ExpTupleDesc::SHORT_FORMAT);

    rwrsVidList.clear();
    rwrsVidList.insert(this->rwrsMaxInputRowlenExpr()->getValueId());
    expGen->generateContiguousMoveExpr(rwrsVidList,
                                       0 /*don't add conv nodes*/,
                                       rwrsAtp, rwrsAtpIndex,
                                       ExpTupleDesc::SQLARK_EXPLODED_FORMAT,
                                       rwrsMaxInputRowlenExprLen,
                                       &rwrsMaxInputRowlenExpr,
                                       &rwrsTupleDesc,ExpTupleDesc::SHORT_FORMAT);

    rwrsVidList.clear();
    rwrsVidList.insert(this->rwrsBufferAddrExpr()->getValueId());
    expGen->generateContiguousMoveExpr(rwrsVidList,
                                       0 /*don't add conv nodes*/,
                                       rwrsAtp, rwrsAtpIndex,
                                       ExpTupleDesc::SQLARK_EXPLODED_FORMAT,
                                       rwrsBufferAddrExprLen,
                                       &rwrsBufferAddrExpr,
                                       &rwrsTupleDesc,ExpTupleDesc::SHORT_FORMAT);

    expGen->assignAtpAndAtpIndex(rwrsOutputVids(),
                                 unPackColsAtp, unPackColsAtpIndex);
  }

  // Move the generated maptable entries, to the localMapTable,
  // so that all other entries can later be removed.
  //
  for (ValueId outputValId = getGroupAttr()->getCharacteristicOutputs().init();
       getGroupAttr()->getCharacteristicOutputs().next(outputValId);
       getGroupAttr()->getCharacteristicOutputs().advance(outputValId))
  {
    generator->addMapInfoToThis(localMapTable,
                                outputValId,
                                generator->getMapInfo(outputValId)->
                                  getAttr());

    // Indicate that code was generated for this map table entry.
    //
    generator->getMapInfoFromThis(localMapTable,
                                  outputValId)->codeGenerated();
  }

  NABoolean tolerateNonFatalError = FALSE;
  if (isRowsetIterator() && (generator->getTolerateNonFatalError()))
  {
    tolerateNonFatalError = TRUE;
    setTolerateNonFatalError(RelExpr::NOT_ATOMIC_);
  }

  // Allocate the UnPack TDB
  //
  ComTdbUnPackRows *unPackTdb = NULL;

  if (rowwiseRowset())
  {
    unPackTdb = new (space) ComTdbUnPackRows(NULL, //childTdb,
                                             rwrsInputSizeExpr,
                                             rwrsMaxInputRowlenExpr,
                                             rwrsBufferAddrExpr,
                                             rwrsAtpIndex,
                                             givenCriDesc,
                                             returnedCriDesc,
                                             workCriDesc,
                                             16,
                                             1024,
                                             (Cardinality) getGroupAttr()->
                                               getOutputLogPropList()[0]->
                                               getResultCardinality().value(),
                                             2,
                                             20000);
  }
  else
  {
    // Base the initial queue size on the est. cardinality.
    // UnPackRows does not do dyn queue resize, so passed in
    // queue size values represent initial (and final) queue
    // sizes (not max queue sizes).
    //
    queue_index upQueueSize =
      (queue_index)getGroupAttr()->getOutputLogPropList()[0]->getResultCardinality().value();

    // Make sure it is at least 1024.
    upQueueSize = (upQueueSize < 1024 ? 1024 : upQueueSize);

    // Make sure that it is not more than 64K.
    upQueueSize = (upQueueSize > 65536 ? 65536 : upQueueSize);

    unPackTdb = new (space) ComTdbUnPackRows(childTdb,
                                             packingFactorExpr,
                                             unPackColsExpr,
#pragma nowarn(1506)   // warning elimination
                                             unPackColsTupleLen,
                                             unPackColsAtpIndex,
                                             indexValueAtpIndex,
                                             givenCriDesc,
                                             returnedCriDesc,
                                             workCriDesc,
                                             16,
                                             upQueueSize,
                                             (Cardinality) getGroupAttr()->
                                               getOutputLogPropList()[0]->
                                               getResultCardinality().value(),
                                             isRowsetIterator(),
                                             tolerateNonFatalError);
  }
#pragma warn(1506)  // warning elimination

  generator->initTdbFields(unPackTdb);

  // Remove child's outputs from mapTable, They are not needed
  // above.
  //
  generator->removeAll(localMapTable);

  // Add the explain Information for this node to the EXPLAIN
  // Fragment. Set the explainTuple pointer in the generator so
  // the parent of this node can get a handle on this explainTuple.
  //
  if (!generator->explainDisabled())
  {
    generator->setExplainTuple(addExplainInfo(unPackTdb,
                                              childExplainTuple,
                                              0,
                                              generator));
  }

  // Restore the Cri Desc's and set the return object.
  //
  generator->setCriDesc(givenCriDesc, Generator::DOWN);
  generator->setCriDesc(returnedCriDesc, Generator::UP);

  generator->setGenObj(this, unPackTdb);

  return 0;
}
///////////////////////////////////////////////////////////////////////////
// PhysSequence::codeGen
//
// Generates the executor TDB (ComTdbSequence) for the sequence-function
// operator.  The steps, in order:
//   1. generate the child subtree;
//   2. lay out the "history buffer" tuple holding the sequenced columns
//      and the sequence-function results/intermediates;
//   3. generate the read/return sequence expressions, the partition-change
//      check, the selection predicate and the cancel expression;
//   4. compute the OLAP buffer / overflow parameters and build the
//      ComTdbSequence, then register explain info and memory quotas.
//
// Returns 0.  All results are handed back through the generator:
// setGenObj(), the DOWN/UP cri descriptors and the explain tuple.
///////////////////////////////////////////////////////////////////////////
short PhysSequence::codeGen(Generator *generator) 
{
  // Get a local handle on some of the generator objects.
  //
  CollHeap *wHeap = generator->wHeap();
  Space *space = generator->getSpace();
  ExpGenerator *expGen = generator->getExpGenerator();
  MapTable *mapTable = generator->getMapTable();

  // Allocate a new map table for this node. This must be done
  // before generating the code for my child so that this local
  // map table will be sandwiched between the map tables already
  // generated and the map tables generated by my offspring.
  //
  // Only the items available as output from this node will
  // be put in the local map table. Before exiting this function, all of
  // my offsprings map tables will be removed. Thus, none of the outputs
  // from nodes below this node will be visible to nodes above it except
  // those placed in the local map table and those that already exist in
  // my ancestors map tables. This is the standard mechanism used in the
  // generator for managing the access to item expressions.
  //
  MapTable *localMapTable = generator->appendAtEnd();

  // Since this operation doesn't modify the row on the way down the tree,
  // go ahead and generate the child subtree. Capture the given composite row
  // descriptor and the child's returned TDB and composite row descriptor.
  //
  ex_cri_desc * givenCriDesc = generator->getCriDesc(Generator::DOWN);
  child(0)->codeGen(generator);
  ComTdb *childTdb = (ComTdb*)generator->getGenObj();
  ex_cri_desc * childCriDesc = generator->getCriDesc(Generator::UP);
  ExplainTuple *childExplainTuple = generator->getExplainTuple();

  // Make all of my child's outputs map to ATP 1. The child row is only
  // accessed in the project expression and it will be the second ATP
  // (ATP 1) passed to this expression.
  //
  localMapTable->setAllAtp(1);

  // My returned composite row has an additional tupp.
  //
  Int32 numberTuples = givenCriDesc->noTuples() + 1;
  ex_cri_desc * returnCriDesc 
#pragma nowarn(1506)   // warning elimination 
    = new (space) ex_cri_desc(numberTuples, space);
#pragma warn(1506)  // warning elimination 

  // For now, the history buffer row looks just like the return row. Later,
  // it may be useful to add an additional tupp for sequence function
  // intermediates that are not needed above this node -- thus, this
  // ATP is kept separate from the returned ATP.
  //
  const Int32 historyAtp = 0;
  const Int32 historyAtpIndex = numberTuples-1;
#pragma nowarn(1506)   // warning elimination 
  ex_cri_desc *historyCriDesc = new (space) ex_cri_desc(numberTuples, space);
#pragma warn(1506)  // warning elimination 
  ExpTupleDesc *historyDesc = 0;

  // Separate the read and return expressions.
  seperateReadAndReturnItems(wHeap);

  // The history buffer consists of items projected directly from the
  // child, the root sequence functions, the value arguments of the
  // offset functions, and running sequence functions. These elements must
  // be materialized in the history buffer in order to be able to compute
  // the outputs of this node -- the items projected directly from the child
  // (projectValues) and the root sequence functions (sequenceFunctions).
  //
  // Compute the set of sequence function items that must be materialized
  // in the history buffer. -- sequenceItems
  //
  // Compute the set of items in the history buffer: the union of the
  // projected values and the value arguments. -- historyIds
  //
  // Compute the set of items in the history buffer that are computed:
  // the difference between all the elements in the history buffer
  // and the projected items. -- computedHistoryIds
  //
  // KB---will need to return atp with 3 tupps only: 0, 1 and 2.
  // 2 --> values from history buffer after they are moved to it.
  addCheckPartitionChangeExpr(generator, TRUE);

  ValueIdSet historyIds;

  historyIds += movePartIdsExpr();
  historyIds += sequencedColumns();

  ValueIdSet outputFromChild = child(0)->getGroupAttr()->getCharacteristicOutputs();

  getHistoryAttributes(readSeqFunctions(),outputFromChild, historyIds, TRUE, wHeap);

  // Add in the top level sequence functions.
  historyIds += readSeqFunctions();

  getHistoryAttributes(returnSeqFunctions(),outputFromChild, historyIds, TRUE, wHeap);

  // Add in the top level functions.
  historyIds += returnSeqFunctions();

  // Lay out the work tuple format which consists of the projected
  // columns and the computed sequence functions. First, compute
  // the number of attributes in the tuple.
  //
  ULng32 numberAttributes 
    = ((NOT historyIds.isEmpty()) ? historyIds.entries() : 0);

  // Allocate an attribute pointer vector from the working heap.
  //
  Attributes **attrs = new(wHeap) Attributes*[numberAttributes];

  // Fill in the attributes vector for the history buffer including
  // adding the entries to the map table. Also, compute the value ID
  // set for the elements to project from the child row.
  //
  // TODO(review): re-visit this function.
  computeHistoryAttributes(generator, localMapTable, attrs, historyIds);

  // Create the tuple descriptor for the history buffer row and
  // assign the offsets to the attributes. For now, this layout is
  // identical to the returned row. Set the tuple descriptors for
  // the return and history rows.
  //
  ULng32 historyRecLen;
  expGen->processAttributes(numberAttributes,
                            attrs,
                            ExpTupleDesc::SQLARK_EXPLODED_FORMAT,
                            historyRecLen,
                            historyAtp,
                            historyAtpIndex,
                            &historyDesc,
                            ExpTupleDesc::SHORT_FORMAT);
  NADELETEBASIC(attrs, wHeap);

#pragma nowarn(1506)   // warning elimination 
  returnCriDesc->setTupleDescriptor(historyAtpIndex, historyDesc);
#pragma warn(1506)  // warning elimination 
#pragma nowarn(1506)   // warning elimination 
  historyCriDesc->setTupleDescriptor(historyAtpIndex, historyDesc);
#pragma warn(1506)  // warning elimination 

  // If there are any sequence function items, generate the sequence
  // function expressions.
  //
  ex_expr * readSeqExpr = NULL;
  if(NOT readSeqFunctions().isEmpty())
  {
    ValueIdSet seqVals = readSeqFunctions();
    seqVals += sequencedColumns();
    seqVals += movePartIdsExpr();
    expGen->generateSequenceExpression(seqVals, readSeqExpr);
  }

  // Expression evaluated when the partition (window) changes.
  ex_expr *checkPartChangeExpr = NULL;
  if (!checkPartitionChangeExpr().isEmpty())
  {
    ItemExpr * newCheckPartitionChangeTree= checkPartitionChangeExpr().rebuildExprTree(ITM_AND,TRUE,TRUE);
    expGen->generateExpr(newCheckPartitionChangeTree->getValueId(),
                         ex_expr::exp_SCAN_PRED,
                         &checkPartChangeExpr);
  }

  //unsigned long rowLength;
  ex_expr * returnExpr = NULL;
  if(NOT returnSeqFunctions().isEmpty())
  {
    expGen->generateSequenceExpression(returnSeqFunctions(), returnExpr);
  }

  // Generate expression to evaluate predicate on the output.
  //
  ex_expr *postPred = 0;
  if (! selectionPred().isEmpty())
  {
    ItemExpr * newPredTree = selectionPred().rebuildExprTree(ITM_AND,TRUE,TRUE);
    expGen->generateExpr(newPredTree->getValueId(),
                         ex_expr::exp_SCAN_PRED,
                         &postPred);
  }

  // Reset ATP's to zero for parent.
  //
  localMapTable->setAllAtp(0);

  // Generate expression to evaluate the cancel expression.
  //
  ex_expr *cancelExpression = 0;
  if (! cancelExpr().isEmpty())
  {
    ItemExpr * newCancelExprTree = cancelExpr().rebuildExprTree(ITM_AND,TRUE,TRUE);
    expGen->generateExpr(newCancelExprTree->getValueId(),
                         ex_expr::exp_SCAN_PRED,
                         &cancelExpression);
  }

  //
  // For overflow.
  //
  // ( The following are meaningless if ! unlimitedHistoryRows() )
  NABoolean noOverflow =
    CmpCommon::getDefault(EXE_BMO_DISABLE_OVERFLOW) == DF_ON ;
  NABoolean logDiagnostics =
    CmpCommon::getDefault(EXE_DIAGNOSTIC_EVENTS) == DF_ON ;
  NABoolean possibleMultipleCalls = generator->getRightSideOfFlow() ;
  short scratchTresholdPct =
    (short) CmpCommon::getDefaultLong(SCRATCH_FREESPACE_THRESHOLD_PERCENT);
  // Determine the memory usage (amount of memory as a percentage of total
  // physical memory used to initialize data structures).
  unsigned short memUsagePercent =
    (unsigned short) getDefault(BMO_MEMORY_USAGE_PERCENT);
  short memPressurePct = (short)getDefault(GEN_MEM_PRESSURE_THRESHOLD);

  historyRecLen = ROUND8(historyRecLen);

  Lng32 maxNumberOfOLAPBuffers;
  Lng32 maxRowsInOLAPBuffer;
  Lng32 minNumberOfOLAPBuffers;
  Lng32 numberOfWinOLAPBuffers;
  Lng32 olapBufferSize;

  computeHistoryParams(historyRecLen,
                       maxRowsInOLAPBuffer,
                       minNumberOfOLAPBuffers,
                       numberOfWinOLAPBuffers,
                       maxNumberOfOLAPBuffers,
                       olapBufferSize);

  ComTdbSequence *sequenceTdb
    = new(space) ComTdbSequence(readSeqExpr,
                                returnExpr,
                                postPred,
                                cancelExpression,
                                getMinFollowingRows(),
#pragma nowarn(1506)   // warning elimination 
                                historyRecLen,
                                historyAtpIndex,
                                childTdb,
                                givenCriDesc,
                                returnCriDesc,
                                (queue_index)getDefault(GEN_SEQFUNC_SIZE_DOWN),
                                (queue_index)getDefault(GEN_SEQFUNC_SIZE_UP),
                                getDefault(GEN_SEQFUNC_NUM_BUFFERS),
                                getDefault(GEN_SEQFUNC_BUFFER_SIZE),
                                olapBufferSize,
                                maxNumberOfOLAPBuffers,
                                numHistoryRows(),
                                getUnboundedFollowing(),
                                logDiagnostics,
                                possibleMultipleCalls,
                                scratchTresholdPct,
                                memUsagePercent,
                                memPressurePct,
                                maxRowsInOLAPBuffer,
                                minNumberOfOLAPBuffers,
                                numberOfWinOLAPBuffers,
                                noOverflow,
                                checkPartChangeExpr);
#pragma warn(1506)  // warning elimination 

  generator->initTdbFields(sequenceTdb);

  // Update the estimated value of HistoryRowLength with the actual value.
  //setEstHistoryRowLength(historyIds.getRowLength());

  double sequenceMemEst = getEstimatedRunTimeMemoryUsage(sequenceTdb);
  generator->addToTotalEstimatedMemory(sequenceMemEst);

  if(!generator->explainDisabled()) {
    // Report per-CPU memory estimate (KB) in the explain output.
    Lng32 seqMemEstInKBPerCPU = (Lng32)(sequenceMemEst / 1024) ;
    seqMemEstInKBPerCPU = seqMemEstInKBPerCPU/
      (MAXOF(generator->compilerStatsInfo().dop(),1));
    generator->setOperEstimatedMemory(seqMemEstInKBPerCPU);

    generator->
      setExplainTuple(addExplainInfo(sequenceTdb, childExplainTuple, 0, generator));

    generator->setOperEstimatedMemory(0);
  }

  sequenceTdb->setScratchIOVectorSize((Int16)getDefault(SCRATCH_IO_VECTOR_SIZE_HASH));
  sequenceTdb->setOverflowMode(generator->getOverflowMode());
  sequenceTdb->setBmoMinMemBeforePressureCheck((Int16)getDefault(EXE_BMO_MIN_SIZE_BEFORE_PRESSURE_CHECK_IN_MB));

  if(generator->getOverflowMode() == ComTdb::OFM_SSD )
    sequenceTdb->setBMOMaxMemThresholdMB((UInt16)(ActiveSchemaDB()->
                                                  getDefaults()).
                                         getAsLong(SSD_BMO_MAX_MEM_THRESHOLD_IN_MB));
  else
    sequenceTdb->setBMOMaxMemThresholdMB((UInt16)(ActiveSchemaDB()->
                                                  getDefaults()).
                                         getAsLong(EXE_MEMORY_AVAILABLE_IN_MB));

  // The CQD EXE_MEM_LIMIT_PER_BMO_IN_MB has precedence over the mem quota system.
  NADefaults &defs = ActiveSchemaDB()->getDefaults();
  UInt16 mmu = (UInt16)(defs.getAsDouble(EXE_MEM_LIMIT_PER_BMO_IN_MB));
  UInt16 numBMOsInFrag = (UInt16)generator->getFragmentDir()->getNumBMOs();

  if (mmu != 0)
    sequenceTdb->setMemoryQuotaMB(mmu);
  else {
    // Apply the quota system if either one of the following two is true:
    //   1. the memory limit feature is turned off and there is more than one BMO;
    //   2. the memory limit feature is turned on.
    NABoolean mlimitPerCPU = defs.getAsDouble(EXE_MEMORY_LIMIT_PER_CPU) > 0;

    if ( mlimitPerCPU || numBMOsInFrag > 1 ) {
      double memQuota =
        computeMemoryQuota(generator->getEspLevel() == 0,
                           mlimitPerCPU,
                           generator->getBMOsMemoryLimitPerCPU().value(),
                           generator->getTotalNumBMOsPerCPU(),
                           generator->getTotalBMOsMemoryPerCPU().value(),
                           numBMOsInFrag,
                           generator->getFragmentDir()->getBMOsMemoryUsage()
                           );
      sequenceTdb->setMemoryQuotaMB( UInt16(memQuota) );
    }
  }

  // Restore the cri descriptors and hand the TDB back to the parent.
  generator->setCriDesc(givenCriDesc, Generator::DOWN);
  generator->setCriDesc(returnCriDesc, Generator::UP);
  generator->setGenObj(this, sequenceTdb);

  return 0;
}
///////////////////////////////////////////////////////////////////////////
// PhysSample::codeGen
//
// Generates the executor TDB (ComTdbSample) for the SAMPLE operator:
//   1. generate the child subtree;
//   2. generate the sampling (balance) expression, if any;
//   3. reject sampling over LOB columns with error -4322;
//   4. generate the selection predicate on the sampled row;
//   5. build the ComTdbSample and register explain info.
//
// Returns 0.  Results are handed back through the generator:
// setGenObj(), the DOWN/UP cri descriptors and the explain tuple.
//
// Changes vs. previous revision: removed unused locals wHeap/mapTable
// and the dead (commented-out) column-aliasing block; the side-effecting
// appendAtEnd() call is retained.
///////////////////////////////////////////////////////////////////////////
short PhysSample::codeGen(Generator *generator)
{
  // Get a local handle on some of the generator objects.
  //
  Space *space = generator->getSpace();
  ExpGenerator *expGen = generator->getExpGenerator();

  // Append a new map table for this node. This must be done before
  // generating the code for my child so that this local map table is
  // sandwiched between the map tables already generated and the map
  // tables generated by my offspring (the standard generator mechanism
  // for scoping access to item expressions). The returned pointer is
  // not needed here, but the append itself is required.
  //
  generator->appendAtEnd();

  // Since this operation doesn't modify the row on the way down the tree,
  // go ahead and generate the child subtree. Capture the given composite row
  // descriptor and the child's returned TDB and composite row descriptor.
  //
  ex_cri_desc * givenCriDesc = generator->getCriDesc(Generator::DOWN);
  child(0)->codeGen(generator);
  ComTdb *childTdb = (ComTdb*)generator->getGenObj();
  ex_cri_desc * childCriDesc = generator->getCriDesc(Generator::UP);
  ExplainTuple *childExplainTuple = generator->getExplainTuple();

  // Generate the sampling expression (at most one balance expression
  // is expected; only the first entry is used).
  //
  ex_expr *balExpr = NULL;
  Int32 returnFactorOffset = 0;
  ValueId val = balanceExpr().init();
  if(balanceExpr().next(val))
    expGen->generateSamplingExpr(val, &balExpr, returnFactorOffset);

  // NOTE: an earlier (commented-out) approach aliased sampledColumns()
  // directly to the underlying child expressions to avoid a project
  // expression; see revision history if it needs to be revived.

  // Check if any of the sampled columns are LOB columns. If so,
  // raise error -4322 and abort code generation.
  //
  ValueId valId;
  for(valId = sampledColumns().init();
      sampledColumns().next(valId);
      sampledColumns().advance(valId))
    {
      const NAType &colType = valId.getType();
      if ((colType.getFSDatatype() == REC_BLOB) ||
          (colType.getFSDatatype() == REC_CLOB))
        {
          *CmpCommon::diags() << DgSqlCode(-4322);
          GenExit();
        }
    }

  // Generate the expression to evaluate the predicate on the sampled row.
  //
  ex_expr *postPred = 0;
  if (!selectionPred().isEmpty())
    {
      ItemExpr * newPredTree = selectionPred().rebuildExprTree(ITM_AND,TRUE,TRUE);
      expGen->generateExpr(newPredTree->getValueId(),
                           ex_expr::exp_SCAN_PRED,
                           &postPred);
    }

  // Construct the Sample TDB.
  //
  ComTdbSample *sampleTdb =
    new(space) ComTdbSample(NULL,
                            balExpr,
                            returnFactorOffset,
                            postPred,
                            childTdb,
                            givenCriDesc,
                            childCriDesc,
                            (queue_index)getDefault(GEN_SAMPLE_SIZE_DOWN),
                            (queue_index)getDefault(GEN_SAMPLE_SIZE_UP));

  generator->initTdbFields(sampleTdb);

  if(!generator->explainDisabled())
    {
      generator->
        setExplainTuple(addExplainInfo(sampleTdb, childExplainTuple, 0, generator));
    }

  // Restore the cri descriptors and hand the TDB back to the parent.
  generator->setCriDesc(givenCriDesc, Generator::DOWN);
  generator->setCriDesc(childCriDesc, Generator::UP);
  generator->setGenObj(this, sampleTdb);

  return 0;
}