Code Example #1
void PhysSequence::transformOlapFunctions(CollHeap *wHeap)
{

 
  for(ValueId valId = sequenceFunctions().init();
      sequenceFunctions().next(valId);
      sequenceFunctions().advance(valId)) 
  {
    
    ItemExpr * itmExpr = valId.getItemExpr();

    //NAType *itmType = itmExpr->getValueId().getType().newCopy(wHeap);

    if (itmExpr->isOlapFunction())
    {
      NAType *itmType = itmExpr->getValueId().getType().newCopy(wHeap);

      itmExpr = ((ItmSeqOlapFunction*)itmExpr)->transformOlapFunction(wHeap);

      CMPASSERT(itmExpr);
      if(itmExpr->getValueId() != valId)
      {
	itmExpr = new (wHeap) Cast(itmExpr, itmType);
	itmExpr->synthTypeAndValueId(TRUE);
	valId.replaceItemExpr(itmExpr);
	itmExpr->getValueId().changeType(itmType); // keep the original type on the new value id
      }
    }
    itmExpr->transformOlapFunctions(wHeap);
  }
}
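
The transform above walks the sequence-function set, rewrites each OLAP function, and wraps the replacement in a Cast so the original result type survives the rewrite. Below is a minimal standalone sketch of that replace-and-cast-back pattern in plain C++; the types (Expr, CastExpr, rewriteOlap) are hypothetical stand-ins, not the actual ItemExpr/ItmSeqOlapFunction classes.

#include <memory>
#include <string>
#include <vector>

// Hypothetical, simplified expression node (stand-in for ItemExpr).
struct Expr {
    std::string type;                         // synthesized result type
    bool isOlap = false;                      // marks nodes to be rewritten
    std::vector<std::shared_ptr<Expr>> kids;
    virtual ~Expr() = default;
};

// A cast node that forces its child back to a given type (stand-in for Cast).
struct CastExpr : Expr {
    std::shared_ptr<Expr> child;
    CastExpr(std::shared_ptr<Expr> c, std::string t)
        : child(std::move(c)) { type = std::move(t); }
};

// Stand-in for transformOlapFunction(): the rewrite may not preserve the type.
std::shared_ptr<Expr> rewriteOlap(const std::shared_ptr<Expr>& e) {
    auto r = std::make_shared<Expr>();
    r->kids = e->kids;
    r->type = "rewritten";
    return r;
}

// Rewrite every OLAP node in place and cast the replacement back to the
// original type, mirroring the Cast(itmExpr, itmType) step above.
void transformOlapFunctions(std::vector<std::shared_ptr<Expr>>& seqFuncs) {
    for (auto& e : seqFuncs) {
        if (e->isOlap) {
            std::string origType = e->type;   // remember the type first
            std::shared_ptr<Expr> rewritten = rewriteOlap(e);
            if (rewritten != e)
                e = std::make_shared<CastExpr>(rewritten, origType);
        }
    }
}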
Code Example #2
// is any literal in this expr safely coercible to its target type?
NABoolean ItemExpr::isSafelyCoercible(CacheWA &cwa) const
{
  if (cwa.getPhase() >= CmpMain::BIND) {
    Int32 arity = getArity();
    for (Int32 x = 0; x < arity; x++) {
      if (!child(x)->isSafelyCoercible(cwa)) { 
        return FALSE; 
      }
    }
    if (arity == 2) {
      // we have to disallow caching of the following types of exprs:
      //   expr + 123456789012345678901234567890
      //   expr || 'overlylongstringthatwouldoverflow'
      ItemExpr *left = child(0), *right = child(1);
      if (left->getOperatorType() == ITM_CONSTANT) {
        if (right->getOperatorType() == ITM_CONSTANT) {
          // "10 + 1" should be safely coercible
          return TRUE;
        } else {
          return ((ConstValue*)left)->canBeSafelyCoercedTo
            (right->getValueId().getType());
        }
      }      
      else if (right->getOperatorType() == ITM_CONSTANT) {
        return ((ConstValue*)right)->canBeSafelyCoercedTo
          (left->getValueId().getType());
      }      
      // else both are nonliterals; fall thru
    }
    // else nondyadic expr; fall thru
    return TRUE; 
  }
  return FALSE;
}
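
The check above recurses through the expression tree and, for a dyadic operator with exactly one literal child, asks whether that literal can be coerced to the other operand's type. A toy version of the same recursion is sketched below with hypothetical types; it uses integer bit widths in place of canBeSafelyCoercedTo().

#include <vector>

// Hypothetical, simplified AST node: a literal or an operator over children.
struct Node {
    bool isLiteral = false;
    long long value = 0;              // meaningful only for literals
    int targetBits = 64;              // declared width of a non-literal operand
    std::vector<const Node*> kids;
};

// Can 'value' be represented in a signed integer of 'bits' bits?
static bool fitsIn(long long value, int bits) {
    if (bits >= 64) return true;
    long long hi = (1LL << (bits - 1)) - 1;
    long long lo = -hi - 1;
    return value >= lo && value <= hi;
}

// Toy analogue of ItemExpr::isSafelyCoercible(): every literal in the tree
// must fit the operand it is combined with; two literals are always fine.
bool isSafelyCoercible(const Node& n) {
    for (const Node* k : n.kids)
        if (!isSafelyCoercible(*k)) return false;

    if (n.kids.size() == 2) {
        const Node* l = n.kids[0];
        const Node* r = n.kids[1];
        if (l->isLiteral && r->isLiteral) return true;          // "10 + 1"
        if (l->isLiteral) return fitsIn(l->value, r->targetBits);
        if (r->isLiteral) return fitsIn(r->value, l->targetBits);
    }
    return true;                      // nondyadic, or both sides nonliteral
}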
Code Example #3
ItemExpr *
addConvNode(ItemExpr *childExpr,
            ValueIdMap *mapping,
            CollHeap *wHeap)

{
  if(childExpr->getOperatorType() != ITM_CONVERT &&
     !childExpr->isASequenceFunction()) {

    ValueId topValue;
    mapping->mapValueIdUp(topValue,
                          childExpr->getValueId());
    if(topValue == childExpr->getValueId()) {

      // add the convert node
      ItemExpr *newChild = new(wHeap) Convert (childExpr);
      newChild->synthTypeAndValueId(TRUE);
      mapping->addMapEntry(newChild->getValueId(),
                           childExpr->getValueId());
      return newChild;
    } else {
      return topValue.getItemExpr();
    }
  }
  return childExpr;
}
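
addConvNode() wraps an expression in a Convert node only when one does not already exist for it, recording the new node in the ValueIdMap so later calls reuse it (the sequence-function exclusion is omitted here). A small sketch of that wrap-once-and-remember idiom, with hypothetical types, follows.

#include <memory>
#include <unordered_map>

// Hypothetical expression node identified by a value id.
struct ExprNode {
    int valueId = 0;
    bool isConvert = false;
    std::shared_ptr<ExprNode> child;          // set only for convert nodes
};

// Maps a child's value id to the convert node already created for it
// (a stand-in for the ValueIdMap used above).
using ConvMap = std::unordered_map<int, std::shared_ptr<ExprNode>>;

std::shared_ptr<ExprNode> addConvNode(std::shared_ptr<ExprNode> child,
                                      ConvMap& mapping,
                                      int& nextValueId) {
    if (!child->isConvert) {
        auto it = mapping.find(child->valueId);
        if (it == mapping.end()) {
            // no convert node yet: create one and record the mapping
            auto conv = std::make_shared<ExprNode>();
            conv->valueId = nextValueId++;
            conv->isConvert = true;
            conv->child = child;
            mapping[child->valueId] = conv;
            return conv;
        }
        return it->second;                    // reuse the mapped node
    }
    return child;                             // already a convert node
}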
Code Example #4
void
RelSequence::addCancelExpr(CollHeap *wHeap)
{
  ItemExpr *cPred = NULL;

  if (this->partition().entries() > 0)
  {
    return;
  }
  if(cancelExpr().entries() > 0) 
  {
    return;
  }

  for(ValueId valId = selectionPred().init();
      selectionPred().next(valId);
      selectionPred().advance(valId)) 
  {
    ItemExpr *pred = valId.getItemExpr();

    // Look for preds that select a prefix of the sequence.
    // Rank() < const; Rank <= const; const > Rank; const >= Rank
    ItemExpr *op1 = NULL;
    ItemExpr *op2 = NULL;

    if(pred->getOperatorType() == ITM_LESS ||
       pred->getOperatorType() == ITM_LESS_EQ) 
    {
      op1 = pred->child(0);
      op2 = pred->child(1);
    }
    else if (pred->getOperatorType() == ITM_GREATER ||
             pred->getOperatorType() == ITM_GREATER_EQ) 
    {
      op1 = pred->child(1);
      op2 = pred->child(0);
    }
    NABoolean negate;
    if (op1 && op2 &&
        (op2->getOperatorType() == ITM_CONSTANT || 
         op2->getOperatorType() == ITM_DYN_PARAM)  &&
         (op1->getOperatorType() == ITM_OLAP_RANK ||
          op1->getOperatorType() == ITM_OLAP_DRANK ||
          (op1->getOperatorType() == ITM_OLAP_COUNT &&
           op1->child(0)->getOperatorType() == ITM_CONSTANT &&
           !op1->child(0)->castToConstValue(negate)->isNull())))
    {
       cPred = new(wHeap) UnLogic(ITM_NOT, pred);
       // break at first occurrence
       break;
    }
  }
  
  if(cPred) 
  {
    cPred->synthTypeAndValueId(TRUE);
    cancelExpr().insert(cPred->getValueId());
  }
}
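
addCancelExpr() scans the selection predicates for the first one of the form RANK < const (in either orientation) and inserts its negation as a cancel expression. The sketch below restates that pattern match over a hypothetical predicate type; it ignores the dynamic-parameter and OLAP COUNT cases handled above.

#include <memory>
#include <vector>

// Hypothetical predicate node kinds.
enum class Op { Less, LessEq, Greater, GreaterEq, Rank, Constant, Not, Other };

struct Pred {
    Op op = Op::Other;
    std::shared_ptr<Pred> left, right;
};

// Find the first predicate shaped like RANK < const / RANK <= const /
// const > RANK / const >= RANK and return NOT(pred), or nullptr if none.
std::shared_ptr<Pred> buildCancelExpr(
    const std::vector<std::shared_ptr<Pred>>& selectionPreds) {
    for (const auto& p : selectionPreds) {
        std::shared_ptr<Pred> rankSide, constSide;
        if (p->op == Op::Less || p->op == Op::LessEq) {
            rankSide = p->left;  constSide = p->right;   // RANK < const
        } else if (p->op == Op::Greater || p->op == Op::GreaterEq) {
            rankSide = p->right; constSide = p->left;    // const > RANK
        }
        if (rankSide && constSide &&
            rankSide->op == Op::Rank && constSide->op == Op::Constant) {
            auto cancel = std::make_shared<Pred>();
            cancel->op = Op::Not;
            cancel->left = p;                 // NOT(original predicate)
            return cancel;                    // break at first occurrence
        }
    }
    return nullptr;
}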
Code Example #5
void PhysSequence::addCheckPartitionChangeExpr( Generator *generator,
                                                NABoolean addConvNodes,
                                                ValueIdMap *origAttributes) 
{
  if(!(partition().entries() > 0)) 
  {
    return;
  }
  CollHeap * wHeap = generator->wHeap();
    
  if(addConvNodes && !origAttributes) 
  {
    origAttributes = new (wHeap) ValueIdMap();
  }

  ItemExpr * checkPartChng= NULL;
  for (CollIndex ix = 0; ix < partition().entries(); ix++)
  {
    ItemExpr *iePtr = partition().at(ix).getItemExpr();
    ItemExpr * convIePtr = addConvNode(iePtr, origAttributes,wHeap);

    movePartIdsExpr() += convIePtr->getValueId();
    ItemExpr * comp =  new (wHeap) BiRelat(ITM_EQUAL,
                                           iePtr,
                                           convIePtr,
                                           TRUE);
    if (!checkPartChng)
    {
      checkPartChng = comp;
    }
    else
    {
      checkPartChng = new (wHeap) BiLogic( ITM_AND,
                                           checkPartChng,
                                           comp);
    }
  } 

  checkPartChng->convertToValueIdSet(checkPartitionChangeExpr(),
                                      generator->getBindWA(),
                                      ITM_AND);
}
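
The loop above builds the partition-change check as a chain of per-column equality comparisons folded together with AND. A minimal sketch of that chaining, using hypothetical node types instead of BiRelat/BiLogic, is shown below.

#include <memory>
#include <vector>

// Hypothetical boolean-expression node.
struct BoolExpr {
    enum Kind { EQUAL, AND } kind;
    std::shared_ptr<BoolExpr> left, right;
    int columnId = -1;                        // used by EQUAL leaves
    explicit BoolExpr(Kind k) : kind(k) {}
};

// Build (col0 = col0') AND (col1 = col1') AND ... by adding one conjunct
// per partition column, mirroring the checkPartChng loop above.
std::shared_ptr<BoolExpr> buildPartitionChangeCheck(
    const std::vector<int>& partitionCols) {
    std::shared_ptr<BoolExpr> check;
    for (int col : partitionCols) {
        auto comp = std::make_shared<BoolExpr>(BoolExpr::EQUAL);
        comp->columnId = col;                 // current vs. saved value of 'col'
        if (!check) {
            check = comp;                     // first comparison starts the chain
        } else {
            auto conj = std::make_shared<BoolExpr>(BoolExpr::AND);
            conj->left = check;
            conj->right = comp;
            check = conj;
        }
    }
    return check;                             // null if no partition columns
}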
Code Example #6
ValueIdSet TableDesc::getComputedColumns(NAColumnBooleanFuncPtrT fptr)
{
    ValueIdSet computedColumns;

    for (CollIndex j=0; j<getClusteringIndex()->getIndexKey().entries(); j++)
    {
        ItemExpr *ck = getClusteringIndex()->getIndexKey()[j].getItemExpr();

        if (ck->getOperatorType() == ITM_INDEXCOLUMN)
            ck = ((IndexColumn *) ck)->getDefinition().getItemExpr();

        CMPASSERT(ck->getOperatorType() == ITM_BASECOLUMN);

        NAColumn* x = ((BaseColumn *) ck)->getNAColumn();

        if (((*x).*fptr)())
            computedColumns += ck->getValueId();
    }
    return computedColumns;
}
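
getComputedColumns() is parameterized by a pointer-to-member-function (NAColumnBooleanFuncPtrT), so the same loop can collect columns satisfying any boolean NAColumn property. The standalone sketch below shows the same C++ idiom with a hypothetical Column type.

#include <set>
#include <vector>

// Hypothetical column descriptor with a couple of boolean properties.
struct Column {
    int id = 0;
    bool computed = false;
    bool divisioning = false;
    bool isComputedColumn() const { return computed; }
    bool isDivisioningColumn() const { return divisioning; }
};

// The caller picks which property to test by passing a pointer to a
// boolean member function, just like NAColumnBooleanFuncPtrT above.
using ColumnBoolFn = bool (Column::*)() const;

std::set<int> getColumnsWhere(const std::vector<Column>& indexKey,
                              ColumnBoolFn fptr) {
    std::set<int> result;
    for (const Column& c : indexKey)
        if ((c.*fptr)())                      // same call syntax as ((*x).*fptr)()
            result.insert(c.id);
    return result;
}

// Usage: getColumnsWhere(key, &Column::isComputedColumn);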
Code Example #7
short ExpGenerator::buildKeyInfo(keyRangeGen ** keyInfo, // out -- generated object
                                 Generator * generator,
                                 const NAColumnArray & keyColumns,
                                 const ValueIdList & listOfKeyColumns,
                                 const ValueIdList & beginKeyPred,
                                 const ValueIdList & endKeyPred,
                                 const SearchKey * searchKey,
                                 const MdamKey * mdamKeyPtr,
                                 const NABoolean reverseScan,
                                 unsigned short keytag,
                                 const ExpTupleDesc::TupleDataFormat tf,
                                 // the next few parameters are here
                                 // as part of a horrible kludge for
                                 // the PartitionAccess::codeGen()
                                 // method, which lacks a SearchKey
                                 // object and therefore exposes
                                 // things like the exclusion
                                 // expressions; with luck, later work
                                 // in the Optimizer will result in a
                                 // much cleaner interface
                                 const NABoolean useTheHorribleKludge,
                                 ItemExpr * beginKeyExclusionExpr,
                                 ItemExpr * endKeyExclusionExpr,

                                 ex_expr_lean ** unique_key_expr,
                                 ULng32 *uniqueKeyLen,
                                 NABoolean doKeyEncodeOpt,
                                 Lng32 * firstKeyColOffset,
				 Int32 in_key_atp_index
                                 )

{
  Space * space = generator->getSpace();

  const Int32 work_atp = 1;
  const Int32 key_atp_index = (in_key_atp_index <= 0 ? 2 : in_key_atp_index);
  const Int32 exclude_flag_atp_index = 3;
  const Int32 data_conv_error_atp_index = 4;
  const Int32 key_column_atp_index = 5; // used only for Mdam
  const Int32 key_column2_atp_index = 6; // used only for Mdam MDAM_BETWEEN pred;
                                         //   code in BiLogic::mdamPredGenSubrange
                                         //   and MdamColumn::buildDisjunct
                                         //   requires this to be 1 more than
                                         //   key_column_atp_index
  ULng32 keyLen;

  // add an entry to the map table for work Atp
  MapTable *keyBufferPartMapTable = generator->appendAtEnd();

  // generate a temporary variable, which will be used for handling
  // data conversion errors during key building
  ValueIdList temp_varb_list;

  ItemExpr * dataConversionErrorFlag = new(generator->wHeap())
    HostVar("_sys_dataConversionErrorFlag",
            new(generator->wHeap()) SQLInt(TRUE,FALSE), // int not null
            TRUE);
  ULng32 temp_varb_tupp_len;

  dataConversionErrorFlag->bindNode(generator->getBindWA());
  temp_varb_list.insert(dataConversionErrorFlag->getValueId());
  processValIdList(temp_varb_list,
                   ExpTupleDesc::SQLARK_EXPLODED_FORMAT,
                   temp_varb_tupp_len,  // out
                   work_atp,
                   data_conv_error_atp_index);

  NABoolean doEquiKeyPredOpt = FALSE;
#ifdef _DEBUG
  if (getenv("DO_EQUI_KEY_PRED_OPT"))
    doEquiKeyPredOpt 
      = (searchKey ? searchKey->areAllChosenPredsEqualPreds() : FALSE);
#endif
  if (mdamKeyPtr == NULL)
    {
      // check to see if there is a begin key expression; if there
      // isn't, don't generate a key object
      if (beginKeyPred.entries() == 0)
        *keyInfo = 0;
      else
        {
          // For subset and range operators, generate the begin key
          // expression, end key expression, begin key exclusion expression
          // and end key exclusion expression.  For unique operators,
          // generate only the begin key expression.
          ex_expr *bk_expr = 0;
          ex_expr *ek_expr = 0;
          ex_expr *bk_excluded_expr = 0;
          ex_expr *ek_excluded_expr = 0;
          
          short bkey_excluded = 0;
          short ekey_excluded = 0;
          
          generateKeyExpr(keyColumns,
              beginKeyPred,
              work_atp,
              key_atp_index,
              dataConversionErrorFlag,
              tf,
              keyLen, // out
              &bk_expr,  // out
              doKeyEncodeOpt,
              firstKeyColOffset,
              doEquiKeyPredOpt);
              
          if (endKeyPred.entries() > 0)
             generateKeyExpr(keyColumns,
                 endKeyPred,
                 work_atp,
                 key_atp_index,
                 dataConversionErrorFlag,
                 tf,
                 keyLen, // out -- should be the same as above
                 &ek_expr,  // out
                 doKeyEncodeOpt,
                 firstKeyColOffset,
                 doEquiKeyPredOpt);
              
          if (reverseScan)
            {
              // reverse scan - swap the begin and end key predicates
              
              // Note: evidently, the Optimizer has already switched
              // the key predicates in this case, so what we are
              // really doing is switching them back.
              
              ex_expr *temp = bk_expr;
              bk_expr = ek_expr;
              ek_expr = temp;
            }
          if (searchKey)
            {
              generateExclusionExpr(searchKey->getBeginKeyExclusionExpr(),
                  work_atp,
                  exclude_flag_atp_index,
                  &bk_excluded_expr); // out
              
              bkey_excluded = (short) searchKey->isBeginKeyExclusive(); 
              
              generateExclusionExpr(searchKey->getEndKeyExclusionExpr(),
                  work_atp,
                  exclude_flag_atp_index,
                  &ek_excluded_expr); // out
              
              ekey_excluded = (short) searchKey->isEndKeyExclusive(); 

              if (reverseScan)
                {
                  NABoolean x = bkey_excluded;
                  bkey_excluded = ekey_excluded;
#pragma nowarn(1506)   // warning elimination 
                  ekey_excluded = x;
#pragma warn(1506)  // warning elimination 

                  ex_expr* temp = bk_excluded_expr;
                  bk_excluded_expr = ek_excluded_expr;
                  ek_excluded_expr = temp;
                }
            } // if searchKey
          else if (useTheHorribleKludge)
            {
              generateExclusionExpr(beginKeyExclusionExpr,
                    work_atp,
                    exclude_flag_atp_index,
                    &bk_excluded_expr); // out
              
              generateExclusionExpr(endKeyExclusionExpr,
                    work_atp,
                    exclude_flag_atp_index,
                    &ek_excluded_expr); // out
              
              // note that the old PartitionAccess::codeGen() code didn't
              // set values for bkey_excluded and ekey_excluded, so the
              // safest choice is to choose inclusion, i.e. let the flags
              // retain their initial value of 0.
            }
          
          // Build key info
          if (keytag > 0)
            keyLen += sizeof(short);

          if ((unique_key_expr == NULL) ||
              (NOT generator->genLeanExpr()))
            {
              // the work cri desc is used to build key values (entry 2),
              // to compute the exclusion flag (entry 3), to monitor for data
              // conversion errors (entry 4), and to compute values on a column
              // basis (entry 5 - Mdam only)
              ex_cri_desc * work_cri_desc 
                        = new(space) ex_cri_desc(6, space);
              
              *keyInfo = new(space) keySingleSubsetGen(
                   keyLen,
                   work_cri_desc,
                   key_atp_index,
                   exclude_flag_atp_index,
                   data_conv_error_atp_index,
                   bk_expr,
                   ek_expr,
                   bk_excluded_expr,
                   ek_excluded_expr,
                   // static exclude flags (if exprs are NULL)
                   bkey_excluded,
                   ekey_excluded); 
              if (unique_key_expr)
                *unique_key_expr = NULL;
            }
          else
            {
              if (keyInfo)
                *keyInfo = NULL;
              *unique_key_expr = (ex_expr_lean*)bk_expr;
              *uniqueKeyLen = keyLen;
            }
        }
    }  // end of non-mdam case
  else // Mdam case
    {
      // the work cri desc is used to build key values (entry 2),
      // to compute the exclusion flag (entry 3), to monitor for data
      // conversion errors (entry 4), and to compute values on a column
      // basis (entry 5 - Mdam only, and entry 6 - Mdam only, and only
      // for the MDAM_BETWEEN pred type)
      ex_cri_desc * work_cri_desc 
            = new(space) ex_cri_desc(7, space);

      // compute the format of the key buffer -- We need this
      // so that Mdam will know, for each column, where in the buffer
      // to move a value and how many bytes that value takes.  The
      // next few lines of code result in this information being stored
      // in the attrs array.

      // Some words on the technique:  We create expressions whose
      // result datatype matches the key buffer datatypes for each key
      // column.  Then we use the datatypes of these expressions to
      // compute buffer format.  The expressions themselves are not
      // used any further; they do not result in compiled expressions
      // in the plan.  At run time we use string moves to move key
      // values instead.

 
      const CollIndex keyCount = listOfKeyColumns.entries();
      CollIndex i;

      // assert at least one column
      GenAssert(keyCount > 0,"MDAM:  at least one key column required.");

      Attributes ** attrs = new(generator->wHeap()) Attributes * [keyCount];
      
      for (i = 0; i < keyCount; i++)
        {
          ItemExpr * col_node =
              listOfKeyColumns[i].getItemExpr(); 
          ItemExpr *enode = col_node;

          if ((tf == ExpTupleDesc::SQLMX_KEY_FORMAT) &&
              (enode->getValueId().getType().getVarLenHdrSize() > 0))
            {
              // varchar keys in SQL/MP tables are converted to
              // fixed length chars in key buffers

              const CharType& char_type =
                          (CharType&)(enode->getValueId().getType());

              if (!CollationInfo::isSystemCollation(char_type.getCollation()))
                {
                  enode = new(generator->wHeap()) 
                             Cast(enode, 
                                   (new (generator->wHeap())
                                          SQLChar( CharLenInfo(char_type.getStrCharLimit(),
                                                   char_type.getDataStorageSize()),
                                          char_type.supportsSQLnull(),
                                          FALSE, FALSE, FALSE,
                                          char_type.getCharSet(),
                                          char_type.getCollation(),
                                          char_type.getCoercibility())));
                }
            }

          NABoolean desc_flag;

          if (keyColumns.isAscending(i))
            desc_flag = reverseScan;
          else
            desc_flag = !reverseScan;

#pragma nowarn(1506)   // warning elimination 
          enode = new(generator->wHeap()) CompEncode(enode,desc_flag); 
#pragma warn(1506)  // warning elimination 
          enode->bindNode(generator->getBindWA());

          attrs[i] = 
            (generator->
             addMapInfoToThis(keyBufferPartMapTable, enode->getValueId(), 0))->getAttr();
        }  // for, over keyCount

      // Compute offsets, lengths, etc. and assign them to the right
      // atp and atp index

      processAttributes((ULng32)keyCount,
                        attrs, tf,
                        keyLen,
                        work_atp, key_atp_index);

      // Now we have key column offsets and lengths stored in attrs.
 
      // Next, for each column, generate expressions to compute hi,
      // lo, non-null hi and non-null lo values, and create
      // MdamColumnGen structures.

      // Notes: In the Mdam network itself, all key values are
      // encoded.  Hence, we generate CompEncode nodes in all of the
      // expressions, regardless of tuple format.  In the Simulator
      // case, we must at run-time decode the encoded values when
      // moving them to the key buffer.  $$$ We need an expression to
      // do this.  This decoding work has not yet been done, so the
      // simulator only works correctly for columns that happen to be
      // correctly aligned and whose encoding function does not change
      // the value. $$$

      MdamColumnGen * first = 0;
      MdamColumnGen * last = 0;
      LIST(NAType *) keyTypeList(generator->wHeap());//to keep the type of the keys for later

      for (i = 0; i < keyCount; i++)
        {
          // generate expressions to compute hi, lo, non-null hi, non-null lo
          NAType * targetType = (keyColumns[i]->getType())->newCopy(generator->wHeap());

          // Genesis case 10-971031-9814 fix: desc_flag must take into account
          // both the ASC/DESC attribute of the key column and the reverseScan
          // attribute. Before this fix, it only took into account the first of
          // these.
          NABoolean desc_flag;

          if (keyColumns.isAscending(i))
            desc_flag = reverseScan;
          else
            desc_flag = !reverseScan;
          // End Genesis case 10-971031-9814 fix.

          if ((tf == ExpTupleDesc::SQLMX_KEY_FORMAT) &&
              (targetType->getVarLenHdrSize() > 0))
            {

// 5/9/98: add support for VARNCHAR
              const CharType* char_type = (CharType*)(targetType);

              if (!CollationInfo::isSystemCollation(char_type->getCollation()))
                {
                  targetType = new(generator->wHeap()) 
                                      SQLChar( CharLenInfo(char_type->getStrCharLimit(),
                                                           char_type->getDataStorageSize()),
                                      char_type -> supportsSQLnull(),
                                      FALSE, FALSE, FALSE,
                                      char_type -> getCharSet(),
                                      char_type -> getCollation(),
                                      char_type -> getCoercibility());
/*
                  targetType->getNominalSize(),
                  targetType->supportsSQLnull()
*/
                }
            }

          keyTypeList.insert(targetType);  // save in ith position for later
  
          // don't need to make copy of targetType in next call
          ItemExpr * lo = new(generator->wHeap()) ConstValue(targetType, 
                                !desc_flag, 
                                TRUE /* allow NULL */);
#pragma nowarn(1506)   // warning elimination 
          lo = new(generator->wHeap()) CompEncode(lo,desc_flag); 
#pragma warn(1506)  // warning elimination 
          lo->bindNode(generator->getBindWA());

          ValueIdList loList;
          loList.insert(lo->getValueId());

          ex_expr *loExpr = 0;
          ULng32 dataLen = 0;

          generateContiguousMoveExpr(loList,
                   0, // don't add convert nodes
                   work_atp,
                   key_column_atp_index,
                   tf,
                   dataLen,
                   &loExpr);

          ItemExpr * hi = new(generator->wHeap()) ConstValue(targetType->newCopy(generator->wHeap()),
                                desc_flag,
                                TRUE /* allow NULL */);
#pragma nowarn(1506)   // warning elimination 
          hi = new(generator->wHeap()) CompEncode(hi,desc_flag);
#pragma warn(1506)  // warning elimination 
          hi->bindNode(generator->getBindWA());

          ValueIdList hiList;
          hiList.insert(hi->getValueId());

          ex_expr *hiExpr = 0;
          
          generateContiguousMoveExpr(hiList,
                   0, // don't add convert nodes
                   work_atp,
                   key_column_atp_index,
                   tf,
                   dataLen,
                   &hiExpr);

          ex_expr *nonNullLoExpr = loExpr;
          ex_expr *nonNullHiExpr = hiExpr;

          if (targetType->supportsSQLnull())
            {
              if (desc_flag)
                {
                  ItemExpr * nonNullLo = new(generator->wHeap())
                    ConstValue(targetType->newCopy(generator->wHeap()),
                               !desc_flag, 
                               FALSE /* don't allow NULL */);
      #pragma nowarn(1506)   // warning elimination 
                  nonNullLo = new(generator->wHeap()) CompEncode(nonNullLo,desc_flag); 
      #pragma warn(1506)  // warning elimination 
                  nonNullLo->bindNode(generator->getBindWA());

                  ValueIdList nonNullLoList;
                  nonNullLoList.insert(nonNullLo->getValueId());
                  nonNullLoExpr = 0;  // so we will get an expression back

                  generateContiguousMoveExpr(nonNullLoList,
                           0, // don't add convert nodes
                           work_atp,
                           key_column_atp_index,
                           tf,
                           dataLen,
                           &nonNullLoExpr);
                }
              else
                {
                  ItemExpr * nonNullHi = new(generator->wHeap())
                    ConstValue(targetType->newCopy(generator->wHeap()),
                         desc_flag, 
                         FALSE /* don't allow NULL */);
#pragma nowarn(1506)   // warning elimination 
                  nonNullHi = new(generator->wHeap()) CompEncode(nonNullHi,desc_flag); 
#pragma warn(1506)  // warning elimination 
                  nonNullHi->bindNode(generator->getBindWA());
                  
                  ValueIdList nonNullHiList;
                  nonNullHiList.insert(nonNullHi->getValueId());
                  nonNullHiExpr = 0;  // so we will get an expression back
                  
                  generateContiguousMoveExpr(nonNullHiList,
                           0, // don't add convert nodes
                           work_atp,
                           key_column_atp_index,
                           tf,
                           dataLen,
                           &nonNullHiExpr);
                }
            }

          NABoolean useSparseProbes = mdamKeyPtr->isColumnSparse(i); 
          
          // calculate offset to the beginning of the column value
          // (including the null indicator and the varchar length
          // indicator if present)

          ULng32 column_offset = attrs[i]->getOffset();

          if (attrs[i]->getNullFlag())
            column_offset = attrs[i]->getNullIndOffset();
          else if (attrs[i]->getVCIndicatorLength() > 0)
            column_offset = attrs[i]->getVCLenIndOffset();

          last = new(space) MdamColumnGen(last,
                  dataLen,
                  column_offset,
                  useSparseProbes,
                  loExpr,
                  hiExpr,
                  nonNullLoExpr,
                  nonNullHiExpr);
          if (first == 0)
            first = last;
        }  // for over keyCount

      // generate MdamPred's and attach to MdamColumnGen's

      const ColumnOrderListPtrArray &columnOrderListPtrArray =
           mdamKeyPtr->getColumnOrderListPtrArray();

#ifdef _DEBUG
      // Debug print statements below depend on this
      // variable:
      char *ev = getenv("MDAM_PRINT");
      const NABoolean mdamPrintOn = (ev != NULL AND strcmp(ev,"ON")==0);
#endif

#ifdef _DEBUG
      if (mdamPrintOn)
        {
          fprintf(stdout, "\n\n***Generating the MDAM key for table with index"
                  " columns: ");
          listOfKeyColumns.display();
        }
#endif

      for (CollIndex n = 0; n < columnOrderListPtrArray.entries(); n++)
        {
          // get the list of key predicates associated with the nth disjunct:
          const ColumnOrderList &columnOrderList = *columnOrderListPtrArray[n];

#ifdef _DEBUG
          if (mdamPrintOn)
            {
              fprintf(stdout,"\nDisjunct[%d]:----------------\n",n);
              columnOrderList.print();
            }
#endif
          MdamColumnGen * cc = first;

          CMPASSERT(keyCount == columnOrderList.entries());
          const ValueIdSet *predsPtr = NULL;
          for (i = 0; i < keyCount; i++)
            {
#ifdef _DEBUG
              if (mdamPrintOn)
                {
                  fprintf(stdout, "Column(%d) using: ", i);
                  if ( mdamKeyPtr->isColumnSparse(i) )
                    fprintf(stdout,"SPARSE probes\n");
                  else
                    fprintf(stdout, "DENSE probes\n");
                }
#endif
              // get predicates for column order i:
              predsPtr = columnOrderList[i];

              NAType * keyType = keyTypeList[i];

              NABoolean descending;

              if (keyColumns.isAscending(i))
                descending = reverseScan;
              else
                descending = !reverseScan;

              ValueId keyColumn = listOfKeyColumns[i];

              MdamCodeGenHelper mdamHelper(
                   n,
                   keyType,
                   descending,
                   work_atp,
                   key_column_atp_index,
                   tf,
                   dataConversionErrorFlag,
                   keyColumn);

              MdamPred * lastPred = cc->getLastPred();

              if (predsPtr != NULL)
                {
                  for (ValueId predId = predsPtr->init(); 
                       predsPtr->next(predId); predsPtr->advance(predId))
                    {
                      MdamPred * head = 0;  // head of generated MdamPred's
                      MdamPred * tail = 0;

                      ItemExpr * orGroup = predId.getItemExpr();

                      orGroup->mdamPredGen(generator,&head,&tail,mdamHelper,NULL);

                      if (lastPred)
                        {
                          if ( CmpCommon::getDefault(RANGESPEC_TRANSFORMATION) == DF_ON )
                            {
                              MdamPred* curr = lastPred;
                              while(curr->getNext() != NULL)
                                curr=curr->getNext();
                              curr->setNext(head);
                            }
                          else
                            lastPred->setNext(head);
                        }
                      cc->setLastPred(tail);
                      lastPred = tail;  //@ZXmdam if 1st pred has head != tail, head is lost
                    } // for over preds
                } // if (predsPtr != NULL)
              cc = cc->getNext();
            } // for every order...
        } // for every column order list in the array (of disjuncts)

      // build the Mdam key info
      if (keytag > 0)
        keyLen += sizeof(short);
      *keyInfo = new(space) keyMdamGen(keyLen,
                                       work_cri_desc,
                                       key_atp_index,
                                       exclude_flag_atp_index,
                                       data_conv_error_atp_index,
                                       key_column_atp_index,
                                       first,
                                       last,
                                       reverseScan,
                                       generator->wHeap());

    }  // end of mdam case

  if (*keyInfo)
    (*keyInfo)->setKeytag(keytag);

  // reset map table to forget about the key object's work Atp
  
  // aside: this logic is more bloody than it should be because the
  // map table implementation doesn't accurately reflect the map table
  // abstraction
  
  generator->removeAll(keyBufferPartMapTable); // deletes anything that might have been
  // added after keyBufferPartMapTable (at
  // this writing we don't expect there to
  // be anything, but we want to be safe)
  // at this point keyBufferPartMapTable should be the last map table in the
  // global map table chain
  generator->removeLast();  // unlinks keyBufferPartMapTable and deletes it
  
  return 0;
};
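
Two details recur throughout buildKeyInfo() and are easy to get backwards: the effective sort direction of a key column must combine its ASC/DESC attribute with the reverseScan flag, and for a reverse scan the begin/end key expressions and their exclusion flags must be swapped as matched pairs. The sketch below restates both in isolation, using hypothetical names rather than the generator classes above.

#include <utility>

// Effective descending flag for a key column: a reverse scan flips the
// column's declared ASC/DESC attribute (see the Genesis 10-971031-9814 note).
inline bool effectiveDescending(bool columnIsAscending, bool reverseScan) {
    return columnIsAscending ? reverseScan : !reverseScan;
}

// For a reverse scan, swap begin/end key expressions together with their
// exclusion flags so they stay paired.
template <class Expr>
void swapForReverseScan(Expr*& beginKeyExpr, Expr*& endKeyExpr,
                        bool& beginExcluded, bool& endExcluded) {
    std::swap(beginKeyExpr, endKeyExpr);
    std::swap(beginExcluded, endExcluded);
}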
Code Example #8
short MergeUnion::codeGen(Generator * generator)
{
  ExpGenerator * exp_gen = generator->getExpGenerator();
  Space * space = generator->getSpace();
  
  MapTable * my_map_table = generator->appendAtEnd();

  ////////////////////////////////////////////////////////////////////////////
  //
  // Layout at this node:
  //
  // |------------------------------------------------------------------------|
  // | input data  |  Unioned data | left child's data | right child's data   |
  // | ( I tupps ) |  ( 1 tupp )   | ( L tupps )       |  ( R tupp )          |
  // |------------------------------------------------------------------------|
  // <-- returned row to parent --->
  // <------------ returned row from left child ------->
  // <-------------------- returned row from right child --------------------->
  //
  // input data:        the atp input to this node by its parent. 
  // unioned data:      tupp where the unioned result is moved
  // left child data:   tupps appended by the left child
  // right child data:  tupps appended by right child
  //
  // Input to left child:    I + 1 tupps
  // Input to right child:   I + 1 + L tupps
  //
  // Tupps returned from left and right child are only used to create the
  // unioned data. They are not returned to parent.
  //
  ////////////////////////////////////////////////////////////////////////////

  ex_cri_desc * given_desc
    = generator->getCriDesc(Generator::DOWN);

  ex_cri_desc * returned_desc = NULL;
  if(child(0) || child(1))
    returned_desc = new(space) ex_cri_desc(given_desc->noTuples() + 1, space);
  else
    returned_desc = given_desc;

  // expressions to move the left and right child's output to the
  // unioned row.
  ex_expr * left_expr = 0;
  ex_expr * right_expr = 0;

  // expression to compare left and right child's output to
  // evaluate merge union.
  ex_expr * merge_expr = 0;

  // Expression to conditionally execute the left or right child.
  ex_expr *cond_expr = NULL;

  // Expression to handle triggered action exception
  ex_expr *trig_expr = NULL;

  // It is OK for neither child to exist when generating a merge union TDB
  // for index maintenance. The children are filled in at build time.
  //
  GenAssert((child(0) AND child(1)) OR (NOT child(0) AND NOT (child(1))),
	    "MergeUnion -- missing one child");
  ComTdb * left_child_tdb = NULL;
  ComTdb * right_child_tdb = NULL;
  ExplainTuple *leftExplainTuple = NULL;
  ExplainTuple *rightExplainTuple = NULL;
  NABoolean afterUpdate = FALSE;
  NABoolean rowsFromLeft = TRUE;
  NABoolean rowsFromRight = TRUE;

  if(child(0) && child(1)) {
 
  // If an update operation was found before the execution of the
  // IF statement, set afterUpdate to TRUE to indicate that an update
  // was performed before the IF statement executed. This flag is used
  // at runtime to decide whether to set rollbackTransaction in the
  // diagsArea.

 
  if (generator->updateWithinCS() && getUnionForIF()) {
    afterUpdate = TRUE;
  }

    // generate the left child
    generator->setCriDesc(returned_desc, Generator::DOWN);
    child(0)->codeGen(generator);
    left_child_tdb = (ComTdb *)(generator->getGenObj());
    leftExplainTuple = generator->getExplainTuple();

  // MVs --
  // If the left child does not have any outputs, don't expect any rows.
  if (child(0)->getGroupAttr()->getCharacteristicOutputs().isEmpty())
    rowsFromLeft = FALSE;

  // if an update operation is found in the left subtree of this Union then
  // set rowsFromLeft to FALSE, which is passed on to the execution tree to
  // indicate that this Union node is not expecting rows from the left child;
  // foundAnUpdate_ is then reset so it can be reused while doing codeGen() on
  // the right subtree

    if (getUnionForIF()) {
      if (! getCondEmptyIfThen()) {
        if (generator->foundAnUpdate())   {
          rowsFromLeft = FALSE;
          generator->setFoundAnUpdate(FALSE);
        } 
      } 
      else {
        rowsFromLeft = FALSE;  
      }
    }
    
    // descriptor returned by left child is given to right child as input.
    generator->setCriDesc(generator->getCriDesc(Generator::UP), 
			  Generator::DOWN);
    child(1)->codeGen(generator);
    right_child_tdb = (ComTdb *)(generator->getGenObj());
    rightExplainTuple = generator->getExplainTuple();

  // MVs
  // If the right child does not have any outputs, don't expect any rows.
  if (child(1)->getGroupAttr()->getCharacteristicOutputs().isEmpty())
    rowsFromRight = FALSE;

  // if an update operation is found in the right subtree of this CS then
  // set rowsFromRight to FALSE, which is passed on to the execution tree to
  // indicate that this CS node is not expecting rows from the right child;
  // foundAnUpdate_ is then reset so it can be reused while doing codeGen() on
  // the left or right child of another CS node


  if (getUnionForIF()) {
    if (! getCondEmptyIfElse()) {
      if (generator->foundAnUpdate())  {
        rowsFromRight = FALSE;
      } 
    } 
    else {
      rowsFromRight = FALSE;  
    }


    // we cannot always expect a row from a conditional operator. If it is an
    // IF statement without an ELSE and the condition fails then we do not get
    // any rows back. So we allow a conditional union operator to handle all
    // errors below it and for the purposes of 8015 error / 8014 warning
    // treat it as an update node. In this way the nodes above it do not expect
    // any row from this child and do not raise an error if no row is returned.
    // 8014/8015 type errors within this IF statement are handled as in any
    // regular CS.

    generator->setFoundAnUpdate(TRUE);
  }

  }

  // Create the unioned row. 
  // colMapTable() is a list of ValueIdUnion nodes where each node points to
  // the corresponding left and the right output entries.
  // Generate expressions to move the left and right child's output to
  // the unioned row. 
  ValueIdList left_val_id_list;
  ValueIdList right_val_id_list;
  CollIndex   i;                                      

  for (i = 0; i < colMapTable().entries(); i++)       
    {
      ValueIdUnion * vidu_node = (ValueIdUnion *)(((colMapTable()[i]).getValueDesc())->getItemExpr());

      Cast * cnode;
      if (vidu_node->getResult().getType().getTypeQualifier() != NA_ROWSET_TYPE) {
        // move left child's output to result. The 'type' of the Cast result
        // is the same as that of the vidu_node.
        cnode = new(generator->wHeap())
	           Cast(((vidu_node->getLeftSource()).getValueDesc())->getItemExpr(),
	                &(vidu_node->getResult().getType()));
      }
      else {
	// We indicate that the whole array is to be copied
	SQLRowset *rowsetInfo = (SQLRowset *) &(vidu_node->getResult().getType());
        SQLRowset *newRowset =  new (generator->wHeap()) 
	                         SQLRowset(generator->wHeap(), rowsetInfo->getElementType(),
	                                   rowsetInfo->getMaxNumElements(),
                                           rowsetInfo->getNumElements());
	newRowset->useTotalSize() = TRUE;
        cnode = new(generator->wHeap())
	           Cast(((vidu_node->getLeftSource()).getValueDesc())->getItemExpr(),
	                newRowset);
      }

      cnode->bindNode(generator->getBindWA());
      
      left_val_id_list.insert(cnode->getValueId());

      if (vidu_node->getResult().getType().getTypeQualifier() != NA_ROWSET_TYPE) {
        // move right child's output to result. The 'type' of the Cast result
        // is the same as that of the vidu_node.
        cnode = new(generator->wHeap())
	           Cast(((vidu_node->getRightSource()).getValueDesc())->getItemExpr(),
	                &(vidu_node->getResult().getType()));
      }
      else {
	// We indicate that the whole array is to be copied
	SQLRowset *rowsetInfo = (SQLRowset *) &(vidu_node->getResult().getType());
        SQLRowset *newRowset =  new (generator->wHeap()) 
	                         SQLRowset(generator->wHeap(), rowsetInfo->getElementType(),
	                                   rowsetInfo->getMaxNumElements(),
                                           rowsetInfo->getNumElements());
	newRowset->useTotalSize() = TRUE;
        cnode = new(generator->wHeap())
	           Cast(((vidu_node->getRightSource()).getValueDesc())->getItemExpr(),
	                newRowset);
      }

      cnode->bindNode(generator->getBindWA());
      right_val_id_list.insert(cnode->getValueId());
    }
  
  ExpTupleDesc * tuple_desc = 0;
  ULng32 tuple_length = 0;
  if(child(0) && child(1)) {
    exp_gen->generateContiguousMoveExpr(left_val_id_list,
					0, // don't add convert nodes
					1, returned_desc->noTuples() - 1,
					ExpTupleDesc::SQLARK_EXPLODED_FORMAT,
					tuple_length,
					&left_expr,
					&tuple_desc,
					ExpTupleDesc::SHORT_FORMAT);
  

    exp_gen->generateContiguousMoveExpr(right_val_id_list,
					0, // don't add convert nodes
					1, returned_desc->noTuples() - 1,
					ExpTupleDesc::SQLARK_EXPLODED_FORMAT,
					tuple_length,
					&right_expr);
  }
  
  // add value ids for all vidu_nodes to my map table. This is the
  // map table that will be returned. The attributes of the value ids
  // are the same as those of the left (or right) expression outputs.
  for (i = 0; i < colMapTable().entries(); i++)
    {
      ValueIdUnion * vidu_node = (ValueIdUnion *)(((colMapTable()[i]).getValueDesc())->getItemExpr());
      
      Attributes * attr =
	generator->addMapInfoToThis(my_map_table, vidu_node->getValueId(), 
				    generator->getMapInfo(left_val_id_list[i])->getAttr())->getAttr();
      attr->setAtp(0);
    }
  
  // describe the returned unioned row
  returned_desc->setTupleDescriptor(returned_desc->noTuples() - 1, tuple_desc);
  
  // if sort-merge union is being done, generate expression to
  // compare the left and the right values.
  // This predicate should return TRUE if the left value is
  // less than the right value.
  merge_expr = 0;
  if (getMergeExpr()) {
    // generate the merge predicate. 
    ItemExpr * mergeExpr = new(generator->wHeap()) BoolResult(getMergeExpr());
    mergeExpr->bindNode(generator->getBindWA());    
      
    exp_gen->generateExpr(mergeExpr->getValueId(),
			  ex_expr::exp_SCAN_PRED,
			  &merge_expr);   
  }

  // If conditional union, generate conditional expression, and ignore
  // right child if it was just being used as a no-op.
  cond_expr = 0;
  if (NOT condExpr().isEmpty()) {
    ItemExpr *condExp = condExpr().rebuildExprTree(ITM_AND, TRUE, TRUE);

    exp_gen->generateExpr(condExp->getValueId(), 
                          ex_expr::exp_SCAN_PRED, 
		          &cond_expr);
  }

  // If conditional union, generate triggered action exception error 
  if (NOT trigExceptExpr().isEmpty()) {
    ItemExpr *trigExp = trigExceptExpr().rebuildExprTree(ITM_AND, TRUE, TRUE);

    exp_gen->generateExpr(trigExp->getValueId(), 
                          ex_expr::exp_SCAN_PRED, 
		          &trig_expr);
  }

  // remove both children's map table. Nothing from child's context
  // should be visible from here on upwards.
  generator->removeAll(my_map_table);

  // Ensure the default buffer size is at least as large as the unioned output
  // row.
  UInt32 outputBuffSize = MAXOF( getDefault(GEN_UN_BUFFER_SIZE),
                                 tuple_length );
  outputBuffSize = SqlBufferNeededSize( 1,                 // # of tuples
                                        outputBuffSize,
                                        SqlBuffer::NORMAL_
                                        );

  ComTdbUnion * union_tdb 
    = new(space) ComTdbUnion(
			     left_child_tdb,
			     right_child_tdb,
			     left_expr,
			     right_expr,
			     merge_expr,
			     cond_expr,
			     trig_expr,
                             tuple_length, // unioned rowlen
			     returned_desc->noTuples()-1, // tupp index for
			                                  // unioned buffer
			     given_desc,
			     returned_desc, 
			     (queue_index)getDefault(GEN_UN_SIZE_DOWN),
			     (queue_index)getDefault(GEN_UN_SIZE_UP),
			     (Cardinality) (getInputCardinality() * getEstRowsUsed()).getValue(),
			     getDefault(GEN_UN_NUM_BUFFERS),
			     outputBuffSize,
			     getOrderedUnion(),
                             getBlockedUnion(),  //++ Triggers -
                             hasNoOutputs(),     //++ Triggers -
                             rowsFromLeft,
                             rowsFromRight,
                             afterUpdate,
			     getInNotAtomicStatement());

  generator->initTdbFields(union_tdb);

  // If it does not have two children, this is index maintenance code and
  // should not be Explained  
  if (!generator->explainDisabled()) {
    generator->setExplainTuple(addExplainInfo(union_tdb,
					    leftExplainTuple,
					    rightExplainTuple,
					    generator));
  }

  // restore the original down cri desc since this node changed it.
  generator->setCriDesc(given_desc, Generator::DOWN);

  // set the new up cri desc.
  generator->setCriDesc(returned_desc, Generator::UP);

  generator->setGenObj(this, union_tdb);

  return 0;  
}
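
The heart of MergeUnion::codeGen() is the colMapTable() loop: for every unioned output column it casts the left and right child outputs to the common result type, producing the two value lists that drive the left/right move expressions. A compact sketch of that per-column cast pattern, with hypothetical types and without the rowset special case, is given below.

#include <memory>
#include <string>
#include <vector>

// Hypothetical column expression with a named result type.
struct ColExpr {
    std::string type;
};

// One output column of the union: left source, right source, and the
// already-synthesized result type (analogue of a ValueIdUnion node).
struct UnionColumn {
    std::shared_ptr<ColExpr> leftSource;
    std::shared_ptr<ColExpr> rightSource;
    std::string resultType;
};

// A cast that coerces its child to the union's result type.
struct UnionCast : ColExpr {
    std::shared_ptr<ColExpr> child;
    UnionCast(std::shared_ptr<ColExpr> c, const std::string& t)
        : child(std::move(c)) { type = t; }
};

// For every unioned column, cast the left and right child outputs to the
// common result type, producing the two move lists used above.
void buildUnionMoveLists(const std::vector<UnionColumn>& columns,
                         std::vector<std::shared_ptr<ColExpr>>& leftList,
                         std::vector<std::shared_ptr<ColExpr>>& rightList) {
    for (const UnionColumn& col : columns) {
        leftList.push_back(std::make_shared<UnionCast>(col.leftSource, col.resultType));
        rightList.push_back(std::make_shared<UnionCast>(col.rightSource, col.resultType));
    }
}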
Code Example #9
// compress the histograms based on query predicates on this table
void TableDesc::compressHistogramsForCurrentQuery()
{

    // if there are some column statistics
    if ((colStats_.entries() != 0) &&
            (table_) &&
            (table_->getExtendedQualName().getSpecialType() == ExtendedQualName::NORMAL_TABLE))
    {   // if 1
        // check if query analysis info is available
        if(QueryAnalysis::Instance()->isAnalysisON())
        {   // if 2
            // get a handle to the query analysis
            QueryAnalysis* queryAnalysis = QueryAnalysis::Instance();

            // get a handle to the table analysis
            const TableAnalysis * tableAnalysis = getTableAnalysis();

            if(!tableAnalysis)
                return;

            // iterate over statistics for each column
            for(CollIndex i = 0; i < colStats_.entries(); i++)
            {   // for 1
                // Get a handle to the column's statistics descriptor
                ColStatDescSharedPtr columnStatDesc = colStats_[i];

                // get a handle to the ColStats
                ColStatsSharedPtr colStats = columnStatDesc->getColStats();

                // if this is a single column, as opposed to a multicolumn
                if(colStats->getStatColumns().entries() == 1)
                {   // if 3
                    // get column's value id
                    const ValueId columnId = columnStatDesc->getColumn();

                    // get column analysis
                    ColAnalysis* colAnalysis = queryAnalysis->getColAnalysis(columnId);

                    if(!colAnalysis) continue;

                    ValueIdSet predicatesOnColumn =
                        colAnalysis->getReferencingPreds();

                    // we can compress this column's histogram if there
                    // is an equality predicate against a constant

                    ItemExpr *constant = NULL;

                    NABoolean colHasEqualityAgainstConst =
                        colAnalysis->getConstValue(constant);

                    // if an equality predicate with a constant was found
                    // i.e. predicate of the form col = 5
                    if (colHasEqualityAgainstConst)
                    {   // if 4
                        if (constant)
                            // compress the histogram
                            columnStatDesc->compressColStatsForQueryPreds(constant,constant);
                    } // if 4
                    else { // else 4

                        // since there are no equality predicates, we might still
                        // be able to compress the column's histogram based on
                        // range predicates against a constant. The following are
                        // examples of such predicates:
                        // * col > 1 <-- predicate defines a lower bound
                        // * col < 3 <-- predicate defines an upper bound
                        // * col > 1 and col < 30 <-- window predicate, defines both bounds
                        ItemExpr * lowerBound = NULL;
                        ItemExpr * upperBound = NULL;

                        // Extract predicates from the range spec and add them to the
                        // original predicate set; otherwise isARangePredicate() will
                        // return FALSE and histogram compression won't happen.
                        ValueIdSet rangeSpecPred(predicatesOnColumn);
                        for (ValueId predId= rangeSpecPred.init();
                                rangeSpecPred.next(predId);
                                rangeSpecPred.advance(predId))
                        {
                            ItemExpr * pred = predId.getItemExpr();
                            if ( pred->getOperatorType() == ITM_RANGE_SPEC_FUNC )
                            {
                                ValueIdSet vs;
                                ((RangeSpecRef *)pred)->getValueIdSetForReconsItemExpr(vs);
                                // remove rangespec vid from the original set
                                predicatesOnColumn.remove(predId);
                                // add preds extracted from rangespec to the original set
                                predicatesOnColumn.insert(vs);
                            }
                        }

                        // in the following loop we iterate over all the predicates
                        // on this column. If there is a range predicate e.g. a > 2
                        // or a < 3, then we use that to define upper and lower bounds.
                        // Given predicate a > 2, we get a lower bound of 2.
                        // Given predicate a < 3, we get an upper bound of 3.
                        // The bounds are then passed down to the histogram
                        // compression methods.

                        // iterate over predicates to see if any of them is a range
                        // predicate e.g. a > 2
                        for (ValueId predId= predicatesOnColumn.init();
                                predicatesOnColumn.next(predId);
                                predicatesOnColumn.advance(predId))
                        {   // for 2
                            // check if this predicate is a range predicate
                            ItemExpr * predicateOnColumn = predId.getItemExpr();
                            if (predicateOnColumn->isARangePredicate())
                            {   // if 5

                                // if a predicate is a range predicate we need to find out more
                                // information regarding the predicate to see if it can be used
                                // to compress the column's histogram. We look for the following:
                                // * The predicate is against a constant e.g. a > 3 and not against
                                //   another column e.g. a > b
                                // Also, given a predicate, we need to find out which side is the column
                                // and which side is the constant. Normally people write a range predicate
                                // as a > 3, but the same could be written as 3 < a.
                                // Also, either one of the operands of the range predicate might be
                                // a VEG; if so, we need to dig into the VEG to see which side is
                                // the constant and which side is the column.

                                // check the right and left children of this predicate to
                                // see if one of them is a constant
                                ItemExpr * leftChildItemExpr = (ItemExpr *) predicateOnColumn->getChild(0);
                                ItemExpr * rightChildItemExpr = (ItemExpr *) predicateOnColumn->getChild(1);

                                // by default assume the literal is at right i.e. predicate of
                                // the form a > 2
                                NABoolean columnAtRight = FALSE;

                                // check if right child of predicate is a VEG
                                if ( rightChildItemExpr->getOperatorType() == ITM_VEG_REFERENCE)
                                {   // if 6
                                    // if child is a VEG
                                    VEGReference * rightChildVEG = (VEGReference *) rightChildItemExpr;

                                    // check if the VEG contains the current column
                                    // if it does contain the current column then
                                    // the predicate has the column on right and potentially
                                    // a constant on the left.
                                    if(rightChildVEG->getVEG()->getAllValues().contains(columnId))
                                    {   // if 7
                                        // column is at right i.e. predicate is of the form
                                        // 2 < a
                                        columnAtRight = TRUE;
                                    } // if 7
                                } // if 6
                                else { // else 6
                                    // child is not a VEG
                                    if ( columnId == rightChildItemExpr->getValueId() )
                                    {   // if 8
                                        // literals are at left i.e. predicate is of the form
                                        // (1,2) < (a, b)
                                        columnAtRight = TRUE;
                                    } // if 8
                                } // else 6

                                ItemExpr * potentialConstantExpr = NULL;

                                // check if the range predicate is against a constant
                                if (columnAtRight)
                                {   // if 9
                                    // the left child is potentially a constant
                                    potentialConstantExpr = leftChildItemExpr;
                                } // if 9
                                else { // else 9
                                    // the right child is potentially a constant
                                    potentialConstantExpr = rightChildItemExpr;
                                } // else 9

                                // initialize constant to NULL before
                                // looking for next constant
                                constant = NULL;

                                // check if potentialConstantExpr contains a constant.
                                // we need to see if this range predicate is a predicate
                                // against a constant e.g col > 1 and not a predicate
                                // against another column e.g. col > anothercol

                                // if the expression is a VEG
                                if ( potentialConstantExpr->getOperatorType() == ITM_VEG_REFERENCE)
                                {   // if 10

                                    // expression is a VEG, dig into the VEG
                                    // to see if it contains a constant
                                    VEGReference * potentialConstantExprVEG =
                                        (VEGReference *) potentialConstantExpr;

                                    potentialConstantExprVEG->getVEG()->\
                                    getAllValues().referencesAConstValue(&constant);
                                } // if 10
                                else { // else 10

                                    // expression is not a VEG; check whether it is a constant
                                    if ( potentialConstantExpr->getOperatorType() == ITM_CONSTANT )
                                        constant = potentialConstantExpr;
                                } // else 10

                                // if predicate involves a constant, does the constant imply
                                // an upper bound or a lower bound
                                if (constant)
                                {   // if 11
                                    // if range predicate has column at right e.g. 3 > a
                                    if (columnAtRight)
                                    {   // if 12
                                        if ( predicateOnColumn->getOperatorType() == ITM_GREATER ||
                                                predicateOnColumn->getOperatorType() == ITM_GREATER_EQ)
                                        {   // if 13
                                            if (!upperBound)
                                                upperBound = constant;
                                        } // if 13
                                        else
                                        {   // else 13
                                            if (!lowerBound)
                                                lowerBound = constant;
                                        } // else 13
                                    } // if 12
                                    else { // else 12
                                        // range predicate has column at left e.g. a < 3
                                        if ( predicateOnColumn->getOperatorType() == ITM_LESS ||
                                                predicateOnColumn->getOperatorType() == ITM_LESS_EQ)
                                        {   // if 14
                                            if (!upperBound)
                                                upperBound = constant;
                                        } // if 14
                                        else
                                        {   // else 14
                                            if (!lowerBound)
                                                lowerBound = constant;
                                        } // else 14
                                    } // else 12
                                } // if 11
                            } // if 5
                        } // for 2

                        // if we found an upper bound or a lower bound
                        if (lowerBound || upperBound)
                        {
                            // compress the histogram based on range predicates
                            columnStatDesc->compressColStatsForQueryPreds(lowerBound, upperBound);
                        }
                    } // else 4
                } // if 3
            } // for 1
        } // if 2
    } // if 1
    // All histograms compressed. Set the histCompressed flag to TRUE
    histsCompressed(TRUE);
}
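The bound classification above boils down to a two-way decision: the comparison operator and the side on which the column appears determine whether the constant supplies an upper or a lower bound for the histogram compression. A minimal standalone sketch of that rule (the enum and function below are hypothetical stand-ins, not the SQL/MX operator types):

#include <cstdio>

// Hypothetical stand-ins for the comparison operator codes.
enum CmpOp { CMP_LT, CMP_LE, CMP_GT, CMP_GE };

// Returns true when the constant acts as an upper bound for the column,
// false when it acts as a lower bound.  columnAtRight == true means the
// predicate has the form  <const> <op> <col>  (e.g. 3 > a).
bool constantIsUpperBound(CmpOp op, bool columnAtRight)
{
  if (columnAtRight)                       // e.g. 3 >  a  ==>  a < 3
    return (op == CMP_GT || op == CMP_GE);
  else                                     // e.g. a <  3
    return (op == CMP_LT || op == CMP_LE);
}

int main()
{
  std::printf("%d\n", constantIsUpperBound(CMP_GT, true));   // 3 >  a : upper bound (1)
  std::printf("%d\n", constantIsUpperBound(CMP_LE, false));  // a <= 3 : upper bound (1)
  std::printf("%d\n", constantIsUpperBound(CMP_GE, false));  // a >= 3 : lower bound (0)
  return 0;
}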
コード例 #10
// change literals of a cacheable query into ConstantParameters 
ItemExpr* UDFunction::normalizeForCache(CacheWA& cwa, BindWA& bindWA)
{
  if (nodeIsNormalizedForCache()) { 
    return this; 
  }
  // Since UDFunction::transformNode refers to both the
  // inputVars_ ValueIdSet and the children array, we need to make sure
  // they are consistent.
  // The children array may contain cast()s of the expressions in inputVars_.


  // If we have a UUDF function, the inputs that were given
  // at parse time no longer reflect reality.
  // So here we simply reinitialize them with what we find in
  // inputVars_.

  CMPASSERT(udfDesc_); // we better have one after binding.
  NABoolean isUUDF(udfDesc_->isUUDFRoutine());

  // Save off a copy of the original inputVars_ set.
  ValueIdSet origInputVars(inputVars_);


  // Loop through the given inputs
  for (Int32 x = 0; x < getArity(); x++)
  {
    NABoolean origInputIsChildOfCast(FALSE);
    ItemExpr * origIe = child(x);
    ItemExpr * newIe = NULL;
    ValueId vid = origIe->getValueId();
    if (cwa.getPhase() == CmpMain::BIND) 
    {
      if (origIe->isSafelyCoercible(cwa)) 
      {
   
        // fix CR 10-010726-4109: make sure queries with constants that 
        // cannot be safely backpatched such as 
        //   select case smallintcol when 4294967393 then 'max' end from t
        // are not parameterized
   
   
        newIe = origIe->normalizeForCache(cwa, bindWA);
   
        if (newIe != origIe )
        {
          // normalizeForCache returned a new ItemExpr. We have to update
          // the UDFunction inputVars_ set, as this is used to determine
          // characteristic inputs for IsolatedScalarUDF at transform time.

          child(x) = newIe;

          // Is it an input that UDFunction::bindNode put a cast around?
          if ((origIe->getOperatorType() == ITM_CAST) &&
              (origInputVars.contains(origIe->child(0)->getValueId())))
          {

            // Since the original child was a CAST that UDFunction::bindNode
            // created, check whether that CAST's child is part of the new
            // expression. If it is, we don't have to do anything, since the
            // CASTed value is already part of the inputVars set; otherwise we
            // have to update the inputVars set to keep the characteristic
            // inputs consistent.
            if (! newIe->referencesTheGivenValue(
                    origIe->child(0)->getValueId(), TRUE))
            {
              // remove the original input from inputVars. It is the child
              // of origIe because origIe is a cast that UDFunction::bindNode
              // introduced.
              inputVars_ -= origIe->child(0)->getValueId();

              if (newIe->getOperatorType() == ITM_CAST)
              {
                // If the new expression is a CAST, we assume the child is the
                // real input. We don't expect CAST(CAST(CAST(expr)))-type
                // expressions, only simple CAST(expr) ones.
                inputVars_ += newIe->child(0)->getValueId();
              }
              else
              {
                // add the newIe itself if it was not a CAST.
                inputVars_ += newIe->getValueId();
              }
            }
          }
          else
          {
            // If the newIe doesn't contain the original one, we need to update
            // the inputVars set.
            if (! newIe->referencesTheGivenValue(
                    origIe->getValueId(), TRUE))
            {
              // the origIe was not a CAST introduced by UDFunction::bindNode()
              if (newIe->getOperatorType() == ITM_CAST) 
              {
                if (!origInputVars.contains(newIe->child(0)->getValueId()))
                {
                  inputVars_ -= origIe->getValueId();
                  // If the new expression is a CAST, we assume the child is the
                  // real input. We don't expect CAST(CAST(CAST(expr)))-type
                  // expressions, only simple CAST(expr) ones.
                  inputVars_ += newIe->child(0)->getValueId();
                }
                // we don't need to update inputVars_ if the origInputVars 
                // contains the valueId of the newIe child already.
              } 
              else
              {
                // This is an entirely new input. Remove the old one, and 
                // add in the new.
                inputVars_ -= origIe->getValueId();
                inputVars_ += newIe->getValueId();
              }
            }
          }
        }
      }
    }
  }

  markAsNormalizedForCache();
  return this;
}
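The bookkeeping above exists so that constant literals in a cacheable query can be replaced by parameters while the UDF's recorded inputs (inputVars_) stay in sync with its children array. A minimal standalone sketch of the literal-to-parameter idea, using toy types rather than the ItemExpr/CacheWA classes:

#include <iostream>
#include <memory>
#include <string>
#include <vector>

// Toy expression node: a literal, a column reference, or an operator
// with children.  Purely illustrative.
struct Node {
  std::string kind;                         // "lit", "col", "op", "param"
  std::string text;                         // literal value / column / op name
  std::vector<std::shared_ptr<Node>> kids;
};

// Walk the tree, replace every literal with a numbered parameter marker,
// and remember the literal value so it can be bound at execution time.
// This mirrors the intent of normalizeForCache() for a cacheable query.
void parameterize(std::shared_ptr<Node> &n, std::vector<std::string> &actuals)
{
  if (n->kind == "lit") {
    actuals.push_back(n->text);
    n = std::make_shared<Node>(
        Node{"param", "?" + std::to_string(actuals.size()), {}});
    return;
  }
  for (auto &k : n->kids)
    parameterize(k, actuals);
}

int main()
{
  // col1 + 42
  auto expr = std::make_shared<Node>(Node{
      "op", "+",
      {std::make_shared<Node>(Node{"col", "col1", {}}),
       std::make_shared<Node>(Node{"lit", "42", {}})}});

  std::vector<std::string> actuals;
  parameterize(expr, actuals);
  std::cout << expr->kids[1]->text << " bound to " << actuals[0] << "\n";
  // prints: ?1 bound to 42
  return 0;
}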
コード例 #11
short
PhysSequence::codeGen(Generator *generator) 
{
  // Get a local handle on some of the generator objects.
  //
  CollHeap *wHeap = generator->wHeap();
  Space *space = generator->getSpace();
  ExpGenerator *expGen = generator->getExpGenerator();
  MapTable *mapTable = generator->getMapTable();

  // Allocate a new map table for this node. This must be done
  // before generating the code for my child so that this local
  // map table will be sandwiched between the map tables already
  // generated and the map tables generated by my offspring.
  //
  // Only the items available as output from this node will
  // be put in the local map table. Before exiting this function, all of
  // my offspring's map tables will be removed. Thus, none of the outputs
  // from nodes below this node will be visible to nodes above it except
  // those placed in the local map table and those that already exist in
  // my ancestors' map tables. This is the standard mechanism used in the
  // generator for managing the access to item expressions.
  //
  MapTable *localMapTable = generator->appendAtEnd();

  // Since this operation doesn't modify the row on the way down the tree,
  // go ahead and generate the child subtree. Capture the given composite row
  // descriptor and the child's returned TDB and composite row descriptor.
  //
  ex_cri_desc * givenCriDesc = generator->getCriDesc(Generator::DOWN);
  child(0)->codeGen(generator);
  ComTdb *childTdb = (ComTdb*)generator->getGenObj();
  ex_cri_desc * childCriDesc = generator->getCriDesc(Generator::UP);
  ExplainTuple *childExplainTuple = generator->getExplainTuple();

  // Make all of my child's outputs map to ATP 1. The child row is only 
  // accessed in the project expression and it will be the second ATP 
  // (ATP 1) passed to this expression.
  //
  localMapTable->setAllAtp(1);

  // My returned composite row has an additional tupp.
  //
  Int32 numberTuples = givenCriDesc->noTuples() + 1;
  ex_cri_desc * returnCriDesc 
#pragma nowarn(1506)   // warning elimination 
    = new (space) ex_cri_desc(numberTuples, space);
#pragma warn(1506)  // warning elimination 

  // For now, the history buffer row looks just like the return row. Later,
  // it may be useful to add an additional tupp for sequence function
  // intermediates that are not needed above this node -- thus, this
  // ATP is kept separate from the returned ATP.
  //
  const Int32 historyAtp = 0;
  const Int32 historyAtpIndex = numberTuples-1;
#pragma nowarn(1506)   // warning elimination 
  ex_cri_desc *historyCriDesc = new (space) ex_cri_desc(numberTuples, space);
#pragma warn(1506)  // warning elimination 
  ExpTupleDesc *historyDesc = 0;

  // separate the read and return expressions
  seperateReadAndReturnItems(wHeap);

  // The history buffer consists of items projected directly from the
  // child, the root sequence functions, the value arguments of the 
  // offset functions, and running sequence functions. These elements must 
  // be materialized in the  history buffer in order to be able to compute 
  // the outputs of this node -- the items projected directly from the child 
  // (projectValues) and the root sequence functions (sequenceFunctions).
  //
  // Compute the set of sequence function items that must be materialized
  // in the history buffer. -- sequenceItems
  //
  // Compute the set of items in the history buffer: the union of the 
  // projected values and the value arguments. -- historyIds
  //
  // Compute the set of items in the history buffer that are computed:
  // the difference between all the elements in the history buffer
  // and the projected items. -- computedHistoryIds
  //

  // KB --- will need to return an atp with 3 tupps only: 0, 1 and 2
  // 2 --> values from the history buffer after they are moved to it

 
  addCheckPartitionChangeExpr(generator, TRUE);

  ValueIdSet historyIds;

  historyIds += movePartIdsExpr(); 
  historyIds += sequencedColumns();
  
  ValueIdSet outputFromChild = child(0)->getGroupAttr()->getCharacteristicOutputs();

  getHistoryAttributes(readSeqFunctions(),outputFromChild, historyIds, TRUE, wHeap);

  // Add in the top level sequence functions.
  historyIds += readSeqFunctions();

  getHistoryAttributes(returnSeqFunctions(),outputFromChild, historyIds, TRUE, wHeap);
  // Add in the top level functions.
  historyIds += returnSeqFunctions();
  
  // Layout the work tuple format which consists of the projected
  // columns and the computed sequence functions. First, compute
  // the number of attributes in the tuple.
  //
  ULng32 numberAttributes 
    = ((NOT historyIds.isEmpty()) ? historyIds.entries() : 0);

  // Allocate an attribute pointer vector from the working heap.
  //
  Attributes **attrs = new(wHeap) Attributes*[numberAttributes];

  // Fill in the attributes vector for the history buffer including
  // adding the entries to the map table. Also, compute the value ID
  // set for the elements to project from the child row.
  //
  // TODO: re-visit this function
  computeHistoryAttributes(generator, 
                           localMapTable,
                           attrs,
                           historyIds);

  // Create the tuple descriptor for the history buffer row and
  // assign the offsets to the attributes. For now, this layout is 
  // identical to the returned row. Set the tuple descriptors for
  // the return and history rows.
  //
  ULng32 historyRecLen;
  expGen->processAttributes(numberAttributes,
                            attrs,
                            ExpTupleDesc::SQLARK_EXPLODED_FORMAT,
                            historyRecLen,
                            historyAtp,
                            historyAtpIndex,
                            &historyDesc,
                            ExpTupleDesc::SHORT_FORMAT);
  NADELETEBASIC(attrs, wHeap);
#pragma nowarn(1506)   // warning elimination 
  returnCriDesc->setTupleDescriptor(historyAtpIndex, historyDesc);
#pragma warn(1506)  // warning elimination 
#pragma nowarn(1506)   // warning elimination 
  historyCriDesc->setTupleDescriptor(historyAtpIndex, historyDesc);
#pragma warn(1506)  // warning elimination 

  // If there are any sequence function items, generate the sequence 
  // function expressions.
  //
  ex_expr * readSeqExpr = NULL;
  if(NOT readSeqFunctions().isEmpty())
    {
      ValueIdSet seqVals = readSeqFunctions();
      seqVals += sequencedColumns();
      seqVals += movePartIdsExpr(); 
      expGen->generateSequenceExpression(seqVals,
                                         readSeqExpr);
    }

  ex_expr *checkPartChangeExpr = NULL;
  if (!checkPartitionChangeExpr().isEmpty()) {
    ItemExpr * newCheckPartitionChangeTree= 
        checkPartitionChangeExpr().rebuildExprTree(ITM_AND,TRUE,TRUE);

    expGen->generateExpr(newCheckPartitionChangeTree->getValueId(), 
                         ex_expr::exp_SCAN_PRED,
                         &checkPartChangeExpr);
  }

  //unsigned long rowLength;
  ex_expr * returnExpr = NULL;
  if(NOT returnSeqFunctions().isEmpty())
  {
    expGen->generateSequenceExpression(returnSeqFunctions(),
                                         returnExpr);

  }

  // Generate expression to evaluate predicate on the output
  //
  ex_expr *postPred = 0;

  if (! selectionPred().isEmpty()) {
    ItemExpr * newPredTree = 
      selectionPred().rebuildExprTree(ITM_AND,TRUE,TRUE);

    expGen->generateExpr(newPredTree->getValueId(), ex_expr::exp_SCAN_PRED,
                         &postPred);
  }


  // Reset ATP's to zero for parent.
  //
  localMapTable->setAllAtp(0);


  // Generate expression to evaluate the cancel expression
  //
  ex_expr *cancelExpression = 0;

  if (! cancelExpr().isEmpty()) {
    ItemExpr * newCancelExprTree = 
      cancelExpr().rebuildExprTree(ITM_AND,TRUE,TRUE);

    expGen->generateExpr(newCancelExprTree->getValueId(), ex_expr::exp_SCAN_PRED,
                         &cancelExpression);
  }

  //
  //  For overflow
  //
  // ( The following are meaningless if ! unlimitedHistoryRows() ) 
  NABoolean noOverflow =  
    CmpCommon::getDefault(EXE_BMO_DISABLE_OVERFLOW) == DF_ON ;
  NABoolean logDiagnostics = 
    CmpCommon::getDefault(EXE_DIAGNOSTIC_EVENTS) == DF_ON ;
  NABoolean possibleMultipleCalls = generator->getRightSideOfFlow() ;
  short scratchTresholdPct = 
    (short) CmpCommon::getDefaultLong(SCRATCH_FREESPACE_THRESHOLD_PERCENT);
  // determine the memory usage (the amount of memory, as a percentage of
  // total physical memory, used to initialize data structures)
  unsigned short memUsagePercent =
    (unsigned short) getDefault(BMO_MEMORY_USAGE_PERCENT);
  short memPressurePct = (short)getDefault(GEN_MEM_PRESSURE_THRESHOLD);

  historyRecLen = ROUND8(historyRecLen);

  Lng32 maxNumberOfOLAPBuffers;
  Lng32 maxRowsInOLAPBuffer;
  Lng32 minNumberOfOLAPBuffers;
  Lng32 numberOfWinOLAPBuffers;
  Lng32 olapBufferSize;

  computeHistoryParams(historyRecLen,
                       maxRowsInOLAPBuffer,
                       minNumberOfOLAPBuffers,
                       numberOfWinOLAPBuffers,
                       maxNumberOfOLAPBuffers,
                       olapBufferSize);

  ComTdbSequence *sequenceTdb
    = new(space) ComTdbSequence(readSeqExpr,
                                returnExpr,
                                postPred,
                                cancelExpression,
                                getMinFollowingRows(),
#pragma nowarn(1506)   // warning elimination 
                                historyRecLen,
                                historyAtpIndex,
                                childTdb,
                                givenCriDesc,
                                returnCriDesc,
                                (queue_index)getDefault(GEN_SEQFUNC_SIZE_DOWN),
                                (queue_index)getDefault(GEN_SEQFUNC_SIZE_UP),
                                getDefault(GEN_SEQFUNC_NUM_BUFFERS),
                                getDefault(GEN_SEQFUNC_BUFFER_SIZE),
				olapBufferSize,
                                maxNumberOfOLAPBuffers,
                                numHistoryRows(),
                                getUnboundedFollowing(),
				logDiagnostics,
				possibleMultipleCalls,
				scratchTresholdPct,
				memUsagePercent,
				memPressurePct,
                                maxRowsInOLAPBuffer,
                                minNumberOfOLAPBuffers,
                                numberOfWinOLAPBuffers,
                                noOverflow,
                                checkPartChangeExpr);
#pragma warn(1506)  // warning elimination 
  generator->initTdbFields(sequenceTdb);

  // update the estimated value of HistoryRowLength with actual value
  //setEstHistoryRowLength(historyIds.getRowLength());

  double sequenceMemEst = getEstimatedRunTimeMemoryUsage(sequenceTdb);
  generator->addToTotalEstimatedMemory(sequenceMemEst);

  if(!generator->explainDisabled()) {
    Lng32 seqMemEstInKBPerCPU = (Lng32)(sequenceMemEst / 1024) ;
    seqMemEstInKBPerCPU = seqMemEstInKBPerCPU/
      (MAXOF(generator->compilerStatsInfo().dop(),1));
    generator->setOperEstimatedMemory(seqMemEstInKBPerCPU);

    generator->
      setExplainTuple(addExplainInfo(sequenceTdb,
                                     childExplainTuple,
                                     0,
                                     generator));

    generator->setOperEstimatedMemory(0);
  }

  sequenceTdb->setScratchIOVectorSize((Int16)getDefault(SCRATCH_IO_VECTOR_SIZE_HASH));
  sequenceTdb->setOverflowMode(generator->getOverflowMode());

  sequenceTdb->setBmoMinMemBeforePressureCheck((Int16)getDefault(EXE_BMO_MIN_SIZE_BEFORE_PRESSURE_CHECK_IN_MB));
  
  if(generator->getOverflowMode() == ComTdb::OFM_SSD )
    sequenceTdb->setBMOMaxMemThresholdMB((UInt16)(ActiveSchemaDB()->
				   getDefaults()).
			  getAsLong(SSD_BMO_MAX_MEM_THRESHOLD_IN_MB));
  else
    sequenceTdb->setBMOMaxMemThresholdMB((UInt16)(ActiveSchemaDB()->
				   getDefaults()).
			  getAsLong(EXE_MEMORY_AVAILABLE_IN_MB));

  // The CQD EXE_MEM_LIMIT_PER_BMO_IN_MB takes precedence over the memory quota system
  NADefaults &defs = ActiveSchemaDB()->getDefaults();
  UInt16 mmu = (UInt16)(defs.getAsDouble(EXE_MEM_LIMIT_PER_BMO_IN_MB));
  UInt16 numBMOsInFrag = (UInt16)generator->getFragmentDir()->getNumBMOs();
  if (mmu != 0)
    sequenceTdb->setMemoryQuotaMB(mmu);
  else {
    // Apply the quota system if either of the following is true:
    //   1. the memory limit feature is turned off and there is more than one BMO
    //   2. the memory limit feature is turned on
    NABoolean mlimitPerCPU = defs.getAsDouble(EXE_MEMORY_LIMIT_PER_CPU) > 0;

    if ( mlimitPerCPU || numBMOsInFrag > 1 ) {

        double memQuota = 
           computeMemoryQuota(generator->getEspLevel() == 0,
                              mlimitPerCPU,
                              generator->getBMOsMemoryLimitPerCPU().value(),
                              generator->getTotalNumBMOsPerCPU(),
                              generator->getTotalBMOsMemoryPerCPU().value(),
                              numBMOsInFrag, 
                              generator->getFragmentDir()->getBMOsMemoryUsage()
                             );
                                  
        sequenceTdb->setMemoryQuotaMB( UInt16(memQuota) );
    }
  }

  generator->setCriDesc(givenCriDesc, Generator::DOWN);
  generator->setCriDesc(returnCriDesc, Generator::UP);
  generator->setGenObj(this, sequenceTdb);

  return 0;

}
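For orientation, the history-row layout step above amounts to assigning contiguous offsets to the attributes placed in the history buffer and rounding the record length up to an 8-byte multiple, as the ROUND8(historyRecLen) call does. A standalone sketch of just that arithmetic, with assumed attribute sizes (this is not the real ExpGenerator::processAttributes(), which also handles null and varchar indicators and per-attribute alignment):

#include <cstdio>
#include <vector>

static unsigned round8(unsigned n) { return (n + 7) & ~7u; }

int main()
{
  // assumed attribute byte lengths for the history row
  std::vector<unsigned> attrLens = {4, 8, 2, 11};

  std::vector<unsigned> offsets;
  unsigned cur = 0;
  for (unsigned len : attrLens) {
    offsets.push_back(cur);               // attribute starts where the previous one ended
    cur += len;
  }
  unsigned historyRecLen = round8(cur);   // 25 -> 32

  for (size_t i = 0; i < offsets.size(); i++)
    std::printf("attr %zu at offset %u\n", i, offsets[i]);
  std::printf("history record length = %u\n", historyRecLen);
  return 0;
}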
コード例 #12
// ItmSeqRunningFunction::preCodeGen
//
// Transforms the running sequence functions into scalar expressions
// that use offset to reference the previous value of the function.
//
ItemExpr *ItmSeqRunningFunction::preCodeGen(Generator *generator)
{
  if (nodeIsPreCodeGenned())
    return this;
  markAsPreCodeGenned();

  // Get some local handles...
  //
  CollHeap *wHeap = generator->wHeap();
  ItemExpr *itmChild = child(0)->castToItemExpr();

  // Allocate a HostVar for referencing the result before it is computed.
  //
  ItemExpr *itmResult 
    = new(wHeap) HostVar("_sys_Result",
			 getValueId().getType().newCopy(wHeap),
			 TRUE);

  // Create an item expression to reference the previous
  // value of this running sequence function.
  //
  ItemExpr *offExpr = new(wHeap) ItmSeqOffset(itmResult, 1);
  ((ItmSeqOffset *)offExpr)->setIsOLAP(isOLAP());
  // Add the sequence function specific computation.
  //
  ItemExpr *itmNewSeqFunc = 0;
  switch(getOperatorType())
    {
    case ITM_RUNNING_COUNT:
      {
	// By this point ITM_RUNNING_COUNT is count(column). The count
	// is one more than the previous count if the current column is
	// not null, otherwise, it is the previous count.
	//

        // Create the increment value.  For non-nullable values, this
        // is always 1, essentially runningcount(*).
        //
        ItemExpr *incr;
        if(itmChild->getValueId().getType().supportsSQLnullLogical()) {
          incr = generator->getExpGenerator()->createExprTree
            ("CASE WHEN @A1 IS NULL THEN @A3 ELSE @A2 END", 
             0, 3, 
             itmChild,
             new(wHeap) ConstValue(1),
             new(wHeap) ConstValue(0));
        } else {
          incr = new(wHeap) ConstValue(1);
        }

        ((ItmSeqOffset *)offExpr)->setNullRowIsZero(TRUE);
        ItemExpr *src = offExpr;

        // Do the increment.
        //
        itmNewSeqFunc = new(wHeap)
              BiArith(ITM_PLUS, src, incr);
      }
    break;
    
    case ITM_RUNNING_SUM:
      {
	// SUM(sum from previous row, child)
	//
	itmNewSeqFunc = new(wHeap) BiArithSum(ITM_PLUS, offExpr, itmChild);
      }
    break;
    
    case ITM_RUNNING_MIN:
      {
	// MIN(min from previous rows, child)
	//
	itmNewSeqFunc 
	  = new(wHeap) ItmScalarMinMax(ITM_SCALAR_MIN,
				       offExpr,
				       itmChild);
      }
    break;
    
    case ITM_RUNNING_MAX:
      {
	// MAX(max from previous row, child)
	//
	itmNewSeqFunc 
	  = new(wHeap) ItmScalarMinMax(ITM_SCALAR_MAX,
				       offExpr,
				       itmChild);
      }
    break;
    
    case ITM_LAST_NOT_NULL:
      {
	// If the current value is null then use the previous value
	// of last not null.
	//
	itmNewSeqFunc = generator->getExpGenerator()->createExprTree
	  ("CASE WHEN @A2 IS NOT NULL THEN @A2 ELSE @A1 END", 
	   0, 2, offExpr, itmChild);
      }
    break;

    case ITM_RUNNING_CHANGE:
      {
        // The running change (or 'rows since changed') can have a
        // composite child (a list of values).
        // Convert the list of values to a list of offsets of those values.
        //
        ItemExpr *offChild = itmChild;
      
        if (itmChild->getOperatorType() == ITM_ITEM_LIST)
          {
            // child is a multi-valued expression; transform it into a list
            // of Offset expressions, one per element
            ExprValueId treePtr = itmChild;

            ItemExprTreeAsList changeValues(&treePtr,
                                            ITM_ITEM_LIST,
                                            RIGHT_LINEAR_TREE);

            offChild = new(wHeap) ItmSeqOffset( changeValues[0], 1);
	    ((ItmSeqOffset *)offChild)->setIsOLAP(isOLAP());
            // add Offset expressions for all the items of the list
            // 
            CollIndex nc = changeValues.entries();
            for (CollIndex i = 1; i < nc; i++)
              {
                ItemExpr *off = new(generator->wHeap()) ItmSeqOffset( changeValues[i], 1);
		((ItmSeqOffset *)off)->setIsOLAP(isOLAP());
                offChild = new(generator->wHeap()) ItemList(offChild, off);
              }
          } else {
            offChild = new(wHeap) ItmSeqOffset( offChild, 1);
	    ((ItmSeqOffset *)offChild)->setIsOLAP(isOLAP());
          }
        
 
        ((ItmSeqOffset *)offExpr)->setNullRowIsZero(TRUE);
        ItemExpr *prevValue = offExpr;

        // Compare the value(s) to the previous value(s).  Use special
        // NULLs flags to treat NULLs as values.  Two NULL values are
        // considered equal here.
        //
        ItemExpr *pred = new (wHeap) BiRelat(ITM_EQUAL,
                                             itmChild,
                                             offChild,
                                             TRUE); // Special NULLs
        // running change = 
        //      (value(s) == prev(value(s))) ? prev(running change)+1 : 1
        //
        // Compute base value.
        //
        itmNewSeqFunc = new (wHeap) 
                IfThenElse(pred, prevValue, new (wHeap) SystemLiteral(0));
        
        itmNewSeqFunc = new (wHeap) Case(NULL, itmNewSeqFunc);

        // Force the evaluation of the offset expression so that the
        // result can be reused by subsequent references.
        //
        itmNewSeqFunc = new(wHeap) ItmBlockFunction(offChild, itmNewSeqFunc);

        // Increment the base value.
        //
        itmNewSeqFunc = new (wHeap) BiArith(ITM_PLUS, 
                                            itmNewSeqFunc,
                                            new(wHeap) SystemLiteral(1));

      }
      break;
      
    }
  
  // Get value Ids and types for all of the items. Must do this typing before
  // replacing this value Id's item expression -- otherwise, the typing
  // will give a result different than the type already computed for this
  // sequence function.
  //
  GenAssert(itmNewSeqFunc, "ItmSeqRunningFunction::preCodeGen -- Unexpected Operator Type!");
  itmNewSeqFunc->synthTypeAndValueId(TRUE);

  // Replace the original value ID with the new expression.
  //
  getValueId().replaceItemExpr(itmNewSeqFunc);
  
  // Map the reference to the result to the actual result in the map table.
  //
  Attributes *attr =  generator->getMapInfo
    (itmNewSeqFunc->getValueId())->getAttr();
  MapInfo *mapInfo = generator->addMapInfo(itmResult->getValueId(), attr);
  itmResult->markAsPreCodeGenned();
  mapInfo->codeGenerated();

  // Return the preCodeGen of the new expression.
  //
  return itmNewSeqFunc->preCodeGen(generator);
}
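Conceptually, each running function above is a fold over the rows seen so far: the new value combines the previous row's result (the ItmSeqOffset(itmResult, 1) reference) with the current child value. A small standalone sketch of RUNNING COUNT and RUNNING SUM over nullable values, with std::optional standing in for SQL NULL (plain C++, not the generated expressions; treating an all-NULL sum as 0 is a simplification):

#include <iostream>
#include <optional>
#include <vector>

int main()
{
  // nullable input column; std::nullopt plays the role of SQL NULL
  std::vector<std::optional<long>> col = {3, std::nullopt, 5, 7};

  long runningCount = 0;   // the "previous row" value starts at 0 (null row is zero)
  long runningSum   = 0;

  for (const auto &v : col) {
    // RUNNING COUNT: previous count + 1 if the value is not NULL, else previous count
    runningCount += v.has_value() ? 1 : 0;
    // RUNNING SUM: a NULL input leaves the sum unchanged
    runningSum   += v.value_or(0);
    std::cout << "count=" << runningCount << " sum=" << runningSum << '\n';
  }
  // prints: count=1 sum=3 / count=1 sum=3 / count=2 sum=8 / count=3 sum=15
  return 0;
}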
コード例 #13
short TriRelational::mdamPredGen(Generator * generator,
                   MdamPred ** head,
                   MdamPred ** tail,
                   MdamCodeGenHelper & mdamHelper,
                   ItemExpr * parent)
{
  // temp -- haven't been able to unit test this code yet because I haven't been
  // able to figure out how to get the Optimizer to pick a TriRelational guy as
  // a key predicate -- seems better to have the code abort rather than run unverified
  // and possibly fail in strange ways
  GenAssert(0, "Reached TriRelational::mdamPredGen");
  return -1;
  // end temp code

#pragma nowarn(269)   // warning elimination 
  short rc = 0;
#pragma warn(269)  // warning elimination 

  enum MdamPred::MdamPredType predType = 
    MdamPred::MDAM_EQ; // just to initialize

  // Find out what kind of predicate this is.  Note that for DESCending
  // columns, we reverse the direction of any comparison.
  switch (getOperatorType())
    {
      case ITM_LESS_OR_LE:
      {
        predType = MdamPred::MDAM_LT;
        if (mdamHelper.isDescending())
          predType = MdamPred::MDAM_GT;
        break;
      }
      
      case ITM_GREATER_OR_GE:
      {
        predType = MdamPred::MDAM_GT;
        if (mdamHelper.isDescending())
          predType = MdamPred::MDAM_LT;
        break;
      }

      default:
      {
        GenAssert(0, "mdamPredGen: unsupported TriRelational comparison.");
        break;
      }
    }
  
  ItemExpr * child0 = child(0);
  ItemExpr * child1 = child(1);
  ValueId keyColumn = mdamHelper.getKeyColumn();
  
  //  assume predicate is <key> <compare> <value>
  ItemExpr * keyValue = child1;

  if (child1->getValueId() == keyColumn)
    {
      // we guessed wrong -- predicate is <value> <compare> <key>
      keyValue = child0;
      GenAssert(child0->getValueId() != keyColumn,
        "mdamPredGen:  unexpected form for key predicate.");
      // $$$ Add code here to reverse the comparison?
    }
  else
    {
      GenAssert(child0->getValueId() == keyColumn,
        "mdamPredGen:  unexpected form for key predicate.");
    }
  
  // generate an expression to convert the key value to the
  // type of the key column (in its key buffer) and encode it

  ItemExpr * vnode = 0;
  
  // errorsCanOccur() determines if errors can occur converting the class
  // datatype to the target datatype.  The object on whose behalf the
  // member function is called is expected to be a NAType.


  NABoolean generateNarrow = 
    keyValue->getValueId().getType().errorsCanOccur(*mdamHelper.getTargetType());

  if ((generateNarrow) &&
      (getenv("NO_NARROWS"))) // for testing -- allows turning off Narrows
    generateNarrow = FALSE;  

  if (generateNarrow)
    {
      vnode = new(generator->wHeap())
                Narrow(keyValue,
                       mdamHelper.getDataConversionErrorFlag(),
                       mdamHelper.getTargetType()->newCopy());
    }
  else
    {
      vnode = new(generator->wHeap()) 
                Cast(keyValue,mdamHelper.getTargetType()->newCopy());
    }

#pragma nowarn(1506)   // warning elimination 
  vnode = new(generator->wHeap()) CompEncode(vnode,mdamHelper.isDescending());
#pragma warn(1506)  // warning elimination 

  vnode->bindNode(generator->getBindWA());
 
  // add CASE 
  //      WHEN child(2)
  //         CAST(round up/round down)
  //      ELSE
  //         no-op

  ItemExpr * hnode = 0;

  if (predType == MdamPred::MDAM_LT)
    hnode = new ConstValue(2 /* ex_conv_clause::CONV_RESULT_ROUNDED_UP_TO_MIN */);
  else
    hnode = new ConstValue(-2 /* ex_conv_clause::CONV_RESULT_ROUNDED_DOWN_TO_MAX */);
                  
  hnode = generator->getExpGenerator()->
          createExprTree("CASE WHEN @B1 THEN @A2 ELSE @A3 END",
                         0,
                         3, // number of subtree parameters
                         child(2),  // @B1
                         hnode,     // @A2
                         0);        // @A3 -- results in no operation

  hnode->bindNode(generator->getBindWA());

  // Assign attributes for result value 

  ValueId vnodeId = vnode->getValueId();
  ValueId hnodeId = hnode->getValueId(); 
  ULng32 tupleLength = 0;

  ValueIdList vnodeList;
  vnodeList.insert(vnode->getValueId());
  
  generator->getExpGenerator()->processValIdList(
                vnodeList,
                mdamHelper.getTupleDataFormat(),
                tupleLength,  // out
                mdamHelper.getAtp(),
                mdamHelper.getAtpIndex());

  // Assign attributes for modifying data conversion error flag
  // Note that all we do is copy the already-assigned attributes

  ItemExpr * dataCEF = mdamHelper.getDataConversionErrorFlag();
  ValueId dataCEFId = dataCEF->getValueId();
  Attributes * dataCEFAttr = 
     (generator->getMapInfo(dataCEFId))->getAttr();
   
  generator->addMapInfoToThis(generator->getLastMapTable(), hnodeId,dataCEFAttr);
  
  // finally generate the expression and hang it off an MdamPred
  
  ex_expr *vexpr = 0;

  vnodeList.insert(hnode->getValueId());  // put hnode in there too
  rc = generator->getExpGenerator()->generateListExpr(
                                        vnodeList,
                                        ex_expr::exp_ARITH_EXPR,
                                        &vexpr);

#pragma nowarn(1506)   // warning elimination 
  *head = *tail = new(generator->getSpace()) 
                    MdamPred(mdamHelper.getDisjunctNumber(),
                             predType,
                             vexpr);
#pragma warn(1506)  // warning elimination 
  
  return rc;
}
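The Narrow-versus-Cast choice above hinges on whether converting the key value to the key column's type can fail at run time; when it can, a checked conversion (Narrow) is generated so that a data conversion error flag can drive the round-up-to-min / round-down-to-max handling built by the CASE expression. A standalone sketch of that kind of checked narrowing (a hypothetical helper, not the executor's Narrow clause):

#include <cstdint>
#include <iostream>
#include <limits>

// Checked conversion from a wider to a narrower integer type.  Returns
// false and clamps when the value does not fit, the way a conversion
// error flag would be raised for a Narrow node.
bool narrowToInt32(int64_t in, int32_t &out)
{
  if (in > std::numeric_limits<int32_t>::max()) {
    out = std::numeric_limits<int32_t>::max();   // "rounded down to max"
    return false;
  }
  if (in < std::numeric_limits<int32_t>::min()) {
    out = std::numeric_limits<int32_t>::min();   // "rounded up to min"
    return false;
  }
  out = static_cast<int32_t>(in);                // exact, like a plain Cast
  return true;
}

int main()
{
  int32_t v = 0;
  std::cout << narrowToInt32(12345, v) << ' ' << v << '\n';         // 1 12345
  std::cout << narrowToInt32(6000000000LL, v) << ' ' << v << '\n';  // 0 2147483647
  return 0;
}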
コード例 #14
// The BiRelat for which the following is called will be a predicate for one
// of the endpoints of an MDAM_BETWEEN.
void BiRelat::getMdamPredDetails(Generator* generator,
                                 MdamCodeGenHelper& mdamHelper, 
                                 MdamPred::MdamPredType& predType,
                                 ex_expr** vexpr)
{
  // Find out what kind of predicate this is. Inequality preds are not inverted
  // for descending keys here; instead, the endpoints of the MDAM_BETWEEN
  // interval are switched during creation of the mdam network in the executor.
  switch (getOperatorType())
    {
      case ITM_LESS:
        predType = MdamPred::MDAM_LT;
        break;
      
      case ITM_LESS_EQ:
        predType = MdamPred::MDAM_LE;
        break;
      
      case ITM_GREATER:
        predType = MdamPred::MDAM_GT;
        break;
      
      case ITM_GREATER_EQ:
        predType = MdamPred::MDAM_GE;
        break;
      
      default:
        GenAssert(0, "mdamPredGen: invalid comparison for subrange.");
        break;
    }
  
  ItemExpr* child0 = child(0);
  ItemExpr* child1 = child(1);
  ValueId keyColumn = mdamHelper.getKeyColumn();
  
  // Canonical form used by rangespec is <key> <compare> <value>.
  ItemExpr* keyValue = child1;
  GenAssert(child0->getValueId() == keyColumn,
            "mdamPredGen:  unexpected form for key predicate.");

  // generate an expression to convert the key value to the
  // type of the key column (in its key buffer) and encode it
  ItemExpr* vnode = NULL;
  
  // errorsCanOccur() determines if errors can occur converting the class
  // datatype to the target datatype.  The object on whose behalf the
  // member function is called is expected to be a NAType.
  NABoolean generateNarrow = 
      keyValue->getValueId().getType().errorsCanOccur(*mdamHelper.getTargetType());

#ifdef _DEBUG
  if ((generateNarrow) &&
      (getenv("NO_NARROWS"))) // for testing -- allows turning off Narrows
    generateNarrow = FALSE; 
#endif

  if (generateNarrow)
    vnode = new(generator->wHeap())
              Narrow(keyValue,
                     mdamHelper.getDataConversionErrorFlag(),
                     mdamHelper.getTargetType()->newCopy(generator->wHeap()));
  else
    vnode = new(generator->wHeap()) 
              Cast(keyValue,
                   mdamHelper.getTargetType()->newCopy(generator->wHeap()));

  vnode->bindNode(generator->getBindWA());
  vnode->preCodeGen(generator);

#pragma nowarn(1506)  // warning elimination 
  vnode = new(generator->wHeap()) CompEncode(vnode,mdamHelper.isDescending());
#pragma warn(1506)    // warning elimination 
  vnode->bindNode(generator->getBindWA());
  
  ValueIdList vnodeList;
  vnodeList.insert(vnode->getValueId());
  
  ULng32 dummyLen = 0;
  
  short rc = 
       generator->getExpGenerator()
                ->generateContiguousMoveExpr(vnodeList,
                                             0, // don't add convert nodes
                                             mdamHelper.getAtp(),
                                             mdamHelper.getAtpIndex(),
                                             mdamHelper.getTupleDataFormat(),
                                             dummyLen, // out
                                             vexpr);
       
  GenAssert(rc == 0, "generateContiguousMoveExpr() returned error when called "
                     "from BiRelat::getMdamPredDetails().");
}
コード例 #15
short BiRelat::mdamPredGen(Generator * generator,
                                MdamPred ** head,
                                MdamPred ** tail,
                                MdamCodeGenHelper & mdamHelper,
                                ItemExpr * parent)
{
  short rc = 0;     // assume success
  
  enum MdamPred::MdamPredType predType = 
    MdamPred::MDAM_EQ; // just to initialize

  // Find out what kind of predicate this is.  Note that for DESCending
  // columns, we reverse the direction of any comparison.
  switch (getOperatorType())
    {
      case ITM_EQUAL:
      {
        predType = MdamPred::MDAM_EQ;
        break;
      }
      
      case ITM_LESS:
      {
        predType = MdamPred::MDAM_LT;
        if (mdamHelper.isDescending()) predType = MdamPred::MDAM_GT;
        break;
      }
      
      case ITM_LESS_EQ:
      {
        predType = MdamPred::MDAM_LE;
        if (mdamHelper.isDescending()) predType = MdamPred::MDAM_GE;
        break;
      }
      
      case ITM_GREATER:
      {
        predType = MdamPred::MDAM_GT;
        if (mdamHelper.isDescending()) predType = MdamPred::MDAM_LT;
        break;
      }
      
      case ITM_GREATER_EQ:
      {
        predType = MdamPred::MDAM_GE;
        if (mdamHelper.isDescending()) predType = MdamPred::MDAM_LE;
        break;
      }
      
      case ITM_ITEM_LIST:
      {
        GenAssert(0, "mdamPredGen: encountered multivalued predicate.");
        break;
      }

      default:
      {
        GenAssert(0, "mdamPredGen: unsupported comparison.");
        break;
      }
    }
  
  ItemExpr * child0 = child(0);
  ItemExpr * child1 = child(1);
  ValueId keyColumn = mdamHelper.getKeyColumn();
  
  //  assume predicate is <key> <compare> <value>
  ItemExpr * keyValue = child1;

  if (child1->getValueId() == keyColumn)
    {
      // we guessed wrong -- predicate is <value> <compare> <key>
      keyValue = child0;
      GenAssert(child0->getValueId() != keyColumn,
        "mdamPredGen:  unexpected form for key predicate.");
      // Reverse the comparison operator if it is <, <=, > or >=.
      switch (predType)
        {
          case MdamPred::MDAM_LT:
          {
            predType = MdamPred::MDAM_GT;
            break;
          }

          case MdamPred::MDAM_LE:
          {
            predType = MdamPred::MDAM_GE;
            break;
          }

          case MdamPred::MDAM_GT:
          {
            predType = MdamPred::MDAM_LT;
            break;
          }

          case MdamPred::MDAM_GE:
          {
            predType = MdamPred::MDAM_LE;
            break;
          }
        }  // switch (predType)
    }
  else
    {
      GenAssert(child0->getValueId() == keyColumn,
        "mdamPredGen:  unexpected form for key predicate.");
    }
  
  // generate an expression to convert the key value to the
  // type of the key column (in its key buffer) and encode it

  ItemExpr * vnode = 0;
  
  // errorsCanOccur() determines if errors can occur converting the class
  // datatype to the target datatype.  The object on whose behalf the
  // member function is called is expected to be a NAType.

  NABoolean generateNarrow = 
    keyValue->getValueId().getType().errorsCanOccur(*mdamHelper.getTargetType());

#ifdef _DEBUG
  if ((generateNarrow) &&
      (getenv("NO_NARROWS"))) // for testing -- allows turning off Narrows
    generateNarrow = FALSE; 
#endif

  if (generateNarrow)
    {
      vnode = new(generator->wHeap()) Narrow(keyValue,
                                 mdamHelper.getDataConversionErrorFlag(),
                                 mdamHelper.getTargetType()->newCopy(generator->wHeap()));
    }
  else
    {
      vnode = new(generator->wHeap()) Cast(keyValue,mdamHelper.getTargetType()->newCopy(generator->wHeap()));
    }
  vnode->bindNode(generator->getBindWA());
  vnode->preCodeGen(generator);

#pragma nowarn(1506)   // warning elimination 
  vnode = new(generator->wHeap()) CompEncode(vnode,mdamHelper.isDescending());
#pragma warn(1506)  // warning elimination 
  vnode->bindNode(generator->getBindWA());
  
  ValueIdList vnodeList;
  vnodeList.insert(vnode->getValueId());
  
  ex_expr *vexpr = 0;
  ULng32 dummyLen = 0;
  
  rc = generator->getExpGenerator()->generateContiguousMoveExpr(
       vnodeList,
       0, // don't add convert nodes
       mdamHelper.getAtp(),
       mdamHelper.getAtpIndex(),
       mdamHelper.getTupleDataFormat(),
       dummyLen, // out
       &vexpr);

#pragma nowarn(1506)   // warning elimination   
  *head = *tail = new(generator->getSpace()) MdamPred(
       mdamHelper.getDisjunctNumber(),
       predType,
       vexpr);
#pragma warn(1506)  // warning elimination 
  
  return rc;
}
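When the predicate arrives as <value> <compare> <key> rather than <key> <compare> <value>, the code above mirrors the comparison operator before building the MdamPred. The mirroring itself is just a fixed mapping; a tiny standalone sketch (hypothetical enum, not MdamPred::MdamPredType):

#include <iostream>

enum Pred { P_EQ, P_LT, P_LE, P_GT, P_GE };

// Mirror an operator when key and value switch sides:
// value < key is the same predicate as key > value, and so on.
Pred mirror(Pred p)
{
  switch (p) {
    case P_LT: return P_GT;
    case P_LE: return P_GE;
    case P_GT: return P_LT;
    case P_GE: return P_LE;
    default:   return p;     // equality is symmetric
  }
}

int main()
{
  std::cout << (mirror(P_LT) == P_GT)
            << (mirror(P_GE) == P_LE)
            << (mirror(P_EQ) == P_EQ) << '\n';   // prints 111
  return 0;
}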
コード例 #16
// -----------------------------------------------------------------------
// make an IndexDesc from an existing TableDesc and an NAFileSet
// -----------------------------------------------------------------------
IndexDesc::IndexDesc(TableDesc *tdesc, 
                     NAFileSet *fileSet, 
                     CmpContext* cmpContext)
     : tableDesc_(tdesc), clusteringIndexFlag_(FALSE), 
       identityColumnUniqueIndexFlag_(FALSE), partFunc_(NULL),
       fileSet_(fileSet), cmpContext_(cmpContext), scanBasicCosts_(NULL)
{
  DCMPASSERT( tdesc != NULL AND fileSet != NULL );

  Lng32 ixColNumber;
  ValueId keyValueId;
  ValueId baseValueId;

  const NATable *naTable = tdesc->getNATable();

  indexLevels_ = fileSet_->getIndexLevels();

  // ---------------------------------------------------------------------
  // Make the column list for the index or vertical partition.
  // Any reference to index also holds for vertical partitions.
  // ---------------------------------------------------------------------
  const NAColumnArray & allColumns = fileSet_->getAllColumns();

  // any index gets a new set of IndexColumn
  // item expressions and new value ids
  CollIndex i = 0;
  for (i = 0; i < allColumns.entries(); i++)
    {
      ItemExpr *baseItemExpr = NULL;

      // make a new IndexColumn item expression, indicate how it is
      // defined (in terms of base table columns) and give a value
      // id to the new IndexColumn expression
      if (allColumns[i]->getPosition() >= 0)
	{
	  baseValueId =
	    tdesc->getColumnList()[allColumns[i]->getPosition()];
	  baseItemExpr = baseValueId.getItemExpr();
	}
      else
	{
	  // this column doesn't exist in the base table.
	  // This is the KEYTAG column of sql/mp indices.
	  ItemExpr * keytag = new(wHeap())
            NATypeToItem((NAType *)(allColumns[i]->getType()));
	  keytag->synthTypeAndValueId();
	  baseValueId = keytag->getValueId();

	  baseItemExpr = NULL;
	}

#pragma nowarn(1506)   // warning elimination 
      IndexColumn *ixcol = new(wHeap()) IndexColumn(fileSet_,i,baseValueId);
#pragma warn(1506)  // warning elimination 
      ixcol->synthTypeAndValueId();

      // add the newly obtained value id to the index column list
      indexColumns_.insert(ixcol->getValueId());

      // if the index column is defined as a 1:1 copy of a base
      // column, add it as an equivalent index column (EIC) to the
      // base column item expression
      if ((baseItemExpr) &&
	  (baseItemExpr->getOperatorType() == ITM_BASECOLUMN))
	((BaseColumn *) baseItemExpr)->addEIC(ixcol->getValueId());
    }

  // ---------------------------------------------------------------------
  // make the list of access key columns in the index and make a list
  // of the order that the index provides
  // ---------------------------------------------------------------------
  const NAColumnArray & indexKeyColumns = fileSet_->getIndexKeyColumns();
  for (i = 0; i < indexKeyColumns.entries(); i++)
    {
      // which column of the index is this (usually this will be == i)
#pragma nowarn(1506)   // warning elimination 

      if ( !naTable->isHbaseTable() )
         ixColNumber = allColumns.index(indexKeyColumns[i]);
      else {
         // For Hbase tables, a new NAColumn is created for every column
         // in an index. The above pointer-based lookup for the key column
         // in the base table will only find the index column itself. The
         // fix is to look it up by column name and type, as is
         // implemented by the getColumnPosition() method.
         ixColNumber = allColumns.getColumnPosition(*indexKeyColumns[i]);
         CMPASSERT(ixColNumber >= 0);
      }

#pragma warn(1506)  // warning elimination 

      // insert the value id of the index column into the key column
      // value id list
      keyValueId = indexColumns_[ixColNumber];
      indexKey_.insert(keyValueId);

      // insert the same value id into the order list, if the column
      // is in ascending order, otherwise insert the inverse of the
      // column
      if (indexKeyColumns.isAscending(i))
	{
	  orderOfKeyValues_.insert(keyValueId);
	}
      else
	{
	  InverseOrder *invExpr = new(wHeap())
	    InverseOrder(keyValueId.getItemExpr());
	  invExpr->synthTypeAndValueId();
	  orderOfKeyValues_.insert(invExpr->getValueId());
	}
    }

  markIdentityColumnUniqueIndex(tdesc);

  // ---------------------------------------------------------------------
  // Find the clustering key columns in the index and store their value
  // ids in clusteringKey_
  // ---------------------------------------------------------------------
  NABoolean found = TRUE;
  const NAColumnArray & clustKeyColumns =
                      naTable->getClusteringIndex()->getIndexKeyColumns();

  for (i = 0; i < clustKeyColumns.entries() AND found; i++)
    {
      // which column of the index is this?
#pragma nowarn(1506)   // warning elimination 
      ixColNumber = allColumns.index(clustKeyColumns[i]);
#pragma warn(1506)  // warning elimination 

      found = (ixColNumber != NULL_COLL_INDEX);

      if (found)
	{
	  // insert the value id of the index column into the clustering key
	  // value id list
	  keyValueId = indexColumns_[ixColNumber];
	  clusteringKey_.insert(keyValueId);
	}
      else
	{
	  // clustering key isn't contained in this index, clear the
	  // list that is supposed to indicate the clustering key
	  clusteringKey_.clear();
	}
    }

  // ---------------------------------------------------------------------
  // make the list of partitioning key columns in the index and make a list
  // of the order that the partitioning provides
  // ---------------------------------------------------------------------
  const NAColumnArray & partitioningKeyColumns 
                                    = fileSet_->getPartitioningKeyColumns();
  for (i = 0; i < partitioningKeyColumns.entries(); i++)
    {
      // which column of the index is this 
#pragma nowarn(1506)   // warning elimination 
      ixColNumber = allColumns.index(partitioningKeyColumns[i]);
#pragma warn(1506)  // warning elimination 

      // insert the value id of the index column into the partitioningkey column
      // value id list
      keyValueId = indexColumns_[ixColNumber];
      partitioningKey_.insert(keyValueId);

      // insert the same value id into the order list, if the column
      // is in ascending order, otherwise insert the inverse of the
      // column
      if (partitioningKeyColumns.isAscending(i))
	{
	  orderOfPartitioningKeyValues_.insert(keyValueId);
	}
      else
	{
	  InverseOrder *invExpr = new(wHeap())
	    InverseOrder(keyValueId.getItemExpr());
	  invExpr->synthTypeAndValueId();
	  orderOfPartitioningKeyValues_.insert(invExpr->getValueId());
	}
    }

  // ---------------------------------------------------------------------
  // If this index is partitioned, find the partitioning key columns
  // and build a partitioning function.
  // ---------------------------------------------------------------------
  if ((fileSet_->getCountOfFiles() > 1) ||
      (fileSet_->getPartitioningFunction() &&
       fileSet_->getPartitioningFunction()->
       isARoundRobinPartitioningFunction()))
    partFunc_ = fileSet_->getPartitioningFunction()->
      createPartitioningFunctionForIndexDesc(this);
  
} // IndexDesc::IndexDesc()
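One detail worth noting in the constructor above: the clustering key list survives only if every clustering key column can be located among the index columns; the first miss clears the whole list. A standalone sketch of that coverage check (plain STL containers, not NAColumnArray/ValueIdList):

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

// Collect the positions of the clustering key columns within the index's
// column list; if any clustering key column is missing, return an empty
// result, mirroring how clusteringKey_ is cleared above.
std::vector<size_t> clusteringKeyPositions(const std::vector<std::string> &indexCols,
                                           const std::vector<std::string> &clustKeyCols)
{
  std::vector<size_t> positions;
  for (const auto &keyCol : clustKeyCols) {
    auto it = std::find(indexCols.begin(), indexCols.end(), keyCol);
    if (it == indexCols.end())
      return {};                              // clustering key not fully covered
    positions.push_back(static_cast<size_t>(it - indexCols.begin()));
  }
  return positions;
}

int main()
{
  std::vector<std::string> indexCols = {"b", "a", "syskey"};
  std::cout << clusteringKeyPositions(indexCols, {"a", "syskey"}).size() << '\n';  // 2
  std::cout << clusteringKeyPositions(indexCols, {"a", "c"}).size() << '\n';       // 0
  return 0;
}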
コード例 #17
short RangePartitioningFunction::codeGen(Generator *generator,
					 Lng32 partInputDataLength)
{
  ExpGenerator         * exp_gen = generator->getExpGenerator();
  Lng32                 myOwnPartInputDataLength;

  const Int32            pivMoveAtp = 0; // only one atp is used for this expr
  const Int32            pivMoveAtpIndex = 2; // 0: consts, 1: temps, 2: result
  const ExpTupleDesc::TupleDataFormat pivFormat = // format of PIVs
                          ExpTupleDesc::SQLARK_EXPLODED_FORMAT;
  ex_cri_desc          *partInputCriDesc = new(generator->getSpace())
                           ex_cri_desc(pivMoveAtpIndex+1,
				       generator->getSpace());
  ExpTupleDesc         *partInputTupleDesc;
  ExRangePartInputData *generatedObject = NULL;

  // get the list of partition input variables
  ValueIdList          piv(getPartitionInputValuesLayout());

  CollIndex numPartInputs = piv.entries();
  CollIndex numPartKeyCols = (numPartInputs - 1) / 2;
  // the number of partition input variables must be odd
  GenAssert(2*numPartKeyCols+1 == numPartInputs,
	    "NOT 2*numPartKeyCols+1 == numPartInputs");

  Attributes **begEndAttrs;
  Int32 alignedPartKeyLen;

  // make a layout of the partition input data record
  generatePivLayout(
       generator,
       myOwnPartInputDataLength,
       pivMoveAtp,
       pivMoveAtpIndex,
       &begEndAttrs);
  
  // the aligned part key length is where the end key values start
  alignedPartKeyLen = (Int32) begEndAttrs[numPartKeyCols]->getOffset();

  if (begEndAttrs[numPartKeyCols]->getNullIndicatorLength() > 0)
    alignedPartKeyLen = MINOF(
	 alignedPartKeyLen,
	 (Int32)begEndAttrs[numPartKeyCols]->getNullIndOffset());
    
  if (begEndAttrs[numPartKeyCols]->getVCIndicatorLength() > 0)
    alignedPartKeyLen = MINOF(
	 alignedPartKeyLen,
	 begEndAttrs[numPartKeyCols]->getVCLenIndOffset());
    
  // generate a tuple desc for the whole PIV record and a cri desc
  partInputTupleDesc = new(generator->getSpace()) ExpTupleDesc(
       numPartInputs,
       begEndAttrs,
       myOwnPartInputDataLength,
       pivFormat,
       ExpTupleDesc::LONG_FORMAT,
       generator->getSpace());
  partInputCriDesc->setTupleDescriptor(pivMoveAtpIndex,partInputTupleDesc);

  // make sure we fulfill the assertions we made

  // optimizer and generator should agree on the part input data length
  GenAssert(partInputDataLength == (Lng32) myOwnPartInputDataLength,
	    "NOT partInputDataLength == myOwnPartInputDataLength");
  // the length of the begin key and the end key must be the same
  // (compare offsets of their last fields)
// Commented out because this check does not work. The check needs
// to compute the LENGTH of each key field, by subtracting the current
// offset from the next offset, taking into account varchar length
// and null indicator fields (which are not part of the length but
// increase the offset).
//GenAssert(begEndAttrs[numPartKeyCols-1]->getOffset() + alignedPartKeyLen ==
//  begEndAttrs[2*numPartKeyCols-1]->getOffset(),
//    "begin/end piv keys have different layouts");

#pragma nowarn(1506)   // warning elimination 
  generatedObject = new(generator->getSpace()) ExRangePartInputData(
       partInputCriDesc,
       partInputDataLength,
       alignedPartKeyLen, //len of one part key + filler
       begEndAttrs[numPartInputs-1]->getOffset(),//offset of last field
       getCountOfPartitions(),
       generator->getSpace(),
       TRUE); // uses expressions to calculate ranges in the executor
  generatedObject->setPartitionExprAtp(pivMoveAtp);
  generatedObject->setPartitionExprAtpIndex(pivMoveAtpIndex);
#pragma warn(1506)  // warning elimination 

  // now fill in the individual partition boundaries
  // (NOTE: there is one more than there are partitions)
  ULng32 boundaryDataLength = 0;
  for (Lng32 i = 0; i <= getCountOfPartitions(); i++)
    {
      const ItemExprList *iel = partitionBoundaries_->getBoundaryValues(i);
      ex_expr * generatedExpr = NULL;

      ValueIdList boundaryColValues;
      ULng32 checkedBoundaryLength;

      // convert the ItemExpressionList iel into a ValueIdList
      for (CollIndex kc = 0; kc < iel->entries(); kc++)
	{
	  ItemExpr *boundaryVal = (*iel)[kc];

	  // create a cast node to convert the boundary value to the
	  // data type of the column
	  ItemExpr *castBoundaryVal =
	    new(generator->wHeap()) Cast(boundaryVal,&piv[kc].getType());

	  castBoundaryVal->bindNode(generator->getBindWA());

	  boundaryColValues.insert(castBoundaryVal->getValueId());
	}

      // Now generate a contiguous move expression. Only for the first time
      // generate a tuple desc, since all tuples should be the same.
      exp_gen->generateContiguousMoveExpr(
	   boundaryColValues,
	   0, // cast nodes created above will do the move, no conv nodes
	   pivMoveAtp,
	   pivMoveAtpIndex,
	   pivFormat,
	   checkedBoundaryLength,
	   &generatedExpr);

      if (i == 0)
	{
	  // first time set the actual part key data length
	  boundaryDataLength = checkedBoundaryLength;
	}
      else
	{
	  // all boundary values (piv tuples) must have the same layout
	  // and therefore the same length
	  GenAssert(boundaryDataLength == checkedBoundaryLength,
		    "Partition boundary tuple layout mismatch");
	}

      generatedObject->setPartitionStartExpr(i,generatedExpr);
    }

  NADELETEBASIC(begEndAttrs, generator->wHeap());
  generator->setGenObj(NULL, (ComTdb*)generatedObject);
  return 0;
}
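The boundary move expressions generated above exist so that, at run time, a key can be mapped to a partition by finding which pair of adjacent boundaries it falls between; as noted in the code, there is one more boundary than there are partitions. A standalone sketch of that lookup over already-encoded (byte-comparable) keys, using std::upper_bound rather than the ExRangePartInputData machinery:

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

// Given N+1 encoded boundaries for N partitions (boundaries[0] is the lowest
// possible key, boundaries[N] an upper sentinel), return the 0-based partition
// whose half-open range [boundaries[i], boundaries[i+1]) contains the key.
int partitionFor(const std::vector<std::string> &boundaries, const std::string &key)
{
  // first boundary strictly greater than the key
  auto it = std::upper_bound(boundaries.begin() + 1, boundaries.end(), key);
  return static_cast<int>(it - boundaries.begin()) - 1;
}

int main()
{
  // 3 partitions => 4 boundaries, assumed to be already key-encoded so that
  // plain byte comparison gives the right order
  std::vector<std::string> boundaries = {"", "g", "p", "\x7f"};
  std::cout << partitionFor(boundaries, "apple") << '\n';   // 0
  std::cout << partitionFor(boundaries, "mango") << '\n';   // 1
  std::cout << partitionFor(boundaries, "zebra") << '\n';   // 2
  return 0;
}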
コード例 #18
static short ft_codegen(Generator *generator,
                        RelExpr &relExpr,
                        ComTdbFastExtract *&newTdb,
                        Cardinality estimatedRowCount,
                        char * targetName,
                        char * hdfsHost,
                        Int32 hdfsPort,
                        char * hiveTableName,
                        char * delimiter,
                        char * header,
                        char * nullString,
                        char * recordSeparator,
                        ULng32 downQueueMaxSize,
                        ULng32 upQueueMaxSize,
                        ULng32 outputBufferSize,
                        ULng32 requestBufferSize,
                        ULng32 replyBufferSize,
                        ULng32 numOutputBuffers,
                        ComTdb * childTdb,
                        NABoolean isSequenceFile)
{
  CmpContext *cmpContext = generator->currentCmpContext();
  Space *space = generator->getSpace();
  ExpGenerator *exp_gen = generator->getExpGenerator();
  MapTable *map_table = generator->getMapTable();
  MapTable *last_map_table = generator->getLastMapTable();
  ex_expr *input_expr = NULL;
  ex_expr *output_expr = NULL;
  ex_expr * childData_expr = NULL ;
  ex_expr * cnvChildData_expr = NULL ;
  ULng32 i;
  ULng32 requestRowLen = 0;
  ULng32 outputRowLen = 0;
  ULng32 childDataRowLen = 0;
  ULng32 cnvChildDataRowLen = 0;
  ExpTupleDesc *requestTupleDesc = NULL;

  ExpTupleDesc *outputTupleDesc = NULL;
  ExpTupleDesc *childDataTupleDesc = NULL;
  ExpTupleDesc *cnvChildDataTupleDesc = NULL;
  newTdb = NULL;

  OperatorTypeEnum relExprType = relExpr.getOperatorType();
  GenAssert(relExprType == REL_FAST_EXTRACT, "Unexpected RelExpr at FastExtract codegen");
  FastExtract * fastExtract = (FastExtract *) &relExpr;

  const Int32 workAtpNumber = 1;
  ex_cri_desc *given_desc = generator->getCriDesc(Generator::DOWN);
  ex_cri_desc *returned_desc = NULL;
  ex_cri_desc *work_cri_desc = NULL;

  returned_desc = given_desc;

  // Setup local variables related to the work ATP
  unsigned short numWorkTupps = 0;
  unsigned short childDataTuppIndex = 0;
  unsigned short cnvChildDataTuppIndex = 0;

  numWorkTupps = 3;
  childDataTuppIndex = numWorkTupps - 1 ;
  numWorkTupps ++;
  cnvChildDataTuppIndex = numWorkTupps - 1;
  work_cri_desc = new (space) ex_cri_desc(numWorkTupps, space);

  ExpTupleDesc::TupleDataFormat childReqFormat = ExpTupleDesc::SQLMX_ALIGNED_FORMAT;

  ValueIdList childDataVids;
  ValueIdList cnvChildDataVids;
  const ValueIdList& childVals = fastExtract->getSelectList();

  const NATable *hiveNATable = NULL;
  const NAColumnArray *hiveNAColArray = NULL;

  // hiveInsertErrMode: 
  //    if 0, do not do error checks.
  //    if 1, do error check and return error.
  //    if 2, do error check and ignore row, if error
  //    if 3, insert null if an error occurs
  Lng32 hiveInsertErrMode = 0;
  if ((fastExtract) && (fastExtract->isHiveInsert()) &&
      (fastExtract->getHiveTableDesc()) &&
      (fastExtract->getHiveTableDesc()->getNATable()) &&
      ((hiveInsertErrMode = CmpCommon::getDefaultNumeric(HIVE_INSERT_ERROR_MODE)) > 0))
    {
      hiveNATable = fastExtract->getHiveTableDesc()->getNATable();
      hiveNAColArray = &hiveNATable->getNAColumnArray();
    }

  for (i = 0; i < childVals.entries(); i++)
  {
    ItemExpr &inputExpr = *(childVals[i].getItemExpr());
    const NAType &formalType = childVals[i].getType();
    ItemExpr *lmExpr = NULL;
    ItemExpr *lmExpr2 = NULL;
    int res;

    lmExpr = &inputExpr; 
    lmExpr = lmExpr->bindNode(generator->getBindWA());
    if (!lmExpr || generator->getBindWA()->errStatus())
      {
        GenAssert(0, "lmExpr->bindNode failed");
      }

    // Hive insert converts child data into string format and inserts
    // it into the target table.
    // If the child type can run into an error during conversion, then
    // add a Cast to convert from the child type to the target type before
    // converting to the string format to be inserted.
    if (hiveNAColArray)
      {
        const NAColumn *hiveNACol = (*hiveNAColArray)[i];
        const NAType *hiveNAType = hiveNACol->getType();
        // if tgt type was a hive 'string', do not return a conversion err
        if ((lmExpr->getValueId().getType().errorsCanOccur(*hiveNAType)) &&
            (NOT ((DFS2REC::isSQLVarChar(hiveNAType->getFSDatatype())) &&
                  (((SQLVarChar*)hiveNAType)->wasHiveString()))))
          {
            ItemExpr *newExpr = 
              new(generator->wHeap()) Cast(lmExpr, hiveNAType);
            newExpr = newExpr->bindNode(generator->getBindWA());
            if (!newExpr || generator->getBindWA()->errStatus())
              {
                GenAssert(0, "newExpr->bindNode failed");
              }
            
            if (hiveInsertErrMode == 3)
              ((Cast*)newExpr)->setConvertNullWhenError(TRUE);
            
            lmExpr = newExpr;
          }
      }

    res = CreateAllCharsExpr(formalType, // [IN] Child output type
        *lmExpr, // [IN] Actual input value
        cmpContext, // [IN] Compilation context
        lmExpr2 // [OUT] Returned expression
        );

    GenAssert(res == 0 && lmExpr != NULL,
        "Error building expression tree for LM child Input value");

    childDataVids.insert(lmExpr->getValueId());
    if (lmExpr2)
    {
      lmExpr2->bindNode(generator->getBindWA());
      cnvChildDataVids.insert(lmExpr2->getValueId());
    }


  } // for (i = 0; i < childVals.entries(); i++)

  if (childDataVids.entries() > 0 &&
    cnvChildDataVids.entries()>0)  //-- convertedChildDataVids
  {
    UInt16 pcm = exp_gen->getPCodeMode();
    if ((hiveNAColArray) &&
        (hiveInsertErrMode == 3))
      {
        // if error mode is 3 (mode null when error), disable pcode.
        // this feature is currently not being handled by pcode.
        // (added as part of JIRA 1920 in FileScan::codeGenForHive).
        exp_gen->setPCodeMode(ex_expr::PCODE_NONE);
      }

    exp_gen->generateContiguousMoveExpr (
      childDataVids,                         //childDataVids// [IN] source ValueIds
      TRUE,                                 // [IN] add convert nodes?
      workAtpNumber,                        // [IN] target atp number (0 or 1)
      childDataTuppIndex,                   // [IN] target tupp index
      childReqFormat,                       // [IN] target tuple data format
      childDataRowLen,                      // [OUT] target tuple length
      &childData_expr,                  // [OUT] move expression
      &childDataTupleDesc,                  // [optional OUT] target tuple desc
      ExpTupleDesc::LONG_FORMAT             // [optional IN] target desc format
      );

    exp_gen->setPCodeMode(pcm);

    exp_gen->processValIdList (
       cnvChildDataVids,                              // [IN] ValueIdList
       ExpTupleDesc::SQLARK_EXPLODED_FORMAT,  // [IN] tuple data format
       cnvChildDataRowLen,                          // [OUT] tuple length
       workAtpNumber,                                     // [IN] atp number
       cnvChildDataTuppIndex,         // [IN] index into atp
       &cnvChildDataTupleDesc,                      // [optional OUT] tuple desc
       ExpTupleDesc::LONG_FORMAT              // [optional IN] tuple desc format
       );
  }
  //
  // Add the tuple descriptors for the child data values to the work ATP
  //
  work_cri_desc->setTupleDescriptor(childDataTuppIndex, childDataTupleDesc);
  work_cri_desc->setTupleDescriptor(cnvChildDataTuppIndex, cnvChildDataTupleDesc);

  // We can now remove all appended map tables
  generator->removeAll(last_map_table);



  ComSInt32 maxrs = 0;
  UInt32 flags = 0;
  UInt16 numIoBuffers = (UInt16)(ActiveSchemaDB()->getDefaults()).getAsLong(FAST_EXTRACT_IO_BUFFERS);
  UInt16 ioTimeout = (UInt16)(ActiveSchemaDB()->getDefaults()).getAsLong(FAST_EXTRACT_IO_TIMEOUT_SEC);

  Int64 hdfsBufSize = (Int64)CmpCommon::getDefaultNumeric(HDFS_IO_BUFFERSIZE);
  hdfsBufSize = hdfsBufSize * 1024; // convert to bytes
  Int16 replication =  (Int16)CmpCommon::getDefaultNumeric(HDFS_REPLICATION);


  // Create a TDB
  ComTdbFastExtract *tdb = new (space) ComTdbFastExtract (
    flags,
    estimatedRowCount,
    targetName,
    hdfsHost,
    hdfsPort,
    hiveTableName,
    delimiter,
    header,
    nullString,
    recordSeparator,
    given_desc,
    returned_desc,
    work_cri_desc,
    downQueueMaxSize,
    upQueueMaxSize,
    (Lng32) numOutputBuffers,
    outputBufferSize,
    numIoBuffers,
    ioTimeout,
    input_expr,
    output_expr,
    requestRowLen,
    outputRowLen,
    childData_expr,
    childTdb,
    space,
    childDataTuppIndex,
    cnvChildDataTuppIndex,
    childDataRowLen,
    hdfsBufSize,
    replication
    );

  tdb->setSequenceFile(isSequenceFile);
  tdb->setHdfsCompressed(CmpCommon::getDefaultNumeric(TRAF_UNLOAD_HDFS_COMPRESS)!=0);

  tdb->setSkipWritingToFiles(CmpCommon::getDefault(TRAF_UNLOAD_SKIP_WRITING_TO_FILES) == DF_ON);
  tdb->setBypassLibhdfs(CmpCommon::getDefault(TRAF_UNLOAD_BYPASS_LIBHDFS) == DF_ON);

  if ((hiveNAColArray) &&
      (hiveInsertErrMode == 2))
    {
      tdb->setContinueOnError(TRUE);
    }

  generator->initTdbFields(tdb);

  // Generate EXPLAIN info.
  if (!generator->explainDisabled())
  {
    generator->setExplainTuple(relExpr.addExplainInfo(tdb, 0, 0, generator));
  }

  // Tell the generator about our in/out rows and the new TDB
  generator->setCriDesc(given_desc, Generator::DOWN);
  generator->setCriDesc(returned_desc, Generator::UP);
  generator->setGenObj(&relExpr, tdb);


  // Return a TDB pointer to the caller
  newTdb = tdb;

  return 0;

} // ft_codegen()
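
The HIVE_INSERT_ERROR_MODE handling in the listing above is easy to lose in the codegen details. The following is a self-contained sketch (the struct and function names are mine, not from the source) of how each mode maps onto the generated plan, per the comments and the setConvertNullWhenError / setContinueOnError calls shown above.

// Sketch only: summarizes the mode handling seen in ft_codegen() above.
struct HiveErrorPlan {
  bool addCastToTargetType;   // modes 1-3: pre-cast to the Hive column type so conversion
                              // errors surface before the data is formatted as text
  bool continueOnError;       // mode 2: tdb->setContinueOnError(TRUE), the bad row is skipped
  bool nullOnError;           // mode 3: Cast::setConvertNullWhenError(TRUE); PCODE is disabled
};

HiveErrorPlan planForErrorMode(int hiveInsertErrMode)
{
  HiveErrorPlan p;
  p.addCastToTargetType = (hiveInsertErrMode > 0);
  p.continueOnError     = (hiveInsertErrMode == 2);
  p.nullOnError         = (hiveInsertErrMode == 3);
  return p;
}
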
Code example #19
0
// getHistoryAttributes
//
// Helper function that traverses the set of root sequence functions
// supplied by the compiler and constructs the set of all of the
// attributes that must be materialized in the history row.
// 
void PhysSequence::getHistoryAttributes(const ValueIdSet &sequenceFunctions,
                                        const ValueIdSet &outputFromChild,
                                        ValueIdSet &historyAttributes,
                                        NABoolean addConvNodes,
                                        CollHeap *wHeap,
                                        ValueIdMap *origAttributes) const
{
  if(addConvNodes && !origAttributes) {
    origAttributes = new (wHeap) ValueIdMap();
  }

  ValueIdSet children;
  for(ValueId valId = sequenceFunctions.init();
      sequenceFunctions.next(valId);
      sequenceFunctions.advance(valId)) {

    if(valId.getItemExpr()->isASequenceFunction()) {
      ItemExpr *itmExpr = valId.getItemExpr();

      switch(itmExpr->getOperatorType())
        {
          // The child needs to be in the history row.
          //
        case ITM_OFFSET:
        case ITM_ROWS_SINCE:
        case ITM_THIS:
        case ITM_NOT_THIS:

          // If the child needs to be in the history buffer, then
          // add a Convert node to force the value to be moved to the
          // history buffer.
          if (addConvNodes)
            {
              itmExpr->child(0) = 
                addConvNode(itmExpr->child(0), origAttributes, wHeap);
            }
          historyAttributes += itmExpr->child(0)->getValueId();
          break;

          // The sequence function needs to be in the history row.
          //
        case ITM_RUNNING_SUM:
        case ITM_RUNNING_COUNT:
        case ITM_RUNNING_MIN:
        case ITM_RUNNING_MAX:
        case ITM_LAST_NOT_NULL:
          historyAttributes += itmExpr->getValueId();
          break;
/*
        // After PhysSequence preCodeGen, OLAP sum and count have already been transformed into
        // running functions; this case is only used during the optimization phase.
        case ITM_OLAP_SUM:
        case ITM_OLAP_COUNT:
        case ITM_OLAP_RANK:
        case ITM_OLAP_DRANK:
          if (addConvNodes)
            {
              itmExpr->child(0) = 
                addConvNode(itmExpr->child(0), origAttributes, wHeap);
            }

          historyAttributes += itmExpr->child(0)->getValueId();
          //historyAttributes += itmExpr->getValueId();	  
          break;
*/
          // The child and sequence function need to be in the history row.
          //
        case ITM_OLAP_MIN:
        case ITM_OLAP_MAX:
        case ITM_MOVING_MIN:
        case ITM_MOVING_MAX:

          // If the child needs to be in the history buffer, then
          // add a Convert node to force the value to be moved to the
          // history buffer.
          if (addConvNodes)
            {
              itmExpr->child(0) = 
                addConvNode(itmExpr->child(0), origAttributes, wHeap);
            }

          historyAttributes += itmExpr->child(0)->getValueId();
          historyAttributes += itmExpr->getValueId();	  
          break;

        case ITM_RUNNING_CHANGE:
          if (itmExpr->child(0)->getOperatorType() == ITM_ITEM_LIST)
            {
              // child is a multi-valued expression
              // 
              ExprValueId treePtr = itmExpr->child(0);

              ItemExprTreeAsList changeValues(&treePtr,
                                              ITM_ITEM_LIST,
                                              RIGHT_LINEAR_TREE);

              CollIndex nc = changeValues.entries();
              
              ItemExpr *newChild = NULL;
              if(addConvNodes) {
                newChild = addConvNode(changeValues[nc-1], origAttributes, wHeap);
                historyAttributes += newChild->getValueId();
              } else {
                historyAttributes += changeValues[nc-1]->getValueId();
              }

              // add each item in the list
              // 
              for (CollIndex i = nc; i > 0; i--)
                {
                  if(addConvNodes) {
                    ItemExpr *conv
                      = addConvNode(changeValues[i-1], origAttributes, wHeap);

                    newChild = new(wHeap) ItemList(conv, newChild);
                    newChild->synthTypeAndValueId(TRUE);
                    historyAttributes += conv->getValueId();
                  } else {
                    historyAttributes += changeValues[i-1]->getValueId();
                  }
                }

              if(addConvNodes) {
                itmExpr->child(0) = newChild;
              }
            }
          else
            {

              // If the child needs to be in the history buffer, then
              // add a Convert node to force the value to be moved to the
              // history buffer.
              if (addConvNodes)
                {
                  itmExpr->child(0) = 
                    addConvNode(itmExpr->child(0), origAttributes, wHeap);
                }

              historyAttributes += itmExpr->child(0)->getValueId();
            }

          historyAttributes += itmExpr->getValueId();  
          break;

        default:
          CMPASSERT(0);
        }
    }

    // Gather all the children, and if not empty, recurse down to the
    // next level of the tree.
    //
    for(Lng32 i = 0; i < valId.getItemExpr()->getArity(); i++) 
    {
      if (!outputFromChild.contains(valId.getItemExpr()->child(i)->getValueId()))
        //!valId.getItemExpr()->child(i)->nodeIsPreCodeGenned()) 
      {
        children += valId.getItemExpr()->child(i)->getValueId();
      }
    }
  }
  
  if (NOT children.isEmpty())
  {
    getHistoryAttributes( children,
                          outputFromChild,
                          historyAttributes, 
                          addConvNodes, 
                          wHeap, 
                          origAttributes);
  }

} // PhysSequence::getHistoryAttributes
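
As a quick reference, the switch above sorts the sequence operators into three groups: those whose operand must be readable from the history row, those whose own result must be, and those that need both. A minimal, self-contained sketch of that classification follows; the enumeration names are simplified stand-ins, not the real OperatorTypeEnum values.

#include <utility>

// Sketch only: condenses the switch in getHistoryAttributes() above.
enum class SeqOp { Offset, RowsSince, This, NotThis,
                   RunningSum, RunningCount, RunningMin, RunningMax, LastNotNull,
                   OlapMin, OlapMax, MovingMin, MovingMax, RunningChange };

// first = the operand (child) goes to the history row, second = the function's own result does
std::pair<bool, bool> historyNeeds(SeqOp op)
{
  switch (op) {
    case SeqOp::Offset: case SeqOp::RowsSince:
    case SeqOp::This:   case SeqOp::NotThis:       return {true,  false};
    case SeqOp::RunningSum: case SeqOp::RunningCount:
    case SeqOp::RunningMin: case SeqOp::RunningMax:
    case SeqOp::LastNotNull:                       return {false, true};
    case SeqOp::OlapMin:   case SeqOp::OlapMax:
    case SeqOp::MovingMin: case SeqOp::MovingMax:
    case SeqOp::RunningChange:                     return {true,  true};
  }
  return {false, false};
}
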
Code example #20
0
ItemExpr *ItmSeqOlapFunction::preCodeGen(Generator *generator)
{
  if (getOperatorType() != ITM_OLAP_MIN && getOperatorType() != ITM_OLAP_MAX)
  {
    GenAssert(0, "ItmSeqOlapFunction::preCodeGen -- Should never get here!");
    return 0;
  }


  if (nodeIsPreCodeGenned())
    return this;
  markAsPreCodeGenned();

  // Get some local handles...
  //
  CollHeap *wHeap = generator->wHeap();
  ItemExpr *itmChild = child(0)->castToItemExpr();
  //ItemExpr *itmWindow = child(1)->castToItemExpr();

  // What scalar operation needs to be done.
  //
  OperatorTypeEnum operation;
  if(getOperatorType() == ITM_OLAP_MIN) operation = ITM_SCALAR_MIN;
  else operation = ITM_SCALAR_MAX;

  // Allocate a HostVar for local storage of the index.
  //
  ItemExpr *itmLocalCounter 
    = new(wHeap) HostVar("_sys_LocalCounter",
			 new(wHeap) SQLInt(wHeap, TRUE,FALSE),
			 TRUE);

  // Expression to initialize the iterator.
  //
  ItemExpr *itmLocalCounterInitialize
              = new(wHeap) Assign(itmLocalCounter, 
                                  new(wHeap) ConstValue(frameStart_),
			          FALSE);

  // Expression to increment the iterator.
  //
  ItemExpr *itmLocalCounterIncrement
    = new(wHeap) Assign(itmLocalCounter,
			new(wHeap) BiArith(ITM_PLUS,
					   itmLocalCounter,
					   new (wHeap) ConstValue(1)),
			FALSE);

  // Allocate a HostVar for referencing the result before it is computed.
  //
  ItemExpr *itmResult 
    = new(wHeap) HostVar("_sys_Result",
			 getValueId().getType().newCopy(wHeap),
			 TRUE);

  // Expression to initialize the result.
  //
  ItemExpr *itmResultInitialize
    = new(wHeap) Assign(itmResult,
			new(wHeap) ConstValue());
			
  // Expression to compute the min/max.
  //

  ItemExpr * invCouter= new(wHeap) BiArith(ITM_MINUS,
                                                new (wHeap) ConstValue(0),
					        itmLocalCounter);
					   
  ItemExpr *  itmOffsetExpr = new(wHeap) ItmSeqOffset( itmChild, invCouter);


  //ItemExpr * itmOffsetIsNotNull = new (wHeap) UnLogic(ITM_IS_NOT_NULL, itmOffsetExpr);

  ((ItmSeqOffset *)itmOffsetExpr)->setIsOLAP(isOLAP());
  ItemExpr *itmResultUpdate
    = new(wHeap) Assign(itmResult,
			new(wHeap) ItmScalarMinMax(operation, 
						   itmResult, 
						   itmOffsetExpr));

  // Construct code blocks for the initialization and body for the while-loop
  //
  ItemExpr *itmInit 
    = new(wHeap) ItmBlockFunction(itmLocalCounterInitialize,
				  itmResultInitialize);
  ItemExpr *itmBody
    = new(wHeap) ItmBlockFunction(itmResultUpdate,
				  itmLocalCounterIncrement);
  
  // Construct the While loop (i < window)
  //
  ItemExpr *itmLoopCondition = new(wHeap) BiRelat
    (ITM_LESS_EQ, itmLocalCounter, new(wHeap) ConstValue(frameEnd_));
  
  if (isFrameEndUnboundedFollowing()) //(frameEnd_ == INT_MAX) -- not needed in other cases; can cause issues for the preceding part
  {
    ItemExpr *  itmOffset1 = new(wHeap) ItmSeqOffset( itmChild, invCouter,NULL,TRUE);
    ItemExpr * itmOffset1IsNotNull = new (wHeap) UnLogic(ITM_IS_NOT_NULL, itmOffset1);

    ((ItmSeqOffset *)itmOffset1)->setIsOLAP(isOLAP());

    itmLoopCondition = itmOffset1IsNotNull;
    //new (wHeap) BiLogic( ITM_AND,
                        //                  itmLoopCondition,
                       //                   itmOffset1IsNotNull);
  }
  ItemExpr *itmWhile 
    = new(wHeap) ItmWhileFunction(itmBody,
				    itmLoopCondition);
  
  
  // Construct the blocks to contain the initialization and looping.
  // The result is the final value of the min/max.
  //
  ItemExpr *itmBlock = new(wHeap) ItmBlockFunction
    (new(wHeap) ItmBlockFunction(itmInit, itmWhile),
     itmResult);
  
  // Replace the item for this value id with the new item expression.
  //
  getValueId().replaceItemExpr(itmBlock);

  // Run the new expression through type and value Id synthesis.
  //
  itmBlock->synthTypeAndValueId(TRUE);

  // Map the reference to the result to the actual result in the map table.
  //
  Attributes *attr =  generator->getMapInfo(itmBlock->getValueId())->getAttr();
  MapInfo *mapInfo = generator->addMapInfo(itmResult->getValueId(), attr);
  itmResult->markAsPreCodeGenned();
  mapInfo->codeGenerated();

  // Return the preCodeGen of the new expression.
  //
  return itmBlock->preCodeGen(generator);

}
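
Read procedurally, the expression tree built above evaluates the OLAP MIN/MAX over the frame [frameStart_, frameEnd_] by walking a counter through the frame and probing each row with OFFSET(child, -counter). Below is a self-contained sketch of the equivalent computation, assuming (as the ABS(frameStart) usage in computeHistoryRows later in this page suggests) that frameStart_ is negative for PRECEDING rows and positive for FOLLOWING rows; the sentinel initializer stands in for the NULL the real code starts from.

#include <algorithm>
#include <limits>
#include <vector>

// Sketch only: what the generated while-loop computes for OLAP MAX at row r.
// OFFSET(x, -i) reads the value of x at row r + i, so i runs over the frame offsets.
long olapMaxSketch(const std::vector<long>& x, long r, long frameStart, long frameEnd)
{
  long result = std::numeric_limits<long>::min();   // stands in for the NULL initializer
  for (long i = frameStart; i <= frameEnd; ++i) {   // itmLoopCondition: counter <= frameEnd_
    long pos = r + i;
    if (pos >= 0 && pos < (long)x.size())           // the real loop relies on OFFSET's null handling
      result = std::max(result, x[pos]);            // ItmScalarMinMax(ITM_SCALAR_MAX, ...)
  }
  return result;
}
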
Code example #21
0
File: GenFastTransport.cpp    Project: hadr4ros/core
static short ft_codegen(Generator *generator,
                        RelExpr &relExpr,
                        ComTdbFastExtract *&newTdb,
                        Cardinality estimatedRowCount,
                        char * targetName,
                        char * hdfsHost,
                        Int32 hdfsPort,
                        char * hiveTableName,
                        char * delimiter,
                        char * header,
                        char * nullString,
                        char * recordSeparator,
                        ULng32 downQueueMaxSize,
                        ULng32 upQueueMaxSize,
                        ULng32 outputBufferSize,
                        ULng32 requestBufferSize,
                        ULng32 replyBufferSize,
                        ULng32 numOutputBuffers,
                        ComTdb * childTdb,
                        NABoolean isSequenceFile)
{
  CmpContext *cmpContext = generator->currentCmpContext();
  Space *space = generator->getSpace();
  ExpGenerator *exp_gen = generator->getExpGenerator();
  MapTable *map_table = generator->getMapTable();
  MapTable *last_map_table = generator->getLastMapTable();
  ex_expr *input_expr = NULL;
  ex_expr *output_expr = NULL;
  ex_expr * childData_expr = NULL ;
  ex_expr * cnvChildData_expr = NULL ;
  ULng32 i;
  ULng32 requestRowLen = 0;
  ULng32 outputRowLen = 0;
  ULng32 childDataRowLen = 0;
  ULng32 cnvChildDataRowLen = 0;
  ExpTupleDesc *requestTupleDesc = NULL;

  ExpTupleDesc *outputTupleDesc = NULL;
  ExpTupleDesc *childDataTupleDesc = NULL;
  ExpTupleDesc *cnvChildDataTupleDesc = NULL;
  newTdb = NULL;

  OperatorTypeEnum relExprType = relExpr.getOperatorType();
  GenAssert(relExprType == REL_FAST_EXTRACT, "Unexpected RelExpr at FastExtract codegen")
  FastExtract * fastExtract = (FastExtract *) &relExpr;

  const Int32 workAtpNumber = 1;
  ex_cri_desc *given_desc = generator->getCriDesc(Generator::DOWN);
  ex_cri_desc *returned_desc = NULL;
  ex_cri_desc *work_cri_desc = NULL;

  returned_desc = given_desc;

  // Setup local variables related to the work ATP
  unsigned short numWorkTupps = 0;
  unsigned short childDataTuppIndex = 0;
  unsigned short cnvChildDataTuppIndex = 0;

  numWorkTupps = 3;
  childDataTuppIndex = numWorkTupps - 1 ;
  numWorkTupps ++;
  cnvChildDataTuppIndex = numWorkTupps - 1;
  work_cri_desc = new (space) ex_cri_desc(numWorkTupps, space);

  ExpTupleDesc::TupleDataFormat childReqFormat = ExpTupleDesc::SQLMX_ALIGNED_FORMAT;

  ValueIdList childDataVids;
  ValueIdList cnvChildDataVids;
  const ValueIdList& childVals = fastExtract->getSelectList();

  for (i = 0; i < childVals.entries(); i++)
  {
    ItemExpr &inputExpr = *(childVals[i].getItemExpr());
    const NAType &formalType = childVals[i].getType();
    ItemExpr *lmExpr = NULL;
    ItemExpr *lmExpr2 = NULL;
    int res;

    lmExpr = &inputExpr; //CreateCastExpr(inputExpr, *inputExpr.getValueId().getType().newCopy(), cmpContext);

    res = CreateAllCharsExpr(formalType, // [IN] Child output type
        *lmExpr, // [IN] Actual input value
        cmpContext, // [IN] Compilation context
        lmExpr2 // [OUT] Returned expression
        );

    GenAssert(res == 0 && lmExpr != NULL,
        "Error building expression tree for LM child Input value");

    lmExpr->bindNode(generator->getBindWA());
    childDataVids.insert(lmExpr->getValueId());
    if (lmExpr2)
    {
      lmExpr2->bindNode(generator->getBindWA());
      cnvChildDataVids.insert(lmExpr2->getValueId());
    }


  } // for (i = 0; i < childVals.entries(); i++)

  if (childDataVids.entries() > 0 &&
    cnvChildDataVids.entries()>0)  //-- convertedChildDataVids
  {
    exp_gen->generateContiguousMoveExpr (
      childDataVids,                         //childDataVids// [IN] source ValueIds
      TRUE,                                 // [IN] add convert nodes?
      workAtpNumber,                        // [IN] target atp number (0 or 1)
      childDataTuppIndex,                   // [IN] target tupp index
      childReqFormat,                       // [IN] target tuple data format
      childDataRowLen,                      // [OUT] target tuple length
      &childData_expr,                  // [OUT] move expression
      &childDataTupleDesc,                  // [optional OUT] target tuple desc
      ExpTupleDesc::LONG_FORMAT             // [optional IN] target desc format
      );

    exp_gen->processValIdList (
       cnvChildDataVids,                              // [IN] ValueIdList
       ExpTupleDesc::SQLARK_EXPLODED_FORMAT,  // [IN] tuple data format
       cnvChildDataRowLen,                          // [OUT] tuple length
       workAtpNumber,                                     // [IN] atp number
       cnvChildDataTuppIndex,         // [IN] index into atp
       &cnvChildDataTupleDesc,                      // [optional OUT] tuple desc
       ExpTupleDesc::LONG_FORMAT              // [optional IN] tuple desc format
       );
  }
  //
  // Add the tuple descriptors for the child data values to the work ATP
  //
  work_cri_desc->setTupleDescriptor(childDataTuppIndex, childDataTupleDesc);
  work_cri_desc->setTupleDescriptor(cnvChildDataTuppIndex, cnvChildDataTupleDesc);

  // We can now remove all appended map tables
  generator->removeAll(last_map_table);



  ComSInt32 maxrs = 0;
  UInt32 flags = 0;
  UInt16 numIoBuffers = (UInt16)(ActiveSchemaDB()->getDefaults()).getAsLong(FAST_EXTRACT_IO_BUFFERS);
  UInt16 ioTimeout = (UInt16)(ActiveSchemaDB()->getDefaults()).getAsLong(FAST_EXTRACT_IO_TIMEOUT_SEC);

  Int64 hdfsBufSize = (Int64)CmpCommon::getDefaultNumeric(HDFS_IO_BUFFERSIZE);
  hdfsBufSize = hdfsBufSize * 1024; // convert to bytes
  Int16 replication =  (Int16)CmpCommon::getDefaultNumeric(HDFS_REPLICATION);


  // Create a TDB
  ComTdbFastExtract *tdb = new (space) ComTdbFastExtract (
    flags,
    estimatedRowCount,
    targetName,
    hdfsHost,
    hdfsPort,
    hiveTableName,
    delimiter,
    header,
    nullString,
    recordSeparator,
    given_desc,
    returned_desc,
    work_cri_desc,
    downQueueMaxSize,
    upQueueMaxSize,
    (Lng32) numOutputBuffers,
    outputBufferSize,
    numIoBuffers,
    ioTimeout,
    input_expr,
    output_expr,
    requestRowLen,
    outputRowLen,
    childData_expr,
    childTdb,
    space,
    childDataTuppIndex,
    cnvChildDataTuppIndex,
    childDataRowLen,
    hdfsBufSize,
    replication
    );

  tdb->setSequenceFile(isSequenceFile);
  tdb->setHdfsCompressed(CmpCommon::getDefaultNumeric(TRAF_UNLOAD_HDFS_COMPRESS)!=0);

  tdb->setSkipWritingToFiles(CmpCommon::getDefault(TRAF_UNLOAD_SKIP_WRITING_TO_FILES) == DF_ON);
  tdb->setBypassLibhdfs(CmpCommon::getDefault(TRAF_UNLOAD_BYPASS_LIBHDFS) == DF_ON);
  generator->initTdbFields(tdb);

  // Generate EXPLAIN info.
  if (!generator->explainDisabled())
  {
    generator->setExplainTuple(relExpr.addExplainInfo(tdb, 0, 0, generator));
  }

  // Tell the generator about our in/out rows and the new TDB
  generator->setCriDesc(given_desc, Generator::DOWN);
  generator->setCriDesc(returned_desc, Generator::UP);
  generator->setGenObj(&relExpr, tdb);


  // Return a TDB pointer to the caller
  newTdb = tdb;

  return 0;

} // ft_codegen()
Code example #22
0
ItemExpr * buildEncodeTree(desc_struct * column,
                           desc_struct * key,
                           NAString * dataBuffer, //IN:contains original value
                           Generator * generator,
                           ComDiagsArea * diagsArea)
{
  ExpGenerator * expGen = generator->getExpGenerator();

  // values are encoded by evaluating the expression:
  //    encode (cast (<dataBuffer> as <datatype>))
  // where <dataBuffer> points to the string representation of the
  //      data value to be encoded, and <datatype> contains the
  //      PIC representation of the column's datatype.

  // create the CAST part of the expression using the parser.
  
  // if this is a nullable column and the key value passed in
  // is a NULL value, then treat it as a special case. A NULL value
  // is passed in as an unquoted string of characters NULL in the
  // dataBuffer. This case has to be treated differently since the
  // parser doesn't recognize the syntax "CAST (NULL as <datatype>)".
  NAString ns;
  ItemExpr * itemExpr;
  NABoolean nullValue = FALSE;

  NABoolean caseinsensitiveEncode = FALSE;
  if (column->body.columns_desc.caseinsensitive)
    caseinsensitiveEncode = TRUE;

  if (column->body.columns_desc.null_flag &&
      dataBuffer->length() >= 4 &&
      str_cmp(*dataBuffer, "NULL", 4) == 0)
    {
      nullValue = TRUE;

      ns = "CAST ( @A1 AS ";
      ns += column->body.columns_desc.pictureText;
      ns += ");";
  
      // create a NULL constant
      ConstValue * nullConst = new(expGen->wHeap()) ConstValue();
      nullConst->synthTypeAndValueId();

      itemExpr = expGen->createExprTree(ns,
                                        CharInfo::UTF8,
					ns.length(),
					1, nullConst);
    }
  else
    {
      ns = "CAST ( ";
      ns += *dataBuffer;
      ns += " AS ";
      ns += column->body.columns_desc.pictureText;
      ns += ");";
  
      itemExpr = expGen->createExprTree(ns,
                                        CharInfo::UTF8,
					ns.length());
    }

  CMPASSERT(itemExpr != NULL);
  ItemExpr *boundItemExpr =
  itemExpr->bindNode(generator->getBindWA());
  if (boundItemExpr == NULL)
    return NULL;

  // make sure that the source and target values have compatible type.
  // Do this only if source is not a null value.
  NAString srcval;
  srcval = "";
  srcval += *dataBuffer;
  srcval += ";";
  ItemExpr * srcNode = expGen->createExprTree(srcval, CharInfo::UTF8, srcval.length());
  CMPASSERT(srcNode != NULL);
  srcNode->synthTypeAndValueId();
  if ((NOT nullValue) &&
      (NOT srcNode->getValueId().getType().isCompatible(itemExpr->getValueId().getType())))
    {
      if (diagsArea)
	{
	  emitDyadicTypeSQLnameMsg(-4039, 
				   itemExpr->getValueId().getType(),
				   srcNode->getValueId().getType(),
				   column->body.columns_desc.colname,
				   NULL,
				   diagsArea);
	}

      return NULL;
    }

  if (column->body.columns_desc.null_flag)
    ((NAType *)&(itemExpr->getValueId().getType()))->setNullable(TRUE);
  else
    ((NAType *)&(itemExpr->getValueId().getType()))->setNullable(FALSE);
  
  // Explode varchars by moving them to a fixed field
  // whose length is equal to the max length of varchar.
  ////collation??
  DataType datatype = column->body.columns_desc.datatype;
  if (DFS2REC::isSQLVarChar(datatype))
    {
      char lenBuf[10];
      NAString vc((NASize_T)100);	// preallocate a big-enough buf

      size_t len = column->body.columns_desc.length;
      if (datatype == REC_BYTE_V_DOUBLE) len /= SQL_DBCHAR_SIZE;

      vc = "CAST (@A1 as CHAR(";
      vc += str_itoa(len, lenBuf);
      if ( column->body.columns_desc.character_set == CharInfo::UTF8 ||
           ( column->body.columns_desc.character_set == CharInfo::SJIS &&
             column->body.columns_desc.encoding_charset == CharInfo::SJIS ) )
        {
          vc += " BYTE";
          if (len > 1)
            vc += "S";
        }
      vc += ") CHARACTER SET ";
      vc += CharInfo::getCharSetName(column->body.columns_desc.character_set);
      vc += ");";

      itemExpr = expGen->createExprTree(vc, CharInfo::UTF8, vc.length(), 1, itemExpr);
      itemExpr->synthTypeAndValueId();

      ((NAType *)&(itemExpr->getValueId().getType()))->
        setNullable(column->body.columns_desc.null_flag);
  }

  // add the encode node on top of it.
  short desc_flag = TRUE;
  if (key->body.keys_desc.ordering == 0) // ascending
    desc_flag = FALSE;
  
  itemExpr = new(expGen->wHeap()) CompEncode(itemExpr, desc_flag);
  
  itemExpr->synthTypeAndValueId();
  
  ((CompEncode*)itemExpr)->setCaseinsensitiveEncode(caseinsensitiveEncode);

  return itemExpr;
}
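
The core of buildEncodeTree() is string assembly: the catalog value and the column's PIC text are spliced into a CAST expression that the parser turns back into an ItemExpr, and a CompEncode node is layered on top. A minimal sketch of just the text-assembly step (the example value and picture text below are made up for illustration):

#include <string>

// Sketch only: mirrors the non-NULL branch of buildEncodeTree() above.
// For dataBuffer = "'SMITH'" and pictureText = "CHAR(10)", it yields
//   CAST ( 'SMITH' AS CHAR(10));
std::string castExprText(const std::string& dataBuffer, const std::string& pictureText)
{
  std::string ns = "CAST ( ";
  ns += dataBuffer;      // original key value in its catalog (string) form
  ns += " AS ";
  ns += pictureText;     // PIC representation of the column's datatype
  ns += ");";
  return ns;
}
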
Code example #23
0
// ItmSeqMovingFunction::preCodeGen
//
// All of the moving sequence functions have been transformed away at this
// point except min and max. Transform these operations to a while-loop which 
// iterates over the past rows testing the min/max condition for each row. 
// Use the ItmScalarMin/Max functions for computing the min/max.
//
ItemExpr *ItmSeqMovingFunction::preCodeGen(Generator *generator)
{
  if (nodeIsPreCodeGenned())
    return this;
  markAsPreCodeGenned();

  // Get some local handles...
  //
  CollHeap *wHeap = generator->wHeap();
  ItemExpr *itmChild = child(0)->castToItemExpr();
  ItemExpr *itmWindow = child(1)->castToItemExpr();

  // What scalar operation needs to be done.
  //
  OperatorTypeEnum operation;
  if(getOperatorType() == ITM_MOVING_MIN) operation = ITM_SCALAR_MIN;
  else operation = ITM_SCALAR_MAX;

  // Allocate a HostVar for local storage of the index.
  //
  ItemExpr *itmLocalCounter 
    = new(wHeap) HostVar("_sys_LocalCounter",
			 new(wHeap) SQLInt(wHeap, TRUE,FALSE),
			 TRUE);

  // Expression to initialize the iterator.
  //
  ItemExpr *itmLocalCounterInitialize
    = new(wHeap) Assign(itmLocalCounter, 
			new(wHeap) ConstValue(0),
			FALSE);

  // Expression to increment the iterator.
  //
  ItemExpr *itmLocalCounterIncrement
    = new(wHeap) Assign(itmLocalCounter,
			new(wHeap) BiArith(ITM_PLUS,
					   itmLocalCounter,
					   new (wHeap) ConstValue(1)),
			FALSE);

  // Allocate a HostVar for referencing the result before it is computed.
  //
  ItemExpr *itmResult 
    = new(wHeap) HostVar("_sys_Result",
			 getValueId().getType().newCopy(wHeap),
			 TRUE);

  // Expression to initialize the result.
  //
  ItemExpr *itmResultInitialize
    = new(wHeap) Assign(itmResult,
			new(wHeap) ConstValue());
			
  // Expression to compute the min/max.
  //
  ItemExpr *itmOffsetExpr = new(wHeap) ItmSeqOffset( itmChild, itmLocalCounter);
  ((ItmSeqOffset *)itmOffsetExpr)->setIsOLAP(isOLAP());
  ItemExpr *itmResultUpdate
    = new(wHeap) Assign(itmResult,
			new(wHeap) ItmScalarMinMax(operation, 
						   itmResult, 
						   itmOffsetExpr));

  // Construct code blocks for the initialization and body for the while-loop
  //
  ItemExpr *itmInit 
    = new(wHeap) ItmBlockFunction(itmLocalCounterInitialize,
				  itmResultInitialize);
  ItemExpr *itmBody
    = new(wHeap) ItmBlockFunction(itmResultUpdate,
				  itmLocalCounterIncrement);
  
  // Construct the While loop (i < window)
  //
  ItemExpr *itmLoopCondition = new(wHeap) BiRelat
    (ITM_LESS, itmLocalCounter, itmWindow);
  ItemExpr *itmWhile 
    = new(wHeap) ItmWhileFunction(itmBody,
				    itmLoopCondition);
  
  
  // Construct the blocks to contain the initialization and looping.
  // The result is the final value of the min/max.
  //
  ItemExpr *itmBlock = new(wHeap) ItmBlockFunction
    (new(wHeap) ItmBlockFunction(itmInit, itmWhile),
     itmResult);
  
  // Replace the item for this value id with the new item expression.
  //
  getValueId().replaceItemExpr(itmBlock);

  // Run the new expression through type and value Id synthesis.
  //
  itmBlock->synthTypeAndValueId(TRUE);

  // Map the reference to the result to the actual result in the map table.
  //
  Attributes *attr =  generator->getMapInfo(itmBlock->getValueId())->getAttr();
  MapInfo *mapInfo = generator->addMapInfo(itmResult->getValueId(), attr);
  itmResult->markAsPreCodeGenned();
  mapInfo->codeGenerated();

  // Return the preCodeGen of the new expression.
  //
  return itmBlock->preCodeGen(generator);
}
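
The transformation above is easiest to read as its runtime effect: MOVING_MIN(x, w) at row r folds the scalar min over the previous w rows, which is exactly what the generated counter / while-loop / OFFSET tree does. A self-contained sketch of that computation, using a sentinel instead of the NULL initializer the real expression relies on:

#include <algorithm>
#include <cstddef>
#include <limits>
#include <vector>

// Sketch only: the computation performed by the generated loop for MOVING_MIN(x, w) at row r.
long movingMinSketch(const std::vector<long>& x, std::size_t r, std::size_t w)
{
  long result = std::numeric_limits<long>::max();   // stands in for Assign(result, NULL)
  for (std::size_t i = 0; i < w && i <= r; ++i)     // itmLoopCondition: counter < window
    result = std::min(result, x[r - i]);            // ItmScalarMinMax over ItmSeqOffset(x, i)
  return result;
}
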
Code example #24
0
short RelInternalSP::codeGen(Generator * generator)
{
  Space * space          = generator->getSpace();
  ExpGenerator * exp_gen = generator->getExpGenerator();
  MapTable * last_map_table = generator->getLastMapTable();

  ex_expr * input_expr  = NULL;
  ex_expr * output_expr = NULL;

  ////////////////////////////////////////////////////////////////////////////
  //
  // Returned atp layout:
  //
  // |--------------------------------|
  // | input data  |  stored proc row |
  // | ( I tupps ) |  ( 1 tupp )      |
  // |--------------------------------|
  // <-- returned row to parent ---->
  //
  // input data:        the atp input to this node by its parent. 
  // stored proc row:   tupp where the row read from SP is moved.
  //
  ////////////////////////////////////////////////////////////////////////////

  ex_cri_desc * given_desc 
    = generator->getCriDesc(Generator::DOWN);

  ex_cri_desc * returned_desc 
    = new(space) ex_cri_desc(given_desc->noTuples() + 1, space);
 
  // cri descriptor for work atp has 3 entries:
  // -- the first two entries for consts and temps.
  // -- Entry 3(index #2) is where the input and output rows will be created.
  ex_cri_desc * work_cri_desc = new(space) ex_cri_desc(3, space);
  const Int32 work_atp = 1;
  const Int32 work_atp_index = 2;
 
  ExpTupleDesc * input_tuple_desc = NULL;
  ExpTupleDesc * output_tuple_desc = NULL;

  // Generate expression to create the input row that will be
  // given to the stored proc.
  // The input value is in sp->getProcAllParams() 
  // and has to be converted to sp->procType().
  // Generate Cast node to convert procParam to ProcType.
  // If procType is a varchar, explode it. This is done
  // so that values could be extracted correctly.
  ValueIdList procVIDList;
  for (CollIndex i = 0; i < procTypes().entries(); i++)
    {
      Cast * cn;

      if ((procTypes())[i].getType().getVarLenHdrSize() > 0) 
	{

// 5/9/98: add support for VARNCHAR
          const CharType& char_type =
                (CharType&)((procTypes())[i].getType());

	  // Explode varchars by moving them to a fixed field
	  // whose length is equal to the max length of varchar.
	  cn = new(generator->wHeap()) 
	    Cast ((getProcAllParamsVids())[i].getItemExpr(), 
		  (new(generator->wHeap())
		   SQLChar(generator->wHeap(),
		           CharLenInfo(char_type.getStrCharLimit(), char_type.getDataStorageSize()),
			   char_type.supportsSQLnull(),
			   FALSE, FALSE, FALSE,
			   char_type.getCharSet(),
			   char_type.getCollation(),
			   char_type.getCoercibility()
/*
                           (procTypes())[i].getType().getNominalSize(),
			   (procTypes())[i].getType().supportsSQLnull()
*/
                          )
                  )
                 );
	  
	  // Move the exploded field to a varchar field since 
	  // procType is varchar.
	  // Can optimize by adding an option to convert node to
	  // blankpad. TBD.
	  //
	  cn = new(generator->wHeap())
	    Cast(cn, &((procTypes())[i].getType()));
	} 
      else
 	cn = new(generator->wHeap()) Cast((getProcAllParamsVids())[i].getItemExpr(),
					  &((procTypes())[i].getType()));

      cn->bindNode(generator->getBindWA());
      procVIDList.insert(cn->getValueId());
    }

  ULng32 inputRowlen_ = 0;
  exp_gen->generateContiguousMoveExpr(procVIDList, -1, /*add conv nodes*/
				      work_atp, work_atp_index,
				      ExpTupleDesc::SQLARK_EXPLODED_FORMAT,
				      inputRowlen_,
				      &input_expr,
				      &input_tuple_desc,
				      ExpTupleDesc::LONG_FORMAT);

  // add all columns from this SP to the map table. 
  ULng32 tupleLength;
  exp_gen->processValIdList(getTableDesc()->getColumnList(),
			    ExpTupleDesc::SQLARK_EXPLODED_FORMAT,
			    tupleLength,
			    work_atp, 
			    work_atp_index);
 
  // Generate expression to move the output row returned by the
  // stored proc back to parent.
  ULng32 outputRowlen_ = 0;
  MapTable * returnedMapTable = 0;
  exp_gen->generateContiguousMoveExpr(getTableDesc()->getColumnList(),
				      -1 /*add conv nodes*/,
				      0, returned_desc->noTuples() - 1,
				      ExpTupleDesc::SQLARK_EXPLODED_FORMAT,
				      outputRowlen_,
				      &output_expr,
				      &output_tuple_desc,
				      ExpTupleDesc::LONG_FORMAT,
				      &returnedMapTable);
 
  // Now generate expressions used to extract or move input or
  // output values. See class ExSPInputOutput.
  ExSPInputOutput * extractInputExpr = NULL;
  ExSPInputOutput * moveOutputExpr = NULL;
  
  generateSPIOExpr(this, generator,
		   extractInputExpr,
		   moveOutputExpr);

  // done with expressions at this operator. Remove the appended map tables.
  generator->removeAll(last_map_table);

  // append the map table containing the returned columns
  generator->appendAtEnd(returnedMapTable);

  NAString procNameAsNAString(procName_);
  char * sp_name = 
    space->allocateAndCopyToAlignedSpace(procNameAsNAString,
					 procNameAsNAString.length(), 0);

  ExpGenerator *expGen = generator->getExpGenerator();

  // expression to conditionally return 0 or more rows.
  ex_expr *predExpr = NULL;

  // generate tuple selection expression, if present
  if(NOT selectionPred().isEmpty())
  {
    ItemExpr* pred = selectionPred().rebuildExprTree(ITM_AND,TRUE,TRUE);
    expGen->generateExpr(pred->getValueId(),ex_expr::exp_SCAN_PRED,&predExpr);
  }

  ComTdbStoredProc * sp_tdb = new(space)
    ComTdbStoredProc(sp_name, 
		     input_expr,
		     inputRowlen_,
		     output_expr,
		     outputRowlen_,
		     work_cri_desc,
		     work_atp_index,
		     given_desc,
		     returned_desc,
		     extractInputExpr,
		     moveOutputExpr,
		     2,
		     1024,
		     (Cardinality) getGroupAttr()->
		     getOutputLogPropList()[0]->
		     getResultCardinality().value(),
		     5,
		     64000,  //10240
		     predExpr,
		     (UInt16) arkcmpInfo_);
		      
  generator->initTdbFields(sp_tdb);

  if(!generator->explainDisabled()) 
    {
      generator->setExplainTuple(
				 addExplainInfo(sp_tdb, 0, 0, generator));
    }
  // Do not infer that any transaction started can 
  // be in READ ONLY mode if ISPs are present.
  generator->setNeedsReadWriteTransaction(TRUE);

  generator->setCriDesc(given_desc, Generator::DOWN);
  generator->setCriDesc(returned_desc, Generator::UP);
  generator->setGenObj(this, sp_tdb);

  // Some built-in functions require a TMF transaction
  // because they get their information from catman
  generator->setTransactionFlag(getRequiresTMFTransaction());

  return 0;
}
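
The "explode varchars" step above casts a VARCHAR parameter to a fixed CHAR whose length equals the VARCHAR's maximum, so every value occupies a predictable number of bytes before being cast back to the procedure's declared type. A minimal, self-contained sketch of the blank-padding this amounts to (not the generated expression itself):

#include <cstddef>
#include <string>

// Sketch only: the effect of Cast(varcharValue, SQLChar(maxLen)) used above.
std::string explodeVarchar(const std::string& value, std::size_t maxLen)
{
  std::string fixed = value.substr(0, maxLen);   // the real expression would raise an error on overflow
  fixed.resize(maxLen, ' ');                     // blank-pad to the declared maximum length
  return fixed;
}
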
Code example #25
0
///////////////////////////////////////////////////////////////////
// This function takes as input an array of key values, where each
// key value is in ASCII string format (the way it is stored in
// catalogs). It encodes the key values and returns the encoded
// value in the encodedKeyBuffer. 
// RETURNS: -1, if error. 0, if all Ok.
///////////////////////////////////////////////////////////////////
short encodeKeyValues(desc_struct   * column_descs,
		      desc_struct   * key_descs,
		      NAString      * inValuesArray[],          // INPUT
                      NABoolean isIndex,
		      char * encodedKeyBuffer,                  // OUTPUT
                      CollHeap * h,
		      ComDiagsArea * diagsArea)
{
  short error = 0;       // assume all will go well
  NABoolean deleteLater = FALSE;

  // set up binder/generator stuff so expressions could be generated.
  InitSchemaDB();
  CmpStatement cmpStatement(CmpCommon::context());
  ActiveSchemaDB()->createStmtTables();
  BindWA       bindWA(ActiveSchemaDB(), CmpCommon::context());
  Generator    generator(CmpCommon::context());
  ExpGenerator expGen(&generator);
  generator.appendAtEnd(); // alloc a new map table
  generator.setBindWA(&bindWA);
  generator.setExpGenerator(&expGen);
  FragmentDir * compFragDir = generator.getFragmentDir();

  // create the fragment (independent code space) for this expression
  CollIndex myFragmentId = compFragDir->pushFragment(FragmentDir::MASTER);
  
  // space where RCB will be generated
  Space * space = generator.getSpace();

  // Let's start with a list of size 4 rather than resizing continuously
  ValueIdList encodedValueIdList(4);
  desc_struct * column = column_descs;
  desc_struct * key    = key_descs;
  Int32 i = 0;

  if (inValuesArray == NULL)
    deleteLater = TRUE;

  while (key)
    {
      // for an index, keys_desc has columns in the same order as columns_desc,
      // the following for loop is not needed.
      if (!isIndex) {
      column = column_descs;
      for (Int32 j = 0; j < key->body.keys_desc.tablecolnumber; j++)
	column = column->header.next;
      }

      if (inValuesArray[i] == NULL)
	inValuesArray[i] = getMinMaxValue(column, key, FALSE, h);
      
      ItemExpr * itemExpr = buildEncodeTree(column, key, inValuesArray[i],
					    &generator, diagsArea);
      if (! itemExpr)
	return -1;

      encodedValueIdList.insert(itemExpr->getValueId());
      
      i++;
      key = key->header.next;
      if (isIndex)
        column = column->header.next;
    }
  
  // allocate a work cri desc to encode keys. It has
  // 3 entries: 0, for consts. 1, for temps. 
  // 2, for the encoded key.
  ex_cri_desc * workCriDesc = new(space) ex_cri_desc(3, space);
  short keyAtpIndex   = 2;  // where the encoded key will be built
  
  ULng32 encodedKeyLen;
  ex_expr * keExpr = 0;
  expGen.generateContiguousMoveExpr(encodedValueIdList,
				    0 /*don't add conv nodes*/,
				    0 /*atp*/, keyAtpIndex,
				    ExpTupleDesc::SQLMX_KEY_FORMAT,
				    encodedKeyLen,
				    &keExpr);

  // create a DP2 expression and initialize it with the key encode expr.
  ExpDP2Expr * keyEncodeExpr = new(space) ExpDP2Expr(keExpr,
						     workCriDesc,
						     space);
  
  keyEncodeExpr->getExpr()->fixup(0,expGen.getPCodeMode(), 
                                  (ex_tcb *)space,space, h, FALSE, NULL);

  atp_struct * workAtp = keyEncodeExpr->getWorkAtp();
  workAtp->getTupp(keyAtpIndex).setDataPointer(encodedKeyBuffer);
  
  if (keyEncodeExpr->getExpr()->eval(workAtp, 0, space) == ex_expr::EXPR_ERROR)
    error = -1;

  if (deleteLater)
    delete [] inValuesArray;

  generator.removeAll(NULL);

  return error;
}
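
encodeKeyValues() ultimately produces bytes whose memcmp order matches key order, with descending key columns complemented (the desc_flag handed to CompEncode in buildEncodeTree). The following is a self-contained sketch of that idea for a single signed 32-bit key column; it is only an illustration of the ordering property, not the engine's actual on-disk encoding.

#include <cstdint>
#include <string>

// Sketch only: big-endian bytes with the sign bit flipped compare (via memcmp)
// in numeric order; complementing the bytes reverses the order for DESC columns.
std::string encodeInt32Key(int32_t v, bool descending)
{
  uint32_t u = static_cast<uint32_t>(v) ^ 0x80000000u;   // flip the sign bit
  unsigned char b[4] = {
    static_cast<unsigned char>(u >> 24), static_cast<unsigned char>(u >> 16),
    static_cast<unsigned char>(u >> 8),  static_cast<unsigned char>(u)
  };
  if (descending)
    for (int i = 0; i < 4; ++i) b[i] = static_cast<unsigned char>(~b[i]);
  return std::string(reinterpret_cast<char*>(b), 4);
}
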
Code example #26
0
void PhysSequence::computeReadNReturnItems( ValueId topSeqVid,
                                            ValueId vid,
                                            const ValueIdSet &outputFromChild,
                                            CollHeap *wHeap)
{
  ItemExpr * itmExpr = vid.getItemExpr();


  if (outputFromChild.contains(vid)) 
  {
    return;
  }
  //test if itm_minus and then if negative offset ....
  if ( itmExpr->getOperatorType() == ITM_OFFSET &&
      ((ItmSeqOffset *)itmExpr)->getOffsetConstantValue() < 0)
  {
    readSeqFunctions() -= topSeqVid;
    returnSeqFunctions() += topSeqVid;

    readSeqFunctions() += itmExpr->child(0)->castToItemExpr()->getValueId();
    return;
  }
  
  if (itmExpr->getOperatorType() == ITM_MINUS)
  {
    ItemExpr * chld0  = itmExpr->child(0)->castToItemExpr();
    if ( chld0->getOperatorType() == ITM_OFFSET &&
        ((ItmSeqOffset *)chld0)->getOffsetConstantValue() <0)
    {
      readSeqFunctions() -= topSeqVid;
      returnSeqFunctions() += topSeqVid;

      readSeqFunctions() += chld0->child(0)->castToItemExpr()->getValueId();

      ItemExpr * chld1  = itmExpr->child(1)->castToItemExpr();
      if (chld1->getOperatorType() == ITM_OFFSET &&
          ((ItmSeqOffset *)chld1)->getOffsetConstantValue() < 0)
      {
        readSeqFunctions() += chld1->child(0)->castToItemExpr()->getValueId();
      }
      else
      {
        readSeqFunctions() += chld1->getValueId();
      }
      return;
    }
    
  }
  
  
  if (itmExpr->getOperatorType() == ITM_OLAP_MIN || 
           itmExpr->getOperatorType() == ITM_OLAP_MAX) 
  { 
    ItmSeqOlapFunction * olap = (ItmSeqOlapFunction *)itmExpr;
    if (olap->getframeEnd()>0)
    {
      readSeqFunctions() -= topSeqVid;
      returnSeqFunctions() += topSeqVid;

      ItemExpr *newChild = new(wHeap) Convert (itmExpr->child(0)->castToItemExpr());
      newChild->synthTypeAndValueId(TRUE);

      itmExpr->child(0) = newChild;

      readSeqFunctions() += newChild->getValueId();
      return;
    }
  }
  
  if (itmExpr->getOperatorType() == ITM_SCALAR_MIN || 
           itmExpr->getOperatorType() == ITM_SCALAR_MAX) 
  {
    ItemExpr * chld0  = itmExpr->child(0)->castToItemExpr();
    ItemExpr * chld1  = itmExpr->child(1)->castToItemExpr();
    if ((chld0->getOperatorType() == ITM_OLAP_MIN && chld1->getOperatorType() == ITM_OLAP_MIN )|| 
        (chld0->getOperatorType() == ITM_OLAP_MAX && chld1->getOperatorType() == ITM_OLAP_MAX ))
    {
      ItmSeqOlapFunction * olap0 = (ItmSeqOlapFunction *)chld0;
      ItmSeqOlapFunction * olap1 = (ItmSeqOlapFunction *)chld1;
      if ( olap1->getframeEnd()>0)
      { 
        CMPASSERT(olap0->getframeEnd()==0);

        readSeqFunctions() -= topSeqVid;
        returnSeqFunctions() += topSeqVid;
        readSeqFunctions() += olap0->getValueId();
        
        ItemExpr *newChild = new(wHeap) Convert (olap1->child(0)->castToItemExpr());
        newChild->synthTypeAndValueId(TRUE);

        olap1->child(0) = newChild;

        readSeqFunctions() += newChild->getValueId();
      }
      else
      {
        CMPASSERT(olap1->getframeEnd()==0);

        readSeqFunctions() -= topSeqVid;
        returnSeqFunctions() += topSeqVid;
        readSeqFunctions() += olap1->getValueId();

        ItemExpr *newChild = new(wHeap) Convert (olap0->child(0)->castToItemExpr());
        newChild->synthTypeAndValueId(TRUE);

        olap0->child(0) = newChild;

        readSeqFunctions() += newChild->getValueId();
      }
      return;
    }
  }

  for (Int32 i= 0 ; i < itmExpr->getArity(); i++)
  {
    ItemExpr * chld= itmExpr->child(i);
    computeReadNReturnItems(topSeqVid,
                            chld->getValueId(),
                            outputFromChild,
                            wHeap);
  }
} // PhysSequence::computeReadNReturnItems
Code example #27
0
short BiLogic::codeGen(Generator * generator)
{
  Attributes ** attr;

  if (generator->getExpGenerator()->genItemExpr(this, &attr, (1+getArity()), 0) == 1)
    return 0;
    
  Space * space = generator->getSpace();
  ExpGenerator * expGen = generator->getExpGenerator();

  // Normally, if code for a value id has been generated, and if
  // that value id is seen again, then code is not generated. The
  // location where the result is available is returned instead.
  // The case of a logical operator is different. Code is generated
  // again if a value id from the left child is also present in
  // the right child. This is done 
  // because at expression evaluation time, some of the expressions
  // may be skipped due to short circuit evaluation. 
  //
  // Allocate a new map table before generating code for each child.
  // This map table contains all the temporary results produced by
  // the child. 
  // Remove this map table after generating code for each child.
  generator->appendAtEnd();
  expGen->incrementLevel();

  codegen_and_set_attributes(generator, attr, 2); 
  //  generator->getExpGenerator()->setClauseLinked(FALSE);
  // child(0)->codeGen(generator);
  // attr[1] = generator->getAttr(child(0));
  // generator->getExpGenerator()->setClauseLinked(FALSE);

  /* generate boolean short circuit code */
  Attributes ** branch_attr = new(generator->wHeap()) Attributes * [2];

  branch_attr[0] = attr[0]->newCopy(generator->wHeap());
  branch_attr[0]->copyLocationAttrs(attr[0]);

  branch_attr[1] = attr[1]->newCopy(generator->wHeap());
  branch_attr[1]->copyLocationAttrs(attr[1]);

  branch_attr[0]->resetShowplan();
  ex_branch_clause * branch_clause 
    = new(space) ex_branch_clause(getOperatorType(), branch_attr, space);
  
  generator->getExpGenerator()->linkClause(0, branch_clause);
 
  generator->removeLast();
  expGen->decrementLevel();
  generator->appendAtEnd();
  expGen->incrementLevel();

  ValueIdSet markedEntries;
  // This is a MapTable-entry-related fix for the RangeSpec transformation.
  if( child(1)->getOperatorType() == ITM_RANGE_SPEC_FUNC )
    markGeneratedEntries(generator, child(1)->child(1), markedEntries);
  else
    markGeneratedEntries(generator, child(1), markedEntries);
//  if( child(1)->getOperatorType() == ITM_RANGE_SPEC_FUNC )
//    child(1)->child(1)->codeGen(generator);
//  else
    child(1)->codeGen(generator);
  ItemExpr *rightMost;
  if( child(1)->getOperatorType() == ITM_RANGE_SPEC_FUNC )
    rightMost = child(1)->child(1)->castToItemExpr();
  else
    rightMost = child(1)->castToItemExpr();

  while (rightMost->getOperatorType() == ITM_ITEM_LIST)
    rightMost = rightMost->child(1)->castToItemExpr();

  attr[2] = generator->
    getMapInfo(rightMost->getValueId())->getAttr();

  ex_bool_clause * bool_clause =
    new(space) ex_bool_clause(getOperatorType(), attr, space);

  generator->getExpGenerator()->linkClause(this, bool_clause);

  branch_clause->set_branch_clause((ex_clause *)bool_clause);
  
  generator->removeLast();
  expGen->decrementLevel();
  if( child(1)->getOperatorType() == ITM_RANGE_SPEC_FUNC )
    unGenerate(generator, child(1)->child(1));
  else
    unGenerate(generator, child(1));
  generateMarkedEntries(generator, markedEntries);

  return 0;
}
Code example #28
0
// computeHistoryRows
//
// Helper function that traverses the set of root sequence functions
// supplied by the compiler and dynamically determines the size
// of the history buffer.
// 
void PhysSequence::computeHistoryRows(const ValueIdSet &sequenceFunctions,//historyIds
                                      Lng32 &computedHistoryRows,
                                      Lng32 &unableToCalculate,
                                      NABoolean &unboundedFollowing, 
                                      Lng32 &minFollowingRows,
                                      const ValueIdSet &outputFromChild) 
{
  ValueIdSet children;
  ValueIdSet historyAttributes;
  Lng32 value = 0;

  for(ValueId valId = sequenceFunctions.init();
      sequenceFunctions.next(valId);
      sequenceFunctions.advance(valId)) 
  {
    if(valId.getItemExpr()->isASequenceFunction()) 
    {
      ItemExpr *itmExpr = valId.getItemExpr();

      switch(itmExpr->getOperatorType())
        {

        // THIS and NOT THIS are not dynamically computed
        //
        case ITM_THIS:
        case ITM_NOT_THIS:
          break;

        // The RUNNING functions and LastNotNull all need to go back just one row.
        //
        case ITM_RUNNING_SUM:
        case ITM_RUNNING_COUNT:
        case ITM_RUNNING_MIN:
        case ITM_RUNNING_MAX:
        case ITM_RUNNING_CHANGE:   
        case ITM_LAST_NOT_NULL:
          computedHistoryRows = MAXOF(computedHistoryRows, 2);
          break;
        // Set to "unable to compute" for now -- will change later to compute values from frameStart_ and frameEnd_.
        case ITM_OLAP_SUM:
        case ITM_OLAP_COUNT:
        case ITM_OLAP_MIN:
        case ITM_OLAP_MAX:
        case ITM_OLAP_RANK:
        case ITM_OLAP_DRANK:
        {
          if ( !outputFromChild.contains(itmExpr->getValueId()))
          {
            ItmSeqOlapFunction * olap = (ItmSeqOlapFunction*)itmExpr;

            if (olap->isFrameStartUnboundedPreceding()) //(olap->getframeStart() == - INT_MAX)
            {
              computedHistoryRows = MAXOF(computedHistoryRows, 2);
            }
            else
            {
              computedHistoryRows = MAXOF(computedHistoryRows, ABS(olap->getframeStart()) + 2);
            }
            if (!olap->isFrameEndUnboundedFollowing()) //(olap->getframeEnd() != INT_MAX)
            {
              computedHistoryRows = MAXOF(computedHistoryRows, ABS(olap->getframeEnd()) + 1);
            }

            if (olap->isFrameEndUnboundedFollowing()) //(olap->getframeEnd() == INT_MAX)
            {
              unboundedFollowing = TRUE;
              if (olap->getframeStart() > 0) 
              {
                minFollowingRows = ((minFollowingRows > olap->getframeStart()) ?  
                                    minFollowingRows : olap->getframeStart());
              }
            } else  if (olap->getframeEnd() > 0)
            {
              minFollowingRows = ((minFollowingRows > olap->getframeEnd()) ?  
                                  minFollowingRows : olap->getframeEnd());
            }
          }
        }

        break;

        // If 'rows since', we cannot determine how much history is needed.  
        case ITM_ROWS_SINCE:
          unableToCalculate = 1;
          break;

        // The MOVING and OFFSET functions need to go back as far as the value
        // of their second child.
        //
        //  The second argument can be:
        //    Constant: for these, we can use the constant value to set the upper bound
        //              for the history buffer.
        //    ItmScalarMinMax(child0, child1) (with operType = ITM_SCALAR_MIN)  
        //      - if child0 or child1 is a constant, then we can use either one
        //        to set the upper bound.
        
        case ITM_MOVING_MIN:
        case ITM_MOVING_MAX:
        case ITM_OFFSET:
         
          for(Lng32 i = 1; i < itmExpr->getArity(); i++)
          {
            if (itmExpr->child(i)->getOperatorType() != ITM_NOTCOVERED)
            {
              ItemExpr * exprPtr = itmExpr->child(i);
              NABoolean negate;
              ConstValue *cv = exprPtr->castToConstValue(negate);
              if (cv AND cv->canGetExactNumericValue())
                {
                  Lng32 scale;
                  Int64 value64 = cv->getExactNumericValue(scale);

                  if(scale == 0 && value64 >= 0 && value64 < INT_MAX) 
                    {
                      value64 = (negate ? -value64 : value64);
                      value = MAXOF((Lng32)value64, value);
                    }
                 }
              else
                {
                  if (exprPtr->getOperatorType() == ITM_SCALAR_MIN)
                    {
                      for(Lng32 j = 0; j < exprPtr->getArity(); j++)
                        {
                          if (exprPtr->child(j)->getOperatorType()
                            != ITM_NOTCOVERED)
                            {
                               ItemExpr * exprPtr1 = exprPtr->child(j);
                               NABoolean negate1;
                               ConstValue *cv1 = exprPtr1->castToConstValue(negate1);
                               if (cv1 AND cv1->canGetExactNumericValue())
                                 {
                                   Lng32 scale1;
                                   Int64 value64_1 = cv1->getExactNumericValue(scale1);

                                   if(scale1 == 0 && value64_1 >= 0 && value64_1 < INT_MAX) 
                                     {
                                       value64_1 = (negate1 ? -value64_1 : value64_1);
                                       value = MAXOF((Lng32)value64_1, value);
                                     }
                                  }
                              }
                          }   
                     }   
                }  // end of inner else
            }// end of if

          }// end of for

          // Check if the value is greater than zero.
          // If it is, then save the value, but first
          // increment the returned ConstValue by one.
          // Otherwise, the offset or moving value could not
          // be calculated.

          if (value > 0)
          {
            value++;
            computedHistoryRows = MAXOF(computedHistoryRows, value);
            value = 0;
          }
          else
            unableToCalculate = 1;

          break;

        default:
          CMPASSERT(0);
        }
    }
   
    // Gather all the children, and if not empty, recurse down to the
    // next level of the tree.
    //

    for(Lng32 i = 0; i < valId.getItemExpr()->getArity(); i++) {
      if (//valId.getItemExpr()->child(i)->getOperatorType() != ITM_NOTCOVERED //old stuff
          !outputFromChild.contains(valId.getItemExpr()->child(i)->getValueId()))
      {
        children += valId.getItemExpr()->child(i)->getValueId();
      }
    }
  }
  
  if (NOT children.isEmpty())
  {
    computeHistoryRows(children, 
                       computedHistoryRows, 
                       unableToCalculate, 
                       unboundedFollowing, 
                       minFollowingRows,
                       outputFromChild);  
  }
} // PhysSequence::computeHistoryRows
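
For the OFFSET/MOVING cases above, the history requirement comes from the window argument: a positive constant bounds it directly, and a SCALAR_MIN of two expressions is bounded by whichever child is a usable constant; anything else is reported as unable to calculate. A self-contained sketch of that extraction, with a toy expression type standing in for ItemExpr (names and structure are assumptions for illustration):

#include <algorithm>
#include <optional>

// Sketch only: toy stand-in for the constant extraction in computeHistoryRows() above.
struct ToyExpr {
  bool isConst = false;
  long constValue = 0;                       // exact numeric value when isConst
  bool isScalarMin = false;
  const ToyExpr *left = nullptr, *right = nullptr;
};

std::optional<long> historyBound(const ToyExpr& arg)
{
  if (arg.isConst && arg.constValue > 0)
    return arg.constValue + 1;               // +1 because the current row is kept as well
  if (arg.isScalarMin) {                     // SCALAR_MIN(a, b): either constant child is a bound
    std::optional<long> l = arg.left  ? historyBound(*arg.left)  : std::optional<long>();
    std::optional<long> r = arg.right ? historyBound(*arg.right) : std::optional<long>();
    if (l && r) return std::min(*l, *r);
    return l ? l : r;
  }
  return std::nullopt;                       // "unableToCalculate"
}
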
Code example #29
0
short ProbeCache::codeGen(Generator *generator)
{
  ExpGenerator * exp_gen = generator->getExpGenerator();
  Space * space = generator->getSpace();

  MapTable * last_map_table = generator->getLastMapTable();

  ex_cri_desc * given_desc
    = generator->getCriDesc(Generator::DOWN);

  ex_cri_desc * returned_desc
    = new(space) ex_cri_desc(given_desc->noTuples() + 1, space);

  // cri descriptor for work atp has 5 entries:
  // entry #0 for const
  // entry #1 for temp
  // entry #2 for hash value of probe input data in Probe Cache Manager
  // entry #3 for encoded probe input data in Probe Cache Manager
  // entry #4 for inner table row data in this operator's cache buffer
  Int32 work_atp = 1;
  ex_cri_desc * work_cri_desc = new(space) ex_cri_desc(5, space);
  unsigned short hashValIdx          = 2; 
  unsigned short encodedProbeDataIdx = 3;
  unsigned short innerRowDataIdx     = 4;

    // generate code for child tree, and get its tdb and explain tuple.
  child(0)->codeGen(generator);
  ComTdb * child_tdb = (ComTdb *)(generator->getGenObj());
  ExplainTuple *childExplainTuple = generator->getExplainTuple();


  //////////////////////////////////////////////////////
  // Generate up to 4 runtime expressions.
  //////////////////////////////////////////////////////

  // Will use child's char. inputs (+ execution count) for the next
  // two runtime expressions.
  ValueIdList inputsToUse = child(0).getGroupAttr()->getCharacteristicInputs();
  
  inputsToUse.insert(generator->getOrAddStatementExecutionCount());

  // Expression #1 gets the hash value of the probe input data
  ValueIdList hvAsList;

  // The executor has a hard-coded assumption that the result is a long,
  // so add a Cast node to convert the result to a long.

  ItemExpr *probeHashAsIe = new (generator->wHeap())
    HashDistPartHash(inputsToUse.rebuildExprTree(ITM_ITEM_LIST));

  probeHashAsIe->bindNode(generator->getBindWA());

  NumericType &nTyp = (NumericType &)probeHashAsIe->getValueId().getType();
  GenAssert(nTyp.isSigned() == FALSE,
            "Unexpected signed HashDistPartHash.");

  GenAssert(probeHashAsIe->getValueId().getType().supportsSQLnullLogical()
            == FALSE, "Unexpected nullable HashDistPartHash.");

  ItemExpr *hvAsIe = new (generator->wHeap()) Cast(
       probeHashAsIe, 
       new (generator->wHeap()) 
            SQLInt(FALSE,   // false == unsigned.
                   FALSE    // false == not nullable.
                  ));

  hvAsIe->bindNode(generator->getBindWA());

  hvAsList.insert(hvAsIe->getValueId());

  ex_expr *hvExpr   = NULL;
  ULng32 hvLength;
  exp_gen->generateContiguousMoveExpr(
              hvAsList,
              0, // don't add convert node
              work_atp,
              hashValIdx,
              ExpTupleDesc::SQLARK_EXPLODED_FORMAT,
              hvLength,
              &hvExpr);

  GenAssert(hvLength == sizeof(Lng32),
            "Unexpected length of result of hash function.");

  // Expression #2 encodes the probe input data for storage in 
  // the ProbeCacheManager.

  ValueIdList encodeInputAsList;

  CollIndex inputListIndex;
  for (inputListIndex = 0; 
       inputListIndex < inputsToUse.entries(); 
       inputListIndex++) {   

    ItemExpr *inputIe = 
         (inputsToUse[inputListIndex].getValueDesc())->getItemExpr();

    if (inputIe->getValueId().getType().getVarLenHdrSize() > 0)
      {
        // This logic copied from Sort::codeGen().
        // Explode varchars by moving them to a fixed field
        // whose length is equal to the max length of varchar.
        // 5/8/98: add support for VARNCHAR

        const CharType& char_type =
          (CharType&)(inputIe->getValueId().getType());

	if (!CollationInfo::isSystemCollation(char_type.getCollation()))
	{
	  inputIe = new(generator->wHeap())
              Cast (inputIe,
                    (new(generator->wHeap())
                      SQLChar(
		          CharLenInfo(char_type.getStrCharLimit(), char_type.getDataStorageSize()),
                          char_type.supportsSQLnull(),
                          FALSE, FALSE, FALSE,
                          char_type.getCharSet(),
                          char_type.getCollation(),
                          char_type.getCoercibility()
                              )
                    )
                   );
	}
      }

    CompEncode * enode = new(generator->wHeap()) 
      CompEncode(inputIe, FALSE /* ascend/descend doesn't matter*/);

    enode->bindNode(generator->getBindWA());
    encodeInputAsList.insert(enode->getValueId());
  }

  ex_expr *encodeInputExpr = NULL;
  ULng32 encodedInputLength;
  exp_gen->generateContiguousMoveExpr(encodeInputAsList, 
                              0, //don't add conv nodes
                              work_atp, encodedProbeDataIdx,
                              ExpTupleDesc::SQLARK_EXPLODED_FORMAT,
                              encodedInputLength, &encodeInputExpr);


  // Expression #3 moves the inner table data into a buffer pool.  
  // This is also the tuple returned to ProbeCache's parent. 

  ex_expr * innerRecExpr = NULL;
  ValueIdList innerTableAsList = getGroupAttr()->getCharacteristicOutputs();

  //////////////////////////////////////////////////////
  // Describe the returned row and add the returned 
  // values to the map table.
  //////////////////////////////////////////////////////

  // determine internal format
  NABoolean useCif = FALSE;
  ExpTupleDesc::TupleDataFormat tupleFormat = generator->getInternalFormat();
  //tupleFormat = determineInternalFormat( innerTableAsList, this, useCif,generator);

  ULng32 innerRecLength = 0;
  ExpTupleDesc * innerRecTupleDesc = 0;
  MapTable * returnedMapTable = NULL;

  exp_gen->generateContiguousMoveExpr(innerTableAsList, 
                              -1, // do add conv nodes 
			      work_atp, innerRowDataIdx,
			      tupleFormat,
			      innerRecLength, &innerRecExpr,
			      &innerRecTupleDesc, ExpTupleDesc::SHORT_FORMAT,
                              &returnedMapTable);

  returned_desc->setTupleDescriptor(returned_desc->noTuples() - 1, 
        innerRecTupleDesc);

  // remove all appended map tables and return the returnedMapTable
  generator->removeAll(last_map_table);
  generator->appendAtEnd(returnedMapTable);
  // This returnedMapTable will contain the value ids that are being returned 
  // (the inner table probed).

  // Massage the atp and atp_index of the innerTableAsList.
  for (CollIndex i = 0; i < innerTableAsList.entries(); i++)
    {
      ValueId innerValId = innerTableAsList[i];

      Attributes *attrib =
	generator->getMapInfo(innerValId)->getAttr();

      // All references to the returned values from this point on
      // will be at atp = 0, atp_index = last entry in returned desc.
      attrib->setAtp(0);
      attrib->setAtpIndex(returned_desc->noTuples() - 1);
    }

  // Expression #4 is a selection predicate, to be applied
  // before returning rows to the parent

  ex_expr * selectPred = NULL;

  if (!selectionPred().isEmpty())
    {
      ItemExpr * selPredTree =
        selectionPred().rebuildExprTree(ITM_AND,TRUE,TRUE);
      exp_gen->generateExpr(selPredTree->getValueId(),
                            ex_expr::exp_SCAN_PRED,
                            &selectPred);
    }

  //////////////////////////////////////////////////////
  // Prepare params for ComTdbProbeCache.
  //////////////////////////////////////////////////////

  queue_index pDownSize = (queue_index)getDefault(GEN_PROBE_CACHE_SIZE_DOWN);
  queue_index pUpSize   = (queue_index)getDefault(GEN_PROBE_CACHE_SIZE_UP);

  // Make sure that the ProbeCache queues can support the child's queues.
  if(pDownSize < child_tdb->getInitialQueueSizeDown()) {
    pDownSize = child_tdb->getInitialQueueSizeDown();
    pDownSize = MINOF(pDownSize, 32768);
  }
  if(pUpSize < child_tdb->getInitialQueueSizeUp()) {
    pUpSize = child_tdb->getInitialQueueSizeUp();
    pUpSize = MINOF(pUpSize, 32768);
  }

  ULng32 pcNumEntries = numCachedProbes_;
  
  // Number of entries in the probe cache cannot be less than 
  // max parent down queue size.  Before testing and adjusting the 
  // max queue size, it is necessary to make sure it is a power of
  // two, rounding up if necessary.  This is to match the logic in
  // ex_queue::resize.

  queue_index pdq2 = 1;
  queue_index bits = pDownSize;
  while (bits && pdq2 < pDownSize) {
    bits = bits  >> 1;
    pdq2 = pdq2 << 1;
  }
  if (pcNumEntries < pdq2)
    pcNumEntries = pdq2;

  numInnerTuples_ = getDefault(GEN_PROBE_CACHE_NUM_INNER);

  if (innerRecExpr == NULL)
    {
      // For semi-join and anti-semi-join, the executor need not allocate
      // a buffer.  Set the tdb's buffer size to 0 to be consistent.
      numInnerTuples_ = 0;
    }
  else if (numInnerTuples_ == 0)
    {
      // Handle the special value 0, which tells code gen to
      // decide on the buffer size: i.e., large enough to accommodate
      // all parent up queue entries and all probe cache entries
      // having a different inner table row.

      // As we did for the down queue, make sure the up queue size 
      // specified is a power of two.

      queue_index puq2 = 1;
      queue_index bits = pUpSize;
      while (bits && puq2 < pUpSize) {
        bits = bits  >> 1;
        puq2 = puq2 << 1;
      }
      numInnerTuples_ = puq2 + pcNumEntries;
    }
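
Both queue sizes above are first rounded up to the next power of two, so that the number of probe-cache entries matches what ex_queue::resize will actually allocate for the parent queues. A self-contained sketch of that rounding step, using the same shift-based loop (the function name and the typedef are assumptions for illustration):

#include <cassert>

typedef unsigned int queue_index;   // stands in for the executor's queue_index type

// Smallest power of two >= size, using the same loop as above.
static queue_index roundUpToPowerOfTwo(queue_index size)
{
  queue_index p2   = 1;
  queue_index bits = size;
  while (bits && p2 < size) {
    bits >>= 1;
    p2   <<= 1;
  }
  return p2;
}

int main()
{
  assert(roundUpToPowerOfTwo(5)  == 8);   // rounded up
  assert(roundUpToPowerOfTwo(8)  == 8);   // already a power of two
  assert(roundUpToPowerOfTwo(32) == 32);
  return 0;
}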
コード例 #30
0
short
PhysSample::codeGen(Generator *generator) 
{  
  // Get a local handle on some of the generator objects.
  //
  CollHeap *wHeap = generator->wHeap();
  Space *space = generator->getSpace();
  MapTable *mapTable = generator->getMapTable();
  ExpGenerator *expGen = generator->getExpGenerator();

  // Allocate a new map table for this node. This must be done
  // before generating the code for my child so that this local
  // map table will be sandwiched between the map tables already
  // generated and the map tables generated by my offspring.
  //
  // Only the items available as output from this node will
  // be put in the local map table. Before exiting this function, all of
  // my offspring's map tables will be removed. Thus, none of the outputs 
  // from nodes below this node will be visible to nodes above it except 
  // those placed in the local map table and those that already exist in
  // my ancestors' map tables. This is the standard mechanism used in the
  // generator for managing the access to item expressions.
  //
  MapTable *localMapTable = generator->appendAtEnd();

  // Since this operation doesn't modify the row on the way down the tree,
  // go ahead and generate the child subtree. Capture the given composite row
  // descriptor and the child's returned TDB and composite row descriptor.
  //
  ex_cri_desc * givenCriDesc = generator->getCriDesc(Generator::DOWN);
  child(0)->codeGen(generator);
  ComTdb *childTdb = (ComTdb*)generator->getGenObj();
  ex_cri_desc * childCriDesc = generator->getCriDesc(Generator::UP);
  ExplainTuple *childExplainTuple = generator->getExplainTuple();

  // Generate the sampling expression.
  //
  ex_expr *balExpr = NULL;
  Int32 returnFactorOffset = 0;
  ValueId val;
  val = balanceExpr().init();
  if(balanceExpr().next(val))
    expGen->generateSamplingExpr(val, &balExpr, returnFactorOffset);

  // Alias the sampleColumns() so that they reference the underlying
  // expressions directly. This is done to avoid having to generate and
  // execute a project expression that simply moves the columns from
  // one tupp to another to reflect the application of the sampledCol
  // function.
  //
//   ValueId valId;
//   for(valId = sampledColumns().init();
//       sampledColumns().next(valId);
//       sampledColumns().advance(valId))
//     {
//       MapInfo *mapInfoChild = localMapTable->getMapInfoAsIs
// 	(valId.getItemExpr()->child(0)->castToItemExpr()->getValueId());
//       GenAssert(mapInfoChild, "Sample::codeGen -- no child map info.");
//       Attributes *attr = mapInfoChild->getAttr();
//       MapInfo *mapInfo = localMapTable->addMapInfoToThis(valId, attr);
//       mapInfo->codeGenerated();
//     }
  // Check if any of the sampled columns are LOB columns. If so, return an error.
  ValueId valId;
  for(valId = sampledColumns().init();
      sampledColumns().next(valId);
      sampledColumns().advance(valId))
    {
      const NAType &colType = valId.getType();
      if ((colType.getFSDatatype() == REC_BLOB) ||
	  (colType.getFSDatatype() == REC_CLOB))
	{
	   *CmpCommon::diags() << DgSqlCode(-4322);
	   GenExit();
	}
    }
  // Now, remove all attributes from the map table except the
  // stuff in the local map table -- the result of this node.
  //
//  localMapTable->removeAll();

  // Generate the expression to evaluate predicate on the sampled row.
  //
  ex_expr *postPred = 0;
  if (!selectionPred().isEmpty()) {
    ItemExpr * newPredTree 
      = selectionPred().rebuildExprTree(ITM_AND,TRUE,TRUE);

    expGen->generateExpr(newPredTree->getValueId(), ex_expr::exp_SCAN_PRED,
			 &postPred);
  }

  // Construct the Sample TDB.
  //
  ComTdbSample *sampleTdb
    = new(space) ComTdbSample(NULL,
			      balExpr,
			      returnFactorOffset,
			      postPred,
			      childTdb,
			      givenCriDesc,
			      childCriDesc,
			      (queue_index)getDefault(GEN_SAMPLE_SIZE_DOWN),
			      (queue_index)getDefault(GEN_SAMPLE_SIZE_UP));
  generator->initTdbFields(sampleTdb);

  if(!generator->explainDisabled()) {
    generator->
      setExplainTuple(addExplainInfo(sampleTdb,
                                     childExplainTuple,
                                     0,
                                     generator));
  }

  generator->setCriDesc(givenCriDesc, Generator::DOWN);
  generator->setCriDesc(childCriDesc, Generator::UP);
  generator->setGenObj(this, sampleTdb);

  return 0;
}
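
At runtime the generated balance expression decides, row by row, whether a sampled row is returned to the parent. The sketch below only illustrates that per-row Bernoulli effect with standard C++; it is a hypothetical helper, not the executor's ex_expr machinery, and it does not cover every sampling method this operator supports.

#include <random>
#include <vector>

// Illustration only: keep each input row independently with the
// requested probability, roughly what a random-sampling balance
// expression achieves per row.
template <typename Row>
std::vector<Row> sampleRows(const std::vector<Row> &input, double probability)
{
  std::mt19937 gen(std::random_device{}());
  std::bernoulli_distribution keep(probability);
  std::vector<Row> out;
  for (const Row &row : input)
    if (keep(gen))
      out.push_back(row);
  return out;
}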