// ItmSeqNotTHISFunction::preCodeGen
//
// Transforms the NOT THIS sequence function into an OFFSET of its child
// that uses the ROWS SINCE counter saved in the ExpGenerator. This allows
// the entire part of the expression that changes with each history row to be
// calculated inside a single OFFSET expression. All other parts of the
// expression are below THIS, i.e., in the current row.
//
// Note: NOT THIS expressions occur only within a ROWS SINCE.
//
// EXAMPLE:
//   select runningsum(this(a)), 
//          rows since (this (b) > a * (c+5))  
//          from iTab2 sort by a;
//
//          rows since      ----->  becomes:     rows since
//                |                                    |
//                >                                    >
//               /  \                                 /  \
//           this   not this                      this      OFFSET                     
//             /          \                         /        /  \                      
//            b            *                       b        *   <not THIS Loop counter>
//                        / \                              / \                         
//                       a   +                            a   +                        
//                          / \                              / \    
//                         c   5                            c   5
//                                                  
//
ItemExpr *ItmSeqNotTHISFunction::preCodeGen(Generator *generator)
{
  if (nodeIsPreCodeGenned())
    return this;
  markAsPreCodeGenned();

  // Get some local handles...
  //
  CollHeap *wHeap = generator->wHeap();
  ItemExpr *itmChild = child(0)->castToItemExpr();
  ItemExpr *savedRowsSinceCounter =
                 generator->getExpGenerator()->getRowsSinceCounter();

  GenAssert(savedRowsSinceCounter, "ItmSeqNotTHIS::preCodeGen -- ROWS SINCE counter is NULL.");

  // Generate the new OFFSET expression
  //
  ItemExpr *offExpr = new(wHeap) ItmSeqOffset(itmChild, savedRowsSinceCounter);
  ((ItmSeqOffset *)offExpr)->setIsOLAP(isOLAP());
  // Get value Ids and types for all of the items. Must do this typing before
  // replacing this value Id's item expression -- otherwise, the typing
  // will give a result different than the type already computed for this
  // sequence function.
  //
  offExpr->synthTypeAndValueId(TRUE);

  // Replace the original value ID with the new expression.
  //
  getValueId().replaceItemExpr(offExpr);

  // Return the preCodeGen of the new OFFSET expression.
  //
  return offExpr->preCodeGen(generator);
}
// is any literal in this expr safely coercible to its target type?
NABoolean ItemExpr::isSafelyCoercible(CacheWA &cwa) const
{
  if (cwa.getPhase() >= CmpMain::BIND) {
    Int32 arity = getArity();
    for (Int32 x = 0; x < arity; x++) {
      if (!child(x)->isSafelyCoercible(cwa)) { 
        return FALSE; 
      }
    }
    if (arity == 2) {
      // we have to disallow caching of the following types of exprs:
      //   expr + 123456789012345678901234567890
      //   expr || 'overlylongstringthatwouldoverflow'
      ItemExpr *left = child(0), *right = child(1);
      if (left->getOperatorType() == ITM_CONSTANT) {
        if (right->getOperatorType() == ITM_CONSTANT) {
          // "10 + 1" should be safely coercible
          return TRUE;
        } else {
          return ((ConstValue*)left)->canBeSafelyCoercedTo
            (right->getValueId().getType());
        }
      }      
      else if (right->getOperatorType() == ITM_CONSTANT) {
        return ((ConstValue*)right)->canBeSafelyCoercedTo
          (left->getValueId().getType());
      }      
      // else both are nonliterals; fall thru
    }
    // else nondyadic expr; fall thru
    return TRUE; 
  }
  return FALSE;
}
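// The check above leans on ConstValue::canBeSafelyCoercedTo() to decide
// whether a literal fits its target type. The following is a rough,
// hypothetical sketch of that idea for exact numerics only (the comment
// above shows the real check also has to cover character literals): a
// literal is "safe" when its decimal digit count does not exceed the
// target's precision. The names below are invented for illustration and are
// not part of the ConstValue/NAType interface.
#include <cassert>
#include <cstdint>
#include <cstdlib>

static bool fitsExactNumericTarget(int64_t literalValue, int targetPrecision)
{
  // Count the literal's decimal digits and compare against the number of
  // digits the target type can hold.
  int digits = 1;
  for (int64_t v = std::llabs(literalValue); v >= 10; v /= 10)
    digits++;
  return digits <= targetPrecision;
}

int main()
{
  assert(fitsExactNumericTarget(12345, 9));         // 5 digits fit in 9
  assert(!fitsExactNumericTarget(1234567890LL, 9)); // 10 digits do not
  return 0;
}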
//----------------------------------------------------------------------------
// if the aggregation is MAX(A) then we can create two types of columns:
// INS_MAX_A = MAX(CASE @OP WHEN -1 THEN null ELSE SYS_DELTA.A).
// DEL_MAX_A = MAX(CASE @OP WHEN  1 THEN null ELSE SYS_DELTA.A).
// The 1 and -1 values will be referred to as constOpValue.
ItemExpr *MavRelRootBuilder::buildExtraMinMaxExpr(const ItemExpr  *pMinMaxExpr, 
						  const NAString&  colName,
						  extraColumnType  columnType) const
{
  // if the pMinMaxExpr is the MAX(A) then pAggregationSubject is A.
  ItemExpr *pAggregationSubject = pMinMaxExpr->child(0);
  ItemExpr *copyOfAggSubject = pAggregationSubject->copyTree(heap_);
  ColReference *opCol = new(heap_) 
    ColReference(new(heap_) ColRefName(MavBuilder::getVirtualOpColumnName()));
  ConstValue *pConstOpValue = new(heap_) 
    SystemLiteral( (columnType == AGG_FOR_DELETE) ? 1 : -1);
 
  Case *pCase = new(heap_) 
    Case(NULL, new(heap_)
	 IfThenElse(new(heap_) BiRelat(ITM_EQUAL, opCol, pConstOpValue), 
		    new(heap_) SystemLiteral(), // NULL
		    copyOfAggSubject));
  Aggregate *pNewAggregate = new(heap_)
    Aggregate(pMinMaxExpr->getOperatorType(), pCase);

  ColRefName *extraColName = 
    createExtraAggName(colName, columnType);

  ItemExpr *result = new(heap_) RenameCol(pNewAggregate, extraColName);
  return result;
} // buildExtraMinMaxExpr
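// As a concrete (illustrative only) instance of the expression built above,
// for an aggregate MAX(QTY) over a hypothetical column QTY:
//   INS_MAX_QTY = MAX(CASE WHEN @OP = -1 THEN NULL ELSE SYS_DELTA.QTY END)
//   DEL_MAX_QTY = MAX(CASE WHEN @OP =  1 THEN NULL ELSE SYS_DELTA.QTY END)
// i.e. each extra column turns the rows with the excluded @OP value into
// NULLs so they do not affect the MIN/MAX result.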
// A transformation method for protecting sequence functions from being
// skipped due to short-circuit evaluation.
//
void ItmScalarMinMax::protectiveSequenceFunctionTransformation
(Generator *generator)
{
  // Recurse on the children
  //
  ItemExpr::protectiveSequenceFunctionTransformation(generator);

  // Remove the original value id from the node being transformed and
  // assign it a new value id.
  //
  ValueId id = getValueId();
  setValueId(NULL_VALUE_ID);
  synthTypeAndValueId(TRUE);

  // Construct the new subtree.
  //
  // SCALAR_MIN/MAX -- force evaluation of both children
  //
  // SCALAR(LEFT_CHILD, RIGHT_CHILD) ==>
  //   BLOCK(BLOCK(LEFT_CHILD, RIGHT_CHILD), 
  //         SCALAR(LEFT_CHILD, RIGHT_CHILD))
  // 
  ItemExpr *block = new(generator->wHeap()) ItmBlockFunction
    (new(generator->wHeap()) ItmBlockFunction(child(0), child(1)), this);
  
  // Replace the old expression with the new expression for the 
  // original value id
  //
  id.replaceItemExpr(block);

  // Run the new expression through type and value id synthesis
  //
  block->synthTypeAndValueId(TRUE);
}
// A transformation method for protecting sequence functions from being
// skipped due to short-circuit evaluation.
//
void BiLogic::protectiveSequenceFunctionTransformation(Generator *generator)
{
  // Recurse on the children
  //
  ItemExpr::protectiveSequenceFunctionTransformation(generator);

  // Remove the original value id from the node being transformed and
  // assign it a new value id.
  //
  ValueId id = getValueId();
  setValueId(NULL_VALUE_ID);
  synthTypeAndValueId(TRUE);

  // Construct the new subtree.
  //
  // AND/OR -- force right child evaluation
  //
  // LOGIC(LEFT_CHILD, RIGHT_CHILD) ==>
  //   BLOCK(RIGHT_CHILD, LOGIC(LEFT_CHILD, RIGHT_CHILD))
  //
  ItemExpr *block = new(generator->wHeap()) ItmBlockFunction(child(1), this);

  // Replace the old expression with the new expression for the 
  // original value id
  //
  id.replaceItemExpr(block);

  // Run the new expression through type and value id synthesis
  //
  block->synthTypeAndValueId(TRUE);
}
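// The motivation for the BLOCK wrapping in the two transformations above can
// be seen with ordinary C++ short-circuiting. This is a toy illustration
// only (no compiler classes involved): a sequence function must update its
// history state on every row, so it cannot be left on the side of an AND/OR
// that short-circuit evaluation may skip.
#include <iostream>

int main()
{
  int historyUpdates = 0;
  auto seqFunc = [&]() { ++historyUpdates; return false; };

  bool left = false;

  // Plain short-circuit AND: seqFunc() is never evaluated.
  bool unprotected = left && seqFunc();

  // The BLOCK(RIGHT_CHILD, LOGIC(...)) idea: evaluate the right child first,
  // purely for its side effects, then evaluate the logical operator.
  bool rightValue = seqFunc();          // forced evaluation
  bool protectedResult = left && rightValue;

  std::cout << unprotected << protectedResult
            << " updates=" << historyUpdates << std::endl;  // 00 updates=1
  return 0;
}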
ItemExpr *
addConvNode(ItemExpr *childExpr,
            ValueIdMap *mapping,
            CollHeap *wHeap)

{
  if(childExpr->getOperatorType() != ITM_CONVERT &&
     !childExpr->isASequenceFunction()) {

    ValueId topValue;
    mapping->mapValueIdUp(topValue,
                          childExpr->getValueId());
    if(topValue == childExpr->getValueId()) {

      // add the convert node
      ItemExpr *newChild = new(wHeap) Convert (childExpr);
      newChild->synthTypeAndValueId(TRUE);
      mapping->addMapEntry(newChild->getValueId(),
                           childExpr->getValueId());
      return newChild;
    } else {
      return topValue.getItemExpr();
    }
  }
  return childExpr;
}
// change literals of a cacheable query into input parameters 
ItemExpr* ItemList::normalizeListForCache
(CacheWA& cwa, BindWA& bindWA, ItemList *other)
{
  Int32 arity = getArity();
  if (cwa.getPhase() >= CmpMain::BIND && 
      other && arity == other->getArity()) {
    for (Int32 x = 0; x < arity; x++) {
      ItemExpr *leftC = child(x), *rightC = other->child(x);
      OperatorTypeEnum leftO = leftC->getOperatorType();
      OperatorTypeEnum rightO = rightC->getOperatorType();
      if (leftO == ITM_BASECOLUMN && rightO == ITM_CONSTANT) {
        parameterizeMe(cwa, bindWA, other->child(x), (BaseColumn*)leftC,
                       (ConstValue*)rightC);
      }
      else if (rightO == ITM_BASECOLUMN && leftO == ITM_CONSTANT) {
        parameterizeMe(cwa, bindWA, child(x), (BaseColumn*)rightC,
                       (ConstValue*)leftC);
      }
      else if (leftO == ITM_ITEM_LIST && rightO == ITM_ITEM_LIST) {
        child(x) = ((ItemList*)leftC)->normalizeListForCache
          (cwa, bindWA, (ItemList*)rightC);
      }
    }
  }
  return this;
}
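// A toy illustration (invented types, not compiler code) of the rewrite
// performed above: the two item lists are walked position by position, and
// wherever a base column on one side lines up with a literal on the other,
// the literal is replaced by an input parameter, e.g. (a, b) = (1, 'x')
// becomes (a, b) = (?, ?).
#include <iostream>
#include <string>
#include <vector>

enum Kind { COLUMN, CONSTANT, PARAM };
struct Node { Kind kind; std::string text; };

static void parameterizeLists(std::vector<Node> &left, std::vector<Node> &right)
{
  for (size_t i = 0; i < left.size() && i < right.size(); i++) {
    if (left[i].kind == COLUMN && right[i].kind == CONSTANT)
      right[i] = { PARAM, "?" };           // literal paired with a column
    else if (right[i].kind == COLUMN && left[i].kind == CONSTANT)
      left[i] = { PARAM, "?" };
  }
}

int main()
{
  std::vector<Node> cols = { { COLUMN, "a" }, { COLUMN, "b" } };
  std::vector<Node> vals = { { CONSTANT, "1" }, { CONSTANT, "'x'" } };
  parameterizeLists(cols, vals);
  std::cout << vals[0].text << " " << vals[1].text << std::endl;  // ? ?
  return 0;
}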
// -----------------------------------------------------------------------
// Given a column list providing identifiers for columns of this table,
// this method returns a list of VEG expressions and/or base columns that
// show the equivalence of base columns with index columns.
// -----------------------------------------------------------------------
void TableDesc::getEquivVEGCols (const ValueIdList& columnList,
                                 ValueIdList &VEGColumnList) const
{
    for (CollIndex i=0; i < columnList.entries(); i++)
    {
        ItemExpr   *ie = columnList[i].getItemExpr();
        BaseColumn *bc = NULL;

        switch (ie->getOperatorType())
        {
        case ITM_BASECOLUMN:
            bc = (BaseColumn *) ie;
            break;
        case ITM_INDEXCOLUMN:
            bc = (BaseColumn *) ((IndexColumn *) ie)->getDefinition().
                 getItemExpr();
            CMPASSERT(bc->getOperatorType() == ITM_BASECOLUMN);

            break;
        default:
            ABORT("Invalid argument to TableDesc::getEquivVEGCols()\n");
        }

        CMPASSERT(bc->getTableDesc() == this);
        VEGColumnList.insert(getColumnVEGList()[bc->getColNumber()]);
    }
}
// is this entire expression cacheable after this phase?
NABoolean Tuple::isCacheableExpr(CacheWA& cwa)
{
  // we do not call RelExpr::isCacheableExpr here because it's redundant
  // -- Tuple is a leaf node and has no predicates.

  ItemExpr *tExpr = tupleExprTree() ? tupleExprTree() :
    tupleExpr_.rebuildExprTree();
  return tExpr->isCacheableExpr(cwa);
}
// ItmSeqOffset::preCodeGen
//
// Casts the second child to SqlInt.
//
ItemExpr *ItmSeqOffset::preCodeGen(Generator *generator)
{
  if (nodeIsPreCodeGenned())
    return this;
  
  CollHeap *wHeap = generator->wHeap();

  // The following code is being disabled (0 && ...) since it will
  // sometimes incorrectly think that the output of a tuple list of
  // constants is a single constant.  For example:
  //   SELECT a, b, c, MOVINGSUM(c,b) as MSUM, MOVINGAVG(c,b) as MAVG
  //   FROM (values
  //             (1,1, 1),
  //             (2,0, 2),
  //             (3,2, NULL),
  //             (4,0, 6),
  //             (5,3, 7)) as T(a,b,c)
  //   SEQUENCE BY a;
  //
  // For this query it will think that the offset index (b) is a
  // constant and it will use the value 3 for the
  // offsetConstantValue_.
  //
  if (0 && getArity() > 1)
  {
    NABoolean negate;
    ConstValue *cv = child(1)->castToConstValue(negate);
    if (cv AND cv->canGetExactNumericValue())
      {
        Lng32 scale;
        Int64 value = cv->getExactNumericValue(scale);

        if(scale == 0 && value >= 0 && value < INT_MAX) 
          {
            value = (negate ? -value : value);
            offsetConstantValue_ = (Int32)value;
            child(1) = NULL;
          }
      }
  }
    
  if (getArity() > 1)
  {

    const NAType &cType = child(1)->getValueId().getType();

    // (must be) signed; nulls allowed (if allowed by child1)
    ItemExpr *castExpr = new (wHeap) Cast(child(1),
                                          new (wHeap)
                                          SQLInt(wHeap, TRUE, cType.supportsSQLnullLogical()));
    castExpr->synthTypeAndValueId(TRUE);
    child(1) = castExpr;
  }
  return ItemExpr::preCodeGen(generator);
}
// is this entire expression cacheable after this phase?
NABoolean Join::isCacheableExpr(CacheWA& cwa)
{
  if (cwa.getPhase() >= CmpMain::BIND) {
    // must first descend to scans to get cwa.numberOfScans_ 
    if (!RelExpr::isCacheableExpr(cwa)) {
      return FALSE;
    }
    if (isCacheableNode(cwa.getPhase())) { 
      cwa.setConditionallyCacheable();
    }
    // if we allow joins of views to be cached, query caching cannot 
    // distinguish between (see note at bottom of cachewa.h)
    //   select avg(f.a) from v f, v s group by f.b;
    //   select avg(s.a) from v f, v s group by f.b;
    //   select avg(t.a) from v f, t   group by f.b;
    // assuming v is "create view v as select * from t". We avoid
    // false cache hits by detecting the possible occurrence of such 
    // view joins here and later using cwa.isViewJoin_ to include
    // their query texts into their cache keys.
    //
    // A view is represented by a renamed table with isView() returning
    // TRUE.

    RelExpr *c0 = child(0);
    RelExpr *c1 = child(1);
    if ((c0->getOperatorType() == REL_RENAME_TABLE &&
        ((RenameTable *)c0)->isView() == TRUE)
        ||
        (c1->getOperatorType() == REL_RENAME_TABLE &&
        ((RenameTable *)c1)->isView() == TRUE)) {
      cwa.foundViewJoin();
    }
    // check its join predicate
    ItemExpr *pred = joinPredTree_ ? joinPredTree_ :
      joinPred_.rebuildExprTree();
    if (pred) {
      cwa.setHasPredicate();
      // is join predicate cacheable?
      if (pred->hasNoLiterals(cwa)) {
        // predicate with no literals is cacheable
      }
      else {
        cwa.setPredHasNoLit(FALSE);
        if (!pred->isCacheableExpr(cwa)) {
          // a non-cacheable predicate renders Join non-cacheable.
          setNonCacheable();
          return FALSE;
        }
      }
    }
    return TRUE; // join may be cacheable
  }
  return FALSE;
}
// PhysSequence::computeHistoryAttributes
//
// Helper function to compute the attributes for the history buffer based
// on the items projected from the child and the computed history items.
// Also adds the attribute information to the map table.
//
void
PhysSequence::computeHistoryAttributes(Generator *generator,
                                       MapTable *localMapTable, 
                                       Attributes **attrs,
                                       const ValueIdSet &historyIds) const
{
  // Get a local handle on some of the generator objects.
  //
  CollHeap *wHeap = generator->wHeap();

  // Populate the attribute vector with the flattened list of sequence 
  // functions and/or sequence function arguments that must be in the
  // history row. Add convert nodes for the items that are not sequence
  // functions to force them to be moved into the history row.
  //
  if(NOT historyIds.isEmpty())
    {
      Int32 i = 0;
      ValueId valId;

      for (valId = historyIds.init();
           historyIds.next(valId);
           historyIds.advance(valId))
        {
          // If this is not a sequence function, then insert a convert
          // node.
          //
          if(!valId.getItemExpr()->isASequenceFunction())
             {
               // Get a handle on the original expression and erase
               // the value ID.
               //
               ItemExpr *origExpr = valId.getItemExpr();
               origExpr->setValueId(NULL_VALUE_ID);
               origExpr->markAsUnBound();

               // Construct the cast expression with the original expression
               // as the child -- must have undone the child value ID to
               // avoid recursion later.
               //
               ItemExpr *castExpr = new(wHeap) 
                 Cast(origExpr, &(valId.getType()));

               // Replace the expression for the original value ID and then
               // synthesize the types and value ID for the new expression.
               //
               valId.replaceItemExpr(castExpr);
               castExpr->synthTypeAndValueId(TRUE);
             }
          attrs[i++] = (generator->addMapInfoToThis(localMapTable, valId, 0))->getAttr();
        }
    }
} // PhysSequence::computeHistoryAttributes
// is this entire expression cacheable after this phase?
NABoolean RelExpr::isCacheableExpr(CacheWA& cwa)
{
  switch (cwa.getPhase()) {
  case CmpMain::PARSE:
  case CmpMain::BIND: {
    // does query have too many ExprNodes?
    if (cwa.inc_N_check_still_cacheable() == FALSE) {
      // yes. query with too many ExprNodes is not cacheable.
      return FALSE;
    }
    if (isNonCacheable()) { // this node is not cacheable
      return FALSE; // so the entire expression is not cacheable
      // don't mark this node non-cacheable because this
      // RelExpr may be cacheable after the next phase.
    }
    if (isCacheableNode(cwa.getPhase())) { 
      // must be an INSERT, UPDATE, DELETE, or SELECT node;
      // so, mark this expression as conditionally cacheable.
      cwa.setConditionallyCacheable();
    }
    // must descend to scans to get cwa.numberOfScans_ 
    if (!cacheableKids(cwa)) {
      return FALSE;
    }
    // this node is either cacheable or maybecacheable
    // check its selection predicate
    ItemExpr *pred = selPredTree() ? selPredTree() :
      getSelectionPred().rebuildExprTree();
    if (pred) {
      cwa.setHasPredicate();
      // is selection predicate cacheable?
      if (pred->hasNoLiterals(cwa)) {
        // predicate with no literals is cacheable
      }
      else {
        cwa.setPredHasNoLit(FALSE);
        if (!pred->isCacheableExpr(cwa)) {
          // a non-cacheable selection predicate 
          // renders entire RelExpr non-cacheable.
          setNonCacheable();
          return FALSE;
        }
      }
    }
    return TRUE; // RelExpr may be cacheable
  }
  default: {
    const NABoolean notYetImplemented = FALSE;
    CMPASSERT(notYetImplemented);
    return FALSE;
  }
  }
}
// append an ascii-version of Join into cachewa.qryText_
void Join::generateCacheKey(CacheWA &cwa) const
{
  RelExpr::generateCacheKeyNode(cwa);
  if (isNaturalJoin_) {
    cwa += " natj "; 
  }
  ItemExpr *pred = joinPredTree_ ? joinPredTree_ :
    joinPred_.rebuildExprTree();
  if (pred) { 
    cwa += " joinPred:";
    pred->generateCacheKey(cwa); 
  }
  generateCacheKeyForKids(cwa);
}
// is any literal in this expr safely coercible to its target type?
NABoolean Cast::isSafelyCoercible(CacheWA &cwa) const
{
  if (cwa.getPhase() >= CmpMain::BIND) {
    ItemExpr *opd = child(0);
    if (!opd->isSafelyCoercible(cwa)) { 
      return FALSE; 
    }
    if (opd->getOperatorType() == ITM_CONSTANT) {
      return ((ConstValue*)opd)->canBeSafelyCoercedTo(*type_);
    }      
    return TRUE; 
  }
  return FALSE;
}
// is this entire expression cacheable after this phase?
NABoolean GroupByAgg::isCacheableExpr(CacheWA& cwa)
{
  // descend to scans early to get cwa.numberOfScans_ 
  if (!RelExpr::isCacheableExpr(cwa)) {
    return FALSE;
  }
  // is the group by col/expr cacheable?
  ItemExpr *grpExpr = groupExprTree_ ? groupExprTree_ :
    groupExpr_.rebuildExprTree(ITM_ITEM_LIST);
  if (grpExpr && !grpExpr->isCacheableExpr(cwa)) { 
    return FALSE; 
  }
  return TRUE; // may be cacheable
}
void
RelSequence::addCancelExpr(CollHeap *wHeap)
{
  ItemExpr *cPred = NULL;

  if (this->partition().entries() > 0)
  {
    return;
  }
  if(cancelExpr().entries() > 0) 
  {
    return;
  }

  for(ValueId valId = selectionPred().init();
      selectionPred().next(valId);
      selectionPred().advance(valId)) 
  {
    ItemExpr *pred = valId.getItemExpr();

    // Look for preds that select a prefix of the sequence.
    // Rank() < const; Rank <= const; const > Rank; const >= Rank
    ItemExpr *op1 = NULL;
    ItemExpr *op2 = NULL;

    if(pred->getOperatorType() == ITM_LESS ||
       pred->getOperatorType() == ITM_LESS_EQ) 
    {
      op1 = pred->child(0);
      op2 = pred->child(1);
    }
    else if (pred->getOperatorType() == ITM_GREATER ||
             pred->getOperatorType() == ITM_GREATER_EQ) 
    {
      op1 = pred->child(1);
      op2 = pred->child(0);
    }
    NABoolean negate;
    if (op1 && op2 &&
        (op2->getOperatorType() == ITM_CONSTANT || 
         op2->getOperatorType() == ITM_DYN_PARAM)  &&
         (op1->getOperatorType() == ITM_OLAP_RANK ||
          op1->getOperatorType() == ITM_OLAP_DRANK ||
          (op1->getOperatorType() == ITM_OLAP_COUNT &&
           op1->child(0)->getOperatorType() == ITM_CONSTANT &&
           !op1->child(0)->castToConstValue(negate)->isNull())))
    {
       cPred = new(wHeap) UnLogic(ITM_NOT, pred);
       // break at first occurrence
       break;
    }
  }
  
  if(cPred) 
  {
    cPred->synthTypeAndValueId(TRUE);
    cancelExpr().insert(cPred->getValueId());
  }
}
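// For example (illustrative only), with a selection predicate of the form
//   RANK() <= 10
// over the sequence, the cancel expression inserted above is
// NOT(RANK() <= 10): once the rank passes 10 it can never drop back below
// it, so the operator may stop reading further rows.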
// append an ascii-version of Tuple into cachewa.qryText_
void Tuple::generateCacheKey(CacheWA &cwa) const
{
  // Do not call RelExpr::generateCacheKey(cwa) here because it's redundant.
  // It does the same things as the code below. RelExpr::generateCacheKey() 
  // calls Tuple::getText() which has logic similar to the following code.
  ItemExpr *tExpr = tupleExprTree() ? tupleExprTree() :
    tupleExpr_.rebuildExprTree();
  if (tExpr) { 
    cwa += " tupExpr:"; 
    tExpr->generateCacheKey(cwa); 
  }
  else {
    RelExpr::generateCacheKey(cwa);
  }
}
void PhysSequence::transformOlapFunctions(CollHeap *wHeap)
{
  for(ValueId valId = sequenceFunctions().init();
      sequenceFunctions().next(valId);
      sequenceFunctions().advance(valId)) 
  {
    
    ItemExpr * itmExpr = valId.getItemExpr();

    //NAType *itmType = itmExpr->getValueId().getType().newCopy(wHeap);

    if (itmExpr->isOlapFunction())
    {
      NAType *itmType = itmExpr->getValueId().getType().newCopy(wHeap);

      itmExpr = ((ItmSeqOlapFunction*)itmExpr)->transformOlapFunction(wHeap);

      CMPASSERT(itmExpr);
      if(itmExpr->getValueId() != valId)
      {
	itmExpr = new (wHeap) Cast(itmExpr, itmType);
	itmExpr->synthTypeAndValueId(TRUE);
	valId.replaceItemExpr(itmExpr);
	itmExpr->getValueId().changeType(itmType);//????
      }
    }
      itmExpr->transformOlapFunctions(wHeap);
  }
}
// index access (both reference and value)
ItemExpr * ItemExprTreeAsList::operator [] (CollIndex i)
{
  
  //     think of three different cases:
  // 
  //     a) i is out of range (< 0 or >= #entries)
  // 
  //     b) the node we are looking for is neither the only nor the last
  //        node in the backbone
  // 
  //     c) we are looking for the last element in the backbone (which
  //        may be the only element)
  // 

  ItemExpr *aNodePtr = *treePtr_;
  Int32 j = (Int32) i; // j may become negative, i may be unsigned

  if (j < 0)
    return NULL; // case a

  if (aNodePtr->getOperatorType() != operator_ AND
      j == 0)
    return aNodePtr; // case c (the only element)

  while (aNodePtr != NULL AND j >= 0)
    {
      if (aNodePtr->getOperatorType() == operator_ AND
	  aNodePtr->getArity() >= 2)
	{
	  if (shape_ == LEFT_LINEAR_TREE)
	    {
	      if (j == 0)
		aNodePtr = aNodePtr->child(1); // case b
	      else
		aNodePtr = aNodePtr->child(0);
	    }
	  else if (shape_ == RIGHT_LINEAR_TREE)
	    {
	      if (j == 0)
		aNodePtr = aNodePtr->child(0); // case b
	      else
		aNodePtr = aNodePtr->child(1);
	    }
	  else
	    ABORT("can't do bushy trees");
	}
      j--;
    }

  // if we are looking for the only element, the while loop
  // is not executed at all

  return aNodePtr;

}
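// A minimal standalone sketch (invented types) of the right-linear case
// handled above: element i of a right-linear backbone is reached by
// following the "rest of the list" child i times and then taking the
// element child; the tail node is itself the last (possibly only) element.
#include <iostream>

struct ListNode {
  const char *value;           // meaningful only for element (leaf) nodes
  ListNode *element, *rest;    // both non-null only for backbone nodes
};

static ListNode *elementAt(ListNode *root, int i)
{
  ListNode *p = root;
  while (p && p->element && p->rest && i > 0) {    // walk the backbone
    p = p->rest;
    i--;
  }
  if (!p) return 0;
  return (p->element && p->rest) ? p->element : p; // case b vs. case c
}

int main()
{
  ListNode a = { "a", 0, 0 };
  ListNode b = { "b", 0, 0 };
  ListNode c = { "c", 0, 0 };
  ListNode bc = { 0, &b, &c };    // backbone node holding b, then c
  ListNode abc = { 0, &a, &bc };  // backbone node holding a, then (b, c)
  std::cout << elementAt(&abc, 0)->value << elementAt(&abc, 1)->value
            << elementAt(&abc, 2)->value << std::endl;  // abc
  return 0;
}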
// A transformation method for protecting sequence functions from being
// skipped due to short-circuit evaluation.
//
void Case::protectiveSequenceFunctionTransformation(Generator *generator)
{
  // Recurse on the children
  //
  ItemExpr::protectiveSequenceFunctionTransformation(generator);

  // Remove the original value id from the node being transformed and
  // assign it a new value id.
  //
  ValueId id = getValueId();
  setValueId(NULL_VALUE_ID);
  synthTypeAndValueId(TRUE);

  // Construct the new subtree.
  //
  // Case -- force evaluation of all the WHEN, THEN and ELSE parts
  //
  // CASE(IFE1(W1,T1,IFE2(W2,T2,IFE3(...)))) ==>
  //   BLOCK(BLOCK(BLOCK(W1,T1),BLOCK(W2,T2)), CASE(...))
  //
  // Descend the ITM_IF_THEN_ELSE tree pulling out each WHEN and THEN pair.
  // Mate each pair with a block and attach them to the protected block, 
  // which contains all of the WHEN/THEN pairs for the entire tree.
  // Also, pull out any CASE operands and attach them to the protected
  // block as well.
  //
  ItemExpr *block = NULL;
  ItemExpr *ife = child(0);
  for(; (ife != NULL) && (ife->getOperatorType() == ITM_IF_THEN_ELSE);
       ife = ife->child(2))
    {
      ItemExpr *sub = new(generator->wHeap())
	ItmBlockFunction(ife->child(0), ife->child(1));
      if(block)
	block = new(generator->wHeap()) ItmBlockFunction(sub, block);
      else
	block = sub;
    }      

  // Add the ELSE condition, if any, to the protected block
  //
  if(ife)
    block = new(generator->wHeap()) ItmBlockFunction(ife, block);

  // Construct the top-level block function. The left child is the protected
  // block, which contains all of the expressions that need to be
  // pre-evaluated. The right child is the original case statement.
  //
  block = new(generator->wHeap()) ItmBlockFunction(block, this);

  // Replace the old expression with the new expression for the
  // original id
  //
  id.replaceItemExpr(block);

  // Run the new expression through type and value id synthesis
  //
  block->synthTypeAndValueId(TRUE);
}
ItemExpr *ItmLagOlapFunction::preCodeGen(Generator *generator)
{
  if (nodeIsPreCodeGenned())
    return this;
  
  CollHeap *wHeap = generator->wHeap();
    
  if (getArity() > 1)
  {

    const NAType &cType = child(1)->getValueId().getType();
    ItemExpr *castExpr   = new (wHeap) Cast (child(1),
                                       new (wHeap)
                                       SQLInt(wHeap, TRUE, cType.supportsSQLnullLogical()));
    castExpr->synthTypeAndValueId(TRUE);
    child (1) = castExpr;
  }
  return ItemExpr::preCodeGen(generator);
}
void PhysSequence::addCheckPartitionChangeExpr( Generator *generator,
                                                NABoolean addConvNodes,
                                                ValueIdMap *origAttributes) 
{
  if(!(partition().entries() > 0)) 
  {
    return;
  }
  CollHeap * wHeap = generator->wHeap();
    
  if(addConvNodes && !origAttributes) 
  {
    origAttributes = new (wHeap) ValueIdMap();
  }

  ItemExpr * checkPartChng= NULL;
  for (CollIndex ix = 0; ix < partition().entries(); ix++)
  {
    ItemExpr *iePtr = partition().at(ix).getItemExpr();
    ItemExpr * convIePtr = addConvNode(iePtr, origAttributes,wHeap);

    movePartIdsExpr() += convIePtr->getValueId();
    ItemExpr * comp =  new (wHeap) BiRelat(ITM_EQUAL,
                                           iePtr,
                                           convIePtr,
                                           TRUE);
    if (!checkPartChng)
    {
      checkPartChng = comp;
    }
    else
    {
      checkPartChng = new (wHeap) BiLogic( ITM_AND,
                                           checkPartChng,
                                           comp);
    }
  } 

  checkPartChng->convertToValueIdSet(checkPartitionChangeExpr(),
                                      generator->getBindWA(),
                                      ITM_AND);
}
ValueIdSet TableDesc::getComputedColumns(NAColumnBooleanFuncPtrT fptr)
{
    ValueIdSet computedColumns;

    for (CollIndex j=0; j<getClusteringIndex()->getIndexKey().entries(); j++)
    {
        ItemExpr *ck = getClusteringIndex()->getIndexKey()[j].getItemExpr();

        if (ck->getOperatorType() == ITM_INDEXCOLUMN)
            ck = ((IndexColumn *) ck)->getDefinition().getItemExpr();

        CMPASSERT(ck->getOperatorType() == ITM_BASECOLUMN);

        NAColumn* x = ((BaseColumn *) ck)->getNAColumn();

        if (((*x).*fptr)())
            computedColumns += ck->getValueId();
    }
    return computedColumns;
}
// append an ascii-version of Insert into cachewa.qryText_
void Insert::generateCacheKey(CacheWA &cwa) const
{
  GenericUpdate::generateCacheKey(cwa);
  if (insertColTree_) { 
    cwa += " insCol:"; 
    insertColTree_->generateCacheKey(cwa); 
  }
  // order by clause is important
  ItemExpr *orderBy = orderByTree_ ? orderByTree_ :
    reqdOrder_.rebuildExprTree();
  if (orderBy) { 
    cwa += " order:"; 
    orderBy->generateCacheKey(cwa); 
  }

  const NATable *tbl;
  if (cwa.getPhase() >= CmpMain::BIND && 
      getTableDesc() && (tbl=getTableDesc()->getNATable()) != NULL) {
    // If PARTITION clause has been used we must reflect that in the key.
    if (tbl->isPartitionNameSpecified()) {
      cwa += " partition:";
      cwa += tbl->getClusteringIndex()->getFileSetName().getQualifiedNameAsString().data();
    }
    // If PARTITION range has been used we must reflect that in the key.
    else if (tbl->isPartitionRangeSpecified()) {
      cwa += " partition:";

      char str[100];
      sprintf(str, " from %d to %d", 
	      tbl->getExtendedQualName().getPartnClause().getBeginPartitionNumber() ,
	      tbl->getExtendedQualName().getPartnClause().getEndPartitionNumber());
      cwa += str;
    }
  }

  if (isUpsert())
    {
      cwa += " upsert:";
    }
}
// does this query's selection predicate list qualify query 
// to be cacheable after this phase?
NABoolean ItemList::isListOfCacheableSelPred
(CacheWA& cwa, ItemList *other) const
{
  Int32 arity = getArity();
  NABoolean result = FALSE;
  if (cwa.getPhase() >= CmpMain::BIND && 
      other && arity == other->getArity()) {
    // assume this is an AND list, so we need only one
    // cacheable conjunct to consider the list cacheable.
    for (Int32 x = 0; x < arity; x++) {
      ItemExpr *leftC = child(x), *rightC = other->child(x);
      OperatorTypeEnum leftO = leftC->getOperatorType();
      OperatorTypeEnum rightO = rightC->getOperatorType();
      BaseColumn *base;
      if (leftO == ITM_BASECOLUMN) {
        base = (BaseColumn*)leftC;
        if (base->isKeyColumnValue(*rightC)) {
          cwa.addToUsedKeys(base);
        }
        result = TRUE;
        continue;
      }
      else if (rightO == ITM_BASECOLUMN) {
        base = (BaseColumn*)rightC;
        if (base->isKeyColumnValue(*leftC)) {
          cwa.addToUsedKeys(base);
        }
        result = TRUE;
        continue;
      }
      else if (leftO == ITM_ITEM_LIST && rightO == ITM_ITEM_LIST &&
               ((ItemList*)leftC)->isListOfCacheableSelPred
               (cwa, (ItemList*)rightC)) {
        result = TRUE;
        continue;
      }
    }
  }
  return result;
}
ValueId NormWA::getEquivalentItmSequenceFunction(ValueId newSeqId)
{
  ValueId equivId = newSeqId;

  ItemExpr *newItem = newSeqId.getItemExpr();
  ItmSequenceFunction *newSeq = NULL;
  if(newItem->isASequenceFunction()) {
    newSeq = (ItmSequenceFunction *)newItem;
  }

  if(newSeq) {
    for(ValueId seqId = allSeqFunctions_.init(); allSeqFunctions_.next(seqId); 
      allSeqFunctions_.advance(seqId) ){
      ItemExpr *seq = seqId.getItemExpr();
      if(newSeq->isEquivalentForBinding(seq)){
	equivId = seqId;
	if(newSeq->origOpType() != seq->origOpType()) {
	  seq->setOrigOpType(seq->getOperatorType());
	}
	break;
      }
    }
  }

  allSeqFunctions_ += equivId;

  //
  return equivId;
}
//----------------------------------------------------------------------------
// The select list here is: (<fixed-count-cols>, <sum-col-refs>)
// The count cols become:
// Case( If <count-col> IS NULL THEN 0 ELSE <count-col>)
// Since the count cols were transformed from COUNT to SUM(@OP), they will
// now evaluate to NULL instead of 0 when no data is returned, for MAVs 
// without a GROUP BY clause. This is what is fixed here.
//----------------------------------------------------------------------------
RelRoot *MavRelRootBuilder::buildRootForNoGroupBy(RelExpr *topNode)
{
  ItemExprList selectList(heap_);

  for (CollIndex i=0; i<countCols_.entries(); i++)
  {
    const MVColumnInfo *currentMavColumn = countCols_[i];
    const NAString& colName = currentMavColumn->getColName();

    // Build a col reference to the SYS_DELTA column.
    ItemExpr *sysDeltaColExpr = new(heap_)
      ColReference(new(heap_) ColRefName(colName, deltaCorrName_));

    ItemExpr *condExpr = new(heap_)
      Case(NULL, new(heap_)
	IfThenElse(new(heap_) UnLogic(ITM_IS_NULL, sysDeltaColExpr),
		   new(heap_) SystemLiteral(0),
		   sysDeltaColExpr->copyTree(heap_)));

    ItemExpr *renamedColExpr = new(heap_) 
      RenameCol(condExpr, new(heap_) ColRefName(colName, deltaCorrName_));

    selectList.insert(renamedColExpr);
  }

  // Add the SUM cols.
  addColsToSelectList(sumCols_, selectList, deltaCorrName_);

  // Add the extra Min/Max columns.
  selectList.insert(extraMinMaxColRefs_);

  // The select list is ready. Create the root over topNode.
  RelRoot *newRoot = new(heap_) 
    RelRoot(topNode, REL_ROOT, selectList.convertToItemExpr());
  newRoot->setDontOpenNewScope();

  selectList.clear();
  return newRoot;
}  // MavRelRootBuilder::buildRootForNoGroupBy()
// append an ascii-version of RelExpr node into cachewa.qryText_
void RelExpr::generateCacheKeyNode(CacheWA &cwa) const
{
  // emit any "[firstn_sorted]" modifier
  if (firstNRows_ != -1) {
    char firstN[40]; 
    convertInt64ToAscii(((RelExpr*)this)->getFirstNRows(), firstN); 
    cwa += firstN; 
    cwa += " ";
  }
  // emit other "significant" parts of RelExpr
  cwa += getText();
  ItemExpr *pred = selPredTree() ? selPredTree() :
    getSelectionPred().rebuildExprTree();
  if (pred) { 
    cwa += " selPred:"; 
    pred->generateCacheKey(cwa); 
  }
  // make any optimizer hints part of the postbinder cache key so that
  // 2 cacheable queries with different optimizer hints do not match
  if (hint_) {
    CollIndex x, cnt=hint_->indexCnt();
    if (cnt > 0) {
      cwa += " xhint:";
      for (x = 0; x < cnt; x++) {
        cwa += (*hint_)[x].data();
        cwa += ",";
      }
    }
    char str[100];
    if (hint_->hasCardinality()) {
      sprintf(str, "card:%g", hint_->getCardinality());
      cwa += str;
    }
    if (hint_->hasSelectivity()) {
      sprintf(str, ",sel:%g", hint_->getSelectivity());
      cwa += str;
    }
  }
}
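// A toy illustration (invented types) of the point made in the hint-handling
// comment above: the post-binder cache key is just accumulated text, so
// anything that can change the chosen plan, such as an optimizer hint, must
// be appended to it, or two queries needing different plans would share one
// key.
#include <cassert>
#include <string>

static std::string makeKey(const std::string &queryText, const std::string &hint)
{
  std::string key = queryText;
  if (!hint.empty()) {
    key += " xhint:";   // same marker as used in the code above
    key += hint;
  }
  return key;
}

int main()
{
  // Same query text, different index hints => different cache keys.
  assert(makeKey("select * from t where a = ?", "IX1") !=
         makeKey("select * from t where a = ?", "IX2"));
  return 0;
}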
void HbaseSearchSpec::addColumnNames(const ValueIdSet& vs)
{
  // TEMP TEMP. Not all needed column names are being set up.
  // for now, return without populating result.
  // that will cause all columns to be retrieved.
  //return;

   for (ValueId vid = vs.init(); vs.next(vid); vs.advance(vid)) {
      ItemExpr* ie = vid.getItemExpr();

      NAString colName;
      if ( ie->getOperatorType() == ITM_BASECOLUMN ) {
	colName = ((BaseColumn*)ie)->getColName();
      } else
	if ( ie->getOperatorType() == ITM_INDEXCOLUMN ) {
	  colName = ((IndexColumn*)ie)->getNAColumn()->getIndexColName();
	}
      
      if (NOT colNames_.contains(colName))
	colNames_.insert(colName);
   }
}