// virtual destructor
ElemDDLColViewDef::~ElemDDLColViewDef()
{
  // delete all children
  for (Int32 i = 0; i < getArity(); i++)
  {
    delete getChild(i);
  }
}
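
All of the parse-node destructors and accessors in this listing rely on the same contract: getArity() reports the number of child slots and getChild(i) returns each slot, possibly NULL. A minimal self-contained sketch of that contract, using a hypothetical Node class rather than any real class from these codebases:

#include <vector>

// Hypothetical illustration of the arity/child contract used above.
class Node
{
public:
  explicit Node(std::vector<Node *> children) : children_(children) {}

  // Number of child slots this node owns.
  int getArity() const { return (int)children_.size(); }

  // Child accessor; may return NULL for an empty slot.
  Node *getChild(int index) const { return children_[index]; }

  // Recursively delete the whole subtree, mirroring the destructors
  // in this listing. Deleting a NULL pointer is a safe no-op.
  virtual ~Node()
  {
    for (int i = 0; i < getArity(); i++)
      delete getChild(i);
  }

private:
  std::vector<Node *> children_;
};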
void MultiJoin::synthLogPropWithMJReuse(NormWA * normWAPtr)
{
  // Check to see whether this GA has already been associated
  // with a logExpr for synthesis.  If so, no need to resynthesize
  // for this equivalent log. expression.
  if (getGroupAttr()->existsLogExprForSynthesis())
  {
    Join * joinExprForSynth = 
      (Join *) getGroupAttr()->getLogExprForSynthesis();
      
    if(joinExprForSynth->isJoinFromMJSynthLogProp())
      return;
  }

  NABoolean reUseMJ = TRUE;

  CMPASSERT(jbbSubset_.getGB() == NULL_CA_ID);

  const CANodeIdSet & jbbcs = jbbSubset_.getJBBCs();

  // Instead of always picking the first JBBC as the right child
  // pick the one with minimum JBBC connections. This will avoid
  // all unnecessary crossproducts

  CANodeId jbbcRight;

  jbbcRight = jbbcs.getJBBCwithMinConnectionsToThisJBBSubset();

  CANodeIdSet right(jbbcRight);
  CANodeIdSet left(jbbcs);
  left -= jbbcRight;

  Join* join = splitSubset(*(left.jbbcsToJBBSubset()),
                           *(right.jbbcsToJBBSubset()),
                           reUseMJ);

  // If the left child is a MultiJoin, synthesize it using reuse. This
  // has to be done before join->synthLogProp() to avoid calling
  // MultiJoin::synthLogProp() on the left MultiJoin, because that
  // method does not reuse.
  if(left.entries() > 1)
  {
    RelExpr * leftRelExpr = join->child(0)->castToRelExpr();
    if(leftRelExpr &&
       leftRelExpr->getOperator() == REL_MULTI_JOIN)
      ((MultiJoin *) leftRelExpr)->synthLogPropWithMJReuse(normWAPtr);
  }

  join->synthLogProp(normWAPtr);

  join->setJoinFromMJSynthLogProp();

  getGroupAttr()->setLogExprForSynthesis(join);

  jbbSubset_.setSubsetMJ(this);

  CMPASSERT(getGroupAttr()->getNumJoinedTables() >= getArity());
}
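
The comment above motivates picking the JBBC with the fewest connections into the subset as the right child. A hedged sketch of just that selection step, with a plain vector of connection counts standing in for CANodeIdSet (a hypothetical helper, not the real getJBBCwithMinConnectionsToThisJBBSubset()):

#include <cstddef>
#include <vector>

// Hypothetical stand-in for getJBBCwithMinConnectionsToThisJBBSubset():
// given each candidate's number of join connections into the current
// subset, return the index of the candidate with the fewest.
size_t pickMinConnections(const std::vector<int> &connections)
{
  size_t best = 0;
  for (size_t i = 1; i < connections.size(); i++)
    if (connections[i] < connections[best])
      best = i;
  return best;
}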
void
StmtDDLInitializeSQL::setChild(Lng32 index, ExprNode * pChildNode)
{
  ComASSERT(index >= 0 AND index < getArity());
  if (pChildNode NEQ NULL)
    children_[index] = pChildNode->castToElemDDLNode();
  else
    children_[index] = NULL;
}
// A transformation method that protects sequence functions from being
// skipped due to short-circuit evaluation. This is the base class
// implementation, which simply recurses on the children unless they
// have already been code-generated.
//
void ItemExpr::protectiveSequenceFunctionTransformation(Generator *generator)
{
  for(Int32 i=0; i<getArity(); i++)
    {
      MapInfo *mapInfo = generator->getMapInfoAsIs(child(i));
      if(!mapInfo || !mapInfo->isCodeGenerated())
        child(i)->protectiveSequenceFunctionTransformation(generator);
    }
}
// synthesizeType
//
const NAType *ItmBitMuxFunction::synthesizeType() {
  Int32 size = 0;
  for(Int32 i=0; i<getArity(); i++) {
    const NAType &type = child(i)->getValueId().getType();
    size += type.getTotalSize();
  }

  return new (CmpCommon::statementHeap())
    SQLChar(CmpCommon::statementHeap(), size, FALSE);
}
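
synthesizeType() above simply folds the children's total byte sizes into one fixed-size CHAR. A worked sketch with invented child sizes (the real sizes come from NAType::getTotalSize()):

#include <cstdio>

int main()
{
  // Invented sizes for two children, e.g. a 4-byte INT and a CHAR(10).
  int childSizes[] = {4, 10};
  int size = 0;
  for (int s : childSizes)
    size += s;                          // concatenated width
  std::printf("SQLChar(%d)\n", size);   // prints SQLChar(14)
  return 0;
}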
short HostVar::codeGen(Generator * generator)
{
  Attributes ** attr;

  // A host variable generates no executor clause of its own;
  // genItemExpr() sets up its attributes, so there is nothing
  // further to do either way.
  if (generator->getExpGenerator()->genItemExpr(this, &attr, (1 + getArity()), -1) == 1)
    return 0;

  return 0;
}
short ItemList::codeGen(Generator * generator)
{
  for (short i=0; i<getArity(); i++)
    {
      child(i)->codeGen(generator);
    }

  return 0;
}
function* fn_sum::specialize(
   static_context* sctx,
   const std::vector<xqtref_t>& argTypes) const
{
  RootTypeManager& rtm = GENV_TYPESYSTEM;
  TypeManager* tm = sctx->get_typemanager();

  xqtref_t argType = argTypes[0];

  if (TypeOps::is_subtype(tm, *argType, *rtm.UNTYPED_ATOMIC_TYPE_STAR))
  {
    return (getArity() == 1 ?
            BUILTIN_FUNC(OP_SUM_DOUBLE_1) :
            BUILTIN_FUNC(OP_SUM_DOUBLE_2));
  }
  else if (TypeOps::is_subtype(tm, *argType, *rtm.DOUBLE_TYPE_STAR))
  {
    return (getArity() == 1 ?
            BUILTIN_FUNC(OP_SUM_DOUBLE_1) :
            BUILTIN_FUNC(OP_SUM_DOUBLE_2));
  }
  else if (TypeOps::is_subtype(tm, *argType, *rtm.FLOAT_TYPE_STAR))
  {
    return (getArity() == 1 ?
            BUILTIN_FUNC(OP_SUM_FLOAT_1) :
            BUILTIN_FUNC(OP_SUM_FLOAT_2));
  }
  else if (TypeOps::is_subtype(tm, *argType, *rtm.INTEGER_TYPE_STAR))
  {
    return (getArity() == 1 ?
            BUILTIN_FUNC(OP_SUM_INTEGER_1) :
            BUILTIN_FUNC(OP_SUM_INTEGER_2));
  }
  else if (TypeOps::is_subtype(tm, *argType, *rtm.DECIMAL_TYPE_STAR))
  {
    return (getArity() == 1 ?
            BUILTIN_FUNC(OP_SUM_DECIMAL_1) :
            BUILTIN_FUNC(OP_SUM_DECIMAL_2));
  }
  else
  {
    return NULL;
  }
}
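
fn_sum::specialize() above selects a builtin variant by testing the static type of the first argument, most specific case first; note that untypedAtomic input is summed as double, per the XQuery rules. A hedged sketch of the same dispatch shape, with a plain enum standing in for Zorba's type lattice:

// Hypothetical stand-ins for Zorba's type lattice and builtin table.
enum ArgType { UNTYPED, DOUBLE, FLOAT, INTEGER, DECIMAL, OTHER };
enum Builtin { SUM_DOUBLE, SUM_FLOAT, SUM_INTEGER, SUM_DECIMAL, NONE };

// Mirror of the if/else ladder in fn_sum::specialize: untyped atomics
// are summed as doubles, everything else maps to its own variant.
Builtin specializeSum(ArgType t)
{
  switch (t)
  {
    case UNTYPED:  // untypedAtomic is cast to double before summing
    case DOUBLE:   return SUM_DOUBLE;
    case FLOAT:    return SUM_FLOAT;
    case INTEGER:  return SUM_INTEGER;
    case DECIMAL:  return SUM_DECIMAL;
    default:       return NONE;  // caller keeps the generic function
  }
}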
short RelExpr::codeGen(Generator * generator)
{
  // Sometimes code generation does reach a generic RelExpr.  Just
  // codeGen the children and return.
  for (short i=0; i<getArity(); i++)
     child(i)->codeGen(generator);

  // Propagate the incoming (down) CRI descriptor back up unchanged.
  generator->setCriDesc((ex_cri_desc *)(generator->getCriDesc(Generator::DOWN)),
                        Generator::UP);

  return 0;
}
void
ElemDDLColDef::setChild(Lng32 index, ExprNode * pChildNode)
{
  ComASSERT(index >= 0 AND index < getArity());
  if (pChildNode NEQ NULL)
  {
    ComASSERT(pChildNode->castToElemDDLNode() NEQ NULL);
    children_[index] = pChildNode->castToElemDDLNode();
  }
  else
    children_[index] = NULL;
}
// is any literal in this expr safely coercible to its target type?
NABoolean UDFunction::isSafelyCoercible(CacheWA& cwa) const
{
  if (cwa.getPhase() >= CmpMain::BIND) {
    Int32 arity = getArity();
    for (Int32 x = 0; x < arity; x++) {
      if (!child(x)->isSafelyCoercible(cwa)) { 
        return FALSE; 
      }
    }
    return TRUE;
  }
  return FALSE;
}
// virtual destructor
ElemDDLPartitionRange::~ElemDDLPartitionRange()
{
  //
  // Only deletes the child parse node(s) added specifically
  // for this class.  The destructor of the base class
  // ElemDDLPartitionSystem does the deletion(s) of the other
  // child parse node(s).
  //
  for (Int32 i = ElemDDLPartitionSystem::getArity(); i < getArity(); i++)
  {
    delete getChild(i);
  }
}
ExprNode *
ElemDDLConstraintRI::getChild(Lng32 index)
{ 
  ComASSERT(index >= 0 AND index < getArity());
  if (index < ElemDDLConstraint::getArity())
  {
    return ElemDDLConstraint::getChild(index);
  }
  else
  {
    return children_[index];
  }
}
// return any Scan node from this RelExpr
Scan *RelExpr::getAnyScanNode() const
{
  if (getOperatorType() == REL_SCAN) { 
    return (Scan*)this; 
  }
  Scan *result = NULL;
  Int32 arity = getArity();
  for (Int32 x = 0; x < arity && !result; x++) {
    if (child(x)) { 
      result = child(x)->getAnyScanNode(); 
    }
  }
  return result;
}
void
ElemDDLPartitionByColumnList::setChild(Lng32 index, ExprNode * pChildNode)
{
  ComASSERT(index >= 0 AND index < getArity());
  if (pChildNode NEQ NULL)
  {
    ComASSERT(pChildNode->castToElemDDLNode() NEQ NULL);
    children_[index] = pChildNode->castToElemDDLNode();
  }
  else
  {
    children_[index] = NULL;
  }
}
ExprNode *
ElemDDLConstraintCheck::getChild(Lng32 index)
{
  ComASSERT(index >= 0 AND index < getArity());
  if (index < ElemDDLConstraint::getArity())
  {
    return ElemDDLConstraint::getChild(index);
  }
  else
  {
    ComASSERT(index EQU INDEX_SEARCH_CONDITION);
    return searchCondition_;
  }
}
ExprNode *
ElemDDLConstraintUnique::getChild(Lng32 index)
{ 
  ComASSERT(index >= 0 AND index < getArity());
  if (index < ElemDDLConstraint::getArity())
  {
    return ElemDDLConstraint::getChild(index);
  }
  else
  {
    ComASSERT(index EQU INDEX_COLUMN_NAME_LIST);
    return columnRefList_;
  }
}
//
// virtual destructor
//
ElemDDLConstraintUnique::~ElemDDLConstraintUnique()
{
  //
  // delete all child parse nodes added to this class
  //
  // Note that class ElemDDLConstraintUnique is derived from class
  // ElemDDLConstraint.  ~ElemDDLConstraint() deletes all child parse
  // nodes belonging to class ElemDDLConstraint, so the destructor
  // ~ElemDDLConstraintUnique() only needs to delete the additional
  // child parse nodes that belong only to class ElemDDLConstraintUnique.
  //
  for (Int32 index = ElemDDLConstraint::getArity(); index < getArity(); index++)
  {
    delete getChild(index);
  }
}
// this method is temporary
CostScalar MultiJoin::getChildrenDataFlow() const
{
  CostScalar childrenDataFlow(0);

  UInt32 minRecordLength =
    (ActiveSchemaDB()->getDefaults()).getAsLong(COMP_INT_50);
  Int32 arity = getArity();
  for (Int32 i = 0; i < arity; i++)
  {
    childrenDataFlow +=
      child(i)->getGroupAttr()->getResultCardinalityForEmptyInput() *
      MAXOF(child(i)->getGroupAttr()->getRecordLength(), minRecordLength);
  }

  return childrenDataFlow;
}
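
The estimate above is a sum over children of cardinality times max(recordLength, minRecordLength). A small worked example with invented statistics (a literal floor of 40 bytes standing in for COMP_INT_50):

#include <algorithm>
#include <cstdio>

int main()
{
  // Invented per-child stats: (estimated rows, record length in bytes).
  struct { double rows; long recLen; } kids[] = { {1000, 20}, {50, 200} };
  const long minRecordLength = 40;  // stands in for COMP_INT_50

  double dataFlow = 0;
  for (auto &k : kids)
    dataFlow += k.rows * std::max(k.recLen, minRecordLength);

  // 1000*40 + 50*200 = 50000 bytes
  std::printf("children data flow: %.0f bytes\n", dataFlow);
  return 0;
}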
short UnLogic::codeGen(Generator * generator)
{
  Attributes ** attr;

  if (generator->getExpGenerator()->genItemExpr(this, &attr, 
						(1 + getArity()), -1) == 1)
    return 0;

  ex_unlogic_clause * unlogic_clause =
    new(generator->getSpace()) ex_unlogic_clause(getOperatorType(), attr,
						 generator->getSpace());
  
  generator->getExpGenerator()->linkClause(this, unlogic_clause);
  
  return 0;
}
// is it safe to parameterize this selection predicate term?
// change literals of a cacheable query into input parameters 
ItemExpr* BiRelat::normalizeForCache(CacheWA& cwa, BindWA& bindWA)
{
  if (cwa.getPhase() >= CmpMain::BIND) {
    // NB: we assume here that when a query is cacheable because it has a key
    //     equi-predicate, then its key equi-predicates can be parameterized
    if (getArity() == 2) {
      if (getOperatorType() == ITM_EQUAL) {
        // Only parameterize constants that can be safely backpatched.
        // Part of the fix to CR 10-010726-4109.
        ItemExpr *leftC=child(0), *rightC=child(1);
        OperatorTypeEnum leftO = leftC->getOperatorType();

        // fix case 10-061027-0129: discover the potential base column 
        // below the InstantiateNull node
        if ( leftO == ITM_INSTANTIATE_NULL ) {
           leftC = leftC->child(0);
           leftO = leftC->getOperatorType();
        }

        OperatorTypeEnum rightO = rightC->getOperatorType();

        // fix case 10-061027-0129.
        if ( rightO == ITM_INSTANTIATE_NULL ) {
           rightC = rightC->child(0);
           rightO = rightC->getOperatorType();
        }

        if (leftO == ITM_BASECOLUMN && rightO == ITM_CONSTANT) {
          parameterizeMe(cwa, bindWA, child(1),
                         (BaseColumn*)leftC, (ConstValue*)rightC);
        }
        else if (rightO == ITM_BASECOLUMN && leftO == ITM_CONSTANT) {
          parameterizeMe(cwa, bindWA, child(0),
                         (BaseColumn*)rightC, (ConstValue*)leftC);
        }
        else if (leftO == ITM_ITEM_LIST && rightO == ITM_ITEM_LIST) {
          child(0) = ((ItemList*)leftC)->normalizeListForCache
            (cwa, bindWA, (ItemList*)rightC);
        }
      }
      // FIXME: i.e., parameterize other binary comparison predicates
      // if we can guarantee the correctness of such parameterizations
    }
  }
  markAsNormalizedForCache();
  return this;
}
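
The substitution that parameterizeMe() performs can be pictured on a toy expression tree: once one side of the equality is known to be a column and the other a constant, the constant is replaced by an input parameter so the cache key no longer depends on the literal. A hedged miniature with hypothetical node types (not mxcmp's real ItemExpr hierarchy):

#include <memory>
#include <string>
#include <utility>

// Hypothetical miniature of parameterizing "col = literal" for caching.
struct Expr { virtual ~Expr() {} virtual std::string key() const = 0; };
struct Column : Expr {
  std::string name;
  explicit Column(std::string n) : name(std::move(n)) {}
  std::string key() const override { return name; }
};
struct Constant : Expr {
  std::string text;
  explicit Constant(std::string t) : text(std::move(t)) {}
  std::string key() const override { return text; }
};
struct Param : Expr {  // input parameter, backpatched at reuse time
  std::string key() const override { return "?"; }
};

// If one side is a column and the other a constant, replace the
// constant with a parameter marker so the cache key is literal-free.
void parameterizeEq(std::unique_ptr<Expr> &lhs, std::unique_ptr<Expr> &rhs)
{
  if (dynamic_cast<Column *>(lhs.get()) && dynamic_cast<Constant *>(rhs.get()))
    rhs = std::make_unique<Param>();
  else if (dynamic_cast<Column *>(rhs.get()) && dynamic_cast<Constant *>(lhs.get()))
    lhs = std::make_unique<Param>();
}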
// are RelExpr's kids cacheable after this phase?
NABoolean RelExpr::cacheableKids(CacheWA& cwa)
{
  switch (cwa.getPhase()) {
  case CmpMain::PARSE:
  case CmpMain::BIND: {
    Int32 arity = getArity();
    if (arity <= 0) { // we have no kids
      if (cwa.isConditionallyCacheable()) {
        // we're conditionally cacheable and have no kids
        setCacheableNode(cwa.getPhase()); 
        return TRUE; // so, we're cacheable
      }
      else {
        return FALSE; // MAYBECACHEABLE is not cacheable at this phase
        // don't mark this node non-cacheable because this
        // RelExpr may be cacheable after the next phase.
      }
    }
    // cacheability of the child(ren) determines our cacheability
    for (Int32 x = 0; x < arity; x++) {
      if (!child(x) || // cases like "insert into t default values"
          // return 1 from getArity() even if child(0) is NULL; so
          // guard against this potential mxcmp crash and consider
          // these cases non-cacheable during the PARSE stage.
          child(x)->isNonCacheable()) {
        // the 1st noncacheable child makes us noncacheable
        setNonCacheable();
        return FALSE;
      }
      else if (!child(x)->isCacheableExpr(cwa)) {
        // noncacheable child
        return FALSE;
        // don't mark this node non-cacheable because this
        // RelExpr may be cacheable after the next phase.
      }
      else { // cacheable child
        continue; // look at next child
      }
    }
    // all children are cacheable, so we're cacheable too
    setCacheableNode(cwa.getPhase());
    return TRUE;
  }
  default:
    return FALSE;
  }
}
short ItemExpr::codeGen(Generator * generator)
{
  if (getOperatorType() == ITM_NATYPE ||
      getOperatorType() == ITM_NAMED_TYPE_TO_ITEM)
    {
      Attributes ** attr;
      
      if (generator->getExpGenerator()->genItemExpr(this, &attr, (1 + getArity()), -1) == 1)
	return 0;
      return 0;
    }
  
  NAString txt(getText());
  txt += " should never reach ItemExpr::codeGen";
  GenAssert(0, txt);
  return -1;
}
// append an ascii-version of RelExpr's kids into cachewa.qryText_
void RelExpr::generateCacheKeyForKids(CacheWA& cwa) const
{
  Int32 maxi = getArity();
  if (maxi) {
    cwa += " kids(";
    for (Lng32 i = 0; i < maxi; i++) {
      if (i > 0) { 
        cwa += ","; 
      }
      if ( child(i).getPtr() == NULL ) { 
        continue; 
      }
      child(i)->generateCacheKey(cwa);
    }
    cwa += ")";
  }
}
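
The text being built here has the shape " kids(k1,k2,...)". A minimal standalone sketch of the same append logic over plain strings (note that, as in the method above, a NULL child still leaves its separating comma in place):

#include <string>
#include <vector>

// Minimal sketch: append " kids(k1,k2,...)" to a text cache key,
// skipping empty (NULL-child) slots, as the method above does.
std::string appendKidsKey(std::string key, const std::vector<std::string> &kids)
{
  if (!kids.empty())
  {
    key += " kids(";
    for (size_t i = 0; i < kids.size(); i++)
    {
      if (i > 0) key += ",";
      if (kids[i].empty()) continue;  // mirrors the NULL-child guard
      key += kids[i];
    }
    key += ")";
  }
  return key;
}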
// constructor
ElemDDLPartitionArray::ElemDDLPartitionArray(CollHeap *heap)
  : LIST(ElemDDLPartition *)(heap)
{
}

// virtual destructor
ElemDDLPartitionArray::~ElemDDLPartitionArray()
{
}

//----------------------------------------------------------------------------
// methods for class ElemDDLPartitionClause
//
//   Note that class ElemDDLPartitionClause is not derived from class
//   ElemDDLPartition.  The former is derived from class ElemDDLNode.
//----------------------------------------------------------------------------

// virtual destructor
ElemDDLPartitionClause::~ElemDDLPartitionClause()
{
}

// cast virtual function
ElemDDLPartitionClause *
ElemDDLPartitionClause::castToElemDDLPartitionClause()
{
  return this;
}

//
// accessors
//

// get the degree of this node
Int32
ElemDDLPartitionClause::getArity() const
{
  return MAX_ELEM_DDL_PARTITION_CLAUSE_ARITY;
}

ExprNode *
ElemDDLPartitionClause::getChild(Lng32 index)
{ 
  ComASSERT(index >= 0 AND index < getArity());
  return children_[index];
}
short UnArith::codeGen(Generator * generator)
{
  Attributes ** attr;
  ExpGenerator * eg = generator->getExpGenerator();
  
  if (eg->genItemExpr(this, &attr, (1+getArity()), -1) == 1)
    return 0;


  ex_arith_clause * arith_clause = 
    new(generator->getSpace()) 
    ex_arith_clause(getOperatorType(), attr, generator->getSpace(),
                    0, FALSE);
  
  generator->getExpGenerator()->linkClause(this, arith_clause);

  return 0;
}
void MultiJoin::synthLogProp(NormWA * normWAPtr)
{
  // Check to see whether this GA has already been associated
  // with a logExpr for synthesis.  If so, no need to resynthesize
  // for this equivalent log. expression.
  if (getGroupAttr()->existsLogExprForSynthesis()) return;

  CMPASSERT(jbbSubset_.getGB() == NULL_CA_ID);

  const CANodeIdSet & jbbcs = jbbSubset_.getJBBCs();

  // Instead of always picking the first JBBC as the right child
  // pick the one with minimum JBBC connections. This will avoid
  // all unnecessary crossproducts

  CANodeId jbbcRight;

  jbbcRight = jbbcs.getJBBCwithMinConnectionsToThisJBBSubset();

  CANodeIdSet right(jbbcRight);
  CANodeIdSet left(jbbcs);
  left -= jbbcRight;

  Join* join = splitSubset(*(left.jbbcsToJBBSubset()),
                           *(right.jbbcsToJBBSubset()));

  join->synthLogProp(normWAPtr);

  getGroupAttr()->setLogExprForSynthesis(join);

  join->setJoinFromMJSynthLogProp();

  jbbSubset_.setSubsetMJ(this);
  
  CASortedList * synthLogPropPath =        
    new (CmpCommon::statementHeap()) 
    CASortedList(CmpCommon::statementHeap(), jbbcs.entries());
      
  synthLogPropPath->insert((*(left.jbbcsToJBBSubset()->getSynthLogPropPath())));
  synthLogPropPath->insert(right.getFirst());
  jbbSubset_.setSynthLogPropPath(synthLogPropPath);

  CMPASSERT(getGroupAttr()->getNumJoinedTables() >= getArity());
}
short Convert::codeGen(Generator * generator)
{
  Attributes ** attr;

  if (generator->getExpGenerator()->genItemExpr(this, &attr, (1 + getArity()), -1) == 1)
    return 0;

  ex_conv_clause * conv_clause =
	  new(generator->getSpace()) ex_conv_clause(getOperatorType(), attr,
						    generator->getSpace());
  conv_clause->setLastVOAoffset(lastVOAOffset_);
  conv_clause->setLastNullIndicatorLength(lastNullIndicatorLength_);
  conv_clause->setLastVcIndicatorLength(lastVcIndicatorLength_);
  conv_clause->setAlignment(alignment_);

  generator->getExpGenerator()->linkClause(this, conv_clause);      
  
  return 0;
}
ItemExpr *ItmLagOlapFunction::preCodeGen(Generator *generator)
{
  if (nodeIsPreCodeGenned())
    return this;
  
  CollHeap *wHeap = generator->wHeap();
    
  if (getArity() > 1)
  {
    // Cast the second operand to SQLInt so downstream code generation
    // sees a consistent integer type.
    const NAType &cType = child(1)->getValueId().getType();
    ItemExpr *castExpr = new (wHeap)
      Cast(child(1),
           new (wHeap) SQLInt(wHeap, TRUE, cType.supportsSQLnullLogical()));
    castExpr->synthTypeAndValueId(TRUE);
    child(1) = castExpr;
  }
  return ItemExpr::preCodeGen(generator);
}
/* load an input table from a tab-separated text file */
bool RamRelation::load(std::istream &is, SymbolTable& symTable, const SymbolMask& mask) {
    bool error = false;
    auto arity = getArity();
    while (!is.eof()) {
        std::string line;
        // arity is only known at run time, so use a vector rather than
        // a (non-standard) variable-length array
        std::vector<RamDomain> tuple(arity);

        getline(is, line);
        if (is.eof()) break;

        size_t start = 0, end = 0;
        for (uint32_t col = 0; col < arity; col++) {
            end = line.find('\t', start);
            if (end == std::string::npos) {
                end = line.length();
            }
            std::string element;
            if (start <= end && end <= line.length()) {
                element = line.substr(start, end - start);
                if (element.empty()) {
                    element = "n/a";
                }
            } else {
                error = true;
                element = "n/a";
            }
            if (mask.isSymbol(col)) {
                tuple[col] = symTable.lookup(element.c_str());
            } else {
                tuple[col] = atoi(element.c_str());
            }
            start = end + 1;
        }
        // leftover text after the last column means a malformed row
        if (end != line.length()) {
            error = true;
        }
        if (!exists(tuple.data())) {
            insert(tuple.data());
        }
    }
    return error;
}
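
The column loop above is, at its core, a hand-rolled split on tab characters. The same tokenizing step isolated into a small standalone helper (a sketch, not part of the real RamRelation API):

#include <string>
#include <vector>

// Standalone version of the tab-splitting logic in RamRelation::load:
// returns one element per column, treating a missing trailing field
// as an empty string rather than as an error.
std::vector<std::string> splitTabs(const std::string &line)
{
  std::vector<std::string> fields;
  size_t start = 0;
  while (true)
  {
    size_t end = line.find('\t', start);
    if (end == std::string::npos)
    {
      fields.push_back(line.substr(start));
      break;
    }
    fields.push_back(line.substr(start, end - start));
    start = end + 1;
  }
  return fields;
}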