/**
 * @brief Convert a Postgres LockRow into a Peloton Node.
 *
 *    Currently, we simply ignore the locking step and return the plan
 *    built from the underlying node.
 *
 * @return Pointer to the constructed AbstractPlan.
 *
 */
std::unique_ptr<planner::AbstractPlan> PlanTransformer::TransformLockRows(
    const LockRowsPlanState *lr_plan_state) {
  LOG_INFO("Handle LockRows");

  // get the underlying plan
  AbstractPlanState *outer_plan_state = outerAbstractPlanState(lr_plan_state);

  return PlanTransformer::TransformPlan(outer_plan_state);
}
/**
 * @brief Convert a Postgres Result into a Peloton Node.
 *
 * @return Pointer to the constructed AbstractPlan.
 *
 */
const std::unique_ptr<planner::AbstractPlan> PlanTransformer::TransformResult(
    const ResultPlanState *result_state) {
  LOG_INFO("Handle Result");

  // get the underlying plan
  AbstractPlanState *outer_plan_state = outerAbstractPlanState(result_state);

  return PlanTransformer::TransformPlan(outer_plan_state);
}
/**
 * @brief Convert a Postgres HashState into a Peloton HashPlanNode
 * @return Pointer to the constructed AbstractPlan
 */
const planner::AbstractPlan *PlanTransformer::TransformHash(
    const HashPlanState *hash_state) {
  auto hashkeys = ExprTransformer::TransformExprList(
      reinterpret_cast<ExprState *>(hash_state->hashkeys));

  // Resolve child plan, should be some kind of scan
  AbstractPlanState *subplan_state = outerAbstractPlanState(hash_state);
  auto plan_node = new planner::HashPlan(hashkeys);
  assert(subplan_state != nullptr);
  plan_node->AddChild(TransformPlan(subplan_state));

  return plan_node;
}
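/**
 * @brief Prepare a LockRowsPlanState for plan transformation.
 *        Copies the node type and recursively prepares the outer (child)
 *        plan state.
 */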
LockRowsPlanState *DMLUtils::PrepareLockRowsState(
    LockRowsState *lr_plan_state) {
  LockRowsPlanState *info =
      (LockRowsPlanState *)palloc(sizeof(LockRowsPlanState));
  info->type = lr_plan_state->ps.type;

  PlanState *outer_plan_state = outerPlanState(lr_plan_state);
  AbstractPlanState *child_plan_state =
      PreparePlanState(nullptr, outer_plan_state, true);
  outerAbstractPlanState(info) = child_plan_state;

  return info;
}
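/**
 * @brief Convert a Postgres Agg into a Peloton AggregatePlan.
 *        Only single-phase aggregation is handled; multi-phase plans and
 *        unmapped transition functions cause a nullptr return.
 * @return Pointer to the constructed AbstractPlan, or nullptr on failure.
 */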
const planner::AbstractPlan *PlanTransformer::TransformAgg(
    const AggPlanState *plan_state) {
  // Alias all I need
  const Agg *agg = plan_state->agg_plan;
  auto numphases = plan_state->numphases;
  auto numaggs = plan_state->numaggs;
  auto targetlist = plan_state->ps_targetlist;
  auto qual = plan_state->ps_qual;
  auto peragg = plan_state->peragg;
  auto tupleDesc = plan_state->result_tupleDescriptor;
  auto aggstrategy = plan_state->agg_plan->aggstrategy;

  LOG_INFO("Number of Agg phases: %d \n", numphases);

  // When would there be more than one phase? Bail out until we support it.
  if (numphases != 1) return nullptr;

  /* Get project info */
  std::unique_ptr<const planner::ProjectInfo> proj_info(
      BuildProjectInfoFromTLSkipJunk(targetlist));
  LOG_INFO("proj_info : \n%s", proj_info->Debug().c_str());

  /* Get predicate */
  std::unique_ptr<const expression::AbstractExpression> predicate(
      BuildPredicateFromQual(qual));

  /* Get Aggregate terms */
  std::vector<planner::AggregatePlan::AggTerm> unique_agg_terms;

  LOG_INFO("Number of (unique) Agg nodes: %d \n", numaggs);
  for (int aggno = 0; aggno < numaggs; aggno++) {
    auto transfn_oid = peragg[aggno].transfn_oid;

    auto itr = peloton::bridge::kPgTransitFuncMap.find(transfn_oid);
    if (peloton::bridge::kPgTransitFuncMap.end() == itr) {
      LOG_ERROR("Unmapped Transit function Id : %u\n", transfn_oid);
      return nullptr;
    }

    // We don't check whether the mapped exprtype is a valid aggregate type
    // here.
    PltFuncMetaInfo fn_meta = itr->second;
    // We only take the first argument as input to aggregator because
    // we don't have multi-argument aggregator in Peloton at the moment.
    // WARNING: there can be no arguments (e.g., COUNT(*))
    auto arguments = peragg[aggno].aggrefstate->args;
    expression::AbstractExpression *agg_expr = nullptr;
    if (arguments) {
      GenericExprState *gstate =
          (GenericExprState *)lfirst(list_head(arguments));
      LOG_INFO("Creating Agg Expr");
      agg_expr = ExprTransformer::TransformExpr(gstate->arg);
      LOG_INFO("Done creating Agg Expr");
    }

    /*
     * AggStatePerAggData.sortColIdx along with other related attributes
     * are used to handle ORDER BY and DISTINCT *within* aggregation.
     * E.g.,
     * SELECT count(DISTINCT x) ...
     * SELECT str_agg(y ORDER BY x) ...
     * Currently, we only handle the agg(DISTINCT x) case by
     * checking whether numDistinctCols > 0.
     * Note that numDistinctCols > 0 may be a necessary but not sufficient
     * condition for agg(DISTINCT x).
     */

    bool distinct = (peragg[aggno].numDistinctCols > 0);

    unique_agg_terms.emplace_back(fn_meta.exprtype, agg_expr, distinct);

    LOG_INFO(
        "Unique Agg # : %d , transfn_oid : %u\n , aggtype = %s \n expr = %s, "
        "numDistinctCols = %d",
        aggno, transfn_oid, ExpressionTypeToString(fn_meta.exprtype).c_str(),
        agg_expr ? agg_expr->Debug().c_str() : "<NULL>",
        peragg[aggno].numDistinctCols);

    for (int i = 0; i < peragg[aggno].numDistinctCols; i++) {
      LOG_INFO("sortColIdx[%d] : %d \n", i, peragg[aggno].sortColIdx[i]);
    }

  }  // end loop aggno

  /* Get Group by columns */
  std::vector<oid_t> groupby_col_ids;
  LOG_INFO("agg.numCols = %d", agg->numCols);
  for (int i = 0; i < agg->numCols; i++) {
    LOG_INFO("agg.grpColIdx[%d] = %d \n", i, agg->grpColIdx[i]);

    auto attrno = agg->grpColIdx[i];
    if (AttributeNumberIsValid(attrno) &&
        AttrNumberIsForUserDefinedAttr(attrno)) {
      groupby_col_ids.emplace_back(AttrNumberGetAttrOffset(attrno));
    }
  }

  /* Get output schema */
  std::unique_ptr<catalog::Schema> output_schema(
      SchemaTransformer::GetSchemaFromTupleDesc(tupleDesc));

  /* Map agg strategy */
  LOG_INFO("aggstrategy : %s\n",
           (AGG_HASHED == aggstrategy)
               ? "HASH"
               : ((AGG_SORTED == aggstrategy) ? "SORT" : "PLAIN"));

  PelotonAggType agg_type = AGGREGATE_TYPE_INVALID;

  switch (aggstrategy) {
    case AGG_SORTED:
      agg_type = AGGREGATE_TYPE_SORTED;
      break;
    case AGG_HASHED:
      agg_type = AGGREGATE_TYPE_HASH;
      break;
    case AGG_PLAIN:
      agg_type = AGGREGATE_TYPE_PLAIN;
      break;
  }

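  // Collect the input columns referenced by each aggregate expression; they
  // are attached to the plan below via SetColumnIds().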
  std::vector<oid_t> column_ids;
  for (const auto &agg_term : unique_agg_terms) {
    if (agg_term.expression) {
      LOG_INFO("AGG TERM :: %s", agg_term.expression->Debug().c_str());
    }
    BuildColumnListFromExpr(column_ids, agg_term.expression);
  }

  auto retval = new planner::AggregatePlan(
      proj_info.release(), predicate.release(), std::move(unique_agg_terms),
      std::move(groupby_col_ids), output_schema.release(), agg_type);

  retval->SetColumnIds(column_ids);

  // Find children
  auto lchild = TransformPlan(outerAbstractPlanState(plan_state));
  retval->AddChild(lchild);

  return retval;
}
/**
 * @brief Convert a Postgres NestLoop into a Peloton NestedLoopJoinPlan.
 * @return Pointer to the constructed AbstractPlan.
 */
const planner::AbstractPlan *PlanTransformer::TransformNestLoop(
    const NestLoopPlanState *nl_plan_state) {
  PelotonJoinType peloton_join_type =
      PlanTransformer::TransformJoinType(nl_plan_state->jointype);

  NestLoop *nl = nl_plan_state->nl;

  if (peloton_join_type == JOIN_TYPE_INVALID) {
    LOG_ERROR("unsupported join type: %d", nl_plan_state->jointype);
    return nullptr;
  }

  expression::AbstractExpression *join_filter = ExprTransformer::TransformExpr(
      reinterpret_cast<ExprState *>(nl_plan_state->joinqual));

  expression::AbstractExpression *plan_filter = ExprTransformer::TransformExpr(
      reinterpret_cast<ExprState *>(nl_plan_state->qual));

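  // The overall predicate is the conjunction of the join qualifier and the
  // generic plan qualifier; if only one is present, use it directly.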
  expression::AbstractExpression *predicate = nullptr;
  if (join_filter && plan_filter) {
    predicate = expression::ExpressionUtil::ConjunctionFactory(
        EXPRESSION_TYPE_CONJUNCTION_AND, join_filter, plan_filter);
  } else if (join_filter) {
    predicate = join_filter;
  } else {
    predicate = plan_filter;
  }

  // TODO: do we need to consider target list here?
  // Transform project info
  std::unique_ptr<const planner::ProjectInfo> project_info(nullptr);

  project_info.reset(BuildProjectInfo(nl_plan_state->ps_ProjInfo));

  LOG_INFO("%s", project_info.get()->Debug().c_str());

  planner::AbstractPlan *result = nullptr;
  planner::NestedLoopJoinPlan *plan_node = nullptr;
  auto project_schema = SchemaTransformer::GetSchemaFromTupleDesc(
      nl_plan_state->tts_tupleDescriptor);

  if (project_info.get()->isNonTrivial()) {
    // we have non-trivial projection
    LOG_INFO("We have non-trivial projection");

    result =
        new planner::ProjectionPlan(project_info.release(), project_schema);
    plan_node = new planner::NestedLoopJoinPlan(peloton_join_type, predicate,
                                                nullptr, project_schema, nl);
    result->AddChild(plan_node);
  } else {
    LOG_INFO("We have direct mapping projection");
    plan_node = new planner::NestedLoopJoinPlan(peloton_join_type, predicate,
                                                project_info.release(),
                                                project_schema, nl);
    result = plan_node;
  }

  const planner::AbstractPlan *outer =
      PlanTransformer::TransformPlan(outerAbstractPlanState(nl_plan_state));
  const planner::AbstractPlan *inner =
      PlanTransformer::TransformPlan(innerAbstractPlanState(nl_plan_state));

  /* Add the children nodes */
  plan_node->AddChild(outer);
  plan_node->AddChild(inner);

  LOG_INFO("Finishing mapping Nested loop join, JoinType: %d",
           nl_plan_state->jointype);
  return result;
}
/**
 * @brief Convert a Postgres HashJoin into a Peloton HashJoinPlan.
 * @return Pointer to the constructed AbstractPlan
 */
std::unique_ptr<planner::AbstractPlan> PlanTransformer::TransformHashJoin(
    const HashJoinPlanState *hj_plan_state) {
  std::unique_ptr<planner::AbstractPlan> result;
  PelotonJoinType join_type =
      PlanTransformer::TransformJoinType(hj_plan_state->jointype);
  if (join_type == JOIN_TYPE_INVALID) {
    LOG_ERROR("unsupported join type: %d", hj_plan_state->jointype);
    return nullptr;
  }

  LOG_INFO("Handle hash join with join type: %d", join_type);

  /*
  std::vector<planner::HashJoinPlan::JoinClause> join_clauses(
      BuildHashJoinClauses(mj_plan_state->mj_Clauses,
                            mj_plan_state->mj_NumClauses);
  */

  expression::AbstractExpression *join_filter = ExprTransformer::TransformExpr(
      reinterpret_cast<ExprState *>(hj_plan_state->joinqual));

  expression::AbstractExpression *plan_filter = ExprTransformer::TransformExpr(
      reinterpret_cast<ExprState *>(hj_plan_state->qual));

  std::unique_ptr<const expression::AbstractExpression> predicate(nullptr);
  if (join_filter && plan_filter) {
    predicate.reset(expression::ExpressionUtil::ConjunctionFactory(
        EXPRESSION_TYPE_CONJUNCTION_AND, join_filter, plan_filter));
  } else if (join_filter) {
    predicate.reset(join_filter);
  } else {
    predicate.reset(plan_filter);
  }

  /* Transform project info */
  std::unique_ptr<const planner::ProjectInfo> project_info(nullptr);

  project_info.reset(BuildProjectInfoFromTLSkipJunk(hj_plan_state->targetlist));

  if (project_info.get() != nullptr) {
    LOG_INFO("%s", project_info.get()->Debug().c_str());
  } else {
    LOG_INFO("empty projection info");
  }

  std::shared_ptr<const catalog::Schema> project_schema(
      SchemaTransformer::GetSchemaFromTupleDesc(
          hj_plan_state->tts_tupleDescriptor));

  std::vector<oid_t> outer_hashkeys =
      BuildColumnListFromExpStateList(hj_plan_state->outer_hashkeys);

  bool non_trivial = (project_info.get() != nullptr &&
                      project_info.get()->isNonTrivial());
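  // With a non-trivial projection, place a ProjectionPlan on top and hand the
  // hash join a null ProjectInfo; otherwise the join does the direct mapping.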
  if (non_trivial) {
    // we have non-trivial projection
    LOG_INFO("We have non-trivial projection");
    result = std::unique_ptr<planner::AbstractPlan>(
        new planner::ProjectionPlan(std::move(project_info), project_schema));
    // set project_info to nullptr
    project_info.reset();
  } else {
    LOG_INFO("We have direct mapping projection");
  }

  std::unique_ptr<planner::HashJoinPlan> plan_node(new planner::HashJoinPlan(
      join_type, std::move(predicate), std::move(project_info), project_schema,
      outer_hashkeys));

  std::unique_ptr<planner::AbstractPlan> outer{std::move(
      PlanTransformer::TransformPlan(outerAbstractPlanState(hj_plan_state)))};
  std::unique_ptr<planner::AbstractPlan> inner{std::move(
      PlanTransformer::TransformPlan(innerAbstractPlanState(hj_plan_state)))};

  /* Add the children nodes */
  plan_node->AddChild(std::move(outer));
  plan_node->AddChild(std::move(inner));

  if (non_trivial) {
    result->AddChild(std::move(plan_node));
  } else {
    result.reset(plan_node.release());
  }

  LOG_INFO("Finishing mapping Hash join, JoinType: %d", join_type);
  return result;
}
/**
 * @brief Convert a Postgres BitmapHeapScan into a Peloton IndexScanPlan.
 *        We currently only handle the case where the lower plan is a
 *        BitmapIndexScan.
 *
 * @return Pointer to the constructed AbstractPlan
 */
const planner::AbstractPlan *PlanTransformer::TransformBitmapHeapScan(
    const BitmapHeapScanPlanState *bhss_plan_state,
    const TransformOptions options) {
  planner::IndexScanPlan::IndexScanDesc index_scan_desc;

  /* resolve target relation */
  Oid table_oid = bhss_plan_state->table_oid;
  Oid database_oid = bhss_plan_state->database_oid;

  const BitmapIndexScanPlanState *biss_state =
      reinterpret_cast<const BitmapIndexScanPlanState *>(
          outerAbstractPlanState(bhss_plan_state));
  const BitmapIndexScan *biss_plan =
      reinterpret_cast<const BitmapIndexScan *>(biss_state->biss_plan);

  storage::DataTable *table = static_cast<storage::DataTable *>(
      catalog::Manager::GetInstance().GetTableWithOid(database_oid, table_oid));

  assert(table);
  LOG_TRACE("Scan from: database oid %u table oid %u", database_oid, table_oid);

  /* Resolve index  */
  index_scan_desc.index = table->GetIndexWithOid(biss_plan->indexid);

  if (nullptr == index_scan_desc.index) {
    LOG_ERROR("Can't find Index oid %u \n", biss_plan->indexid);
  }
  assert(index_scan_desc.index);

  LOG_INFO("Bitmap heap scan on %s using Index oid %u, index name: %s",
           table->GetName().c_str(), biss_plan->indexid,
           index_scan_desc.index->GetName().c_str());

  /* Resolve index order */
  /* Only support forward scan direction */

  /* index qualifier and scan keys */

  LOG_TRACE("num of scan keys = %d, num of runtime key = %d",
            biss_state->biss_NumScanKeys, biss_state->biss_NumRuntimeKeys);
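  // Translate the Postgres scan keys (and any runtime keys) into the Peloton
  // index scan descriptor.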
  BuildScanKey(biss_state->biss_ScanKeys, biss_state->biss_NumScanKeys,
               biss_state->biss_RuntimeKeys, biss_state->biss_NumRuntimeKeys,
               index_scan_desc);

  /* handle simple cases */

  /* ORDER BY, not support */

  /* Extract the generic scan info (including qual and projInfo) */
  planner::AbstractPlan *parent = nullptr;
  expression::AbstractExpression *predicate = nullptr;
  std::vector<oid_t> column_ids;

  GetGenericInfoFromScanState(parent, predicate, column_ids, bhss_plan_state,
                              options.use_projInfo);

  auto scan_node =
      new planner::IndexScanPlan(table, predicate, column_ids, index_scan_desc);

  planner::AbstractPlan *rv = nullptr;
  /* Check whether a parent is presented, connect with the scan node if yes */
  if (parent) {
    parent->AddChild(scan_node);
    rv = parent;
  } else {
    rv = scan_node;
  }

  return rv;
}
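/**
 * @brief Prepare a LimitPlanState by eagerly evaluating the LIMIT and OFFSET
 *        expressions (mirroring the checks the Postgres Limit node performs)
 *        and recursively preparing the outer (child) plan state.
 */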
LimitPlanState *DMLUtils::PrepareLimitState(LimitState *limit_plan_state) {
  LimitPlanState *info = (LimitPlanState *)palloc(sizeof(LimitPlanState));
  info->type = limit_plan_state->ps.type;

  ExprContext *econtext = limit_plan_state->ps.ps_ExprContext;
  Datum val;
  bool isNull;
  int64 limit;
  int64 offset;
  /* Default to "no limit / no offset"; the branches below override this */
  bool noLimit = true;
  bool noOffset = true;

  /* Resolve limit and offset */
  if (limit_plan_state->limitOffset) {
    val = ExecEvalExprSwitchContext(limit_plan_state->limitOffset, econtext,
                                    &isNull, NULL);
    /* Interpret NULL offset as no offset */
    if (isNull)
      offset = 0;
    else {
      offset = DatumGetInt64(val);
      if (offset < 0) {
        LOG_ERROR("OFFSET must not be negative, offset = %ld", offset);
      }
      noOffset = false;
    }
  } else {
    /* No OFFSET supplied */
    offset = 0;
  }

  if (limit_plan_state->limitCount) {
    val = ExecEvalExprSwitchContext(limit_plan_state->limitCount, econtext,
                                    &isNull, NULL);
    /* Interpret NULL count as no limit (LIMIT ALL) */
    if (isNull) {
      limit = 0;
      noLimit = true;
    } else {
      limit = DatumGetInt64(val);
      if (limit < 0) {
        LOG_ERROR("LIMIT must not be negative, limit = %ld", limit);
      }
      noLimit = false;
    }
  } else {
    /* No LIMIT supplied */
    limit = 0;
    noLimit = true;
  }

  info->limit = limit;
  info->offset = offset;
  info->noLimit = noLimit;
  info->noOffset = noOffset;

  PlanState *outer_plan_state = outerPlanState(limit_plan_state);
  AbstractPlanState *child_plan_state =
      PreparePlanState(nullptr, outer_plan_state, true);
  outerAbstractPlanState(info) = child_plan_state;

  return info;
}
/**
 * @brief Convert a Postgres HashJoin into a Peloton HashJoinPlan.
 * @return Pointer to the constructed AbstractPlan
 */
const planner::AbstractPlan *PlanTransformer::TransformHashJoin(
    const HashJoinPlanState *hj_plan_state) {
  planner::AbstractPlan *result = nullptr;
  planner::HashJoinPlan *plan_node = nullptr;
  PelotonJoinType join_type =
      PlanTransformer::TransformJoinType(hj_plan_state->jointype);
  if (join_type == JOIN_TYPE_INVALID) {
    LOG_ERROR("unsupported join type: %d", hj_plan_state->jointype);
    return nullptr;
  }

  LOG_INFO("Handle hash join with join type: %d", join_type);

  /*
  std::vector<planner::HashJoinPlan::JoinClause> join_clauses(
      BuildHashJoinClauses(mj_plan_state->mj_Clauses,
                            mj_plan_state->mj_NumClauses);
  */

  expression::AbstractExpression *join_filter = ExprTransformer::TransformExpr(
      reinterpret_cast<ExprState *>(hj_plan_state->joinqual));

  expression::AbstractExpression *plan_filter = ExprTransformer::TransformExpr(
      reinterpret_cast<ExprState *>(hj_plan_state->qual));

  expression::AbstractExpression *predicate = nullptr;
  if (join_filter && plan_filter) {
    predicate = expression::ExpressionUtil::ConjunctionFactory(
        EXPRESSION_TYPE_CONJUNCTION_AND, join_filter, plan_filter);
  } else if (join_filter) {
    predicate = join_filter;
  } else {
    predicate = plan_filter;
  }

  /* Transform project info */
  std::unique_ptr<const planner::ProjectInfo> project_info(nullptr);

  project_info.reset(BuildProjectInfoFromTLSkipJunk(hj_plan_state->targetlist));

  LOG_INFO("%s", project_info.get()->Debug().c_str());

  auto project_schema = SchemaTransformer::GetSchemaFromTupleDesc(
      hj_plan_state->tts_tupleDescriptor);

  std::vector<oid_t> outer_hashkeys =
      BuildColumnListFromExpStateList(hj_plan_state->outer_hashkeys);

  if (project_info.get()->isNonTrivial()) {
    // we have non-trivial projection
    LOG_INFO("We have non-trivial projection");
    result =
        new planner::ProjectionPlan(project_info.release(), project_schema);
    plan_node = new planner::HashJoinPlan(join_type, predicate, nullptr,
                                          project_schema, outer_hashkeys);
    result->AddChild(plan_node);
  } else {
    LOG_INFO("We have direct mapping projection");
    plan_node =
        new planner::HashJoinPlan(join_type, predicate, project_info.release(),
                                  project_schema, outer_hashkeys);
    result = plan_node;
  }

  const planner::AbstractPlan *outer =
      PlanTransformer::TransformPlan(outerAbstractPlanState(hj_plan_state));
  const planner::AbstractPlan *inner =
      PlanTransformer::TransformPlan(innerAbstractPlanState(hj_plan_state));

  /* Add the children nodes */
  plan_node->AddChild(outer);
  plan_node->AddChild(inner);

  LOG_INFO("Finishing mapping Hash join, JoinType: %d", join_type);
  return result;
}