Example #1
const _PlanOperation *_PlanOperation::execute() {
  epoch_t startTime = get_epoch_nanoseconds();

  refreshInput();

  setupPlanOperation();

  PapiTracer pt;
  pt.addEvent("PAPI_TOT_CYC");
  pt.addEvent(getEvent());

  pt.start();
  executePlanOperation();
  pt.stop();

  teardownPlanOperation();

  epoch_t endTime = get_epoch_nanoseconds();
  std::string threadId = boost::lexical_cast<std::string>(std::this_thread::get_id());

  if (_performance_attr != nullptr)
    *_performance_attr = (performance_attributes_t) {
      pt.value("PAPI_TOT_CYC"), pt.value(getEvent()), getEvent() , planOperationName(), _operatorId, startTime, endTime, threadId
    };

  setState(OpSuccess);
  return this;
}
Example #2
const PlanOperation* PlanOperation::execute() {
  const bool recordPerformance = _performance_attr != nullptr;

  // Check if we really need this
  epoch_t startTime = 0;
  if (recordPerformance)
    startTime = get_epoch_nanoseconds();

  PapiTracer pt;

  // Start the execution
  refreshInput();
  setupPlanOperation();

  if (recordPerformance) {
    pt.addEvent("PAPI_TOT_CYC");
    pt.addEvent(getEvent());
    pt.start();
  }

  executePlanOperation();

  if (recordPerformance)
    pt.stop();

  teardownPlanOperation();

  if (recordPerformance) {
    epoch_t endTime = get_epoch_nanoseconds();
    std::string threadId = boost::lexical_cast<std::string>(std::this_thread::get_id());

    size_t cardinality;
    if (getResultTable() != empty_result)
      cardinality = getResultTable()->size();
    else
      // the cardinality is max(size_t) by convention if there is no return table
      cardinality = std::numeric_limits<size_t>::max();

    *_performance_attr = (performance_attributes_t) {pt.value("PAPI_TOT_CYC"), pt.value(getEvent()), getEvent(),
                                                     planOperationName(),      _operatorId,          startTime,
                                                     endTime,                  threadId,             cardinality};
  }

  setState(OpSuccess);
  return this;
}
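The aggregate initializations in Example #1 and Example #2 suggest a performance_attributes_t layout roughly like the sketch below, inferred purely from the order of the initializers (cycle count, event value, event name, operator name, operator id, start and end timestamps, thread id, cardinality). Field names and types are assumptions, not the authoritative HYRISE definition; Example #4 additionally reads core, node, in_rows and out_rows members that are omitted here.

#include <cstdint>
#include <string>

// Hypothetical sketch of performance_attributes_t, inferred from the
// initializer order in the execute() implementations above. Names and
// types are assumptions, not the actual HYRISE struct.
struct performance_attributes_t {
  uint64_t duration;            // PAPI_TOT_CYC cycle count
  uint64_t data;                // value of the operator-specific PAPI event
  std::string papiEvent;        // name of the operator-specific PAPI event
  std::string name;             // plan operation name
  std::string operatorId;       // operator id from the query plan
  uint64_t startTime;           // epoch_t start timestamp in nanoseconds
  uint64_t endTime;             // epoch_t end timestamp in nanoseconds
  std::string executingThread;  // stringified std::thread::id
  size_t cardinality;           // result row count, max(size_t) if no result table
  // Example #4 also reads core, node, in_rows and out_rows; omitted here.
};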
Example #3
void StartProfiling::executePlanOperation() {
  output = input;
#ifdef HYRISE_USE_GOOGLE_PROFILER
  ProfilerStart((Settings::getInstance()->getProfilePath() + "/profile_" + std::to_string(get_epoch_nanoseconds()) +
                 ".gprof").c_str());
#endif
}
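Example #3 only starts the gperftools CPU profiler. ProfilerStart() is paired with ProfilerStop() in gperftools, so a matching StopProfiling operation presumably looks roughly like the sketch below; the class and its output/input members simply mirror Example #3 and are assumptions here.

#ifdef HYRISE_USE_GOOGLE_PROFILER
#include <gperftools/profiler.h>
#endif

// Hypothetical counterpart to StartProfiling: pass the input through and
// stop the CPU profiler, flushing the profile file opened by ProfilerStart().
void StopProfiling::executePlanOperation() {
  output = input;
#ifdef HYRISE_USE_GOOGLE_PROFILER
  ProfilerStop();
#endif
}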
Example #4
Json::Value ResponseTask::generateResponseJson() {
  Json::Value response;
  epoch_t responseStart = _recordPerformanceData ? get_epoch_nanoseconds() : 0;
  PapiTracer pt;
  pt.addEvent("PAPI_TOT_CYC");

  if (_recordPerformanceData)
    pt.start();

  auto predecessor = getResultTask();
  const auto& result = predecessor->getResultTable();

  if (getState() != OpFail) {
    if (!_isAutoCommit) {
      response["session_context"] =
          std::to_string(_txContext.tid).append(" ").append(std::to_string(_txContext.lastCid));
    }

    if (result) {
      // Make header
      Json::Value json_header(Json::arrayValue);
      for (unsigned col = 0; col < result->columnCount(); ++col) {
        Json::Value colname(result->nameOfColumn(col));
        json_header.append(colname);
      }

      // Copy the complete result
      response["real_size"] = result->size();
      response["rows"] = generateRowsJson(result, _transmitLimit, _transmitOffset);
      response["header"] = json_header;
    }

    ////////////////////////////////////////////////////////////////////////////////////////
    // Copy Performance Data
    if (_recordPerformanceData) {
      Json::Value json_perf(Json::arrayValue);
      for (const auto& attr : performance_data) {
        Json::Value element;
        element["papi_event"] = Json::Value(attr->papiEvent);
        element["duration"] = Json::Value((Json::UInt64)attr->duration);
        element["data"] = Json::Value((Json::UInt64)attr->data);
        element["name"] = Json::Value(attr->name);
        element["id"] = Json::Value(attr->operatorId);
        element["startTime"] = Json::Value((double)(attr->startTime - queryStart) / 1000000);
        element["endTime"] = Json::Value((double)(attr->endTime - queryStart) / 1000000);
        element["executingThread"] = Json::Value(attr->executingThread);
        element["lastCore"] = Json::Value(attr->core);
        element["lastNode"] = Json::Value(attr->node);
        // Put null for in/outRows if -1 was set
        element["inRows"] = attr->in_rows ? Json::Value(*(attr->in_rows)) : Json::Value();
        element["outRows"] = attr->out_rows ? Json::Value(*(attr->out_rows)) : Json::Value();

        if (_getSubQueryPerformanceData) {
          element["subQueryPerformanceData"] = _scriptOperation->getSubQueryPerformanceData();
        }

        json_perf.append(element);
      }

      pt.stop();

      Json::Value responseElement;
      responseElement["duration"] = Json::Value((Json::UInt64)pt.value("PAPI_TOT_CYC"));
      responseElement["name"] = Json::Value("ResponseTask");
      responseElement["id"] = Json::Value("respond");
      responseElement["startTime"] = Json::Value((double)(responseStart - queryStart) / 1000000);
      responseElement["endTime"] = Json::Value((double)(get_epoch_nanoseconds() - queryStart) / 1000000);

      std::string threadId = boost::lexical_cast<std::string>(std::this_thread::get_id());
      responseElement["executingThread"] = Json::Value(threadId);

      responseElement["lastCore"] = Json::Value(getCurrentCore());
      responseElement["lastNode"] = Json::Value(getCurrentNode());

      std::optional<size_t> result_size;
      if (result) {
        result_size = result->size();
      }
      responseElement["inRows"] = result_size ? Json::Value(*result_size) : Json::Value();
      responseElement["outRows"] = Json::Value();

      json_perf.append(responseElement);

      response["performanceData"] = json_perf;
    }

    Json::Value jsonKeys(Json::arrayValue);
    for (const auto& x : _generatedKeyRefs) {
      for (const auto& key : *x) {
        Json::Value element(key);
        jsonKeys.append(element);
      }
    }
    response["generatedKeys"] = jsonKeys;
    response["affectedRows"] = Json::Value(_affectedRows);

    if (_getSubQueryPerformanceData) {
      response["subQueryDataflow"] = _scriptOperation->getSubQueryDataflow();
    }
  }
  LOG4CXX_DEBUG(_logger, "Table Use Count: " << result.use_count());

  return response;
}
Example #5
void ResponseTask::operator()() {
  epoch_t responseStart = get_epoch_nanoseconds();
  Json::Value response;

  if (getDependencyCount() > 0) {
    PapiTracer pt;
    pt.addEvent("PAPI_TOT_CYC");
    pt.start();

    auto predecessor = getResultTask();
    const auto& result = predecessor->getResultTable();

    if (predecessor->getState() != OpFail) {
      if (result) {
        // Make header
        Json::Value json_header(Json::arrayValue);
        for (unsigned col = 0; col < result->columnCount(); ++col) {
          Json::Value colname(result->nameOfColumn(col));
          json_header.append(colname);
        }

        // Copy the complete result
        response["real_size"] = result->size();
        response["rows"] = generateRowsJson(result, _transmitLimit);
        response["header"] = json_header;
      }

      // Copy Performance Data
      Json::Value json_perf(Json::arrayValue);
      for (const auto & attr: performance_data) {
        Json::Value element;
        element["papi_event"] = Json::Value(attr->papiEvent);
        element["duration"] = Json::Value((Json::UInt64) attr->duration);
        element["data"] = Json::Value((Json::UInt64) attr->data);
        element["name"] = Json::Value(attr->name);
        element["id"] = Json::Value(attr->operatorId);
        element["startTime"] = Json::Value((double)(attr->startTime - queryStart) / 1000000);
        element["endTime"] = Json::Value((double)(attr->endTime - queryStart) / 1000000);
        element["executingThread"] = Json::Value(attr->executingThread);
        json_perf.append(element);
      }
      pt.stop();

      Json::Value responseElement;
      responseElement["duration"] = Json::Value((Json::UInt64) pt.value("PAPI_TOT_CYC"));
      responseElement["name"] = Json::Value("ResponseTask");
      responseElement["id"] = Json::Value("respond");
      responseElement["startTime"] = Json::Value((double)(responseStart - queryStart) / 1000000);
      responseElement["endTime"] = Json::Value((double)(get_epoch_nanoseconds() - queryStart) / 1000000);

      std::string threadId = boost::lexical_cast<std::string>(std::this_thread::get_id());
      responseElement["executingThread"] = Json::Value(threadId);
      json_perf.append(responseElement);

      response["performanceData"] = json_perf;
    } else {
      LOG4CXX_ERROR(_logger, "Error during plan execution: " << predecessor->getErrorMessage());
      response["error"] = predecessor->getErrorMessage();
    }
    LOG4CXX_DEBUG(_logger, "Table Use Count: " << result.use_count());
  } else {
    response["error"] = "Query parsing failed, see server error log";
  }

  connection->respond(response.toStyledString());
}
Example #6
void RequestParseTask::operator()() {
  assert((_responseTask != nullptr) && "Response needs to be set");
  const auto& scheduler = SharedScheduler::getInstance().getScheduler();

  performance_vector_t& performance_data = _responseTask->getPerformanceData();

  bool recordPerformance = false;
  std::vector<std::shared_ptr<Task> > tasks;

  int priority = Task::DEFAULT_PRIORITY;
  int sessionId = 0;

  if (_connection->hasBody()) {
    // The body is a wellformed HTTP Post body, with key value pairs
    std::string body(_connection->getBody());
    std::map<std::string, std::string> body_data = parseHTTPFormData(body);

    boost::optional<tx::TXContext> ctx;
    auto ctx_it = body_data.find("session_context");
    if (ctx_it != body_data.end()) {
      boost::optional<tx::transaction_id_t> tid;
      if ((tid = parseNumeric<tx::transaction_id_t>(ctx_it->second)) &&
          (tx::TransactionManager::isRunningTransaction(*tid))) {
        LOG4CXX_DEBUG(_logger, "Picking up transaction id " << *tid);
        ctx = tx::TransactionManager::getContext(*tid);
      } else {
        LOG4CXX_ERROR(_logger, "Invalid transaction id " << *tid);
        _responseTask->addErrorMessage("Invalid transaction id set, aborting execution.");
      }
    } else {
      ctx = tx::TransactionManager::beginTransaction();
      LOG4CXX_DEBUG(_logger, "Creating new transaction context " << (*ctx).tid);
    }

    Json::Value request_data;
    Json::Reader reader;

    const std::string& query_string = urldecode(body_data["query"]);

    if (ctx && reader.parse(query_string, request_data)) {
      _responseTask->setTxContext(*ctx);
      recordPerformance = getOrDefault(body_data, "performance", "false") == "true";

      // the performance attribute for this operation (at [0])
      if (recordPerformance) {
        performance_data.push_back(std::unique_ptr<performance_attributes_t>(new performance_attributes_t));
      }

      LOG4CXX_DEBUG(_query_logger, request_data);

      const std::string& final_hash = hash(query_string);
      std::shared_ptr<Task> result = nullptr;

      if(request_data.isMember("priority"))
        priority = request_data["priority"].asInt();
      if(request_data.isMember("sessionId"))
        sessionId = request_data["sessionId"].asInt();
      _responseTask->setPriority(priority);
      _responseTask->setSessionId(sessionId);
      _responseTask->setRecordPerformanceData(recordPerformance);
      try {
        tasks = QueryParser::instance().deserialize(
                  QueryTransformationEngine::getInstance()->transform(request_data),
                  &result);

      } catch (const std::exception &ex) {
        // clean up, so we don't end up with a whole mess due to thrown exceptions
        LOG4CXX_ERROR(_logger, "Received\n:" << request_data);
        LOG4CXX_ERROR(_logger, "Exception thrown during query deserialization:\n" << ex.what());
        _responseTask->addErrorMessage(std::string("RequestParseTask: ") + ex.what());
        tasks.clear();
        result = nullptr;
      }

      auto autocommit_it = body_data.find("autocommit");
      if (autocommit_it != body_data.end() && (autocommit_it->second == "true")) {
        auto commit = std::make_shared<Commit>();
        commit->setOperatorId("__autocommit");
        commit->setPlanOperationName("Commit");
        commit->addDependency(result);
        result = commit;
        tasks.push_back(commit);
      }

      if (result != nullptr) {
        _responseTask->addDependency(result);
      } else {
        LOG4CXX_ERROR(_logger, "Json did not yield tasks");
      }

      for (const auto & func: tasks) {
        if (auto task = std::dynamic_pointer_cast<PlanOperation>(func)) {
          task->setPriority(priority);
          task->setSessionId(sessionId);
          task->setPlanId(final_hash);
          task->setTXContext(*ctx);
          task->setId((*ctx).tid);
          _responseTask->registerPlanOperation(task);
          if (!task->hasSuccessors()) {
            // The response has to depend on all tasks, ie. we don't
            // want to respond before all tasks finished running, even
            // if they don't contribute to the result. This prevents
            // dangling tasks
            _responseTask->addDependency(task);
          }
        }
      }
    } else {
      LOG4CXX_ERROR(_logger, "Failed to parse: "
                    << urldecode(body_data["query"]) << "\n"
                    << body_data["query"] << "\n"
                    << reader.getFormatedErrorMessages());
    }
    // Update the transmission limit for the response task
    if (atoi(body_data["limit"].c_str()) > 0)
      _responseTask->setTransmitLimit(atol(body_data["limit"].c_str()));

    if (atoi(body_data["offset"].c_str()) > 0)
      _responseTask->setTransmitOffset(atol(body_data["offset"].c_str()));

  } else {
    LOG4CXX_WARN(_logger, "no body received!");
  }

  // high priority tasks are expected to be scheduled sequentially
  if(priority == Task::HIGH_PRIORITY){
    if (recordPerformance) {
      *(performance_data.at(0)) = { 0, 0, "NO_PAPI", "RequestParseTask", 
                                    "requestParse", _queryStart, get_epoch_nanoseconds(), 
                                    boost::lexical_cast<std::string>(std::this_thread::get_id()) };
    }

    int number_of_tasks = tasks.size();
    std::vector<bool> isExecuted(number_of_tasks, false);
    int executedTasks = 0;
    while(executedTasks < number_of_tasks){
      for(int i = 0; i < number_of_tasks; i++){
        if(!isExecuted[i] && tasks[i]->isReady()){
          (*tasks[i])();
          tasks[i]->notifyDoneObservers();
          executedTasks++;
          isExecuted[i] = true;
        }
      }
    }
    _responseTask->setQueryStart(_queryStart);
    (*_responseTask)();
    _responseTask.reset();  // yield responsibility

  } else {
    scheduler->schedule(_responseTask);
    scheduler->scheduleQuery(tasks);

    if (recordPerformance) {
      *(performance_data.at(0)) = { 0, 0, "NO_PAPI", "RequestParseTask", "requestParse", 
                                    _queryStart, get_epoch_nanoseconds(), 
                                    boost::lexical_cast<std::string>(std::this_thread::get_id()) };
    }
    _responseTask->setQueryStart(_queryStart);
    _responseTask.reset();  // yield responsibility
  }
}
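The getOrDefault call above, which reads the performance flag from the parsed POST body, is a project helper that is not part of this listing. Assuming it is a plain map lookup with a fallback value, a plausible sketch looks like this (not the actual HYRISE implementation):

#include <map>
#include <string>

// Sketch of a lookup-with-fallback helper in the spirit of the
// getOrDefault(body_data, "performance", "false") call above.
std::string getOrDefault(const std::map<std::string, std::string>& data,
                         const std::string& key,
                         const std::string& fallback) {
  const auto it = data.find(key);
  return it != data.end() ? it->second : fallback;
}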
Example #7
RequestParseTask::RequestParseTask(net::AbstractConnection* connection)
    : _connection(connection),
      _responseTask(std::make_shared<ResponseTask>(connection)),
      _queryStart(get_epoch_nanoseconds()) {}