Example #1
void SpriterMainlineKey::load(const core::Json & json, const SpriterAnimation & animation)
{
    time = getOrDefault(json, "time", 0u);

    fill(boneKeys, json["bone_ref"], animation);
    fill(objectKeys, json["object_ref"], animation);
}
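
Both Spriter examples read optional JSON fields through a getOrDefault helper that is not part of the snippets. A minimal sketch of what such a helper could look like, assuming core::Json offers has() and a generic get<T>() accessor (the latter is an assumption for illustration; only getUint() actually appears in the examples):

// Sketch only: getOrDefault is not shown in the original examples.
// The generic get<T>() accessor on core::Json is assumed here.
template <typename T>
T getOrDefault(const core::Json & json, const char * key, const T & defaultValue)
{
    // return the stored value when the key exists, otherwise fall back to the default
    return json.has(key) ? json[key].get<T>() : defaultValue;
}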
Example #2
void SpriterTimelineKey::load(const core::Json & json, const SpriterFile & file)
{
    if(json.has("object"))
    {
        const core::Json & object = json["object"];

        // dividing by 180/pi converts the angle from degrees to radians
        angle = getOrDefault(object, "angle", 0.0f) / (180.0f / math::pi);
        alpha = getOrDefault(object, "alpha", 1.0f);

        asset = & file.allAssets[object["folder"].getUint()][object["file"].getUint()];

        pivot.x = getOrDefault(object, "pivot_x", asset->pivot.x) - 0.5f;
        pivot.y = getOrDefault(object, "pivot_y", asset->pivot.y) - 0.5f;

        position.x = getOrDefault(object, "x", 0.0f);
        position.y = getOrDefault(object, "y", 0.0f);
        scale.x = getOrDefault(object, "scale_x", 1.0f);
        scale.y = getOrDefault(object, "scale_y", 1.0f);
    }
    else if(json.has("bone"))
    {
        const core::Json & bone = json["bone"];

        angle = getOrDefault(bone, "angle", 0.0f) / (180.0f / math::pi);
        alpha = getOrDefault(bone, "alpha", 1.0f);
        position.x = getOrDefault(bone, "x", 0.0f);
        position.y = getOrDefault(bone, "y", 0.0f);
        scale.x = getOrDefault(bone, "scale_x", 1.0f);
        scale.y = getOrDefault(bone, "scale_y", 1.0f);
    }

    time = getOrDefault(json, "time", 0u);
    spin = getOrDefault(json, "spin", 1);
}
Example #3
void RequestParseTask::operator()() {
  assert((_responseTask != nullptr) && "Response needs to be set");
  const auto& scheduler = SharedScheduler::getInstance().getScheduler();

  performance_vector_t& performance_data = _responseTask->getPerformanceData();

  bool recordPerformance = false;
  std::vector<std::shared_ptr<Task> > tasks;

  int priority = Task::DEFAULT_PRIORITY;
  int sessionId = 0;

  if (_connection->hasBody()) {
    // The body is a well-formed HTTP POST body with key-value pairs
    std::string body(_connection->getBody());
    std::map<std::string, std::string> body_data = parseHTTPFormData(body);

    boost::optional<tx::TXContext> ctx;
    auto ctx_it = body_data.find("session_context");
    if (ctx_it != body_data.end()) {
      boost::optional<tx::transaction_id_t> tid;
      if ((tid = parseNumeric<tx::transaction_id_t>(ctx_it->second)) &&
          (tx::TransactionManager::isRunningTransaction(*tid))) {
        LOG4CXX_DEBUG(_logger, "Picking up transaction id " << *tid);
        ctx = tx::TransactionManager::getContext(*tid);
      } else {
        // if parseNumeric failed, tid is empty, so log the raw string instead of dereferencing *tid
        LOG4CXX_ERROR(_logger, "Invalid transaction id " << ctx_it->second);
        _responseTask->addErrorMessage("Invalid transaction id set, aborting execution.");
      }
    } else {
      ctx = tx::TransactionManager::beginTransaction();
      LOG4CXX_DEBUG(_logger, "Creating new transaction context " << (*ctx).tid);
    }

    Json::Value request_data;
    Json::Reader reader;

    const std::string& query_string = urldecode(body_data["query"]);

    if (ctx && reader.parse(query_string, request_data)) {
      _responseTask->setTxContext(*ctx);
      recordPerformance = getOrDefault(body_data, "performance", "false") == "true";

      // the performance attribute for this operation (at [0])
      if (recordPerformance) {
        performance_data.push_back(std::unique_ptr<performance_attributes_t>(new performance_attributes_t));
      }

      LOG4CXX_DEBUG(_query_logger, request_data);

      const std::string& final_hash = hash(query_string);
      std::shared_ptr<Task> result = nullptr;

      if(request_data.isMember("priority"))
        priority = request_data["priority"].asInt();
      if(request_data.isMember("sessionId"))
        sessionId = request_data["sessionId"].asInt();
      _responseTask->setPriority(priority);
      _responseTask->setSessionId(sessionId);
      _responseTask->setRecordPerformanceData(recordPerformance);
      try {
        tasks = QueryParser::instance().deserialize(
                  QueryTransformationEngine::getInstance()->transform(request_data),
                  &result);

      } catch (const std::exception &ex) {
        // clean up, so we don't end up with a whole mess due to thrown exceptions
        LOG4CXX_ERROR(_logger, "Received:\n" << request_data);
        LOG4CXX_ERROR(_logger, "Exception thrown during query deserialization:\n" << ex.what());
        _responseTask->addErrorMessage(std::string("RequestParseTask: ") + ex.what());
        tasks.clear();
        result = nullptr;
      }

      auto autocommit_it = body_data.find("autocommit");
      if (autocommit_it != body_data.end() && (autocommit_it->second == "true")) {
        auto commit = std::make_shared<Commit>();
        commit->setOperatorId("__autocommit");
        commit->setPlanOperationName("Commit");
        commit->addDependency(result);
        result = commit;
        tasks.push_back(commit);
      }

      if (result != nullptr) {
        _responseTask->addDependency(result);
      } else {
        LOG4CXX_ERROR(_logger, "Json did not yield tasks");
      }

      for (const auto & func: tasks) {
        if (auto task = std::dynamic_pointer_cast<PlanOperation>(func)) {
          task->setPriority(priority);
          task->setSessionId(sessionId);
          task->setPlanId(final_hash);
          task->setTXContext(*ctx);
          task->setId((*ctx).tid);
          _responseTask->registerPlanOperation(task);
          if (!task->hasSuccessors()) {
            // The response has to depend on all tasks, i.e. we don't
            // want to respond before all tasks have finished running, even
            // if they don't contribute to the result. This prevents
            // dangling tasks.
            _responseTask->addDependency(task);
          }
        }
      }
    } else {
      LOG4CXX_ERROR(_logger, "Failed to parse: "
                    << urldecode(body_data["query"]) << "\n"
                    << body_data["query"] << "\n"
                    << reader.getFormatedErrorMessages());
    }
    // Update the transmission limit for the response task
    if (atoi(body_data["limit"].c_str()) > 0)
      _responseTask->setTransmitLimit(atol(body_data["limit"].c_str()));

    if (atoi(body_data["offset"].c_str()) > 0)
      _responseTask->setTransmitOffset(atol(body_data["offset"].c_str()));

  } else {
    LOG4CXX_WARN(_logger, "no body received!");
  }

  // high-priority tasks are executed sequentially and inline rather than handed to the scheduler
  if(priority == Task::HIGH_PRIORITY){
    if (recordPerformance) {
      *(performance_data.at(0)) = { 0, 0, "NO_PAPI", "RequestParseTask", 
                                    "requestParse", _queryStart, get_epoch_nanoseconds(), 
                                    boost::lexical_cast<std::string>(std::this_thread::get_id()) };
    }

    int number_of_tasks = tasks.size();
    std::vector<bool> isExecuted(number_of_tasks, false);
    int executedTasks = 0;
    while(executedTasks < number_of_tasks){
      for(int i = 0; i < number_of_tasks; i++){
        if(!isExecuted[i] && tasks[i]->isReady()){
          (*tasks[i])();
          tasks[i]->notifyDoneObservers();
          executedTasks++;
          isExecuted[i] = true;
        }
      }
    }
    _responseTask->setQueryStart(_queryStart);
    (*_responseTask)();
    _responseTask.reset();  // yield responsibility

  } else {
    scheduler->schedule(_responseTask);
    scheduler->scheduleQuery(tasks);

    if (recordPerformance) {
      *(performance_data.at(0)) = { 0, 0, "NO_PAPI", "RequestParseTask", "requestParse", 
                                    _queryStart, get_epoch_nanoseconds(), 
                                    boost::lexical_cast<std::string>(std::this_thread::get_id()) };
    }
    _responseTask->setQueryStart(_queryStart);
    _responseTask.reset();  // yield responsibility
  }
}
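
Example #3 relies on a parseNumeric helper that returns an empty boost::optional on failure, which is why *tid is only safe to dereference once the parse has succeeded. A minimal stream-based sketch of such a helper (the implementation shown is an assumption for illustration, not the project's actual code):

#include <sstream>
#include <string>
#include <boost/optional.hpp>

// Sketch only: a parseNumeric helper matching the call in Example #3,
// returning boost::none when the string is not a valid number of type T.
template <typename T>
boost::optional<T> parseNumeric(const std::string & text) {
  std::istringstream stream(text);
  T value;
  // require that extraction succeeds and consumes the whole string
  if ((stream >> value) && stream.eof()) {
    return value;
  }
  return boost::none;
}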