// Example #1
// 0
// Computes a CoM trajectory for the constant-height LIPM via a single
// LQR sweep: roll out a nominal trajectory, compute feedback gains along
// it, then roll out once more with those gains applied.
//
// @param zmpTraj  desired ZMP trajectory to track
// @param x0       initial state for the rollouts
// @param traj     output CoM trajectory
// @return always true (no failure path in this implementation)
bool LipmConstHeightPlanner::getCOMTraj(const Traj<1,1> &zmpTraj, const double *x0, Traj<1,1> &traj)
{
  setZMPTraj(zmpTraj);

  forwardPass(x0, traj);   // nominal rollout to linearize around
  backwardPass(traj);      // compute value function / feedback gains
  forwardPass(x0, traj);   // final rollout with updated policy
  
  return true;
}
// NOTE: the z component of traj must be expressed as an offset from the ground
// height, NOT as an absolute z coordinate!
// Installs a new desired ZMP trajectory and (re)initializes all per-knot
// DDP state: terminal value function _Vxx, feedback gains _K, feedforward
// corrections _du, and the nominal trajectory _traj0 that the first
// backward pass will linearize around.
//
// @param traj  desired ZMP trajectory; traj[i].x is a 6-vector state,
//              traj[i].u a 3-vector input
// @param z0    ground height at each knot (must match traj in length);
//              trajectory z is interpreted as an offset from these
void LipmVarHeightPlanner::setZMPTraj(const Traj<3,3> &traj, const std::vector <double> &z0)
{
  assert(traj.size() > 0 && traj.size() == z0.size());
  _zmp_d = traj;
  _z0 = z0;
 
  // get last step Vxx and initial Ks
  const TrajPoint<3,3> &end = traj[traj.size()-1];
  double x[6] = {0};
  double u[3] = {0};
  dvec_copy(x, end.x, 6);
  dvec_copy(u, end.u, 3);
 
  Eigen::Matrix<double,6,6> A;
  Eigen::Matrix<double,6,3> B;
  
  // Linearize the dynamics at the final knot; the infinite-horizon LQR
  // solution there seeds the terminal cost-to-go and the initial gains.
  getAB(x, u, _z0[_z0.size()-1], A, B);
  
  _lqr.setQ(_Q);
  _lqr.setR(_R);
  _lqr.infTimeLQR(A, B);
  
  _Vxx = _lqr.getV();
  
  // Reuse existing per-knot buffers when the horizon length is unchanged;
  // otherwise resize, filling with zero feedforward and the LQR gain.
  if (traj.size() == _du.size()) {
    for (size_t i = 0; i < traj.size(); i++) {
      _du[i].setZero();
      _K[i] = _lqr.getK();
    }
  }
  else {
    _du.resize(traj.size(), Eigen::Matrix<double,3,1>::Zero());
    _K.resize(traj.size(), _lqr.getK());
  }

  // initial trajectory
  // Copy the desired trajectory knot-by-knot into _traj0 (this shadows the
  // outer `end`, intentionally iterating over every knot this time).
  _traj0 = Traj<3,3>();
  for (size_t i = 0; i < _zmp_d.size(); i++) {
    const TrajPoint<3,3> &end = traj[i];
    dvec_copy(x, end.x, 6);
    dvec_copy(u, end.u, 3);
    _traj0.append(end.time, end.type, x, x+3, NULL, u);
  }

  // Replace the copied nominal with an actual dynamics rollout from the
  // first knot, so _traj0 is dynamically consistent before the first
  // backward pass.
  Traj<3,3> tmpTraj;
  forwardPass(traj[0].x, tmpTraj);
  _traj0 = tmpTraj;

  assert(_zmp_d.size() == _du.size() && 
         _zmp_d.size() == _K.size() && 
         _zmp_d.size() == _traj0.size());  
}
// Computes the CoM trajectory for the variable-height LIPM by iterative
// LQR / DDP: alternate a backward pass (recompute gains and feedforward
// terms around the current nominal) with a forward rollout, until the
// rollout cost stops improving or the iteration cap is hit.
//
// @param zmp    desired ZMP trajectory
// @param state  initial state, forwarded verbatim to forwardPass
// @param z0     ground height at each knot (see setZMPTraj)
// @param com    output CoM trajectory (last accepted rollout)
// @return always true; hitting the iteration cap still counts as success
bool LipmVarHeightPlanner::getCOMTraj(const Traj<3,3> &zmp, const double *state,  const std::vector<double> &z0, Traj<3,3> &com)
{
  // Iteration cap and convergence tolerance on the rollout cost.
  static const int kMaxIterations = 20;
  static const double kCostTolerance = 1e-3;

  setZMPTraj(zmp, z0);

  double cost0 = INFINITY;  // cost from the previous iteration

  for (int i = 0; i < kMaxIterations; i++) {
    backwardPass(_traj0);                         // update _K / _du around the nominal
    const double cost = forwardPass(state, com);  // roll out with the new policy
    _traj0 = com;                                 // next iteration linearizes here

    // A NaN/Inf cost means the rollout diverged — a tuning/programming
    // error, not a recoverable runtime condition.
    // (std::-qualified: only the std:: forms are guaranteed by <cmath>.)
    assert(!std::isnan(cost) && !std::isinf(cost));

    if (std::fabs(cost - cost0) < kCostTolerance)
      break;  // converged

    cost0 = cost;
  }

  return true;
}
int openPoseTutorialPose2()
{
    try
    {
        op::log("Starting OpenPose demo...", op::Priority::High);
        const auto timerBegin = std::chrono::high_resolution_clock::now();

        // ------------------------- INITIALIZATION -------------------------
        // Step 1 - Set logging level
            // - 0 will output all the logging messages
            // - 255 will output nothing
        op::check(0 <= FLAGS_logging_level && FLAGS_logging_level <= 255, "Wrong logging_level value.",
                  __LINE__, __FUNCTION__, __FILE__);
        op::ConfigureLog::setPriorityThreshold((op::Priority)FLAGS_logging_level);
        op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
        // Step 2 - Read Google flags (user defined configuration)
        // outputSize
        const auto outputSize = op::flagsToPoint(FLAGS_output_resolution, "-1x-1");
        // netInputSize
        const auto netInputSize = op::flagsToPoint(FLAGS_net_resolution, "-1x368");
        // poseModel
        const auto poseModel = op::flagsToPoseModel(FLAGS_model_pose);
        // Check no contradictory flags enabled
        if (FLAGS_alpha_pose < 0. || FLAGS_alpha_pose > 1.)
            op::error("Alpha value for blending must be in the range [0,1].", __LINE__, __FUNCTION__, __FILE__);
        if (FLAGS_scale_gap <= 0. && FLAGS_scale_number > 1)
            op::error("Incompatible flag configuration: scale_gap must be greater than 0 or scale_number = 1.",
                      __LINE__, __FUNCTION__, __FILE__);
        // Logging
        op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
        // Step 3 - Initialize all required classes
        op::ScaleAndSizeExtractor scaleAndSizeExtractor(netInputSize, outputSize, FLAGS_scale_number, FLAGS_scale_gap);
        op::CvMatToOpInput cvMatToOpInput{poseModel};
        op::CvMatToOpOutput cvMatToOpOutput;
        auto poseExtractorPtr = std::make_shared<op::PoseExtractorCaffe>(poseModel, FLAGS_model_folder,
                                                                         FLAGS_num_gpu_start);
        op::PoseGpuRenderer poseGpuRenderer{poseModel, poseExtractorPtr, (float)FLAGS_render_threshold,
                                            !FLAGS_disable_blending, (float)FLAGS_alpha_pose, (float)FLAGS_alpha_heatmap};
        poseGpuRenderer.setElementToRender(FLAGS_part_to_show);
        op::OpOutputToCvMat opOutputToCvMat;
        op::FrameDisplayer frameDisplayer{"OpenPose Tutorial - Example 2", outputSize};
        // Step 4 - Initialize resources on desired thread (in this case single thread, i.e. we init resources here)
        poseExtractorPtr->initializationOnThread();
        poseGpuRenderer.initializationOnThread();

        // ------------------------- POSE ESTIMATION AND RENDERING -------------------------
        // Step 1 - Read and load image, error if empty (possibly wrong path)
        // Alternative: cv::imread(FLAGS_image_path, CV_LOAD_IMAGE_COLOR);
        cv::Mat inputImage = op::loadImage(FLAGS_image_path, CV_LOAD_IMAGE_COLOR);
        if(inputImage.empty())
            op::error("Could not open or find the image: " + FLAGS_image_path, __LINE__, __FUNCTION__, __FILE__);
        const op::Point<int> imageSize{inputImage.cols, inputImage.rows};
        // Step 2 - Get desired scale sizes
        std::vector<double> scaleInputToNetInputs;
        std::vector<op::Point<int>> netInputSizes;
        double scaleInputToOutput;
        op::Point<int> outputResolution;
        std::tie(scaleInputToNetInputs, netInputSizes, scaleInputToOutput, outputResolution)
            = scaleAndSizeExtractor.extract(imageSize);
        // Step 3 - Format input image to OpenPose input and output formats
        const auto netInputArray = cvMatToOpInput.createArray(inputImage, scaleInputToNetInputs, netInputSizes);
        auto outputArray = cvMatToOpOutput.createArray(inputImage, scaleInputToOutput, outputResolution);
        // Step 4 - Estimate poseKeypoints
        poseExtractorPtr->forwardPass(netInputArray, imageSize, scaleInputToNetInputs);
        const auto poseKeypoints = poseExtractorPtr->getPoseKeypoints();
        const auto scaleNetToOutput = poseExtractorPtr->getScaleNetToOutput();
        // Step 5 - Render pose
        poseGpuRenderer.renderPose(outputArray, poseKeypoints, scaleInputToOutput, scaleNetToOutput);
        // Step 6 - OpenPose output format to cv::Mat
        auto outputImage = opOutputToCvMat.formatToCvMat(outputArray);

        // ------------------------- SHOWING RESULT AND CLOSING -------------------------
        // Show results
        frameDisplayer.displayFrame(outputImage, 0); // Alternative: cv::imshow(outputImage) + cv::waitKey(0)
        // Measuring total time
        const auto now = std::chrono::high_resolution_clock::now();
        const auto totalTimeSec = (double)std::chrono::duration_cast<std::chrono::nanoseconds>(now-timerBegin).count()
                                * 1e-9;
        const auto message = "OpenPose demo successfully finished. Total time: "
                           + std::to_string(totalTimeSec) + " seconds.";
        op::log(message, op::Priority::High);
        // Return successful message
        return 0;
    }
    catch (const std::exception& e)
    {
        op::error(e.what(), __LINE__, __FUNCTION__, __FILE__);
        return -1;
    }
}