template <typename PointT> void
pcl::people::GroundBasedPeopleDetectionApp<PointT>::compute (std::vector<pcl::people::PersonCluster<PointT> >& clusters)
{
  // Check if all mandatory variables have been set:
  if (sqrt_ground_coeffs_ != sqrt_ground_coeffs_)    // self-inequality is true only for NaN, i.e. ground coefficients not set
  {
    PCL_ERROR ("[pcl::people::GroundBasedPeopleDetectionApp::compute] Floor parameters have not been set or they are not valid!\n");
    return;
  }
  if (cloud_ == NULL)
  {
    PCL_ERROR ("[pcl::people::GroundBasedPeopleDetectionApp::compute] Input cloud has not been set!\n");
    return;
  }
  if (intrinsics_matrix_(0) == 0)
  {
    PCL_ERROR ("[pcl::people::GroundBasedPeopleDetectionApp::compute] Camera intrinsic parameters have not been set!\n");
    return;
  }
  if (!person_classifier_set_flag_)
  {
    PCL_ERROR ("[pcl::people::GroundBasedPeopleDetectionApp::compute] Person classifier has not been set!\n");
    return;
  }

  if (!dimension_limits_set_)    // if dimension limits have not been set by the user
  {
    // Adapt thresholds for clusters points number to the voxel size:
    max_points_ = int(float(max_points_) * std::pow(0.06/voxel_size_, 2));
    if (voxel_size_ > 0.06)
      min_points_ = int(float(min_points_) * std::pow(0.06/voxel_size_, 2));
  }

  // Fill rgb image:
  rgb_image_->points.clear();                            // clear RGB pointcloud
  extractRGBFromPointCloud(cloud_, rgb_image_);          // fill RGB pointcloud

  // Voxel grid filtering:
  PointCloudPtr cloud_filtered(new PointCloud);
  pcl::VoxelGrid<PointT> voxel_grid_filter_object;
  voxel_grid_filter_object.setInputCloud(cloud_);
  voxel_grid_filter_object.setLeafSize (voxel_size_, voxel_size_, voxel_size_);
  voxel_grid_filter_object.filter (*cloud_filtered);

  // Ground removal and update:
  pcl::IndicesPtr inliers(new std::vector<int>);
  boost::shared_ptr<pcl::SampleConsensusModelPlane<PointT> > ground_model(new pcl::SampleConsensusModelPlane<PointT>(cloud_filtered));
  ground_model->selectWithinDistance(ground_coeffs_, voxel_size_, *inliers);
  no_ground_cloud_ = PointCloudPtr (new PointCloud);
  pcl::ExtractIndices<PointT> extract;
  extract.setInputCloud(cloud_filtered);
  extract.setIndices(inliers);
  extract.setNegative(true);
  extract.filter(*no_ground_cloud_);
  if (inliers->size() >= 300 * 0.06 / voxel_size_)    // enough inliers (threshold scaled to the voxel size) to refine the ground plane
    ground_model->optimizeModelCoefficients(*inliers, ground_coeffs_, ground_coeffs_);
  else
    std::cout << "No groundplane update!" << std::endl;

  // Euclidean Clustering:
  std::vector<pcl::PointIndices> cluster_indices;
  typename pcl::search::KdTree<PointT>::Ptr tree (new pcl::search::KdTree<PointT>);
  tree->setInputCloud(no_ground_cloud_);
  pcl::EuclideanClusterExtraction<PointT> ec;
  ec.setClusterTolerance(2 * 0.06);    // twice the 0.06 m reference voxel size
  ec.setMinClusterSize(min_points_);
  ec.setMaxClusterSize(max_points_);
  ec.setSearchMethod(tree);
  ec.setInputCloud(no_ground_cloud_);
  ec.extract(cluster_indices);

  // Head based sub-clustering //
  pcl::people::HeadBasedSubclustering<PointT> subclustering;
  subclustering.setInputCloud(no_ground_cloud_);
  subclustering.setGround(ground_coeffs_);
  subclustering.setInitialClusters(cluster_indices);
  subclustering.setHeightLimits(min_height_, max_height_);
  subclustering.setMinimumDistanceBetweenHeads(heads_minimum_distance_);
  subclustering.setSensorPortraitOrientation(vertical_);
  subclustering.subcluster(clusters);

  // Person confidence evaluation with HOG+SVM:
  if (vertical_)  // Rotate the image if the camera is vertical
  {
    swapDimensions(rgb_image_);
  }
  for(typename std::vector<pcl::people::PersonCluster<PointT> >::iterator it = clusters.begin(); it != clusters.end(); ++it)
  {
    //Evaluate confidence for the current PersonCluster:
    Eigen::Vector3f centroid = intrinsics_matrix_ * (it->getTCenter());
    centroid /= centroid(2);
    Eigen::Vector3f top = intrinsics_matrix_ * (it->getTTop());
    top /= top(2);
    Eigen::Vector3f bottom = intrinsics_matrix_ * (it->getTBottom());
    bottom /= bottom(2);
    it->setPersonConfidence(person_classifier_.evaluate(rgb_image_, bottom, top, centroid, vertical_));
  }
}
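
// A minimal usage sketch for compute(), following the setter names used in the
// PCL ground-based people detection tutorial; the SVM file path, intrinsics,
// ground coefficients and thresholds below are placeholders, not fixed values.
#include <pcl/people/ground_based_people_detection_app.h>
#include <pcl/people/person_classifier.h>
#include <iostream>

typedef pcl::PointXYZRGBA PointType;

void detectPeopleOnce (const pcl::PointCloud<PointType>::Ptr& cloud,
                       const Eigen::VectorXf& ground_coeffs,
                       const Eigen::Matrix3f& rgb_intrinsics,
                       const std::string& svm_filename)
{
  // Load the pre-trained HOG+SVM person classifier:
  pcl::people::PersonClassifier<pcl::RGB> person_classifier;
  person_classifier.loadSVMFromFile (svm_filename);

  // One-time configuration of the detector:
  pcl::people::GroundBasedPeopleDetectionApp<PointType> people_detector;
  people_detector.setVoxelSize (0.06);              // leaf size in meters
  people_detector.setIntrinsics (rgb_intrinsics);   // RGB camera intrinsic parameters
  people_detector.setClassifier (person_classifier);
  people_detector.setHeightLimits (1.3, 2.3);       // min/max person height in meters

  // Per-frame call:
  people_detector.setInputCloud (cloud);
  people_detector.setGround (ground_coeffs);        // 4 ground plane coefficients
  std::vector<pcl::people::PersonCluster<PointType> > clusters;
  people_detector.compute (clusters);

  for (size_t i = 0; i < clusters.size (); ++i)
    if (clusters[i].getPersonConfidence () > -1.5)  // example confidence threshold
      std::cout << "Person at " << clusters[i].getTCenter ().transpose () << std::endl;
}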
template<typename PointT> void PersonClusterVisualizer<PointT>::visualize(const std::string& nameSpace, std::vector<pcl::people::PersonCluster<PointT> >& clusters)
{
    // Save us some computation time if there are no subscribers.
    if(_markerArrayPublisher.getNumSubscribers() == 0) return;

    // Look up values for this particular namespace
    std::set<unsigned int>& oldMarkerIds = _oldMarkerIds[nameSpace];
    unsigned int& lastMarkerId = _lastMarkerIds[nameSpace];

    visualization_msgs::MarkerArray markerArray;

    // Remove old clusters
    for(std::set<unsigned int>::const_iterator it = oldMarkerIds.begin(); it != oldMarkerIds.end(); ++it)  {
        visualization_msgs::Marker oldClusterMarker = MarkerUtils::createMarker(nameSpace, _detectionFrame);
        oldClusterMarker.id = *it;
        oldClusterMarker.action = visualization_msgs::Marker::DELETE;
        markerArray.markers.push_back(oldClusterMarker);
    }
    oldMarkerIds.clear();

    double lifetime = 1.0 / 15;  // ~1/15 s, so stale markers expire even if never explicitly deleted
    unsigned int k = lastMarkerId + 1;
    for (typename std::vector<pcl::people::PersonCluster<PointT> >::iterator it = clusters.begin(); it != clusters.end(); ++it) {
        //
        // 3D bounding box
        //
        visualization_msgs::Marker clusterMarker = MarkerUtils::createMarker(nameSpace, _detectionFrame);
        clusterMarker.id = k;
        clusterMarker.type = visualization_msgs::Marker::CUBE;
        clusterMarker.color = MarkerUtils::getPaletteColor(_visualizationCounter);
        clusterMarker.color.a = 0.35;
        clusterMarker.scale.x = it->getMax().x() - it->getMin().x();
        clusterMarker.scale.y = it->getHeight(); // note that in the detection frame, y is up
        clusterMarker.scale.z = it->getMax().z() - it->getMin().z();
        clusterMarker.lifetime.fromSec(lifetime); // just to be safe

        Eigen::Vector3f tcenter = it->getTCenter();
        tf::poseEigenToMsg(Eigen::Translation3d(tcenter.cast<double>()) * Eigen::Affine3d::Identity(), clusterMarker.pose);
        markerArray.markers.push_back(clusterMarker);
        oldMarkerIds.insert(clusterMarker.id);
        k++;

        //
        // Center of gravity
        //
        visualization_msgs::Marker cogMarker = MarkerUtils::createMarker(nameSpace, _detectionFrame);
        cogMarker.id = k;
        cogMarker.type = visualization_msgs::Marker::SPHERE;
        cogMarker.color = MarkerUtils::getPaletteColor(_visualizationCounter);
        cogMarker.color.a = 1.0;
        cogMarker.scale.x = cogMarker.scale.y = cogMarker.scale.z = 0.1; // 10 cm
        cogMarker.lifetime.fromSec(lifetime); // just to be safe

        Eigen::Vector3f center = it->getCenter();
        tf::poseEigenToMsg(Eigen::Translation3d(center.cast<double>()) * Eigen::Affine3d::Identity(), cogMarker.pose);
        markerArray.markers.push_back(cogMarker);
        oldMarkerIds.insert(cogMarker.id);
        k++;

        //
        // Person confidence (if set)
        //
        if(it->getPersonConfidence() == it->getPersonConfidence()) { // must not be NaN
            std::stringstream confidenceStr; confidenceStr << std::fixed << std::setprecision(1) << it->getPersonConfidence();
            visualization_msgs::Marker confidenceMarker = MarkerUtils::createMarker(nameSpace, _detectionFrame);
            confidenceMarker.id = k;
            confidenceMarker.type = visualization_msgs::Marker::TEXT_VIEW_FACING;
            //confidenceMarker.color = MarkerUtils::getPaletteColor(_visualizationCounter);
            confidenceMarker.color.r = confidenceMarker.color.g = confidenceMarker.color.b = 1.0;
            confidenceMarker.color.a = 1.0;
            confidenceMarker.scale.z = 0.2; // height of letters in m(?)
            confidenceMarker.lifetime.fromSec(lifetime); // just to be safe
            confidenceMarker.text = confidenceStr.str();

            Eigen::Vector3f top = it->getTop();
            tf::poseEigenToMsg(Eigen::Translation3d(top.cast<double>()) * Eigen::Affine3d::Identity(), confidenceMarker.pose);
            markerArray.markers.push_back(confidenceMarker);
            oldMarkerIds.insert(confidenceMarker.id);
            k++;
        }
    }

    lastMarkerId = k;
    _markerArrayPublisher.publish(markerArray);
    _visualizationCounter++;
}
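
// A plausible sketch of the MarkerUtils::createMarker helper relied on above --
// an assumption about its behavior, not the actual implementation: it pre-fills
// the frame, timestamp, namespace and common defaults of a ROS marker.
#include <ros/ros.h>
#include <visualization_msgs/Marker.h>

visualization_msgs::Marker createMarkerSketch (const std::string& nameSpace, const std::string& detectionFrame)
{
  visualization_msgs::Marker marker;
  marker.header.frame_id = detectionFrame;          // frame the cluster poses are expressed in
  marker.header.stamp = ros::Time::now ();
  marker.ns = nameSpace;                            // one namespace per detector instance
  marker.action = visualization_msgs::Marker::ADD;  // callers override this with DELETE for stale markers
  marker.pose.orientation.w = 1.0;                  // identity orientation by default
  return marker;
}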
template <typename PointT> bool
pcl::people::GroundBasedPeopleDetectionApp<PointT>::compute (std::vector<pcl::people::PersonCluster<PointT> >& clusters)
{
  // Check if all mandatory variables have been set:
  if (!ground_coeffs_set_)
  {
    PCL_ERROR ("[pcl::people::GroundBasedPeopleDetectionApp::compute] Floor parameters have not been set or they are not valid!\n");
    return (false);
  }
  if (cloud_ == NULL)
  {
    PCL_ERROR ("[pcl::people::GroundBasedPeopleDetectionApp::compute] Input cloud has not been set!\n");
    return (false);
  }
  if (!intrinsics_matrix_set_)
  {
    PCL_ERROR ("[pcl::people::GroundBasedPeopleDetectionApp::compute] Camera intrinsic parameters have not been set!\n");
    return (false);
  }
  if (!person_classifier_set_flag_)
  {
    PCL_ERROR ("[pcl::people::GroundBasedPeopleDetectionApp::compute] Person classifier has not been set!\n");
    return (false);
  }

  // Fill rgb image:
  rgb_image_->points.clear();                            // clear RGB pointcloud
  extractRGBFromPointCloud(cloud_, rgb_image_);          // fill RGB pointcloud

  // Downsample of sampling_factor in every dimension:
  if (sampling_factor_ != 1)
  {
    PointCloudPtr cloud_downsampled(new PointCloud);
    cloud_downsampled->width = (cloud_->width)/sampling_factor_;
    cloud_downsampled->height = (cloud_->height)/sampling_factor_;
    cloud_downsampled->points.resize(cloud_downsampled->height*cloud_downsampled->width);
    cloud_downsampled->is_dense = cloud_->is_dense;
    for (uint32_t j = 0; j < cloud_downsampled->width; j++)
    {
      for (uint32_t i = 0; i < cloud_downsampled->height; i++)
      {
        (*cloud_downsampled)(j,i) = (*cloud_)(sampling_factor_*j,sampling_factor_*i);
      }
    }
    (*cloud_) = (*cloud_downsampled);
  }

  applyTransformationPointCloud();

  filter();

  // Ground removal and update:
  pcl::IndicesPtr inliers(new std::vector<int>);
  boost::shared_ptr<pcl::SampleConsensusModelPlane<PointT> > ground_model(new pcl::SampleConsensusModelPlane<PointT>(cloud_filtered_));
  ground_model->selectWithinDistance(ground_coeffs_transformed_, 2 * voxel_size_, *inliers);
  no_ground_cloud_ = PointCloudPtr (new PointCloud);
  pcl::ExtractIndices<PointT> extract;
  extract.setInputCloud(cloud_filtered_);
  extract.setIndices(inliers);
  extract.setNegative(true);
  extract.filter(*no_ground_cloud_);
  if (inliers->size () >= (300 * 0.06 / voxel_size_ / std::pow (static_cast<double> (sampling_factor_), 2)))
    ground_model->optimizeModelCoefficients (*inliers, ground_coeffs_transformed_, ground_coeffs_transformed_);
  else
    PCL_INFO ("No groundplane update!\n");

  // Euclidean Clustering:
  std::vector<pcl::PointIndices> cluster_indices;
  typename pcl::search::KdTree<PointT>::Ptr tree (new pcl::search::KdTree<PointT>);
  tree->setInputCloud(no_ground_cloud_);
  pcl::EuclideanClusterExtraction<PointT> ec;
  ec.setClusterTolerance(2 * voxel_size_);
  ec.setMinClusterSize(min_points_);
  ec.setMaxClusterSize(max_points_);
  ec.setSearchMethod(tree);
  ec.setInputCloud(no_ground_cloud_);
  ec.extract(cluster_indices);

  // Head based sub-clustering //
  pcl::people::HeadBasedSubclustering<PointT> subclustering;
  subclustering.setInputCloud(no_ground_cloud_);
  subclustering.setGround(ground_coeffs_transformed_);
  subclustering.setInitialClusters(cluster_indices);
  subclustering.setHeightLimits(min_height_, max_height_);
  subclustering.setMinimumDistanceBetweenHeads(heads_minimum_distance_);
  subclustering.setSensorPortraitOrientation(vertical_);
  subclustering.subcluster(clusters);

  // Person confidence evaluation with HOG+SVM:
  if (vertical_)  // Rotate the image if the camera is vertical
  {
    swapDimensions(rgb_image_);
  }
  for(typename std::vector<pcl::people::PersonCluster<PointT> >::iterator it = clusters.begin(); it != clusters.end(); ++it)
  {
    //Evaluate confidence for the current PersonCluster:
    Eigen::Vector3f centroid = intrinsics_matrix_transformed_ * (it->getTCenter());
    centroid /= centroid(2);
    Eigen::Vector3f top = intrinsics_matrix_transformed_ * (it->getTTop());
    top /= top(2);
    Eigen::Vector3f bottom = intrinsics_matrix_transformed_ * (it->getTBottom());
    bottom /= bottom(2);
    it->setPersonConfidence(person_classifier_.evaluate(rgb_image_, bottom, top, centroid, vertical_));
  }
 
  return (true);
}
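
// The compute() variants above assume the ground coefficients have already been
// provided via setGround(); a minimal sketch of one way to obtain them with RANSAC
// plane fitting (the 3 cm distance threshold is illustrative).
#include <pcl/ModelCoefficients.h>
#include <pcl/segmentation/sac_segmentation.h>
#include <pcl/sample_consensus/method_types.h>
#include <pcl/sample_consensus/model_types.h>

template <typename PointT> Eigen::VectorXf
estimateGroundCoeffs (const typename pcl::PointCloud<PointT>::Ptr& cloud)
{
  pcl::ModelCoefficients coefficients;
  pcl::PointIndices inliers;
  pcl::SACSegmentation<PointT> seg;
  seg.setOptimizeCoefficients (true);               // refine the plane on its inliers
  seg.setModelType (pcl::SACMODEL_PLANE);
  seg.setMethodType (pcl::SAC_RANSAC);
  seg.setDistanceThreshold (0.03);                  // inlier tolerance in meters
  seg.setInputCloud (cloud);
  seg.segment (inliers, coefficients);              // coefficients: a, b, c, d of ax+by+cz+d=0

  Eigen::VectorXf ground_coeffs (4);
  for (int i = 0; i < 4; ++i)
    ground_coeffs (i) = coefficients.values[i];
  return ground_coeffs;
}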
template <typename PointT> bool
open_ptrack::detection::GroundBasedPeopleDetectionApp<PointT>::compute (std::vector<pcl::people::PersonCluster<PointT> >& clusters)
{
  frame_counter_++;

  // Define if debug info should be written or not for this frame:
  bool debug_flag = false;
  if ((frame_counter_ % 60) == 0)
  {
    debug_flag = true;
  }

  // Check if all mandatory variables have been set (checked only on debug frames to limit per-frame overhead):
  if (debug_flag)
  {
    if (sqrt_ground_coeffs_ != sqrt_ground_coeffs_)
    {
      PCL_ERROR ("[open_ptrack::detection::GroundBasedPeopleDetectionApp::compute] Floor parameters have not been set or they are not valid!\n");
      return (false);
    }
    if (cloud_ == NULL)
    {
      PCL_ERROR ("[open_ptrack::detection::GroundBasedPeopleDetectionApp::compute] Input cloud has not been set!\n");
      return (false);
    }
    if (intrinsics_matrix_(0) == 0)
    {
      PCL_ERROR ("[open_ptrack::detection::GroundBasedPeopleDetectionApp::compute] Camera intrinsic parameters have not been set!\n");
      return (false);
    }
    if (!person_classifier_set_flag_)
    {
      PCL_ERROR ("[open_ptrack::detection::GroundBasedPeopleDetectionApp::compute] Person classifier has not been set!\n");
      return (false);
    }
  }

  if (!dimension_limits_set_)    // if dimension limits have not been set by the user
  {
    // Adapt thresholds for clusters points number to the voxel size:
    max_points_ = int(float(max_points_) * std::pow(0.06/voxel_size_, 2));
    if (voxel_size_ > 0.06)
      min_points_ = int(float(min_points_) * std::pow(0.06/voxel_size_, 2));
  }

  // Fill rgb image:
  rgb_image_->points.clear();                            // clear RGB pointcloud
  extractRGBFromPointCloud(cloud_, rgb_image_);          // fill RGB pointcloud

  // Downsample of sampling_factor in every dimension:
  if (sampling_factor_ != 1)
  {
    PointCloudPtr cloud_downsampled(new PointCloud);
    cloud_downsampled->width = (cloud_->width)/sampling_factor_;
    cloud_downsampled->height = (cloud_->height)/sampling_factor_;
    cloud_downsampled->points.resize(cloud_downsampled->height*cloud_downsampled->width);
    cloud_downsampled->is_dense = cloud_->is_dense;
    cloud_downsampled->header = cloud_->header;
    for (uint32_t j = 0; j < cloud_downsampled->width; j++)
    {
      for (uint32_t i = 0; i < cloud_downsampled->height; i++)
      {
        (*cloud_downsampled)(j,i) = (*cloud_)(sampling_factor_*j,sampling_factor_*i);
      }
    }
    (*cloud_) = (*cloud_downsampled);
  }

  if (use_rgb_)
  {
    // Compute mean luminance:
    int n_points = cloud_->points.size();
    double sumR = 0.0, sumG = 0.0, sumB = 0.0;
    for (uint32_t j = 0; j < cloud_->width; j++)
    {
      for (uint32_t i = 0; i < cloud_->height; i++)
      {
        sumR += (*cloud_)(j,i).r;
        sumG += (*cloud_)(j,i).g;
        sumB += (*cloud_)(j,i).b;
      }
    }
    mean_luminance_ = 0.3 * sumR/n_points + 0.59 * sumG/n_points + 0.11 * sumB/n_points;
    //    mean_luminance_ = 0.2126 * sumR/n_points + 0.7152 * sumG/n_points + 0.0722 * sumB/n_points;
  }

  // Voxel grid filtering:
  PointCloudPtr cloud_filtered(new PointCloud);
  pcl::VoxelGrid<PointT> voxel_grid_filter_object;
  voxel_grid_filter_object.setInputCloud(cloud_);
  voxel_grid_filter_object.setLeafSize (voxel_size_, voxel_size_, voxel_size_);
  voxel_grid_filter_object.setFilterFieldName("z");
  voxel_grid_filter_object.setFilterLimits(0.0, max_distance_);
  voxel_grid_filter_object.filter (*cloud_filtered);

  // Ground removal and update:
  pcl::IndicesPtr inliers(new std::vector<int>);
  boost::shared_ptr<pcl::SampleConsensusModelPlane<PointT> > ground_model(new pcl::SampleConsensusModelPlane<PointT>(cloud_filtered));
  ground_model->selectWithinDistance(ground_coeffs_, voxel_size_, *inliers);
  no_ground_cloud_ = PointCloudPtr (new PointCloud);
  pcl::ExtractIndices<PointT> extract;
  extract.setInputCloud(cloud_filtered);
  extract.setIndices(inliers);
  extract.setNegative(true);
  extract.filter(*no_ground_cloud_);
  if (inliers->size () >= (300 * 0.06 / voxel_size_ / std::pow (static_cast<double> (sampling_factor_), 2)))
    ground_model->optimizeModelCoefficients (*inliers, ground_coeffs_, ground_coeffs_);
  else
  {
    if (debug_flag)
    {
      PCL_INFO ("No groundplane update!\n");
    }
  }

  // Background Subtraction (optional):
  if (background_subtraction_)
  {
    PointCloudPtr foreground_cloud(new PointCloud);
    for (unsigned int i = 0; i < no_ground_cloud_->points.size(); i++)
    {
      if (not (background_octree_->isVoxelOccupiedAtPoint(no_ground_cloud_->points[i].x, no_ground_cloud_->points[i].y, no_ground_cloud_->points[i].z)))
      {
        foreground_cloud->points.push_back(no_ground_cloud_->points[i]);
      }
    }
    no_ground_cloud_ = foreground_cloud;
  }

  if (no_ground_cloud_->points.size() > 0)
  {
    // Euclidean Clustering:
    std::vector<pcl::PointIndices> cluster_indices;
    typename pcl::search::KdTree<PointT>::Ptr tree (new pcl::search::KdTree<PointT>);
    tree->setInputCloud(no_ground_cloud_);
    pcl::EuclideanClusterExtraction<PointT> ec;
    ec.setClusterTolerance(2 * 0.06);
    ec.setMinClusterSize(min_points_);
    ec.setMaxClusterSize(max_points_);
    ec.setSearchMethod(tree);
    ec.setInputCloud(no_ground_cloud_);
    ec.extract(cluster_indices);

    // Sensor tilt compensation to improve people detection:
    PointCloudPtr no_ground_cloud_rotated(new PointCloud);
    Eigen::VectorXf ground_coeffs_new;
    if(sensor_tilt_compensation_)
    {
      // We want to rotate the point cloud so that the ground plane is parallel to the xOz plane of the sensor:
      Eigen::Vector3f input_plane, output_plane;
      input_plane << ground_coeffs_(0), ground_coeffs_(1), ground_coeffs_(2);
      output_plane << 0.0, -1.0, 0.0;

      Eigen::Vector3f axis = input_plane.cross(output_plane);
      float angle = acos( input_plane.dot(output_plane) / ( input_plane.norm() * output_plane.norm() ) );
      if (axis.norm() > 1e-6f)
        axis.normalize();                  // Eigen::AngleAxis expects a unit rotation axis
      transform_ = Eigen::AngleAxisf(angle, axis);

      // Setting also anti_transform for later
      anti_transform_ = transform_.inverse();
      no_ground_cloud_rotated = rotateCloud(no_ground_cloud_, transform_);
      ground_coeffs_new.resize(4);
      ground_coeffs_new = rotateGround(ground_coeffs_, transform_);
    }
    else
    {
      transform_ = transform_.Identity();
      anti_transform_ = transform_.inverse();
      no_ground_cloud_rotated = no_ground_cloud_;
      ground_coeffs_new = ground_coeffs_;
    }

    // To avoid PCL warning:
    if (cluster_indices.size() == 0)
      cluster_indices.push_back(pcl::PointIndices());

    // Head based sub-clustering //
    pcl::people::HeadBasedSubclustering<PointT> subclustering;
    subclustering.setInputCloud(no_ground_cloud_rotated);
    subclustering.setGround(ground_coeffs_new);
    subclustering.setInitialClusters(cluster_indices);
    subclustering.setHeightLimits(min_height_, max_height_);
    subclustering.setMinimumDistanceBetweenHeads(heads_minimum_distance_);
    subclustering.setSensorPortraitOrientation(vertical_);
    subclustering.subcluster(clusters);

//    for (unsigned int i = 0; i < rgb_image_->points.size(); i++)
//    {
//      if ((rgb_image_->points[i].r < 0) | (rgb_image_->points[i].r > 255) | isnan(rgb_image_->points[i].r))
//      {
//        std::cout << "ERROR in RGB data!" << std::endl;
//      }
//    }

    if (use_rgb_) // if RGB information can be used
    {
      // Person confidence evaluation with HOG+SVM:
      if (vertical_)  // Rotate the image if the camera is vertical
      {
        swapDimensions(rgb_image_);
      }
      for(typename std::vector<pcl::people::PersonCluster<PointT> >::iterator it = clusters.begin(); it != clusters.end(); ++it)
      {
        //Evaluate confidence for the current PersonCluster:
        Eigen::Vector3f centroid = intrinsics_matrix_ * (anti_transform_ * it->getTCenter());
        centroid /= centroid(2);
        Eigen::Vector3f top = intrinsics_matrix_ * (anti_transform_ * it->getTTop());
        top /= top(2);
        Eigen::Vector3f bottom = intrinsics_matrix_ * (anti_transform_ * it->getTBottom());
        bottom /= bottom(2);

        it->setPersonConfidence(person_classifier_.evaluate(rgb_image_, bottom, top, centroid, vertical_));
      }
    }
    else
    {
      for(typename std::vector<pcl::people::PersonCluster<PointT> >::iterator it = clusters.begin(); it != clusters.end(); ++it)
      {
        it->setPersonConfidence(-100.0);
      }
    }
  }

  return (true);
}
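
// A minimal sketch of how the background_octree_ used for background subtraction
// above might be built; the helper name and the 0.3 m resolution are illustrative
// assumptions, not part of the detector API.
#include <pcl/octree/octree_pointcloud.h>

template <typename PointT> pcl::octree::OctreePointCloud<PointT>*
buildBackgroundOctree (const typename pcl::PointCloud<PointT>::Ptr& background_cloud,
                       double resolution = 0.3)     // voxel edge length in meters
{
  // Insert a cloud captured while the scene is empty; occupied voxels then describe
  // the static background, to be queried per point with isVoxelOccupiedAtPoint().
  pcl::octree::OctreePointCloud<PointT>* octree = new pcl::octree::OctreePointCloud<PointT> (resolution);
  octree->setInputCloud (background_cloud);
  octree->addPointsFromInputCloud ();
  return octree;
}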