void PointCloudImageCreator::callback(
    const sensor_msgs::PointCloud2::ConstPtr &cloud_msg,
    const jsk_recognition_msgs::ClusterPointIndices::ConstPtr &indices_msg) {

    if (!is_info_) {
       ROS_WARN("CAMERA INFO NOT SET");
       return;
    }
    ROS_INFO("PROCESSING");
   
    boost::mutex::scoped_lock lock(this->lock_);
    pcl::PointCloud<PointT>::Ptr cloud (new pcl::PointCloud<PointT>);
    pcl::fromROSMsg(*cloud_msg, *cloud);
    if (cloud->empty()) {
       ROS_WARN("EMPTY CLOUD POINT");
       return;
    }

    cv::Mat mask;
    cv::Mat img_out = this->projectPointCloudToImagePlane(
       cloud, indices_msg, this->camera_info_, mask);

    cv_bridge::CvImagePtr image_msg(new cv_bridge::CvImage);
    image_msg->header = cloud_msg->header;
    image_msg->encoding = sensor_msgs::image_encodings::BGR8;
    image_msg->image = img_out.clone();
    pub_image_.publish(image_msg->toImageMsg());
}
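A minimal sketch of the projection helper, which is not shown in this example: it assumes a standard pinhole model with the intrinsics taken from the CameraInfo K matrix (fx, fy, cx, cy) and that PointT carries RGB. The overload that also takes cluster indices would additionally restrict drawing to the indexed points.

// Hypothetical sketch, not the original implementation.
cv::Mat PointCloudImageCreator::projectPointCloudToImagePlane(
    const pcl::PointCloud<PointT>::Ptr cloud,
    const sensor_msgs::CameraInfo &camera_info,
    cv::Mat &mask) {
    const double fx = camera_info.K[0], fy = camera_info.K[4];
    const double cx = camera_info.K[2], cy = camera_info.K[5];
    cv::Mat image = cv::Mat::zeros(camera_info.height, camera_info.width, CV_8UC3);
    mask = cv::Mat::zeros(camera_info.height, camera_info.width, CV_8UC1);
    for (size_t i = 0; i < cloud->size(); i++) {
       const PointT &pt = cloud->points[i];
       if (std::isnan(pt.x) || std::isnan(pt.y) || std::isnan(pt.z) || pt.z <= 0.0) {
          continue;
       }
       // perspective projection: u = fx * x / z + cx, v = fy * y / z + cy
       int u = static_cast<int>(fx * pt.x / pt.z + cx);
       int v = static_cast<int>(fy * pt.y / pt.z + cy);
       if (u < 0 || u >= image.cols || v < 0 || v >= image.rows) {
          continue;
       }
       image.at<cv::Vec3b>(v, u) = cv::Vec3b(pt.b, pt.g, pt.r);  // OpenCV stores BGR
       mask.at<uchar>(v, u) = 255;
    }
    return image;
}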
void PointCloudImageCreator::cloudCallback(
    const sensor_msgs::PointCloud2::ConstPtr &cloud_msg) {
    if (!is_info_) {
       return;
    }
    boost::mutex::scoped_lock lock(this->lock_);
    pcl::PointCloud<PointT>::Ptr cloud (new pcl::PointCloud<PointT>);
    pcl::fromROSMsg(*cloud_msg, *cloud);
    if (cloud->empty()) {
       return;
    }
    cv::Mat mask;
    cv::Mat img_out = this->projectPointCloudToImagePlane(
       cloud, this->camera_info_, mask);
    cv::Mat interpolate_img = this->interpolateImage(img_out, mask);
    if (is_mask_image_) {
       cv::Mat foreground_mask;
       cv::Mat background_mask;
       this->rect_ = this->createMaskImages(img_out,
                                            foreground_mask,
                                            background_mask);
       this->foreground_mask_ = foreground_mask.clone();
       
       cv_bridge::CvImagePtr fmask_msg(new cv_bridge::CvImage);
       cv_bridge::CvImagePtr bmask_msg(new cv_bridge::CvImage);
       fmask_msg->header = cloud_msg->header;
       fmask_msg->encoding = sensor_msgs::image_encodings::MONO8;
       fmask_msg->image = foreground_mask.clone();
       
       bmask_msg->header = cloud_msg->header;
       bmask_msg->encoding = sensor_msgs::image_encodings::MONO8;
       bmask_msg->image = background_mask.clone();

       this->pub_fmask_.publish(fmask_msg->toImageMsg());
       this->pub_bmask_.publish(bmask_msg->toImageMsg());
    } else {
       sensor_msgs::PointCloud2 ros_cloud;
       pcl::toROSMsg(*cloud, ros_cloud);
       ros_cloud.header = cloud_msg->header;
       pub_cloud_.publish(ros_cloud);

       cv_bridge::CvImagePtr iimage_msg(new cv_bridge::CvImage);
       iimage_msg->header = cloud_msg->header;
       iimage_msg->encoding = sensor_msgs::image_encodings::BGR8;
       iimage_msg->image = interpolate_img.clone();

       cv_bridge::CvImagePtr image_msg(new cv_bridge::CvImage);
       image_msg->header = cloud_msg->header;
       image_msg->encoding = sensor_msgs::image_encodings::BGR8;
       image_msg->image = img_out.clone();
       pub_image_.publish(image_msg->toImageMsg());
       pub_iimage_.publish(iimage_msg->toImageMsg());
    }
    this->header_ = cloud_msg->header;
}
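interpolateImage is not shown either; a plausible sketch that fills the pixels which received no projected point using OpenCV inpainting (an assumption about the hole-filling strategy; requires <opencv2/photo/photo.hpp>):

// Hypothetical sketch, not the original implementation.
cv::Mat PointCloudImageCreator::interpolateImage(
    const cv::Mat &image, const cv::Mat &mask) {
    // pixels never written by the projection are the holes to fill
    cv::Mat holes;
    cv::bitwise_not(mask, holes);
    cv::Mat filled;
    cv::inpaint(image, holes, filled, 3.0, cv::INPAINT_NS);  // 3 px radius is arbitrary
    return filled;
}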
Example #3
  /* 
   * One and only one callback, now takes cloud, does everything else needed. 
   */
  void ARPublisher::getTransformationCallback (const sensor_msgs::PointCloud2ConstPtr & msg)
  {
    sensor_msgs::ImagePtr image_msg(new sensor_msgs::Image);
    ARUint8 *dataPtr;
    ARMarkerInfo *marker_info;
    int marker_num;
    int i, k, j;

    /* do we need to initialize? */
    if(!configured_)
    {
      if(msg->width == 0 || msg->height == 0)
      {
        ROS_ERROR ("Deformed cloud! Size = %d, %d.", msg->width, msg->height);
        return;
      }

      cam_param_.xsize = msg->width;
      cam_param_.ysize = msg->height;

      cam_param_.dist_factor[0] = msg->width/2;         // x0 = cX from openCV calibration
      cam_param_.dist_factor[1] = msg->height/2;        // y0 = cY from openCV calibration
      cam_param_.dist_factor[2] = 0;                    // f = -100*k1 from CV. Note, we had to do mm^2 to m^2, hence 10^8->10^2
      cam_param_.dist_factor[3] = 1.0;                  // scale factor, should probably be >1, but who cares...
      
      arInit ();
    }

    /* convert cloud to PCL */
    PointCloud cloud;
    pcl::fromROSMsg(*msg, cloud);
 
    /* get an OpenCV image from the cloud */
    pcl::toROSMsg (cloud, *image_msg);

    cv_bridge::CvImagePtr cv_ptr;
    try
    {
        cv_ptr = cv_bridge::toCvCopy(image_msg, sensor_msgs::image_encodings::BGR8);
    }
    catch (cv_bridge::Exception& e)
    {
      ROS_ERROR ("Could not convert from '%s' to 'bgr8'.", image_msg->encoding.c_str ());
    }
    dataPtr = (ARUint8 *) cv_ptr->image.ptr();

    /* detect the markers in the video frame */
    if (arDetectMarkerLite (dataPtr, threshold_, &marker_info, &marker_num) < 0)
    {
      argCleanup ();
      return;
    }
 
    arPoseMarkers_.markers.clear ();
    /* check for known patterns */
    for (i = 0; i < objectnum; i++)
    {
      k = -1;
      for (j = 0; j < marker_num; j++)
      {
        if (object[i].id == marker_info[j].id)
        {
          if (k == -1)
            k = j;
          else if (marker_info[k].cf < marker_info[j].cf)
            k = j;              // keep the pattern with the highest confidence factor
        }
      }
      if (k == -1)
      {
        object[i].visible = 0;
        continue;
      }
      
      /* create a cloud for marker corners */
      int d = marker_info[k].dir;
      PointCloud marker;
      marker.push_back( cloud.at( (int)marker_info[k].vertex[(4-d)%4][0], (int)marker_info[k].vertex[(4-d)%4][1] ) ); // upper left
      marker.push_back( cloud.at( (int)marker_info[k].vertex[(5-d)%4][0], (int)marker_info[k].vertex[(5-d)%4][1] ) ); // upper right
      marker.push_back( cloud.at( (int)marker_info[k].vertex[(6-d)%4][0], (int)marker_info[k].vertex[(6-d)%4][1] ) ); // lower right
      marker.push_back( cloud.at( (int)marker_info[k].vertex[(7-d)%4][0], (int)marker_info[k].vertex[(7-d)%4][1] ) ); // lower left

      /* create an ideal cloud */
      double w = object[i].marker_width;
      PointCloud ideal;
      ideal.push_back( makeRGBPoint(-w/2,w/2,0) );
      ideal.push_back( makeRGBPoint(w/2,w/2,0) );
      ideal.push_back( makeRGBPoint(w/2,-w/2,0) );
      ideal.push_back( makeRGBPoint(-w/2,-w/2,0) );

      /* get transformation */
      Eigen::Matrix4f t;
      TransformationEstimationSVD obj;
      obj.estimateRigidTransformation( marker, ideal, t );
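      /* estimateRigidTransformation aligns the measured marker corners to the
         ideal square via an SVD-based least-squares fit, so t maps points from
         the marker cloud into the ideal (marker-local) frame; inverting t
         below yields the marker pose in the camera frame. */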

      
      /* get final transformation */
      tf::Transform transform = tfFromEigen(t.inverse());
   
      // reject the transform if any element of the basis or origin is NaN
      // (or a rotation entry is outside the valid [-1, 1] range)
      tf::Matrix3x3  m = transform.getBasis();
      tf::Vector3    p = transform.getOrigin();
      bool invalid = false;
      for(int i=0; i < 3; i++)
        for(int j=0; j < 3; j++)
          invalid = (invalid || std::isnan(m[i][j]) || fabs(m[i][j]) > 1.0);

      for(int i=0; i < 3; i++)
        invalid = (invalid || std::isnan(p[i]));
       

      if(invalid)
        continue; 

      /* publish the marker */
      ar_pose::ARMarker ar_pose_marker;
      ar_pose_marker.header.frame_id = msg->header.frame_id;
      ar_pose_marker.header.stamp = msg->header.stamp;
      ar_pose_marker.id = object[i].id;

      ar_pose_marker.pose.pose.position.x = transform.getOrigin().getX();
      ar_pose_marker.pose.pose.position.y = transform.getOrigin().getY();
      ar_pose_marker.pose.pose.position.z = transform.getOrigin().getZ();

      // use the quaternion components directly; getAxis() returns the
      // rotation axis, which is not the same as the quaternion's x/y/z
      ar_pose_marker.pose.pose.orientation.x = transform.getRotation().getX();
      ar_pose_marker.pose.pose.orientation.y = transform.getRotation().getY();
      ar_pose_marker.pose.pose.orientation.z = transform.getRotation().getZ();
      ar_pose_marker.pose.pose.orientation.w = transform.getRotation().getW();

      ar_pose_marker.confidence = marker_info[k].cf;  // confidence of this marker, not marker_info[0]
      arPoseMarkers_.markers.push_back (ar_pose_marker);

      /* publish transform */
      if (publishTf_)
      {
        broadcaster_.sendTransform(tf::StampedTransform(transform, msg->header.stamp, msg->header.frame_id, object[i].name));
      }

      /* publish visual marker */

      if (publishVisualMarkers_)
      {
        tf::Vector3 markerOrigin (0, 0, 0.25 * object[i].marker_width * AR_TO_ROS);
        tf::Transform m (tf::Quaternion::getIdentity (), markerOrigin);
        tf::Transform markerPose = transform * m; // marker pose in the camera frame

        tf::poseTFToMsg (markerPose, rvizMarker_.pose);

        rvizMarker_.header.frame_id = msg->header.frame_id;
        rvizMarker_.header.stamp = msg->header.stamp;
        rvizMarker_.id = object[i].id;

        rvizMarker_.scale.x = 1.0 * object[i].marker_width * AR_TO_ROS;
        rvizMarker_.scale.y = 1.0 * object[i].marker_width * AR_TO_ROS;
        rvizMarker_.scale.z = 0.5 * object[i].marker_width * AR_TO_ROS;
        rvizMarker_.ns = "basic_shapes";
        rvizMarker_.type = visualization_msgs::Marker::CUBE;
        rvizMarker_.action = visualization_msgs::Marker::ADD;
        switch (i)
        {
          case 0:
            rvizMarker_.color.r = 0.0f;
            rvizMarker_.color.g = 0.0f;
            rvizMarker_.color.b = 1.0f;
            rvizMarker_.color.a = 1.0;
            break;
          case 1:
            rvizMarker_.color.r = 1.0f;
            rvizMarker_.color.g = 0.0f;
            rvizMarker_.color.b = 0.0f;
            rvizMarker_.color.a = 1.0;
            break;
          default:
            rvizMarker_.color.r = 0.0f;
            rvizMarker_.color.g = 1.0f;
            rvizMarker_.color.b = 0.0f;
            rvizMarker_.color.a = 1.0;
        }
        rvizMarker_.lifetime = ros::Duration ();

        rvizMarkerPub_.publish (rvizMarker_);
        ROS_DEBUG ("Published visual marker");
      }
    }
    arMarkerPub_.publish (arPoseMarkers_);
    ROS_DEBUG ("Published ar_multi markers");
  }
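  The helpers makeRGBPoint and tfFromEigen are not part of this snippet; minimal sketches consistent with their call sites (assumptions, not the original code):

  // Hypothetical sketches of the helpers referenced above.
  pcl::PointXYZRGB makeRGBPoint (float x, float y, float z)
  {
    pcl::PointXYZRGB p;
    p.x = x; p.y = y; p.z = z;
    return p;
  }

  tf::Transform tfFromEigen (const Eigen::Matrix4f &t)
  {
    // copy the 3x3 rotation block and the translation column into a tf::Transform
    tf::Matrix3x3 basis (t(0,0), t(0,1), t(0,2),
                         t(1,0), t(1,1), t(1,2),
                         t(2,0), t(2,1), t(2,2));
    tf::Vector3 origin (t(0,3), t(1,3), t(2,3));
    return tf::Transform (basis, origin);
  }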
  // Processes the point cloud with OpenCV using the PCL cluster indices
  void processClusters( const std::vector<pcl::PointIndices> cluster_indices,
                        //                        const sensor_msgs::PointCloud2ConstPtr& pointcloud_msg,
                        const pcl::PointCloud<pcl::PointXYZRGB>::ConstPtr  cloud_transformed,
                        const pcl::PointCloud<pcl::PointXYZRGB>::ConstPtr& cloud_filtered,
                        const pcl::PointCloud<pcl::PointXYZRGB>&  cloud )
  {

    // -------------------------------------------------------------------------------------------------------
    // Convert image
    ROS_INFO_STREAM_NAMED("perception","Converting image to OpenCV format");

    try
    {
      sensor_msgs::ImagePtr image_msg(new sensor_msgs::Image);
      //      pcl::toROSMsg (*pointcloud_msg, *image_msg);
      pcl::toROSMsg (*cloud_transformed, *image_msg);
      cv_bridge::CvImagePtr input_bridge = cv_bridge::toCvCopy(image_msg, "rgb8");
      full_input_image = input_bridge->image;
    }
    catch (cv_bridge::Exception& ex)
    {
      ROS_ERROR_STREAM_NAMED("perception","[calibrate] Failed to convert image");
      return;
    }

    // -------------------------------------------------------------------------------------------------------
    // Process Image

    // Convert image to gray (the bridge delivered "rgb8", so use RGB2GRAY)
    cv::cvtColor( full_input_image, full_input_image_gray, CV_RGB2GRAY );
    //cv::adaptiveThreshold( full_input_image, full_input_image_gray, 255, CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY,5,10);

    // Blur image - reduce noise with a 3x3 kernel
    cv::blur( full_input_image_gray, full_input_image_gray, cv::Size(3,3) );

    ROS_INFO_STREAM_NAMED("perception","Finished coverting");

    // -------------------------------------------------------------------------------------------------------
    // Check OpenCV and PCL image height for errors
    int image_width = cloud.width;
    int image_height = cloud.height;
    ROS_DEBUG_STREAM( "PCL Image height " << image_height << " -- width " << image_width << "\n");
    int image_width_cv = full_input_image.cols;
    int image_height_cv = full_input_image.rows;
    ROS_DEBUG_STREAM( "OpenCV Image height " << image_height_cv << " -- width " << image_width_cv << "\n");

    if( image_width != image_width_cv || image_height != image_height_cv )
    {
      ROS_ERROR_STREAM_NAMED("perception","PCL and OpenCV image heights/widths do not match!");
      return;
    }

    // -------------------------------------------------------------------------------------------------------
    // GUI Stuff

    // First window
    const char* opencv_window = "Source";
    /*
      cv::namedWindow( opencv_window, CV_WINDOW_AUTOSIZE );
      cv::imshow( opencv_window, full_input_image_gray );
    */


    //    while(true)  // use this when we want to tweak the image
    {

      output_image = full_input_image.clone();

      // -------------------------------------------------------------------------------------------------------
      // Start processing clusters
      ROS_INFO_STREAM_NAMED("perception","Finding min/max in x/y axis");

      int top_image_overlay_x = 0; // tracks where to copyTo the mini images

      // for each cluster, see if it is a block
      for(size_t c = 0; c < cluster_indices.size(); ++c)
      {
        ROS_INFO_STREAM_NAMED("perception","\n\n");
        ROS_INFO_STREAM_NAMED("perception","On cluster " << c);

        // find the outer dimensions of the cluster
        double xmin = 0; double xmax = 0;
        double ymin = 0; double ymax = 0;

        // also remember each min & max's correponding other coordinate (not needed for z)
        double xminy = 0; double xmaxy = 0;
        double yminx = 0; double ymaxx = 0;

        // also remember their corresponding indices
        int xmini = 0; int xmaxi = 0;
        int ymini = 0; int ymaxi = 0;

        // loop through and find all min/max of x/y
        for(size_t i = 0; i < cluster_indices[c].indices.size(); i++)
        {
          int j = cluster_indices[c].indices[i];

          // Get RGB from point cloud
          pcl::PointXYZRGB p = cloud_transformed->points[j];

          double x = p.x;
          double y = p.y;

          if(i == 0) // initial values
          {
            xmin = xmax = x;
            ymin = ymax = y;
            xminy = xmaxy = y;
            yminx = ymaxx = x;
            xmini = xmaxi = ymini = ymaxi = j; // record the index corresponding to the min/max
          }
          else
          {
            if( x < xmin )
            {
              xmin = x;
              xminy = y;
              xmini = j;
            }
            if( x > xmax )
            {
              xmax = x;
              xmaxy = y;
              xmaxi = j;
            }
            if( y < ymin )
            {
              ymin = y;
              yminx = x;
              ymini = j;
            }
            if( y > ymax )
            {
              ymax = y;
              ymaxx = x;
              ymaxi = j;
            }
          }
        }

        ROS_DEBUG_STREAM_NAMED("perception","Cluster size - xmin: " << xmin << " xmax: " << xmax << " ymin: " << ymin << " ymax: " << ymax);
        ROS_DEBUG_STREAM_NAMED("perception","Cluster size - xmini: " << xmini << " xmaxi: " << xmaxi << " ymini: " << ymini << " ymaxi: " << ymaxi);

        // ---------------------------------------------------------------------------------------------
        // Check if these dimensions make sense for the block size specified
        double xside = xmax-xmin;
        double yside = ymax-ymin;

        const double tol = 0.01; // 1 cm error tolerance

        // In order to be part of the block, xside and yside must be between
        // block_size and block_size*sqrt(2), the diagonal seen when the block
        // is rotated 45 degrees (e.g. for a 4 cm block, roughly 4 to 5.7 cm)
        if(xside > block_size-tol &&
           xside < block_size*sqrt(2)+tol &&
           yside > block_size-tol &&
           yside < block_size*sqrt(2)+tol )
        {

          // -------------------------------------------------------------------------------------------------------
          // Get the four farthest corners of the block - use OpenCV only on the region identified by PCL

          // Get the pixel coordinates of the xmax and ymax indices
          int px_xmax = 0; int py_xmax = 0;
          int px_ymax = 0; int py_ymax = 0;
          getXYCoordinates( xmaxi, image_height, image_width, px_xmax, py_xmax);
          getXYCoordinates( ymaxi, image_height, image_width, px_ymax, py_ymax);

          // Get the pixel coordinates of the xmin and ymin indices
          int px_xmin = 0; int py_xmin = 0;
          int px_ymin = 0; int py_ymin = 0;
          getXYCoordinates( xmini, image_height, image_width, px_xmin, py_xmin);
          getXYCoordinates( ymini, image_height, image_width, px_ymin, py_ymin);
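          // getXYCoordinates (not shown) maps a linear index of the organized
          // cloud back to pixel coordinates; presumably row-major, i.e.
          // x = index % image_width and y = index / image_width.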

          ROS_DEBUG_STREAM_NAMED("perception","px_xmin " << px_xmin << " px_xmax: " << px_xmax << " py_ymin: " << py_ymin << " py_ymax: " << py_ymax );

          // -------------------------------------------------------------------------------------------------------
          // Change the frame of reference from the robot to the camera

          // Create an array of all the x value options
          const int x_values_a[] = {px_xmax, px_ymax, px_xmin, px_ymin};
          const int y_values_a[] = {py_xmax, py_ymax, py_xmin, py_ymin};
          // Turn it into a vector
          std::vector<int> x_values (x_values_a, x_values_a + sizeof(x_values_a) / sizeof(x_values_a[0]));
          std::vector<int> y_values (y_values_a, y_values_a + sizeof(y_values_a) / sizeof(y_values_a[0]));
          // Find the min
          int x1 = *std::min_element(x_values.begin(), x_values.end());
          int y1 = *std::min_element(y_values.begin(), y_values.end());
          // Find the max
          int x2 = *std::max_element(x_values.begin(), x_values.end());
          int y2 = *std::max_element(y_values.begin(), y_values.end());

          ROS_DEBUG_STREAM_NAMED("perception","x1: " << x1 << " y1: " << y1 << " x2: " << x2 << " y2: " << y2);

          // -------------------------------------------------------------------------------------------------------
          // Expand the ROI by a fudge factor, if possible
          const int FUDGE_FACTOR = 5; // pixels
          if( x1 > FUDGE_FACTOR)
            x1 -= FUDGE_FACTOR;
          if( y1 > FUDGE_FACTOR )
            y1 -= FUDGE_FACTOR;
          if( x2 < image_width - FUDGE_FACTOR )
            x2 += FUDGE_FACTOR;
          if( y2 < image_height - FUDGE_FACTOR )
            y2 += FUDGE_FACTOR;

          ROS_DEBUG_STREAM_NAMED("perception","After Fudge Factor - x1: " << x1 << " y1: " << y1 << " x2: " << x2 << " y2: " << y2);

          // -------------------------------------------------------------------------------------------------------
          // Create ROI parameters
          //        (x1,y1)----------------------
          //       |                            |
          //       |            ROI             |
          //       |                            |
          //       |_____________________(x2,y2)|

          // Create Region of Interest
          int roi_width = x2 - x1;
          int roi_height = y2 - y1;
          cv::Rect region_of_interest = cv::Rect( x1, y1, roi_width, roi_height );
          ROS_DEBUG_STREAM_NAMED("perception","ROI: x " << x1 << " -- y " << y1 << " -- height " << roi_height << " -- width " << roi_width );

          // -------------------------------------------------------------------------------------------------------
          // Find parameters of the block in pixel coordinates
          int block_center_x = x1 + 0.5*roi_width;
          int block_center_y = y1 + 0.5*roi_height;
          int block_center_z = block_size / 2; // TODO: make this better
          const cv::Point block_center = cv::Point( block_center_x, block_center_y );

          // -------------------------------------------------------------------------------------------------------
          // Create a sub image of just the block
          cv::Point a1 = cv::Point(x1, y1);
          cv::Point a2 = cv::Point(x2, y2);
          cv::rectangle( output_image, a1, a2, cv::Scalar(0, 255, 255), 1, 8);

          // Crop image (doesn't actually copy the data)
          cropped_image = full_input_image_gray(region_of_interest);

          // -------------------------------------------------------------------------------------------------------
          // Detect edges using canny
          ROS_INFO_STREAM_NAMED("perception","Detecting edges using canny");

          // Find edges
          cv::Mat canny_output;
          cv::Canny( cropped_image, canny_output, canny_threshold, canny_threshold*2, 3 );

          // Get mini window stats
          const int mini_width = canny_output.cols;
          const int mini_height = canny_output.rows;
          const cv::Size mini_size = canny_output.size();
          const cv::Point mini_center = cv::Point( mini_width/2, mini_height/2 );

          // Find contours
          vector<vector<cv::Point> > contours;
          vector<cv::Vec4i> hierarchy;
          cv::findContours( canny_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0) );
          ROS_INFO_STREAM_NAMED("perception","Contours");

          // Draw contours
          cv::Mat drawing = cv::Mat::zeros( mini_size, CV_8UC3 );
          ROS_INFO_STREAM_NAMED("perception","Drawing contours");

          // Find the largest contour for getting the angle
          double max_contour_length = 0;
          int max_contour_length_i = 0; // initialized so the index is valid even if no contours are found
          for( size_t i = 0; i< contours.size(); i++ )
          {
            double contour_length = cv::arcLength( contours[i], false );
            if( contour_length > max_contour_length )
            {
              max_contour_length = contour_length;
              max_contour_length_i = i;
            }
            //ROS_DEBUG_STREAM_NAMED("perception","Contour length = " << contour_length << " of index " << max_contour_length_i);


            cv::Scalar color = cv::Scalar( (30 + i*10) % 255, (30 + i*10) % 255, (30 + i*10) % 255);
            cv::drawContours( drawing, contours, (int)i, color, 1, 8, hierarchy, 0, cv::Point() );
            //drawContours( image, contours, contourIdx, color, thickness, lineType, hierarchy, maxLevel, offset )

          }

          // -------------------------------------------------------------------------------------------------------
          // Copy largest contour to main image
          cv::Scalar color = cv::Scalar( 0, 255, 0 );
          cv::drawContours( output_image, contours, (int)max_contour_length_i, color, 1, 8, hierarchy, 0, a1 );
          //drawContours( image, contours, contourIdx, color, thickness, lineType, hierarchy, maxLevel, offset )

          // -------------------------------------------------------------------------------------------------------
          // Copy largest contour to a separate image
          cv::Mat hough_input = cv::Mat::zeros( mini_size, CV_8UC1 );
          cv::Mat hough_input_color;
          cv::Scalar hough_color = cv::Scalar( 200 );
          cv::drawContours( hough_input, contours, (int)max_contour_length_i, hough_color, 1, 8, hierarchy, 0 );
          cv::cvtColor(hough_input, hough_input_color, CV_GRAY2BGR);

          // -------------------------------------------------------------------------------------------------------
          // Hough Transform
          cv::Mat hough_drawing = cv::Mat::zeros( mini_size, CV_8UC3 );
          std::vector<cv::Vec4i> lines;


          ROS_DEBUG_STREAM_NAMED("perception","hough_rho " << hough_rho << " hough_theta " << hough_theta <<
                                 " theta_converted " << (1/hough_theta)*CV_PI/180 << " hough_threshold " <<
                                 hough_threshold << " hough_minLineLength " << hough_minLineLength <<
                                 " hough_maxLineGap " << hough_maxLineGap );

          cv::HoughLinesP(hough_input, lines, hough_rho, (1/hough_theta)*CV_PI/180, hough_threshold, hough_minLineLength, hough_maxLineGap);

          ROS_WARN_STREAM_NAMED("perception","Found " << lines.size() << " lines");

          std::vector<double> line_angles;

          // Copy detected lines to the drawing image
          for( size_t i = 0; i < lines.size(); i++ )
          {
            cv::Vec4i line = lines[i];
            cv::line( hough_drawing, cv::Point(line[0], line[1]), cv::Point(line[2], line[3]),
                      cv::Scalar(255,255,255), 1, CV_AA);

            // Error check
            if(line[3] - line[1] == 0 && line[2] - line[0] == 0)
            {
              ROS_ERROR_STREAM_NAMED("perception","Line is actually two points at the origin, unable to calculate. TODO: handle better?");
              continue;
            }

            // Find angle
            double line_angle = atan2(line[3] - line[1], line[2] - line[0]); //in radian, degrees: * 180.0 / CV_PI;
            // Reverse angle direction if negative
            if( line_angle < 0 )
            {
              line_angle += CV_PI;
            }
            line_angles.push_back(line_angle);
            ROS_DEBUG_STREAM_NAMED("perception","Hough Line angle: " << line_angle * 180.0 / CV_PI;);
          }

          double block_angle = 0; // the overall result of the block's angle

          // Everything is based on the first angle
          if( line_angles.size() == 0 ) // make sure we have at least 1 angle
          {
            ROS_ERROR_STREAM_NAMED("perception","No lines were found for this cluster, unable to calculate block angle");
          }
          else
          {
            calculateBlockAngle( line_angles, block_angle );
          }

          // -------------------------------------------------------------------------------------------------------
          // Draw chosen angle
          ROS_INFO_STREAM_NAMED("perception","Using block angle " << block_angle*180.0/CV_PI);

          // Draw chosen angle on mini image
          cv::Mat angle_drawing = cv::Mat::zeros( mini_size, CV_8UC3 );
          int line_length = 0.5*double(mini_width); // line extends half the mini image width from the center
          int new_x = mini_center.x + line_length*cos( block_angle );
          int new_y = mini_center.y + line_length*sin( block_angle );
          ROS_INFO_STREAM("Origin (" << mini_center.x << "," << mini_center.y << ") New (" << new_x << "," << new_y <<
                          ") length " << line_length << " angle " << block_angle <<
                          " mini width " << mini_width << " mini height " << mini_height);
          cv::Point angle_point = cv::Point(new_x, new_y);
          cv::line( angle_drawing, mini_center, angle_point, cv::Scalar(255,255,255), 1, CV_AA);

          // Draw chosen angle on contours image
          cv::line( hough_drawing, mini_center, angle_point, cv::Scalar(255,0, 255), 1, CV_AA);

          // Draw chosen angle on main image
          line_length = 0.75 * double(mini_width); // slightly longer line for the full-size image
          new_x = block_center_x + line_length*cos( block_angle );
          new_y = block_center_y + line_length*sin( block_angle );
          ROS_INFO_STREAM_NAMED("perception",block_center_x << ", " << block_center_y << ", " << new_x << ", " << new_y);
          angle_point = cv::Point(new_x, new_y);
          cv::line( output_image, block_center, angle_point, cv::Scalar(255,0,255), 2, CV_AA);


          // -------------------------------------------------------------------------------------------------------
          // Get world coordinates

          // Find the block's center point
          double world_x1 = xmin+(xside)/2.0;
          double world_y1 = ymin+(yside)/2.0;
          double world_z1 = table_height + block_size / 2;

          // Convert pixel coordinates back to world coordinates
          double world_x2 = cloud_transformed->at(new_x, new_y).x;
          double world_y2 = cloud_transformed->at(new_x, new_y).y;
          double world_z2 = world_z1; // is this even necessary?

          // Get angle from two world coordinates...
          double world_theta = fabs( atan2(world_y2 - world_y1, world_x2 - world_x1) ); // fabs: integer abs() would truncate the angle

          // Attempt to make all angles point in same direction
          makeAnglesUniform( world_theta );

          // -------------------------------------------------------------------------------------------------------
          // GUI Stuff

          // Copy the cluster image to the main image in the top left corner
          if( top_image_overlay_x + mini_width < image_width )
          {
            const int common_height = 42;
            cv::Rect small_roi_row0 = cv::Rect(top_image_overlay_x, common_height*0, mini_width, mini_height);
            cv::Rect small_roi_row1 = cv::Rect(top_image_overlay_x, common_height*1, mini_width, mini_height);
            cv::Rect small_roi_row2 = cv::Rect(top_image_overlay_x, common_height*2, mini_width, mini_height);
            cv::Rect small_roi_row3 = cv::Rect(top_image_overlay_x, common_height*3, mini_width, mini_height);

            drawing.copyTo(              output_image(small_roi_row0) );
            hough_input_color.copyTo(    output_image(small_roi_row1) );
            hough_drawing.copyTo(        output_image(small_roi_row2) );
            angle_drawing.copyTo(        output_image(small_roi_row3) );

            top_image_overlay_x += mini_width;
          }

          // figure out the position and the orientation of the block
          //double angle = atan(block_size/((xside+yside)/2));
          //double angle = atan( (xmaxy - xminy) / (xmax - xmin ) );
          // Then add it to our set
          //addBlock( xmin+(xside)/2.0, ymin+(yside)/2.0, zmax - block_size/2.0, angle);
          //ROS_INFO_STREAM_NAMED("perception","FOUND -> xside: " << xside << " yside: " << yside << " angle: " << block_angle);


          addBlock( world_x1, world_y1, world_z1, world_theta );
          //addBlock( block_center_x, block_center_y, block_center_z, block_angle);
        }
        else
        {