Example #1
void getCameraRay(const image_geometry::PinholeCameraModel& model, const cv::Point2d& pt, cv::Point3d* ray)
{
    cv::Point2d rect_point;
    rect_point = model.rectifyPoint(pt);
    ROS_DEBUG("Rect Point: %f, %f",rect_point.x,rect_point.y);
    *ray = model.projectPixelTo3dRay(rect_point);
}
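A minimal usage sketch for the example above (the camera-model setup, pixel coordinate, and depth value are assumptions, not part of the original): the ray obtained through getCameraRay() can be scaled by a measured depth to recover a 3D point in the camera frame, the same pattern several later examples use.

// Hypothetical usage of getCameraRay(); assumes cam_model_ has already been
// initialized from a sensor_msgs::CameraInfo message via cam_model_.fromCameraInfo(info_msg).
cv::Point2d pixel(320.0, 240.0);        // raw (unrectified) pixel coordinate
cv::Point3d ray;
getCameraRay(cam_model_, pixel, &ray);  // rectify, then back-project to a viewing ray
double depth = 1.5;                     // measured depth along the optical axis, in metres
cv::Point3d point = ray * depth;        // approximate 3D point in the camera optical frame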
Example #2
pcl::PointCloud<pcl::PointXYZ>::Ptr Conversions::toPointCloud(const Eigen::Isometry3d &T, const image_geometry::PinholeCameraModel &cam, const cv::Mat &depth_img)
{
    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>());
    cloud->header.frame_id = "cam";
    cloud->is_dense = false;
    cloud->height = depth_img.rows;
    cloud->width = depth_img.cols;
    cloud->sensor_origin_ = Eigen::Vector4f( T.translation()(0), T.translation()(1), T.translation()(2), 1.f );
    cloud->sensor_orientation_ = Eigen::Quaternionf(T.rotation().cast<float>());
    cloud->points.resize( cloud->height * cloud->width );

    size_t idx = 0;
    const float* depthdata = reinterpret_cast<float*>( &depth_img.data[0] );
    for(int v = 0; v < depth_img.rows; ++v) {
        for(int u = 0; u < depth_img.cols; ++u) {
            pcl::PointXYZ& p = cloud->points[ idx ]; ++idx;

            float d = (*depthdata++);

            if (d > 0 && !isnan(d)) {
                p.z = d;
                p.x = (u - cam.cx()) * d / cam.fx();
                p.y = (v - cam.cy()) * d / cam.fy();
            } else {
                p.x = std::numeric_limits<float>::quiet_NaN();
                p.y = std::numeric_limits<float>::quiet_NaN();
                p.z = std::numeric_limits<float>::quiet_NaN();
            }
        }
    }
    return cloud;
}
Example #3
 void FindObjectOnPlane::generateStartPoints(
   const cv::Point2f& point_2d,
   const image_geometry::PinholeCameraModel& model,
   const pcl::ModelCoefficients::Ptr& coefficients,
   std::vector<cv::Point3f>& search_points_3d,
   std::vector<cv::Point2f>& search_points_2d)
 {
   NODELET_INFO("generateStartPoints");
   jsk_recognition_utils::Plane::Ptr plane
     (new jsk_recognition_utils::Plane(coefficients->values));
   cv::Point3d ray = model.projectPixelTo3dRay(point_2d);
   Eigen::Vector3f projected_point = rayPlaneInteersect(ray, plane);
   const double resolution_step = 0.01;
   const int resolution = 5;
   search_points_3d.clear();
   search_points_2d.clear();
   for (int i = - resolution; i < resolution; ++i) {
     for (int j = - resolution; j < resolution; ++j) {
       double x_diff = resolution_step * i;
       double y_diff = resolution_step * j;
       Eigen::Vector3f moved_point = projected_point + Eigen::Vector3f(x_diff, y_diff, 0);
       Eigen::Vector3f projected_moved_point;
       plane->project(moved_point, projected_moved_point);
       cv::Point3f projected_moved_point_cv(projected_moved_point[0],
                                            projected_moved_point[1],
                                            projected_moved_point[2]);
       search_points_3d.push_back(cv::Point3f(projected_moved_point_cv));
       cv::Point2d p2d = model.project3dToPixel(projected_moved_point_cv);
       search_points_2d.push_back(p2d);
     }
   }
 }
Example #4
 void infoCallback(const sensor_msgs::CameraInfoConstPtr& info_msg)
 {
   if (calibrated)
     return;
   cam_model_.fromCameraInfo(info_msg);
   pattern_detector_.setCameraMatrices(cam_model_.intrinsicMatrix(), cam_model_.distortionCoeffs());
   
   calibrated = true;
   image_sub_ = nh_.subscribe("/camera/rgb/image_mono", 1, &CalibrateKinectCheckerboard::imageCallback, this);
   
   ROS_INFO("[calibrate] Got image info!");
 }
Example #5
 void camerainfoCb(const sensor_msgs::CameraInfoConstPtr& info_msg)
 {
     ROS_INFO("infocallback :shutting down camerainfosub");
     cam_model_.fromCameraInfo(info_msg);
     camera_topic = info_msg->header.frame_id;
     camerainfosub_.shutdown();
 }
Example #6
void projectPoints(const image_geometry::PinholeCameraModel &cam_model,
                   const cv::Point3d &points3D,
                   cv::Point2d *points2D)
{
  *points2D = cam_model.project3dToPixel(points3D);
//   *points2D = cam_model.rectifyPoint(*points2D);
}
Example #7
  void overlayPoints(pcl::PointCloud<pcl::PointXYZ> detector_points, tf::Transform &transform, cv_bridge::CvImagePtr& image)
  {  
    // Overlay calibration points on the image
    pcl::PointCloud<pcl::PointXYZ> transformed_detector_points;
    
    pcl_ros::transformPointCloud(detector_points, transformed_detector_points, transform);
    
    int font_face = cv::FONT_HERSHEY_SCRIPT_SIMPLEX;
    double font_scale = 1;
    int thickness = 2;
    int radius = 5;
   
    for (unsigned int i=0; i < transformed_detector_points.size(); i++)
    {
      pcl::PointXYZ pt = transformed_detector_points[i];
      cv::Point3d pt_cv(pt.x, pt.y, pt.z);
      cv::Point2d uv;
      uv = cam_model_.project3dToPixel(pt_cv);

      cv::circle(image->image, uv, radius, CV_RGB(255,0,0), -1);
      cv::Size text_size;
      int baseline = 0;
      std::stringstream out;
      out << i+1;
      
      text_size = cv::getTextSize(out.str(), font_face, font_scale, thickness, &baseline);
                            
      cv::Point origin = cvPoint(uv.x - text_size.width / 2,
                               uv.y - radius - baseline/* - thickness*/);
      cv::putText(image->image, out.str(), origin, font_face, font_scale, CV_RGB(255,0,0), thickness);
    }
  }
Example #8
pcl::PointXYZ PointFromPixel(const cv::Point& pixel, const tf::Transform& cameraFrameToWorldFrame, image_geometry::PinholeCameraModel cam) {
    cv::Point3d cameraRay = cam.projectPixelTo3dRay(pixel);
    tf::Point worldCameraOrigin = cameraFrameToWorldFrame * tf::Vector3(0, 0, 0);
    tf::Point worldCameraStep = cameraFrameToWorldFrame * tf::Vector3(cameraRay.x, cameraRay.y, cameraRay.z) - worldCameraOrigin;
    double zScale = -worldCameraOrigin.z()/worldCameraStep.z();
    tf::Point ret = worldCameraOrigin + zScale * worldCameraStep;
    return pcl::PointXYZ(ret.x(), ret.y(), 0);
}
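The example above intersects the back-projected pixel ray with the world plane z = 0: with camera origin o and ray direction d expressed in the world frame, the scale factor is (0 - o.z) / d.z. Below is a hedged sketch of the same arithmetic generalized to a horizontal plane at height h (the function name and the h parameter are assumptions, not from the original project).

// Hypothetical variant of PointFromPixel() for a plane at height h; the example above is the h = 0 case.
pcl::PointXYZ PointFromPixelOnPlane(const cv::Point& pixel,
                                    const tf::Transform& cameraFrameToWorldFrame,
                                    const image_geometry::PinholeCameraModel& cam,
                                    double h) {
    cv::Point3d ray = cam.projectPixelTo3dRay(pixel);
    tf::Point origin = cameraFrameToWorldFrame * tf::Vector3(0, 0, 0);
    tf::Point step = cameraFrameToWorldFrame * tf::Vector3(ray.x, ray.y, ray.z) - origin;
    double scale = (h - origin.z()) / step.z();   // ray parameter where z reaches h
    tf::Point hit = origin + scale * step;
    return pcl::PointXYZ(hit.x(), hit.y(), h);
}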
Example #9
File: main.cpp Project: Aharobot/bk-ros
void cam_info_cb(const sensor_msgs::CameraInfo::ConstPtr& msg)
{
	if( cam_model_.fromCameraInfo(msg) )
	{
		got_cam_info_ = true;
		ROS_INFO("[bk_skeletal_tracker] Got RGB camera info.");
	} else {
		ROS_ERROR("[bk_skeletal_tracker] Couldn't read camera info.");
	}
}
Example #10
 cv::Point2d FindObjectOnPlane::getUyEnd(
   const cv::Point2d& ux_start,
   const cv::Point2d& ux_end,
   const image_geometry::PinholeCameraModel& model,
   const jsk_recognition_utils::Plane::Ptr& plane)
 {
   cv::Point3d start_ray = model.projectPixelTo3dRay(ux_start);
   cv::Point3d end_ray = model.projectPixelTo3dRay(ux_end);
   Eigen::Vector3f ux_start_3d = rayPlaneInteersect(start_ray, plane);
   Eigen::Vector3f ux_end_3d = rayPlaneInteersect(end_ray, plane);
   Eigen::Vector3f ux_3d = ux_end_3d - ux_start_3d;
   Eigen::Vector3f normal = plane->getNormal();
   Eigen::Vector3f uy_3d = normal.cross(ux_3d).normalized();
   Eigen::Vector3f uy_end_3d = ux_start_3d + uy_3d;
   cv::Point2d uy_end = model.project3dToPixel(cv::Point3d(uy_end_3d[0],
                                                           uy_end_3d[1],
                                                           uy_end_3d[2]));
   return uy_end;
 }
Example #11
void doOverlay(const sensor_msgs::ImageConstPtr& img_msg,
               const sensor_msgs::CameraInfoConstPtr& info_msg) {

    // convert camera image into opencv
    cam_model.fromCameraInfo(info_msg);
    cv_bridge::CvImagePtr cv_img = cv_bridge::toCvCopy(img_msg, sensor_msgs::image_encodings::RGB8);

    double alpha_mult;
    ros::param::param<double>("~alpha_mult", alpha_mult, 0.5);

    uint8_t r, g, b;
    if(aligned_pc) {
        if(!tf_list->waitForTransform(img_msg->header.frame_id, "/base_link",
                                     img_msg->header.stamp, ros::Duration(3)))
            return;
        tf::StampedTransform transform;
        tf_list->lookupTransform(img_msg->header.frame_id, "/base_link", 
                                img_msg->header.stamp, transform);
        PCRGB::Ptr tf_pc(new PCRGB());
        pcl_ros::transformPointCloud<PRGB>(*aligned_pc, *tf_pc, transform);
        for(uint32_t i=0;i<tf_pc->size();i++) {
            cv::Point3d proj_pt_cv(tf_pc->points[i].x, tf_pc->points[i].y, 
                                   tf_pc->points[i].z);
            cv::Point pt2d = cam_model.project3dToPixel(proj_pt_cv);
            extractRGB(tf_pc->points[i].rgb, r, g, b);
            if(pt2d.x >= 0 && pt2d.y >= 0 && 
               pt2d.x < cv_img->image.cols && pt2d.y < cv_img->image.rows) {
                double old_r = cv_img->image.at<cv::Vec3b>(pt2d.y, pt2d.x)[0];
                double old_g = cv_img->image.at<cv::Vec3b>(pt2d.y, pt2d.x)[1];
                double old_b = cv_img->image.at<cv::Vec3b>(pt2d.y, pt2d.x)[2];
                cv_img->image.at<cv::Vec3b>(pt2d.y, pt2d.x)[0] = 
                    (uint8_t) min(alpha_mult*old_r+r, 255.0);
                cv_img->image.at<cv::Vec3b>(pt2d.y, pt2d.x)[1] = 
                    (uint8_t) min(alpha_mult*old_g+g, 255.0);
                cv_img->image.at<cv::Vec3b>(pt2d.y, pt2d.x)[2] = 
                    (uint8_t) min(alpha_mult*old_b+b, 255.0);
            }
        }
    }
    
    overlay_pub.publish(cv_img->toImageMsg());
}
Example #12
cv::Point3d GraphGridMapper::to3D(cv::Point3d &p, const Eigen::Isometry3d &camera_transform, const image_geometry::PinholeCameraModel &camera_model)
{
    int width = camera_model.cameraInfo().width;
    int height = camera_model.cameraInfo().height;

    int u = round(p.x);
    if(u < 0) {
        u = 0;
    } else if (u >= width) {
        u = width -1;
    }
    int v = round(p.y);
    if(v < 0) {
        v = 0;
    } else if (v >= height) {
        v = height - 1;
    }
    cv::Point3d p3D(-1.0,-1.0,std::numeric_limits<double>::infinity());

    if (p.z != 0 && !isnan(p.z))
    {
        p3D.x = (u - camera_model.cx()) * p.z / camera_model.fx();
        p3D.y = (v - camera_model.cy()) * p.z / camera_model.fy();
        p3D.z = p.z;

        Eigen::Vector3d vec(p3D.x, p3D.y, p3D.z);
        vec = camera_transform * vec;
        p3D.x = vec(0);
        p3D.y = vec(1);
        p3D.z = vec(2);
    }

    return p3D;
}
Example #13
File: geometry.hpp Project: DSsoto/Sub8
size_t project_uv_to_cloud_index(const pcl::PointCloud<PointT>& cloud, const cv::Point2d& image_point,
                                 const image_geometry::PinholeCameraModel camera_model, double& distance)
{
  // Assumes camera is at origin, pointed in the normal direction
  size_t pt_pcl_index;
  cv::Point3d pt_cv;
  pt_cv = camera_model.projectPixelTo3dRay(image_point);
  Eigen::Vector3f direction_eig(pt_cv.x, pt_cv.y, pt_cv.z);
  Eigen::Vector3f origin_eig(0.0, 0.0, 0.0);

  pt_pcl_index = closest_point_index_rayOMP(cloud, direction_eig, origin_eig, distance);
  return (pt_pcl_index);
}
Example #14
void convert(const sensor_msgs::ImageConstPtr& depth_msg, PointCloud::Ptr& cloud_msg)
{
	// Use correct principal point from calibration
	float center_x = cameraModel.cx();
	float center_y = cameraModel.cy();

	// Combine unit conversion (if necessary) with scaling by focal length for computing (X,Y)
	double unit_scaling = DepthTraits<T>::toMeters( T(1) );
	float constant_x = unit_scaling / cameraModel.fx();
	float constant_y = unit_scaling / cameraModel.fy();
	float bad_point = std::numeric_limits<float>::quiet_NaN();

	sensor_msgs::PointCloud2Iterator<float> iter_x(*cloud_msg, "x");
	sensor_msgs::PointCloud2Iterator<float> iter_y(*cloud_msg, "y");
	sensor_msgs::PointCloud2Iterator<float> iter_z(*cloud_msg, "z");
	const T* depth_row = reinterpret_cast<const T*>(&depth_msg->data[0]);
	int row_step = depth_msg->step / sizeof(T);
	for (int v = 0; v < (int)cloud_msg->height; ++v, depth_row += row_step)
	{
		for (int u = 0; u < (int)cloud_msg->width; ++u, ++iter_x, ++iter_y, ++iter_z)
		{
			T depth = depth_row[u];

			// Missing points denoted by NaNs
			if (!DepthTraits<T>::valid(depth))
			{
				*iter_x = *iter_y = *iter_z = bad_point;
				continue;
			}

			// Fill in XYZ
			*iter_x = (u - center_x) * depth * constant_x;
			*iter_y = (v - center_y) * depth * constant_y;
			*iter_z = DepthTraits<T>::toMeters(depth);
		}
	}
}
Example #15
 bool objectSrv(jsk_smart_gui::point2screenpoint::Request &req,
                jsk_smart_gui::point2screenpoint::Response &res)
 {
     ROS_INFO("3dtopixel request:x=%lf,y=%lf,z=%lf",req.point.point.x,req.point.point.y,req.point.point.z);
     geometry_msgs::PointStamped point_transformed;
     tf_listener_.transformPoint(camera_topic, req.point, point_transformed);
     cv::Point3d xyz;
     cv::Point2d uv_rect;
     xyz.x = point_transformed.point.x;
     xyz.y = point_transformed.point.y;
     xyz.z = point_transformed.point.z;
     uv_rect = cam_model_.project3dToPixel(xyz);
     res.x=uv_rect.x;
     res.y=uv_rect.y;
     return true;
 }
Example #16
File: locator.cpp Project: ashokzg/cpb
	void infoCb(const sensor_msgs::CameraInfo& msg)
	{
		model.fromCameraInfo(msg);
		//SHUTDOWN LATER #HACK
	}
Example #17
   void imageCb2(const sensor_msgs::ImageConstPtr& msg, const sensor_msgs::CameraInfoConstPtr& info_msg) {
      
      MySecond.clear();
      Ccount++;

      cam_model_right.fromCameraInfo(info_msg);

      cv_bridge::CvImagePtr cv_ptr;
      try
      {
         cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::MONO8);
      }
      catch (cv_bridge::Exception& e)
      {
         ROS_ERROR("cv_bridge exception: %s", e.what());
         return;
      }

      if (Ccount == 0)
         return;

      Mat image, gray, temp, mask;
      image = cv_ptr->image;

      mask = image.clone();

      int niters = 1;
      dilate(mask, temp, Mat(), Point(-1, -1), niters);
      erode(temp, temp, Mat(), Point(-1, -1), niters * 2);
      dilate(temp, temp, Mat(), Point(-1, -1), niters);

      threshold(temp, temp, 200, 255, CV_THRESH_BINARY);

      for (int i = 0; i <= 5; i++) {
         for (int j = 1000; j < temp.cols; j++) {
            temp.at<uchar>(i, j) = 0;
         }
      }

      vector<vector<Point> > contours1, contours2;
      vector<Vec4i> hierarchy;
      findContours(temp, contours2, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);

      int cmin = 0;
      int cmax = 100;

      for (int i = 0; i < contours2.size(); i++) {
         if (contours2[i].size() < cmin || contours2[i].size() > cmax) {
            continue;
         }
         else
            contours1.push_back(contours2[i]);
      }


      image = Scalar(255, 255, 255);

      std::vector<std::vector<cv::Point> > ::iterator itc;
      itc = contours1.begin();

      MyPoint TTEMP;
      while (itc != contours1.end()) {

         cv::Moments mom = cv::moments(cv::Mat(*itc++));
#ifdef DEBUG
         cv::circle(image, cv::Point(mom.m10 / mom.m00, mom.m01 / mom.m00), 2, cv::Scalar(0), 2);
#endif
         TTEMP.x = mom.m10 / mom.m00;
         TTEMP.y = mom.m01 / mom.m00;

         if (TTEMP.x == 0 && TTEMP.y == 0)
            return;

         MySecond.push_back(TTEMP);
      }

#ifdef DEBUG
      cv::imshow(OPENCV_WINDOW2, cv_ptr->image);
      cv::waitKey(3);
#endif
   }
Example #18
   void imageCb(const sensor_msgs::ImageConstPtr& msg, const sensor_msgs::CameraInfoConstPtr& info_msg) {
      MyFirst.clear();
      Ccount1++;
      cv_bridge::CvImageConstPtr cv_ptr;
      double cur = ros::Time::now().toSec();

      // lpf_x.set_cutoff_freq(0);
      // lpf_x.get_lpf(10);
      try
      {
         cv_ptr = cv_bridge::toCvShare(msg, sensor_msgs::image_encodings::MONO8);
      }
      catch (cv_bridge::Exception& e)
      {
         ROS_ERROR("cv_bridge exception: %s", e.what());
         return;
      }

      cam_model_left.fromCameraInfo(info_msg);
      // cout << "dist " << info_msg->D[0] << " " << info_msg->D[1] << " " << info_msg->D[2] << " " <<  endl;

      Mat image, gray, temp, mask;
      image = cv_ptr->image;

      mask = image.clone();

      int niters = 1;
      dilate(mask, temp, Mat(), Point(-1, -1), niters);
      erode(temp, temp, Mat(), Point(-1, -1), niters * 2);
      dilate(temp, temp, Mat(), Point(-1, -1), niters);

      threshold(temp, temp, 200, 255, CV_THRESH_BINARY);

      for (int i = 0; i <= 5; i++) {
         for (int j = 1000; j < temp.cols; j++) {
            temp.at<uchar>(i, j) = 0;
         }
      }

      vector<vector<Point> > contours1, contours2;
      vector<Vec4i> hierarchy;
      findContours(temp, contours2, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);


      int cmin = 0;
      int cmax = 100;


      for (int i = 0; i < contours2.size(); i++) {
         if (contours2[i].size() < cmin || contours2[i].size() > cmax) {
            continue;
         }
         else
            contours1.push_back(contours2[i]);
      }

      image = Scalar(255, 255, 255);

      std::vector<std::vector<cv::Point> > ::iterator itc;
      itc = contours1.begin();

      MyPoint TTEMP;
      while (itc != contours1.end()) {

         cv::Moments mom = cv::moments(cv::Mat(*itc++));
#ifdef DEBUG
         cv::circle(image, cv::Point(mom.m10 / mom.m00, mom.m01 / mom.m00), 2, cv::Scalar(0), 2);
#endif
         TTEMP.x = mom.m10 / mom.m00;
         TTEMP.y = mom.m01 / mom.m00;

         MyFirst.push_back(TTEMP);
      }


#ifdef DEBUG
      cv::imshow(OPENCV_WINDOW, cv_ptr->image);
      //Rect rect(MyFirst[0].x - 50, MyFirst[0].y- 50, 100, 100);
      //Mat subimage = cv_ptr->image(rect);
      //cv::imshow(OPENCV_WINDOW, subimage);
      cv::waitKey(3);
#endif

      vector<My3DPoint> myvec;
      My3DPoint myTempVec;

#ifdef DEBUG
      //cout << Ccount << " " << MyFirst[0].y << " " << MySecond[0].y << endl;
#endif
      // cout << "size : " << MyFirst.size() << "," << MySecond.size() << endl;

      if (MyFirst.size() && MySecond.size()) {
         // cout << MyFirst[0].x << "," << MyFirst[0].y << endl;
         cv::Point2d left_pt(MyFirst[0].x, MyFirst[0].y);
         cv::Point2d right_pt(MySecond[0].x, MySecond[0].y);
         cam_model_left.rectifyPoint(left_pt);

            Matrix<float, 3, 1> Z;
            Matrix<float, 6, 1> RES_KALMAN;
            Z<< 
            MyFirst[0].x, 
            MySecond[0].x, 
            (MyFirst[0].y + MySecond[0].y) / 2.0f;

            RES_KALMAN = kalman_xyz.getKalman(Z);
            cout << "???" << endl;
            cout << RES_KALMAN << endl;

         // cout << cam_model_left.rectifyPoint(left_pt).x << "," << cam_model_left.rectifyPoint(left_pt).y << "rectified" << endl;
         // cout << cam_model_right.rectifyPoint(right_pt).x << "," << cam_model_right.rectifyPoint(right_pt).y << "rectified" << endl;

         // cout << "notcal :: " << MyFirst[0].y - MySecond[0].y << endl;
         // cout << "calibr  :: " << cam_model_left.rectifyPoint(left_pt).y - cam_model_right.rectifyPoint(right_pt).y << endl;

         float leftpt_y = MyFirst[0].y;

         Matrix<float, 2, 1> X_kalman = kalman_x.getKalman(leftpt_y);
         // cout << "KALMAN" << kalman_x.getKalman_1(leftpt_y) << "," << leftpt_y << endl;;

         geometry_msgs::Point drone1_msg;
         drone1_msg.x = leftpt_y;//get_lpf(&lpf_x,10);
         drone1_msg.y = X_kalman(0, 0); //get_lpf(&lpf_z,10);
         drone1_msg.z = X_kalman(1, 0); //get_lpf(&lpf_y,5);
         pub_drone[0].publish(drone1_msg);



         cv::Point3d ptr_left = cam_model_left.projectPixelTo3dRay(left_pt);
         cv::Point3d ptr_right = cam_model_right.projectPixelTo3dRay(right_pt);


         // getKalman_1();

         // cout << ptr_left.y - ptr_right.y << endl;
         // cout << "left 3d   :: " <<  ptr_left.x  << "," << ptr_left.y << "," << ptr_left.z << endl;
         // cout << "right 3d  :: " <<  ptr_right.x  << "," << ptr_right.y << "," << ptr_right.z << endl << endl;



         ptr_left = cam_model_left.projectPixelTo3dRay(cam_model_left.rectifyPoint(left_pt));
         ptr_right = cam_model_right.projectPixelTo3dRay(cam_model_right.rectifyPoint(right_pt));

         // cout << ptr_left.y - ptr_right.y << endl;
         // cout << "left 3d   :: " <<  ptr_left.x  << "," << ptr_left.y << "," << ptr_left.z << endl;
         // cout << "right 3d  :: " <<  ptr_right.x  << "," << ptr_right.y << "," << ptr_right.z << endl;


      }
      // cout << MySecond[0].x << "," << MySecond[0].y << endl;
      if (Ccount == 1) {
         sort(MyFirst.begin(), MyFirst.end(), MyCompare);
         sort(MySecond.begin(), MySecond.end(), MyCompare);




         Dron_size = MyFirst.size();

         for (int i = 0; i < Dron_size; i++) {
            double a1, b1, c1;
            // cout << "hello " << MyFirst[i].x - MySecond[i].x << endl;
            c1 =  2.97 * 450 / ((MyFirst[i].x - MySecond[i].x) * 0.00375);
            a1 = (2.97 * 450 / ((MyFirst[i].x - MySecond[i].x) * 0.00375)) * (0.00375 * (MyFirst[i].x + MySecond[i].x - 1280)) / (2 * 2.97);

            b1 =  (-0.00375) * ((MyFirst[i].y + MySecond[i].y) / 2 - 480) * (2.97 * 450 / ((MyFirst[i].x - MySecond[i].x) * 0.00375)) / 2.97;





            Current_Loc[i].x = a1;
            Current_Loc[i].y = b1 * cos(Angle * PI / 180.0) - c1 * sin(Angle * PI / 180.0);
            Current_Loc[i].z = b1 * sin(Angle * PI / 180.0) + c1 * cos(Angle * PI / 180.0);
            a1 = Current_Loc[i].x;
            b1 = Current_Loc[i].y;
            c1 = Current_Loc[i].z;

            Current_Loc[i].x = a1 * cos(Angle2 * PI / 180.0) - b1 * sin(Angle2 * PI / 180.0);
            Current_Loc[i].y = a1 * sin(Angle2 * PI / 180.0) + b1 * cos(Angle2 * PI / 180.0);
            Current_Loc[i].z = c1;

            a1 = Current_Loc[i].x;
            b1 = Current_Loc[i].y;
            c1 = Current_Loc[i].z;

            Current_Loc[i].x = a1 * cos(Angle3 * PI / 180.0) + c1 * sin(Angle3 * PI / 180.0);
            Current_Loc[i].y = b1;
            Current_Loc[i].z = -a1 * sin(Angle3 * PI / 180.0) + c1 * cos(Angle3 * PI / 180.0);
         }

         for (int i = 0; i < Dron_size; i++) {
            Real_Dron_Loc[i].x = (Current_Loc[i].x);
            Real_Dron_Loc[i].y = (Current_Loc[i].y);
            Real_Dron_Loc[i].z = (Current_Loc[i].z);
         }

         Frame_S_time = clock();

         for (int i = 0; i < Dron_size; i++) {
            Velocity[i].x = 0;
            Velocity[i].y = 0;
            Velocity[i].z = 0;
         }
      }
      else {
         if (MyFirst.size() != Dron_size || MySecond.size() != Dron_size)
            return;
         Frame_E_time = clock();

         Time = (double)(Frame_E_time - Frame_S_time) / CLOCKS_PER_SEC;
         Frame_S_time = Frame_E_time;

         for (int i = 0; i < Dron_size; i++) {
            Acummulate_Time[i] += Time;
            Track_Loc[i].x = Current_Loc[i].x;// + Acummulate_Time[i] * Velocity[i].x;
            Track_Loc[i].y = Current_Loc[i].y;// + Acummulate_Time[i] * Velocity[i].y;
            Track_Loc[i].z = Current_Loc[i].z;// + Acummulate_Time[i] * Velocity[i].z;
         }

         for (int k = 0; k < Dron_size; k++) {
            double closest_dis = 987654321;
            My3DPoint closest_3DPoint;

            closest_3DPoint.x = 0;
            closest_3DPoint.y = 0;
            closest_3DPoint.z = 0;

            int flag = 0;
            int AA = 999, BB = 888;

            for (int i = 0; i < MyFirst.size(); i++) {
               for (int j = 0; j < MySecond.size(); j++) {
                  double a1, b1, c1;
                  double aa1, bb1, cc1;

                  cout << "depth : " << MyFirst[i].x - MySecond[j].x << endl;




                  c1 =  2.97 * 450.0 / ((MyFirst[i].x - MySecond[j].x) * 0.00375);
                  a1 = (2.97 * 450.0 / ((MyFirst[i].x - MySecond[j].x) * 0.00375)) * (0.00375 * (MyFirst[i].x + MySecond[j].x - 1280.0)) / (2.0 * 2.97);

                  b1 =  (-0.00375) * ((MyFirst[i].y + MySecond[j].y) / 2 - 480.0) * (2.97 * 450.0 / ((MyFirst[i].x - MySecond[j].x) * 0.00375)) / 2.97;

                  aa1 = a1;
                  bb1 = b1 * cos(Angle * PI / 180.0) - c1 * sin(Angle * PI / 180.0);
                  cc1 = b1 * sin(Angle * PI / 180.0) + c1 * cos(Angle * PI / 180.0);

                  a1 = aa1;
                  b1 = bb1;
                  c1 = cc1;

                  aa1 = a1 * cos(Angle2 * PI / 180.0) - b1 * sin(Angle2 * PI / 180.0);
                  bb1 = a1 * sin(Angle2 * PI / 180.0) + b1 * cos(Angle2 * PI / 180.0);
                  cc1 = c1;


                  a1 = aa1;
                  b1 = bb1;
                  c1 = cc1;

                  aa1 = a1 * cos(Angle3 * PI / 180.0) + c1 * sin(Angle3 * PI / 180.0);
                  bb1 = b1;
                  cc1 = -a1 * sin(Angle3 * PI / 180.0) + c1 * cos(Angle3 * PI / 180.0);

                  if ((sqrt(pow(Track_Loc[k].x - aa1, 2) + pow(Track_Loc[k].y - bb1, 2) + pow(Track_Loc[k].z - cc1, 2)) < closest_dis) && sqrt(pow(Track_Loc[k].x - aa1, 2) + pow(Track_Loc[k].y - bb1, 2) + pow(Track_Loc[k].z - cc1, 2)) < 300 ) {
                     closest_dis = sqrt(pow(Track_Loc[k].x - aa1, 2) + pow(Track_Loc[k].y - bb1, 2) + pow(Track_Loc[k].z - cc1, 2));

                     closest_3DPoint.x = aa1;
                     closest_3DPoint.y = bb1;
                     closest_3DPoint.z = cc1;
                     AA = i;
                     BB = j;
                  }

               }

            }
#ifdef DEBUG
            cout << MyFirst.size() << " " << MySecond.size() << endl;
            cout << AA << " " << BB << endl;
            cout << closest_dis << endl;
            cout << closest_3DPoint.x << " " << closest_3DPoint.y << " " << closest_3DPoint.z << endl;
#endif

            if (closest_dis == 987654321) {
               continue;
            }

            else {
               // pass
               Acummulate_Time[k] = 0;

               Current_Loc[k].x = closest_3DPoint.x;
               Current_Loc[k].y = closest_3DPoint.y;
               Current_Loc[k].z = closest_3DPoint.z;
            }

            cout << "Curren " << Current_Loc[k].x << " " << Current_Loc[k].y << " " << Current_Loc[k].z << endl;
            



            cout << "-------------------------------------------------------------------------------" << endl << endl;

         }

         std_msgs::Float64 drone1_x_msg;
         std_msgs::Float64 drone1_y_msg;
         std_msgs::Float64 drone1_z_msg;

         geometry_msgs::Point drone1_msg;
         geometry_msgs::Point drone2_msg;

         for (int i = 0; i < Dron_size; i++) {
            Real_Dron_Loc[i].x = Current_Loc[i].x;
            Real_Dron_Loc[i].y = Current_Loc[i].y;
            Real_Dron_Loc[i].z = Current_Loc[i].z;
#ifdef DEBUG
            cout << setw(10) << Real_Dron_Loc[i].x << " " << setw(10) << Real_Dron_Loc[i].y << " " << setw(10) << Real_Dron_Loc[i].z  << endl;
#endif

            static lpf_t lpf_x = {0, };
            static lpf_t lpf_y = {0, };
            static lpf_t lpf_z = {0, };
            lpf_x.cur_time = lpf_y.cur_time = lpf_z.cur_time = ros::Time::now().toSec();
            lpf_x.input = Real_Dron_Loc[i].x;
            lpf_y.input = Real_Dron_Loc[i].y;
            lpf_z.input = Real_Dron_Loc[i].z;

            drone1_msg.x = Real_Dron_Loc[i].x;//get_lpf(&lpf_x,10);
            drone1_msg.y = Real_Dron_Loc[i].z;//get_lpf(&lpf_z,10);
            drone1_msg.z = Real_Dron_Loc[i].y;//get_lpf(&lpf_y,5);

            drone2_msg.x = get_lpf(&lpf_x, 10);
            drone2_msg.y = get_lpf(&lpf_z, 10);
            drone2_msg.z = get_lpf(&lpf_y, 5);

            // pub_drone[i].publish(drone1_msg);
            // Nasang[i].publish(drone2_msg);
         }

      }
      cout << "cb1 cycletime : " << ros::Time::now().toSec() - cur << endl;;
   }
Example #19
	inline void
	hit_same_point(	const image_geometry::PinholeCameraModel &camera_model,
								const pcl::PointCloud<PointT> &in,
								pcl::PointCloud<PointT> &out,
								int rows,
								int cols,
								float z_threshold = 0.3)
	{
		std::vector<std::vector <std::vector<PointT> > > hit( cols, std::vector< std::vector<PointT> >(rows, std::vector<PointT>()));

		// Project points into image space
		for(unsigned int i=0; i < in.points.size();  ++i){
			const PointT* pt = &in.points.at(i);
			if( pt->z > 1) { // min distance from camera 1m

				cv::Point2i point_image = camera_model.project3dToPixel(cv::Point3d(pt->x, pt->y, pt->z));

				if( between<int>(0, point_image.x, cols )
					&& between<int>( 0, point_image.y, rows )
				)
				{
					// Sort all points into image
					{
						hit[point_image.x][point_image.y].push_back(in.points.at(i));
					}

				}
			}
		}
		assert(out.empty());
		for(int x = 0; x < hit.size(); ++x ){
			for(int y = 0; y < hit[0].size(); ++y){
				if(hit[x][y].size()>1){
					PointT min_z = hit[x][y][0];
					float max_z = min_z.z;
					for(int p = 1; p < hit[x][y].size(); ++p){
					// find min and max z
						max_z = MAX(max_z, hit[x][y][p].z);
#ifdef DEBUG
						std::cout << hit[x][y].size() << "\t";
#endif

						if(hit[x][y][p].z < min_z.z)
						{
							min_z = hit[x][y][p];
						}
					}
#ifdef DEBUG
					std::cout << min_z.z << "\t" << max_z << "\t";
#endif
					if(max_z - min_z.z > z_threshold){
#ifdef DEBUG
						std::cout << min_z << std::endl;
#endif
						out.push_back(min_z);
					}
				}
			}
		}
#ifdef DEBUG
		std::cout << "hit_same_point in: "<< in.size()  << "\t out: " << out.size() << "\n";
#endif
	}
Example #20
void FeatureTracker::image_callback(const sensor_msgs::ImageConstPtr& msg, const sensor_msgs::CameraInfoConstPtr& info_msg) {
  //need pose data for each picture, need to publish a camera pose
  ros::Time acquisition_time = msg->header.stamp;
  geometry_msgs::PoseStamped basePose;
  geometry_msgs::PoseStamped mapPose;
  basePose.pose.orientation.w=1.0;
  ros::Duration timeout(3);
  basePose.header.frame_id="/base_link";
  mapPose.header.frame_id="/map";
  
  try {
    tf_listener_.waitForTransform("/camera_1_link", "/map", acquisition_time, timeout);
    tf_listener_.transformPose("/map", acquisition_time,basePose,"/camera_1_link",mapPose);
    printf("pose #%d %f %f %f\n",pic_number++,mapPose.pose.position.x, mapPose.pose.position.y, tf::getYaw(mapPose.pose.orientation));
  }
  catch (tf::TransformException& ex) {
    ROS_WARN("[map_maker] TF exception:\n%s", ex.what());
    printf("[map_maker] TF exception:\n%s", ex.what());
    return;
  }
  cam_model.fromCameraInfo(info_msg);
  
  
  
  
  // printf("callback called\n");
  try
  {
    // if you want to work with color images, change from mono8 to bgr8
    if(image_rect==NULL){
      image_rect = cvCloneImage(bridge.imgMsgToCv(msg, "mono8"));
      last_image= cvCloneImage(bridge.imgMsgToCv(msg, "mono8"));
      pyrA=cvCreateImage(cvSize(last_image->width+8,last_image->height/3.0), IPL_DEPTH_32F, 1);
      pyrB=cvCloneImage(pyrA);
      //  printf("cloned image\n");
    }
    else{
      //save the last image
      cvCopy(image_rect,last_image);
      cvCopy(bridge.imgMsgToCv(msg, "mono8"),image_rect);
      // printf("copied image\n");
    }
    if(output_image==NULL){
      output_image =cvCloneImage(image_rect);
    }
    if(eigImage==NULL){
      eigImage =cvCloneImage(image_rect);
    }
    if(tempImage==NULL){
      tempImage =cvCloneImage(image_rect);
    }
  }
  catch (sensor_msgs::CvBridgeException& e)
  {
    ROS_ERROR("Could not convert from '%s' to 'mono8'.", msg->encoding.c_str());
    return;
  }
  
  if(image_rect!=NULL) {
    cvCopy(image_rect,output_image);
    
    printf("got image\n");
    
    track_features(mapPose);
    
    //draw features on the image
    for(int i=0;i<last_feature_count;i++){
      CvPoint center=cvPoint((int)features[i].x,(int)features[i].y);
      cvCircle(output_image,center,10,cvScalar(150),2);
      
      char strbuf [10];
      
      int n=sprintf(strbuf,"%d",current_feature_id[ i] );
      std::string text=std::string(strbuf,n);
      
      CvFont font;
      
      cvInitFont(&font,CV_FONT_HERSHEY_SIMPLEX,1,1);
      
      cvPutText(output_image,text.c_str(),cvPoint(center.x,center.y+20),&font,cvScalar(255));
      
      
      cv::Point3d tempRay;
      cv::Point2d tempPoint=cv::Point2d(features[i]);
      cam_model.projectPixelTo3dRay(tempPoint,tempRay);
  //    printf("%f x  %f y  %f z\n",tempRay.x,tempRay.y,tempRay.z);
    }
    
  //  featureList[0].print();
    
    //determine error gradient
    
    int min_features=10;
    
  //  printf("ypr %f %f %f\n",yaw,pitch,roll);
    
    cv::Point3d error_sum=calc_error(min_features,0, 0, 0);
    printf("total error is : %f\n",error_sum.x);
    
    for(int i=0;i<featureList.size();i++){
      if(min_features<featureList[i].numFeatures()){
	printf("\n\n\nfeature %d\n",i);
	printf("mean: %f %f %f\n",featureList[i].currentMean.x, featureList[i].currentMean.y, featureList[i].currentMean.z);
	
      }
    }
    
    
//    double error_up= calc_error(min_features,yalpha, 0, 0);
  //  printf("total up yaw error is : %f\n",error_up);
//    double error_down= calc_error(min_features,-yalpha, 0, 0);
  //  printf("total down yaw error is : %f\n",error_down);
  /*  
     
    double yaw_change=0;
    if(error_up<error_sum && error_up<error_down){
      yaw_change=yalpha;
    }else if(error_down<error_sum && error_down<error_up){
      yaw_change=-yalpha;
    }else if(error_down!=error_sum&&error_sum!=error_up){
      yalpha/=2;
    }
    
    error_up=   calc_error(min_features,0,palpha, 0);
   // printf("total up pitch error is : %f\n",error_up);
    error_down=   calc_error(min_features,0,-palpha, 0);
   // printf("total down pitch error is : %f\n",error_down);
    
    double pitch_change=0;
    if(error_up<error_sum && error_up<error_down){
      pitch_change=palpha;
    }else if(error_down<error_sum && error_down<error_up){
      pitch_change=-palpha;
    }else if(error_down!=error_sum&&error_sum!=error_up){
      //palpha/=2;
    }
    
    error_up=  calc_error(min_features,0,0,ralpha);
   // printf("total up roll error is : %f\n",error_up);
    
    error_down=   calc_error(min_features,0,0,-ralpha);
   // printf("total down roll error is : %f\n",error_down);
    
    double roll_change=0;
    if(error_up<error_sum && error_up<error_down){
      roll_change=ralpha;
    }else if(error_down<error_sum && error_down<error_up){
      roll_change=-ralpha;
    }else if(error_down!=error_sum&&error_sum!=error_up){
      ralpha/=2;
    }
    
  //  yaw+=yaw_change;
  
  //  pitch+=pitch_change;
 
  
  //   roll+=roll_change;
    */
    
    try{
      sensor_msgs::Image output_image_cvim =*bridge.cvToImgMsg(output_image, "mono8");
      output_image_cvim.header.stamp=msg->header.stamp;
      analyzed_pub_.publish(output_image_cvim);
    }
    catch (sensor_msgs::CvBridgeException& e){
      ROS_ERROR("Could not convert from '%s' to 'mono8'.", msg->encoding.c_str());
      return;
    }
    // printf("displaying image\n");
  }else{
    // printf("null image_rect\n");
  }
}
Example #21
File: main.cpp Project: Aharobot/bk-ros
// this function is called each frame
void glutDisplay (void)
{
	static ros::Duration pub_interval(1.0/pub_rate_temp_);
	static ros::Time     last_pub(0.0);
	static int           num_skipped = 0;
	
	num_skipped++;
	
	ros::Time now_time = ros::Time::now();
	
	// Update stuff from OpenNI
	XnStatus status = g_Context.WaitAndUpdateAll();
	if( status != XN_STATUS_OK ){
		ROS_ERROR_STREAM("Updating context failed: " << status);
		return;
	}
	
	xn::SceneMetaData sceneMD;
	xn::DepthMetaData depthMD;
	xn::ImageMetaData imageMD;
	g_DepthGenerator.GetMetaData(depthMD);
	g_UserGenerator.GetUserPixels(0, sceneMD);
	//g_ImageGenerator.GetMetaData(imageMD);
	
	cv::Mat depth_image;
	getDepthImage(depthMD, depth_image);
	
	double minval, maxval;
	cv::minMaxLoc(depth_image, &minval, &maxval);
	
	// Convert user pixels to an OpenCV image
	cv::Mat label_image;
	getUserLabelImage(sceneMD, label_image);
	
	sensor_msgs::PointCloud cloud;
	cloud.header.stamp    = now_time;
	cloud.header.frame_id = frame_id_;
	cloud.channels.resize(1);
	cloud.channels[0].name = "intensity";
	
	// Convert users into better format
	XnUserID aUsers[15];
	XnUInt16 nUsers = 15;
	g_UserGenerator.GetUsers(aUsers, nUsers);
	
	cv::Mat      this_mask;
	XnPoint3D    center_mass;
	double       pixel_area;
	cv::Scalar   s;
	vector<user> users;
		
	if( g_bhasCal && now_time-last_pub > pub_interval )
	{
		bool has_lock = false;
		last_pub = now_time;
		ROS_DEBUG_STREAM(num_skipped << " refreshes inbetween publishing");
		num_skipped = 0;
		
		cv::imshow( "Tracked user"        , user_cal_.getImage() );
		cv::imshow( "Original calibration", original_cal_.getImage() );
		
		cv::Mat rgb, rgb_mask;
//		getRGB(rgb, rgb_mask);
		rgb = getRGB(imageMD);
		
		for (unsigned int i = 0; i < nUsers; i++)
		{
			user this_user;
			this_user.uid = aUsers[i];
			
			// Bitwise mask of pixels belonging to this user
			this_mask = (label_image == this_user.uid);
			this_user.numpixels = cv::countNonZero(this_mask);
			
			// Compare this user to the target
			this_user.pc.init(rgb, this_mask);
			double similarity  = this_user.pc.compare(user_cal_    );
			double sim_to_orig = this_user.pc.compare(original_cal_);
			this_user.similarity         = similarity;
			this_user.similarity_to_orig = sim_to_orig;
			
			/*
			ros::Time t1 = ros::Time::now();
			double emd         = this_user.pc.getEMD (user_cal_    );
			double emd_to_orig = this_user.pc.getEMD (original_cal_);
			ros::Duration d = (ros::Time::now() - t1);
			ROS_INFO_STREAM("EMD took " << (d.sec) );*/
			
			if( now_time > save_timer_ ){
				ROS_WARN_STREAM("[bk_skeletal_tracker] Say Cheezbuger");
				save_timer_ = now_time + ros::Duration(60*60*24);
				g_bSaveFrame = true;
			}
			
			if( g_bSaveFrame )
			{
				time_t t = ros::WallTime::now().sec;
				char buf[1024] = "";
				struct tm* tms = localtime(&t);
				strftime(buf, 1024, "%Y-%m-%d-%H-%M-%S", tms);
				
				std::string prefix = ( boost::format("capture_%s_user%d") % buf % this_user.uid ).str();				
				cv::Mat rgb_masked;
				rgb.copyTo(rgb_masked, this_mask);
				
				saveMat(rgb_masked             , prefix + "_rgb"  );
				saveMat(this_mask              , prefix + "_mask" );
				saveMat(this_user.pc.getHist() , prefix + "_hist" );
				saveMat(this_user.pc.getImage(), prefix + "_himg" );
			}
			
			// Mean depth
			this_user.meandepth = cv::mean(depth_image, this_mask)[0];
		
			this_user.silhouette_area = 0;
		
			// Find the area of the silhouette in cartesian space
			for( int i=0; i<this_mask.rows; i++) {
				for( int j=0; j<this_mask.cols; j++ ) {
					if( this_mask.at<uchar>(i,j) != 0 )
					{
						pixel_area = cam_model_.getDeltaX(1, depth_image.at<float>(i,j))
										   * cam_model_.getDeltaY(1, depth_image.at<float>(i,j));
						this_user.silhouette_area += pixel_area;
					}
				}
			}
			// Find the center in 3D
			g_UserGenerator.GetCoM(this_user.uid, center_mass);
			this_user.center3d.point = vecToPt(center_mass);
		
			ROS_DEBUG_STREAM(boost::format("User %d: area %.3fm^2, mean depth %.3fm")
				% (unsigned int)this_user.uid % this_user.silhouette_area % this_user.meandepth);
		
			// Screen out unlikely users based on area
			if( this_user.meandepth > min_dist_ && this_user.silhouette_area < max_area_ && this_user.silhouette_area > min_area_ )
			{
				ROS_INFO_STREAM(boost::format("User %d   new: %.0f --- orig: %.0f")
					% ((int)this_user.uid) % (100*similarity) % (100*sim_to_orig) );
				/*ROS_INFO_STREAM(boost::format("EMD       new: %.2f --- orig: %.2f")
					% (emd) % (emd_to_orig) );*/
					
				if( similarity > PersonCal::getMatchThresh() ) {
					user_cal_.update(rgb, this_mask);
				}
				else{
					if( sim_to_orig > PersonCal::getMatchThresh() ) {
						ROS_WARN_STREAM("Reset to original calibration");
						user_cal_ = original_cal_;
					}
				}
			
				std::stringstream window_name;
				window_name << "user_" << ((int)this_user.uid);
				cv::imshow(window_name.str(), this_user.pc.getImage());
			
			
				ROS_DEBUG("Accepted user");
				users.push_back(this_user);
				
				// Visualization
				geometry_msgs::Point32 p;
				p.x = this_user.center3d.point.x;
				p.y = this_user.center3d.point.y;
				p.z = this_user.center3d.point.z;
				cloud.points.push_back(p);
				cloud.channels[0].values.push_back(0.0f);
			}
		}
	
	
		// Try to associate the tracker with a user
		if( latest_tracker_.first != "" )
		{
			// Transform the tracker to this time. Note that the pos time is updated but not the restamp.
			tf::Point pt;
			tf::pointMsgToTF(latest_tracker_.second.pos.pos, pt);
			tf::Stamped<tf::Point> loc(pt, latest_tracker_.second.pos.header.stamp, latest_tracker_.second.pos.header.frame_id);
			try {
				tfl_->transformPoint(frame_id_, now_time-ros::Duration(.1), loc, latest_tracker_.second.pos.header.frame_id, loc);
				latest_tracker_.second.pos.header.stamp    = now_time;
				latest_tracker_.second.pos.header.frame_id = frame_id_;
				latest_tracker_.second.pos.pos.x = loc[0];
				latest_tracker_.second.pos.pos.y = loc[1];
				latest_tracker_.second.pos.pos.z = loc[2];
			}
			catch (tf::TransformException& ex) {
				ROS_ERROR("(finding) Could not transform person to this time");
			}
			  
			people_msgs::PositionMeasurement pos;
			if( users.size() > 0 )
			{
				std::stringstream users_ss;
				users_ss << boost::format("(finding) Tracker \"%s\" = (%.2f,%.2f) Users = ") 
					% latest_tracker_.first % latest_tracker_.second.pos.pos.x   % latest_tracker_.second.pos.pos.y;
			
				// Find the closest user to the tracker
				user closest;
				closest.distance = BIGDIST_M;
	
				foreach(user u, users)
				{
					u.distance = pow(latest_tracker_.second.pos.pos.x - u.center3d.point.x, 2.0)
						         + pow(latest_tracker_.second.pos.pos.y - u.center3d.point.y, 2.0);
					
					users_ss << boost::format("(%.2f,%.2f), ") % u.center3d.point.x % u.center3d.point.y;
					
					if( u.distance < closest.distance )
					{
						if( u.similarity > PersonCal::getMatchThresh() ) {
							closest = u;
						}
						else {
							ROS_WARN_STREAM("Ignored close user not matching (" << u.uid << ")");
						}
					}
				}//foreach
Example #22
void depthCallback(const sensor_msgs::ImageConstPtr& original_image, const sensor_msgs::CameraInfoConstPtr& info)
{
    if(is_robot_running==0 && balls_written==1 && ball_chosen==0){
        cv_bridge::CvImagePtr cv_ptr;
        try
        {
            cv_ptr = cv_bridge::toCvCopy(original_image, sensor_msgs::image_encodings::TYPE_16UC1);
        }
        catch (cv_bridge::Exception& e)
        {
            ROS_ERROR("tutorialROSOpenCV::main.cpp::cv_bridge exception: %s", e.what());
            return;
        }

        cam_model.fromCameraInfo(info);
        Mat depth_image = cv_ptr->image;

        if(balls_written == 1){
            for( size_t i = 0; i < circles_all.size(); i++ ){
                if (pilki[i]!=0){
                    cv::Point2d uv_point(circles_all[i][0], circles_all[i][1]);
                    unsigned short ball_depth;
                    ball_depth = depth_image.at<unsigned short>(circles_all[i][1], circles_all[i][0])+20;
                    cv::Point3d xy_point;
                    xy_point = cam_model.projectPixelTo3dRay(uv_point);
                    xy_point = xy_point * ball_depth;

                    geometry_msgs::PointStamped xy_point_stamped, odom_point_stamped;

                    xy_point_stamped.header.frame_id = "/camera_rgb_optical_frame";
                    xy_point_stamped.header.stamp = ros::Time::now();

                    xy_point_stamped.point.x = 0.001*xy_point.x;
                    xy_point_stamped.point.y = 0.001*xy_point.y;
                    xy_point_stamped.point.z = 0.001*xy_point.z;

                    try {
                        tf_listener->waitForTransform("/base_link", "/camera_rgb_optical_frame", ros::Time::now(), ros::Duration(10.0) );
                        tf_listener->transformPoint("/base_link", xy_point_stamped, odom_point_stamped);
                    }
                    catch (tf::TransformException ex) {
                        ROS_ERROR("%s",ex.what());
                    }

                    if(odom_point_stamped.point.z > 0.05 || odom_point_stamped.point.z < 0.001){
			std::cout << "Srodek pilki na wysokosci: " << odom_point_stamped.point.z << ".\n"; 
                        std::cout << "Skondensowane pilki: to nie pilka, jest za wysoko albo za nisko! \n";
                        pilki[i]=0;
                    }
                }
            }

            depth_written = 1;
            std::cout << "Policzylem wysokosc dla skondensowanych pilek \n";
            int closest_ball = choose_closest_ball();

            if(closest_ball == -1){
                std::cout << "NIE WYBRANO ZADNEJ PILKI \n";
                send_no_balls();
            }

            if(closest_ball != -1){
                int wybrana_x = circles_all[closest_ball][0];
                int wybrana_y = circles_all[closest_ball][1];
                int wybrana_z = circles_all[closest_ball][2];

                ball_chosen = 1;
                std::cout << "Wybrana jedna pilka \n";

                last_circle[0] = wybrana_x;
                last_circle[1] = wybrana_y;
                last_circle[2] = wybrana_z;

                wybrana.x = last_circle[0];
                wybrana.y = last_circle[1];
                wybrana.z = last_circle[2];

                draw_balls = 1;

                cv::Point2d uv_point(wybrana.x, wybrana.y);
                unsigned short ball_depth;

                ball_depth = depth_image.at<unsigned short>(wybrana.y,wybrana.x)+20;

                geometry_msgs::Point message_selected;
                cv::Point3d xy_point;
                xy_point = cam_model.projectPixelTo3dRay(uv_point);
                xy_point = xy_point * ball_depth;

                geometry_msgs::PointStamped xy_point_stamped, odom_point_stamped;

                xy_point_stamped.header.frame_id = "/camera_rgb_optical_frame";
                xy_point_stamped.header.stamp = ros::Time::now();

                xy_point_stamped.point.x = 0.001*xy_point.x;
                xy_point_stamped.point.y = 0.001*xy_point.y;
                xy_point_stamped.point.z = 0.001*xy_point.z;

                try {
                    tf_listener->waitForTransform("/odom", "/camera_rgb_optical_frame", ros::Time::now(), ros::Duration(10.0) );
                    tf_listener->transformPoint("/odom", xy_point_stamped, odom_point_stamped);
                }
                catch (tf::TransformException ex) {
                    ROS_ERROR("%s",ex.what());
                }

                if(odom_point_stamped.point.z > 0.05){
                    std::cout << "Wybrana pilka to nie pilka, jest za wysoko! \n";
                }

                std::cout << "Policzylem dla wybranej pilki i publikuje \n";
                //balls.publish(wybrana);
                selected_ball.publish(odom_point_stamped);
                licznik_depth = 0;
                ball_chosen = 0;
                circles_all.clear();
                pilki.clear();

                licznik_wybrane = 0;
                no_balls_counter = 0;

                balls_written = 0;
                depth_written = 0;
                is_robot_running = 1;

            }
    }
}
}
Example #23
File: locator.cpp Project: ashokzg/cpb
	void locator()
	{
		namedWindow("Tracking");
		int hMin, hMax, sMin, sMax, vMin, vMax,area_min;
		hMin = 0;
		//hMax = 124; // night values/???
		hMax = 255;
		//sMin = 95;
		sMin = 126;
		sMax = 255;
		//vMin = 139;
		vMin = 173;
		vMax = 255;
		area_min = 100;
		Mat smoothed, hsvImg, t_img;
		createTrackbar("blob min area","Tracking" ,&area_min ,1000);
		createTrackbar("Hue Min", "Tracking", &hMin, 255);
		createTrackbar("Hue Max", "Tracking", &hMax, 255);
		createTrackbar("Sat Min", "Tracking", &sMin, 255);
		createTrackbar("Sat Max", "Tracking", &sMax, 255);
		createTrackbar("Val Min", "Tracking", &vMin, 255);
		createTrackbar("Val MaX", "Tracking", &vMax, 255);
		while(ros::ok())
		{
			Mat source = imageB;
			Mat copy = imageB.clone();
			GaussianBlur(source, smoothed, Size(9,9), 4);
			cvtColor(smoothed, hsvImg, CV_BGR2HSV);
			inRange(hsvImg, Scalar(hMin, sMin, vMin), Scalar(hMax, sMax, vMax), t_img);

			CBlobResult blob;
			IplImage i_img = t_img;
			blob = CBlobResult(&i_img,NULL,0);
			int num_blobs = blob.GetNumBlobs();

			blob.Filter(blob, B_INCLUDE, CBlobGetArea(), B_INSIDE, area_min, blob_area_absolute_max_);
			num_blobs = blob.GetNumBlobs();

			std::string reference_frame = "/virtual_table"; // Table frame at ball_radius above the actual table plane

			tf::StampedTransform transform;
			tf_.waitForTransform(reference_frame, model.tfFrame(), ros::Time(0), ros::Duration(0.5));
			tf_.lookupTransform(reference_frame, model.tfFrame(), ros::Time(0), transform);

			for(int i =0;i<num_blobs;i++)
			{
				CBlob* bl = blob.GetBlob(i);
				Point2d uv(CBlobGetXCenter()(*bl), CBlobGetYCenter()(*bl));
				//Use the width as the height
				uv.y = bl->MinY() + (bl->MaxX() - bl->MinX()) * 0.5;
				circle(copy,uv,50,Scalar(255,0,0),5);

				cv::Point3d xyz;
				model.projectPixelTo3dRay(uv, xyz);
		
				// Intersect ray with plane in virtual table frame
				//Origin of camera frame wrt virtual table frame
				tf::Point P0 = transform.getOrigin();
				//Point at end of unit ray wrt virtual table frame
				tf::Point P1 = transform * tf::Point(xyz.x, xyz.y, xyz.z);
				// Origin of virtual table frame
				tf::Point V0 = tf::Point(0.0,0.0,0.0);
				// normal to the table plane
				tf::Vector3 n(0, 0, 1);
				// finding scaling value
				double scale = (n.dot(V0-P0))/(n.dot(P1-P0));
				tf::Point ball_pos = P0 + (P1-P0)*scale;
				cout <<ball_pos.x() << " " << ball_pos.y() << " " << ball_pos.z() <<endl;
			}
			imshow(WINDOW, copy);
			waitKey(3);

			imshow("edited", t_img);
			waitKey(3);

			ros::spinOnce();
		}
	}
Example #24
void cameraInfoCallback(const sensor_msgs::CameraInfoConstPtr& infoMsg) 
{
	cameraModel.fromCameraInfo(infoMsg);
}
Example #25
void FeatureTracker::track_features(geometry_msgs::PoseStamped mapPose){
  //set the initial number of features to the max number we want to find
  int feature_count=num_features;
  printf("pose %f %f %f\n",mapPose.pose.position.x, mapPose.pose.position.y, tf::getYaw(mapPose.pose.orientation));
  int edge_pixels=5;
  
  //check if there were features from the last image to keep tracking
  if(last_feature_count>0){
    //if there were call cvCalcOpticalFlowPyrLK();
    //find matches between last good features and current image features
    //    store matches in featuresB
    cvCalcOpticalFlowPyrLK(last_image,image_rect,pyrA,pyrB,features,featuresB, last_feature_count,cvSize(win_size,win_size) ,4,last_features_status,track_error, cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,.3),0);
  }
  
  printf("got image flow\n");
  //    assign last_feature_id values for matched features and set the non matched spots to -1
  
  //find new features and subpixel them
  
  //I SHOULD ADD THE IMAGE FLOW VALUES AS FEATURES NOW BEFORE FINDING NEW FEATURES
  
  //find all good features
  cvGoodFeaturesToTrack(image_rect, eigImage, tempImage, features, &feature_count, quality_level, min_distance, NULL, block_size);
  
  //subpixel good features
  cvFindCornerSubPix(image_rect,features,feature_count,cvSize(win_size,win_size),cvSize(-1,-1),cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03));
  
  
  printf("subpixeled image\n");
  
  //for all the features in features B, find their matches in the newly found features
  //add all the matches to their correct featuremanager, for the non matching, make a new
  //feature manager and add them to it
  
  //for all features by now we need their ray and the robot pose at that location
  //draw dots on image where features are
  
  
  
  //set the feature ids to a control value
  
  for(int i=0;i<num_features;i++){
    current_feature_id[i]=-1;
  }
  
  for(int i=0;i<last_feature_count;i++){
    //for the previously found features in list b
    if(last_features_status[i]>0){
      for(int j=0;j<feature_count;j++){
	//for every feature found in this image
	
	//determine if the two overlap in a meaningful way
	int xdiff=featuresB[i].x-features[j].x;
	int ydiff=featuresB[i].y-features[j].y;
	//if the pixels are within some margin of eachother
	if(sqrt(xdiff*xdiff + ydiff*ydiff)<pixel_tracking_margin){
	  //if they do set the current id for j to the id of i
	  current_feature_id[j]=last_feature_id[i];
	  printf("feature found %d %d",last_feature_id[i],i);
	}
      }
    }
  }
  
  printf("assigned IDs image\n");
  
  
  for(int i=0;i<feature_count;i++){
    
    printf("looping\n");
    if(current_feature_id[i]>=0){
    printf("prev feature match\n");
      //if we matched a previous feature
      //add our new feature to the previous features list
      cv::Point3d tempRay;
      cv::Point2d tempPoint=cv::Point2d(features[i]);
      cam_model.projectPixelTo3dRay(tempPoint,tempRay);
      
      if(tempPoint.x> edge_pixels && tempPoint.x < last_image->width- edge_pixels &&
	tempPoint.y> edge_pixels && tempPoint.y<last_image->height- edge_pixels){
	featureList[current_feature_id[i]].add(RawFeature(mapPose.pose.position.x, mapPose.pose.position.y, tf::getYaw(mapPose.pose.orientation), tempPoint,tempRay));
      }else{
	current_feature_id[i]=-1;
      }
      
    }else{
    printf("new feature \n");
      
      cv::Point3d tempRay;
      cv::Point2d tempPoint=cv::Point2d(features[i]);
      cam_model.projectPixelTo3dRay(tempPoint,tempRay);
      if(tempPoint.x> edge_pixels && tempPoint.x < last_image->width- edge_pixels &&
	tempPoint.y> edge_pixels && tempPoint.y<last_image->height- edge_pixels){
	printf("new good feature \n");
	//if we didn't
	//create a new feature group in the list
	current_feature_id[i]=feature_number;
	//add the new feature to the feature list
	featureList.push_back(FeatureManager());

	featureList[feature_number].add(RawFeature(mapPose.pose.position.x, mapPose.pose.position.y, tf::getYaw(mapPose.pose.orientation), tempPoint,tempRay));
	++feature_number;
      }
    }
  }
   
//  printf("features: ");
  for(int i=0;i<num_features;i++){
    if(i<feature_count){
     last_feature_id[i]=current_feature_id[i];
    }
    else{
      last_feature_id[i]=-1;
    }
 //   printf(" %d ",current_feature_id[i]);
  }
  printf("\n");
  
  
  last_feature_count=feature_count;
  
}
Example #26
void PointXYZtoCameraPointXY(const geometry_msgs::Point input, geometry_msgs::Point &output, const image_geometry::PinholeCameraModel& model) {
    output.x = (input.x*model.fx()/input.z)+model.cx();
    output.y = (input.y*model.fy()/input.z)+model.cy();
}
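The example above writes the pinhole projection out by hand, u = fx·X/Z + cx and v = fy·Y/Z + cy. A small cross-check sketch follows (the sample point is an assumption; it presumes a monocular rectified model, i.e. zero Tx/Ty offsets) showing it should agree with project3dToPixel().

// Hypothetical cross-check; assumes `model` was already initialized from CameraInfo.
geometry_msgs::Point in, out;
in.x = 0.2; in.y = -0.1; in.z = 1.5;        // a point in front of the camera, in metres
PointXYZtoCameraPointXY(in, out, model);    // hand-written projection
cv::Point2d uv = model.project3dToPixel(cv::Point3d(in.x, in.y, in.z));
// Expect out.x == uv.x and out.y == uv.y (up to floating-point rounding).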
Example #27
cv::Mat projectWithEigen()
{
	// Transform meshes into camera frame

	// For each frame in vector
	for (int frame = 0; frame < mMeshFrameIDs.size(); frame++)
	{
		// Lookup current transform
		Eigen::Isometry3 transform;
		transform = transforms.at(mMeshFrameIDs[frame]);

		// Get copy of mesh for each frame
		ap::Mesh* sourceMesh;
		ap::Mesh* transformedMesh;
		//std::cerr << "Getting frame " << frame << " : " << mMeshFrameIDs[frame] << std::endl;
		MeshMap::iterator scene_i = scenes.find(mMeshFrameIDs[frame]);
		if (scenes.end() == scene_i) { continue; }
		sourceMesh = scene_i->second;

		MeshMap::iterator scene_t = transformedScenes.find(mMeshFrameIDs[frame]);
		if (transformedScenes.end() == scene_t) { continue; }
		transformedMesh = scene_t->second;

		// Transform mesh into camera frame
		for (int i = 0; i < sourceMesh->vertices.size(); i++)
		{
			Eigen::Vector3 newVertex = transform * sourceMesh->vertices[i];
			//std::cerr << mesh->vertices[i].transpose() << "\t->\t" << newVertex.transpose() << std::endl;
			transformedMesh->vertices[i] = newVertex;
		}
	}

	// For each pixel in camera image
	cv::Mat robotImage(mCameraModel.cameraInfo().height, mCameraModel.cameraInfo().width, CV_32F);
	float* pixelPtr = (float*)robotImage.data;
	float maxDepth = 0;
	for (int v = 0; v < robotImage.rows; v++)
	{
		for (int u = 0; u < robotImage.cols; u++)
		{
			// Create a ray through the pixel
			int pixelIdx = u + (v * robotImage.cols);
			//std::cerr << "Pixel (" << u << "," << v << ")" << std::endl;
			cv::Point2d pixel = cv::Point2d(u, v);
			cv::Point3d cvRay = mCameraModel.projectPixelTo3dRay(pixel);
			// Convert cvRay to ap::Ray
			ap::Ray ray;
			ray.point = Eigen::Vector3::Zero();
			ray.vector.x() = cvRay.x; ray.vector.y() = cvRay.y; ray.vector.z() = cvRay.z;
			ray.vector.normalize();
			//std::cerr << ray.vector.transpose() << std::endl;

			// For each frame in vector
			for (int frame = 0; frame < mMeshFrameIDs.size(); frame++)
			{
				MeshMap::iterator scene_i = transformedScenes.find(mMeshFrameIDs[frame]);
				if (transformedScenes.end() == scene_i)
				{
					continue;
				}
				ap::Mesh* mesh = scene_i->second;

				// For each triangle in mesh
				for (int i = 0; i < mesh->faces.size(); i++)
				{
					// Check for intersection. If finite, set distance

					ap::Triangle triangle(mesh->vertices[mesh->faces[i].vertices[0]],
										  mesh->vertices[mesh->faces[i].vertices[1]],
										  mesh->vertices[mesh->faces[i].vertices[2]]);

					Eigen::Vector3 intersection = ap::intersectRayTriangle(ray, triangle);
					if (std::isfinite(intersection.x()))
					{
						float d = intersection.norm();
						float val = pixelPtr[pixelIdx];
						if (val == 0 || val > d)
						{
							pixelPtr[pixelIdx] = d;
						}
						if (d > maxDepth)
						{
							maxDepth = d;
						}
					}
				}
			}
		}
	}

	// Return the matrix
	if (maxDepth == 0) { maxDepth = 1;}
	return robotImage/maxDepth;
}
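projectWithEigen() returns the ray-traced depth image scaled into [0, 1]. A short display sketch, assuming the function above is a member of the same class and OpenCV's highgui is available (the wrapper name is hypothetical):

// Sketch only: render the normalized depth image for inspection.
void showProjection()
{
	cv::Mat depth = projectWithEigen();      // float image, values in [0, 1]
	cv::Mat display;
	depth.convertTo(display, CV_8U, 255.0);  // rescale to [0, 255] for imshow
	cv::imshow("projected robot depth", display);
	cv::waitKey(1);
}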
	inline void
	filter_depth_projection(	const image_geometry::PinholeCameraModel &camera_model,
								const pcl::PointCloud<PointT> &in,
								pcl::PointCloud<PointT> &out,
								int rows,
								int cols,
								int k_neighbors = 2,
								float threshold = 0.3)
	{
		std::vector<std::vector<bool> > hit( cols, std::vector<bool>(rows));
		std::vector<int> points2d_indices;
		pcl::PointCloud<pcl::PointXY> points2d;
		pcl::PointCloud<PointT> z_filtred;

#ifdef DEBUG
		std::cout << "in points: "<< in.size() << " width: " << cols << " height: " << rows << "\n";
#endif

		project2d::Points2d<PointT> point_map;
		point_map.init(camera_model, in, rows, cols);
		point_map.get_points(z_filtred, 25);

		// Project points into image space
		for(unsigned int i=0; i < z_filtred.size();  ++i){
			const PointT* pt = &z_filtred.points.at(i);
			//if( pt->z > 1)
			{ // min distance from camera 1m

				cv::Point2i point_image = camera_model.project3dToPixel(cv::Point3d(pt->x, pt->y, pt->z));

				if( between<int>(0, point_image.x, cols )
					&& between<int>( 0, point_image.y, rows )
				)
				{
					// Point already at this position?
					if(!hit[point_image.x][point_image.y]){
						hit[point_image.x][point_image.y] = true;

						pcl::PointXY p_image;
						p_image.x = point_image.x;
						p_image.y = point_image.y;

						points2d.push_back(p_image);
						points2d_indices.push_back(i);
					}
					else{
#ifdef DEBUG
						std::cout << "[" << point_image.x << "][" << point_image.y << "] already inserted " << pt << "\n";
#endif

					}

				}
			}
		}

#ifdef DEBUG
		std::cout << "Z_filtred: " << z_filtred.size() << " projected 2d points: "<< points2d.size() << " indices: " << points2d_indices.size() << "\n";
#endif

		pcl::search::KdTree<pcl::PointXY> tree_n;
		tree_n.setInputCloud(points2d.makeShared());
		tree_n.setEpsilon(0.5);

		for(unsigned int i=0; i < points2d.size(); ++i){
			std::vector<int> k_indices;
			std::vector<float> square_distance;

			//tree_n.nearestKSearch(points2d.at(i), k_neighbors, k_indices, square_distance);
			tree_n.radiusSearch(points2d.at(i), k_neighbors, k_indices, square_distance);

			look_up_indices(points2d_indices, k_indices);

			float distance_value;
			if(is_edge_threshold(z_filtred, points2d_indices.at(i), k_indices, square_distance, distance_value, threshold)){
				out.push_back(z_filtred.points.at(points2d_indices.at(i)));
				out.at(out.size()-1).intensity = sqrt(distance_value);
			}
		}

#ifdef DEBUG
		std::cout << "out 2d points: "<< out.size() << "\n";
#endif
	}
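A usage sketch for filter_depth_projection, assuming PointT is pcl::PointXYZI (the function writes an intensity field), a camera model already initialized from a CameraInfo message, and an input cloud expressed in the camera frame; the variable names are placeholders:

// Sketch only: illustrative call, not part of the original source.
pcl::PointCloud<pcl::PointXYZI> edge_points;
filter_depth_projection(cam_model,                        // initialized image_geometry::PinholeCameraModel
                        *cloud_in_camera_frame,           // pcl::PointCloud<pcl::PointXYZI>
                        edge_points,                      // output: points kept as depth edges
                        cam_model.cameraInfo().height,    // rows
                        cam_model.cameraInfo().width,     // cols
                        2,                                // pixel search radius (parameter named k_neighbors)
                        0.3f);                            // edge threshold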
예제 #29
0
void depthCallback(const sensor_msgs::ImageConstPtr& original_image, const sensor_msgs::CameraInfoConstPtr& info){

    if(need_for_wall==2){
        need_for_wall = 0;
        cv_bridge::CvImagePtr cv_ptr;
        try
        {
            cv_ptr = cv_bridge::toCvCopy(original_image, enc::TYPE_16UC1);
        }
        catch (cv_bridge::Exception& e)
        {
            ROS_ERROR("tutorialROSOpenCV::main.cpp::cv_bridge exception: %s", e.what());
            return;
        }
        cam_model.fromCameraInfo(info);
        Mat depth_image = cv_ptr->image;

        float x = 0;
        float y = 360;
        float wall_height = 0;
        geometry_msgs::PointStamped xy_point_stamped, pipe_point_stamped;
        walls_detection::Walls walls_all_message;
        for(int i = 100; i<600; i+=110){
            wall_height = 0;
            y = 360;
            x = i;
            while(wall_height < 0.05 && y > 0){
                    cv::Point2d uv_point(x,y);
                    unsigned short wall_depth = depth_image.at<unsigned short>(static_cast<int>(y), static_cast<int>(x));
                    cv::Point3d xy_point;
                    xy_point = cam_model.projectPixelTo3dRay(uv_point);
                    xy_point = xy_point * wall_depth;

                    xy_point_stamped.header.frame_id = "/camera_rgb_optical_frame";
                    xy_point_stamped.header.stamp = ros::Time::now();

                    xy_point_stamped.point.x = 0.001*xy_point.x;
                    xy_point_stamped.point.y = 0.001*xy_point.y;
                    xy_point_stamped.point.z = 0.001*xy_point.z;

                    try {
                            tf_listener->waitForTransform("/pipe_link", "/camera_rgb_optical_frame", ros::Time::now(), ros::Duration(10.0) );
                            tf_listener->transformPoint("/pipe_link", xy_point_stamped, pipe_point_stamped);
                    }
                    catch (const tf::TransformException& ex) {
                            ROS_ERROR("%s",ex.what());
                    }

                wall_height = pipe_point_stamped.point.z;
                y = y - 120;
            }
            std::cout<<"Sciana nr "<< i <<" - x: "<< pipe_point_stamped.point.x <<", y: "<< pipe_point_stamped.point.y <<", z: "<< pipe_point_stamped.point.z <<"\n";
            if(wall_height < 0.05){
                std::cout << "Nie ma sciany\n";
                pipe_point_stamped.point.x = 0;
                pipe_point_stamped.point.y = 0;
                pipe_point_stamped.point.z = 0;
            }
            switch(i){
                case 100:
                walls_all_message.wall1 = pipe_point_stamped;
                break;

                case 210:
                walls_all_message.wall2 = pipe_point_stamped;
                break;

                case 320:
                walls_all_message.wall3 = pipe_point_stamped;
                break;

                case 430:
                walls_all_message.wall4 = pipe_point_stamped;
                break;

                case 540:
                walls_all_message.wall5 = pipe_point_stamped;
                break;

            }

	}
    walls_all.publish(walls_all_message);
    }
}
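The callback above scales the ray returned by projectPixelTo3dRay by the raw 16-bit depth reading (millimetres for this sensor) and converts to metres afterwards. A compact sketch folding the two steps together, assuming the same 16UC1 millimetre-encoded depth image and an initialized cam_model (the helper name is hypothetical):

// Sketch only: convert one pixel of a 16UC1 (millimetre) depth image into a 3D point in metres.
cv::Point3d pixelToPoint(const cv::Mat& depth_image,
                         const image_geometry::PinholeCameraModel& model,
                         int u, int v)
{
    unsigned short depth_mm = depth_image.at<unsigned short>(v, u);  // row = v, col = u
    cv::Point3d ray = model.projectPixelTo3dRay(cv::Point2d(u, v));  // ray through the pixel
    return ray * (0.001 * depth_mm);                                 // scale by depth in metres, as in the callback above
}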
예제 #30
0
	inline void
	remove_cluster_2d(	const image_geometry::PinholeCameraModel &camera_model,
								const pcl::PointCloud<PointT> &in,
								pcl::PointCloud<PointT> &out,
								int rows,
								int cols,
								int k_neighbors = 4,
								int border = 25)
	{
		std::vector<int> points2d_indices;
		pcl::PointCloud<pcl::PointXY> points2d;

#ifdef DEBUG
		std::cout << "in points: "<< in.size() << "\n";
#endif

		// Project points into image space
		for(unsigned int i=0; i < in.points.size();  ++i){
			const PointT* pt = &in.points.at(i);
			if( pt->z > 1) { // min distance from camera 1m

				cv::Point2i point_image = camera_model.project3dToPixel(cv::Point3d(pt->x, pt->y, pt->z));

				if( between<int>(0+border, point_image.x, cols-border )
					&& between<int>( 0+border, point_image.y, rows-border )
				)
				{
					pcl::PointXY p_image;
					p_image.x = point_image.x;
					p_image.y = point_image.y;

					points2d.push_back(p_image);
					points2d_indices.push_back(i);
				}
			}
		}

#ifdef DEBUG
		std::cout << "projected 2d points: "<< points2d.size() << " indices: " << points2d_indices.size() << "\n";
#endif

		pcl::search::KdTree<pcl::PointXY> tree_n;
		tree_n.setInputCloud(points2d.makeShared());
		tree_n.setEpsilon(0.1);

		for(unsigned int i=0; i < points2d.size(); ++i){
			std::vector<int> k_indices;
			std::vector<float> square_distance;

			//tree_n.nearestKSearch(points2d.at(i), k_neighbors, k_indices, square_distance);
			tree_n.radiusSearch(points2d.at(i), k_neighbors, k_indices, square_distance);

			if(k_indices.empty()) continue; // Don't add points without neighbors

			look_up_indices(points2d_indices, k_indices);

			float edginess = 0;
			if(is_edge_z(in, points2d_indices.at(i), k_indices, square_distance, edginess, 0.1)){
				out.push_back(in.points.at(points2d_indices.at(i)));
				out.at(out.size()-1).intensity = edginess;
			}
		}

#ifdef DEBUG
		std::cout << "out 2d points: "<< out.size() << "\n";
#endif
	}
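Both filter_depth_projection and remove_cluster_2d rely on a between<T>() range check and a look_up_indices() remapping defined elsewhere in this codebase. As an assumption about the bounds check only, here is a minimal sketch consistent with how it is called above; the real helper may differ (for example, by using inclusive bounds):

// Sketch only: plausible definition of the range check used above.
template<typename T>
inline bool between(T lower, T value, T upper)
{
	// whether value lies strictly inside (lower, upper)
	return value > lower && value < upper;
}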