cv::Point3d GraphGridMapper::to3D(const cv::Point3d &p, const Eigen::Isometry3d &camera_transform, const image_geometry::PinholeCameraModel &camera_model)
{
    int width = camera_model.cameraInfo().width;
    int height = camera_model.cameraInfo().height;

    // Clamp the rounded pixel coordinates to the image bounds.
    int u = static_cast<int>(std::round(p.x));
    if (u < 0) {
        u = 0;
    } else if (u >= width) {
        u = width - 1;
    }
    int v = static_cast<int>(std::round(p.y));
    if (v < 0) {
        v = 0;
    } else if (v >= height) {
        v = height - 1;
    }
    // Sentinel for invalid measurements: infinite depth.
    cv::Point3d p3D(-1.0, -1.0, std::numeric_limits<double>::infinity());

    if (p.z != 0 && !std::isnan(p.z))
    {
        // Back-project the pixel through the pinhole model.
        p3D.x = (u - camera_model.cx()) * p.z / camera_model.fx();
        p3D.y = (v - camera_model.cy()) * p.z / camera_model.fy();
        p3D.z = p.z;

        // Eigen::Isometry3d applies the full rigid-body transform to a
        // 3-vector directly; converting to homogeneous coordinates would
        // yield a 4-vector that cannot be assigned back to a Vector3d.
        Eigen::Vector3d vec(p3D.x, p3D.y, p3D.z);
        vec = camera_transform * vec;
        p3D.x = vec(0);
        p3D.y = vec(1);
        p3D.z = vec(2);
    }

    return p3D;
}
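A minimal usage sketch (not from the original source): `mapper`, `world_T_cam`, and `model` are hypothetical names for an existing GraphGridMapper, a camera-to-world transform, and a calibrated camera model; the pixel coordinates and depth are illustrative values.

// Hypothetical usage: back-project the depth reading at pixel (320, 240).
cv::Point3d pixel(320.0, 240.0, 1.5);  // (u, v, depth in meters) -- illustrative values
cv::Point3d world_point = mapper.to3D(pixel, world_T_cam, model);
if (std::isfinite(world_point.z)) {
    // Valid back-projection; world_point is expressed in the target
    // frame of world_T_cam.
}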
Example #2
pcl::PointCloud<pcl::PointXYZ>::Ptr Conversions::toPointCloud(const Eigen::Isometry3d &T, const image_geometry::PinholeCameraModel &cam, const cv::Mat &depth_img)
{
    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>());
    cloud->header.frame_id = "cam";
    cloud->is_dense = false;
    cloud->height = depth_img.rows;
    cloud->width = depth_img.cols;
    cloud->sensor_origin_ = Eigen::Vector4f( T.translation()(0), T.translation()(1), T.translation()(2), 1.f );
    cloud->sensor_orientation_ = Eigen::Quaternionf(T.rotation().cast<float>());
    cloud->points.resize( cloud->height * cloud->width );

    // Assumes a continuous CV_32FC1 depth image with depth in meters.
    CV_Assert(depth_img.type() == CV_32FC1 && depth_img.isContinuous());

    size_t idx = 0;
    const float* depthdata = depth_img.ptr<float>(0);
    for(int v = 0; v < depth_img.rows; ++v) {
        for(int u = 0; u < depth_img.cols; ++u) {
            pcl::PointXYZ& p = cloud->points[idx++];

            float d = *depthdata++;

            if (d > 0 && !std::isnan(d)) {
                p.z = d;
                p.x = (u - cam.cx()) * d / cam.fx();
                p.y = (v - cam.cy()) * d / cam.fy();
            } else {
                p.x = std::numeric_limits<float>::quiet_NaN();
                p.y = std::numeric_limits<float>::quiet_NaN();
                p.z = std::numeric_limits<float>::quiet_NaN();
            }
        }
    }
    return cloud;
}
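A hedged usage sketch, assuming toPointCloud is a static member (as the call below supposes), a CV_32FC1 depth image in meters, and an identity sensor pose; `cam` stands for an already-calibrated PinholeCameraModel. All names and values are illustrative.

// Hypothetical usage: convert one depth frame into an organized point cloud.
cv::Mat depth(480, 640, CV_32FC1, cv::Scalar(0.0f));         // placeholder depth frame
Eigen::Isometry3d cam_pose = Eigen::Isometry3d::Identity();  // sensor pose in the world frame
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud = Conversions::toPointCloud(cam_pose, cam, depth);
// cloud->is_dense is false, so downstream consumers must tolerate NaN points.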
Example #3
template<typename T>
void convert(const sensor_msgs::ImageConstPtr& depth_msg, PointCloud::Ptr& cloud_msg)
{
	// Use correct principal point from calibration
	float center_x = cameraModel.cx();
	float center_y = cameraModel.cy();

	// Combine unit conversion (if necessary) with scaling by focal length for computing (X,Y)
	float unit_scaling = DepthTraits<T>::toMeters( T(1) );
	float constant_x = unit_scaling / cameraModel.fx();
	float constant_y = unit_scaling / cameraModel.fy();
	float bad_point = std::numeric_limits<float>::quiet_NaN();

	sensor_msgs::PointCloud2Iterator<float> iter_x(*cloud_msg, "x");
	sensor_msgs::PointCloud2Iterator<float> iter_y(*cloud_msg, "y");
	sensor_msgs::PointCloud2Iterator<float> iter_z(*cloud_msg, "z");
	const T* depth_row = reinterpret_cast<const T*>(&depth_msg->data[0]);
	int row_step = depth_msg->step / sizeof(T);
	for (int v = 0; v < (int)cloud_msg->height; ++v, depth_row += row_step)
	{
		for (int u = 0; u < (int)cloud_msg->width; ++u, ++iter_x, ++iter_y, ++iter_z)
		{
			T depth = depth_row[u];

			// Missing points denoted by NaNs
			if (!DepthTraits<T>::valid(depth))
			{
				*iter_x = *iter_y = *iter_z = bad_point;
				continue;
			}

			// Fill in XYZ
			*iter_x = (u - center_x) * depth * constant_x;
			*iter_y = (v - center_y) * depth * constant_y;
			*iter_z = DepthTraits<T>::toMeters(depth);
		}
	}
}
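convert() relies on a DepthTraits<T> helper that is not shown in this example. The sketch below is consistent with its use above and follows the pattern in ROS depth_image_proc; it is an assumption, not the original definition.

#include <cmath>
#include <cstdint>

template<typename T> struct DepthTraits {};

// 16-bit depth images encode millimeters; 0 marks a missing reading.
template<> struct DepthTraits<uint16_t>
{
	static inline bool valid(uint16_t depth) { return depth != 0; }
	static inline float toMeters(uint16_t depth) { return depth * 0.001f; }
};

// 32-bit float depth images are already in meters; NaN/Inf mark missing readings.
template<> struct DepthTraits<float>
{
	static inline bool valid(float depth) { return std::isfinite(depth); }
	static inline float toMeters(float depth) { return depth; }
};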
// Project a 3D point in the camera frame onto the image plane with the pinhole
// model: u = fx*X/Z + cx, v = fy*Y/Z + cy. Assumes input.z is non-zero.
void PointXYZtoCameraPointXY(const geometry_msgs::Point& input, geometry_msgs::Point& output, const image_geometry::PinholeCameraModel& model) {
    output.x = (input.x * model.fx() / input.z) + model.cx();
    output.y = (input.y * model.fy() / input.z) + model.cy();
}
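For comparison (not part of the original snippet), image_geometry exposes the same pinhole projection directly; a sketch with hypothetical variable names:

// Equivalent projection through the camera model's own API.
cv::Point3d xyz(input.x, input.y, input.z);    // point in the camera (optical) frame
cv::Point2d uv = model.project3dToPixel(xyz);  // u = fx*X/Z + cx, v = fy*Y/Z + cy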