void align_images(const rs2_intrinsics& depth_intrin, const rs2_extrinsics& depth_to_other, const rs2_intrinsics& other_intrin, GET_DEPTH get_depth, TRANSFER_PIXEL transfer_pixel) { // Iterate over the pixels of the depth image #pragma omp parallel for schedule(dynamic) for (int depth_y = 0; depth_y < depth_intrin.height; ++depth_y) { int depth_pixel_index = depth_y * depth_intrin.width; for (int depth_x = 0; depth_x < depth_intrin.width; ++depth_x, ++depth_pixel_index) { // Skip over depth pixels with the value of zero, we have no depth data so we will not write anything into our aligned images if (float depth = get_depth(depth_pixel_index)) { // Map the top-left corner of the depth pixel onto the other image float depth_pixel[2] = { depth_x - 0.5f, depth_y - 0.5f }, depth_point[3], other_point[3], other_pixel[2]; rs2_deproject_pixel_to_point(depth_point, &depth_intrin, depth_pixel, depth); rs2_transform_point_to_point(other_point, &depth_to_other, depth_point); rs2_project_point_to_pixel(other_pixel, &other_intrin, other_point); const int other_x0 = static_cast<int>(other_pixel[0] + 0.5f); const int other_y0 = static_cast<int>(other_pixel[1] + 0.5f); // Map the bottom-right corner of the depth pixel onto the other image depth_pixel[0] = depth_x + 0.5f; depth_pixel[1] = depth_y + 0.5f; rs2_deproject_pixel_to_point(depth_point, &depth_intrin, depth_pixel, depth); rs2_transform_point_to_point(other_point, &depth_to_other, depth_point); rs2_project_point_to_pixel(other_pixel, &other_intrin, other_point); const int other_x1 = static_cast<int>(other_pixel[0] + 0.5f); const int other_y1 = static_cast<int>(other_pixel[1] + 0.5f); if (other_x0 < 0 || other_y0 < 0 || other_x1 >= other_intrin.width || other_y1 >= other_intrin.height) continue; // Transfer between the depth pixels and the pixels inside the rectangle on the other image for (int y = other_y0; y <= other_y1; ++y) { for (int x = other_x0; x <= other_x1; ++x) { transfer_pixel(depth_pixel_index, y * 
other_intrin.width + x); } } } } } }
int main(int argc, char * argv[]) try { std::cout << "Waiting for device..." << std::endl; // Declare RealSense pipeline, encapsulating the actual device and sensors rs2::pipeline pipe; // Create a configuration for configuring the pipeline with a non default profile rs2::config cfg; // Enable fisheye and pose streams cfg.enable_stream(RS2_STREAM_POSE, RS2_FORMAT_6DOF); cfg.enable_stream(RS2_STREAM_FISHEYE, 1); cfg.enable_stream(RS2_STREAM_FISHEYE, 2); // Start pipeline with chosen configuration rs2::pipeline_profile pipe_profile = pipe.start(cfg); // T265 has two fisheye sensors, we can choose any of them (index 1 or 2) const int fisheye_sensor_idx = 1; // Get fisheye sensor intrinsics parameters rs2::stream_profile fisheye_stream = pipe_profile.get_stream(RS2_STREAM_FISHEYE, fisheye_sensor_idx); rs2_intrinsics intrinsics = fisheye_stream.as<rs2::video_stream_profile>().get_intrinsics(); // Get fisheye sensor extrinsics parameters. // This is the pose of the fisheye sensor relative to the T265 coordinate system. rs2_extrinsics extrinsics = fisheye_stream.get_extrinsics_to(pipe_profile.get_stream(RS2_STREAM_POSE)); std::cout << "Device got. Streaming data" << std::endl; // Create an OpenGL display window and a texture to draw the fisheye image window app(intrinsics.width, intrinsics.height, "Intel RealSense T265 Augmented Reality Example"); window_key_listener key_watcher(app); texture fisheye_image; // Create the vertices of a simple virtual object. // This virtual object is 4 points in 3D space that describe 3 XYZ 20cm long axes. // These vertices are relative to the object's own coordinate system. const float length = 0.20; const object virtual_object = {{ { 0, 0, 0 }, // origin { length, 0, 0 }, // X { 0, length, 0 }, // Y { 0, 0, length } // Z }}; // This variable will hold the pose of the virtual object in world coordinates. // We we initialize it once we get the first pose frame. 
rs2_pose object_pose_in_world; bool object_pose_in_world_initialized = false; // Main loop while (app) { rs2_pose device_pose_in_world; // This will contain the current device pose { // Wait for the next set of frames from the camera auto frames = pipe.wait_for_frames(); // Get a frame from the fisheye stream rs2::video_frame fisheye_frame = frames.get_fisheye_frame(fisheye_sensor_idx); // Get a frame from the pose stream rs2::pose_frame pose_frame = frames.get_pose_frame(); // Copy current camera pose device_pose_in_world = pose_frame.get_pose_data(); // Render the fisheye image fisheye_image.render(fisheye_frame, { 0, 0, app.width(), app.height() }); // By closing the current scope we let frames be deallocated, so we do not fill up librealsense queues while we do other computation. } // If we have not set the virtual object in the world yet, set it in front of the camera now. if (!object_pose_in_world_initialized) { object_pose_in_world = reset_object_pose(device_pose_in_world); object_pose_in_world_initialized = true; } // Compute the pose of the object relative to the current pose of the device rs2_pose world_pose_in_device = pose_inverse(device_pose_in_world); rs2_pose object_pose_in_device = pose_multiply(world_pose_in_device, object_pose_in_world); // Get the object vertices in device coordinates object object_in_device = convert_object_coordinates(virtual_object, object_pose_in_device); // Convert object vertices from device coordinates into fisheye sensor coordinates using extrinsics object object_in_sensor; for (size_t i = 0; i < object_in_device.size(); ++i) { rs2_transform_point_to_point(object_in_sensor[i].f, &extrinsics, object_in_device[i].f); } for (size_t i = 1; i < object_in_sensor.size(); ++i) { // Discretize the virtual object line into smaller 1cm long segments std::vector<point3d> points_in_sensor = raster_line(object_in_sensor[0], object_in_sensor[i], 0.01); std::vector<pixel> projected_line; projected_line.reserve(points_in_sensor.size()); 
for (auto& point : points_in_sensor) { // A 3D point is visible in the image if its Z coordinate relative to the fisheye sensor is positive. if (point.z() > 0) { // Project 3D sensor coordinates to 2D fisheye image coordinates using intrinsics projected_line.emplace_back(); rs2_project_point_to_pixel(projected_line.back().f, &intrinsics, point.f); } } // Display the line in the image render_line(projected_line, i); } // Display text in the image render_text(app.height(), "Press spacebar to reset the pose of the virtual object. Press ESC to exit"); // Check if some key is pressed switch (key_watcher.get_key()) { case GLFW_KEY_SPACE: // Reset virtual object pose if user presses spacebar object_pose_in_world = reset_object_pose(device_pose_in_world); std::cout << "Setting new pose for virtual object: " << object_pose_in_world.translation << std::endl; break; case GLFW_KEY_ESCAPE: // Exit if user presses escape app.close(); break; } } return EXIT_SUCCESS; } catch (const rs2::error & e) { std::cerr << "RealSense error calling " << e.get_failed_function() << "(" << e.get_failed_args() << "):\n " << e.what() << std::endl; return EXIT_FAILURE; } catch (const std::exception& e) { std::cerr << e.what() << std::endl; return EXIT_FAILURE; }
// Project a 3D point (in sensor coordinates) onto the 2D image plane
// described by the given camera intrinsics.
float2 project(const rs2_intrinsics *intrin, const float3 & point)
{
    // Zero-initialize, then let librealsense fill the x/y components in place.
    float2 px = {};
    rs2_project_point_to_pixel(&px.x, intrin, &point.x);
    return px;
}