Example no. 1
0
/// Pipeline that cleans up a scanned page image for downstream processing.
/// Steps: greyscale conversion -> background removal -> deskew by the best
/// detected rotation angle -> downscale. Intermediate results are dumped via
/// the DebugImageWriter for inspection.
///
/// @param src      input image (assumed RGB channel order — TODO confirm against caller)
/// @param settings tuning parameters forwarded to the helper stages
/// @param w        sink for debug snapshots of intermediate images
/// @return the enhanced, rotated and downscaled image
static cv::Mat enhance_image(cv::Mat const& src, Settings const& settings, DebugImageWriter & w)
{
    cv::Mat grey;
    // cv::COLOR_RGB2GRAY replaces the legacy CV_RGB2GRAY constant, which is
    // not available in OpenCV 4 without pulling in the deprecated C headers.
    cv::cvtColor(src, grey, cv::COLOR_RGB2GRAY);

    cv::Mat enhanced = remove_background(grey, settings, w);
    w.write("enhanced", enhanced);

    // Deskew: search for the rotation that best aligns the content.
    double const angle = find_optimal_angle(enhanced, settings, w);
    printf("Best angle: %.2f\n", angle);
    cv::Mat rotated = rotate_around_center(enhanced, angle);

    cv::Mat downscaled = downscale(rotated, settings, w);
    return downscaled;
}
Example no. 2
0
/// RealSense align example: streams depth + color, aligns depth to the color
/// stream, strips pixels beyond a user-selected clipping distance from the
/// color image, and renders the result with a depth picture-in-picture.
/// Returns EXIT_SUCCESS on normal window close, EXIT_FAILURE on error.
int main(int argc, char * argv[]) try
{
    // Create and initialize GUI related objects
    window app(1280, 720, "CPP - Align Example"); // Simple window handling
    ImGui_ImplGlfw_Init(app, false);      // ImGui library initialization
    rs2::colorizer c;                     // Helper to colorize depth images
    texture renderer;                     // Helper for rendering images

    const rs2_stream align_to = RS2_STREAM_COLOR;
    // rs2::align allows you to perform alignment of depth frames to other streams
    rs2::align align(align_to);

    // Create a pipeline to easily configure and start the camera
    rs2::pipeline pipe;
    // Calling pipeline's start() without any additional parameters will start
    // the first device with its default streams. The start function returns the
    // pipeline profile which the pipeline used to start the device.
    rs2::pipeline_profile profile = pipe.start();

    // Each depth camera might have different units for depth pixels, so we get it here.
    // Brace-initialize so the value is never read uninitialized, even if the
    // helper's contract changes.
    float depth_scale{ 0.f };
    // Using the pipeline's profile, we can retrieve the device that the pipeline uses
    if (!try_get_depth_scale(profile.get_device(), depth_scale))
    {
        std::cerr << "Device does not have a depth sensor" << std::endl;
        return EXIT_FAILURE;
    }

    // Define a variable for controlling the distance to clip (meters)
    float depth_clipping_distance = 1.f;

    while (app) // Application still alive?
    {
        // Block the application until a frameset containing both a depth frame
        // and a frame of the stream we align to is available.
        rs2::frameset frameset;

        while (!frameset.first_or_default(RS2_STREAM_DEPTH) || !frameset.first_or_default(align_to))
        {
            frameset = pipe.wait_for_frames();
        }

        // NOTE: the librealsense API method is rs2::align::process (the
        // original code called a non-existent "proccess").
        auto processed = align.process(frameset);

        // Trying to get both color and aligned depth frames
        rs2::video_frame color_frame = processed.get_color_frame();
        rs2::depth_frame aligned_depth_frame = processed.get_depth_frame();

        // If one of them is unavailable, continue iteration
        if (!aligned_depth_frame || !color_frame)
        {
            continue;
        }
        // Passing both frames to remove_background so it will "strip" the background.
        // NOTE: in this example, we alter the buffer of the color frame, instead of
        //       copying it and altering the copy. This behavior is not recommended in
        //       real applications since the color frame could be used elsewhere.
        remove_background(color_frame, aligned_depth_frame, depth_scale, depth_clipping_distance);

        // Taking dimensions of the window for rendering purposes
        float w = static_cast<float>(app.width());
        float h = static_cast<float>(app.height());

        // At this point, "color_frame" is an altered color frame, stripped of its background.
        // Calculating the position to place the frame in the window.
        rect altered_color_frame_rect{ 0, 0, w, h };
        altered_color_frame_rect = altered_color_frame_rect.adjust_ratio({ static_cast<float>(color_frame.get_width()),static_cast<float>(color_frame.get_height()) });

        // Render aligned color
        renderer.render(color_frame, altered_color_frame_rect);

        // The example also renders the depth frame, as a picture-in-picture.
        // Calculating the position to place the depth frame in the window.
        rect pip_stream{ 0, 0, w / 5, h / 5 };
        pip_stream = pip_stream.adjust_ratio({ static_cast<float>(aligned_depth_frame.get_width()),static_cast<float>(aligned_depth_frame.get_height()) });
        pip_stream.x = altered_color_frame_rect.x + altered_color_frame_rect.w - pip_stream.w - (std::max(w, h) / 25);
        pip_stream.y = altered_color_frame_rect.y + altered_color_frame_rect.h - pip_stream.h - (std::max(w, h) / 25);

        // Render depth (as picture-in-picture); colorize first so it is displayable
        renderer.upload(c(aligned_depth_frame));
        renderer.show(pip_stream);

        // Using ImGui library to provide a slider controller to select the depth clipping distance
        ImGui_ImplGlfw_NewFrame(1);
        render_slider({ 5.f, 0, w, h }, depth_clipping_distance);
        ImGui::Render();

    }
    return EXIT_SUCCESS;
}
catch (const rs2::error & e)
{
    // librealsense errors carry the failed call and its arguments — report both.
    std::cerr << "RealSense error calling " << e.get_failed_function() << "(" << e.get_failed_args() << "):\n    " << e.what() << std::endl;
    return EXIT_FAILURE;
}
catch (const std::exception & e)
{
    std::cerr << e.what() << std::endl;
    return EXIT_FAILURE;
}