/// Debug-rendering helper: paints the input camera frame into screen_view,
/// overlays the estimator's raw v-disparity image in the top-left corner,
/// merges the filtered v-disparity into one colour channel of that overlay,
/// and draws the estimator's fitted lines on top.
void draw_ground_plane_estimator(const GroundPlaneEstimator &ground_plane_estimator,
                                 const AbstractVideoInput::input_image_view_t &input_view,
                                 const StereoCameraCalibration &stereo_calibration,
                                 boost::gil::rgb8_view_t &screen_view)
{
    // background: the (right) input frame, converted to the screen format
    copy_and_convert_pixels(input_view, screen_view);

    // carve a top-left panel sized like the raw v-disparity image
    const GroundPlaneEstimator::v_disparity_const_view_t raw_vdisp =
            ground_plane_estimator.get_raw_v_disparity_view();
    boost::gil::rgb8_view_t vdisp_panel =
            boost::gil::subimage_view(screen_view,
                                      0, 0,
                                      raw_vdisp.width(), raw_vdisp.height());

    // blit the raw v-disparity image into the panel
    copy_and_convert_pixels(raw_vdisp, vdisp_panel);

    // overwrite channel 0 of the panel with the filtered v-disparity.
    // NOTE(review): kth_channel_view<0> is the RED channel of an rgb8 view,
    // but the original comment said "blue" (which would be <2>) — confirm
    // which channel is actually intended.
    const GroundPlaneEstimator::v_disparity_const_view_t filtered_vdisp =
            ground_plane_estimator.get_v_disparity_view();
    copy_pixels(filtered_vdisp, boost::gil::kth_channel_view<0>(vdisp_panel));

    // finally, draw the estimated v-disparity lines over the panel
    draw_v_disparity_lines(ground_plane_estimator,
                           stereo_calibration,
                           vdisp_panel);

    return;
} // end of draw_ground_plane_estimator(const GroundPlaneEstimator &, ...)
/// Application main loop: grabs frames from the GStreamer video input,
/// runs the FAST feature detector on a grayscale copy, draws the detected
/// features over the colour frame, and shows the result in a CImg window
/// until the user closes it.
/// @param options  parsed command-line options forwarded to the video input
///                 and the feature detector.
/// @return 0 on normal termination (window closed by the user).
int FeaturesDetectionApplication::main_loop(program_options::variables_map &options)
{
    printf("FeaturesDetectionApplication::main_loop says hello world !\n");

    //init_gui(options);
    //run_gui();

    // initialization ---
    gst_video_input_p.reset(new GstVideoInput(options));
    features_detector_p.reset(new SimpleFAST(options));

    // video output: grab a first frame so the display can be sized ---
    rgb8_cimg_t frame(gst_video_input_p->get_image_dimensions());
    gst_video_input_p->get_new_image(frame.view); // copies the pixel data

    CImgDisplay display(frame.dimx(), frame.dimy(), get_application_title().c_str());
    display.show();
    display.display(frame);

    // intermediary grayscale buffer reused across iterations --
    gray8_image_t grayscale(frame.view.dimensions());

    // main loop: runs at least once, exits when the window is closed ---
    while (true)
    {
        // get the next frame --
        gst_video_input_p->get_new_image(frame.view); // copies the pixel data

        // colour -> gray conversion for the detector
        copy_and_convert_pixels(frame.view, boost::gil::view(grayscale));

        // detect features on the grayscale image
        const vector<FASTFeature> &features =
            features_detector_p->detect_features((const_view(grayscale)));

        // plot the features on the colour frame and refresh the window
        draw_features(features, frame);
        display.display(frame);

        // throttle the loop ---
        wait_some_seconds(0.1); // [seconds]

        if (display.is_closed != false)
        {
            break;
        }
    }

    return 0;
}
void draw_ground_plane_estimator(const FastGroundPlaneEstimator &ground_plane_estimator,
                                 const AbstractVideoInput::input_image_view_t &input_view,
                                 const StereoCameraCalibration &stereo_calibration,
                                 boost::gil::rgb8_view_t &screen_view)
{

    // copy right screen image ---
    copy_and_convert_pixels(input_view, screen_view);

    // draw v-disparity image in the right screen image --
    FastGroundPlaneEstimator::v_disparity_const_view_t raw_v_disparity_view =
            ground_plane_estimator.get_v_disparity_view();

    boost::gil::rgb8_view_t screen_subview = boost::gil::subimage_view(screen_view,
                                                                       0,0,
                                                                       raw_v_disparity_view.width(),
                                                                       screen_view.height());
    fill_pixels(screen_subview, boost::gil::rgb8_pixel_t());

    boost::gil::rgb8_view_t screen_subsubview = boost::gil::subimage_view(screen_subview,
                                                                          0, raw_v_disparity_view.height(),
                                                                          raw_v_disparity_view.width(),
                                                                          raw_v_disparity_view.height());
    Eigen::MatrixXf v_disparity_data;
    v_disparity_data_to_matrix(ground_plane_estimator.get_v_disparity(),
                               v_disparity_data);
    normalize_each_row(v_disparity_data);
    draw_matrix(v_disparity_data, screen_subsubview);

    if(false)
    {
        log_debug() << "(over)Writing ground_v_disparity_data.png" << std::endl;
        boost::gil::png_write_view("ground_v_disparity_data.png", screen_subsubview);
    }

    const bool draw_lines_on_top = true;
    if(draw_lines_on_top)
    {
        draw_v_disparity_lines(ground_plane_estimator,
                               stereo_calibration,
                               screen_subview);

        // draw the points used to estimate the objects
        typedef std::pair<int, int> point_t;
        const FastGroundPlaneEstimator::points_t &points = ground_plane_estimator.get_points();
        BOOST_FOREACH(const point_t &point, points)
        {
            *screen_subsubview.at(point.first, point.second) = rgb8_colors::orange;
        }
    }