Example #1
void scan3d::reconstruct_model_simple(Pointcloud & pointcloud, CalibrationData const& calib, 
                                cv::Mat const& pattern_image, cv::Mat const& min_max_image, cv::Mat const& color_image,
                                cv::Size const& projector_size, int threshold, double max_dist, QWidget * parent_widget)
{
    if (!pattern_image.data || pattern_image.type()!=CV_32FC2)
    {   //pattern not correctly decoded
        std::cerr << "[reconstruct_model] ERROR invalid pattern_image\n";
        return;
    }
    if (!min_max_image.data || min_max_image.type()!=CV_8UC2)
    {   //pattern not correctly decoded
        std::cerr << "[reconstruct_model] ERROR invalid min_max_image\n";
        return;
    }
    if (color_image.data && color_image.type()!=CV_8UC3)
    {   //not standard RGB image
        std::cerr << "[reconstruct_model] ERROR invalid color_image\n";
        return;
    }
    if (!calib.is_valid())
    {   //invalid calibration
        return;
    }

    //parameters
    //const unsigned threshold = config.value("main/shadow_threshold", 70).toUInt();
    //const double   max_dist  = config.value("main/max_dist_threshold", 40).toDouble();
    //const bool     remove_background = config.value("main/remove_background", true).toBool();
    //const double   plane_dist = config.value("main/plane_dist", 100.0).toDouble();
    double plane_dist = 100.0;

    /* background removal
    cv::Point2i plane_coord[3];
    plane_coord[0] = cv::Point2i(config.value("background_plane/x1").toUInt(), config.value("background_plane/y1").toUInt());
    plane_coord[1] = cv::Point2i(config.value("background_plane/x2").toUInt(), config.value("background_plane/y2").toUInt());
    plane_coord[2] = cv::Point2i(config.value("background_plane/x3").toUInt(), config.value("background_plane/y3").toUInt());

    if (plane_coord[0].x<=0 || plane_coord[0].x>=pattern_local.cols
        || plane_coord[0].y<=0 || plane_coord[0].y>=pattern_local.rows)
    {
        plane_coord[0] = cv::Point2i(50, 50);
        config.setValue("background_plane/x1", plane_coord[0].x);
        config.setValue("background_plane/y1", plane_coord[0].y);
    }
    if (plane_coord[1].x<=0 || plane_coord[1].x>=pattern_local.cols
        || plane_coord[1].y<=0 || plane_coord[1].y>=pattern_local.rows)
    {
        plane_coord[1] = cv::Point2i(50, pattern_local.rows-50);
        config.setValue("background_plane/x2", plane_coord[1].x);
        config.setValue("background_plane/y2", plane_coord[1].y);
    }
    if (plane_coord[2].x<=0 || plane_coord[2].x>=pattern_local.cols
        || plane_coord[2].y<=0 || plane_coord[2].y>=pattern_local.rows)
    {
        plane_coord[2] = cv::Point2i(pattern_local.cols-50, 50);
        config.setValue("background_plane/x3", plane_coord[2].x);
        config.setValue("background_plane/y3", plane_coord[2].y);
    }
    */

    //init point cloud
    int scale_factor = 1;
    int out_cols = pattern_image.cols/scale_factor;
    int out_rows = pattern_image.rows/scale_factor;
    pointcloud.clear();
    pointcloud.init_points(out_rows, out_cols);
    pointcloud.init_color(out_rows, out_cols);

    //progress
    QProgressDialog * progress = NULL;
    if (parent_widget)
    {
        progress = new QProgressDialog("Reconstruction in progress.", "Abort", 0, pattern_image.rows, parent_widget, 
                                        Qt::Dialog|Qt::CustomizeWindowHint|Qt::WindowCloseButtonHint);
        progress->setWindowModality(Qt::WindowModal);
        progress->setWindowTitle("Processing");
        progress->setMinimumWidth(400);
    }

    //take 3 points in back plane
    /*cv::Mat plane;
    if (remove_background)
    {
        cv::Point3d p[3];
        for (unsigned i=0; i<3;i++)
        {
            for (unsigned j=0; 
                j<10 && (
                    INVALID(pattern_local.at<cv::Vec2f>(plane_coord[i].y, plane_coord[i].x)[0])
                    || INVALID(pattern_local.at<cv::Vec2f>(plane_coord[i].y, plane_coord[i].x)[1])); j++)
            {
                plane_coord[i].x += 1.f;
            }
            const cv::Vec2f & pattern = pattern_local.at<cv::Vec2f>(plane_coord[i].y, plane_coord[i].x);

            const float col = pattern[0];
            const float row = pattern[1];

            if (projector_size.width<=static_cast<int>(col) || projector_size.height<=static_cast<int>(row))
            {   //abort
                continue;
            }

            //shoot a ray through the image: u=\lambda*v + q
            cv::Point3d u1 = camera.to_world_coord(plane_coord[i].x, plane_coord[i].y);
            cv::Point3d v1 = camera.world_ray(plane_coord[i].x, plane_coord[i].y);

            //shoot a ray through the projector: u=\lambda*v + q
            cv::Point3d u2 = projector.to_world_coord(col, row);
            cv::Point3d v2 = projector.world_ray(col, row);

            //compute ray-ray approximate intersection
            double distance = 0.0;
            p[i] = geometry::approximate_ray_intersection(v1, u1, v2, u2, &distance);
            std::cout << "Plane point " << i << " distance " << distance << std::endl;
        }
        plane = geometry::get_plane(p[0], p[1], p[2]);
        if (cv::Mat(plane.rowRange(0,3).t()*cv::Mat(cv::Point3d(p[0].x, p[0].y, p[0].z-1.0)) + plane.at<double>(3,0)).at<double>(0,0)
                <0.0)
        {
            plane = -1.0*plane;
        }
        std::cout << "Background plane: " << plane << std::endl;
    }
    */

    cv::Mat Rt = calib.R.t();   //rotation transpose, passed to triangulate_stereo below

    unsigned good = 0;
    unsigned bad  = 0;
    unsigned invalid = 0;
    unsigned repeated = 0;
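    //for every camera pixel: look up the decoded projector coordinate, reject
    //shadowed or out-of-range pixels, then triangulate the camera ray against the projector ray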
    for (int h=0; h<pattern_image.rows; h+=scale_factor)
    {
        if (progress && h%4==0)
        {
            progress->setValue(h);
            progress->setLabelText(QString("Reconstruction in progress: %1 good points/%2 bad points").arg(good).arg(bad));
            QApplication::instance()->processEvents();
        }
        if (progress && progress->wasCanceled())
        {   //abort
            pointcloud.clear();
            return;
        }

        const cv::Vec2f * curr_pattern_row = pattern_image.ptr<cv::Vec2f>(h);
        const cv::Vec2b * min_max_row = min_max_image.ptr<cv::Vec2b>(h);
        for (int w=0; w<pattern_image.cols; w+=scale_factor)
        {
            double distance = max_dist;  //quality measure
            cv::Point3d p;               //reconstructed point
            //cv::Point3d normal(0.0, 0.0, 0.0);

            const cv::Vec2f & pattern = curr_pattern_row[w];
            const cv::Vec2b & min_max = min_max_row[w];

            if (sl::INVALID(pattern) || pattern[0]<0.f || pattern[1]<0.f
                || (min_max[1]-min_max[0])<static_cast<int>(threshold))
            {   //skip
                invalid++;
                continue;
            }

            const float col = pattern[0];
            const float row = pattern[1];

            if (projector_size.width<=static_cast<int>(col) || projector_size.height<=static_cast<int>(row))
            {   //abort
                continue;
            }

            cv::Vec3f & cloud_point = pointcloud.points.at<cv::Vec3f>(h/scale_factor, w/scale_factor);
            if (!sl::INVALID(cloud_point[0]))
            {   //point already reconstructed!
                repeated++;
                continue;
            }

            //standard per-pixel triangulation: camera pixel (w,h) vs. decoded projector pixel (col,row)
            cv::Point2d p1(w, h);
            cv::Point2d p2(col, row);
            triangulate_stereo(calib.cam_K, calib.cam_kc, calib.proj_K, calib.proj_kc, Rt, calib.T, p1, p2, p, &distance);

            //save texture coordinates
            /*
            normal.x = static_cast<float>(w)/static_cast<float>(color_image.cols);
            normal.y = static_cast<float>(h)/static_cast<float>(color_image.rows);
            normal.z = 0;
            */

            if (distance < max_dist)
            {   //good point

                //evaluate the plane
                double d = plane_dist+1;
                /*if (remove_background)
                {
                    d = cv::Mat(plane.rowRange(0,3).t()*cv::Mat(p) + plane.at<double>(3,0)).at<double>(0,0);
                }*/
                if (d>plane_dist)
                {   //object point, keep
                    good++;

                    cloud_point[0] = p.x;
                    cloud_point[1] = p.y;
                    cloud_point[2] = p.z;

                    //normal
                    /*cpoint.normal_x = normal.x;
                    cpoint.normal_y = normal.y;
                    cpoint.normal_z = normal.z;*/

                    if (color_image.data)
                    {
                        const cv::Vec3b & vec = color_image.at<cv::Vec3b>(h, w);
                        cv::Vec3b & cloud_color = pointcloud.colors.at<cv::Vec3b>(h/scale_factor, w/scale_factor);
                        cloud_color[0] = vec[0];
                        cloud_color[1] = vec[1];
                        cloud_color[2] = vec[2];
                    }
                }
            }
            else
            {   //skip
                bad++;
                //std::cout << " d = " << distance << std::endl;
            }
        }   //for each column
    }   //for each row

    if (progress)
    {
        progress->setValue(pattern_image.rows);
        progress->close();
        delete progress;
        progress = NULL;
    }

    std::cout << "Reconstructed points[simple]: " << good << " (" << bad << " skipped, " << invalid << " invalid) " << std::endl
                << " - repeated points: " << repeated << " (ignored) " << std::endl;
}
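
A minimal call-site sketch for the function above (not part of the original source): it assumes the decoded pattern_image (CV_32FC2) and min_max_image (CV_8UC2) come from the pattern-decoding stage, reuses the threshold and max_dist defaults visible in the commented-out config lines (70 and 40), and makes up the projector resolution and the helper name example_reconstruct_simple for illustration.

//illustrative call site; project headers providing Pointcloud, CalibrationData
//and scan3d::reconstruct_model_simple are assumed to be included
#include <opencv2/core/core.hpp>

void example_reconstruct_simple(CalibrationData const& calib,
                                cv::Mat const& pattern_image,   //CV_32FC2 decoded projector coordinates
                                cv::Mat const& min_max_image,   //CV_8UC2 per-pixel min/max intensities
                                cv::Mat const& color_image)     //CV_8UC3, may be empty
{
    Pointcloud pointcloud;
    cv::Size projector_size(1024, 768); //assumed projector resolution
    int threshold   = 70;               //shadow threshold (default from the commented config)
    double max_dist = 40.0;             //max ray-ray distance (default from the commented config)

    scan3d::reconstruct_model_simple(pointcloud, calib, pattern_image, min_max_image,
                                     color_image, projector_size, threshold, max_dist,
                                     NULL /*no progress dialog*/);

    //pointcloud.points now holds one CV_32FC3 entry per camera pixel; entries that
    //were not reconstructed keep their invalid (NaN) initialization
}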
Example #2
void scan3d::reconstruct_model_patch_center(Pointcloud & pointcloud, CalibrationData const& calib, 
                                cv::Mat const& pattern_image, cv::Mat const& min_max_image, cv::Mat const& color_image,
                                cv::Size const& projector_size, int threshold, double max_dist, QWidget * parent_widget)
{
    if (!pattern_image.data || pattern_image.type()!=CV_32FC2)
    {   //pattern not correctly decoded
        std::cerr << "[reconstruct_model] ERROR invalid pattern_image\n";
        return;
    }
    if (!min_max_image.data || min_max_image.type()!=CV_8UC2)
    {   //pattern not correctly decoded
        std::cerr << "[reconstruct_model] ERROR invalid min_max_image\n";
        return;
    }
    if (color_image.data && color_image.type()!=CV_8UC3)
    {   //not standard RGB image
        std::cerr << "[reconstruct_model] ERROR invalid color_image\n";
        return;
    }
    if (!calib.is_valid())
    {   //invalid calibration
        return;
    }

    //parameters
    //const unsigned threshold = config.value("main/shadow_threshold", 70).toUInt();
    //const double   max_dist  = config.value("main/max_dist_threshold", 40).toDouble();
    //const bool     remove_background = config.value("main/remove_background", true).toBool();
    //const double   plane_dist = config.value("main/plane_dist", 100.0).toDouble();
    double plane_dist = 100.0;

    /* background removal
    cv::Point2i plane_coord[3];
    plane_coord[0] = cv::Point2i(config.value("background_plane/x1").toUInt(), config.value("background_plane/y1").toUInt());
    plane_coord[1] = cv::Point2i(config.value("background_plane/x2").toUInt(), config.value("background_plane/y2").toUInt());
    plane_coord[2] = cv::Point2i(config.value("background_plane/x3").toUInt(), config.value("background_plane/y3").toUInt());

    if (plane_coord[0].x<=0 || plane_coord[0].x>=pattern_local.cols
        || plane_coord[0].y<=0 || plane_coord[0].y>=pattern_local.rows)
    {
        plane_coord[0] = cv::Point2i(50, 50);
        config.setValue("background_plane/x1", plane_coord[0].x);
        config.setValue("background_plane/y1", plane_coord[0].y);
    }
    if (plane_coord[1].x<=0 || plane_coord[1].x>=pattern_local.cols
        || plane_coord[1].y<=0 || plane_coord[1].y>=pattern_local.rows)
    {
        plane_coord[1] = cv::Point2i(50, pattern_local.rows-50);
        config.setValue("background_plane/x2", plane_coord[1].x);
        config.setValue("background_plane/y2", plane_coord[1].y);
    }
    if (plane_coord[2].x<=0 || plane_coord[2].x>=pattern_local.cols
        || plane_coord[2].y<=0 || plane_coord[2].y>=pattern_local.rows)
    {
        plane_coord[2] = cv::Point2i(pattern_local.cols-50, 50);
        config.setValue("background_plane/x3", plane_coord[2].x);
        config.setValue("background_plane/y3", plane_coord[2].y);
    }
    */

    //init point cloud
    //initialize the point cloud data to NaN; its size is the (scaled) projector resolution
    int scale_factor_x = 1;
    int scale_factor_y = (projector_size.width>projector_size.height ? 1 : 2); //XXX HACK: preserve regular aspect ratio XXX HACK
    int out_cols = projector_size.width/scale_factor_x;
    int out_rows = projector_size.height/scale_factor_y;
    pointcloud.clear();
    pointcloud.init_points(out_rows, out_cols); //points member initialized to NaN
    pointcloud.init_color(out_rows, out_cols);  //colors member initialized to a white image

    //progress: this variant does not create a progress dialog (parent_widget is unused)

    //take 3 points in back plane
    /*cv::Mat plane;
    if (remove_background)
    {
        cv::Point3d p[3];
        for (unsigned i=0; i<3;i++)
        {
            for (unsigned j=0; 
                j<10 && (
                    INVALID(pattern_local.at<cv::Vec2f>(plane_coord[i].y, plane_coord[i].x)[0])
                    || INVALID(pattern_local.at<cv::Vec2f>(plane_coord[i].y, plane_coord[i].x)[1])); j++)
            {
                plane_coord[i].x += 1.f;
            }
            const cv::Vec2f & pattern = pattern_local.at<cv::Vec2f>(plane_coord[i].y, plane_coord[i].x);

            const float col = pattern[0];
            const float row = pattern[1];

            if (projector_size.width<=static_cast<int>(col) || projector_size.height<=static_cast<int>(row))
            {   //abort
                continue;
            }

            //shoot a ray through the image: u=\lambda*v + q
            cv::Point3d u1 = camera.to_world_coord(plane_coord[i].x, plane_coord[i].y);
            cv::Point3d v1 = camera.world_ray(plane_coord[i].x, plane_coord[i].y);

            //shoot a ray through the projector: u=\lambda*v + q
            cv::Point3d u2 = projector.to_world_coord(col, row);
            cv::Point3d v2 = projector.world_ray(col, row);

            //compute ray-ray approximate intersection
            double distance = 0.0;
            p[i] = geometry::approximate_ray_intersection(v1, u1, v2, u2, &distance);
            std::cout << "Plane point " << i << " distance " << distance << std::endl;
        }
        plane = geometry::get_plane(p[0], p[1], p[2]);
        if (cv::Mat(plane.rowRange(0,3).t()*cv::Mat(cv::Point3d(p[0].x, p[0].y, p[0].z-1.0)) + plane.at<double>(3,0)).at<double>(0,0)
                <0.0)
        {
            plane = -1.0*plane;
        }
        std::cout << "Background plane: " << plane << std::endl;
    }
    */

    //candidate points: key = linear projector-pixel index; proj_points keeps the (scaled)
    //projector coordinate, cam_points the camera pixels that decoded to it
    QMap<unsigned, cv::Point2f> proj_points;
    QMap<unsigned, std::vector<cv::Point2f> > cam_points;

    //cv::Mat proj_image = cv::Mat::zeros(out_rows, out_cols, CV_8UC3);

    unsigned good = 0;
    unsigned bad  = 0;
    unsigned invalid = 0;   //reported at the end but never updated in this variant
    unsigned repeated = 0;  //reported at the end but never updated in this variant
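    //first pass: for every valid camera pixel, record the projector pixel it decoded to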
    for (int h=0; h<pattern_image.rows; h++)
    {

        const cv::Vec2f * curr_pattern_row = pattern_image.ptr<cv::Vec2f>(h);
        const cv::Vec2b * min_max_row = min_max_image.ptr<cv::Vec2b>(h);
        for (int w=0; w<pattern_image.cols; w++)
        {
            const cv::Vec2f & pattern = curr_pattern_row[w];
            const cv::Vec2b & min_max = min_max_row[w];

            if (sl::INVALID(pattern) 
                || pattern[0]<0.f || pattern[0]>=projector_size.width || pattern[1]<0.f || pattern[1]>=projector_size.height
                || (min_max[1]-min_max[0])<static_cast<int>(threshold))
            {   //skip
                continue;
            }

            //ok
            cv::Point2f proj_point(pattern[0]/scale_factor_x, pattern[1]/scale_factor_y);

            unsigned index = static_cast<unsigned>(proj_point.y)*out_cols + static_cast<unsigned>(proj_point.x); //linear index into the (scaled) projector image

            proj_points.insert(index, proj_point);

             //std::cout<<"index:  "<<index<<std::endl;
            cam_points[index].push_back(cv::Point2f(w, h));

            //std::cout<<"cam_point:  "<<cam_points[index]<<std::endl;

            //proj_image.at<cv::Vec3b>(static_cast<unsigned>(proj_point.y), static_cast<unsigned>(proj_point.x)) = color_image.at<cv::Vec3b>(h, w);
        }

    }


    //cv::imwrite("proj_image.png", proj_image);

    cv::Mat Rt = calib.R.t();


    //second pass: for each projector pixel, average its camera pixels and triangulate once
    QMapIterator<unsigned, cv::Point2f> iter1(proj_points);
    unsigned n = 0;
    while (iter1.hasNext()) 
    {
        n++;
        iter1.next();
        unsigned index = iter1.key();
        const cv::Point2f & proj_point = iter1.value();
        const std::vector<cv::Point2f> & cam_point_list = cam_points.value(index);
        const unsigned count = static_cast<unsigned>(cam_point_list.size());

        if (!count)
        {   //empty list
            continue;
        }

        //center average (sum2 accumulates squared coordinates but is not used below)
        cv::Point2d sum(0.0, 0.0), sum2(0.0, 0.0);
        for (std::vector<cv::Point2f>::const_iterator iter2=cam_point_list.begin(); iter2!=cam_point_list.end(); iter2++)
        {
            sum.x += iter2->x;
            sum.y += iter2->y;
            sum2.x += (iter2->x)*(iter2->x);
            sum2.y += (iter2->y)*(iter2->y);
        }
        cv::Point2d cam(sum.x/count, sum.y/count);  //centroid of the camera pixels for this projector pixel
        cv::Point2d proj(proj_point.x*scale_factor_x, proj_point.y*scale_factor_y);

        //triangulate
        double distance = max_dist;  //quality measure
        cv::Point3d p;          //reconstructed point
        triangulate_stereo(calib.cam_K, calib.cam_kc, calib.proj_K, calib.proj_kc, Rt, calib.T, cam, proj, p, &distance);

        if (distance < max_dist)
        {   //good point

            //evaluate the plane
            double d = plane_dist+1;
            /*if (remove_background)
            {
                d = cv::Mat(plane.rowRange(0,3).t()*cv::Mat(p) + plane.at<double>(3,0)).at<double>(0,0);
            }*/
            if (d>plane_dist)
            {   //object point, keep
                good++;

                cv::Vec3f & cloud_point = pointcloud.points.at<cv::Vec3f>(proj_point.y, proj_point.x);
                cloud_point[0] = p.x;
                cloud_point[1] = p.y;
                cloud_point[2] = p.z;
                //std::cout << " pointcloud.points= " <<cloud_point << std::endl;

                if (color_image.data) //store the color value corresponding to this pixel
                {
                    const cv::Vec3b & vec = color_image.at<cv::Vec3b>(static_cast<unsigned>(cam.y), static_cast<unsigned>(cam.x));
                    cv::Vec3b & cloud_color = pointcloud.colors.at<cv::Vec3b>(proj_point.y, proj_point.x);
                    cloud_color[0] = vec[0];
                    cloud_color[1] = vec[1];
                    cloud_color[2] = vec[2];
                }
            }

        }
        else
        {   //skip
            bad++;
            //std::cout << " d = " << distance << std::endl;
        }
    }   //while



    std::cout << "Reconstructed points [patch center]: " << good << " (" << bad << " skipped, " << invalid << " invalid) " << std::endl
                << " - repeated points: " << repeated << " (ignored) " << std::endl;
}
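
The patch-center variant above differs from reconstruct_model_simple in that it first groups every camera pixel by the projector pixel it decoded to, then triangulates a single averaged correspondence per projector pixel, so the resulting cloud is indexed by (scaled) projector coordinates instead of camera coordinates. The standalone sketch below shows only that grouping-and-centroid step; it is illustrative, drops the scale factors, the min/max shadow test and the Qt containers, and uses std::map in their place.

#include <cmath>
#include <map>
#include <vector>
#include <opencv2/core/core.hpp>

//group camera pixels by decoded projector pixel and return one centroid per group
std::map<unsigned, cv::Point2d> patch_centers(cv::Mat const& pattern_image,   //CV_32FC2
                                              cv::Size const& projector_size)
{
    std::map<unsigned, std::vector<cv::Point2f> > groups;
    for (int h=0; h<pattern_image.rows; h++)
    {
        const cv::Vec2f * row = pattern_image.ptr<cv::Vec2f>(h);
        for (int w=0; w<pattern_image.cols; w++)
        {
            const cv::Vec2f & pattern = row[w];
            if (std::isnan(pattern[0]) || std::isnan(pattern[1])
                || pattern[0]<0.f || pattern[0]>=projector_size.width
                || pattern[1]<0.f || pattern[1]>=projector_size.height)
            {   //no valid decode for this camera pixel
                continue;
            }
            unsigned index = static_cast<unsigned>(pattern[1])*projector_size.width
                           + static_cast<unsigned>(pattern[0]);
            groups[index].push_back(cv::Point2f(static_cast<float>(w), static_cast<float>(h)));
        }
    }

    std::map<unsigned, cv::Point2d> centers;
    for (std::map<unsigned, std::vector<cv::Point2f> >::const_iterator it=groups.begin(); it!=groups.end(); ++it)
    {
        cv::Point2d sum(0.0, 0.0);
        for (size_t k=0; k<it->second.size(); k++)
        {
            sum.x += it->second[k].x;
            sum.y += it->second[k].y;
        }
        //centroid of all camera pixels that decoded to this projector pixel
        centers[it->first] = cv::Point2d(sum.x/it->second.size(), sum.y/it->second.size());
    }
    return centers;
}

Each centroid would then be paired with its projector pixel and passed to triangulate_stereo exactly as in the loop above.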